blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a1bec318cc7e715bee1656bd922740810efa5961 | Python | LeafyQ/MIT | /CS6.0002/3Graph.py | UTF-8 | 111 | 2.875 | 3 | [] | no_license | #
class Vertex(object):
    """A named vertex in a graph."""

    def __init__(self, name):
        """Store the vertex's display name."""
        self.name = name

    def __str__(self):
        # BUG FIX: __str__ must *return* a string. The original used a
        # Python 2 `print self.name` statement, which is a syntax error in
        # Python 3 and would make str(vertex) return None in Python 2.
        return self.name
| true |
abf8ae700b2b37584f7f39bb35d905c56cd19dc6 | Python | johnatanbrayan/uri_online_judge_solved_in_python | /uriBeginner/1008_salary.py | UTF-8 | 1,104 | 4.625 | 5 | [] | no_license | '''
# -*- coding: utf-8 -*-
====1008_salary====
Write a program that reads an employee's number, his/her worked hours number in a month and the amount he received per hour.
Print the employee's number and salary that he/she will receive at end of the month, with two decimal places.
Don’t forget to print the line's end after the result, otherwise you will receive “Presentation Error”.
Don’t forget the space before and after the equal signal and after the U$.
Input
The input file contains 2 integer numbers and 1 value of floating point, representing the number, worked hours amount and the
amount the employee receives per worked hour.
Output
Print the number and the employee's salary, according to the given example, with a blank space before and after the equal signal.
=========Result_Test=========
====Input====
25
100
5.50
====Output====
NUMBER = 25
SALARY = U$ 550.00
'''
# Read the employee number, hours worked in the month, and hourly wage
# from standard input (one value per line).
emp_number = int(input())
hours_worked = int(input())
hourly_rate = float(input())

# Report the employee number and the month's salary with two decimal
# places, exactly as the judge expects.
print("NUMBER = {}\nSALARY = U$ {:.2f}".format(emp_number, hours_worked * hourly_rate))
| true |
b3ba3f2d800701ff86827383f1a05ac8a8e11672 | Python | dfs-a/complete--script | /Excel/Excel_script/presell_list.py | UTF-8 | 9,805 | 2.859375 | 3 | [] | no_license | import xlwings as xw
"""
思路:
1,取出预售表中的项目名称与板块对应表中的项目名称
2,将两张表中的项目名称作比较,取板块对应表中的板块
"""
# Start an invisible Excel application instance (no workbook auto-created).
app = xw.App(visible=False,add_book=False)
# Output workbook that will receive the generated summary sheet.
wb = app.books.add()
# Output worksheet.
sht1 = wb.sheets["sheet1"]
class presell:
    """Builds a pre-sale permit summary workbook.

    Reads project rows from the monthly pre-sale spreadsheet and a
    project-to-district ("plate") mapping spreadsheet, matches them up,
    and writes a combined summary sheet via the module-level
    `app`/`wb`/`sht1` objects.
    """

    def __init__(self):
        # Open the pre-sale info workbook (P_l -> Presell_list).
        self.P_l = app.books.open("../Excel_Test/9月预售信息_原.xlsx")
        # Open the project-to-district mapping workbook (Ple_li -> plate_list).
        self.Ple_li = app.books.open('../Excel_Test/板块对应表.xlsx')

    def Get_project_name(self):
        """Return (sorted project names, raw name column, the worksheet).

        Projects that have BOTH commercial and residential units appear
        twice in the returned sorted list (one row per property type);
        single-type projects appear once.
        """
        # Select the worksheet.
        sht = self.P_l.sheets["Sheet1"]
        # Read the project-name column (pt_name -> project_name).
        pt_name = sht.range("c4:c18").value
        # Manual corrections of two project names in the source sheet.
        pt_name[1] = "滨江公馆"
        pt_name[-1] = "集美郡"
        pt_name_dict = {}
        for k,v in enumerate(pt_name):
            pt_name_dict[k] = v
        # Commercial-unit counts column.
        business_home = sht.range("e4:e18").value
        # Residential-unit counts column.
        residence_home = sht.range("f4:f18").value
        # Split projects into dual-type (both columns filled) and
        # single-type, then duplicate the dual-type ones.
        twos = []
        ones = []
        for pt_names,busines_home,residences_home in zip(pt_name,business_home,residence_home):
            if busines_home is not None and residences_home is not None:
                twos.append(pt_names)
            else:
                ones.append(pt_names)
        li = twos*2
        for i in li:
            ones.append(i)
        ones.sort()
        return ones,pt_name,sht

    def Plates(self,ones):
        """Return the district ("plate") of each entry of *ones*, in order."""
        # Select the mapping worksheet.
        sht_two = self.Ple_li.sheets['Sheet1']
        # Project names and their districts from the mapping table
        # (pt_name_two -> project_name, plates -> plate).
        pt_name_two = sht_two.range("a2:a93").value
        # Manual corrections of two mapping-table names.
        pt_name_two[75] = '吾悦广场商住项目4号、5号、6号地块'
        pt_name_two[1] = '阳光尚都'
        plates = sht_two.range("b2:b93").value
        # Keep only mapping rows whose project appears in *ones* ...
        dic = {}
        for pt_names,plate in zip(pt_name_two,plates):
            if pt_names in ones:
                dic[pt_names] = plate
        # ... then emit one district per entry of *ones*, preserving order.
        one_list = []
        for one in ones:
            if one in dic.keys():
                one_list.append(dic[one])
        return one_list

    def Month(self):
        """Return the hard-coded reporting month."""
        Month_ = "2020/9/1"
        return Month_

    def Prepare(self, pt_name, ones, sht):
        """Collect permit number, building ids, property types, unit
        counts, areas and developer per output row.

        NOTE(review): this method mutates *pt_name* and *ones* IN PLACE
        (renaming a few projects so duplicate rows get distinct keys);
        callers share those lists.
        """
        # Permit-number column.
        prepare_name = sht.range("k4:k18").value
        prepare_dict = {}
        pt_name_1 = pt_name
        # Give a few projects suffixed aliases so the dict keys stay unique.
        pt_name_1[5] = "吾悦广场商住项目4号、5号、6号地块_1"
        pt_name_1[7] = "安康万达ONE_1"
        pt_name_1[10] = "长兴锦源_1"
        for prt_name,prepare_names in zip(pt_name_1,prepare_name):
            prepare_dict[prt_name]=prepare_names
        ones_1 = ones
        # Apply the matching aliases to the sorted output list.
        ones_1[9] = "安康万达ONE_1"
        ones_1[14] = "长兴锦源_1"
        ones_1[4] = '吾悦广场商住项目4号、5号、6号地块_1'
        ones_1[6] = '吾悦广场商住项目4号、5号、6号地块_1'
        # Permit number per output row.
        prepare_key = prepare_dict.keys()
        prepare_id = []
        for i in ones_1:
            if i in prepare_key:
                prepare_id.append(prepare_dict[i])
        # Building numbers per output row.
        Floor_Building = sht.range("d4:d18").value
        Floor_dict = {}
        for prt_name,Floor_id in zip(pt_name_1,Floor_Building):
            Floor_dict[prt_name] = Floor_id
        Floor_key = Floor_dict.keys()
        Floor_id = []
        for i in ones_1:
            if i in Floor_key:
                Floor_id.append(Floor_dict[i])
        # Property type / subdivision: commercial and residential counts.
        business_home = sht.range("e4:e18").value
        residence_home = sht.range("f4:f18").value
        bus_id_dict = {}
        res_id_dict = {}
        for prt_name,business_homes,residences_home in zip(pt_name,business_home,residence_home):
            bus_id_dict[prt_name] = business_homes
            res_id_dict[prt_name] = residences_home
        presell_all_number = bus_id_dict.values()
        residences_all_number_1 = res_id_dict.values()
        # De-duplicate ones_1 while preserving order.
        set_1 = []
        for i in ones_1:
            if i not in set_1:
                set_1.append(i)
        # Commercial counts for each distinct project.
        bus_dict = {}
        for i in set_1:
            if i in bus_id_dict.keys():
                bus_dict[i] = bus_id_dict[i]
        # Residential counts for each distinct project.
        res_dict = {}
        for i in set_1:
            if i in res_id_dict.keys():
                res_dict[i] = res_id_dict[i]
        # Commercial / residential count views.
        dus_id = bus_dict.values()
        res_id = res_dict.values()
        # Property-type label(s) per distinct project: "商业" (commercial),
        # "住宅" (residential), or both for dual-use projects.
        presell_all_number_2 = []
        for dus,res in zip(dus_id,res_id):
            if dus is None and res is not None:
                presell_all_number_2.append("住宅")
            elif dus is not None and res is None:
                presell_all_number_2.append("商业")
            elif dus is not None and res is not None:
                presell_all_number_2.extend(["商业","住宅"])
        # Unit counts per output row (commercial first for dual-use rows).
        presell_number = []
        for dus_s,res_s in zip(bus_dict.values(),res_dict.values()):
            if dus_s is None and res_s is not None:
                presell_number.append(res_s)
            elif dus_s is not None and res_s is None:
                presell_number.append(dus_s)
            elif dus_s is not None and res_s is not None:
                presell_number.extend([dus_s,res_s])
        # Pre-sale areas: commercial and residential area columns.
        business_area = sht.range("h4:h18").value
        residence_area = sht.range("i4:i18").value
        buss_id_dict = {}
        ress_id_dict = {}
        for prt_name, business_one_area, residences_one_area in zip(pt_name, business_area, residence_area):
            buss_id_dict[prt_name] = business_one_area
            ress_id_dict[prt_name] = residences_one_area
        # Commercial areas for each distinct project.
        bus_all_dict = {}
        for i in set_1:
            if i in buss_id_dict.keys():
                bus_all_dict[i] = buss_id_dict[i]
        # Residential areas for each distinct project.
        res_all_dict = {}
        for i in set_1:
            if i in ress_id_dict.keys():
                res_all_dict[i] = ress_id_dict[i]
        # Area per output row, mirroring the unit-count expansion above.
        area = []
        for dus_all,res_all in zip(bus_all_dict.values(),res_all_dict.values()):
            if dus_all is not None and res_all is None:
                area.append(dus_all)
            elif dus_all is None and res_all is not None:
                area.append(res_all)
            else:
                area.extend([dus_all,res_all])
        # Developer column, expanded per output row.
        developers = sht.range("b4:b18").value
        developers_dict = {}
        for prt_name_s,deve in zip(pt_name_1,developers):
            developers_dict[prt_name_s] = deve
        deve_keys = developers_dict.keys()
        deve_list = []
        for i in ones_1:
            if i in deve_keys:
                deve_list.append(developers_dict[i])
        return prepare_id,Floor_id,presell_all_number_2,presell_number,area,deve_list

    def run(self):
        """Assemble all columns and write the summary workbook to disk."""
        ones, pt_name, sht = self.Get_project_name()
        one_list = self.Plates(ones)
        Month_ = self.Month()
        # NOTE(review): Prepare is called twice and the first result is
        # discarded; Prepare mutates pt_name/ones in place, but the
        # mutations assign fixed strings, so the second call returns the
        # same data. The duplicate call looks redundant -- confirm.
        self.Prepare(pt_name,ones,sht)
        prepare_id,Floor_id,presell_all_number_2,presell_number,area,deve_list = self.Prepare(pt_name,ones,sht)
        # Header row of the output sheet.
        sht1.range("a1").value = ['项目名称','月度','板块','证号','预售楼栋','物业类别','物业细分','预售套数','预售面积','开发企业']
        sht1.range("a2").options(transpose=True).value = ones
        sht1.range("c2").options(transpose=True).value = one_list
        # NOTE(review): the month column length (20) is hard-coded rather
        # than len(ones) -- confirm.
        sht1.range("b2").options(transpose=True).value = [Month_ for i in range(20)]
        sht1.range("d2").options(transpose=True).value = prepare_id
        sht1.range("e2").options(transpose=True).value = Floor_id
        # NOTE(review): columns f (property type) and g (subdivision) are
        # both written from presell_all_number_2 -- confirm intent.
        sht1.range("f2").options(transpose=True).value = presell_all_number_2
        sht1.range("g2").options(transpose=True).value = presell_all_number_2
        sht1.range("h2").options(transpose=True).value = presell_number
        sht1.range("i2").options(transpose=True).value = area
        sht1.range("j2").options(transpose=True).value = deve_list
        # Persist the output workbook and shut the Excel instance down.
        wb.save("预售许可统计表.xlsx")
        wb.close()
        app.quit()
app.quit()
if __name__ == '__main__':
    # Build the summary workbook from the two source spreadsheets.
    prese = presell()
    prese.run()
d74a1a8bd6fdac187ce256e45da0089e766c2fbb | Python | sarahfulkerson/python-games | /cardlib.py | UTF-8 | 5,629 | 3.796875 | 4 | [] | no_license | #! /usr/bin/env python3
# https://projecteuler.net/problem=54
from __future__ import print_function
from utillib import values, suits
class Card:
    """
    Represents a playing card in a card game.

    Ordering is determined by the card's position in the module-level
    `values` list imported from utillib.

    Attributes:
        value
            Holds the value of the card.
            Available values: '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A'
        suit
            Holds the suit of the card.
            Available values: 'C', 'D', 'H', 'S'
        suitRank
            When True, all relational operators will consider both values and suits in comparisons.
            When False, all relational operators will consider only values in comparisons.
            Defaults to False. 'suitRank=True' still needs implementation.

    NOTE(review): defining __eq__ without __hash__ makes Card unhashable
    (Python sets __hash__ to None) -- confirm Cards are never used as
    dict keys / set members.
    """
    def __init__(self, value, suit, suitRank=False):
        # Upper-case both fields so lookups into the shared
        # `values`/`suits` tables are case-insensitive.
        self.value = str(value).upper()
        self.suit = suit.upper()
        self.suitRank = suitRank

    def getValue(self):
        """Returns the value of 'value'."""
        return self.value

    def getSuit(self):
        """Returns the value of 'suit'."""
        return self.suit

    def getSuitRank(self):
        """Returns the value of 'suitRank'."""
        return self.suitRank

    def __eq__(self, other):
        """
        self == other
        Returns true if the Cards are equal (by value rank only).
        The suitRank=True branch is unimplemented; it falls through and
        returns False.
        """
        if isinstance(other, Card):
            if self.suitRank == False:
                return values.index(self.value) == values.index(other.value)
            else:
                pass
        return False

    def __ne__(self, other):
        """
        self != other
        Returns true if the Cards are not equal (by value rank only).
        The suitRank=True branch is unimplemented; it falls through and
        returns False.
        """
        if isinstance(other, Card):
            if self.suitRank == False:
                return values.index(self.value) != values.index(other.value)
            else:
                pass
        return False

    def __gt__(self, other):
        """
        self > other
        Returns true if the Card 'self' is greater than Card 'other'.
        NOTE(review): with suitRank=True this returns None (unimplemented).
        """
        if self.suitRank == False:
            return values.index(self.value) > values.index(other.value)
        else:
            pass

    def __ge__(self, other):
        """
        self >= other
        Returns true if the Card 'self' is greater than or equal to Card 'other'.
        NOTE(review): with suitRank=True this returns None (unimplemented).
        """
        if self.suitRank == False:
            return values.index(self.value) >= values.index(other.value)
        else:
            pass

    def __lt__(self, other):
        """
        self < other
        Returns true if the Card 'self' is less than Card 'other'.
        NOTE(review): with suitRank=True this returns None (unimplemented).
        """
        if self.suitRank == False:
            return values.index(self.value) < values.index(other.value)
        else:
            pass

    def __le__(self, other):
        """
        self <= other
        Returns true if the Card 'self' is less than or equal to Card 'other'.
        NOTE(review): with suitRank=True this returns None (unimplemented).
        """
        if self.suitRank == False:
            return values.index(self.value) <= values.index(other.value)
        else:
            pass

    def __repr__(self):
        # repr shows the one-letter suit code ...
        return "%s('%s', '%s')" % (self.__class__.__name__, self.value, self.suit)

    def __str__(self):
        # ... while str shows the full suit name from the `suits` mapping.
        return "%s: value = '%s', suit = '%s'" % (self.__class__.__name__, self.value, suits.get(self.suit))
class Hand(list):
    """
    A list subclass holding Card objects, sorted on construction, with
    helpers for extracting values/suits and finding the high card.
    """

    def __init__(self, *pargs):
        super().__init__()
        self.extend(pargs)
        self.sort()

    def getValues(self, *, distinctvalues=False):
        """
        Return the values of all Cards in the Hand; with
        distinctvalues=True, keep only the first occurrence of each value.
        """
        vals = [card.getValue() for card in self]
        if distinctvalues:
            return list(dict.fromkeys(vals))
        return vals

    def getSuits(self, *, distinctsuits=False):
        """
        Return the suits of all Cards in the Hand; with distinctsuits=True,
        keep only the first occurrence of each suit.
        """
        found = [card.getSuit() for card in self]
        if distinctsuits:
            return list(dict.fromkeys(found))
        return found

    def getHighCard(self, pos=-1):
        """
        Return the card at sorted position *pos* (default -1, i.e. the
        highest card in the hand).
        """
        return sorted(self)[pos]

    def __str__(self):
        lines = ["%s:" % self.__class__.__name__]
        lines.extend('%s\t%s' % (i, str(card))
                     for i, card in enumerate(self, start=1))
        return '\n'.join(lines) + '\n'

    def __repr__(self):
        inner = ', '.join(repr(card) for card in self)
        return '%s(%s)' % (self.__class__.__name__, inner)
class Deck:
    """Placeholder for a full 52-card deck; not yet implemented."""
    # [Card(x,y) for x in suits for y in values]
    pass
if __name__ == '__main__':
    # Demo: build a royal-flush hand of diamonds and run every poker
    # hand-ranking predicate from pokerlib against it.
    from pokerlib import handrankfuncs
    a = Card('a', 'd')
    b = Card('k', 'd')
    c = Card('q', 'd')
    d = Card('j', 'd')
    e = Card('t', 'd')
    h = Hand(a,b,c,d,e)
    # Print a two-column table: hand type vs. predicate result.
    print('%s : %s' % ('~~~Hand type'.ljust(15, '~'), '~~~Result'.ljust(13, '~')))
    for func in handrankfuncs:
        print("%s : %s" % (func.__name__.ljust(15), func(h)))
    print('High card: %s' % h.getHighCard())
| true |
26d3a94a388bd6ffd9246fa5507bdffe061d3a9c | Python | KorsPav/telegram-bot-1 | /bot.py | UTF-8 | 927 | 2.796875 | 3 | [] | no_license | import requests
import telebot
from bs4 import BeautifulSoup
from constants import TOKEN
bot = telebot.TeleBot(TOKEN)
def get_info():
    """Scrape the WHO COVID-19 page for Ukraine for the death count.

    Returns:
        The text of the last <span> whose markup mentions 'deaths',
        with the ' deaths' suffix removed.

    Raises:
        ValueError: if no such span is found on the page.
    """
    response = requests.get('https://covid19.who.int/region/euro/country/ua')
    soup = BeautifulSoup(response.content, 'html.parser')
    spans = soup.find_all('span')
    idx = None
    # BUG FIX: the original used res.index(i), which returns the index of
    # the FIRST span equal to i, not the current position; enumerate gives
    # the real index. Like the original, the last match wins.
    for i, span in enumerate(spans):
        if 'deaths' in str(span):
            idx = i
    # BUG FIX: the original crashed with TypeError (res[None]) when no
    # match was found; fail with an explicit error instead.
    if idx is None:
        raise ValueError("Could not find a 'deaths' entry on the WHO page")
    text = spans[idx].text
    # BUG FIX: str.rstrip(' deaths') strips a *character set* from the
    # right, not the suffix; remove the literal suffix explicitly so no
    # trailing characters of the number can ever be eaten.
    if text.endswith(' deaths'):
        text = text[:-len(' deaths')]
    return text
@bot.message_handler(commands=['start'])
def send_welcome(message):
    # /start handler: confirm the bot is alive and explain usage
    # (reply text is in Russian: "Working. Send any text for an update").
    bot.reply_to(message, "Работаю. Напиши любой текст для получения обновления")
@bot.message_handler(func=lambda message: True)
def reply_any(message):
    # Any other message: scrape the current death count and send it back
    # (message text is in Russian: "Total covid-19 deaths in Ukraine ...").
    num = get_info()
    msg = f'Всего смертей от covid-19 в Украине на данный момент: {num}'
    bot.send_message(message.chat.id, text=msg)

# Block forever, dispatching incoming Telegram updates to the handlers above.
bot.polling()
| true |
32110aa75d4afbe3efefdd124b35d1bbceec5905 | Python | tilfex/Coursera-Python | /02/regular-expression.py | UTF-8 | 259 | 2.71875 | 3 | [] | no_license | import re
# Sum every run of digits found anywhere in the assignment file.
calcsum = 0
# FIX: use a context manager so the file is always closed (the original
# leaked the handle).
with open('regex_sum_1252287.txt') as filehandle:
    for line in filehandle:
        # FIX: call re.findall once per line (the original called it twice,
        # once for the length guard and once for the loop); iterating an
        # empty match list simply adds nothing, so no guard is needed.
        for num in re.findall('[0-9]+', line):
            calcsum = calcsum + int(num)
print(calcsum)
| true |
f8f4b9972bee530577c8095fddae05290e724f90 | Python | happa64/AtCoder_Beginner_Contest | /AGC/AGC003/AGC003-C.py | UTF-8 | 569 | 2.78125 | 3 | [] | no_license | # https://atcoder.jp/contests/agc003/submissions/15716116
# C - BBuBBBlesort!
import sys
from collections import defaultdict
sys.setrecursionlimit(10 ** 7)
input = sys.stdin.readline
f_inf = float('inf')
mod = 10 ** 9 + 7
def resolve():
    """AGC003-C: each element must land on a position of the same parity
    as its sorted position; every parity mismatch pair costs one adjacent
    double-swap, so the answer is (number of mismatches) // 2."""
    n = int(input())
    A = [int(input()) for _ in range(n)]
    # Map each value to its 1-based position in the sorted order.
    target = {}
    for pos, val in enumerate(sorted(A), start=1):
        target[val] = pos
    # Count values whose current and sorted positions differ in parity.
    mismatched = sum(1 for cur, val in enumerate(A, start=1)
                     if target[val] % 2 != cur % 2)
    print(mismatched // 2)
if __name__ == '__main__':
    # Read the test case from stdin and print the answer.
    resolve()
| true |
e8e46ba5bc4b36d31ecab3c9669ff4f709f759b9 | Python | ionvision/frnn | /analysis/build_gifs_readme.py | UTF-8 | 2,719 | 2.6875 | 3 | [] | no_license | import imageio
import glob
import scipy.ndimage as ndim
import scipy.misc as sm
import numpy as np
def preprocess_predictions(frames, height=66, width=82):
    # Normalize every frame to HxWx3, pad each to (height, width) with
    # white, and outline the predicted frames (index >= 5; the first 5 are
    # ground-truth context) with a 1-pixel red border.
    # NOTE(review): the `/ 2` padding arithmetic relies on Python 2
    # integer division -- under Python 3, np.pad would receive floats.
    # Confirm the intended interpreter before reuse.
    # Promote grayscale HxW frames to HxWx1 ...
    frames = [(f if len(f.shape) == 3 else np.expand_dims(f, axis=2)) for f in frames]
    # ... then to three channels by repeating the single channel.
    frames = [(f if f.shape[2] == 3 else np.concatenate([f, f, f], axis=2)) for f in frames]
    # Resize predictions back to the context frames' spatial size.
    frames = frames[:5] + [sm.imresize(f, frames[0].shape[:2]) for f in frames[5:]]
    # Pad frames to fit expected width
    padding = (((height - frames[0].shape[0]) / 2,)*2, ((width - frames[0].shape[1]) / 2,)*2, (0, 0))
    frames = [np.pad(f, padding, 'constant', constant_values=255) for f in frames]
    # Add a red border to each prediction frame (top/bottom/left/right).
    for i in range(5, len(frames)):
        frames[i][:1, :, ...] = [[[255, 0, 0]]]
        frames[i][-1:, :, ...] = [[[255, 0, 0]]]
        frames[i][:, :1, ...] = [[[255, 0, 0]]]
        frames[i][:, -1:, ...] = [[[255, 0, 0]]]
    return frames
def generate_instance_sequence(path):
    """Load the last five ground-truth frames plus all prediction frames
    for one sequence directory and preprocess them for display."""
    ground_truth = sorted(glob.glob(path + 'g*.png'))[-5:]
    predictions = sorted(glob.glob(path + 'frnn_*.png'))
    images = [ndim.imread(name) for name in ground_truth + predictions]
    return preprocess_predictions(images)
def build_sequences(name, paths):
    # Tile the sequences from *paths* into a 3x8 grid (up to 24 cells,
    # 15-px white gutters) and write an animated GIF at 0.5 s per frame.
    # NOTE(review): `(j / 8)` relies on Python 2 integer division; under
    # Python 3 this would produce float row offsets and fail as an index.
    # Prepare sequences
    instances = [generate_instance_sequence(p) for p in paths]
    s_h, s_w = instances[0][0].shape[:2]
    # Prepare blank (white) frames sized for the full 3x8 mosaic.
    frame = 255 * np.ones((3*(s_h + 15) - 15, 8*(s_w + 15) - 15, 3), dtype=np.float32)
    frames = [np.copy(frame) for _ in range(15)]
    # Merge sequences: f holds the i-th frame of every instance; place
    # instance j at grid column j % 8, row j / 8.
    for i, f in enumerate(zip(*instances)):
        for j, m in enumerate(f):
            l, t = (j % 8) * (s_w + 15), (j / 8) * (s_h + 15)
            frames[i][t:t+s_h, l:l+s_w] = m
    # Generate GIF
    imageio.mimsave(name, frames, duration=0.5)
if __name__ == '__main__':
    # Input directory with per-sequence qualitative results, and output
    # location of the README GIF.
    PATH_IN = '/home/moliu/Documents/Papers/Supplementary/images/qualitative/'
    PATH_OUT = '../'
    # 24 hand-picked sequences: mmnist, kth and ucf101 examples.
    build_sequences(PATH_OUT + 'examples.gif', [
        PATH_IN + 'mmnist_l1/s12/', PATH_IN + 'mmnist_l1/s11/', PATH_IN + 'mmnist_l1/s13/',
        PATH_IN + 'mmnist_l1/s17/', PATH_IN + 'mmnist_l1/s20/', PATH_IN + 'mmnist_l1/s21/',
        PATH_IN + 'mmnist_l1/s11_n/', PATH_IN + 'mmnist_l1/s5_n/', PATH_IN + 'kth_l1/s31/',
        PATH_IN + 'kth_l1/s37/', PATH_IN + 'kth_l1/s77/', PATH_IN + 'kth_l1/s23/',
        PATH_IN + 'kth_l1/s43/', PATH_IN + 'kth_l1/s75/', PATH_IN + 'kth_l1/s97/',
        PATH_IN + 'kth_l1/s37_2/', PATH_IN + 'ucf101_l1/s8/', PATH_IN + 'ucf101_l1/s9_last/',
        PATH_IN + 'ucf101_l1/s9_mean/', PATH_IN + 'ucf101_l1/s21/', PATH_IN + 'ucf101_l1/s37/',
        PATH_IN + 'ucf101_l1/s44/', PATH_IN + 'ucf101_l1/s28/', PATH_IN + 'ucf101_l1/s41/',
    ])
| true |
20fa7153852ecaa54a4a6e82fac49a46eef61e72 | Python | playdafuture/YouDMC | /docker/api/rest_api/resources/rating.py | UTF-8 | 2,023 | 2.65625 | 3 | [] | no_license | from flask_restful import Resource, reqparse
from rest_api.models.rating import RatingModel
from flask_jwt_extended import (
jwt_required,
get_jwt_identity
)
class RateComment(Resource):
    """REST endpoint for up/down-voting a comment (one rating per user).

    POST body: comment_id (int) and rating (+1 or -1); the voter's id is
    taken from the JWT. Re-posting the same rating toggles the stored
    value back to 0 (i.e. removes the vote); posting the opposite rating
    replaces it.
    """
    parser = reqparse.RequestParser()
    parser.add_argument(
        'comment_id', type=int, required=True, help="comment_id cannot be blank."
    )
    parser.add_argument(
        'rating', type=int, required=True, help="raing cannot be blank."
    )

    @jwt_required
    def post(self):
        # Identify the voter from the JWT, never from the request body.
        user_id = int(get_jwt_identity()['id'])
        data = self.parser.parse_args()
        # Only +1 (like) and -1 (dislike) are valid inputs.
        if data['rating'] !=1 and data['rating'] !=-1:
            return {
                "message":"rating can only have value of 1 or -1."
            },400
        # check if rating exists
        rating = RatingModel.find_by_comment_id_user_id(
            comment_id = data['comment_id'],
            user_id = user_id
        )
        # if exists update existing ratings to 1 or 0 or -1
        if rating:
            # Same vote again clears it; opposite vote replaces it.
            if data['rating']==1:
                if rating.rating == 1:
                    rating.rating = 0
                else:
                    rating.rating = 1
            if data['rating'] == -1:
                if rating.rating == -1:
                    rating.rating = 0
                else:
                    rating.rating = -1
        # if not exists, create new rating
        else:
            rating = RatingModel(
                comment_id=data['comment_id'],
                user_id=user_id,
                rating=data['rating']
            )
        rating.save_to_db()
        return {
            "message":"rating saved.",
            "rating" : rating.rating
        }
| true |
58c479082e58e2edb00b312361671593ce7e642a | Python | Walter213/PythonCode | /PythonForFunStuff/Translate.py | UTF-8 | 3,277 | 2.84375 | 3 | [] | no_license | import os
import io
import textwrap
from tkinter import *
from tkinter.ttk import *
from tkinter import ttk
from googletrans import Translator
class Translate(object):
    """Tkinter tab that translates user text via googletrans."""

    def __init__(self, master):
        """Build the notebook, the Translator tab, and all widgets."""
        frame = Frame(master)
        frame.grid()
        # NOTE(review): the notebook is parented to the global `root`
        # rather than `master`; they are the same window in this script,
        # but confirm before reusing the class elsewhere.
        tabControl = ttk.Notebook(root)
        tabControl.configure(width=350, height=400)
        self.translate_tab = ttk.Frame(tabControl)
        tabControl.add(self.translate_tab, text="Translator")
        tabControl.grid()
        self.translate_tab.grid_propagate(0)
        self.speak_it = BooleanVar()
        # Display name -> googletrans language code.
        # NOTE(review): 'Latin': 'li' looks wrong (ISO code is 'la');
        # left unchanged pending verification against googletrans.
        self.languages = { 'Arabic': 'ar', 'Belarusian': 'be',
                           'Bulgarian': 'bg', 'Bosnian': 'bs',
                           'Czech': 'cs', 'Danish': 'da',
                           'German': 'de', 'Greek': 'el',
                           'English': 'en', 'Spanish': 'es',
                           'Persian': 'fa', 'Finnish': 'fi',
                           'French': 'fr', 'Irish': 'ga',
                           'Hebrew': 'he', 'Hindew': 'hi',
                           'Croatian': 'hr', 'Haitian': 'ht',
                           'Hungarian': 'hu', 'Armanian': 'hy',
                           'Indonisian': 'id', 'Italian': 'it',
                           'Japanese': 'ja', 'Korean': 'ko',
                           'Kurdish': 'ku', 'Latin': 'li',
                           'Lithuanian': 'lt', 'Latvian': 'lv',
                           'Dutch': 'nl', 'Norwegian': 'no',
                           'Polish': 'pl', 'Portuguese': 'pt',
                           'Romanian': 'ro', 'Russian': 'ru',
                           'Somalia': 'so', 'Albanian': 'sq',
                           'Serbian': 'sr', 'Swedish': 'sv',
                           'Swahili': 'sw', 'Turkish': 'tr',
                           'Vietnamese': 'vi'}
        self.translate_page()

    def translate_page(self):
        """Lay out the entry field, language picker, button and result box."""
        self.top_label = Label(self.translate_tab, text="Enter Word(s): ")
        self.top_label.grid(column=0, row=0)
        self.entry = Entry(self.translate_tab, width=48)
        self.entry.grid(column=0, row=2, columnspan=3, padx=4, pady=4)
        self.language_label = Label(self.translate_tab, text="Language :")
        self.language_label.grid(column=0, row =2, pady=4)
        # Target-language drop-down, defaulting to the first entry.
        self.language_DDL = ttk.Combobox(self.translate_tab, values=[*self.languages.keys()])
        self.language_DDL.grid(column=1, row=2)
        self.language_DDL.current(0)
        self.translate_button = Button(self.translate_tab, text="Translate", command=self.translate_fuction)
        self.translate_button.grid(column=0, row=3, pady= 14)
        # Framed label that displays the translation result.
        self.translated_frame = LabelFrame(self.translate_tab, text="Word:", width=300, height=50)
        self.translated_frame.grid(column=0, row=4, columnspan=3)
        self.translated_frame.grid_propagate(0)
        self.translated_result = Label(self.translated_frame, text="")
        self.translated_result.grid()

    def translate_fuction(self):
        """Button callback: translate the entry text and show the result."""
        translate = self.entry.get()
        language = self.languages.get(self.language_DDL.get())
        translated_word = self.translate_func(translate, language)
        self.translated_result.configure(text=translated_word)

    def translate_func(self, words, language):
        """Translate *words* into *language*; return the translated text."""
        translator = Translator(service_urls=["translate.google.com"])
        translation = translator.translate(words, dest=language)
        # BUG FIX: return the translated string, not the googletrans
        # Translated object -- the label otherwise displayed the object's
        # repr instead of the translation.
        return translation.text
if __name__ == '__main__':
    # Create the main window, attach the translator UI, and run the loop.
    root = Tk()
    root.title("Translator")
    root.geometry("350x430")
    Translate(root)
    root.mainloop()
21e8fddf18b52c9198aef800246e90c92ec98315 | Python | manionan/project-euler | /problem45.py | UTF-8 | 637 | 2.921875 | 3 | [] | no_license | import math
def is_power(n):
    """Return True if n is a perfect square.

    FIX: math.sqrt is a float and can round the true integer root up or
    down for large n, so check the integers adjacent to the truncated
    root instead of trusting a single candidate. Also rejects negatives.
    """
    if n < 0:
        return False
    root = int(math.sqrt(n))
    for candidate in (root - 1, root, root + 1):
        if candidate >= 0 and candidate * candidate == n:
            return True
    return False
def find_common(max_r):
    # Project Euler 45: find the first hexagonal number H_r = r(2r - 1),
    # for r >= 144, that is also pentagonal and triangular. H_143 = 40755
    # is the known smaller solution, hence the start at 144.
    # NOTE(review): the float sqrt / modulo tests below are only exact
    # while the discriminants fit comfortably in double precision; that
    # holds for this problem's range, but verify before raising max_r.
    for r in range(144,max_r):
        c = 2*r*(2*r - 1)  # c = 2 * H_r
        # Pentagonal test: H = k(3k-1)/2 has an integer solution only if
        # the discriminant 1 + 24*H (written 1 + 12*c here) is a square.
        sq1 = 1 + 4*3*c
        if is_power(sq1) == False:
            continue
        # ... and k = (1 + sqrt(1 + 24H)) / 6 must be a whole number.
        q_num = 1 + math.sqrt(sq1)
        if q_num % 6 != 0:
            continue
        # Triangular test: H = m(m+1)/2 -> discriminant 1 + 8H (= 1 + 4c)
        # must be a square.
        sq2 = 1 + 4*c
        if is_power(sq2) == False:
            continue
        # ... and m = (-1 + sqrt(1 + 8H)) / 2 must be a whole number.
        p_num = -1 + math.sqrt(sq2)
        if p_num % 2 != 0:
            continue
        hex_num = r*(2*r - 1)
        return hex_num
print find_common(1000000)
| true |
38715d04eae7ea3f69e446ad24dc611deb690439 | Python | joosm/convertcloud | /convertcloud/converter.py | UTF-8 | 3,438 | 2.8125 | 3 | [] | no_license | #! /usr/bin/env python
import os
import sys
import struct
import io
import numpy as np
from .formats import Load, Header
class Converter:
    """Loads a point cloud in one file format and writes it in another.

    Supported input:  .pcd, .ply, .zdf, .stl/.STL, .xyz, .a3d
    Supported output: .ply, .pcd, .xyz, .a3d
    """

    def __init__(self):
        self._rgb = None      # True once per-point RGB fields are detected
        self._rgba = None     # True once an alpha field is detected
        self._decode = None
        self.points = None    # float32 array of points (xyz [+ color])
        self.fields = None    # field descriptors reported by the loader

    def _get_name(self, path):
        """
        Returns basename and extension of path
        """
        return os.path.splitext(os.path.basename(path))

    def load_points(self, path):
        """Read *path* with the loader matching its extension and record
        whether the cloud carries RGB / RGBA color. Exits the process on
        an unknown extension."""
        self.points = []
        self.fields = []
        load = Load()
        print("Reading: ", path)
        name, extension = self._get_name(path)
        if extension == ".pcd":
            self.points, self.fields = load.pcd(path)
        elif extension == ".ply":
            self.points, self.fields = load.ply(path)
        elif extension == ".zdf":
            self.points, self.fields = load.zdf(path)
        elif extension in [".stl", ".STL"]:
            self.points, self.fields = load.stl(path)
        elif extension == ".xyz":
            # .xyz and .a3d loaders return bare points with no fields.
            self.points = load.xyz(path)
        elif extension == ".a3d":
            self.points = load.a3d(path)
        else:
            print("Error: Unknown file extension {}".format(extension))
            sys.exit(1)
        self._decode_points()
        for field in self.fields:
            # FIX: idiomatic identity check (`is None`) instead of `== None`.
            if field.name == 'red' and self._rgba is None:
                self._rgb = True
            elif field.name == 'alpha':
                self._rgba = True
                self._rgb = False

    def _decode_points(self):
        """Decode byte-string rows to text (if needed), then convert the
        whole point list to a float32 numpy array."""
        for num, point in enumerate(self.points):
            if isinstance(point[0], bytes):
                self.points[num] = [val.decode() for val in point]
            else:
                # Rows are homogeneous; the first non-bytes row means the
                # rest need no decoding either.
                break
        self.points = np.array(self.points).astype("float32")

    def convert(self, path):
        """Write the loaded points to *path* in the format implied by its
        extension, including RGB/RGBA columns when present."""
        print('Saving point cloud to', path)
        name, extension = self._get_name(path)
        header = self._generate_header(extension)
        with open(path, "wb") as f:
            f.write(header.encode())
            for pt in self.points:
                if self._rgb:
                    f.write("{} {} {} {} {} {}\n".format(\
                        pt[0], pt[1], pt[2],\
                        int(pt[3]), int(pt[4]), int(pt[5])).encode())
                elif self._rgba:
                    f.write("{} {} {} {} {} {} {}\n".format(\
                        pt[0], pt[1], pt[2],\
                        int(pt[3]), int(pt[4]), int(pt[5]), int(pt[6])).encode())
                else:
                    f.write("{} {} {}\n".format(pt[0], pt[1], pt[2]).encode())

    def _generate_header(self, extension):
        """Return the text header for the output *extension* (empty for
        header-less formats). Exits the process on unsupported formats."""
        header_gen = Header(len(self.points), self.fields, self._rgb, self._rgba)
        if extension == ".ply":
            header = header_gen.ply()
        elif extension == ".pcd":
            header = header_gen.pcd()
        elif extension in [".xyz", ".a3d"]:
            header = ''
        else:
            print("Error: Can't convert to {}".format(extension))
            sys.exit(1)
        return header
def main():
    # CLI entry point: convert argv[1] into argv[2], formats inferred
    # from the file extensions.
    if len(sys.argv) != 3:
        print("usage: converter <original.format1> <converted.format2>")
        print("formats supported: .ply, .pcd, .xyz, .zdf")
        sys.exit(1)
    c = Converter()
    c.load_points(sys.argv[1])
    c.convert(sys.argv[2])
if __name__ == "__main__":
    # Run the CLI when executed directly.
    main()
| true |
a2a2249e942ac5dce2033c72dcf32083ee0a92e6 | Python | ChunqiWang/UdacityProject2 | /submission/code/project2.py | UTF-8 | 22,650 | 2.703125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 6 23:11:10 2019
@author: chunqi
"""
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from moviepy.editor import VideoFileClip
class advancedLaneFinding:
    def __init__(self):
        # Most recent left/right lane-line polynomial coefficients
        # (second-order fit, highest power first).
        self.left_fit = np.array([0,0,0])
        self.right_fit = np.array([0,0,0])
        # Define conversions in x and y from pixels space to meters
        self.ym_per_pix = 30/720 # meters per pixel in y dimension
        self.xm_per_pix = 3.7/700 # meters per pixel in x dimension
        # Image size as (width, height); populated by birdsEye().
        self.imageSize = (0,0)
        # Flag: 1 until the first video frame has been processed.
        self.firstFrame = 1
        return
def cameraCalibration(self,boardWidth=9,boardHeight=6):
# function to find object and image points for camera calibration
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((boardHeight*boardWidth,3), np.float32)
objp[:,:2] = np.mgrid[0:boardWidth,0:boardHeight].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
self.objpoints = [] # 3d points in real world space
self.imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('../camera_cal/calibration*.jpg')
# Step through the list and search for chessboard corners
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
# If found, add object points, image points
if ret == True:
self.objpoints.append(objp)
self.imgpoints.append(corners)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
# uncomment to save figure
# cv2.imshow('image',img)
# cv2.imwrite('../output_images/chessBoard/'+fname[14:], img)
# cv2.waitKey(500)
# cv2.destroyAllWindows()
# cv2.waitKey(1)
    def undistort(self,img):
        # Return an undistorted copy of *img* using the object/image
        # points gathered by cameraCalibration().
        # NOTE(review): cv2.calibrateCamera is re-run on EVERY call; the
        # result depends only on the stored points and image size, so
        # caching mtx/dist would be a large speedup with identical output.
        # NOTE(review): img.shape[1:] assumes a (H, W, C) color image --
        # confirm grayscale input never reaches this method.
        # Use cv2.calibrateCamera() and cv2.undistort()
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(self.objpoints, self.imgpoints, img.shape[1:], None, None)
        undist = cv2.undistort(img, mtx, dist, None, mtx)
        return undist
    def undistortTest(self,path):
        # Smoke test for self.undistort: run it over every image matching
        # the glob *path* (output saving is commented out).
        images = glob.glob(path)
        for fname in images:
            img = cv2.imread(fname)
            undist = self.undistort(img)
            # uncomment to save figure
            # cv2.imwrite('../output_images/undistort/'+fname[15:], undist)
    def thresholdImage(self,img, s_thresh=(170, 255), l_thresh = 100, sx_thresh=(40, 100)):
        # Build a thresholded binary image that combines an S-channel
        # color threshold (vetoed where the L channel is dark) with an
        # x-gradient (Sobel) threshold on the L channel.
        # Returns (3-channel debug stack, combined single-channel binary).
        # Convert to HLS color space and separate the L and S channels.
        hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
        l_channel = hls[:,:,1]
        s_channel = hls[:,:,2]
        # Sobel x
        sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
        abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
        scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
        # Threshold x gradient
        sxbinary = np.zeros_like(scaled_sobel)
        sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
        # Threshold color channel
        s_binary = np.zeros_like(s_channel)
        s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
        # Suppress dark pixels: saturated-but-dim areas are not lane paint.
        s_binary[(l_channel <= l_thresh)] = 0
        # Stack each channel (green = gradient hits, blue = color hits).
        color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary)) * 255
        # Combine x gradient and color channel
        combined_binary = np.zeros_like(sxbinary)
        combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
        return color_binary, combined_binary
    def thresholdTest(self,path):
        # Smoke test for self.thresholdImage: render the stacked and
        # combined binaries side by side and save both figures.
        images = glob.glob(path)
        for fname in images:
            img = cv2.imread(fname)
            color_binary, combined_binary = self.thresholdImage(img)
            f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
            ax1.set_title('Stacked thresholds')
            ax1.imshow(color_binary)
            ax2.set_title('Combined S channel and gradient thresholds')
            ax2.imshow(combined_binary, cmap='gray')
            plt.savefig('../output_images/threshold/'+fname[-9:])
            plt.imsave('../output_images/threshold/v2'+fname[-9:], combined_binary, cmap=cm.gray)
    def birdsEye(self, img, s=[[592,450],[688,450],[1120,720],[200,720]],d=[[240,0],[1040,0],[1040,720],[240,720]]):
        # Warp *img* to a top-down ("bird's eye") view via a perspective
        # transform from source trapezoid *s* to destination rectangle *d*
        # (pixel coordinates tuned for 1280x720 road images).
        # Side effect: records the image size for later stages.
        # NOTE(review): mutable default arguments are shared across calls;
        # they are never mutated here, but beware if that changes.
        self.imageSize = (img.shape[1], img.shape[0])
        src = np.float32(s)
        dst = np.float32(d)
        M = cv2.getPerspectiveTransform(src, dst)
        warped = cv2.warpPerspective(img, M, self.imageSize)
        return warped
    def birdsEyeTest(self,path):
        # Smoke test for self.birdsEye: show original and warped images
        # side by side and save both figures.
        images = glob.glob(path)
        for fname in images:
            img = cv2.imread(fname)
            warped = self.birdsEye(img)
            f, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 6))
            f.tight_layout()
            ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            ax1.set_title('Undistorted Image', fontsize=20)
            ax2.imshow(cv2.cvtColor(warped, cv2.COLOR_BGR2RGB))
            ax2.set_title('Undistorted and Warped Image', fontsize=20)
            plt.savefig('../output_images/birdeye/'+fname[-9:])
            cv2.imwrite('../output_images/birdeye/v2'+fname[-9:], warped)
def find_lane_pixels(self,binary_warped):
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[self.imageSize[1]//2:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# HYPERPARAMETERS
# Choose the number of sliding windows
nwindows = 9
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Set height of windows - based on nwindows above and image shape
window_height = np.int(self.imageSize[1]//nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = self.imageSize[1] - (window+1)*window_height
win_y_high = self.imageSize[1] - window*window_height
### TO-DO: Find the four below boundaries of the window ###
win_xleft_low = leftx_current - margin # Update this
win_xleft_high = leftx_current + margin # Update this
win_xright_low = rightx_current - margin # Update this
win_xright_high = rightx_current + margin # Update this
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),
(win_xleft_high,win_y_high),(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),
(win_xright_high,win_y_high),(0,255,0), 2)
### TO-DO: Identify the nonzero pixels in x and y within the window ###
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
### TO-DO: If you found > minpix pixels, recenter next window ###
### (`right` or `leftx_current`) on their mean position ###\
# Remove this when you add your function
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices (previously was a list of lists of pixels)
try:
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
except ValueError:
# Avoids an error if the above is not implemented fully
pass
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
return leftx, lefty, rightx, righty, out_img
    def fit_firstPoly(self, binary_warped):
        """Initial (first-frame) lane fit via a full sliding-window search.

        Stores second-order polynomial coefficients in self.left_fit /
        self.right_fit, clears self.firstFrame, and returns a visualisation
        image with detected lane pixels coloured (left red, right blue).
        """
        # Find our lane pixels first.
        leftx, lefty, rightx, righty, out_img = self.find_lane_pixels(binary_warped)
        # Fit a second order polynomial x = f(y) to each line.
        self.left_fit = np.polyfit(lefty, leftx, 2)
        self.right_fit = np.polyfit(righty, rightx, 2)
        # Generate x and y values for plotting (one sample per image row).
        ploty = np.linspace(0, self.imageSize[1]-1, self.imageSize[1])
        try:
            left_fitx = self.left_fit[0]*ploty**2 + self.left_fit[1]*ploty + self.left_fit[2]
            right_fitx = self.right_fit[0]*ploty**2 + self.right_fit[1]*ploty + self.right_fit[2]
        except TypeError:
            # Fallback if the fits are still None/invalid (e.g. no pixels found).
            print('The function failed to fit a line!')
            left_fitx = 1*ploty**2 + 1*ploty
            right_fitx = 1*ploty**2 + 1*ploty
        # Colour the detected pixels: left lane red, right lane blue.
        out_img[lefty, leftx] = [255, 0, 0]
        out_img[righty, rightx] = [0, 0, 255]
        # Subsequent frames can use the cheaper search_around_poly().
        self.firstFrame = 0
        return out_img
def fit_poly(self, leftx, lefty, rightx, righty):
### TO-DO: Fit a second order polynomial to each with np.polyfit() ###
self.left_fit = np.polyfit(lefty, leftx, 2)
self.right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, self.imageSize[1]-1, self.imageSize[1])
### TO-DO: Calc both polynomials using ploty, left_fit and right_fit ###
left_fitx = self.left_fit[0]*ploty**2 + self.left_fit[1]*ploty + self.left_fit[2]
right_fitx = self.right_fit[0]*ploty**2 + self.right_fit[1]*ploty + self.right_fit[2]
return left_fitx, right_fitx, ploty
    def search_around_poly(self, binary_warped):
        """Re-detect the lane lines by searching near the previous frame's fit.

        Requires self.left_fit / self.right_fit from a prior fit (e.g.
        fit_firstPoly).  Selects nonzero pixels within +/- margin of each
        previous polynomial, refits, and returns (left_fitx, right_fitx, ploty).
        """
        # Width of the search corridor around the previous polynomial.
        margin = 80
        # Grab activated pixels.
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # Keep pixels whose x lies within +/- margin of the previous left fit
        # evaluated at that pixel's y.
        left_lane_inds = ((nonzerox > (self.left_fit[0]*(nonzeroy**2) + self.left_fit[1]*nonzeroy +
            self.left_fit[2] - margin)) & (nonzerox < (self.left_fit[0]*(nonzeroy**2) +
            self.left_fit[1]*nonzeroy + self.left_fit[2] + margin)))
        # Same corridor test for the right line.
        right_lane_inds = ((nonzerox > (self.right_fit[0]*(nonzeroy**2) + self.right_fit[1]*nonzeroy +
            self.right_fit[2] - margin)) & (nonzerox < (self.right_fit[0]*(nonzeroy**2) +
            self.right_fit[1]*nonzeroy + self.right_fit[2] + margin)))
        # Extract left and right line pixel positions.
        leftx = nonzerox[left_lane_inds]
        lefty = nonzeroy[left_lane_inds]
        rightx = nonzerox[right_lane_inds]
        righty = nonzeroy[right_lane_inds]
        # Fit new polynomials (also refreshes self.left_fit / self.right_fit).
        left_fitx, right_fitx, ploty = self.fit_poly(leftx, lefty, rightx, righty)
        return left_fitx, right_fitx, ploty
    def search_around_poly_test (self, path):
        """Visual test of the search-around-polynomial step on still images.

        path -- glob pattern of thresholded test images.
        Saves the sliding-window visualisation, the corridor overlay, and a
        matplotlib figure with the fitted lines to ../output_images/polynomialfit/.
        """
        images = glob.glob(path)
        # NOTE(review): this uses the module-level global `test1` instead of
        # `self` for calibration — works only when called from the __main__
        # script; should probably be self.cameraCalibration().
        test1.cameraCalibration()
        for fname in images:
            img = cv2.imread(fname)
            undist = self.undistort(img)
            color_binary, combined_binary = self.thresholdImage(undist)
            binary_warped = self.birdsEye(combined_binary)
            # First pass: sliding-window search to seed self.left_fit/right_fit.
            rectangular = self.fit_firstPoly(binary_warped)
            cv2.imwrite('../output_images/polynomialfit/'+fname[-9:], rectangular)
            # Width of the corridor searched around the previous polynomial.
            margin = 80
            # Grab activated pixels.
            nonzero = binary_warped.nonzero()
            nonzeroy = np.array(nonzero[0])
            nonzerox = np.array(nonzero[1])
            # Same corridor selection as search_around_poly(), inlined here so
            # the index masks are available for the visualisation below.
            left_lane_inds = ((nonzerox > (self.left_fit[0]*(nonzeroy**2) + self.left_fit[1]*nonzeroy +
                self.left_fit[2] - margin)) & (nonzerox < (self.left_fit[0]*(nonzeroy**2) +
                self.left_fit[1]*nonzeroy + self.left_fit[2] + margin)))
            right_lane_inds = ((nonzerox > (self.right_fit[0]*(nonzeroy**2) + self.right_fit[1]*nonzeroy +
                self.right_fit[2] - margin)) & (nonzerox < (self.right_fit[0]*(nonzeroy**2) +
                self.right_fit[1]*nonzeroy + self.right_fit[2] + margin)))
            # Again, extract left and right line pixel positions.
            leftx = nonzerox[left_lane_inds]
            lefty = nonzeroy[left_lane_inds]
            rightx = nonzerox[right_lane_inds]
            righty = nonzeroy[right_lane_inds]
            # Fit new polynomials.
            left_fitx, right_fitx, ploty = self.fit_poly(leftx, lefty, rightx, righty)
            ## Visualization ##
            # Base image plus a separate layer for the translucent corridor.
            out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
            window_img = np.zeros_like(out_img)
            # Colour the selected pixels: left red, right blue.
            out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
            out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
            # Build closed polygons for the +/- margin corridors (down one edge,
            # back up the other) in the point format cv2.fillPoly() expects.
            left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
            left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
            left_line_pts = np.hstack((left_line_window1, left_line_window2))
            right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
            right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
            right_line_pts = np.hstack((right_line_window1, right_line_window2))
            # Draw the corridors onto the overlay layer and blend.
            cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
            cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
            result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
            cv2.imwrite('../output_images/polynomialfit/v2'+fname[-9:], result)
            # Plot the fitted centre lines on top and save the figure.
            plt.imshow(result)
            plt.plot(left_fitx, ploty, color='yellow')
            plt.plot(right_fitx, ploty, color='yellow')
            plt.savefig('../output_images/polynomialfit/v3'+fname[-9:])
            plt.close()
def measure_curvature_real(self, leftx, rightx, ploty):
# Calculates the curvature of polynomial functions in meters.
left_fit_cr = np.polyfit(ploty*self.ym_per_pix, leftx*self.xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty*self.ym_per_pix, rightx*self.xm_per_pix, 2)
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
##### TO-DO: Implement the calculation of R_curve (radius of curvature) #####
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*self.ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*self.ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
return left_curverad, right_curverad
def measure_vehicle_position(self, left_detected, right_detected):
# function to compute the vehicle offset
line_center = (left_detected +right_detected)/2
offset = (self.imageSize[0]/2 - line_center) * self.xm_per_pix
return offset
def inverse_perspective(self, undist, warped, left_fitx, right_fitx, ploty, d=[[592,450],[688,450],[1120,720],[200,720]],s=[[240,0],[1040,0],[1040,720],[240,720]]):
# funtion to inverse transform
out_img = np.dstack((warped, warped, warped))*255
left_line_window = np.array(np.transpose(np.vstack([left_fitx, ploty])))
right_line_window = np.array(np.flipud(np.transpose(np.vstack([right_fitx, ploty]))))
line_points = np.vstack((left_line_window, right_line_window))
cv2.fillPoly(out_img, np.int_([line_points]), [0,255, 0])
src = np.float32(s)
dst = np.float32(d)
Minv = cv2.getPerspectiveTransform(src, dst)
unwarped = cv2.warpPerspective(out_img, Minv, self.imageSize, flags=cv2.INTER_LINEAR)
result_img = cv2.addWeighted(undist, 1, unwarped, 0.3, 0)
plt.imshow(cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB))
return result_img
def add_word(self, result_img, left_curverad, right_curverad, offset):
font = cv2.FONT_HERSHEY_DUPLEX
curverad = (left_curverad + right_curverad) * 0.5
text1 = 'Radius of Curvature = ' + '{:04.2f}'.format(curverad) + 'm'
cv2.putText(result_img, text1, (50,50), font, 1.5, (255,255,255), 2, cv2.LINE_AA)
if offset > 0:
direction = 'right'
elif offset < 0:
direction = 'left'
abs_offset = abs(offset)
text2 = 'Vehicle is {:03.2f}'.format(abs_offset) + 'm ' + direction + ' of center'
cv2.putText(result_img, text2, (50,120), font, 1.5, (255,255,255), 2, cv2.LINE_AA)
return result_img
    def pipe_line(self, img):
        """Full per-frame lane-finding pipeline (used as moviepy frame filter).

        img -- raw colour camera frame.
        Returns the frame with the lane area and curvature/offset text overlaid.
        """
        # NOTE(review): cameraCalibration() is re-run for every single frame;
        # if it recomputes the calibration from the chessboard images each time
        # this dominates the runtime — consider caching after the first call.
        self.cameraCalibration()
        undist = self.undistort(img)
        color_binary, combined_binary = self.thresholdImage(undist)
        warped = self.birdsEye(combined_binary)
        # First frame: seed the polynomial fits with a full sliding-window search.
        if self.firstFrame:
            self.fit_firstPoly(warped)
        # Subsequent frames: cheap search around the previous fit.
        left_fitx, right_fitx, ploty = self.search_around_poly(warped)
        left_curverad, right_curverad = self.measure_curvature_real(left_fitx, right_fitx, ploty)
        offset = self.measure_vehicle_position(left_fitx[-1], right_fitx[-1])
        result_img = self.inverse_perspective(undist, warped, left_fitx, right_fitx, ploty)
        note_img = self.add_word(result_img, left_curverad, right_curverad, offset)
        return note_img
if __name__ == '__main__':
    test1 = advancedLaneFinding()
    # Individual stage tests (enable as needed):
    #test1.thresholdTest('../output_images/undistort/test*.jpg')
    #test1.birdsEyeTest('../output_images/threshold/v2test*.jpg')
    #test1.search_around_poly_test('../output_images/threshold/v2test*.jpg')
    '''
    test1.cameraCalibration()
    img = cv2.imread('../test_images/test5.jpg')
    undist = test1.undistort(img)
    color_binary, combined_binary = test1.thresholdImage(undist)
    warped = test1.birdsEye(combined_binary)
    test1.fit_firstPoly(warped)
    left_fitx, right_fitx, ploty= test1.search_around_poly(warped)
    left_curverad, right_curverad= test1.measure_curvature_real(left_fitx, right_fitx, ploty)
    result = test1.inverse_perspective(undist,warped,left_fitx, right_fitx, ploty)
    offset = test1.measure_vehicle_position(left_fitx[-1], right_fitx[-1])
    note_result = test1.add_word(result, left_curverad, right_curverad, offset)
    cv2.imwrite('../output_images/test/test5final.jpg', note_result)
    '''
    # Run the full pipeline over the project video, one frame at a time.
    output = 'project_video_output.mp4'
    clip1 = VideoFileClip("../project_video.mp4")
    # fl_image applies pipe_line to every (RGB) frame of the clip.
    white_clip = clip1.fl_image(test1.pipe_line)
    white_clip.write_videofile(output, audio=False)
    #NOTE: this function expects color images!!'''
    # plt.imshow(img)
    #test1.undistortTest('../test_images/test*.jpg')
    #test1.undistortTest('../camera_cal/calibration*.jpg')
    #test1.thresholdTest('../output_images/undistort/test*.jpg')
    #test1.birdsEyeTest('../output_images/threshold/v2test*.jpg')
| true |
b69da94f8ac06d2633d72c374bbecf3f8e75d9cb | Python | oakyuez/master-thesis | /Data Import/03_Join.py | UTF-8 | 2,606 | 3.40625 | 3 | [] | no_license | import pandas as pd
# Load the datasets produced by 01_importData.py
nela = pd.read_csv("C:/Datasets/nela2018.csv", index_col=None, low_memory=False)
kaggle = pd.read_csv("C:/Datasets/kaggle.csv", index_col=None, low_memory=False)
# Load the spreadsheet (Table 12 in the expose): news sources and their political label
labels = pd.read_excel("C:/Datasets/Labels.xlsx", index_col=None)
# Keep only the article content and its source
kaggle = kaggle.loc[:,["publication", "content"]]
# Rename the column so both datasets can be merged on "source"
kaggle.rename(columns={"publication":"source"}, inplace=True)
# Normalise the source names to the spelling used in the label table.
# Fix: use .loc[mask, col] = value instead of chained indexing
# (df[col][mask] = value), which raises SettingWithCopyWarning and is not
# guaranteed to write back into the original frame.
kaggle.loc[kaggle["source"] == "Verge", "source"] = "The Verge"
kaggle.loc[kaggle["source"] == "Atlantic", "source"] = "The Atlantic"
kaggle.loc[kaggle["source"] == "Guardian", "source"] = "The Guardian"
kaggle.loc[kaggle["source"] == "New York Times", "source"] = "The New York Times"
# Keep only the article content and its source
nela = nela.loc[:, ["source", "content"]]
# Combine both datasets into one frame
news = pd.concat([kaggle, nela])
# Attach each source's political label
inner_join = pd.merge(news, labels, left_on="source", right_on="Quelle", how="inner")
inner_join = inner_join.loc[:, ["source", "content", "Label"]]
# Persist the joined data as CSV
inner_join.to_csv("C:/Datasets/nela_kaggle.csv")
inner_join["source"].value_counts().to_csv("C:/Datasets/nela_kaggle_combined_machbarkeitsstudie.csv")
# Feasibility study / statistical sanity checks of the inner join
print(inner_join.shape)
print(inner_join["source"].value_counts())
print(inner_join["source"].unique())
print(len(inner_join["source"].unique()))
print(inner_join.isna().sum())
print(inner_join["Label"].value_counts())
print(inner_join["Label"].unique())
# Split the big frame into one frame per political class
news_leanLeft = inner_join[inner_join["Label"] == "Lean Left"]
news_Left = inner_join[inner_join["Label"] == "Left"]
news_Center = inner_join[inner_join["Label"] == "Center"]
news_leanRight = inner_join[inner_join["Label"] == "Lean Right"]
news_Right = inner_join[inner_join["Label"] == "Right"]
# Persist the per-class frames as CSVs
news_Left.to_csv("C:/Datasets/news_Left.csv")
news_leanLeft.to_csv("C:/Datasets/news_leanLeft.csv")
news_Center.to_csv("C:/Datasets/news_Center.csv")
news_leanRight.to_csv("C:/Datasets/news_leanRight.csv")
news_Right.to_csv("C:/Datasets/news_Right.csv")
e855b6bd886bf3d2ec576f9da77bc366a7d87646 | Python | siva237/python_classes | /regular_expressions/search_match.py | UTF-8 | 294 | 3.515625 | 4 | [] | no_license | import re
# re.search scans the entire string for the pattern:
# string = "cake and cookies"
# pattern = "cake"
#
# out = re.search(pattern,string)
# print(out.group())
string = "icecream"
pattern = "c"
# re.match only matches at the *start* of the string; "icecream" does not
# begin with "c", so this prints None.
out = re.match(pattern,string)
print(out)
string1 = "cake and cookie"
pattern1 = "cake"
# Here the string does begin with "cake", so match succeeds and .group()
# returns the matched text.
out1 = re.match(pattern1,string1)
print(out1.group())
5475d4a50df242f133dae21b7bf55f7ceff617f3 | Python | Edgeeeeee/pycode | /code/select_student.py | UTF-8 | 839 | 3.3125 | 3 | [] | no_license | '''
# 重名的被覆盖了!
f = open('students.txt',encoding = 'utf-8')
tup = ([],)
for line in f.readlines():
t = line.strip().split("\t")
dic[t[0]] = t
while True:
name = input("请输入姓名")
if name in dic:
print(dic[name])
else:
print("不存在")
f.close
'''
# Look up students by substring match on the first tab-separated field (name).
# Fix: use a context manager so the file is closed right after reading — the
# old f.close() sat after the infinite loop and was unreachable (handle leak).
with open('students.txt', encoding='utf-8') as f:
    lines = f.readlines()
while True:
    name = input("input name:")
    a = 0  # number of matching records for this query
    for i in lines:
        if name in i.split('\t')[0]:
            a += 1
            print(i)
    if a == 0:
        print("None")
'''
f = open('students.txt',encoding = 'utf-8')
lines = f.readlines()
while True:
name = input("input name:")
a = 0
for i in lines:
if name in i:
a += 1
print(i)
else:
f.close()
''' | true |
2d01f18bc2b45ad6c71fbc4210950d6172329509 | Python | IEP/submissions | /ch08/ch08p23.py | UTF-8 | 259 | 3.390625 | 3 | [] | no_license | #!/bin/python
def prime(x):
    """Return True iff x is a prime number (trial division up to sqrt(x)).

    Fix: numbers below 2 (0, 1 and negatives) are not prime; the previous
    version returned True for them because the loop body never executed.
    """
    if x < 2:
        return False
    limit = int(x**0.5)
    for i in range(2, limit + 1):
        if x % i == 0:
            return False
    return True
# Read the number of test cases, then classify each input number.
for _ in range(int(input())):
    print('YA' if prime(int(input())) else 'BUKAN')
| true |
0b9c0e902242519279c6790fd1fb912f1590395e | Python | KaggleBreak/daejun_kagglehackathon | /part1/차금강/Reinforcement_Policy_Gradient_Keras/policy_gradient.py | UTF-8 | 3,710 | 2.78125 | 3 | [] | no_license | import copy
import numpy as np
from game import Game
from keras.layers import Dense
from keras.optimizers import Adam
from keras.models import Sequential
from keras import backend as K
EPISODE = 20000000
class REINFORCEMENTAgent:
    """REINFORCE (Monte-Carlo policy gradient) agent with a Keras policy net.

    State is a flat vector of 60 values; the three actions index into
    self.action_space.  Episode transitions are buffered and the policy is
    updated once per finished episode with discounted, normalised returns.
    """
    def __init__(self):
        # When True, resume from the weights file written during training.
        # NOTE(review): this assumes 'reinforce.h5' exists next to the script;
        # a fresh run with no checkpoint will crash here — confirm.
        self.load_model = True
        self.action_space = [0,1,2]
        self.action_size = len(self.action_space)
        self.state_size = 60
        self.discount_factor = 0.99
        self.learning_rate = 0.001
        self.model = self.build_model()
        self.optimizer = self.build_optimizer()
        # Per-episode buffers, cleared after every train_model() call.
        self.states, self.actions, self.rewards = [], [], []
        if self.load_model:
            self.model.load_weights('reinforce.h5')
    def build_model(self):
        # Simple MLP policy: 60 -> 60 -> 60 -> 30 -> 15 -> 15 -> softmax(actions).
        model = Sequential()
        model.add(Dense(60, input_dim=self.state_size, activation='relu'))
        model.add(Dense(60, activation='relu'))
        model.add(Dense(30, activation='relu'))
        model.add(Dense(15, activation='relu'))
        model.add(Dense(15, activation='relu'))
        model.add(Dense(self.action_size, activation='softmax'))
        model.summary()
        return model
    def build_optimizer(self):
        # Build the REINFORCE loss by hand with the Keras backend:
        # loss = -sum(log pi(a|s) * discounted_return).
        action = K.placeholder(shape=[None, self.action_size])
        discounted_rewards = K.placeholder(shape=[None, ])
        # Probability of the taken action (one-hot `action` selects it).
        action_prob = K.sum(action*self.model.output, axis=1)
        cross_entropy = K.log(action_prob) * discounted_rewards
        loss = -K.sum(cross_entropy)
        optimizer = Adam(lr=self.learning_rate)
        # NOTE(review): the positional signature of get_updates() changed
        # between Keras versions (params/constraints/loss ordering) — this
        # matches the old Keras 1.x API; verify against the installed version.
        updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
        train = K.function([self.model.input, action, discounted_rewards], [], updates=updates)
        return train
    def get_action(self, state):
        # Sample an action from the current stochastic policy.
        policy = self.model.predict(state)[0]
        return np.random.choice(self.action_size, 1, p=policy)[0]
    def discount_rewards(self, rewards):
        # Backwards pass computing discounted returns G_t for each step.
        discounted_rewards = np.zeros_like(rewards)
        running_add = 0
        for t in reversed(range(0, len(rewards))):
            running_add = running_add * self.discount_factor + rewards[t]
            discounted_rewards[t] = running_add
        return discounted_rewards
    def append_sample(self, state, action, reward):
        # Buffer one transition; `state` arrives with shape (1, 60).
        self.states.append(state[0])
        self.rewards.append(reward)
        act = np.zeros(self.action_size)
        act[action] = 1
        self.actions.append(act)
    def train_model(self):
        # Normalise the returns (baseline subtraction + unit variance).
        # NOTE(review): if all rewards in an episode are equal, np.std() is 0
        # and this divides by zero — confirm the reward scheme rules that out.
        discounted_rewards = np.float32(self.discount_rewards(self.rewards))
        discounted_rewards -= np.mean(discounted_rewards)
        discounted_rewards /= np.std(discounted_rewards)
        self.optimizer([self.states, self.actions, discounted_rewards])
        self.states, self.actions, self.rewards = [], [], []
if __name__=="__main__":
    # Training loop: run episodes of the game, update the policy once per
    # episode, and checkpoint the weights every 10 episodes.
    # NOTE(review): show_game="False" passes the *string* "False", which is
    # truthy — verify Game() actually expects a string flag here.
    game = Game(6,10,show_game="False")
    agent = REINFORCEMENTAgent()
    global_step = 0
    scores, episodes = [], []
    for e in range(EPISODE):
        done = False
        score = 0
        state = game.reset()
        # The network expects a (1, 60) batch.
        state = np.reshape(state, [1, 60])
        while not done:
            global_step += 1
            action = agent.get_action(state)
            next_state, reward, done = game.step(action)
            next_state = np.reshape(next_state, [1, 60])
            agent.append_sample(state, action, reward)
            score += reward
            state = copy.deepcopy(next_state)
            if done:
                # End of episode: one policy-gradient update on the buffers.
                agent.train_model()
                scores.append(score)
                episodes.append(e)
                print("episode:", e, "score:", score, "time_step:", global_step)
                global_step = 0
        if e%10 == 0:
            agent.model.save_weights("reinforce.h5")
fbffdce5c659c124a52fbcec600a017ab1738d1f | Python | pikinder/unupervised-bci | /decoder/legacy.py | UTF-8 | 12,491 | 2.96875 | 3 | [] | no_license | """
Re-use of the original (research) code for unsupervised BCI.
This code has a wrapper in the erp_decoder.py
"""
import numpy as np
from scipy.misc import logsumexp
def mv_normal_cov_like(labels, data, sigma_t):
    """Log-likelihood of `data` under an isotropic Gaussian centred on `labels`.

    :param labels: target values; array broadcast-compatible with `data`
    :param data: observed values
    :param sigma_t: 2-D array whose [0, 0] entry is the shared variance
    :return: scalar sum of the independent per-element log-densities
             log N(data_i; labels_i, sigma_t[0, 0])

    (The previous docstring called this a "likelihood"; it returns the *log*
    likelihood, which is what the callers sum and logsumexp over.)
    """
    var = sigma_t[0, 0]
    residual = np.array(labels - data).flatten()
    # N * log(1/sqrt(2*pi*var))  +  sum(-residual^2 / (2*var))
    log_norm = residual.shape[0] * np.log(1. / np.sqrt(2. * np.pi * var))
    return log_norm + np.sum(-residual ** 2 / (2. * var))
class p300_speller_base:
    """Shared machinery for unsupervised (EM-trained) P300 spellers.

    Holds a linear classifier ``w`` with Gaussian prior (mean ``mu_w``,
    precision ``delta_w``), the projection noise variance ``sigma_t``, and all
    recorded EEG data/stimulus matrices, plus cached sufficient statistics
    (X^T X and per-hypothesis X^T y).  Subclasses must implement
    ``_compute_character_probabilities``.
    """
    def __init__(self, w, mu_w, delta_w, sigma_t, nr_commands, max_delta_w):
        '''
        Create a new basic P300 speller.
        w -- initial value for the classifier weight vector (D x 1)
        mu_w -- mean of the prior on w
        delta_w -- prior precision on w; assumed isotropic when updated automatically
        sigma_t -- variance of the projection into one dimension (2-D array, [0,0] used)
        nr_commands -- number of different options to choose from in the speller
        max_delta_w -- cap on the precision of the weight vector, introduced to
                       avoid singularities (10^3 recommended)
        '''
        self.w = w.copy()
        self.mu_w = mu_w.copy()
        self.delta_w = delta_w.copy()
        # sigma_t must be a 2-D numpy array ([0, 0] is the variance actually used).
        self.sigma_t = sigma_t.copy()
        # Per-character EEG data and stimulus matrices (see add_letter()).
        self.data = []
        self.stimuli = []
        self.nr_commands = nr_commands
        self.max_delta_w = max_delta_w
        self.data_dim = self.w.shape[0]
        # Target projections: +1 for P300 (attended) epochs, -1 for the rest.
        self.label = np.array([1., -1.])
        # Cached sufficient statistics for the M-step, updated incrementally.
        self.xTx = np.zeros((self.data_dim, self.data_dim))
        self.xTy = [[] for a_i in range(self.nr_commands)]
    def add_letter(self, letter_data, letter_stimuli):
        '''
        Add the data for a single letter.
        letter_data -- list (one entry per character) of numpy arrays; each row
                       contains the EEG for a single stimulus
        letter_stimuli -- list of numpy arrays; one intensification per column,
                          rows hold the characters intensified (-1 marks an
                          invalid intensification, for variable-length groups)
        '''
        self.data.extend(letter_data)
        self.stimuli.extend(letter_stimuli)
        # Update the cached X^T X.
        for c_i in range(len(letter_data)):
            self.xTx += np.dot(letter_data[c_i].T, letter_data[c_i])
        # Update the cached X^T y for every hypothesis "command a_i is the target":
        # epochs whose stimulus group contains a_i get label +1, others -1.
        for c_i in range(len(letter_data)):
            for a_i in range(self.nr_commands):
                stimuli_counts = np.sum(letter_stimuli[c_i] == a_i, axis=0)
                data_p = letter_data[c_i][stimuli_counts > 0, :]
                data_n = letter_data[c_i][stimuli_counts == 0, :]
                temp_xTy = np.dot(data_p.T, self.label[0] * np.ones((data_p.shape[0], 1)))
                temp_xTy += np.dot(data_n.T, self.label[1] * np.ones((data_n.shape[0], 1)))
                self.xTy[a_i].append(temp_xTy)
    def add_data_to_letter(self, letter_data, letter_stimuli, letter_index):
        '''
        Append extra epochs (and their stimuli) to an already stored letter,
        keeping the cached X^T X and X^T y statistics in sync.
        '''
        self.data[letter_index] = np.vstack((self.data[letter_index], letter_data))
        self.stimuli[letter_index] = np.hstack((self.stimuli[letter_index], letter_stimuli))
        self.xTx += np.dot(letter_data.T, letter_data)
        for a_i in range(self.nr_commands):
            stimuli_counts = np.sum(letter_stimuli == a_i, axis=0)
            data_p = letter_data[stimuli_counts > 0, :]
            data_n = letter_data[stimuli_counts == 0, :]
            self.xTy[a_i][letter_index] += np.dot(data_p.T, self.label[0] * np.ones((data_p.shape[0], 1)))
            self.xTy[a_i][letter_index] += np.dot(data_n.T, self.label[1] * np.ones((data_n.shape[0], 1)))
    def _expectation(self):
        '''
        Execute the entire expectation step.
        Stores the per-character marginal probabilities in self.probs and the
        total data log-likelihood in self.data_log_likelihood.
        '''
        projection = self._compute_projection(self.data)
        likelihoods = self._compute_individual_likelihoods(projection)
        (self.probs, self.data_log_likelihood) = self._compute_character_probabilities(likelihoods, self.stimuli)
    def _maximization(self):
        '''
        Execute all of the maximization updates (w, sigma_t, delta_w).
        '''
        self._maximization_w()
        self._maximization_sigma_t()
        self._maximization_delta_w()
    def _compute_character_probabilities(self, likelihoods, stimuli):
        '''
        Combine per-epoch likelihoods into per-character probabilities;
        must return (probs, data_log_likelihood).  Implemented by subclasses
        (e.g. with a language model).
        '''
        # Previously a bare Python-2 `print`, which both failed to signal the
        # missing implementation and is a syntax error under Python 3.
        raise NotImplementedError("subclasses must implement _compute_character_probabilities")
    def _compute_projection(self, data):
        '''
        Project all data onto w (one dimension).
        Returns a python list with one array per character.
        '''
        return [np.dot(data[k], self.w) for k in range(len(data))]
    def _compute_individual_likelihoods(self, projection):
        '''
        Log-likelihood of every projected point under each label hypothesis.
        Returns a list (one matrix per character); each row is a data point and
        the columns are [P300 likelihood, non-P300 likelihood].
        '''
        likelihoods = []
        for c_i in range(len(projection)):
            # Squared error between the projection and each label, turned into
            # the log of an isotropic Gaussian density.
            cur_lik = np.tile(np.atleast_2d(projection[c_i]), (1, len(self.label))) - np.atleast_2d(self.label)
            cur_lik = cur_lik ** 2.
            cur_lik *= -0.5 / self.sigma_t[0, 0]
            cur_lik -= np.log(np.sqrt(2.0 * np.pi * self.sigma_t[0, 0]))
            likelihoods.append(cur_lik)
        return likelihoods
    def _maximization_w(self):
        '''
        MAP update of the weight vector: ridge-regression solution where each
        labelling hypothesis is weighted by its posterior probability.
        '''
        xTy = np.zeros((self.data_dim, 1))
        for c_i in range(len(self.data)):
            cur_probs = self.probs[c_i, :]
            for a_i in range(self.nr_commands):
                xTy += cur_probs[a_i] * self.xTy[a_i][c_i]
        # Add the prior contribution.
        xTy += np.dot(self.delta_w * self.sigma_t, self.mu_w)
        self.w = np.dot(np.linalg.inv(self.xTx + self.sigma_t[0, 0] * self.delta_w), xTy)
    def _maximization_delta_w(self):
        '''
        Update the (isotropic) prior precision on w, capped at max_delta_w to
        avoid singular updates when w collapses onto mu_w.
        '''
        self.delta_w = 1. * (self.data_dim) / np.dot(self.w.T - self.mu_w.T, self.w - self.mu_w) * np.eye(self.data_dim)
        if self.delta_w[0][0] >= self.max_delta_w:
            self.delta_w = self.max_delta_w * np.eye(self.delta_w.shape[0])
    def _maximization_sigma_t(self, projection=None):
        '''
        Update the projection variance: posterior-weighted mean squared
        distance between the projections and their hypothesised labels.
        '''
        if projection is None:
            projection = self._compute_projection(self.data)
        self.sigma_t = 0. * self.sigma_t
        number_data_points = sum([projection[n].shape[0] for n in range(len(projection))])
        for c_i in range(len(self.data)):
            cur_probs = self.probs[c_i, :]
            for a_i in range(self.nr_commands):
                # Split projections by the hypothesised labelling for command a_i.
                stimuli_counts = np.sum(self.stimuli[c_i] == a_i, axis=0)
                data_p = projection[c_i][stimuli_counts.T > 0, :]
                data_n = projection[c_i][stimuli_counts.T == 0, :]
                # Subtract the corresponding target labels.
                data_p -= self.label[0] * np.ones((data_p.shape[0], 1))
                data_n -= self.label[1] * np.ones((data_n.shape[0], 1))
                data_it = np.vstack([data_p, data_n])
                # Accumulate the posterior-weighted squared residuals.
                self.sigma_t += cur_probs[a_i] * np.atleast_2d(
                    np.sum(data_it * data_it, axis=0)) / number_data_points
    def do_individual_intens(self, tot_data):
        '''
        Per-epoch P300 posterior: for each row of tot_data return
        p(P300 | projection) under the current w and sigma_t.
        '''
        outputs = np.zeros(tot_data.shape[0])
        for i in range(tot_data.shape[0]):
            prob_p = mv_normal_cov_like(self.label[0] * np.ones(1),
                                        np.dot(tot_data[i:i + 1, :], self.w), self.sigma_t)
            prob_n = mv_normal_cov_like(self.label[1] * np.ones(1),
                                        np.dot(tot_data[i:i + 1, :], self.w), self.sigma_t)
            outputs[i] = np.exp(prob_p - logsumexp(np.array([prob_n, prob_p])))
        return outputs
class p300_speller_unigram(p300_speller_base):
    '''
    P300 speller based on a unigram language model.
    With uniform prior probabilities for each character this reduces to the
    standard unsupervised speller.
    _compute_character_probabilities returns (probs, data_log_lik).
    '''
    def __init__(self,w,mu_w,delta_w,sigma_t,nr_commands,max_delta_w,prior_command_log_probs):
        # prior_command_log_probs -- log prior probability per command (unigram LM).
        p300_speller_base.__init__(self,w,mu_w,delta_w,sigma_t,nr_commands,max_delta_w)
        self.prior_command_log_probs = prior_command_log_probs.copy()
    def _compute_character_probabilities(self,likelihoods,stimuli):
        '''
        Expectation step using the unigram language model: for each character,
        the posterior of "command a_i is the target" combines the LM prior with
        the P300/non-P300 likelihoods implied by that hypothesis.
        '''
        data_log_likelihood = 0.0
        probs = np.zeros((len(likelihoods),self.nr_commands))
        # Loop over characters.
        for c_i in range(len(likelihoods)):
            cur_probs = probs[c_i,:]
            # Loop over possible target assignments.
            for a_i in range(self.nr_commands):
                # Epochs whose stimulus group contains a_i should carry a P300
                # under this hypothesis; all others should not.
                stimuli_counts = np.sum(stimuli[c_i]==a_i,axis=0)
                cur_probs[a_i]=self.prior_command_log_probs[a_i]
                cur_probs[a_i]+=np.sum(likelihoods[c_i][stimuli_counts>0,0]) # P300 given character
                cur_probs[a_i]+=np.sum(likelihoods[c_i][stimuli_counts==0,1]) # non P300 given character
            # Normalize in log space; the normaliser is this character's
            # contribution to the total data log-likelihood.
            normalizing = logsumexp(cur_probs)
            cur_probs[:] = np.exp(cur_probs-normalizing)
            data_log_likelihood += normalizing
        return (probs,data_log_likelihood)
class online_speller:
    """Runs two spellers with mirrored initial weights (w and -w) in parallel
    and always reports the one with the higher data log-likelihood.  This
    resolves the sign ambiguity of the unsupervised classifier online."""
    def __init__(self, w, speller_class, *args):
        # Two competing hypotheses: original weight vector and its negation.
        self.spellers = [speller_class(w, *args), speller_class(-w, *args)]
    def _expectation(self):
        # E-step on both spellers, then expose the better one's results.
        for speller in self.spellers:
            speller._expectation()
        liks = np.array([speller.data_log_likelihood for speller in self.spellers])
        best_id = np.argmax(liks)
        self.probs = self.spellers[best_id].probs
        self.data_log_likelihood = self.spellers[best_id].data_log_likelihood
    def _maximization(self):
        # M-step on both spellers independently.
        for speller in self.spellers:
            speller._maximization()
    def select_best_vector_redo_expectation(self):
        # Reset the losing speller to the mirrored parameters of the winner
        # and refresh its expectation, so both hypotheses stay comparable.
        liks = np.array([speller.data_log_likelihood for speller in self.spellers])
        best_id = np.argmax(liks)
        worst_id = (best_id + 1) % 2
        self.spellers[worst_id].w = -1.0 * self.spellers[best_id].w.copy()
        self.spellers[worst_id].sigma_t = 1.0 * self.spellers[best_id].sigma_t.copy()
        self.spellers[worst_id].delta_w = 1.0 * self.spellers[best_id].delta_w.copy()
        self.spellers[worst_id]._expectation()
    def add_letter(self, letter_data, letter_stimuli):
        # Feed the new letter to both spellers.
        for speller in self.spellers:
            speller.add_letter(letter_data, letter_stimuli)
    def best_probs(self):
        # Character probabilities of the currently best speller.
        liks = np.array([speller.data_log_likelihood for speller in self.spellers])
        best_id = np.argmax(liks)
        return self.spellers[best_id].probs
7a2560e578d20c2ae19cb69491fae30370b9c72b | Python | cenanypirany/2048_Solver | /algo_class.py | UTF-8 | 801 | 3.390625 | 3 | [
"MIT"
] | permissive | import random
class Sequence():
    """Cyclic move provider for a 2048 solver.

    `sequence` names one of the predefined move patterns below; get_move()
    hands out the pattern one move at a time, wrapping around at the end.
    The two random patterns are sampled once, at construction time.
    """
    def __init__(self, sequence):
        patterns = {
            'cclkwise': ['up','left','down','right'],
            'clkwise': ['up','right','down','left'],
            'weighted_pattern': [random.choices(('up','down','left','right'), weights=(.1,.4,.4,.1))[0] for i in range(50)],
            'contra': ['up','up','down','down','left','right','left','right'],
            'rand_pattern': [random.choices(('up','down','left','right'))[0] for i in range(50)]
        }
        self.sequence = patterns[sequence]
        self.index = 0

    def get_move(self):
        """Return the current move and advance (cyclically) to the next one."""
        move = self.sequence[self.index]
        self.index = (self.index + 1) % len(self.sequence)
        return move
| true |
b50ad6698115bbb6ad5f7095c824092f4a4f0f91 | Python | silnrsi/palaso-python | /lib/palaso/sfm/style.py | UTF-8 | 9,573 | 2.59375 | 3 | [
"MIT"
] | permissive | """
The STY stylesheet file parser module.
This defines the database schema for STY files necessary to drive the SFM DB
parser and pre-processing to remove comments, etc.
"""
__author__ = "Tim Eves"
__date__ = "06 January 2020"
__copyright__ = "Copyright © 2020 SIL International"
__license__ = "MIT"
__email__ = "tim_eves@sil.org"
# History:
# 09-Nov-2010 tse Update to use unique field type's set object and fix poor
# quality error messages that fail to identify the source
# file.
# 26-Jan-2010 tse Rewrote to use new palaso.sfm.records module.
# 11-Jan-2010 tse Initial version.
import re
from . import records, ErrorLevel
import warnings
from collections import abc
from .records import sequence, unique
from .records import UnrecoverableError
_comment = re.compile(r'\s*#(?:!|.*$)')
_markers = re.compile(r'^\s*\\[^\s\\]+\s')
def _munge_records(rs):
yield from ((r.pop('Marker').lstrip(), r) for r in rs)
class CaselessStr(str):
    """A ``str`` whose equality and hashing ignore case (via casefold),
    suitable for use as a case-insensitive dict key."""
    def __eq__(self, b):
        # BUG FIX: comparing against a non-string used to raise
        # AttributeError (no .casefold()); defer to the other operand.
        if not isinstance(b, str):
            return NotImplemented
        return self.casefold() == b.casefold()
    def __hash__(self):
        # Must agree with __eq__: case-insensitively equal strings hash alike.
        return self.casefold().__hash__()
class Marker(dict):
    """A dict with case-insensitive keys: every key is coerced to
    CaselessStr on insertion and casefolded on lookup."""
    def __init__(self, iterable=(), **kwarg):
        self.update(iterable)
        self.update(kwarg)
    def __getitem__(self, key):
        return super().__getitem__(key.casefold())
    def __setitem__(self, key, value):
        return super().__setitem__(CaselessStr(key), value)
    def __delitem__(self, key):
        return super().__delitem__(key.casefold())
    def __contains__(self, key):
        return super().__contains__(key.casefold())
    def copy(self):
        return Marker(self)
    def get(self, key, *args, **kwds):
        return super().get(key.casefold(), *args, **kwds)
    def pop(self, key, *args, **kwargs):
        return super().pop(key.casefold(), *args, **kwargs)
    def setdefault(self, key, *args, **kwargs):
        # BUG FIX: dict.setdefault returns the stored value; the original
        # dropped it and always returned None.
        return super().setdefault(CaselessStr(key), *args, **kwargs)
    def update(self, iterable=(), **kwarg):
        if isinstance(iterable, abc.Mapping):
            iterable = iterable.items()
        super().update({CaselessStr(k): v for k, v in iterable})
        super().update({CaselessStr(k): v for k, v in kwarg.items()})
# Field schema for STY marker records: maps each field name to a pair of
# (value parser, default).  'Marker' is the record key and has no usable
# default -- a record without it is an unrecoverable error.  The commented
# entries are known STY fields currently left unparsed.
_fields = Marker({
    'Marker': (str, UnrecoverableError(
        'Start of record marker: {0} missing')),
    'Endmarker': (str, None),
    'Name': (str, None),
    'Description': (str, None),
    'OccursUnder': (unique(sequence(str)), {None}),
    'TextProperties': (unique(sequence(CaselessStr)), set()),
    'TextType': (CaselessStr, 'Unspecified'),
    'StyleType': (CaselessStr, None),
    # 'Attributes': (sequence(str), None)
    # 'Rank': (int, None),
    # 'FontSize': (int, None),
    # 'Regular': (flag, False),
    # 'Bold': (flag, False),
    # 'Italic': (flag, False),
    # 'Underline': (flag, False),
    # 'Superscript': (flag, False),
    # 'Smallcaps': (flag, False),
    # 'Justification': (str, 'Left'),
    # 'SpaceBefore': (int, 0),
    # 'SpaceAfter': (int, 0),
    # 'FirstLineIndent': (float, 0),
    # 'LeftMargin': (float, 0),
    # 'RightMargin': (float, 0),
    # 'Color': (int, 0),
})
def parse(source, error_level=ErrorLevel.Content):
    '''
    >>> from pprint import pprint
    >>> r = parse(r"""
    ... \\Marker toc1
    ... \\Name toc1 - File - Long Table of Contents Text
    ... \\Description Long table of contents text
    ... \\OccursUnder h h1 h2 h3
    ... \\Rank 1
    ... \\TextType Other
    ... \\TextProperties paragraph publishable vernacular
    ... \\StyleType Paragraph
    ... \\FontSize 12
    ... \\Italic
    ... \\Bold
    ... \\Color 16384
    ... #!\\Attributes attr size ?ref""".splitlines(True))
    >>> pprint((r,
    ...         sorted(r['toc1']['occursunder']),
    ...         sorted(r['toc1']['textproperties'])))
    ... # doctest: +ELLIPSIS
    ({'toc1': {'Attributes': 'attr size ?ref',
               'Bold': '',
               'Color': '16384',
               'Description': 'Long table of contents text',
               'Endmarker': None,
               'FontSize': '12',
               'Italic': '',
               'Name': 'toc1 - File - Long Table of Contents Text',
               'OccursUnder': {...},
               'Rank': '1',
               'StyleType': 'Paragraph',
               'TextProperties': {...},
               'TextType': 'Other'}},
     ['h', 'h1', 'h2', 'h3'],
     ['paragraph', 'publishable', 'vernacular'])
    >>> r = parse(r"""
    ... \\Marker dummy1
    ... \\Name dummy1 - File - dummy marker definition
    ... \\Description A marker used for demos
    ... \\OccursUnder id NEST
    ... \\TextType Other
    ... \\Bold
    ... \\Color 12345""".splitlines(True))
    >>> pprint((sorted(r.items()),
    ...         sorted(r['dummy1']['OccursUnder'])))
    ... # doctest: +ELLIPSIS
    ([('dummy1',
       {'Bold': '',
        'Color': '12345',
        'Description': 'A marker used for demos',
        'Endmarker': None,
        'Name': 'dummy1 - File - dummy marker definition',
        'OccursUnder': {...},
        'StyleType': None,
        'TextProperties': set(),
        'TextType': 'Other'})],
     ['NEST', 'id'])
    ''' # noqa
    # strip comments out
    no_comments = (_comment.sub('', ln) for ln in source)
    # Warnings are surfaced only when the caller asked for stricter than
    # content-level error reporting; otherwise they are silenced.
    with warnings.catch_warnings():
        warnings.simplefilter(
            "always" if error_level > ErrorLevel.Content else "ignore")
        # Drive the generic record parser with a caseless copy of the
        # _fields schema, keyed on the 'Marker' field.
        rec_parser = records.parser(
            no_comments,
            records.Schema(
                'Marker',
                type(_fields)(
                    {CaselessStr(k): v for k, v in _fields.items()})),
            error_level=error_level)
        rec_parser.source = getattr(source, 'name', '<string>')
        recs = iter(rec_parser)
        # Discard the leading record emitted before the first \Marker
        # (presumably file preamble) -- TODO confirm against records.parser.
        next(recs, None)
        res = dict(_munge_records(recs))
        # Collapse any sfm.Text values into plain strings, in place.
        _reify(res)
        return res
def _reify(sheet):
    # Replace every sfm.Text field value with a plain str, in place.
    for record in sheet.values():
        text_fields = [f for f, v in record.items()
                       if isinstance(v, records.sfm.Text)]
        for f in text_fields:
            record[f] = str(record[f])
def update_sheet(sheet, ammendments=None, field_replace=False, **kwds):
    """
    Merge update an existing sheet with records from a supplied dictionary and
    any keyword arguments as well. Only non defaulted fields for each record
    in ammendments or keyword args will overrite the fields in any marker
    records with matching marker names. The OccursUnder and TextProperties
    fields of a records are merged by taking the union of new and old, unless
    the field_replace keyword parameter is True
    This updated sheet is also returned.
    sheet: The sheet to be updated.
    ammendments: A Mapping from marker names to marker records continaing
        the fields to be updated.
    field_replace: When True replace OccursUnder and TextProperties.
        When False merge them instead. Defaults to False.
    **kwds: marker id keywords assigned to marker records continaing
        the fields to be updated.
    >>> from pprint import pprint
    >>> base = parse(r'''
    ... \\Marker test
    ... \\Name test - A test'''.splitlines(True))
    >>> pprint(base)
    {'test': {'Description': None,
              'Endmarker': None,
              'Name': 'test - A test',
              'OccursUnder': {None},
              'StyleType': None,
              'TextProperties': set(),
              'TextType': 'Unspecified'}}
    >>> pprint(update_sheet(base,
    ...                     test={'OccursUnder': {'p'}, 'FontSize': '12'},
    ...                     test2={'Name': 'test2 - new marker'}))
    ... # doctest: +ELLIPSIS
    {'test': {'Description': None,
              'Endmarker': None,
              'FontSize': '12',
              'Name': 'test - A test',
              'OccursUnder': {...},
              'StyleType': None,
              'TextProperties': set(),
              'TextType': 'Unspecified'},
     'test2': {'Name': 'test2 - new marker'}}
    >>> update = parse(r'''
    ... \\Marker test
    ... \\Name test - A test
    ... \\TextType Note'''.splitlines(True))
    >>> pprint(update)
    {'test': {'Description': None,
              'Endmarker': None,
              'Name': 'test - A test',
              'OccursUnder': {None},
              'StyleType': None,
              'TextProperties': set(),
              'TextType': 'Note'}}
    >>> pprint(update_sheet(base, update))
    ... # doctest: +ELLIPSIS
    {'test': {'Description': None,
              'Endmarker': None,
              'FontSize': '12',
              'Name': 'test - A test',
              'OccursUnder': {...},
              'StyleType': None,
              'TextProperties': set(),
              'TextType': 'Note'},
     'test2': {'Name': 'test2 - new marker'}}
    """
    # BUG FIX: `ammendments` used to default to a shared mutable dict that
    # was then mutated with the keyword arguments, leaking updates from one
    # call into later calls.  Work on a private shallow copy instead (the
    # caller's outer dict is no longer mutated either; the per-marker
    # records are still shared and popped, as before).
    ammendments = dict(ammendments) if ammendments is not None else {}
    ammendments.update(**kwds)
    for marker, new_meta in ammendments.items():
        try:
            meta = sheet[marker]
            if not field_replace:
                # Merge the set-valued fields instead of replacing them.
                meta['OccursUnder'].update(new_meta.pop('OccursUnder', set()))
                meta['TextProperties'].update(new_meta.pop('TextProperties',
                                                           set()))
            # Copy across only fields that differ from the schema default.
            meta.update(
                fv for fv in new_meta.items()
                if fv[0] not in _fields or fv[1] != _fields[fv[0]][1])
        except KeyError:
            # Marker not in the sheet yet: adopt the new record wholesale.
            sheet[marker] = new_meta
    return sheet
| true |
5fc87b6d9e310edc0351b34067e0cbaaa62d04e5 | Python | tomijarvi/kerrokantasi | /kerrokantasi/settings/util.py | UTF-8 | 1,952 | 2.671875 | 3 | [
"MIT"
] | permissive | import copy
import os
from importlib.util import find_spec
def load_local_settings(settings, module_name):
"""
Load local settings from `module_name`.
Search for a `local_settings` module, load its code and execute it in the
`settings` dict. All of the settings declared in the sertings dict are thus available
to the local_settings module. The settings dict is updated.
"""
local_settings_spec = find_spec(module_name)
if local_settings_spec:
local_settings_code = local_settings_spec.loader.get_code(module_name)
exec(local_settings_code, settings)
def load_secret_key(settings):
"""
Load a secret key from `.django_secret` if one is not already set.
:param settings: Settings dict
:type settings: dict
"""
if settings.get("SECRET_KEY"):
return
secret_file = os.path.join(settings.get("BASE_DIR"), '.django_secret')
if os.path.isfile(secret_file):
with open(secret_file) as secret:
settings["SECRET_KEY"] = secret.read().strip()
return
from django.utils.crypto import get_random_string
try:
settings["SECRET_KEY"] = secret_key = get_random_string(64)
with open(secret_file, 'w') as secret:
os.chmod(secret_file, 0o0600)
secret.write(secret_key)
secret.close()
print("Secret key file %s generated." % secret_file)
except IOError:
raise Exception(
'Please create a %s file with random characters to generate your secret key!' % secret_file
)
def get_settings(settings_module):
"""
Get a copy of the settings (upper-cased variables) declared in the given settings module.
:param settings_module: A settings module
:type settings_module: module
:return: Dict of settings
:rtype: dict[str, object]
"""
return copy.deepcopy({k: v for (k, v) in vars(settings_module).items() if k.isupper()})
| true |
356beca1dfe9ae65744715dfc1b42153d8292c11 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_97/1664.py | UTF-8 | 1,435 | 3.0625 | 3 | [] | no_license | from itertools import permutations as pnr, combinations as cnr
def isRe(x, y):
    # y is a "recycling" of x iff its digit string is a rotation of x's
    # digit string; every rotation of s is a length-len(s) substring of s+s.
    s, t = str(x), str(y)
    return len(s) == len(t) and t in s + s
if __name__ == "__main__":
fi = open("input.txt", "r")
fo = open("output", "w")
tests = int(fi.readline())
for test in range(tests):
line = fi.readline()
nums = line.split()
a = int(nums[0])
b = int(nums[1])
res = 0
ans = []
for x in xrange(a, b + 1):
perm = []
for p in pnr(list(str(x))):
n = int("".join(p))
if n >= a and n <= b and n not in perm:
perm.append(n)
#print "p" ,p
perm.sort()
#print perm
if len(perm) > 1 and perm not in ans:
ans.append(perm)
for a in ans:
if len(a) > 2:
for f in cnr(a, 2):
#print a, f, isRe(f[0], f[1])
if isRe(f[0], f[1]):
res += 1
else:
#print a, isRe(a[0], a[1])
if isRe(a[0], a[1]):
res += 1
fo.write("Case #" + str(test + 1) + ": " + str(res) + "\n")
fo.close()
fi.close()
| true |
dc3abdfa4e0a14d2ac979558843307482b690bd3 | Python | KamilRuchala/kodyNadmiarowe | /Hamming/operacje.py | UTF-8 | 944 | 3.21875 | 3 | [] | no_license | import numpy # trzeba doinstalowac
def suma(wiersz):
    """Return the parity bit of `wiersz`: 0 if the element sum is even,
    1 if it is odd."""
    # Portability fix: the original iterated with xrange(), which exists
    # only on Python 2 (NameError on Python 3).  Summing directly is
    # equivalent on both versions.
    return sum(wiersz) % 2
def mnozenie(m1, inf):
    """Scale each row x of matrix `m1` by int(inf[x]).

    `inf` is an iterable of bits (e.g. the string "10" or a list of ints)
    with one entry per row; `m1` must support numpy-style m1[x, i]
    indexing.  Returns a new float matrix; `m1` is left untouched.
    """
    # Portability fix: xrange() -> range() (Python 2 only vs. both).
    inf2 = [int(x) for x in inf]
    l_k = len(m1[0])  # liczba kolumn (number of columns)
    l_w = len(m1)     # liczba wierszy (number of rows)
    wynikowa = numpy.zeros(shape=(l_w, l_k))  # macierz wynikowa (result)
    for x in range(l_w):
        for i in range(l_k):
            wynikowa[x, i] = m1[x, i] * inf2[x]
    return wynikowa
def mnozenie2(m1, inf):
    """Scale each column i of matrix `m1` by int(inf[i]).

    `inf` is an iterable of bits (e.g. the string "10" or a list of ints)
    with one entry per column; `m1` must support numpy-style m1[x, i]
    indexing.  Returns a new float matrix; `m1` is left untouched.
    """
    # Portability fix: xrange() -> range() (Python 2 only vs. both).
    inf2 = [int(x) for x in inf]
    l_k = len(m1[0])  # liczba kolumn (number of columns)
    l_w = len(m1)     # liczba wierszy (number of rows)
    wynikowa = numpy.zeros(shape=(l_w, l_k))  # macierz wynikowa (result)
    for x in range(l_w):
        for i in range(l_k):
            wynikowa[x, i] = m1[x, i] * inf2[i]
    return wynikowa
def stringer(tablica):
    # Concatenate the str() of every element into a single string.
    return "".join(map(str, tablica))
def inter(string):
    # Explode a digit string into a list of ints.
    return [int(znak) for znak in string]
| true |
811135b16042eb8023b1741bd87d599830e237d2 | Python | jonasgrebe/pt-deep-image-colorization | /trainer.py | UTF-8 | 13,908 | 2.984375 | 3 | [] | no_license | from typing import Tuple, List, Callable, Dict, Any
import torch
import numpy as np
import os
import cv2
import log
class Trainer():
    """Drives adversarial training of a colorization GAN: owns the
    generator/discriminator pair, their optimizers and losses, and
    delegates logging and checkpointing to a `log.Logger`."""
    def __init__(self, logger: log.Logger,
                 generator: torch.nn.Module, discriminator: torch.nn.Module,
                 g_optimizer: torch.optim.Optimizer, d_optimizer: torch.optim.Optimizer,
                 pixel_loss: torch.nn.Module, adversarial_loss: torch.nn.Module,
                 transform_input: Callable = lambda x: x, transform_output: Callable = lambda x: x,
                 hypers: Dict[str, Any] ={}, device: torch.device ='cuda:0') -> None:
        """ Trainer: Specific helper class for training a generative adversarial network for the task of
            image colorization.
        Parameters
        ----------
        logger : logger.Logger
            Logger instance that handles the logging of the given hyperparameters, the losses, the validation results, etc.
        generator : torch.nn.Module
            Generator module that expects a 1-channel image tensor and returns a 3-channel image tensor. Note that this module
            should already be transferred to the given device.
        discriminator : torch.nn.Module
            Discriminator module that expects a 3-channel image tensor and returns a single scalar tensor. Note that this module
            should already be transferred to the given device.
        g_optimizer : torch.optim.Optimizer
            Optimizer over the learnable parameters of the generator.
        d_optimizer : torch.optim.Optimizer
            Optimizer over the learnable parameters of the discriminator.
        pixel_loss : torch.nn.Module
            Per-pixel loss module that compares the input and target images component-wise.
        adversarial_loss : torch.nn.Module
            Adversarial loss module that judges the discriminator's output.
        transform_input : Callable
            Input transformation that is applied to each of the images before feeding it through the network.
        transform_output : Callable
            Output transformation that reverses the input transformation.
        hypers : Dict[str, Any]
            Dictionary of hyperparameters.
        device : torch.device
            PyTorch device of the generator and discriminator.
        """
        # NOTE(review): `hypers={}` is a shared mutable default; harmless
        # while it is only read from, but fragile if it is ever mutated.
        # initialize epoch
        self.epoch = 0
        # set generator and discriminator
        self.generator = generator
        self.discriminator = discriminator
        # set optimizers
        self.g_optimizer = g_optimizer
        self.d_optimizer = d_optimizer
        # set pixel and adversarial loss
        self.pxl_loss = pixel_loss
        self.adv_loss = adversarial_loss
        # set logger
        self.logger = logger
        # set input and output transformations
        self.transform_input = transform_input
        self.transform_output = transform_output
        # set hyperparameter dictionary and device
        self.hypers = hypers
        self.device = device
    def training_step(self, img_batch: torch.Tensor) -> Dict[str, torch.Tensor]:
        """ Trains the generator and the discriminator on the given image batch.
        Parameters
        ----------
        img_batch : torch.Tensor
            Input image tensor.
        Returns
        -------
        Dict[str, torch.Tensor]
            Dictionary mapping each loss name to its loss value.
        """
        # reset the gradients of the discriminator
        self.discriminator.zero_grad()
        # let discriminator judge the real images
        d_real_batch = self.discriminator(img_batch)
        # define adversarial loss targets
        real_target = torch.ones_like(d_real_batch)
        fake_target = torch.zeros_like(d_real_batch)
        # compute loss and backward it
        # (the 0.9 factor applies one-sided label smoothing to real labels)
        adv_d_real_loss = self.adv_loss(d_real_batch, real_target * 0.9) * self.hypers['adv_d_loss_weight']
        adv_d_real_loss.backward()
        # split images into L and AB channels
        L_batch, AB_batch = img_batch[:,:1], img_batch[:,1:]
        # generate colors based on L
        g_AB_batch = self.generator(L_batch)
        # construct fake images by concatenation
        fake_batch = torch.cat([L_batch, g_AB_batch], dim=1)
        # let discriminator judge the fake images (without a gradient flow through the Generator)
        d_fake_batch = self.discriminator(fake_batch.detach())
        # compute loss and backward it
        adv_d_fake_loss = self.adv_loss(d_fake_batch, fake_target) * self.hypers['adv_d_loss_weight']
        adv_d_fake_loss.backward()
        # add adversarial losses for logging
        adv_d_loss = adv_d_real_loss + adv_d_fake_loss
        # optimize the discriminator's parameters based on the computed gradients
        self.d_optimizer.step()
        # reset the gradients of the generator
        self.generator.zero_grad()
        # let the discriminator judge the fake images
        d_batch = self.discriminator(fake_batch)
        # compute loss and backward it (but keeping the forward information inside the generator)
        adv_g_loss = self.adv_loss(d_batch, real_target) * self.hypers['adv_g_loss_weight']
        adv_g_loss.backward(retain_graph=True)
        # compute per-pixel loss and backward it
        pxl_loss = self.pxl_loss(AB_batch, g_AB_batch) * self.hypers['pxl_loss_weight']
        pxl_loss.backward()
        # optimize the generator's parameters based on the computed gradients
        self.g_optimizer.step()
        # put all of the losses in a dictionary
        loss_dict = {'pxl_loss': pxl_loss, 'adv_g_loss': adv_g_loss, 'adv_d_loss': adv_d_loss}
        return loss_dict
    def forward(self, img_batch: torch.Tensor) -> List[torch.Tensor]:
        """ Forwards a given batch of images through the network and returns a list of relevant output batches for visualization .
        Parameters
        ----------
        img_batch : torch.Tensor
        Returns
        -------
        List[torch.Tensor]
            List of relevant output batches for visualization
        """
        # feed L channel through the generator and create the fake images afterwards
        L_batch, AB_batch = img_batch[:,:1], img_batch[:,1:]
        g_AB_batch = self.generator(L_batch)
        # build the fake images
        fake_batch = torch.cat([L_batch, g_AB_batch], dim=1)
        # ask the discriminator for its opinion
        # NOTE(review): both judgements below are computed but never used
        # or returned -- verify whether they can be removed.
        d_real_batch = self.discriminator(img_batch)
        d_fake_batch_g = self.discriminator(fake_batch)
        # put all of the relevant batches in a list and return it
        batches = [img_batch, AB_batch, L_batch, g_AB_batch, fake_batch]
        return batches
    def fit(self, train_dataset: torch.utils.data.Dataset,
            val_dataset: torch.utils.data.Dataset = None,
            batch_size: int = 1, epochs: int = 10) -> None:
        """ Fits the models of this trainer to the given training dataset. If no validation dataset is provided,
        no validation will be performed.
        Parameters
        ----------
        train_dataset : torch.utils.data.Dataset
            Dataset with training data.
        val_dataset : torch.utils.data.Dataset
            Dataset with validation data.
        batch_size : int
            Number of data samples in a batch during training.
        epochs : int
            Number of epochs for training.
        """
        # create dataloader for the training data
        dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=0)
        # initial validation
        if self.epoch == 0 and val_dataset is not None:
            self.validate(val_dataset)
            self.save_checkpoint()
        # for each of the epochs:
        # (epoch numbering continues from self.epoch, so training can
        # resume after load_checkpoint)
        for self.epoch in range(self.epoch+1, self.epoch+epochs+1):
            self.logger.set_mode('training')
            self.logger.new_epoch()
            # for each of the batches in this epoch:
            for step, batch in enumerate(dataloader):
                self.logger.new_step()
                # set mode of both networks to training
                self.generator.train()
                self.discriminator.train()
                img_batch = self.transform_input(batch).to(self.device)
                loss_dict = self.training_step(img_batch)
                # log and print losses
                self.logger.log_losses(loss_dict)
                status = f'[{self.epoch}: {step}/{len(dataloader)}] ' + ' | '.join([f'{loss_name}: {value:.6f}' for loss_name,value in loss_dict.items()])
                print(status)
            # if validation data is given
            if val_dataset is not None:
                # validate the adversarial network
                self.validate(val_dataset)
            # save models and optimizers in checkpoint
            self.save_checkpoint()
    def validate(self, dataset: torch.utils.data.Dataset) -> None:
        """ Validates the network on a given dataset.
        Parameters
        ----------
        dataset : torch.utils.data.Dataset
            Dataset with validation data.
        """
        # create dataloader for the validation data
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
        # set mode of both networks to evaluation
        self.generator.eval()
        self.discriminator.eval()
        self.logger.set_mode('validation')
        # for each validation sample:
        for step, batch in enumerate(dataloader):
            # transform image values to the range (-1, 1)
            img_batch = self.transform_input(batch).to(self.device)
            # forward input batch through the adversarial network
            batches = self.forward(img_batch)
            conc_images = self.visualize_prediction(batches)
            self.logger.log_images(np.array(conc_images), step, dataformats='NHWC')
    def visualize_prediction(self, batches) -> List[np.ndarray]:
        """Concatenate the batches from forward() side by side and convert
        them into a list of RGB uint8 images suitable for logging."""
        # detach all batches
        img_batch, AB_batch, L_batch, g_AB_batch, fake_batch = map(lambda x: x.detach(), batches)
        # fill missing channels
        L_batch, AB_batch, g_AB_batch = torch.cat([L_batch, torch.zeros_like(AB_batch)], dim=1), torch.cat([torch.zeros_like(L_batch), AB_batch], dim=1), torch.cat([torch.zeros_like(L_batch), g_AB_batch], dim=1)
        # concatenate all batches, move the result to the cpu and transform it to numpy
        conc_batch = torch.cat([img_batch, AB_batch, L_batch, g_AB_batch, fake_batch], dim=3)
        conc_batch = conc_batch.cpu().numpy()
        # convert all images to RGB
        conc_batch = self.transform_output(conc_batch).astype('uint8')
        conc_images = [cv2.cvtColor(conc.transpose(1, 2, 0), cv2.COLOR_LAB2RGB) for conc in conc_batch]
        return conc_images
    def test(self, dataset: torch.utils.data.Dataset) -> None:
        """ Tests the network on a given testing dataset.
        Parameters
        ----------
        dataset : type
            Dataset with testing data.
        """
        # create dataloader for the testing data
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
        # set mode of both networks to evaluation
        self.generator.eval()
        self.discriminator.eval()
        self.logger.set_mode('testing')
        # for each validation sample:
        for step, batch in enumerate(dataloader):
            # NOTE(review): unlike validate(), the raw batch is fed here
            # without transform_input(...).to(self.device) -- confirm this
            # is intentional.
            batches = self.forward(batch)
            img_batch, AB_batch, L_batch, g_AB_batch, fake_batch = map(lambda x: x.detach(), batches)
            g_AB_batch = torch.cat([torch.zeros_like(L_batch), g_AB_batch], dim=1)
            conc_batch = torch.cat([img_batch, g_AB_batch, fake_batch], dim=3)
            conc_batch = conc_batch.cpu().numpy()
            conc_batch = self.transform_output(conc_batch).astype('uint8')
            conc_images = [cv2.cvtColor(conc.transpose(1, 2, 0), cv2.COLOR_LAB2RGB) for conc in conc_batch]
            self.logger.log_images(np.array(conc_images), step, dataformats='NHWC')
    def save_checkpoint(self) -> None:
        """ Saves the parameters and states of the models and optimizers in a single checkpoint file.
        """
        # create the checkpoint directory if it does not exist
        checkpoint_dir = os.path.join(self.logger.log_dir, self.logger.run_name, 'checkpoints')
        if not os.path.isdir(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        # create the checkpoint
        checkpoint = {
            'epoch': self.epoch,
            'generator': self.generator,
            'discriminator': self.discriminator,
            'g_optimizer': self.g_optimizer,
            'd_optimizer': self.d_optimizer
        }
        # save the checkpoint
        torch.save(checkpoint, os.path.join(checkpoint_dir, f'{self.epoch}.ckpt'))
    def load_checkpoint(self, epoch: int) -> None:
        """ Loads the checkpoint of a given epoch and restores the parameters and states
        of all models and optimizers.
        Parameters
        ----------
        epoch : int
            Number of epoch to restore.
        """
        # load the checkpoint of the given epoch
        checkpoint_dir = os.path.join(self.logger.log_dir, self.logger.run_name, 'checkpoints')
        checkpoint = torch.load(os.path.join(checkpoint_dir, f'{epoch}.ckpt'))
        # restore the information from the checkpoint
        self.epoch = checkpoint['epoch']
        self.generator = checkpoint['generator']
        self.discriminator = checkpoint['discriminator']
        self.g_optimizer = checkpoint['g_optimizer']
        self.d_optimizer = checkpoint['d_optimizer']
        # inform the logger about the restored epoch
        self.logger.set_epoch(checkpoint['epoch'])
| true |
bcc96f0986620073c9ff2f0d602daf70f0179e74 | Python | mhfowler/slack-to-arena | /slack_to_arena.py | UTF-8 | 3,685 | 2.546875 | 3 | [] | no_license | import sys
import os
import re
from time import sleep
from slacktoarena.utils.constants import PROJECT_PATH, RATE_LIMIT_SLEEP_TIME
from slacktoarena.utils.parse_slack_export import parse_slack_export
from slacktoarena.utils.arena_syncer import ArenaSyncer
def slack_to_arena(path_to_slack_export, arena_username, save_images=False):
    """
    parses a slack export at the given path, and syncs all links found in all channels to the are.na
    of the given username
    :param path_to_slack_export: string path to unzipped slack export
    :param arena_username: string username of are.na user to sync to
    :param save_images: when True, slack-hosted file links are matched
        against the files URL pattern (currently still skipped -- see TODO)
    :return: None
    """
    print('++ archiving links from {} to the are.na user {}'.format(path_to_slack_export, arena_username))
    # parse the slack export
    print('++ parsing all the links from the slack export')
    slack_channels_dict = parse_slack_export(input_path=path_to_slack_export, save_files=False)
    # print total number of links
    total_links = 0
    for channel, links in slack_channels_dict.items():
        total_links += len(links)
    print('++ uploading {} links'.format(total_links))
    # initialize are.na api
    print('++ initializing are.na api')
    arena_syncer = ArenaSyncer(username=arena_username)
    # for each channel, sync all of its links to arena
    num_links = 0
    for channel_title, links in slack_channels_dict.items():
        # if the channel was already synced, let's skip it for now
        # NOTE(review): this branch is currently a no-op (the `continue`
        # is commented out), so already-synced channels are re-synced.
        if arena_syncer.has_channel(channel_title):
            # TODO: figure out what to do here
            pass
            # continue
        # if the channel was not already synced, then sync it
        print('++ saving links for slack channel: {}'.format(channel_title))
        for link in links:
            num_links += 1
            # progress report every 20 links
            if not num_links % 20:
                completion_percent = (num_links / float(total_links)) * 100
                print('++ {}/{} {}%'.format(num_links, total_links, completion_percent))
            # NOTE(review): slack-hosted links are always skipped; the
            # files-URL match below is computed but never used.
            if 'slack.com' in link:
                if save_images:
                    match = re.match('.*slack.com/files/(\S+)/(\S+)/(\S+)', link)
                # TODO: figure out how to handle these
                print('++ skipping {}'.format(link))
                continue
            # clean links with title in the link
            match = re.match('(.*)(\|.*)', link)
            if match:
                print('++ trimming title from link {}'.format(match.group(2)))
                link = match.group(1)
            # best-effort: a single failed link must not abort the sync;
            # sleep between calls to respect the are.na rate limit
            try:
                arena_syncer.save_link_to_arena(channel_title=channel_title, link=link)
                sleep(RATE_LIMIT_SLEEP_TIME)
            except Exception as e:
                print('++ WW: failed to save link {}'.format(link))
if __name__ == '__main__':
    # Usage (CLI mode): python slack_to_arena.py <export_path> <arena_username>
    # boolean flag to parse arguments from CLI or read hardcoded args
    USE_CLI = True
    # parse arguments from command line
    if USE_CLI:
        export_path = sys.argv[1]
        username = sys.argv[2]
    # or hardcode them
    else:
        data_dir = os.path.abspath(os.path.dirname(PROJECT_PATH))
        # export_path = os.path.join(data_dir, 'sfpc/sfpc_slack_export_nov14-2016')
        # export_path = os.path.join(data_dir, 'sfpc/sfpc_slack_export_nov14-2016')
        export_path = os.path.join(data_dir, 'darkslack/darkslack_export')
        # export_path = os.path.join(data_dir, 'learning-gardens/Learning Gardens Slack export Feb 10 2018')
        # export_path = os.path.join(data_dir, 'computerlab/computerlab_slack_export_nov11_2016')
        username = 'dark-slack'
    # run the script
    slack_to_arena(path_to_slack_export=export_path, arena_username=username)
| true |
dba72f3bde83a9eac766611b69869e078a67f506 | Python | zmoog/alchemy | /alchemy/cash/models.py | UTF-8 | 1,930 | 2.59375 | 3 | [] | no_license | # -*- encoding: utf-8 -*-
from django.db import models
from django import forms
from django.contrib.auth.models import User
import datetime
# Two-letter DB codes and human-readable labels for Account.type.
ACCOUNT_TYPES = (
    ('as', 'Asset'),
    ('cc', 'Conto corrente'),
    ('eq', 'Equity'),
    ('ex', 'Expenses'),
    ('in', 'Income'),
    ('cr', 'Carta di credito'),
)
class Account(models.Model):
    """
    A 'place' where money lives (wallet, bank account, expense category,
    etc.).  [Original Italian: "'luogo' dove risiede il denaro
    (portafogli, conto corrente, tipologia di spesa, etc..)."]
    """
    name = models.CharField(max_length=50)
    type = models.CharField(max_length=2, choices=ACCOUNT_TYPES)
    balance = models.DecimalField(max_digits=10, decimal_places=2)
    def __unicode__(self):
        # NOTE(review): __unicode__ is only honoured on Python 2 / old
        # Django; Python 3 needs __str__ for a readable representation.
        return self.name
    class Meta:
        ordering = ['name']
class Transfer(models.Model):
    """
    A transfer of money from one Account to another.
    [Original Italian: "Trasferimento di denaro da un Account ad
    un'altro."]
    """
    amount = models.DecimalField(max_digits = 10, decimal_places = 2)
    source = models.ForeignKey(Account, related_name = 'source', help_text="Account da cui prelevare")
    destination = models.ForeignKey(Account, related_name = 'destination', help_text="Account in cui depositare")
    description = models.TextField(help_text="Descrizione del contenuto dell'operazione di trasferimento")
    validity_date = models.DateField(default=datetime.date.today, help_text="Data in cui il trasferimento è diventato effettivo. Ad esempio la valuta di un bonifico, o la data in cui hai acquistato il pane")
    created_on = models.DateTimeField(auto_now_add = True)
    updated_on = models.DateTimeField(auto_now = True)
    def __unicode__(self):
        # NOTE(review): "%d" truncates the Decimal amount to a whole
        # number, so cents are dropped in this label -- confirm intended.
        # __unicode__ is only honoured on Python 2 / old Django.
        return "%d da %s a %s" % (self.amount, self.source.name, self.destination.name)
    class Meta:
        ordering = ['validity_date']
class TransferForm(forms.ModelForm):
    # ModelForm for Transfer; overrides `amount` to render with
    # locale-aware decimal formatting (localize=True).
    amount = forms.DecimalField(max_digits=10, decimal_places=2, localize=True, help_text="Quantità di denaro da trasferire")
    class Meta:
        model = Transfer
| true |
1a0feb417908d77098edd03a564ae1e1c32afe4f | Python | bentaljaard/orchestrator | /job.py | UTF-8 | 82 | 2.59375 | 3 | [
"Apache-2.0"
def printTaskName(task):
    """Print the first key of `task` (viewed as a mapping) and return "success"."""
    mapping = dict(task)
    first_key = next(iter(mapping))
    print(first_key)
    return "success"
98e272815d97fa9101635c6781609de27b70ac84 | Python | wrightsteven/insuranceProject | /insuranceProject.py | UTF-8 | 5,076 | 3.171875 | 3 | [] | no_license | import tkinter as tk
# Supported countries and, per country: the annual claim probability for
# each risk group (low risk / smoker-only / obese-only / smoker-and-obese)
# and the average cost of a single claim (avgClaimDict).
countryList = ["US", "Mexico", "Switzerland", "Turkey", "Brazil", "Sudan", "Cambodia"]
lowRiskDict = {"US":.01, "Mexico":.02, "Switzerland":.005, "Turkey":.011, "Brazil":.015, "Sudan":.022, "Cambodia":.022}
smokerOnlyDict = {"US":.02, "Mexico":.04, "Switzerland":.02, "Turkey":.03, "Brazil":.03, "Sudan":.04, "Cambodia":.05}
obeseOnlyDict = {"US":.015, "Mexico":.035, "Switzerland":.01, "Turkey":.019, "Brazil":.022, "Sudan":.038, "Cambodia":.045}
smokerAndObeseDict = {"US":.03, "Mexico":.08, "Switzerland":.03, "Turkey":.04, "Brazil":.055, "Sudan":.09, "Cambodia":.09}
avgClaimDict = {"US":100, "Mexico":157, "Switzerland":95, "Turkey":123, "Brazil":146, "Sudan":187, "Cambodia":199}
# --- Gather the pool composition from the operator ---------------------
size = int(input("Enter number of individuals to be insured: "))
smokerOnly = int(input("Enter number of individuals who smoke but are not obese: "))
obeseOnly = int(input("Enter number of individuals who are obese but do not smoke: "))
smokerAndObese = int(input("Enter number of individuals who smoke and are obese: "))
# Everyone not in a risk group is low risk.
lowRisk = size - (smokerAndObese + smokerOnly + obeseOnly)
country = (input("Please enter a country: "))
# ROBUSTNESS FIX: the original re-prompted at most once and never
# re-validated the second answer; keep asking until a known country is
# entered.
while country not in countryList:
    country = input("Please enter a valid choice: ")
class expectedClaims():
    """Computes the blended claim probability and the average claim cost
    for the insured pool, using the module-level per-country tables and
    the pool composition gathered from the operator."""
    def risk(self,country):
        """Return the pool's blended claim probability for `country`.

        Raises KeyError for an unknown country (the original if/elif
        chains silently left the attributes unset and failed later with
        AttributeError).
        """
        # IDIOM FIX: direct table lookups replace four 7-branch if/elif
        # chains that each reimplemented a dict lookup by hand.
        self.lowRiskFactor = lowRiskDict[country]
        self.smokerOnlyRiskFactor = smokerOnlyDict[country]
        self.obeseOnlyRiskFactor = obeseOnlyDict[country]
        self.smokerAndObeseRiskFactor = smokerAndObeseDict[country]
        # Average of the per-group rates, weighted by each group's share
        # of the pool.
        self.totalRiskFactor = (self.lowRiskFactor * (lowRisk/size)) + (self.smokerOnlyRiskFactor * (smokerOnly/size)) + (self.obeseOnlyRiskFactor * (obeseOnly/size)) + (self.smokerAndObeseRiskFactor * (smokerAndObese/size))
        return (self.totalRiskFactor)
    def claims(self,country):
        """Return the average cost of a single claim in `country`."""
        self.avgClaim = avgClaimDict[country]
        return self.avgClaim
# Module entry point.  `size` and `country` are defined earlier in this
# file (outside this excerpt) -- presumably cohort size and market; TODO
# confirm against the top of the file.  Expected total claims =
# cohort size * average claim per member * aggregate risk factor.
e = expectedClaims()
totalClaims = (size * e.claims(country) * e.risk(country))
print(totalClaims)
2038004dd9a3627ad3d003fe85c301907061057c | Python | LucasNote/hyperskill-python | /SimpleBankingSystem/project1/project11.py | UTF-8 | 6,045 | 3.953125 | 4 | [] | no_license | # Work on project. Stage 1/4: Card anatomy
"""
The very first digit is the Major Industry Identifier (MII), which tells you
what sort of institution issued the card.
1 and 2 are issued by airlines
3 is issued by travel and entertainment
4 and 5 are issued by banking and financial institutions
6 is issued by merchandising and banking
7 is issued by petroleum companies
8 is issued by telecommunications companies
9 is issued by national assignment
In our banking system, credit cards should begin with 4.
The first six digits are the Issuer Identification Number (IIN).
These can be used to look up where the card originated from. If you have access to a list
that provides detail on who owns each IIN, you can see who issued the card just by reading the card number.
Here are a few you might recognize:
Visa: 4*****
American Express (AMEX): 34**** or 37****
Mastercard: 51**** to 55****
In our banking system, the IIN must be 400000.
The seventh digit to the second-to-last digit is the customer account number.
Most companies use just 9 digits for the account numbers, but it’s possible to use up to 12.
This means that using the current algorithm for credit cards, the world can issue about a trillion cards
before it has to change the system.
We often see 16-digit credit card numbers today, but it’s possible to issue a card with up to 19 digits
using the current system. In the future, we may see longer numbers becoming more common.
In our banking system, the customer account number can be any, but it should be unique.
And the whole card number should be 16-digit length.
The very last digit of a credit card is the check digit or checksum. It is used to validate
the credit card number using the Luhn algorithm, which we will explain in the next stage of this project.
For now, the checksum can be any digit you like.
"""
# Objectives
"""
Objectives
You should allow customers to create a new account in our banking system.
Once the program starts, you should print the menu:
1. Create an account
2. Log into account
0. Exit
If the customer chooses ‘Create an account’, you should generate a new card number which
satisfies all the conditions described above. Then you should generate a PIN code that
belongs to the generated card number. A PIN code is a sequence of any 4 digits.
PIN should be generated in a range from 0000 to 9999.
If the customer chooses ‘Log into account’, you should ask them to enter their card information.
Your program should store all generated data until it is terminated so that a user is able to
log into any of the created accounts by a card number and its pin. You can use an array to store the information.
After all information is entered correctly, you should allow the user to check the account balance;
right after creating the account, the balance should be 0. It should also be possible to
log out of the account and exit the program.
"""
import random
# --- module state for the simple banking demo (stage 1: card anatomy) ---
id_index = 0  # sequence counter used to mint customer account numbers
inn = '400000' # Issuer Identification Number (IIN)
can_length = 9 # length of the customer account number portion
checksum = '5' # any value is okay for now
current_account = None  # row of the account currently logged in, else None
accounts = [] # card_num, pin, balance, logged_in
def get_new_card_id():
    """Build the next 16-digit card number: IIN + 9-digit account + checksum."""
    global id_index
    id_index += 1
    # Zero-pad the sequential customer account number to nine digits.
    account_number = str(id_index).zfill(9)
    return inn + account_number + checksum
def get_new_pin():
    """Return a random 4-digit PIN as a string.

    The project spec (see the module docstring) requires PINs in the
    range 0000-9999; the original ``randint(1000, 9999)`` could never
    produce a PIN with a leading zero.  Zero-padding keeps the result
    exactly four characters long.
    """
    return f"{random.randint(0, 9999):04d}"
def get_balance(card_id):
    """Print the balance of the account identified by *card_id*, if any."""
    index, account = find_account(card_id)
    if index > -1:
        print(f'Balance: {account[2]}')
        print()
def create_account():
    """Issue a new card + PIN, register it with a zero balance, and print it."""
    card_number = get_new_card_id()
    pin = get_new_pin()
    # Row layout: [card_num, pin, balance, logged_in]
    accounts.append([card_number, pin, 0, False])
    for line in ('Your card has been created',
                 'Your card number:', card_number,
                 'Your card PIN:', pin, ''):
        print(line)
def find_account(card_id):
    """Return ``[index, row]`` of the first account row containing *card_id*.

    Returns ``[-1, -1]`` when no row matches.
    """
    for position, row in enumerate(accounts):
        if card_id in row:
            return [position, row]
    return [-1, -1]
def update_account(index, account):
    """Replace the account row stored at *index* with *account*."""
    global accounts
    accounts[index] = account
def print_menu():
    """Show the pre-login or post-login menu depending on session state."""
    if current_account is None:
        options = ('1. Create an account', '2. Log into account', '0. Exit')
    else:
        options = ('1. Balance', '2. Log out', '0. Exit')
    for option in options:
        print(option)
def log_in():
    """Prompt for card number + PIN and start a session on success.

    Returns the matched account row on success.  On failure it returns
    whatever find_account produced (-1 when no card matched); the main
    loop ignores the return value either way.
    """
    global current_account
    # find id and pin from the account array
    card_str = input('Enter your card number:')
    pin_str = input('Enter your PIN:')
    found = find_account(card_str)
    index = found[0]
    account = found[1]
    # Short-circuit keeps account[1] from being evaluated when the
    # lookup failed (account is the int -1 in that case).
    if index > -1 and account[1] == pin_str:
        account[3] = True  # mark the row as logged in
        update_account(index, account)
        current_account = account
        print('You have successfully logged in!')
        print()
    else:
        print()
        print('Wrong card number or PIN!')
        print()
    return account
def log_out(card_id):
    """Mark the matching account as logged out and clear the session."""
    global current_account
    index, account = find_account(card_id)
    if index > -1:
        account[3] = False
        update_account(index, account)
        current_account = None
        print('You have successfully logged out!')
        print()
# Main menu loop.  `choice` starts as the int -1, which never equals the
# string "0", so the loop always runs at least once; termination happens
# via the explicit break below.
choice = -1
while choice != "0":
    print_menu()
    choice = input()
    if choice == '0':
        current_account = None
        print("Bye!")
        break
    else:
        # NOTE(review): non-numeric input raises ValueError here and
        # crashes the program -- confirm that is acceptable at this stage.
        number = int(choice)
        if current_account is None:
            # Pre-login menu.
            if number == 1:
                create_account()
            elif number == 2:
                log_in()
        else:
            # Post-login menu operates on the active card.
            card_id = current_account[0]
            if number == 1:
                get_balance(card_id)
            elif number == 2:
                log_out(card_id)
a425936460dbe758dbf936f5deab3d2ecad5650d | Python | mintheon/Practice-Algorithm | /Jaehwan-Hong/baekjoon/beginner_100th/10998.py | UTF-8 | 53 | 2.84375 | 3 | [] | no_license | # A x B
A, B = map(int, input().split())
print(A * B) | true |
5e63ed8e5a1cfbfa9d138d680bd4d5e53b5a93ff | Python | galihsetyaawan/latian-python1 | /fundamental2-tipe-data.py | UTF-8 | 478 | 3.890625 | 4 | [] | no_license |
# Scalar types -> one plain variable per value.
anak1 = 'Eko'
anak2 = 'Dwi'
anak3 = 'Tri'
anak4 = 'Catur'
print(anak1)
print(anak2)
print(anak3)
print(anak4)
# List type -> a single collection replaces the four variables above.
anak = ['Eko','Dwi','Tri','Catur']
print(anak)
anak.append('Panca')
print(anak)
# Greet the second child (lists are zero-indexed).
print('\nsapa anak kedua')
print(f'Hai {anak[1]}!')
# Greet everyone: idiomatic direct iteration.
print('\nsapa semuanya')
for d in anak:
    print(f'Hai {d}!')
# Greet everyone "the complicated way": index-based loop, shown on
# purpose for contrast with the loop above.
print('\nsapa semuanya : cara ribet')
for a in range (0,len(anak)):
    print(f'{a+1}.hai {anak[a]}')
2b89200ca578ae3c848276ed9fbd5476816a38f3 | Python | UnifoxPythonSquad/realsung | /Section4/read_write_files.py | UTF-8 | 817 | 3.3125 | 3 | [] | no_license | #문제1
f1 = open("test.txt", 'w')
f1.write("Life is too short!")
f1.close()
f2 = open("test.txt", 'r')
print(f2.read())
f2.close()
#문제2
i = input("저장할 내용을 입력하세요 :")
f = open('test.txt', 'a')
f.write(i)
f.close()
#문제3
f = open('abc.txt', 'r')
lines = f.readlines()
f.close()
rlines = reversed(lines)
f = open('abc.txt', 'w')
for line in rlines:
line = line.strip()
f.write(line)
f.write('\n')
f.close()
#문제4
n = open('pro4.txt', 'r')
rp = n.read()
n.close()
rp = rp.replace('java','python')
f = open('pro4.txt', 'w')
f.write(rp)
f.close()
#문제5
f = open("sample.txt")
lines = f.readlines( )
f.close( )
total = 0
for line in lines:
score = int(line)
total += score
average = total / len(lines)
f = open("result.txt", "w")
f.write(str(average))
f.close() | true |
fb76817188710587616327c6af52b96d127665e1 | Python | DevParapalli/JEE-Mains | /jee_mains/calculation.py | UTF-8 | 4,757 | 2.53125 | 3 | [
"MIT"
] | permissive | # CUSTOM ABBR USED
# PSC = Physics Single Correct
# PSN = Physics Single Not Correct
# PSL = Physics Single Left
# PHYS = Physics Single
import json
from .constants import BASE_DIR
from .generation import generate
def section_calculate_marks(section_dict, answer_dict):
    """Grade one exam section.

    :param section_dict: question-id -> question record.  Each record has
        at least a 'status' string (contains 'Not' when unanswered) and an
        'answer_given' value.
    :param answer_dict: question-id -> answer-key string.  'D' marks a
        dropped question (everyone gets credit) and strings like 'A or C'
        list multiple accepted answers.  NOTE(review): this means a plain
        answer key of option D would also read as "dropped" -- confirm the
        key format never uses bare 'D' as a real answer.
    :returns: (num_correct, num_incorrect, num_left,
               correct_dict, incorrect_dict, left_dict)
    """
    CORRECT = {}
    NOTCORRECT = {}
    LEFT = {}
    for key in section_dict:
        # Missing keys default to 'D' (dropped), so they count as correct.
        answer = answer_dict.get(key, 'D')
        question = section_dict[key]
        if question['status'].find('Not') != -1:  # Question is not answered.
            question['correct_answer'] = answer
            LEFT[key] = question
        elif answer == "D":  # Question has been dropped.
            question['correct_answer'] = "Question Dropped"
            CORRECT[key] = question
        elif 'or' in answer:  # Multiple accepted answers, e.g. "A or C".
            # BUG FIX: the original used ans.split(), which produced a list
            # of lists (e.g. [['A'], ['C']]), so a given answer could never
            # match.  strip() yields the plain option strings instead.
            answers = [ans.strip() for ans in answer.split('or')]
            # Record the key here too, for consistency with the branches below.
            question['correct_answer'] = answer
            if question['answer_given'] in answers:
                CORRECT[key] = question
            else:
                NOTCORRECT[key] = question
        elif question['answer_given'] == answer:  # Correctly attempted.
            question['correct_answer'] = answer
            CORRECT[key] = question
        else:  # Incorrectly attempted.
            question['correct_answer'] = answer
            NOTCORRECT[key] = question
    return len(CORRECT), len(NOTCORRECT), len(LEFT), CORRECT, NOTCORRECT, LEFT
def calculate(response_dict, answer_dict):
    """Grade an entire response sheet and persist the result.

    :param response_dict: parsed responses; must contain an 'info' dict
        plus one question dict per section ('physics-single',
        'physics-integer', 'chemistry-single', ... 'maths-integer').
    :param answer_dict: question-id -> answer-key mapping shared by all
        sections (passed through to section_calculate_marks).
    :returns: dict keyed by three-letter section/outcome codes (e.g.
        'PSC' = Physics Single Correct) plus '__INF__' carrying totals.

    Side effects: writes temp/final_results.json under BASE_DIR and
    calls generate() to build the report.
    """
    MARKS = 0  # Marks Counter
    QUESTIONS = {
        "PSC":{}, # +4
        "PSN":{}, # -1
        "PSL":{}, # +0
        "PIC":{}, # +4
        "PIN":{}, # +0
        "PIL":{}, # +0
        "CSC":{}, # +4
        "CSN":{}, # -1
        "CSL":{}, # +0
        "CIC":{}, # +4
        "CIN":{}, # +0
        "CIL":{}, # +0
        "MSC":{}, # +4
        "MSN":{}, # -1
        "MSL":{}, # +0
        "MIC":{}, # +4
        "MIN":{}, # +0
        "MIL":{}, # +0
        "__INF__":{}
    }
    INFO = response_dict['info']
    # (result-key prefix, section responses, penalty per wrong answer).
    # Every correct answer scores +4 and unattempted ones 0; only the
    # single-choice sections penalise wrong answers with -1.  This table
    # replaces six copy-pasted blocks that differed only in these values.
    sections = [
        ('PS', response_dict['physics-single'],    -1),
        ('PI', response_dict['physics-integer'],    0),
        ('CS', response_dict['chemistry-single'],  -1),
        ('CI', response_dict['chemistry-integer'],  0),
        ('MS', response_dict['maths-single'],      -1),
        ('MI', response_dict['maths-integer'],      0),
    ]
    t_correct, t_incorrect, t_left = 0, 0, 0
    for prefix, section, wrong_mark in sections:
        (correct, incorrect, left,
         QUESTIONS[prefix + 'C'],
         QUESTIONS[prefix + 'N'],
         QUESTIONS[prefix + 'L']) = section_calculate_marks(section, answer_dict)
        MARKS += correct * 4 + incorrect * wrong_mark
        t_correct += correct
        t_incorrect += incorrect
        t_left += left
    with open(BASE_DIR / 'temp' / 'final_results.json', 'w') as file:
        INFO['Marks'] = MARKS
        INFO['correct'] = t_correct
        INFO['incorrect'] = t_incorrect
        INFO['left'] = t_left
        QUESTIONS['__INF__'] = INFO
        file.write(json.dumps(QUESTIONS))
    generate(QUESTIONS)  # Responsible for the Generation Logic
    return QUESTIONS
| true |
c0cd7ac4bc08fef577fa8d19c1dd3acc90d15b0f | Python | tpham393/ec503PacmanBot | /Game without GUI/game_test.py | UTF-8 | 5,073 | 3 | 3 | [] | no_license | # A version of the game with a GUI you need to have pygame to run this
# pip install pygame
import pygame
import math
import random
import time
import game_funcs as g
from random import randint as ri
import threading
# Starting coordinates on the 5x5 grid (x = column, y = row).
pacman_x, pacman_y = 1, 3;
ghost_x, ghost_y = 3, 3;
goal_x, goal_y = 3, 1;
moves = [];          # history of moves actually performed
gridsize = [5, 5];   # [columns, rows]
# Occupancy grids indexed [y][x].  NOTE: cells hold the *strings*
# 'True'/'False', not booleans -- comparisons elsewhere rely on that.
pacman = [['False' for x in range(gridsize[0])] for y in range(gridsize[1])]
ghost = [['False' for x in range(gridsize[0])] for y in range(gridsize[1])]
goal = [['False' for x in range(gridsize[0])] for y in range(gridsize[1])]
pacman[pacman_y][pacman_x] = 'True';
ghost[ghost_y][ghost_x] = 'True';
goal[goal_y][goal_x] = 'True';
won = 'False'    # string flag, consumed by Game.drawHUD
lost = 'False'
turn_count=-1
ended=False
class Game():
    """Pygame renderer for the pacman-vs-ghost grid game.

    All game state (positions, occupancy grids, `won`, `ended`, ...)
    lives in module-level globals; this class only draws that state and
    services the pygame event queue.
    """

    def __init__(self):
        # Removed from the original: a no-op `pass` and a long list of
        # `global` declarations for names this method never touched.
        pygame.init()
        pygame.font.init()
        # 65 px per grid cell, plus a 74 px strip at the bottom for the HUD.
        width, height = 65*gridsize[0], 65*gridsize[1]+74
        self.screen = pygame.display.set_mode((width, height))
        pygame.display.set_caption("Game")
        self.clock = pygame.time.Clock()
        self.initGraphics()

    def initGraphics(self):
        """Load all sprite images once up front."""
        self.linev=pygame.image.load("Graphics/line.png")
        self.lineh=pygame.transform.rotate(pygame.image.load("Graphics/line.png"), -90)
        self.score=pygame.image.load("Graphics/score.png")
        self.p=pygame.image.load("Graphics/p.png")   # pacman sprite
        self.g=pygame.image.load("Graphics/g.png")   # ghost sprite
        self.w=pygame.image.load("Graphics/w.png")   # goal sprite

    def drawBoard(self):
        """Draw the grid lines, then the ghost/pacman/goal sprites."""
        for x in range(gridsize[0]):
            for y in range(gridsize[1]-1):
                self.screen.blit(self.lineh, [x*64+5, (y+1)*64])
        for y in range(gridsize[1]):
            for x in range(gridsize[0]-1):
                self.screen.blit(self.linev, [(x+1)*64, (y)*64+5])
        for x in range(gridsize[0]):
            for y in range(gridsize[1]):
                # Draw priority when cells overlap: ghost > pacman > goal.
                if ghost[y][x] == 'True':
                    self.screen.blit(self.g, [(x)*64+5, (y)*64+5])
                elif pacman[y][x] == 'True':
                    self.screen.blit(self.p, [(x)*64+5, (y)*64+5])
                elif goal[y][x] == 'True':
                    self.screen.blit(self.w, [(x)*64+5, (y)*64+5])

    def drawHUD(self):
        """Render the bottom strip: controls while playing, result when over."""
        global won
        if not ended:
            self.screen.blit(self.score,[0, 65*gridsize[1]])
            myfont = pygame.font.SysFont(None, 32)
            label1 = myfont.render("1: Up, 2: Right", 1, (255,255,255))
            label2 = myfont.render("3: Down, 4: Left", 1, (255,255,255))
        else:
            self.screen.blit(self.score,[0, 65*gridsize[1]])
            myfont = pygame.font.SysFont(None, 32)
            label2 = myfont.render("", 1, (255,255,255))
            # `won` is the *string* 'True'/'False' (see module globals).
            if won == 'False':
                label1 = myfont.render("You've been eaten!", 1, (255,255,255))
            else:
                label1 = myfont.render("You've won!", 1, (255,255,255))
        self.screen.blit(label1,(5,65*gridsize[1] + 10))
        self.screen.blit(label2,(5,65*gridsize[1] + 40))

    def update(self):
        """One frame: redraw everything and service the event queue."""
        if not ended:
            self.clock.tick(60)
            self.screen.fill(0)
            self.drawBoard()
            self.drawHUD()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    exit()
            # Removed: an unused `key = pygame.key.get_pressed()` local.
            pygame.display.flip()
        else:
            self.end()

    def end(self):
        """Show the final board and result for five seconds, then quit."""
        self.screen.fill(0)
        self.drawBoard()
        self.drawHUD()
        pygame.display.flip()
        time.sleep(5)
        exit()
    # BUG FIX: the original had a stray `while True:` event loop here at
    # class-body level.  It would have executed at class-definition time,
    # before pygame was initialised, and could never terminate -- removed.
tg = Game()
def turn_timer():
    # Redraw roughly 10x/second.  Each call schedules the next one on a
    # fresh threading.Timer, so this chain never stops on its own --
    # NOTE(review): the timers are never cancelled; confirm process exit
    # via exit() is the intended shutdown path.
    tg.update()
    threading.Timer(0.1, turn_timer).start()
turn_timer();
# Random-walk driver: pick a random direction (1-4) each second until
# the externally-defined game logic (game_funcs.game_func) ends the game.
while not ended:
    time.sleep(1)
    move = ri(1,4);
    pacman_x2, pacman_y2, ghost_x2, ghost_y2, goal_x, goal_y, ended, won, moved = g.game_func(move, pacman_x, pacman_y, ghost_x, ghost_y, goal_x, goal_y)
    if moved:
        moves.append(move)
        # Clear the old cells, commit the new coordinates, mark the new cells.
        pacman[pacman_y][pacman_x]='False'
        ghost[ghost_y][ghost_x]='False'
        pacman_x, pacman_y, ghost_x, ghost_y = pacman_x2, pacman_y2, ghost_x2, ghost_y2
        pacman[pacman_y][pacman_x]='True'
        ghost[ghost_y][ghost_x]='True'
print(moves)
3c82cab9a5dd4f4b182b2a48515df0393f0352a3 | Python | Igorxyz/Simple_password_generator | /Simple_password_generator.py | UTF-8 | 865 | 3.28125 | 3 | [] | no_license | #!/usr/bin/python3
import sys
import random
import getpass
import time
start = time.time()
password_options = ["1234567890", "abcdefghijklmopqrstxyz", "!@#$%", "abcdefghijklmopqrstxyz".upper()]
print("CLI PASSWORD GENERATOR")
print("*" * 80)
print()
try:
password_length = int(input("Choose password length: "))
characters_list = []
for i in range(0, password_length):
characters_list.append(random.choice("".join(password_options)))
print("Generated password: {}".format("".join(characters_list)))
except KeyboardInterrupt:
print()
print("Program interrupted by the user: {}".format(getpass.getuser()))
print("Terminating program...")
sys.exit()
except ValueError:
print()
print("Password length has to be a number")
sys.exit()
end = time.time()
print("Execution time: {} seconds".format(round(float(end - start),2)))
| true |
0d10a96b0c8c97152194f5b26e867faa44956a5a | Python | XifeiNi/LeetCode-Traversal | /python/string/swap_adjacent_LR_string.py | UTF-8 | 532 | 3.125 | 3 | [] | no_license | class Solution:
def canTransform(self, start: str, end: str) -> bool:
l = 0
r = 0
for i in range(len(start)):
if start[i] == 'R':
r += 1
if end[i] == "L":
l += 1
if start[i] == 'L':
l -= 1
if end[i] == 'R':
r -= 1
if (l < 0 or r != 0) and (l != 0 or r < 0):
return False
if l == 0 and r == 0:
return True
else:
return False
| true |
9bf0cee10404884b3971ec91999c7a1b875e9285 | Python | runningfire/Projects | /golem_data.py | UTF-8 | 1,848 | 2.75 | 3 | [] | no_license | #!/usr/bin/python2
# -*- coding: utf-8 -*-
from numpy import *
from urllib2 import urlopen
import shutil
import os
def golem_data(shot, diagn):
    """
    Simple interface for GOLEM database

    Use:
        obj = golem_data(10011, 'loop_voltage')
        plot(obj.tvec, obj.data)

        d - object containing all available information
        d.tvec - time vector Nx1
        d.data - data matrix NxM

    Example:

        from golem_data import golem_data
        from matplotlib.pyplot import *

        obj1 = golem_data(10689 , 'electron_temperature')
        obj2 = golem_data(10689 , 'spectrometr:temperature')

        plot(obj1.tvec,obj1.data, label=obj1.labels) %
        errorbar(obj2.tvec, obj2.data, xerr=obj2.tvec_err, yerr=[obj2.data_err[:,0], obj2.data_err[:,1]], label=obj2.labels )
        xlabel(obj2.ax_labels[0])
        ylabel(obj2.ax_labels[1])
        legend([obj1.name, obj2.name])
        axis([obj1.plasma_start, obj1.plasma_end, 0, None])
        title(obj1.info)
        show()
    """
    # Fetch the .npz bundle for this shot/diagnostic over HTTP.
    # DataSource/load/linspace/double come from the module's
    # `from numpy import *`; this file targets Python 2 (urllib2).
    remote_file = "http://golem.fjfi.cvut.cz/utils/data/"+str(shot)+"/"+diagn+'.npz'
    gfile = DataSource().open(remote_file) # remote file
    print(gfile)
    try:
        d = dict(load(gfile))
    except IOError:
        raise IOError('Missing diagnostic ' + str(diagn) + ' in shot ' + str(shot))
    # Some diagnostics ship t_start/t_end instead of an explicit time
    # vector; rebuild tvec with one sample per data row.
    if not 'tvec' in d and 't_start' in d:
        d['tvec'] = linspace( d.pop('t_start'), d.pop('t_end'), len(d['data']) )
    try:
        if 'scale' in d:
            d['data'] = double(d['data']) * d.pop('scale') # rescale data
    except:
        # NOTE(review): bare except silently skips rescaling on any
        # failure -- presumably deliberate best-effort; confirm.
        pass
    return Data( **d )
class Data(object):
    """Lightweight attribute bag built from keyword arguments."""
    def __init__(self, **fields):
        for name, value in fields.items():
            setattr(self, name, value)
    def __repr__(self):
        return "Data object, keys:\n" + str(self.__dict__.keys())
    def __getitem__(self, key):
        # Allow dict-style access alongside attribute access.
        return getattr(self, key)
| true |
9950177e449ee6439924bbd2eb73b2b50fdd13d4 | Python | bashia/Holdem | /tester/bot_wrapper.py | UTF-8 | 559 | 2.53125 | 3 | [] | no_license | from messages import Action
class BotWrapper(object):
    """Pair a poker-bot implementation with its table metadata.

    Instantiates *bot* with the same args/kwargs and shields the game
    loop from any exception the bot raises during its turn.
    (This module is Python 2: note the `except Exception, e` and
    print-statement syntax below.)
    """
    def __init__(self, bot, *args, **kwargs):
        self.id = kwargs['id']            # seat / bot identifier
        self.credits = kwargs['credits']  # starting chip count
        self.bot = bot(*args, **kwargs)

    @staticmethod
    def filter_action(input_action):
        # Hook for sanitising a bot's action; currently a pass-through.
        return input_action

    def turn(self):
        """Ask the wrapped bot for its action; fold if the bot blows up."""
        try:
            return self.bot.turn()
        except Exception, e:
            print "bot threw exception:",e
            return Action('fold')

    def __repr__(self):
        return self.bot.__repr__()
1ee9b7679ad23d1bc6c828fc26ba878896485a55 | Python | carlosevmoura/courses-notes | /programming/python-curso_em_video/exercises/ex003.py | UTF-8 | 289 | 3.9375 | 4 | [
"MIT"
] | permissive | numero1 = input('Digite um número: ')
numero2 = input('Digite mais um número: ')
soma = numero1 + numero2
print('A soma (errada) destes números vale {}.'.format(soma))
soma = int(numero1) + int(numero2)
print('A soma entre os números {0} e {1} vale {2}.'.format(numero1,numero2,soma)) | true |
71108e7ce493ec00fac86cda10d1faa4eaed4adb | Python | Jordan-type/Simple-python-GUI-Applications-using-tkinter | /app.py | UTF-8 | 2,694 | 2.78125 | 3 | [] | no_license | import tkinter as tk
from tkinter import filedialog, Menu, Text
import os
# Main window setup.
root = tk.Tk()
root.title("Favarite Apps")  # NOTE(review): "Favarite" looks like a typo for "Favorite"
apps = []
# Restore the previously saved app list (comma-separated paths); blank
# entries produced by the trailing comma on save are filtered out.
if os.path.isfile('save.txt'):
    with open('save.txt', 'r') as f:
        tempApps = f.read()
        tempApps = tempApps.split(',')
        apps = [x for x in tempApps if x.strip()]
# check if it opens saved programs
# print(tempApps)
def addApp():
    """Prompt for an executable, add it to the list, and refresh the labels."""
    # Clear the old labels before redrawing the whole list.
    for child in frame.winfo_children():
        child.destroy()
    chosen = filedialog.askopenfilename(initialdir="/", title="Select File",
                                        filetypes=(("Executables", "*.exe"), ("All files", "*.*")))
    apps.append(chosen)
    for path in apps:
        tk.Label(frame, text=path, bg="gray", font=("Arial", 12)).pack()
def runApp():
    """Launch every saved app with the OS default handler (Windows-only API)."""
    for path in apps:
        os.startfile(path)
# Menu and Submenu.  Entries without a `command=` are visual placeholders.
menubar = Menu(root)
fileMenu = Menu(menubar, tearoff=0)
fileMenu.add_command(label="Create new Session", command=addApp)
fileMenu.add_command(label="Open/Run Apps", command=runApp)
fileMenu.add_command(label="Save")
fileMenu.add_command(label="Save as...")
fileMenu.add_command(label="Close")
fileMenu.add_separator()
fileMenu.add_command(label="Exit", command=root.quit)
menubar.add_cascade(label="File", menu=fileMenu)
editMenu = Menu(menubar, tearoff=0)
editMenu.add_command(label="Undo")
editMenu.add_separator()
editMenu.add_command(label="Cut")
editMenu.add_command(label="Copy")
editMenu.add_command(label="Paste")
editMenu.add_command(label="Delete")
editMenu.add_command(label="Select All")
menubar.add_cascade(label="Edit", menu=editMenu)
helpMenu = Menu(menubar, tearoff=0)
helpMenu.add_command(label="Help Index")
helpMenu.add_command(label="About...")
menubar.add_cascade(label="Help", menu=helpMenu)
# End of menubar and menu Items
# pack is used to show objects in the window
canvas = tk.Canvas(root, height=400, width=1000, bg="#263D42")
canvas.pack()
frame = tk.Frame(root, bg="white")
frame.place(relheight=0.7, relwidth=0.8, relx=0.1, rely=0.1)
openFile = tk.Button(root, text="Open File", padx=10, pady=5, fg="white", bg="#263D42", command=addApp)
openFile.pack(side="right")
runApps = tk.Button(root, text="Run Apps", padx=10, pady=5, fg="white", bg="#263D42", command=runApp)
runApps.pack(side="right")
# Show any apps restored from save.txt at startup.
for app in apps:
    label = tk.Label(frame, text=app, font=("Arial", 12))
    label.pack()
root.config(menu=menubar)
root.mainloop()
# save the details for future open.  The trailing comma per entry is
# harmless: empty fields are filtered out when the file is re-loaded.
with open('save.txt', 'w') as f:
    for app in apps:
        f.write(app + ',')
| true |
388241697edf092f55a8b6c57351dfadb40f8bf3 | Python | TomographicImaging/CIL | /Wrappers/Python/cil/framework/BlockDataContainer.py | UTF-8 | 26,006 | 3.015625 | 3 | [
"Apache-2.0",
"BSD-3-Clause",
"GPL-3.0-only"
] | permissive | # -*- coding: utf-8 -*-
# Copyright 2019 United Kingdom Research and Innovation
# Copyright 2019 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# CIL Developers, listed at: https://github.com/TomographicImaging/CIL/blob/master/NOTICE.txt
import numpy
from numbers import Number
import functools
from cil.utilities.multiprocessing import NUM_THREADS
class BlockDataContainer(object):
'''Class to hold DataContainers as column vector
Provides basic algebra between BlockDataContainer's, DataContainer's and
subclasses and Numbers
1) algebra between `BlockDataContainer`s will be element-wise, only if
the shape of the 2 `BlockDataContainer`s is the same, otherwise it
will fail
2) algebra between `BlockDataContainer`s and `list` or `numpy array` will
work as long as the number of `rows` and element of the arrays match,
indipendently on the fact that the `BlockDataContainer` could be nested
3) algebra between `BlockDataContainer` and one `DataContainer` is possible.
It will require all the `DataContainers` in the block to be
compatible with the `DataContainer` we want to operate with.
4) algebra between `BlockDataContainer` and a `Number` is possible and it
will be done with each element of the `BlockDataContainer` even if nested
A = [ [B,C] , D]
A * 3 = [ 3 * [B,C] , 3* D] = [ [ 3*B, 3*C] , 3*D ]
'''
    # String tokens naming the element-wise operation that
    # binary_operations dispatches to on each contained DataContainer.
    ADD = 'add'
    SUBTRACT = 'subtract'
    MULTIPLY = 'multiply'
    DIVIDE = 'divide'
    POWER = 'power'
    SAPYB = 'sapyb'
    MAXIMUM = 'maximum'
    MINIMUM = 'minimum'
    ABS = 'abs'
    SIGN = 'sign'
    SQRT = 'sqrt'
    CONJUGATE = 'conjugate'
    # numpy consults __array_priority__ to decide whose reflected
    # operator wins in mixed numpy/BlockDataContainer expressions.
    __array_priority__ = 1
    # CIL-internal precedence used when mixing container classes.
    __container_priority__ = 2
    @property
    def dtype(self):
        # One dtype per contained container, in storage order.
        return tuple(i.dtype for i in self.containers)
    def __init__(self, *args, **kwargs):
        '''Store *args* as the blocks and validate the requested shape.

        :param args: the DataContainers (or nested BlockDataContainers)
            held by this block container
        :param shape: optional keyword giving the block layout; defaults
            to a column vector ``(len(args), 1)``.  The product of the
            shape entries must equal the number of containers passed.
        '''
        self.containers = args
        self.index = 0       # cursor shared by the iterator protocol
        self.geometry = None
        #if len(set([i.shape for i in self.containers])):
        #    self.geometry = self.containers[0].geometry
        shape = kwargs.get('shape', None)
        if shape is None:
            shape = (len(args),1)
        # shape = (len(args),1)
        self.shape = shape
        n_elements = functools.reduce(lambda x,y: x*y, shape, 1)
        if len(args) != n_elements:
            raise ValueError(
                    'Dimension and size do not match: expected {} got {}'
                    .format(n_elements, len(args)))
def __iter__(self):
'''BlockDataContainer is Iterable'''
return self
    def next(self):
        '''Python 2 iterator protocol: backwards-compatible alias of __next__.'''
        return self.__next__()
    def __next__(self):
        '''Return the next container, raising StopIteration when exhausted.

        Termination relies on get_item letting ``row == shape[0]``
        through its bound check so that the underlying tuple indexing
        raises the IndexError caught here.
        '''
        try:
            out = self[self.index]
        except IndexError as ie:
            raise StopIteration()
        self.index+=1
        return out
    def is_compatible(self, other):
        '''Return True when *other* can take part in element-wise algebra with self.

        Accepted: any Number; a list/tuple/ndarray of Numbers whose
        length matches the number of containers; a BlockDataContainer
        with the same number of containers; or a DataContainer whose
        shape matches every (possibly nested) container held here.
        '''
        if isinstance(other, Number):
            return True
        elif isinstance(other, (list, tuple, numpy.ndarray)) :
            for ot in other:
                if not isinstance(ot, Number):
                    raise ValueError('List/ numpy array can only contain numbers {}'\
                        .format(type(ot)))
            return len(self.containers) == len(other)
        elif isinstance(other, BlockDataContainer):
            return len(self.containers) == len(other.containers)
        else:
            # this should work for other as DataContainers and children
            ret = True
            for i, el in enumerate(self.containers):
                if isinstance(el, BlockDataContainer):
                    # Recurse into nested block containers.
                    a = el.is_compatible(other)
                else:
                    a = el.shape == other.shape
                ret = ret and a
            # probably will raise
            return ret
    def get_item(self, row):
        '''Return the container stored at position *row*.

        NOTE(review): the bound check uses ``>`` rather than ``>=``, so
        ``row == shape[0]`` slips through and fails below with an
        IndexError -- which is exactly what __next__ catches to stop
        iteration.  Tightening this check to raise ValueError would
        break the iterator protocol; leave as-is unless __next__ is
        changed in the same commit.
        '''
        if row > self.shape[0]:
            raise ValueError('Requested row {} > max {}'.format(row, self.shape[0]))
        return self.containers[row]
    def __getitem__(self, row):
        '''Index access: delegate to get_item.'''
        return self.get_item(row)
def add(self, other, *args, **kwargs):
'''Algebra: add method of BlockDataContainer with number/DataContainer or BlockDataContainer
:param: other (number, DataContainer or subclasses or BlockDataContainer
:param: out (optional): provides a placehold for the resul.
'''
out = kwargs.get('out', None)
if out is not None:
self.binary_operations(BlockDataContainer.ADD, other, *args, **kwargs)
else:
return self.binary_operations(BlockDataContainer.ADD, other, *args, **kwargs)
def subtract(self, other, *args, **kwargs):
'''Algebra: subtract method of BlockDataContainer with number/DataContainer or BlockDataContainer
:param: other (number, DataContainer or subclasses or BlockDataContainer
:param: out (optional): provides a placehold for the resul.
'''
out = kwargs.get('out', None)
if out is not None:
self.binary_operations(BlockDataContainer.SUBTRACT, other, *args, **kwargs)
else:
return self.binary_operations(BlockDataContainer.SUBTRACT, other, *args, **kwargs)
def multiply(self, other, *args, **kwargs):
'''Algebra: multiply method of BlockDataContainer with number/DataContainer or BlockDataContainer
:param: other (number, DataContainer or subclasses or BlockDataContainer
:param: out (optional): provides a placehold for the resul.
'''
out = kwargs.get('out', None)
if out is not None:
self.binary_operations(BlockDataContainer.MULTIPLY, other, *args, **kwargs)
else:
return self.binary_operations(BlockDataContainer.MULTIPLY, other, *args, **kwargs)
def divide(self, other, *args, **kwargs):
'''Algebra: divide method of BlockDataContainer with number/DataContainer or BlockDataContainer
:param: other (number, DataContainer or subclasses or BlockDataContainer
:param: out (optional): provides a placehold for the resul.
'''
out = kwargs.get('out', None)
if out is not None:
self.binary_operations(BlockDataContainer.DIVIDE, other, *args, **kwargs)
else:
return self.binary_operations(BlockDataContainer.DIVIDE, other, *args, **kwargs)
def power(self, other, *args, **kwargs):
'''Algebra: power method of BlockDataContainer with number/DataContainer or BlockDataContainer
:param: other (number, DataContainer or subclasses or BlockDataContainer
:param: out (optional): provides a placehold for the resul.
'''
out = kwargs.get('out', None)
if out is not None:
self.binary_operations(BlockDataContainer.POWER, other, *args, **kwargs)
else:
return self.binary_operations(BlockDataContainer.POWER, other, *args, **kwargs)
def maximum(self, other, *args, **kwargs):
'''Algebra: power method of BlockDataContainer with number/DataContainer or BlockDataContainer
:param: other (number, DataContainer or subclasses or BlockDataContainer
:param: out (optional): provides a placehold for the resul.
'''
out = kwargs.get('out', None)
if out is not None:
self.binary_operations(BlockDataContainer.MAXIMUM, other, *args, **kwargs)
else:
return self.binary_operations(BlockDataContainer.MAXIMUM, other, *args, **kwargs)
def minimum(self, other, *args, **kwargs):
'''Algebra: power method of BlockDataContainer with number/DataContainer or BlockDataContainer
:param: other (number, DataContainer or subclasses or BlockDataContainer
:param: out (optional): provides a placehold for the resul.
'''
out = kwargs.get('out', None)
if out is not None:
self.binary_operations(BlockDataContainer.MINIMUM, other, *args, **kwargs)
else:
return self.binary_operations(BlockDataContainer.MINIMUM, other, *args, **kwargs)
def sapyb(self, a, y, b, out, num_threads = NUM_THREADS):
r'''performs axpby element-wise on the BlockDataContainer containers
Does the operation .. math:: a*x+b*y and stores the result in out, where x is self
:param a: scalar
:param b: scalar
:param y: compatible (Block)DataContainer
:param out: (Block)DataContainer to store the result
Example:
--------
a = 2
b = 3
ig = ImageGeometry(10,11)
x = ig.allocate(1)
y = ig.allocate(2)
bdc1 = BlockDataContainer(2*x, y)
bdc2 = BlockDataContainer(x, 2*y)
out = bdc1.sapyb(a,bdc2,b)
'''
if out is None:
raise ValueError("out container cannot be None")
kwargs = {'a':a, 'b':b, 'out':out, 'num_threads': NUM_THREADS}
self.binary_operations(BlockDataContainer.SAPYB, y, **kwargs)
    def axpby(self, a, b, y, out, dtype=numpy.float32, num_threads = NUM_THREADS):
        '''Deprecated method. Alias of sapyb.

        Note the argument order differs from sapyb (a, b, y here versus
        a, y, b there), and *dtype* is accepted for backward
        compatibility but not forwarded.
        '''
        return self.sapyb(a,y,b,out,num_threads)
def binary_operations(self, operation, other, *args, **kwargs):
    '''Algebra: generic method of algebric operation with BlockDataContainer with number/DataContainer or BlockDataContainer

    Provides commutativity with DataContainer and subclasses, i.e. this
    class's reverse algebric methods take precedence w.r.t. direct algebric
    methods of DataContainer and subclasses.

    This method is not to be used directly.

    :param operation: one of the class operation constants (ADD, SUBTRACT,
        MULTIPLY, DIVIDE, POWER, MAXIMUM, MINIMUM, SAPYB)
    :param other: Number, list/tuple/ndarray, BlockDataContainer, or a
        single DataContainer that is combined with every element
    :param out: optional keyword; destination for an in-place operation,
        in which case None is returned
    '''
    if not self.is_compatible(other):
        raise ValueError('Incompatible for operation {}'.format(operation))
    out = kwargs.get('out', None)
    if isinstance(other, Number):
        # Case 1: scalar operand -- applied to every element in turn.
        # try to do algebra with one DataContainer. Will raise error if not compatible
        kw = kwargs.copy()
        res = []
        for i,el in enumerate(self.containers):
            # Select the element-level method for the requested operation.
            if operation == BlockDataContainer.ADD:
                op = el.add
            elif operation == BlockDataContainer.SUBTRACT:
                op = el.subtract
            elif operation == BlockDataContainer.MULTIPLY:
                op = el.multiply
            elif operation == BlockDataContainer.DIVIDE:
                op = el.divide
            elif operation == BlockDataContainer.POWER:
                op = el.power
            elif operation == BlockDataContainer.MAXIMUM:
                op = el.maximum
            elif operation == BlockDataContainer.MINIMUM:
                op = el.minimum
            else:
                # Note: SAPYB is not supported with a plain scalar 'other'.
                raise ValueError('Unsupported operation', operation)
            if out is not None:
                # In-place: write into the matching slot of 'out'.
                kw['out'] = out.get_item(i)
                op(other, *args, **kw)
            else:
                res.append(op(other, *args, **kw))
        if out is not None:
            return
        else:
            return type(self)(*res, shape=self.shape)
    elif isinstance(other, (list, tuple, numpy.ndarray, BlockDataContainer)):
        # Case 2: element-wise operand -- pair each container with the
        # corresponding entry of 'other'.
        kw = kwargs.copy()
        res = []
        if isinstance(other, BlockDataContainer):
            the_other = other.containers
        else:
            the_other = other
        for i,zel in enumerate(zip ( self.containers, the_other) ):
            el = zel[0]
            ot = zel[1]
            if operation == BlockDataContainer.ADD:
                op = el.add
            elif operation == BlockDataContainer.SUBTRACT:
                op = el.subtract
            elif operation == BlockDataContainer.MULTIPLY:
                op = el.multiply
            elif operation == BlockDataContainer.DIVIDE:
                op = el.divide
            elif operation == BlockDataContainer.POWER:
                op = el.power
            elif operation == BlockDataContainer.MAXIMUM:
                op = el.maximum
            elif operation == BlockDataContainer.MINIMUM:
                op = el.minimum
            elif operation == BlockDataContainer.SAPYB:
                # SAPYB needs per-element a/b and is only defined against
                # another BlockDataContainer.
                if not isinstance(other, BlockDataContainer):
                    raise ValueError("{} cannot handle {}".format(operation, type(other)))
                op = el.sapyb
            else:
                raise ValueError('Unsupported operation', operation)
            if out is not None:
                if operation == BlockDataContainer.SAPYB:
                    # a and b may themselves be blocks; take the i-th entry.
                    if isinstance(kw['a'], BlockDataContainer):
                        a = kw['a'].get_item(i)
                    else:
                        a = kw['a']
                    if isinstance(kw['b'], BlockDataContainer):
                        b = kw['b'].get_item(i)
                    else:
                        b = kw['b']
                    el.sapyb(a, ot, b, out.get_item(i), num_threads=kw['num_threads'])
                else:
                    kw['out'] = out.get_item(i)
                    op(ot, *args, **kw)
            else:
                res.append(op(ot, *args, **kw))
        if out is not None:
            return
        else:
            return type(self)(*res, shape=self.shape)
    else:
        # Case 3: a single DataContainer combined with every element.
        # try to do algebra with one DataContainer. Will raise error if not compatible
        kw = kwargs.copy()
        if operation != BlockDataContainer.SAPYB:
            # remove keyworded argument related to SAPYB
            for k in ['a','b','y', 'num_threads', 'dtype']:
                if k in kw.keys():
                    kw.pop(k)
        res = []
        for i,el in enumerate(self.containers):
            if operation == BlockDataContainer.ADD:
                op = el.add
            elif operation == BlockDataContainer.SUBTRACT:
                op = el.subtract
            elif operation == BlockDataContainer.MULTIPLY:
                op = el.multiply
            elif operation == BlockDataContainer.DIVIDE:
                op = el.divide
            elif operation == BlockDataContainer.POWER:
                op = el.power
            elif operation == BlockDataContainer.MAXIMUM:
                op = el.maximum
            elif operation == BlockDataContainer.MINIMUM:
                op = el.minimum
            elif operation == BlockDataContainer.SAPYB:
                if isinstance(kw['a'], BlockDataContainer):
                    a = kw['a'].get_item(i)
                else:
                    a = kw['a']
                if isinstance(kw['b'], BlockDataContainer):
                    b = kw['b'].get_item(i)
                else:
                    b = kw['b']
                # NOTE(review): this path assumes 'out' is not None; a direct
                # call with out=None would raise AttributeError on get_item.
                el.sapyb(a, other, b, out.get_item(i), kw['num_threads'])
                # As axpyb cannot return anything we `continue` to skip the rest of the code block
                continue
            else:
                raise ValueError('Unsupported operation', operation)
            if out is not None:
                kw['out'] = out.get_item(i)
                op(other, *args, **kw)
            else:
                res.append(op(other, *args, **kw))
        if out is not None:
            return
        else:
            return type(self)(*res, shape=self.shape)
## unary operations
def unary_operations(self, operation, *args, **kwargs ):
    '''Unary operation on BlockDataContainer:

    generic dispatcher behind abs, sign, sqrt and conjugate.
    This method is not to be used directly.
    '''
    # Map the operation constant to the element-level method name once,
    # instead of re-testing it for every container.
    method = {
        BlockDataContainer.ABS: 'abs',
        BlockDataContainer.SIGN: 'sign',
        BlockDataContainer.SQRT: 'sqrt',
        BlockDataContainer.CONJUGATE: 'conjugate',
    }[operation]
    kw = kwargs.copy()
    if kw.get('out', None) is None:
        # Allocating form: collect one result per container.
        partial = [getattr(el, method)(*args, **kw) for el in self.containers]
        return BlockDataContainer(*partial)
    # In-place form: route each element's result into the matching slot.
    destination = kw.pop('out')
    for el, elout in zip(self.containers, destination.containers):
        kw['out'] = elout
        getattr(el, method)(*args, **kw)
def abs(self, *args, **kwargs):
    '''Element-wise absolute value; delegates to unary_operations.'''
    return self.unary_operations(BlockDataContainer.ABS, *args, **kwargs)
def sign(self, *args, **kwargs):
    '''Element-wise sign; delegates to unary_operations.'''
    return self.unary_operations(BlockDataContainer.SIGN, *args, **kwargs)
def sqrt(self, *args, **kwargs):
    '''Element-wise square root; delegates to unary_operations.'''
    return self.unary_operations(BlockDataContainer.SQRT, *args, **kwargs)
def conjugate(self, *args, **kwargs):
    '''Element-wise complex conjugate; delegates to unary_operations.'''
    return self.unary_operations(BlockDataContainer.CONJUGATE, *args, **kwargs)
# def abs(self, *args, **kwargs):
# return type(self)(*[ el.abs(*args, **kwargs) for el in self.containers], shape=self.shape)
# def sign(self, *args, **kwargs):
# return type(self)(*[ el.sign(*args, **kwargs) for el in self.containers], shape=self.shape)
# def sqrt(self, *args, **kwargs):
# return type(self)(*[ el.sqrt(*args, **kwargs) for el in self.containers], shape=self.shape)
# def conjugate(self, out=None):
# return type(self)(*[el.conjugate() for el in self.containers], shape=self.shape)
## reductions
def sum(self, *args, **kwargs):
    '''Sum over all elements of all containers (args are forwarded to each
    element's own sum method).'''
    return numpy.sum([ el.sum(*args, **kwargs) for el in self.containers])
def squared_norm(self):
    '''Sum of the squared norms of the individual containers.'''
    y = numpy.asarray([el.squared_norm() for el in self.containers])
    return y.sum()
def norm(self):
    '''Euclidean norm of the whole block: sqrt of squared_norm().'''
    return numpy.sqrt(self.squared_norm())
def pnorm(self, p=2):
    '''Element-wise p-norm over the containers.

    :param p: order of the norm; only 1 and 2 are supported
    :raises ValueError: for any other value of p
    '''
    if p==1:
        return sum(self.abs())
    elif p==2:
        tmp = functools.reduce(lambda a,b: a + b.conjugate()*b, self.containers, self.get_item(0) * 0 ).sqrt()
        return tmp
    else:
        # BUGFIX: the exception instance used to be *returned* instead of
        # raised, so unsupported values of p failed silently at the caller.
        raise ValueError('Not implemented')
def copy(self):
    '''alias of clone'''
    return self.clone()
def clone(self):
    '''Deep copy: a new block of the same type holding copies of each element.'''
    return type(self)(*[el.copy() for el in self.containers], shape=self.shape)
def fill(self, other):
    '''Copy the content of a compatible BlockDataContainer into this one.

    :param other: BlockDataContainer with the same geometry
    :raises ValueError: if other is not a compatible BlockDataContainer
    '''
    if isinstance (other, BlockDataContainer):
        if not self.is_compatible(other):
            raise ValueError('Incompatible containers')
        for el,ot in zip(self.containers, other.containers):
            el.fill(ot)
    else:
        # BUGFIX: the exception instance used to be *returned* instead of
        # raised, so filling from an unsupported type failed silently.
        raise ValueError('Cannot fill with object provided {}'.format(type(other)))
def __add__(self, other):
    '''self + other; delegates to add().'''
    return self.add( other )
# __radd__
def __sub__(self, other):
    '''self - other; delegates to subtract().'''
    return self.subtract( other )
# __rsub__
def __mul__(self, other):
    '''self * other; delegates to multiply().'''
    return self.multiply(other)
# __rmul__
def __div__(self, other):
    '''Python 2 division operator; delegates to divide().'''
    return self.divide(other)
# __rdiv__
def __truediv__(self, other):
    '''self / other; delegates to divide().'''
    return self.divide(other)
def __pow__(self, other):
    '''self ** other; delegates to power().'''
    return self.power(other)
# reverse operand
def __radd__(self, other):
    '''Reverse addition

    to make sure that this method is called rather than the __mul__ of a numpy array
    the class constant __array_priority__ must be set > 0
    https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
    '''
    return self + other
# __radd__
def __rsub__(self, other):
    '''Reverse subtraction, computed as (-1 * self) + other.

    to make sure that this method is called rather than the __mul__ of a numpy array
    the class constant __array_priority__ must be set > 0
    https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
    '''
    return (-1 * self) + other
# __rsub__
def __rmul__(self, other):
    '''Reverse multiplication

    to make sure that this method is called rather than the __mul__ of a numpy array
    the class constant __array_priority__ must be set > 0
    https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
    '''
    return self * other
# __rmul__
def __rdiv__(self, other):
    '''Reverse division, computed as (self / other) ** -1.

    to make sure that this method is called rather than the __mul__ of a numpy array
    the class constant __array_priority__ must be set > 0
    https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
    '''
    return pow(self / other, -1)
# __rdiv__
def __rtruediv__(self, other):
    '''Reverse truedivision; same as __rdiv__.

    to make sure that this method is called rather than the __mul__ of a numpy array
    the class constant __array_priority__ must be set > 0
    https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__
    '''
    return self.__rdiv__(other)
def __rpow__(self, other):
    '''Reverse power

    NOTE(review): this calls other.power(self), so it only works when
    'other' itself provides a power() method (plain Python numbers do not).
    '''
    return other.power(self)
def __iadd__(self, other):
    '''In-place addition with a BlockDataContainer, a number, or a
    list/ndarray of compatible elements.'''
    if isinstance(other, BlockDataContainer):
        for mine, theirs in zip(self.containers, other.containers):
            mine += theirs
    elif isinstance(other, Number):
        for mine in self.containers:
            mine += other
    elif isinstance(other, (list, numpy.ndarray)):
        if not self.is_compatible(other):
            raise ValueError('Incompatible for __iadd__')
        for mine, theirs in zip(self.containers, other):
            mine += theirs
    return self
# __iadd__
def __isub__(self, other):
    '''In-place subtraction with a BlockDataContainer, a number, or a
    list/ndarray of compatible elements.'''
    if isinstance(other, BlockDataContainer):
        for mine, theirs in zip(self.containers, other.containers):
            mine -= theirs
    elif isinstance(other, Number):
        for mine in self.containers:
            mine -= other
    elif isinstance(other, (list, numpy.ndarray)):
        if not self.is_compatible(other):
            raise ValueError('Incompatible for __isub__')
        for mine, theirs in zip(self.containers, other):
            mine -= theirs
    return self
# __isub__
def __imul__(self, other):
    '''In-place multiplication with a BlockDataContainer, a number, or a
    list/ndarray of compatible elements.'''
    if isinstance(other, BlockDataContainer):
        for mine, theirs in zip(self.containers, other.containers):
            mine *= theirs
    elif isinstance(other, Number):
        for mine in self.containers:
            mine *= other
    elif isinstance(other, (list, numpy.ndarray)):
        if not self.is_compatible(other):
            raise ValueError('Incompatible for __imul__')
        for mine, theirs in zip(self.containers, other):
            mine *= theirs
    return self
# __imul__
def __idiv__(self, other):
    '''In-place division by a BlockDataContainer, a number, or a
    list/ndarray of compatible elements.'''
    if isinstance(other, BlockDataContainer):
        for mine, theirs in zip(self.containers, other.containers):
            mine /= theirs
    elif isinstance(other, Number):
        for mine in self.containers:
            mine /= other
    elif isinstance(other, (list, numpy.ndarray)):
        if not self.is_compatible(other):
            raise ValueError('Incompatible for __idiv__')
        for mine, theirs in zip(self.containers, other):
            mine /= theirs
    return self
# __rdiv__
def __itruediv__(self, other):
    '''Inline truedivision; same as __idiv__.'''
    return self.__idiv__(other)
def __neg__(self):
    """ Return - self """
    return -1 * self
def dot(self, other):
    '''Inner product: the sum of the element-wise dot products of the
    paired containers.'''
    tmp = [ self.containers[i].dot(other.containers[i]) for i in range(self.shape[0])]
    return sum(tmp)
def __len__(self):
    '''Number of containers in the block (first entry of shape).'''
    return self.shape[0]
| true |
00c15fb7c9931b31453e3b6d0b710bbb3e2b9d9f | Python | m2cci-NMZ/sasview | /src/sas/sasgui/plottools/PropertyDialog.py | UTF-8 | 4,290 | 2.859375 | 3 | [
"BSD-3-Clause"
] | permissive | """
"""
import wx
class Properties(wx.Dialog):
    """Modal dialog for picking the x/y axis scales of a plot and,
    optionally, an analysis 'view' (Guinier, Porod, Kratky, ...) that
    presets both scales at once."""
    def __init__(self, parent, id=-1, title="Select the scale of the graph"):
        wx.Dialog.__init__(self, parent, id, title)
        self.parent = parent
        vbox = wx.BoxSizer(wx.VERTICAL)
        sizer = wx.GridBagSizer(5, 5)
        # Running estimate of the minimum dialog width, grown below as the
        # combo boxes report their sizes.
        x_size = 70
        # --- header row: column labels -------------------------------------
        ix = 1
        iy = 1
        sizer.Add(wx.StaticText(self, -1, 'X'), (iy, ix))
        ix += 2
        sizer.Add(wx.StaticText(self, -1, 'Y'), (iy, ix))
        ix += 2
        sizer.Add(wx.StaticText(self, -1, 'View'), (iy, ix))
        iy += 1
        ix = 1
        # --- selector row: three read-only combo boxes ---------------------
        self.xvalue = wx.ComboBox(self, -1, style=wx.CB_READONLY)
        x_size += self.xvalue.GetSize()[0]
        sizer.Add(self.xvalue, (iy, ix), (1, 1), wx.ADJUST_MINSIZE, 0)
        ix += 2
        self.yvalue = wx.ComboBox(self, -1, style=wx.CB_READONLY)
        x_size += self.yvalue.GetSize()[0]
        sizer.Add(self.yvalue, (iy, ix), (1, 1), wx.ADJUST_MINSIZE, 0)
        ix += 2
        self.view = wx.ComboBox(self, -1, style=wx.CB_READONLY)
        # Choosing a view presets the x/y combos (see viewChanged below).
        self.view.Bind(wx.EVT_COMBOBOX, self.viewChanged)
        x_size += self.view.GetSize()[0]
        self.view.SetMinSize((160, 30))
        sizer.Add(self.view, (iy, ix), (1, 1),
                  wx.EXPAND | wx.RIGHT | wx.ADJUST_MINSIZE, 10)
        self.SetMinSize((x_size, 50))
        vbox.Add(sizer, 0, wx.EXPAND | wx.ADJUST_MINSIZE, 0)
        # --- OK / Cancel button row ----------------------------------------
        cancel_button = wx.Button(self, wx.ID_CANCEL, 'Cancel')
        ok_button = wx.Button(self, wx.ID_OK, "OK")
        sizer_button = wx.BoxSizer(wx.HORIZONTAL)
        sizer_button.Add((20, 20), 1, wx.EXPAND | wx.ADJUST_MINSIZE, 0)
        sizer_button.Add(ok_button, 0, wx.LEFT | wx.RIGHT | wx.ADJUST_MINSIZE, 10)
        sizer_button.Add(cancel_button, 0, wx.LEFT | wx.RIGHT | wx.ADJUST_MINSIZE, 10)
        vbox.Add(sizer_button, 0,
                 wx.EXPAND | wx.TOP | wx.BOTTOM | wx.ADJUST_MINSIZE, 10)
        # scale value for x
        self.xvalue.SetValue("ln(x)")
        self.xvalue.Insert("x", 0)
        self.xvalue.Insert("x^(2)", 1)
        self.xvalue.Insert("x^(4)", 2)
        self.xvalue.Insert("ln(x)", 3)
        self.xvalue.Insert("log10(x)", 4)
        self.xvalue.Insert("log10(x^(4))", 5)
        # scale value for y
        self.yvalue.SetValue("ln(y)")
        self.yvalue.Insert("y", 0)
        self.yvalue.Insert("1/y", 1)
        self.yvalue.Insert("ln(y)", 2)
        self.yvalue.Insert("y^(2)", 3)
        self.yvalue.Insert("y*x^(2)", 4)
        self.yvalue.Insert("y*x^(4)", 5)
        self.yvalue.Insert("1/sqrt(y)", 6)
        self.yvalue.Insert("log10(y)", 7)
        self.yvalue.Insert("ln(y*x)", 8)
        self.yvalue.Insert("ln(y*x^(2))", 9)
        self.yvalue.Insert("ln(y*x^(4))", 10)
        self.yvalue.Insert("log10(y*x^(4))", 11)
        # type of view or model used
        self.view.SetValue("--")
        self.view.Insert("--", 0)
        self.view.Insert("Linear y vs x", 1)
        self.view.Insert("Guinier lny vs x^(2)", 2)
        self.view.Insert("XS Guinier ln(y*x) vs x^(2)", 3)
        self.view.Insert("Porod y*x^(4) vs x^(4)", 4)
        self.view.Insert("Kratky y*x^(2) vs x", 5)
        self.SetSizer(vbox)
        self.Fit()
        self.Centre()
    def viewChanged(self, event):
        """Preset the x and y scale combos to match the selected view."""
        event.Skip()
        view = self.view.GetValue()
        if view == "Linear y vs x":
            self.xvalue.SetValue("x")
            self.yvalue.SetValue("y")
        elif view == "Guinier lny vs x^(2)":
            self.xvalue.SetValue("x^(2)")
            self.yvalue.SetValue("ln(y)")
        elif view == "XS Guinier ln(y*x) vs x^(2)":
            self.xvalue.SetValue("x^(2)")
            self.yvalue.SetValue("ln(y*x)")
        elif view == "Porod y*x^(4) vs x^(4)":
            self.xvalue.SetValue("x^(4)")
            self.yvalue.SetValue("y*x^(4)")
        elif view == "Kratky y*x^(2) vs x":
            self.xvalue.SetValue("x")
            self.yvalue.SetValue("y*x^(2)")
    def setValues(self, x, y, view):
        """Set all three combo boxes at once (returns the SetValue results)."""
        return self.xvalue.SetValue(x), self.yvalue.SetValue(y), \
            self.view.SetValue(view)
    def getValues(self):
        """Return the current (x scale, y scale, view) selections."""
        return self.xvalue.GetValue(), self.yvalue.GetValue(), \
            self.view.GetValue()
| true |
255df1daae883de1ddae9b9316ae244a181b86d9 | Python | Dominik12345/Masterarbeit | /arbeiten/Masterarbeit/Python/alpha_running_afix4.py | UTF-8 | 3,721 | 2.65625 | 3 | [] | no_license | #name of the figure
import matplotlib.pyplot as plt
import numpy as np
import pylab as p
import scipy.constants as const
import itertools
#####################################################
# Definition of the parameters and initial value --->
# Strong-coupling input value at the Z mass scale, alpha_s(M_Z).
amz = 0.1185
# Group sizes of the two gauge groups.
Nc = 3.
Nd = 2.
# Matter multiplicities fed to afX1/afY1/afZ1 and their "2" counterparts.
# Naming assumed from the argument lists: nf* fermions, ns* scalars;
# suffix c = first group, d = second group, j = joint -- TODO confirm.
nfc = 6.
nsc = 0.
nfd = 0.
nsd = 1.
nfj = 0.
nsj = 9.
def afX1(Nc,Nd,nfc,nsc,nfd,nsd,nfj,nsj):
return( (2./3.*1./2.*2.*(nfc+Nd*nfj)+1./3.*1./2.*(nsc+Nd*nsj)
-11./3.*Nc)/(16.*const.pi**2) )
def afY1(Nc, Nd, nfc, nsc, nfd, nsd, nfj, nsj):
    '''Two-loop beta-function coefficient (same-group part) for the first
    gauge coupling.  nfd and nsd are unused placeholders.'''
    fermion_part = (10./3.*Nc+2.*(Nc**2.-1.)/(2.*Nc) )*1./2.*2.*(nfc+Nd*nfj)
    scalar_part = (2./3.*Nc+4.*(Nc**2.-1.)/(2.*Nc))*1./2.*(nsc+Nd*nsj)
    gauge_part = 34./3.*Nc**2.
    return (fermion_part + scalar_part - gauge_part) / ((16.*const.pi**2)**2)
def afZ1(Nc, Nd, nfc, nsc, nfd, nsd, nfj, nsj):
    '''Two-loop cross-group beta-function coefficient for the first gauge
    coupling.

    NOTE(review): the original source carried the comment
    "in afZ1 Nd <-> Nc ???", i.e. it is unclear whether Nd and Nc are
    swapped here -- unverified.
    '''
    fermion_part = 2.*(Nd**2.-1.)/(2.*Nd)*1./2.*2.*(Nd*nfj)
    scalar_part = 4.*(Nd**2-1.)/(2.*Nd)*1./2.*Nd*nsj
    return (fermion_part + scalar_part) / ((16.*const.pi**2)**2)
def afX2(Nc,Nd,nfc,nsc,nfd,nsd,nfj,nsj):
    '''One-loop coefficient for the second gauge coupling: afX1 with the
    two groups' roles swapped (Nc<->Nd, c-counts<->d-counts).'''
    return(afX1(Nd,Nc,nfd,nsd,nfc,nsc,nfj,nsj))
def afY2(Nc,Nd,nfc,nsc,nfd,nsd,nfj,nsj):
    '''Two-loop same-group coefficient for the second coupling (afY1 with
    the two groups' roles swapped).'''
    return(afY1(Nd,Nc,nfd,nsd,nfc,nsc,nfj,nsj))
def afZ2(Nc,Nd,nfc,nsc,nfd,nsd,nfj,nsj):
    '''Two-loop cross-group coefficient for the second coupling (afZ1 with
    the two groups' roles swapped).'''
    return(afZ1(Nd,Nc,nfd,nsd,nfc,nsc,nfj,nsj))
# Rescale the beta-function coefficients into the normalisation used by the
# stored data (one factor of 4*pi per loop order).
X1 = afX1(Nc,Nd,nfc,nsc,nfd,nsd,nfj,nsj)*2*(4*const.pi)**1
Y1 = afY1(Nc,Nd,nfc,nsc,nfd,nsd,nfj,nsj)*2*(4*const.pi)**2
Z1 = afZ1(Nc,Nd,nfc,nsc,nfd,nsd,nfj,nsj)*2*(4*const.pi)**2
X2 = afX2(Nc,Nd,nfc,nsc,nfd,nsd,nfj,nsj)*2*(4*const.pi)**1
Y2 = afY2(Nc,Nd,nfc,nsc,nfd,nsd,nfj,nsj)*2*(4*const.pi)**2
Z2 = afZ2(Nc,Nd,nfc,nsc,nfd,nsd,nfj,nsj)*2*(4*const.pi)**2
# <--- Definition of the parameters and initial value
#####################################################
################
# read data --->
# Columns: coupling 1, coupling 2, log-scale t, Standard Model reference.
# NOTE: absolute path -- this only runs on the author's machine.
alpha11, alpha21, t1, alpha_SM1 = np.loadtxt(
'/home/dkahl/Documents/Masterarbeit/arbeiten/Masterarbeit/Python/data/alpha_running/alpha_running_3_2_6_0_0_1_0_9.txt',unpack = True)
#scale appropriately
# Restrict all series to t < 100 (arrays are column-aligned).
t1 = t1[t1<100]
alpha11 = alpha11[:len(t1)]
alpha21 = alpha21[:len(t1)]
alpha_SM1 = alpha_SM1[:len(t1)]
# <--- read data
################
#############################
# RUNNING COUPLINGS 1 PLOT --->
fig2 = plt.figure()
ax21 = fig2.add_subplot(111)
#ax22 = fig2.add_subplot(212)
# coupling constants
#ax2.plot(t,alpha2, 'g:',label=r'$\alpha_2$')
#along separatrix
ax21.plot(t1,alpha11, color='0.',linestyle='-',label=r'$\alpha_\mathrm{s}$')
#ax21.plot(t1,0.1*alpha21, color='0.5',linestyle='-',label=r'$\alpha_2 \times 10^{-1}$')
ax21.plot(t1,alpha_SM1, 'k--',label=r'$\alpha_\mathrm{QCD}$')
#SM running
#labels etc
ax21.xaxis.grid(True)
ax21.set_xlabel(r'$t$')
ax21.set_ylabel(r'Kopplungsstärke')
#ax21.set_xticklabels([])
# Shrink the axes so the legend fits to the right of the plot.
box = ax21.get_position()
ax21.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax21.legend(loc='center left',bbox_to_anchor=(1,0.5),frameon=False)
#safe
fig2.savefig('plots/alpha_running/Kopplungen1_afix4.pdf', bbox_inches='tight')
# <--- RUNNING COUPLINGS PLOT
#############################
#########################
# RELATIVE DEVIATION --->
def A1loop(t):
    '''Analytic one-loop running of the coupling from the initial value amz.'''
    return(1./(1./amz - X1*t))
# plot
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
#ax3.plot(t_SM,alpha_SM,'r.',label=r'2-Loop')
#ax3.plot(t_SM,A1loop(t_SM),'k-',label=r'1-Loop')
ax3.plot(t1,(alpha11-alpha_SM1)/alpha_SM1,color='0.',linestyle='-',label=r'$\Delta \alpha$')
ax3.xaxis.grid(True)
ax3.set_xlabel(r'$t$')
ax3.legend(loc='center left',bbox_to_anchor=(1,0.5),frameon=False)
#safe
fig3.savefig('plots/alpha_running/relative_deviation_afix4.pdf', bbox_inches='tight')
# <--- RELATIVE DEVIATION
#########################
#########################
# get error at 1 TeV --->
import scipy as sci
# t0 = ln(Planck-ish scale / top mass); printed for reference.
t0 = np.log((10**18*10**9 )/(173.21*10**9))
print('t0 = '+str(t0))
# Index of the t value closest to t0.
# NOTE(review): scipy.argmin was removed in modern SciPy; use np.argmin.
arg = sci.argmin( (t1-t0)**2)
print(t1[arg],((alpha11-alpha_SM1)/alpha_SM1)[arg])
| true |
c2e652c3bfc5eee1edd8efd8eee920b281cbbfeb | Python | betoma/advent-2018-python | /04/advent-04.py | UTF-8 | 2,335 | 3.40625 | 3 | [] | no_license | from collections import defaultdict
from collections import Counter
def takeFirst(elem):
    '''Sort key: the leading element of a sequence (here the "[date]" stamp,
    whose fixed-width format sorts chronologically as text).'''
    head = elem[0]
    return head
# Advent of Code 2018 day 4: parse the guard log, find sleep patterns.
list_of_actions = []
# Each line looks like "[1518-11-01 00:00] Guard #10 begins shift".
with open("input.txt") as f:
    for line in f:
        the_line = line.split("]", 1)
        date = the_line[0][1:]
        action = the_line[1].strip()
        list_item = [date, action]
        list_of_actions.append(list_item)
# Events must be processed in time order; the timestamps sort lexically.
chrono_list = sorted(list_of_actions, key=takeFirst)
guard_total = Counter()
guard_minutes = {}
for item in chrono_list:
    splaction = item[1].split()
    if splaction[0] == "Guard":
        # "Guard #N begins shift" -- switch the active guard.
        current_guard = splaction[1]
        if current_guard not in guard_minutes:
            guard_minutes[current_guard] = Counter()
    if splaction[0] == "falls":
        sleep_start = item[0]
    if splaction[0] == "wakes":
        sleep_end = item[0]
        # Only the minute field matters: naps fall within the midnight hour.
        start_minute = int(sleep_start.split(":", 1)[1])
        end_minute = int(sleep_end.split(":", 1)[1])
        sleep_length = end_minute - start_minute
        guard_total[current_guard] += sleep_length
        # Tally every individual minute the guard was asleep.
        for value in range(0, sleep_length):
            minute = start_minute + value
            guard_minutes[current_guard][minute] += 1
# Part one: the guard who sleeps the most and their most-slept minute.
sleepiest = guard_total.most_common(1)[0]
sleepy_guard = sleepiest[0]
sleepy_time = sleepiest[1]
sleepy_minute = guard_minutes[sleepy_guard].most_common(1)[0]
actual_minute = sleepy_minute[0]
minute_frequency = sleepy_minute[1]
# Guard ids look like "#123"; strip the '#' before multiplying.
answer_to_part_one = int(sleepy_guard[1:]) * int(actual_minute)
print(
    "{} slept for {} minutes, and was mostly frequently asleep at 00:{} ({} times).".format(
        sleepy_guard, sleepy_time, actual_minute, minute_frequency
    )
)
print("Answer to part one is {}".format(answer_to_part_one))
# --- part two ---#
# Part two: which (guard, minute) pair is asleep most consistently.
most_frequent_minutes = Counter()
for guard in guard_minutes:
    if guard_minutes[guard]:
        freq_min = guard_minutes[guard].most_common(1)[0]
        most_frequent_minutes[(guard, freq_min[0])] = int(freq_min[1])
regularity = most_frequent_minutes.most_common(1)[0]
regular_sleeper = regularity[0][0]
regular_minute = regularity[0][1]
how_often = regularity[1]
answer_to_part_two = int(regular_sleeper[1:]) * int(regular_minute)
print(
    "{} slept at 00:{} most often, {} times.".format(
        regular_sleeper, regular_minute, how_often
    )
)
print("Answer to part two is {}".format(answer_to_part_two))
| true |
b20bcf51582c74da76c445b445161f201ba7a1d8 | Python | udemirezen/xitorch | /xitorch/_impls/interpolate/extrap_utils.py | UTF-8 | 1,376 | 2.53125 | 3 | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | import torch
def get_extrap_pos(xqextrap, extrap, xmin=0.0, xmax=1.0):
    """Fold out-of-range query positions back into [xmin, xmax].

    :param xqextrap: (nrq,) tensor of query positions
    :param extrap: "periodic", "mirror" or "bound"
    :returns: tensor of the same shape, mapped inside the interval
    :raises RuntimeError: for any other extrapolation mode
    """
    if extrap not in ("periodic", "mirror", "bound"):
        raise RuntimeError("get_extrap_pos only work for periodic and mirror extrapolation")
    span = xmax - xmin
    unit = (xqextrap - xmin) / span
    if extrap == "periodic":
        folded = unit % 1.0
    elif extrap == "mirror":
        # Reflect around the interval ends; the parity of the ceiling index
        # decides the direction of the reflection.
        mag = unit.abs()
        ceil_idx = mag.long() + 1
        folded = (2 * (ceil_idx // 2) - mag) * (1 - (ceil_idx % 2.0) * 2)
    else:  # "bound"
        folded = torch.clamp(unit, 0.0, 1.0)
    return folded * span + xmin
def get_extrap_val(xqextrap, y, extrap):
    """Build the values assigned to extrapolated query points.

    :param xqextrap: (nrq,) tensor of out-of-range query positions
    :param y: (*BY, nr) tensor; only its batch shape is used for the output
    :param extrap: None or "nan" -> NaN fill; a number or 1-element tensor ->
        constant fill; a callable -> evaluated at xqextrap and broadcast over
        the batch dimensions
    :returns: (*BY, nrq) tensor on xqextrap's dtype/device
    :raises RuntimeError: for an unsupported extrap type
    """
    shape = (*y.shape[:-1], xqextrap.shape[-1])
    dtype = xqextrap.dtype
    device = xqextrap.device
    if extrap is None or extrap == "nan":
        # torch.full writes NaN directly; the previous torch.empty(...) * nan
        # multiplied uninitialized memory (same result, but needless UB-style
        # read of garbage values).
        return torch.full(shape, float("nan"), dtype=dtype, device=device)
    elif isinstance(extrap, (int, float)) or \
            (isinstance(extrap, torch.Tensor) and torch.numel(extrap) == 1):
        return torch.zeros(shape, dtype=dtype, device=device) + extrap
    elif callable(extrap):
        return extrap(xqextrap).expand(*y.shape[:-1], -1)  # (*BY, nrq)
    else:
        raise RuntimeError("Invalid extrap type (type: %s): %s" % (type(extrap), extrap))
| true |
2a65503cdadd4f82ca12afdf70660be04f6d64b6 | Python | dazzle111/python | /0122/pachong5.py | GB18030 | 611 | 3.109375 | 3 | [] | no_license | #ٶ
import string,urllib2
def baidu_tieba(url,begin_page,end_page):
    '''Download pages begin_page..end_page of a Baidu Tieba thread and save
    each one as a zero-padded <NNNNN>.html file in the working directory.

    NOTE: Python 2 code (urllib2, print statement). The progress-message
    string literal is GB-encoded and appears garbled in this transcoding;
    it is left byte-for-byte as found.
    '''
    for i in range(begin_page,end_page+1):
        sName = string.zfill(i,5)+'.html'
        print 'ص'+str(i)+'ҳ洢Ϊ'+sName+'...'
        f= open(sName,'w+')
        m = urllib2.urlopen(url+str(i)).read()
        f.write(m)
        f.close()
# Prompt for the thread URL (without the trailing "pn=" page number) and the
# page range, then fetch. The prompt strings are GB-encoded and garbled here;
# they are left byte-for-byte as found.
bdurl = str(raw_input(u'ɵĵַȥpn=\n'))
begin_page = int(raw_input(u'뿪ʼҳ'))
end_page = int(raw_input(u'صҳ'))
baidu_tieba(bdurl,begin_page,end_page)
| true |
cf8e71b71928995f4bdddf39f87e3e2bd51177ab | Python | popina1994/faculty-network | /Social Networks/language/language_converter.py | UTF-8 | 1,424 | 3.65625 | 4 | [] | no_license |
class CyrilicToLatin:
    """Transliteration of Serbian Cyrillic text into the Latin alphabet.

    The two alphabet lists are index-aligned; digraph targets (lj, nj, dž)
    map a single Cyrillic letter to two Latin characters.
    """
    CYRILIC_ALPHABET = ['а', 'б', 'в', 'г', 'д', 'ђ', 'е', 'ж', 'з', 'и', 'ј', 'к',
                        'л', 'љ', 'м', 'н', 'њ', 'о', 'п', 'р', 'с', 'т', 'ћ', 'у',
                        'ф', 'х', 'ц', 'ч', 'џ', 'ш', 'А', 'Б', 'В', 'Г', 'Д', 'Ђ',
                        'Е', 'Ж', 'З', 'И', 'Ј', 'К', 'Л', 'Љ', 'М', 'Н', 'Њ', 'О',
                        'П', 'Р', 'С', 'Т', 'Ћ', 'У', 'Ф', 'Х', 'Ц', 'Ч', 'Џ', 'Ш']
    LATIN_ALPHABET = ['a', 'b', 'v', 'g', 'd', 'đ', 'e', 'ž', 'z', 'i', 'j', 'k',
                      'l', 'lj', 'm', 'n', 'nj', 'o', 'p', 'r', 's', 't', 'ć', 'u',
                      'f', 'h', 'c', 'č', 'dž', 'š', 'A', 'B', 'V', 'G', 'D', 'Đ',
                      'E', 'Ž', 'Z', 'I', 'J', 'K', 'L', 'Lj', 'M', 'N', 'Nj', 'O',
                      'P', 'R', 'S', 'T', 'Ć', 'U', 'F', 'H', 'C', 'Č', 'Dž', 'Š']
    # Lookup table built once by pairing the aligned lists. (The original
    # rebuilt each entry with an O(n) .index() call and leaked the loop
    # variable 'letter' as a class attribute.)
    CYRILIC_LATIN_ALPHABET = dict(zip(CYRILIC_ALPHABET, LATIN_ALPHABET))

    @staticmethod
    def convertCyrilicToLatin(string):
        """Return *string* with every Cyrillic letter replaced by its Latin
        counterpart; other characters pass through unchanged.

        None yields "" and any non-str input is returned as-is (original
        behaviour, kept for compatibility).
        """
        if string is None:
            return ""
        if not isinstance(string, str):
            return string
        table = CyrilicToLatin.CYRILIC_LATIN_ALPHABET
        # join() avoids the quadratic += concatenation of the original.
        return "".join(table.get(ch, ch) for ch in string)
195f2b590cce340da4b6e708ad20953ba63537ab | Python | Pangeamt/nectm | /tools/mosesdecoder-master/scripts/ems/support/defaultconfig.py | UTF-8 | 1,653 | 2.875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-only",
"Apache-2.0"
] | permissive | #!/usr/bin/env python2
#
# This file is part of moses. Its use is licensed under the GNU Lesser General
# Public License version 2.1 or, at your option, any later version.
"""Version of ConfigParser which accepts default values."""
import ConfigParser
class Config:
    """Version of ConfigParser which accepts default values.

    Thin wrapper around SafeConfigParser whose typed getters fall back to a
    caller-supplied default when the requested option is absent.
    """

    def __init__(self, filename):
        """Parse *filename* immediately."""
        self.config = ConfigParser.SafeConfigParser()
        # 'with' guarantees the handle is closed even if parsing raises
        # (the original leaked the file on a parse error).
        with open(filename) as cfh:
            self.config.readfp(cfh)

    def _get_with_default(self, getter, section, name, default):
        """Shared fallback logic for the typed getters below."""
        # default=None means "required": delegate unconditionally so the
        # parser raises its usual error when the option is missing.
        if default is None or self.config.has_option(section, name):
            return getter(section, name)
        return default

    def get(self, section, name, default=None):
        return self._get_with_default(self.config.get, section, name, default)

    def getint(self, section, name, default=None):
        return self._get_with_default(self.config.getint, section, name, default)

    def getboolean(self, section, name, default=None):
        return self._get_with_default(self.config.getboolean, section, name, default)

    def getfloat(self, section, name, default=None):
        return self._get_with_default(self.config.getfloat, section, name, default)

    def __str__(self):
        # join() instead of the original quadratic += concatenation.
        lines = []
        for section in self.config.sections():
            for option in self.config.options(section):
                lines.append("%s:%s = %s\n" % (
                    section, option, self.config.get(section, option)))
        return "".join(lines)
| true |
3d7f1cb6d5095a6cb3bf0dc2aee4b7df7f62c524 | Python | BigRLab/dict_db | /dict_db/dict_db.py | UTF-8 | 1,241 | 2.609375 | 3 | [
"MIT"
] | permissive | from redis_ds.redis_hash_dict import JSONRedisHashDict
from redis_ds.redis_list import JSONRedisList
from elastic_ds.doc_dict import ElasticDocDict
# DB Types
class Consts(object):
    """Symbolic names for the supported database backends, data-structure
    kinds and serialization formats."""
    DB_REDIS = 'redis'
    DB_ELASTIC = 'elastic'
    DS_DICT = 'dict'
    DS_LIST = 'list'
    SER_JSON = 'json'
class DictDbFactory(object):
    """Factory producing dict- or list-like data structures backed by either
    Redis or ElasticSearch (see Consts for the recognised type names)."""
    def __init__(self, db_type, default_ds_type=Consts.DS_DICT):
        # Backend selector: Consts.DB_REDIS or Consts.DB_ELASTIC.
        self._db_type = db_type
        # Data-structure kind used when create() is called without ds_type.
        self._default_ds_type = default_ds_type
    def create(self, path, name, ds_type=None):
        """Instantiate the backing structure for (path, name).

        NOTE(review): unsupported (db_type, ds_type) combinations fall
        through and return None instead of raising.
        """
        if ds_type is None:
            ds_type = self._default_ds_type
        if self._db_type == Consts.DB_REDIS:
            # Redis keys are flat, so path and name are joined with '_'.
            # NOTE(review): 'basestring' is Python 2 only (NameError on
            # Python 3).
            if isinstance(name, basestring) and len(name) > 0:
                key = "%s_%s" % (path, name)
            else:
                key = path
            if ds_type == Consts.DS_DICT:
                return JSONRedisHashDict(key)
            elif ds_type == Consts.DS_LIST:
                return JSONRedisList(key)
        elif self._db_type == Consts.DB_ELASTIC:
            if ds_type == Consts.DS_DICT:
                return ElasticDocDict(path, name)
            elif ds_type == Consts.DS_LIST:
                raise NotImplementedError("ElasticSearch list not available yet...")
| true |
93d3760e96df84591cc47f1b37e9f39549780bbf | Python | sailesh083/Website-Phishing | /svc.py | UTF-8 | 1,367 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 23:47:01 2019
@author: shashikant, sailesh, chirayu
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 19:36:49 2019
@author: shashikant, sailesh, chirayu
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
# Load the phishing-website dataset and drop its redundant index column.
dataset=pd.read_csv('dataset_kaggle.csv')
dataset=dataset.drop(['index'],axis=1)
# Column positions of the selected features.
# NOTE(review): 'list' shadows the built-in of the same name.
list=[0,1,6,10,11,12,13,14,15,16,18,21,23,25,27]
X=dataset.iloc[:,list]
y=dataset.loc[:,['Result']] #left side for row and right side for column
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)
"""sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.fit_transform(X_test)
"""
# Train a sigmoid-kernel SVM classifier and report its test accuracy.
from sklearn.svm import SVC
classifier=SVC(kernel='sigmoid',random_state=0)
classifier.fit(X_train,y_train)
pred_rfc=classifier.predict(X_test)
print("Accuracy:",metrics.accuracy_score(y_test, pred_rfc))
# NOTE(review): the plotting block below looks copy-pasted from a 1-D
# regression tutorial and cannot run as-is: min(X)/max(X) over a DataFrame
# yield column labels, and 'regressor' is never defined (NameError).
X_grid=np.arange(min(X), max(X),0.01)
X_grid=X_grid.reshape((len(X_grid),1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.title('Truth of bluff (SVM')
plt.xlabel('Position level')
plt.ylabel('salary')
plt.show()
| true |
ed44b098fcea892f2f3bb19abb121bbaf04c97c2 | Python | neophytecoder/SpeechRecognitionDataSelection | /recognition_comparison/add_to_datadir.py | UTF-8 | 1,341 | 2.71875 | 3 | [] | no_license | #!/usr/bin/python
# This script appends utterances dumped out from XML to a Kaldi datadir
import sys, re
# Append utterances (read from stdin) to a Kaldi data directory.
# Usage: add_to_datadir.py <basename> <outdir> [pmer_threshold|all]
# NOTE: Python 2 script ('print >>' statements).
basename=sys.argv[1]
outdir = sys.argv[2]
# Optional third argument: PMER threshold; "all" (or absence) disables it.
if len(sys.argv) > 3 and sys.argv[3]!="all" :
    pmer_thresh=float(sys.argv[3])
else:
    pmer_thresh = None
# open the output files in append mode
segments_file = open(outdir + '/segments', 'a')
utt2spk_file = open(outdir + '/utt2spk', 'a')
text_file = open(outdir + '/text', 'a')
for line in sys.stdin:
    # Expected line shape: <...speakerNN...> start end mer pmer awd words...
    m = re.match(r'\w+speaker(\d+)\w+\s+(.*)', line)
    if m:
        spk = int(m.group(1))
        t = m.group(2).split()
        start = float(t[0])
        end = float(t[1])
        mer = float(t[2])
        words = ' '.join(t[5:])
        pmer = float(t[3])
        awd = float(t[4])
        # Kaldi ids: times are encoded in centiseconds, zero-padded.
        segId = '%s_spk-%04d_seg-%07d:%07d' % (basename, spk, start*100, end*100)
        spkId = '%s_spk-%04d' % (basename, spk)
        # only add segments where the Matching Error Rate is below the prescribed threshhold
        # (and the average word duration 'awd' lies in a plausible range)
        if (pmer_thresh == None or pmer <= pmer_thresh) and awd >= 0.165 and awd <= 0.66:
            print >> segments_file, '%s %s %.2f %.2f' % (segId, basename, start, end )
            print >> text_file, '%s %s' % (segId, words)
            print >> utt2spk_file, '%s %s' % (segId, spkId)
segments_file.close()
utt2spk_file.close()
text_file.close()
| true |
a21b9f8aee3066d3717da9039531cccefc47cbd2 | Python | hjia10/tigis_assignment3 | /web_visual_db/main.py | UTF-8 | 8,557 | 2.6875 | 3 | [] | no_license | #! /usr/bin/env python3
from jinja2 import Environment, FileSystemLoader
import cx_Oracle
import cgi
import cgitb
# Render CGI tracebacks as plain text in the HTTP response (debugging aid).
cgitb.enable(format='text')
class GraphicsArea:
    """Physical size (cm) of the SVG drawing surface plus its viewBox."""

    def __init__(self, width, height, viewBox_x, viewBox_y, viewBox_width, viewBox_height):
        # Physical dimensions, formatted for the SVG width/height attributes.
        self.width = "{}cm".format(width)
        self.height = "{}cm".format(height)
        # User-space coordinate system exposed through the viewBox attribute.
        self.viewBox_x = viewBox_x
        self.viewBox_y = viewBox_y
        self.viewBox_width = viewBox_width
        self.viewBox_height = viewBox_height
        self.viewBox_custom = " ".join(
            str(v) for v in (viewBox_x, viewBox_y, viewBox_width, viewBox_height))
class Field:
    """A rectangular field record fetched from the database, with geometry
    derived from its bounding corners."""

    def __init__(self, field_id, lowx, lowy, hix, hiy, area, owner, crop_id):
        # Values taken directly from the database row.
        self.field_id = field_id
        self.lowx = lowx
        self.lowy = lowy
        self.hix = hix
        self.hiy = hiy
        self.area = area
        self.owner = owner
        self.crop_id = crop_id
        # Geometry derived from the corners.
        self.width = hix - lowx
        self.height = hiy - lowy
        self.centroidx = (hix - lowx) / 2 + lowx
        self.centroidy = (hiy - lowy) / 2 + lowy
        # Fill colour is assigned later, at render time.
        self.fill = 'none'

    def __repr__(self):
        return "Field({}, {}, {}, {}, {})".format(
            self.field_id, self.lowx, self.lowy, self.hix, self.hiy)

    def __str__(self):
        return "Field {} - Bottom Left ({}, {}) Top Right ({}, {})".format(
            self.field_id, self.lowx, self.lowy, self.hix, self.hiy)

    def show_info(self):
        """Return the field's key attributes as display strings."""
        return [
            'Field ID : ' + str(self.field_id),
            'Lower X : ' + str(self.lowx),
            'Upper X : ' + str(self.hix),
            'Lower Y : ' + str(self.lowy),
            'Upper Y : ' + str(self.hiy),
        ]

    def draw_svg_rectangle(self):
        """Print and return an SVG <rect> element for this field."""
        svg_string = (
            '<rect x="{}" y="{}" width="{}" height="{}" '
            'fill="red" stroke="black" stroke-width="0.5"/>'
        ).format(self.lowx, self.lowy, self.width, self.height)
        print(svg_string)
        return svg_string
class Find:
    """An archaeological find located at (xcoord, ycoord) within the site."""

    def __init__(self, find_id, xcoord, ycoord, find_type, depth, field_notes):
        self.find_id = find_id
        self.xcoord = xcoord
        self.ycoord = ycoord
        self.find_type = find_type
        self.depth = depth
        self.field_notes = field_notes
        # Resolved later by assign_class_names(); 'none' until then.
        self.class_name = 'none'
        # Marker colour depends on the find's classification code.
        self.fill = get_find_colour(self.find_type)

    def __repr__(self):
        return f"Find({self.find_id}, {self.xcoord}, {self.ycoord})"

    def __str__(self):
        return f"Find {self.find_id} - Coordinates : ({self.xcoord}, {self.ycoord})"

    def show_info(self):
        """Print the find's id and coordinates (no return value)."""
        print('Find ID : ' + str(self.find_id))
        print(f'co-ordinates : ({str(self.xcoord)},{str(self.ycoord)})')

    def draw_svg_circle(self):
        """Build, print and return an SVG <circle> element for this find.

        Replaces the original long '+' string concatenation with a single
        f-string; the output (including the historical spacing around
        'cx = ') is byte-identical.
        """
        svg_string = f'<circle cx = "{self.xcoord}" cy="{self.ycoord}" r="1" fill="green"/>'
        print(svg_string)
        return svg_string
class MyClass:
    """A classification category for finds (type code, name, period, use)."""

    def __init__(self, class_type, name, period, use):
        self.class_type = class_type
        self.name = name
        self.period = period
        self.use = use
        # Colour is filled in once finds have been matched to classes.
        self.fill = 'none'

    def __repr__(self):
        return "Class({}, {}, {}, {})".format(
            self.class_type, self.name, self.period, self.use)

    def __str__(self):
        # Trailing ')' without an opening one is kept from the original output.
        return "Class # {} - {}, Period : {}, Use: {})".format(
            self.class_type, self.name, self.period, self.use)
class Crop:
    """A crop type with its growing-season boundaries."""

    def __init__(self, crop, name, startseason, endseason):
        self.crop = crop
        self.name = name
        self.startseason = startseason
        self.endseason = endseason
        # Copied from fields growing this crop (see assign_field_colours).
        self.fill = 'none'

    def __repr__(self):
        return "Crop({}, {}, {}, {})".format(
            self.crop, self.name, self.startseason, self.endseason)

    def __str__(self):
        # Trailing ')' without an opening one is kept from the original output.
        return "Crop # {} - {}, Start of Season: {}, End of Season: {})".format(
            self.crop, self.name, self.startseason, self.endseason)
def get_field_colour(field_crop):
    """Map a crop name to its display colour; 'none' for unknown crops."""
    colours = {
        'TURNIPS': '#A647FF',        # purple
        'OIL SEED RAPE': '#F3FC30',  # pale yellow
        'STRAWBERRIES': '#FD5959',   # orangey red
        'PEAS': '#91F708',           # light green
        'POTATOES': '#F9C89A',       # lightish orange
    }
    return colours.get(field_crop, 'none')
def get_find_colour(find_class):
    """Map a find classification code (1-4) to its marker colour."""
    colours = {
        1: '#9AA8F9',  # light blue
        2: '#C8C8C8',  # light grey
        3: '#ABC349',  # flinty green
        4: '#D1BB00',  # mustard colour
    }
    return colours.get(find_class, 'none')
def get_crop_name(crops, crop_id):
    """Return the name of the crop whose id matches crop_id (None if absent)."""
    for candidate in crops:
        if candidate.crop == crop_id:
            return candidate.name
    return None
def get_class_name(my_class, find_type):
    """Return the name of the class whose type matches find_type (None if absent)."""
    for candidate in my_class:
        if candidate.class_type == find_type:
            return candidate.name
    return None
def print_svg(width, height, viewbox):
    """Return an opening <svg> tag with the given size and viewBox."""
    return '<svg width="' + str(width) + '" height="' + str(height) + '" viewBox="' + str(viewbox) + '">'
def getDBdata(table_name, order_column):
    """Fetch every row of *table_name* ordered by *order_column* and wrap
    each row in the matching domain object.

    Supported tables: MY_FIELDS -> Field, MY_FINDS -> Find,
    MY_CLASS -> MyClass, MY_CROPS -> Crop.  An unsupported table name is
    reported and an empty list returned.

    Fixes over the original:
    - the cursor variable was previously clobbered by the tuple unpack
      ``(a, b, c, ...) = row`` inside its own iteration loop;
    - two dead ``field_name``/``find_name`` assignments removed;
    - the connection is now closed even if the query raises.
    """
    results = []
    conn = cx_Oracle.connect("student/train@geoslearn")
    try:
        cursor = conn.cursor()
        # table_name/order_column come from trusted internal callers only;
        # this interpolation is NOT safe for untrusted input.
        cursor.execute(f"SELECT * FROM {table_name} ORDER BY {order_column}")
        if table_name == "MY_FIELDS":
            # Row layout matches Field(field_id, lowx, lowy, hix, hiy, area, owner, crop_id).
            results = [Field(*row) for row in cursor]
        elif table_name == "MY_FINDS":
            results = [Find(*row) for row in cursor]
        elif table_name == "MY_CLASS":
            results = [MyClass(*row) for row in cursor]
        elif table_name == "MY_CROPS":
            results = [Crop(*row) for row in cursor]
        else:
            print("Table Name not supported...")
    finally:
        # Always release the connection, even when the query fails.
        conn.close()
    return results
def assign_field_colours(fields, crops):
    """Colour each field by its crop, and mirror that colour onto the crop."""
    for field in fields:
        for crop in crops:
            if field.crop_id != crop.crop:
                continue
            field.fill = get_field_colour(crop.name)
            crop.fill = field.fill
def assign_find_colours(finds, classes):
    """Colour each find by its class, and mirror that colour onto the class."""
    for find in finds:
        for cls in classes:
            if find.find_type != cls.class_type:
                continue
            find.fill = get_find_colour(cls.class_type)
            cls.fill = find.fill
def assign_crop_names(fields, crops):
    """Attach the resolved crop name to every field as ``crop_name``."""
    for fld in fields:
        fld.crop_name = get_crop_name(crops, fld.crop_id)
def assign_class_names(finds, classes):
    """Attach the resolved class name to every find as ``class_name``."""
    for fnd in finds:
        fnd.class_name = get_class_name(classes, fnd.find_type)
def render_html():
    """Render index.html with the loaded data and write the result to stdout.

    NOTE(review): relies on the module-level globals (field_objects,
    find_objects, my_classes, my_crops, graphics_area_for_svg) being
    populated before this is called — see the module driver below.
    """
    # Templates are loaded from the current working directory.
    env = Environment(loader=FileSystemLoader('.'))
    temp = env.get_template('index.html')
    print(temp.render(fields=field_objects, finds=find_objects, classes=my_classes, crops=my_crops, g=graphics_area_for_svg))
# --- Module driver: load reference data from the database, resolve
# --- colours/names across objects, then render the page as CGI output.
my_classes = getDBdata("MY_CLASS", "TYPE")
my_crops = getDBdata("MY_CROPS", "CROP")
field_objects = getDBdata("MY_FIELDS", "FIELD_ID")
find_objects = getDBdata("MY_FINDS", "FIND_ID")
# Cross-link colours and display names between the object collections.
assign_field_colours(field_objects, my_crops)
assign_find_colours(find_objects, my_classes)
assign_crop_names(field_objects, my_crops)
assign_class_names(find_objects, my_classes)
# 15cm x 15cm drawing surface with viewBox "-1 1 16 18".
graphics_area_for_svg = GraphicsArea(15, 15, -1, 1, 16, 18)
# Debug output (appears in the server log / before the rendered page).
print(my_classes)
print(my_crops)
print(field_objects)
print(find_objects)
print(graphics_area_for_svg)
if __name__ == '__main__':
    render_html()
3d955fc513666c6f39c4c0d22a395300352c4bcd | Python | alapha23/GutInstinct | /verify/linear_actuator.py | UTF-8 | 667 | 2.5625 | 3 | [
"BSD-2-Clause"
] | permissive | import RPi.GPIO as GPIO
import time
PIN_1 = 15
PIN_2 = 23
try:
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN_1, GPIO.OUT)
GPIO.setup(PIN_2, GPIO.OUT)
if True:
GPIO.output(PIN_1, GPIO.LOW)
GPIO.output(PIN_2, GPIO.HIGH)
#shrink when M+ is lower than M-
time.sleep(3)
GPIO.output(PIN_2, GPIO.LOW)
GPIO.output(PIN_1, GPIO.HIGH)
time.sleep(8)
GPIO.cleanup()
except KeyboardInterrupt:
GPIO.cleanup()
| true |
1e2339d225a14cda2f45aecbfe1998fb5ea1fdac | Python | lili-n-f/Nobrega-Liliana-2021-2 | /RandomNumber.py | UTF-8 | 4,530 | 3.859375 | 4 | [] | no_license | from Game import Game
import random
class RandomNumber(Game):
    """Number-guessing challenge: the player must guess a random number
    drawn from the range announced in the question text.

    Inherits question/clue bookkeeping from Game.  All user-facing
    strings are in Spanish by design and must not be altered.
    """
    def __init__(self, requirement, name, award, rules, questions):
        super().__init__(requirement, name, award, rules, questions)
    def ask_for_clues(self, player, range_upper_limit, number, guess):
        """Offer the player a clue if both the player and the game still
        have clues available.  The clue says whether the guess is far
        above/below or slightly above/below the real number.

        Args:
            player (Player): the player currently playing.
            range_upper_limit (int): upper bound of the random-number range.
            number (int): the number the player must guess.
            guess (int): the player's latest guess.
        """
        if player.get_clues() > 0 and self.clues_index < len(self.current_clues):
            if input("\n¿Quieres una pista? [S] = sí, cualquier otro caracter = no: ").lower() == 's':
                # A guess counts as "far" off when it differs from the real
                # number by more than range_upper_limit // 2; otherwise it
                # is only "slightly" off.
                if guess > number:
                    if guess-number > range_upper_limit//2:
                        print("Lo que dijiste está muy por encima.")
                    else:
                        print("Lo que dijiste está un poco por encima.")
                else:
                    if number-guess > range_upper_limit//2:
                        print("Lo que dijiste está muy por debajo.")
                    else:
                        print("Lo que dijiste está un poco por debajo.")
                self.clues_index += 1
                player.use_clue()
    def game_begins(self, player):
        """Run one round: pick a question, draw a random number from the
        announced range, and loop reading guesses until the player wins
        or runs out of lives."""
        self.choose_random_question()
        self.define_current_clues()
        failed_tries = 0
        win = False
        print(f"\n{self.current_question['question']}\n")
        # The API question text states the range as '...entre <low>-<high>';
        # slice from just past the word 'entre' (plus its trailing space)
        # and split on '-' to get [low, high] as strings.
        number_range = self.current_question['question'][self.current_question['question'].index("entre")+6:].split("-")
        # Pick the secret number uniformly from the announced range.
        number = random.randint(int(number_range[0]), int(number_range[1]))
        while player.get_lives() > 0:
            try:
                guess = int(input("Ingresa el número: "))
                if guess == number:
                    print("✔️¡Número adivinado!✔️")
                    win = True
                    break
                else:
                    print("❌Número incorrecto.❌")
                    failed_tries += 1
                    self.ask_for_clues(player, int(number_range[1]), number, guess)
            except ValueError: # user entered something that is not an int
                failed_tries += 1
                print("❌Ingreso inválido.❌")
            if failed_tries == 3: # game rule: every 3 failed tries costs 0.25 lives
                player.lose_lives(0.25)
                failed_tries = 0 # reset the failed-try counter for the next batch
        self.win_or_lose(player, win)
    def win_or_lose(self, player, win):
        """Announce the result; on a win, grant the award (its inventory
        key is lower-cased with 'í' normalised to 'i')."""
        if win:
            print("\n🙌¡GANASTE EL RETO!🙌")
            print(f"Premio: 🏆 {self.award} 🏆")
            player.add_item_to_inventory(self.award.lower().replace("í", "i"))
            self.won = True
        else:
            print("\n❌PERDISTE EL RETO❌")
| true |
d56a8b77870cbe576f02d254ff228a0e752d8ebc | Python | weatherhead99/ibroute | /ibroute/geolayer.py | UTF-8 | 1,631 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 14 21:17:16 2019
@author: danw
"""
from waypointdata import waypoint_in
from abc import abstractmethod
from typing import Tuple
from collections import namedtuple
from geopy.geocoders import Nominatim
from geopy.distance import geodesic
# Geocoding result: waypoint id plus its resolved (lat, lon) position.
waypoint_geocoded = namedtuple("waypoint_geocoded",["id", "latlong",
                                                    "category", "state"])
# Convenience alias for a (latitude, longitude) pair.
LatLongT = Tuple[float,float]
class GeoLayer:
    """Abstract geocoding backend: resolves place names to coordinates
    and measures distances between coordinate pairs."""

    @abstractmethod
    def get_lattitude_longitude(self, placename: str) -> LatLongT:
        pass

    @abstractmethod
    def get_distance_miles(self, ll1: LatLongT, ll2: LatLongT) -> float:
        pass

    def get_geocoded_waypoint(self, wpin: waypoint_in) -> waypoint_geocoded:
        """Geocode a waypoint, querying as '<name>, <state abbr>, USA' to
        disambiguate the place name."""
        latlong = self.get_lattitude_longitude("%s, %s, USA" %
                                               (wpin.name,wpin.state.abbr))
        wpout = waypoint_geocoded(id=wpin.id, latlong=latlong,
                                  category=wpin.category, state=wpin.state)
        return wpout
class GeopyNominatimLayer(GeoLayer):
    """GeoLayer backed by OpenStreetMap's Nominatim service via geopy."""

    def __init__(self):
        # Nominatim requires an identifying user-agent string per its usage policy.
        self._api = Nominatim(user_agent="ibroute/0.1")

    def get_lattitude_longitude(self, placename: str) -> LatLongT:
        """Resolve a place name to (lat, lon); raise ValueError when the
        service returns no match."""
        location = self._api.geocode(placename)
        if location is None:
            raise ValueError("couldn't geocode place: %s" % placename)
        return (location.latitude, location.longitude)

    def get_distance_miles(self, ll1: LatLongT, ll2: LatLongT) -> float:
        # Geodesic (ellipsoidal) distance, converted to miles.
        dist = geodesic(ll1,ll2)
        return dist.miles
eac2b0524feca1063fecd19f5cfb4d30661c3356 | Python | Targetcatcherinc/mbti-net | /src/load_data.py | UTF-8 | 1,614 | 3.328125 | 3 | [] | no_license | # Author: Anthony Ma
# Date: 03/04/17
# load_data.py
import os
import sys
import numpy as np
USAGE_STR = """
# Usage
# python load_data.py <DATA_FILE> <PERCENTAGE_TRAIN>
# Arguments
# <DATA_FILE> Absolute path to shuffled data file with each row being a data point
# <PERCENTAGE_TRAIN> Percentage of rows to use for training. One minus this percentage
# for testing
# Example
python load_data.py /afs/ir.stanford.edu/users/a/k/akma327/cs224n/project/mbti-net/data/mbti_shuffled_data.txt 0.7
"""
K_MIN_ARG = 3
# Index of each MBTI personality type within the 16-way one-hot label vector.
mbti_index = {"ISTJ" : 0, "ISFJ" :1, "INFJ" :2, "INTJ" :3, "ISTP" :4, "ISFP" : 5, "INFP":6, "INTP":7, "ESTP": 8, "ESFP":9, "ENFP":10, "ENTP":11, "ESTJ":12, "ESFJ":13, "ENFJ":14, "ENTJ":15}
def load_data(DATA_FILE, PERCENTAGE_TRAIN=0.7):
    """Load tab-separated MBTI data and split it into train/test sets.

    Each input line is ``<mbti>\\t..\\t..\\t<sentence>``; columns 0 and 3
    are used.  Returns (train_data, test_data), where each element is a
    (word_list, one_hot_label) pair and the first PERCENTAGE_TRAIN
    fraction of lines becomes the training set.
    """
    all_data = []
    # FIX: use a context manager so the file handle is closed even on
    # error (the original leaked the open handle).
    with open(DATA_FILE, 'r') as f:
        for line in f:
            linfo = line.strip().split("\t")
            mbti, sentence_str = linfo[0], linfo[3]
            # One-hot encode the 16 MBTI classes.
            mbti_one_hot = np.array([0.0]*16)
            mbti_one_hot[mbti_index[mbti]] = 1
            # Normalise: periods become spaces, lowercase, drop empty tokens.
            sentence_str = sentence_str.replace(".", " ").lower()
            words = [w for w in sentence_str.split(" ") if w != ""]
            all_data.append((words, mbti_one_hot))
    num_data_points = len(all_data)
    num_train_points = int(PERCENTAGE_TRAIN*num_data_points)
    train_data = all_data[:num_train_points]
    test_data = all_data[num_train_points:]
    return train_data, test_data
if __name__ == "__main__":
    # Require <DATA_FILE> and <PERCENTAGE_TRAIN> on the command line.
    if(len(sys.argv) < K_MIN_ARG):
        print(USAGE_STR)
        exit(1)
    DATA_FILE, PERCENTAGE_TRAIN = (sys.argv[1], float(sys.argv[2]))
    train_data, test_data = load_data(DATA_FILE, PERCENTAGE_TRAIN)
    # Preview the first 50 training examples and the split sizes.
    print(train_data[0:50], len(train_data), len(test_data))
| true |
495ee3c918fb056ff21c5412c7fca9e02c7ddceb | Python | hariketsheth/PyGrams | /col_named_tup.py | UTF-8 | 1,444 | 4.0625 | 4 | [] | no_license | from __future__ import print_function
from collections import namedtuple
'''Collections-NamedTuple (100 Marks)
Named tuples assign meaning to each position in a tuple and allow for more readable, self-documenting code. They can be used wherever regular tuples are used, and they add the ability to access fields by name instead of position index.
You are given a data containing test results of class. The dataset consists of two columns namely : 'marks' and 'name'. These two Columns can be in any order i.e. ('name' followed by 'marks' or vice versa).
You have to find the average marks for the whole class.
Input Format
First line will contain an Integer N, denoting the number of students.
Next line will contain two string denoting the column heading.
Next N lines will contain marks and name of the students respective of column headings.
Constraints
1 <= N <= 10^3
0 <= Marks <= 100
Output Format
Output the average marks of the class rounded off to two decimal places.
Sample TestCase 1
Input
3
marks names
10 arpit
20 anushka
35 rakshita
Output
21.67
'''
def main():
    """Read N student records from stdin and print the class-average mark
    to two decimal places (no trailing newline).

    NOTE: this file targets Python 2 (raw_input plus the __future__
    print import at the top of the file).
    """
    limit = input()  # Python 2: input() evaluates the line -> int
    columns = raw_input().split()
    # namedtuple fields mirror the header, so either column order works.
    stud_marks = namedtuple('Student', [columns[0],columns[1]])
    marks_sum = 0
    for _ in range(limit):
        temp = raw_input().split()
        # BUG FIX: map each value to its named column instead of assuming
        # marks always come first -- the task allows either column order.
        s_record = stud_marks(**dict(zip(columns, temp)))
        marks_sum += int(s_record.marks)
    # BUG FIX: average over the real student count, not a hard-coded 3
    # (the original only happened to work for the 3-student sample).
    print("%.2f" % (float(marks_sum)/limit), end='')
main()
f3bb99747463a89c814c7873232a676fd7aadfd5 | Python | Tahmeed156/Numerical-Methods-Sessional | /1-root-finding/1.py | UTF-8 | 2,407 | 3.671875 | 4 | [] | no_license | import numpy as np
from matplotlib import pyplot as plt
def ln(z, n=5):
    """Approximate ln(z) via the Maclaurin series of ln(1+x) with x = z-1,
    summing the first n terms: x - x^2/2 + x^3/3 - ..."""
    x = z - 1
    # 'term' holds the unsigned magnitude x**k / k of the current term.
    term = x
    total = x
    for k in range(2, n + 1):
        # Each magnitude is the previous one times x*(k-1)/k.
        term = term * x * ((k - 1) / k)
        if k % 2 == 0:
            total -= term
        else:
            total += term
    return total
def ln_error(z, n=5):
    """Return the relative change of successive partial sums of the
    ln(1+x) series (x = z-1), one entry for each term index 2..n."""
    x = z - 1
    term = x
    total = x
    errors = []
    for k in range(2, n + 1):
        previous_total = total
        # Unsigned magnitude of the k-th series term, x**k / k.
        term = term * x * ((k - 1) / k)
        total = total - term if k % 2 == 0 else total + term
        # Relative difference between consecutive partial sums.
        errors.append(abs(total - previous_total) / total)
    return errors
if __name__ == '__main__':
    # Figure 1: ln(1+x) approximations; figure 2: relative error vs terms.
    f1 = plt.figure()
    plt.grid(True, axis='both', color='linen', linestyle='-', linewidth=2)
    plt.axhline(y=0, color='lightgrey', linestyle='-')
    plt.axvline(x=0, color='lightgrey', linestyle='-')
    plt.ylabel('ln(1+x)')
    plt.xlabel('x')
    f2 = plt.figure()
    plt.grid(True, axis='both', color='linen', linestyle='-', linewidth=2)
    plt.axhline(y=0, color='lightgrey', linestyle='-')
    plt.axvline(x=0, color='lightgrey', linestyle='-')
    ax1 = f1.add_subplot(111)
    ax2 = f2.add_subplot(111)
    # a. Evaluate the series at a user-supplied x and term count n.
    temp = input("Enter the value of x: ")
    x = 1 + float(temp)
    temp = input("Enter the value of n: ")
    n = int(temp)
    print("The value of ln(1+x): " + str(ln(x, n)))
    # b. Reference curve from numpy's log over x in (-0.9, 1).
    dom = np.arange(-0.9, 1, 0.1)
    func = [np.log(i+1) for i in dom]
    ax1.plot(dom, func, color='C8', label='numpy log()')
    ax1.legend(loc='best')
    f1.savefig('1b.png', format='png', dpi=700)
    # c. Overlay series approximations with increasing term counts.
    n_arr = [1, 3, 4, 20, 50]
    for j in range(0, 5):
        my_func = [ln(i+1, n_arr[j]) for i in dom]
        lab = str(n_arr[j]) + ' terms'
        col = 'C' + str(j)
        ax1.plot(dom, my_func, color=col, label=lab)
    ax1.legend(loc='best')
    f1.savefig('1c.png', format='png', dpi=700)
    # d. Relative error of the partial sums at x = 0.5, terms 2..50.
    plt.ylabel('Relative error')
    plt.xlabel('number of terms (2-50)')
    dom = np.arange(2, 51, 1)
    error_func = ln_error(1.5, 50)
    ax2.plot(dom, error_func, color="C2")
    f2.savefig('1d.png', format='png', dpi=700)
    plt.show()
| true |
bc15adc8aa7d761d4b68b537e6b62ec4c8473fb5 | Python | PFZ86/LeetcodePractice | /Array/0275_HIndexII_M.py | UTF-8 | 754 | 3.6875 | 4 | [] | no_license | # https://leetcode.com/problems/h-index-ii/
# Solution 1: binary search
# find the largest h s.t. citations[N-h] >= h
# --> find the smallest i s.t. citations[i] >= N-i
class Solution(object):
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
N = len(citations)
low, upp = 0, N - 1
res = 0
while low <= upp:
mid = low + (upp - low)/2
if citations[mid] >= N - mid:
# we need to find the smallest possible i,
# so don't return and keep searching
res = N - mid
upp = mid - 1
else:
low = mid + 1
return res
| true |
b447a56e5f431033bdb2c3a12ff21d9a9caca40b | Python | Pellizzon/ItalianProgrammingLanguage | /components/symbolTable.py | UTF-8 | 683 | 3.4375 | 3 | [] | no_license | class SymbolTable:
def __init__(self):
self.symbols = {}
def set(self, key, val):
if key in self.symbols:
self.symbols[key] = val
else:
raise ValueError(f"Cannot set value of undeclared variable '{key}'.")
def get(self, key):
if key in self.symbols:
return self.symbols[key]
else:
raise ValueError(f"Tried to access inexistent variable '{key}'")
    def declare(self, key, val):
        # Declaration (re)binds unconditionally, unlike set() which
        # requires the variable to exist already.
        self.symbols[key] = val
def contains(self, key):
if key in self.symbols:
return True
return False
def dropReturn(self):
del self.symbols["return"]
| true |
31f50382ba3836f3e438753c21e30a01839f381e | Python | hrizantema-st/Programming_101solutions | /week3/3-Panda-Social-Network/panda_testing.py | UTF-8 | 5,180 | 2.828125 | 3 | [] | no_license | import unittest
from panda import Panda
from panda import PandaSocialNetwork
from panda import PandaAlreadyThere
from panda import PandasAlreadyFriends
class TestPandaClass(unittest.TestCase):
    """Unit tests for the Panda value object (construction, equality,
    accessors, gender predicates)."""
    def setUp(self):
        # test_obj3 duplicates test_obj on purpose, to exercise __eq__.
        self.test_obj = Panda("Ivo", "ivo@pandamail.com", "male")
        self.test_obj2 = Panda("Rado", "rado@pandamail.com", "male")
        self.test_obj3 = Panda("Ivo", "ivo@pandamail.com", "male")
    def test_init(self):
        self.assertTrue(isinstance(self.test_obj, Panda))
    def test_wrong_init(self):
        # An email without '@' must be rejected by the constructor.
        with self.assertRaises(ValueError):
            wrong_answ = Panda("mitio", "mitko.com", "male")
    def test_str_method(self):
        self.assertEqual(str(self.test_obj), "Ivo")
    def test_eq_method(self):
        # Equality is by value: equal fields compare equal.
        self.assertFalse(self.test_obj == self.test_obj2)
        self.assertEqual(self.test_obj, self.test_obj3)
    def test_get_name(self):
        self.assertEqual(self.test_obj.get_name(), "Ivo")
    def test_get_email(self):
        self.assertEqual(self.test_obj.get_email(), "ivo@pandamail.com")
    def test_is_Female(self):
        self.assertFalse(self.test_obj.isFemale())
    def test_is_Male(self):
        self.assertTrue(self.test_obj.isMale())
    def test_gender(self):
        self.assertEqual(self.test_obj.get_gender(), "male")
class TestingPandaSocialNetword(unittest.TestCase):
    """Unit tests for PandaSocialNetwork: membership, friendships,
    connection levels (BFS distance) and gender counting."""
    def setUp(self):
        self.pandio = Panda("Ivo", "ivo@pandamail.com", "male")
        self.pandio2 = Panda("Rado", "rado@pandamail.com", "male")
        self.pandio3 = Panda("Azi", "rado@pandamail.com", "female")
        self.pandiofriendless = Panda("tozi", "rado@pandamail.com", "female")
        self.pp = Panda("Azidazi", "rado@pandamail.com", "female")
        self.pandichki = PandaSocialNetwork()
    def test_add_panda_function(self):
        self.pandichki.add_panda(self.pandio)
        self.assertTrue(self.pandio in self.pandichki._pandas)
        # Adding the same panda twice is an error.
        with self.assertRaises(PandaAlreadyThere):
            self.pandichki.add_panda(self.pandio)
    def test_has_panda_func(self):
        self.pandichki.add_panda(self.pandio)
        self.assertTrue(self.pandichki.has_panda(self.pandio))
        self.assertFalse(self.pandichki.has_panda(self.pandio2))
    def test_make_friends(self):
        self.pandichki.make_friends(self.pandio, self.pandio2)
        self.assertTrue(self.pandio in self.pandichki._pandas[self.pandio2])
        # Friendship is symmetric, so re-friending in either order fails.
        with self.assertRaises(PandasAlreadyFriends):
            self.pandichki.make_friends(self.pandio2, self.pandio)
    def test_are_friends(self):
        self.pandichki.make_friends(self.pandio, self.pandio2)
        self.assertTrue(self.pandichki.are_friends(self.pandio, self.pandio2))
        self.assertFalse(self.pandichki.are_friends(self.pandio, self.pandio3))
    def test_of_friendship(self):
        self.pandichki.make_friends(self.pandio, self.pandio2)
        self.assertEqual([self.pandio], self.pandichki.friends_of(self.pandio2))
        self.pandichki.make_friends(self.pandio2, self.pandio3)
        self.assertEqual([self.pandio, self.pandio3], self.pandichki.friends_of(self.pandio2))
        # A panda outside the network has a falsy friends list.
        self.assertFalse(self.pandichki.friends_of(self.pandiofriendless))
    def test_connection_level(self):
        self.pandichki.make_friends(self.pandio, self.pandio2)
        self.pandichki.add_panda(self.pp)
        self.assertEqual(1, self.pandichki.connection_level(self.pandio, self.pandio2))
        self.pandichki.make_friends(self.pandio2, self.pandio3)
        self.assertEqual(2, self.pandichki.connection_level(self.pandio, self.pandio3))
        self.assertFalse(self.pandichki.connection_level(self.pandio, self.pandiofriendless))
        # Both in the network but unreachable -> -1.
        self.assertEqual(-1, self.pandichki.connection_level(self.pandio, self.pp))
    def test_are_connecter(self):
        self.pandichki.make_friends(self.pandio, self.pandio2)
        self.assertTrue(self.pandichki.are_connected(self.pandio, self.pandio2))
        self.pandichki.add_panda(self.pp)
        self.assertFalse(self.pandichki.are_connected(self.pandio, self.pp))
    def test_connection_with_cycle(self):
        pandas = [Panda("Ivo{}".format(i), "ivo@pandamail.com", "male") for i in range(5)]
        network = PandaSocialNetwork()
        network.make_friends(pandas[0], pandas[1])
        network.make_friends(pandas[0], pandas[2])
        network.make_friends(pandas[1], pandas[3])
        network.make_friends(pandas[3], pandas[4])
        network.make_friends(pandas[4], pandas[2])
        # BUG FIX: the original used assertTrue(value, 2), where 2 was
        # silently treated as the failure *message* and never checked.
        # The shortest path 0-2-4 has length 2.
        self.assertEqual(network.connection_level(pandas[0], pandas[4]), 2)
    def test_how_many_gender_in_network(self):
        network = PandaSocialNetwork()
        ivo = Panda("Ivo", "ivo@pandamail.com", "male")
        rado = Panda("Rado", "rado@pandamail.com", "male")
        tony = Panda("Tony", "tony@pandamail.com", "female")
        for panda in [ivo, rado, tony]:
            network.add_panda(panda)
        network.make_friends(ivo, rado)
        network.make_friends(rado, tony)
        self.assertTrue(network.how_many_gender_in_network(1, rado, "female") == 1)
        self.assertTrue(network.connection_level(ivo, rado) == 1)
        self.assertTrue(network.connection_level(ivo, tony) == 2)
    def test_how_many_gender_in_network_again(self):
        pass
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| true |
d27666490d6238f7e2ea22246e084ceb19469f68 | Python | changediyasunny/Challenges | /leetcode_2018/248_strobogrammatic_numer_III.py | UTF-8 | 2,066 | 4.15625 | 4 | [] | no_license | """
248. Strobogrammatic Number III
A strobogrammatic number is a number that looks the same when rotated 180 degrees
(looked at upside down). Write a function to count the total strobogrammatic numbers
that exist in the range of low <= num <= high.
Example:
Input: low = "50", high = "100"
Output: 3
Explanation: 69, 88, and 96 are three strobogrammatic numbers.
"""
class Solution(object):
    """Count strobogrammatic numbers in [low, high] via iterative expansion."""

    def strobogrammaticInRange(self, low, high):
        """
        :type low: str
        :type high: str
        :rtype: int
        """
        lo = int(low)
        max_digits = len(high)
        hi = int(high)
        count = 0
        # Seed with the strobogrammatic "cores"; grow outwards by wrapping
        # each candidate in a rotation-symmetric digit pair.
        pending = ["", "0", "1", "8"]
        while pending:
            candidate = pending.pop()
            # Count non-empty candidates without a leading zero that fall
            # inside the numeric range.
            if candidate and candidate[0] != "0" and lo <= int(candidate) <= hi:
                count += 1
            # Wrapping adds two digits; stop once we'd exceed len(high).
            if len(candidate) <= max_digits - 2:
                for pair in ("00", "11", "69", "88", "96"):
                    pending.append(pair[0] + candidate + pair[1])
        # "0" itself is skipped by the leading-zero test above; add it back
        # when the range starts at 0.
        return count if lo != 0 else count + 1
### Using method to find such number of length
class Solution(object):
    """Alternative approach: enumerate all strobogrammatic strings of each
    length up to len(high), then filter by the numeric range.

    NOTE(review): this second Solution class shadows the first one above
    when the whole file is imported.
    """
    def strobogrammaticInRange(self, low, high):
        """
        :type low: str
        :type high: str
        :rtype: int
        """
        temp_list = []
        for i in range(1, len(high)+1):
            # Collect candidates of every length from 1 up to len(high).
            temp_list += self.number_of_length(i)
        cnt = 0
        # set() de-duplicates; int() comparison applies the numeric range.
        for n in set(temp_list):
            if int(n) >= int(low) and int(n) <= int(high):
                cnt += 1
        return cnt
    def number_of_length(self, n):
        """Return all strobogrammatic strings of exactly n characters,
        built from the middle outwards."""
        # Odd n starts from the self-rotating cores '0','1','8';
        # even n starts from the empty core.
        nums = n % 2 * list('018') or ['']
        result = []
        while n > 1:
            n = n - 2
            # Drop the '00' pair on the outermost layer (n < 2) so no
            # candidate gets a leading zero.
            temp_list = '00 11 69 88 96'.split()[n<2:]
            for a, b in temp_list:
                for nb in nums:
                    result.append(a+nb+b)
            nums = result
            result = []
        return nums
| true |
bbcb62ce77052d484f84e14183f48de43cad36ab | Python | laqinfan/Latent-Semantic-Analysis-for-news-articles-clustering | /project-final/document-clustering.py | UTF-8 | 7,578 | 2.609375 | 3 | [] | no_license | import numpy as np
import pandas, os, sys, umap
from PIL import Image
import matplotlib.pyplot as plt
import seaborn as sb
from sklearn.datasets import fetch_20newsgroups
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from scipy.cluster.hierarchy import ward, dendrogram
from sklearn.metrics.pairwise import cosine_similarity
from gensim import corpora
from gensim.models.ldamodel import LdaModel
from gensim.models.coherencemodel import CoherenceModel
from gensim.models import LsiModel
from sklearn.cluster import AgglomerativeClustering
from sklearn.utils.extmath import randomized_svd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
# Newsgroup categories to cluster.
# NOTE(review): 'talk.religion.misc' is listed twice, and the remove=
# tuple below says 'quoters' where scikit-learn expects 'quotes' --
# both look like typos to confirm.
categories = [
    'alt.atheism',
    'talk.religion.misc',
    'comp.graphics',
    'sci.space',
    'sci.electronics',
    'sci.med',
    'talk.religion.misc',
]
#fetch 20newsgroups data
data = fetch_20newsgroups(categories=categories,shuffle = True, random_state = 1, remove = ('headers', 'footers', 'quoters'))
doc = data.data
names = data.target_names
print(names)
######################## Preprocessing the data ########################
##Remove the punctuations, numbers, and special characters
news_df = pandas.DataFrame({'document':doc})
news_df['clean_doc'] = news_df['document'].str.replace("[^a-zA-Z#]", " ")
#remove short words (<= 3 chars) which are not meaningful in the contexts
news_df['clean_doc'] = news_df['clean_doc'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>3]))
# make all text lowercase
news_df['clean_doc'] = news_df['clean_doc'].apply(lambda x: x.lower())
##Tokenize the texts
##Remove stop words
stop_words = stopwords.words('english')
# tokenization
tokenized_doc = news_df['clean_doc'].apply(lambda x: x.split())
# remove stop-words
tokenized_doc = tokenized_doc.apply(lambda x: [item for item in x if item not in stop_words])
# de-tokenization: re-join tokens so the TF-IDF vectorizer can consume strings
detokenized_doc = []
for i in range(len(news_df)):
    t = ' '.join(tokenized_doc[i])
    detokenized_doc.append(t)
news_df['clean_doc'] = detokenized_doc
### Document Term Matrix---tfidf (top 1000 terms, ignoring very common ones)
vector = TfidfVectorizer(stop_words='english', max_features= 1000, max_df = 0.5, smooth_idf=True)
X_matrix = vector.fit_transform(news_df['clean_doc'])
print(X_matrix.shape)
### reduce dimentionality using SVD (LSA): 100 latent components
n_component = 100
svd_model = TruncatedSVD(n_components=n_component)
# U, Sigma, VT are kept separately for the topic-space embedding plot.
U, Sigma, VT = randomized_svd(X_matrix, n_components=n_component, n_iter=100, random_state=122)
normalizer = Normalizer(copy=False)
# X is the L2-normalised LSA representation of the documents.
lsa = make_pipeline(svd_model, normalizer)
X = lsa.fit_transform(X_matrix)
if sys.argv[1] == 'kmeans-no-lsa':
    # K-Means directly on the TF-IDF matrix (no LSA reduction).
    ###compute the silhouette score of different number of clusters
    range_clusters = [2,3,4,5,6,7,8,9,10]
    silhouette_scores = []
    for n in range_clusters:
        cluster = KMeans(n_clusters = n, random_state=10)
        cluster_label = cluster.fit_predict(X_matrix)
        silhouette = silhouette_score(X_matrix, cluster_label)
        print("Cluser Number:", n, " silhouette_score:", round(silhouette,4))
        silhouette_scores.append(round(silhouette, 4))
    ###select the clsuter number of highest silhousette score
    ###KMeans to cluser the articles
    var = input("Please input the number of clusters: ")
    km = KMeans(n_clusters = int(var), random_state=10)
    km.fit(X_matrix)
    # Sort each centroid's term weights descending to find its top terms.
    centers = km.cluster_centers_.argsort()[:, ::-1]
    ###get each word, which is actually a topic
    topics = vector.get_feature_names()
    for i in range(int(var)):
        print("Cluster %d:" % i, end='')
        for ind in centers[i, :8]:
            print(' %s' % topics[ind], end='')
        print()
    centers = km.cluster_centers_.argsort()[:, ::-1]
    clusters = km.labels_.tolist()
    # 2-D UMAP projection of the documents, coloured by cluster label.
    embedding = umap.UMAP(n_neighbors=100, min_dist=0.5, random_state=12).fit_transform(X_matrix)
    plt.figure(figsize=(7,5))
    plt.scatter(embedding[:, 0], embedding[:, 1],
        c = clusters,
        s = 10, # size
        edgecolor='none'
        )
    # plt.show()
    plt.savefig('km-no-lsa.png', dpi=200) #save figure as km-no-lsa.png
    image = Image.open('km-no-lsa.png')
    image.show()
elif sys.argv[1] == 'kmeans-lsa':
    # K-Means on the LSA-reduced, normalised representation X.
    ###compute the silhouette score of different number of clusters
    range_clusters = [2,3,4,5,6,7,8,9,10]
    silhouette_scores = []
    for n in range_clusters:
        cluster = KMeans(n_clusters = n, random_state=10)
        cluster_label = cluster.fit_predict(X)
        silhouette = silhouette_score(X, cluster_label)
        print("Cluser Number:", n, " silhouette_score:", round(silhouette,4))
        silhouette_scores.append(round(silhouette, 4))
    ###select the clsuter number of highest silhousette score
    ###KMeans to cluser the articles
    var = input("Please input the number of clusters: ")
    km = KMeans(n_clusters = int(var), random_state=10)
    km.fit(X)
    # Map centroids back into term space to report each cluster's top terms.
    centroids = svd_model.inverse_transform(km.cluster_centers_)
    centers = centroids.argsort()[:, ::-1]
    ###get each word, which is actually a topic
    topics = vector.get_feature_names()
    for i in range(int(var)):
        print("Cluster %d:" % i, end='')
        for ind in centers[i, :8]:
            print(' %s' % topics[ind], end='')
        print()
    clusters = km.labels_.tolist()
    # Embed documents in the (unnormalised) latent topic space U*Sigma.
    topics = U*Sigma
    embedding = umap.UMAP(n_neighbors=100, min_dist=0.5, random_state=12).fit_transform(topics)
    plt.figure(figsize=(7,5))
    plt.scatter(embedding[:, 0], embedding[:, 1],
        c = clusters,
        s = 10, # size
        edgecolor='none'
        )
    # plt.show()
    plt.savefig('lsa.png', dpi=200) #save figure as lsa.png
    image = Image.open('lsa.png')
    image.show()
elif sys.argv[1] == 'Hierarchical' or sys.argv[1] == 'hr':
    #### Hierarchical clustering: Ward linkage over cosine distance,
    #### rendered as a dendrogram.
    distance = 1 - cosine_similarity(X_matrix)
    linkage_matrix = ward(distance)
    fig, ax = plt.subplots(figsize=(15, 20)) # set size
    topics = vector.get_feature_names()
    ax = dendrogram(linkage_matrix);
    plt.tight_layout()
    plt.savefig('hr.png', dpi=200) #save figure as hr.png
    image = Image.open('hr.png')
    image.show()
elif sys.argv[1] == 'lda':
    #### Topic modeling for clustering (gensim coherence scan + sklearn LDA)
    dictionary = corpora.Dictionary(tokenized_doc)
    doc_term_matrix = [dictionary.doc2bow(doc) for doc in tokenized_doc]
    # Scan topic counts and print their coherence so the user can choose.
    number_of_topics = [2,3,4,5,6,7,8,9,10]
    for n in number_of_topics:
        ldamodel = LdaModel(doc_term_matrix, num_topics=n, id2word = dictionary)
        m = CoherenceModel(model=ldamodel, texts=tokenized_doc, coherence='c_v')
        covalue = m.get_coherence()
        print("Cluster Number:", n, " Coherence Value:", round(covalue,4))
    # generate LDA model with the user-chosen topic count (raw counts input)
    var = input("Please input the number of clusters: ")
    vector_count = CountVectorizer()
    X_transform = vector_count.fit_transform(news_df['clean_doc'])
    terms = vector_count.get_feature_names()
    lda = LatentDirichletAllocation(
        n_components=int(var), max_iter=5,
        learning_method='online', random_state=0)
    lda.fit(X_transform)
    # Report the 8 highest-weighted terms for each topic.
    for index, topic in enumerate(lda.components_):
        terms_topic = zip(terms, topic)
        sorted_terms = sorted(terms_topic, key= lambda x:x[1], reverse=True)[:8]
        print("Cluster "+str(index)+": ", end='')
        for t in sorted_terms:
            print(t[0], " ", end='')
        print()
| true |
3eee2c10acce88554e3e069dd8485de7f732f2ba | Python | mdmims/AzureIngester | /azure_ingester/helpers/azure_helper.py | UTF-8 | 1,824 | 2.640625 | 3 | [] | no_license | import requests
import json
import time
# Maximum retry attempts for transient API failures.
MAX_API_RETRY = 5

def fatal_code(response) -> bool:
    """Return True when the response status code is a fatal (4xx) HTTP
    client error, i.e. one that a retry will not fix."""
    status = response.status_code
    return status >= 400 and status < 500
class AzureApp:
    """
    Use Azure AD credentials to auth to Azure services
    """
    def __init__(self, tenant_id, application_id, subscription_id, resource_group, client_secret):
        self.tenant_id = tenant_id
        self.client_id = application_id
        self.subscription_id = subscription_id
        self.resource_group = resource_group
        self.client_secret = client_secret
        # Cached token-response dict from Azure AD; None until first use.
        self._oauth_token = None
        self.grace_period = 300  # attempt to refresh azure oauth token after 300 seconds
    @property
    def oauth_token(self):
        """Return a valid bearer access token, refreshing it via the
        OAuth2 client-credentials flow when no token is cached or the
        cached one expires within grace_period seconds."""
        if not self._oauth_token or float(self._oauth_token.get("expires_on")) < time.time() + self.grace_period:
            azure_auth_url = f"https://login.microsoft.com/{self.tenant_id}/oauth2/token"
            azure_grant_type = "client_credentials"
            azure_resource = "https://management.azure.com/"
            headers = {
                "Content-Type": "application/x-www-form-urlencoded"
            }
            data = {
                "grant_type": azure_grant_type,
                "client_id": self.client_id,
                "client_secret": self.client_secret,
                "resource": azure_resource,
            }
            try:
                response = requests.post(azure_auth_url, headers=headers, data=data).json()
            except Exception as e:
                raise e
            # Azure returns error details in the body with a 200-style JSON;
            # surface them as a ValueError.
            if "error" in response:
                raise ValueError(json.dumps(response))
            else:
                self._oauth_token = response
        return self._oauth_token.get("access_token")
| true |
703f06bd61c770f09b0a9a42098667873f4f438c | Python | BranGuzmen/ProgrammingPortfolio | /Python/ClassifierEvaluation/SourceCode/HW3_ClassifierEvaluation.py | UTF-8 | 13,098 | 3.21875 | 3 | [] | no_license | import sys
import time
import warnings
import numpy as np
import matplotlib.pyplot as plot
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
# Scoring for classifiers
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
# Importing SVM, Decision Tree classifiers, LDA and Random Forest Classifier
from sklearn.svm import SVC as svm
from sklearn.tree import DecisionTreeClassifier as dt
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as lda
from sklearn.ensemble import RandomForestClassifier as rfc
# Display all values present in numpy array.
# np.nan is rejected as a threshold by modern numpy (TypeError since 1.24);
# sys.maxsize achieves the intended "never summarize" behaviour.
np.set_printoptions(threshold=sys.maxsize)
# Set the window size for graph
fig_size = plot.rcParams['figure.figsize']
fig_size[0],fig_size[1] = 12,9
plot.rcParams['figure.figsize'] = fig_size
# Supress warnings from sklearn. Comment out if you want warnings
warnings.filterwarnings('ignore')
# Classifier names used for graph plotting and printing predictions
class_names = ['SVM','Gini','IG','LDA','Rand Forest']
# Method for plotting scores
def score_plotter(graph, scores):
    """
    Draw one labelled bar per classifier on the given axes.

    param:
        graph:
            The matplotlib axes the bars are drawn on
        scores:
            One value per classifier, ordered like the module-level class_names
    """
    for name, value in zip(class_names, scores):
        graph.bar(name, value, width=0.8)
def load(filename):
    '''
    Loads a file based on the file name parameter. Assumes that the last column is classes and the
    rest are data.
    Param:
        filename:
            Name of file to be opened
    Return:
        A tuple D=(X,y) where X is a numpy nxd array of observations attributes (as strings,
        columns 0..28) where X[i] comes from the i-th row in filename; y is a list of encoded
        labels in the same order ('M' -> 0, anything else -> 1).
    '''
    # np.object was removed from numpy (>= 1.24); loading as plain strings keeps
    # the previous "everything as text" behaviour without the dead alias.
    raw = np.loadtxt(filename, dtype=str, delimiter=',')
    # Encode the label column: malignant 'M' -> 0, benign 'B' (or other) -> 1.
    classes = [0 if label == 'M' else 1 for label in raw[:, -1]]
    return raw[:, 0:29], classes
def print_predictions(predictions, print_pred):
    '''
    Print each classifier's predicted classes, decoded from 0/1 back to M/B.
    Params:
        predictions:
            One sequence of 0/1 predictions per classifier, ordered like class_names
        print_pred:
            When False, only a hint about the pred_print argument is printed
    Return:
        Prints a list of classes converted from 0 and 1 to M and B
    '''
    if not print_pred:
        print('\n\nIf you want to see predictions, run program again with argument pred_print\n')
        return
    print('\n\nPrinting Predictions\n')
    for pred, clf in zip(predictions, class_names):
        # Decode this classifier's numeric predictions to letters.
        letters = ['M' if num == 0 else 'B' for num in pred]
        print('\n{}:\n{d}'.format(clf, d=letters))
    print('\n')
def print_scores(scores):
    '''
    Print average precision, average recall and average F-measure per classifier.
    Params:
        scores:
            Three lists (precision, recall, F-measure), each holding one value
            per classifier in class_names order.
    Return:
        Prints the three score tables to stdout.
    '''
    # Note: the 'Precsion' typo is kept verbatim so the output stays identical.
    headings = ['Average Precsion Scores', 'Average Recall Score', 'Average F-Measure']
    for name, per_clf in zip(headings, scores):
        print('\n{}:\n'.format(name))
        for clf, value in zip(class_names, per_clf):
            print('{}: {:.4f}%'.format(clf, value * 100))
def classifier_plotter(X_train, y_train):
    '''
    Takes the training data and runs through SVM, DT-Gini and DT-IG with multiple C values and max_leaf_nodes to try.
    The method then creates a graph by taking the average of cross validation scores for that C value or max_leaf_node.
    Params:
        X_train:
            List/s of features already standardized from the initial dataset
        y_train:
            List of classifiers for X_train taken from the original dataset
    Return:
        Outputs a graph of the average cross validation scores.
    '''
    # NOTE(review): `i` is incremented but never read; `d` gates one-time axis setup.
    i,d = 1,0
    # Values to test
    c_values = [0.01,0.1,1,10,100]
    k_values = [2,5,10,20]
    classifiers = ["SVM","DT-Gini & DT-IG"]
    for clf in classifiers:
        count = 1
        if clf == "SVM":
            if d == 0:
                # One-time setup of the SVM subplot (slot 1 of a 2x3 grid).
                ax = plot.subplot(231)
                ax.set_title(clf)
                plot.ylabel('F-measure')
                plot.xlabel('C values')
                d += 1
            print('SVM')
            for c in c_values:
                # 10-fold CV mean score for each candidate C, plotted as blue squares.
                classi = svm(kernel='linear', C=c).fit(X_train,y_train)
                scores = cross_val_score(classi, X_train, y_train, cv=10)
                ax.plot(str(c),scores.mean(),'bs')
                print('%d.) %.4f%%' %(count,scores.mean()*100))
                count += 1
            plot.axis([None,None,0.90,1])
            print('\n')
            i += 1
            d = 0
        elif clf == "DT-Gini & DT-IG":
            count = 1
            if d == 0:
                # Shared subplot for both decision-tree criteria (slot 2 of the grid).
                ax = plot.subplot(232)
                plot.ylabel('F-measure')
                plot.xlabel('Max Leaf Nodes')
            print(' Gini\tIG')
            for k in k_values:
                # 10-fold CV for both split criteria at each leaf-node cap
                # (Gini red dots, information gain green dots).
                gini_class,ig_class = dt(criterion='gini', max_leaf_nodes=k), dt(criterion='entropy', max_leaf_nodes=k)
                score_gini,score_ig = cross_val_score(gini_class, X_train, y_train, cv=10), cross_val_score(ig_class, X_train, y_train, cv=10)
                ax.plot(str(k), score_gini.mean(), 'r.', str(k), score_ig.mean(), 'g.')
                print('%d.) %.4f%%\t%.4f%%'%(count,score_gini.mean()*100,score_ig.mean()*100))
                count += 1
            plot.axis([None,None,0.889,0.96])
            ax.legend(('Gini','IG'),loc=2)
            print('\n')
            i += 1
            d = 0
        else:
            # Unreachable with the fixed `classifiers` list above.
            return "Should not get here."
def arguments(argv,train,test):
    '''
    Arguments will take any parameters passed along with the program and do one of five things:
        1.) Will use the raw dataset without any standardization in all classifier methods and graphs.
        2.) Will use a standardized dataset in all classifier methods and graphs.
        3.) Will print out predictions for the standardized data set or the raw dataset depending on if
            the arguement follows the first argument raw.
        4.) Return a message to console notifying the user that they did not pass a valid argument.
        5.) Use the standardized test set by default if no argument is provided.
    For each case that the data is standardized, the test data will also be standardized.
    Params:
        argv:
            Any arguments passed from the console
        train:
            Training data that will be standardized if asked
        test:
            Test data that will be standardized if train is standardized
    Return:
        train:
            Either standardized data will be returned or the same dataset untouched will be returned depending on arguements
        test:
            If train has been returned standardized the so will test, otherwise it will be returned untouched
        print_pred:
            boolean returned True if predictions are to be printed, otherwise will be returned False
    '''
    # Too many args: fall back to the standardized, no-predictions default.
    if len(argv) >= 4:
        print('Too many arguemnts provided. Using standardized data and will not print out classifiers predictions')
        time.sleep(3)
        train,test = StandardScaler().fit_transform(train),StandardScaler().fit_transform(test)
        return train,test,False
    # Exactly one user-supplied argument.
    if len(argv) >= 2 and len(argv) < 3:
        if argv[1].lower() == 'raw':
            print('\n\nUsing un-standardized data for classification.\nThis will take a bit.\n\n')
            time.sleep(3)
            return train,test,False
        elif argv[1].lower() == 'standardized':
            print('\n\nUsing standardized data for classification.\n\n')
            time.sleep(3)
            train,test = StandardScaler().fit_transform(train),StandardScaler().fit_transform(test)
            return train,test,False
        elif argv[1].lower() == 'print_pred':
            print('\n\nWill print predictions for the classifiers on standardized dataset. Input print_pred after raw to print out its predictions\n\n')
            time.sleep(3)
            train,test = StandardScaler().fit_transform(train),StandardScaler().fit_transform(test)
            return train,test,True
        # NOTE(review): this condition is always true ('print_pred' is a truthy
        # string literal); it only behaves as the catch-all because every valid
        # option was already consumed by the branches above.
        elif argv[1].lower() != 'standardized' or 'print_pred' or 'raw':
            print('\n\nYou are using an unrecognized command.\nPlease use one of the following:\nstandardized: Does classification on a standardized'
            'dataset.\nraw: Does classification on the raw dataset\nprint_pred: Prints predictions for all of the classifiers. (Can be used'
            ' after standardized and raw to print their classifiers)')
            time.sleep(3)
            sys.exit('\n\nTerminating Program.\n\n')
    # Exactly two user-supplied arguments (mode + print_pred).
    if len(argv) >= 3 and len(argv) < 4:
        if argv[1].lower() == 'raw' and argv[2].lower() == 'print_pred':
            print('\n\nUsing un-standardized data for classification and the predictions for all classifiers will be printed.\nThis will take a bit.\n\n')
            time.sleep(3)
            return train,test,True
        elif argv[1].lower() == 'standardized' and argv[2].lower() == 'print_pred':
            print('\n\nUsing standardized data for classification and the predictions for all classifiers wil be printed.\n\nNext time you can just input print_pred.\n\n')
            time.sleep(3)
            train,test = StandardScaler().fit_transform(train),StandardScaler().fit_transform(test)
            return train,test,True
        else:
            print('\n\nYou are using an unrecognized command.\nPlease use one of the following:\nstandardized: Does classification on a standardized'
            'dataset.\nraw: Does classification on the raw dataset\nprint_pred: Prints predictions for all of the classifiers. (Can be used'
            'after standardized and raw to print their classifiers)')
            time.sleep(3)
            sys.exit('\n\nTerminating Program.\n\n')
    # No arguments: standardized data, no prediction printing.
    if len(argv) <= 1:
        print('\n\nUsing standardized data for classification by default since no aruguement was provided.\n\nEnter print_pred to print out predictions for classifiers using standardized data.'
            '\nEnter raw to use raw data for classification.\nEnter raw print_pred to print out predictions for classifiers using raw data\n\n')
        time.sleep(3)
        train,test = StandardScaler().fit_transform(train),StandardScaler().fit_transform(test)
        return train,test,False
def main():
    """Load the cancer data, tune and train five classifiers, print their
    test-set scores and render the tuning + score charts in one figure."""
    train,test = load("cancer-data-train.csv"),load("cancer-data-test.csv")
    X_train,y_train = train
    X_test,y_test = test
    # Apply CLI-selected preprocessing (raw vs standardized, print_pred flag).
    X_train,X_test,print_pred = arguments(sys.argv,X_train,X_test)
    fig = plot.figure()
    # Passing training data and classes to find best C and number of leaf nodes to use. Also creating graphs to display this info
    classifier_plotter(X_train, y_train)
    # Setting up graphs for each plot
    ax1 = fig.add_subplot(234)
    ax1.set_title('Average Precsion Scores')
    ax1.set_ylabel('Precsion Score')
    ax1.set_xlabel('Classifier')
    ax2 = fig.add_subplot(235)
    ax2.set_title('Average Recall Scores')
    ax2.set_ylabel('Recall Score')
    ax2.set_xlabel('Classifier')
    ax3 = fig.add_subplot(236)
    ax3.set_title('Average F-measures')
    ax3.set_ylabel('F-measure')
    ax3.set_xlabel('Classifier')
    # Create and train the classifiers
    classifier_svm,classifier_gini,classifier_ig,classifier_lda = svm(kernel='linear', C=0.1),dt(criterion='gini',max_leaf_nodes=10),dt(criterion='entropy',max_leaf_nodes=5),lda()
    classifier_svm.fit(X_train,y_train),classifier_gini.fit(X_train,y_train),classifier_ig.fit(X_train,y_train),classifier_lda.fit(X_train,y_train)
    # Make the predictions
    pred_svm,pred_gini,pred_ig,pred_lda = classifier_svm.predict(X_test),classifier_gini.predict(X_test),classifier_ig.predict(X_test),classifier_lda.predict(X_test)
    # Calculate the precision, recall, f-measure
    avg_precision_svm,avg_precision_gini,avg_precision_ig,avg_precision_lda = average_precision_score(y_test,pred_svm),average_precision_score(y_test,pred_gini),average_precision_score(y_test,pred_ig),average_precision_score(y_test,pred_lda)
    recall_svm,recall_gini,recall_ig,recall_lda = recall_score(y_test,pred_svm, average='weighted'),recall_score(y_test,pred_gini, average='weighted'),recall_score(y_test,pred_ig, average='weighted'),recall_score(y_test,pred_lda, average='weighted')
    f_svm,f_gini,f_ig,f_lda = f1_score(y_test,pred_svm,average='weighted'),f1_score(y_test,pred_gini,average='weighted'),f1_score(y_test,pred_ig,average='weighted'),f1_score(y_test,pred_lda,average='weighted')
    ################## Extra Credit #########################
    # Train classifier and make predictions on test set
    classifier_rfc = rfc(n_estimators=100,max_depth=2)
    classifier_rfc.fit(X_train,y_train)
    pred_rfc = classifier_rfc.predict(X_test)
    #Calculate precision, recall and f-measure for Random Forest Classifier
    avg_precision_rfc = average_precision_score(y_test,pred_rfc)
    recall_rfc = recall_score(y_test,pred_rfc,average='weighted')
    f_rfc = f1_score(y_test,pred_rfc,average='weighted')
    #########################################################
    # Printing scores and predictions
    print_scores([[avg_precision_svm,avg_precision_gini,avg_precision_ig,avg_precision_lda,avg_precision_rfc],[recall_svm,recall_gini,recall_ig,recall_lda,recall_rfc],[f_svm,f_gini,f_ig,f_lda,f_rfc]])
    print_predictions([pred_svm,pred_gini,pred_ig,pred_lda,pred_rfc],print_pred)
    # Create the graphs for the scores
    score_plotter(ax1,[avg_precision_svm,avg_precision_gini,avg_precision_ig,avg_precision_lda,avg_precision_rfc])
    score_plotter(ax2,[recall_svm,recall_gini,recall_ig,recall_lda,recall_rfc])
    score_plotter(ax3,[f_svm,f_gini,f_ig,f_lda,f_rfc])
    plot.tight_layout(w_pad=1.5,h_pad=2.0)
    plot.show()
if __name__ == '__main__':
    main()  # run the full train/evaluate/plot pipeline as a script
b5a0b54de0019d0d69ce93bf38dafbaa1ddfe634 | Python | FabioMenacho/backend | /semana1/dia2/06-juego01.py | UTF-8 | 1,116 | 3.671875 | 4 | [] | no_license | # importo no toda la libreria random sino solamente choice para que no pese, ver documentación de pyhton
# random viene instalada en python
from random import choice
# ROCK-PAPER-SCISSORS GAME (prompts and output remain in Spanish)
# Define the input and output variables
opciones = ("piedra", "papel", "tijera")
jugador = input("ingresa tu jugada: ")
computadora = choice(opciones)
print("La computadora eligió: " + computadora)
ganador = ""
# Game logic: compare the player's move against the computer's random pick
if jugador == "piedra":
    if computadora == "piedra":
        ganador = "empate"
    elif computadora =="papel":
        ganador = "computadora"
    elif computadora == "tijera":
        ganador = "jugador"
elif jugador == "papel":
    if computadora == "piedra":
        ganador = "jugador"
    elif computadora =="papel":
        ganador = "empate"
    elif computadora == "tijera":
        ganador = "computadora"
elif jugador == "tijera":
    if computadora == "piedra":
        ganador = "computadora"
    elif computadora =="papel":
        ganador = "jugador"
    elif computadora == "tijera":
        ganador = "empate"
# Show the result
print ("El ganador es: " + ganador)
| true |
984e31d7e2846266839b458530db1f034d536b7b | Python | xiaosean/leetcode_python | /Q28_Implement-strStr.py | UTF-8 | 359 | 3.125 | 3 | [
"MIT"
] | permissive | class Solution:
def strStr(self, haystack: str, needle: str) -> int:
if needle == "":
return 0
if len(needle)-len(haystack)>0:
return -1
for idx, letter in enumerate(haystack[:len(haystack)-len(needle)+1]):
if haystack[idx:idx+len(needle)] == needle:
return idx
return -1 | true |
89b170d74d9e234daa71ebdf1f1af83739ed17d8 | Python | hiyounger/sdet05_demo | /qsong/uiauto0712/test_page.py | UTF-8 | 4,409 | 2.5625 | 3 | [] | no_license | #encoding:utf-8
import unittest
import json
from selenium import webdriver
from webdriverdemo.locationpage import LocationPage
class MyTestCase(unittest.TestCase):
    """Selenium end-to-end tests for the registration/search page, driven
    through the LocationPage page object on a shared Firefox session."""
    @classmethod
    def setUpClass(cls):
        # One browser instance for the whole suite (never quit here —
        # NOTE(review): consider a matching tearDownClass to close it).
        cls.driver = webdriver.Firefox()
    def setUp(self):
        # url_indexPage = "http://47.92.220.226:8000/webdriver/index.html"
        # self.driver.get(url_indexPage)
        #
        # eles_locationTest = self.driver.find_element_by_xpath('//ul/li[1]/a')
        # eles_locationTest.click()
        # Fresh page object per test; open() navigates to the form.
        self.location_page = LocationPage(self.driver)
        self.location_page.open()
    def test_register_case01(self):
        # 1. Fill in a valid registration (matching passwords).
        register_data = {
            'username': u'qsong',
            "email": u'qsong.vip@qq.com',
            "password": u'hiyoung888',
            "cpassword": u'hiyoung888'
        }
        self.location_page.register(register_data)
        retmsg_text = self.location_page.element_return_msg_label.text
        # The success label embeds JSON after the Chinese "success:" prefix.
        retmsg_json = json.loads(retmsg_text.split(u'成功:')[-1])
        print(retmsg_json)
        act = retmsg_json
        exp = {"uid":0,"username":"qsong","password":"hiyoung888","email":"qsong.vip@qq.com"}
        self.assertDictContainsSubset(exp,act)
    def test_register_case02(self):
        # 1. Register with mismatched passwords — expect the error message.
        register_data = {
            'username': u'qsong',
            "email": u'qsong.vip@qq.com',
            "password": u'hiyoung888',
            "cpassword": u'hiyoung889'
        }
        self.location_page.register(register_data)
        retmsg_text = self.location_page.element_return_msg_label.text
        print(retmsg_text)
        act = retmsg_text
        exp = u'两次输入的密码不一致'
        self.assertEqual(exp,act)
    def test_search_case03(self):
        # Register first, then search the new user by uid.
        register_data = {
            'username': u'qsong',
            "email": u'qsong.vip@qq.com',
            "password": u'hiyoung888',
            "cpassword": u'hiyoung888'
        }
        self.location_page.register(register_data)
        ele_insert_id = self.driver.find_element_by_xpath(u'//*[@id="search_uid"]')
        ele_insert_id.send_keys(u'0')
        ele_query_id = self.driver.find_element_by_xpath(u'//div[1]/input[2]')
        ele_query_id.click()
        ele_seamsg = self.driver.find_element_by_id(u'search_msg')
        seamsg_text = ele_seamsg.text
        seamsg_json = json.loads(seamsg_text)
        print(seamsg_json)
        act = seamsg_json
        exp = {"uid": 0, "username": "qsong", "password": "hiyoung888", "email": "qsong.vip@qq.com"}
        self.assertDictContainsSubset(exp, act)
    def test_search_case04(self):
        # Register first, then search the new user by username.
        register_data = {
            'username': u'qsong',
            "email": u'qsong.vip@qq.com',
            "password": u'hiyoung888',
            "cpassword": u'hiyoung888'
        }
        self.location_page.register(register_data)
        ele_insert_name = self.driver.find_element_by_xpath(u'//*[@id="search_uname"]')
        ele_insert_name.send_keys(u'qsong')
        ele_query_name = self.driver.find_element_by_xpath(u'//div[2]/input[2]')
        ele_query_name.click()
        ele_seamsg = self.driver.find_element_by_id(u'search_msg')
        seamsg_text = ele_seamsg.text
        seamsg_json = json.loads(seamsg_text)
        print(seamsg_json)
        act = seamsg_json
        exp = {"uid": 0, "username": "qsong", "password": "hiyoung888", "email": "qsong.vip@qq.com"}
        self.assertDictContainsSubset(exp, act)
    def test_search_case05(self):
        # Register first, then search the new user by email.
        register_data = {
            'username': u'qsong',
            "email": u'qsong.vip@qq.com',
            "password": u'hiyoung888',
            "cpassword": u'hiyoung888'
        }
        self.location_page.register(register_data)
        ele_insert_email = self.driver.find_element_by_xpath(u'//*[@id="search_email"]')
        ele_insert_email.send_keys(u'qsong.vip@qq.com')
        ele_query_email = self.driver.find_element_by_xpath(u'//div[3]/input[2]')
        ele_query_email.click()
        ele_seamsg = self.driver.find_element_by_id(u'search_msg')
        seamsg_text = ele_seamsg.text
        seamsg_json = json.loads(seamsg_text)
        print(seamsg_json)
        act = seamsg_json
        exp = {"uid": 0, "username": "qsong", "password": "hiyoung888", "email": "qsong.vip@qq.com"}
        self.assertDictContainsSubset(exp, act)
if __name__ == '__main__':
    unittest.main()  # run the selenium suite when executed directly
| true |
c16ecf6827bf9c0b72d5759ea74c947987888fc9 | Python | leemengwei/GNRX | /trading/dependency_make_strategy.py | UTF-8 | 4,056 | 2.703125 | 3 | [] | no_license | import pulp
import pandas as pd
import sys,os
from IPython import embed
import config
import numpy as np
import dependency_compute_income
class StrategyAgent(object):
    '''The StrategyAgent will put its strategic declaration to its strategic_fore_power column for later income-computing'''
    # NOTE(review): `yonghu=[]` is a mutable default argument; pass None and
    # normalize inside __init__ to avoid cross-instance sharing.
    def __init__(self, cap, data, midlong_price, benchmark_price, farm_loss, yonghu=[]):
        self.cap = cap
        self.data = data
        self.midlong_price = midlong_price
        self.benchmark_price = benchmark_price
        self.farm_loss = farm_loss
        self.yonghu = yonghu
    def do_strategy(self, name):
        # strategy of different scenarios:
        # Dispatch to the named declaration strategy; unknown names abort.
        print('--using strategy: %s'%name)
        if name == 'origin':
            new_declaration = self.no_strategy()
        elif name == 'yonghu':
            new_declaration = self.user_strategy()
        elif name == 'riqian_minimum':
            new_declaration = self.strategy_riqian_minimum()
        elif name == 'riqian_maximum':
            new_declaration = self.strategy_riqian_maximum()
        elif name == 'use_pulp':
            new_declaration = self.strategy_use_pulp()
        else:
            print("Unkown strategy: %s"%name)
            sys.exit()
        # Will later put into use
        return new_declaration
    def no_strategy(self):
        '''blind strategy'''
        # Declare exactly the forecast power, unchanged.
        new_declaration = self.data['fore_power']
        return new_declaration
    def user_strategy(self):
        '''blind strategy'''
        # Overwrite the tail of the forecast with user-supplied values, if any.
        new_declaration = self.data['fore_power'].copy()
        if len(self.yonghu) > 0:
            new_declaration[-len(self.yonghu):] = self.yonghu
        else:
            pass
        return new_declaration
    def strategy_riqian_minimum(self):
        '''blind strategy'''
        # Declare zero everywhere.
        new_declaration = 0
        return new_declaration
    def strategy_riqian_maximum(self):
        '''blind strategy'''
        # Declare full capacity everywhere.
        new_declaration = self.cap
        return new_declaration
    def strategy_use_pulp(self):
        '''bright strategy'''
        # use pulp package:
        # step1, define problem:
        self.problem = pulp.LpProblem(name='spot_income_optimizer', sense=pulp.const.LpMaximize)
        # step2, set variables: one bounded declaration variable per time slot.
        X = []
        for i in range(0, len(self.data)):
            X.append(pulp.LpVariable('x%s'%i, lowBound=0, upBound=self.cap))
        # step3, set objective:
        self.data['strategic_fore_power'] = X # Note: this will later serve as follows: fore_power --> midlong_compose --> riqian_clean
        IncomeCalculator = dependency_compute_income.MengXiIncomeCalculator(self.data, self.midlong_price, self.benchmark_price, self.farm_loss)
        IncomeCalculator.compute_income()
        z = IncomeCalculator.income_all
        # step4, define constraints:
        # For a 96-slot (15-min) solar day, pin declarations outside the
        # configured daylight hours to zero.
        constraints = []
        if len(config.solar_hours)>0 and len(X)==96:
            for idx,x in enumerate(X):
                if int(str(x).strip('x'))<config.solar_hours[0]*4 or int(str(x).strip('x'))>config.solar_hours[-1]*4:
                    constraints.append(X[idx] == 0)
        # solve:
        for constraint in constraints:
            self.problem += constraint
        self.problem += z
        pulp.LpSolverDefault.msg=False
        status = pulp.LpStatus[self.problem.solve()]
        result = pulp.value(self.problem.objective)
        new_declaration = self._parse_pulp_solution()
        return new_declaration
    def _parse_pulp_solution(self):
        '''Mind param sequence'''
        # pulp returns variables in lexicographic order (x1, x10, x11, ...);
        # recover the numeric order from the name suffixes.
        try:
            seq = np.argsort(np.array([i.strip('x') for i in str(self.problem.variables())[1:-1].split(', ')], dtype=int))
        except Exception as e:
            # NOTE(review): on failure `seq` is undefined and the next line
            # raises NameError; consider re-raising explicitly.
            print('Dummy solving variables, did you put columns in positions?', e)
        sorted_variables = np.array(self.problem.variables())[seq].tolist()
        solution = []
        for idx,var in enumerate(sorted_variables):
            solution.append(var.varValue)
        solution = pd.DataFrame(solution, index=self.data.index)
        return solution
| true |
8a968aa793a24410d0b6d6bbbaa594a23626bec7 | Python | stjohn/stjohn.github.io | /teaching/seminar4/s17/cw4.py | UTF-8 | 873 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 24 11:17:29 2017
@author: stjohn
"""
import folium
from folium.plugins import MarkerCluster
import pandas as pd
#import numpy as np
# Read campus locations (expects Latitude/Longitude/Campus columns) and
# build a clustered folium marker map centred on NYC.
cuny = pd.read_csv('cunyLocations.csv')
print (cuny)
mapCUNY = folium.Map(location=[40.75, -74.125])
coords = []
popups = []
for index,row in cuny.iterrows():
    lat = row["Latitude"]
    lon = row["Longitude"]
    name = row["Campus"]
    coords.append([lat,lon])
    popups.append(name)
    #mapCUNY.simple_marker([lat, lon], popup=name, clustered_marker = True)
# One MarkerCluster holding every campus, then write the interactive HTML map.
mapCUNY.add_children(MarkerCluster(locations=coords, popups = popups))
mapCUNY.save(outfile='cunyLocations.html')
'''
map = folium.Map(location=[40.75, -73.99], zoom_start=10, tiles="Mapbox Bright")
map.geo_json(geo_path = 'zoning2.json', fill_opacity=0.2)
map.create_map(path='zoning.html')
'''
384169bed84ac2b731f25102214fa6aca0564ed1 | Python | brettdh/pyinstaller-signal-handling | /main.py | UTF-8 | 561 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python
import signal
import time
from signal import SIGHUP, SIGUSR1, SIGUSR2
import requests
# Flag flipped by the signal handler; the main loop polls it to exit.
running = True
def handler(sig, *args):
    # Runs asynchronously on SIGHUP/SIGUSR1/SIGUSR2; just request shutdown.
    print(' | got signal {}; exiting cleanly'.format(sig))
    global running
    running = False
signals = [SIGHUP, SIGUSR1, SIGUSR2]
for sig in signals:
    signal.signal(sig, handler)
# Poll httpbin roughly once a minute until a signal clears `running`;
# sleeping in 1-second slices keeps shutdown latency low.
while running:
    r = requests.get('https://httpbin.org/get')
    print(' | {} {} {}'.format(r.status_code, r.reason, r.url))
    for _ in range(60):
        time.sleep(1)
        if not running:
            break
| true |
14175c0db55b9a42b1bb478e4639ae2c3412dc67 | Python | mridullpandey/faq-bot | /faq-publish-api/tests/unit/test_user_service_map_user.py | UTF-8 | 1,073 | 2.578125 | 3 | [
"MIT"
] | permissive | import unittest
import os
import uuid
import names
import random
from datetime import datetime, timedelta
from services.user_service import UserService
from dotenv import load_dotenv
load_dotenv()
class TestMapUser(unittest.TestCase):
    """Unit test for UserService.map_user against a throwaway local sqlite
    datastore configured through an environment variable."""
    LOCAL_DB_FILE = '/data//datastores/local_test.sqlite3'
    def setUp(self):
        # Point the service at the local sqlite file before constructing it.
        os.environ['USER_TOPICS_DATASTORE_CONNECTION_STRING'] = 'sqlite://' + self.LOCAL_DB_FILE + '?check_same_thread=False'
        self.user_service = UserService()
    def tearDown(self):
        # Drop the service and delete the sqlite file created in the CWD.
        self.user_service = None
        if os.path.exists('.' + self.LOCAL_DB_FILE):
            os.remove('.' + self.LOCAL_DB_FILE)
    def test_map_user(self):
        # map_user should echo back id/name/created unchanged.
        id = str(uuid.uuid4())
        name = names.get_full_name()
        created = datetime.now().timestamp()
        user = self.user_service.map_user(id, name, created)
        self.assertIsNotNone(user)
        self.assertTrue(user['id'] == id)
        self.assertTrue(user['name'] == name)
        self.assertTrue(user['created'] == created)
if __name__ == '__main__':
    unittest.main()  # run this test module directly
| true |
7a4a4e9ef297288df80797cefe13ddaddaad24dc | Python | semensorokin/educational-nlp-projects | /py_prog/XML_/XML.py | UTF-8 | 1,176 | 3.015625 | 3 | [] | no_license | import xml.etree.ElementTree as ET
# Load the roster document and walk it: one <student> element per student,
# each with graded <subject> children.
tree = ET.parse('xml_test.xml')
root = tree.getroot()
print(root.tag)
for students in root:
    print (students.tag)
    print(students.attrib)
    if (len(students))!=0:
        for subjects in students:
            print(subjects.attrib['name']+ ':'+ subjects.text)
#student= ET.SubElement(root, 'student', {'name':'Doctor', 'surname': 'Who'})
#tree.write('xml_test.xml')
def add_smth(root):
    # Interactively append a new <student> (name and surname read from stdin)
    # with four graded subjects, then persist via the module-level `tree`.
    name=input()
    surname=input()
    student= ET.SubElement(root, 'student', {'name':name, 'surname': surname})
    print('Grade of proga:')
    proga=input()
    subject_p = ET.SubElement(student, 'subject', {'name': "Proga"})
    subject_p.text = proga
    print('Grade of matan:')
    matan=input()
    subject_p = ET.SubElement(student, 'subject', {'name': "Matan"})
    subject_p.text = matan
    print('Grade of eng:')
    eng=input()
    subject_p = ET.SubElement(student, 'subject', {'name': "Eng"})
    subject_p.text = eng
    print('Grade of linal:')
    linal=input()
    subject_p = ET.SubElement(student, 'subject', {'name': "Linal"})
    subject_p.text = linal
    tree.write('xml_test.xml')
    print('ok')
add_smth(root)
| true |
a9d65b92cb743b4f42d35f4e226d861528d901a4 | Python | kuceran3/thesis | /readChunk.py | UTF-8 | 231 | 2.8125 | 3 | [] | no_license | import sys
import struct
# Require the input file path as the first CLI argument.
if (len(sys.argv) < 2):
    print("Usage: python readChunk.py <INPUT_FILE>")
    exit()
with open(sys.argv[1], 'rb') as inp:
    # Dump the file one signed byte at a time until EOF.
    while(True):
        a = inp.read(1)
        if(not a):
            break
        print(struct.unpack('b', a))
| true |
7de4898884d46bf9f6dc49d16412583876b94610 | Python | chrigu6/python_webserver_from_scratch | /server.py | UTF-8 | 698 | 2.796875 | 3 | [] | no_license | import socket
import typing
from Request import Request
HOST = "127.0.0.1"
PORT = 9000
# Canned HTTP/1.1 response; the 15-byte body matches Content-length.
# The literal opens with b"""\ (backslash line-continuation) so the payload
# begins directly with the status line — the previous b""""\ embedded a
# stray '"' before "HTTP/1.1", corrupting the status line. The blank line
# separates headers from body, and every \n becomes the required \r\n.
RESPONSE = b"""\
HTTP/1.1 200 OK
Content-type: text/html
Content-length: 15

<h1>Hello!</h1>""".replace(b"\n", b"\r\n")
with socket.socket() as server_sock:
    # Allow immediate rebinding after a restart (skip TIME_WAIT).
    server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_sock.bind((HOST, PORT))
    server_sock.listen(0)
    print(f"Listening on {HOST}:{PORT}...")
    # Sequential accept loop: one client at a time, one canned response each.
    while True:
        client_sock, client_addr = server_sock.accept()
        print(f"New connection from {client_addr}.")
        with client_sock:
            # Parse the incoming request (project-local Request helper).
            request = Request.from_socket(client_sock)
            print(request)
client_sock.sendall(RESPONSE) | true |
5b459df85685c1a98adbdbf26ceecf8d0cd482a9 | Python | rafaelbattesti/poster-kata | /svc-datagen/core/data_access.py | UTF-8 | 2,767 | 2.515625 | 3 | [] | no_license | """
Author: Rafael Battesti - rafaelbattesti.com
Since: 2019-11-20
Module to provide data access, from and to external or internal persistence(i.e. csv for loading)
"""
import csv
import os
import swapi
import psycopg2 as pg
# DBs
# TODO: Investigate docker-compose .env
PG_HOST = os.getenv("POSTGRES_HOST", "svc-postgres")
PG_USER = os.getenv("POSTGRES_USER", "postgres")
# NOTE(review): a real credential is hard-coded as the fallback default;
# move it into secrets management and fail fast when the env var is unset.
PG_PASS = os.getenv("POSTGRES_PASSWORD", "YqEa3$R8wpUJVJJc")
PG_BASE_CONNECTION_STRING = "postgresql://{}:{}@{}/{}"
# SWS
SWS_DB = "sws"
SWS_CONNECTION_STRING = PG_BASE_CONNECTION_STRING.format(
    PG_USER, PG_PASS, PG_HOST, SWS_DB
)
# Fully qualified target tables in the "ordering" schema.
SWS_DIM_CUSTOMER_TGT = "ordering.dim_customer"
SWS_DIM_PRODUCT_TGT = "ordering.dim_product"
SWS_DIM_PROMO_TGT = "ordering.dim_promo_code"
SWS_FACT_SALES_ORDER_TGT = "ordering.fact_sales_order"
SWS_FACT_SALES_ORDER_ITEM_TGT = "ordering.fact_sales_order_item"
# CSV
# Local staging file per table, produced by the save_* helpers below.
SWS_DIM_CUSTOMER_CSV = "dim_customer.csv"
SWS_DIM_PRODUCT_CSV = "dim_product.csv"
SWS_DIM_PROMO_CODE_CSV = "dim_promo_code.csv"
SWS_FACT_SALES_ORDER_CSV = "fact_sales_order.csv"
SWS_FACT_SALES_ORDER_ITEM_CSV = "fact_sales_order_item.csv"
def get_sws_conn():
    """ Get a psycopg2 connection to sws """
    # Caller owns the returned connection and is responsible for closing it.
    return pg.connect(SWS_CONNECTION_STRING)
def source_swapi_starship_raw():
    """ Fetch raw starship data """
    # Pages through the SWAPI "starships" endpoint (network call).
    return swapi.get_all("starships")
def save_csv(df, csv_file):
    """Write *df* to *csv_file*: index kept, no header row, comma separated,
    minimal quoting."""
    df.to_csv(csv_file, sep=",", index=True, header=False, quoting=csv.QUOTE_MINIMAL)
# Thin per-entity wrappers: each dumps one prepared DataFrame to its fixed
# CSV staging file using the shared save_csv settings.
def save_dim_customer_csv(df_dim_customer):
    save_csv(df_dim_customer, SWS_DIM_CUSTOMER_CSV)
def save_dim_product_csv(df_dim_product):
    save_csv(df_dim_product, SWS_DIM_PRODUCT_CSV)
def save_dim_promo_code_csv(df_dim_promo_code):
    save_csv(df_dim_promo_code, SWS_DIM_PROMO_CODE_CSV)
def save_fact_sales_order_csv(df_fact_sales_order):
    save_csv(df_fact_sales_order, SWS_FACT_SALES_ORDER_CSV)
def save_fact_sales_order_item_csv(df_fact_sales_order_item):
    save_csv(df_fact_sales_order_item, SWS_FACT_SALES_ORDER_ITEM_CSV)
def push_sws_table(csv, table):
    """
    Bulk-load a local CSV file into the given sws table via COPY ... FROM STDIN.

    Params:
        csv: path of the CSV file to load (note: the parameter shadows the
             stdlib `csv` module inside this function)
        table: fully qualified target table, e.g. "ordering.dim_customer"
    """
    sql = """COPY {} FROM STDIN WITH CSV DELIMITER AS ','"""
    conn = get_sws_conn()
    try:
        cursor = conn.cursor()
        with open(csv) as f:
            cursor.copy_expert(
                sql=sql.format(table), file=f,
            )
        conn.commit()
    finally:
        # The previous version leaked the connection on every call (and left
        # it open on errors before commit); always hand it back.
        conn.close()
# Thin per-entity wrappers: each bulk-loads one staged CSV into its target table.
def push_sws_dim_customer():
    push_sws_table(SWS_DIM_CUSTOMER_CSV, SWS_DIM_CUSTOMER_TGT)
def push_sws_dim_product():
    push_sws_table(SWS_DIM_PRODUCT_CSV, SWS_DIM_PRODUCT_TGT)
def push_sws_dim_promo_code():
    push_sws_table(SWS_DIM_PROMO_CODE_CSV, SWS_DIM_PROMO_TGT)
def push_sws_fact_sales_order():
    push_sws_table(SWS_FACT_SALES_ORDER_CSV, SWS_FACT_SALES_ORDER_TGT)
def push_sws_fact_sales_order_item():
    push_sws_table(SWS_FACT_SALES_ORDER_ITEM_CSV, SWS_FACT_SALES_ORDER_ITEM_TGT)
| true |
a4f74afc9b33046af4766af2a1a480645b3d419b | Python | manishbhusal16/CE_lab2_09 | /MergeSort.py | UTF-8 | 1,766 | 3.515625 | 4 | [] | no_license | import random
import unittest
from time import time
import matplotlib.pyplot as plt
class TestMergeSort(unittest.TestCase):
    """Unit test for the module-level merge_sort (run via the commented-out
    unittest.main() block at the bottom of the file)."""
    def testSort(self):
        data = [7,8,1,4,2,5,3]
        merge_sort(data)
        self.assertListEqual(data,[1,2,3,4,5,7,8])
def merge_sort(alist):
    """Sort *alist* in place (ascending) using recursive top-down merge sort."""
    if len(alist) <= 1:
        return
    middle = len(alist) // 2
    left, right = alist[:middle], alist[middle:]
    merge_sort(left)
    merge_sort(right)
    # Merge the two sorted halves back into alist.
    li = ri = wi = 0
    while li < len(left) and ri < len(right):
        if left[li] < right[ri]:
            alist[wi] = left[li]
            li += 1
        else:
            alist[wi] = right[ri]
            ri += 1
        wi += 1
    # Copy whichever half still has elements left over.
    for value in left[li:]:
        alist[wi] = value
        wi += 1
    for value in right[ri:]:
        alist[wi] = value
        wi += 1
def plot():
    """Benchmark merge_sort on growing random inputs and chart time vs n."""
    print("Calculating . . .please wait")
    exeTime_dic = {}  # input size -> elapsed seconds
    for i in range (500,10000,1000):
        # Fresh shuffled input of exactly i distinct values per run.
        randomValue = random.sample(range(i),i)
        start=time()
        merge_sort(randomValue)
        end=time()
        calc=end-start
        exeTime_dic[i] = calc
    exeTime = list(exeTime_dic.values())
    inpSize = list(exeTime_dic.keys())
    plt.plot(inpSize, exeTime)
    plt.xlabel('Input Size(n)')
    plt.ylabel('Execution Time (sec)')
    plt.title("Merge Sort")
    plt.xticks(inpSize)
    plt.yticks(exeTime)
    plt.show()
# Demo run: sort a sample list, print it, then show the benchmark chart.
data = [7,8,1,4,2,5,3]
merge_sort(data)
print(data)
plot()
#To Test Merge Sort Uncomment Below Two code and Comment code outside function
# if __name__ == '__main__':
#     unittest.main()
| true |
28457b390d6921e8de0837a9febd42291e27c3af | Python | 69SomeoneElse69/Homework | /Task2.py | UTF-8 | 665 | 3.984375 | 4 | [] | no_license | # Пользователь вводит время в секундах.
# Read a duration in seconds from the user.
User_Seconds = int(input('Введите количество секунд: '))
# Convert to hours / minutes / seconds with divmod instead of repeating
# floor-division and modulo by hand.
hours, remainder = divmod(User_Seconds, 3600)
minutes, seconds = divmod(remainder, 60)
# The f-string ':02d' spec replaces the manual "prepend '0' if < 10"
# branches; hours stay unpadded, matching the original output format.
print(f'{hours}:{minutes:02d}:{seconds:02d}')
| true |
3d67f8f246f1640f4a7066cebb133b1178a772c4 | Python | RC1595/fizzbuzz | /app.py | UTF-8 | 511 | 3.734375 | 4 | [] | no_license | number = [3, 5, 15, 60, 37, 48, 61, 88, 95, 70, 235, 540, 125, 444, 655]
def fizz_buzz(number):
    """Print the FizzBuzz word for ``number``.

    Multiples of 15 print "FizzBuzz", of 5 "Buzz", of 3 "Fizz";
    everything else prints the number itself.
    """
    by_three = number % 3 == 0
    by_five = number % 5 == 0
    if by_three and by_five:
        print("FizzBuzz")
    elif by_five:
        print("Buzz")
    elif by_three:
        print("Fizz")
    else:
        print(number)
# Exercise fizz_buzz over the sample values (each call prints one line).
fizz_buzz(3)
fizz_buzz(5)
fizz_buzz(15)
fizz_buzz(60)
fizz_buzz(37)
fizz_buzz(48)
fizz_buzz(61)
fizz_buzz(88)
fizz_buzz(95)
fizz_buzz(70)
fizz_buzz(235)
fizz_buzz(540)
fizz_buzz(125)
fizz_buzz(444)
fizz_buzz(655)
| true |
8706471b662e844f9fb57cb59c0655df1b311c2a | Python | Leslieaj/Opacity_for_RTA | /normalform.py | UTF-8 | 4,532 | 2.6875 | 3 | [] | no_license | #define normal form
from projection import *
class NForm:
def __init__(self, x1, x2, k, N):
self.x1 = x1
self.x2 = x2
self.k = k
self.N = N
class WNForm:
def __init__(self, x1, x2, k):
self.x1 = x1
self.x2 = x2
self.k = k
def gcd(a, b):
    """Greatest common divisor of two positive integers (Euclid).

    Both arguments are assumed to be greater than 0, as in the original
    (commented-out) assertion.
    """
    #assert a > 0 and b > 0,'parameters must be greater than 0.'
    while b:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Least common multiple of two positive integers.

    Bug fix: the original returned ``int(a * b / gcd(a, b))``.  True
    division routes through float, which loses precision for large
    integers and silently returns a wrong lcm; integer floor division
    is exact.
    """
    #assert a > 0 and b > 0,'parameters must be greater than 0.'
    return a * b // gcd(a, b)
def intersect_constraint(c1, c2):
    """Intersect two interval constraints.

    Returns ``(constraint, True)`` with the overlapping interval when the
    intervals intersect, otherwise ``(Constraint("(0,0)"), False)`` as an
    empty placeholder.
    """
    def _endpoints(c):
        # Build the bracketed endpoints of a constraint, honouring whether
        # each side is closed ([ / ]) or open (( / )).
        low = BracketNum(c.min_value, Bracket.LC if c.closed_min else Bracket.LO)
        high = BracketNum(c.max_value, Bracket.RC if c.closed_max else Bracket.RO)
        return low, high
    min_bn1, max_bn1 = _endpoints(c1)
    min_bn2, max_bn2 = _endpoints(c2)
    bnlist = [min_bn1, max_bn1, min_bn2, max_bn2]
    bnlist.sort()
    left_bn = bnlist[1]
    right_bn = bnlist[2]
    # The two middle endpoints form the intersection only when they are a
    # lower bound and an upper bound respectively; otherwise the intervals
    # are disjoint.
    # Bug fix: the original tested the undefined name ``right`` here, which
    # raised NameError whenever this branch was evaluated; it must be
    # ``right_bn``.
    if left_bn in [min_bn1, min_bn2] and right_bn in [max_bn1, max_bn2]:
        return Constraint(left_bn.getbn() + ',' + right_bn.getbn()), True
    return Constraint("(0,0)"), False
def unintersect_intervals(uintervals):
    """Split a union of intervals into pairwise-disjoint pieces.

    Collects every endpoint of the input constraints, adds each endpoint's
    complement plus the global floor ``[0`` and ceiling ``+inf)``, sorts
    them, and pairs consecutive endpoints into intervals.
    """
    unintersect = []
    floor_bn = BracketNum('0', Bracket.LC)
    ceil_bn = BracketNum('+', Bracket.RO)
    key_bns = []
    for constraint in uintervals:
        # Rebuild each endpoint as a BracketNum, honouring open/closed ends.
        min_bracket = Bracket.LC if constraint.closed_min == True else Bracket.LO
        max_bracket = Bracket.RC if constraint.closed_max == True else Bracket.RO
        min_bn = BracketNum(constraint.min_value, min_bracket)
        max_bn = BracketNum(constraint.max_value, max_bracket)
        if min_bn not in key_bns:
            key_bns.append(min_bn)
        if max_bn not in key_bns:
            key_bns.append(max_bn)
    key_bnsc = copy.deepcopy(key_bns)
    for bn in key_bns:
        bnc = bn.complement()
        if bnc not in key_bnsc:
            key_bnsc.append(bnc)
    if floor_bn not in key_bnsc:
        key_bnsc.append(floor_bn)
    if ceil_bn not in key_bnsc:
        key_bnsc.append(ceil_bn)
    key_bnsc.sort()
    for index in range(0, len(key_bnsc), 2):
        # Pair consecutive endpoints into one disjoint interval.
        temp_constraint = Constraint(key_bnsc[index].getbn() + ',' + key_bnsc[index + 1].getbn())
        # Bug fix: the original appended ``constraint`` -- the stale loop
        # variable from the endpoint-collection loop above -- so the result
        # was filled with copies of the last input interval instead of the
        # freshly built ``temp_constraint``.
        unintersect.append(temp_constraint)
    return unintersect
def union_intervals_to_nform(uintervals):
    """Convert a union of intervals into an NForm with period k=1.

    # NOTE(review): returns None implicitly when ``uintervals`` is empty --
    # confirm callers handle that.
    """
    if len(uintervals) >= 1:
        x1 = unintersect_intervals(uintervals)
        k = 1
        # The last (largest) disjoint interval decides whether the union is
        # unbounded on the right.
        constraint = x1[len(x1)-1]
        N = None
        x2 = []
        if constraint.max_value == '+':
            # Unbounded: truncate the last interval at N and move the tail
            # into the periodic part x2.
            N = int(constraint.min_value)+1
            left,_ = constraint.guard.split(',')
            right = str(N) + ')'
            new_constraint = Constraint(left+','+right)
            x1 = x1[:-1]
            x1.append(new_constraint)
            x2.append(Constraint('['+str(N)+','+str(N+1)+')'))
        else:
            # Bounded: the offset is just past the largest endpoint.
            N = int(constraint.max_value)+1
        return NForm(x1,x2,k,N)
def nform_union(X, Y):
    """Union of two normal forms, returned as a weak normal form.

    The result's period is lcm(X.k, Y.k); both periodic parts are unrolled
    onto the common period before being merged and made disjoint.
    """
    # Bug fix: the original read ``Y.K`` -- NForm/WNForm only define a
    # lowercase ``k`` attribute, so this raised AttributeError.
    m = lcm(X.k, Y.k)
    new_x1 = []
    new_x1.extend(X.x1)
    new_x1.extend(Y.x1)
    new_x1 = unintersect_intervals(new_x1)
    # Bug fix: ``//`` keeps these integral; the original used ``/`` which
    # yields a float on Python 3 and makes range() raise TypeError below.
    m_k_1 = m // X.k - 1
    m_l_1 = m // Y.k - 1
    new_x2 = []
    # Unroll X's periodic part over 0, X.k, 2*X.k, ... up to the lcm.
    for i in range(m_k_1 + 1):
        k_constraint = Constraint('[' + str(i * X.k) + ',' + str(i * X.k) + ']')
        for constraint in X.x2:
            new_x2.append(add_constraints(constraint, k_constraint))
    # Likewise unroll Y's periodic part.
    for i in range(m_l_1 + 1):
        l_constraint = Constraint('[' + str(i * Y.k) + ',' + str(i * Y.k) + ']')
        for constraint in Y.x2:
            new_x2.append(add_constraints(constraint, l_constraint))
    new_x2 = unintersect_intervals(new_x2)
    return WNForm(new_x1, new_x2, m)
| true |
9db301f72c7a759bdc88384e4dbeab62f8957afa | Python | Bobinar/aoc2019 | /day_3.py | UTF-8 | 1,712 | 3.515625 | 4 | [] | no_license |
def build_set_from_ops(ops):
    """Trace a wire path given ops like "R8", "U5" starting at the origin.

    Returns ``(visited, first_visit)``: the set of grid points the wire
    covers, and for each point the step count at which it was first reached.
    """
    directions = {'R': (1, 0), 'L': (-1, 0), 'U': (0, 1), 'D': (0, -1)}
    visited = set()
    first_visit = dict()
    x, y = 0, 0
    steps = 0
    for op in ops:
        dx, dy = directions[op[0]]
        for _ in range(int(op[1:])):
            x, y = x + dx, y + dy
            steps += 1
            visited.add((x, y))
            # Keep only the first arrival time for each point.
            if (x, y) not in first_visit:
                first_visit[(x, y)] = steps
    return visited, first_visit
if __name__ == '__main__':
    # Advent of Code 2019 day 3: read two comma-separated wire paths,
    # find their crossing points, and report the crossing with the
    # smallest combined step distance.
    print("day three")
    #code = [1,9,10,3,2,3,11,0,99,30,40,50]
    code = []
    with open('day_3.txt', 'r') as input_file:
        line = input_file.readline()
        ops = line.split(',')
        first_set, first_position_to_distance= build_set_from_ops(ops)
        line = input_file.readline()
        ops = line.split(',')
        second_set, second_position_to_distance= build_set_from_ops(ops)
        intersections = first_set.intersection(second_set)
        print(intersections)
        # NOTE(review): ``min`` shadows the builtin within this scope.
        min = 1000000
        min_position = None
        for intersection in intersections:
            #distance = abs(intersection[0]) + abs(intersection[1])
            distance = first_position_to_distance[intersection] + second_position_to_distance[intersection]
            if distance < min:
                min = distance
                min_position = intersection
        print(min)
        print(min_position)
| true |
faf477695a6bc146a012df9e7171a9585e126d49 | Python | sjaca10/paho-basic-orm | /source/ORM_example_client.py | UTF-8 | 2,986 | 2.734375 | 3 | [] | no_license | from pymongo import MongoClient
import MySQLdb
import paho.mqtt.client as mqtt
import json
import psycopg2
# NOTE(review): this file uses Python 2 print statements throughout and only
# runs under Python 2.
def on_connect(client, userdata, rc):
    """MQTT connect callback: subscribe to one ping topic per DBMS."""
    print "Client connected with result {0}".format(rc)
    client.subscribe("mongodb/company/ping")
    client.subscribe("mysql/company/ping")
    client.subscribe("redis/company/ping")
    client.subscribe("postgresql/company/ping")
def on_subscribe(client, userdata, mid, granted_qos):
    """MQTT subscribe callback: report the granted QoS."""
    print "Subscribed with Quality of Service {0}".format(granted_qos)
def on_message(client, userdata, msg):
    """Route an incoming message: topic is ``<dbms>/<database>/<table>``."""
    print "Message received"
    data = msg.topic.split("/")
    client.insert(data[0], data[1], data[2], msg.payload)
def insert(dbms, database, table, payload):
    """Decode the JSON payload and dispatch to the matching DBMS handler."""
    print "Information will insert on database {0} in table {1} at {2}".format(database, table, dbms)
    data = json.JSONDecoder().decode(payload)
    if dbms == "mongodb":
        client.mongo(database, table, data)
    elif dbms == "mysql":
        client.mysql(database, table, data)
    elif dbms == "redis":
        # Redis support is a stub: nothing is actually stored.
        print "Inserted on {0}".format(dbms)
    elif dbms == "postgresql":
        client.postgresql(database, table, data)
    else:
        print "Unsupported DBMS"
def mongo(database, table, data):
    """Insert ``data`` as one document into MongoDB ``database.table``."""
    # Getting a client
    mongo = MongoClient('mongodb://localhost:27017')
    # Getting a database
    db = mongo[database]
    # Inserting data on a collection
    item = db[table].insert_one(data)
    # Showing result
    print "Inserted item {0} on {1}".format(item.inserted_id, database)
def mysql(database, table, data):
    """Insert a (latitude, longitude) row into a MySQL table.

    # NOTE(review): the SQL is built by string concatenation from the
    # payload -- SQL-injection prone; parameterized queries would be safer.
    """
    # Getting a client
    mysql = MySQLdb.connect(host = "localhost", user = "root", passwd = "root", db = database)
    # Creating a cursor
    cursor = mysql.cursor()
    # Insert data
    cursor.execute("INSERT INTO " + table + " (latitude, longitude) VALUES("+ str(data["latitude"]) + ","+ str(data["longitude"]) +")")
    # Confirm changes
    mysql.commit()
    # Closing connections
    cursor.close()
    mysql.close()
    # Showing result
    # NOTE(review): cursor.lastrowid is read after the cursor was closed.
    print "Inserted tuple {0} on {1}".format(cursor.lastrowid, database)
def postgresql(database, table, data):
    """Insert a (latitude, longitude) row into a PostgreSQL table."""
    # Connect to an existing database
    postgresql = psycopg2.connect(database = database, user = "companyuser", password = "companyuser")
    # Open a cursor to perform database operations
    cursor = postgresql.cursor()
    # Pass data to fill a query placeholders and let Psycopg perform
    # the correct conversion (no more SQL injections!)
    cursor.execute("INSERT INTO " + table + " (latitude, longitude) VALUES (%s, %s)", (data["latitude"], data["longitude"]))
    # make the changes to the database persistent
    postgresql.commit()
    # Close communication with the database
    cursor.close()
    postgresql.close()
    # Showing result
    # NOTE(review): cursor.lastrowid is read after close and is not
    # meaningful for a plain INSERT in psycopg2 -- confirm intent.
    print "Inserted tuple {0} on {1}".format(cursor.lastrowid, database)
# Wire the callbacks and handler functions onto the client, then block
# forever processing incoming MQTT messages.
client = mqtt.Client()
client.on_connect = on_connect
client.on_subscribe = on_subscribe
client.on_message = on_message
client.insert = insert
client.mongo = mongo
client.mysql = mysql
client.postgresql = postgresql
client.connect("localhost", 1883, 60)
client.loop_forever()
0735a3e5be4318bd29a5fc16dc99c2b6bcf2dadc | Python | Luolingwei/LeetCode | /BitManipulation/Q1404_Number of Steps to Reduce a Number in Binary Representation to One.py | UTF-8 | 1,209 | 3.890625 | 4 | [] | no_license |
# 思路1: 直接将数字用二进制表示进行操作
# 思路2: 对string进行操作, 从后往前扫描, 除以2为往右移1位, 加1为在尾部加1, 如果尾部为1那么为奇数,否则为偶数
# 如果当前数字s[i]+carry=0, 需要1步操作(除以2), carry置为0
# 如果当前数字s[i]+carry=1, 需要2步操作(加1, 除以2), carry置为1
# 如果当前数字s[i]+carry=2, 需要1步操作(除以2), carry置为1
# 扫描到头部时停止,如果carry=1, 则还需要1步(除以2)完成
class Solution:
    def numSteps(self, s: str):
        """Number of steps to reduce the binary string ``s`` to 1.

        Each step halves an even value or adds 1 to an odd value.  Scans
        the bits from least significant upward (all bits except the
        leading 1), tracking the carry produced by the +1 operations:
        bit+carry == 0 -> one halving; == 1 -> add-one then halve (two
        steps); == 2 -> one halving with carry propagated.
        """
        steps = 0
        carry = 0
        for bit in (int(c) for c in reversed(s[1:])):
            total = bit + carry
            if total == 1:
                # Odd value: +1 (ripples a carry) and then divide.
                steps += 2
                carry = 1
            else:
                # total is 0 or 2: a single division step.
                steps += 1
                carry = total // 2
        # A leftover carry turns the leading 1 into 10: one more halving.
        return steps + carry
# Smoke test: prints the step counts for three sample inputs.
a=Solution()
print(a.numSteps("1101"))
print(a.numSteps("10"))
print(a.numSteps("1"))
f5debc861c5afd7371071c1a3255eaee794d363d | Python | PennyLaneAI/pennylane | /pennylane/transforms/specs.py | UTF-8 | 6,583 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for resource estimation"""
import inspect
import pennylane as qml
def _get_absolute_import_path(fn):
return f"{inspect.getmodule(fn).__name__}.{fn.__name__}"
def specs(qnode, max_expansion=None, expansion_strategy=None):
    """Resource information about a quantum circuit.
    This transform converts a QNode into a callable that provides resource information
    about the circuit.
    Args:
        qnode (.QNode): the QNode to calculate the specifications for
    Keyword Args:
        max_expansion (int): The number of times the internal circuit should be expanded when
            calculating the specification. Defaults to ``qnode.max_expansion``.
        expansion_strategy (str): The strategy to use when circuit expansions or decompositions
            are required.
            - ``gradient``: The QNode will attempt to decompose
              the internal circuit such that all circuit operations are supported by the gradient
              method.
            - ``device``: The QNode will attempt to decompose the internal circuit
              such that all circuit operations are natively supported by the device.
    Returns:
        A function that has the same argument signature as ``qnode``. This function
        returns a dictionary of information about qnode structure.
    **Example**
    .. code-block:: python3
        x = np.array([0.1, 0.2])
        dev = qml.device('default.qubit', wires=2)
        @qml.qnode(dev, diff_method="parameter-shift", shifts=np.pi / 4)
        def circuit(x, add_ry=True):
            qml.RX(x[0], wires=0)
            qml.CNOT(wires=(0,1))
            if add_ry:
                qml.RY(x[1], wires=1)
            return qml.probs(wires=(0,1))
    >>> qml.specs(circuit)(x, add_ry=False)
    {'resources': Resources(num_wires=2, num_gates=2, gate_types=defaultdict(<class 'int'>, {'RX': 1, 'CNOT': 1}),
    gate_sizes=defaultdict(int, {1: 1, 2: 1}), depth=2, shots=Shots(total_shots=None, shot_vector=())),
    'gate_sizes': defaultdict(int, {1: 1, 2: 1}),
    'gate_types': defaultdict(int, {'RX': 1, 'CNOT': 1}),
    'num_operations': 2,
    'num_observables': 1,
    'num_diagonalizing_gates': 0,
    'num_used_wires': 2,
    'num_trainable_params': 1,
    'depth': 2,
    'num_device_wires': 2,
    'device_name': 'default.qubit',
    'expansion_strategy': 'gradient',
    'gradient_options': {'shifts': 0.7853981633974483},
    'interface': 'auto',
    'diff_method': 'parameter-shift',
    'gradient_fn': 'pennylane.gradients.parameter_shift.param_shift',
    'num_gradient_executions': 2}
    """
    def specs_qnode(*args, **kwargs):
        """Returns information on the structure and makeup of provided QNode.
        Dictionary keys:
            * ``"num_operations"`` number of operations in the qnode
            * ``"num_observables"`` number of observables in the qnode
            * ``"num_diagonalizing_gates"`` number of diagonalizing gates required for execution of the qnode
            * ``"resources"``: a :class:`~.resource.Resources` object containing resource quantities used by the qnode
            * ``"num_used_wires"``: number of wires used by the circuit
            * ``"num_device_wires"``: number of wires in device
            * ``"depth"``: longest path in directed acyclic graph representation
            * ``"device_name"``: name of QNode device
            * ``"expansion_strategy"``: string specifying method for decomposing operations in the circuit
            * ``"gradient_options"``: additional configurations for gradient computations
            * ``"interface"``: autodiff framework to dispatch to for the qnode execution
            * ``"diff_method"``: a string specifying the differntiation method
            * ``"gradient_fn"``: executable to compute the gradient of the qnode
        Potential Additional Information:
            * ``"num_trainable_params"``: number of individual scalars that are trainable
            * ``"num_gradient_executions"``: number of times circuit will execute when
                calculating the derivative
        Returns:
            dict[str, Union[defaultdict,int]]: dictionaries that contain QNode specifications
        """
        # Temporarily apply the requested expansion settings while the tape
        # is constructed, then restore the QNode's originals even on error.
        initial_max_expansion = qnode.max_expansion
        initial_expansion_strategy = getattr(qnode, "expansion_strategy", None)
        try:
            qnode.max_expansion = initial_max_expansion if max_expansion is None else max_expansion
            qnode.expansion_strategy = expansion_strategy or initial_expansion_strategy
            qnode.construct(args, kwargs)
        finally:
            qnode.max_expansion = initial_max_expansion
            qnode.expansion_strategy = initial_expansion_strategy
        # Start from the tape-level specs and enrich with QNode/device info.
        info = qnode.qtape.specs.copy()
        info["num_device_wires"] = qnode.device.num_wires
        info["device_name"] = qnode.device.short_name
        info["expansion_strategy"] = qnode.expansion_strategy
        info["gradient_options"] = qnode.gradient_kwargs
        info["interface"] = qnode.interface
        info["diff_method"] = (
            _get_absolute_import_path(qnode.diff_method)
            if callable(qnode.diff_method)
            else qnode.diff_method
        )
        if isinstance(qnode.gradient_fn, qml.gradients.gradient_transform):
            info["gradient_fn"] = _get_absolute_import_path(qnode.gradient_fn)
            try:
                info["num_gradient_executions"] = len(qnode.gradient_fn(qnode.qtape)[0])
            except Exception as e:  # pylint: disable=broad-except
                # In the case of a broad exception, we don't want the `qml.specs` transform
                # to fail. Instead, we simply indicate that the number of gradient executions
                # is not supported for the reason specified.
                info["num_gradient_executions"] = f"NotSupported: {str(e)}"
        else:
            info["gradient_fn"] = qnode.gradient_fn
        return info
    return specs_qnode
| true |
6e3e6ef297dc4ca9598613aedcee3e2100dd93c3 | Python | kmac3063/Training | /ACMP/75.py | UTF-8 | 29 | 2.796875 | 3 | [] | no_license | n = int(input())
print(45**n) | true |
76c2b1cab0757909de6c390d4380a64a57ea1747 | Python | Aasthaengg/IBMdataset | /Python_codes/p03502/s694767468.py | UTF-8 | 65 | 3 | 3 | [] | no_license | X = input()
print(['No','Yes'][int(X)%sum(int(T) for T in X)==0]) | true |
406799c427b6410201352983769bd3972f7aa53b | Python | sunbo449/wms | /utils/test.py | UTF-8 | 230 | 2.71875 | 3 | [] | no_license | """
本文件测试使用
"""
# Mixed-type demo container: dict, tuple, set, bool and nested list.
info = [
    {'k1':(1),'k2':{'k9':'oldboy','k10':'一天天'}},
    (11,22,33,44),
    {199,2,3,4,5},
    True,
    ['武沛齐','景女神',{'extra':("alex",'eric',[18,20])}]
]
# info[1] is the tuple (11, 22, 33, 44); index 3 prints 44.
print(info[1][3])
| true |
d225255c6f12654d53a09d7ebb6ae06f433d1172 | Python | reallinfo/proyectoIV | /old/tests/test_recurso.py | UTF-8 | 2,349 | 3.359375 | 3 | [
"MIT"
] | permissive |
"""
Tests del archivo src/recurso.py
"""
# Modificación del path para incluir las demás carpetas, por comodidad a la hora de importar.
import sys, platform
# Bug fix: the original wrote ``platform.system is 'Windows'``, comparing
# the *function object* to a string by identity -- always False -- so the
# Windows branch could never be taken.  Call the function and compare the
# returned value.
if platform.system() == 'Windows':
    sys.path.append(".\src")
else:
    sys.path.append("./src")
import unittest
from recurso import Recurso
class FallaStr(object):
    """Auxiliary class whose str() deliberately fails, used to exercise
    error-handling paths in the tests below."""
    def __str__(self):
        # Blow up on purpose so any caller that stringifies this object fails.
        raise Exception
class TestRecurso(unittest.TestCase):
    """Unit tests for the Recurso class (src/recurso.py)."""
    def test_creacion(self):
        """Constructor: bounds ordered, non-negative, name is a string,
        and warnings are raised for invalid arguments."""
        r = Recurso('prueba', 5, 10)
        r2 = Recurso('prueba', 10, 5)
        print("Testeando constructor... ", end = '')
        self.assertTrue(r.get_t_min() <= r.get_t_max(), "t_min > t_max")
        self.assertTrue(r2.get_t_min() <= r2.get_t_max(), "t_min > t_max")
        self.assertTrue(r.get_t_min() > -1, "t_min es menor que cero")
        self.assertTrue(r.get_t_max() > -1, "t_max es menor que cero")
        self.assertTrue(type(r.get_nombre()) == str, "Nombre no es un string")
        with self.assertWarns(UserWarning):
            r = Recurso('prueba', 'x', 2)
        with self.assertWarns(UserWarning):
            r = Recurso('prueba', -5, 10)
        print("OK")
    def test_setters(self):
        """Setters: reject negatives/wrong types and keep t_min <= t_max."""
        print("Testeando setters... ", end = '')
        r = Recurso('prueba', 5, 20)
        self.assertFalse(r.set_t_min(-1), "set_t_min acepta números negativos")
        self.assertFalse(r.set_t_min("str"), "set_t_min acepta tipos != int")
        self.assertFalse(r.set_t_max(-1), "set_t_max acepta números negativos")
        self.assertFalse(r.set_t_max("str"), "set_t_max tipos != int")
        self.assertTrue(r.set_nombre("str"), "El nombre no se cambia correctamente")
        # FallaStr's failing __str__ should make set_nombre return False.
        self.assertFalse(r.set_nombre(FallaStr()), "str() no da error")
        r.set_t_max(21)
        self.assertFalse(r.get_t_min() > r.get_t_max(), "set_t_max permite que t_min > t_max")
        r.set_t_min(20)
        self.assertFalse(r.get_t_min() > r.get_t_max(), "set_t_min permite que t_min > t_max")
        r.set_t_min(22)
        self.assertFalse(r.get_t_min() > r.get_t_max(), "set_t_min permite que t_min > t_max")
        r.set_t_max(1)
        self.assertFalse(r.get_t_min() > r.get_t_max(), "set_t_max permite que t_min > t_max")
        print("OK")
    def test_print(self):
        """str(Recurso) must return a string."""
        r = Recurso('algo', 2, 3)
        self.assertTrue(type(str(r)) is str, "La función de impresión no devuelve str")
if __name__ == '__main__':
print("INICIANDO TEST DE LA CLASE RECURSO (src/recurso.py)")
unittest.main()
| true |
f811f6dd5d2c0877d18fc6be314b52e020ca8559 | Python | infra-ops/cloud-ops | /ansible-1/training/rhca-410/aws/plugins/callback/boto.py | UTF-8 | 330 | 2.875 | 3 | [] | no_license | import boto
import json
# Create an SNS client
sns = boto3.client('sns')
# Publish a simple message to the specified SNS topic
response = sns.publish(
TopicArn='arn:aws:sns:us-east-1:758637906269:notify-me:1e1955f1-ff4b-4784-82ca-19c31e152740',
Message='Hello World!',
)
# Print out the response
print(response)
| true |
157b567adf79c92800d60f931fa0f5aa58071068 | Python | immaxchen/python-msgbox | /msgbox.py | UTF-8 | 1,237 | 2.609375 | 3 | [] | no_license | import os
# Platform switch: on Windows use the native Win32 MessageBoxW, everywhere
# else emulate a message box with a small tkinter window.
if os.name == "nt":
    import ctypes
    def MessageBox(text, caption="", options=0):
        """Show a native Windows message box; returns the Win32 result code."""
        return ctypes.windll.user32.MessageBoxW(0, text, caption, options)
else:
    import textwrap
    import tkinter
    from PIL import Image, ImageTk
    def MessageBox(text, caption="", options=0):
        """Show a tkinter-based message box with a single OK button.

        Only ``options == 0`` (OK only) is supported; returns 1 (the
        Win32 IDOK value) once the window is dismissed.
        """
        if not options == 0:
            errmsg = "Options other than 0 (OK only) is not yet implemented."
            raise NotImplementedError(errmsg)
        room = " " * 8
        # Transparent 32x32 icon so the window manager shows no default icon.
        icon = Image.new("RGBA", (32, 32), (0, 0, 0, 0))
        # Re-wrap every paragraph to 76 columns, padding each line.
        text = "\n".join(
            [
                line + room
                for para in text.split("\n")
                for line in textwrap.wrap(para, width=76)
            ]
        ).ljust(32)
        root = tkinter.Tk()
        root.title(caption)
        root.iconphoto(root, ImageTk.PhotoImage(icon))
        lbl = tkinter.Label(root, text=text, bg="white", justify="left")
        lbl.pack(ipadx=12, ipady=24)
        btn = tkinter.Button(root, text="OK", width=11, command=root.destroy)
        btn.pack(padx=12, pady=12, anchor="e")
        # Briefly force the window on top so it grabs the user's attention.
        root.lift()
        root.attributes("-topmost", True)
        root.attributes("-topmost", False)
        root.mainloop()
        return 1
| true |
954867f2b544f24d9a031604487ed338d604fbf2 | Python | amit000/flask_api | /tests/Integration/test_order.py | UTF-8 | 1,748 | 2.515625 | 3 | [] | no_license | from tests.Integration.base_test import BaseTest
from schemas.order import ItemsToOrderSchema
from schemas.order import OrderSchema
from schemas.item import ItemSchema
from models.item import ItemModel
from models.order import OrderModel
from models.order import ItemsToOrders
class TestOrder(BaseTest):
    """Integration tests for OrderModel and its item associations."""
    items_to_order = ItemsToOrderSchema()
    order_schema = OrderSchema()
    item_schema = ItemSchema()
    def setUp(self):
        """Seed two items and one pending order (3x item 1, 2x item 2)."""
        BaseTest.setUp(self)
        with self.app_context():
            new_item = self.item_schema.load({"name": "test_name", "price": "22.43"})
            new_item1 = self.item_schema.load({"name": "test_name1", "price": "23.47"})
            new_item1.upsert_item()
            new_item.upsert_item()
            order = OrderModel(
                items_list=[
                    ItemsToOrders(item_id=1, quantity=3),
                    ItemsToOrders(item_id=2, quantity=2),
                ],
                status="pending",
            )
            order.save_to_db()
    def test_order(self):
        """Dump all orders through the schema.

        # NOTE(review): this test only prints and asserts nothing.
        """
        with self.app_context():
            print(self.order_schema.dump(OrderModel.find_all(), many=True))
    def test_set_status(self):
        """set_status() must update the order's status attribute."""
        with self.app_context():
            order1 = OrderModel.find_by_id(1)
            order1.set_status("failed")
            self.assertEqual("failed", order1.status)
    def test_amount(self):
        """Order amount for the seeded order is expected to be 11527."""
        with self.app_context():
            order1 = OrderModel.find_by_id(1)
            self.assertEqual(11527, order1.amount)
    def test_description(self):
        """Description lists "<quantity> x <item name>" per order item."""
        with self.app_context():
            order1 = OrderModel.find_by_id(1)
            self.assertListEqual(
                ["3 x test_name1", "2 x test_name"], order1.description
            )
| true |
7340ef155e1f3cf5fd407faf392fb3229704ffee | Python | childe/leetcode | /add-two-numbers/solution.py | UTF-8 | 903 | 3.28125 | 3 | [] | no_license | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """
        Add two numbers stored as linked lists of digits (least significant
        digit first) and return the sum as a new linked list.
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # Dummy head simplifies appending; ``plus`` is the running carry.
        head = ListNode(0)
        node = head
        plus = 0
        # Add digit pairs while both lists still have nodes.
        while l1 and l2:
            n = l1.val + l2.val + plus
            node.next = ListNode(n % 10)
            node = node.next
            plus = n // 10
            l1 = l1.next
            l2 = l2.next
        # Continue along whichever list is longer.
        if l1 is None:
            l = l2
        else:
            l = l1
        while l:
            n = l.val + plus
            node.next = ListNode(n % 10)
            node = node.next
            plus = n // 10
            l = l.next
        # A final carry adds one more digit.
        if plus:
            node.next = ListNode(1)
        return head.next
| true |
835d5c49ddd4f2ef17a5307595288aad47109b91 | Python | huy-hng/Sandbox | /python/tests/huys_logging_test.py | UTF-8 | 643 | 2.6875 | 3 | [] | no_license | import __init__
from huys_python.templates.logging import configure_loggers, get_logger
configure_loggers('debug')
def basic_decorator(function):
    """Decorator that logs the wrapped function's name at debug level on
    every call, returning the wrapped function's result unchanged.
    """
    import functools
    logger = get_logger('decorator')
    # functools.wraps preserves the wrapped function's __name__/__doc__,
    # which the original wrapper lost.
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        # do decorator stuff
        result = function(*args, **kwargs)
        logger.debug(function.__name__)
        # do more decorator stuff
        return result
    return wrapper
@basic_decorator
def some_function():
    """Demo function: emits one debug log line when called."""
    logger = get_logger('some_function')
    logger.debug('doing some function')
# Exercise the decorator at 'debug' level, then switch to 'info' and show
# that debug messages are filtered out while info messages pass.
some_function()
configure_loggers('info')
logger = get_logger('test')
logger.debug('debug')
logger.info('info')
e79e6655aa9445578e9c96cf48060026cd56a24f | Python | PaulTomchik/MillionSongDataset_ETL | /etl-docker/scripts/JSON_to_CSV.py | UTF-8 | 990 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
import os
import csv
import json
# Based on code found here: http://stackoverflow.com/a/1872081
# For running on the host.
PATH_TO_DIR = '../../data/projections/data_A/'
# For using the docker container.
# PATH_TO_DIR = '/data/projections/data_A'
def convert_JSON_to_CSV ():
    """Convert data_A.json (one JSON object per line) to data_A.csv,
    keeping only the listed song-metadata fields.

    # NOTE(review): Python 2 only -- ``unicode`` does not exist on
    # Python 3.  Also the output file handle opened inline is never
    # explicitly closed.
    """
    fields = [
        'artist_hotttnesss',
        'title',
        'energy',
        'loudness',
        'tempo',
        'artist_name',
        'track_id',
        'key',
        'year',
        'song_hotttnesss'
    ]
    inFilePath = os.path.join(PATH_TO_DIR, 'data_A.json')
    outFilePath = os.path.join(PATH_TO_DIR, 'data_A.csv')
    csvWriter = csv.writer(open(outFilePath, 'w'))
    csvWriter.writerow(fields)
    with open(inFilePath) as inFile:
        # Each input line is an independent JSON document.
        for jsonRow in inFile:
            data = json.loads(jsonRow)
            csvWriter.writerow([unicode(data[field]).encode("utf-8") for field in fields])
if __name__ == '__main__':
    convert_JSON_to_CSV()
| true |
0009f9213067c1a3db47813851e5fd9322fac9b1 | Python | sohils/Catadioptric-Object-Detection | /hog_omni.py | UTF-8 | 2,292 | 2.703125 | 3 | [] | no_license | import numpy as np
import cv2
from matplotlib import pyplot as plt
from scipy.interpolate import RectBivariateSpline
# def get_omni_gradient():
# # Calculate gradient
# gx = cv2.Sobel(im, cv2.CV_32F, 1, 0, ksize=1)
# gy = cv2.Sobel(im, cv2.CV_32F, 0, 1, ksize=1)
# mag, angle = cv2.cartToPolar(gx, gy, angleInDegrees=True)
# im_shape = im.shape
# centre = [i/2 for i in im_shape]
# riemannian_inv = np.zeros(im.shape)
# x = np.arange(im_shape[0])
# y = np.arange(im_shape[1])
# xv, yv = np.meshgrid(y, x)
# xv = xv - centre[1]
# yv = centre[0] - yv
# print(xv[0], yv[0])
# riemannian_inv = np.square((np.square(xv) + np.sqaure(yv) + 4))/16
def extract_omni_window(im,r1,r2,th1,th2):
    """Unwarp an annular window of an omnidirectional image to a rectangle.

    Samples the image on a polar grid (radii r1..r2, angles th1..th2,
    measured from the image centre) via per-channel bicubic splines and
    stacks the three channels back into an RGB rectangle, which is also
    shown with matplotlib.
    # assumes im is an H x W x 3 array (indexed im[:,:,0..2]) -- radii in
    # pixels and angles in radians; TODO confirm angle convention.
    """
    x_range = np.arange(0,im.shape[0])
    y_range = np.arange(0,im.shape[1])
    # One interpolating spline per colour channel.
    im_spline_r = RectBivariateSpline(x_range, y_range, im[:,:,0])
    im_spline_g = RectBivariateSpline(x_range, y_range, im[:,:,1])
    im_spline_b = RectBivariateSpline(x_range, y_range, im[:,:,2])
    # Output size: height = radial extent, width scaled from the arc length.
    im_rect = np.zeros((int(np.floor(np.abs(r1-r2))),5*int(np.floor(np.abs((r2-r1)*(th2-th1)/2)))))
    omni_indices_i = np.linspace(r2, r1, im_rect.shape[0])
    omni_indices_j = np.linspace(th2, th1, im_rect.shape[1])
    cos_omni_indices_j = np.cos(omni_indices_j)
    sin_omni_indices_j = np.sin(omni_indices_j)
    # Polar -> cartesian sample coordinates relative to the image centre.
    x_i_r = np.repeat(omni_indices_i, im_rect.shape[1])
    x_i = im.shape[0]/2 - x_i_r*(np.tile(sin_omni_indices_j, im_rect.shape[0]).flatten())
    y_i = im.shape[1]/2 + x_i_r*(np.tile(cos_omni_indices_j, im_rect.shape[0]).flatten())
    image = im_spline_r.ev(x_i,y_i).reshape(im_rect.shape)
    image=np.dstack((image,im_spline_g.ev(x_i,y_i).reshape(im_rect.shape)))
    image=np.dstack((image,im_spline_b.ev(x_i,y_i).reshape(im_rect.shape)))
    print(image.shape)
    plt.imshow(image)
    plt.show()
    # cv2.imshow("rect", im_rect)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    return image
if __name__ == "__main__":
    # Read image
    im = cv2.imread('../data/cctag/19_59_51_356.png')
    # im = cv2.imread('color-wheel.png')
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    print(im.shape)
    # Normalize to float32 in [0, 1] before resampling.
    im = np.float32(im) / 255.0
    # im = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
    extract_omni_window(im,350,400,0,0.5)
e8a060ccc0c851b0168da47a74e0685d967ca8a0 | Python | gobboo/MCsniperBOT | /database/users.py | UTF-8 | 2,632 | 2.640625 | 3 | [] | no_license | from ast import literal_eval as make_tuple
from database.postgres_handler import execute_sql
from database.postgres_handler import query_sql
"""
Generic / Reusable Queries
"""
async def increment_column(
    table: str, column: str, amount: int, condition: str
) -> None:
    """Increment ``column`` by ``amount`` on rows matching ``condition``.

    # NOTE(review): arguments are interpolated straight into SQL --
    # injection-prone if any come from user input; confirm execute_sql
    # supports parameter binding.
    """
    execute_sql(f"UPDATE {table} SET {column}={column} + {amount} {condition}")
"""
Levelling System Queries
"""
async def user_exists(user_id: int) -> bool:
    """Return True if a users row exists for ``user_id``."""
    if query_sql(f"SELECT * FROM users WHERE user_id={user_id}") is None:
        return False
    return True
async def create_user(user_id: int, username: str = ""):
    """Insert a fresh users row with zero experience and level.

    ``username`` now defaults to "" because ``get_user_rank`` calls
    ``create_user(user_id)`` with a single argument, which raised
    TypeError when the parameter was mandatory.  Existing two-argument
    callers are unaffected.
    """
    # NOTE(review): f-string interpolation is injection-prone if username
    # can contain a quote -- confirm execute_sql supports parameter binding.
    execute_sql(f"INSERT INTO users VALUES({user_id}, '{username}', 0, 0)")
async def get_xp(user_id: int, username: str) -> int:
    """Return the user's experience, creating the row first if missing."""
    if not await user_exists(user_id):
        await create_user(user_id, username)
    return query_sql(f"SELECT experience FROM users WHERE user_id={user_id};")[0]
async def get_user_count() -> int:
    """
    Get number of users in database
    """
    return query_sql("SELECT COUNT (*) FROM USERS")[0]
class ThisShouldntHappen(Exception):
    # Sentinel exception; not raised anywhere in this module.
    pass
async def get_user_rank(user_id: int) -> (int, int):
    """
    Get user rank in db
    e.g. 100 would be they have the 100th highest xp in the db
    """
    if not await user_exists(user_id):
        # NOTE(review): create_user is called with one argument here even
        # though it declares a required ``username`` parameter -- this is a
        # TypeError unless create_user grows a default.
        await create_user(user_id)
    total_count = await get_user_count()
    leaderboard_query = query_sql(
        "SELECT user_id FROM users ORDER BY experience DESC", False
    )
    leaderboard = [row[0] for row in leaderboard_query]
    # 1-based position of this user in the experience leaderboard.
    position = leaderboard.index(user_id) + 1
    return position, total_count
async def get_lb():
    """Return (user_id, experience) rows ordered by experience, highest first."""
    lb_query = query_sql(
        "SELECT user_id, experience FROM users ORDER BY experience DESC", False
    )
    return lb_query
"""
Captcha Queries
"""
async def get_captcha_data(user_id: int) -> list:
    """Return (user_id, captcha, attempts) for the user, or None if absent."""
    return query_sql(
        f"SELECT user_id, captcha, attempts FROM captcha_users WHERE user_id={user_id}"
    )
async def require_captcha(user_id: int) -> bool:
    """True when the user has a pending captcha row."""
    if query_sql(f"SELECT * FROM captcha_users WHERE user_id={user_id}") is None:
        return False
    return True
async def set_captcha(user_id: int, captcha: str) -> None:
    """Create or replace the user's captcha, resetting attempts to 0.

    # NOTE(review): ``captcha`` is interpolated into the SQL string --
    # injection-prone if the value is not strictly server-generated.
    """
    if await get_captcha_data(user_id) is not None:
        return execute_sql(
            f"UPDATE captcha_users SET captcha='{captcha}', attempts=0 WHERE user_id={user_id}"
        )
    return execute_sql(
        f"INSERT INTO captcha_users (user_id, captcha, attempts) VALUES ({user_id}, '{captcha}', 0)"
    )
async def captcha_completed(user_id: int) -> None:
    """Drop the user's captcha row once it has been solved."""
    execute_sql(f"DELETE FROM captcha_users WHERE user_id={user_id}")
| true |
805e80d07f2eae3c33160d7d95e6d674797a54a1 | Python | YiZheWangTw/VPythonTutorial | /03.等速度直線運動/3_1DMotion.py | UTF-8 | 1,292 | 3.65625 | 4 | [] | no_license | """
VPython教學: 3.等速度直線運動
Ver. 1: 2018/2/18
Ver. 2: 2019/9/5
作者: 王一哲
"""
from vpython import *
"""
1. 參數設定, 設定變數及初始值
"""
size = 0.1 # 木塊邊長
L = 1 # 地板長度
v = 0.03 # 木塊速度
t = 0 # 時間
dt = 0.01 # 時間間隔
"""
2. 畫面設定
"""
scene = canvas(title="1D Motion", width=800, height=600, x=0, y=0, center=vec(0, 0.1, 0), background=vec(0, 0.6, 0.6))
floor = box(pos=vec(0, 0, 0), size=vec(L, 0.1*size, 0.5*L), color=color.blue)
cube = box(pos=vec(-0.5*L + 0.5*size, 0.55*size, 0), size=vec(size, size, size), color=color.red, v=vec(v, 0, 0))
#cube = box(pos=vec(-0.5*L + 0.5*size, 0.55*size, 0), length=size, height=size, width=size, color=color.red, v=vec(v, 0, 0))
gd = graph(title="x-t plot", width=600, height=450, x=0, y=600, xtitle="t(s)", ytitle="x(m)")
gd2 = graph(title="v-t plot", width=600, height=450, x=0, y=1050, xtitle="t(s)", ytitle="v(m/s)")
xt = gcurve(graph=gd, color=color.red)
vt = gcurve(graph=gd2, color=color.red)
"""
3. 物體運動部分, 木塊到達地板邊緣時停止執行
"""
while(cube.pos.x <= 0.5*L- 0.5*size):
rate(1000)
cube.pos.x += v*dt
xt.plot(pos = (t, cube.pos.x))
vt.plot(pos = (t, cube.v.x))
t += dt
print("t = ", t)
| true |
c11000c8c130334f6c5153ba9b5d866f4a109ce9 | Python | gmaze/argodmqc_owc | /ow_calibration/change_dates/cal2dec/cal2dec_test.py | UTF-8 | 2,236 | 3.390625 | 3 | [] | no_license | """
-----cal2dec Test File-----
Written by: Edward Small
When: 26/09/2019
Contains unit tests to check functionality of the `cal2dec` function
To run this test specifically, look at the documentation at:
https://gitlab.noc.soton.ac.uk/edsmall/bodc-dmqc-python
"""
import unittest
from ow_calibration.change_dates.cal2dec.cal2dec import cal2dec
class Cal2decTestCase(unittest.TestCase):
    """
    Test cases for the cal2dec function, which converts a calendar
    date (month, day, hour, minute) into a decimal day of the year.
    """

    def test_returns_float(self):
        """
        Check return type is a float
        :return: Nothing
        """
        print("Testing return type is a float...")
        # NOTE(review): the other tests call cal2dec with 4 arguments; this
        # call relies on cal2dec defaulting hour/minute — confirm signature.
        date = cal2dec(0, 1)
        self.assertIsInstance(date, float, "cal2dec should return a float")

    def test_throws_error_if_month_too_large(self):
        """
        Check that an error is thrown if the month exceeds 12
        :return: Nothing
        """
        print("Testing exception is thrown if month is out of scope...")
        with self.assertRaises(Exception) as month_out_of_scope:
            # Months are 0-indexed, so 13 is two past December.
            cal2dec(13, 1, 0, 0)

        # assertIn gives a clearer failure message than assertTrue(x in y).
        self.assertIn('Month is out of scope', str(month_out_of_scope.exception))

    def test_returns_0_for_first_day(self):
        """
        Check that the first possible day is 0
        :return: Nothing
        """
        print("Testing that day 1 returns 0...")
        self.assertEqual(cal2dec(0, 1, 0, 0), 0, "Should return 0 on first day")

    def test_returns_365_for_last_day(self):
        """
        Check that the last hour in the last day of the last month return 365
        :return: Nothing
        """
        print("Testing that the 24th hour on the last day returns 365")
        self.assertEqual(cal2dec(11, 31, 24, 0), 365, "last value should be 365")

    def test_no_day_larger_than_366(self):
        """
        Check for error if day is larger than 366 (leap year)
        :return: Nothing
        """
        print("Testing that an error is thrown if a date exceeds 365")
        with self.assertRaises(Exception) as date_out_of_scope:
            # Absurd hour/minute values push the decimal date past a year.
            cal2dec(11, 31, 300, 300)

        self.assertIn('Day is out of scope of the year', str(date_out_of_scope.exception))
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| true |
abd36f4c899df2253c2c005d0b07894d8344e61d | Python | 1zjz/Beam-Deflection-Visualiser | /Beam-Deflection-Visualiser-master/choice4.py | UTF-8 | 2,490 | 3.109375 | 3 | [] | no_license | #--------------------------Initialization--------------------------------------------
import pandas as pd
import numpy as np
import os
from display_menu import *
# Menu options presented by the save/load sub-menu; their order maps
# directly onto the button numbers (1-4) handled in load().
save_menu_options = np.array([
    "Change Directory",
    "Stay in the current directory",
    "Display the files in current directory",
    "Quit",
])
def load(load_forces, load_positions, beam_length, beam_type):
    """
    Load a previously saved beam configuration from a CSV file.

    The user first chooses a directory via the save/load menu, then types
    a file name.  The CSV layout (no header) is assumed to be:
        row 0: beam_type,   force magnitudes...
        row 1: beam_length, force positions...
    TODO confirm against the corresponding save routine.

    On quit (menu option 4 or typing 'Q'/'q' at the file prompt) or on a
    read failure, the caller's current values are returned unchanged.

    :param load_forces: current force magnitudes
    :param load_positions: current force positions
    :param beam_length: current beam length
    :param beam_type: current support type (1 = supported at both ends;
                      2 = the other supported variant)
    :return: (load_forces, load_positions, beam_length, beam_type)
    """
    path = os.getcwd()
    while True:
        # Show the user where they are before offering the menu.
        print("Now you are in this directory; \n ", os.getcwd(), "\n")
        button = display_menu(save_menu_options)
        if button == 1:
            # Let the user choose a different directory to load from.
            path = input("Type the file location you want to load the file from\n>>")
        elif button == 2:
            # Stay in the current working directory.
            path = os.getcwd()
        elif button == 3:
            # List the directory contents, then re-display the menu.
            print(os.listdir(path))
            continue
        elif button == 4:
            # Quit: return the caller's values untouched.
            break
        while True:
            try:
                print("Q . Quit")
                filename = input("What is the name of the file?\n>>")
                if filename == "Q" or filename == "q":
                    break  # back out of the file prompt (returns unchanged values)
                # Read the file and split the matrix into its components.
                df = pd.read_csv(os.path.join(path, filename), header=None)
                beam_length = df[0][1]
                beam_type = df[0][0]
                load_forces = df.loc[0, :]
                load_positions = df.loc[1, :]
                # Drop column 0 (beam_type / beam_length) from each row,
                # leaving only the force data.
                load_forces = load_forces.loc[1:][:]
                load_positions = load_positions.loc[1:][:]
                load_forces = np.array([float(x) for x in load_forces])
                load_positions = np.array([float(x) for x in load_positions])
                # Bug fix: the original test `beam_type != 1 or beam_type != 2`
                # was always true, so the support type from the file was always
                # discarded.  Accept 1 or 2 (normalised to int); anything else
                # falls back to "both ends" (1) by default.
                if beam_type in (1, 2):
                    beam_type = int(beam_type)
                else:
                    beam_type = 1
            except Exception:
                # Narrowed from a bare `except:` so Ctrl-C still interrupts.
                print(filename+" file is not found please make sure the followings;\n* Make sure you are in the correct directory\n"+
                      "* Make sure you type the correct extension(.csv)\n")
                continue
            break
        break
    # return values
    return load_forces, load_positions, beam_length, beam_type
| true |