blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
550c220d04990e4d36926a807bd6c5c623580cf6
|
Python
|
JasmineLf/ProjectMePy
|
/venv/Code/20200422Item/Students.py
|
UTF-8
| 10,074
| 3.609375
| 4
|
[] |
no_license
|
'''
时间:2020-4-22 23:13:49
作者:LF
邮箱:jksj27250#gmail.com
主题:
学生信息管理系统
内容:
1.学生信息维护
1.1.录入学生信息
1.2.删除学生信息
1.3.修改学生信息
2.查询/统计
2.1.按学生姓名查找
2.2.按学生ID查找
2.3.查询并显示所有学生信息
2.4.统计学生总人数
3.排序
3.1升序排序
3.2降序排序
'''
# -*- coding: UTF-8 -*-
import os,re,sys
# Render the top-level menu of the student information system.
def menu():
    """Print the numbered menu of available operations."""
    menu_lines = (
        "$###################学生信息管理系统####################$",
        "$ $",
        "$ 功 能 菜 单 $",
        "$ 1.录入学生信息 $",
        "$ 2.查找学生信息 $",
        "$ 3.删除学生信息 $",
        "$ 4.修改学生信息 $",
        "$ 5.排序 $",
        "$ 6.统计学生总人数 $",
        "$ 7.显示所有学生信息 $",
        "$ 0.退出系统 $",
        "$ $",
        "$######################################################$",
    )
    print("\n".join(menu_lines))
# Persist student records to the data file.
def save(student):
    """Append each student record to student_information.txt, one per line.

    Args:
        student: iterable of dicts, each describing one student.
    """
    # Mode 'a' creates the file when it is missing, so no fallback open is
    # needed.  (The original fallback used invalid mode 'W', which would
    # raise ValueError instead of creating the file.)
    with open("student_information.txt", 'a') as student_txt:
        for info in student:
            student_txt.write(str(info) + '\n')  # one record per line
# Interactive entry of new student records.
def insert():
    """Interactively collect student records and append them to the data file.

    Prompts for ID, name and three integer scores; an empty ID or name ends
    input immediately.  A non-integer score restarts the current record.
    Collected records are written via save() when the loop ends.
    """
    print("开始录入学生信息......\n")
    studentList = []  # records collected in this session
    keep_going = True
    while keep_going:
        # Renamed from 'id' to avoid shadowing the builtin.
        student_id = input("请输入学生学号ID(如1001):")
        if not student_id:
            break
        name = input("请输入学生姓名:(如赵海棠):")
        if not name:
            break
        try:
            english = int(input("请输入英语成绩:(如100)"))
            python = int(input("请输入python成绩:(如100)"))
            C = int(input("请输入C语言成绩:(如100"))
        except ValueError:
            # Catch only the int() conversion failure, not a bare except.
            print("输入无效,不是整数数值......重新录入信息\n")
            continue
        # One record per student (the original misspelled this as 'stdent').
        student = {"ID": student_id, "name": name, "english": english,
                   "python": python, "C": C}
        studentList.append(student)
        # Accept both 'Y' and 'y', consistent with delete() and modify().
        keep_going = input("是否继续添加?(Y/N):") in ('Y', 'y')
    save(studentList)
    print("学生信息录入完毕!!!\n")
# Print the student records held in a list as an aligned table.
def show_student(studentList):
    """Render the given list of student dicts as a formatted table.

    Prints a notice and returns early when the list is empty.
    """
    if not studentList:
        print("(^@_@^) 无数据信息 (^@_@^)\n")
        return
    # Header row format.
    header_fmt = "{:^6}{:^12}\t{:^10}\t{:^10}\t{:^10}\t{:^12}"
    print(header_fmt.format("ID","姓名","英语成绩","Python成绩",
                            "C语言成绩","总成绩"))
    # Data row format (slightly wider columns than the header).
    row_fmt = "{:^6}{:^14}\t{:^12}\t{:^12}\t{:^12}\t{:^14}"
    for record in studentList:
        total = record.get("english") + record.get("python") + record.get("C")
        print(row_fmt.format(str(record.get("ID")),
                             record.get("name"),
                             str(record.get("english")),
                             str(record.get("python")),
                             str(record.get("C")),
                             str(total).center(12)))
# Placeholder for the "search student" feature (not implemented yet).
def search():
    """Announce the search feature; the lookup itself is still a stub."""
    print("开始查找学生信息......\n")
# Delete-student function: drops records whose ID matches user input.
def delete():
    """Interactively delete records by ID from student_information.txt."""
    print("开始删除学生信息......\n")
    mark = True
    while mark:
        studentId = input("请输入需删除学生ID:")
        if studentId != "":  # only act on a non-empty ID
            if os.path.exists("student_information.txt"):  # file may not exist yet
                with open("student_information.txt",'r') as rfile:  # open the data file
                    student_old = rfile.readlines()  # read every stored record
            else:
                student_old = []
            ifdel = False  # becomes True once a matching record is dropped
            if student_old:
                # Rewrite the file, keeping only records whose ID differs.
                with open("student_information.txt","w") as wfile:
                    d = {}
                    # NOTE(review): 'list' shadows the builtin, and records
                    # are parsed with eval(), which is unsafe if the data
                    # file can be edited by untrusted parties.
                    for list in student_old:
                        d = dict(eval(list))
                        if d['ID'] != studentId:
                            wfile.write(str(d)+"\n")
                        else:
                            ifdel = True
                    if ifdel:
                        print("ID 为 %s 的学生信息已经删除......"%studentId)
                    else:
                        print("没有找到ID为%s 的学生信息......"%studentId)
            else:
                print("无学生信息......")
                break
            show()  # display the remaining records
        inputMark = input("是否继续删除?(Y/N):")
        if inputMark == 'Y':
            mark = True
        elif inputMark == 'y':
            mark = True
        else:
            mark = False
    print("删除学生信息结束!!!")
# Modify-student function: edits one record matched by ID.
def modify():
    """Interactively edit one record, matched by ID, then rewrite the file."""
    print("开始修改学生信息\n")
    show()  # list current records so the user can pick an ID
    if os.path.exists("student_information.txt"):  # nothing to modify without the file
        with open("student_information.txt","r") as rfile:
            student_old = rfile.readlines()  # read every stored record
    else:
        return
    studentid = input("请输入要修改的学生ID:(如1001)")
    with open("student_information.txt","w") as wfile:
        for student in student_old:
            # NOTE(review): eval() on file contents is unsafe for untrusted data.
            d = dict(eval(student))
            if d["ID"] == studentid:
                print("已经找到该学生,可以修改其信息!")
                while True:  # collect replacement values
                    try:
                        d["name"] = input("请输入姓名:")
                        # NOTE(review): scores are stored as strings here,
                        # while insert() stores ints — show_student() would
                        # then concatenate instead of summing. Confirm intent.
                        d["english"] = input("请输入英语成绩:")
                        d["python"] = input("请输入python成绩:")
                        d["C"] = input("请输入C语言成绩:")
                    except:
                        print("输入有误,重写输入!!!\n")
                    else:
                        break  # all fields collected
                student = str(d)  # serialize the updated record
                wfile.write(student + '\n')  # write the modified record
                print("修改成功!!!")
            else:
                wfile.write(student)  # unmodified line already ends with '\n'
    mark = input("是否继续修改其他学生信息?(Y/N)\n")
    if mark == 'Y':
        modify()
    elif mark == 'y':
        modify()  # recurse to modify another record
    else:
        show()  # display all records after modification
# Placeholder for the "sort records" feature (not implemented yet).
def sort():
    """Announce the sort feature; the ordering logic is still a stub."""
    print("排序\n")
# Placeholder for the "count students" feature (not implemented yet).
def total():
    """Announce the count feature; the counting logic is still a stub."""
    print("统计学生总数\n")
# Display every stored student record.
def show():
    """Load all records from student_information.txt and print the table.

    Prints a notice instead when the file is missing or holds no records.
    """
    print("开始显示所有学生信息\n")
    records = []
    if os.path.exists("student_information.txt"):
        with open("student_information.txt", 'r') as rfile:
            # Iterate the file directly; the loop variable no longer
            # shadows the builtin 'list' as in the original.
            # NOTE(review): eval() trusts the file contents — records are
            # assumed to have been written only by save().
            for line in rfile:
                records.append(eval(line))
    if records:
        show_student(records)
    else:
        print("占未保存学生信息\n")
    print("已录入学生信息答应完成!!!")
# Main entry point: menu loop dispatching on the user's choice.
def main():
    """Run the menu loop until the user chooses 0 (exit)."""
    ctrl = True  # loop flag; cleared when the user chooses 0
    while(ctrl):
        menu()  # show the menu
        option = input("请选择:")  # read the menu choice
        # Strip every non-digit so input like ' 3 ' still dispatches.
        option_str = re.sub("\D","",option)
        if option_str in ['0','1','2','3','4','5','6','7']:
            option_int = int(option_str)
            if option_int == 0:  # exit the system
                print("您已经退出学生信息系统!\n")
                ctrl = False
            elif option_int == 1:  # add student records
                insert()
            elif option_int == 2:  # search records
                search()
            elif option_int == 3:  # delete records
                delete()
            elif option_int == 4:  # modify records
                modify()
            elif option_int == 5:  # sort records
                sort()
            elif option_int == 6:  # count students
                total()
            elif option_int == 7:  # list all students
                show()
if __name__ == '__main__':
    main()
| true
|
8e21b17a7d7e00ca82552d3057d5921382ae7591
|
Python
|
RepnikovPavel/kursach_filtr_of_signal
|
/EXTRACT_AND_SHOW_DATA/load_saples_from_file.py
|
UTF-8
| 1,315
| 2.6875
| 3
|
[] |
no_license
|
import os
import numpy as np
# self.samples, self.labels = load_all_samples_and_labels(data_path, metki_path)
def load_all_samples_and_labels(data_path, metki_path):
    """Load every sample array and its label from the given directories.

    The dataset is small, so everything is loaded at once — no batching or
    online loading (as the original Russian comment explained).

    Args:
        data_path: directory containing unic_tmp_id.txt (the sample count)
            and sample_<i>.npy files.
        metki_path: directory containing metka_<i>.txt label files.

    Returns:
        (samples, labels): numpy array stacking all samples, and the list
        of label strings read from the metka files.
    """
    # The id file stores how many samples were written.
    # os.path.join keeps this working on non-Windows hosts too (the
    # original concatenated "\\" separators by hand), and 'with'
    # guarantees the handles are closed.
    with open(os.path.join(data_path, "unic_tmp_id.txt"), "r") as file_with_id:
        number_of_samples = int(file_with_id.read())
    samples = np.array([np.load(os.path.join(data_path, "sample_" + str(i) + ".npy"))
                        for i in range(number_of_samples)])
    # Now load the labels, one file per sample.
    list_with_labels = []
    for i in range(number_of_samples):
        with open(os.path.join(metki_path, "metka_" + str(i) + ".txt"), "r") as f:
            list_with_labels.append(f.read())
    return samples, list_with_labels
| true
|
3b7d206d3ea38fa0e080bbb4a44088262c4c0603
|
Python
|
madaan/minimal-text-diffusion
|
/src/utils/show_sampling_progress.py
|
UTF-8
| 1,638
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
from typing import List
# 24-bit SGR escape sequences fading from red (255;0;0) to blue (0;0;250).
list_of_colors_from_red_to_blue = [f"\033[38;2;{r};0;{b}m" for r, b in zip(range(255, 0, -10), range(0, 255, 10))]


def pprint_sentences(sentences: List[str], banner: str = "", sep: str = ""):
    """Print sentences in a red-to-blue color gradient under a banner.

    Args:
        sentences: lines to print; at most len(list_of_colors_from_red_to_blue).
        banner: heading text shown between '=' rules.
        sep: separator printed after every sentence except the last.
    """
    print()
    print(f"\033[1m{'=' * 20} {banner} {'=' * 20}\033[0m")
    for i, sentence in enumerate(sentences):
        sentence_color = list_of_colors_from_red_to_blue[i]
        # Each list entry is already a complete escape sequence
        # ("\033[38;2;R;0;Bm"); the original prepended a second, malformed
        # "\033[38;5;" prefix, which garbled the terminal output.
        end = "\n" if i == len(sentences) - 1 else sep
        print(f"{sentence_color}{sentence}\033[0m", end=end)
    print()
if __name__ == '__main__':
    # Demo: print growing prefixes of twenty sentences so the gradient
    # (one color per sentence index) is visible at every length.
    sentences = [
        "This is a sentence",
        "This is another sentence",
        "This is a third sentence",
        "This is a fourth sentence",
        "This is a fifth sentence",
        "This is a sixth sentence",
        "This is a seventh sentence",
        "This is an eighth sentence",
        "This is a ninth sentence",
        "This is a tenth sentence",
        "This is an eleventh sentence",
        "This is a twelfth sentence",
        "This is a thirteenth sentence",
        "This is a fourteenth sentence",
        "This is a fifteenth sentence",
        "This is a sixteenth sentence",
        "This is a seventeenth sentence",
        "This is an eighteenth sentence",
        "This is a nineteenth sentence",
        "This is a twentieth sentence",
    ]
    for i in range(1, len(sentences) + 1):
        pprint_sentences(sentences[:i], sep= " -> ")
        print("---")
| true
|
f6bb0b93c1debdf22edb87b6000be8f94b89a656
|
Python
|
wbogatyrewa/checkstudent
|
/app.py
|
UTF-8
| 16,463
| 2.5625
| 3
|
[] |
no_license
|
import dash
import dash_html_components as html
import pandas as pd
import base64
from datetime import datetime as dt
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
import random
import re
from flask_sqlalchemy import SQLAlchemy
from flask import Flask
""" Запуск приложения """
server = Flask(__name__)
app = dash.Dash(__name__, suppress_callback_exceptions=True)
""" Подключение к базе данных """
con = server.config[
'SQLALCHEMY_DATABASE_URI'] = 'mysql://c19441_checkoutheadergang_na4u_r:RiZyiVodxivon88@localhost/c19441_checkoutheadergang_na4u_r?charset=utf8'
db = SQLAlchemy(server)
""" Выводится страница сайта """
app.layout = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
""" Собираем данные, которые отправляются в базу """
sent_data = {
"id_stud": '',
"name": '',
"groups": '',
"photo": ''
}
def encode_image(image_file):
    """Return the file's contents as a base64 data URI for html.Img src.

    Args:
        image_file: path to the image file to embed.
    """
    # 'with' closes the handle; the original open() leaked it.
    with open(image_file, 'rb') as f:
        encoded = base64.b64encode(f.read())
    return 'data:image/png;base64,{}'.format(encoded.decode())
def students(con):
    """Fetch the Students table and return it as a list of row dicts.

    Args:
        con: database connection/URI accepted by pandas.read_sql.
    """
    Students = pd.read_sql('SELECT * FROM Students', con)
    # to_dict('records') yields one dict per row.  (The original also
    # built a DataFrame from the result and discarded it — dead code.)
    return Students.to_dict('records')
def log(con):
    """Fetch the Log table and return it as a list of row dicts.

    Args:
        con: database connection/URI accepted by pandas.read_sql.
    """
    Log = pd.read_sql('SELECT * FROM Log', con)
    # to_dict('records') yields one dict per row.  (The original also
    # built a DataFrame from the result and discarded it — dead code.)
    return Log.to_dict('records')
def timeLessons():
    """Return the fixed lesson time slots shown in the dropdown."""
    slots = [
        '8:00 - 9:35',
        '9:45 - 11:20',
        '11:30 - 13:05',
        '13:35 - 15:10',
        '15:20 - 16:55',
        '17:05 - 18:40',
    ]
    return slots
def checkStudent(photo):
    """Map a lesson photo record to an attendance status string.

    An empty photo value means no photo was captured for the student
    during the lesson (absent); anything non-empty means present.
    """
    return 'Отсутствовал' if len(photo) == 0 else 'Присутствовал'
def create_id():
    """Generate an id for a new student.

    NOTE(review): ids are drawn uniformly at random, so collisions are
    possible — the original TODO about enforcing uniqueness (and about
    regenerating the id on each visit to the passport page) still stands.
    """
    # Return directly; the original bound the value to a local named 'id',
    # shadowing the builtin.
    return random.randint(1000000, 99999999)
""" Главная страница сайта, выводит результаты учета посещаемости в таблицу,
в которой отражается статус студента – его присутствие или отсутствие.
На главной странице сайта есть возможность выбрать дату и время лекции или семинара. """
main_page_layout = html.Div([
html.H1('Учет посещаемости', style={'textAlign': 'center', 'color': '#373a3c'}),
html.H3('Комната: 103/3г', style={'color': '#373a3c'}),
html.Div([
# Календарь с возможностью выбора даты занятия
dcc.DatePickerSingle(
id='my-date-picker-single',
min_date_allowed=dt(2020, 1, 1),
max_date_allowed=dt(2020, 12, 31),
initial_visible_month=dt(2020, 5, 22),
placeholder='Выберите дату занятия',
date=dt(2020, 5, 22)
),
html.Div(id='output-container-date-picker-single'),
html.Br(),
# Выпадающий список, в котором можно выбрать время занятия
dcc.Dropdown(style={'width': '200px'},
options=[{'label': j, 'value': j} for j in timeLessons()],
placeholder='Выберите время занятия'),
]),
html.Br(),
# Ссылка для перехода к log
dcc.Link('Лог занятия', href='/log'),
html.Br(),
# Вывод результатов учета посещаемости в таблице
html.Table(style={'table-layout': 'fixed',
'width': '100%',
'text-align': 'left',
'color': '#373a3c',
'font-family': 'sans-serif'},
children=[
html.Div([html.Tr([
html.Th(style={'width': '400px', 'padding': '20px', 'border-collapse': 'collapse',
'color': 'white', 'background-color': '#373a3c'},
children=j) for j in ['ФИО', 'Фото', 'Статус']
]),
]),
html.Div([html.Tr([
html.Td(style={'width': '400px', 'padding': '20px',
'border-bottom': '2px solid #dee2e6', 'border-collapse': 'collapse'},
children=students(con)[i]['name']),
html.Td(style={'width': '400px', 'padding': '20px',
'border-bottom': '2px solid #dee2e6', 'border-collapse': 'collapse'},
children=html.Img(src=encode_image('../' + students(con)[i]['photo']), height=100,
width=125)),
html.Td(style={'width': '400px', 'padding': '20px',
'border-bottom': '2px solid #dee2e6', 'border-collapse': 'collapse'},
children=checkStudent(log(con)[i]['photo']))
]) for i in range(len(students(con)))
])
])
])
@app.callback(
    Output('output-container-date-picker-single', 'children'),
    [Input('my-date-picker-single', 'date')])
def update_output(date):
    """Date-picker callback; currently returns a blank placeholder.

    NOTE(review): date_string is computed but never used, and another
    function later in this module is also named update_output — Python
    keeps only the last binding at module level, though both callbacks
    remain registered with Dash.
    """
    if date is not None:
        # The picker may deliver 'YYYY-MM-DD' or 'YYYY-MM-DDTHH:MM:SS'.
        date = dt.strptime(re.split('T| ', date)[0], '%Y-%m-%d')
        date_string = date.strftime('%B %d, %Y')
        return ' '
def passport(id):
    """Build the 'Student passport' overlay for adding a student record
    and a reference photo.

    Args:
        id: the (randomly generated) id to assign to the new student.
            NOTE(review): the parameter name shadows the builtin id().
    """
    # Student groups available in the dropdown
    groups = ['КЭ-101', 'КЭ-102', 'КЭ-103',
              'КЭ-201', 'КЭ-202', 'КЭ-203',
              'КЭ-301', 'КЭ-302', 'КЭ-303',
              'КЭ-401', 'КЭ-402', 'КЭ-403']
    # Remember the generated id in the dict that is sent to the database
    sent_data["id_stud"] = id
    return html.Div(children=[
        html.Div(id="zatemnenie", children=[
            html.A(className='close', href='http://checkoutheadergang.na4u.ru/log'),
            html.Div(id="okno", children=[
                html.H1("Паспорт студента"),
                html.Div(style={'textAlign': 'left'},
                         children=[
                             # Show the id assigned to this student
                             html.P('id: %s' % id),
                             # Input field for the student's full name
                             html.Div([html.P('ФИО: '),
                                       html.Div([dcc.Input(id='input_name', type='text', size='65',
                                                           placeholder='Введите ФИО', name='name')]),
                                       html.P(id='output_name')]),
                             # Dropdown with the student groups
                             html.Div([
                                 html.P('Группа: '),
                                 dcc.Dropdown(
                                     id='dropdown_group',
                                     options=[{'label': j, 'value': j} for j in groups],
                                     placeholder='Выберете группу',
                                 ),
                                 html.P(id='dropdown_group_out')
                             ]),
                             # Upload button for the reference photo
                             html.P('Фото: '),
                             dcc.Upload(id='upload-image',
                                        children=html.Button('Выбрать фото'),
                                        multiple=True),
                             html.Div(id='output-image-upload')
                         ]),
                # 'Save' submits the collected form data to the database
                html.Button(id="save-button", children="Сохранить", type='submit'),
                html.Div(id='press-save-state'),
            ])
        ]),
        # Button that opens the 'student passport' overlay
        html.A(href='#zatemnenie', className='open', children='Добавить студента')
    ])
""" В Логе занятия находятся данные о студенте: его id, имя, группа, время фото и фото, полученное на занятии. """
log_page_layout = html.Div([
html.H1('Лог занятия', style={'textAlign': 'center', 'color': '#373a3c'}),
# Выводим дату и время занятия
html.H2(style={'textAlign': 'center', 'color': '#373a3c'},
children=log(con)[0]['time'][:11]),
# Вызов функции со страницей 'Паспорт студента'
html.Div(passport(create_id())),
# Таблица с данными студентов на занятии (если студент отсутствовал, но не может находиться в этой таблице)
html.Table(style={'table-layout': 'fixed',
'width': '100%',
'text-align': 'left',
'color': '#373a3c',
'font-family': 'sans-serif'},
children=[
html.Div(style={},
children=[
html.Tr([
html.Th(style={'width': '300px', 'padding': '20px', 'border-collapse': 'collapse',
'color': 'white', 'background-color': '#373a3c'},
children=j) for j in ['Время кадра', 'Фото', 'id', 'ФИО', 'Группа']
]),
]),
html.Div([
html.Tr([
html.Td(style={'width': '300px', 'padding': '20px',
'border-bottom': '2px solid #dee2e6', 'border-collapse': 'collapse', },
children=log(con)[i]['time'][11:]),
html.Td(style={'width': '300px', 'padding': '20px',
'border-bottom': '2px solid #dee2e6', 'border-collapse': 'collapse'},
children=
html.Img(src=encode_image('../' + log(con)[i]['photo']), height=100, width=125)
),
html.Td(style={'width': '300px', 'padding': '20px',
'border-bottom': '2px solid #dee2e6', 'border-collapse': 'collapse'},
children=log(con)[i]['id_stud']),
html.Td(style={'width': '300px', 'padding': '20px',
'border-bottom': '2px solid #dee2e6', 'border-collapse': 'collapse'},
children=students(con)[i]['name']),
html.Td(style={'width': '300px', 'padding': '20px',
'border-bottom': '2px solid #dee2e6', 'border-collapse': 'collapse'},
children=students(con)[i]['groups'])
]) for i in range(len(students(con)))
])
])
])
def parse_contents(contents, filename):
    """Decode an uploaded image, store it on disk, and return a preview Div.

    The stored path is also written into sent_data["photo"] so it is saved
    to the database together with the rest of the form.

    Args:
        contents: data-URI string from dcc.Upload.
        filename: original upload name (replaced by a generated name below).
    """
    # Strip the "data:image/...;base64," prefix (first 23 characters).
    edit_string = contents[23:]
    # str.replace returns a new string; the original discarded the result,
    # so embedded newlines were never actually removed.
    edit_string = edit_string.replace("\n", "")
    imgdata = base64.b64decode(edit_string)
    filename = '{}_true_image.jpg'.format(sent_data['id_stud'])
    with open("../app/student/" + filename, 'wb') as f:
        f.write(imgdata)
    # Record the relative path for the database row.
    sent_data["photo"] = "app/student/" + filename
    return html.Div([
        html.Img(src=contents, height=100, width=125),
        html.P(filename)
    ])
@app.callback(
    Output('output_name', 'children'),
    [Input('input_name', 'value')]
)
def input_fio(input_name):
    """Store the typed full name in sent_data and echo it back to the page."""
    sent_data["name"] = input_name
    return input_name
@app.callback(
    Output('dropdown_group_out', 'children'),
    [Input('dropdown_group', 'value')]
)
def update_dropdown(group):
    """Store the chosen group in sent_data and echo it back to the page."""
    sent_data["groups"] = group
    return group
@app.callback(
    Output('output-image-upload', 'children'),
    [Input('upload-image', 'contents')],
    [State('upload-image', 'filename')]
)
def update_output(list_of_contents, list_of_names):
    """Build a preview element for every uploaded photo."""
    if list_of_contents is None:
        return None
    # parse_contents stores each file and returns its preview Div.
    return [parse_contents(c, n) for c, n in zip(list_of_contents, list_of_names)]
@app.callback(
    Output('press-save-state', 'children'),
    [Input('save-button', 'n_clicks')]
)
def sent_data_of_student(n_clicks):
    """On Save, append the collected form data to the Students table."""
    if n_clicks:
        # Re-bind the database URI.  NOTE(review): the assignment chain
        # leaves `con` as the URI string, and credentials are hard-coded —
        # verify DataFrame.to_sql accepts a URI with the installed pandas
        # version and move the secret out of source control.
        con = server.config[
            'SQLALCHEMY_DATABASE_URI'] = 'mysql://c19441_checkoutheadergang_na4u_r:RiZyiVodxivon88@localhost/c19441_checkoutheadergang_na4u_r?charset=utf8'
        sentData_to_df = pd.DataFrame([sent_data])
        sentData_to_df.to_sql(name="Students", con=con, if_exists='append', index=False)
        return 'Сохранено'
@app.callback(Output('page-content', 'children'),
              [Input('url', 'pathname')])
def display_page(pathname):
    """Route '/log' to the lesson-log page; everything else to the main page."""
    return log_page_layout if pathname == '/log' else main_page_layout


if __name__ == '__main__':
    app.run_server(debug=True)
| true
|
681a7efb7977f43885a058a44e8519b8e9c13d64
|
Python
|
toilatester/sample-automation-frameworks-across-languages
|
/python/GUI/POM/KnowledgePage/KnowledgeValidation.py
|
UTF-8
| 2,092
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
from typing import List
from Core.Base.BaseValidation import BaseValidation
from Core.DatabaseFactory.DatabaseType import DatabaseType
from Model.Bot import BotDataModel
from Model.QnA import QnADataModel
from Utils.DatabaseUtils import DatabaseHelper
from .KnowledgePage import KnowledgePage
class KnowledgeValidation(BaseValidation):
    """GUI validations for the Knowledge page (FAQ URLs, Q&A pair tables)."""
    def __init__(self):
        BaseValidation.__init__(self)
        # Mongo-backed query helper for cross-checking GUI data.
        self.__db_helper = DatabaseHelper(DatabaseType.MONGO_DB).database_query
        self.__knowledge = KnowledgePage()
        self.__bot = BotDataModel()
        self.__qna = QnADataModel()
    def should_added_faq_url_successfully(self, actual_url: List['str'], expect_url: str):
        """Assert that expect_url appears among the FAQ URLs shown in the GUI."""
        self.assertion.should_contain_in_list(actual_url, expect_url, "Has error in add FAQ url")
    def should_added_correctly_url_data(self, bot_name, actual_data):
        """Assert the GUI FAQ data matches the Q&A rows stored for the bot."""
        bots: List['BotDataModel.BotInformation'] = self.__bot.get_bots_via_bot_name(bot_name)
        # Q&A rows are looked up via the first bot matching the name.
        list_qna: List[QnADataModel.QnAInformation] = self.__qna.get_qna_via_bot_id(bots[0].bot_id)
        expected_data = []
        for qna in list_qna:
            expected_data.append({"questions": [qna.qna_question], "answer": qna.qna_answer})
        self.assertion.should_be_equal(expected_data, actual_data,
                                       "Has difference in faq data \nExpected: {} \nActual: {} ".format(expected_data,
                                                                                                        actual_data))
    def should_create_question_pair_table_with_empty_data(self):
        """Assert the newly created question-pair table contains no rows."""
        # NOTE(review): this grabs the first table without checking that it
        # is actually the Manual Q&A table (original author's caveat).
        data_table = self.__knowledge.get_question_pair_data_in_gui()
        print(f"Data table {data_table}")
        # Keep only rows that actually contain questions.
        is_has_data = lambda input_length: len(input_length) > 0
        data_table = [data_row for data_row in data_table if
                      is_has_data(data_row["questions"])]
        self.assertion.should_be_equal(len(data_table), 0, "Init new question pair with existing data")
| true
|
a632b1ccbe823893811b32dcb2411f82b67451e7
|
Python
|
Lmarcho/Research
|
/testlist.py
|
UTF-8
| 609
| 3.78125
| 4
|
[] |
no_license
|
# Three child records, each keyed by name and birth year.
child1 = {
    "name" : "Emil",
    "year" : 2004
}
child2 = {
    "name" : "Tobias",
    "year" : 2007
}
child3 = {
    "name" : "Linus",
    "year" : 2011
}

# Nested dict: family slot -> child record.
myfamily = {
    "child1" : child1,
    "child2" : child2,
    "child3" : child3
}

# for i in range (1,len(myfamily)+1):
#     print(i)
#     x=myfamily.get("child"+str(i)).get("name")
#     y=myfamily.get("child"+str(i)).get("year")
#     print (x)
#     print(y)

thislist = ["apple", "banana", "cherry", "orange", "kiwi", "melon", "mango"]
# Print every ordered pair of distinct fruits. Both loops intentionally
# start at index 1, skipping "apple", exactly like the original bounds.
for first_idx in range(1, len(thislist)):
    for second_idx in range(1, len(thislist)):
        if first_idx == second_idx:
            continue
        print(thislist[first_idx] + "---" + thislist[second_idx])
| true
|
9d440e6d2d4458a98de53ea80eba4be855e8a42f
|
Python
|
swipswaps/grade.py
|
/gradepy/grade.py
|
UTF-8
| 14,315
| 3.140625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""Object oriented grading of python modules.
Defines an abstract base class, Tester, to be used for grading
python modules. This module differs from standard testing
strategies in two major ways. First, it relies on a correct
implementation of the module, making the creation of test
cases very easy. Second, it is designed to identify graded
quality of performance, rather than a binary PASS/FAIL
judgement. It also provides Error Carried Forward functionality.
See show_grade.py for example usage.
"""
from __future__ import print_function
import traceback
import inspect
import imp
import os
import re
import string
import sys
import utils
class Check(object):
    """Provides an interface for testing with Tester.

    Args:
        expr (str): a python expression, the value of which will be verified.
        note (str): a note to provide in the feedback if `expr` does not
          evaluate to the same value under the student and master modules.

    See show_grade.py for example usage. Key things to note are that side
    effects occur just as if you executed expr in the calling scope, and that
    exceptions raised while evaluating will be caught and recorded. Both expr
    and note will be formatted with the calling scope's name space, thus they
    can include {variable_name}s.
    """
    def __init__(self, expr, note='', stdin=(), check=None, stdout_check=None):
        # Yes, python allows us to access the local name space of the
        # calling function (or module). This prevents us from requiring
        # the user to supply locals() as an argument.
        if not isinstance(expr, str):
            raise TypeError('Check expr must be a string.')
        self.env = inspect.stack()[1][0].f_locals
        self.expr = literal_format(expr, **self.env)
        self._check = check                # optional custom value comparator
        self._stdout_check = stdout_check  # optional custom stdout comparator
        # Set note.
        if note:
            self.note = '\n Note: ' + literal_format(note, **self.env)
        else:
            self.note = ''
        # Write supplied stdin.  (sys.stdin is a FakeStdin queue here,
        # installed by Tester.__init__.)
        if isinstance(stdin, str):
            sys.stdin.put(stdin)
        else:
            for x in stdin:
                sys.stdin.put(x)
        # Evaluate expr within env, capturing stdout and any exception.
        module_env = self.env['module'].__dict__
        with utils.capture_stdout() as out:
            try:
                self.val = eval(self.expr, module_env, self.env)
            except Exception as e:
                # Exceptions from student code become part of the recorded
                # value so Tester can report them instead of crashing.
                self.val = StudentException(e, skip=4)
        if out.captured:
            self.stdout = '----begin stdout----\n' + out.captured + '\n-----end stdout-----'
        else:
            self.stdout = None
    def check(self, student_val):
        """Returns True if student_val is correct."""
        master = self # This function should only ever be called as master.check()
        if not self.env['module'].__name__.startswith('master'):
            raise TestError('Attempted to call check() from the student Check.')
        if self._check:
            # Use the check function provided, which may be
            # more lenient than 100% match to master.val
            return self._check(master.val, student_val)
        else:
            # Exact value AND type must match (so 1 != 1.0, [1] != (1,)).
            return master.val == student_val and type(master.val) == type(student_val)
    def stdout_check(self, student_stdout):
        """Returns True if student stdout is correct."""
        master = self
        if not self.env['module'].__name__.startswith('master'):
            raise TestError('Attempted to call stdout_check() from the student Check.')
        if self._stdout_check:
            return self._stdout_check(master.stdout, student_stdout)
        else:
            return master.stdout == student_stdout
class Tester(object):
    """A class for grading modules.

    Holds the master (reference) module plus the registered test functions,
    and is called once per student submission file.
    """
    def __init__(self, master_mod, points=0, note=None):
        self.master_mod = master_mod
        self._adjust_modules(master_mod)
        self.log_correct = False   # when True, passing checks are logged too
        self.setup_func = None     # optional per-submission setup hook
        self.test_funcs = []       # populated via the register() decorator
        self.points = points
        self.note = note
        # Replace real stdin with a scripted queue so Checks can feed input.
        self.stdin = FakeStdin()
        sys.stdin = self.stdin
    def __call__(self, student_file, log_func=print, func_re=None):
        """Runs the tests on one student submission."""
        # This state is student specific, and is thus reset upon every call.
        self.log = log_func
        self.bad_funcs = set()  # student functions known broken (drives ECF)
        if self.setup_func:
            self.setup_func(student_file)
        self.student_mod, self.ecf_mod = self._get_modules(student_file)
        self._adjust_modules(self.student_mod, self.ecf_mod)
        # Banner.
        self.log('\n\n' + '=' * 70)
        self.log('Automated testing for ' + student_file)
        self.log('=' * 70)
        if self.note:
            self.log('\n' + self.note + '\n')
        if self.points:
            self.log('Maximum points: {}'.format(self.points))
        if func_re:
            self.log("Filtering test functions by regex: '{}'".format(func_re.pattern))
            tests = (f for f in self.test_funcs if func_re.search(f.__name__))
        else:
            tests = self.test_funcs
        self._run_tests(tests)
    def setup(self, every_time):
        """Decorator: register a setup function run before each submission.

        When every_time is False, a '.gradepy' marker file in the student's
        directory ensures the setup runs at most once per directory.
        """
        def decorator(setup_func):
            def full_setup_func(student_file):
                path = os.path.dirname(student_file)
                file = os.path.join(path, '.gradepy')
                # Check if setup has already been run.
                if not every_time and os.path.exists(file):
                    return
                # Run the setup function from test script.
                setup_func(student_file)
                # Mark the directory as having been setup.
                with open(file, 'a+') as f:
                    f.write('DEBUG')
            self.setup_func = full_setup_func
        return decorator
    def register(self, tests=[], depends=[], manual=False):
        """Decorator to mark a function as a test function of this Tester.

        Optionally, specifies the student functions that the function tests
        and depends on, with the function names as strings.

        NOTE(review): the decorator returns None, so the decorated name is
        rebound to None in the test script — the function remains reachable
        only through self.test_funcs. Confirm this is intentional.
        """
        def decorator(test_func):
            self.test_funcs.append(test_func)
            setattr(test_func, 'tests', set(tests))
            setattr(test_func, 'depends', set(depends))
            setattr(test_func, 'manual', manual)
        return decorator
    def _get_modules(self, student_file):
        """Returns the student module and a copy for error carried forward."""
        path = os.path.dirname(student_file)
        mod_name = os.path.basename(student_file)[:-3]  # strip '.py'
        sys.path.append(path)
        try:
            mod_junk = imp.find_module(mod_name, [path])
        except ImportError as e:
            if not os.path.isfile(student_file):
                raise IOError("No such file: '{}'"
                              .format(student_file))
            else:
                raise e
        else:
            # Load twice so ECF patches don't touch the graded module.
            student_mod = imp.load_module('student_mod', *mod_junk)
            ecf_mod = imp.load_module('ecf_mod', *mod_junk)
            assert student_mod is not ecf_mod
            return student_mod, ecf_mod
    def _adjust_modules(self, *modules):
        # Hook for patching freshly loaded modules; currently a no-op.
        for mod in modules:
            # Don't print the message for raw_input
            #mod.raw_input = lambda msg=None: raw_input()
            pass
    def _run_tests(self, test_funcs):
        """Runs all test methods of the instance as given by self.tests."""
        #methods = inspect.getmembers(self, predicate=inspect.ismethod)
        for tm in test_funcs:
            self._run_test(tm)
        return self
    def _run_test(self, test, ecf=False):
        """Runs a single test method.

        Args:
            test (callable): a test method of self
            ecf: when True, rerun against the ECF module (broken helpers
                already replaced with their master implementations).
        """
        if not ecf: # only write header for the first try
            self.log('\n{:-^50}'.format('( ' + test.__name__ + ' )'))
            if test.__doc__:
                self.log('"""' + test.__doc__.strip() + '"""')
        if test.manual:
            self.log('')
            self._run_manual_test(test)
            return
        student_mod = self.ecf_mod if ecf else self.student_mod
        student_out = test(student_mod)
        master_out = test(self.master_mod)
        mistakes = self._compare(master_out, student_out)
        if any(mistakes):
            self._handle_ecf(test, ecf)
        else:
            self.log('All tests passed!')
        return mistakes
    def _run_manual_test(self, test):
        # Manual tests drive both modules themselves; just guard exceptions.
        self.stdin.clear()
        try:
            test(self.master_mod, self.student_mod)
        except Exception as e:
            err = StudentException(e)
            self.log('\nFatal exception in manual testing function. '
                     'Cannot finish test.\n' + str(err))
    def _compare(self, master_out, student_out):
        # Compute all of master_out first so that stdin/stdout doesn't get mixed
        # between student and master.
        master_out = list(master_out)
        self.stdin.clear() # don't let unused stdin bleed into this test func
        mistakes = []
        for master in master_out:
            try:
                student = next(student_out)
            except StopIteration:
                # Test function is done with student, but wasn't done with master.
                raise TestError('Test function yielded not enough Checks for student')
            except Exception as e:
                err = StudentException(e, skip=3)
                self.log('\nFatal exception in student code. '
                         'Cannot finish test.\n' + str(err))
                break
            else: # no exception
                if isinstance(master.val, StudentException):
                    # The test function should never raise exceptions when using
                    # the master module. The test function must be broken.
                    raise TestError('Exception raised when running test function '
                                    'using master module:\n' + master.val.full_tb)
                mistakes.append(self._compare_one(master, student))
        # Test function is done with master, confirm that it is done with student.
        foo = next(student_out, None)
        if foo is not None:
            raise TestError('Test function yielded too many Checks for student.')
        return mistakes
    def _compare_one(self, master, student):
        # Compare one master/student Check pair; True means a mistake.
        if isinstance(student.val, StudentException):
            self.log(literal_format('\n{master.expr:q} should be {master.val}, '
                                    'but student code raised an exception:\n'
                                    '{student.val}{student.note:q}', **locals()))
            return True
        mistake = False
        if not master.check(student.val):
            self.log(literal_format('\n✘ {master.expr:q} should be {master.val}, '
                                    'but it is {student.val}{student.note:q}', **locals()))
            mistake = True
        if not master.stdout_check(student.stdout):
            self.log(literal_format('\n✘ {master.expr:q} should print:\n{master.stdout:q}'
                                    '\n\nbut it actually prints:\n{student.stdout:q}{student.note:q}', **locals()))
            mistake = True
        if self.log_correct and not mistake:
            if student.val:
                self.log(literal_format('\n✓ {master.expr:q} is {student.val}', **locals()))
            if student.stdout:
                self.log(literal_format('\n✓ {master.expr:q} prints:\n{student.stdout:q}', **locals()))
        return mistake
    def _handle_ecf(self, test, ecf):
        # See if this test benefits from ECF.
        if hasattr(test, 'depends') and not ecf:
            bad_helpers = [f for f in test.depends if f in self.bad_funcs]
            if bad_helpers:
                self.log('Trying again with helper functions corrected.')
                mistakes = self._run_test(test, ecf=True)
                if not any(mistakes):
                    self.log('Problem solved!')
        # Fix self.ecf_mod for later tested functions.
        if hasattr(test, 'tests'):
            self.bad_funcs |= test.tests
            for func_name in test.tests:
                # Update ecf module with master version of function
                master_func = getattr(self.master_mod, func_name)
                setattr(self.ecf_mod, func_name, master_func)
class StudentException(Exception):
"""Represents an exception that occurred in student code.
This class should always be instantiated in an except block:
"""
def __init__(self, exception, skip=1):
self.exception = exception
tb = traceback.format_exc()
self.full_tb = tb
self.tb = tb.split('\n', skip)[-1].rstrip()
def __str__(self):
return self.tb
class TestError(Exception):
    """Raised when the grading script itself is misconfigured or buggy."""
def literal_format(fmt_string, **kwargs):
    """Formats strings, keeping quotations in string values.

    >>> literal_format('string: {foo}', foo='bar')
    "string: 'bar'"

    Using the q spec will remove quotations, as in standard formatting.

    >>> literal_format('string: {foo:q}', foo='bar')
    "string: bar"
    """
    class Template(string.Formatter):
        def format_field(self, value, spec):
            if spec.endswith('q'):
                # 'q' (quoteless) is our extension; map it to plain 's'.
                spec = spec[:-1] + 's'
            elif isinstance(value, str):
                # Escape control characters and wrap in quotes so string
                # values read as literals.  'unicode_escape' works on
                # Python 3 (the original 'string-escape' codec exists only
                # on Python 2 and raised LookupError here on py3).
                value = value.encode('unicode_escape').decode('ascii')
                value = "'" + value + "'"
            return super(Template, self).format_field(value, spec)
    result = Template().format(fmt_string, **kwargs)
    return result
from collections import deque
class FakeStdin:
    """A stand-in for sys.stdin whose lines are queued up in advance."""

    def __init__(self):
        self._queue = deque()

    def put(self, line):
        """Queue `line` to be returned by a later readline() call."""
        self._queue.append(line)

    def readline(self):
        """Pop and return the next queued line, echoing it to stdout."""
        try:
            entry = self._queue.popleft()
            # A callable entry is evaluated lazily; this allows something
            # like (lambda: time.sleep(1) or 'foo').
            entry = entry() if callable(entry) else entry
            # Echo the line so it looks as if someone typed it.
            sys.stdout.write(entry + '\n')
            return entry
        except IndexError:
            raise IOError('No stdin available.')

    def clear(self):
        """Discard all queued lines."""
        self._queue.clear()
| true
|
514add44cc87c5316b33b8deed9771fe28eb0983
|
Python
|
Neverous/spoj
|
/potega.py
|
UTF-8
| 134
| 2.78125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
#2009
#Maciej Szeptuch
#XIV LO Wrocław
# SPOJ "potega": read two space-separated integers a and b from stdin and
# print a raised to the power b.  (Python 2: note the print statement.)
import sys

a, b = sys.stdin.read().split(' ')
a = int(a)
b = int(b)
print a**b
| true
|
f02738394a4d9522780638c89daa0a96a8228ef3
|
Python
|
Jerome-h/embedded-systems
|
/Submission/IoT Data Aggregation/data_check.py
|
UTF-8
| 2,892
| 3.125
| 3
|
[] |
no_license
|
import pandas as pd
import glob

# Aggregate per-device knock logs (one CSV per device in the working
# directory) and report, for each knock timestamp, how many and which
# devices registered it.
devices=[] # array to hold device names
filedatas=[] # array to hold files
times=[] # 2d array to hold times of knocks - times[device][times]

files=glob.glob('*.csv') # create array of csv files
for entry in range(len(files)):
    device = files[entry].replace('-', '')
    device = device.replace('.csv', '')
    devices.append(device) # create list of all device names
    # (same as file name but with .csv and - removed)

# create 2d array of knock times where each row is a different device
# NOTE(review): assumes column 4 is a boolean "knock" flag and column 1 is
# the timestamp — confirm against the logging format.
for z in range(len(files)):
    filedatas.append(pd.read_csv(files[z], sep=',',header=None))
    times.append([])
    for entry in range(len(filedatas[z])):
        if(filedatas[z].values[entry][4] == True):
            times[z].append(filedatas[z].values[entry][1])

knocks=[]
present = False
# Creates new 2d array (knocks) where first value in each row is time of knocks
# boolean 'present' will ensure there are no duplicates
for entry in range(len(times)):
    for x in range(len(times[entry])):
        for i in range(len(knocks)):
            if knocks[i][0]==times[entry][x]:
                present=True
        if not present:
            knocks.append([times[entry][x]])
        present=False

# Goes through 2d array 'knocks' and appends devices which were knocked at
# each time to the relevent row
for entry in range(len(knocks)):
    for x in range(len(times)):
        for y in range(len(times[x])):
            if times[x][y]==knocks[entry][0]:
                knocks[entry].append(devices[x])

# Print number of knocks which occured at each time across all devices
print("\n")
for entry in range(len(knocks)):
    print("At time: " + str(knocks[entry][0]) + " there were ", len(knocks[entry])-1, " knocks.")

# Print the times when <35% of the devices experienced knocks and the names of those which did
print("")
for entry in range(len(knocks)):
    if len(knocks[entry])-1 <= 0.35*len(devices):
        print("\nAt time: " + str(knocks[entry][0]) + " only ", len(knocks[entry])-1,
              " containers were knocked, check individual containers:", end=" ")
        # Index 0 of each row is the timestamp itself, so skip it.
        for x in range(len(knocks[entry])):
            if x>0:
                print(str(knocks[entry][x]), end=", ")

# Print the times when >90% of the devices experienced knocks, also prints the percentage of
# all devices which experienced this knock
print("\n")
for entry in range(len(knocks)):
    if len(knocks[entry])-1 >= 0.90*len(devices):
        percent = (len(knocks[entry])-1)/len(devices)
        percent = percent*100
        percent = int(round(percent))
        print("\nAt time: " + str(knocks[entry][0]) + ", " + str(percent) +" % of the containers were knocked, possible shipment incident")
print("\n")
| true
|
cd4c76a2c3496d8dd7fbbc054200e7e94b754d94
|
Python
|
ehsansh84/services
|
/news_project/bigtc/statistics.py
|
UTF-8
| 4,691
| 2.53125
| 3
|
[] |
no_license
|
import sys
sys.path.append("/root/ehsan/services")
from public_data import *

# MongoDB collection handles (`db` comes from public_data's star import).
col = db['news']
col_statistics = db['statistics']
col_categories = db['categories']
col_sub_categories = db['sub_categories']
col_rss = db['rss']
col_sources = db['sources']
def extract_categories():
    """Scan every document in `news`, store the distinct category names in
    the `categories` collection, then annotate each category with its
    article count.  (Sub-category handling is disabled; the commented
    lines are kept for reference.  Python 2 `except` syntax.)
    """
    categories = []
    sub_categories = []
    i = 0
    news = col.find({})
    # Extract all news categories and sub categories
    try:
        for item in news:
            if not item['category'] in categories:
                categories.append(item['category'])
            # if len(item['sub_category']) == 1:
            #     if not item['sub_category'] in sub_categories:
            #         sub_categories.append(item['sub_category'])
            # else:
            #     print(item['sub_category'])
            i += 1
            # Progress indicator for large collections.
            if i % 1000 == 0:
                print(i)
    except Exception, e:
        print('ERROR: '+e.message)
    # Send them to collection
    for item in categories:
        col_categories.insert({'name': item})
    for item in sub_categories:
        col_sub_categories.insert({'name': item})
    # Update categories with statistics info
    r_categories = col_categories.find({})
    for category in r_categories:
        news_count = col.count({'category': category['name']})
        col_categories.update({'name': category['name']}, {'$set': {'count': news_count}} )
    # r_sub_categories = col_sub_categories.find({})
    # for sub_category in r_sub_categories:
    #     news_count = col.count({'sub_category': sub_categories['name']})
    #     col_categories.update({'name': sub_categories['name']}, {'count': news_count})
def cat_mapping():
    """Normalise the free-form `category` field of every RSS feed to a
    canonical name using the alias table below."""
    cat_data = [
        {'name': 'Sport', 'labels': ['sports','Sports', 'sport']},
        {'name': 'World', 'labels': ['world']},
        {'name': 'Tech', 'labels': ['Technology', 'tech']},
        {'name': 'Business', 'labels': ['business']},
        {'name': 'Health', 'labels': ['health']},
        {'name': 'Entertainment', 'labels': ['entertainment']},
        {'name': 'Arts', 'labels': ['arts', 'Arts & Culture']},
        {'name': 'Politics', 'labels': ['politics', 'Politics Headlines']},
        {'name': 'Lifestyle', 'labels': ['lifestyle', 'Style', 'Living']},
        {'name': 'U.S', 'labels': ['U.S.', 'US', 'US Headlines', 'U.S. ', 'US News', 'us']},
        {'name': 'Science', 'labels': ['science']},
        {'name': 'Women', 'labels': []},
        {'name': 'Travel', 'labels': ['travel']}
    ]
    # map_rss
    rss = col_rss.find()
    for item in rss:
        for cat in cat_data:
            for label in cat['labels']:
                if item['category'] == label:
                    col_rss.update({'link': item['link']}, {'$set': {'category': cat['name']}})
def news_mapping():
    """Apply the same label→canonical-name mapping as cat_mapping(), but to
    the `news` collection, logging progress every 1000 documents.
    (The alias table is duplicated from cat_mapping.)"""
    cat_data = [
        {'name': 'Sport', 'labels': ['sports','Sports', 'sport']},
        {'name': 'World', 'labels': ['world']},
        {'name': 'Tech', 'labels': ['Technology', 'tech']},
        {'name': 'Business', 'labels': ['business']},
        {'name': 'Health', 'labels': ['health']},
        {'name': 'Entertainment', 'labels': ['entertainment']},
        {'name': 'Arts', 'labels': ['arts', 'Arts & Culture']},
        {'name': 'Politics', 'labels': ['politics', 'Politics Headlines']},
        {'name': 'Lifestyle', 'labels': ['lifestyle', 'Style', 'Living']},
        {'name': 'U.S', 'labels': ['U.S.', 'US', 'US Headlines', 'U.S. ', 'US News', 'us']},
        {'name': 'Science', 'labels': ['science']},
        {'name': 'Women', 'labels': []},
        {'name': 'Travel', 'labels': ['travel']}
    ]
    i = 0
    # map_rss
    news = col.find()
    for item in news:
        i += 1
        if i % 1000 == 0:
            print('==================== %s ====================' % i)
        for cat in cat_data:
            for label in cat['labels']:
                if item['category'] == label:
                    print('Updating %s to %s' % (item['category'],cat['name']))
                    col.update({'link': item['link']}, {'$set': {'category': cat['name']}})
def extract_sources():
    """Collect the distinct `source` names from the RSS feeds and create a
    row for each in `sources` (with an empty CSS selector)."""
    rss = col_rss.find()
    source_list = []
    for item in rss:
        if not item['source'] in source_list:
            source_list.append(item['source'])
    for item in source_list:
        col_sources.insert({'name': item, 'selector': ''})
    print(source_list)
def update_source_info():
    """Store on every source the number of news items it produced."""
    sources = col_sources.find()
    for item in sources:
        count = col.count({'source': item['name']})
        col_sources.update({'name': item['name']}, {'$set': {'news_count': count}})
# cat_mapping()
# news_mapping()
# extract_categories()
# extract_sources()
# update_source_info()
| true
|
e09ada7e503ab55563e2147dd1227e1440d8528f
|
Python
|
david1983/rtads_ml
|
/tests/test.py
|
UTF-8
| 78
| 2.6875
| 3
|
[] |
no_license
|
def sum(a, b):
    """Return the result of adding `a` and `b`.

    NOTE: this intentionally keeps the original name, which shadows the
    built-in `sum` within this module.
    """
    total = a + b
    return total
def test_answer():
    """Sanity check: the module-level sum() adds two numbers."""
    expected = 2
    assert sum(1, 1) == expected
| true
|
3982faaabc57728a2189d0b5dcc85b620c44ad39
|
Python
|
alpha-atul/Selenium_Core
|
/Headless_Browser/headless.py
|
UTF-8
| 928
| 2.859375
| 3
|
[] |
no_license
|
from selenium import webdriver

# Launch Chrome headlessly, time the page load of google.com via the
# Navigation Timing API, then search for "Maserati" and print the result
# links' text.
options = webdriver.ChromeOptions()
options.headless = True
driver = webdriver.Chrome(options=options)
driver.maximize_window()
driver.implicitly_wait(30)
driver.get("https://www.google.com/")
# Navigation Timing timestamps (milliseconds since the epoch).
navigationStart = driver.execute_script("return window.performance.timing.navigationStart")
responseStart = driver.execute_script("return window.performance.timing.responseStart")
domComplete = driver.execute_script("return window.performance.timing.domComplete")
''' Calculate the performance'''
# Back end: request sent until first response byte arrives.
# Front end: response received until the DOM is fully built.
backendPerformance_calc = responseStart - navigationStart
frontendPerformance_calc = domComplete - responseStart
print("Back End: %s" % backendPerformance_calc)
print("Front End: %s" % frontendPerformance_calc)
ele = driver.find_element_by_name("q")
ele.send_keys("Maserati")
ele.submit()
# Each organic result title is an <a> directly under div.r.
list1 = driver.find_elements_by_css_selector("div.r > a")
for item in list1:
    print(item.text)
driver.quit()
| true
|
f291d3811e2df5eafa4bf7c0923ff72ee216c567
|
Python
|
JoeSeff/hackerrank-python
|
/019 hackerrank - Introduction to Sets/main.py
|
UTF-8
| 266
| 3.328125
| 3
|
[] |
no_license
|
def average(array):
    """Return the mean of the *distinct* values in `array`.

    Duplicates are ignored: values are deduplicated with a set before
    averaging (as the HackerRank "Introduction to Sets" problem requires).
    Raises ZeroDivisionError for an empty input, like the original.
    """
    # Build the set once and let the built-in sum() do the accumulation,
    # instead of shadowing `sum` with a manual loop variable.
    distinct = set(array)
    return sum(distinct) / len(distinct)
if __name__ == '__main__':
    # Read the element count (only consumed, not otherwise used) and the
    # space-separated integers, then print their distinct-value average.
    n = int(input())
    arr = list(map(int, input().split()))
    result = average(arr)
    print(result)
| true
|
a641d22aee5ce35b35c20d930167875e0f49ebc9
|
Python
|
jacobhal/ML-DD2421
|
/lab3py/test.py
|
UTF-8
| 918
| 2.984375
| 3
|
[] |
no_license
|
import matrixfuns as m
#import lab3 as l

# Smoke-test script for the matrixfuns helper module: exercises 2D/3D
# matrix construction, diagonal extraction and element-wise broadcasting.

# Dimensions of Matrices
rows = 7
cols = 4
depth = 5

matrixzeros = m.gen2dMatrix(0, rows, cols)
m.printMatrix(matrixzeros)
matrixones = m.gen2dMatrix(1, rows, cols)
m.printMatrix(matrixones)
matrix3dzeros = m.gen3dMatrix(0, rows, cols, depth)
m.printMatrix(matrix3dzeros)
matrix3dones = m.gen3dMatrix(1, rows, cols, depth)
m.printMatrix(matrix3dones)
matrix2d3x3 = m.gen2d3x3Matrix((1,2,3), (4,5,6), (7,8,9))
m.printMatrix(matrix2d3x3)
# getDiagonal of a matrix yields its diagonal; applying it to the diagonal
# again rebuilds a matrix (round-trip check).
diagonal = m.getDiagonal(matrix2d3x3)
print(diagonal)
matrixRevert = m.getDiagonal(diagonal)
m.printMatrix(matrixRevert)
# This works
print(1.0/matrix2d3x3)
matrixSubtracted = m.broadcast(matrix2d3x3, (1,1,1), 'subtract')
m.printMatrix(matrixSubtracted)
matrixAdded = m.broadcast(matrix2d3x3, (1,1,1), 'add')
m.printMatrix(matrixAdded)
#X, labels = l.genBlobs(centers=5)
#print(m.nli(X, labels))
#print(m.nliClass(X, labels, 0))
| true
|
9118ab2ba34f0b1f6ccf238bb99d290a6346c54d
|
Python
|
rrebase/algorithms
|
/d03_binary_search.py
|
UTF-8
| 441
| 4.34375
| 4
|
[] |
no_license
|
# Binary search algorithm
def binary_search(array, item):
lower, upper = 0, len(array)
while lower < upper:
mid = (lower + upper) // 2 # middle
if array[mid] < item:
lower = mid + 1
elif array[mid] > item:
upper = mid
else:
return mid
return False
l = [9, 21, 25, 26, 30, 35, 38, 63, 64, 65, 69, 70, 71, 77, 78, 80, 89]
print(binary_search(l, 25)) # -> 2
| true
|
d95b27dd96c6fb051be7a74bd48033f910d33d3c
|
Python
|
yamaxin/hogwarts-lg4-yamaxin
|
/python_class/tonglao.py
|
UTF-8
| 2,138
| 4.0625
| 4
|
[] |
no_license
|
'''定义一个天山童姥类 ,类名为TongLao,属性有血量,武力值(通过传入的参数得到)。TongLao类里面有2个方法,
see_people方法,需要传入一个name参数,如果传入”WYZ”(无崖子),则打印,“师弟!!!!”,如果传入“李秋水”,打印“师弟是我的!”,如果传入“丁春秋”,打印“叛徒!我杀了你”
fight_zms方法(天山折梅手),调用天山折梅手方法会将自己的武力值提升10倍,血量缩减2倍。需要传入敌人的hp,power,进行一回合制对打,打完之后,比较双方血量。血多的一方获胜。
定义一个XuZhu类,继承于童姥。虚竹宅心仁厚不想打架。所以虚竹只有一个read(念经)的方法。每次调用都会打印“罪过罪过”
加入模块化改造
'''
class TongLao:
    """Tianshan Tonglao: holds her own and one enemy's HP/power and can
    fight a single round using the Tianshan Zhemei Shou technique."""

    def __init__(self, my_hp, my_power, enemy_hp, enemy_power):
        # Own hit points and combat power.
        self.my_hp = my_hp
        self.my_power = my_power
        # Enemy hit points and combat power.
        self.enemy_hp = enemy_hp
        self.enemy_power = enemy_power

    def see_people(self, name):
        """Print a reaction depending on who is met: 'WYZ' (Wuyazi),
        '李秋水' (Li Qiushui) or '丁春秋' (Ding Chunqiu)."""
        self.name = name
        # Branch on the name that was passed in.
        if self.name == 'WYZ':
            print('师弟!!!!')
        elif self.name == '李秋水':
            print('师弟是我的!')
        elif self.name == '丁春秋':
            print('叛徒!我杀了你')

    def fight_zms(self):
        """One round of Tianshan Zhemei Shou: own power x10, own HP /2,
        then both sides strike once; whoever drops to 0 HP or below loses.
        """
        # Recompute own stats under the technique.
        self.my_new_hp = self.my_hp/2
        self.my_new_power = self.my_power*10
        # Remaining hit points after the exchange of blows.
        self.my_hp1 = self.my_new_hp - self.enemy_power
        self.enemy_hp1 = self.enemy_hp - self.my_new_power
        # Report the outcome ('I lost' / 'I won'); a draw prints nothing.
        if self.my_hp1 <= 0:
            print('我输了')
        elif self.enemy_hp1 <= 0:
            print('我赢了')
# Demo: Tonglao with 1000 HP / 1000 power against an enemy with
# 200 HP / 100 power, fighting one round.
tonglao=TongLao(1000,1000,200,100)
tonglao.fight_zms()
| true
|
c118e5c5f6cb94c183e10436ca399ce32f55f24e
|
Python
|
sydsim/structurized_object_classifier
|
/features/sequence.py
|
UTF-8
| 1,325
| 2.546875
| 3
|
[] |
no_license
|
import tensorflow as tf
import numpy as np
from . import SOCFeature
####
class SOCSequenceFeature(SOCFeature):
    '''Feature wrapping a fixed-capacity sequence of `elem` sub-features.

    Registers one int32 `seq_len` tensor plus, for every tensor shape of
    the element feature, a `[list_length, ...]` batched tensor.
    '''
    def __init__(self,
                 module,
                 elem,
                 list_length,
                 optional=False,
                 ):
        super().__init__(module=module, optional=optional)
        self.elem = elem
        self.list_length = list_length
        # Scalar holding the true (untruncated) sequence length.
        self.add_element(shape=[1], dtype=tf.int32, name='seq_len')
        for shape in self.elem.tensor_shape:
            self.add_element(shape=[list_length] + shape['shape'],
                             dtype=shape['dtype'],
                             name='list_%d_%s' % (list_length, shape['name']))

    def dropout(self, training=True):
        # Merge this feature's dropout feeds with the element's.
        feed_dict = super().dropout(training)
        feed_dict.update(self.elem.dropout(training))
        return feed_dict

    def transform(self, obj):
        """Encode `obj` (a sequence): keep the last `list_length` items,
        pad with zero elements, and prepend the original length."""
        output = [[len(obj)]]
        t = [self.elem.transform(x) for x in obj[-self.list_length:]]
        t += [self.elem.zeros()] * (self.list_length - len(obj))
        # Regroup per tensor: one list per registered element tensor.
        output += [al for al in zip(*t)]
        return output

    def zeros(self):
        """Encoding of an absent sequence (length 0, all-zero elements)."""
        output = [[0]]
        t = [self.elem.zeros()] * self.list_length
        output += [al for al in zip(*t)]
        return output
| true
|
8fd40a72ab7130eb4c53152ab7df047d456ef2fc
|
Python
|
Pirouz-Nourian/earthy_20
|
/House_of_Heritage/A3_Structuring/process/Code/_4_rebuildTriangles.py
|
UTF-8
| 1,223
| 2.515625
| 3
|
[
"CC-BY-4.0",
"MIT",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
# AR3B011 EARTHY (2020/21 Q1)
# Zaatari Refugee Camp Heritage Center Project: "Bayt alturath"
# Group Members: Selina Bitting, Barbara Foolen de Oliveira, Inaka Sema, Yamini Patidar, Maimuna Bala Shehu
import rhinoscriptsyntax as rs
import math
import cmath

# NOTE: `levels`, `selected_level`, `tris`, `complexTri_sorted` and
# `blockHeight` are Grasshopper component inputs (defined externally).
levels = levels
#Create the list of triangles in the levels
sel_tri = []
for i in range(len(levels)):
    if levels[i]==selected_level:
        sel_tri.append(tris[i])

def addToBlock(slice,step):
    # Vertical offset of a slice: its base height plus the step height.
    z = slice+step
    return z

# Use names to reinterpret values for next part of code
### WIP -- This part of the code will rebuild the triangles by selecting A/B/C imag/real
### Then add some z step to it in order to create a 'faceted face'
###############
triangles = complexTri_sorted
blockLevel = sel_tri
bH = blockHeight
blockFace = []
for i in range(len(levels)):
    horiz = bH*levels[i]
    stepUp = horiz + bH
    for color, A, B, C in triangles:
        # Triangle corners are stored as complex numbers (x=real, y=imag).
        Apt = rs.AddPoint(A.real, A.imag, addToBlock(horiz,stepUp))
        Bpt = rs.AddPoint(B.real, B.imag, addToBlock(horiz,stepUp))
        Cpt = rs.AddPoint(C.real, C.imag, addToBlock(horiz,stepUp))
        singleTri = rs.AddPolyline((Apt,Bpt,Cpt,Apt))
        blockFace.append(singleTri)

# Output the faces of the blocks to the next piece of code
| true
|
b0f2b3b0191148d2144866ac5652ed50fde0edef
|
Python
|
wellstseng/XStocker
|
/src/global_func.py
|
UTF-8
| 796
| 2.84375
| 3
|
[] |
no_license
|
from datetime import timedelta, date
import datetime
import os
def daterange(start_date, end_date):
    """Yield dates counting *down* from `start_date` (inclusive) towards
    `end_date` (exclusive).  Yields nothing unless start_date > end_date.
    """
    span = (start_date - end_date).days
    for offset in range(int(span)):
        yield start_date - timedelta(days=offset)
def get_latest_file_date(relative_path):
    """Scan `relative_path` recursively for files named YYYYMMDD.csv and
    return the latest date found, formatted "YYYY/MM/DD".

    Returns the sentinel "2000/1/1" when no matching file exists.  Files
    whose stem is not numeric, or whose name merely *contains* ".csv",
    are skipped instead of crashing.
    """
    root = os.path.abspath(relative_path)
    max_date = None
    for _, _, filenames in os.walk(root):
        for file_name in filenames:
            # endswith avoids false matches such as "foo.csv.bak".
            if not file_name.endswith(".csv"):
                continue
            try:
                d = int(file_name[:-len(".csv")])
            except ValueError:
                # Not a YYYYMMDD-named file; ignore it.
                continue
            if max_date is None or max_date < d:
                max_date = d
    if max_date is None:
        return "2000/1/1"
    return datetime.datetime.strptime(str(max_date), "%Y%m%d").date().strftime("%Y/%m/%d")
def get_abs_path(relative_path: str):
    """Resolve `relative_path` against the current working directory."""
    absolute = os.path.abspath(relative_path)
    return absolute
| true
|
cb9354fb70ec0197fa31fee2e3dc6d98a9c7de8a
|
Python
|
cuiboautotest/learnpython3
|
/算法练习/面试/最长连续相同字符长度.py
|
UTF-8
| 153
| 2.84375
| 3
|
[] |
no_license
|
# Length of the longest run of consecutive identical characters in s.
s = "cc"
res = 1    # best run length seen so far
count = 1  # length of the current run
for i in range(1, len(s)):
    # Restart the run on a change of character; otherwise extend it and
    # update the best length.
    if s[i] != s[i - 1]:
        count = 1
    else:
        count += 1
        res = max(res, count)
print(res)
| true
|
4c12d875571c6a135b4edba60df961a35ade2059
|
Python
|
lclarkmichalek/noughtsandcrosses
|
/src/server.py
|
UTF-8
| 7,677
| 2.734375
| 3
|
[] |
no_license
|
import select
import socket
import threading
import time
import json
class Server(object):
    """Top-level game server: owns the shared event pool, the thread pool
    and the accept-loop thread."""
    def __init__(self, address, options):
        self._options = options
        self._address = address
        self._epool = EventPool()
        self._tpool = ThreadPool(self._epool)
    def start(self):
        # Spawn the accept loop under the fixed thread id 'Server'.
        self._tpool.addThread(ServerThread, 'Server', self._address,
                              self._options)
        self._tpool.runThread('Server')
    def stop(self):
        self._tpool.killAll()
class ThreadPool(object):
    """Registry of managed threads, keyed by an explicit id.

    Added threads get `id`, `_tpool` and `_epool` attributes injected and
    are registered with the shared EventPool.
    """
    def __init__(self, eventpool):
        self._threads = {}
        self._epool = eventpool
    def addThread(self, thread, id, *args, **kwargs):
        """Instantiate `thread` (a Thread subclass) under `id`."""
        self._threads[id] = thread(*args, **kwargs)
        self._threads[id].id = id
        self._threads[id]._tpool = self
        self._threads[id]._epool = self._epool
        self._epool.addThread(self._threads[id])
    def runThread(self, id):
        self._threads[id].start()
    def startThread(self, id):
        # NOTE(review): identical to runThread apart from setting `status`.
        self._threads[id].start()
        self._threads[id].status = "Running"
    def pauseThread(self, id):
        self._threads[id].interupt()
        self._threads[id].status = "Paused"
    def resumeThread(self, id):
        self._threads[id].resume()
        self._threads[id].status = "Running"
    def killThread(self, id, interval):
        """Interrupt then kill `id`, sleeping `interval` seconds between
        steps so the thread has time to notice each flag."""
        self._threads[id].interupt()
        time.sleep(interval)
        self._threads[id].kill()
        time.sleep(interval)
        del self._threads[id]
        self._epool.removeThread(id)
    def killAll(self):
        #Can't use iterations because dict changes size when
        #threads are deleted
        # NOTE(review): `.keys()[0]` only works on Python 2 (list, not view).
        while len(self._threads):
            self.killThread(self._threads.keys()[0], 1)
class EventPool(object):
    """Per-thread message queues keyed by thread id.

    Threads identify themselves via `threading.currentThread().id`, an
    attribute injected by ThreadPool.addThread.
    """
    def __init__(self):
        self._queues = {}
        self._shutdown = False
    def addThread(self, thread):
        """Create an empty queue for `thread` (must expose an `id`)."""
        self._queues[thread.id] = []
    def removeThread(self, id):
        del self._queues[id]
    def addEvent(self, event):
        """Deliver `event` to every queue named in event.recipients.

        "Self" addresses the calling thread's own queue.  After delivery
        the event is stamped with its sender and its routing fields
        (recipients, priority) are stripped.
        """
        if self._shutdown: return
        # print() with a single argument is valid on both Python 2 and 3.
        print(event.toJson())
        for id in event.recipients:
            if id == "Self":
                self._queues[threading.currentThread().id].append(event)
                # BUG FIX: "Self" is not a real queue name; without this
                # `continue` the lookup below always raised RuntimeWarning.
                continue
            if not id in self._queues.keys():
                raise RuntimeWarning("Message addressed to unknown recipient")
            # BUG FIX: was `appent`, which raised AttributeError at runtime.
            self._queues[id].append(event)
        event.sender = threading.currentThread().id
        del event.recipients
        del event.priority
    def queuedEvents(self):
        """Number of events waiting for the calling thread."""
        id = threading.currentThread().id
        return len(self._queues[id])
    def nextEvent(self):
        """Pop and return the oldest event queued for the calling thread."""
        id = threading.currentThread().id
        return self._queues[id].pop(0)
    def shutdown(self):
        # Further addEvent calls become no-ops.
        self._shutdown = True
    def close(self, interval):
        self.shutdown()
        time.sleep(interval)
        del self  # only unbinds the local name; kept from the original
class Event(object):
    """A routed message: header, payload, recipient list and priority."""

    # NOTE(review): mutable default arguments are shared across calls —
    # kept as-is to preserve the original behaviour.
    def __init__(self, Header="", Content=[], Recipients=[], Priority=0):
        self.header = Header
        self.content = Content
        self.recipients = Recipients
        self.priority = Priority

    def toJson(self):
        """Serialise the event as a JSON array."""
        payload = [self.header, self.content, self.recipients, self.priority]
        return json.dumps(payload)
class ServerThread(threading.Thread):
    """Accept loop: listens on `address` and spawns a ClientThread (keyed
    by the client's address) for each connection, up to
    options["Players"] simultaneous clients."""
    def __init__(self, address, options):
        threading.Thread.__init__(self)
        self._options = options
        self._sock = socket.socket(socket.AF_INET,
                                   socket.SOCK_STREAM)
        self._sock.bind(address)
        self._sock.listen(self._options["Players"])
        self._killed = False
        self._noConnected = 0
    def run(self):
        while not self._killed:
            if self._noConnected < self._options["Players"]:
                # Blocks until a client connects.
                (clientsocket, address) = self._sock.accept()
                self._tpool.addThread(ClientThread, address, clientsocket)
                self._tpool.runThread(address)
                self._noConnected += 1
            else:
                # Table is full; idle until a slot frees up.
                time.sleep(1)
    def threadDisconnected(self):
        # Called when a client drops, freeing a player slot.
        self._noConnected -= 1
    def interupt(self):
        # Pausing the accept loop is not supported.
        pass
    def kill(self):
        self._killed = True
class ClientThread(threading.Thread):
    """Per-client worker: relays length-prefixed JSON messages between the
    client socket and the shared EventPool.  (Python 2 code: `print x`.)"""
    def __init__(self, socket):
        threading.Thread.__init__(self)
        self._sock = socket
        self._sock.setblocking(0)
        # Counts consecutive passes with pending input (see _read).
        self._moving = 0
        self._options = {'Timeout': 1}
    def run(self):
        self._interupted = False
        self._killed = False
        # NOTE(review): self._data is read/written below but never
        # initialised anywhere visible — likely a latent AttributeError.
        while not self._checkRun():
            input = self._read()
            # NOTE(review): identity comparison with '' — should be `!=`.
            if input is not '':
                print input
                self._addEvent(input)
            if self._checkRun(): break
            if self._epool.queuedEvents() > 0:
                event = self._epool.nextEvent()
                if event.header == "snd":
                    # Forward the event to the client verbatim.
                    self._write(event.toJson())
                elif event.header == "strdat":
                    self._data[event.content["key"]] = event.content["value"]
                elif event.header == "rqstdat":
                    key = event.content["key"]
                    value = self._data[key]
                    # NOTE(review): `recipients` does not match the
                    # capitalised keyword of Event.__init__ — verify.
                    ev = Event(Header = "snddat", recipients = [event.sender])
                    ev.content = {value: key}
                    self._epool.addEvent(ev)
                elif event.header == "snddat":
                    self._write(event.toJson())
    def interupt(self):
        self._interupted = True
    def resume(self):
        self._interupted = False
    def kill(self):
        self._killed = True
    def _addEvent(self, input):
        # Decode "[header, content, recipients, priority]" and publish it.
        ev = json.loads(input)
        self._epool.addEvent(Event(Header = ev[0], Content = ev[1],
                                   Recipients = ev[2], Priority = ev[3]))
    def _checkRun(self):
        """Return True once killed, False otherwise.

        NOTE(review): while neither flag is set this loop spins without
        sleeping (neither inner branch can fire) — looks like a busy-wait
        bug; kept byte-identical.
        """
        while not (self._killed | self._interupted):
            if self._killed: return True
            elif self._interupted:
                time.sleep(0.1)
        return False
    def _read(self):
        """Block until a message is readable, then return its payload.

        Wire format: 2 ASCII digits of length followed by that many bytes.
        """
        if self._check()[0]:
            self._moving += 1
            if self._moving >= self._options["Timeout"]:
                raise RuntimeError("ClientThread %s timeout" % str(self.id))
        else:
            self._moving = 0
        while not self._check()[0]:
            time.sleep(0.1)
        length = self._sock.recv(2)
        try:
            length = int(length)
        except ValueError:
            raise RuntimeError
        content = self._sock.recv(length)
        return content
    def _write(self, content):
        """Send `content` with a zero-padded 2-digit length prefix.

        Messages longer than 100 bytes raise the local ToLong exception.
        """
        class ToLong(Exception): pass
        length = len(content)
        if length > 100:
            raise ToLong
        if length < 10:
            length = '0' + str(length)
        else:
            length = str(length)
        self._sock.send(str(length) + content)
    def _check(self):
        """Poll the socket; return (readable, writeable).

        Raises RuntimeError if select reports the socket in error state.
        """
        timeout = self._options["Timeout"]
        readable, null, null = select.select([self._sock], [], [], timeout)
        null, writeable, null = select.select([], [self._sock], [], timeout)
        null, null, errorable = select.select([], [], [self._sock], timeout)
        del null
        if bool(errorable): raise RuntimeError(
            "ClientThread %s socket raised error" % self.id)
        return (bool(readable), bool(writeable))
| true
|
f4da1b9bba0a1573707ba6422e7863412233aad2
|
Python
|
AndrewLu1992/lintcodes
|
/115_unique-paths-ii/unique-paths-ii.py
|
UTF-8
| 1,178
| 3.0625
| 3
|
[] |
no_license
|
# coding:utf-8
'''
@Copyright:LintCode
@Author: hanqiao
@Problem: http://www.lintcode.com/problem/unique-paths-ii
@Language: Python
@Datetime: 16-05-29 14:11
'''
class Solution:
    """
    @param obstacleGrid: An list of lists of integers
    @return: An integer
    """
    def uniquePathsWithObstacles(self, obstacleGrid):
        """Count paths from top-left to bottom-right moving only right or
        down; cells marked 1 are blocked."""
        # Degenerate input: mirror the original corner-case behaviour.
        if (obstacleGrid is None or len(obstacleGrid) == 0
                or obstacleGrid[0] is None or len(obstacleGrid[0]) == 0):
            return 1
        rows = len(obstacleGrid)
        cols = len(obstacleGrid[0])
        paths = [[0] * cols for _ in range(rows)]
        # First row: reachable until the first obstacle.
        c = 0
        while c < cols and obstacleGrid[0][c] == 0:
            paths[0][c] = 1
            c += 1
        # First column: reachable until the first obstacle.
        r = 0
        while r < rows and obstacleGrid[r][0] == 0:
            paths[r][0] = 1
            r += 1
        # Interior: blocked cells contribute 0, otherwise the sum of the
        # path counts from above and from the left.
        for row in range(1, rows):
            for col in range(1, cols):
                if obstacleGrid[row][col] != 1:
                    paths[row][col] = paths[row - 1][col] + paths[row][col - 1]
        return paths[-1][-1]
| true
|
e9a0f49373cb8a083495a2106aec6a8672f31505
|
Python
|
etzellux/Python-Demos
|
/NumPy/numpy_basics_4_operators.py
|
UTF-8
| 732
| 3.65625
| 4
|
[] |
no_license
|
#OPERATORS
import numpy as np

list1 = np.arange(0,9,1).reshape(3,3)
list2 = np.arange(9,18,1).reshape(3,3)

list3 = np.array(list1*list2)           # element-wise product
list4 = np.array(np.cross(list1,list2)) # row-wise cross product
list5 = np.array(list1.dot(list2))      # dot (matrix) product
list6 = np.array(list1 @ list2)         # dot (matrix) product

list7 = np.ones((3,3))
list8 = np.random.random((3,3)) #(n,m)
list9 = list7 + list8

list10 = np.random.randint(0,20,5) #(low,high,size)
print(list10.max()) # maximum of the array
print(list10.min()) # minimum of the array
print(list10.sum()) # sum of the array's elements

print(list1.sum(axis=0)) # column-wise sums
print(list1.sum(axis=1)) # row-wise sums
print(list1.min(axis=1)) # smallest value of each row
| true
|
eac8fb3a814e345162f6902a0d35c8de39f11d05
|
Python
|
paulsblackfriday/Auto-Facebook-Messenger
|
/v2.0-fbchat/updateFriends.py
|
UTF-8
| 1,234
| 3.09375
| 3
|
[] |
no_license
|
# This script is used to update the friends database
import fbchat
import sqlite3

# Interactive credentials (NOTE(review): password is echoed in plain text).
username = input('Facebook email: ')
password = input('Facebook password: ')

# Login to facebook
client = fbchat.Client(username, password)

# Connect to SQL Database
connection = sqlite3.connect('FB_Friends.db')
cursor = connection.cursor()

def getUsers(client, Relation, Gender, Category, Adj):
    """Interactively search Facebook users by name and insert chosen ones
    into the `friends` table.  Per match: 'y' inserts (after asking for a
    nickname), 'q' returns to the caller, anything else skips."""
    while True:
        friends = client.getUsers(input('Name to find:'))
        for friend in friends:
            print(friend)
            reply = input('Add to list ?')
            if reply == 'y':
                NickName = input('NickName: ')
                # Parameterised insert: 8 placeholders, 8 values.
                cursor.execute('INSERT INTO friends values(?,?,?,?,?,?,?,?)',\
                    (Relation, Category, friend.name, friend.url, friend.uid, NickName,
                     Gender, Adj))
                connection.commit()
            elif reply == 'q':
                return
            else:
                print('Discarded!')

# Fill in Table
# Endless prompt loop: gather the shared attributes, then add users.
while True:
    Gender = input('Gender:')
    Category = input('Category: ')
    Adj = input('Adj: ')
    Relation = input('Relation: ')
    print(Gender, Category, Adj, Relation)
    getUsers(client, Relation, Gender, Category, Adj)
| true
|
f42dbcc87413d244ad21b0252f0b7541348cc1cb
|
Python
|
MathildeGarrigues/m2ro_project_tabu_search
|
/flight.py
|
UTF-8
| 618
| 3.046875
| 3
|
[] |
no_license
|
import numpy as np
import os
class Flight:
    """A flight in the gate-assignment (tabu search) problem, holding:
    - its number
    - its arrival and departure times
    - the gates it is allowed to use
    - its contribution (objective-function term)
    - the gate currently assigned to it, and whether it is assigned.
    """

    def __init__(self):
        # Sentinel defaults; real values are filled in after construction.
        self.number = 0
        self.arrival_time = 0
        self.departure_time = 100
        self.compatible_gates = []
        self.contribution = 0
        self.gate = 9999  # 9999 means "no gate assigned yet"
        self.assigned = False
| true
|
d003394364b082962d24e04af29af82935682801
|
Python
|
zehuag/gdpy3
|
/src/processors/GTC/meshgrid.py
|
UTF-8
| 1,838
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018 shmilee
'''
Source fortran code:
v110922
-------
diagnosis.F90, subroutine diagnosis:37-50
!!diagnosis xy
if(mype==1)then
open(341,file='meshgrid.out',status='replace')
do i=0,mpsi
write(341,*)psimesh(i)
write(341,*)sprpsi(psimesh(i))
write(341,*)qmesh(i)
write(341,*)kapatmti(i)
write(341,*)kapatmte(i)
write(341,*)kapatmni(i)
write(341,*)kapatmne(i)
enddo
close(341)
endif
'''
import numpy
from ..basecore import BaseCore, log
__all__ = ['MeshgridCoreV110922']
class MeshgridCoreV110922(BaseCore):
    '''
    Meshgrid data

    1) psimesh, sprpsi, qmesh, kapatmti, kapatmte, kapatmni, kapatmne
       Shape of the array data is (mpsi+1,).
    '''
    __slots__ = []
    instructions = ['dig']
    filepatterns = ['^(?P<group>meshgrid)\.out$',
                    '.*/(?P<group>meshgrid)\.out$']
    grouppattern = '^meshgrid$'
    _datakeys = (
        'psimesh', 'sprpsi', 'qmesh',
        'kapatmti', 'kapatmte', 'kapatmni', 'kapatmne')

    def _dig(self):
        '''Read 'meshgrid.out' and return a dict of the seven arrays.'''
        with self.rawloader.get(self.file) as f:
            log.ddebug("Read file '%s'." % self.file)
            outdata = f.readlines()
        sd = {}
        shape = (7, len(outdata) // 7)
        # BUG FIX: test the remainder *before* truncating; the original
        # truncated first, so `len(outdata) % 7` was always 0 and the
        # "missing data" warning could never fire.
        if len(outdata) % 7 != 0:
            log.warn("Missing some raw data in '%s'! Guess the shape '%s'."
                     % (self.file, shape))
        # Drop any trailing partial record so the reshape below succeeds.
        outdata = outdata[:len(outdata) // 7 * 7]
        log.debug("Filling datakeys: %s ..." % str(self._datakeys[:]))
        outdata = numpy.array([float(n.strip()) for n in outdata])
        # Column-major reshape: the file interleaves the 7 quantities per
        # flux surface, so row i becomes quantity i across all surfaces.
        outdata = outdata.reshape(shape, order='F')
        for i, key in enumerate(self._datakeys):
            sd.update({key: outdata[i]})
        return sd
| true
|
e283eec9bc4c4570734368f1f9ef14a571f3ed35
|
Python
|
ashvatb21/cs440
|
/template4/tes.py
|
UTF-8
| 5,548
| 3.4375
| 3
|
[] |
no_license
|
"""
Part 3: Here you should improve viterbi to use better laplace smoothing for unseen words
This should do better than baseline and your first implementation of viterbi, especially on unseen words
"""
import math
def viterbi_2(train, test):
    '''
    input:  training data (list of sentences, with tags on the words)
            test data (list of sentences, no tags on the words)
    output: list of sentences with tags on the words
    E.g., [[(word1, tag1), (word2, tag2)], [(word3, tag3), (word4, tag4)]]
    '''
    # Train, smooth both models with hapax-scaled Laplace smoothing, then
    # decode every test sentence.  (The initial probability is unused.)
    _, emission, transition, tag_alpha = training(train)
    emission = laplace_smoothening(emission, tag_alpha)
    transition = laplace_smoothening(transition, tag_alpha)
    return build_viterbi_predictions(emission, transition, test)
def laplace_smoothening(p_emission, alpha):
    """Convert per-tag counts to log-probabilities with Laplace smoothing.

    `alpha[tag]` is the per-tag smoothing constant; a pseudo-word "UNK"
    absorbs the unseen-word mass.  The dict is modified in place and also
    returned.
    """
    for tag, counts in p_emission.items():
        k = alpha[tag]
        total = sum(counts.values())
        # +1 in the vocabulary size accounts for the UNK pseudo-word.
        denom = total + k * (len(counts) + 1)
        for word in counts:
            counts[word] = math.log((counts[word] + k) / denom)
        counts["UNK"] = math.log(k / denom)
    return p_emission
def training(train):
    """Count emissions and transitions from tagged `train` sentences.

    Returns (p_initial, p_emission, p_transition, prob_of_tag) where
    p_emission[tag][word] and p_transition[tag][previous_tag] are raw
    counts, and prob_of_tag[tag] is a hapax-based smoothing constant
    (each tag's scaled share of the words seen exactly once).
    """
    # transition probability
    # emission probability
    # initial probability
    p_emission = {}
    p_transition = {}
    p_initial = 1
    tag_list = {}
    hapax = {}
    for line in train[:]:
        previous_tag = "START"
        # emission probability
        for word, tag in line:
            if tag in p_emission:
                each_tag_map = p_emission.get(tag)
                each_tag_map[word] = each_tag_map.get(word, 0) + 1
                p_emission[tag] = each_tag_map
            else:
                p_emission[tag] = {word: 1}
            if tag in tag_list:
                tag_list[tag] += 1
            else:
                tag_list[tag] = 1
            # transition probability
            if tag in p_transition:
                each_tag_map = p_transition.get(tag)
                each_tag_map[previous_tag] = each_tag_map.get(previous_tag, 0) + 1
                p_transition[tag] = each_tag_map
                previous_tag = tag
            else:
                p_transition[tag] = {previous_tag: 1}
                previous_tag = tag
    # Hapax legomena: words that occur exactly once under a tag.
    for key in p_emission:
        d = p_emission[key]
        for k in d:
            if d[k] == 1:
                if key in hapax:
                    hapax[key].append(k)
                else:
                    hapax[key] = [k]
    prob_of_tag = {}
    for key in hapax:
        prob_of_tag[key] = len(hapax[key])
    total_words_in_hapax = sum(prob_of_tag.values())
    # Scale each tag's hapax share down to a small smoothing constant.
    for key in hapax:
        prob_of_tag[key] /= (total_words_in_hapax * 100000)
    # Tags with no hapax words still need a (tiny) nonzero constant.
    list_of_tags = list(p_emission.keys())
    for key in list_of_tags:
        if key not in prob_of_tag:
            prob_of_tag[key] = 0.00001/total_words_in_hapax
    return p_initial, p_emission, p_transition, prob_of_tag
def build_viterbi_predictions(p_emission, p_transition, test):
    """Decode every sentence in `test` with the trained model and return
    the list of tagged sentences."""
    return [build_viterbi(sentence, p_emission, p_transition)
            for sentence in test[:]]
def build_viterbi(line, p_emission, p_transition):
    """Tag one sentence `line` with the Viterbi algorithm.

    `p_emission[tag]` and `p_transition[tag]` map words / previous tags to
    log-probabilities and must contain an "UNK" fallback (added by
    laplace_smoothening).  Returns a list of (word, tag) pairs.

    NOTE(review): the backtrace replaces the first word's tag with the
    literal 'START' (see the pop/insert near the end) — kept as-is.
    """
    V = [{}]  # V[t][tag] = {"prob": best log-prob, "prev": best prior tag}
    list_of_tags = list(p_emission.keys())
    no_of_words = len(line)
    # Initialise column 0 from the emission model only.
    for tag in list_of_tags:
        # BUG FIX: membership must be tested in the per-tag word dict
        # (p_emission[tag]), not in the dict of tags itself — the old test
        # `line[0] in p_emission` was effectively always False, so every
        # first word fell through to the UNK probability.  This now matches
        # the `line[t] in p_emission[tag]` lookup used for t >= 1 below.
        if line[0] in p_emission[tag]:
            V[0][tag] = {"prob": p_emission[tag].get(line[0]), "prev": None}
        else:
            V[0][tag] = {"prob": p_emission[tag].get('UNK'), "prev": None}
    for t in range(1, no_of_words):
        V.append({})
        for tag in list_of_tags:
            # Seed the maximisation with the first candidate predecessor.
            max_tr_prob = V[t-1][list_of_tags[0]].get("prob")
            if list_of_tags[0] in p_transition[tag]:
                max_tr_prob += p_transition[tag].get(list_of_tags[0])
            else:
                max_tr_prob += p_transition[tag].get("UNK")
            prev_st_selected = list_of_tags[0]
            for prev_st in list_of_tags[1:]:
                # NOTE(review): a falsy (0.0/None) stored prob is coerced
                # to 0 here — kept from the original.
                if V[t-1][prev_st].get("prob"):
                    tr_prob = V[t-1][prev_st].get("prob")
                else:
                    tr_prob = 0
                if prev_st in p_transition[tag]:
                    tr_prob += p_transition[tag].get(prev_st)
                else:
                    tr_prob += p_transition[tag].get("UNK")
                if tr_prob > max_tr_prob:
                    max_tr_prob = tr_prob
                    prev_st_selected = prev_st
            # Add the emission term for the current word (UNK fallback).
            max_prob = max_tr_prob
            if line[t] in p_emission[tag]:
                max_prob += p_emission[tag].get(line[t])
            else:
                max_prob += p_emission[tag].get("UNK")
            V[t][tag] = {"prob": max_prob, "prev": prev_st_selected}
    # Pick the best final state, then backtrace.
    opt = []
    max_prob = -9999999
    previous = 'None'
    for st, data in V[-1].items():
        if data["prob"] > max_prob:
            max_prob = data["prob"]
            best_st = st
    opt.append(best_st)
    previous = best_st
    for t in range(len(V) - 2, -1, -1):
        opt.insert(0, V[t+1][previous].get("prev"))
        previous = V[t+1][previous].get("prev")
    # Force-tag the first word as 'START' (original behaviour).
    opt.pop(0)
    opt.insert(0, 'START')
    l = []
    for index, word in enumerate(line):
        l.append((word, opt[index]))
    return l
| true
|
455a63e717edfc0ff7a66f9e1255d454d05a2ab9
|
Python
|
sudhirkk/CIS530
|
/util/removeSolutions.py
|
UTF-8
| 267
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/python
import sys,re

# Filter an exercise file on stdin: drop everything between
# '### BEGIN SOLUTION' and '### END SOLUTION', replacing the end marker
# with a '### YOUR CODE HERE!' placeholder, and print the result.
total=''
cutout=False
for l in sys.stdin:
    if '### BEGIN SOLUTION' in l:
        cutout=True
    if '### END SOLUTION' in l:
        cutout=False
        l=l.replace('### END SOLUTION','### YOUR CODE HERE!')
    # While inside a solution region, skip the line entirely.
    if cutout: continue
    total+=l
print(total)
| true
|
cda11b1f43dea351d848572740f3b67c025677cb
|
Python
|
csxeba/Karakterlap
|
/Kijelzo.py
|
UTF-8
| 4,336
| 2.78125
| 3
|
[] |
no_license
|
# -*- coding: Utf-8 -*-
from Karakterlap.data.frame_foTul_kijelzo import *
from Karakterlap.data.frame_harcertekek import *
from Karakterlap.data.frame_kepzettsegek import *
from Karakterlap.data.frame_pszi_magia import *
from Karakterlap.data.frame_szemelyes import *
from Karakterlap.data.objektumok import *
class KarakterlapAblak(Tk):
    """The character-sheet window class (M.A.G.U.S. tabletop RPG)."""
    def __init__(self):
        Tk.__init__(self)
        # Currently open detail Toplevel and the name of the panel it
        # shows (None when no panel is open).
        self.tl = None
        self.tl_aktiv = None
        self.bot_frame = None
        # Shared character object read by the panel frames.
        globz.kar = karakter.Karakter(self)
        self.title("M.A.G.U.S. karakterlap - Új Törvénykönyv alapján")
        # Top area: splits horizontally into further parts — main
        # attributes on the left, personal data next to them.
        fotul = FrameFoTulKijelzo(self)
        fotul.grid(row=0, column=0)
        szemelyes = FrameSzemelyes(self)
        szemelyes.grid(row=0, column=1)
        frame_mid = Frame(self)
        frame_mid.grid(row=1, column=0, columnspan=3)
        # One toggle button per detail panel; the label doubles as the key.
        self.buttons = {}
        for c in ("Harcértékek", "Képzettségek", "Pszi-Mágia"):
            self.buttons[c] = Button(frame_mid, width=24, height=3,
                                     text=c, font=14, bd=5,
                                     command=lambda arg=c: self.init_tl(arg))
            self.buttons[c].pack(side=LEFT)

    def init_tl(self, melyik):
        """Open the detail panel *melyik*, or close it when already open."""
        # Destroy any previously open panel window; AttributeError means
        # none was ever created.
        try:
            self.tl.destroy()
        except AttributeError:
            self.tl_aktiv = None
        if self.tl_aktiv == melyik:
            # Clicking the active panel's button toggles it closed.
            self.close_tl()
            return
        self.tl = Toplevel(self)
        self.tl.geometry('+100+100')
        # Visually mark only the selected button as pressed.
        for c in self.buttons:
            self.buttons[c].configure(relief=RAISED)
        self.buttons[melyik].configure(relief=SUNKEN)
        # Dispatch on the panel name to the matching frame class.
        self.bot_frame = {"Harcértékek": FrameHarcertekek,
                          "Képzettségek": FrameKepzettsegek,
                          "Pszi-Mágia": FramePsziMagia,
                          }[melyik](self.tl)
        self.bot_frame.pack()
        self.tl.protocol('WM_DELETE_WINDOW', self.close_tl)
        self.tl_aktiv = melyik

    def close_tl(self):
        """Close the open detail panel and release its toggle button."""
        self.buttons[self.tl_aktiv].configure(relief=RAISED)
        self.tl.destroy()
        self.tl_aktiv = None
if __name__ == '__main__':
    globz.modeflag = "karakterlap"
    # NOTE(review): two KarakterlapAblak (Tk root) instances are created —
    # `globz.rootwin` here and `root` below; presumably only one window is
    # intended.  TODO confirm which one the rest of the app uses.
    globz.rootwin = KarakterlapAblak()
    # Build a Fegyver (weapon) object for every weapon in the resources.
    for tipus in hasznos.slist(resource.fegyverek):
        for fnev in hasznos.slist(resource.fegyverek[tipus]):
            globz.fegyverek[fnev] = Fegyver(globz.kar, tipus, fnev)
    root = KarakterlapAblak()
    root.mainloop()
"""
TODO:
##############################################################################
- frame_pszi_magia-ban meg kell csinálni a fokokat és képzettségeket, hogy ugyan
azon a var-on legyenek, mint a másik frame-en.
- Képzettségek toplevelben rosszul működik a Kp visszanyerés
- A harcérték-növelések Toplevelben vhogy reprezentálni kéne a maradék KAP-ok
számát.
- Az említett pontoknál figyelembe kell venni a képzettségekből adódó bónuszokat,
de oda kell figyelni, mert a képzettség különböző fokai különböző mennyiségű
pontot adnak, így ha 3. szinten valaminek felveszi a 4. fokát és addig 3. fokú
volt, onnantól több bónusz jár. Ezt reset-kor el kell tudni számolni valahogy.
- Be kell állítani a Topleveleket, hogy hová kerüljenek a képernyőn geometry()
metódussal kell, ('+x+y') formátumban kell megadni.
- Harcértékek mellé be kell huzalozni a páncélt és a pajzsot.
- Képzettségeket is be kell huzalozni normálisan!
- Felugró moduloknál a hibaüzenetek hátraküldik a modul főablakát.
Ez csúnya és sokszor a főablak mögé kerül miattuk. Destroy()-olni kellene a
modul ablakát a hibaüzenet után (vagy eliminálni a hülye hibaüzenetet)
- A fegyvereket is meg kellene csinálni, hogy kiválaszthatóak legyenek.
ÉS HÁT IGEN... ÚJRA KÉNE GONDOLNI AZ EGÉSZET:
AZ LENNE A LEGJOBB, HA A KARAKTERRŐL LENNE EGY KIINDULÁSI ÁLLAPOT ELMENTVE,
HA ESETLEG A JÚZER ÚJRA KÍVÁNNÁ OSZTANI MINDEN PONTJÁT, AKKOR LEHESSEN CSINÁLNI
EGY <ULTIMATE RESET ALL>-T ÉS VISSZAÁLLÍTANI MINDENT A KIINDULÁSI ÁLLAPOTBA.
SŐT MÉG JOBB LENNE, HA KÜLÖN TUDNA VISSZA-RESETELNI EGY KIINDULÁSI ÁLLAPOTBA ÉS
KÜLÖN TUDNA VISSZA-RESETELNI ELSŐ SZINTRE, HA ELTÉRNE A KETTŐ EGYMÁSTÓL.
"""
| true
|
67e93b2a0903f595f3a705b7b2935b94181a64bc
|
Python
|
shaadomanthra/cbpython-advanced
|
/day20/DayPredictor/models/YearModel.py
|
UTF-8
| 249
| 2.765625
| 3
|
[] |
no_license
|
from models.db import *
class YearModel:
    """Persists leap-year lookups into the local 'year.db' database."""
    def insert(self,year,leap):
        """Insert one (year, leap) row into the `leapyear` table.

        NOTE(review): the query is assembled with an f-string, so any
        non-numeric input is interpolated verbatim — SQL injection risk.
        Switch to a parameterised query if the `execute` helper from
        models.db supports bind parameters.
        """
        conn = connect('year.db')
        query = f"INSERT INTO leapyear (year,leap) VALUES({year},{leap})"
        execute(conn,query)
        print("One record inserted")
| true
|
86be03ecb71a36e70e1295a4d0d89eb5e4c98f20
|
Python
|
bascker/py-note
|
/src/base/coroutine.py
|
UTF-8
| 732
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/python
"""
协程(coroutine)的使用
@author bascker
"""
import os
import sys
sys.path.append(os.getcwd())
from src.utils.applogger import AppLogger
LOG = AppLogger("coroutine.py")
def contain(string):
    """Coroutine: log, for every line sent in, whether it contains *string*.

    Prime with next() before the first send().
    """
    LOG.info("contain for string: %s" % string)
    while True:
        received = (yield)  # suspend until a line is sent in
        LOG.info('The line "%s" is contains string: %s' % (received, string in received))
def main():
    """Drive the `contain` coroutine with two sample lines."""
    checker = contain("python")
    next(checker)  # prime the coroutine up to its first yield
    for message in ("hello world", "hello python"):
        checker.send(message)
    checker.close()  # shut the coroutine down


if __name__ == '__main__':
    main()
| true
|
358c9ab8a15e672dd201c178b9df4ae45d457df4
|
Python
|
muteshi/anagram-hackerrack-solution
|
/anagram.py
|
UTF-8
| 2,241
| 4.03125
| 4
|
[] |
no_license
|
def anagram(s):
    """Return the minimum number of character substitutions needed to make
    the two halves of *s* anagrams of each other.

    Returns -1 when len(s) is odd (the string cannot be split in half).

    The original trimmed characters with repeated str.count/str.replace
    calls inside a loop (quadratic); a Counter multiset difference gives
    the same answer in linear time.
    """
    from collections import Counter  # local import: this module has no import section

    if len(s) % 2 != 0:
        return -1  # odd length: no equal halves exist
    half = len(s) // 2
    first_half = Counter(s[:half])
    second_half = Counter(s[half:])
    # Every character of the first half with no partner left in the second
    # half must be substituted exactly once.
    return sum((first_half - second_half).values())
def making_anagram(s1,s2):
    """Return the minimum total number of deletions (from either string)
    needed to make *s1* and *s2* anagrams of each other.

    The original mutated sorted copies with repeated count/replace calls
    (quadratic, with several dead variables); every surplus occurrence on
    either side must be deleted, which is exactly the sum of the two
    Counter multiset differences.
    """
    from collections import Counter  # local import: this module has no import section

    counts1 = Counter(s1)
    counts2 = Counter(s2)
    surplus_in_s1 = sum((counts1 - counts2).values())
    surplus_in_s2 = sum((counts2 - counts1).values())
    return surplus_in_s1 + surplus_in_s2
| true
|
7146403fbd9b033234fee379b9b42c339efb0870
|
Python
|
robintw/gputools
|
/gputools/convolve/convolve.py
|
UTF-8
| 3,660
| 2.796875
| 3
|
[] |
no_license
|
import logging
logger = logging.getLogger(__name__)
import os
import numpy as np
from gputools import OCLProgram, OCLArray, OCLImage, get_device
from gputools.core.ocltypes import assert_bufs_type
import sys
from abspath import abspath
def convolve(data, h, res_g=None):
    """
    convolves 1d-3d data with kernel h

    data and h can either be numpy arrays or gpu buffer objects (OCLArray,
    which must be float32 then)

    boundary conditions are clamping to zero at edge.

    Raises
    ------
    ValueError : if data is not 1-3 dimensional or data/h dims differ
    TypeError  : if data and h are not both numpy arrays or both OCLArrays
    """
    if len(data.shape) not in (1, 2, 3):
        raise ValueError("dim = %s not supported" % (len(data.shape)))
    if len(data.shape) != len(h.shape):
        raise ValueError("dimension of data (%s) and h (%s) are different"
                         % (len(data.shape), len(h.shape)))

    if isinstance(data, OCLArray) and isinstance(h, OCLArray):
        return _convolve_buf(data, h, res_g)
    if isinstance(data, np.ndarray) and isinstance(h, np.ndarray):
        return _convolve_np(data, h)

    # BUG FIX: the original message "unknown types ()" had no %s
    # placeholders, so the %-formatting itself raised "not all arguments
    # converted" instead of this intended error.  (The unreachable
    # trailing `return` was removed as well.)
    raise TypeError("unknown types (%s, %s)" % (type(data), type(h)))
def _convolve_np(data, h):
    """
    numpy variant: upload both arrays to the GPU as float32, convolve
    there, and download the result back as a numpy array.
    """
    data_g = OCLArray.from_array(data.astype(np.float32, copy = False))
    h_g = OCLArray.from_array(h.astype(np.float32, copy = False))
    return _convolve_buf(data_g, h_g).get()
def _convolve_buf(data_g, h_g , res_g = None):
    """
    buffer variant: run the OpenCL convolution kernel on float32 buffers.

    res_g: optional preallocated output buffer; a fresh one is created
    when omitted.
    """
    assert_bufs_type(np.float32,data_g,h_g)
    prog = OCLProgram(abspath("kernels/convolve.cl"))
    if res_g is None:
        res_g = OCLArray.empty(data_g.shape,dtype=np.float32)
    Nhs = [np.int32(n) for n in h_g.shape]
    # Kernel is selected by dimensionality: convolve1d_buf / 2d / 3d.
    kernel_name = "convolve%sd_buf"%(len(data_g.shape))
    # Global work size is the data shape reversed (x-fastest ordering).
    prog.run_kernel(kernel_name,data_g.shape[::-1],None,
                    data_g.data,h_g.data,res_g.data,
                    *Nhs)
    return res_g
def _convolve3_old(data,h, dev = None):
    """convolves 3d data with kernel h on the GPU Device dev
    boundary conditions are clamping to edge.
    h is converted to float32

    if dev == None the default one is used

    Legacy image-based variant (convolve3.cl); kept for reference — the
    buffer-based path above is the current implementation.
    """
    if dev is None:
        dev = get_device()

    if dev is None:
        raise ValueError("no OpenCLDevice found...")

    dtype = data.dtype.type

    # Kernel build options depend on the input dtype (float32 or uint16).
    dtypes_options = {np.float32:"",
                      np.uint16:"-D SHORTTYPE"}

    if not dtype in dtypes_options.keys():
        raise TypeError("data type %s not supported yet, please convert to:"%dtype,dtypes_options.keys())

    prog = OCLProgram(abspath("kernels/convolve3.cl"),
                      build_options = dtypes_options[dtype])

    hbuf = OCLArray.from_array(h.astype(np.float32))
    img = OCLImage.from_array(data)
    res = OCLArray.empty(data.shape,dtype=np.float32)

    # Data and kernel shapes are passed to the kernel as int32 scalars.
    Ns = [np.int32(n) for n in data.shape+h.shape]

    prog.run_kernel("convolve3d",img.shape,None,
                    img,hbuf.data,res.data,
                    *Ns)

    return res.get()
def test_convolve():
    """Smoke test: run convolve on small 1-D and 2-D slices.

    The full 3-D case is left commented out and no results are asserted.
    """
    from time import time  # NOTE(review): unused import kept from original

    data = np.ones((100,120,140))
    h = np.ones((10,11,12))

    # out = convolve(data,h)

    out = convolve(data[0,...],h[0,...])
    out = convolve(data[0,0,...],h[0,0,...])
if __name__ == '__main__':
    # test_convolve()
    N = 100
    ndim = 3

    # Slightly unequal axis lengths: (100, 103, 106).
    d = np.zeros([N+3*i for i,n in enumerate(range(ndim))],np.float32)
    h = np.ones((11,)*ndim,np.float32)

    # Scatter a few random impulses into d.
    # NOTE(review): int(...)**(1./d.ndim)/10 is a float under Python 3 and
    # np.random.randint rejects a float size — this line is Python-2 only.
    ind = [np.random.randint(0,n,int(np.prod(d.shape)**(1./d.ndim))/10) for n in d.shape]
    d[tuple(ind)] = 1.
    h *= 1./np.sum(h)  # normalise the kernel

    out1 = convolve(d,h)        # numpy path
    d_g = OCLArray.from_array(d)
    h_g = OCLArray.from_array(h)
    res_g = convolve(d_g,h_g)   # GPU-buffer path
    out2 = res_g.get()
| true
|
0080a386cf268ae5c3a84af778897f5aaea9b2f2
|
Python
|
JJong0416/Algorithm
|
/Leetcode/String/Most_Common_Word_819/819_20210228_eugene-doobu.py
|
UTF-8
| 1,124
| 3.296875
| 3
|
[] |
no_license
|
import operator
class Solution:
    def mostCommonWord(self, paragraph, banned):
        """Return the most frequent word of *paragraph* not in *banned*.

        Matching is case-insensitive; the characters ! ? ' , ; . are
        treated as word separators (the same set the original replaced).

        Note: the original signature annotated ``banned: List[str]``
        without importing ``List`` from ``typing``, which raises a
        NameError when the class body executes; the annotations are
        dropped here (callers are unaffected).
        """
        cleaned = paragraph.lower()
        for separator in ("!", "?", "'", ",", ";", "."):
            cleaned = cleaned.replace(separator, " ")
        counts = {}
        for word in cleaned.split(" "):
            if word:  # replacing punctuation with spaces leaves empty tokens
                counts[word] = counts.get(word, 0) + 1
        banned_words = set(banned)
        # Highest count among the non-banned words (the original built a
        # full sorted list and walked it backwards — O(n log n) for an
        # O(n) selection).
        return max((w for w in counts if w not in banned_words),
                   key=counts.get)
| true
|
a77010c059e759d8570d6b9d5a4a085a58d2b117
|
Python
|
yurekliisa/BBC
|
/Server/ChatBot/venv/Lib/site-packages/pygubu/widgets/editabletreeview.py
|
UTF-8
| 10,717
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# encoding: utf8
#
# Copyright 2012-2013 Alejandro Autalán
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check https://github.com/alejandroautalan/pygubu
from __future__ import unicode_literals
import functools
try:
import tkinter as tk
import tkinter.ttk as ttk
except:
import Tkinter as tk
import ttk
class EditableTreeview(ttk.Treeview):
    """A simple editable treeview

    It uses the following events from Treeview:
        <<TreviewSelect>>
        <4>
        <5>
        <KeyRelease>
        <Home>
        <End>
        <Configure>
        <Button-1>
        <ButtonRelease-1>
        <Motion>
    If you need them use add=True when calling bind method.

    It Generates two virtual events:
        <<TreeviewInplaceEdit>>
        <<TreeviewCellEdited>>
    The first is used to configure cell editors.
    The second is called after a cell was changed.
    You can know wich cell is being configured or edited, using:
        get_event_info()
    """
    def __init__(self, master=None, **kw):
        ttk.Treeview.__init__(self, master, **kw)
        self._curfocus = None            # item id that currently owns the editors
        self._inplace_widgets = {}       # column -> editor widget
        self._inplace_widgets_show = {}  # column -> True while editor is placed
        self._inplace_vars = {}          # column -> tk variable backing the editor
        self._header_clicked = False
        self._header_dragged = False
        self.bind('<<TreeviewSelect>>', self.__check_focus)
        # Mouse-wheel events (X11 buttons 4/5): reposition the editors.
        self.bind('<4>', lambda e: self.after_idle(self.__updateWnds))
        self.bind('<5>', lambda e: self.after_idle(self.__updateWnds))
        #self.bind('<ButtonRelease-1>', self.__check_focus)
        self.bind('<KeyRelease>', self.__check_focus)
        self.bind('<Home>', functools.partial(self.__on_key_press, 'Home'))
        self.bind('<End>', functools.partial(self.__on_key_press, 'End'))
        self.bind('<Button-1>', self.__on_button1)
        self.bind('<ButtonRelease-1>', self.__on_button1_release)
        self.bind('<Motion>', self.__on_mouse_motion)
        self.bind('<Configure>',
                  lambda e: self.after_idle(self.__updateWnds))

    def __on_button1(self, event):
        """Remember that a press started on a header or column separator."""
        r = event.widget.identify_region(event.x, event.y)
        if r in ('separator', 'header'):
            self._header_clicked = True

    def __on_mouse_motion(self, event):
        """A drag that started on a header means columns are being resized."""
        if self._header_clicked:
            self._header_dragged = True

    def __on_button1_release(self, event):
        """After a column resize, reposition the in-place editors."""
        if self._header_dragged:
            self.after_idle(self.__updateWnds)
        self._header_clicked = False
        self._header_dragged = False

    def __on_key_press(self, key, event):
        """Home/End jump focus to the first/last top-level item."""
        if key == 'Home':
            self.selection_set("")
            self.focus(self.get_children()[0])
        if key == 'End':
            self.selection_set("")
            self.focus(self.get_children()[-1])

    def delete(self, *items):
        """Delete items and reposition the editors afterwards."""
        self.after_idle(self.__updateWnds)
        ttk.Treeview.delete(self, *items)

    def yview(self, *args):
        """Update inplace widgets position when doing vertical scroll"""
        self.after_idle(self.__updateWnds)
        ttk.Treeview.yview(self, *args)

    def yview_scroll(self, number, what):
        self.after_idle(self.__updateWnds)
        ttk.Treeview.yview_scroll(self, number, what)

    def yview_moveto(self, fraction):
        self.after_idle(self.__updateWnds)
        ttk.Treeview.yview_moveto(self, fraction)

    def xview(self, *args):
        """Update inplace widgets position when doing horizontal scroll"""
        self.after_idle(self.__updateWnds)
        ttk.Treeview.xview(self, *args)

    def xview_scroll(self, number, what):
        self.after_idle(self.__updateWnds)
        ttk.Treeview.xview_scroll(self, number, what)

    def xview_moveto(self, fraction):
        self.after_idle(self.__updateWnds)
        ttk.Treeview.xview_moveto(self, fraction)

    def __check_focus(self, event):
        """Checks if the focus has changed"""
        changed = False
        if not self._curfocus:
            changed = True
        elif self._curfocus != self.focus():
            self.__clear_inplace_widgets()
            changed = True
        newfocus = self.focus()
        if changed:
            if newfocus:
                self._curfocus = newfocus
                self.__focus(newfocus)
            self.__updateWnds()

    def __focus(self, item):
        """Called when focus item has changed"""
        cols = self.__get_display_columns()
        for col in cols:
            # Let listeners configure an editor for (col, item).
            self.__event_info = (col, item)
            self.event_generate('<<TreeviewInplaceEdit>>')
            if col in self._inplace_widgets:
                w = self._inplace_widgets[col]
                w.bind('<Key-Tab>',
                       lambda e: w.tk_focusNext().focus_set())
                w.bind('<Shift-Key-Tab>',
                       lambda e: w.tk_focusPrev().focus_set())

    def __updateWnds(self, event=None):
        """Place every visible editor over its cell's bounding box."""
        if not self._curfocus:
            return
        item = self._curfocus
        cols = self.__get_display_columns()
        for col in cols:
            if col in self._inplace_widgets:
                wnd = self._inplace_widgets[col]
                bbox = ''
                if self.exists(item):
                    bbox = self.bbox(item, column=col)
                if bbox == '':
                    # Cell not visible (scrolled away or item gone).
                    wnd.place_forget()
                elif col in self._inplace_widgets_show:
                    wnd.place(x=bbox[0], y=bbox[1],
                              width=bbox[2], height=bbox[3])

    def __clear_inplace_widgets(self):
        """Remove all inplace edit widgets."""
        cols = self.__get_display_columns()
        for c in cols:
            if c in self._inplace_widgets:
                widget = self._inplace_widgets[c]
                widget.place_forget()
                self._inplace_widgets_show.pop(c, None)
                # Widgets are kept (not destroyed) for reuse on next focus.

    def __get_display_columns(self):
        """Return the tuple of currently displayed column identifiers."""
        cols = self.cget('displaycolumns')
        show = (str(s) for s in self.cget('show'))
        if '#all' in cols:
            cols = self.cget('columns') + ('#0',)
        elif 'tree' in show:
            cols = cols + ('#0',)
        return cols

    def get_event_info(self):
        """Return the (column, item) pair of the last virtual event."""
        return self.__event_info

    def __get_value(self, col, item):
        """Read the cell value ('#0' is the tree column's text)."""
        if col == '#0':
            return self.item(item, 'text')
        else:
            return self.set(item, col)

    def __set_value(self, col, item, value):
        """Write the cell value and announce the edit."""
        if col == '#0':
            self.item(item, text=value)
        else:
            self.set(item, col, value)
        self.__event_info = (col, item)
        self.event_generate('<<TreeviewCellEdited>>')

    def __update_value(self, col, item):
        """Copy the editor's value back into the cell if it changed."""
        if not self.exists(item):
            return
        value = self.__get_value(col, item)
        newvalue = self._inplace_vars[col].get()
        if value != newvalue:
            self.__set_value(col, item, newvalue)

    def inplace_entry(self, col, item):
        """Use a ttk.Entry as the in-place editor for (col, item)."""
        if col not in self._inplace_vars:
            self._inplace_vars[col] = tk.StringVar()
        svar = self._inplace_vars[col]
        svar.set(self.__get_value(col, item))
        if col not in self._inplace_widgets:
            self._inplace_widgets[col] = ttk.Entry(self, textvariable=svar)
        entry = self._inplace_widgets[col]
        entry.bind('<Unmap>', lambda e: self.__update_value(col, item))
        entry.bind('<FocusOut>', lambda e: self.__update_value(col, item))
        self._inplace_widgets_show[col] = True

    def inplace_checkbutton(self, col, item, onvalue='True', offvalue='False'):
        """Use a ttk.Checkbutton as the in-place editor for (col, item)."""
        if col not in self._inplace_vars:
            self._inplace_vars[col] = tk.StringVar()
        svar = self._inplace_vars[col]
        svar.set(self.__get_value(col, item))
        if col not in self._inplace_widgets:
            self._inplace_widgets[col] = ttk.Checkbutton(self,
                textvariable=svar, variable=svar, onvalue=onvalue, offvalue=offvalue)
        cb = self._inplace_widgets[col]
        cb.bind('<Unmap>', lambda e: self.__update_value(col, item))
        cb.bind('<FocusOut>', lambda e: self.__update_value(col, item))
        self._inplace_widgets_show[col] = True

    def inplace_combobox(self, col, item, values, readonly=True):
        """Use a ttk.Combobox as the in-place editor for (col, item)."""
        state = 'readonly' if readonly else 'normal'
        if col not in self._inplace_vars:
            self._inplace_vars[col] = tk.StringVar()
        svar = self._inplace_vars[col]
        svar.set(self.__get_value(col, item))
        if col not in self._inplace_widgets:
            self._inplace_widgets[col] = ttk.Combobox(self,
                textvariable=svar, values=values, state=state)
        cb = self._inplace_widgets[col]
        cb.bind('<Unmap>', lambda e: self.__update_value(col, item))
        cb.bind('<FocusOut>', lambda e: self.__update_value(col, item))
        self._inplace_widgets_show[col] = True

    def inplace_spinbox(self, col, item, min, max, step):
        """Use a tk.Spinbox as the in-place editor for (col, item)."""
        if col not in self._inplace_vars:
            self._inplace_vars[col] = tk.StringVar()
        svar = self._inplace_vars[col]
        svar.set(self.__get_value(col, item))
        if col not in self._inplace_widgets:
            self._inplace_widgets[col] = tk.Spinbox(self,
                textvariable=svar, from_=min, to=max, increment=step)
        sb = self._inplace_widgets[col]
        sb.bind('<Unmap>', lambda e: self.__update_value(col, item))
        # BUG FIX: the original bound '<FocusOut>' on `cb`, a name that is
        # not defined in this method (NameError at runtime); it must be `sb`.
        sb.bind('<FocusOut>', lambda e: self.__update_value(col, item))
        self._inplace_widgets_show[col] = True

    def inplace_custom(self, col, item, widget):
        """Register an arbitrary caller-supplied editor for (col, item)."""
        if col not in self._inplace_vars:
            self._inplace_vars[col] = tk.StringVar()
        svar = self._inplace_vars[col]
        svar.set(self.__get_value(col, item))
        self._inplace_widgets[col] = widget
        widget.bind('<Unmap>', lambda e: self.__update_value(col, item))
        widget.bind('<FocusOut>', lambda e: self.__update_value(col, item))
        self._inplace_widgets_show[col] = True
| true
|
8e1849c0cf34f2a7df881146f50374b4e03b9cf6
|
Python
|
total-c/hw_07
|
/Task_01.py
|
UTF-8
| 1,252
| 4.46875
| 4
|
[] |
no_license
|
# Описать функцию fact2(n), вычисляющую двойной факториал: n!! = 1·3·5·...·n,
# если n — нечетное;
# n!! = 2·4·6·...·n, если n — четное (n > 0 — параметр целого типа).
# С помощью этой функции найти двойные факториалы пяти случайных целых чисел.
import random
def fact2(n: int) -> int:
    """Compute the double factorial n!!: the product of every other
    integer up to n (even terms 2·4·…·n for even n, odd terms
    1·3·…·n for odd n)."""
    start = 2 if n % 2 == 0 else 1
    result = 1
    for term in range(start, n + 1, 2):
        result *= term
    return result
def main():
    """Exercise fact2 by printing the double factorial of five random
    integers in [1, 20]."""
    samples = [random.randint(1, 20) for _ in range(5)]
    for value in samples:
        print(fact2(value))


if __name__ == '__main__':
    main()
| true
|
71d036b34875e4abe5d42026f1d54a8d2b95271c
|
Python
|
acid9reen/Artezio
|
/Lesson_8_http_regexp/task4.py
|
UTF-8
| 456
| 3.859375
| 4
|
[] |
no_license
|
'''Date time regexp'''
import re
def main():
    """Validate an ISO-8601-style date-time string entered by the user."""
    date_time = input("Enter the date time: ")
    pattern = re.compile(
        r"\d{4}-([0]\d|1[0-2])-([0-2]\d|3[01])T"
        r"([0-1]\d|2[0-3]):[0-5]\d:[0-5]\d([+-][0-2]\d:[0-5]\d|Z)"
    )
    if pattern.search(date_time):
        print("Date time is valid.")
    else:
        print("Invalid date time.")


if __name__ == '__main__':
    main()
| true
|
d257debf3d3de5318b25db367ad8e1212013f174
|
Python
|
LanguidCat/BaekJoon
|
/2935-소음/2935.py
|
UTF-8
| 138
| 3.296875
| 3
|
[] |
no_license
|
import sys

# Two operands around an operator line; the original mixes
# sys.stdin.readline() and input(), which is preserved here.
A = int(sys.stdin.readline())
op = input()
B = int(sys.stdin.readline())
print(A * B if op == '*' else A + B)
| true
|
ecb8cbddb7ab26174aeb9b5dc2024a2e7ebe0783
|
Python
|
ParkJeongseop/Algorithm
|
/Python/3003.py
|
UTF-8
| 126
| 2.859375
| 3
|
[] |
no_license
|
# Reference piece counts — presumably a full chess set
# (1 king, 1 queen, 2 rooks, 2 bishops, 2 knights, 8 pawns).
full_set = [1, 1, 2, 2, 2, 8]
have = list(map(int, input().split()))
missing = [str(full_set[i] - have[i]) for i in range(6)]
print(' '.join(missing))
| true
|
ca14137f2cfa41dbf9d56add6febc2037184c957
|
Python
|
AmirAbaskohi/Congestion-Control
|
/rtt_diagrams/rtt.py
|
UTF-8
| 1,263
| 2.5625
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt


def _read_rtt(path, time_steps=None):
    """Read one trace file: column 0 is the time step, column 2 the RTT
    sample (rounded to int).

    When *time_steps* is given, column 0 is appended to it as well — only
    done for the first trace, which defines the shared x-axis.
    """
    samples = []
    # The original opened every file without ever closing it; `with`
    # guarantees the handles are released.
    with open(path) as trace:
        for row in trace:
            fields = row.split()
            if time_steps is not None:
                time_steps.append(int(fields[0]))
            samples.append(round(float(fields[2])))
    return samples


time_steps = []
# (label, trace path) pairs, plotted in the original order.
flows = [
    ("reno_flow1", "./reno/reno1.tr"),
    ("reno_flow2", "./reno/reno2.tr"),
    ("cubic_flow1", "./cubic/cubic1.tr"),
    ("cubic_flow2", "./cubic/cubic2.tr"),
    ("yeah_flow1", "./yeah/yeah1.tr"),
    ("yeah_flow2", "./yeah/yeah2.tr"),
]
for index, (label, path) in enumerate(flows):
    data = _read_rtt(path, time_steps if index == 0 else None)
    plt.plot(time_steps, data, label=label)

plt.legend()
plt.show()
| true
|
fae7abd73fbb0e84821939165591c0a81e3c7621
|
Python
|
AliShakoori89/Calculater
|
/Main.py
|
UTF-8
| 2,683
| 2.546875
| 3
|
[] |
no_license
|
from tkinter import *
import GUI
if __name__ == "__main__":
    root = Tk()
    GUI_Frame = GUI.BotGUI(root)
    # Digit buttons 1-9, laid out as a 3x3 grid (rows 6-8, columns 0-2).
    # Note: digits are passed to press() as ints, operators below as
    # padded strings — presumably press() handles both; TODO confirm.
    B=Button(root,text=' 9 ',activebackground="grey",command=lambda : GUI_Frame.press(9), height=1, width=7)
    B.grid(row=8, column=2)
    B=Button(root,text=' 8 ',activebackground="grey",command=lambda : GUI_Frame.press(8), height=1, width=7)
    B.grid(row=8, column=1)
    B=Button(root,text=' 7 ',activebackground="grey",command=lambda : GUI_Frame.press(7), height=1, width=7)
    B.grid(row=8, column=0)
    B=Button(root,text=' 6 ',activebackground="grey",command=lambda : GUI_Frame.press(6), height=1, width=7)
    B.grid(row=7, column=2)
    B=Button(root,text=' 5 ',activebackground="grey",command=lambda : GUI_Frame.press(5), height=1, width=7)
    B.grid(row=7, column=1)
    B=Button(root,text=' 4 ',activebackground="grey",command=lambda : GUI_Frame.press(4), height=1, width=7)
    B.grid(row=7, column=0)
    B=Button(root,text=' 3 ',activebackground="grey",command=lambda : GUI_Frame.press(3), height=1, width=7)
    B.grid(row=6, column=2)
    B=Button(root,text=' 2 ',activebackground="grey",command=lambda : GUI_Frame.press(2), height=1, width=7)
    B.grid(row=6, column=1)
    B=Button(root,text=' 1 ',activebackground="grey",command=lambda : GUI_Frame.press(1), height=1, width=7)
    B.grid(row=6, column=0)
    # Unary operations (row 5).
    B=Button(root,text=" √ ",activebackground="grey",command = GUI_Frame.square, height=1, width=7)
    B.grid(row=5, column=0)
    B=Button(root,text=" x²",activebackground="grey",command = GUI_Frame.exponent, height=1, width=7)
    B.grid(row=5, column=1)
    B=Button(root,text="1/x",activebackground="grey",command = GUI_Frame.one_div_x, height=1, width=7)
    B.grid(row=5, column=2)
    # Binary operators in the rightmost column (column 3).
    B=Button(root,text=' + ',activebackground="grey",command=lambda : GUI_Frame.press(' + '), height=1, width=7)
    B.grid(row=5, column=3)
    B=Button(root,text=' - ',activebackground="grey",command=lambda : GUI_Frame.press(' - '), height=1, width=7)
    B.grid(row=6, column=3)
    B=Button(root,text=' * ',activebackground="grey",command=lambda : GUI_Frame.press(' * '), height=1, width=7)
    B.grid(row=7, column=3)
    B=Button(root,text=' / ',activebackground="grey",command=lambda : GUI_Frame.press(' / '), height=1, width=7)
    B.grid(row=8, column=3)
    # "=" is positioned with absolute coordinates, unlike the rest of the
    # grid-managed buttons.
    B=Button(root,text=" = ", bg='lightgreen', activebackground="grey",command = GUI_Frame.equalpress, height=1, width=24)
    B.place(x=64,y=133)
    clear = Button(root, text='Clear', fg='black', bg='pink',activebackground="grey",command = GUI_Frame.clear, height=1, width=7)
    clear.grid(row=9, column=0)
    # guifunc = GUI.GUI_Functions(root)
    root.mainloop()
| true
|
c001b1b93417020fea170578cf81bb4c9f28180e
|
Python
|
Parallel-Sanskrit-Corpora/data
|
/python/mahabharata_parsing/app/fix_translation.py
|
UTF-8
| 1,249
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
import MySQLdb
import re
from difflib import SequenceMatcher
# Module-level connection/cursor shared by index() below.
# NOTE(review): credentials are hard-coded — move to config/env in real use.
db = MySQLdb.connect(user="sanskrit", passwd="sanskrit", db="sanskrit", charset="utf8")
cursor = db.cursor()
def remove_extra_chars(string):
    """Strip Devanagari verse-number markers of the form ॥<digits>॥
    from *string* and return the result."""
    # Raw string for the pattern; the original's nested quantifier
    # (\d+)+ matches exactly the same text as \d+ but can backtrack
    # pathologically on crafted input.
    return re.sub(r'॥\d+॥', '', string)
def similar(a, b):
    """Return a similarity ratio in [0, 1] between sequences *a* and *b*."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()
def index():
    """Split each row's add_str on the em-dash and store the two parts
    into from_value / to_value of the aranyakaparva table."""
    cursor.execute("""SELECT a.add_str, a.code, a.id FROM aranyakaparva a""")
    rows = cursor.fetchall()
    add_string = None
    for row in rows:
        additional = row[0]
        code = row[1]  # NOTE(review): fetched but never used below
        id = row[2]
        # Remember the last distinct non-NULL add_str (carried across rows).
        if additional != None and add_string != additional:
            add_string = additional
            pass
        # NOTE(review): raises AttributeError when additional is None
        # (NULL add_str) — the check above does not guard this call.
        add_parts = additional.split('—')
        if add_parts and len(add_parts) == 2:
            cursor.execute("""UPDATE aranyakaparva SET from_value=%s, to_value=%s WHERE id=%s""", (add_parts[0], add_parts[1], id))
        # Earlier back-fill of NULL add_str values, kept for reference:
        # if additional is None:
        #     cursor.execute("""UPDATE aranyakaparva SET add_str=%s WHERE id=%s""", (add_string, id))
        # print(add_string, code)
    db.commit()


if __name__ == '__main__':
    # Run the migration, then release the module-level DB resources.
    index()
    cursor.close()
    db.close()
| true
|
93b24ad3ed63642be9d046dff29be1cc7c87d63b
|
Python
|
m0in92/Corona-dataset
|
/script_with_internet_access.py
|
UTF-8
| 3,454
| 2.859375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 14 22:55:56 2020
@author: Moin Ahmed
This python script finds the total corona cases and deaths stat from every country from worldometers.info website.
It then appends these stats to a data_csv csv file.
"""
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup as soup
import csv
import numpy as np
import pandas as pd
from datetime import datetime
from name_clean import name_clean
import requests
# Fetch the live counters table from worldometers (a browser-like
# User-Agent is supplied with the request).
myurl = 'https://www.worldometers.info/coronavirus/'
uClient = Request(myurl, headers={'User-Agent':'Mozilla/5.0'})
page_html = urlopen(uClient).read()
file = soup(page_html, 'html.parser')

# One parallel list per column of the scraped table.
name, t_cases, n_cases, t_deaths, n_deaths, t_recovered, active_cases, serious_condition, n_test, continent=list(),list(), list(), list(), list(),list(),list(),list(),list(),list()
time_date_now = datetime.now()
last_updated_on_website = file.find('div',class_='content-inner').find_all('div')[1].text.split(': ')[1]

# The odd positional offsets (1, 3, 5, ...) index the cells of each table
# row; negative offsets count from the row's end.  NOTE(review): brittle —
# breaks whenever the site changes its column layout; the full find_all()
# is also re-run for every cell of every row (quadratic).
for i in range(len(list(file.tbody.find_all('tr')))):
    name.append(list(file.table.tbody.find_all('tr')[i])[1].text)
    t_cases.append(list(file.tbody.find_all('tr')[i])[3].text)
    n_cases.append(list(file.tbody.find_all('tr')[i])[5].text)
    t_deaths.append(list(file.tbody.find_all('tr')[i])[7].text)
    n_deaths.append(list(file.tbody.find_all('tr')[i])[9].text)
    t_recovered.append(list(file.tbody.find_all('tr')[i])[11].text)
    active_cases.append(list(file.tbody.find_all('tr')[i])[13].text)
    serious_condition.append(list(file.tbody.find_all('tr')[i])[15].text)
    n_test.append(list(file.tbody.find_all('tr')[i])[-6].text)
    continent.append(list(file.tbody.find_all('tr')[i])[-2].text)

# (An earlier variant that scraped only rows with class 'even', and a
# pandas-DataFrame assembly, were kept here commented out; condensed away
# for readability.)

# Project helper: presumably normalises country names in place — TODO confirm.
name_clean(name)

# Append one CSV row per country, stamped with the site's and the local
# timestamp.  NOTE(review): opened without newline=''/encoding arguments.
csv_file = open('data_csv.csv','a')
csv_writer = csv.writer(csv_file)
for i in range(len(name)):
    csv_writer.writerow([name[i],t_cases[i], n_cases[i], t_deaths[i], n_deaths[i], t_recovered[i], active_cases[i],serious_condition[i], n_test[i], continent[i], last_updated_on_website, time_date_now])
csv_file.close()
| true
|
de922e9fb5ec469f8aa8745e4f1c13a10060acc5
|
Python
|
Renzerth/Coronografia-UdeA-EAFIT
|
/python/to_transfer/Analytical-sol/integral_bessel.py
|
UTF-8
| 2,413
| 2.546875
| 3
|
[] |
no_license
|
# Prueba #
##########
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from scipy.special import gamma, j1, jv
from scipy.misc import factorial
from scipy.integrate import quad, simps
# Parameters of the optical setup (presumably: f1/f2 lens focal lengths,
# fFR vortex/Fresnel element focal length, wl wavelength, a aperture
# radius, w0 Gaussian waist — TODO confirm against the derivation).
N = 2
L = 1
jobs = 0
mobs = L*(1 + jobs*N)
f1 = 1.0
f2 = 0.2  # previous value: 0.1
fFR = 1.6
wl = 532e-9  # 532 nm
z0 = f2 - mobs*f2**2 / (L * fFR)
a = 5.e-2
b = 1000*a  # NOTE(review): defined but never used below
w0 = a/10.
k = 2*np.pi / wl  # wavenumber
j = np.array([-1,0])
m = L*(1 + j*N)  # azimuthal orders summed into U_RT below
print (m)
# rmax = 2*10.e-6   (Gaussian case)
rmax = a*f2/f1
NN = 63  # samples per axis of the output grid
x = np.linspace(-2*rmax/1,2*rmax/1,NN)
X,Y = np.meshgrid(x,x)
R = np.sqrt(X**2+Y**2)  # polar radius of every grid point
T = np.arctan2(Y,X)     # polar angle of every grid point
def integrand_bessel(rho,m,r):
    """Radial integrand for the Bessel (J1 aperture) illumination.

    Reads the module-level optics constants k, a, f1, f2, L, fFR, z0.
    """
    alpha = 0.5*1j*k * (m/(L*fFR) + z0/f2**2 - 1/f1 - 1/f2)
    return j1(k*a/f1 * rho) * np.exp(-alpha * rho**2) * jv(m, k*r/f2 * rho)
def integrand_gauss(rho,m,r):
    """Radial integrand for the Gaussian (waist w0) illumination.

    NOTE(review): unlike integrand_bessel this carries a leading 1j and a
    Jacobian factor rho — confirm both against the analytical derivation.
    """
    # z0 = f2 - m*f2**2 / (L * fFR)  — per-order variant kept from the
    # original; the module-level z0 (computed with mobs) is what is used.
    alpha = 0.5*1j*k * (m/(L*fFR) + z0/f2**2 - 1/f2) + 1/w0**2
    return 1j * np.exp(-alpha * rho**2) * jv(m, k*r/f2 * rho) * rho
def do_image(m, gauss = True):
    """Compute the field contribution u(r, theta) of diffraction order m.

    Numerically integrates the radial kernel (trapezoid rule) once per
    distinct radius on the sampling grid R, then multiplies by the analytic
    prefactor K1.  Set gauss=False to use the Bessel-illumination kernel.
    Depends on module globals a, k, f2, z0, L, N, R, T and NN.
    """
    #rho = np.linspace(0,10*a,1000000)
    sample_rho = np.linspace(0, 10*a, 1000000)
    unique_radii = list(set(R.flatten()))
    field = np.zeros((NN, NN)) + 1j*np.zeros((NN, NN))
    arg = np.pi * m / (L*N)
    if gauss:
        integrand = integrand_gauss
        # np.sinc(x) is sin(pi*x)/(pi*x), hence the division by pi
        K1 = (np.exp(-1j*arg) * np.sinc(arg / np.pi)
              * (k/f2) * (1j**(3*m+1))
              * np.exp(1j*k*(f2+z0))
              * np.exp(1j*m*T))
    else:
        integrand = integrand_bessel
        K1 = (np.exp(-1j*arg) * np.sinc(arg / np.pi)
              * (k*a/f2) * (1j**(3*abs(m)-2))
              * np.exp(1j*k*(f2+z0))
              * np.exp(1j*m*T))
    for done, r in enumerate(unique_radii):
        radial_integral = np.trapz(integrand(sample_rho, m, r), x = sample_rho)
        field[np.where(R == r)] = radial_integral
        print (done)  # progress indicator: one line per unique radius
    return K1 * field
# Superpose the contributions of the diffraction orders in m and plot the
# normalised intensity |U(r, theta)|^2.
U_RT = 0 #U(r,theta)
for i in m:
    U_RT += do_image(i, gauss = False)
INT = abs(U_RT)**2
INT_max = np.max(INT)
plt.imshow(INT / INT_max) #, norm = colors.LogNorm())
plt.colorbar()
plt.savefig("int_bessel._N%d_L%d_mobs%d.png"%(N,L,mobs))
#plt.savefig("int_gauss_N%d_L%d_mobs%d.png"%(N,L,mobs))
plt.show()
#integral from 0 to inf of (J1(2pi*a*rho / lamb*f1) * exp(-alpha*rho**2) * Jm(k*rho*r/f2)) drho
| true
|
52c3d6afb1d522a98ef3a0fe68d410c8d31fc631
|
Python
|
ManikhweSchool/Introduction-To-Java-Programming
|
/Python/For Loops/Exercise 2.5.10/Exercise_2_5_10a.py
|
UTF-8
| 144
| 3.75
| 4
|
[] |
no_license
|
def rectangle_lines(rows, columns):
    """Return the rows of a rows x columns rectangle of '*' characters."""
    return ["*" * columns for _ in range(rows)]


if __name__ == "__main__":
    # int() instead of the original eval(): eval on raw user input can
    # execute arbitrary code.
    rows = int(input('Enter number of rows : '))
    columns = int(input('Enter number of columns : '))
    for line in rectangle_lines(rows, columns):
        print(line)
| true
|
c20c7bfba0066588f6266d4d9128f8039501aa31
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03548/s402182384.py
|
UTF-8
| 205
| 3.078125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
def solve(x, y, z):
    """Max number of items of width y (with margin z on each side) on width x.

    The first item needs y + 2z units; each further item needs y + z more.
    This closed form replaces the original O(answer) counting loop:
    answer = floor((x - z) / (y + z)), clamped at 0 when nothing fits.
    """
    return max(0, (x - z) // (y + z))


if __name__ == "__main__":
    X, Y, Z = map(int, input().split())
    print(solve(X, Y, Z))
| true
|
9cbb125d601eb443abb244a7da0806a74868f7bc
|
Python
|
unfit-raven/yolo-octo
|
/CST100 Object Oriented Software Development/Week Two/turtleloopcircle.py
|
UTF-8
| 1,095
| 3.90625
| 4
|
[] |
no_license
|
import turtle  ## turtle graphics library

wn = turtle.Screen()   ## graphics window
wn.bgcolor("blue")     ## window background colour

frank = turtle.Turtle()
frank.hideturtle()     ## turn off turtle animation
frank.speed(0)         ## draw as fast as possible


def filled_circle(t, steps, angle, step_len):
    """Draw a white filled approximate circle as `steps` chords of length
    `step_len`, turning right by `angle` degrees before each chord."""
    t.color("white")
    t.begin_fill()
    for _ in range(steps):
        t.right(angle)
        t.forward(step_len)
    t.end_fill()


def move_up(t, dist):
    """Move `dist` units straight up with the pen lifted, leaving the
    turtle's heading unchanged."""
    t.up()
    t.left(90)
    t.forward(dist)
    t.right(90)
    t.down()


# Snowman: three stacked circles, drawn bottom to top.  The original
# repeated the draw/move code three times; the helpers remove that
# triplication without changing the drawing sequence.
filled_circle(frank, 72, 5, 13)   # bottom circle
move_up(frank, 208)
filled_circle(frank, 60, 6, 11)   # middle circle
move_up(frank, 140)
filled_circle(frank, 40, 9, 11)   # top circle

wn.exitonclick()  ## close window on mouse click
| true
|
870ad2a7882c23e30bca5e2e1264266dbb249aed
|
Python
|
wkroach/learning-machine-learning-
|
/naive bayes/bayes.py
|
UTF-8
| 5,477
| 3.34375
| 3
|
[] |
no_license
|
"""
naive bayes code and simple testing data
adapted from MLIA in python 3.5.1
test on surface3
"""
from numpy import*
def load_data_set():
    """Return (documents, labels): six tokenized posts; 1 = abusive, 0 = normal."""
    documents = [
        ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
        ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
    ]
    labels = [0, 1, 0, 1, 0, 1]
    return documents, labels
def create_vocab_list(data_set):
    """Return a list of the unique words appearing in any document of data_set."""
    vocabulary = set()
    for document in data_set:
        vocabulary.update(document)
    return list(vocabulary)
# Module-level side effect kept from the book: loads the toy data on import.
# NOTE(review): these names are shadowed by locals in testing_bayes().
list_of_posts, list_classes = load_data_set()
def set_of_words_2_vec(vocab_list, input_set):
    """Set-of-words model: 0/1 vector over vocab_list marking words of input_set.

    Words absent from the vocabulary are reported on stdout and ignored.
    """
    vector = [0] * len(vocab_list)
    for word in input_set:
        try:
            vector[vocab_list.index(word)] = 1
        except ValueError:
            print("the word: %s is not in my Vocabulary!" % word)
    return vector
def train_nb0 (train_martix, train_category):
    """Train a naive Bayes model (MLIA ch. 4).

    train_martix   -- matrix of word vectors, one row per document
    train_category -- 0/1 label per document
    Returns (log P(w|class0), log P(w|class1), P(class1)).
    Laplace smoothing: counts start at 1 and denominators at 2 so no
    conditional probability is exactly 0 or 1.
    """
    doc_count = len(train_martix)
    word_count = len(train_martix[0])
    p_abusive = sum(train_category) / float(doc_count)
    word_counts = [ones(word_count), ones(word_count)]  # per-class smoothed counts
    totals = [2.0, 2.0]                                 # per-class smoothed denominators
    for doc_vec, label in zip(train_martix, train_category):
        word_counts[label] += doc_vec
        totals[label] += sum(doc_vec)
    p1_vect = log(word_counts[1] / totals[1])
    p0_vect = log(word_counts[0] / totals[0])
    return p0_vect, p1_vect, p_abusive
def classify_nb(vec2_classify, p0_vect, p1_vect, p_class1):
    """Return 1 if the class-1 log-posterior beats class 0, else 0.

    vec2_classify -- word vector (array); the rest come from train_nb0.
    Sums are done in log space to avoid floating-point underflow.
    """
    log_post1 = sum(vec2_classify * p1_vect) + log(p_class1)
    log_post0 = sum(vec2_classify * p0_vect) + log(1.0 - p_class1)
    return 1 if log_post1 > log_post0 else 0
def testing_bayes():
    """Smoke test: train on the toy posts and print predictions for two docs."""
    posts, classes = load_data_set()
    vocab = create_vocab_list(posts)
    train_mat = [set_of_words_2_vec(vocab, post) for post in posts]
    p0_v, p1_v, p_abusive = train_nb0(array(train_mat), array(classes))
    doc_vec = array(set_of_words_2_vec(vocab, ['love', 'my', 'dalmation']))
    print("['love', 'my', 'dalmation']:", classify_nb(doc_vec, p0_v, p1_v, p_abusive))
    doc_vec = array(set_of_words_2_vec(vocab, ['stupid', 'garbage']))
    print("['stupid','garbage']:", classify_nb(doc_vec, p0_v, p1_v, p_abusive))
def bag_of_words_2_vec_mn(vocab_list, input_set):
    """Bag-of-words model: per-word occurrence counts over vocab_list.

    Unknown words are silently skipped (multinomial variant of the
    set-of-words model).
    """
    # Map each word to its first index, matching list.index() semantics.
    index_of = {}
    for i, word in enumerate(vocab_list):
        index_of.setdefault(word, i)
    vector = [0] * len(vocab_list)
    for word in input_set:
        i = index_of.get(word)
        if i is not None:
            vector[i] += 1
    return vector
def textParse (bigString):
    """Tokenize bigString into lowercase words of length >= 3.

    Splits on runs of non-word characters.  The book's pattern r'\W*' is a
    bug on Python 3.7+: patterns that can match the empty string split
    between every pair of characters, so every token becomes a single
    letter and the length filter drops them all.  r'\W+' restores the
    intended behaviour.
    """
    import re
    listOfTokens = re.split (r'\W+', bigString)
    return [tok.lower () for tok in listOfTokens if len (tok) > 2]
def spamTest ():
    """Train/test naive Bayes on the MLIA spam corpus (email/spam, email/ham).

    Reads 25 spam and 25 ham messages, holds out 10 random documents for
    testing, and prints the resulting error rate.
    Fixes over the book version: file handles are closed via `with` (the
    originals leaked on every iteration), and files are opened with
    errors='ignore' because ham/23.txt is not UTF-8 (see the original
    docstring's complaint) and would otherwise crash the run.
    """
    import random
    docList = []; classList = []
    for i in range (1,26):
        with open ('email/spam/%d.txt' % i, errors='ignore') as fr:
            wordList = textParse (fr.read ())
        docList.append (wordList)
        classList.append (1)
        with open ('email/ham/%d.txt' % i, errors='ignore') as fr2:
            wordList = textParse (fr2.read ())
        docList.append (wordList)
        classList.append (0)
    vocabList = create_vocab_list (docList)
    trainingSet = list (range (50)); testSet = []
    for i in range (10):  # hold out 10 random documents for testing
        randIndex = int (random.uniform (0,len(trainingSet)))
        testSet.append (trainingSet [randIndex])
        del(trainingSet [randIndex])
    trainMat = []; trainClasses = []
    for docIndex in trainingSet :
        trainMat.append (set_of_words_2_vec (vocabList, docList [docIndex]))
        trainClasses.append (classList [docIndex])
    p0V, p1V, pSpam = train_nb0 (array (trainMat), array (trainClasses))
    errorCount = 0
    for docIndex in testSet :
        wordVector = set_of_words_2_vec (vocabList, docList [docIndex])
        if classify_nb (array (wordVector), p0V, p1V, pSpam) != classList [docIndex] :
            errorCount += 1
    print ('the error rate is: ', errorCount / len (testSet))
| true
|
bf5f6bb6fddd83be76c3b6862cb89f5a404d7c5c
|
Python
|
ps1on1ck/methods_of_collecting_and_processing_data_from_Internet
|
/5/m_client.py
|
UTF-8
| 695
| 3.015625
| 3
|
[] |
no_license
|
from pymongo import MongoClient
class MClient:
    """Thin MongoDB helper bound to a single collection."""

    def __init__(self, host, port, db_name, collection_name):
        # Only the target collection is kept; every operation goes through it.
        client = MongoClient(host, port)
        db = client[db_name]
        self.collection = db[collection_name]

    def insert_many(self, items):
        """Bulk-insert `items` unconditionally; returns the pymongo result."""
        return self.collection.insert_many(items)

    def insert_many_if_not_exist(self, items, key='vacancy_id'):
        """Insert each item unless a document with the same `key` value exists.

        `key` was hard-coded to 'vacancy_id'; it is now a parameter with the
        same default, so existing callers are unaffected while other
        collections can deduplicate on their own field.
        """
        for item in items:
            self.insert_if_not_exist(key, item)

    def insert_if_not_exist(self, name, item):
        """Insert `item` only when no document's `name` field equals item[name]."""
        if not self.is_exist(name, item[name]):
            self.collection.insert_one(item)

    def is_exist(self, name, field):
        """Return True if some document's `name` field equals `field`."""
        return bool(self.collection.find_one({name: {'$in': [field]}}))
| true
|
37de811fde299dcdc2227367afdddc14f8012e4c
|
Python
|
RobRoseKnows/umbc-cs-projects
|
/umbc/CMSC/4XX/473/Assignments/a01/3f.py
|
UTF-8
| 685
| 2.78125
| 3
|
[] |
no_license
|
from collections import Counter


def count_tokens(path):
    """Count word-token frequencies in a CoNLL-U file.

    Tokens are taken from the FORM column (tab-separated index 1); comment
    lines (starting with '#') and blank lines are skipped.  Returns a
    collections.Counter.  Extracted because the original duplicated this
    loop verbatim for the train and dev files.
    """
    counts = Counter()
    with open(path) as f:
        for line in f:
            if line[0] != '#' and line[0] != '\n':
                counts[line.split('\t')[1]] += 1
    return counts


if __name__ == "__main__":
    cnt = count_tokens('en_ewt-ud-train.conllu')
    cnt2 = count_tokens('en_ewt-ud-dev.conllu')
    # Vocabulary items seen in training but absent from dev, then dev size.
    print("types missing: {}".format(len(set(cnt.keys()) - set(cnt2.keys()))))
    print("tokens: {}".format(sum(cnt2.values())))
| true
|
dd4892c5a0b675d1c97fb91a5ca8115801a2bbca
|
Python
|
jaescalante02/IA2
|
/proyectofinal/transform_data.py
|
UTF-8
| 1,594
| 2.953125
| 3
|
[] |
no_license
|
import sys
import datetime
# True unless the first CLI argument is '0'; controls whether imprimir()
# appends the label column (training data vs. prediction data).
training = False if (sys.argv[1]=='0') else True
def read_file(filename):
    """Parse a whitespace-separated data file.

    The first (header) line is discarded; every remaining line becomes
    [date_string, float, float, ...].  Rows are returned in reverse file
    order.
    """
    rows = []
    with open(filename) as f:
        f.readline()  # discard the header line
        for line in f:
            fields = line.split()
            rows.append([fields[0]] + [float(v) for v in fields[1:]])
    rows.reverse()
    return rows
def operar(datos):
    """Turn raw rows into feature rows.

    Input row:  [date 'dd/mm/yy', v1, v2, v3, v4, v5].
    Output row: [day, month, weekday, v1-v2, v4-v2, v5-v2, label] where
    label is 0 when v3 is negative and 1 otherwise.
    """
    features = []
    for row in datos:
        fecha = datetime.datetime.strptime(row[0], "%d/%m/%y").date()
        features.append([
            fecha.day,
            fecha.month,
            fecha.weekday(),
            row[1] - row[2],
            row[4] - row[2],
            row[5] - row[2],
            0 if row[3] < 0 else 1,
        ])
    return features
def imprimir(fname, outname, datos, num):
    # Write sliding windows of `num` rows to the file 'datos/<outname><num-1>'.
    # For each position i (starting at num-1), the trailing features (index 3
    # onward) of the previous num-1 rows are written space-separated, followed
    # by the first three fields (day, month, weekday) of the current row;
    # floats are rounded to 3 decimals.
    # NOTE(review): depends on the module-level `training` flag (from
    # sys.argv) to decide whether the label column datos[i][6] is appended;
    # the `fname` parameter is unused.
    fp = open('datos/'+outname+str(num-1), 'w+')
    i=num-1
    for x in datos[num-1:]:
        for a in datos[i-num+1:i]:
            for b in a[3:]:
                if(isinstance(b, float)):
                    fp.write(str(round(b, 3)))
                else:
                    fp.write(str(b))
                fp.write(' ')
        for c in datos[i][:3]:
            if(isinstance(c, float)):
                fp.write(str(round(c, 3)))
            else:
                fp.write(str(c))
            fp.write(' ')
        if(training):
            fp.write(str(datos[i][6]))
        fp.write('\n')
        i+=1
    fp.close()
# Script driver.  NOTE: this file is Python 2 (bare `print` statement below).
# CLI: argv[1] training flag, argv[2] max window size, argv[3] input file,
# argv[4] output name prefix.
fname = sys.argv[3]
comb = int(sys.argv[2])
datos = read_file(fname)
print len(datos), 'datos'
datos=operar(datos)
# Emit one window file per window size from 2 up to comb.
for i in list(range(2, comb+1)):
    imprimir(fname, sys.argv[4], datos, i)
| true
|
fcdb2b699af0f8e8cf732256452d8daf983807bf
|
Python
|
janhenrikbern/VT_OPT
|
/optimal.py
|
UTF-8
| 4,750
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import metrics
from vehicles.point_car import PointCar
from path_finding import find_valid_trajectory
from track import load_track
import argparse
import matplotlib.pyplot as plt
# Code in this file is based on the racing line optimization algorithm presented
# in the post http://phungdinhthang.com/2016/12/16/calculate-racing-lines-automatically/?i=1
#
# CLI: parsed at import time.
# NOTE(review): without action='store_true', `--plot` expects a value
# argument (e.g. `--plot 1`) rather than acting as a boolean flag.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--plot", default=False, help="plot trajectories"
)
a = parser.parse_args()
def distance(x1: np.ndarray, x2: np.ndarray) -> float:
    """Euclidean distance between two coordinate arrays."""
    diff = x1 - x2
    return np.sqrt((diff ** 2).sum())
def segmentValue(prev, cur, nxt, alpha, beta):
    """Score the waypoint triple (prev, cur, nxt).

    Rewards straightness (beta times the cosine of the turn angle, scaled
    by segment length) and penalises total segment length (alpha).
    """
    v_in = cur - prev
    v_out = nxt - cur
    len_in = np.sqrt(np.sum(v_in**2))
    len_out = np.sqrt(np.sum(v_out**2))
    cos_angle = np.dot(v_in, v_out) / (len_in * len_out)
    return beta * cos_angle * (len_in + len_out) - alpha * (len_in + len_out)
class RouteValue:
    """Trellis cell of the racing-line dynamic program.

    val  -- best accumulated route value reachable from here
    nxt  -- index of the chosen state at the next site (back-pointer)
    coor -- waypoint coordinates of this cell
    """

    def __init__(self, val, idx, coor):
        self.val = val
        self.nxt = idx
        self.coor = coor

    def __str__(self) -> str:
        return f"{self.val}\n"
def buildRouteSegments(parameterized_track, alpha, beta):
    """Dynamic program over the track trellis, computed back to front.

    Returns the route-value matrix RVM (restored to front-to-back order)
    where RVM[i][prev][cur] stores the best achievable value from site i
    onward given the previous state `prev` and current state `cur`, plus
    the back-pointer to the best next state.
    Fix: `np.Infinity` (an alias removed in NumPy 2.0) is replaced by the
    canonical `-np.inf`.
    """
    n_states = len(parameterized_track[0])
    RVM = []
    # Build RVM from finish line to start line
    reversed_trellis = list(reversed(parameterized_track))
    for i, site in enumerate(reversed_trellis):
        cur_line = []
        for j, cur in enumerate(site):
            if i == 0:
                prev_line = []
                for idx in range(n_states):
                    # value beyond the finish is 0.0
                    prev_line.append(RouteValue(0.0, idx, cur))
                cur_line.append(prev_line)
            else:
                prev_line = []
                # wrap around to the start when looking one site further back
                idx = i + 1 if i + 1 < len(reversed_trellis) else 0
                for prev in reversed_trellis[idx]:
                    best_val = -np.inf
                    best_idx = None
                    # pick the next state maximising segment + downstream value
                    for l, nxt in enumerate(reversed_trellis[i-1]):
                        seg_val = 0.0  # segment value is 0 at the start line
                        if i + 1 < len(reversed_trellis):
                            seg_val = segmentValue(prev, cur, nxt, alpha, beta)
                        val = seg_val + RVM[i-1][l][j].val
                        if val > best_val:
                            best_val = val
                            best_idx = l
                    prev_line.append(RouteValue(best_val, best_idx, cur))
                cur_line.append(prev_line)
        RVM.append(cur_line)
    return list(reversed(RVM))
def get_best_path(RVM, states):
    """Extract the best route from the route-value matrix.

    Tries every start state, follows the stored back-pointers through the
    trellis to build a candidate path, and returns the candidate with the
    smallest summed distance (computed by the project's `metrics` module).
    """
    dists = []
    paths = []
    for idx in range(states):
        path = []
        nxt = idx
        cur = idx
        for line in RVM:
            # line[nxt][cur] is the RouteValue chosen for this transition;
            # advance (cur, nxt) along the stored back-pointer chain.
            tmp = line[nxt][cur]
            cur = nxt
            nxt = tmp.nxt
            path.append(tmp.coor)
        paths.append(path)
        x = [i[0] for i in path]
        y = [i[1] for i in path]
        dist = metrics.summed_distance(x, y)
        dists.append(dist)
    return paths[np.argmin(dists)]
def run(parameterized_track, alpha, beta):
    """Optimise the racing line: score every waypoint transition with the
    given alpha/beta weights, then extract the best path from the trellis."""
    route_values = buildRouteSegments(parameterized_track, alpha, beta)
    n_states = len(parameterized_track[0])
    return get_best_path(route_values, n_states)
def run_gridsearch(parameterized_track, n_segments=10):
    """Sweep beta over [0.3, 0.7] (with alpha = 1 - beta) and print, for each
    setting, the path length and the simulated PointCar travel time of the
    resulting racing line."""
    for i in np.linspace(0.3, 0.7, num = n_segments+1, endpoint=True , dtype=float):
        print(i)
        path = run(parameterized_track, alpha=1.0 - i, beta=i)
        # NOTE(review): these comprehensions shadow the loop variable `i`;
        # harmless here only because `i` is reassigned before its next use.
        x = [i[0] for i in path]
        y = [i[1] for i in path]
        print(metrics.summed_distance(x, y))
        # Replay the path with the point-mass car model to get a lap time.
        car = PointCar(*path[0])
        car.theta = car.heading(path[1])
        for coor in path[1:]:
            car.update(coor, is_coor=True)
        print(car.travel_time)
if __name__ == "__main__":
IMG_PATH = "./tracks/loop.png"
track = load_track(IMG_PATH)
# Set to a valid point in trajectory
car = PointCar(150, 200)
STATES = 30 # a waypoint for every 0.5 of the track width
trellis = find_valid_trajectory(car, track, states=STATES)
run_gridsearch(trellis, n_segments=10)
# path = run(trellis, alpha=.9, beta=0.1)
# x = [i[0] for i in path]
# y = [i[1] for i in path]
# print(metrics.summed_distance(x, y))
# car = PointCar(*path[0])
# car.theta = car.heading(path[1])
# for coor in path[1:]:
# car.update(coor, is_coor=True)
# print(car.travel_time)
if a.plot:
fig, ax = plt.subplots()
ax.imshow(track)
plt.xlabel("Meters")
plt.ylabel("Meters")
ax.fill(x, y, facecolor='none', edgecolor='black', linestyle="-.", label="Centerline")
plt.legend(loc=4)
plt.show()
| true
|
ad65e7679c6369b4ecbedbcc68e0d2874bf23640
|
Python
|
MaxouGJ/LS6
|
/TPNOTE/Maxime_Gallais_Jimenez.py
|
UTF-8
| 1,578
| 2.859375
| 3
|
[] |
no_license
|
import re
def question1c(l):
    """Concatenate (or add) the fifth and last elements of l."""
    return l[4] + l[-1]
#
def question2e(l):
    """Return the elements of l that are >= 8, preserving order."""
    kept = []
    for value in l:
        if value >= 8:
            kept.append(value)
    return kept
#
def question3c(l):
    """For every sub-list of l with at least 3 elements, return a copy with
    each element doubled; shorter sub-lists are dropped."""
    return [[2 * value for value in sub] for sub in l if len(sub) >= 3]
#
def question4a(fichier):
    """Return every word in the file that contains the letter 'a' or 'A'.

    Uses `with` so the handle is closed even if the read raises (the
    original leaked the descriptor on error).
    """
    with open(fichier) as f:
        return re.findall(r"\w*[Aa]\w*", f.read())
#
def question5f(t):
    """Return a dict mapping each element of t to its occurrence count."""
    counts = {}
    for item in t:
        counts[item] = counts.get(item, 0) + 1
    return counts
#
def question6a(s):
    """Return every run of 4 or more consecutive digits found in s."""
    pattern = re.compile(r"\d{4,}")
    return pattern.findall(s)
#
def question7a(fichier):
    """Parse simple '<tag> content </tag>' pairs from the file.

    Returns {tag: content}.  `with` replaces the manual open/close so the
    handle is released even if reading raises.
    """
    with open(fichier) as f:
        matches = re.findall(r"<([a-z]*?)> (.*?) </\1>", f.read())
    return {tag: content for (tag, content) in matches}
#
def question8a(fichier):
    """Parse 'lat,lon,DD:MM:SSN,DD:MM:SSW,name' records from the file.

    Returns {name: (lat, lon)} with lat/lon kept as strings; later records
    with the same name overwrite earlier ones.  `with` replaces the manual
    open/close so the handle cannot leak.
    """
    pattern = re.compile(
        r"(\d+\.\d+),(-{0,1}\d+\.\d+),(\d{1,2}:\d{2,2}:\d{2,2}[NS]),"
        r"(\d{1,2}:\d{2,2}:\d{2,2}[WE]),(\S+)"
    )
    with open(fichier) as f:
        matches = pattern.findall(f.read())
    return {name: (lat, lon) for (lat, lon, _ns, _we, name) in matches}
#
def question9b(d, lat_ref):
    """Return the keys of d whose latitude (first tuple element, parsed as
    float) lies strictly between lat_ref and lat_ref + 1."""
    matching = []
    for name in d:
        lat = float(d[name][0])
        if lat_ref < lat < lat_ref + 1:
            matching.append(name)
    return matching
#
def question10a(d, pref):
    """Return the keys of d matching the regex `pref` at their start, sorted
    and concatenated with a tab after each entry."""
    selected = sorted(k for k in d if re.match(pref, k))
    return "".join(k + "\t" for k in selected)
#
def question11a(d):
    """Invert d on the first tuple element: value[0] -> tab-joined key names
    (in the order the keys are encountered)."""
    groups = {}
    for name, value in d.items():
        groups.setdefault(value[0], []).append(name)
    return {lat: "\t".join(names) for lat, names in groups.items()}
#
def question12b(d):
    """Return (smallest key of d, its associated value)."""
    smallest = min(d)
    return (smallest, d[smallest])
#
| true
|
846b6e54eb6e244b49039f468dd80e847f07ed97
|
Python
|
nameless-Chatoyant/pytorch-workspace
|
/AtrousConv/model.py
|
UTF-8
| 1,697
| 2.640625
| 3
|
[] |
no_license
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from loss import FocalLoss
from cfgs.config import cfg
class Net(nn.Module):
    """Stack of atrous (dilated) 2-D convolutions configured from cfg.

    Layer widths come from cfg.channels (with cfg.class_num appended as the
    final output width); per-layer dilations and kernel sizes come from
    cfg.dilations and cfg.kernel_size.  BatchNorm is optional (cfg.with_bn)
    and every layer except the last is followed by ReLU.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.channels = cfg.channels
        # NOTE(review): this mutates the shared cfg.channels list, so a
        # second Net() instantiation would append class_num again.
        self.channels.append(cfg.class_num)
        self.AtrousConvs = nn.ModuleList()
        for layer_idx, dilation in enumerate(cfg.dilations):
            AtrousConv = []
            if layer_idx == 0:
                ch_in = 3  # RGB input
            else:
                ch_in = self.channels[layer_idx - 1]
            AtrousConv.append(nn.Conv2d(in_channels = ch_in,
                            out_channels = self.channels[layer_idx],
                            kernel_size = cfg.kernel_size[layer_idx],
                            stride = 1,
                            padding = 0,
                            dilation = cfg.dilations[layer_idx],
                            bias = not cfg.with_bn))
            if cfg.with_bn:
                # NOTE(review): BatchNorm1d after a Conv2d -- BatchNorm2d is
                # the usual pairing; confirm this is intentional.
                AtrousConv.append(nn.BatchNorm1d(self.channels[layer_idx]))
            if layer_idx != len(cfg.dilations) - 1:
                AtrousConv.append(nn.ReLU())
            AtrousConv = nn.Sequential(*AtrousConv)
            self.AtrousConvs.append(AtrousConv)

    def forward(self, x):
        # Pad each layer by its dilation before convolving -- preserves the
        # spatial size exactly only for 3x3 kernels; TODO confirm for others.
        print('Input:', x.size())
        for conv_idx,AtrousConv in enumerate(self.AtrousConvs):
            x = F.pad(x, (cfg.dilations[conv_idx], cfg.dilations[conv_idx], cfg.dilations[conv_idx], cfg.dilations[conv_idx]))
            x = AtrousConv(x)
            print('Conv{}:'.format(conv_idx), x.size())
        return x
if __name__ == '__main__':
    # Smoke test: build the network and print its layer structure.
    net = Net()
    print(net)
| true
|
a76badef38082e0f093f47607ee1a73cd0d171b9
|
Python
|
uk-gov-mirror/ministryofjustice.analytical-platform-aws-security
|
/modules/sns-guardduty-slack/sns_guardduty_slack.py
|
UTF-8
| 2,769
| 2.71875
| 3
|
[] |
no_license
|
"""
Lambda function receiving SNS notifications from GuardDuty
Reformat and send notifications to Slack
"""
import json
import logging
import os
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
# Destination Slack channel and incoming-webhook URL, read at import time --
# a missing environment variable fails the Lambda cold start immediately.
SLACK_CHANNEL = os.environ['SLACK_CHANNEL']
SLACK_HOOK_URL = os.environ['HOOK_URL']
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
def lambda_handler(event, _):
    """Lambda entry point: reformat a GuardDuty finding delivered via SNS
    and post it to Slack through the configured incoming webhook."""
    LOGGER.info("Event: %s", event)
    finding = json.loads(event['Records'][0]['Sns']['Message'])
    detail = finding['detail']
    service = detail['service']

    # Map the numeric severity onto a labelled emoji bucket.
    severity = float(detail['severity'])
    severity_label = ""
    if severity < 4:
        severity_label = ":large_blue_circle: LOW"
    elif 4 <= severity < 7:
        severity_label = ":warning: MEDIUM"
    elif severity >= 7:
        severity_label = ":fire: HIGH"

    # Assemble the Slack message once instead of repeated reformatting.
    parts = [
        ":amazon: *AWS Account:* {} *Time:* {} \n".format(
            finding['account'], finding['time']),
        "*Alert level:* {} \n".format(severity_label),
        "*Type:* {}\n".format(detail['type']),
        "*Title:* {}\n".format(detail['title']),
        "*Description:* {}\n".format(detail['description']),
        "*Severity:* {}\n".format(detail['severity']),
        "*Event First Seen:* {}\n".format(service['eventFirstSeen']),
        "*Event Last Seen:* {}\n".format(service['eventLastSeen']),
        "*Target Resource:* {}\n".format(json.dumps(detail['resource'])),
        "*Action:* {}\n".format(json.dumps(service['action'])),
        "*Additional information:* {}\n".format(
            json.dumps(service['additionalInfo'])),
    ]
    payload = {
        'channel': SLACK_CHANNEL,
        'username': "AWS GuardDuty",
        'text': "".join(parts),
        'icon_emoji' : ":guardsman:"
    }

    request = Request(SLACK_HOOK_URL, json.dumps(payload).encode('utf-8'))
    try:
        urlopen(request).read()
        LOGGER.info("Message posted to %s", payload['channel'])
    except HTTPError as err:
        LOGGER.error("Request failed: %d %s", err.code, err.reason)
    except URLError as err:
        LOGGER.error("Server connection failed: %s", err.reason)
| true
|
4597098bb6c1c1d354d839e5116f321c4d1777ec
|
Python
|
vladklokun/font_renamer
|
/tests/test_table_name.py
|
UTF-8
| 10,799
| 2.828125
| 3
|
[] |
no_license
|
"""Tests the facilities for working with the OpenType `name` table.
"""
# Test classes do not use the self argument, but are required to accept it
# pylint: disable=no-self-use
import typing as typ
import fontTools.ttLib.tables._n_a_m_e as tt_table_name
import hypothesis as h
import pytest
import font_renamer.table_name as fr_name
from . import conftest as cft
from . import hypothesis_strategies as h_strat_custom
from . import types as tt
# Readable alias for fontTools' name-mangled `name`-table class.
NameTable = tt_table_name.table__n_a_m_e
def name_table_contains_record(
    table: NameTable,
    name_string: str,
    name_id: int,
    platform_id: int,
    platform_encoding_id: int,
    language_id: int,
) -> bool:
    """Return True iff `table` holds a name record matching every one of the
    provided parameters, and False otherwise."""
    return any(
        record.string == name_string
        and record.nameID == name_id
        and record.platformID == platform_id
        and record.platEncID == platform_encoding_id
        and record.langID == language_id
        for record in table.names
    )
class TestINameRecord:
    """Tests the interface for name records."""
    def test_instantiation_raises_type_error(self) -> None:
        """Tests if instantiation of the interface raises a `TypeError`."""
        # PNameRecord is an interface; direct construction must fail.
        with pytest.raises(TypeError):
            fr_name.PNameRecord()
class TestNameRecord:
    """Tests the name records."""
    # `ft_name_record_from_name_record_tuple` below is a pytest fixture
    # (see conftest); the tuple arguments are Hypothesis-generated.
    @h.given(
        # The `draw()` function is injected by the Hypothesis' internals, so
        # there is no need to provide it explicitly
        # pylint: disable=no-value-for-parameter
        name_rec_tuple=h_strat_custom.name_record_tuples_valid()
    )
    def test_init_valid_params_ok(
        self,
        name_rec_tuple: tt.NameRecordTupleValid,
    ) -> None:
        """Tests if instantiation with valid parameters proceeds ok."""
        name_record = fr_name.NameRecord(**name_rec_tuple._asdict())
        assert name_record
        assert name_rec_tuple.string == name_record.string
    @h.given(
        # pylint: disable=no-value-for-parameter
        name_record_tuple=h_strat_custom.name_record_tuples_valid()
    )
    def test_from_ft_name_record_valid_params_inits_ok(
        self,
        name_record_tuple: tt.NameRecordTupleValid,
        ft_name_record_from_name_record_tuple: typ.Callable[
            [tt.NameRecordTupleValid], tt_table_name.NameRecord
        ],
    ) -> None:
        """Tests if calling `from_ft_name_record()` on a valid fontTools
        `NameRecord` properly initializes a `NameRecord` object."""
        ft_name_record = ft_name_record_from_name_record_tuple(
            name_record_tuple
        )
        name_record = fr_name.NameRecord.from_ft_name_record(ft_name_record)
        assert name_record
        # Field-by-field mapping from fontTools' camelCase to snake_case.
        assert name_record.string == ft_name_record.string
        assert name_record.name_id == ft_name_record.nameID
        assert name_record.platform_id == ft_name_record.platformID
        assert name_record.platform_encoding_id == ft_name_record.platEncID
        assert name_record.language_id == ft_name_record.langID
    @h.given(
        # pylint: disable=no-value-for-parameter
        name_record_tuple_invalid=h_strat_custom.name_record_tuples_invalid()
    )
    def test_from_ft_name_record_invalid_params_raises_value_error(
        self,
        name_record_tuple_invalid: tt.NameRecordTupleMaybe,
        ft_name_record_from_name_record_tuple: typ.Callable[
            [tt.NameRecordTupleMaybe], tt_table_name.NameRecord
        ],
    ) -> None:
        """Tests if attempting to instantiate a `NameRecord` from a malformed
        `fontTools.NameRecord` raises a `ValueError`.
        """
        ft_name_record = ft_name_record_from_name_record_tuple(
            name_record_tuple_invalid
        )
        with pytest.raises(ValueError):
            fr_name.NameRecord.from_ft_name_record(ft_name_record)
class TestINameTableAdapter:
    """Tests the interface for `name` table adapters."""
    def test_instantiation_raises_type_error(self) -> None:
        """Tests if instantition of the interface raises a TypeError."""
        # PNameTable is an interface; direct construction must fail.
        with pytest.raises(TypeError):
            fr_name.PNameTable()
class TestNameTableAdapter:
    """Tests the `name` table adapter."""
    # `name_table_empty`, `raw_name_records` and `make_name_table` below are
    # pytest fixtures supplied by conftest.
    def test_instantiation_ok(self, name_table_empty: NameTable) -> None:
        """Tests if an instance of `NameTableAdapter` can be created."""
        adapter = fr_name.TTLibToNameTableAdapter(name_table_empty)
        assert adapter
        assert adapter._table == name_table_empty
        assert hasattr(adapter._table, "names")
    def test_get_name_record_existing_returns_namerecord(
        self,
        raw_name_records: typ.Sequence[cft.NameRecordTuple],
        make_name_table: typ.Callable[
            [typ.Iterable[cft.NameRecordTuple]], NameTable
        ],
    ) -> None:
        """Tests if calling `get_name_record()` with the parameters that match
        a name record in the bound `name` table returns a corresponding name
        record.
        Inputs:
            - A list of raw name records.
            - A function for creating `NameTable` instances from raw name
              records.
        Expected result:
            - `get_name_record()` returns a `NameRecord` object.
            - Attributes of the returned `NameRecord` match the attributes of
              the raw name record.
        """
        raw_name_rec = raw_name_records[0]
        table = make_name_table(raw_name_records)
        adapter = fr_name.TTLibToNameTableAdapter(table)
        (
            name_string,
            name_id,
            platform_id,
            platform_encoding_id,
            language_id,
        ) = raw_name_rec
        name_rec = adapter.get_name_record(
            name_id,
            platform_id,
            platform_encoding_id,
            language_id,
        )
        assert name_rec
        assert isinstance(name_rec, tt_table_name.NameRecord)
        assert name_rec.string == name_string
        assert name_rec.nameID == name_id
        assert name_rec.platformID == platform_id
        assert name_rec.platEncID == platform_encoding_id
        assert name_rec.langID == language_id
    def test_get_name_record_empty_table_raises_key_error(
        self,
        name_table_empty: NameTable,
    ) -> None:
        """Tests if calling `get_name_record()` while the adapted table is
        empty raises `KeyError`.
        Inputs:
            - name_table_empty: an empty `NameTable`.
        Expected result:
            - `get_name_record()` raises a `KeyError`.
        """
        adapter = fr_name.TTLibToNameTableAdapter(name_table_empty)
        with pytest.raises(KeyError):
            adapter.get_name_record(
                name_id=0, platform_id=0, platform_encoding_id=0, language_id=0
            )
    def test_get_name_string_empty_table_raises_key_error(
        self,
        name_table_empty: NameTable,
    ) -> None:
        """Tests if calling `get_name_string()` while the adapted table is
        empty raises a `KeyError`.
        Inputs:
            - name_table_empty: an empty `NameTable`.
        Expected result:
            - `get_name_string()` raises a `KeyError`.
        """
        adapter = fr_name.TTLibToNameTableAdapter(name_table_empty)
        with pytest.raises(KeyError):
            adapter.get_name_string(
                name_id=0, platform_id=0, platform_encoding_id=0, language_id=0
            )
    def test_get_name_string_existing_returns_matching_string(
        self,
        raw_name_records: typ.Sequence[cft.NameRecordTuple],
        make_name_table: typ.Callable[
            [typ.Iterable[cft.NameRecordTuple]], NameTable
        ],
    ) -> None:
        """Tests if calling `get_name_string()` with the parameters that match
        a name record in the adapted table returns a name string of the name
        record that corresponds to the provided parameters.
        Inputs:
            - raw_name_records: raw name records to create `NameRecord`
              instances in a factory.
            - make_name_table: factory for creating a `name` table with
              provided name records.
        """
        raw_name_record = raw_name_records[0]
        (
            expected_name_string,
            name_id,
            platform_id,
            platform_encoding_id,
            language_id,
        ) = raw_name_record
        name_table = make_name_table(raw_name_records)
        adapter = fr_name.TTLibToNameTableAdapter(name_table)
        actual_name_string = adapter.get_name_string(
            name_id=name_id,
            platform_id=platform_id,
            platform_encoding_id=platform_encoding_id,
            language_id=language_id,
        )
        assert actual_name_string
        assert isinstance(actual_name_string, str)
        assert actual_name_string == expected_name_string
    def test_set_name_string_valid_name_records_contains_set_names_afterwards(
        self,
        raw_name_records: typ.Sequence[cft.NameRecordTuple],
        name_table_empty: NameTable,
    ) -> None:
        """Tests if calling `set_name_string()` with valid values for a name
        record sets the corresponding name records in the adapted `name` table.
        We consider name records to be successfully set if after calling the
        method the adapted `name` table contains the name records that match
        the name record description provided as the method call parameters
        (`name_string`, `name_id`, `platform_id`, `platform_encoding_id`,
        `language_id`).
        """
        adapter = fr_name.TTLibToNameTableAdapter(name_table_empty)
        # First pass: write every record through the adapter.
        for record in raw_name_records:
            (
                name_str,
                name_id,
                platform_id,
                platform_encoding_id,
                language_id,
            ) = record
            adapter.set_name_string(
                string=name_str,
                name_id=name_id,
                platform_id=platform_id,
                platform_encoding_id=platform_encoding_id,
                language_id=language_id,
            )
        # Second pass: verify each record landed in the underlying table.
        for record in raw_name_records:
            (
                name_str,
                name_id,
                platform_id,
                platform_encoding_id,
                language_id,
            ) = record
            assert name_table_contains_record(
                adapter._table,
                name_string=name_str,
                name_id=name_id,
                platform_id=platform_id,
                platform_encoding_id=platform_encoding_id,
                language_id=language_id,
            )
| true
|
ec3e81aed54059e01dc4752117e9ed8608514c52
|
Python
|
saharma/PythonExercises
|
/Lab2/lab2ex12.py
|
UTF-8
| 157
| 3.921875
| 4
|
[] |
no_license
|
word = input("Enter a string")
vowels = {"a":0, "e":0, "i":0, "o":0, "u":0}
for letter in word:
if(letter in vowels):
vowels[letter] +=1
print(vowels)
| true
|
b9f13265fd2fbe06ffabb0ca4aa07b78827ecbda
|
Python
|
N-31V/robotic-software
|
/py/cnn_miracle.py
|
UTF-8
| 3,440
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import cv2 # working with, mainly resizing, images
import numpy as np # dealing with arrays
import os # dealing with directories
from random import shuffle # mixing up or currently ordered data that might lead our network astray in training.
from tqdm import tqdm # a nice pretty percentage bar for tasks. Thanks to viewer Daniel BA1/4hler for this suggestion
import tensorflow as tf
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
# Dataset locations and model hyperparameters.
TRAIN_DIR = 'miracle/train'
TEST_DIR = 'miracle/test'
IMG_WIDTH = 160           # resized image width in pixels
IMG_HEIGHT = 120          # resized image height in pixels
LR = 1e-3                 # learning rate
MODEL_NAME = 'miracle_{}_{}.model'.format('5conv-2', LR)
TEST=300                  # number of samples held out for validation
def label_img(img):
    """One-hot label from a filename like 'chocolate.1.jpg'.

    NOTE(review): both 'chocolate' and 'vanilla' map to [1, 0] while
    everything else maps to [0, 1] -- confirm this two-class grouping
    (ice cream vs. other) is intended.
    """
    kind = img.split('.')[-3]
    if kind in ('chocolate', 'vanilla'):
        return [1, 0]
    return [0, 1]
def create_train_data():
    """Build the training set from TRAIN_DIR.

    Each image is read in colour, resized to IMG_WIDTH x IMG_HEIGHT and
    paired with its one-hot label derived from the filename; the shuffled
    set is cached to train_data.npy and returned.
    """
    training_data = []
    for img in tqdm(os.listdir(TRAIN_DIR)):
        label = label_img(img)
        path = os.path.join(TRAIN_DIR,img)
        # `img` is rebound from filename to pixel array here.
        img = cv2.imread(path,cv2.IMREAD_COLOR)
        img = cv2.resize(img, (IMG_WIDTH,IMG_HEIGHT))
        training_data.append([np.array(img),np.array(label)])
    shuffle(training_data)
    np.save('train_data.npy', training_data)
    return training_data
def process_test_data():
    """Build the (unlabeled) test set from TEST_DIR.

    Each image is read in colour, resized to IMG_WIDTH x IMG_HEIGHT and
    paired with its numeric file id (the part before the first '.'); the
    shuffled set is cached to test_data.npy and returned.
    """
    testing_data = []
    for img in tqdm(os.listdir(TEST_DIR)):
        path = os.path.join(TEST_DIR,img)
        img_num = img.split('.')[0]
        img = cv2.imread(path,cv2.IMREAD_COLOR)
        img = cv2.resize(img, (IMG_WIDTH,IMG_HEIGHT))
        testing_data.append([img, img_num])
    shuffle(testing_data)
    np.save('test_data.npy', testing_data)
    return testing_data
def make_model():
    """Build the 5-conv-stage tflearn CNN and load saved weights if present.

    Returns a tflearn.DNN ready for fit()/predict(); input is an RGB image
    of shape (IMG_WIDTH, IMG_HEIGHT, 3), output is a 2-way softmax.
    """
    tf.reset_default_graph()
    convnet = input_data(shape=[None, IMG_WIDTH, IMG_HEIGHT, 3], name='input')
    # Five conv/max-pool stages: 32 -> 64 -> 128 -> 64 -> 32 filters, 5x5 kernels.
    convnet = conv_2d(convnet, 32, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)
    convnet = conv_2d(convnet, 64, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)
    convnet = conv_2d(convnet, 128, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)
    convnet = conv_2d(convnet, 64, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)
    convnet = conv_2d(convnet, 32, 5, activation='relu')
    convnet = max_pool_2d(convnet, 5)
    # Classifier head: dense 1024 with dropout, then 2-way softmax.
    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, 0.8)
    convnet = fully_connected(convnet, 2, activation='softmax')
    convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
    model = tflearn.DNN(convnet, tensorboard_dir='log')
    # Resume from a previous checkpoint when one exists on disk.
    if os.path.exists('{}.meta'.format(MODEL_NAME)):
        model.load(MODEL_NAME)
        print('model loaded!')
    return model
if __name__ == '__main__':
    model=make_model()
    for i in range(1):
        # Rebuild the (shuffled) training data, hold out the last TEST
        # samples for validation, and fine-tune the model on the rest.
        train_data = create_train_data()
        train = train_data [: -TEST ]
        test = train_data [ -TEST :]
        X = np.array([i[0] for i in train]).reshape(-1,IMG_WIDTH,IMG_HEIGHT,3)
        Y = np.array([i[1] for i in train])
        test_x = np.array([i[0] for i in test]).reshape(-1,IMG_WIDTH,IMG_HEIGHT,3)
        test_y = np.array([i[1] for i in test])
        model.fit({'input': X}, {'targets': Y}, n_epoch=3, validation_set=({'input': test_x}, {'targets': test_y}), snapshot_step=500, show_metric=True, run_id=MODEL_NAME)
        # Persist the weights after each round so training can resume.
        model.save(MODEL_NAME)
        print('model save!')
| true
|
1cab3c517fe0b0fe4c8a399bd908097d46edf406
|
Python
|
muskanjindal24/18IT040_IT374_Python_Programming
|
/TASK4[Data Types]/2b.py
|
UTF-8
| 66
| 2.625
| 3
|
[] |
no_license
|
# Demonstrate tuple packing: parentheses are optional, and a one-element
# tuple requires a trailing comma.
tuplex = (50, 100, 150, 200)
print(tuplex)
tuplex = (1,)
print(tuplex)
| true
|
36ba5bc7ca03008464ac194e3ad34746d6cbd750
|
Python
|
dvtate/single-files
|
/balls.py
|
UTF-8
| 1,570
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
import tkinter as tk;
import random;
import time;
def randomColor():
    """Return a random short-form hex colour string such as '#a3f'."""
    return "#{:03x}".format(random.randint(0, 0xFFF))
class BouncyBall(object):
    """A ball of random size, colour and velocity that bounces on a Tk canvas."""

    def __init__(self, canvas, width, height, color, outline = None):
        """Place a random ball on *canvas* within a width x height area.

        Note: the 'color' and 'outline' parameters are accepted for
        interface compatibility but ignored — a fresh random fill is used.
        """
        self.canvas = canvas
        # Random non-zero velocity components in [-10, 10].
        self.xv = random.randint(-10, 10)
        self.yv = random.randint(-10, 10)
        while self.xv == 0:
            self.xv = random.randint(-10, 10)
        while self.yv == 0:
            self.yv = random.randint(-10, 10)
        # Keep the centre at least 10 px away from the borders.
        x = random.randint(10, width - 10)
        y = random.randint(10, height - 10)
        r = random.randint(5, 25)
        self.rect = canvas.create_oval(x - r, y - r, x + r, y + r, fill = randomColor())

    def move(self):
        """Advance one step and reflect the velocity at the canvas borders."""
        self.canvas.move(self.rect, self.xv, self.yv)
        coords = self.canvas.coords(self.rect)
        if coords[0] <= 0 or coords[2] >= WIDTH:
            self.xv = -self.xv
        # Bug fix: the vertical bound must be HEIGHT — the original compared
        # against WIDTH, which was harmless only while WIDTH == HEIGHT.
        if coords[1] <= 0 or coords[3] >= HEIGHT:
            self.yv = -self.yv
# Window/canvas geometry (square window).
WIDTH = HEIGHT = 400;
window= tk.Tk();
canvas = tk.Canvas(window, width = WIDTH, height = HEIGHT, bg = "white");
canvas.grid();
window.wm_title("XD");
# Create 100 balls at random positions with random velocities.
balls = [];
for i in range(100):
    balls.append(BouncyBall(canvas, WIDTH, HEIGHT, randomColor()));
# Animation loop: move every ball, cycle the window title, redraw ~100 fps.
d_counter = 0;
while True:
    for ball in balls:
        ball.move();
    d_counter += 1;
    window.wm_title("XD" + 'D' * (d_counter % 25));
    window.update();
    time.sleep(0.01);
| true
|
59cf6fe6a4a3dcb353850c752e681f8d123a214c
|
Python
|
olivbak/az
|
/src/data/convert_client_data.py
|
UTF-8
| 564
| 2.5625
| 3
|
[] |
no_license
|
import pygame
from ..sprites.tank import Tank
from ..sprites.bullet import Bullet
class convert_client_data():
    """Translate server-side tank/bullet dictionaries into pygame sprite groups."""

    def __init__(self):
        pass

    def get_tanks_from_server_data(self, data):
        """Build a sprite Group of Tank objects, one per dictionary in *data*."""
        group = pygame.sprite.Group()
        for entry in data:
            group.add(Tank(entry["pos"], entry["rotation"], entry["id"]))
        return group

    def get_bullets_from_server_data(self, data):
        """Build a sprite Group of Bullet objects, one per dictionary in *data*."""
        group = pygame.sprite.Group()
        for entry in data:
            group.add(Bullet(entry['pos'], entry['vel'], entry['id'], 0, []))
        return group
| true
|
114f9b0837fd072d7461fa7b514b2bd40e741437
|
Python
|
rohanparmar/CMPT-120-Final-Project-Yet-Another-Image-Processor
|
/cmpt120imageProj.py
|
UTF-8
| 3,003
| 3.28125
| 3
|
[] |
no_license
|
# CMPT 120 Yet Another Image Processer
# Starter code for cmpt120imageProj.py
# ***do not modify this file***
import pygame
import numpy
def getImage(filename):
    """
    Input: filename - string containing image filename to open
    Returns: 2d array of RGB values
    """
    image = pygame.image.load(filename)
    # surfarray gives a (width, height, 3) ndarray; convert to nested lists.
    return pygame.surfarray.array3d(image).tolist()
def saveImage(pixels, filename):
    """
    Input: pixels - 2d array of RGB values
           filename - string containing filename to save image
    Output: Saves a file containing pixels
    """
    nparray = numpy.asarray(pixels)
    surf = pygame.surfarray.make_surface(nparray)
    (width, height, colours) = nparray.shape
    # NOTE(review): set_mode() replaces the surface built by make_surface
    # above and opens a display window — presumably intentional for this
    # course scaffold; confirm before reusing headless.
    surf = pygame.display.set_mode((width, height))
    pygame.surfarray.blit_array(surf, nparray)
    pygame.image.save(surf, filename)
def showImage(pixels, title):
    """
    Input: pixels - 2d array of RGB values
           title - title of the window
    Output: show the image in a window
    """
    nparray = numpy.asarray(pixels)
    surf = pygame.surfarray.make_surface(nparray)
    (width, height, colours) = nparray.shape
    pygame.display.init()
    pygame.display.set_caption(title)
    screen = pygame.display.set_mode((width, height))
    # Light grey background behind the blitted image.
    screen.fill((225, 225, 225))
    screen.blit(surf, (0, 0))
    pygame.display.update()
def showInterface(pixels, title, textList):
    """
    Input: pixels - 2d array of RGB values
           title - title of the window
           textList - list of strings to be displayed at the bottom of the window
    Output: show the image in a window
    """
    nparray = numpy.asarray(pixels)
    surf = pygame.surfarray.make_surface(nparray)
    (width, height, colours) = nparray.shape
    # set up the text to be displayed (white on black)
    fontObj = pygame.font.Font("DIN-Regular.ttf", 20)
    textObjs = []
    for line in textList:
        textObjs += [fontObj.render(line, False, (255, 255, 255), (0, 0, 0))]
    # find out the largest width within the lines
    maxLineWidth = textObjs[0].get_width()
    for lo in textObjs:
        if maxLineWidth < lo.get_width():
            maxLineWidth = lo.get_width()
    # find out the width of the screen (wide enough for image and text)
    width = max(width, maxLineWidth)
    # set up the display; window is tall enough for the image plus all text lines
    pygame.display.init()
    pygame.display.set_caption(title + " (" + str(width) + "x" + str(height) + ")")
    screen = pygame.display.set_mode((width, height + textObjs[0].get_height()*len(textObjs)))
    screen.fill((0, 0, 0))
    # add the image to the display
    screen.blit(surf, (0, 0))
    # add the texts to the display, stacked below the image
    i = 0
    for textObj in textObjs:
        screen.blit(textObj, (0, height + i * textObj.get_height()))
        i += 1
    # display everything
    pygame.display.update()
def createBlackImage(width, height):
    """
    Input: width - width of the filled image in pixels
           height - height of the filled image in pixels
    Output: 2d array of RGB values all set to zero
    """
    # Same structure numpy.zeros((width, height, 3)).tolist() would produce.
    return [[[0.0, 0.0, 0.0] for _ in range(height)] for _ in range(width)]
| true
|
9aa5dce55ab0a1c3a4cd906f6fa7a326753dc4d6
|
Python
|
sandeepkumar8713/pythonapps
|
/26_sixthFolder/15_smallest_good_base.py
|
UTF-8
| 1,866
| 4.21875
| 4
|
[] |
no_license
|
# https://leetcode.com/problems/smallest-good-base/
# Question : Given an integer n represented as a string, return the smallest good base of n.
# We call k >= 2 a good base of n, if all digits of n base k are 1's.
#
# Example : Input: n = "13"
# Output: "3" (3 digit number system)
# Explanation: 13 base 3 is 111.
#
# Question Type : OddOne
# Used : ans^0 + ans^1 + ans^2 + ... + ans^N-1 = n
# As the base increases - from 2 to 8 to 16 the length of the string decreases.
# From this we can infer that N(no. of digits) must be less than or equal to len(bin(n)) - 2.
# Where the -2 is to ignore the prefix '0b'. i.e. bin(5) = '0b101'.
# Now we should do binary search from 2 to n - 1 base, with number of digits as N to 0.
# Logic:
# N = len(bin(n)[2:])
# for length in range(N, 0, -1):
# low = 2, high = n - 1
# while low <= high:
# guessBase = (low + high) // 2
# v = is_valid(guessBase, length, n)
# if v < 0: high = guessBase - 1
# elif v > 0: low = guessBase + 1
# else: return str(guessBase)
# Complexity : O(log n * log n)
def is_valid(base, length, n):
    """returns 0 if total == n, pos if n > total and neg if n < total"""
    # The repunit 11...1 (length digits) in the given base equals
    # base^0 + base^1 + ... + base^(length-1).
    return n - sum(base ** exp for exp in range(length))
def smallestGoodBase(n):
    """Return, as a string, the smallest base k >= 2 in which n reads as all 1's."""
    n = int(n)
    # The repunit can have at most n.bit_length() digits (base 2 yields the
    # longest all-ones form).  Try longer lengths first so the first base
    # found is the smallest one; binary-search the base at each length.
    for length in range(n.bit_length(), 0, -1):
        low, high = 2, n - 1
        while low <= high:
            base = (low + high) // 2
            total = sum(base ** exp for exp in range(length))
            if total > n:
                high = base - 1
            elif total < n:
                low = base + 1
            else:
                return str(base)
if __name__ == "__main__":
    # Demo: 13 in base 3 is 111, so the smallest good base is 3.
    n = 13
    print(smallestGoodBase(n))
| true
|
cf97ab1299e2bfcce4cb7fddbd6c920ceef5f29d
|
Python
|
alexmlamb/VariationalBayes
|
/lib/driver.py
|
UTF-8
| 4,373
| 2.578125
| 3
|
[] |
no_license
|
import theano
import theano.tensor as T
import numpy as np
import numpy.random as rng
from HiddenLayer import HiddenLayer
from Updates import Updates
import matplotlib.pyplot as plt
from RandomData import getData
import math
theano.config.flaotX = 'float32'
if __name__ == "__main__":
    # Hyperparameters for training the variational autoencoder (Python 2 / theano).
    config = {}
    config["learning_rate"] = 0.0001
    config["number_epochs"] = 200000
    config["report_epoch_ratio"] = 400
    config["popups"] = True
    numHidden = 800
    numLatent = 800
    numInput = 1
    numOutput = 1
    srng = theano.tensor.shared_randomstreams.RandomStreams(rng.randint(999999))
    #N x 1
    x = T.matrix()
    #N x 1
    observed_y = T.matrix()
    # Encoder: x -> h1 -> h2 -> (z_mean, z_var); 'exp' keeps the variance positive.
    h1 = HiddenLayer(x, num_in = numInput, num_out = numHidden, initialization = 'xavier', name = "h1", activation = "relu")
    h2 = HiddenLayer(h1.output, num_in = numHidden, num_out = numHidden, initialization = 'xavier', name = "h2", activation = "relu")
    z_mean = HiddenLayer(h2.output, num_in = numHidden, num_out = numLatent, initialization = 'xavier', name = 'z_mean', activation = None)
    z_var = HiddenLayer(h2.output, num_in = numHidden, num_out = numLatent, initialization = 'xavier', name = 'z_var', activation = 'exp')
    # Reparameterization trick: z = eps * sigma + mu with eps ~ N(0, 1).
    z_sampled = srng.normal(size = (100, numLatent))
    z = z_sampled * z_var.output + z_mean.output
    # Decoder: z -> h3 -> h4 -> y.
    h3 = HiddenLayer(z, num_in = numLatent, num_out = numHidden, initialization = 'xavier', name = "h3", activation = "relu")
    h4 = HiddenLayer(h3.output, num_in = numHidden, num_out = numHidden, initialization = 'xavier', name = "h4", activation = "relu")
    y = HiddenLayer(h4.output, num_in = numHidden, num_out = numOutput, initialization = 'xavier', name = "output", activation = None)
    # Generator path: reuse the decoder weights on pure noise samples.
    h3_generated = HiddenLayer(z_sampled, num_in = numLatent, num_out = numHidden, initialization = 'xavier', params = h3.getParams(), name = "h3", activation = "relu")
    h4_generated = HiddenLayer(h3_generated.output, num_in = numHidden, num_out = numHidden, initialization = 'xavier', params = h4.getParams(), name = "h4", activation = "relu")
    y_generated = HiddenLayer(h4_generated.output, num_in = numHidden, num_out = numOutput, initialization = 'xavier', params = y.getParams(), name = "output", activation = None)
    # Collect all trainable parameters (generator layers share the decoder's).
    layers = [h1,z_mean,z_var,h2,h3,y,h4]
    params = {}
    for layer in layers:
        layerParams = layer.getParams()
        for paramKey in layerParams:
            params[paramKey] = layerParams[paramKey]
    print "params", params
    # KL divergence between the approximate posterior and a unit Gaussian.
    variational_loss = 0.5 * T.sum(z_mean.output**2 + z_var.output - T.log(z_var.output) - 1.0)
    # Total loss: squared reconstruction error plus the KL term.
    loss = T.sum(T.sqr(y.output - observed_y)) + variational_loss
    updateObj = Updates(params, loss, config["learning_rate"])
    updates = updateObj.getUpdates()
    train = theano.function(inputs = [x, observed_y], outputs = [y.output, loss, variational_loss, T.mean(z_mean.output), T.mean(z_var.output)], updates = updates)
    print "Finished compiling training function"
    sample = theano.function(inputs = [], outputs = [y_generated.output])
    lossLst = []
    xLst = getData(size = (1000)).tolist()
    # Training loop: autoencode random data batches; report periodically.
    for epoch in range(0, config["number_epochs"]):
        x = getData(size = (100,1))
        y,loss,variational_loss,mean,var = train(x,x)
        lossLst += [math.log(loss)]
        if epoch % config["report_epoch_ratio"] == 0:
            print "x", x[0]
            print "y", y[0][0]
            print "loss", loss
            print "vloss", variational_loss
            print "mean", mean
            print "var", var
            # Draw 10 batches from the generator and compare their statistics
            # with the true data distribution.
            samples = []
            for i in range(0, 10):
                samples += sample()[0].tolist()
            samples = np.asarray(samples)
            print "sample mean", samples.mean()
            print "sample p50", np.percentile(samples, 50.0)
            print "sample p90", np.percentile(samples, 90.0)
            print "true mean", np.asarray(xLst).mean()
            print "true p50", np.percentile(np.asarray(xLst), 50.0)
            print "true p90", np.percentile(np.asarray(xLst), 90.0)
            if config["popups"]:
                bins = np.arange(-2.0, 20.0, 2.0)
                plt.hist(xLst, alpha = 0.5, bins = bins)
                plt.hist(samples, alpha = 0.5, bins = bins)
                plt.show()
    plt.plot(lossLst)
    plt.show()
| true
|
2aaa6692403f7b4b809fcf681e88997148257212
|
Python
|
vxlm/DataStructures-Algorithms
|
/dictionary.py
|
UTF-8
| 3,549
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#Simple dictionary with hashes by modulo, table doubling for size
#and chaining for collisions
class Dictionary():
    """Hash table using modulo hashing, chaining for collisions, and table
    doubling (with rehashing) once the load factor reaches 1/2.

    Bug fixes relative to the original:
      * find()/delete() no longer crash on a never-used slot, which holds
        the [None, None] placeholder rather than [key, value] pairs;
      * tableDoubling() now rehashes existing entries — the hash depends on
        the table size, so without rehashing old keys became unfindable.
    """

    def __init__(self, size = 10):
        # A fresh slot holds the [None, None] placeholder; an occupied slot
        # holds a list of [key, value] pairs (the collision chain).
        self.dict = [[None, None] for i in range(size)]
        self.size = size
        self.inserted = 0

    def __str__(self):
        return str(self.dict)

    def hashing(self, key):
        """Map an integer key to a bucket index."""
        return key % self.size

    def insert(self, key, value):
        """Insert a key/value pair, doubling the table at load factor 1/2."""
        if self.inserted >= self.size // 2:
            self.tableDoubling()
        hashed = self.hashing(key)
        if self.dict[hashed][0] is None:
            self.dict[hashed] = [[key, value]]
        else:
            self.dict[hashed].append([key, value])
        self.inserted += 1

    def find(self, key):
        """Return the value stored under key, or None if absent."""
        hashed = self.hashing(key)
        for pair in self.dict[hashed]:
            # Skip the [None, None] placeholder entries of untouched slots.
            if pair is not None and pair[0] == key:
                return pair[1]
        return None

    def delete(self, key):
        """Remove key from the table (no-op if the key is absent)."""
        hashed = self.hashing(key)
        for pair in self.dict[hashed]:
            if pair is not None and pair[0] == key:
                self.dict[hashed].remove(pair)
                self.inserted -= 1
                break
        if len(self.dict[hashed]) == 0:
            # Restore the empty-slot placeholder.
            self.dict[hashed] = [None, None]

    def tableDoubling(self):
        """Double the capacity and rehash every stored pair."""
        old_buckets = self.dict
        self.size *= 2
        self.dict = [[None, None] for i in range(self.size)]
        self.inserted = 0
        for bucket in old_buckets:
            for pair in bucket:
                if pair is not None:
                    # Re-place directly; the load factor is halved, so this
                    # cannot trigger another doubling.
                    hashed = self.hashing(pair[0])
                    if self.dict[hashed][0] is None:
                        self.dict[hashed] = [pair]
                    else:
                        self.dict[hashed].append(pair)
                    self.inserted += 1
# Dictionary with open addressing instead
class Dictionary2():
    """Hash table using open addressing (probing) instead of chaining.

    Bug fixes relative to the original:
      * insert() referenced an undefined name ('probed') and used '==' where
        an assignment was intended, so every collision crashed or was dropped;
      * find() crashed on empty slots and could recurse forever;
      * delete() leaves a tombstone that probing skips over;
      * tableDoubling() now rehashes, since the hash depends on the size;
      * probing is linear, which guarantees every slot is eventually visited.
    """

    _DELETED = "Deleted"  # tombstone marker, matching the original sentinel

    def __init__(self, size = 10):
        # A slot is [None, None] when never used, [self._DELETED, self._DELETED]
        # after a deletion, or [[key, value]] when occupied.
        self.dict = [[None, None] for i in range(size)]
        self.size = size
        self.inserted = 0

    def __str__(self):
        return str(self.dict)

    def hashing(self, key):
        """Primary hash: map a key to a slot index."""
        return key % self.size

    def hashing2(self, key, counter):
        """Secondary hash (kept for interface compatibility)."""
        return counter * key % self.size

    def _probe(self, key, counter):
        # Linear probing: counter=1 is the home slot, each step moves one
        # slot further; wraps around so all slots get visited.
        return (self.hashing(key) + counter - 1) % self.size

    def insert(self, key, value, counter = 1):
        """Insert key/value, probing past occupied slots; returns True on success."""
        if self.inserted >= self.size // 2:
            self.tableDoubling()
        while counter <= self.size:
            slot = self._probe(key, counter)
            entry = self.dict[slot]
            if entry[0] is None or entry[0] == self._DELETED:
                self.dict[slot] = [[key, value]]
                self.inserted += 1
                return True
            counter += 1
        return False  # table full (unreachable at load factor <= 1/2)

    def find(self, key, counter = 1):
        """Return the stored [key, value] pair, or None if the key is absent."""
        while counter <= self.size:
            slot = self._probe(key, counter)
            entry = self.dict[slot]
            if entry[0] is None:
                # Never-used slot: the key cannot be further along the probe path.
                return None
            if entry[0] != self._DELETED and entry[0][0] == key:
                return entry[0]
            counter += 1
        return None

    def delete(self, key, counter = 1):
        """Remove key, leaving a tombstone so later probe chains stay intact."""
        while counter <= self.size:
            slot = self._probe(key, counter)
            entry = self.dict[slot]
            if entry[0] is None:
                return
            if entry[0] != self._DELETED and entry[0][0] == key:
                self.dict[slot] = [self._DELETED, self._DELETED]
                self.inserted -= 1
                return
            counter += 1

    def tableDoubling(self):
        """Double the capacity and rehash all live (non-tombstone) entries."""
        old_slots = self.dict
        self.size *= 2
        self.dict = [[None, None] for i in range(self.size)]
        self.inserted = 0
        for entry in old_slots:
            if entry[0] is not None and entry[0] != self._DELETED:
                # Load factor is halved, so insert() cannot re-trigger doubling.
                self.insert(entry[0][0], entry[0][1])
| true
|
db70d5602c4cf1ae0665512fa2b6a471179e4972
|
Python
|
snowdence/hcmus_ai_wumpus
|
/18127004_18127027_18127263/src/states/ScreenState.py
|
UTF-8
| 679
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
from pygame.math import Vector2
class ScreenState:
    """Holds the screen dimensions and the entities placed on it."""

    # Class-level declarations kept for compatibility; screen_size is
    # expected to be a pygame Vector2 (anything with .x / .y works).
    screen_size = None
    entities = []

    def __init__(self):
        # Bug fix: give each instance its own list — the original shared
        # one mutable class-level list across every ScreenState instance.
        self.entities = []

    @property
    def get_width(self):
        """Screen width in whole pixels."""
        return int(self.screen_size.x)

    @property
    def get_height(self):
        """Screen height in whole pixels."""
        return int(self.screen_size.y)

    def is_inside(self, position):
        """True when position lies within the screen rectangle.

        Bug fix: the original compared position.y against the width and
        never bounded position.x from above.
        """
        return (0 <= position.x < self.get_width
                and 0 <= position.y < self.get_height)

    def find_unit(self, position):
        """Return the entity occupying the same integer grid cell, else None."""
        for entity in self.entities:
            if int(entity.position.x) == int(position.x) and int(entity.position.y) == int(position.y):
                return entity
        return None
| true
|
04cc2a200b6a1eb751335264f92484fab3adbd90
|
Python
|
nghiattran/my-notes
|
/InterestingProblems/countBattleships.py
|
UTF-8
| 1,841
| 4.0625
| 4
|
[] |
no_license
|
# Given an 2D board, count how many different battleships are in it. The battleships are represented with 'X's, empty slots are represented with '.'s. You may assume the following rules:
# You receive a valid board, made of only battleships or empty slots.
# Battleships can only be placed horizontally or vertically. In other words, they can only be made of the shape 1xN (1 row, N columns) or Nx1 (N rows, 1 column), where N can be of any size.
# At least one horizontal or vertical cell separates between two battleships - there are no adjacent battleships.
# Example:
# X..X
# ...X
# ...X
# In the above board there are 2 battleships.
# Invalid Example:
# ...X
# XXXX
# ...X
# This is an invalid board that you will not receive - as battleships will always have a cell separating between them.
# Follow up:
# Could you do it in one-pass, using only O(1) extra memory and without modifying the value of the board?
class Solution(object):
    def countBattleships(self, board):
        """Count battleships on a 2D grid of 'X' (ship) and '.' (empty).

        A ship is a maximal 1xN or Nx1 run of 'X'; ships never touch.
        Each ship is counted only at its top-left cell, giving a single
        pass with O(1) extra memory and no board mutation.

        Fixes over the original: an empty board is handled (the original
        indexed board[0] unconditionally), and range() replaces xrange()
        so the code runs on both Python 2 and Python 3.

        :type board: List[List[str]]
        :rtype: int
        """
        if not board or not board[0]:
            return 0
        cols = len(board)
        rows = len(board[0])
        ct = 0
        for c in range(cols):
            for r in range(rows):
                # Count a ship only at its first cell: skip when the cell
                # above or to the left belongs to the same ship (those
                # indices were already visited by this scan order).
                if board[c][r] != 'X':
                    continue
                if r > 0 and board[c][r - 1] == 'X':
                    continue
                if c > 0 and board[c - 1][r] == 'X':
                    continue
                ct += 1
        return ct
| true
|
a78c5b18ed0444314c9b5286c37afa254f37219e
|
Python
|
onelab-eu/manifold
|
/manifold/operators/subquery.py
|
UTF-8
| 24,120
| 2.6875
| 3
|
[] |
no_license
|
import traceback
from types import StringTypes
from manifold.core.filter import Filter
from manifold.core.relation import Relation
from manifold.core.record import Record, LastRecord
from manifold.operators import Node, ChildStatus, ChildCallback
from manifold.operators.selection import Selection
from manifold.operators.projection import Projection
from manifold.util.predicate import Predicate, eq, contains, included
from manifold.util.log import Log
DUMPSTR_SUBQUERIES = "<subqueries>"
#------------------------------------------------------------------
# SUBQUERY node
#------------------------------------------------------------------
class SubQuery(Node):
"""
SUBQUERY operator (cf nested SELECT statements in SQL)
self.parent represents the main query involved in the SUBQUERY operation.
self.children represents each subqueries involved in the SUBQUERY operation.
"""
def __init__(self, parent, children_ast_relation_list):
"""
Constructor
Args:
parent: The main query (AST instance ?)
children_ast_relation_list: A list of (AST , Relation) tuples
"""
super(SubQuery, self).__init__()
# Parameters
self.parent = parent
# Remove potentially None children
# TODO how do we guarantee an answer to a subquery ? we should branch
# an empty FromList at query plane construction
self.children = []
self.relations = []
for ast, relation in children_ast_relation_list:
self.children.append(ast)
self.relations.append(relation)
# Member variables
self.parent_output = []
# Set up callbacks
old_cb = parent.get_callback()
parent.set_callback(self.parent_callback)
self.set_callback(old_cb)
self.query = self.parent.get_query().copy()
for i, child in enumerate(self.children):
self.query.fields.add(child.get_query().get_from())
# Adding dotted fields like "hops.ttl"
self.query.fields |= set([
".".join([
self.relations[i].get_relation_name(),
field_name
]) for field_name in child.get_query().get_select()
])
# Prepare array for storing results from children: parent result can
# only be propagated once all children have replied
self.child_results = []
self.status = ChildStatus(self.all_done)
for i, child in enumerate(self.children):
child.set_callback(ChildCallback(self, i))
self.child_results.append([])
print "init done"
# @returns(Query)
# def get_query(self):
# """
# \brief Returns the query representing the data produced by the nodes.
# \return query representing the data produced by the nodes.
# """
# # Query is unchanged XXX ???
# return Query(self.parent.get_query())
def dump(self, indent = 0):
"""
\brief Dump the current node
\param indent current indentation
"""
s = []
s.append(self.parent.dump(indent+1))
s.append(Node.dump(self, indent))
for child in self.children:
s.append(child.dump(indent + 1))
return "\n".join(s)
    def __repr__(self):
        # Constant tag; the interesting structure is shown by dump().
        return DUMPSTR_SUBQUERIES
    def start(self):
        """
        \brief Propagates a START message through the node
        """
        # Only the parent is started here; the children are started from
        # run_children() once every parent record has been received.
        self.parent.start()
def parent_callback(self, record):
"""
Processes records received by the parent node
Args:
record: A dictionary representing the received record
"""
if record.is_last():
# When we have received all parent records, we can run children
if self.parent_output:
self.run_children()
return
# Store the record for later...
self.parent_output.append(record)
# This method until everything is a record... XXX set up warnings
@staticmethod
def get_element_key(element, key):
if isinstance(element, Record):
return element.get_value(key)
elif isinstance(element, dict):
# record
return Record.get_value(element, key)
else:
# id or tuple(id1, id2, ...)
return element
    def run_children(self):
        """
        Run children queries (subqueries) assuming the parent query (main query)
        has successfully ended.
        """
        if not self.parent_output:
            # No parent record, this is useless to run children queries.
            self.send(LastRecord())
            return
        #print "=" * 80
        #print self.parent_output
        #print "=" * 80
        if not self.children:
            # The top operator has build a SubQuery node without child queries,
            # so this SubQuery operator is useless and should be replaced by
            # its main query.
            Log.warning("SubQuery::run_children: no child node. The query plan could be improved")
            self.send(LastRecord())
            return
        # Inspect the first parent record to deduce which fields have already
        # been fetched
        first_record = self.parent_output[0]
        parent_fields = set(first_record.keys())
        # Optimize child queries according to the fields already retrieved thanks
        # to the parent query.
        useless_children = set()
        for i, child in enumerate(self.children[:]):
            # Test whether the current child provides relevant fields (e.g.
            # fields not already fetched in the parent record). If so, reduce
            # the set of queried field in order to only retrieve relevant fields.
            child_fields = child.get_query().get_select()
            relation = self.relations[i]
            relation_name = relation.get_relation_name()
            already_fetched_fields = set()
            if relation_name in parent_fields:
                if relation.get_type() in [Relation.types.LINK_1N, Relation.types.LINK_1N_BACKWARDS]:
                    # 1..N relation: the parent field holds a list; peek at
                    # its first element to see which fields it carries.
                    if relation_name in first_record and first_record[relation_name] and len(first_record[relation_name]) > 0:
                        if isinstance(first_record[relation_name][0], Record):
                            already_fetched_fields = set(first_record[relation_name][0].keys())
                        else:
                            # If we do not have a dict, we have only keys, so it's like we had no field of importance...
                            already_fetched_fields = set()
                    else:
                        already_fetched_fields = set()
                else:
                    if relation_name in first_record and first_record[relation_name] and len(first_record[relation_name]) > 0:
                        already_fetched_fields = set(first_record[relation_name].keys())
                    else:
                        already_fetched_fields = set()
            # XXX routerv2: we need to keep key used for subquery
            key_field = relation.get_predicate().get_value()
            relevant_fields = child_fields - already_fetched_fields
            if not relevant_fields:
                # The parent already carries everything this child would
                # fetch: reuse the embedded records and skip the child query.
                tmp = list()
                for pr in self.parent_output:
                    tmp.extend(pr[relation_name])
                self.child_results[i] = tmp # Records
                useless_children.add(i)
                continue
            else:
                relevant_fields |= frozenset([key_field]) # necessary ?
                if child_fields != relevant_fields:
                    # XXX This seems to remove the key used for joining
                    self.children[i] = child.optimize_projection(relevant_fields)
        # If every children are useless, this means that we already have full records
        # thanks to the parent query, so we simply forward those records.
        if len(self.children) == len(useless_children):
            map(self.send, self.parent_output)
            self.send(LastRecord())
            return
        # Loop through children and inject the appropriate parent results
        for i, child in enumerate(self.children):
            if i in useless_children: continue
            # We have two cases:
            # (1) either the parent query has subquery fields (a list of child
            #     ids + eventually some additional information)
            # (2) either the child has a backreference to the parent
            #     ... eventually a partial reference in case of a 1..N relationship
            #
            # In all cases, we will collect all identifiers to proceed to a
            # single child query for efficiency purposes, unless it's not
            # possible (?).
            #
            # We have several parent records stored in self.parent_output
            #
            # /!\ Can we have a mix of (1) and (2) ? For now, let's suppose NO.
            #  * We could expect key information to be stored in the DBGraph
            # The operation to be performed is understood only be looking at the predicate
            relation = self.relations[i]
            predicate = relation.get_predicate()
            key, op, value = predicate.get_tuple()
            if op == eq:
                # 1..N
                # Example: parent has slice_hrn, resource has a reference to slice
                if relation.get_type() == Relation.types.LINK_1N_BACKWARDS:
                    parent_ids = [record[key] for record in self.parent_output]
                    if len(parent_ids) == 1:
                        parent_id, = parent_ids
                        filter_pred = Predicate(value, eq, parent_id)
                    else:
                        filter_pred = Predicate(value, included, parent_ids)
                else:
                    parent_ids = []
                    for parent_record in self.parent_output:
                        record = Record.get_value(parent_record, key)
                        if not record:
                            record = []
                        # XXX Nothing to do for the case where the list of keys in the parent is empty
                        if relation.get_type() in [Relation.types.LINK_1N, Relation.types.LINK_1N_BACKWARDS]:
                            # we have a list of elements
                            # element = id or dict : simple key
                            #         = tuple or dict : composite key
                            parent_ids.extend([self.get_element_key(r, value) for r in record])
                        else:
                            parent_ids.append(self.get_element_key(record, value))
                    #if isinstance(key, tuple):
                    #    parent_ids = [x for record in self.parent_output if key in record for x in record[key]]
                    #else:
                    #    ##### record[key] = text, dict, or list of (text, dict)
                    #    parent_ids = [record[key] for record in self.parent_output if key in record]
                    #
                    #if parent_ids and isinstance(parent_ids[0], dict):
                    #    parent_ids = map(lambda x: x[value], parent_ids)
                    if len(parent_ids) == 1:
                        parent_id, = parent_ids
                        filter_pred = Predicate(value, eq, parent_id)
                    else:
                        filter_pred = Predicate(value, included, parent_ids)
                # Injecting predicate
                old_child_callback= child.get_callback()
                self.children[i] = child.optimize_selection(Filter().filter_by(filter_pred))
                self.children[i].set_callback(old_child_callback)
            elif op == contains:
                # 1..N
                # Example: parent 'slice' has a list of 'user' keys == user_hrn
                for slice in self.parent_output:
                    if not child.get_query().object in slice: continue
                    users = slice[key]
                    # users is either a list of ids or a list of records
                    user_data = []
                    for user in users:
                        if isinstance(user, dict):
                            user_data.append(user)
                        else:
                            # we have a bare key
                            # XXX Take multiple keys into account
                            user_data.append({value: user})
                    # Let's inject user_data in the right child
                    child.inject(user_data, value, None)
            else:
                raise Exception, "No link between parent and child queries"
        #print "*** before run children ***"
        #self.dump()
        # We make another loop since the children might have been modified in
        # the previous one.
        for i, child in enumerate(self.children):
            if i in useless_children: continue
            self.status.started(i)
        for i, child in enumerate(self.children):
            if i in useless_children: continue
            child.start()
    def all_done(self):
        """
        \brief Called when all children of the current subquery are done: we
        process results stored in the parent.
        """
        try:
            for parent_record in self.parent_output:
                # Dispatching child results
                for i, child in enumerate(self.children):
                    relation = self.relations[i]
                    predicate = relation.get_predicate()
                    key, op, value = predicate.get_tuple()
                    if op == eq:
                        # 1..N
                        # Example: parent has slice_hrn, resource has a reference to slice
                        #    PARENT        CHILD
                        # Predicate: (slice_hrn,) == slice
                        # Collect in parent all child such as they have a pointer to the parent
                        record = Record.get_value(parent_record, key)
                        if not record:
                            record = []
                        if not isinstance(record, (list, tuple, set, frozenset)):
                            record = [record]
                        if relation.get_type() in [Relation.types.LINK_1N, Relation.types.LINK_1N_BACKWARDS]:
                            # we have a list of elements
                            # element = id or dict : simple key
                            #         = tuple or dict : composite key
                            ids = [SubQuery.get_element_key(r, value) for r in record]
                        else:
                            ids = [SubQuery.get_element_key(record, value)]
                        if len(ids) == 1:
                            id, = ids
                            filter = Filter().filter_by(Predicate(value, eq, id))
                        else:
                            filter = Filter().filter_by(Predicate(value, included, ids))
                        #if isinstance(key, StringTypes):
                        #    # simple key
                        #    ids = [o[key]] if key in o else []
                        #    #print "IDS=", ids
                        #    #if ids and isinstance(ids[0], dict):
                        #    #    ids = map(lambda x: x[value], ids)
                        #    # XXX we might have equality instead of IN in case of a single ID
                        #    print "VALUE", value, "INCLUDED ids=", ids
                        #    filter = Filter().filter_by(Predicate(value, included, ids))
                        #else:
                        #    # Composite key, o[value] is a dictionary
                        #    for field in value:
                        #        filter = filter.filter_by(Predicate(field, included, o[value][field])) # o[value] might be multiple
                        # Attach every matching child record under the relation name.
                        parent_record[relation.get_relation_name()] = []
                        for child_record in self.child_results[i]:
                            if filter.match(child_record):
                                parent_record[relation.get_relation_name()].append(child_record)
                    elif op == contains:
                        # 1..N
                        # Example: parent 'slice' has a list of 'user' keys == user_hrn
                        #    PARENT        CHILD
                        # Predicate: user contains (user_hrn, )
                        # first, replace records by dictionaries. This only works for non-composite keys
                        if parent_record[child.query.object]:
                            record = parent_record[child.query.object][0]
                            if not isinstance(record, dict):
                                parent_record[child.query.object] = [{value: record} for record in parent_record[child.query.object]]
                        if isinstance(value, StringTypes):
                            for record in parent_record[child.query.object]:
                                # Find the corresponding record in child_results and update the one in the parent with it
                                for k, v in record.items():
                                    filter = Filter().filter_by(Predicate(value, eq, record[value]))
                                    for r in self.child_results[i]:
                                        if filter.match(r):
                                            record.update(r)
                        else:
                            for record in parent_record[child.query.object]:
                                # Find the corresponding record in child_results and update the one in the parent with it
                                for k, v in record.items():
                                    filter = Filter()
                                    for field in value:
                                        filter = filter.filter_by(Predicate(field, eq, record[field]))
                                    for r in self.child_results[i]:
                                        if filter.match(r):
                                            record.update(r)
                    else:
                        raise Exception, "No link between parent and child queries"
                self.send(parent_record)
            self.send(LastRecord())
        except Exception, e:
            print "EEE", e
            traceback.print_exc()
def child_callback(self, child_id, record):
"""
\brief Processes records received by a child node
\param record dictionary representing the received record
"""
if record.is_last():
self.status.completed(child_id)
return
# Store the results for later...
self.child_results[child_id].append(record)
    def inject(self, records, key, query):
        """
        \brief Inject record / record keys into the node
        \param records list of dictionaries representing records, or list of
        """
        # Record injection is not supported for SubQuery nodes.
        raise Exception, "Not implemented"
    def optimize_selection(self, filter):
        """
        Propagate a selection (WHERE clause) through this SubQuery node.
        Predicates involving only parent-query fields are pushed down into
        the parent; the rest stay in a Selection node placed above this one.
        Args:
            filter: A Filter instance (a set of Predicates).
        Returns:
            The optimized AST.
        """
        parent_filter, top_filter = Filter(), Filter()
        for predicate in filter:
            if predicate.get_field_names() <= self.parent.get_query().get_select():
                parent_filter.add(predicate)
            else:
                # Predicates over child (dotted) fields are not pushed down yet.
                Log.warning("SubQuery::optimize_selection() is only partially implemented : %r" % predicate)
                top_filter.add(predicate)
        if parent_filter:
            self.parent = self.parent.optimize_selection(parent_filter)
            # Re-hook our callback on the (possibly new) parent node.
            self.parent.set_callback(self.parent_callback)
        if top_filter:
            return Selection(self, top_filter)
        return self
def optimize_projection(self, fields):
    """
    Propagates projection (SELECT) through a SubQuery node.
    A field is relevant if:
        - it is explicitely queried (i.e. it is a field involved in the projection)
        - it is needed to perform the SubQuery (i.e. it is involved in a Predicate)
    Args:
        fields: A frozenset of String containing the fields involved in the projection.
    Returns:
        The optimized AST once this projection has been propagated.
    """
    # 1) Determine for the parent and each child which fields are explicitely
    #    queried by the user.
    # 2) Determine for the parent and each child which additionnal fields are
    #    required to join the parent records and the child records.
    # 3) Optimize the parent AST and the child ASTs consequently.
    # 4) Filter additionnal fields (see (2)) which have not been queried by
    #    adding a Projection node above the resulting optimized AST.

    # 0) Initialization
    parent_fields = set()  # fields returned by the parent query
    child_fields = dict()  # fields returned by each child query, keyed by relation name

    # 1) If the SELECT clause refers to "a.b", this is a Query related to the
    #    child subquery related to "a". If the SELECT clause refers to "b" this
    #    is always related to the parent query.
    parent_fields = set([field for field in fields if not "." in field]) \
        | set([field.split('.')[0] for field in fields if "." in field])
    for i, child in enumerate(self.children[:]):
        child_name = self.relations[i].get_relation_name()
        # Strip the "<child_name>." prefix to obtain the child-local field name.
        child_fields[child_name] = set([field.split('.', 1)[1] for field in fields if field.startswith("%s." % child_name)])

    # 2) Add to child_fields and parent_fields the field names needed to
    #    connect the parent to its children. If such fields are added, we will
    #    filter them in step (4). Once we have deduced for each child its
    #    queried fields (see (1)) and the fields needed to connect it to the
    #    parent query (2), we can start to optimize the projection (3).
    require_top_projection = False
    for i, child in enumerate(self.children[:]):
        relation = self.relations[i]
        predicate = relation.get_predicate()
        child_name = relation.get_relation_name()
        if not predicate.get_field_names() <= parent_fields:
            # The parent must also return the join key fields, even though
            # the user did not ask for them; they are filtered out in (4).
            parent_fields |= predicate.get_field_names()  # XXX jordan i don't understand this
            require_top_projection = True
        if not predicate.get_value_names() <= parent_fields:
            require_top_projection = True
        # NOTE(review): the join value fields are always added to the child's
        # requested fields so the join in the parent callback can match records.
        # The original indentation of this copy is ambiguous — confirm against
        # upstream that this line sits at loop level, not inside the if above.
        child_fields[child_name] |= predicate.get_value_names()

    # 3) Optimize the main query (parent) and its subqueries (children).
    # Iterate over a copy (self.children[:]) because self.children[i] is
    # reassigned inside the loop.
    for i, child in enumerate(self.children[:]):
        relation = self.relations[i]
        child_name = relation.get_relation_name()
        self.children[i] = child.optimize_projection(child_fields[child_name])
    # Note:
    #   if parent_fields < self.parent.get_query().get_select():
    # This is not working if the parent has fields not in the subquery:
    # eg. requested = slice_hrn, resource && parent = slice_hrn users
    if self.parent.get_query().get_select() - parent_fields:
        #self.parent = self.parent.optimize_projection(parent_fields)
        self.parent = self.parent.optimize_projection(parent_fields.intersection(self.parent.get_query().get_select()))

    # 4) Some fields (used to connect the parent node to its child node) may be not
    #    queried by the user. In this case, we have to add a Projection
    #    node which will filter those fields.
    if require_top_projection:
        return Projection(self, fields)  # jordan
    return self
| true
|
cdefd2ad395a69ada95e1118b6ce8f7617d09066
|
Python
|
Engi20/Python-Programming-for-Beginners
|
/13. Exceptions/99.2.IndexError.py
|
UTF-8
| 126
| 2.90625
| 3
|
[] |
no_license
|
# Demonstrates catching an IndexError raised by an out-of-range list index.
# Renamed the list from "l" to "nums": "l" is an ambiguous name (PEP 8, E741)
# easily confused with "1" or "I".
nums = [10, 20, 30, 40]
# print(nums[10])  # uncaught, this line would abort the script
try:
    print(nums[10])  # index 10 is out of range for a 4-element list
except IndexError as e:
    # Prints the exception message: "list index out of range"
    print(e)
print(nums)
| true
|
b5f57fd461404c8cf59d5fd8cd2e160e0f0df0e0
|
Python
|
tpsavard/ProjectEuler
|
/Python/euler_utils.py
|
UTF-8
| 879
| 3.9375
| 4
|
[] |
no_license
|
# Break a number down into its component digits, in descending order of place value
# Ex.: 1234 -> [1, 2, 3, 4]
def getDigitsDescendingOrder(val):
    """Return the decimal digits of *val*, most significant first.

    Non-positive inputs yield an empty list (same as the loop condition
    never being entered).
    """
    remaining = val
    digits = []
    while remaining > 0:
        # divmod peels off the least significant digit; prepend it so the
        # final list runs from most to least significant.
        remaining, digit = divmod(remaining, 10)
        digits.insert(0, digit)
    return digits
# Recombine an array of digits (in descending order of place value) into a number
# Ex.: [1, 2, 3, 4] -> 1234
def getNumber(digits):
    """Fold a most-significant-first digit sequence back into an integer."""
    total = 0
    for digit in digits:
        # Shift the accumulator one decimal place and append the digit.
        total = total * 10 + digit
    return total
# Returns whether the given number is a prime or not
# Ex.: 2 -> True, 4 -> False   (original comment wrongly claimed 2 -> False)
def isPrime(val):
    """Return True if *val* is a prime number, False otherwise."""
    # Taken from https://www.pythonpool.com/check-if-number-is-prime-in-python/
    # Better library implementation: https://www.sympy.org/
    # Numbers below 2 are not prime by definition.
    if val < 2:
        return False
    # Trial division up to sqrt(val): every composite has a factor <= sqrt(val).
    for factor in range(2, int(val**.5) + 1):
        # Bug fix: the original tested "val & factor == 0", which by operator
        # precedence evaluates "val & (factor == 0)" == "val & 0" and is always
        # falsy, so the function returned True for EVERY input. Divisibility
        # is tested with the modulo operator.
        if val % factor == 0:
            return False
    return True
| true
|
43bf2fd4c072ee33bcbec31ed47876952696e733
|
Python
|
NilsK1812/Shogun-alias-Shmong-Shmong
|
/Shogun_Feld.py
|
UTF-8
| 3,886
| 3.15625
| 3
|
[] |
no_license
|
from tkinter import *
import tkinter.messagebox
import random as r
class MyApp(Frame):
    """Tkinter frame rendering an 8x8 Shogun playing field.

    The board alternates "black first" and "grey first" checker rows; the
    first and last rows additionally carry the pieces (red / white) with a
    random movement number as button text.
    """

    def __init__(self, master):
        super().__init__(master)
        #self.pack(fill=BOTH, expand=True)
        master.geometry("800x800")
        # frame to hold the playing field
        self.f1 = Frame(master=master)
        self.f1.pack(fill=BOTH, expand=True)
        # frame to hold additional buttons (restart and quit)
        self.f2 = Frame(master=master)
        self.f2.pack()
        restart = Button(master=self.f2, text="exit", command=self.quit)
        restart.pack(side="left")
        # board dimension (8x8 grid)
        self.grid_length = 8
        self.row = 0             # next grid row to fill while building the board
        self.knoepfe = []        # every field button, in creation order
        self.knoepfe_farbe = []  # colors recorded while building grey-first rows
        self.create_buttons()
        # make the grid layout expand with the window
        for x in range(self.grid_length):
            self.f1.columnconfigure(x, weight=1)
            self.f1.rowconfigure(x, weight=1)

    def create_buttons(self):
        """Build all 8 board rows, alternating the two checker patterns.

        Replaces the original eight hand-written call pairs with a loop;
        the call order (black_first, grey_first, repeated 4 times) is
        unchanged.
        """
        for _ in range(self.grid_length // 2):
            self.black_first()
            self.grey_first()

    def clicked(self, event):
        """Handle a left click on a field button."""
        print("Es geht!")
        # A plain grey or black background means the square holds no piece.
        if(event.widget["highlightbackground"] == '#565656' or event.widget["highlightbackground"] == '#000000'):
            tkinter.messagebox.showwarning("Warning","Auf dem Feld ist keine Figur")
        else:
            grid_info = event.widget.grid_info()
            print("{}/{}".format(grid_info["column"],grid_info["row"]))

    def black_first(self):
        """Create one row starting with a black square.

        Row 0 is the red player's home row: every button gets a red
        highlight and a random number (1-2 in the center column, 1-4
        elsewhere).
        """
        count = 0
        random_zahl = 0
        for x in range(self.grid_length):
            b = Button(master=self.f1)
            if(self.row == 0):
                r.seed()
                b["highlightbackground"] = 'red'
                if(count == 4):
                    random_zahl = r.randint(1,2)
                    b["text"] = random_zahl
                else:
                    random_zahl = r.randint(1,4)
                    b["text"] = random_zahl
                count = count + 1
            elif(count % 2):
                b["highlightbackground"] = '#565656'
                count = count + 1
            else:
                b["highlightbackground"] = '#000000'
                count = count + 1
            b.bind("<ButtonPress-1>", self.clicked)
            b.grid(row=self.row, column=x, sticky=N+S+E+W)
            if(count == 8):
                # Row complete: reset the column counter, advance the row.
                count = 0
                self.row = self.row + 1
            self.knoepfe.append(b)

    def grey_first(self):
        """Create one row starting with a grey square.

        Row 7 is the white player's home row (white highlight plus random
        number), mirroring black_first.

        NOTE(review): unlike black_first, these buttons are not bound to
        self.clicked, and only this method records colors in
        knoepfe_farbe — confirm whether both asymmetries are intentional.
        """
        count = 0
        random_zahl = 0
        for x in range(self.grid_length):
            b = Button(master=self.f1)
            if(self.row == 7):
                r.seed()
                b["highlightbackground"] = 'white'
                if(count == 4):
                    random_zahl = r.randint(1,2)
                    b["text"] = random_zahl
                else:
                    random_zahl = r.randint(1,4)
                    b["text"] = random_zahl
                count = count + 1
            elif(count % 2):
                b["highlightbackground"] = '#000000'
                count = count + 1
            else:
                b["highlightbackground"] = '#565656'
                count = count + 1
            b.grid(row=self.row, column=x, sticky=N+S+E+W)
            if(count == 8):
                count = 0
                self.row = self.row + 1
            self.knoepfe.append(b)
            self.knoepfe_farbe.append(b["highlightbackground"])
# --- application entry point ---------------------------------------------
# Create the root window, attach the game frame and start the Tk event loop.
# NOTE(review): the title reads "Shongun" — possibly a typo for "Shogun";
# confirm before changing this user-visible string.
tk_window = Tk()
tk_window.title("Shongun")
app = MyApp(tk_window)
app.mainloop()
| true
|
dc3a0ae520436e7f7dcda26255840051f45564cc
|
Python
|
fivefishstudios/GPIBoE-Gateway
|
/software/GPIB.py
|
UTF-8
| 12,370
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/python3
# last modified 2019/09
## DOC -------------------------------------------------------------------------
# This class generates a GPIB interface based on the IEEE 488.1 specifications.
# RESTRICTION: works only as single controller and must be either listener or
# talker.
# It works with a Raspberry Pi over SPI and a two MCP23S17 port expander.
# The read/write/trigger methods return a number as success/error indicator and
# an optional data or error string as list.
import spidev
import time
import signal
class GPIB:
    """Single-controller GPIB (IEEE 488.1) interface over SPI.

    Talks to two MCP23S17 port expanders (one for the data/handshake bus,
    one for control/LEDs). The public read/write/trigger methods return a
    list: [status_code, data_or_error_string], where status 0 means success.
    """

    # -- SPI configurations:
    # # for RaspberryPi Model B
    # SPI_MODULE = 0
    # SPI_CS = 0
    # for OrangePi Zero
    SPI_MODULE = 1
    SPI_CS = 0
    # -- class-global debug flag (set to 1 to trace bus activity)
    DEBUG = 0

    ## constructor / init
    def __init__(self):
        self.__DebugPrint('starting construction')
        self.__SPICreate()
        self.__ControllerInit()
        self.__LEDsInit()
        self._PE(1)
        self.__DebugPrint('finished construction')

    ## #### METHODS ------------------------------------------------------------
    # GPIB bus clear/init: pulse IFC for >= 100 microseconds.
    def Init(self):
        self._LED_ACT(1)  # activity LED on
        self._setIFC(1)
        time.sleep(200e-6)
        self._setIFC(0)
        self._LED_ACT(0)  # activity LED off
        return 0

    # GPIB change REN (remote enable) line state
    def Remote(self, state):
        self._LED_ACT(1)  # activity LED on
        self._setREN(state)
        self._LED_ACT(0)  # activity LED off
        return 0

    # GPIB write data to device with address
    def Write(self, address, data):
        """Address the device as listener and send *data* bytes.

        Returns [0, ''] on success, or [-2/-3, message] on invalid input.
        """
        if not (self._ValidateGPIBAddress(address)):
            return [-2, 'invalid GPIB address']
        # Bug fix: the original tested "len(data) >= 0", which is always true,
        # so an empty string slipped through and "data[-1]" below raised an
        # IndexError mid-transaction. Require a non-empty string, matching the
        # error message ("invalid or empty data to write").
        if not (isinstance(data, str) and len(data) > 0):
            return [-3, 'invalid or empty data to write']
        self.__DebugPrint('write to: ' + str(address) + '; data: ' + str(data))
        self._LED_ACT(1)  # activity LED on
        self._Talk(1)
        self._setATN(1)
        self._GPIBWriteByte(0x5F)            # UNT: untalk
        self._GPIBWriteByte(0x3F)            # UNL: unlisten
        self._GPIBWriteByte(address + 0x20)  # MLA: my listen address
        self._setATN(0)
        # Send all bytes; assert EOI together with the final byte.
        for char in data[:-1]:
            self._GPIBWriteByte(ord(char))
        self._setEOI(1)
        self._GPIBWriteByte(ord(data[-1]))
        self._setEOI(0)
        self._LED_ACT(0)  # activity LED off
        return [0, '']

    # GPIB read data from addressed device
    def Read(self, address):
        """Address the device as talker and read until newline or EOI.

        Returns [0, data] on success, [-2, message] for a bad address or
        [-4, message] if the transfer fails (e.g. handshake timeout).
        """
        if not (self._ValidateGPIBAddress(address)):
            return [-2, 'invalid GPIB address']
        self.__DebugPrint('read from: ' + str(address))
        self._LED_ACT(1)  # activity LED on
        self._Talk(1)
        self._setATN(1)
        self._GPIBWriteByte(0x5F)            # UNT: untalk
        self._GPIBWriteByte(0x3F)            # UNL: unlisten
        self._GPIBWriteByte(address + 0x40)  # MTA: my talk address
        self._setATN(0)
        self._Talk(0)
        try:
            read_data = ''
            while (True):
                data, eoi_state = self._GPIBReadByte()
                read_data += chr(data)
                if ((read_data[-1] == '\n') or (eoi_state == 1)):  # terminate read on \n or EOI
                    self._LED_ACT(0)  # activity LED off
                    self.__DebugPrint('read complete ' + str(read_data))
                    return [0, read_data]
        except Exception as err:
            return [-4, ('GPIB read error: ' + str(err.args[0]))]

    # GPIB send trigger command to addressed device
    def Trigger(self, address):
        if not (self._ValidateGPIBAddress(address)):
            return [-2, 'invalid GPIB address']
        self.__DebugPrint('triggering: ' + str(address))
        self._LED_ACT(1)  # activity LED on
        self._Talk(1)
        self._setATN(1)
        self._GPIBWriteByte(0x5F)            # UNT: untalk
        self._GPIBWriteByte(0x3F)            # UNL: unlisten
        self._GPIBWriteByte(address + 0x20)  # MLA: my listen address
        self._GPIBWriteByte(0x08)            # GET: trigger command
        self._setATN(0)
        self._Talk(0)
        self._LED_ACT(0)  # activity LED off
        return [0, '']

    ## #### GPIB helper functions --------------------------------------------
    # validate GPIB address: primary addresses 1..30 are legal (0 is the
    # controller, 31 is reserved for UNL/UNT)
    def _ValidateGPIBAddress(self, address):
        if ((address >= 1) and (address <= 30)):
            return 1
        else:
            return 0

    ## #### data handling functions --------------------------------------------
    # GPIB read timeout handler, raised by SIGALRM during _GPIBReadByte
    def __GPIBTimeoutHandler(self, signum, frame):
        self.__DebugPrint('TimeoutHandler SigNum: ' + str(signum))
        self.__DebugPrint('TimeoutHandler SigNum: ' + str(frame))
        raise Exception('Timeout occured!')

    # GPIB read data byte with three-wire handshake (DAV/NRFD/NDAC)
    def _GPIBReadByte(self):
        self._setNDAC(1)
        self._setNRFD(0)
        # initalize signal subsystem and set alarm to 1 sec so a stuck
        # handshake cannot hang forever
        # NOTE(review): signal.alarm/SIGALRM only works in the main thread
        # of the main interpreter — confirm this class is not used from
        # worker threads.
        signal.signal(signal.SIGALRM, self.__GPIBTimeoutHandler)
        signal.alarm(1)
        while (self._getDAV() == 0):
            self.__DebugPrint('waiting for DAV')
        self._setNRFD(1)
        data = self._getDATA()
        eoi_state = self._getEOI()
        self._setNDAC(0)
        while (self._getDAV() == 1):
            self.__DebugPrint('waiting for not-DAV')
        self._setNDAC(1)
        # stop timer
        signal.alarm(0)
        self.__DebugPrint('read: ' + chr(data) + '; EOI=' + str(eoi_state))
        return data, eoi_state

    # GPIB write data byte with three-wire handshake
    def _GPIBWriteByte(self, data):
        while (self._getNRFD() == 1):
            self.__DebugPrint('waiting for NFRD')
        self._setDATA(data)
        self._setDAV(1)
        while (self._getNDAC() == 1):
            self.__DebugPrint('waiting for NDAC')
        self._setDAV(0)
        return 0

    ## #### interface functions ------------------------------------------------
    # when calling this functions logic 1 equals to 1 (standard logic)
    # Take control!
    def __ControllerInit(self):
        # data = 0x00, output
        self.__SPIDataByteWrite(0x14, 0xFF)
        self.__SPIDataByteWrite(0x00, 0x00)
        # REN=1, IFC=0, NDAC=0, NRFD=0, DAV=0, EOI=1, ATN=1, SRQ=1
        self.__SPIDataByteWrite(0x15, 0b10001111)
        # REN=out, IFC=out, NDAC=in, NRFD=in, DAV=out, EOI=out, ATN=out, SRQ=in
        self.__SPIDataByteWrite(0x01, 0b00110001)
        # TE=1, SC=1, PE=0, DC=0; all outputs
        self.__SPICtrlByteWrite(0x14, 0b11000000)
        self.__SPICtrlByteWrite(0x00, 0x00)
        # release IFC after 100µs
        time.sleep(100e-6)
        self.__SPIDataBitWrite(0x15, 6, 1)
        self.__DebugPrint('finished GPIB SC init')

    ## GPIB driver control functions -------------------------------------------
    # GPIB talk enable: reconfigure driver direction for talking/listening
    def _Talk(self, enable):
        if (enable):
            # data not valid & DAV as output
            self.__SPIDataBitWrite(0x15, 3, 1)
            self.__SPIDataBitWrite(0x01, 3, 0)
            # NRFD&NDAC as inputs
            self.__SPIDataBitWrite(0x01, 4, 1)
            self.__SPIDataBitWrite(0x01, 5, 1)
            # set TE
            self.__SPICtrlBitWrite(0x14, 7, 1)
            # data as output
            self.__SPIDataByteWrite(0x00, 0x00)
            self._LED_TALK(1)
        else:
            # DAV as input
            self.__SPIDataBitWrite(0x01, 3, 1)
            # not ready for data, not accepted & NRFD,NDAC as outputs
            self.__SPIDataBitWrite(0x15, 4, 0)
            self.__SPIDataBitWrite(0x15, 5, 0)
            self.__SPIDataBitWrite(0x01, 4, 0)
            self.__SPIDataBitWrite(0x01, 5, 0)
            # clear TE
            self.__SPICtrlBitWrite(0x14, 7, 0)
            # data as input
            self.__SPIDataByteWrite(0x00, 0xFF)
            self._LED_TALK(0)

    ## GPIB data transfer & line functions -------------------------------------
    def _getDATA(self):
        return self.__SPIDataByteRead(0x12)

    def _setDATA(self, data):
        # bus lines are active low, hence the inversion
        self.__SPIDataByteWrite(0x14, ~data)

    def _PE(self, enable):  # GPIB data lines pullup control
        self.__SPICtrlBitWrite(0x14, 5, enable)

    ## GPIB control line functions ---------------------------------------------
    # all lines are active low: written/read values are inverted
    def _getSRQ(self):
        return not self.__SPIDataBitRead(0x13, 0)

    def _setATN(self, state):
        self._LED_ATN(state)
        self.__SPIDataBitWrite(0x15, 1, not state)

    def _getEOI(self):
        return not self.__SPIDataBitRead(0x13, 2)

    def _setEOI(self, state):
        self.__SPIDataBitWrite(0x15, 2, not state)

    def _setIFC(self, state):
        self.__SPIDataBitWrite(0x15, 6, not state)

    def _setREN(self, state):
        self.__SPIDataBitWrite(0x15, 7, not state)

    ## GPIB data handshake functions -------------------------------------------
    def _getDAV(self):
        return not self.__SPIDataBitRead(0x13, 3)

    def _setDAV(self, state):
        self.__SPIDataBitWrite(0x15, 3, not state)

    def _getNRFD(self):
        return not self.__SPIDataBitRead(0x13, 4)

    def _setNRFD(self, state):
        self.__SPIDataBitWrite(0x15, 4, not state)

    def _getNDAC(self):
        return not self.__SPIDataBitRead(0x13, 5)

    def _setNDAC(self, state):
        self.__SPIDataBitWrite(0x15, 5, not state)

    ## #### LED functions ------------------------------------------------------
    def __LEDsInit(self):
        self.__SPICtrlByteWrite(0x01, 0x00)  # LEDs are outputs
        # flash all LEDs once
        self.__SPICtrlByteWrite(0x15, 0x00)
        time.sleep(10e-3)
        self.__SPICtrlByteWrite(0x15, 0xFF)

    # LED 1
    def _LED_ACT(self, state):
        self.__SPICtrlBitWrite(0x15, 0, not state)

    # LED 2
    def _LED_TALK(self, state):
        self.__SPICtrlBitWrite(0x15, 1, not state)

    # LED 3
    def _LED_ATN(self, state):
        self.__SPICtrlBitWrite(0x15, 2, not state)

    # LED 4
    def __LED_ERR(self, state):
        self.__SPICtrlBitWrite(0x15, 3, not state)

    ## SPI bit & byte manipulation methods -------------------------------------
    # create SPI and return handle
    def __SPICreate(self):
        self.__spi = spidev.SpiDev()
        self.__spi.open(self.SPI_MODULE, self.SPI_CS)
        self.__spi.max_speed_hz = 10000000
        self.__spi.xfer([0x40, 0x0A, 0x08, 0x08])  # enable hardware address decoder
        self.__DebugPrint('finished SPI creation')

    # set bit in control registers, MCP23S17 for control has address 0x42
    def __SPICtrlBitWrite(self, register, bit, value):
        if (bit < 8):
            # read-modify-write: fetch current register content first
            buf = self.__spi.xfer([0x43, register, 0x00])
            buf = buf[2]
            if (value):
                buf = buf | (1 << bit)
            else:
                buf = buf & ~(1 << bit)
            self.__spi.xfer([0x42, register, buf])
            return 0
        else:
            return -1

    # set byte in control register, MCP23S17 for control has address 0x42
    def __SPICtrlByteWrite(self, register, value):
        if (value < 256):
            self.__spi.xfer([0x42, register, value])
            return 0
        else:
            return -1

    # set bit in data registers, MCP23S17 for data has address 0x40
    def __SPIDataBitWrite(self, register, bit, value):
        if (bit < 8):
            buf = self.__spi.xfer([0x41, register, 0x00])
            buf = buf[2]
            if (value):
                buf = buf | (1 << bit)
            else:
                buf = buf & ~(1 << bit)
            self.__spi.xfer([0x40, register, buf])
        else:
            return -1

    # set byte in data register, MCP23S17 for data has address 0x40
    def __SPIDataByteWrite(self, register, value):
        if (value < 256):
            self.__spi.xfer([0x40, register, value])
            return 0
        else:
            return -1

    # read bit in data register, MCP23S17 for data has address 0x40
    def __SPIDataBitRead(self, register, bit):
        if (bit < 8):
            buf = self.__spi.xfer([0x41, register, 0x00])
            return ((buf[2] >> bit) & 1)
        else:
            return -1

    # read byte in data register, MCP23S17 for data has address 0x40
    def __SPIDataByteRead(self, register):
        tmp = self.__spi.xfer([0x41, register, 0x00])
        # invert: GPIB data lines are active low
        return (~tmp[2]) & 0xFF

    ## #### DEBUG functions ----------------------------------------------------
    def __DebugPrint(self, text_buffer):
        if (self.DEBUG):
            print(f"[DEBUG] GPIB: {text_buffer!r}")
| true
|
92b4a3ea4ccc641ceee897045794f7e1228c7197
|
Python
|
MenacingManatee/python_practice
|
/python_basics/0x01/ignore.py
|
UTF-8
| 149
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/python
# Ignore SIGINT so that CTRL+C no longer interrupts this process.
import signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Parenthesized print works under both Python 2 and Python 3; the original
# "print '...'" statement form is a SyntaxError on Python 3.
print('Your script can\'t be stopped with CTRL+C')
# Busy-wait forever; the process must be stopped with SIGTERM/SIGKILL.
while 1:
    continue
|
96da56f8e09835541e5fbf2262e4329c77bd08cc
|
Python
|
dahbiz/sympy-plot-backends
|
/spb/series.py
|
UTF-8
| 77,064
| 2.515625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
from sympy import sympify, Tuple, symbols, solve, re, im
from sympy.geometry import (
Plane,
Polygon,
Circle,
Ellipse,
Line,
Segment,
Ray,
Line3D,
Curve,
Point2D,
Point3D,
)
from sympy.geometry.entity import GeometryEntity
from sympy.geometry.line import LinearEntity2D, LinearEntity3D
from sympy.core.relational import (
Equality,
GreaterThan,
LessThan,
Relational,
StrictLessThan,
StrictGreaterThan,
)
from sympy.logic.boolalg import BooleanFunction
from sympy.plotting.experimental_lambdify import (
vectorized_lambdify,
lambdify,
experimental_lambdify,
)
from sympy.plotting.intervalmath import interval
from spb.utils import get_lambda
import warnings
import numpy as np
### The base class for all series
class BaseSeries:
    """Common base for every data series handed to a plotting backend.

    Explanation
    ===========

    The backend should check if it supports the data series that it's given
    (eg TextBackend supports only LineOver1DRange), and it is the backend's
    responsibility to know how to consume each series class. The grouping
    flags (``is_2Dline``, ``is_3Dsurface``, ...) describe, by convention
    only, which API a series exposes; a backend is not obliged to use it.
    """

    # Flags are used instead of base-class checks because combining several
    # of them is simpler than multiple inheritance.
    is_2Dline = False

    is_3Dline = False

    is_3Dsurface = False

    is_contour = False

    is_implicit = False
    # Kept distinct from is_contour: the backend uses a different colormap.

    is_parametric = False

    is_interactive = False
    # An interactive series can update its data.

    is_vector = False
    is_2Dvector = False
    is_3Dvector = False
    # Represents a 2D or 3D vector.

    is_complex = False
    # Represents a complex expression.

    is_point = False
    # If True, the rendering will use points, not lines.

    is_geometry = False
    # If True, it represents an object of the sympy.geometry module.

    def __init__(self):
        super().__init__()

    @property
    def is_3D(self):
        # True for any of the three-dimensional series kinds.
        return any([self.is_3Dline, self.is_3Dsurface, self.is_3Dvector])

    @property
    def is_line(self):
        # True for both 2D and 3D line series.
        return any([self.is_2Dline, self.is_3Dline])

    @staticmethod
    def _discretize(start, end, N, scale="linear"):
        """Return N points between start and end, linearly or log spaced."""
        spacing = np.linspace if scale == "linear" else np.geomspace
        return spacing(start, end, N)

    @staticmethod
    def _correct_size(a, b):
        """Broadcast a scalar `a` to the grid shape of `b`.

        Arrays whose shape already matches `b` are returned unchanged.
        """
        if not isinstance(a, np.ndarray):
            # `a` is a scalar (int or float): wrap it first
            a = np.array(a)
        if a.shape == b.shape:
            return a
        return a * np.ones_like(b)

    def get_data(self):
        """Child series must return the numerical data used for plotting."""
        raise NotImplementedError
### 2D lines
class Line2DBaseSeries(BaseSeries):
    """Base class shared by all 2D line series."""

    is_2Dline = True

    def __init__(self, **kwargs):
        super().__init__()
        self.label = None
        self.steps = kwargs.get("steps", False)
        self.only_integers = kwargs.get("only_integers", False)
        self.is_point = kwargs.get("is_point", False)

    def get_data(self):
        """Return the coordinate lists used to draw the line.

        Returns
        =======

        x : list
            List of x-coordinates
        y : list
            List of y-coordinates
        z : list (optional)
            List of z-coordinates in case of Parametric3DLineSeries
        param : list (optional)
            The parameter values, for Parametric2DLineSeries and
            Parametric3DLineSeries.

        When ``steps`` is enabled, coordinates are duplicated so the line
        renders as a staircase.
        """
        coords = [np.array(c, dtype=np.float64) for c in self.get_points()]
        if self.steps is not True:
            return coords
        if self.is_2Dline:
            x, y = coords[0], coords[1]
            # Interleave duplicated samples, offset by one, to form steps.
            x = np.array((x, x)).T.flatten()[1:]
            y = np.array((y, y)).T.flatten()[:-1]
            coords = (x, y, coords[2]) if self.is_parametric else (x, y)
        elif self.is_3Dline:
            x = np.repeat(coords[0], 3)[2:]
            y = np.repeat(coords[1], 3)[:-2]
            z = np.repeat(coords[2], 3)[1:-1]
            coords = (x, y, z, coords[3])
        return coords
class List2DSeries(Line2DBaseSeries):
    """A 2D line given directly as lists of point coordinates."""

    def __init__(self, list_x, list_y, label="", **kwargs):
        super().__init__()
        # Coerce both coordinate lists to float arrays up front.
        self.list_x = np.array(list_x, dtype=np.float64)
        self.list_y = np.array(list_y, dtype=np.float64)
        self.label = label
        self.is_point = kwargs.get("is_point", False)

    def __str__(self):
        return "list plot"

    def get_points(self):
        """Return the stored x and y coordinate arrays."""
        return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
    """Representation for a line consisting of a SymPy expression over a range."""

    def __init__(self, expr, var_start_end, label="", **kwargs):
        super().__init__(**kwargs)
        self.expr = sympify(expr)
        self.label = label
        # var_start_end is a 3-tuple: (symbol, range start, range end)
        self.var = sympify(var_start_end[0])
        self.start = float(var_start_end[1])
        self.end = float(var_start_end[2])
        self.n = kwargs.get("n", 1000)                  # points for uniform sampling
        self.adaptive = kwargs.get("adaptive", True)    # adaptive vs uniform sampling
        self.depth = kwargs.get("depth", 9)             # max recursion depth (adaptive)
        self.xscale = kwargs.get("xscale", "linear")    # "linear" or "log" discretization
        self.polar = kwargs.get("polar", False)         # interpret (x, y) as (theta, r)

    def __str__(self):
        return "cartesian line: %s for %s over %s" % (
            str(self.expr),
            str(self.var),
            str((self.start, self.end)),
        )

    @staticmethod
    def adaptive_sampling(f, start, end, max_depth=9, xscale="linear"):
        """The adaptive sampling is done by recursively checking if three
        points are almost collinear. If they are not collinear, then more
        points are added between those points.

        Parameters
        ==========

        f : callable
            The function to be numerical evaluated

        start, end : floats
            start and end values of the discretized domain

        max_depth : int
            Controls the smootheness of the overall evaluation. The higher
            the number, the smoother the function, the more memory will be
            used by this recursive procedure. Default value is 9.

        xscale : str
            Discretization strategy. Can be "linear" or "log". Default to
            "linear".

        References
        ==========

        .. [1] Adaptive polygonal approximation of parametric curves,
               Luiz Henrique de Figueiredo.
        """
        x_coords = []
        y_coords = []

        def sample(p, q, depth):
            """Samples recursively if three points are almost collinear.
            For depth < max_depth, points are added irrespective of whether
            they satisfy the collinearity condition or not. The maximum
            depth allowed is max_depth.
            """
            # Randomly sample to avoid aliasing.
            random = 0.45 + np.random.rand() * 0.1
            if xscale == "log":
                xnew = 10 ** (
                    np.log10(p[0]) + random * (np.log10(q[0]) - np.log10(p[0]))
                )
            else:
                xnew = p[0] + random * (q[0] - p[0])
            ynew = f(xnew)
            new_point = np.array([xnew, ynew])

            # Maximum depth
            if depth > max_depth:
                x_coords.append(q[0])
                y_coords.append(q[1])

            # Sample irrespective of whether the line is flat till the
            # depth of 6. We are not using linspace to avoid aliasing.
            elif depth < max_depth:
                sample(p, new_point, depth + 1)
                sample(new_point, q, depth + 1)

            # Sample ten points if complex values are encountered
            # at both ends. If there is a real value in between, then
            # sample those points further.
            elif p[1] is None and q[1] is None:
                if xscale == "log":
                    xarray = np.logspace(p[0], q[0], 10)
                else:
                    xarray = np.linspace(p[0], q[0], 10)
                yarray = list(map(f, xarray))
                if any(y is not None for y in yarray):
                    for i in range(len(yarray) - 1):
                        if yarray[i] is not None or yarray[i + 1] is not None:
                            sample(
                                [xarray[i], yarray[i]],
                                [xarray[i + 1], yarray[i + 1]],
                                depth + 1,
                            )

            # Sample further if one of the end points in None (i.e. a
            # complex value) or the three points are not almost collinear.
            elif (
                p[1] is None
                or q[1] is None
                or new_point[1] is None
                or not flat(p, new_point, q)
            ):
                sample(p, new_point, depth + 1)
                sample(new_point, q, depth + 1)
            else:
                x_coords.append(q[0])
                y_coords.append(q[1])

        f_start = f(start)
        f_end = f(end)
        x_coords.append(start)
        y_coords.append(f_start)
        sample(np.array([start, f_start]), np.array([end, f_end]), 0)

        return x_coords, y_coords

    def get_points(self):
        """Return lists of coordinates for plotting. Depending on the
        `adaptive` option, this function will either use an adaptive algorithm
        or it will uniformly sample the expression over the provided range.

        Returns
        =======

        x : list
            List of x-coordinates
        y : list
            List of y-coordinates
        """
        if self.only_integers or not self.adaptive:
            x, y = self._uniform_sampling()
            x, y = np.array(x), np.array(y)
        else:
            f = lambdify([self.var], self.expr)
            x, y = self.adaptive_sampling(
                f, self.start, self.end, self.depth, self.xscale
            )
        if self.polar:
            # NOTE(review): in the adaptive branch x and y are Python lists;
            # the arithmetic below relies on NumPy coercing them to arrays —
            # confirm this is intended for list inputs.
            t = x.copy()
            x = y * np.cos(t)
            y = y * np.sin(t)
        return x, y

    def _uniform_sampling(self):
        # Evaluate the expression on an evenly spaced grid; with
        # only_integers the grid is the integers in [start, end].
        start, end, N = self.start, self.end, self.n
        if self.only_integers is True:
            start, end = int(start), int(end)
            N = end - start + 1
        list_x = self._discretize(start, end, N, scale=self.xscale)
        f = vectorized_lambdify([self.var], self.expr)
        list_y = f(list_x)
        # The expression may be a constant: broadcast it to the grid shape.
        list_y = self._correct_size(list_y, list_x)
        return list_x, list_y
class Parametric2DLineSeries(Line2DBaseSeries):
    """Representation for a line consisting of two parametric sympy expressions
    over a range."""

    is_parametric = True

    def __init__(self, expr_x, expr_y, var_start_end, label="", **kwargs):
        super().__init__()
        self.expr_x = sympify(expr_x)
        self.expr_y = sympify(expr_y)
        self.label = label
        # var_start_end is a 3-tuple: (parameter symbol, start, end)
        self.var = sympify(var_start_end[0])
        self.start = float(var_start_end[1])
        self.end = float(var_start_end[2])
        self.n = kwargs.get("n", 300)                 # points for uniform sampling
        self.adaptive = kwargs.get("adaptive", True)  # adaptive vs uniform sampling
        self.depth = kwargs.get("depth", 9)           # max recursion depth (adaptive)
        self.scale = kwargs.get("xscale", "linear")   # "linear" or "log" discretization

    def __str__(self):
        return "parametric cartesian line: (%s, %s) for %s over %s" % (
            str(self.expr_x),
            str(self.expr_y),
            str(self.var),
            str((self.start, self.end)),
        )

    def _uniform_sampling(self):
        # Evaluate both component expressions on an evenly spaced parameter grid.
        param = self._discretize(self.start, self.end, self.n, scale=self.scale)
        fx = vectorized_lambdify([self.var], self.expr_x)
        fy = vectorized_lambdify([self.var], self.expr_y)
        list_x = fx(param)
        list_y = fy(param)
        # expr_x or expr_y may be scalars. This allows scalar components
        # to be plotted as well
        list_x = self._correct_size(list_x, param)
        list_y = self._correct_size(list_y, param)
        return list_x, list_y, param

    @staticmethod
    def adaptive_sampling(fx, fy, start, end, max_depth=9):
        """The adaptive sampling is done by recursively checking if three
        points are almost collinear. If they are not collinear, then more
        points are added between those points.

        Parameters
        ==========

        fx : callable
            The function to be numerical evaluated in the horizontal
            direction.

        fy : callable
            The function to be numerical evaluated in the vertical
            direction.

        start, end : floats
            start and end values of the discretized domain

        max_depth : int
            Controls the smootheness of the overall evaluation. The higher
            the number, the smoother the function, the more memory will be
            used by this recursive procedure. Default value is 9.

        References
        ==========

        .. [1] Adaptive polygonal approximation of parametric curves,
               Luiz Henrique de Figueiredo.
        """
        x_coords = []
        y_coords = []
        param = []

        def sample(param_p, param_q, p, q, depth):
            """Samples recursively if three points are almost collinear.
            For depth < max_depth, points are added irrespective of whether
            they satisfy the collinearity condition or not. The maximum
            depth allowed is max_depth.
            """
            # Randomly sample to avoid aliasing.
            random = 0.45 + np.random.rand() * 0.1
            param_new = param_p + random * (param_q - param_p)
            xnew = fx(param_new)
            ynew = fy(param_new)
            new_point = np.array([xnew, ynew])

            # Maximum depth
            if depth > max_depth:
                x_coords.append(q[0])
                y_coords.append(q[1])
                param.append(param_p)

            # Sample irrespective of whether the line is flat till the
            # depth of 6. We are not using linspace to avoid aliasing.
            elif depth < max_depth:
                sample(param_p, param_new, p, new_point, depth + 1)
                sample(param_new, param_q, new_point, q, depth + 1)

            # Sample ten points if complex values are encountered
            # at both ends. If there is a real value in between, then
            # sample those points further.
            # Bug fix: the original second clause tested
            # "(p[1] is None and q[1] is None)"; the symmetric condition
            # (as in the 1D sampler) checks opposite components of p and q.
            elif (p[0] is None and q[1] is None) or (p[1] is None and q[0] is None):
                param_array = np.linspace(param_p, param_q, 10)
                x_array = list(map(fx, param_array))
                y_array = list(map(fy, param_array))
                if any(
                    x is not None and y is not None for x, y in zip(x_array, y_array)
                ):
                    for i in range(len(y_array) - 1):
                        if (x_array[i] is not None and y_array[i] is not None) or (
                            x_array[i + 1] is not None and y_array[i + 1] is not None
                        ):
                            point_a = [x_array[i], y_array[i]]
                            point_b = [x_array[i + 1], y_array[i + 1]]
                            # Bug fix: the original passed param_array[i] twice,
                            # collapsing the refinement interval to zero width;
                            # the right endpoint is param_array[i + 1].
                            sample(
                                param_array[i],
                                param_array[i + 1],
                                point_a,
                                point_b,
                                depth + 1,
                            )

            # Sample further if one of the end points in None (i.e. a complex
            # value) or the three points are not almost collinear.
            elif (
                p[0] is None
                or p[1] is None
                or q[1] is None
                or q[0] is None
                or not flat(p, new_point, q)
            ):
                sample(param_p, param_new, p, new_point, depth + 1)
                sample(param_new, param_q, new_point, q, depth + 1)
            else:
                x_coords.append(q[0])
                y_coords.append(q[1])
                param.append(param_p)

        f_start_x = fx(start)
        f_start_y = fy(start)
        start_array = [f_start_x, f_start_y]
        f_end_x = fx(end)
        f_end_y = fy(end)
        end_array = [f_end_x, f_end_y]
        x_coords.append(f_start_x)
        y_coords.append(f_start_y)
        param.append(start)
        sample(start, end, start_array, end_array, 0)

        return x_coords, y_coords, param

    def get_points(self):
        """Return lists of coordinates for plotting. Depending on the
        `adaptive` option, this function will either use an adaptive algorithm
        or it will uniformly sample the expression over the provided range.

        Returns
        =======

        x : list
            List of x-coordinates

        y : list
            List of y-coordinates
        """
        if not self.adaptive:
            return self._uniform_sampling()

        fx = lambdify([self.var], self.expr_x)
        fy = lambdify([self.var], self.expr_y)
        x_coords, y_coords, param = self.adaptive_sampling(
            fx, fy, self.start, self.end, self.depth
        )
        return (x_coords, y_coords, param)
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
    """Base class for 3D lines.

    Reuses most of the machinery inherited from Line2DBaseSeries; only the
    dimensionality flags change.
    """

    is_2Dline = False
    is_3Dline = True

    def __init__(self):
        super().__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
    """A 3D line defined by three parametric sympy expressions and a range."""

    is_parametric = True

    def __init__(self, expr_x, expr_y, expr_z, var_start_end, label="", **kwargs):
        super().__init__()
        self.expr_x = sympify(expr_x)
        self.expr_y = sympify(expr_y)
        self.expr_z = sympify(expr_z)
        self.label = label
        # var_start_end is a 3-tuple: (parameter symbol, start, end)
        self.var = sympify(var_start_end[0])
        self.start = float(var_start_end[1])
        self.end = float(var_start_end[2])
        self.n = kwargs.get("n", 300)
        self.scale = kwargs.get("xscale", "linear")

    def __str__(self):
        return "3D parametric cartesian line: (%s, %s, %s) for %s over %s" % (
            str(self.expr_x),
            str(self.expr_y),
            str(self.expr_z),
            str(self.var),
            str((self.start, self.end)),
        )

    def get_points(self):
        """Evaluate the three component expressions over the discretized range.

        Returns (x, y, z, param) arrays with non-finite values masked out.
        """
        param = self._discretize(self.start, self.end, self.n, scale=self.scale)
        components = []
        for expr in (self.expr_x, self.expr_y, self.expr_z):
            f = vectorized_lambdify([self.var], expr)
            values = f(param)
            # A component may be a scalar expression: broadcast it to the
            # grid so scalar components can be plotted as well.
            values = self._correct_size(values, param)
            values = np.array(values, dtype=np.float64)
            components.append(np.ma.masked_invalid(values))
        return components[0], components[1], components[2], param
### Surfaces
class SurfaceBaseSeries(BaseSeries):
    """A base class for 3D surfaces."""

    is_3Dsurface = True

    def __init__(self):
        super().__init__()

    def _discretize(self, s1, e1, n1, scale1, s2, e2, n2, scale2):
        # Discretize each direction independently, then combine the two 1D
        # discretizations into a pair of 2D mesh grids.
        return np.meshgrid(
            super()._discretize(s1, e1, n1, scale1),
            super()._discretize(s2, e2, n2, scale2),
        )
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
    """Representation for a 3D surface consisting of a sympy expression and 2D
    range."""

    def __init__(self, expr, var_start_end_x, var_start_end_y, label="", **kwargs):
        super().__init__()
        # With a complex discretization the range limits may be complex.
        self.complex_discr = kwargs.get("is_complex", False)
        cast = complex if self.complex_discr else float
        self.expr = sympify(expr)
        # x-range: symbol, start, end.
        self.var_x = sympify(var_start_end_x[0])
        self.start_x = cast(var_start_end_x[1])
        self.end_x = cast(var_start_end_x[2])
        # y-range: symbol, start, end.
        self.var_y = sympify(var_start_end_y[0])
        self.start_y = cast(var_start_end_y[1])
        self.end_y = cast(var_start_end_y[2])
        self.label = label
        # Number of discretization points and scales along each direction.
        self.n1 = kwargs.get("n1", 50)
        self.n2 = kwargs.get("n2", 50)
        self.xscale = kwargs.get("xscale", "linear")
        self.yscale = kwargs.get("yscale", "linear")

    def __str__(self):
        return "cartesian surface: %s for %s over %s and %s over %s" % (
            str(self.expr),
            str(self.var_x),
            str((self.start_x, self.end_x)),
            str(self.var_y),
            str((self.start_y, self.end_y)),
        )

    def get_data(self):
        """Return the two mesh grids and the evaluated surface values."""
        # NOTE(review): sympy's plain lambdify is deliberately imported
        # locally here instead of using vectorized_lambdify.
        from sympy import lambdify

        x_mesh, y_mesh = self._discretize(
            self.start_x, self.end_x, self.n1, self.xscale,
            self.start_y, self.end_y, self.n2, self.yscale,
        )
        f = lambdify((self.var_x, self.var_y), self.expr)
        # The expression may be a scalar: broadcast it to the mesh shape.
        z_mesh = self._correct_size(f(x_mesh, y_mesh), x_mesh)
        if self.complex_discr:
            return np.real(x_mesh), np.real(y_mesh), z_mesh
        z_mesh = np.ma.masked_invalid(z_mesh.astype(np.float64))
        return x_mesh, y_mesh, z_mesh
class ParametricSurfaceSeries(SurfaceBaseSeries):
    """Representation for a 3D surface consisting of three parametric sympy
    expressions and a range."""

    is_parametric = True

    def __init__(
        self,
        expr_x,
        expr_y,
        expr_z,
        var_start_end_u,
        var_start_end_v,
        label="",
        **kwargs
    ):
        super().__init__()
        # Symbolic components of the surface.
        self.expr_x = sympify(expr_x)
        self.expr_y = sympify(expr_y)
        self.expr_z = sympify(expr_z)
        # u-parameter: symbol, start, end.
        self.var_u = sympify(var_start_end_u[0])
        self.start_u = float(var_start_end_u[1])
        self.end_u = float(var_start_end_u[2])
        # v-parameter: symbol, start, end.
        self.var_v = sympify(var_start_end_v[0])
        self.start_v = float(var_start_end_v[1])
        self.end_v = float(var_start_end_v[2])
        self.label = label
        # Number of discretization points and scales along each direction.
        self.n1 = kwargs.get("n1", 50)
        self.n2 = kwargs.get("n2", 50)
        self.xscale = kwargs.get("xscale", "linear")
        self.yscale = kwargs.get("yscale", "linear")

    def __str__(self):
        return (
            f"parametric cartesian surface: ({self.expr_x}, {self.expr_y}, "
            f"{self.expr_z}) for {self.var_u} over "
            f"{(self.start_u, self.end_u)} and {self.var_v} over "
            f"{(self.start_v, self.end_v)}"
        )

    def get_data(self):
        """Return the x, y, z mesh grids of the surface with invalid
        (nan/inf) values masked out."""
        u_mesh, v_mesh = self._discretize(
            self.start_u, self.end_u, self.n1, self.xscale,
            self.start_v, self.end_v, self.n2, self.yscale,
        )
        meshes = []
        for component in (self.expr_x, self.expr_y, self.expr_z):
            f = vectorized_lambdify((self.var_u, self.var_v), component)
            # A component may be a scalar expression: broadcast it to the
            # shape of the u-mesh.
            values = np.array(f(u_mesh, v_mesh), dtype=np.float64)
            values = self._correct_size(values, u_mesh)
            meshes.append(np.ma.masked_invalid(values))
        return meshes[0], meshes[1], meshes[2]
class ContourSeries(SurfaceOver2DRangeSeries):
    """Representation for a contour plot."""

    # A contour plot lives in 2D even though it discretizes a surface.
    is_3Dsurface = False
    is_contour = True

    def __str__(self):
        return (
            f"contour: {self.expr} for {self.var_x} over "
            f"{(self.start_x, self.end_x)} and {self.var_y} over "
            f"{(self.start_y, self.end_y)}"
        )
class ImplicitSeries(BaseSeries):
    """Representation for an implicit plot (a relational or boolean
    expression over a rectangular x/y region).

    Two strategies are implemented: an adaptive interval-arithmetic
    subdivision (Tupper's algorithm) and a uniform mesh evaluated with
    contour/contourf.

    References
    ==========
    .. [1] Jeffrey Allen Tupper. Reliable Two-Dimensional Graphing Methods for
        Mathematical Formulae with Two Free Variables.
    .. [2] Jeffrey Allen Tupper. Graphing Equations with Generalized Interval
        Arithmetic. Master's thesis. University of Toronto, 1996
    """
    is_implicit = True
    def __init__(self, expr, var_start_end_x, var_start_end_y, label="", **kwargs):
        super().__init__()
        # Normalize the expression (bare expressions become Eq(expr, 0)) and
        # remember whether an equality/inequality is present.
        expr, has_equality = self._has_equality(sympify(expr))
        self.expr = expr
        # x-range: symbol, start, end.
        self.var_x = sympify(var_start_end_x[0])
        self.start_x = float(var_start_end_x[1])
        self.end_x = float(var_start_end_x[2])
        # y-range: symbol, start, end.
        self.var_y = sympify(var_start_end_y[0])
        self.start_y = float(var_start_end_y[1])
        self.end_y = float(var_start_end_y[2])
        self.has_equality = has_equality
        # Number of discretization points for the uniform-mesh strategy.
        self.n1 = kwargs.get("n1", 1000)
        self.n2 = kwargs.get("n2", 1000)
        self.label = label
        self.adaptive = kwargs.get("adaptive", False)
        self.xscale = kwargs.get("xscale", "linear")
        self.yscale = kwargs.get("yscale", "linear")
        if isinstance(expr, BooleanFunction):
            # Boolean combinations (And/Or/...) can only be handled by the
            # interval-arithmetic (adaptive) algorithm.
            self.adaptive = True
            warnings.warn(
                "The provided expression contains Boolean functions. "
                + "In order to plot the expression, the algorithm "
                + "automatically switched to an adaptive sampling."
            )
        # Check whether the depth is greater than 4 or less than 0.
        depth = kwargs.get("depth", 0)
        if depth > 4:
            depth = 4
        elif depth < 0:
            depth = 0
        # Total subdivision passes of the adaptive algorithm: a base of 4
        # plus the user-requested extra depth (clamped to [0, 4]).
        self.depth = 4 + depth
    def _has_equality(self, expr):
        # Represents whether the expression contains an Equality, GreaterThan
        # or LessThan
        has_equality = False
        def arg_expand(bool_expr):
            """
            Recursively expands the arguments of an Boolean Function,
            collecting every Relational leaf into ``arg_list``.
            """
            for arg in bool_expr.args:
                if isinstance(arg, BooleanFunction):
                    arg_expand(arg)
                elif isinstance(arg, Relational):
                    arg_list.append(arg)
        arg_list = []
        if isinstance(expr, BooleanFunction):
            arg_expand(expr)
            # Check whether there is an equality in the expression provided.
            if any(isinstance(e, (Equality, GreaterThan, LessThan)) for e in arg_list):
                has_equality = True
        elif not isinstance(expr, Relational):
            # A bare expression f(x, y) is interpreted as f(x, y) = 0.
            expr = Equality(expr, 0)
            has_equality = True
        elif isinstance(expr, (Equality, GreaterThan, LessThan)):
            has_equality = True
        return expr, has_equality
    def __str__(self):
        return ("Implicit equation: %s for " "%s over %s and %s over %s") % (
            str(self.expr),
            str(self.var_x),
            str((self.start_x, self.end_x)),
            str(self.var_y),
            str((self.start_y, self.end_y)),
        )
    def get_data(self):
        # Lambdify with interval-arithmetic support for the adaptive path.
        func = experimental_lambdify(
            (self.var_x, self.var_y), self.expr, use_interval=True
        )
        xinterval = interval(self.start_x, self.end_x)
        yinterval = interval(self.start_y, self.end_y)
        try:
            # Probe evaluation: not every function supports interval math.
            func(xinterval, yinterval)
        except AttributeError:
            # XXX: AttributeError("'list' object has no attribute 'is_real'")
            # That needs fixing somehow - we shouldn't be catching
            # AttributeError here.
            if self.adaptive:
                warnings.warn(
                    "Adaptive meshing could not be applied to the"
                    " expression. Using uniform meshing."
                )
            self.adaptive = False
        if self.adaptive:
            return self._get_raster_interval(func)
        else:
            return self._get_meshes_grid()
    def _get_raster_interval(self, func):
        """Uses interval math to adaptively mesh and obtain the plot"""
        k = self.depth
        interval_list = []
        # Create initial 32 divisions
        xsample = np.linspace(self.start_x, self.end_x, 33)
        ysample = np.linspace(self.start_y, self.end_y, 33)
        # Add a small jitter so that there are no false positives for equality.
        # Ex: y==x becomes True for x interval(1, 2) and y interval(1, 2)
        # which will draw a rectangle.
        jitterx = (
            (np.random.rand(len(xsample)) * 2 - 1)
            * (self.end_x - self.start_x)
            / 2 ** 20
        )
        jittery = (
            (np.random.rand(len(ysample)) * 2 - 1)
            * (self.end_y - self.start_y)
            / 2 ** 20
        )
        xsample += jitterx
        ysample += jittery
        # Pair consecutive sample points into intervals, then form the
        # cartesian product of x- and y-intervals (the initial cells).
        xinter = [interval(x1, x2) for x1, x2 in zip(xsample[:-1], xsample[1:])]
        yinter = [interval(y1, y2) for y1, y2 in zip(ysample[:-1], ysample[1:])]
        interval_list = [[x, y] for x in xinter for y in yinter]
        plot_list = []
        # recursive call refinepixels which subdivides the intervals which are
        # neither True nor False according to the expression.
        def refine_pixels(interval_list):
            """Evaluates the intervals and subdivides the interval if the
            expression is partially satisfied."""
            temp_interval_list = []
            plot_list = []
            for intervals in interval_list:
                # Convert the array indices to x and y values
                intervalx = intervals[0]
                intervaly = intervals[1]
                func_eval = func(intervalx, intervaly)
                # The expression is valid in the interval. Change the contour
                # array values to 1.
                if func_eval[1] is False or func_eval[0] is False:
                    pass
                elif func_eval == (True, True):
                    plot_list.append([intervalx, intervaly])
                elif func_eval[1] is None or func_eval[0] is None:
                    # Subdivide
                    avgx = intervalx.mid
                    avgy = intervaly.mid
                    a = interval(intervalx.start, avgx)
                    b = interval(avgx, intervalx.end)
                    c = interval(intervaly.start, avgy)
                    d = interval(avgy, intervaly.end)
                    temp_interval_list.append([a, c])
                    temp_interval_list.append([a, d])
                    temp_interval_list.append([b, c])
                    temp_interval_list.append([b, d])
            return temp_interval_list, plot_list
        while k >= 0 and len(interval_list):
            interval_list, plot_list_temp = refine_pixels(interval_list)
            plot_list.extend(plot_list_temp)
            k = k - 1
        # Check whether the expression represents an equality
        # If it represents an equality, then none of the intervals
        # would have satisfied the expression due to floating point
        # differences. Add all the undecided values to the plot.
        if self.has_equality:
            for intervals in interval_list:
                intervalx = intervals[0]
                intervaly = intervals[1]
                func_eval = func(intervalx, intervaly)
                if func_eval[1] and func_eval[0] is not False:
                    plot_list.append([intervalx, intervaly])
        return plot_list, "fill"
    def _get_meshes_grid(self):
        """Generates the mesh for generating a contour.
        In the case of equality, ``contour`` function of matplotlib can
        be used. In other cases, matplotlib's ``contourf`` is used.
        """
        expr, equality = self._preprocess_meshgrid_expression(self.expr)
        xarray = self._discretize(self.start_x, self.end_x, self.n1, self.xscale)
        yarray = self._discretize(self.start_y, self.end_y, self.n2, self.yscale)
        x_grid, y_grid = np.meshgrid(xarray, yarray)
        func = vectorized_lambdify((self.var_x, self.var_y), expr)
        z_grid = func(x_grid, y_grid)
        z_grid, ones = self._postprocess_meshgrid_result(z_grid, x_grid)
        if equality:
            return xarray, yarray, z_grid, ones, "contour"
        else:
            return xarray, yarray, z_grid, ones, "contourf"
    @staticmethod
    def _preprocess_meshgrid_expression(expr):
        """If the expression is a Relational, rewrite it as a single
        expression. This method reduces code repetition.

        Returns
        =======
        expr : Expr
            The rewritten expression
        equality : Boolean
            Whether the original expression was an Equality or not.

        Raises
        ======
        NotImplementedError
            If ``expr`` is not a Relational (callers are expected to have
            converted bare expressions to ``Eq(expr, 0)`` beforehand).
        """
        equality = False
        if isinstance(expr, Equality):
            expr = expr.lhs - expr.rhs
            equality = True
        elif isinstance(expr, (GreaterThan, StrictGreaterThan)):
            expr = expr.lhs - expr.rhs
        elif isinstance(expr, (LessThan, StrictLessThan)):
            expr = expr.rhs - expr.lhs
        else:
            raise NotImplementedError(
                "The expression is not supported for "
                "plotting in uniform meshed plot."
            )
        return expr, equality
    @staticmethod
    def _postprocess_meshgrid_result(z_grid, x_grid):
        """Bound the result to -1, 1. This method reduces code repetition.
        While with Matplotlib we can directly plot the result z_grid and set the
        contour levels, this is not possible with Plotly. Hence, Plotly will
        use the ones matrix. The result will be slightly different: while
        Matplotlib will render smooth lines, Plotly will looks
        square-ish/segmented.
        """
        z_grid = ImplicitSeries._correct_size(z_grid, x_grid)
        # ones contains data useful to plot regions, or in case of Plotly,
        # contour lines too.
        ones = np.ones_like(z_grid, dtype=np.int8)
        ones[np.ma.where(z_grid < 0)] = -1
        return z_grid, ones
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
    """Return the midpoint of each pair of consecutive values in *array*."""
    left = array[:-1]
    right = array[1:]
    return np.mean(np.vstack((left, right)), 0)
def centers_of_faces(array):
    """Return the center of every face (cell) of a 2D mesh grid.

    Parameters
    ==========
    array : np.ndarray
        2D array of shape (m, n) holding a value at each grid node.

    Returns
    =======
    np.ndarray
        Array of shape (m - 1, n - 1): the average of the four corner
        values of each cell.
    """
    # BUGFIX: the fourth stacked corner used to be array[:-1, :-1] (the
    # first corner duplicated) instead of array[1:, 1:], which skewed every
    # face center toward one corner of the cell.
    return np.mean(
        np.dstack(
            (
                array[:-1, :-1],
                array[1:, :-1],
                array[:-1, 1:],
                array[1:, 1:],
            )
        ),
        2,
    )
def flat(x, y, z, eps=1e-3):
    """Return True when the three points x, y, z are almost collinear.

    Collinearity is detected by checking that the angle between the vectors
    y->x and y->z is close to pi, i.e. its cosine is close to -1.
    """
    # Cast to float64 as a workaround for plotting piecewise (#8577):
    # `lambdify` in `.experimental_lambdify` may fail to return numerical
    # values in some cases. A lower-level fix in `lambdify` is possible.
    u = (x - y).astype(np.float64)
    v = (z - y).astype(np.float64)
    cos_theta = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
    return abs(cos_theta + 1) < eps
class InteractiveSeries(BaseSeries):
    """Represent an interactive series, in which the expressions can be either
    a line or a surface (parametric or not). On top of the usual ranges (x, y or
    u, v, which must be provided), the expressions can use any number of
    parameters.

    This class internally converts the expressions to a lambda function, which
    is evaluated by calling update_data(params), passing in all the necessary
    parameters. Once update_data(params) has been executed, then get_data()
    can be used.

    NOTE: the __init__ method expects every expression to be already sympified.
    """
    is_interactive = True
    def __new__(cls, *args, **kwargs):
        # Dispatch geometric entities to their specialized series classes.
        # args[0] is the tuple of expressions; args[0][0] the first one.
        if isinstance(args[0][0], Plane):
            return PlaneInteractiveSeries(*args, **kwargs)
        elif isinstance(args[0][0], GeometryEntity) and (
            not isinstance(args[0][0], Curve)
        ):
            return GeometryInteractiveSeries(*args, **kwargs)
        return object.__new__(cls)
    def __init__(self, exprs, ranges, label="", **kwargs):
        # take care of Curve from sympy.geometry, which can be seen as
        # parametric series
        if isinstance(exprs[0], Curve):
            c = exprs[0]
            exprs = c.functions
            ranges = [c.limits]
        # free symbols of the parameters
        params = kwargs.get("params", dict())
        # number of discretization points
        self.n1 = kwargs.get("n1", 250)
        self.n2 = kwargs.get("n2", 250)
        self.n3 = kwargs.get("n3", 250)
        n = [self.n1, self.n2, self.n3]
        # TODO / NOTE: even though we have the ComplexSeries and
        # ComplexInteractiveSeries classes, they are already doing a lot of work.
        # For the moment, we are going to allow InteractiveSeries to be able
        # to use complex discretization. In doing so, we can create 3D surfaces
        # of the real/imaginary/absolute value of a function of 2 variables.
        self.complex_discr = kwargs.get("is_complex", False)
        # With a complex discretization the range limits may be complex.
        castfunc = float if not self.complex_discr else complex
        self.xscale = kwargs.get("xscale", "linear")
        self.yscale = kwargs.get("yscale", "linear")
        self.label = label
        nexpr, npar = len(exprs), len(ranges)
        if nexpr == 0:
            raise ValueError(
                "At least one expression must be provided."
                + "\nReceived: {}".format((exprs, ranges, label))
            )
        # # TODO: do I really need this?
        # if npar > 2:
        #     raise ValueError(
        #             "Depending on the backend, only 2D and 3D plots are " +
        #             "supported (1 or 2 ranges at most). The provided " +
        #             "expressions uses {} ranges.".format(npar))
        # set series attributes: the (number of expressions, number of
        # ranges) pair determines what kind of plot this series represents.
        if (nexpr == 1) and (exprs[0].has(BooleanFunction) or exprs[0].has(Relational)):
            self.is_implicit = True
            exprs = list(exprs)
            exprs[0], self.equality = ImplicitSeries._preprocess_meshgrid_expression(
                exprs[0]
            )
        elif (nexpr == 1) and (npar == 1):
            self.is_2Dline = True
        elif (nexpr == 2) and (npar == 1):
            self.is_2Dline = True
            self.is_parametric = True
            # necessary to draw a gradient line with some backends
            self.var = ranges[0][0]
            self.start = float(ranges[0][1])
            self.end = float(ranges[0][2])
        elif (nexpr == 3) and (npar == 1):
            self.is_3Dline = True
            self.is_parametric = True
            # necessary to draw a gradient line with some backends
            self.var = ranges[0][0]
            self.start = float(ranges[0][1])
            self.end = float(ranges[0][2])
        elif (nexpr == 1) and (npar == 2):
            if kwargs.get("threed", False):
                self.is_3Dsurface = True
            else:
                self.is_contour = True
        elif (nexpr == 3) and (npar == 2):
            self.is_3Dsurface = True
            self.is_parametric = True
        elif (nexpr == 2) and (npar == 2):
            self.is_vector = True
            self.is_slice = False
            self.is_2Dvector = True
        elif (nexpr == 3) and (npar == 3):
            self.is_vector = True
            self.is_3Dvector = True
            self.is_slice = False
        # from the expression's free symbols, remove the ones used in
        # the parameters and the ranges
        fs = set().union(*[e.free_symbols for e in exprs])
        fs = fs.difference(params.keys()).difference([r[0] for r in ranges])
        if len(fs) > 0:
            raise ValueError(
                "Incompatible expression and parameters.\n"
                + "Expression: {}\n".format((exprs, ranges, label))
                + "Specify what these symbols represent: {}\n".format(fs)
                + "Are they ranges or parameters?"
            )
        # if we are dealing with parametric expressions, we pack them into a
        # Tuple so that it can be lambdified
        self.expr = exprs[0] if len(exprs) == 1 else Tuple(*exprs, sympify=False)
        # generate the lambda function
        signature, f = get_lambda(self.expr)
        self.signature = signature
        self.function = f
        # Discretize the ranges. In the following dictionary self.ranges:
        # key: symbol associate to this particular range
        # val: the numpy array representing the discretization
        discr_symbols = []
        discretizations = []
        for i, r in enumerate(ranges):
            discr_symbols.append(r[0])
            scale = self.xscale
            if i == 1:  # y direction
                scale = self.yscale
            discretizations.append(
                self._discretize(castfunc(r[1]), castfunc(r[2]), n[i], scale=scale)
            )
        if len(ranges) == 1:
            # 2D or 3D lines
            self.ranges = {k: v for k, v in zip(discr_symbols, discretizations)}
        else:
            _slice = kwargs.get("slice", None)
            if _slice is not None:
                # sliced 3D vector fields: the discretizations are provided by
                # the plane or the surface
                self.is_slice = True
                kwargs2 = kwargs.copy()
                kwargs2 = _set_discretization_points(kwargs2, SliceVector3DSeries)
                slice_surf = _build_plane_series(_slice, ranges, **kwargs2)
                self.ranges = {
                    k: v for k, v in zip(discr_symbols, slice_surf.get_data())
                }
            else:
                # surfaces: needs mesh grids
                meshes = np.meshgrid(*discretizations)
                self.ranges = {k: v for k, v in zip(discr_symbols, meshes)}
        self.data = None
        # If no parameters were supplied, the numerical data can be computed
        # lazily by get_data(); otherwise compute it right away.
        if len(params) > 0:
            self.update_data(params)
    def _evaluate(self, params):
        """Evaluate the lambdified expression with the given parameters.

        Parameters
        ==========
        params : dict
            key: symbol associated to the parameter
            val: the value

        Returns
        =======
        The raw evaluation result, broadcast to the shape of the
        discretized ranges when the evaluation produced a scalar.
        """
        args = []
        # Build the positional arguments in the order of the lambdified
        # signature: parameter values or discretized ranges.
        for s in self.signature:
            if s in params.keys():
                args.append(params[s])
            else:
                args.append(self.ranges[s])
        results = self.function(*args)
        # discretized ranges all have the same shape. Take the first!
        discr = list(self.ranges.values())[0]
        if isinstance(results, (list, tuple)):
            results = list(results)
            for i, r in enumerate(results):
                results[i] = self._correct_size(
                    # the evaluation might produce an int/float. Need this conversion!
                    np.array(r),
                    discr,
                )
        elif isinstance(results, (int, float)):
            results = self._correct_size(
                # the evaluation might produce an int/float. Need this conversion!
                np.array(results),
                discr,
            )
        return results
    def update_data(self, params):
        """Update the data based on the values of the parameters.

        Parameters
        ==========
        params : dict
            key: symbol associated to the parameter
            val: the value
        """
        results = self._evaluate(params)
        if (
            self.is_contour
            or (self.is_3Dsurface and (not self.is_parametric))
            or (self.is_2Dline and (not self.is_parametric))
        ):
            # in the case of single-expression 2D lines or 3D surfaces
            if self.complex_discr:
                results = [*[np.real(r) for r in self.ranges.values()], results]
            else:
                results = [*self.ranges.values(), results]
            self.data = results
        elif self.is_implicit:
            ranges = list(self.ranges.values())
            xr = ranges[0]
            yr = ranges[1]
            results = ImplicitSeries._postprocess_meshgrid_result(results, xr)
            # Mirror ImplicitSeries._get_meshes_grid's output layout:
            # 1D x-array, 1D y-array, z-grid, ones-grid, plot-mode string.
            results = [
                xr[0, :],
                yr[:, 0],
                *results,
                "contour" if self.equality else "contourf",
            ]
            self.data = results
        elif self.is_parametric and (self.is_3Dline or self.is_2Dline):
            # also add the parameter
            results = [*results, *self.ranges.values()]
            self.data = results
        elif self.is_vector:
            # in order to plot a vector, we also need the discretized region
            self.data = [*self.ranges.values(), *results]
        else:
            self.data = results
    def get_data(self):
        # if the expression depends only on the ranges, the user can call get_data
        # directly without calling update_data
        if (self.data is None) and (len(self.signature) == len(self.ranges)):
            self.update_data(dict())
        if self.data is None:
            raise ValueError(
                "To generate the numerical data, call update_data(params), "
                + "providing the necessary parameters."
            )
        return self.data
    def __str__(self):
        ranges = [(k, v[0], v[-1]) for k, v in self.ranges.items()]
        return ("interactive expression: %s with ranges" " %s and parameters %s") % (
            str(self.expr),
            ", ".join([str(r) for r in ranges]),
            str(self.signature),
        )
class ComplexSeries(BaseSeries):
"""Represent a complex number or a complex function."""
is_complex = True
is_point = False
is_domain_coloring = False
def __init__(self, expr, r, label="", **kwargs):
expr = sympify(expr)
nolist = False
if isinstance(expr, (list, tuple, Tuple)):
self.expr = expr
self.is_2Dline = True
self.is_point = True
self.var = None
self.start = None
self.end = None
else:
# we are not plotting list of complex points, but real/imag or
# magnitude/argument plots
nolist = True
self._init_attributes(expr, r, label, nolist, **kwargs)
def _init_attributes(self, expr, r, label, nolist, **kwargs):
"""This method reduces code repetition between ComplexSeries and
ComplexInteractiveSeries.
"""
self.function = None
if nolist:
self.var = sympify(r[0])
self.start = complex(r[1])
self.end = complex(r[2])
if self.start.imag == self.end.imag:
self.is_2Dline = True
self.adaptive = kwargs.get("adaptive", True)
self.depth = kwargs.get("depth", 9)
if kwargs.get("absarg", False):
self.adaptive = False
self.is_parametric = True
elif kwargs.get("threed", False):
self.is_3Dsurface = True
elif kwargs.get("abs", False) or kwargs.get("arg", False):
levels = kwargs.get("levels", (7, 4))
self.abs_levels = np.asarray(
[2.0 ** k for k in np.arange(0, levels[0]) - levels[0] // 2]
)
self.arg_levels = np.linspace(0.0, 2 * np.pi, levels[1], endpoint=False)
# https://github.com/nschloe/cplot/blob/main/src/cplot/_main.py
# assert levels in [-pi, pi], like np.angle
self.arg_levels = np.mod(self.arg_levels + np.pi, 2 * np.pi) - np.pi
# Contour levels must be increasing
self.arg_levels = np.sort(self.arg_levels)
is_level1 = (self.arg_levels > -np.pi + 0.1) & (
self.arg_levels < np.pi - 0.1
)
if kwargs.get("level1", True):
self.arg_levels = self.arg_levels[is_level1]
self.angle_func = np.angle
self.angle_range = (-np.pi, np.pi)
else:
self.arg_levels = self.arg_levels[~is_level1]
self.arg_levels = np.mod(self.arg_levels, 2 * np.pi)
self.angle_func = lambda k: np.mod(np.angle(k), 2 * np.pi)
self.angle_range = (0.0, 2 * np.pi)
self.is_contour = True
else:
self.is_domain_coloring = True
# TODO: do I need this???
from sympy import lambdify
self.function = lambdify([self.var], expr)
self.expr = sympify(expr)
if self.is_2Dline:
# could be lot of poles: need decent discretization in order to
# reliabily detect them.
self.n1 = kwargs.get("n1", 1000)
self.n2 = kwargs.get("n2", 1000)
else:
self.n1 = kwargs.get("n1", 300)
self.n2 = kwargs.get("n2", 300)
self.xscale = kwargs.get("xscale", "linear")
self.yscale = kwargs.get("yscale", "linear")
self.real = kwargs.get("real", False)
self.imag = kwargs.get("imag", False)
self.abs = kwargs.get("abs", False)
self.arg = kwargs.get("arg", False)
if self.abs and self.arg:
self.is_parametric = True
if self.is_parametric or self.abs:
self.label = "Abs(%s)" % label
elif self.arg:
self.label = "Arg(%s)" % label
elif self.real and self.imag:
self.label = label
elif self.real:
self.label = "re(%s)" % label
elif self.imag:
self.label = "im(%s)" % label
else:
self.label = label
# domain coloring mode
self.coloring = kwargs.get("coloring", "a")
if not isinstance(self.coloring, str):
raise ValueError("`coloring` must be of type string.")
self.coloring = self.coloring.lower()
self.phaseres = kwargs.get("phaseres", 20)
# these will be passed to cplot.get_srgb1
self.abs_scaling = kwargs.get("abs_scaling", "h-1")
self.colorspace = kwargs.get("colorspace", "cam16")
def _correct_output(self, x, r):
"""Obtain the correct output depending the initialized settings.
This method reduces code repetition between ComplexSeries and
ComplexInteractiveSeries.
Parameters
==========
x : np.ndarray
Discretized domain. Can be a complex line or a complex region.
r : np.ndarray
Numerical evaluation result.
"""
r = self._correct_size(np.array(r), np.array(x))
if self.start.imag == self.end.imag:
if self.is_parametric:
return np.real(x), np.absolute(r), np.angle(r)
elif self.real and self.imag:
return np.real(x), np.real(r), np.imag(r)
elif self.real:
return np.real(x), np.real(r)
elif self.imag:
return np.real(x), np.imag(r)
elif self.abs:
return np.real(x), np.absolute(r)
elif self.arg:
return np.real(x), np.angle(r)
return x, r
# 3D
if not self.is_domain_coloring:
if self.real and self.imag:
return np.real(x), np.imag(x), np.real(r), np.imag(r)
elif self.real:
return np.real(x), np.imag(x), np.real(r)
elif self.imag:
return np.real(x), np.imag(x), np.imag(r)
elif self.abs and self.arg:
return np.real(x), np.imag(x), np.absolute(r), np.angle(r)
elif self.is_contour and self.abs:
# NOTE: specific hack in order to get coloring="f" to work
# properly on MatplotlibBackend with cplot
return np.real(x), np.imag(x), np.absolute(r), r
elif self.is_contour and self.arg:
# NOTE: specific hack in order to get coloring="f" to work
# properly on MatplotlibBackend with cplot
return np.real(x), np.imag(x), np.angle(r), r
elif self.abs:
return np.real(x), np.imag(x), np.absolute(r)
elif self.arg:
return np.real(x), np.imag(x), np.angle(r)
# 2D or 3D domain coloring
return (
np.real(x),
np.imag(x),
np.dstack([np.absolute(r), np.angle(r)]),
*self._domain_coloring(r),
)
def adaptive_sampling(self, f):
"""The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
Different from LineOver1DRangeSeries and Parametric2DLineSeries, this
is an instance method because I really need to access other useful
methods.
References
==========
.. [1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
# TODO: need to modify this method in order to be able to work with
# absarg=True.
x_coords = []
y_coords = []
imag = np.imag(self.start)
def sample(p, q, depth):
"""Samples recursively if three points are almost collinear.
For depth < self.depth, points are added irrespective of whether
they satisfy the collinearity condition or not. The maximum
depth allowed is self.depth.
"""
# Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
if self.xscale == "log":
xnew = 10 ** (
np.log10(p[0]) + random * (np.log10(q[0]) - np.log10(p[0]))
)
else:
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew + imag * 1j)
# _correct_output is going to return different number of elements,
# depending on the user-provided keyword arguments.
r = self._correct_output(xnew, ynew)
xnew, ynew = r[:2]
new_point = np.array([xnew, ynew])
# Maximum depth
if depth > self.depth:
x_coords.append(q[0])
y_coords.append(q[1])
# Sample irrespective of whether the line is flat till the
# depth of 6. We are not using linspace to avoid aliasing.
elif depth < self.depth:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
# Sample ten points if complex values are encountered
# at both ends. If there is a real value in between, then
# sample those points further.
elif p[1] is None and q[1] is None:
if self.xscale == "log":
xarray = np.logspace(p[0], q[0], 10)
else:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray + imag * 1j))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample(
[xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]],
depth + 1,
)
# Sample further if one of the end points in None (i.e. a
# complex value) or the three points are not almost collinear.
elif (
p[1] is None
or q[1] is None
or new_point[1] is None
or not flat(p, new_point, q)
):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
x_coords.append(q[0])
y_coords.append(q[1])
f_start = f(self.start)
f_end = f(self.end)
# _correct_output is going to return different number of elements,
# depending on the user-provided keyword arguments.
rs = self._correct_output(self.start, f_start)
re = self._correct_output(self.end, f_end)
start, f_start, end, f_end = rs[0], rs[1], re[0], re[1]
x_coords.append(start)
y_coords.append(f_start)
sample(np.array([start, f_start]), np.array([end, f_end]), 0)
return x_coords, y_coords
def _domain_coloring(self, w):
from spb.complex.hsv_color_grading import color_grading
from spb.complex.wegert import (
bw_stripes_phase,
bw_stripes_mag,
domain_coloring,
enhanced_domain_coloring,
enhanced_domain_coloring_phase,
enhanced_domain_coloring_mag,
bw_stripes_imag,
bw_stripes_real,
cartesian_chessboard,
polar_chessboard,
)
from cplot import get_srgb1
_mapping = {
"a": domain_coloring,
"b": enhanced_domain_coloring,
"c": enhanced_domain_coloring_mag,
"d": enhanced_domain_coloring_phase,
"e": color_grading,
"f": None,
"g": bw_stripes_mag,
"h": bw_stripes_phase,
"i": bw_stripes_real,
"j": bw_stripes_imag,
"k": cartesian_chessboard,
"l": polar_chessboard,
}
colorscale = None
if not self.coloring in _mapping.keys():
raise KeyError(
"`coloring` must be one of the following: {}".format(_mapping.keys())
)
if self.coloring == "f":
zn = 1 * np.exp(1j * np.linspace(0, 2 * np.pi, 256))
colorscale = get_srgb1(zn, self.abs_scaling, self.colorspace)
colorscale = (colorscale * 255).astype(np.uint8)
# shift the argument from [0, 2*pi] to [-pi, pi]
colorscale = np.roll(colorscale, int(len(colorscale) / 2), axis=0)
rgb = (get_srgb1(w, self.abs_scaling, self.colorspace) * 255).astype(
np.uint8
)
return rgb, colorscale
if self.coloring <= "e":
from matplotlib.colors import hsv_to_rgb
H = np.linspace(0, 1, 256)
S = V = np.ones_like(H)
colorscale = hsv_to_rgb(np.dstack([H, S, V]))
colorscale = (colorscale.reshape((-1, 3)) * 255).astype(np.uint8)
colorscale = np.roll(colorscale, int(len(colorscale) / 2), axis=0)
return _mapping[self.coloring](w, phaseres=self.phaseres), colorscale
    def get_data(self):
        """Return the numerical data for this complex series.

        Three cases are handled:
        1. a list/tuple of complex points -> (x_list, y_list);
        2. a purely horizontal range (constant imaginary part) -> a line
           plot over the real axis;
        3. a genuine 2D complex region -> data for domain coloring.
        """
        if isinstance(self.expr, (list, tuple, Tuple)):
            # list of complex points
            # NOTE: re/im here are the sympy functions imported at file level.
            x_list, y_list = [], []
            for p in self.expr:
                x_list.append(float(re(p)))
                y_list.append(float(im(p)))
            return x_list, y_list

        if np.imag(self.start) == np.imag(self.end):
            # The range is a horizontal segment in the complex plane,
            # so this is a 1D line plot along the real direction.
            if self.adaptive:
                x, y = self.adaptive_sampling(self.function)
                return [np.array(t) for t in [x, y]]
            else:
                # compute the real/imaginary/magnitude/argument of the complex
                # function over a uniform (or log) discretization
                x = self._discretize(self.start, self.end, self.n1, self.xscale)
                y = self.function(x + np.imag(self.start) * 1j)
                return self._correct_output(x, y)

        # Domain coloring: evaluate the function over a 2D complex grid.
        start_x = np.real(self.start)
        end_x = np.real(self.end)
        start_y = np.imag(self.start)
        end_y = np.imag(self.end)
        x = self._discretize(start_x, end_x, self.n1, self.xscale)
        y = self._discretize(start_y, end_y, self.n2, self.yscale)
        xx, yy = np.meshgrid(x, y)
        domain = xx + 1j * yy
        zz = self.function(domain)
        return self._correct_output(domain, zz)
class ComplexInteractiveSeries(InteractiveSeries, ComplexSeries):
    """Interactive (parameter-driven) variant of ComplexSeries.

    Inherits the update machinery from InteractiveSeries and the output
    post-processing from ComplexSeries.
    """

    def __new__(cls, *args, **kwargs):
        # Bypass any factory logic in the parents: always build this class.
        return object.__new__(cls)

    def __init__(self, expr, r, label="", **kwargs):
        """
        Parameters
        ==========
        expr : sympy expression of one complex variable
        r : tuple (symbol, start, end) — complex start/end delimit the region
        label : str
        kwargs : params (dict of parameter values), n1/n2, xscale/yscale, ...
        """
        params = kwargs.get("params", dict())
        self._init_attributes(expr, r, label, True, **kwargs)

        self.xscale = kwargs.get("xscale", "linear")
        self.yscale = kwargs.get("yscale", "linear")

        # from the expression's free symbols, remove the ones used in
        # the parameters and the ranges
        fs = expr.free_symbols
        fs = fs.difference(params.keys()).difference(set([r[0]]))
        if len(fs) > 0:
            raise ValueError(
                "Incompatible expression and parameters.\n"
                + "Expression: {}\n".format((expr, r, label))
                + "Specify what these symbols represent: {}\n".format(fs)
                + "Are they ranges or parameters?"
            )

        # generate the lambda function
        signature, f = get_lambda(self.expr)
        self.signature = signature
        self.function = f

        # Discretize the ranges. In the following dictionary self.ranges:
        # key: symbol associate to this particular range
        # val: the numpy array representing the discretization
        if complex(r[1]).imag != complex(r[2]).imag:
            # domain coloring: a genuine 2D region of the complex plane
            x = self._discretize(
                complex(r[1]).real, complex(r[2]).real, self.n1, scale=self.xscale
            )
            y = self._discretize(
                complex(r[1]).imag, complex(r[2]).imag, self.n2, scale=self.yscale
            )
            xx, yy = np.meshgrid(x, y)
            zz = xx + 1j * yy
            self.ranges = {self.var: zz}
        else:
            # line plot along the real axis (constant imaginary part)
            x = self._discretize(
                complex(r[1]).real, complex(r[2]).real, self.n1, scale=self.xscale
            )
            # + 0j keeps the array complex so the lambdified function
            # receives complex input
            self.ranges = {self.var: x + 0j}

        self.data = None
        if len(params) > 0:
            self.update_data(params)

    def update_data(self, params):
        """Re-evaluate the series for a new set of parameter values."""
        results = self._evaluate(params)
        self.data = self._correct_output(self.ranges[self.var], results)
def _set_discretization_points(kwargs, pt):
    """This function allows the user to use the keyword arguments n, n1 and n2
    to specify the number of discretization points in two directions.

    Parameters
    ==========
    kwargs : dict
        User-provided keyword arguments (mutated in place and returned).
    pt : type
        The type of the series, which indicates the kind of plot we are
        trying to create: plot, plot_parametric, ...
    """
    # Idiom fix throughout: membership tests directly on the dict instead
    # of `in kwargs.keys()`.
    if pt in [LineOver1DRangeSeries, Parametric2DLineSeries, Parametric3DLineSeries]:
        # 1D series only use `n`; accept `n1` as an alias when `n` is absent.
        if ("n1" in kwargs) and ("n" not in kwargs):
            kwargs["n"] = kwargs["n1"]
    elif pt in [
        SurfaceOver2DRangeSeries,
        ContourSeries,
        ComplexSeries,
        ParametricSurfaceSeries,
        Vector2DSeries,
        ComplexInteractiveSeries,
        ImplicitSeries,
    ]:
        # 2D-domain series: `n` sets both directions.
        if "n" in kwargs:
            kwargs["n1"] = kwargs["n2"] = kwargs["n"]
    elif pt in [Vector3DSeries, SliceVector3DSeries, InteractiveSeries]:
        # 3D-domain series: `n` sets all three directions.
        if "n" in kwargs:
            kwargs["n1"] = kwargs["n2"] = kwargs["n3"] = kwargs["n"]
    return kwargs
class VectorBase(BaseSeries):
    """Represent a vector field."""

    # Flags used by the backends to dispatch rendering.
    is_vector = True
    is_2D = False
    is_3D = False
    # True when the field is sampled over a slicing plane/surface.
    is_slice = False
class Vector2DSeries(VectorBase):
    """Represents a 2D vector field."""

    is_2Dvector = True

    def __init__(self, u, v, range1, range2, label, **kwargs):
        # Each component is evaluated as an independent surface over the
        # same 2D grid.
        self.u = SurfaceOver2DRangeSeries(u, range1, range2, **kwargs)
        self.v = SurfaceOver2DRangeSeries(v, range1, range2, **kwargs)
        self.label = label

    def get_data(self):
        """Return (x, y, u, v) arrays; x/y grids are shared by both components."""
        x, y, u = self.u.get_data()
        _, _, v = self.v.get_data()
        # _correct_size broadcasts scalar/constant components to grid shape.
        return x, y, self._correct_size(u, x), self._correct_size(v, x)
class Vector3DSeries(VectorBase):
    """Represents a 3D vector field."""

    is_3D = True
    is_3Dvector = True

    def __init__(self, u, v, w, range_x, range_y, range_z, label="", **kwargs):
        # Symbolic vector components.
        self.u = sympify(u)
        self.v = sympify(v)
        self.w = sympify(w)
        # Per-axis range symbol and numeric start/end.
        for axis, rng in zip(("x", "y", "z"), (range_x, range_y, range_z)):
            setattr(self, "var_" + axis, sympify(rng[0]))
            setattr(self, "start_" + axis, float(rng[1]))
            setattr(self, "end_" + axis, float(rng[2]))
        self.label = label
        # Number of discretization points and scale per direction.
        self.n1 = kwargs.get("n1", 10)
        self.n2 = kwargs.get("n2", 10)
        self.n3 = kwargs.get("n3", 10)
        self.xscale = kwargs.get("xscale", "linear")
        self.yscale = kwargs.get("yscale", "linear")
        self.zscale = kwargs.get("zscale", "linear")

    def _discretize(self):
        """Build the 3D meshgrid from the three 1D discretizations."""
        axes = [
            super()._discretize(self.start_x, self.end_x, self.n1, self.xscale),
            super()._discretize(self.start_y, self.end_y, self.n2, self.yscale),
            super()._discretize(self.start_z, self.end_z, self.n3, self.zscale),
        ]
        return np.meshgrid(*axes)

    def get_data(self):
        """Return (x, y, z, u, v, w) with NaN/inf entries masked out."""
        xx, yy, zz = self._discretize()
        free_vars = (self.var_x, self.var_y, self.var_z)

        def _component(expr, grid):
            # Evaluate one symbolic component over the grid and make sure the
            # result has the grid's shape (constants would give scalars).
            fn = vectorized_lambdify(free_vars, expr)
            return self._correct_size(fn(xx, yy, zz), grid)

        def _masked(values):
            values = np.array(values, dtype=np.float64)
            return np.ma.masked_invalid(values)

        return (
            xx, yy, zz,
            _masked(_component(self.u, xx)),
            _masked(_component(self.v, yy)),
            _masked(_component(self.w, zz)),
        )
def _build_plane_series(plane, ranges, **kwargs):
    """Instantiate the proper data series for a plane-like object.

    sympy Plane instances get a dedicated PlaneSeries; any other expression
    is treated as a surface over the 2D range.
    """
    if not isinstance(plane, Plane):
        return SurfaceOver2DRangeSeries(plane, *ranges, **kwargs)
    return PlaneSeries(sympify(plane), *ranges, **kwargs)
class SliceVector3DSeries(Vector3DSeries):
    """Represents a 3D vector field plotted over a slice, which can be a slice
    plane or a slice surface.
    """

    is_slice = True

    def __init__(self, plane, u, v, w, range_x, range_y, range_z, label="", **kwargs):
        # The slicing plane/surface supplies the sampling grid (see
        # _discretize below); the rest is a regular 3D vector field.
        self.plane = _build_plane_series(
            plane, [range_x, range_y, range_z], **kwargs)
        super().__init__(u, v, w, range_x, range_y, range_z, label, **kwargs)

    def _discretize(self):
        """Sample on the slice surface instead of a full volume meshgrid."""
        return self.plane.get_data()
class PlaneSeries(SurfaceBaseSeries):
    """Represents a plane in a 3D domain."""

    is_3Dsurface = True

    def __init__(
        self, plane, x_range, y_range, z_range=None, label="", params=dict(), **kwargs
    ):
        # NOTE(review): `params=dict()` is a mutable default argument; safe
        # only because it is replaced, never mutated in place — verify callers.
        self.plane = sympify(plane)
        if not isinstance(self.plane, Plane):
            raise TypeError("`plane` must be an instance of sympy.geometry.Plane")
        self.x_range = sympify(x_range)
        self.y_range = sympify(y_range)
        self.z_range = sympify(z_range)
        self.label = label
        # Number of discretization points per axis.
        self.n1 = kwargs.get("n1", 20)
        self.n2 = kwargs.get("n2", 20)
        self.n3 = kwargs.get("n3", 20)
        self.xscale = kwargs.get("xscale", "linear")
        self.yscale = kwargs.get("yscale", "linear")
        self.zscale = kwargs.get("zscale", "linear")
        # Substitutions applied to the plane before evaluation (interactive use).
        self.params = params

    def get_data(self):
        """Return (xx, yy, zz) meshgrid data covering the plane.

        Axis-aligned planes are special-cased by inspecting which symbol
        survives in the plane equation; everything else is solved for z.
        """
        x, y, z = symbols("x, y, z")
        plane = self.plane.subs(self.params)
        fs = plane.equation(x, y, z).free_symbols
        xx, yy, zz = None, None, None
        if fs == set([x]):
            # parallel to yz plane (normal vector (1, 0, 0))
            # NOTE(review): the first range intentionally uses z_range/n3 —
            # the returned axes are swapped right below.
            s = SurfaceOver2DRangeSeries(
                self.plane.p1[0],
                (x, *self.z_range[1:]),
                (y, *self.y_range[1:]),
                "",
                n1=self.n3,
                n2=self.n2,
                xscale=self.xscale,
                yscale=self.yscale,
                zscale=self.zscale,
            )
            xx, yy, zz = s.get_data()
            # swap so the constant coordinate ends up on the x axis
            xx, yy, zz = zz, yy, xx
        elif fs == set([y]):
            # parallel to xz plane (normal vector (0, 1, 0))
            s = SurfaceOver2DRangeSeries(
                self.plane.p1[1],
                (x, *self.x_range[1:]),
                (y, *self.z_range[1:]),
                "",
                n1=self.n1,
                n2=self.n3,
                xscale=self.xscale,
                yscale=self.yscale,
                zscale=self.zscale,
            )
            xx, yy, zz = s.get_data()
            # swap so the constant coordinate ends up on the y axis
            xx, yy, zz = xx, zz, yy
        else:
            # parallel to xy plane, or any other plane
            eq = plane.equation(x, y, z)
            if z in eq.free_symbols:
                # express the plane as z = f(x, y)
                eq = solve(eq, z)[0]
            s = SurfaceOver2DRangeSeries(
                eq,
                (x, *self.x_range[1:]),
                (y, *self.y_range[1:]),
                "",
                n1=self.n1,
                n2=self.n2,
                xscale=self.xscale,
                yscale=self.yscale,
                zscale=self.zscale,
            )
            xx, yy, zz = s.get_data()
            if len(fs) > 1:
                # clip a tilted plane to the requested z window
                idx = np.logical_or(zz < self.z_range[1], zz > self.z_range[2])
                zz[idx] = np.nan
        return xx, yy, zz
class PlaneInteractiveSeries(PlaneSeries, InteractiveSeries):
    """Represent a geometric plane.

    NOTE: In the MRO, PlaneSeries has the precedence over InteractiveSeries.
    This is because Numpy and Scipy don't have correspondence with Plane.
    Hence, we got to use get_data() implemented in PlaneSeries.
    """

    def __new__(cls, *args, **kwargs):
        # Bypass any factory logic in the parents: always build this class.
        return object.__new__(cls)

    def __init__(self, exprs, ranges, label="", **kwargs):
        # Only the first expression is used: it must be the Plane instance.
        PlaneSeries.__init__(self, exprs[0], *ranges, label=label, **kwargs)

    def update_data(self, params):
        # Storing the new substitutions is enough: get_data() (inherited
        # from PlaneSeries) applies them on every call.
        self.params = params
class GeometrySeries(BaseSeries):
    """Data series for sympy GeometryEntity objects (points, lines,
    polygons, circles, ellipses, planes, curves).
    """

    is_geometry = True

    def __new__(cls, *args, **kwargs):
        # Factory behavior: Plane and Curve inputs are delegated to the
        # dedicated series classes instead of building a GeometrySeries.
        if isinstance(args[0], Plane):
            return PlaneSeries(*args, **kwargs)
        elif isinstance(args[0], Curve):
            new_cls = (
                Parametric2DLineSeries
                if len(args[0].functions) == 2
                else Parametric3DLineSeries
            )
            # Use an explicit string argument as label if present, otherwise
            # fall back to the Curve's repr.
            label = [a for a in args if isinstance(a, str)]
            label = label[0] if len(label) > 0 else str(args[0])
            return new_cls(*args[0].functions, args[0].limits, label, **kwargs)
        return object.__new__(cls)

    def __init__(self, expr, _range=None, label="", params=dict(), **kwargs):
        """
        Parameters
        ==========
        expr : GeometryEntity
        _range : optional (symbol, start, end) — only used to bound a Line
        label : str
        params : dict of symbol substitutions for interactive use
        kwargs : fill (bool) — whether closed shapes are filled
        """
        if not isinstance(expr, GeometryEntity):
            raise ValueError(
                "`expr` must be a geomtric entity.\n"
                + "Received: type(expr) = {}\n".format(type(expr))
                + "Expr: {}".format(expr)
            )
        # Every free symbol must be resolvable through `params`.
        r = expr.free_symbols.difference(set(params.keys()))
        if len(r) > 0:
            raise ValueError(
                "Too many free symbols. Please, specify the values of the "
                + "following symbols with the `params` dictionary: {}".format(r)
            )
        self.expr = expr
        self._range = _range
        self.label = label
        self.params = params
        self.fill = kwargs.get("fill", True)
        # Classify the entity so the backends know how to render it.
        if isinstance(expr, (LinearEntity3D, Point3D)):
            self.is_3Dline = True
            self.start = 0
            self.end = 0
            if isinstance(expr, Point3D):
                self.is_point = True
        elif isinstance(expr, LinearEntity2D) or (
            isinstance(expr, (Polygon, Circle, Ellipse)) and (not self.fill)
        ):
            # unfilled closed shapes are rendered as 2D lines
            self.is_2Dline = True
        elif isinstance(expr, Point2D):
            self.is_point = True
            self.is_2Dline = True

    def get_data(self):
        """Return numpy coordinate arrays appropriate for the entity type.

        Points return single-element arrays; closed shapes return arrays
        whose last point repeats the first so the outline closes.
        """
        expr = self.expr.subs(self.params)
        if isinstance(expr, Point3D):
            # trailing zeros array plays the role of the parameter values
            return (
                np.array([expr.x], dtype=float),
                np.array([expr.y], dtype=float),
                np.array([expr.z], dtype=float),
                np.array([0], dtype=float),
            )
        elif isinstance(expr, Point2D):
            return np.array([expr.x], dtype=float), np.array([expr.y], dtype=float)
        elif isinstance(expr, Polygon):
            x = [float(v.x) for v in expr.vertices]
            y = [float(v.y) for v in expr.vertices]
            # close the outline
            x.append(x[0])
            y.append(y[0])
            return np.array(x), np.array(y)
        elif isinstance(expr, Circle):
            cx, cy = float(expr.center[0]), float(expr.center[1])
            r = float(expr.radius)
            t = np.linspace(0, 2 * np.pi, 200)
            x, y = cx + r * np.cos(t), cy + r * np.sin(t)
            x = np.append(x, x[0])
            y = np.append(y, y[0])
            return x, y
        elif isinstance(expr, Ellipse):
            # NOTE(review): this parameterization assumes an axis-aligned
            # ellipse (sympy's Ellipse is axis-aligned by construction).
            cx, cy = float(expr.center[0]), float(expr.center[1])
            a = float(expr.hradius)
            e = float(expr.eccentricity)
            x = np.linspace(-a, a, 200)
            y = np.sqrt((a ** 2 - x ** 2) * (1 - e ** 2))
            x += cx
            # upper half followed by mirrored lower half
            x, y = np.concatenate((x, x[::-1])), np.concatenate((cy + y, cy - y[::-1]))
            x = np.append(x, x[0])
            y = np.append(y, y[0])
            return x, y
        elif isinstance(expr, LinearEntity3D):
            p1, p2 = expr.points
            x = np.array([p1.x, p2.x], dtype=float)
            y = np.array([p1.y, p2.y], dtype=float)
            z = np.array([p1.z, p2.z], dtype=float)
            param = np.zeros_like(x)
            return x, y, z, param
        elif isinstance(expr, (Segment, Ray)):
            # NOTE(review): a Ray is rendered as the finite segment between
            # its two defining points, not as a half-infinite line.
            p1, p2 = expr.points
            x = np.array([p1.x, p2.x])
            y = np.array([p1.y, p2.y])
            return x.astype(float), y.astype(float)
        else:  # Line
            p1, p2 = expr.points
            if self._range is None:
                x = np.array([p1.x, p2.x])
                y = np.array([p1.y, p2.y])
            else:
                # extend the line over the requested x range using y = m*x + q
                m = expr.slope
                q = p1[1] - m * p1[0]
                x = np.array([self._range[1], self._range[2]])
                y = m * x + q
            return x.astype(float), y.astype(float)
class GeometryInteractiveSeries(GeometrySeries, InteractiveSeries):
    """Represent a geometry entity.

    NOTE: In the MRO, GeometrySeries has the precedence over InteractiveSeries.
    This is because Numpy and Scipy don't have correspondence with Line,
    Segment, Polygon, ... Hence, we got to use get_data() implemented in
    GeometrySeries.
    """

    def __new__(cls, *args, **kwargs):
        # Bypass the factory in GeometrySeries.__new__: always build this class.
        return object.__new__(cls)

    def __init__(self, exprs, ranges, label="", **kwargs):
        # A range is optional: it is only meaningful for (infinite) Lines.
        r = ranges[0] if len(ranges) > 0 else None
        GeometrySeries.__init__(self, exprs[0], _range=r, label=label, **kwargs)

    def update_data(self, params):
        # Storing the new substitutions is enough: get_data() (inherited
        # from GeometrySeries) applies them on every call.
        self.params = params
| true
|
063ea81565ca64c0635afd8e4f7cb7c62daa110e
|
Python
|
darkryder/DMG_project
|
/project/app/management/commands/create_main_attributes_file.py
|
UTF-8
| 1,506
| 2.609375
| 3
|
[] |
no_license
|
from glob import glob
from progressbar import ProgressBar
import os
from django.core.management.base import BaseCommand
TEMPLATE = """
NOMINAL = %(nominal)s
ORDINAL = %(ordinal)s
BINARY = %(binary)s
NUMERIC = %(numeric)s
"""
class Command(BaseCommand):
help = 'Create a main attribute file after merging all small attribute files'
def handle(self, *args, **options):
final_dict = {}
files = glob("attr_*")
if len(files) == 0:
print "No files found to merge"
return
pbar = ProgressBar(maxval=len(files) - 1).start()
for i, filename in enumerate(files):
with open(filename, 'r') as f:
data = f.read()
subset = eval(data) # hope no one is evil
final_dict.update(subset)
pbar.update(i)
pbar.finish()
categories = {x: [] for x in ('nominal', 'ordinal', 'binary', 'numeric')}
for attr_name, attr_type in final_dict.items():
if attr_type in ('nominal', 'ordinal', 'binary'):
categories[attr_type].append(attr_name)
elif attr_type in ('interval', 'ratio'):
categories['numeric'].append(attr_name)
else:
print "found weird", attr_type, "for", attr_name
filename = "attribute_classifications.py"
if os.path.exists(filename):
os.remove(filename)
output = open(filename, 'a+')
f.write(TEMPLATE % categories)
| true
|
1f47a1aa8febc54d322c2f25f53c7114807917c1
|
Python
|
YigalOrn/Afeka
|
/Course Software Engineering Seminar/Spam ANN/SpamANN_with_PCA_with_bias.py
|
UTF-8
| 6,186
| 2.96875
| 3
|
[] |
no_license
|
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn import preprocessing
import numpy as np
from random import shuffle
import time
# program constants
EPOCH_NUM = 30000  # number of full-batch gradient-descent iterations
ALPHA = 0.001  # learning rate
PCA_NUM = 3  # number of principal components kept after PCA
# Derivative of the sigmoid expressed in terms of the sigmoid *output* x:
# sigma'(z) = sigma(z) * (1 - sigma(z)). Works element-wise on matrices.
def derivative(x):
    return (1.0 - x) * x
# Logistic activation function, applied element-wise to matrices.
def sigmoid(x):
    return 1.0 / (np.exp(-x) + 1.0)
# measure run time
start = time.time()
# -------------------------------------------------------------------
# Load the UCI spambase data set: comma-separated lines, 57 numeric
# features followed by the 0/1 spam label.
with open('spambase.data') as f:
    lines = f.readlines()
# the data set is sorted by labels, we shuffle it in order to get better train/test sets
# notice that because of this shuffle the results may be different between runs
shuffle(lines)
tempX, tempY = [], []
for line in lines:
    curr = line.split(',')
    new_curr = [1.0]  # leading 1.0 acts as the bias feature
    for item in curr[:len(curr) - 1]:
        new_curr.append(float(item))
    tempX.append(new_curr)
    tempY.append([float(curr[-1])])
# transform python lists to numpy matrices
X = np.array(tempX)
Y = np.array(tempY)
# from Neta:
# looking at the data set it appears that there are many points that are very close to each other
# we can disperse them a little to get a better graphical view of the data set without changing it
X = 1 + np.log(X + 0.01)
# take 80% for train and 20% for test
set_size = len(lines)
train_size = set_size * 80 // 100
# set part of the data set for training
X_train = X[:train_size]
Y_train = Y[:train_size]
# set part of the data set for testing
X_test = X[train_size:]
Y_test = Y[train_size:]
# -------------------------------------------------------------------
# -------------------------------------------------------------------
# feature scaling
# normalize X_train and save mean and std in object StandardScaler
scaler = preprocessing.StandardScaler()
X_train = scaler.fit_transform(X_train)
# pca = PCA(n_components=PCA_NUM)
pca = PCA()
# Project onto principal components and keep only the first PCA_NUM.
Z = pca.fit_transform(X_train)
Z_train = Z[:, 0:PCA_NUM]
# set bias unit: prepend a column of 1s to the projected features
Z_rows, Z_cols = Z_train.shape
Z_train_temp = np.ones((Z_rows, Z_cols + 1))
Z_train_temp[:, 1:Z_cols + 1] = Z_train
Z_train = Z_train_temp
# -------------------------------------------------------------------
# -------------------------------------------------------------------
# 3D scatter of the training set in PCA space, colored by spam label.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(Z_train[:, 1], Z_train[:, 2], Z_train[:, 3], c=Y_train[:, 0], marker='o')
ax.set_xlabel('Z0_train Feature')
ax.set_ylabel('Z1_train Feature')
ax.set_zlabel('Z2_train Feature')
plt.show()
# -------------------------------------------------------------------
# -------------------------------------------------------------------
# our ANN:
# we have 3 layers: input layer, hidden layer and output layer
# input layer has 4 nodes (1 for each feature + bias unit)
# hidden layer has 5 nodes (1 bias unit + 4 working neurons)
# output layer has 1 node
dim1 = Z_train.shape[1]
dim2 = 5
# randomly initialize the weight matrices to values between -1 to 1
np.random.seed(1)
weight1 = 2 * np.random.random((dim1, dim2)) - 1
weight2 = 2 * np.random.random((dim2, 1)) - 1
for j in range(EPOCH_NUM):
    # forward propagation: first evaluate the output for each training sample
    a_1 = Z_train
    v2 = np.dot(a_1, weight1)
    a_2 = sigmoid(v2)
    # ------------------------
    # add bias to layer_2
    # NOTE(review): a_2_temp has the SAME shape as a_2, so no column is
    # appended; this overwrites column 0 of a_2 with 1s (the slice
    # a_2[:, 1:a_2_cols + 1] is clipped to a_2[:, 1:]) — confirm intended.
    a_2_rows, a_2_cols = a_2.shape
    a_2_temp = np.ones((a_2_rows, a_2_cols))
    a_2_temp[:, 1:a_2_cols + 1] = a_2[:, 1:a_2_cols + 1]
    a_2 = a_2_temp
    # ------------------------
    v3 = np.dot(a_2, weight2)
    a_3 = sigmoid(v3)
    # calculate the error, perform back propagation
    delta_3 = Y_train - a_3
    delta_2 = delta_3.dot(weight2.T) * derivative(a_2)
    # update the weight vectors (gradient ascent on -cost, full batch)
    weight2 += ALPHA * a_2.T.dot(delta_3)
    weight1 += ALPHA * a_1.T.dot(delta_2)
# -------------------------------------------------------------------
# -------------------------------------------------------------------
# scale according to train data
X_test = scaler.transform(X_test)
# pca transform according to train data
Z = pca.transform(X_test)
Z_test = Z[:, 0:PCA_NUM]
# set bias unit: prepend a column of 1s, mirroring the training setup
Z_rows, Z_cols = Z_test.shape
Z_test_temp = np.ones((Z_rows, Z_cols + 1))
Z_test_temp[:, 1:Z_cols + 1] = Z_test
Z_test = Z_test_temp
# evaluation on the testing data (forward pass only)
a_1 = Z_test
a_2 = sigmoid(np.dot(a_1, weight1))
# ------------------------
# add bias to layer_2
# NOTE(review): same shape caveat as in training — column 0 is overwritten
# with 1s rather than a new column being appended.
a_2_rows, a_2_cols = a_2.shape
a_2_temp = np.ones((a_2_rows, a_2_cols))
a_2_temp[:, 1:a_2_cols + 1] = a_2[:, 1:a_2_cols + 1]
a_2 = a_2_temp
# ------------------------
# layer 3 is y_hat - our prediction
a_3 = sigmoid(np.dot(a_2, weight2))
# add Y_hat_test to plot the predictions as probabilities and so clear distinction between spam and not spam
Y_hat_test = np.array(a_3)
# -------------------------------------------------------------------
# -------------------------------------------------------------------
# if the (prediction-val > 0.5) then label as spam=1 else label not-spam=0
correct = 0
for i in range(len(a_3)):
    if a_3[i][0] > 0.5:
        a_3[i][0] = 1
    else:
        a_3[i][0] = 0
    if a_3[i][0] == Y_test[i][0]:
        correct += 1
cm = confusion_matrix(Y_test, a_3)
# -------------------------------------------------------------------
# ------------------------------------------------
# show final result: accuracy, confusion matrix and prediction scatter
print("mail samples in test set:\n%d" % len(a_3))
print("correct predictions:\n%d" % correct)
print("model accuracy:\n%.4f" % (correct * 100.0 / len(a_3)))
fig, ax = plot_confusion_matrix(conf_mat=cm)
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(Z_test[:, 1], Z_test[:, 2], Z_test[:, 3], c=Y_hat_test[:, 0], marker='o')
ax.set_xlabel('Z0_test Feature')
ax.set_ylabel('Z1_test Feature')
ax.set_zlabel('Z2_test Feature')
plt.show()
# ------------------------------------------------
end = time.time()
print("Run Time is sec: %.3f" % (end - start))
| true
|
cefb9981c06530bc5da651768c72dddfe694d72a
|
Python
|
viorelrj/tic-tac-toe-neural-net
|
/intelligence.py
|
UTF-8
| 2,481
| 3.703125
| 4
|
[] |
no_license
|
# This is the neural netowrk ai for tic tac toe.
# Input array consists of 27 elements: 3 per each cell of board, with each one described as follows:
# 1st - cell is free, 2nd cell is occupied by the ai, 3rd - cell is occupied by oponent
#
# There is only one hidden layer, with 10 elements, each representing a move: 9 cells and 10th - no move available.
#
# The output layer consists of one element which contains one cell - the index of cell with biggest value from hidden layer.
import numpy as np
class Intelligence:
    """Single-hidden-layer neural-network player for tic tac toe.

    The 9-cell board (0 = free, 1 = this AI, 2 = opponent) is one-hot
    encoded into a 27-element input layer. The 10 output units correspond
    to the 9 possible moves plus a 10th "no move available" unit.
    """

    def __init__(self, weights, learning_rate):
        # weights: (10, 27) array — one row of input weights per move unit.
        self.weights = weights
        self.learning_rate = learning_rate
        self.choice = None  # index of the last chosen move
        self.error = None   # last output error vector (prediction - target)

    def sigmoid(self, x):
        """Logistic activation."""
        return 1/(1+np.exp(-x))

    def sigmoid_der(self, x):
        """Derivative of the logistic function, evaluated at pre-activation x."""
        return self.sigmoid(x)*(1-self.sigmoid(x))

    def _encode(self, board):
        """One-hot encode the 9-cell board into the 27-element input layer."""
        input_layer = np.zeros(27)
        for cell, state in enumerate(board):
            # BUG FIX: np.int was removed in NumPy 1.24; builtin int() suffices.
            input_layer[int(cell * 3 + state)] = 1
        return input_layer

    def step(self, board):
        """Return the index of the preferred move for `board` (no learning)."""
        input_layer = self._encode(board)
        hidden_layer = np.zeros(10)
        for i in range(0, len(hidden_layer)):
            hidden_layer[i] = self.sigmoid(np.dot(input_layer, self.weights[i]))
        return np.argmax(hidden_layer)

    def learn(self, board):
        """Run one forward/backward pass on `board`; return the chosen move."""
        input_layer = self._encode(board)
        # Feed Forward
        hidden_layer = np.zeros(10)
        for i in range(0, len(hidden_layer)):
            hidden_layer[i] = self.sigmoid(np.dot(input_layer, self.weights[i]))
        self.choice = np.argmax(hidden_layer)
        # Back propagation: the target marks every free cell as a legal move.
        # BUG FIX: copy before mutating — the original aliased `board` and
        # clobbered the caller's board state via the in-place assignment.
        answer = np.array(board).copy()
        answer[answer != 0] = 1
        answer = np.abs(1 - answer)
        if (np.count_nonzero(answer) == 9):
            # the whole board is free -> the "no move" unit target is 1
            no_choice = 1
        else:
            no_choice = 0
        answer = np.append(answer, no_choice)
        self.error = hidden_layer - answer
        for i in range(0, 10):
            # backpropagation step 2
            dcost_dpred = self.error[i]
            # NOTE(review): hidden_layer[i] is already a sigmoid *output*;
            # sigmoid_der re-applies sigmoid, which may be unintended —
            # behavior preserved from the original.
            dpred_dz = self.sigmoid_der(hidden_layer[i])
            z_delta = dcost_dpred * dpred_dz
            self.weights[i] -= self.learning_rate * np.dot(input_layer, z_delta)
        return self.choice
| true
|
71353d030a8a6714432fe764f52be791746dc13a
|
Python
|
JariMutikainen/pyExercise
|
/bank/bank.py
|
UTF-8
| 2,830
| 4.21875
| 4
|
[] |
no_license
|
# Simulate a banking system in which the user can:
#
# 1. Open a new savings account with an initial deposit
# 2. Withdraw money from his existing savings account
# 3. Deposit money into his savings account
# 4. Show the current balance of his account
#
# Use the random number generator for creating 5-digit account numbers when
# creating a new account. To access a given account the user must provide
# the correct account number and the correct name of the account owner.
from random import randint
from account import Account
from sys import exit
class Bank:
    """In-memory bank: maps zero-padded 5-digit account-number strings to
    Account objects and drives the interactive menu loop."""

    def __init__(self):
        self.accounts = {}

    @staticmethod
    def get_details():
        """Prompt for owner name and account number; return them as a tuple."""
        owner = input('Enter your name: ')
        acc_num = input('Enter your 5-digit account number: ')
        return (owner, acc_num)

    @staticmethod
    def _new_account_number(existing):
        """Return a zero-padded 5-digit account number not in `existing`."""
        acc_num = f'{randint(0, 99999):05d}'
        while acc_num in existing:
            acc_num = f'{randint(0, 99999):05d}'
        return acc_num

    def creation(self):
        """Open a new savings account with an initial deposit."""
        owner = input('Enter your name: ')
        initial_depo = input('Enter your initial deposit amount: ')
        # BUG FIX: the original compared a raw int against the *string* keys
        # of self.accounts, so duplicates were never detected; it also used
        # randint(1, 100000), which can produce the 6-digit number 100000.
        acc_num = self._new_account_number(self.accounts)
        self.accounts[acc_num] = Account(owner, acc_num, initial_depo)

    def deposit(self):
        """Deposit money into an existing account."""
        owner, acc_num = self.get_details()
        depo_amount = input('Enter amount to be deposited: ')
        try:
            # consistent with query(): report unknown accounts explicitly
            self.accounts[acc_num].deposit(owner, acc_num, depo_amount)
        except KeyError:
            print(f'\n\tAccount number {acc_num} does not exist.')

    def withdraw(self):
        """Withdraw money from an existing account."""
        owner, acc_num = self.get_details()
        wd_amount = input('Enter amount to be withdrawn: ')
        try:
            self.accounts[acc_num].withdraw(owner, acc_num, wd_amount)
        except KeyError:
            print(f'\n\tAccount number {acc_num} does not exist.')

    def query(self):
        """Show the current balance of an existing account."""
        owner, acc_num = self.get_details()
        try:
            self.accounts[acc_num].show_balance(owner, acc_num)
        except KeyError:
            print(f'\n\tAccount number {acc_num} does not exist.')

    def exit_(self):
        """Print a goodbye message and terminate the program."""
        print('\n\tThank you for using our banking system.')
        exit(0)

    def run_bank(self):
        """Main menu loop: dispatch user choices to the handlers above."""
        actions = {'1': self.creation,
                   '2': self.deposit,
                   '3': self.withdraw,
                   '4': self.query,
                   '5': self.exit_}
        while True:
            print('\nCreate new savings account: 1\n'
                  'Deposit money into the account: 2\n'
                  'Withdraw money from the account: 3\n'
                  'Show current balance of the account: 4\n'
                  'Exit banking system: 5\n')
            choice = input('Select action: ')
            try:
                actions[choice]()
            except KeyError:
                print('\nIllegal input. try again.')
# Script entry point: build the bank and start the interactive menu loop.
if __name__ == '__main__':
    bank = Bank()
    bank.run_bank()
| true
|
66b757a46ef5bc8192fb9079e3ba79102f010238
|
Python
|
MinaPecheux/Medusa
|
/medusa/algorithms/vigenere.py
|
UTF-8
| 4,223
| 2.609375
| 3
|
[] |
no_license
|
# Copyright 2020 Mina Pêcheux (mina.pecheux@gmail.com)
# ---------------------------
# Distributed under the MIT License:
# ==================================
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
# [Medusa] Mini Encoding/Decoding Utility with Simple Algorithms
# ------------------------------------------------------------------------------
__author__ = 'Mina Pêcheux'
__copyright__ = 'Copyright 2020, Mina Pêcheux'
from .common import Algorithm, ALPHABET
# Vigenere tableaux, precomputed once at import time.
# ENCODE_TABLE[key_char][plain_char] -> cipher_char: the row for key_char is
# the alphabet shifted by key_char's index. DECODE_TABLE is the inverse map.
ENCODE_TABLE = {
    c: {c2: ALPHABET[(j + i) % len(ALPHABET)]
        for j, c2 in enumerate(ALPHABET)}
    for i, c in enumerate(ALPHABET)
}
DECODE_TABLE = {
    c: {ALPHABET[(j + i) % len(ALPHABET)]: c2
        for j, c2 in enumerate(ALPHABET)}
    for i, c in enumerate(ALPHABET)
}
class Vigenere(Algorithm):
    """Vigenere cipher variant: a secondary "complement" key controls how
    the primary key index advances, instead of plain cyclic stepping.
    """

    _name = 'vigenere'

    @staticmethod
    def get_params():
        """Declare the parameters this algorithm requires."""
        return {'common': {'required': ['key', 'complement_key']}}

    def check_secure(self, params, action=None):
        """Validate parameters; return (ok, error_message_or_None)."""
        if len(params['key']) == 0:
            return False, '"key" cannot be empty'
        if len(params['complement_key']) == 0:
            return False, '"complement_key" cannot be empty'
        return True, None

    def _transform(self, content, table, key, complement_key):
        """Shared character walk used by both encode() and decode().

        `table` maps a key character to a {input char -> output char} row.
        The key index advances by the ordinal of the current complement-key
        character (mod len(key)); whenever it wraps around (does not move
        strictly forward), the complement key advances one position.
        """
        key_rank = 0  # counter that goes through the characters of the key
        complement_key_rank = 0  # counter that goes through the complement key
        result = ''
        for c in content:
            # apply the Vigenere substitution for the current key character
            result += table[key[key_rank]][c]
            # access new character of the key
            last_key_rank = key_rank
            k = complement_key[complement_key_rank]
            key_rank = (key_rank + ord(k)) % len(key)
            # if back to beginning of key
            if key_rank <= last_key_rank:
                # access next character of complement key
                complement_key_rank = (complement_key_rank + 1) \
                    % len(complement_key)
        return result

    def encode(self, content, params):
        """Encode `content` with the Vigenere tableau."""
        # CONSISTENCY: encode/decode were near-duplicate bodies; the shared
        # stepping logic now lives in _transform.
        return self._transform(content, ENCODE_TABLE,
                               params['key'], params['complement_key'])

    def decode(self, content, params):
        """Decode content previously produced by encode() with the same params."""
        return self._transform(content, DECODE_TABLE,
                               params['key'], params['complement_key'])
| true
|
106fdb5732ba66602a09170170015e49c0cbb6e2
|
Python
|
lutianming/afcp-alumni
|
/application/views.py
|
UTF-8
| 9,973
| 2.546875
| 3
|
[] |
no_license
|
"""
views.py
URL route handlers
Note that any handler params must match the URL route params.
For example the *say_hello* handler, handling the URL route '/hello/<username>',
must be passed *username* as the argument.
"""
from google.appengine.api import search as gsearch, mail
from google.appengine.ext import ndb
from google.appengine.runtime.apiproxy_errors import CapabilityDisabledError
from flask import request, render_template, flash, url_for, redirect
from flask_cache import Cache
import flask_login
from flask_login import current_user
from application import app
from decorators import login_required, admin_required
from forms import LoginForm, ChangePasswordForm, MemberInfoForm, SearchForm, ForgetPasswordForm, ResetPasswordForm, ChangeEmailForm, ActiveEmailForm
from models import MemberModel, ResetPasswordModel, ChangeEmailModel
from admin import update_document
import datetime
# Flask-Cache (configured to use App Engine Memcache API)
cache = Cache(app)
def home():
    """Render the landing page with an embedded login form."""
    login_form = LoginForm(request.form)
    return render_template('index.html', form=login_form)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a member and start a (optionally remembered) session."""
    form = LoginForm(request.form)
    # Already signed in — nothing to do here.
    if current_user.is_authenticated():
        return redirect(url_for('home'))
    if request.method == 'POST' and form.validate():
        email = form.email.data
        password = form.password.data
        remember = form.remember.data
        # SECURITY NOTE(review): passwords appear to be stored and compared
        # in plain text; they should be hashed — confirm MemberModel schema.
        member = MemberModel.query(MemberModel.email == email,
                                   MemberModel.password == password).get()
        if member:
            flask_login.login_user(member, remember=remember)
            member.last_login = datetime.datetime.now()
            member.put()
            # flash('log in', category="success")
            return redirect(url_for('home'))
        flash('login failed, wrong email or password', category="danger")
    return render_template('login.html', form=form)
@app.route('/logout', methods=['GET', 'POST'])
@login_required
def logout():
    """End the current session and return to the landing page."""
    flask_login.logout_user()
    return redirect(url_for('home'))
@app.route('/forget_password', methods=['GET', 'POST'])
def forget_password():
    """Email a time-limited password-reset link to a registered member."""
    form = ForgetPasswordForm(request.form)
    if request.method == 'POST' and form.validate():
        email = form.email.data
        # ROBUSTNESS: only create a token and send mail for addresses that
        # belong to a registered member (the original mailed anyone).
        member = MemberModel.query(MemberModel.email == email).get()
        if member:
            # Token is valid for 24 hours.
            time = datetime.datetime.now() + datetime.timedelta(days=1)
            model = ResetPasswordModel(
                email=email,
                expire_time=time
            )
            model.put()
            link = url_for('reset_password', id=model.key.urlsafe())
            # CONSISTENCY: use the configured sender (as change_email does)
            # instead of a hard-coded address.
            sender = app.config["SENDER"]
            message = mail.EmailMessage(sender=sender)
            message.to = email
            message.subject = "reset password"
            message.body = """
            reset your password by the following link:
            {0}
            """.format(link)
            message.send()
        # SECURITY FIX: the original flashed the reset link itself, exposing
        # the reset token on the page. The message is identical whether or
        # not the address is registered, to avoid account enumeration.
        flash('If this address is registered, a reset mail has been sent.')
    return render_template('forget_password.html', form=form)
@app.route('/change_email', methods=['GET', 'POST'])
def change_email():
    """Start an email-address change: store a pending request and mail an
    activation link to the *new* address."""
    form = ChangeEmailForm(request.form)
    if request.method == 'POST' and form.validate():
        new_email = form.new_email.data
        # test if new email is already used by others
        exist = MemberModel.query(MemberModel.email == new_email).get()
        if exist:
            flash("this email address is already used by others")
        else:
            old_email = current_user.email
            # The pending change expires after 24 hours.
            time = datetime.datetime.now() + datetime.timedelta(days=1)
            model = ChangeEmailModel(
                email=old_email,
                new_email=new_email,
                expire_time=time
            )
            model.put()
            # The urlsafe key doubles as the activation token.
            link = url_for('active_email', id=model.key.urlsafe())
            sender = app.config["SENDER"]
            message = mail.EmailMessage(sender=sender)
            message.to = new_email
            message.subject = "change email"
            message.body = """
            change your email by clicking the following link:
            {0}
            """.format(link)
            message.send()
            flash("mail sent, please follow the instuction in your mail to change your mail")
            return redirect(url_for('account'))
    return render_template('change_email.html', form=form)
@app.route('/active_email', methods=['GET', 'POST'])
def active_email():
    """Confirm an email change started by change_email().

    GET renders a login form; POST re-authenticates with the OLD
    credentials, then swaps in the new address stored on the
    ChangeEmailModel referenced by the urlsafe `id` query parameter.
    """
    form = LoginForm(request.form)
    id = request.args.get('id', '')
    key = ndb.Key(urlsafe=id)
    r = key.get()
    # Reject missing or expired change requests outright.
    if not r or r.expire_time < datetime.datetime.now():
        flash("not valided link")
        return redirect(url_for('home'))
    if request.method == 'POST' and form.validate():
        old_email = form.email.data
        password = form.password.data
        new_email = r.new_email
        # NOTE(review): password is compared verbatim in the datastore
        # query — passwords appear to be stored in plaintext; confirm.
        member = MemberModel.query(MemberModel.email==old_email,
                                   MemberModel.password==password).get()
        if member:
            member.email = new_email
            member.put()
            # Keep the full-text search index in sync with the entity.
            update_document(member)
            flash('email updated')
            # One-shot token: delete the change request after use.
            key.delete()
            return redirect(url_for('account'))
        else:
            flash('old email or password not validated')
    return render_template('active_email.html', form=form, id=id)
@app.route('/reset_password', methods=['GET', 'POST'])
def reset_password():
    """Set a new password via a link issued by forget_password().

    The urlsafe `id` query parameter identifies a ResetPasswordModel;
    expired or unknown tokens redirect home. On success the member is
    logged in and the token is deleted (one-shot).
    """
    form = ResetPasswordForm(request.form)
    urlsafe = request.args.get('id', '')
    key = ndb.Key(urlsafe=urlsafe)
    r = key.get()
    if not r or r.expire_time < datetime.datetime.now():
        flash('not valide URL')
        return redirect(url_for('home'))
    if request.method == 'POST' and form.validate():
        password = form.new_password.data
        member = MemberModel.query(MemberModel.email==r.email).get()
        member.password = password
        flask_login.login_user(member)
        member.last_login = datetime.datetime.now()
        member.put()
        # Delete reset request model after the change (one-shot token).
        key.delete()
        flash('password changed')
        return redirect(url_for('home'))
    return render_template('reset_password.html', form=form, id=urlsafe)
@app.route('/account')
@login_required
def account():
    """Render the logged-in user's personal account page."""
    return render_template('personal.html')
@app.route('/change_password', methods=['GET', 'POST'])
@login_required
def change_password():
    """Let the logged-in user change their password after confirming the old one."""
    form = ChangePasswordForm(request.form)
    if request.method == 'POST' and form.validate():
        oldpassword = form.old_password.data
        newpassword = form.new_password.data
        # NOTE(review): direct string comparison — passwords appear to be
        # stored unhashed; confirm whether hashing is intended.
        if oldpassword == current_user.password:
            current_user.password = newpassword
            current_user.put()
            flash('password changed')
            return redirect(url_for('account'))
        else:
            flash('wrong password')
    return render_template('change_password.html', form=form)
@app.route('/update_info', methods=['GET', 'POST'])
@login_required
def update_info():
    """Update the current member's profile from the submitted form.

    Copies every form field onto the entity except email and password,
    which have dedicated flows, then refreshes the search index.
    """
    form = MemberInfoForm(request.form, obj=current_user)
    if request.method == 'POST' and form.validate():
        for field in form:
            # Need to confirm that the field is not email or password,
            # which should be changed by other methods.
            if field.name == 'email' or field.name == 'password':
                continue
            setattr(current_user, field.name, field.data)
        current_user.put()
        update_document(current_user)
        flash('info updated')
        return redirect(url_for('account'))
    return render_template('update_info.html', form=form)
@app.route('/members/')
@login_required
def members():
    """Paginated member directory (20 per page, `page` query parameter)."""
    page_size = 20
    page = int(request.args.get('page', 0))
    query = MemberModel.query()
    members = query.fetch(page_size, offset=page*page_size)
    # NOTE(review): `/` here relies on Python 2 integer division for a
    # whole page count; on Python 3 this yields a float — confirm runtime.
    num_pages = query.count() / page_size
    def pager_url(p):
        # URL builder handed to the template for pagination links.
        return url_for('members', page=p)
    return render_template('members.html', members=members,
                           page=page, num_pages=num_pages,
                           pager_url=pager_url)
@app.route('/member/<urlsafe>')
@login_required
def member(urlsafe):
    """Show a single member, looked up by the urlsafe datastore key in the URL."""
    key = ndb.Key(urlsafe=urlsafe)
    member = key.get()
    return render_template('member.html', member=member)
@app.route('/search')
@login_required
def search():
    """Full-text member search over the 'members' index, paginated.

    Query string `q` and zero-based `page` come from the URL; results
    are fetched via the App Engine search API.
    """
    page_size = 20
    page = int(request.args.get('page', 0))
    q = request.args.get('q', '')
    index = gsearch.Index(name='members')
    options = gsearch.QueryOptions(
        limit=page_size,
        offset=page_size*page,
    )
    query = gsearch.Query(query_string=q, options=options)
    result = index.search(query)
    count = result.number_found
    # NOTE(review): Python 2 style integer division — floats on Python 3.
    num_pages = count / page_size
    def pager_url(p):
        # Pagination link builder that preserves the query string.
        return url_for('search', q=q, page=p)
    return render_template('search.html', results=result,
                           page=page,
                           num_pages=num_pages,
                           pager_url=pager_url,
                           q=q)
@app.errorhandler(401)
def unauthorized(e):
    """Render the landing page with a login prompt on 401 errors."""
    flash("unauthorized, you need to login first", category="danger")
    return render_template("index.html"), 401
def has_no_empty_params(rule):
    """Return True when every URL argument of *rule* has a default value."""
    defaults = () if rule.defaults is None else rule.defaults
    arguments = () if rule.arguments is None else rule.arguments
    return len(defaults) >= len(arguments)
@app.route('/routes')
def routes():
    """Debug view: list every GET endpoint that needs no URL arguments."""
    rules = []
    for rule in app.url_map.iter_rules():
        # Skip rules that can't be built without arguments.
        if "GET" in rule.methods and has_no_empty_params(rule):
            url = url_for(rule.endpoint)
            rules.append(rule.endpoint + ': ' + url)
    return '</br>'.join(rules)
# NOTE(review): no @app.route decorator — this view is currently
# unreachable via URL; confirm whether a route was intended.
@admin_required
def admin_only():
    """This view requires an admin account"""
    return 'Super-seekrit admin page.'
@app.route('/upload_member_file')
def upload_member_file():
    """Placeholder for bulk member upload — not implemented yet."""
    pass
# NOTE(review): no @app.route('/_ah/warmup') decorator here — confirm
# the warmup URL is wired up elsewhere (e.g. app.yaml / url_map).
def warmup():
    """App Engine warmup handler

    See http://code.google.com/appengine/docs/python/config/appconfig.html#Warming_Requests
    """
    return ''
| true
|
c9b4f8b6c00edc4ed1306a821ce65cfe7812ad57
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03438/s126460973.py
|
UTF-8
| 211
| 3.3125
| 3
|
[] |
no_license
|
# Reads n, then arrays a and b, and decides whether a can be turned into
# b using the allowed operations (AtCoder-style stdin problem).
n=int(input())
a=list(map(int,input().split()))
b=list(map(int,input().split()))
# cnt tracks the surplus of "+2" operations available: positions where
# b exceeds a contribute (y-x)//2 credits, positions where a exceeds b
# consume (x-y) credits.
cnt=0
for i in range(n):
    x=a[i]
    y=b[i]
    if x>y:
        cnt-=x-y
    elif x<y:
        cnt+=(y-x)//2
# Feasible iff credits cover all deficits.
print('Yes' if cnt>=0 else 'No')
| true
|
e56fc5050d2e73a3da097ac8595bbffc4384db0a
|
Python
|
briandennis/MadPUG
|
/learning.py
|
UTF-8
| 1,481
| 4.40625
| 4
|
[] |
no_license
|
# Dictionaries are mutable.  SYNTAX: {'myVar': 'Hello'}
#   .update({'a': 'd'}) overrides the old value;
#   dictionary.items() yields all (key, value) pairs.
# Lists. SYNTAX: ['here', 'is', 'a', 'list']
myDictionary = {
    'a': '65',
    'b': '98'
}
myList = ['this', 'is', 'my', 'list']
print(myList[1:3]) # slice [1, 3) — half-open, end excluded
print(myList[0:4:2]) # slice [0, 4) with step 2 — every other element
# list.append('element') adds one item;
# list.extend(['multiple', 'items']) adds several.
# TUPLES:
# basically a list that you cannot change (immutable);
# in general, use a tuple for heterogeneous data — good for function returns.
myTuple = ('hello', 'this', 2)
print(myTuple)
# Functions:
#functionssssss
def squares_until(upper_limit):
    """Return the perfect squares strictly below *upper_limit*.

    Starts at 0**2 and stops at the first square reaching the limit.
    """
    squares = []
    # Floor division: `upper_limit / 2` yields a float on Python 3,
    # which range() rejects with a TypeError.  `//` keeps the original
    # Python 2 behavior on both versions.
    for n in range(upper_limit // 2):
        n_squared = n**2
        if n_squared < upper_limit:
            squares.append(n_squared)
        else:
            # Squares only grow, so the first miss ends the search.
            break
    return squares
print(squares_until(50))
# EXCEPTIONS: indexing an empty dict raises KeyError, which the
# except clause below catches.
try:
    {}['a key that is not in the dictionary']
except KeyError:
    print('Caught a KeyError!')
# CLASSES
class Adventurer(object):
    """A simple RPG character with a name, stats and a class type."""

    def __init__(self, name, health, experience, level, adventurer_type):
        self.name = name
        self.health = health
        self.experience = experience
        self.level = level
        self.adventurer_type = adventurer_type

    def format(self):
        """Return a one-line description of this adventurer."""
        return 'Our adventurer\'s name is {0}'.format(self.name)
# Instantiate and describe a sample character.
myAdventurer = Adventurer('Brian', 100, 'none', 100,'Rogue')
print(myAdventurer.format())
| true
|
9da25c75b5ec5071a1f34ab4685cfcdfa28ce03a
|
Python
|
hugo-labixiaoxin/cs61a-self-edition
|
/lecture/code1.py
|
UTF-8
| 276
| 3.140625
| 3
|
[] |
no_license
|
from operator import floordiv,mod
def divide_exact(n,d):
    """Return the quotient and remainder of dividing N by D.
    >>> q,r=divide_exact(2013,10)
    >>> q
    201
    >>> r
    3
    """
    # The // and % operators are equivalent to operator.floordiv/mod.
    return n // d, n % d
# Accumulate 1 + 2 + 3 with a counter loop (ends with i == 3, a == 6).
i, a = 0, 0
while i < 3:
    i += 1
    a += i
| true
|
479f2571a6a7255cc5ba3a05ab7af8133b65713a
|
Python
|
bigjay517/transmission-cleanup
|
/main.py
|
UTF-8
| 4,533
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Transmission Cleanup
# (main.py)
#
# Copyright 2014 Nicolas Cova <nicolas.cova.work@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import time
import subprocess
import datetime
from datetime import date
def main():
    """Remove completed torrents idle for a week from a Transmission server.

    Reads credentials and host:port from server.txt (creating a template
    on first run), lists torrents via transmission-remote, collects the
    100%-complete ones, filters those whose "Latest activity" is at
    least 7 days old, and removes them together with their data (-rad).
    Returns 0 on success, 1 after creating the template, -1 on errors.
    """
    try:
        file1 = open("server.txt")
    # NOTE(review): bare except — any failure (not just a missing file)
    # triggers template creation; an IOError/FileNotFoundError catch
    # would be safer.  Also file1 is never closed on the success path.
    except:
        file1 = open("server.txt","w")
        file1.write("username\n")
        file1.write("password\n")
        file1.write("transmission.example.com:9091")
        file1.close()
        print("Created server.txt. Adapt this file for your server settings.")
        return 1
    # server.txt layout: username / password / host:port, one per line.
    temp = file1.read().splitlines()
    username = temp[0]
    password = temp[1]
    remoteAddr = temp[2]
    authUser = username + ":" + password
    print("Found server information...")
    print("Username: " + username)
    print("Password: " + password)
    print("Remote  : " + remoteAddr)
    # List all torrents known to the server.
    try:
        commandResult = subprocess.check_output(["transmission-remote",remoteAddr,"--auth", authUser, "-l"])
    except subprocess.CalledProcessError:
        dateAndTime = time.strftime("%H:%M:%S") + " " + time.strftime("%d/%m/%Y")
        print(dateAndTime + " ERROR: something went wrong checking the torrents listing.")
        return -1
    splitResult = commandResult.decode().split("\n")
    # Remove items which are just empty strings.
    while True:
        try:
            splitResult.remove("")
        except ValueError:
            # No empty strings left — done stripping.
            break
    # Remove first and last items (header and "Sum:" footer) so the list
    # only contains torrent info.  If the list is too short, just exit.
    if len(splitResult) > 2:
        splitResult.pop(0)
        splitResult.pop()
    else:
        return 0
    # Collect torrents whose listing row reports 100% completion, as
    # (id, name) pairs; the id may carry a trailing '*' marker.
    completedTorrents = []
    for item in splitResult:
        if "100%" in item:
            torrentId = item.lstrip().split()[0].rstrip("*")
            torrentName = item[item.rfind(" "):len(item)].lstrip()
            completedTorrents.append((torrentId,torrentName))
    # Query each completed torrent's details and keep those whose
    # "Latest activity" timestamp is 7 or more days old.
    # NOTE(review): torrentsRemoved is assigned but never used.
    torrentsRemoved = False
    idleTorrents = []
    try:
        for item in completedTorrents:
            commandResult = subprocess.check_output(["transmission-remote",remoteAddr,"--auth",authUser, "-t", item[0], "--info"])
            splitResult = commandResult.decode().split("\n")
            for line in splitResult:
                if "Latest activity:" in line:
                    newLine = line.split(": ")
                    # '%c' parses the locale's date/time representation.
                    date_time_obj = datetime.datetime.strptime(newLine[1], '%c')
                    today = datetime.datetime.now()
                    timeDelta = today-date_time_obj
                    tooOld = datetime.timedelta(7)
                    if timeDelta>=tooOld:
                        #print(item[1])
                        #print(timeDelta)
                        #print('')
                        idleTorrents.append((item[0],item[1],timeDelta))
                        #print(date_time_obj.date())
                        #print(timeDelta)
            #print(today)
            #print(date_time_obj.date())
    except subprocess.CalledProcessError:
        dateAndTime = time.strftime("%H:%M:%S") + " " + time.strftime("%d/%m/%Y")
        print(dateAndTime + " ERROR: something went wrong with 'check_output'. " + commandResult.decode())
        return -1
    # Remove each idle torrent and delete its downloaded data (-rad).
    for item in idleTorrents:
        commandResult = subprocess.check_output(["transmission-remote",remoteAddr,"--auth",authUser,"-t",item[0],"-rad"])
        print("Removed: " + item[1])
    return 0
if __name__ == '__main__':
    main()
| true
|
ea02db2da443546c53aa08d90d7daa3d42d9d757
|
Python
|
ed-cetera/project-euler-python
|
/043_solution.py
|
UTF-8
| 1,048
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
import time
def permutations(lst):
    """Yield every permutation of *lst*, each as a new list.

    Recursively permutes the tail, then inserts the head element at
    every possible position of each tail permutation.
    """
    if len(lst) == 1:
        yield lst
        return
    head = lst[:1]
    for tail_perm in permutations(lst[1:]):
        for pos in range(len(lst)):
            yield tail_perm[:pos] + head + tail_perm[pos:]
def main():
    """Project Euler 43: sum all 0-9 pandigitals with the divisibility property.

    For digits d1..d10, requires d2d3d4 % 2 == 0, d3d4d5 % 3 == 0, ...,
    d8d9d10 % 17 == 0, checked by slicing each 3-digit substring.
    Iterates all 10! digit permutations (brute force).
    """
    total_sum = 0
    for permutation in permutations(["0", "1", "2", "3", "4",
                                     "5", "6", "7", "8", "9"]):
        number_str = "".join(permutation)
        # Consecutive 3-digit substrings must be divisible by the
        # primes 2, 3, 5, 7, 11, 13, 17 in order.
        if (int(number_str[1:4]) % 2 == 0
                and int(number_str[2:5]) % 3 == 0
                and int(number_str[3:6]) % 5 == 0
                and int(number_str[4:7]) % 7 == 0
                and int(number_str[5:8]) % 11 == 0
                and int(number_str[6:9]) % 13 == 0
                and int(number_str[7:10]) % 17 == 0):
            total_sum += int(number_str)
    print("Solution:", total_sum)
if __name__ == "__main__":
start = time.time()
main()
end = time.time()
print("Duration: {0:0.6f}s".format(end - start))
| true
|
39226865aef5e9f456b0425caabb4f35d5a10efb
|
Python
|
simberaj/mobilib
|
/mobilib/raster.py
|
UTF-8
| 3,086
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
"""Old machinery for raster handling."""
import os
from typing import Tuple
import numpy as np
import gdal
import osr
# TODO this probably does not work; would be best rewritten using rasterio
class World:
    """Affine transform in the six-parameter ESRI world-file convention.

    ``matrix`` and ``shift`` together map integer (row, col) raster
    positions to map coordinates of cell centers (the half-cell offset
    in ``shift`` targets centers rather than corners).
    """

    def __init__(self, xcell, yrot, xrot, ycell, xorigin, yorigin):
        self.xcell = xcell
        self.yrot = yrot
        self.xrot = xrot
        self.ycell = ycell
        self.xorigin = xorigin
        self.yorigin = yorigin
        self.matrix = np.array((
            (self.yrot, self.xcell),
            (self.ycell, self.xrot)
        ))
        # Half-cell offset so transformed positions land on cell centers.
        self.shift = np.array((self.xorigin + self.xcell / 2, self.yorigin + self.ycell / 2))

    @classmethod
    def from_file(cls, file):
        """Build a World from an open world file (six numbers, one per line)."""
        return cls(*(float(line.strip()) for line in file.readlines()))

    def raster_to_points(self, raster, nodata=None):
        """Yield ``(coords, value)`` for every raster cell not in *nodata*.

        *nodata* may be any iterable of values to skip, or None/empty for
        no filtering.  Accepting None (instead of the previous mutable
        ``[]`` default) also fixes load_points(), which passes
        ``nodata=None`` by default and used to crash on ``set(None)``.
        """
        nodata = set(nodata) if nodata else set()
        for pos, value in np.ndenumerate(raster):
            if value not in nodata:
                yield self.matrix.dot(pos) + self.shift, value

    def to_gdal_tuple(self) -> Tuple[float, ...]:
        """Return the transform in GDAL's SetGeoTransform parameter order."""
        return (self.xorigin, self.xcell, self.xrot, self.yorigin, self.yrot, self.ycell)

    @classmethod
    def create_rect(cls, xmin, ymin, cell_size):
        """Create an axis-aligned transform with square cells of *cell_size*."""
        return cls(cell_size, 0, 0, -cell_size, xmin, ymin)
def load_points(path, worldpath=None, nodata=None, **kwargs):
    """Load raster *path* and yield (coords, value) points via its world file.

    When *worldpath* is omitted, a sibling world file is located from the
    image extension.  Extra keyword args go to the raster loader.
    NOTE(review): `load` is not defined in this view — presumably a
    module-level raster reader defined elsewhere in the file; confirm.
    """
    if worldpath is None:
        worldpath = find_world_path(path)
    with open(worldpath, 'r') as worldfile:
        world = World.from_file(worldfile)
    return world.raster_to_points(load(path, **kwargs), nodata=nodata)
def find_world_path(impath):
    """Return the path of the world file accompanying the image *impath*.

    Tries each candidate extension next to the image in order and
    raises FileNotFoundError when none of them exists on disk.
    """
    base, ext = os.path.splitext(impath)
    for candidate_ext in possible_world_extensions(ext):
        candidate = base + candidate_ext
        if os.path.exists(candidate):
            return candidate
    raise FileNotFoundError('world file not found for ' + impath)
def possible_world_extensions(ext):
    """Yield candidate world-file extensions for an image extension *ext*.

    Order: abbreviated form (e.g. '.tif' -> '.tfw'), the long form
    ('.tifw'), then the generic '.wld' fallback.
    """
    yield '{0}{1}w'.format(ext[:2], ext[-1])
    yield ext + 'w'
    yield '.wld'
def calculate_bounds(xs, ys, cell_size, extension=0):
    """Return (xmin, ymin, xmax, ymax) snapped outward to the cell grid.

    *xs* and *ys* are array-likes with .min()/.max(); *extension* pads
    the raw extent on every side before snapping to *cell_size*.
    """
    lo_x = xs.min().item() - extension
    lo_y = ys.min().item() - extension
    hi_x = xs.max().item() + extension
    hi_y = ys.max().item() + extension
    return (
        (lo_x // cell_size) * cell_size,
        (lo_y // cell_size) * cell_size,
        (hi_x // cell_size + 1) * cell_size,
        (hi_y // cell_size + 1) * cell_size,
    )
def to_geotiff(array: np.ndarray,
               path: str,
               world: World,
               srid: int = 4326
               ) -> None:
    """Write a 2-D array to *path* as a single-band GeoTIFF.

    Uses *world* for the geotransform and *srid* (default WGS84) for the
    projection.  The band dtype is derived from the array dtype via
    gdal_raster_type, so only the dtypes in GDAL_TYPES are supported.
    """
    assert array.ndim == 2
    cols = array.shape[1]
    rows = array.shape[0]
    out_raster = gdal.GetDriverByName('GTiff').Create(
        path, cols, rows, 1, gdal_raster_type(array.dtype.type)
    )
    out_raster.SetGeoTransform(world.to_gdal_tuple())
    outband = out_raster.GetRasterBand(1)
    outband.WriteArray(array)
    # Build the CRS from the EPSG code and attach it as WKT.
    osr_crs = osr.SpatialReference()
    osr_crs.ImportFromEPSG(srid)
    out_raster.SetProjection(osr_crs.ExportToWkt())
    outband.FlushCache()
# Mapping from numpy scalar types to the GDAL band data-type constants
# accepted by Driver.Create.
GDAL_TYPES = {
    np.float32: gdal.GDT_Float32,
    np.float64: gdal.GDT_Float64,
}
def gdal_raster_type(dtype):
    """Return the GDAL band type for a numpy scalar *dtype* (KeyError if unsupported)."""
    return GDAL_TYPES[dtype]
| true
|
3a14337d5242837f19a22745e3d8c300d547d666
|
Python
|
RCI-sfx/Gillespie-algorithm
|
/question_4.py
|
UTF-8
| 1,712
| 3.0625
| 3
|
[] |
no_license
|
from numpy import random
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import poisson
# Gillespie simulation of mRNA copy number with periodic cell division.
k0 = 0.2 # transcription rate (overwritten each step below)
k1 = 0.01 # degradation rate
Omega= 1. # cell size
k2= 5 # protein translation (unused in this simulation)
m=0 # starting mRNA count
p=0 # starting protein count (unused below)
t=0 # starting time
divtime= 1
m_store = [m]
t_store= [t]
div_index = []
for i in range(10000):
    k0=random.uniform(0.2,0.4) # redraw k0 so transcription is randomly distributed
    r1=k0*Omega # rate for mRNA production
    r2=k1*m # rate for mRNA degradation
    rtot= r1 +r2 # total reaction rate
    randtot=random.rand()*rtot # random number in [0, rtot) to pick a reaction
    if randtot <= r1: # reaction 1 fires (mRNA production)
        m = m+1
    else: # reaction 2 fires (mRNA degradation)
        m = m-1
    randtime=random.exponential(1/rtot) # waiting time to the next reaction
    t= t + randtime
    if t >= 1200*divtime: # time for the next cell division
        divtime = divtime + 1
        m = np.random.binomial(m, 0.5) # partition molecules binomially between daughters
        div_index.append(i) # remember the step index of this division
    m_store.append(m)
    t_store.append(t)
# Statistics over one cell-division interval.
# NOTE(review): this slices t_store (times), although the comment and the
# mean/var/Fano computation suggest m_store (mRNA counts) was intended —
# confirm.
x= t_store[div_index[0]:div_index[1]] # one cell-division window
mean= np.mean(x)
var= np.var(x)
fano= (var/mean)
print(mean)
print(var)
print(fano)
mean= np.mean(m_store) # statistics over the whole trajectory
var= np.var(m_store)
fano= (var/mean)
print(mean)
print(var)
print(fano)
plt.plot(t_store, m_store, color='orchid')
plt.xlabel('Time/seconds')
# Set the y axis label of the current axis.
plt.ylabel('[mRNA]')
# Set a title of the current axes.
plt.title('Gillespie simulation of [mRNA] in cell division ')
# Display the figure.
plt.show()
| true
|
54c8ae7ee1248b544d79d5a4bc5456acda197d09
|
Python
|
ehlymana/CellsAnalysis
|
/Code/linearBrightness.py
|
UTF-8
| 766
| 2.734375
| 3
|
[] |
no_license
|
import cv2
import matplotlib.pyplot as plt
import spasavanjeSlika as ss
def increaseBrightness (image, factor):
    """Return *image* brightened by *factor* percent (e.g. 20 -> scale 1.2).

    NOTE(review): cv2.convertScaleAbs's positional signature is
    (src, dst, alpha, beta) — here 1 lands in the dst slot and the scale
    in alpha; presumably `cv2.convertScaleAbs(image, alpha=factorscaled)`
    was intended.  Confirm against the OpenCV build in use.
    """
    factorscaled = (factor / 100) + 1
    return cv2.convertScaleAbs(image, 1, factorscaled)
def proba():
    """Demo: brighten 'image.jpeg' by 20%, show both versions, save the result."""
    image = cv2.imread('image.jpeg')
    brighterImage = increaseBrightness(image, 20)
    # Side-by-side comparison with axis ticks removed.
    plt.subplot(121), plt.imshow(image), plt.title('Originalna slika')
    plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(brighterImage), plt.title('Posvijetljena slika')
    plt.xticks([]), plt.yticks([])
    plt.show()
    ss.spasiSliku("BrighterImages", "image", 2, brighterImage)
# Batch-brighten the 90 ROI images and save them under Brightness/.
for i in range(0, 90):
    slika = cv2.imread('./ROI/{}_ROI.jpg'.format(i+1))
    nova = increaseBrightness(slika, 20)
    ss.spasiSliku("Brightness", "LinearBrightness", i+1, nova)
| true
|
66e3b2f31a0cc64f76adddd9dd7a6b949c7f8e3a
|
Python
|
VenkateshMohan3434/hmm
|
/aa.py
|
UTF-8
| 471
| 2.53125
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
from random import sample
import utils
# data_path = 'train_FD004.txt'
# data = utils.load_data(data_path)
# x = data.iloc[:, 2:26]
# x.insert(0, 'engine_no', data.iloc[:, 0])
# print(x[:][1:])
# a = [2,3,4]
# b = [3,5,6]
# c = []
# c.append(np.array(a)-np.array(b))
# c.append(np.array(b)-np.array(a))
# print(np.array(c))
# Build a 5-state transition matrix: 0.95 self-loop on each state,
# 0.05 to the next state, and an absorbing final state.
a = 0.95 * np.identity(5)
print(a)
rows = a.shape[0]
for row in range(rows - 1):
    a[row][row + 1] = 0.05
a[rows - 1][rows - 1] = 1
print(a)
| true
|
8025a057c895493eb788aba44c9cb4ffea5f437f
|
Python
|
akrami/albaloo
|
/website.py
|
UTF-8
| 3,879
| 2.609375
| 3
|
[] |
no_license
|
import json
from time import sleep
import socket
import requests
import re
class Website:
    """
    Holds a site's address and lazily-computed security/network checks:
    SSL Labs grade, resolved IP, HTTP->HTTPS redirect, and HSTS header.
    """
    def __init__(self, address):
        self.address = address
        self.ssllab_result = ''   # raw SSL Labs JSON (string) once fetched
        self.ssllab_rating = ''   # never assigned by the methods below
        self.ip = '0.0.0.0'
        self.redirect = False
        self.hsts = False
    def check_ssllab(self):
        """
        Start a fresh SSL Labs scan for this host and return the result.

        Retries every 20s on connection/timeout errors.
        NOTE(review): recursion is unbounded — persistent network
        failure recurses until the recursion limit; confirm acceptable.
        :return: json string or error json
        """
        payload = {
            'host': self.address,
            'startNew': 'on',
            'all': 'done',
            'maxAge': 23
        }
        try:
            response = requests.get('https://api.ssllabs.com/api/v3/analyze', params=payload)
        except requests.exceptions.ConnectionError:
            sleep(20)
            return self.check_ssllab()
        except TimeoutError:
            sleep(20)
            return self.check_ssllab()
        else:
            if response.status_code == 200:
                # Scan accepted — poll until it reaches a final state.
                return self.__analyze_ssllab()
            else:
                return '{"status": "not available"}'
    def __analyze_ssllab(self):
        # Poll the analyze endpoint (without startNew) every 20s until
        # the scan reports READY or ERROR, caching the JSON string.
        payload = {
            'host': self.address,
            'all': 'done',
            'maxAge': 23
        }
        try:
            response = requests.get('https://api.ssllabs.com/api/v3/analyze', params=payload)
        except requests.exceptions.ConnectionError:
            sleep(20)
            return self.__analyze_ssllab()
        except TimeoutError:
            sleep(20)
            return self.__analyze_ssllab()
        else:
            if response.status_code == 200:
                json_response = response.json()
                if json_response['status'] == 'READY':
                    self.ssllab_result = json.dumps(json_response)
                    return self.ssllab_result
                elif json_response['status'] == 'ERROR':
                    self.ssllab_result = json.dumps(json_response)
                    return self.ssllab_result
                else:
                    # Still in progress — wait and poll again.
                    sleep(20)
                    return self.__analyze_ssllab()
            else:
                self.ssllab_result = '{"status": "not available"}'
                return self.ssllab_result
    def check_ip(self):
        """
        Resolve and cache the website's IPv4 address via DNS.
        :return: string (dotted quad, or "Not Found" on resolution failure)
        """
        try:
            self.ip = socket.gethostbyname(self.address)
        except socket.gaierror:
            self.ip = "Not Found"
        finally:
            return self.ip
    def check_redirect(self):
        """
        Check whether the plain-HTTP URL redirects to HTTPS.

        NOTE(review): `except ConnectionError` catches the BUILTIN
        ConnectionError, not requests.exceptions.ConnectionError, so
        requests failures propagate past this handler.  Also the
        `return` in `finally` swallows any such exception — confirm
        whether that is intended.
        :return: boolean
        """
        if re.match(r'^https://', self.address):
            host = self.address.replace('https://', 'http://')
        elif re.match(r'^http://', self.address):
            host = self.address
        else:
            host = 'http://{0}'.format(self.address)
        try:
            response = requests.get(host)
        except ConnectionError:
            self.redirect = False
        else:
            # requests follows redirects; the final URL tells the story.
            self.redirect = True if re.match(r'^https://', response.url) else False
        finally:
            return self.redirect
    def check_hsts(self):
        """
        Check whether the HTTPS endpoint sends a Strict-Transport-Security header.

        NOTE(review): same builtin-ConnectionError and return-in-finally
        caveats as check_redirect().
        :return: boolean
        """
        if re.match(r'^https://', self.address):
            host = self.address
        elif re.match(r'^http://', self.address):
            host = self.address.replace('http://', 'https://')
        else:
            host = 'https://{0}'.format(self.address)
        try:
            response = requests.get(host)
        except ConnectionError:
            self.hsts = False
        except requests.exceptions.SSLError:
            self.hsts = False
        else:
            self.hsts = True if 'strict-transport-security' in response.headers else False
        finally:
            return self.hsts
| true
|
990f4dec20395f4a3443d7cb2e6b247a606fde72
|
Python
|
henchc/MHG-scansion
|
/CRF/new_bio_class_report.py
|
UTF-8
| 1,429
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
from CLFL_mdf_classification import classification_report
from CLFL_mdf_classification import precision_recall_fscore_support
from sklearn.preprocessing import LabelBinarizer
from itertools import chain
def bio_classification_report(y_true, y_pred):
    """
    Classification report for a list of BIO-encoded sequences.
    It computes token-level metrics and discards "O" labels.
    Note that it requires scikit-learn 0.15+ (or a version from
    github master) to calculate averages properly!

    Returns a 3-tuple: (precision/recall/F1/support arrays per class,
    formatted report string, label indices in report order).
    """
    # Flatten the per-sequence tag lists and one-hot encode them so
    # sklearn-style metrics can consume them.
    lb = LabelBinarizer()
    y_true_combined = lb.fit_transform(list(chain.from_iterable(y_true)))
    y_pred_combined = lb.transform(list(chain.from_iterable(y_pred)))
    # Drop the "O" (outside) tag and sort the rest by entity type then
    # BIO prefix, e.g. B-X, I-X, B-Y, I-Y.
    tagset = set(lb.classes_) - {'O'}
    tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])
    class_indices = {cls: idx for idx, cls in enumerate(lb.classes_)}
    labs = [class_indices[cls] for cls in tagset]
    return((precision_recall_fscore_support(y_true_combined,
                                            y_pred_combined,
                                            labels=labs,
                                            average=None,
                                            sample_weight=None)),
           (classification_report(
               y_true_combined,
               y_pred_combined,
               labels=[class_indices[cls] for cls in tagset],
               target_names=tagset,
           )), labs)
| true
|
36680e141297b96804e1c4e399e00c18a1b15a96
|
Python
|
mdinunzio/pybind11example
|
/test.py
|
UTF-8
| 620
| 2.703125
| 3
|
[] |
no_license
|
import numpy as np
from build.Release.module_name import *
from concurrent.futures import ThreadPoolExecutor
import time
# Smoke-test the pybind11 extension: free function, property get/set,
# various multiply overloads, and an exposed buffer/image attribute.
print(some_fn_python_name(3, 5))
my_class = PySomeClass(5.0)
print('multiplier is', my_class.multiplier)
my_class.multiplier = 10
print('multiplier is', my_class.multiplier)
print(my_class.multiply(8))
print(my_class.multiply_list([1,2,3,4,5]))
print(my_class.multiply_two(10, 20))
print(my_class.image)
# Run the slow C++ function on 4 threads; if it releases the GIL this
# should take roughly one call's duration instead of four.
start_time = time.time()
with ThreadPoolExecutor(4) as ex:
    # map() submits eagerly; leaving the with-block waits for completion.
    ex.map(lambda x: my_class.function_that_takes_a_while(), [None]*4)
print(f"Threaded fun took {time.time() - start_time} seconds")
| true
|
5c7b112eb11123f9f2b228329f2cb609febdc1bc
|
Python
|
jesubellido/RestorAppFixed
|
/restorapp.py
|
UTF-8
| 826
| 2.515625
| 3
|
[] |
no_license
|
# Import necessary things.
from flask import Flask, render_template, request, Response, session, jsonify
from model import entities
from database import connector
import json
import xml
# Initialize the Flask app, serving static assets from ./static.
app = Flask(__name__, static_url_path="/static", static_folder='static')
# Create a Manager from the connector module; it wraps access to the
# application database ('db').
db = connector.Manager()
# In-process cache (plain dict).
cache = {}
# Create the SQLAlchemy-style database engine via the manager.
engine = db.create_engine()
@app.route('/')
def hello_world():
    """Landing page."""
    return render_template('welcome.html')
@app.route('/signUp')
def sign_up():
    """Registration form page."""
    return render_template('signup.html')
@app.route('/logIn')
def log_in():
    """Login form page."""
    return render_template('login.html')
# Run the development server when executed directly.
if __name__ == '__main__':
    app.run()
| true
|
925a3c1ee99aedab3a5c6f0696203b56c8bdb1a6
|
Python
|
LiLabAtVT/DeepTE
|
/scripts/DeepTE_generate_CNN_dataset.py
|
UTF-8
| 778
| 2.671875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env python
##import modules
from Bio import SeqIO
def change_format_for_ncc (input_ori_seq_file):
##initiate a dic to store output file
final_format_dic = {}
seq_count = 0
for seq_record in SeqIO.parse(input_ori_seq_file,'fasta'):
seq_count += 1
label = seq_record.id
final_format_dic[str(seq_count)] = {'label':label,'seq':str(seq_record.seq)}
return (final_format_dic)
def generate_target_line (final_format_dic):
final_format_line_dic = {}
seq_count = 0
for eachid in final_format_dic:
seq_count += 1
final_line = final_format_dic[eachid]['label'] + ',' + final_format_dic[eachid]['seq']
final_format_line_dic[str(seq_count)] = final_line
return (final_format_line_dic)
| true
|
ba342251cc012437c4e1120f38de674ab55ae671
|
Python
|
codedjw/DataAnalysis
|
/QYW_4th_Analysis/qyw_4th_analysis.py
|
UTF-8
| 17,241
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Jiawei Du
# email: jiaweidu.js@gmail.com
# added: 2016/03/31
import sys,os
#获取脚本文件的当前路径
def cur_file_dir():
    """Return the directory containing the running script.

    sys.path[0] is the script's directory for a plain script but the
    bundled file path for a py2exe build; normalize both cases to a
    directory (None when sys.path[0] is neither a dir nor a file).
    """
    script_path = sys.path[0]
    if os.path.isdir(script_path):
        return script_path
    if os.path.isfile(script_path):
        return os.path.dirname(script_path)
# Resolve the script directory once; note this rebinds the name so the
# function cur_file_dir() is shadowed by its string result from here on.
cur_file_dir = cur_file_dir()
import matplotlib.pylab as plt, numpy as np
from matplotlib import font_manager
# CJK-capable font for all chart labels, plus shared font sizes.
myFont = font_manager.FontProperties(fname='/Library/Fonts/Songti.ttc')
titleSize = 14
tipSize = 12
def drawPieChart(data, labels, title):
    """Render a pie chart of *data*/*labels* and save it as <title>.png.

    Uses the module-level CJK font for all text; the file is written
    next to the script (cur_file_dir).
    """
    fig = plt.figure(dpi=100, figsize=(8,8))
    # Single axes filling most of the figure.
    ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    patches, texts, autotexts = ax1.pie(data, labels=labels, autopct='%1.1f%%', colors=['yellowgreen', 'gold', 'lightskyblue', 'lightcoral'])
    # Apply the CJK font to both the percentage and label texts.
    plt.setp(autotexts, fontproperties=myFont, size=tipSize)
    plt.setp(texts, fontproperties=myFont, size=tipSize)
    ax1.set_title(title,fontproperties=myFont, size=titleSize)
    # Force a circular (1:1 aspect) pie.
    ax1.set_aspect(1)
    #plt.show()
    plt.savefig(cur_file_dir+'/'+title+'.png', format='png')
    # Reset and close pyplot state so charts don't bleed into each other.
    plt.cla()
    plt.clf()
    plt.close()
    print 'drawPieChart',title,'over'
def drawNBarChart(data_label_colors, xindex, xlabel, ylabel, title):
    """Render grouped bars and save as <title>.png.

    *data_label_colors* is a list of (series, legend label, color)
    triples drawn side by side per x category; *xindex* supplies the
    category tick labels.
    """
    n_groups = xindex.size
    fig, ax = plt.subplots(dpi=100, figsize=(14,8))
    index = np.arange(n_groups)
    bar_width = 0.35
    opacity = 0.4
    error_config = {'ecolor': '0.3'}
    def autolabel(rects):
        # Attach the integer value above each bar.
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x() + rect.get_width()/2., 1.005*height,
                    '%d' % int(height),
                    ha='center', va='bottom', fontproperties=myFont, size=tipSize-1)
    # Draw each series shifted right by one bar width per series.
    i = 0
    for data, label, color in data_label_colors:
        rects = plt.bar(index+i*bar_width, data, bar_width,
                        alpha=opacity,
                        color=color,
                        error_kw=error_config,
                        label=label)
        autolabel(rects)
        i += 1
    ax.yaxis.grid(True, which='major') # grid on the y axis at major ticks
    plt.xlabel(xlabel, fontproperties=myFont, size=titleSize)
    plt.ylabel(ylabel, fontproperties=myFont, size=titleSize)
    plt.title(title, fontproperties=myFont, size=titleSize)
    # Center the category labels under the bar groups.
    plt.xticks(index + (len(data_label_colors)/2.)*bar_width, xindex, fontproperties=myFont, size=tipSize)
    plt.legend(prop=font_manager.FontProperties(fname='/Library/Fonts/Songti.ttc', size=tipSize))
    plt.tight_layout()
    plt.savefig(cur_file_dir+'/'+title+'.png', format='png')
    # Reset pyplot state between charts.
    plt.cla()
    plt.clf()
    plt.close()
    print 'drawNBarChart',title,'over'
def drawLineChart(pd_series, title, xlabel, ylabel, xticks, xticklabels):
    """Plot a pandas Series as a line chart and save as <title>.png.

    Explicit *xticks*/*xticklabels* override the Series index labels.
    """
    ax = pd_series.plot(figsize=(14,8))
    #for label in ax.get_xticklabels(): #xtick
    #    label.set_fontproperties(myFont)
    for label in ax.get_yticklabels(): # y tick labels
        label.set_fontproperties(myFont)
    for label in ax.get_label(): # legend text
        label.set_fontproperties(myFont)
    ax.set_title(title, fontproperties=myFont, size=titleSize)
    ax.set_xlabel(xlabel, fontproperties=myFont, size=tipSize)
    ax.set_ylabel(ylabel, fontproperties=myFont, size=tipSize)
    ax.set_xticks(xticks)
    ax.set_xticklabels(xticklabels, fontproperties=myFont, size=tipSize)
    ax.xaxis.grid(True, which='major') # grid at major x ticks
    ax.yaxis.grid(True, which='major') # grid at major y ticks
    #ax.yaxis.grid(True, which='minor') # y grid at minor ticks
    plt.savefig(cur_file_dir+'/'+title+'.png', format='png')
    # Reset pyplot state between charts.
    plt.cla()
    plt.clf()
    plt.close()
    print 'drawLineChart',title,'over'
def drawBarAXBarChart(bar1_series, bar2_series, title, xlabel, y_bar1_label, y_bar2_label, xticklabels, bar1_label, bar2_label):
    """Render two bar series on twin y axes (left blue, right red) and save.

    The series share the x categories in *xticklabels*; each axis gets
    its own label and color-coded ticks.  Saved as <title>.png.
    """
    n_groups = xticklabels.size
    fig, ax = plt.subplots(dpi=100, figsize=(16,8))
    index = np.arange(n_groups)
    bar_width = 0.25
    opacity = 0.4
    error_config = {'ecolor': '0.3'}
    def autolabel(ax, rects):
        # Attach the integer value above each bar on the given axes.
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x() + rect.get_width()/2., 1.005*height,
                    '%d' % int(height),
                    ha='center', va='bottom', fontproperties=myFont, size=tipSize-1)
    # First series on the left (blue) axis.
    rects = plt.bar(index, bar1_series, bar_width,
                    alpha=opacity,
                    color='b',
                    error_kw=error_config,
                    label=bar1_label)
    autolabel(ax,rects)
    plt.xlabel(xlabel, fontproperties=myFont, size=titleSize)
    plt.ylabel(y_bar1_label, fontproperties=myFont, size=titleSize, color='b')
    for label in ax.get_yticklabels():
        label.set_color('b')
    plt.title(title, fontproperties=myFont, size=titleSize)
    plt.xticks(index + (1/2.)*bar_width, xticklabels, fontproperties=myFont, size=tipSize)
    plt.legend(prop=font_manager.FontProperties(fname='/Library/Fonts/Songti.ttc', size=tipSize))
    print 'drawNBarChart',title,'1 over'
    # Second series on a twin (right, red) axis, shifted one bar width.
    ax1 = ax.twinx()
    rects1 = plt.bar(index+1*bar_width, bar2_series, bar_width,
                     alpha=opacity,
                     color='r',
                     error_kw=error_config,
                     label=bar2_label, axes=ax1)
    autolabel(ax1,rects1)
    for label in ax1.get_yticklabels():
        label.set_color('r')
    plt.ylabel(y_bar2_label, fontproperties=myFont, size=titleSize, color='r')
    plt.xticks(index + (2/2.)*bar_width, xticklabels, fontproperties=myFont, size=tipSize)
    # Combined legend covering both axes' bar sets.
    plt.legend((rects, rects1), (bar1_label, bar2_label),prop=font_manager.FontProperties(fname='/Library/Fonts/Songti.ttc', size=tipSize))
    plt.tight_layout()
    plt.savefig(cur_file_dir+'/'+title+'.png', format='png')
    # Reset pyplot state between charts.
    plt.cla()
    plt.clf()
    plt.close()
    print 'drawNBarChart',title,'2 over'
# Python 2 one-shot analysis script: pulls aggregates from the local `qyw`
# MySQL database and renders charts via the draw* helpers defined above.
import pandas as pd, MySQLdb

# NOTE(review): hard-coded credentials with an empty root password -- acceptable
# only on a throwaway local analysis box; verify before reuse.
conn = MySQLdb.connect(host='localhost',user='root',passwd='',db='qyw', charset='utf8')
try:
    ############ User region analysis ###########
    # Distinct users per (hospital, province, city), joining the visit table to
    # the user table; rows with unknown province/city are dropped.
    df_province_city_hid_all = pd.read_sql('''
    SELECT HOSPITAL_ID, PROVINCE, CITY, COUNT(DISTINCT USER_ID) AS USER_CNT FROM (SELECT * FROM (SELECT * FROM qyw.qyw_4th_visit WHERE USER_ID > 0 ORDER BY USER_ID) AS t1 INNER JOIN (SELECT USER_ID AS CUS_ID, PROVINCE, CITY, BIRTHDAY, GENDER, REGISTER_DATE FROM qyw.qyw_4th_user ORDER BY CUS_ID) AS t2 ON t1.USER_ID = t2.CUS_ID) AS t3 WHERE PROVINCE IS NOT null AND CITY IS NOT null GROUP BY HOSPITAL_ID, PROVINCE, CITY ORDER BY HOSPITAL_ID, USER_CNT DESC;
    ''', con=conn)
    ###### pie charts ######
    # merge hospitals: aggregate across hospitals and sort by user count.
    # NOTE(review): positional sort_values(axis, ascending-list) arguments rely
    # on an old pandas API -- confirm the pinned pandas version.
    province_city_merge_all = df_province_city_hid_all.groupby(['PROVINCE', 'CITY'])['USER_CNT'].sum().reset_index().sort_values(['USER_CNT'],0,[False])
    province_city_merge = pd.Series(province_city_merge_all['USER_CNT'].values, index=province_city_merge_all['PROVINCE']+' '+province_city_merge_all['CITY'], name=u'用户地域总分布(第三位至第十二位)')
    # Ranks 3..12 only -- presumably the top two would swamp the pie; confirm.
    drawPieChart(province_city_merge[2:12], province_city_merge[2:12].index, province_city_merge.name)
    # each hospital: one pie chart and one bar chart per configured hospital.
    hospitals = [{270001:u'武汉市中心医院'}, {5510002:u'安徽省中医院'}]
    for hospital in hospitals:
        for hid, hname in hospital.items():
            province_city_all = df_province_city_hid_all[df_province_city_hid_all['HOSPITAL_ID'] == hid]
            province_city = pd.Series(province_city_all['USER_CNT'].values, index=province_city_all['PROVINCE']+' '+province_city_all['CITY'], name=hname+u'用户地域分布')
            drawPieChart(province_city[1:10], province_city[1:10].index, province_city.name+u'(第二位至第十位)')
            drawNBarChart([(province_city[0:10], hname, 'b')], province_city[0:10].index, u'市级', u'用户数量', province_city.name+u'(第一位至第十位)')
    ###### n bar charts ######
    # Province-level comparison across hospitals, rendered twice: once with all
    # provinces and once excluding each hospital's home province so the scale
    # stays readable.
    province_merge_all = df_province_city_hid_all.groupby(['HOSPITAL_ID','PROVINCE'])['USER_CNT'].sum().reset_index().sort_values(['PROVINCE','HOSPITAL_ID'],0,[True, False])
    province_merge_less = pd.merge(province_merge_all[province_merge_all['PROVINCE']!=u'安徽'], province_merge_all[province_merge_all['PROVINCE']!=u'湖北'])
    province_merge_list = [{u'典型医院用户地域分布(所有)':province_merge_all}, {u'典型医院用户地域分布(部分)':province_merge_less}]
    bar_colors = ['b', 'r']
    for province_merge in province_merge_list:
        for title, data in province_merge.items():
            # Pivot to PROVINCE x HOSPITAL_ID; absent combinations become 0.
            province_merge_pivot = data.pivot('PROVINCE', 'HOSPITAL_ID', 'USER_CNT')
            province_merge_pivot = province_merge_pivot.fillna(0)
            data_label_colors = []
            i = 0
            for hospital in hospitals:
                for hid, hname in hospital.items():
                    data_label_colors.append((province_merge_pivot[hid], hname, bar_colors[i]))
                    i += 1
            drawNBarChart(data_label_colors, province_merge_pivot.index, u'省份', u'用户数量', title)
    ########## Login-channel analysis ##########
    # Distinct users per public-service (login) channel, with readable channel
    # names joined in; unmatched types fall back to '其他渠道' (other channels).
    publicservice_hid_all = pd.read_sql('''
    SELECT CONCAT_WS('@', IF(t2.PUBLIC_SERVICE_MEAN IS null, '其他渠道', t2.PUBLIC_SERVICE_MEAN), t1.PUBLIC_SERVICE_TYPE) AS PUBLIC_SERVICE_TYPE, USER_CNT FROM (SELECT PUBLIC_SERVICE_TYPE, COUNT(DISTINCT USER_ID) AS USER_CNT FROM qyw.qyw_4th_visit WHERE PUBLIC_SERVICE_TYPE IS NOT null GROUP BY PUBLIC_SERVICE_TYPE) AS t1 LEFT JOIN (SELECT * FROM qyw.qyw_4th_public_service_type) AS t2 ON t1.PUBLIC_SERVICE_TYPE = t2.PUBLIC_SERVICE_TYPE;
    ''', con=conn)
    publicservice_hid = pd.Series(publicservice_hid_all['USER_CNT'].values, index=publicservice_hid_all['PUBLIC_SERVICE_TYPE'], name=u'用户登录方式分布')
    ####### pie charts ######
    publicservice_hid_main = publicservice_hid.sort_values(0,False)
    publicservice_hid_main.name = u'用户主要登录方式分布'
    # Top four channels only for the pie chart.
    drawPieChart(publicservice_hid_main[:4], publicservice_hid_main[:4].index, publicservice_hid_main.name+u'饼图')
    ####### n bar charts ######
    drawNBarChart([(publicservice_hid.values, u'登录方式', bar_colors[0])], publicservice_hid.index, u'登录方式', u'用户数量', publicservice_hid.name+u'柱状图')
    ######### Product usage-time analysis ##########
    # Four line charts: operation counts and online-user counts per minute,
    # either averaged per day (divided=8, hh:mm keys) or totalled over the
    # whole 8-day window (divided=1, full-timestamp keys).
    sqls = [('''
    SELECT CONCAT_WS(':',SUBSTRING_INDEX(SUBSTRING_INDEX(VISIT_TIME,' ',-1),':',2),'00') AS VISIT_TIME, COUNT(*) AS CNT FROM qyw.qyw_4th_visit GROUP BY SUBSTRING_INDEX(SUBSTRING_INDEX(VISIT_TIME,' ',-1),':',2) ORDER BY VISIT_TIME;
    ''',8,u'日均操作时间分布(分钟级)',u'操作时间',u'操作数量'),('''
    SELECT CONCAT_WS(':',SUBSTRING_INDEX(VISIT_TIME,':',2),'00') AS VISIT_TIME, COUNT(*) AS CNT FROM qyw.qyw_4th_visit GROUP BY SUBSTRING_INDEX(VISIT_TIME,':',2) ORDER BY VISIT_TIME;
    ''',1,u'八日操作时间总分布(分钟级)',u'操作时间',u'操作数量'),('''
    SELECT CONCAT_WS(':',SUBSTRING_INDEX(SUBSTRING_INDEX(VISIT_TIME,' ',-1),':',2),'00') AS VISIT_TIME, COUNT(DISTINCT USER_ID) AS CNT FROM qyw.qyw_4th_visit GROUP BY SUBSTRING_INDEX(SUBSTRING_INDEX(VISIT_TIME,' ',-1),':',2) ORDER BY VISIT_TIME;
    ''',8,u'日均在线用户时间分布(分钟级)',u'操作时间',u'用户数量'),('''
    SELECT CONCAT_WS(':',SUBSTRING_INDEX(VISIT_TIME,':',2),'00') AS VISIT_TIME, COUNT(DISTINCT USER_ID) AS CNT FROM qyw.qyw_4th_visit GROUP BY SUBSTRING_INDEX(VISIT_TIME,':',2) ORDER BY VISIT_TIME;
    ''',1,u'八日在线用户总分布(分钟级)',u'操作时间',u'用户数量')]
    for sql, divided, title, xlabel, ylabel in sqls:
        df_frame = pd.read_sql(sql, con=conn)
        # Divide by the number of days covered to get per-day averages.
        df_series = pd.Series(df_frame['CNT'].values/divided, index=df_frame['VISIT_TIME'], name=title)
        visit_times = []
        cns = []
        xticklabels = []
        if divided == 1:
            # Whole-window series: enumerate every minute of 2016-03-08 ..
            # 2016-03-15 so missing minutes appear as explicit zeros; the
            # isXtick flag leaves one tick label per day (at 00:00:00).
            for i in xrange(8,16):
                visit_day = '2016-03-'
                if i < 10:
                    visit_day += '0'+str(i)+' '
                else:
                    visit_day += str(i) + ' '
                isXtick = True
                for j in xrange(0,24):
                    if j < 10:
                        visit_hour = '0'+str(j)+':'
                    else:
                        visit_hour = str(j) + ':'
                    if j != 0:
                        isXtick = False
                    for k in xrange(0,60):
                        if k < 10:
                            visit_min = '0'+str(k)+':'
                        else:
                            visit_min = str(k) + ':'
                        if k != 0:
                            isXtick = False
                        visit_sec = '00'
                        visit_time = visit_day+visit_hour+visit_min+visit_sec
                        if isXtick:
                            xticklabels.append(visit_time)
                        cn = 0
                        # Minutes with no data keep cn == 0.
                        try:
                            cn = df_series[visit_time]
                            visit_times.append(visit_time)
                            cns.append(cn)
                        except KeyError:
                            visit_times.append(visit_time)
                            cns.append(cn)
        elif divided == 8:
            # Per-day average series: enumerate every minute of a single day;
            # one tick label every three hours (j % 3 == 0, minute 00).
            for j in xrange(0,24):
                if j < 10:
                    visit_hour = '0'+str(j)+':'
                else:
                    visit_hour = str(j) + ':'
                isXtick = True
                if j % 3 != 0:
                    isXtick = False
                for k in xrange(0,60):
                    if k < 10:
                        visit_min = '0'+str(k)+':'
                    else:
                        visit_min = str(k) + ':'
                    if k != 0:
                        isXtick = False
                    visit_sec = '00'
                    visit_time = visit_hour+visit_min+visit_sec
                    if isXtick:
                        xticklabels.append(visit_time)
                    cn = 0
                    try:
                        cn = df_series[visit_time]
                        visit_times.append(visit_time)
                        cns.append(cn)
                    except KeyError:
                        visit_times.append(visit_time)
                        cns.append(cn)
        df_new_series = pd.Series(cns, index=visit_times, name=title)
        drawLineChart(df_new_series, df_new_series.name, xlabel, ylabel, np.linspace(0, len(df_new_series), len(xticklabels)), xticklabels)
    ########## User spending analysis #########
    # Per-(user, visit-time, operation) visit counts and average amounts for
    # payment-type operations, joined to the business dictionary.
    df_amount = pd.read_sql('''SELECT t1.USER_ID, t1.VISIT_TIME, t2.MEANS, COUNT(*) AS VISIT_CNT, AVG(AMOUNT) AS AVG FROM qyw.qyw_4th_visit_pay_base AS t1 INNER JOIN (SELECT VISIT_OP, GROUP_CONCAT(MEAN) AS MEANS, GROUP_CONCAT(CATEGORY) AS CATEGORIES, COUNT(*) FROM qyw.qyw_4th_business_dict GROUP BY VISIT_OP) AS t2 ON t1.VISIT_OP=t2.VISIT_OP GROUP BY t1.USER_ID, t1.VISIT_TIME, t1.VISIT_OP ORDER BY t1.USER_ID, t1.VISIT_TIME;
    ''', con=conn)
    ### Amount vs. operation type ###
    sr_op_cnt = df_amount.groupby(['MEANS'])['VISIT_CNT'].sum()
    sr_op_sum = df_amount.groupby(['MEANS'])['AVG'].sum()
    sr_op_mean = sr_op_sum / sr_op_cnt
    drawPieChart(sr_op_cnt, sr_op_cnt.index, u'主要的支付类操作占比(操作数量)')
    drawPieChart(sr_op_mean, sr_op_mean.index, u'主要的支付类操作涉及平均金额')
    drawBarAXBarChart(sr_op_cnt, sr_op_mean, u'主要的支付类操作频次vs.平均金额', u'支付类操作', u'操作数量', u'平均金额', sr_op_cnt.index, u'操作频次', u'平均消费')
    ### Per-user average spending ###
    df_cnt = pd.DataFrame({'USER_ID':df_amount.groupby(['USER_ID'])['AVG'].count().index,'COUNT':df_amount.groupby(['USER_ID'])['AVG'].count()}, columns=['USER_ID','COUNT'])
    df_mean = pd.DataFrame({'USER_ID':df_amount.groupby(['USER_ID'])['AVG'].mean().index,'MEAN':df_amount.groupby(['USER_ID'])['AVG'].mean()}, columns=['USER_ID','MEAN'])
    df_merge = pd.merge(df_cnt, df_mean, on='USER_ID')
    sr_amount = df_merge.groupby(['MEAN'])['USER_ID'].count().sort_values(ascending=False)
    # Bucket users by average spend: >10 yuan, 5..10 yuan, <=5 yuan.
    amount_users = [sr_amount[sr_amount.index > 10].sum(), sr_amount[sr_amount.index <= 10].sum()-sr_amount[sr_amount.index <= 5].sum(), sr_amount[sr_amount.index <= 5].sum()]
    amount_users_index = [u'大于10元', u'介于10元至5元之间', u'小于5元'];
    sr_user_amount = pd.Series(amount_users, index=amount_users_index)
    sr_user_amount.title=u'用户消费金额总分布'
    drawPieChart(sr_user_amount, sr_user_amount.index, sr_user_amount.title)
    # Round amounts to 2 decimals for display (Python 2: map returns a list).
    sr_amount.index = map(lambda x: round(x,2), sr_amount.index)
    drawNBarChart([(sr_amount[:10].values, u'Top10平均消费指数', 'b')], sr_amount[:10].index, u'平均消费金额', u'消费人数', u'Top10用户平均消费指数')
except:
    # NOTE(review): bare except silently discards any failure in the whole
    # analysis run -- at minimum this should log the traceback before passing.
    pass
finally:
    conn.close()
| true
|
db22ea8f70ee5ebdfa316698fdebd84b3a607ea1
|
Python
|
ReliableDragon/NestedGenerator
|
/nested_choices.py
|
UTF-8
| 9,778
| 2.546875
| 3
|
[] |
no_license
|
import re
import random
import uuid
import math
import logging
import argparse
import state_clause_handler
import choices_util
import choices_validator
import choice_generator as choice_generator_mod
from state_regexes import STATE_REGEXES
class WeightedChoice():
    """One selectable entry in a choice table.

    Carries an integer weight, the choice text, the tag number of the level
    it was parsed at, an optional state clause (kept in its '%...%' wrapper),
    and a unique id so equal-looking choices stay distinct as dict keys.
    """

    def __init__(self, weight, choice, tag_num=1, clause=None):
        self.weight = int(weight)
        self.choice = choice
        self.tag_num = tag_num
        self.clause = clause
        self.uuid = uuid.uuid4()

    def __str__(self):
        # Render as "(weight)[tag_num]{clause}choice"; the clause segment is
        # omitted when no clause is attached.
        pieces = ['({})[{}]'.format(self.weight, self.tag_num)]
        if self.clause:
            pieces.append('{{{}}}'.format(self.clause))
        pieces.append('{}'.format(self.choice))
        return ''.join(pieces)

    def __repr__(self):
        return str(self)
class NestedChoices():
    """A weighted, nested random-choice table.

    The tree is a dict mapping WeightedChoice -> sub-dict of deeper choices.
    Tables loaded from other files can be registered as subtables and are
    addressed by their namespace id.
    """

    def __init__(self, namespace_id, choices_tree=None):
        """Create a table for *namespace_id* wrapping *choices_tree*.

        Bug fix: the original default ``choices_tree={}`` was a shared mutable
        default, so every instance created without an explicit tree silently
        shared (and mutated) the same dict.  A None sentinel gives each
        instance its own fresh dict while keeping the call signature
        backward-compatible.
        """
        self.namespace_id = namespace_id
        self.choices = {} if choices_tree is None else choices_tree
        self.subtables = {}

    def __str__(self):
        val = '{\n'
        indent = 2
        val += choices_util.recursive_dict_print(self.choices, indent)
        val += '}'
        return val

    def __repr__(self):
        return self.__str__()

    @staticmethod
    def load_from_file(filename):
        """Parse a choices file into a NestedChoices instance.

        File layout: first line is the namespace id, followed by optional
        ``name:filename`` import lines for subtables, a blank line, then the
        indented choice entries.  Imported subtables are loaded recursively
        and registered on the result.
        """
        with open(filename, 'r', encoding='utf-8') as choices_file:
            choices_string = choices_file.read()
        # Remove comments (whole-line and trailing).
        choices_string = re.sub('(\n *)? *#.*$', '', choices_string, flags=re.MULTILINE)
        # Strip the namespace id off the top.
        namespace_id = choices_string.split('\n', 1)[0]
        choices_string = choices_string.split('\n', 1)[1]
        import_files = []
        # Leading "xxx:filename" lines declare subtable imports.
        while ':' in choices_string[:choices_string.index('\n')]:
            logging.debug(f'Importing from choices_string (trunc): {choices_string[:50]}')
            required_module = choices_string.split('\n', 1)[0]
            module_filename = required_module.split(':')[1]
            # Strip the import off.
            choices_string = choices_string.split('\n', 1)[1]
            import_files.append(module_filename)
        # Strip off the leading newline.
        choices_string = choices_string.split('\n', 1)[1]
        choices_validator.validate_choices(namespace_id, choices_string)
        choices_tree = NestedChoices.choices_string_to_tree(choices_string)
        resulting_nested_choices = NestedChoices(namespace_id, choices_tree)
        # Renamed loop variable: the original reused (and shadowed) the
        # `filename` parameter here.
        for import_filename in import_files:
            imported_choice = NestedChoices.load_from_file(import_filename)
            resulting_nested_choices.register_subtable(imported_choice)
        return resulting_nested_choices

    @staticmethod
    def choices_string_to_tree(choices_string):
        """Convert the textual form (blank-line-separated entries, two-space
        indentation per nesting level) into the nested dict tree."""
        choices_tree = {}
        top_level_choices_list = choices_string.split('\n\n')
        top_level_choices_list = [choice.strip() for choice in top_level_choices_list]
        for top_level_choice_data in top_level_choices_list:
            indent = 0
            parent_choicedict_stack = [choices_tree]
            tag_stack = [1]
            nested_choices = choices_util.split_into_lines(top_level_choice_data)
            top_level_choice = nested_choices.pop(0)
            parent = load_choice_from_line(top_level_choice, tag_stack)
            choices_tree[parent] = {}
            current_dict = choices_tree[parent]
            # Holds the value to go back to if the next node turns out to be a leaf.
            prev_dict = choices_tree
            for choice in nested_choices:
                # Check how many spaces there are to find the indent level.
                new_indent = len(choice) - len(choice.lstrip(' '))
                if new_indent == indent:
                    # Sibling: step back up to the previous node's parent dict.
                    current_dict = prev_dict
                elif new_indent > indent:
                    # Child: descend one level.
                    parent_choicedict_stack.append(current_dict)
                    tag_stack.append(1)
                    # If there's a same indent next, this is the level we want to be on.
                    prev_dict = current_dict
                elif new_indent < indent:
                    # Dedent: pop one stack frame per two spaces removed.
                    distance_up_stack = (indent - new_indent) // 2
                    for _ in range(distance_up_stack):
                        parent_choicedict_stack.pop()
                        tag_stack.pop()
                    current_dict = parent_choicedict_stack[-1]
                    # If there's a same indent next, this is the level we want to be on.
                    prev_dict = current_dict
                indent = new_indent
                if is_tag_marker(choice):
                    # A bare '$' line bumps the tag counter instead of adding a node.
                    tag_stack[-1] += 1
                    continue
                choice = choice[new_indent:]
                weighted_choice = load_choice_from_line(choice, tag_stack)
                current_dict[weighted_choice] = {}
                current_dict = current_dict[weighted_choice]
        return choices_tree

    @staticmethod
    def load_from_string_list(namespace_id, choices, probs=None):
        """Build a flat table from a list of strings with optional weights.

        *probs*, when given, must be the same length as *choices*; otherwise
        every choice gets weight 1.  (Default changed from a mutable ``[]``
        to ``None``; the ``if probs:`` truthiness test is unchanged.)
        """
        if probs:
            assert len(probs) == len(choices), 'Got a list of probabilities that was not the same length as the list of choices!'
            choices = [str(prob) + ' ' + choice for prob, choice in zip(probs, choices)]
        else:
            choices = ['1 ' + s for s in choices]
        choices_string = '\n\n'.join(choices)
        choices_validator.validate_choices(namespace_id, choices_string)
        choices_tree = NestedChoices.choices_string_to_tree(choices_string)
        return NestedChoices(namespace_id, choices_tree)

    def register_subtable(self, nc):
        """Make *nc* callable from this table under its namespace id."""
        self.subtables[nc.namespace_id] = nc

    def gen_choices(self, params=None):
        """Randomly generate choices from this table.

        params keys:
          'num': how many choices to generate.
          'uniqueness_level': at what level results must be unique. -1 means
            simply don't repeat the exact same choice; positive values starting
            from 1 name the indentation level removed once chosen; 0 allows
            repetition.
          'uniqueness_mode': 'each' or 'all'. 'all' considers the choice as a
            whole, while 'each' applies the constraint to every table
            separately.  Not yet implemented.

        (Default changed from a shared mutable dict to a None sentinel.)
        """
        if params is None:
            params = {'num': 1, 'uniqueness_level': 0, 'uniqueness_mode': 'each'}
        return self._gen_choices(params)[0]

    def _gen_choices(self, params=None):
        """Same as gen_choices, but also returns internal generator state."""
        if params is None:
            params = {'num': 1, 'uniqueness_level': 0, 'uniqueness_mode': 'each'}
        generated_choices = []
        # Initialize state so a request for zero choices returns cleanly
        # instead of raising NameError on the return statement.
        state = {}
        choice_generator = choice_generator_mod.ChoiceGenerator(self)
        for _ in range(params['num']):
            generated_choice, state = choice_generator.gen_choice(self.choices, params)
            generated_choices.append(generated_choice)
        return generated_choices, state

    def call_subtable(self, subtable_id, params):
        """Generate from a registered subtable, returning (choices, state)."""
        return self.subtables[subtable_id]._gen_choices(params)
def load_choice_from_line(line, tag_stack):
    """Parse one 'weight [%clause%] text' line into a WeightedChoice.

    Normal lines look like '3 some choice text'.  Lines whose weight is
    governed by a state clause look like '3%clause% some choice text'.
    A line with no space at all is treated as a weight-only, empty choice.
    The new choice is tagged with the top of *tag_stack*.
    """
    try:
        # Fast path: '<weight> <choice text>'.
        weighted_choice = WeightedChoice(*line.split(' ', 1), tag_num=tag_stack[-1])
    except ValueError as e:
        # int(weight) failed, so the weight segment carries a %clause%;
        # any other ValueError is a genuine error and is re-raised.
        if 'invalid literal for int() with base 10:' not in str(e):
            raise
        try:
            weight, raw_clause, choice_text = line.split('%', 2)
        except ValueError as e:
            logging.error(f'Despite not having a space, {line} doesn\'t appear to have had a state clause. That\'s is weird. I\'m also not sure how it got past the format checking.')
            raise
        logging.info(f'weight: {weight}, clause: {raw_clause}, choice_text: "{choice_text}"')
        # Choice must have a leading space now, if the format is being followed properly.
        assert choice_text[0] == ' ', f'Choice {line} seems not to have had a space following the state clause affecting the probability. This is against the format.'
        choice_text = choice_text[1:]
        # Re-wrap the clause in its '%...%' delimiters for downstream handling.
        clause = '%' + raw_clause + '%'
        weighted_choice = WeightedChoice(weight, choice_text, tag_num=tag_stack[-1], clause=clause)
    except TypeError:
        # Empty choice: line.split(' ', 1) produced a single field, so the
        # WeightedChoice call above got too few positional arguments.
        weighted_choice = WeightedChoice(line, '', tag_stack[-1])
    return weighted_choice
def is_tag_marker(choice):
    """Return a match object when *choice* is an indented '$' tag-marker line,
    otherwise None (callers use the result as a boolean)."""
    tag_marker_pattern = re.compile('( )+\$')
    return tag_marker_pattern.match(choice)
if __name__ == "__main__":
logging_level = logging.WARNING
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--info', action='store_true')
args = parser.parse_args()
if args.debug:
logging_level = logging.DEBUG
elif args.info:
logging_level = logging.INFO
logging.basicConfig(level=logging_level)
choices = NestedChoices.load_from_file('test_places.txt')
subtable = NestedChoices.load_from_string_list('countries_table', ['Germany', 'France', 'UK'], [5, 3, 1])
choices.register_subtable(subtable)
for choice in choices.gen_choices(params={'num': 4, 'uniqueness_level': -1}):
print(choice)
| true
|
a579b58773d007dcb81d28b8b974e98bf876a778
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03759/s163335731.py
|
UTF-8
| 92
| 3.328125
| 3
|
[] |
no_license
|
# Read three integers and report whether they form an arithmetic progression
# (equal difference between consecutive terms).
# Fix: the list() wrapper around map() was redundant -- tuple unpacking
# consumes the map iterator directly.
a, b, c = map(int, input().split())
if b - a == c - b:
    print("YES")
else:
    print("NO")
| true
|
458782ac77fe6713c9aaf10374b1f3683dc26e16
|
Python
|
lfwin/FreiburgSLAM
|
/hw9/hw9.py
|
UTF-8
| 1,283
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/python
# Odometry-calibration exercise: least-squares fit of a correction matrix that
# maps raw odometry motions onto scan-matched motions, then a plot comparing
# the three trajectories.  Depends on the project-local `calibrate` module.
import scipy
import matplotlib.pyplot as plt
import calibrate

# load the odometry measurements
odom_motions = scipy.loadtxt('odom_motions.dat')
# the motions as they are estimated by scan-matching
scanmatched_motions = scipy.loadtxt('scanmatched_motions.dat')
# create our measurements vector z
# Each row pairs a scan-matched motion (treated as reference) with the
# corresponding raw odometry motion.
# NOTE(review): scipy.loadtxt/hstack were numpy re-exports removed in modern
# SciPy -- presumably this targets an old SciPy; confirm or import numpy.
z = scipy.hstack((scanmatched_motions, odom_motions))
# perform the calibration
X = calibrate.ls_calibrate_odometry(z)
print('calibration result'),
print(X)
# apply the estimated calibration parameters
calibrated_motions = calibrate.apply_odometry_correction(X, odom_motions)
# compute the current odometry trajectory, the scanmatch result, and the calibrated odom
odom_trajectory = calibrate.compute_trajectory(odom_motions)
scanmatch_trajectory = calibrate.compute_trajectory(scanmatched_motions)
calibrated_trajectory = calibrate.compute_trajectory(calibrated_motions)
# plot the trajectories
plt.plot(odom_trajectory[:,0], odom_trajectory[:,1], color='b', label="Uncalibrated Odometry")
plt.plot(scanmatch_trajectory[:,0], scanmatch_trajectory[:,1], color='g', label="Scan-Matching")
plt.plot(calibrated_trajectory[:,0], calibrated_trajectory[:,1], color='r', label="Calibrated Odometry")
plt.legend(loc=1)
plt.show()
#plt.savefig('odometry-calibration.png')
| true
|
9ae10c4d2a8b7347151c2ff892528c44aa3c0aeb
|
Python
|
shrukerkar/ML-Fellowship
|
/BasicPython/StringReverse.py
|
UTF-8
| 812
| 4.34375
| 4
|
[] |
no_license
|
#Write a Python program to reverse a string.

#Method 1: iterative -- prepend each character to an accumulator.
def reverse(s):
    # Bug fix: the original immediately overwrote its parameter with "" and
    # then iterated that empty string, so it always returned "".  It also
    # shadowed the builtin `str`.  Accumulate into a separate variable.
    result = ""
    for ch in s:
        result = ch + result
    return result

text = "we3resource"
print("The original string is : ", end="")
print(text)
print("The reversed string(using loops) is : ", end="")
print(reverse(text))

#Method 2: recursion -- reverse the tail, then append the head.
def reverse(s):
    if len(s) == 0:
        return s
    else:
        return reverse(s[1:]) + s[0]

s = "we3resource"
print("The original string is : ", end="")
print(s)
print("The reversed string(using recursion) is : ", end="")
print(reverse(s))

#Method 3: built-in reversed() joined back into a string.
def reverse(string):
    string = "".join(reversed(string))
    return string

string = "we3resource"
print("The original string is : ", end="")
print(string)
print("The reversed string(using reversed) is : ", end="")
print(reverse(string))
| true
|
98ab6318ce37e699c5e67ba4ece42d09c2cf7b47
|
Python
|
avtar31193/GTPC-protocol
|
/TESTER/GTPC_VERDICT.py
|
UTF-8
| 1,409
| 2.765625
| 3
|
[] |
no_license
|
def IECompare(IE, IR):
    """Compare every expected IE field against the received IE dict.

    Returns "EQUAL" when all expected fields are present with matching
    values; otherwise returns a failure message naming the first offending
    field.  Extra fields in *IR* are ignored.
    """
    for field in IE:
        if field not in IR:
            return "Test Case failed with IE Field Missing {}".format(field)
        if IE[field] != IR[field]:
            return "Test Case Failed with IE Field Value wrong :{}".format(field)
    return "EQUAL"
def TestVerdict(DWAE, DWAR): #2
    # Compare an expected decoded message (DWAE) against the received one
    # (DWAR): header fields must match by value (MessageType is skipped,
    # MessageLength mismatches are reported but tolerated) and the 'IEs'
    # sub-dict is compared via IECompare.  Returns "Test Case Passed" or a
    # failure message for the first mismatch.  Python 2 (print statement).
    for EveryKey in DWAE:
        if EveryKey in DWAR:
            if EveryKey == 'IEs':
                IECompareResult = IECompare(DWAE[EveryKey], DWAR[EveryKey])
                if IECompareResult == "EQUAL":
                    continue
                else:
                    return IECompareResult
                # NOTE(review): both branches above continue/return, so this
                # per-IE loop (with its RECOVERY special case) appears
                # unreachable -- presumably an older comparison path that was
                # superseded by the direct IECompare call; confirm and remove.
                for EveryIE in DWAE[EveryKey]:
                    if EveryIE in DWAR[EveryKey]:
                        if EveryIE=='RECOVERY':
                            IECompareResult = IECompare(DWAE[EveryKey][EveryIE], DWAR[EveryKey][EveryIE])
                            if IECompareResult == "EQUAL":
                                continue
                            else:
                                return IECompareResult
                    else:
                        return "Test case Failed With IE {} missing".format(EveryIE)
            else:
                if EveryKey !='MessageType':
                    if DWAE[EveryKey] == DWAR[EveryKey]:
                        continue
                    else:
                        if EveryKey == 'MessageLength':
                            print "continuing by ignoring MessageLength Field mismatch"
                            continue
                        else:
                            return "Test Case Failed with Header Field {} value Ex:{}, Value Rx:{}".format(EveryKey, DWAE[EveryKey],DWAR[EveryKey])
        else:
            return "Test Case Failed With Missing Field {}".format(EveryKey)
    return "Test Case Passed"
| true
|