| max_stars_repo_path (string, len 3-269) | max_stars_repo_name (string, len 4-119) | max_stars_count (int64, 0-191k) | id (string, len 1-7) | content (string, len 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
| ree/core/__init__.py | blackleg/reescrapper | 0 | 12779951 |
from .scraper import Scraper
from .exceptions import ResponseCodeException, ResponseDataException, NoDataException, TimestampException
| 1.234375 | 1 |
| tests/buildframe_test.py | SeBassTian23/Visual-Phenomics-Python | 0 | 12779952 |
"""
Test file corresponding with photosynq_py.buildframe
"""
from unittest import TestCase
# import pandas as pd
# import numpy as np
import visual_phenomics_py as visual_phenomics_py
class BuildframeTest(TestCase):
"""
Test class corresponding with vppy.buildframe
"""
# def setUp(self):
# """
# Load json structures taken from the photosynq API,
# as well as a corresponding csv taken from the photosynq website
# """
# self.test_info = self.load_json(
# RESOURCE_FOLDER + "1224_info_from_api")["project"]
# self.test_data = self.load_json(
# RESOURCE_FOLDER + "1224_data_from_api")["data"]
# self.resource_dataframe = pd.read_csv(
# RESOURCE_FOLDER + "1224_csv_from_website.csv", index_col=0)
# def test_build_project_dataframe(self):
# """
# Pass canned json structures to
# :func:`~photosynq_py.buildframe.build_project_dataframe`,
# and assert that the resulting dataframe matches a canned csv.
# """
# # build a dataframe
# built_dataframe = ps.build_project_dataframe(
# self.test_info, self.test_data)
# built_dataframe = built_dataframe[PS_PROTOCOL_TO_TEST]
# # assert that built dataframe "datum_id" values match the "ID" column in the csv
# csv_ids = list(self.resource_dataframe.index)
# builtDatumIds = list(built_dataframe['datum_id'])
# self.assertListEqual(csv_ids, builtDatumIds, "build_project_dataframe() result datum_ids \
# do not match the ID column in test resources csv")
    # # debug
# print("test-resource columns: " + str(self.resource_dataframe.columns));
# print("built dataframe columns: " + str(built_dataframe.columns));
# # iterate through each relevant column in the csv
# for header in self.resource_dataframe.columns:
# if header in IGNORABLE_CSV_HEADERS:
# continue
# # assert that the column header exists in the built dataframe
# self.assertIn(header, built_dataframe.columns,
# "buildProjectDataFrame() result is missing header \"{0}\", \
# which is present in test resources csv".format(header))
# # assert that this column's contents match between the csv and the built dataframe
# (csv_col_data, built_col_data) = self.extract_column_data(
# header, built_dataframe)
# self.assert_columns_match(csv_col_data, built_col_data, header)
# def assert_columns_match(self, csv_col_data, built_col_data, header):
# """
# Assert that the two given lists/arrays are equivalent, accounting for the type of data they contain
# """
    # # for numerical arrays, use a numpy testing method that allows for insignificant differences due to number formatting
# if isinstance(built_col_data, np.ndarray):
# np.testing.assert_array_almost_equal(csv_col_data, built_col_data,
# err_msg="buildProjectDataFrame() result \"{0}\" numerical values do not match \
# the corresponding column in the test resources csv".format(header))
# # otherwise, assert that the two lists are exactly the same
# else:
# self.assertListEqual(csv_col_data, built_col_data,
# "buildProjectDataFrame() result \"{0}\" values do not match the \
# corresponding column in the test resources csv".format(header))
# def extract_column_data(self, header, built_dataframe):
# """
# Extract data for one column into a common format, from both the canned csv dataframe and the given dataframe generated from json structures.
# """
# # retrieve raw column contents from both the canned csv and the dataframe generated from json
# csv_col_data = list(self.resource_dataframe[header])
# built_col_data = list(built_dataframe[header][:])
# # if necessary, convert time data to a common format
# if header == "time":
# csv_col_data = [datetime.strptime(
# x, '%m/%d/%Y %I:%M %p') for x in csv_col_data]
# built_col_data = [datetime.strptime(
# x, ps.TIME_FORMAT) for x in built_col_data]
# built_col_data = [datetime(x.year, x.month, x.day, x.hour, x.minute)
# for x in built_col_data]
# # if necessary, convert numerical data to numpy arrays,
# elif isinstance(built_col_data[0], Number) or isinstance(csv_col_data[0], Number):
# csv_col_data = np.asarray(
# [None if x == 'null' else x for x in csv_col_data], dtype=float)
# built_col_data = np.asarray(
# [None if x == 'null' else x for x in built_col_data], dtype=float)
# # otherwise, just replace "null" entries with None
# else:
# csv_col_data = [None if x == 'null' else x for x in csv_col_data]
# # return both sets of column contents
# return (csv_col_data, built_col_data)
# def load_json(self, file):
# """
# load a json structure from a file
# """
# with open(file, "rb") as test_info_file:
# result_bytes = test_info_file.read()
# return json.loads(result_bytes.decode('utf-8'))
| 3.09375 | 3 |
| users/views.py | sedexdev/scramble-api-frontend | 0 | 12779953 |
from flask import (
Blueprint,
flash,
redirect,
render_template,
request,
url_for)
from flask_login import current_user, login_user, logout_user
from werkzeug.wrappers import Response
from .forms import (
AccountForm,
DeleteForm,
LoginForm,
RegisterForm,
ResetForm,
UpdateForm)
from utils import admin_auth, is_staging, make_api_request
user_blueprint = Blueprint(
'users',
__name__,
template_folder='templates')
@user_blueprint.route('/login', methods=['GET', 'POST'])
def login() -> Response:
if is_staging() and not admin_auth():
return redirect(url_for('admin.login_admin'))
form = LoginForm()
login_args = {'title': 'Login', 'form': form}
if form.validate_on_submit():
res = make_api_request('post', 'users/login', data={
'email': form.email.data,
'password': form.password.data})
if res.status_code == 200:
login_user(res.json().get('user'))
flash(res.message, 'message')
next = request.args.get('next')
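            # Only allow local redirects (paths starting with '/'); anything else falls back
            # to the index page, which avoids open-redirect abuse of the 'next' parameter.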
if next is None or not next[0] == '/':
next = url_for('core.index')
return redirect(next)
else:
flash(res.message, 'warning')
return render_template('login.html', **login_args)
return render_template('login.html', **login_args)
@user_blueprint.route('/register', methods=['GET', 'POST'])
def register() -> Response:
if is_staging() and not admin_auth():
return redirect(url_for('admin.login_admin'))
form = RegisterForm()
register_args = {'title': 'Register', 'form': form}
if form.validate_on_submit():
res = make_api_request('post', 'users/register', data={
'email': form.email.data,
'password': form.password.data,
'confirm_pw': form.confirm_pw.data})
if res.status_code == 200:
flash(res.message, 'message')
return redirect(url_for('users.login'))
else:
flash(res.message, 'warning')
return render_template('register.html', **register_args)
return render_template('register.html', **register_args)
@user_blueprint.route('/reset', methods=['GET', 'POST'])
def reset() -> Response:
if is_staging() and not admin_auth():
return redirect(url_for('admin.login_admin'))
form = ResetForm()
reset_args = {'title': 'Reset', 'form': form}
if form.validate_on_submit():
res = make_api_request('post', 'users/reset', data={
'email': form.email.data})
if res.status_code == 200:
flash(res.message, 'message')
return redirect(url_for('users.update'))
else:
flash(res.message, 'warning')
return render_template('reset.html', **reset_args)
return render_template('reset_pw.html', **reset_args)
@user_blueprint.route('/update', methods=['GET', 'POST'])
def update() -> Response:
if is_staging() and not admin_auth():
return redirect(url_for('admin.login_admin'))
form = UpdateForm()
update_args = {'title': 'Update', 'form': form}
if form.validate_on_submit():
res = make_api_request('put', 'users/update', data={
'email': current_user.email,
'password': form.password.data,
'new_pw': form.new_pw.data,
'confirm_pw': form.confirm_pw.data})
if res.status_code == 200:
flash(res.message, 'message')
return redirect(url_for('users.login'))
else:
flash(res.message, 'warning')
return render_template('update.html', **update_args)
return render_template('update.html', **update_args)
@user_blueprint.route('/account', methods=['GET', 'POST'])
def account() -> Response:
if is_staging() and not admin_auth():
return redirect(url_for('admin.login_admin'))
form = AccountForm()
del_form = DeleteForm()
if form.validate_on_submit():
res = make_api_request('put', 'users/account', data={
'email': form.email.data,
'current_pw': form.current_pw.data,
'new_pw': form.new_pw.data,
'confirm_pw': form.confirm_pw.data,
})
if res.status_code == 200:
flash(res.message, 'message')
else:
flash(res.message, 'warning')
if del_form.validate_on_submit():
        email = del_form.email.data
if email != current_user.email:
flash('Invalid email', 'warning')
else:
res = make_api_request('delete', 'users/delete', data={
'email': email})
if res.status_code == 200:
flash(res.message, 'message')
return redirect(url_for('users.login'))
else:
flash(res.message, 'warning')
return render_template('account.html', form=form, del_form=del_form)
@user_blueprint.route('/logout', methods=['POST'])
def logout() -> Response:
logout_user()
flash('Logged out', 'message')
return redirect(url_for('users.login'))
| 2.28125 | 2 |
| formations/views.py | Kgermando/catsr | 0 | 12779954 |
from django.shortcuts import render, get_object_or_404, redirect
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from formations.models import Formation
# Create your views here.
def formations_view(request):
"""
docstring
"""
formation = Formation.objects.all().order_by('-created')
paginator = Paginator(formation, 9)
page = request.GET.get('page')
try:
formation_list = paginator.page(page)
except PageNotAnInteger:
formation_list = paginator.page(1)
except EmptyPage:
formation_list = paginator.page(paginator.num_pages)
context = {
'formation_list': formation_list,
}
template_name = 'pages/formations/formations.html'
return render(request, template_name, context)
def formations_detail(request, slug):
"""
docstring
"""
formation = get_object_or_404(Formation, slug=slug)
formation_list = Formation.objects.all().order_by('-created')[:5]
formation.nombre_vues = formation.nombre_vues+1
formation.save()
context = {
'formation': formation,
'formation_list': formation_list
}
template_name = 'pages/formations/formations-detail.html'
return render(request, template_name, context)
| 2.109375 | 2 |
| recipe_scrapers/__version__.py | michael-golfi/recipe-scrapers | 0 | 12779955 |
__version__ = "13.3.0"
| 1.023438 | 1 |
| 2.7/ptp_path.py | SnoWolf2018/Python | 0 | 12779956 |
import sys
import os
import numpy as np
'''path tests'''
#print 'current path'
#print os.getcwd()
#'''Your Shell Path'''
#print os.path.abspath(os.path.dirname(__file__))
#'''Your script Path'''
#
#print 'get upper path'
#print os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
#print os.path.abspath(os.path.dirname(os.getcwd()))
#print os.path.abspath(os.path.join(os.getcwd(),".."))
#
#print 'get upper upper path'
#print os.path.abspath(os.path.join(os.getcwd(),"../.."))
#script_path=os.path.dirname(__file__)
'''The code upper line cannot run in the script_path'''
script_path=os.path.abspath(os.path.dirname(__file__))
#print script_path
parent_path=os.path.dirname(script_path)
#print parent_path
filename=parent_path+'/Code/ch3code/data.csv'
#print filename
'''Run anywhere in any Linux PC'''
h,l=np.loadtxt(filename,delimiter=',',usecols=(4,5),unpack=True)
'''Run everywhere in Archlinux PC'''
#h,l=np.loadtxt('/home/archlinux/git/python/Code/ch3code/data.csv',delimiter=',',usecols=(4,5),unpack=True)
'''Run in Script Path'''
#h,l=np.loadtxt('../Code/ch3code/data.csv',delimiter=',',usecols=(4,5),unpack=True)
'''Run in Script Upper Path'''
#h,l=np.loadtxt('./Code/ch3code/data.csv',delimiter=',',usecols=(4,5),unpack=True)
print "highest =",np.max(h)
print "lowest =",np.min(l)
print (np.max(h)+np.min(l))/2
print "Spead high price",np.ptp(h)
print "Spead low price",np.ptp(l)
| 2.53125 | 3 |
| app/enquiries/apps.py | uktrade/enquiry-mgmt-tool | 3 | 12779957 |
from django.apps import AppConfig
class EnquiriesConfig(AppConfig):
name = "enquiries"
| 1.320313 | 1 |
| data/main.py | oyjuffer/Battler-V1.1 | 0 | 12779958 |
### MAIN ###
# IMPORT LIBRARIES
import os
import pygame
# IMPORT GAME FILES
from . import constants
from . import control
from . import setup
from .states import main_menu, game_setup
# CLASSES
# FUNCTIONS
def main():
"""
This is the main function for the game; it sets up a game object and state dictionaries.
    It then sets up the initial active state and runs the mainloop.
This can be found in control.py.
"""
game = control.Control()
state_dict = {constants.MAIN_MENU: main_menu.Main_Menu(),
constants.GAME_SETUP: game_setup.Game_Setup()}
game.setup_states(state_dict, constants.MAIN_MENU)
game.mainloop()
| 2.90625 | 3 |
| winlogtimeline/ui/tag_settings.py | ShaneKent/PyEventLogViewer | 4 | 12779959 |
from tkinter import *
from tkinter import messagebox
from tkinter.ttk import *
import re
class TagSettings(Toplevel):
def __init__(self, parent):
super().__init__(parent)
# Class variables
self.tags = dict()
self.changes_made = False
# Window Parameters
self.title('Record Highlight Settings')
self.resizable(width=False, height=False)
# Create and place the widgets
self._init_widgets()
self.populate_tags(parent.current_project.config.get('events', {}).get('colors', {}))
self._place_widgets()
def _init_widgets(self):
"""
Creates the elements of this window and sets configuration values.
:return:
"""
# Master container frame
self.container = Frame(self)
# Treeview for tags
self.listbox_container = Frame(self.container)
self.tag_list = Treeview(self.listbox_container, columns=('source', 'id'), show='headings')
# Set up the tree headings
self.tag_list.heading('source', text='Event Source', command=lambda: self.sort_column('source', False))
self.tag_list.heading('id', text='Event ID', command=lambda: self.sort_column('id', False))
# Set up the tree columns
self.tag_list.column('id', minwidth=0, width=60, stretch=NO)
self.tag_list.column('source', minwidth=0, width=100, stretch=YES)
self.tag_list.bind('<<TreeviewSelect>>', self.callback_update_select_background)
# Scrollbar settings
self.vsb = Scrollbar(self.listbox_container, orient='vertical', command=self.tag_list.yview)
self.hsb = Scrollbar(self.listbox_container, orient='horizontal', command=self.tag_list.xview)
self.tag_list.configure(yscrollcommand=self.vsb.set)
self.tag_list.configure(xscrollcommand=self.hsb.set)
# Color preview
self.color_block = Canvas(self.container, width=300, height=20, relief=SUNKEN)
self.color_block_rect = self.color_block.create_rectangle(0, 0, 301, 21, fill='#FFFFFF')
self.color_block_text = self.color_block.create_text(5, 5, anchor='nw',
text='The quick brown fox jumps over the lazy dog.')
# Sliders
self.slider_container = Frame(self.container)
# Red config
self.red = IntVar()
self.r_label = Label(self.slider_container, text='R: ')
self.r_slider = Scale(self.slider_container, from_=0, to=255, variable=self.red,
command=lambda *args: self.truncate(self.r_slider))
self.r_value_label = Label(self.slider_container, text='0')
self.red.trace('w', lambda *args: self.callback_update_label(self.red, self.r_value_label))
self.r_slider.set(255)
# Green config
self.green = IntVar()
self.g_label = Label(self.slider_container, text='G: ')
self.g_slider = Scale(self.slider_container, from_=0, to=255, variable=self.green,
command=lambda *args: self.truncate(self.g_slider))
self.g_value_label = Label(self.slider_container, text='0')
self.green.trace('w', lambda *args: self.callback_update_label(self.green, self.g_value_label))
self.g_slider.set(255)
# Blue config
self.blue = IntVar()
self.b_label = Label(self.slider_container, text='B: ')
self.b_slider = Scale(self.slider_container, from_=0, to=255, variable=self.blue,
command=lambda *args: self.truncate(self.b_slider))
self.b_value_label = Label(self.slider_container, text='0')
self.blue.trace('w', lambda *args: self.callback_update_label(self.blue, self.b_value_label))
self.b_slider.set(255)
# Buttons for editing tags
self.add_button = Button(self.container, text='Add', command=self.callback_add_tag, underline=0)
self.bind('<Alt-a>', self.callback_add_tag)
self.delete_button = Button(self.container, text='Delete', command=self.callback_remove_tag, underline=0)
self.bind('<Alt-d>', self.callback_remove_tag)
# Finish and cancel buttons
self.finish_button = Button(self.container, text='Finish', command=self.callback_finish, underline=0)
self.cancel_button = Button(self.container, text='Cancel', command=self.callback_cancel, underline=0)
self.bind('<Alt-f>', self.callback_finish)
self.bind('<Return>', self.callback_finish)
self.bind('<Alt-c>', self.callback_cancel)
self.bind('<Escape>', self.callback_cancel)
# Focus on window.
self.focus_set()
def _place_widgets(self):
"""
Lays out the elements in this window.
:return:
"""
padding = 3
# Listbox for tags
self.tag_list.grid(row=0, column=0, columnspan=4, sticky='NESW')
self.vsb.grid(row=0, column=4, sticky='NESW')
self.hsb.grid(row=1, column=0, sticky='NESW')
self.listbox_container.columnconfigure(0, weight=4)
self.listbox_container.grid(row=0, column=0, columnspan=5, padx=padding, pady=padding, sticky='NESW')
# Color box
self.color_block.grid(row=1, column=0, columnspan=5, padx=padding, pady=padding, sticky='NS')
# Red config
self.r_label.grid(row=2, column=0, sticky='EW')
self.r_slider.grid(row=2, column=1, columnspan=3, sticky='EW')
self.r_value_label.grid(row=2, column=4, sticky='EW')
# Green config
self.g_label.grid(row=3, column=0, sticky='EW')
self.g_slider.grid(row=3, column=1, columnspan=3, sticky='EW')
self.g_value_label.grid(row=3, column=4, sticky='EW')
# Blue config
self.b_label.grid(row=4, column=0, sticky='EW')
self.b_slider.grid(row=4, column=1, columnspan=3, sticky='EW')
self.b_value_label.grid(row=4, column=4, sticky='EW')
# Slider container
self.slider_container.columnconfigure(1, weight=4)
self.slider_container.columnconfigure(4, minsize=25)
self.slider_container.grid(row=2, column=0, columnspan=5, padx=padding, sticky='NESW')
# Buttons for editing tags
self.add_button.grid(row=5, column=1, padx=padding, pady=padding, sticky='E')
self.delete_button.grid(row=5, column=2, padx=padding, pady=padding, sticky='EW')
# Finish and cancel buttons
self.finish_button.grid(row=5, column=3, padx=padding, pady=padding, sticky='EW')
self.cancel_button.grid(row=5, column=4, padx=padding, pady=padding, sticky='EW')
# Master container frame
self.container.columnconfigure(1, minsize=100)
self.container.pack(side=LEFT, fill=BOTH)
@staticmethod
def truncate(slider):
"""
Used to truncate slider values since ttk doesn't support the resolution option.
:return:
"""
value = slider.get()
if int(value) != value:
slider.set(int(value))
def sort_column(self, col, reverse):
"""
Sorts the tag list based on a particular column.
:param col: The column to sort.
:param reverse: Whether or not to sort in reverse order.
:return:
"""
column_elements = [(self.tag_list.set(k, col), k) for k in self.tag_list.get_children('')]
if col == 'id':
column_elements = [(int(v), k) for v, k in column_elements]
column_elements.sort(reverse=reverse)
for index, (val, k) in enumerate(column_elements):
self.tag_list.move(k, '', index)
self.tag_list.heading(col, command=lambda _col=col: self.sort_column(_col, not reverse))
def callback_update_label(self, var, label):
"""
Callback used to update the label associated with a slider. Also updates the color associated with the tag.
:param var: The variable bound to the slider.
:param label: The label to update.
:return:
"""
label.config(text=str(int(var.get())))
self.update_tag()
def populate_tags(self, tags):
"""
Iterates over the tag dictionary and inserts each tag.
:param tags: A dictionary containing tag, color pairs. The color should be a hex string.
:return:
"""
tag_config = ((source, event, color) for source, events in tags.items() for event, color in events.items())
for source, event, color in tag_config:
self.insert_tag(source, event, color)
def insert_tag(self, source, event, color):
"""
Inserts a tag into the ui and the tag list.
:param source: The event source.
:param event: The event id as a string.
:param color: The color to associate with the tag as a string in hex format.
:return:
"""
tag = f'{source}::{event}'
self.tag_list.insert('', 'end', values=(source, int(event)), tags=(tag,))
self.tag_list.tag_configure(tag, background=color)
self.tags[source] = self.tags.get(source, dict())
self.tags[source][event] = color
def callback_update_select_background(self, event=None):
"""
Callback used to update the selection background and sliders to match the selection.
:return:
"""
selection = self.tag_list.focus()
if not selection:
return
source, event = (str(v) for v in self.tag_list.item(selection)['values'])
hex_color = self.tags[source][event]
# self.color_block.create_rectangle(0, 0, 301, 21, fill=hex_color)
self.color_block.itemconfigure(self.color_block_rect, fill=hex_color)
hex_color = hex_color.lstrip('#')
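        # e.g. 'FF8000' -> (255, 128, 0): slices [0:2], [2:4], [4:6] parsed as base-16 integers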
r, g, b = tuple(int(hex_color[i:i + 2], 16) for i in range(0, 5, 2))
self.r_slider.set(r)
self.g_slider.set(g)
self.b_slider.set(b)
def update_tag(self):
"""
Updates the colors associated with a tag
:return:
"""
selection = self.tag_list.focus()
if not selection:
return
source, event = (str(v) for v in self.tag_list.item(selection)['values'])
r, g, b = tuple(map(int, (self.r_slider.get(), self.g_slider.get(), self.b_slider.get())))
hex_color = f'#{r:02x}{g:02x}{b:02x}'
self.tags[source][event] = hex_color
self.color_block.itemconfigure(self.color_block_rect, fill=hex_color)
self.tag_list.tag_configure('::'.join((source, event)), background=hex_color)
self.changes_made = True
def callback_add_tag(self, event=None):
"""
Creates a dialog window for the user to enter a new tag.
:return:
"""
window = TagPrompt(self)
window.grab_set()
def callback_remove_tag(self, event=None):
selection = self.tag_list.focus()
if not selection:
return
source, event = (str(v) for v in self.tag_list.item(selection)['values'])
self.tags[source].pop(event)
if len(self.tags[source].keys()) == 0:
self.tags.pop(source)
self.tag_list.delete(selection)
self.changes_made = True
def callback_finish(self, event=None):
"""
Callback used to finish making changes to the tags and return to master.
:return:
"""
self.master.current_project.config['events'] = self.master.current_project.config.get('events', {})
self.master.current_project.config['events']['colors'] = self.tags
if self.master.timeline is not None:
self.master.timeline.update_tags(self.master.current_project.config['events']['colors'])
self.master.changes_made |= self.changes_made
self.destroy()
def callback_cancel(self, event=None):
"""
Callback used to discard changes made. Destroys the widget and returns control to the master
without making any changes.
:return:
"""
self.destroy()
def __destroy__(self):
"""
Returns focus and control to the master.
:return:
"""
self.grab_release()
class TagPrompt(Toplevel):
def __init__(self, parent):
super().__init__(parent)
# Window settings
self.title('New Tag')
self.resizable(width=False, height=False)
# Create and place the widgets
self._init_widgets()
self._place_widgets()
def _init_widgets(self):
self.container = Frame(self)
self.source_label = Label(self.container, text='Event Source')
self.source_entry = Entry(self.container)
self.id_label = Label(self.container, text='Event ID')
id_vcmd = (self.container.register(self.validate_command_id), '%d', '%P')
self.id_entry = Entry(self.container, validate='key', validatecommand=id_vcmd)
self.ok_button = Button(self.container, text='Ok', command=self.callback_ok)
def _place_widgets(self):
padding = 3
self.source_label.grid(row=0, column=0, columnspan=3, padx=padding, pady=padding, sticky='EW')
self.source_entry.grid(row=1, column=0, columnspan=3, padx=padding, pady=padding, sticky='EW')
self.id_label.grid(row=2, column=0, columnspan=3, padx=padding, pady=padding, sticky='EW')
self.id_entry.grid(row=3, column=0, columnspan=3, padx=padding, pady=padding, sticky='EW')
self.ok_button.grid(row=4, column=1, padx=padding, sticky='NESW')
self.container.pack()
@staticmethod
def validate_command_id(action, value):
"""
Restricts entry to only allow integers.
:return:
"""
if action != '1':
return True
if re.match(r'^[0-9]+$', value):
return True
return False
def callback_ok(self):
source, event = self.source_entry.get(), str(self.id_entry.get())
if not all((source, event)):
messagebox.showerror('Error', 'You must enter a value.')
return
if event in self.master.tags.get(source, {}):
messagebox.showerror('Error', 'That tag already exists.')
return
self.master.insert_tag(source, event, '#FFFFFF')
self.master.changes_made = True
self.destroy()
def __destroy__(self):
"""
Returns focus and control to the master.
:return:
"""
self.grab_release()
| 2.734375 | 3 |
| tests/functional/test_filesystem.py | rena2damas/filemanager-service | 0 | 12779960 |
import io
import subprocess
from base64 import b64encode
import pytest
from src.api.auth import AuthAPI
@pytest.fixture()
def auth(mocker):
mocker.patch.object(AuthAPI, "authenticate", return_value=True)
return {"Authorization": f"Basic {b64encode(b'user:pass').decode()}"}
class TestFilesystemGET:
def test_supported_paths(self, client):
response = client.get("/filesystem/supported-paths")
assert response.status_code == 200
def test_unauthorized_request_throws_401(self, client):
response = client.get("/filesystem/tmp/", headers={})
assert response.status_code == 401
def test_unsupported_value_throws_400(self, client, auth):
response = client.get("/filesystem/unsupported/", headers=auth)
assert response.status_code == 400
def test_valid_path_returns_200(self, client, auth, mocker):
mocker.patch("src.utils.shell", return_value="file.txt")
response = client.get("/filesystem/tmp/", headers=auth)
assert response.status_code == 200
assert response.json == ["file.txt"]
def test_error_path_returns_400(self, client, auth, mocker):
err = subprocess.CalledProcessError(cmd="", returncode=1, stderr="err")
mocker.patch("src.utils.shell", side_effect=err)
response = client.get("/filesystem/tmp/invalid/", headers=auth)
assert response.status_code == 400
assert response.json == {"code": 400, "message": "err", "reason": "Bad Request"}
def test_permission_denied_returns_403(self, client, auth, mocker):
stderr = "/tmp/root/: Permission denied"
err = subprocess.CalledProcessError(cmd="", returncode=1, stderr=stderr)
mocker.patch("src.utils.shell", side_effect=err)
response = client.get("/filesystem/tmp/root/", headers=auth)
assert response.status_code == 403
assert response.json == {
"code": 403,
"message": "permission denied",
"reason": "Forbidden",
}
def test_missing_path_returns_404(self, client, auth, mocker):
stderr = "/tmp/missing/: No such file or directory"
err = subprocess.CalledProcessError(cmd="", returncode=1, stderr=stderr)
mocker.patch("src.utils.shell", side_effect=err)
response = client.get("/filesystem/tmp/missing/", headers=auth)
assert response.status_code == 404
assert response.json == {
"code": 404,
"message": "no such file or directory",
"reason": "Not Found",
}
def test_file_attachment_returns_200(self, client, auth, mocker):
mocker.patch("src.utils.shell", side_effect=["file.txt", b""])
mocker.patch("src.utils.isfile", return_value=True)
headers = {**auth, "accept": "application/octet-stream"}
response = client.get("/filesystem/tmp/file.txt", headers=headers)
assert response.status_code == 200
assert (
response.headers["Content-Disposition"] == "attachment; filename=file.txt"
)
assert response.headers["Content-Type"] == "text/plain; charset=utf-8"
def test_directory_attachment_returns_200(self, client, auth, mocker):
mocker.patch("src.utils.shell", side_effect=["dir/", b""])
mocker.patch("src.utils.isdir", return_value=True)
headers = {**auth, "accept": "application/octet-stream"}
response = client.get("/filesystem/tmp/dir/", headers=headers)
assert response.status_code == 200
assert (
response.headers["Content-Disposition"] == "attachment; filename=dir.tar.gz"
)
assert response.headers["Content-Type"] == "application/x-tar"
def test_unsupported_accept_header_path_returns_400(self, client, auth):
headers = {**auth, "accept": "text/html"}
response = client.get("/filesystem/tmp/", headers=headers)
assert response.status_code == 400
assert response.json == {
"code": 400,
"message": "unsupported 'accept' HTTP header",
"reason": "Bad Request",
}
class TestFilesystemPOST:
def test_valid_file_returns_201(self, client, auth, mocker):
mocker.patch("src.utils.shell")
response = client.post(
"/filesystem/tmp/",
headers=auth,
data={"files": (io.BytesIO(b"text"), "file.txt")},
content_type="multipart/form-data",
)
assert response.status_code == 201
def test_path_not_a_directory_returns_400(self, client, auth, mocker):
stderr = "/tmp/file.txt: Not a directory"
err = subprocess.CalledProcessError(cmd="", returncode=1, stderr=stderr)
mocker.patch("src.utils.shell", side_effect=err)
response = client.post(
"/filesystem/tmp/file.txt",
headers=auth,
data={"files": (io.BytesIO(b"text"), "file.txt")},
content_type="multipart/form-data",
)
assert response.status_code == 400
def test_create_existing_file_returns_400(self, client, auth, mocker):
mocker.patch("src.utils.shell", return_value="file.txt")
response = client.post(
"/filesystem/tmp/file.txt",
headers=auth,
data={"files": (io.BytesIO(b"text"), "file.txt")},
content_type="multipart/form-data",
)
assert response.status_code == 400
def test_permission_denied_returns_403(self, client, auth, mocker):
stderr = "/tmp/root/: Permission denied"
err = subprocess.CalledProcessError(cmd="", returncode=1, stderr=stderr)
mocker.patch("src.utils.shell", side_effect=err)
response = client.post(
"/filesystem/tmp/root/",
headers=auth,
data={"files": (io.BytesIO(b"text"), "file.txt")},
content_type="multipart/form-data",
)
assert response.status_code == 403
assert response.json == {
"code": 403,
"message": "permission denied",
"reason": "Forbidden",
}
def test_missing_path_returns_404(self, client, auth, mocker):
stderr = "/tmp/missing/: No such file or directory"
err = subprocess.CalledProcessError(cmd="", returncode=1, stderr=stderr)
mocker.patch("src.utils.shell", side_effect=err)
response = client.post(
"/filesystem/tmp/missing/",
headers=auth,
data={"files": (io.BytesIO(b"text"), "file.txt")},
content_type="multipart/form-data",
)
assert response.status_code == 404
assert response.json == {
"code": 404,
"message": "no such file or directory",
"reason": "Not Found",
}
class TestFilesystemPUT:
def test_valid_file_returns_204(self, client, auth, mocker):
mocker.patch("src.utils.shell", return_value="file.txt")
response = client.put(
"/filesystem/tmp/file.txt",
headers=auth,
data={"files": (io.BytesIO(b"text"), "file.txt")},
content_type="multipart/form-data",
)
assert response.status_code == 204
def test_path_not_a_directory_returns_400(self, client, auth, mocker):
stderr = "/tmp/file.txt: Not a directory"
err = subprocess.CalledProcessError(cmd="", returncode=1, stderr=stderr)
mocker.patch("src.utils.shell", side_effect=err)
response = client.put(
"/filesystem/tmp/file.txt",
headers=auth,
data={"files": (io.BytesIO(b"text"), "file.txt")},
content_type="multipart/form-data",
)
assert response.status_code == 400
def test_permission_denied_returns_403(self, client, auth, mocker):
stderr = "/tmp/root/: Permission denied"
err = subprocess.CalledProcessError(cmd="", returncode=1, stderr=stderr)
mocker.patch("src.utils.shell", side_effect=err)
response = client.put(
"/filesystem/tmp/root/",
headers=auth,
data={"files": (io.BytesIO(b"text"), "file.txt")},
content_type="multipart/form-data",
)
assert response.status_code == 403
assert response.json == {
"code": 403,
"message": "permission denied",
"reason": "Forbidden",
}
def test_missing_path_returns_404(self, client, auth, mocker):
stderr = "/tmp/missing/: No such file or directory"
err = subprocess.CalledProcessError(cmd="", returncode=1, stderr=stderr)
mocker.patch("src.utils.shell", side_effect=err)
response = client.put(
"/filesystem/tmp/missing/",
headers=auth,
data={"files": (io.BytesIO(b"text"), "file.txt")},
content_type="multipart/form-data",
)
assert response.status_code == 404
assert response.json == {
"code": 404,
"message": "no such file or directory",
"reason": "Not Found",
}
def test_update_missing_file_returns_404(self, client, auth, mocker):
mocker.patch("src.utils.shell", return_value="")
response = client.put(
"/filesystem/tmp/",
headers=auth,
data={"files": (io.BytesIO(b"text"), "file.txt")},
content_type="multipart/form-data",
)
assert response.status_code == 404
assert response.json == {
"code": 404,
"message": "file does not exist",
"reason": "Not Found",
}
class TestFilesystemDELETE:
def test_valid_file_returns_204(self, client, auth, mocker):
mocker.patch("src.utils.shell")
response = client.delete("/filesystem/tmp/file.txt", headers=auth)
assert response.status_code == 204
def test_path_is_a_directory_returns_400(self, client, auth, mocker):
stderr = "/tmp/dir/: is a directory"
err = subprocess.CalledProcessError(cmd="", returncode=1, stderr=stderr)
mocker.patch("src.utils.shell", side_effect=err)
response = client.delete("/filesystem/tmp/dir/", headers=auth)
assert response.status_code == 400
def test_permission_denied_returns_403(self, client, auth, mocker):
stderr = "/tmp/root/: Permission denied"
err = subprocess.CalledProcessError(cmd="", returncode=1, stderr=stderr)
mocker.patch("src.utils.shell", side_effect=err)
response = client.delete("/filesystem/tmp/root/", headers=auth)
assert response.status_code == 403
assert response.json == {
"code": 403,
"message": "permission denied",
"reason": "Forbidden",
}
def test_delete_missing_file_returns_404(self, client, auth, mocker):
stderr = "/tmp/file.txt: No such file or directory"
err = subprocess.CalledProcessError(cmd="", returncode=1, stderr=stderr)
mocker.patch("src.utils.shell", side_effect=err)
response = client.delete("/filesystem/tmp/file.txt", headers=auth)
assert response.status_code == 404
assert response.json == {
"code": 404,
"message": "no such file or directory",
"reason": "Not Found",
}
| 2.203125 | 2 |
| attr_functions.py | ecreager/beta-tcvae | 0 | 12779961 |
"""
Convert ground truth latent classes into binary sensitive attributes
"""
def attr_fn_0(y):
return y[:,0] >= 1
def attr_fn_1(y):
return y[:,1] >= 1
def attr_fn_2(y):
return y[:,2] >= 3
def attr_fn_3(y):
return y[:,3] >= 20
def attr_fn_4(y):
return y[:,4] >= 16
def attr_fn_5(y):
return y[:,5] >= 16
dsprites_attr_fns = [attr_fn_0, attr_fn_1, attr_fn_2, attr_fn_3, attr_fn_4, attr_fn_5]
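# Illustrative usage (hypothetical latent-class array of shape (N, 6), as expected by the
# functions above); attr_fn_3 thresholds the fourth latent at 20:
#   >>> import numpy as np
#   >>> y = np.array([[0, 2, 4, 25, 10, 30]])
#   >>> attr_fn_3(y)
#   array([ True])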
# celeba stuff
def attr_fn_chubby(a):
return a[:,13] > 0.
def attr_fn_eyeglasses(a):
return a[:,15] > 0.
def attr_fn_male(a):
return a[:,20] > 0.
def attr_fn_heavy_makeup(a):
return a[:,18] > 0.
CELEBA_SUBGROUPS = {
'H': attr_fn_heavy_makeup,
'S': lambda a: a[:,31] > 0., # smiling
'W': lambda a: a[:,36] > 0., # wears lipstick
    'A': lambda a: a[:,2] > 0., # attractive
'C': attr_fn_chubby,
'E': attr_fn_eyeglasses,
'M': attr_fn_male,
'C $\land$ E': lambda a: attr_fn_chubby(a) * attr_fn_eyeglasses(a),
'C $\land$ M': lambda a: attr_fn_chubby(a) * attr_fn_male(a),
'E $\land$ M': lambda a: attr_fn_eyeglasses(a) * attr_fn_male(a),
'C $\land$ $\\neg$ E': lambda a: attr_fn_chubby(a) * (1 - attr_fn_eyeglasses(a)),
'C $\land$ $\\neg$ M': lambda a: attr_fn_chubby(a) * (1 - attr_fn_male(a)),
'E $\land$ $\\neg$ M': lambda a: attr_fn_eyeglasses(a) * (1 - attr_fn_male(a)),
'$\\neg$ C $\land$ E': lambda a: (1 - attr_fn_chubby(a)) * attr_fn_eyeglasses(a),
'$\\neg$ C $\land$ M': lambda a: (1 - attr_fn_chubby(a)) * attr_fn_male(a),
'$\\neg$ E $\land$ M': lambda a: (1 - attr_fn_eyeglasses(a)) * attr_fn_male(a),
'$\\neg$ C $\land$ $\\neg$ E': lambda a: (1 - attr_fn_chubby(a)) * (1 - attr_fn_eyeglasses(a)),
'$\\neg$ C $\land$ $\\neg$ M': lambda a: (1 - attr_fn_chubby(a)) * (1 - attr_fn_male(a)),
'$\\neg$ E $\land$ $\\neg$ M': lambda a: (1 - attr_fn_eyeglasses(a)) * (1 - attr_fn_male(a)),
} # cf. generate_celeba_audit_table.format_subgroups
CELEBA_SENS_IDX = {
'C': [13],
'E': [15],
'M': [20],
'C $\land$ E': [13, 15],
'C $\land$ M': [13, 20],
'E $\land$ M': [15, 20],
'C $\land$ $\\neg$ E': [13, 15],
'C $\land$ $\\neg$ M': [13, 20],
'E $\land$ $\\neg$ M': [15, 20],
'$\\neg$ C $\land$ E': [13, 15],
'$\\neg$ C $\land$ M': [13, 20],
'$\\neg$ E $\land$ M': [15, 20],
'$\\neg$ C $\land$ $\\neg$ E': [13, 15],
'$\\neg$ C $\land$ $\\neg$ M': [13, 20],
'$\\neg$ E $\land$ $\\neg$ M': [15, 20],
} # maps named subgroups to the sensitive indices they depend on
# comcrime stuff
CC_ATTR_STRING = 'cc_attr_fn'
def create_cc_attr_fn(i):
def f(y):
# print('column', i)
return y[:, i] #>= 0.5 - should be already binarized
return f
cc_attr_fn_0 = create_cc_attr_fn(0)
cc_attr_fn_1 = create_cc_attr_fn(1)
cc_attr_fn_2 = create_cc_attr_fn(2)
cc_attr_fn_3 = create_cc_attr_fn(3)
cc_attr_fn_4 = create_cc_attr_fn(4)
cc_attr_fn_5 = create_cc_attr_fn(5)
cc_attr_fn_6 = create_cc_attr_fn(6)
cc_attr_fn_7 = create_cc_attr_fn(7)
cc_attr_fn_8 = create_cc_attr_fn(8)
cc_attr_fn_9 = create_cc_attr_fn(9)
cc_attr_fn_10 = create_cc_attr_fn(10)
cc_attr_fn_11 = create_cc_attr_fn(11)
cc_attr_fn_12 = create_cc_attr_fn(12)
cc_attr_fn_13 = create_cc_attr_fn(13)
cc_attr_fn_14 = create_cc_attr_fn(14)
cc_attr_fn_15 = create_cc_attr_fn(15)
cc_attr_fn_16 = create_cc_attr_fn(16)
cc_attr_fn_17 = create_cc_attr_fn(17)
cc_attr_fn_18 = create_cc_attr_fn(18)
if __name__ == '__main__':
import numpy as np
x = np.zeros((10, 10))
print('should print 5')
cc_attr_fn_5(x)
cc_attr_fn_6(x)
cc_attr_fn_7(x)
| 2.84375 | 3 |
| challenge1-titanic/scripts/example.py | xdssio/xdss_academy_challenges | 0 | 12779962 |
# coding: utf-8
# # Example
# In[3]:
import turicreate as tc
# ## Get the data
# In[22]:
data = 'path-to-data-here'
sf = tc.SFrame(data).dropna(columns=['Age'])
train, test = sf.random_split(fraction=0.8)
test, validations = test.random_split(fraction=0.5)
# ## Modeling
# In[27]:
from turicreate import logistic_classifier
model = logistic_classifier.create(train, target='Survived',validation_set=validations)
# ## Evaluate
# Use turicreate to evaluate the model on the test set
# In[ ]:
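# A possible evaluation step (sketch, not from the original notebook; assumes the
# standard turicreate classifier API):
# results = model.evaluate(test)
# print(results['accuracy'])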
| 2.3125 | 2 |
| DLBio/pt_training.py | pgruening/dlbio | 1 | 12779963 |
import argparse
import os
import random
import time
import warnings
from math import cos, pi
import cv2
import numpy as np
import torch
import torch.optim as optim
from DLBio.pt_train_printer import Printer
from DLBio.pytorch_helpers import get_lr
class ITrainInterface():
"""
TrainInterfaces handle the prediction of the network, the loss
computation and the computation of additional training metrics.
These steps can quickly change depending on the dataset, the model
architecture, the task and so on. Therefore, it is reasonable to
create separate modules that are passed to the Training class.
    You need to implement the constructor and the train_step method.
    If the computations in the validation step differ from the train_step,
    you need to overwrite val_step.
"""
def __init__(self, *args, **kwargs):
"""Constructor. Usually you need to provide and process:
- a model
- a device
- implement a loss function
- implement additional metrics
"""
raise NotImplementedError('Needs model and loss fcn and metrics')
def train_step(self, *args, **kwargs):
"""
        In the Training class, this function is called for each drawn batch
like this:
loss, metrics = self.train_interface.train_step(sample)
(for more information see '_train_step' method)
Accordingly, you should compute the loss based on the prediction of
your model and other metrics.
        The loss is used to update the weights of the model.
        Returns a list of (loss, metrics, counters, functions); subsets such as
        (loss, metrics, counters) or (loss, metrics) are also possible.
"""
raise NotImplementedError('Implement to run training')
def val_step(self, *args, **kwargs):
"""
        By default, the same code as in train_step is executed.
"""
# usually exactly the same as the train step
return self.train_step(*args, **kwargs)
def test_step(self, *args, **kwargs):
"""
        By default, the same code as in val_step is executed.
"""
# usually exactly the same as the train step
return self.val_step(*args, **kwargs)
def after_training_process(self, *args, **kwargs):
"""
Use this if you want to run a specific process after the training that
depends on the model
"""
pass
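

# Illustrative sketch (not part of the original module): a minimal ITrainInterface
# implementation for a plain classification task. The model, the device, and the structure
# of `sample` (assumed here to be a dict with 'x' and 'y' tensors, as the 'x' key used in
# loss_verification below suggests) are assumptions.
class ExampleClassificationInterface(ITrainInterface):
    def __init__(self, model, device):
        self.model = model.to(device)
        self.device = device
        self.loss_fcn = torch.nn.CrossEntropyLoss()

    def train_step(self, sample):
        x = sample['x'].to(self.device)
        y = sample['y'].to(self.device)
        prediction = self.model(x)
        loss = self.loss_fcn(prediction, y)
        # additional metric: classification accuracy of the current batch
        accuracy = (prediction.argmax(dim=1) == y).float().mean().item()
        return loss, {'acc': accuracy}

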
class Training():
"""A Class that contains all necessary ingredients to train a pytorch
model. To start training, simply call the instantiated object with the
desired number of epochs, e.g.:
TODO: 'add_do_not_update' boolean for SAM optimization
training = Training(...)
training(100) # train for 100 epochs
"""
def __init__(
self, optimizer, data_loader, train_interface,
save_steps=-1, save_path=None,
printer=None, scheduler=None, clip=None,
retain_graph=False, val_data_loader=None, early_stopping=None,
validation_only=False, save_state_dict=False,
test_data_loader=None, batch_scheduler=None, start_epoch=0,
time_log_printer=None, stop_conditions=[]
):
"""Constructor
Parameters
----------
optimizer : pytorch optimizer
Controls the weight updates, see get_optimizer for more information
data_loader : pytorch dataloader
When iterated over in a for loop, data are returned in batches.
Note that the for loop is executed as
'for sample in data_loader:'
You need to specify what a sample actually is in the training-
interface.
train_interface : ITrainInterface
Computes the loss of a batch, see method _train_step
save_steps : int, optional
            Every 'save_steps' epochs the model is saved to 'save_path'. If 0, the
            model is only saved at the end of the training. By default -1,
which means the model is not saved at all (if early_stopping is
None).
save_path : str, optional
Where to save the model, by default None. Needs to be specified if
            save_steps >= 0. Note that the model is always overwritten, i.e.,
there is only one '[model].pt' file after training at save_path.
printer : Printer (pt_train_printer), optional
Prints current training values to terminal and possibly a
log.json file. By default None, nothing is logged.
scheduler : pytorch scheduler, optional
Updates the learning rate according to some schedule. By default
None, no scheduling is used.
clip : float, optional
Gradient clipping, by default None, no gradient clipping
retain_graph : bool, optional
Needed for special backpropagation functions, see pytorch
documentation for more information. By default False.
val_data_loader : pytorch data_loader, optional
Can be used to validate/test the network performance. These data
are not used for training (but maybe early stopping). The model is
in eval-mode, when those data are processed. The val_step of the
TrainingInterface is applied to these data.
By default None, no validation is done.
early_stopping : EarlyStopping object, optional
Save the model based on a specified metric, each time the best
value of this metric is reached. By default None, no early stopping
validation_only: bool
When called, only the validation steps are computed. Note that, if
the flag is set to true, the model is not trained.
save_state_dict: save the model's state dict instead of the model
test_data_loader : pytorch data_loader, optional
Can be used to test the network performance. The model is
in eval-mode, when those data are processed. The test_step of the
TrainingInterface is applied to these data.
batch_scheduler: BatchScheduler object
For scheduling algorithms that adjust the learning
rate within an epoch, instead each epoch's end.
start_epoch: int
Set to a value other than 0 if a previous training is resumed.
In this case, start_epoch should be set to the last epoch the
previous training stopped.
time_log_printer: Printer (pt_train_printer)
If not none, the time needed for different training steps
is logged and written by this logger.
stop_conditions: List of [IStopCondition]
Similar to early stopping, stops the training based on a
train phase metric (no val- or test metric). Use, for example, to
quickly stop processes where the training does not converge.
Returns
-------
Training object
"""
self.optimizer = optimizer
self.data_loader = data_loader
assert issubclass(train_interface.__class__, ITrainInterface)
self.train_interface = train_interface
self.scheduler = scheduler
self.batch_scheduler = batch_scheduler
self.early_stopping = early_stopping
self.stop_conditions = stop_conditions
if printer is None:
self.printer = Printer(100, None)
else:
self.printer = printer
self.time_log_printer = time_log_printer
self.time_logger = TimeLogger(is_active=(time_log_printer is not None))
assert isinstance(save_steps, int)
if save_steps >= 0:
assert save_path is not None
self.do_save = save_steps >= 0 and save_path is not None
self.save_steps = save_steps
self.save_path = save_path
self.save_state_dict = save_state_dict
print(self.save_state_dict)
self.clip = clip
self.retain_graph = retain_graph
self.phases = ['train']
if val_data_loader is not None:
self.phases.append('validation')
if test_data_loader is not None:
self.phases.append('test')
# there should be no instance with ['train', 'test']. For now ['train', 'val'] should be used instead
# maybe this needs to be changed in the future
if 'test' in self.phases:
assert 'validation' in self.phases, 'No combination train and test allowed.'
self.validation_only = validation_only
if validation_only:
assert 'test' not in self.phases
self.phases = ['validation']
print('Running in validation only mode.')
self.data_loaders_ = {
'train': data_loader,
'validation': val_data_loader,
'test': test_data_loader
}
if start_epoch > 0:
self.start_ep = start_epoch + 1
else:
self.start_ep = 0
if not torch.cuda.is_available():
warnings.warn('No GPU detected. Training can be slow.')
# check for right order of training phases
if 'train' in self.phases and 'validation' in self.phases:
assert self.phases.index('train') == 0
assert self.phases.index('validation') == 1
if 'validation' in self.phases and 'test' in self.phases:
assert self.phases.index('validation') == 1
assert self.phases.index('test') == 2
def __call__(self, epochs_):
"""Train the model for a specified number of epochs
Parameters
----------
epochs_ : int
how many epochs for training
"""
self.printer.restart()
do_stop = False
if self.validation_only:
num_batches = 0
else:
num_batches = len(self.data_loaders_['train'])
if self.start_ep > 0:
if self.batch_scheduler is not None:
self._batch_schedule(
'train', self.start_ep, 0,
self.data_loaders_['train'].batch_size
)
if self.scheduler is not None:
# TODO: if resume, compute the learning rate beforehand
raise NotImplementedError
print('STARTING TRAINING')
for epoch in range(self.start_ep, epochs_):
self.printer.learning_rate = get_lr(self.optimizer)
for current_phase in self.phases:
if current_phase == 'train':
self.train_interface.model.train()
else:
self.train_interface.model.eval()
self.time_logger.start(current_phase + '_load_data')
for idx, sample in enumerate(self.data_loaders_[current_phase]):
self.time_logger.stop(current_phase + '_load_data')
self._batch_schedule(
current_phase, epoch, idx, num_batches
)
loss, metrics, counters, functions = self._iteration_step(
sample, current_phase)
self._update_printer(
epoch, loss, metrics, counters, functions, current_phase
)
if current_phase == 'train':
self._update_weights(loss)
self.time_logger.start(current_phase + '_load_data')
# ----------- end of phase ----------------------------
self.time_logger.stop(
current_phase + '_load_data', do_log=False
)
# do certain actions depending on which phase we are in
if self.early_stopping is not None and current_phase == 'validation':
do_stop = self.early_stopping(
self.printer.get_metrics(),
self.train_interface.model,
self.save_path,
self.save_state_dict
)
if self.stop_conditions and current_phase == 'train':
for sc in self.stop_conditions:
do_stop = sc(epoch, self.printer.get_metrics())
self.printer.on_epoch_end()
self._schedule(current_phase)
self._save(epoch, epochs_, current_phase)
# compute statistics on time values that are collected during
# the upper for-loop
if self.time_log_printer is not None:
self.time_log_printer.update(
torch.tensor([-1]), epoch, metrics=self.time_logger.get_data()
)
self.time_log_printer.on_epoch_end()
self.time_logger.restart()
if do_stop:
return
# -------------------end of epoch -------------------------------
def _iteration_step(self, sample, current_phase):
"""Compute loss and metrics
Parameters
----------
sample : anything provided by the data loader
typically the sample x and the corresponding label
current_phase : str
training, validation, or test
Returns
-------
float, dict
loss value that is used for gradient computation and dictionaries
with metrics, counters, and functions
"""
self.time_logger.start(current_phase + '_iteration_step')
if current_phase == 'validation':
with torch.no_grad():
output = self.train_interface.val_step(sample)
elif current_phase == 'test':
with torch.no_grad():
output = self.train_interface.test_step(sample)
else:
output = self.train_interface.train_step(sample)
functions = None
counters = None
if len(output) == 2:
loss, metrics = output[0], output[1]
elif len(output) == 3:
loss, metrics, counters = output[0], output[1], output[2]
else:
loss, metrics, counters = output[0], output[1], output[2]
functions = output[3]
self.time_logger.stop(current_phase + '_iteration_step')
return loss, metrics, counters, functions
def _update_weights(self, loss):
"""Compute gradient and apply backpropagation
from:
https://discuss.pytorch.org/t/what-step-backward-and-zero-grad-do/33301
Hopefully, you use them in the other order - opt.zero_grad(), loss.backward(), opt.step().
zero_grad clears old gradients from the last step (otherwise you’d just accumulate the gradients from all loss.backward() calls).
loss.backward() computes the derivative of the loss w.r.t. the parameters (or anything requiring gradients) using backpropagation.
opt.step() causes the optimizer to take a step based on the gradients of the parameters.
Parameters
----------
loss : float
error function the weight update is based on
"""
self.time_logger.start('update_weights')
self.optimizer.zero_grad()
self.time_logger.start('loss_backward')
loss.backward(retain_graph=self.retain_graph)
self.time_logger.stop('loss_backward')
if self.clip is not None:
torch.nn.utils.clip_grad_norm_(
self.train_interface.model.parameters(), self.clip
)
self.time_logger.start('opt_step')
self.optimizer.step()
self.time_logger.stop('opt_step')
self.time_logger.stop('update_weights')
def _update_printer(self, epoch, loss, metrics, counters, functions, current_phase):
"""Pass the necessary values to the printer
Parameters
----------
epoch : int
Current epoch
loss : float
Current loss value
metrics : dict
current_phase : str
If the current phase is validation, all metrics/losses/etc. are renamed
from [name] to val_[name]. If the current phase is test, all they are renamed to test_[name].
"""
self.time_logger.start(current_phase + '_update_printer')
if current_phase == 'train':
self.printer.update(loss, epoch, metrics, counters, functions)
else:
prefix = {'validation': 'val_', 'test': 'test_'}[current_phase]
if metrics is not None:
metrics = {prefix + k: v for (k, v) in metrics.items()}
if counters is not None:
counters = {prefix + k: v for (k, v) in counters.items()}
if functions is not None:
functions = {prefix + k: v for (k, v) in functions.items()}
self.printer.update(
loss, epoch, metrics,
counters, functions, loss_key=prefix + 'loss'
)
self.time_logger.stop(current_phase + '_update_printer')
self.printer.print_conditional()
def _schedule(self, current_phase):
"""Update the scheduler after each training epoch.
"""
if self.scheduler is not None and current_phase == 'train':
self.time_logger.start('schedule')
self.scheduler.step()
self.time_logger.stop('schedule')
def _batch_schedule(self, current_phase, epoch, iteration, num_batches):
"""Update the scheduler after each training batch.
"""
if self.batch_scheduler is not None and current_phase == 'train':
self.time_logger.start('batch_schedule')
self.batch_scheduler.step(epoch, iteration, num_batches)
self.time_logger.stop('batch_schedule')
def _save(self, epoch, epochs_, current_phase):
"""save the model to model path every 'save_steps' epochs.
Parameters
----------
epoch : int
current epoch
epochs_ : int
number of epochs for entire training
current_phase: str
is this function called after training, val or testing? Only after
validation, the model is saved.
"""
# only save after validation
if current_phase != 'validation' and 'validation' in self.phases:
return
if self.do_save:
self.time_logger.start('save')
is_last_epoch = (epoch == epochs_ - 1)
if self.save_steps > 0:
is_save_intervall = epoch % self.save_steps == 0
else:
is_save_intervall = False
if is_last_epoch or is_save_intervall:
torch_save_model(
self.train_interface.model,
self.save_path,
self.save_state_dict
)
self.time_logger.stop('save')
def get_optimizer(opt_id, parameters, learning_rate, **kwargs):
""" Simple getter function for a pytorch optimizer
Parameters
----------
opt_id : str
Which optimizer, e.g., SGD or Adam
parameters : model.parameters
pytorch variables that shall be updated, usually model.parameters()
is passed
learning_rate : float
Returns
-------
pytorch optimizer
Raises
------
ValueError
if unknown opt_id
"""
if opt_id == 'SGD':
if 'momentum' not in kwargs.keys():
warnings.warn(f'Using default momentum for SGD: {.9}')
if 'weight_decay' not in kwargs.keys():
warnings.warn(f'Using default weight_decay for SGD {0.}')
optimizer = optim.SGD(parameters,
lr=learning_rate,
momentum=kwargs.get('momentum', .9),
weight_decay=kwargs.get('weight_decay', 0.),
nesterov=kwargs.get('nesterov', False)
)
elif opt_id == 'Adam':
if 'weight_decay' not in kwargs.keys():
            warnings.warn(f'Using default weight_decay for Adam {0.}')
optimizer = optim.Adam(
parameters,
lr=learning_rate,
weight_decay=kwargs.get('weight_decay', 0.)
)
elif opt_id == 'lamb':
from pytorch_lamb import Lamb
if 'weight_decay' not in kwargs.keys():
            warnings.warn(f'Using default weight_decay for Lamb {0.001}')
optimizer = Lamb(
parameters,
lr=learning_rate, weight_decay=kwargs.get('weight_decay', 0.001),
betas=(kwargs.get('beta0', .9), kwargs.get('beta1', .999))
)
elif opt_id == 'AdaDelta':
if 'weight_decay' not in kwargs.keys():
            warnings.warn(f'Using default weight_decay for AdaDelta {0.}')
optimizer = optim.Adadelta(
parameters,
lr=learning_rate,
weight_decay=kwargs.get('weight_decay', 0.),
rho=kwargs.get('rho', 0.9),
eps=kwargs.get('eps', 1e-3)
)
elif opt_id == 'RMSProb':
if 'weight_decay' not in kwargs.keys():
warnings.warn(f'Using default weight_decay for RMSprop {0.}')
        optimizer = optim.RMSprop(
            parameters,
            lr=learning_rate,
            alpha=kwargs.get('alpha', 0.99),
            eps=kwargs.get('eps', 1e-08),
            weight_decay=kwargs.get('weight_decay', 0.),
            momentum=kwargs.get('momentum', 0.),
            centered=kwargs.get('centered', False)
        )
else:
raise ValueError(f'Unknown opt value: {opt_id}')
return optimizer
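

# Usage sketch (illustrative; `net` is assumed to be an existing torch.nn.Module):
#   optimizer = get_optimizer('SGD', net.parameters(), learning_rate=0.1,
#                             momentum=0.9, weight_decay=5e-4, nesterov=True)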
def get_scheduler(lr_steps, epochs, optimizer, gamma=.1, fixed_steps=None):
"""returns a pytorch scheduler
Parameters
----------
lr_steps : int
        the learning rate is altered in 'lr_steps' uniformly spaced steps
epochs : int
number of epochs for the entire training
optimizer : pytorch optimizer
gamma : float, optional
the learning rate is multiplied by gamma, by default .1
Returns
-------
pytorch scheduler
"""
if fixed_steps is not None:
assert lr_steps == 0, 'no lr_steps if fixed steps is used'
# might be filled with strings, when coming from argparse
fixed_steps = [int(x) for x in fixed_steps]
scheduler = optim.lr_scheduler.MultiStepLR(
optimizer, fixed_steps,
gamma=gamma
)
print(f'fixed rate scheduling at: {fixed_steps}')
return scheduler
if lr_steps < 1:
return None
assert lr_steps < epochs, f'Epochs must be greater than lr_steps but e:{epochs} < l:{lr_steps}'
step_size = epochs // lr_steps
print(f'Sched step size: {step_size}')
scheduler = optim.lr_scheduler.StepLR(
optimizer, step_size,
gamma=gamma, last_epoch=-1
)
return scheduler
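

# Worked example (illustrative): with epochs=90 and lr_steps=3 the step size is 90 // 3 = 30,
# so the learning rate is multiplied by `gamma` after every 30 epochs, i.e. twice within a
# 90-epoch run:
#   scheduler = get_scheduler(3, 90, optimizer, gamma=0.1)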
def set_device(device=None, verbose=True):
"""Use if you have multiple GPUs, but you only want to use a subset.
Use the command 'nvidia-smi' in the terminal for more information on your
pc's gpu setup
Parameters
----------
device : int or list of int, optional
masks all devices but 'device'. By default None, all devices are
visible
"""
if device is not None:
if isinstance(device, list):
device = ','.join(str(x) for x in device)
else:
device = str(device)
os.environ['CUDA_VISIBLE_DEVICES'] = device
if verbose:
print(f'using device {device}')
def set_random_seed(seed):
"""Sets a seed for all training related random functions. The seed is only
identical on the same machine.
Parameters
----------
seed : int
"""
print(f'Setting seed: {seed}')
np.random.seed(seed)
torch.manual_seed(seed)
random.seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
cv2.setRNGSeed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
def _init_fn(worker_id):
np.random.seed(seed + worker_id)
# for debugging purposes some random numbers are generated
# output = {
# 'seed': seed,
# 'torch': torch.randn(1).item(),
# 'cuda': torch.cuda.FloatTensor(1).normal_().item(),
# 'numpy': float(np.random.randn(1)),
# 'python': random.randint(0, 5000)
# }
# with open(os.path.join(options.folder_name, 'rand_num_test.json'), 'w') as file:
# json.dump(output, file)
return _init_fn
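# A minimal usage sketch (the dataset object is hypothetical): the returned
# function seeds each DataLoader worker individually.
#
#     worker_init_fn = set_random_seed(42)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=32, num_workers=4,
#                                          worker_init_fn=worker_init_fn)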
def loss_verification(train_interface, data_loader, printer):
"""Run through one epoch and print the corresponding loss.
    When using cross-entropy with a randomly initialized classifier, the initial
    loss should be roughly -ln(1/num_classes) = ln(num_classes). If not, there
    might be something wrong with your code.
Parameters
----------
train_interface : ITrainInterface
data_loader : pytorch data_loader
printer : Printer (pt_train_printer.py)
"""
# verify loss
print('Running loss verification')
with torch.no_grad():
mean_im = 0.0
std_im = 0.0
ctr = 0.0
for sample in data_loader:
mean_im += sample['x'].mean()
std_im += sample['x'].std()
ctr += 1.0
loss, metrics = train_interface.train_step(sample)
printer.update(loss, -1, metrics)
printer.print()
print(f'mean: {mean_im/ctr:.3f} std: {std_im/ctr:.3f}')
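# Worked example for the sanity check above: with num_classes = 10 and a randomly
# initialized classifier, the expected initial cross-entropy loss is roughly
# -ln(1/10) = ln(10) ≈ 2.303.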
class EarlyStopping():
"""Save the best model depending on a specified metric on the validation
set.
Returns
-------
EarlyStopping
"""
def __init__(self, metric_key, get_max=True, epoch_thres=np.inf):
"""Constructor. You need to specify which metric should be observed,
if the value is better when decreased or increased.
For example:
EarlyStopping('val_acc', get_max=True, epoch_thres=10)
keeps track of the validation accuracy. If the current best validation
accuracy (starting from -inf) is exceeded, this value is saved and
the model is saved.
If after 10 epochs the best accuracy is not exceeded, the training
is stopped.
This object is used within the Training class.
Parameters
----------
metric_key : str
Which metric is observed. Needs to be a metric that is present in
the training_interface. val_[name] is also possible.
get_max : bool, optional
Save the model if the new observed metric is above the current best
value (True) or below it (False). By default True.
epoch_thres : int, optional
            if the model has not been saved for 'epoch_thres' epochs,
the training is stopped. By default np.inf, the model is trained
the full number of epochs.
"""
self.key = metric_key
self.get_max = get_max
self.no_update_counter = 0.
self.thres = epoch_thres
if self.thres < np.inf:
warnings.warn(
f'Early stopping: training is stopped after {self.thres} unchanged epochs.')
if get_max:
self.current_val = -np.inf
else:
self.current_val = +np.inf
def __call__(self, metrics, model, save_path, save_state_dict):
value = metrics[self.key]
self.no_update_counter += 1
if self.get_max:
if value > self.current_val:
self._update(value, model, save_path, save_state_dict)
else:
if value < self.current_val:
self._update(value, model, save_path, save_state_dict)
if self.no_update_counter > self.thres:
return True
else:
return False
def _update(self, value, model, save_path, save_state_dict):
self.no_update_counter = 0
self.current_val = value
torch_save_model(model, save_path, save_state_dict)
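# A minimal usage sketch of how this object might be driven manually (the metrics
# dict, model and validate() are hypothetical; within this package the Training
# class is what normally calls it):
#
#     early_stopping = EarlyStopping('val_acc', get_max=True, epoch_thres=10)
#     for epoch in range(epochs):
#         metrics = {'val_acc': validate(model)}     # placeholder
#         stop = early_stopping(metrics, model, 'best_model.pt', save_state_dict=True)
#         if stop:
#             break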
class IStopCondition():
def __call__(self, epoch, metrics):
raise NotImplementedError
def torch_save_model(model, save_path, save_state_dict):
print(f'saving model: {save_path}')
if save_state_dict:
print('save as state dict')
to_save = model.state_dict()
torch.save(to_save, save_path)
else:
torch.save(model, save_path)
print('model saved.')
def get_printer(print_intervall, log_file=None):
"""Convenience function, to get a printer without import pt_train_printer.
Note that only the basic keywords are passed on here!
Parameters
----------
print_intervall : int
print to terminal after n batches, if -1: no printing
log_file : str, optional
path to a json file, by default None: no log-file is saved.
Returns
-------
Printer
"""
return Printer(print_intervall, log_file=log_file)
# taken from https://github.com/d-li14/mobilenetv2.pytorch/blob/master/imagenet.py
class BatchScheduler():
def __init__(self, decay_type, optimizer, initial_learning_rate, warmup, num_epochs, gamma=.1):
self.optimizer = optimizer
self.lr = initial_learning_rate
self.warmup = warmup
self.num_epochs = num_epochs
self.decay_type = decay_type
self.gamma = gamma
def step(self, epoch, iteration, num_iter):
lr = self.optimizer.param_groups[0]['lr']
warmup_epoch = 5 if self.warmup else 0
warmup_iter = warmup_epoch * num_iter
current_iter = iteration + epoch * num_iter
max_iter = self.num_epochs * num_iter
if self.decay_type == 'step':
lr = self.lr * \
(self.gamma ** ((current_iter - warmup_iter) // (max_iter - warmup_iter)))
elif self.decay_type == 'cos':
lr = self.lr * \
(1 + cos(pi * (current_iter - warmup_iter) / (max_iter - warmup_iter))) / 2
elif self.decay_type == 'linear':
lr = self.lr * (1 - (current_iter - warmup_iter) /
(max_iter - warmup_iter))
else:
raise ValueError('Unknown lr mode {}'.format(self.decay_type))
if epoch < warmup_epoch:
lr = self.lr * current_iter / warmup_iter
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
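# A minimal usage sketch (hypothetical train_loader and train_step; the scheduler
# adjusts the learning rate every batch, with an optional 5-epoch warmup):
#
#     scheduler = BatchScheduler('cos', optimizer, initial_learning_rate=0.1,
#                                warmup=True, num_epochs=90)
#     for epoch in range(90):
#         for it, batch in enumerate(train_loader):
#             scheduler.step(epoch, it, len(train_loader))
#             train_step(batch)                      # placeholder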
class TimeLogger():
def __init__(self, is_active):
self.is_active = is_active
self.data = dict()
self.qu = dict()
self.functions = {
'mean': np.mean,
'min': np.min,
'max': np.max,
'std': np.std,
'median': np.median,
'sum': np.sum
}
def restart(self):
if not self.is_active:
return
self.data = dict()
def start(self, key):
if not self.is_active:
return
assert key not in self.qu.keys()
self.qu[key] = time.time()
def stop(self, key, do_log=True):
if not self.is_active:
return
start_time = self.qu.pop(key)
time_needed = time.time() - start_time
if do_log:
self._update(key, time_needed)
def _update(self, key, value):
assert self.is_active
if key not in self.data.keys():
self.data[key] = [value]
else:
self.data[key].append(value)
def get_data(self):
assert self.is_active
out = dict()
for key, values in self.data.items():
values = np.array(values)
for name, fcn in self.functions.items():
tmp = float(fcn(values))
out[key + '_' + name] = tmp
return out
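# A minimal usage sketch (timings are aggregated per key with mean/min/max/std/median/sum):
#
#     timer = TimeLogger(is_active=True)
#     timer.start('forward')
#     ...                                            # timed code
#     timer.stop('forward')
#     stats = timer.get_data()                       # e.g. {'forward_mean': ..., ...}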
def get_train_arg_parser(config):
# Deprecated function
"""Typical argument parser to train a neural network
Parameters
----------
config : module or object
default values for your project
Returns
-------
argument parser
use like this:
import config_module
...
...
def get_options():
parser = get_train_argparser(config_module)
parser.add_argument(...)
...
return parser.parse_args()
"""
parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=config.LEARNING_RATE)
parser.add_argument('--wd', type=float, default=config.WEIGHT_DECAY)
parser.add_argument('--mom', type=float, default=config.MOMENTUM)
parser.add_argument('--opt', type=str, default=config.OPTIMIZER)
parser.add_argument('--bs', type=int, default=config.BATCH_SIZE)
parser.add_argument('--epochs', type=int, default=config.EPOCHS)
parser.add_argument('--lr_steps', type=int, default=config.LR_STEPS)
parser.add_argument('--nw', type=int, default=config.NUM_WORKERS)
parser.add_argument('--sv_int', type=int, default=config.SAVE_INTERVALL)
parser.add_argument('--model_type', type=str, default=config.MODEL_TYPE)
parser.add_argument('--seed', type=int, default=config.SEED)
parser.add_argument('--device', type=int, default=config.DEVICE)
parser.add_argument('--folder', type=str, default=config.DEF_FOLDER)
parser.add_argument('--model_name', type=str, default=config.MODEL_NAME)
parser.add_argument('--in_dim', type=int, default=config.INPUT_DIM)
parser.add_argument('--early_stopping', action='store_true')
parser.add_argument('--es_metric', type=str, default=config.ES_METRIC)
parser.add_argument('--num_classes', type=int, default=config.NUM_CLASSES)
# may be unnecessary for your project
parser.add_argument('--ds_len', type=int, default=config.DATASET_LENGTH)
parser.add_argument('--crop_size', type=int, default=config.CROP_SIZE)
return parser
| 3.09375
| 3
|
app/study/filterAccDis.py
|
kyoungd/material-stock-finder-app
| 0
|
12779964
|
import pandas as pd
import numpy as np
import logging
# IF CHOPPINESS INDEX >= 61.8 - -> MARKET IS CONSOLIDATING
# IF CHOPPINESS INDEX <= 38.2 - -> MARKET IS TRENDING
# https://medium.com/codex/detecting-ranging-and-trending-markets-with-choppiness-index-in-python-1942e6450b58
class WyckoffAccumlationDistribution:
def __init__(self):
self.lookback = 10
self.barCountDistribution = 3
self.barCountVolClimaxRebound = 2
self.barCountAccumulation = 7
self.minVolumeClimax = 5.0 # minimum volume climax - 600%
self.isConsolidating = 61.8
self.isTrending = 38.2
# IF CHOPPINESS INDEX >= 61.8 - -> MARKET IS CONSOLIDATING
def isAccumulating(self, value):
return value > self.isConsolidating
def isDistributing(self, value):
return value < self.isTrending
    # **** Tricky part ****
    # Because the choppiness index already uses a lookback window of previous data,
    # you cannot simply average it again; the averaging is built into the calculation.
    # Instead, check whether any of the values fall into the consolidating or
    # trending regions.
    #
@staticmethod
def get_ci(high, low, close, lookback):
tr1 = pd.DataFrame(high - low).rename(columns={0: 'tr1'})
tr2 = pd.DataFrame(abs(high - close.shift(1))).rename(columns={0: 'tr2'})
tr3 = pd.DataFrame(abs(low - close.shift(1))).rename(columns={0: 'tr3'})
frames = [tr1, tr2, tr3]
tr = pd.concat(frames, axis=1, join='inner').dropna().max(axis=1)
atr = tr.rolling(1).mean()
highh = high.rolling(lookback).max()
lowl = low.rolling(lookback).min()
ci = 100 * np.log10((atr.rolling(lookback).sum()) /
(highh - lowl)) / np.log10(lookback)
return ci
def trimIndexes(self, ci:list, startIndex:int, endIndex:int):
if startIndex < 0:
startIndex = 0
if endIndex > len(ci):
endIndex = len(ci)
if startIndex >= endIndex:
startIndex = endIndex - 1
return startIndex, endIndex
def isDistributionPhase(self, ci: list, volClimaxIndex: int):
startIndex = volClimaxIndex - self.barCountDistribution - 1
endIndex = startIndex + self.barCountDistribution
startIndex, endIndex = self.trimIndexes(ci, startIndex, endIndex)
for i in range(startIndex, endIndex):
if self.isDistributing(ci[i]):
return True
return False
def isAccumulationValid(self, ci:list, volClimaxIndex:int):
endIndex = volClimaxIndex - self.barCountVolClimaxRebound
startIndex = endIndex - self.barCountAccumulation
startIndex, endIndex = self.trimIndexes(ci, startIndex, endIndex)
for value in ci[startIndex:endIndex]:
if self.isAccumulating(value):
return True
return False
def Run(self, symbol:str, df:pd.DataFrame, volClimax:float, volClimaxIndex:int):
try:
if volClimax > self.minVolumeClimax:
data = WyckoffAccumlationDistribution.get_ci(
df['High'], df['Low'], df['Close'], self.lookback)
data = data.dropna()
ci = data.to_numpy()[::-1]
isDistribute = self.isDistributionPhase(ci, volClimaxIndex)
isAccumulate = self.isAccumulationValid(ci, volClimaxIndex)
return isDistribute and isAccumulate
return False
except Exception as e:
logging.error(f'WyckoffAccumlationDistribution.Run: {symbol} - {e}')
print(f'WyckoffAccumlationDistribution.Run: {symbol} - {e}')
return False
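    # A minimal usage sketch (hypothetical inputs; df must contain 'High', 'Low'
    # and 'Close' columns, and volClimax is a ratio, e.g. 6.0 for 600% of average volume):
    #
    #     wyckoff = WyckoffAccumlationDistribution()
    #     flagged = wyckoff.Run('AAPL', df, volClimax=6.0, volClimaxIndex=3)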
def RunWickoff(self, symbol:str, dataf:pd.DataFrame):
        df = dataf[::-1]
        df = df.reset_index(drop=True)
data = WyckoffAccumlationDistribution.get_ci(
df['High'], df['Low'], df['Close'], self.lookback)
data = data.dropna()
| 2.890625
| 3
|
python/OSGridConverter/coordinates/cartesian.py
|
EdwardBetts/OSGridConverter
| 6
|
12779965
|
'''
Created on 17 Mar 2018
@author: julianporter
'''
from OSGridConverter.algebra import Vector3
from OSGridConverter.mapping import Datum
from math import radians,degrees,sin,cos,sqrt,atan2,isnan
class Cartesian (Vector3):
def __init__(self,arg):
try:
phi=radians(arg.latitude)
l =radians(arg.longitude)
s=sin(phi)
c=cos(phi)
e=arg.ellipsoid.eccentricity(1)
nu=arg.ellipsoid.a/sqrt(1.0-e*s*s)
super(Cartesian,self).__init__([nu*c*cos(l),nu*c*sin(l),nu*(1-e)*s])
except:
super(Cartesian,self).__init__(arg)
def transform(self,oldTag,newTag):
if newTag==oldTag: return self
t1=Datum.get(oldTag).transform.inverse
t2=Datum.get(newTag).transform
return Cartesian(t2(t1(self)))
def toLatLong(self,ellipsoid):
try:
t=(1+ellipsoid.eb/self.normSquared)
s=t/sqrt(1+t*t)
c=s/t
if isnan(c): raise Exception()
phi=atan2(self.z+ellipsoid.eb*s*s*s,sqrt(self.x*self.x + self.y*self.y)-ellipsoid.ea*c*c*c)
except:
phi=0
l=atan2(self.y,self.x)
return(degrees(phi),degrees(l))
def __str__(self):
return '({},{},{})'.format(self.x,self.y,self.z)
def transformation(oldTag,newTag,vector):
if newTag==oldTag: return vector
t1=Datum.get(oldTag).transform.inverse
t2=Datum.get(newTag).transform
return Cartesian(t2(t1(vector)))
| 2.984375
| 3
|
ex18-cose_and_bulls.py
|
lew18/practicepython.org-mysolutions
| 0
|
12779966
|
"""
https://www.practicepython.org
Exercise 18: Cows and Bulls
3 chilis
Create a program that will play the “cows and bulls” game with the user.
The game works like this:
Randomly generate a 4-digit number. Ask the user to guess a 4-digit number.
For every digit that the user guessed correctly in the correct place, they
have a “cow”. For every digit the user guessed correctly in the wrong place
is a “bull.” Every time the user makes a guess, tell them how many “cows”
and “bulls” they have. Once the user guesses the correct number, the game
is over. Keep track of the number of guesses the user makes throughout the
game and tell the user at the end.
Say the number generated by the computer is 1038. An example interaction
could look like this:
Welcome to the Cows and Bulls Game!
Enter a number:
>>> 1234
2 cows, 0 bulls
>>> 1256
1 cow, 1 bull
...
Until the user guesses the number.
"""
import random
def generate_target():
return(int(random.triangular() * 10000))
def compare(guess, target):
if guess > 9999:
print("guess must be 4 digits or less, try again.")
return False
cows = bulls = 0
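    # split guess and target into their four decimal digits
    # (thousands, hundreds, tens, units), padding with leading zeros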
g = [ int((guess % 10000) / 1000),
int((guess % 1000) / 100),
int((guess % 100) / 10),
int((guess % 10) / 1) ]
t = [ int((target % 10000) / 1000),
int((target % 1000) / 100),
int((target % 100) / 10),
int((target % 10) / 1) ]
for i in 3, 2, 1, 0:
if g[i] == t[i]:
g.pop(i)
t.pop(i)
cows += 1
for i in range(len(g)-1, -1, -1):
if g[i] in t:
t.pop(t.index(g[i]))
g.pop(i)
bulls += 1
if cows == 4:
return True
else:
print("cows: %d, bulls: %d " % (cows, bulls))
return False
if __name__ == "__main__":
target = generate_target()
print("target is %4d" % target)
guess_count = 1
guess = int(input("What's your first guess? "))
    while not compare(guess, target):
guess_count += 1
guess = int(input("What's your next guess? "))
print("Took %d guesses to guess %4d." % (guess_count, target))
| 4.375
| 4
|
public/views/base.py
|
cmisid/Wasty-Database
| 0
|
12779967
|
<filename>public/views/base.py
import json
from django.http import HttpResponse
def ping(request):
return HttpResponse(status=200)
def parse_user(request):
users = json.loads(request.body)
return HttpResponse(status=200)
| 1.890625
| 2
|
poo/class/store.py
|
alfmorais/design-patterns-python
| 0
|
12779968
|
<filename>poo/class/store.py
class Store:
tax = 1.03
def __init__(self, address: str) -> None:
self.__address = address
def show_address(self) -> None:
print(self.__address)
@classmethod
    def sales(cls) -> float:
return 40 * cls.tax
@classmethod
    def change_tax(cls, new_tax: float) -> None:
cls.tax = new_tax
loja_praia = Store("Praia")
loja_downtown = Store("downtown")
loja_praia.show_address()
loja_downtown.show_address()
print(loja_praia.sales())
print(loja_downtown.sales())
loja_downtown.change_tax(1.50)
print(loja_praia.sales())
| 2.9375
| 3
|
HomeWorks/Rostik/helper.py
|
PhantomMind/FamousTeamDream
| 0
|
12779969
|
import time
mainIsOn = True
targetValue = -1
while mainIsOn:
print("Select category\n"
"0 - Close App\n"
"1 - Lists\n"
"2 - While\n")
if targetValue == -1:
try:
targetValue = int(input())
except ValueError as e:
print("Wrong statement. Try again!")
targetValue = -1
if targetValue == 1:
print("List = []\n"
"Initialize empty list\n"
"List = [a, b, c]\n"
"Initialize string List\n"
"List = [1, 2, 3]\n"
"Initialize string list\n")
time.sleep(5)
targetValue = -1
if targetValue == 2:
print("while boolean(True of False)\n"
"Or \n"
"a = 0\n"
"while a < 100:\n"
"print(do something)\n"
"a = a + 1")
print("Operator break\n"
"force stop circle while and if")
time.sleep(5)
targetValue = -1
if targetValue == 0:
print("Close app")
time.sleep(1)
print("5")
time.sleep(1)
print("4")
time.sleep(1)
print("3")
time.sleep(1)
print("2")
time.sleep(1)
print("1")
mainIsOn = False
time.sleep(1)
print("Bye Bye!")
| 3.65625
| 4
|
platform/hwconf_data/efr32mg21/modules/PIN/PIN_Snippets.py
|
lenloe1/v2.7
| 0
|
12779970
|
"""
Generated from a template
"""
import efr32mg21.PythonSnippet.RuntimeModel as RuntimeModel
from efr32mg21.modules.PIN.PIN_Defs import PORT_PINS
def activate_runtime():
pass
| 1.414063
| 1
|
zopeskel/plone25_buildout.py
|
jean/ZopeSkel
| 1
|
12779971
|
<reponame>jean/ZopeSkel<gh_stars>1-10
import copy
from zopeskel.plone3_buildout import Plone3Buildout
from zopeskel.base import get_var
class Plone25Buildout(Plone3Buildout):
_template_dir = 'templates/plone2.5_buildout'
summary = "A buildout for Plone 2.5 projects"
help = """
This template creates a buildout for Plone 2.5, appropriate for
development. If you also need ZEO or caching, you may wish to look
at the plone_hosting template.
"""
required_templates = ['plone3_buildout']
vars = copy.deepcopy(Plone3Buildout.vars)
get_var(vars, 'plone_version').default = "2.5.5"
# The Plone3Buildout has an appropriate "use-the-installer"
# message, but this wouldn't be right here, so let's
# override it.
pre_run_msg = ""
| 1.960938
| 2
|
mldictionary_api/routes/api.py
|
PabloEmidio/api-dictionary
| 7
|
12779972
|
<reponame>PabloEmidio/api-dictionary<gh_stars>1-10
import traceback
from flask import Blueprint, jsonify, request
from werkzeug.exceptions import NotFound, InternalServerError, TooManyRequests
from mldictionary_api.models import RedisRequests
from mldictionary_api.const import (
API_PREFIX,
DICTIONARIES,
LOCAL_ADDR,
TOTAL_REQUESTS_ALLOW,
TTL_REQUEST,
)
api = Blueprint('mldictionary_api', __name__, url_prefix=API_PREFIX)
@api.route('/dictionary/en/<word>/')
@api.route('/dictionary/pt/<word>/')
@api.route('/dictionary/es/<word>/')
@api.route('/translator/en-pt/<word>/')
@api.route('/translator/pt-en/<word>/')
def dictionary(word: str):
requests_db = RedisRequests()
choice = request.url.split('/')[5]
dictionary = DICTIONARIES[choice]
request_ip = request.remote_addr
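    # simple per-IP rate limiting: a Redis counter keyed by the client address
    # that expires after TTL_REQUEST seconds (requests from LOCAL_ADDR are exempt)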
total_requests = requests_db.get(f'requests:{request_ip}')
if not (meanings := dictionary.get_meanings(word)):
raise NotFound(f'"{word}" not found, check the spelling and try again')
if request_ip != LOCAL_ADDR:
if total_requests > TOTAL_REQUESTS_ALLOW:
raise TooManyRequests(
f'The address {request_ip} is allow to make only "{TOTAL_REQUESTS_ALLOW}" requests '
f'wait until {int(TTL_REQUEST / 60)} minutes and try again'
)
requests_db.set(f'requests:{request_ip}', str(total_requests + 1), TTL_REQUEST)
return jsonify({'source': dictionary.URL.format(word), 'meanings': meanings}), 200
@api.app_errorhandler(NotFound)
def not_found(err):
traceback.print_tb(err.__traceback__)
return jsonify({'message': err.description}), err.code
@api.app_errorhandler(TooManyRequests)
def too_many_requests(err):
traceback.print_tb(err.__traceback__)
return jsonify({'message': err.description}), err.code
@api.app_errorhandler(InternalServerError)
def internal_error(err):
traceback.print_tb(err.__traceback__)
return jsonify({'message': err.description}), err.code
@api.app_errorhandler(Exception)
def general_exception(err):
traceback.print_tb(err.__traceback__)
    return jsonify({'message': 'Unrecognized error'}), 500
| 2.3125
| 2
|
riccardo-negri/16/x.py
|
Tommimon/advent-of-code-2020
| 3
|
12779973
|
#!/usr/bin/env python
# Day 16 solution of Advent Of Code 2020 by <NAME>
# First part answer: 29878
# Second part answer: 855438643439
switch = False
constraints = []
tickets = []
with open('input.txt', 'r') as f:
for line in f.readlines():
if not switch:
try:
constraints.append(list(map(int, (line[line.index(':')+2:]).strip().replace("or", '-').split('-') )))
except ValueError:
switch = True
if switch:
if line != '\n' and line != "your ticket:\n" and line != "nearby tickets:\n":
tickets.append(list(map(int, line.strip().split(',') )))
real_tickets = []
error_rate = 0
for ticket in tickets:
overall_satis = True
for value in ticket:
satis = False
for constraint in constraints:
if constraint[0] <= value <= constraint[1] or constraint[2] <= value <= constraint[3]:
satis = True
if not satis:
overall_satis = False
error_rate += value
if overall_satis:
real_tickets.append(ticket)
print("First part answer: ", error_rate)
constraints_found = []
already_used_field_ID = []
last_field_ID = {}
matches = {}
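# Constraint-to-field matching by elimination: in each pass, find a constraint that
# is satisfied by exactly one still-unassigned field, fix that assignment, and repeat
# until every constraint has been matched.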
while len(constraints_found) != len(constraints):
possible_matches = {}
for ind, constraint in enumerate(constraints):
if ind not in constraints_found:
possible_matches[ind] = 0
for field_ID in range(0, len(tickets[0])):
if field_ID not in already_used_field_ID:
satis = True
for ticket in real_tickets:
value = ticket[field_ID]
if not (constraint[0] <= value <= constraint[1] or constraint[2] <= value <= constraint[3]):
satis = False
break
if satis:
possible_matches[ind] += 1
last_field_ID[ind] = field_ID
for ind in possible_matches:
if possible_matches[ind] == 1:
constraints_found.append(ind)
already_used_field_ID.append(last_field_ID[ind])
matches[ind] = last_field_ID[ind] # ind is the number of the constraint
break
answer = 1
for i in range(0, 6): # the 'departure' fields are the first six
answer *= tickets[0][matches[i]]
print("Second part answer: ", answer)
| 3.125
| 3
|
graphite2opentsdb.py
|
amarin/graphite2opentsdb
| 0
|
12779974
|
<reponame>amarin/graphite2opentsdb
import copy
import time
import requests
import sys
from requests.exceptions import ConnectionError
import settings
USAGE = '''
Usage: %s <cmd> <args>
Supported commands:
- list:
Simply lists available metrics
- get <graphite.metric.name> [as <target_name>] [from <timeperiod>] [until <timeperiod>] tag1=value1 [tagN=valueN]:
Get Graphite metric <graphite.metric.name> data and return it in OpenTSDB import format, i.e:
<graphite.metric.name> <unix_time> <value> <tags>
if used with "as <target_name>", graphite metric name will replaced.
By default If "from" is omitted, it defaults to 24 hours ago.
If "until" is omitted, it defaults to the current time (now).
Timeperiod can be either in absolute or relative format,
described in Graphite data api description at http://graphite.readthedocs.org/en/latest/render_api.html
Absolute values can be set in the format HH:MM_YYMMDD, YYYYMMDD, MM/DD/YY, or any other at(1)-compatible time format.
        A relative value is a length of time before the current time. It is always preceded by a minus sign ( - )
and followed by a unit of time. Valid units of time:
Abbreviation Unit
s Seconds
min Minutes
h Hours
d Days
w Weeks
mon 30 Days (month)
y 365 Days (year)
Example:
> get server.temperature.temp1 as temp1 from -7d entity=Obj100
''' % sys.argv[0]
def make_graphite_url(path):
graphite_host = settings.GRAPHITE_PORT == 80 and settings.GRAPHITE_HOST or "%s:%s" % (
settings.GRAPHITE_HOST, settings.GRAPHITE_PORT
)
return "%s://%s/%s" % (
settings.GRAPHITE_SCHEMA,
graphite_host,
path
)
def list_metrics(*args):
if args:
print("Arguments not needed here")
exit(64)
metrics_url = make_graphite_url("metrics/index.json")
try:
response = requests.get(metrics_url)
if 200 == response.status_code:
metrics_json = response.json()
if isinstance(metrics_json, list):
for m in metrics_json:
print(m)
else:
print("Unexpected response body")
else:
print("Unexpected response code\n: GET %s\n RESULT %s %s" % (
metrics_url,
response.status_code,
response.reason
))
except ConnectionError as exc:
print("Connection failed to %s: %s" % (metrics_url, exc.message))
def metric_data(*args):
parse_args = list()
parse_args.extend(args)
source_name = parse_args.pop(0)
target_name = "%s" % source_name
opt = None
from_spec = None
until_spec = None
tags = []
    if parse_args:
print("parsing tags: {}".format(parse_args))
if "as" == parse_args[0]:
opt = parse_args.pop(0)
target_name = parse_args.pop(0)
print("Parsed {} {}, rest ones {}".format(opt.upper(), target_name, parse_args))
if "from" == parse_args[0]:
opt = parse_args.pop(0)
from_spec = parse_args.pop(0)
print("Parsed {} {}, rest ones {}".format(opt.upper(), from_spec, parse_args))
if "until" == parse_args[0]:
opt = parse_args.pop(0)
until_spec = parse_args.pop(0)
print("Parsed {} {}, rest ones {}".format(opt.upper(), from_spec, parse_args))
print("the rest should be tags {}".format(parse_args))
tags = parse_args[:]
else:
pass
url = "render/?"
url += "format=json"
url += "&_salt={}".format(time.time())
url += "&target={}".format(source_name)
if from_spec:
url += "&from={}".format(from_spec)
if until_spec:
url += "&until={}".format(until_spec)
metric_data_url = make_graphite_url(url)
try:
response = requests.get(metric_data_url)
if 200 == response.status_code:
metrics_json = response.json()
if not len(metrics_json):
print("Unexpected response body: no data")
exit(64)
if 'datapoints' in metrics_json[0]:
datapoints = metrics_json[0]["datapoints"]
for dp in datapoints:
print("%s %s %s %s" % (
target_name, dp[1], dp[0], " ".join(tags)
))
else:
print("Unexpected response body: no datapoints key")
exit(64)
else:
print("Unexpected response code\n: GET %s\n RESULT %s %s" % (
metric_data_url,
response.status_code,
response.reason
))
exit(64)
except ConnectionError as exc:
print("Connection failed to %s: %s" % (metric_data_url, exc.message))
exit(64)
if len(sys.argv) == 1:
print(USAGE)
exit(0)
else:
cmd = sys.argv[1]
commands = {
"list": list_metrics,
"get": metric_data
}
if cmd in commands:
func_args = [x for x in sys.argv[2:]]
commands[cmd](*func_args)
| 2.984375
| 3
|
model/model.py
|
naderabdalghani/doclense-flask-api
| 0
|
12779975
|
<gh_stars>0
import torch
import torchvision.models as models
import torch.optim as optim
from torchvision import datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, random_split
import numpy as np
from os import path
from PIL import Image
def get_class_names(dataset_path='dataset'):
dirname = path.dirname(__file__)
dataset_path = path.join(dirname, dataset_path)
dataset = datasets.ImageFolder(dataset_path)
class_names = [item[0] for item in dataset.classes]
return class_names
def init_data(dataset_path='dataset'):
batch_size = 20
data_transforms = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
dirname = path.dirname(__file__)
dataset_path = path.join(dirname, dataset_path)
dataset = datasets.ImageFolder(dataset_path, transform=data_transforms)
dataset_len = len(dataset)
train_len = int(0.6 * dataset_len)
valid_len = int(0.2 * dataset_len)
test_len = dataset_len - train_len - valid_len
train_data, valid_data, test_data = random_split(dataset, [train_len, valid_len, test_len])
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
valid_loader = DataLoader(valid_data, batch_size=batch_size)
test_loader = DataLoader(test_data, batch_size=batch_size)
loaders = {'train': train_loader, 'valid': valid_loader, 'test': test_loader}
class_names = [item[0] for item in dataset.classes]
return loaders, class_names
def init_model():
model = models.wide_resnet50_2(pretrained=True)
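    # freeze the pretrained backbone; only the replacement fc layer defined below is trained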
for param in model.parameters():
param.requires_grad = False
n_inputs = model.fc.in_features
alphabet_length = 52
last_layer = torch.nn.Linear(n_inputs, alphabet_length)
model.fc = last_layer
model.fc.requires_grad = True
return model
def train(n_epochs, loaders, model):
valid_loss_min = np.Inf
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.fc.parameters(), lr=0.005, momentum=0.9)
for epoch in range(1, n_epochs + 1):
train_loss = 0.0
valid_loss = 0.0
###################
# train the model #
###################
model.train()
for batch_idx, (data, target) in enumerate(loaders['train']):
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the batch loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update training loss
train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
######################
# validate the model #
######################
model.eval()
for batch_idx, (data, target) in enumerate(loaders['valid']):
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the batch loss
loss = criterion(output, target)
# update average validation loss
valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))
# print training/validation statistics
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch,
train_loss,
valid_loss
))
# save the model if validation loss has decreased
if valid_loss <= valid_loss_min:
print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
valid_loss))
torch.save(model.state_dict(), "model.pt")
valid_loss_min = valid_loss
# return trained model
return model
def test(loaders, model):
test_loss = 0.
correct = 0.
total = 0.
criterion = torch.nn.CrossEntropyLoss()
model.eval()
for batch_idx, (data, target) in enumerate(loaders['test']):
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update average test loss
test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
# convert output probabilities to predicted class
pred = output.data.max(1, keepdim=True)[1]
# compare predictions to true label
correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
total += data.size(0)
print('Test Loss: {:.6f}\n'.format(test_loss))
print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
100. * correct / total, correct, total))
def load_trained_model(model):
dirname = path.dirname(__file__)
model_path = path.join(dirname, 'model.pt')
if path.exists(model_path):
device = torch.device('cpu')
model.load_state_dict(torch.load(model_path, map_location=device))
print("Model loaded successfully")
return
print("Model needs to be trained first!")
return
def predict(model, img, class_names):
model.eval()
    pil_img = Image.fromarray((img * 255).astype(np.uint8)).convert('RGB')
img_transforms = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
transformed_img = img_transforms(pil_img)
transformed_img = torch.unsqueeze(transformed_img, 0)
output = model(transformed_img)
_, prediction = torch.max(output, 1)
letter = class_names[prediction.item()]
return letter
def main():
loaders, class_names = init_data()
model = init_model()
model = train(5, loaders, model)
load_trained_model(model)
test(loaders, model)
if __name__ == "__main__":
main()
| 2.40625
| 2
|
bcs-ui/backend/bcs_web/apis/permissions.py
|
kayinli/bk-bcs
| 0
|
12779976
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from rest_framework import permissions
from backend.bcs_web.constants import ACCESS_TOKEN_KEY_NAME
from backend.components.paas_auth import get_access_token
from backend.utils import FancyDict, whitelist
logger = logging.getLogger(__name__)
class RemoteAccessPermission(permissions.BasePermission):
def has_permission(self, request, view):
if request.user.is_anonymous:
return False
return True
class AccessTokenPermission(RemoteAccessPermission):
message = "no valid access_token"
def has_permission(self, request, view):
has_perm = super().has_permission(request, view)
if not has_perm:
return False
access_token = request.META.get(ACCESS_TOKEN_KEY_NAME, "")
if access_token:
try:
from backend.components.paas_auth import get_user_by_access_token
except ImportError:
pass
else:
user = get_user_by_access_token(access_token)
if user.get("user_id") != request.user.username:
return False
request.user.token = FancyDict(access_token=access_token)
return True
return False
class ClientAccessTokenPermission(RemoteAccessPermission):
message = "no valid access_token"
def has_permission(self, request, view):
has_perm = super().has_permission(request, view)
if not has_perm:
return False
access_token = request.META.get(ACCESS_TOKEN_KEY_NAME, "")
request.user.token = FancyDict(user_access_token=access_token)
access_token = get_access_token().get("access_token")
request.user.token.access_token = access_token
return True
class BKAppPermission(permissions.BasePermission):
"""调用接口的app是否有项目权限"""
def has_permission(self, request, view):
has_perm = super().has_permission(request, view)
if not has_perm:
return False
project_id_or_code = view.kwargs.get("project_id_or_code")
if not project_id_or_code:
return False
app_code = request.user.client.app.app_code
return whitelist.can_access_webconsole(app_code, project_id_or_code)
| 2.171875
| 2
|
model/embedding/bert.py
|
0xflotus/BERT-pytorch
| 1
|
12779977
|
<gh_stars>1-10
import torch.nn as nn
from .token import TokenEmbedding
from .position import PositionalEmbedding
from .segment import SegmentEmbedding
class BERTEmbedding(nn.Module):
def __init__(self, vocab_size, embed_size, dropout=0.1):
super().__init__()
self.token = TokenEmbedding(vocab_size=vocab_size, embed_size=embed_size)
self.position = PositionalEmbedding(self.token.embedding_dim, dropout=dropout)
self.segment = SegmentEmbedding(embed_size=self.token.embedding_dim)
self.dropout = nn.Dropout(p=dropout)
def forward(self, sequence, segment_label):
x = self.position(self.token(sequence)) + self.segment(segment_label)
return self.dropout(x)
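# A minimal usage sketch (hypothetical sizes; assumes the sub-embeddings accept
# integer id tensors of shape (batch, seq_len)):
#
#     import torch
#     emb = BERTEmbedding(vocab_size=30000, embed_size=512)
#     tokens = torch.randint(0, 30000, (8, 64))
#     segments = torch.randint(0, 2, (8, 64))
#     out = emb(tokens, segments)                    # expected shape: (8, 64, 512)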
| 2.515625
| 3
|
data/projects/flutils/tests/unit/moduleutils/test_validate_attr_identifier.py
|
se2p/artifact-pynguin-ssbse2020
| 3
|
12779978
|
<reponame>se2p/artifact-pynguin-ssbse2020
import keyword
import unittest
from unittest.mock import (
Mock,
patch,
)
from flutils.moduleutils import _validate_attr_identifier
class TestValidateAttrIdentifier(unittest.TestCase):
def test_validate_attr_identifier__00(self) -> None:
arg = ''
line = ''
exp = ''
ret = _validate_attr_identifier(arg, line)
self.assertEqual(
ret,
exp,
msg=(
f'\n\n'
f'_validate_attr_identifier({arg!r}, {line!r})\n'
f'expected: {exp!r}\n'
f' got: {ret!r}\n'
)
)
def test_validate_attr_identifier__01(self) -> None:
arg = 'a_name'
line = ''
exp = 'a_name'
ret = _validate_attr_identifier(arg, line)
self.assertEqual(
ret,
exp,
msg=(
f'\n\n'
f'_validate_attr_identifier({arg!r}, {line!r})\n'
f'expected: {exp!r}\n'
f' got: {ret!r}\n'
)
)
def test_validate_attr_identifier__02(self) -> None:
arg = '-arg'
line = ''
with self.assertRaises(AttributeError):
_validate_attr_identifier(arg, line)
def test_validate_attr_identifier__03(self) -> None:
patcher = patch(
'flutils.moduleutils.keyword.iskeyword',
return_value=True
)
iskeyword = patcher.start()
self.addCleanup(patcher.stop)
with self.assertRaises(AttributeError):
_validate_attr_identifier('try', '')
iskeyword.assert_called_once_with('try')
def test_validate_attr_identifier__04(self) -> None:
patcher = patch(
'flutils.moduleutils.keyword.iskeyword',
return_value=False
)
iskeyword = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch(
'flutils.moduleutils._BUILTIN_NAMES',
new=['__a_builtin_name__']
)
patcher.start()
self.addCleanup(patcher.stop)
with self.assertRaises(AttributeError):
_validate_attr_identifier('__a_builtin_name__', '')
iskeyword.assert_called_once_with('__a_builtin_name__')
def test_validate_attr_identifier__05(self) -> None:
patcher = patch(
'flutils.moduleutils.keyword.iskeyword',
return_value=False
)
iskeyword = patcher.start()
self.addCleanup(patcher.stop)
patcher = patch(
'flutils.moduleutils._BUILTIN_NAMES',
new=['__a_builtin_name__']
)
patcher.start()
self.addCleanup(patcher.stop)
patcher = patch(
'flutils.moduleutils._DUNDERS',
new=['__version__']
)
patcher.start()
self.addCleanup(patcher.stop)
with self.assertRaises(AttributeError):
_validate_attr_identifier('__version__', '')
iskeyword.assert_called_once_with('__version__')
| 2.40625
| 2
|
sopel/modules/soma.py
|
Ameenekosan/Yumiko
| 0
|
12779979
|
<gh_stars>0
# old modules from smircbot, re-written in Python 2
# coding=utf8
from sopel.module import commands, rule, priority, example
import requests
@commands("somafm")
def somafm(bot,trigger):
r = requests.get('http://somafm.com/channels.xml')
if r.status_code == 200:
bot.say("")
| 2.140625
| 2
|
tests/evernote/test_stored_note.py
|
KostyaEsmukov/SyncToG
| 8
|
12779980
|
<filename>tests/evernote/test_stored_note.py
import datetime
import os
from contextlib import ExitStack
from pathlib import Path
import pytest
import pytz
from synctogit.evernote.models import Note, NoteMetadata
from synctogit.evernote.stored_note import EvernoteStoredNote
from synctogit.filename_sanitizer import normalize_filename
from synctogit.service.notes.stored_note import CorruptedNoteError
@pytest.fixture
def note_html():
return (
"<!doctype html>\n"
"<!-- PLEASE DO NOT EDIT THIS FILE -->\n"
"<!-- All changes you've done here will be stashed on next sync -->\n"
"<!--+++++++++++++-->\n"
"<!-- guid: eaaaaaae-1797-4b92-ad11-f3f6e7ada8d7 -->\n"
"<!-- updateSequenceNum: 12345 -->\n"
"<!-- title: раз два три название -->\n"
"<!-- created: 2018-07-11 18:10:40+03:00 -->\n"
"<!-- updated: 2018-09-23 22:33:10+03:00 -->\n"
"<!----------------->\n"
"<html>\n"
"<head>\n"
)
@pytest.fixture
def note_header_vars():
return {
"guid": "eaaaaaae-1797-4b92-ad11-f3f6e7ada8d7",
"updateSequenceNum": "12345",
"title": "раз два три название",
"created": "2018-07-11 18:10:40+03:00",
"updated": "2018-09-23 22:33:10+03:00",
}
def test_note_to_html_valid():
timezone_input = pytz.timezone("Europe/Moscow")
timezone_output = pytz.timezone("Asia/Novosibirsk")
note = Note(
title="раз два",
update_sequence_num=12345,
guid="eaaaaaae-1797-4b92-ad11-f3f6e7ada8d7",
updated=timezone_input.localize(datetime.datetime(2018, 9, 27, 1, 14, 3)),
created=timezone_input.localize(datetime.datetime(2018, 9, 27, 1, 14, 3)),
html="<html><head></head><body>три четыре</body></html>".encode(),
resources={},
)
expected_html = (
"<!doctype html>\n"
"<!-- PLEASE DO NOT EDIT THIS FILE -->\n"
"<!-- All changes you've done here will be stashed on next sync -->\n"
"<!--+++++++++++++-->\n"
"<!-- guid: eaaaaaae-1797-4b92-ad11-f3f6e7ada8d7 -->\n"
"<!-- updateSequenceNum: 12345 -->\n"
"<!-- title: раз два -->\n"
"<!-- created: 2018-09-27 05:14:03+07:00 -->\n"
"<!-- updated: 2018-09-27 05:14:03+07:00 -->\n"
"<!----------------->\n"
"<html><head></head><body>три четыре</body></html>"
)
html = EvernoteStoredNote.note_to_html(note, timezone_output)
assert html.decode() == expected_html
def test_get_stored_note_metadata_valid(temp_dir, note_html, note_header_vars):
notes_dir = Path(temp_dir)
note = notes_dir / normalize_filename("Eleven ✨") / "Haircut" / "s1.html"
os.makedirs(str(note.parents[0]))
note.write_text(note_html)
guid, metadata = EvernoteStoredNote.get_stored_note_metadata(notes_dir, note)
assert guid == note_header_vars["guid"]
assert metadata == NoteMetadata(
dir=("Eleven _2728", "Haircut"),
name=("Eleven ✨", "Haircut", note_header_vars["title"]),
update_sequence_num=int(note_header_vars["updateSequenceNum"]),
file="s1.html",
)
@pytest.mark.parametrize(
"is_valid, parts",
[
# fmt: off
(False, []),
(True, ["a"]),
(True, ["a", "b"]),
(False, ["a", "b", "c"]),
# fmt: on
],
)
def test_get_stored_note_metadata_dir_parts(
temp_dir, note_html, note_header_vars, is_valid, parts
):
notes_dir = Path(temp_dir) / "a"
note = notes_dir.joinpath(*parts) / "s1.html"
os.makedirs(str(note.parents[0]))
note.write_text(note_html)
with ExitStack() as stack:
if not is_valid:
stack.enter_context(pytest.raises(CorruptedNoteError))
EvernoteStoredNote.get_stored_note_metadata(notes_dir, note)
@pytest.mark.parametrize(
"note_html",
[
(
# Missing title
"<!doctype html>\n"
"<!-- PLEASE DO NOT EDIT THIS FILE -->\n"
"<!-- All changes you've done here will be stashed on next sync -->\n"
"<!--+++++++++++++-->\n"
"<!-- guid: eaaaaaae-1797-4b92-ad11-f3f6e7ada8d7 -->\n"
"<!-- updateSequenceNum: 12345 -->\n"
"<!-- created: 2018-07-11 18:10:40+03:00 -->\n"
"<!-- updated: 2018-09-23 22:33:10+03:00 -->\n"
"<!----------------->\n"
"<html>\n"
),
(
# Missing guid
"<!doctype html>\n"
"<!-- PLEASE DO NOT EDIT THIS FILE -->\n"
"<!-- All changes you've done here will be stashed on next sync -->\n"
"<!--+++++++++++++-->\n"
"<!-- title: раз два три название -->\n"
"<!-- updateSequenceNum: 12345 -->\n"
"<!-- created: 2018-07-11 18:10:40+03:00 -->\n"
"<!-- updated: 2018-09-23 22:33:10+03:00 -->\n"
"<!----------------->\n"
"<html>\n"
),
(
# Non-numeric updateSequenceNum
"<!doctype html>\n"
"<!-- PLEASE DO NOT EDIT THIS FILE -->\n"
"<!-- All changes you've done here will be stashed on next sync -->\n"
"<!--+++++++++++++-->\n"
"<!-- guid: eaaaaaae-1797-4b92-ad11-f3f6e7ada8d7 -->\n"
"<!-- title: раз два три название -->\n"
"<!-- updateSequenceNum: 123haha -->\n"
"<!-- created: 2018-07-11 18:10:40+03:00 -->\n"
"<!-- updated: 2018-09-23 22:33:10+03:00 -->\n"
"<!----------------->\n"
"<html>\n"
"<head>\n"
),
],
)
def test_get_stored_note_metadata_invalid_vars(temp_dir, note_html, note_header_vars):
notes_dir = Path(temp_dir)
note = notes_dir / "Eleven" / "Haircut" / "s1.html"
os.makedirs(str(note.parents[0]))
note.write_text(note_html)
with pytest.raises(CorruptedNoteError):
EvernoteStoredNote.get_stored_note_metadata(notes_dir, note)
| 2.1875
| 2
|
tests/test_jwt.py
|
nihal-rao/supertokens-fastapi
| 0
|
12779981
|
<filename>tests/test_jwt.py
"""
Copyright (c) 2020, VRAI Labs and/or its affiliates. All rights reserved.
This software is licensed under the Apache License, Version 2.0 (the
"License") as published by the Apache Software Foundation.
You may not use this file except in compliance with the License. You may
obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
from supertokens_fastapi.jwt import get_payload
from supertokens_fastapi.utils import utf_base64encode
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from json import (
dumps
)
from base64 import b64encode
from Crypto.Signature.pkcs1_15 import PKCS115_SigScheme
def test_jwt_get_payload():
key_pair = RSA.generate(bits=2048)
pub_key = key_pair.publickey().export_key().decode(
'utf-8').split('-----BEGIN PUBLIC KEY-----\n')[1]
pub_key = pub_key.split('-----END PUBLIC KEY-----')[0]
pub_key = ''.join(pub_key.split('\n'))
data = {'a': 'test'}
payload = utf_base64encode(dumps(data))
header = utf_base64encode(dumps({
'alg': 'RS256',
'typ': 'JWT',
'version': '2'
}, separators=(',', ':'), sort_keys=True))
msg = header + '.' + payload
hashed_msg = SHA256.new(msg.encode('utf-8'))
signer = PKCS115_SigScheme(key_pair)
signature = b64encode(signer.sign(hashed_msg)).decode('utf-8')
token = msg + '.' + signature
payload_from_func = get_payload(token, pub_key)
assert payload_from_func == data
| 2.015625
| 2
|
myworkspace/wechat02.py
|
lvzhang/test
| 0
|
12779982
|
#!/usr/bin/env python
#_*_ coding=utf-8 *_*
"""
Function:微信消息自动回复
Date:2015/05/26
Author:lvzhang
ChangeLog:v0.1 init
"""
import itchat
@itchat.msg_register('Text')
def text_replay(msg):
    # implement your own question-answering logic here
print("已经自动回复")
return "[自动回复]您好,我正忙,一会儿再联系您!!!"
if __name__=="__main__":
print("运行成功!!!")
itchat.auto_login(hotReload=True)
itchat.run()
| 2.21875
| 2
|
app.py
|
zmwangx/ncov
| 2
|
12779983
|
<filename>app.py
#!/usr/bin/env python3
import pathlib
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table as dt
import plotly.graph_objs as go
from dash_dangerously_set_inner_html import DangerouslySetInnerHTML
from plotly.subplots import make_subplots
HERE = pathlib.Path(__file__).resolve().parent
datafile = HERE / "data.csv"
app = dash.Dash(
__name__,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
)
app.title = "新型冠状病毒肺炎疫情历史数据"
server = app.server
def category_scatter(df, category, color, stacked=False):
return go.Scatter(
x=df[category].index,
y=df[category],
line=dict(color=color),
mode="lines+markers",
name=category,
stackgroup="one" if stacked else None,
)
def category_bar(df, category, color):
return go.Bar(
x=df[category].index,
y=df[category],
marker_color=color,
opacity=0.25,
name=category,
)
def plot_categories(
df, categories, colors, stacked=False, overlay_categories=None, overlay_colors=None
):
scatter_data = [
category_scatter(df, category, color, stacked=stacked)
for category, color in zip(categories, colors)
]
if not overlay_categories:
fig = go.Figure(data=scatter_data)
else:
fig = make_subplots(specs=[[{"secondary_y": True}]])
for data in scatter_data:
fig.add_trace(data, secondary_y=False)
bar_data = [
category_bar(df, category, color)
for category, color in zip(overlay_categories, overlay_colors)
]
for data in bar_data:
fig.add_trace(data, secondary_y=True)
fig.update_layout(
plot_bgcolor="white",
legend=dict(x=0, y=1, bgcolor="rgba(255, 255, 255, 0.5)"),
margin=go.layout.Margin(l=20, r=20, b=40, t=40, pad=0),
hovermode="x",
bargap=0.4,
)
axes_common_args = dict(
fixedrange=True,
linecolor="rgb(192, 192, 192)",
zerolinecolor="rgb(192, 192, 192)",
gridcolor="rgb(230, 230, 230)",
zerolinewidth=1,
)
fig.update_xaxes(tickformat="%m-%d", **axes_common_args)
yaxes_common_args = dict(rangemode="tozero", **axes_common_args)
if not overlay_categories:
fig.update_yaxes(**yaxes_common_args)
else:
fig.update_yaxes(**yaxes_common_args, secondary_y=False)
fig.update_yaxes(**yaxes_common_args, tickformat=".1%", secondary_y=True)
fig.update_yaxes(gridcolor="white", secondary_y=True)
return fig
def setup():
df = pd.read_csv(datafile, index_col=0, parse_dates=[0])
df_display = df.rename(index=lambda d: d.strftime("%m-%d"))[::-1]
df["重症比例"] = df["当前重症"] / df["当前确诊"]
df["治愈率"] = df["治愈"] / df["累计确诊"]
df["死亡率"] = df["死亡"] / df["累计确诊"]
df["非湖北重症比例"] = df["非湖北当前重症"] / df["非湖北当前确诊"]
df["非湖北治愈率"] = df["非湖北治愈"] / df["非湖北累计确诊"]
df["非湖北死亡率"] = df["非湖北死亡"] / df["非湖北累计确诊"]
national_columns = [col for col in df_display.columns if "湖北" not in col]
hubei_columns = [col for col in df_display.columns if col.startswith("湖北")]
outside_hubei_columns = [col for col in df_display.columns if col.startswith("非湖北")]
tables = [
(
label,
dt.DataTable(
columns=[{"name": "", "id": "category"}]
+ [{"name": date, "id": date} for date in df_display.index],
data=[
{"category": series.name, **series.to_dict()}
for series in df_display[cols].to_dict("series").values()
],
style_table={"overflowX": "scroll"},
style_header={
"backgroundColor": "rgb(230, 230, 230)",
"fontWeight": "bold",
},
style_cell_conditional=[
{"if": {"column_id": "category"}, "textAlign": "center"}
],
style_data_conditional=[
{
"if": {"row_index": "odd"},
"backgroundColor": "rgb(248, 248, 248)",
}
],
),
)
for label, cols in (
("全国数据", national_columns),
("湖北数据", hubei_columns),
("非湖北数据", outside_hubei_columns),
)
]
confirmed_color = "#f06061"
severe_color = "#8c0d0d"
suspected_color = "#ffd661"
cured_color = "#65b379"
death_color = "#87878b"
other_color1 = "#cc00ff"
other_color2 = "#3399ff"
other_color3 = "#9900ff"
figs1 = [
(
"确诊、重症及其比例、疑似走势",
plot_categories(
df,
["累计确诊", "当前确诊", "当前重症", "当前疑似"],
[confirmed_color, other_color1, severe_color, suspected_color],
overlay_categories=["重症比例"],
overlay_colors=[severe_color],
),
),
(
"确诊加疑似走势",
plot_categories(
df, ["累计确诊", "当前疑似"], [confirmed_color, suspected_color], stacked=True
),
),
(
"治愈(率)、死亡(率)走势",
plot_categories(
df,
["治愈", "死亡"],
[cured_color, death_color],
overlay_categories=["治愈率", "死亡率"],
overlay_colors=[cured_color, death_color],
),
),
(
"每日新确诊、重症、疑似走势",
plot_categories(
df,
["新确诊", "新重症", "新疑似"],
[confirmed_color, severe_color, suspected_color],
),
),
("每日新治愈、死亡走势", plot_categories(df, ["新治愈", "新死亡"], [cured_color, death_color])),
(
"追踪、观察走势",
plot_categories(df, ["累计追踪", "当前观察"], [other_color2, other_color3]),
),
]
figs2 = [
(
"非湖北确诊、重症及其比例、疑似走势",
plot_categories(
df,
["非湖北累计确诊", "非湖北当前确诊", "非湖北当前重症", "非湖北当前疑似"],
[confirmed_color, other_color1, severe_color, suspected_color],
overlay_categories=["非湖北重症比例"],
overlay_colors=[severe_color],
),
),
(
"非湖北治愈(率)、死亡(率)走势",
plot_categories(
df,
["非湖北治愈", "非湖北死亡"],
[cured_color, death_color],
overlay_categories=["非湖北治愈率", "非湖北死亡率"],
overlay_colors=[cured_color, death_color],
),
),
(
"湖北内外累计确诊对比",
plot_categories(
df, ["湖北累计确诊", "非湖北累计确诊"], [severe_color, confirmed_color], stacked=True
),
),
]
app.layout = html.Div(
children=[
html.H1(children="新型冠状病毒肺炎疫情历史数据"),
DangerouslySetInnerHTML(
"""<p class="app-note app-note--center">数据主要来自<a href="http://www.nhc.gov.cn/yjb/pqt/new_list.shtml" target="_blank">国家卫生健康委员会卫生应急办公室网站</a></p>
<p class="app-note app-note--center">更多数据:<a href="https://news.qq.com/zt2020/page/feiyan.htm" target="_blank">腾讯新闻疫情实时追踪<a></p>"""
),
dcc.Tabs(
[
dcc.Tab(
label=label,
children=[table],
className="app-tab",
selected_className="app-tab--selected",
)
for label, table in tables
],
className="app-tabs-container",
),
DangerouslySetInnerHTML(
"""<p class="app-note">注1:2月6日前卫健委未直接发布“当前确诊”数据,表中数据系通过“当前确诊=累计确诊−治愈−死亡”计算补充。该计算方法与2月6日起卫健委直接发布的数据相符。</p>
<p class="app-note">注2:2月12日起卫健委未直接发布“湖北新重症”数据,表中数据系通过“湖北当前重症”较前日的增量计算补充。该计算方法与2月12日前直接发布的数据相符。</p>
<p class="app-note">注3:2月12日前部分国家卫健委未公示的湖北省数据来自<a href="http://wjw.hubei.gov.cn/fbjd/tzgg/index.shtml" target="_blank">湖北省卫健委网站</a>。</p>
<p class="app-note">注4:非湖北数据仅限我国,系相应全国数据减去相应湖北数据所得。</p>
"""
),
*[
dcc.Tabs(
[
dcc.Tab(
label=label,
children=[
dcc.Graph(
figure=fig,
config={
"displaylogo": False,
"modeBarButtonsToRemove": [
"pan2d",
"lasso2d",
"toggleSpikelines",
],
},
className="app-plot",
)
],
className="app-tab",
selected_className="app-tab--selected",
)
for label, fig in figs
],
className="app-tabs-container",
)
for figs in (figs1, figs2)
],
],
className="app-container",
)
setup()
def main():
app.run_server(host="0.0.0.0", debug=True)
if __name__ == "__main__":
main()
| 2.578125
| 3
|
workflow/setup.py
|
isciences/wsim
| 5
|
12779984
|
<gh_stars>1-10
#!/usr/bin/env python3
# Import setuptools if we have it (so we can use ./setup.py develop)
# but stick with distutils if we don't (so we can install it without
# needing tools outside of the standard library)
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='wsim_workflow',
version='0.1',
url='https://wsim.isciences.com',
author='ISciences, LLC',
author_email='<EMAIL>',
packages=[
'wsim_workflow',
'wsim_workflow.output',
'wsim_workflow.data_sources'
]
)
| 1.226563
| 1
|
tests/mep/genetics/test_population.py
|
paulfjacobs/py-mep
| 2
|
12779985
|
import unittest
import random
import numpy as np
from mep.genetics.population import Population
from mep.genetics.chromosome import Chromosome
class TestPopulation(unittest.TestCase):
"""
Test the Population class.
"""
def test_random_tournament_selection(self):
"""
Test the random_tournament_selection(...)
"""
# make it so this repeatable
random.seed(0)
# construct the population
num_examples = 5
num_features = 7
population = Population(np.zeros((num_examples, num_features)), [], 1, 1, 1, 1, 1, 1, 1)
# confirm the number of feature variables (not critical for this test)
self.assertEqual(num_features, population.num_feature_variables)
        # test the tournament selection; note that with a tournament size of 1 it can randomly pick the worse chromosome
min_chromosome, max_chromosome = Chromosome([], []), Chromosome([], [])
min_chromosome.error = 1
max_chromosome.error = 2
population.chromosomes = [min_chromosome, max_chromosome]
self.assertEqual(max_chromosome, population.random_tournament_selection(1))
def test_larger_random_tournament_selection(self):
"""
Test the random_tournament_selection(...)
"""
# make it so this repeatable
random.seed(0)
# construct the population
num_examples = 5
num_features = 7
population = Population(np.zeros((num_examples, num_features)), [], 1, 1, 1, 1, 1, 1, 1)
        # test the tournament selection; with a larger tournament size it picks the better (lower-error) chromosome
min_chromosome, max_chromosome = Chromosome([], []), Chromosome([], [])
min_chromosome.error = 1
max_chromosome.error = 2
population.chromosomes = [min_chromosome, max_chromosome]
self.assertEqual(min_chromosome, population.random_tournament_selection(10))
| 3.359375
| 3
|
dotfiles/config/offlineimap/custom.py
|
nathbo/dotfiles
| 1
|
12779986
|
<reponame>nathbo/dotfiles
from subprocess import check_output
def get_password(account):
return check_output("authinfo.sh machine="+account, shell=True).strip(b'\n')
| 2.171875
| 2
|
webapp/views/access/forms.py
|
nwallin1/ezEML
| 3
|
12779987
|
from wtforms import (
StringField, SelectField, HiddenField
)
from webapp.home.forms import EDIForm
class AccessSelectForm(EDIForm):
pass
class AccessForm(EDIForm):
userid = StringField('User ID', validators=[])
permission = SelectField('Permission',
choices=[("all", "all"), ("changePermission", "changePermission"), ("read", "read"), ("write", "write")])
md5 = HiddenField('')
init_str = 'all'
# def field_data(self)->tuple:
# return (self.userid.data, self.permission.data)
| 2.546875
| 3
|
weather/apps/web/climate/lib/climate.py
|
Billykat7/btkweathers
| 0
|
12779988
|
<gh_stars>0
from datetime import datetime
from django.db.models import Q, Sum
from weather.apps.web.climate.models import Climate
def get_weathers(i):
city_req = i['city']
period_start = i['period_start']
period_end = i['period_end']
now = datetime.now()
today = datetime.today().strftime('%Y-%m-%d')
period_start = f"{today}T{period_start}"
period_end = f"{today}T{period_end}"
weathers = Climate.objects \
.values(
'city', 'weather_time', 'period_start', 'period_end', 'min_temp',
'max_temp', 'avg_temp', 'med_temp', 'humidity', 'description', 'icon'
) \
.order_by('city')
if city_req and period_start and period_end:
period_start = datetime.strptime(period_start, "%Y-%m-%dT%H:%M")
period_end = datetime.strptime(period_end, "%Y-%m-%dT%H:%M")
weathers = weathers \
.filter(
Q(city=i['city']) &
Q(period_end__gte=period_end)
) \
.values_list(
'city', 'weather_time', 'period_start', 'period_end', 'min_temp',
'max_temp', 'avg_temp', 'med_temp', 'humidity', 'description', 'icon',
) \
.order_by('city')
return weathers
def get_weather_sum(weathers):
city_labels = []
min_temp_list = []
max_temp_list = []
avg_temp_list = []
humidity_list = []
data_dict = {}
# if city_req and period_start and period_end:
# period_start = datetime.strptime(period_start, "%Y-%m-%dT%H:%M")
# period_end = datetime.strptime(period_end, "%Y-%m-%dT%H:%M")
weathers = weathers \
.values(
'city', 'weather_time', 'period_start', 'period_end', 'min_temp',
'max_temp', 'avg_temp', 'med_temp', 'humidity', 'description', 'icon',
) \
.order_by(
'city'
)
for weather in weathers:
city_labels.append(weather['city'])
min_temp_list.append(weather['min_temp'])
max_temp_list.append(weather['max_temp'])
avg_temp_list.append(weather['avg_temp'])
humidity_list.append(weather['humidity'])
data_dict['city_labels'] = city_labels
data_dict['min_temp_list'] = min_temp_list
data_dict['max_temp_list'] = max_temp_list
data_dict['avg_temp_list'] = avg_temp_list
data_dict['humidity_list'] = humidity_list
print("data_dict = ", data_dict)
return data_dict
| 2.421875
| 2
|
emat/util/xmle/html_table_to_txt.py
|
jinsanity07git/tmip-emat
| 13
|
12779989
|
<reponame>jinsanity07git/tmip-emat
import os
import math
import copy
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
class html_tables(object):
def __init__(self, raw_html):
self.url_soup = BeautifulSoup(raw_html, "lxml")
def read(self):
self.tables = []
self.tables_html = self.url_soup.find_all("table")
# Parse each table
for n in range(0, len(self.tables_html)):
n_cols = 0
n_rows = 0
for row in self.tables_html[n].find_all("tr"):
col_tags = row.find_all(["td", "th"])
if len(col_tags) > 0:
n_rows += 1
if len(col_tags) > n_cols:
n_cols = len(col_tags)
# Create dataframe
df = pd.DataFrame(index = range(0, n_rows), columns = range(0, n_cols))
# Create list to store rowspan values
skip_index = [0 for i in range(0, n_cols)]
this_skip_index = copy.deepcopy(skip_index)
# Start by iterating over each row in this table...
row_counter = 0
for row in self.tables_html[n].find_all("tr"):
# Skip row if it's blank
if len(row.find_all(["td", "th"])) == 0:
pass
else:
# Get all cells containing data in this row
columns = row.find_all(["td", "th"])
col_dim = []
row_dim = []
col_dim_counter = -1
row_dim_counter = -1
col_counter = -1
this_skip_index = copy.deepcopy(skip_index)
for col in columns:
# Determine cell dimensions
colspan = col.get("colspan")
if colspan is None:
col_dim.append(1)
else:
col_dim.append(int(colspan))
col_dim_counter += 1
rowspan = col.get("rowspan")
if rowspan is None:
row_dim.append(1)
else:
row_dim.append(int(rowspan))
row_dim_counter += 1
# Adjust column counter
if col_counter == -1:
col_counter = 0
else:
col_counter = col_counter + col_dim[col_dim_counter - 1]
try:
while skip_index[col_counter] > 0:
col_counter += 1
except IndexError:
from pprint import pprint
print("~"*50)
pprint(locals())
print("~"*50)
raise
# Get cell contents
cell_data = col.get_text()
# Insert data into cell
df.iat[row_counter, col_counter] = cell_data
# Record column skipping index
if row_dim[row_dim_counter] > 1:
this_skip_index[col_counter] = row_dim[row_dim_counter]
# Adjust row counter
row_counter += 1
# Adjust column skipping index
skip_index = [i - 1 if i > 0 else i for i in this_skip_index]
# Append dataframe to list of tables
self.tables.append(df)
return(self.tables)
def xml_table_to_txt(elem):
first_table = html_tables(elem.tostring()).read()[0]
return (first_table.to_string(
buf=None, columns=None, col_space=None, header=True,
index=False, na_rep='', formatters=None, float_format=None,
sparsify=None, index_names=False, justify=None, line_width=None,
max_rows=None, max_cols=None, show_dimensions=False).partition("\n")[2])
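if __name__ == "__main__":
    # Minimal self-contained demo (not part of the original module): parse a small
    # in-memory table, which is roughly what xml_table_to_txt() does for an Elem.
    demo_html = (
        "<table>"
        "<tr><th>name</th><th>value</th></tr>"
        "<tr><td>alpha</td><td>1</td></tr>"
        "<tr><td>beta</td><td>2</td></tr>"
        "</table>"
    )
    demo_table = html_tables(demo_html).read()[0]
    print(demo_table.to_string(index=False))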
| 2.96875
| 3
|
Events/reaction.py
|
hanss314/TheBrainOfTWOWCentral
| 0
|
12779990
|
import time, discord
from Config._functions import grammar_list
class EVENT:
LOADED = False
RUNNING = False
param = { # Define all the parameters necessary
"CHANNEL": "",
"EMOJIS": []
}
# Executes when loaded
def __init__(self):
self.LOADED = True
# Executes when activated
def start(self, TWOW_CENTRAL, PARAMS): # Set the parameters
self.RUNNING = True
# Executes when deactivated
def end(self): # Reset the parameters
self.param = {
"CHANNEL": "",
"EMOJIS": []
}
self.RUNNING = False
# Function that runs on each message
async def on_message(self, message, PERMS):
if message.channel.mention != self.param["CHANNEL"]:
return # Only messages that are in the channel
for emoji in self.param["EMOJIS"]:
try: # Add the reactions
await message.add_reaction(emoji)
except Exception: # If a reaction is invalid, skip it
continue
# Change a parameter of the event
async def edit_event(self, message, new_params):
incorrect = []
correct = []
		for parameter in new_params.keys():
			if parameter in self.param: # only accept parameter names that already exist
				self.param[parameter] = new_params[parameter]
				correct.append(parameter)
			else:
				incorrect.append(parameter)
if len(correct) > 0:
await message.channel.send(f"Successfully changed the parameters: {grammar_list(correct)}")
if len(incorrect) > 0:
await message.channel.send(f"The following parameters are invalid: {grammar_list(incorrect)}")
return
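# Illustrative parameter update (values are made up; CHANNEL is compared against
# message.channel.mention in on_message above, so a channel mention is expected):
#
#   await event.edit_event(message, {"CHANNEL": message.channel.mention, "EMOJIS": ["👍", "👎"]})
#
# Unknown keys are reported back as invalid instead of being silently added.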
| 2.78125
| 3
|
build/lib/annotation_utils/old/converter/__init__.py
|
HienDT27/annotation_utils
| 13
|
12779991
|
<filename>build/lib/annotation_utils/old/converter/__init__.py
from .labelimg_labelme_converter import LabelImgLabelMeConverter
from .labelme_coco_converter import LabelMeCOCOConverter
| 1.015625
| 1
|
problem_4_active_directory.py
|
tomgtqq/Data_Structures_Algorithms-Project2
| 0
|
12779992
|
<reponame>tomgtqq/Data_Structures_Algorithms-Project2
class Group(object):
def __init__(self, _name):
self.name = _name
self.groups = []
self.users = []
def add_group(self, group):
self.groups.append(group)
def add_user(self, user):
self.users.append(user)
def get_groups(self):
return self.groups
def get_users(self):
return self.users
def get_name(self):
return self.name
def is_user_in_group(user=None, group=None):
"""
Return True if user is in the group, False otherwise.
Args:
user(str): user name/id
group(class:Group): group to check user membership against
"""
if user == None:
return "Please check the user"
elif group == None:
return "please check the group"
if not type(group) == Group:
return "please input a valid group"
users = group.get_users()
groups = group.get_groups()
# check if 'user' is among the users of the group.
for user_of_group in users:
if user == user_of_group:
return True
# move on to the groups of 'group', check if 'user' in group
    for sub_group in groups:
        if is_user_in_group(user, sub_group):
            return True
    return False
# Check the user relationship with groups, "assert" should all pass
parent = Group("parent")
child = Group("child")
sub_child = Group("subchild")
sub_sub_child = Group("subsubchild")
sub_child_user = "sub_child_user"
sub_child.add_user(sub_child_user)
sub_child.add_group(sub_sub_child)
child.add_group(sub_child)
parent.add_group(child)
assert (is_user_in_group("sub_child_user", parent)
== True), "Should return True"
assert (is_user_in_group("sub_child_user", child)
== True), "Should return True"
assert (is_user_in_group("sub_child_user", sub_child)
== True), "Should return True"
assert (is_user_in_group("sub_child_user", sub_sub_child)
== True), "Should return False"
assert (is_user_in_group("aaaaaaaaaaaaaa", parent)
== True), "Should return False"
# Check edge cases: missing args 'user' 'group' and invalid group type,
print(is_user_in_group(group=parent)) # should return "Please check the user"
# should return "please check the group"
print(is_user_in_group("sub_child_user"))
# should return "please input a valid group"
print(is_user_in_group("sub_child_user", group="not_group_type"))
| 3.546875
| 4
|
aoc03/03-1.py
|
ssfivy/adventofcode2019
| 0
|
12779993
|
<reponame>ssfivy/adventofcode2019<filename>aoc03/03-1.py
#!/usr/bin/env python3
# I am not familiar enough with doing geometrical stuff in code
# so using python for now, don't want to do two things at the same time
# ok I'm allowing google and external libraries too
from shapely.geometry import LineString
def calculate_shortest_distance(lines):
line = 0
points = [[(0,0)], [(0,0)]]
for l in lines:
x1 = 0
y1 = 0
directions = l.strip().split(',')
for d in directions:
# too many repetitions, not beautiful :(
if d[0] == 'R':
x2 = x1 + int(d[1:])
points[line].append((x2,y1))
x1 = x2
elif d[0] == 'L':
x2 = x1 - int(d[1:])
points[line].append((x2,y1))
x1 = x2
elif d[0] == 'U':
y2 = y1 + int(d[1:])
points[line].append((x1,y2))
y1 = y2
elif d[0] == 'D':
y2 = y1 - int(d[1:])
points[line].append((x1,y2))
y1 = y2
line += 1
#print(points)
minimum = None
line0 = LineString(points[0])
line1 = LineString(points[1])
crossings = line0.intersection(line1)
for cross in crossings:
#print(cross)
distance = abs(cross.x) + abs(cross.y)
if distance == 0:
continue
if minimum is None:
minimum = distance
elif distance < minimum:
minimum = distance
print(minimum)
return minimum
# test 1
test1 = ['R75,D30,R83,U83,L12,D49,R71,U7,L72',
'U62,R66,U55,R34,D71,R55,D58,R83']
result = calculate_shortest_distance(test1)
assert(result == 159)
# test 2
test2 = ['R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51',
'U98,R91,D20,R16,D67,R40,U7,R15,U6,R7']
result = calculate_shortest_distance(test2)
assert(result == 135)
# real thing
with open('03-1-input.txt', 'r') as fb:
inputs = []
for l in fb:
inputs.append(l)
result = calculate_shortest_distance(inputs)
print('Part 1 result: {}'.format(result))
| 3.453125
| 3
|
dokklib_db/errors/transaction.py
|
cmoore-i3/dokklib-db
| 22
|
12779994
|
import re
from typing import Any, Dict, List, Optional, Type
from dokklib_db.errors import exceptions as ex
from dokklib_db.errors.client import ClientError
from dokklib_db.op_args import OpArg
CancellationReasons = List[Optional[Type[ClientError]]]
class TransactionCanceledException(ClientError):
"""The entire transaction request was canceled.
Please see DynamoDB docs for details.
https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html
"""
# Example match: "reasons [ConditionalCheckFailed, None]"
_reasons_re = re.compile(r'reasons\W+\[([A-Za-z0-9, ]+)]', re.MULTILINE)
_codes_to_exceptions: Dict[str, Type[ClientError]] = {
'ConditionalCheckFailed': ex.ConditionalCheckFailedException,
'ItemCollectionSizeLimitExceeded': ex.ItemCollectionSizeLimitExceededException, # noqa 501
'TransactionConflict': ex.TransactionConflictException,
'ProvisionedThroughputExceeded': ex.ProvisionedThroughputExceededException, # noqa 501
'ThrottlingError': ex.ThrottlingError,
'ValidationError': ex.ValidationError
}
def __init__(self, op_args: List[OpArg], *args: Any, **kwargs: Any):
"""Initialize a TransactionCanceledException instance.
Args:
op_args: The list of operations that were the inputs to this
transaction.
"""
super().__init__(*args, **kwargs)
self._op_args = list(op_args)
self._reasons: Optional[CancellationReasons] = None
def _extract_reasons(self, message: str) -> List[str]:
match = re.search(self._reasons_re, message)
if not match:
return []
else:
reasons = match.group(1)
split = reasons.split(', ')
if split[0] == reasons:
return reasons.split(',')
else:
return split
def _get_reasons(self) -> CancellationReasons:
db_error = self.response.get('Error', {})
message = db_error.get('Message', '')
reasons = self._extract_reasons(message)
res: CancellationReasons = []
for r in reasons:
if r == 'None':
res.append(None)
else:
exception = self._codes_to_exceptions.get(r, ClientError)
res.append(exception)
if len(res) != len(self.op_args):
msg = f'Transaction cancellation reasons don\'t match ' \
f'transaction arguments in error:\n{message}'
raise ValueError(msg)
return res
@property
def op_args(self) -> List[OpArg]:
"""Get the list of inputs to the transaction."""
return self._op_args
@property
def reasons(self) -> CancellationReasons:
"""List of cancellation reasons for each item in the transaction.
Corresponds to order of `op_args`.
"""
if self._reasons is None:
self._reasons = self._get_reasons()
return self._reasons
def has_error(self, exception: Type[ClientError]) -> bool:
"""Whether the transaction failed due to a particular exception.
Args:
exception: The exception type to check for, eg. `ValidationError`.
Returns:
True if any of the failure reasons match the exception type.
"""
for r in self.reasons:
if r is exception:
return True
else:
return False
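# Illustrative handling sketch (the transact call and handle_conflict() are
# placeholders; only the exception interface above comes from this module):
#
#   try:
#       table.transact_write_items(op_args)
#   except TransactionCanceledException as e:
#       if e.has_error(ex.ConditionalCheckFailedException):
#           # a condition check failed; e.reasons shows which op_arg was to blame
#           handle_conflict(e.reasons)
#       else:
#           raise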
| 2.28125
| 2
|
test/test_cross_section_box.py
|
ArnaudCrl/pywellcad
| 6
|
12779995
|
<gh_stars>1-10
import unittest
import pathlib
import wellcad.com
from ._sample_path import SamplePath
from ._extra_asserts import ExtraAsserts
class TestCrossSectionBox(unittest.TestCase, SamplePath, ExtraAsserts):
@classmethod
def setUpClass(cls):
cls.app = wellcad.com.Application()
cls.sample_path = cls._find_sample_path()
cls.borehole = cls.app.open_borehole(str(cls.sample_path / "ABI 43 Corrosion Plot.wcl"))
cls.log = cls.borehole.get_log("Cross Section")
cls.item = cls.log.cross_box(0)
@classmethod
def tearDownClass(cls):
cls.app.quit(False)
def test_top_depth(self):
self.assertAlmostEqual(self.item.top_depth, 45.0622, 3)
def test_bottom_depth(self):
self.assertAlmostEqual(self.item.bottom_depth, 45.0622, 3)
if __name__ == '__main__':
unittest.main()
| 2.359375
| 2
|
train_stageII_rank_instance.py
|
ChronousZhang/Dual-path
| 1
|
12779996
|
# -*- coding: utf-8 -*-
from __future__ import print_function,division
import os
import time
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torchvision import datasets,transforms
from load_text import load_dataset
from rank_loss import ImageSelector,TextSelector
from loss import TripletLoss
from model_rank import Merge_image_text
from test_acc import test
import utils
from utils import getDataset
from loader import ClassUniformlySampler
import random
#os.system("ulimit -n 5000000")
torch.multiprocessing.set_sharing_strategy('file_system')
parser = argparse.ArgumentParser(description='Training arguments')
parser.add_argument('--save_path',type=str,default='./flickr30k-56-stage2/')
parser.add_argument('--datasets',type=str,default='/data/reid/flickr30k/Dual-path/')
parser.add_argument('--batch_size',type=int,default=32,help='batch_size')
parser.add_argument('--learning_rate',type=float,default=0.001,help = 'FC parms learning rate')
parser.add_argument('--epochs',type=int,default=120,help='The number of epochs to train')
parser.add_argument('--stage',type=str,default='II',choices=['I','II'],help='which stage is on')
arg = parser.parse_args()
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
#Make saving directory
save_dir_path = arg.save_path
os.makedirs(save_dir_path,exist_ok=True)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# -------------------------------------Train Function--------------------------------------
def train_rank(model,criterion,optimizer,scheduler,dataloder,text_loader,num_epochs,device,stage):
start_time = time.time()
# Logger instance
logger = utils.Logger(save_dir_path)
logger.info('-'*10)
logger.info(vars(arg))
logger.info('Stage: '+stage)
print("############################ Train stage II #############################")
get = list(zip(dataloder,text_loader))
random.shuffle(get)
img,txt = zip(*get)
for epoch in range(num_epochs):
logger.info('Epoch {}/{}'.format(epoch+1,num_epochs))
model.train()
scheduler.step()
##Training
batch_num = 0
loss_avg = []
for (inputs,labels),(text_inputs,text_labels) in zip(img,txt):
batch_num += 1
inputs = inputs.to(device)
labels = labels.to(device)
text_inputs = text_inputs.to(device)
text_labels = text_labels.to(device,dtype=torch.int64)
outputs,text_outs = model(inputs,text_inputs)
# print("output.shape:: ",outputs.shape)
# print("text_out.shape:: ",text_outs.shape)
# print("label.shape: ",labels)
# print("text_label.shape:: ",text_labels)
anc_IT,pos_IT,neg_IT = ImageSelector(outputs,text_outs,labels)
anc_TI,pos_TI,neg_TI = TextSelector(text_outs,outputs,labels)
loss_rank = criterion(anc_IT,pos_IT,neg_IT)+criterion(anc_TI,pos_TI,neg_TI)
optimizer.zero_grad()
loss_rank.backward()
optimizer.step()
loss_avg.append(loss_rank.item())
if batch_num % 10 == 0:
loss_avg= sum(loss_avg) /len(loss_avg)
logger.info('Stage II training : {} [{}]]\t Rank_loss:{:.6f}'.format(epoch+1,batch_num*len(inputs),loss_avg))
loss_avg = []
if (epoch+1)%10==0 or epoch+1 == num_epochs:
            ##Testing / Validating
torch.cuda.empty_cache()
# model.mode = 'test'
CMC,mAP = test(model,arg.datasets,128)
logger.info('Testing: Top1:{:.2f}% Top5:{:.2f}% Top10:{:.2f}% mAP:{:.2f}%'.format(CMC[0],CMC[4],CMC[9],mAP))
print("=======================================================")
logger.info('-'*10)
time_cost = time.time()-start_time
logger.info('Training complete in {:.0f}m {:.0f}s'.format(
time_cost//60,time_cost%60
))
utils.save_network(model,save_dir_path,'final_r')
class IterLoader:
def __init__(self,loader):
self.loader=loader
self.iter = iter(self.loader)
def next_one(self):
try:
return next(self.iter)
except:
self.iter = iter(self.loader)
return next(self.iter)
if __name__ == "__main__":
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Loader image dataset ,PK samples
seeds = random.randint(0,100)
datasets_img = getDataset(arg.datasets,arg.batch_size,'train')
loader = torch.utils.data.DataLoader(datasets_img, batch_size=32, num_workers=0, drop_last=False, # default 16 works
sampler=ClassUniformlySampler(datasets_img, class_position=1, k=4, seeds=seeds))
dataloader_img = IterLoader(loader)
#print('labels_img: ',dataloader_img.next_one()[1])
##Loader txt dataset , PK samples
dataset_text = load_dataset(arg.datasets,'train',arg.batch_size, datasets_img)
loader_txt = torch.utils.data.DataLoader(dataset_text, batch_size=32, num_workers=0, drop_last=False, # 16 works
sampler=ClassUniformlySampler(dataset_text, class_position=1, k=4, seeds=seeds))
dataloader_txt = IterLoader(loader_txt)
#print('dataloader_txt: ',dataloader_txt.next_one()[1])
##############################################
model = Merge_image_text(num_class=len(datasets_img.classes),mode = 'test') #Stage II ,change to 'test',Stage I:'train'
model = model.to(device)
# criterion = nn.CrossEntropyLoss()
criterion = TripletLoss(margin = 1).cuda() #no margin means soft-margin
#delete module parallel
optimizer = optim.SGD([
{'params':model.image_feature.backbone.parameters(),'lr':arg.learning_rate},
# {'params':model.image_feature.fc1.parameters(),'lr':arg.learning_rate},
# {'params':model.image_feature.fc.parameters(),'lr':arg.learning_rate},
{'params':model.text_feature.parameters(),'lr':arg.learning_rate/10}
],lr=0.001,momentum=0.9,weight_decay = 5e-4,nesterov=True)
scheduler = lr_scheduler.StepLR(optimizer,step_size=100,gamma=0.1)
#---------------------Start training----------------------
#Stage I
# train(model,criterion,optimizer,scheduler,train_dataloder,train_dataloder_text,arg.epochs,device,'I')
##Stage II
model.load_state_dict(torch.load('./flickr30k-56-stage1/net_final.pth'),strict=False)
train_rank(model,criterion,optimizer,scheduler,loader,loader_txt,arg.epochs,device,'II')
torch.cuda.empty_cache()
| 2
| 2
|
lib/python/frugal/tests/transport/test_http_transport.py
|
ariasheets-wk/frugal
| 144
|
12779997
|
<gh_stars>100-1000
# Copyright 2017 Workiva
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from base64 import b64encode
from struct import pack_into
import unittest
from mock import Mock
from mock import patch
from thrift.transport.TTransport import TTransportException
from frugal.exceptions import TTransportExceptionType
from frugal.transport.http_transport import THttpTransport
@patch('frugal.transport.http_transport.requests')
class TestTHttpTransport(unittest.TestCase):
def test_request(self, mock_requests):
url = 'http://localhost:8080/frugal'
headers = {'foo': 'bar'}
resp = Mock(status_code=200)
response = b'response'
buff = bytearray(4)
pack_into('!I', buff, 0, len(response))
resp.content = b64encode(buff + response)
mock_requests.post.return_value = resp
def get_headers():
return {'baz': 'qux'}
tr = THttpTransport(url, headers=headers, get_headers=get_headers,
response_capacity=500)
tr.open()
self.assertTrue(tr.isOpen())
data = b'helloworld'
buff = bytearray(4)
pack_into('!I', buff, 0, len(data))
encoded_frame = b64encode(buff + data)
tr.write(data)
tr.flush()
mock_requests.post.assert_called_once_with(
url, data=encoded_frame, timeout=None,
headers={'foo': 'bar', 'baz': 'qux', 'Content-Length': '20',
'Content-Type': 'application/x-frugal',
'Content-Transfer-Encoding': 'base64',
'User-Agent': 'Python/TBaseHttpTransport',
'x-frugal-payload-limit': '500'})
resp = tr.read(len(response))
self.assertEqual(response, resp)
tr.close()
self.assertTrue(tr.isOpen()) # open/close are no-ops
def test_request_timeout(self, mock_requests):
url = 'http://localhost:8080/frugal'
headers = {'foo': 'bar'}
resp = Mock(status_code=200)
response = b'response'
buff = bytearray(4)
pack_into('!I', buff, 0, len(response))
resp.content = b64encode(buff + response)
mock_requests.post.return_value = resp
def get_headers():
return {'baz': 'qux'}
tr = THttpTransport(url, headers=headers, get_headers=get_headers,
response_capacity=500)
tr.open()
self.assertTrue(tr.isOpen())
data = b'helloworld'
buff = bytearray(4)
pack_into('!I', buff, 0, len(data))
encoded_frame = b64encode(buff + data)
tr.set_timeout(5000)
tr.write(data)
tr.flush()
mock_requests.post.assert_called_once_with(
url, data=encoded_frame, timeout=5,
headers={'foo': 'bar', 'baz': 'qux', 'Content-Length': '20',
'Content-Type': 'application/x-frugal',
'Content-Transfer-Encoding': 'base64',
'User-Agent': 'Python/TBaseHttpTransport',
'x-frugal-payload-limit': '500'})
resp = tr.read(len(response))
self.assertEqual(response, resp)
tr.close()
self.assertTrue(tr.isOpen()) # open/close are no-ops
def test_flush_no_body(self, mock_requests):
url = 'http://localhost:8080/frugal'
tr = THttpTransport(url)
tr.flush()
self.assertFalse(mock_requests.post.called)
def test_flush_bad_response(self, mock_requests):
url = 'http://localhost:8080/frugal'
resp = Mock(status_code=500)
mock_requests.post.return_value = resp
tr = THttpTransport(url)
data = b'helloworld'
buff = bytearray(4)
pack_into('!I', buff, 0, len(data))
encoded_frame = b64encode(buff + data)
tr.write(data)
with self.assertRaises(TTransportException):
tr.flush()
mock_requests.post.assert_called_once_with(
url, data=encoded_frame, timeout=None,
headers={'Content-Length': '20',
'Content-Type': 'application/x-frugal',
'Content-Transfer-Encoding': 'base64',
'User-Agent': 'Python/TBaseHttpTransport'})
def test_flush_bad_oneway_response(self, mock_requests):
url = 'http://localhost:8080/frugal'
resp = Mock(status_code=200)
buff = bytearray(4)
pack_into('!I', buff, 0, 10)
resp.content = b64encode(buff)
mock_requests.post.return_value = resp
tr = THttpTransport(url)
data = b'helloworld'
buff = bytearray(4)
pack_into('!I', buff, 0, len(data))
encoded_frame = b64encode(buff + data)
tr.write(data)
with self.assertRaises(TTransportException):
tr.flush()
mock_requests.post.assert_called_once_with(
url, data=encoded_frame, timeout=None,
headers={'Content-Length': '20',
'Content-Type': 'application/x-frugal',
'Content-Transfer-Encoding': 'base64',
'User-Agent': 'Python/TBaseHttpTransport'})
def test_flush_oneway(self, mock_requests):
url = 'http://localhost:8080/frugal'
resp = Mock(status_code=200)
buff = bytearray(4)
pack_into('!I', buff, 0, 0)
resp.content = b64encode(buff)
mock_requests.post.return_value = resp
tr = THttpTransport(url)
data = b'helloworld'
buff = bytearray(4)
pack_into('!I', buff, 0, len(data))
encoded_frame = b64encode(buff + data)
tr.write(data)
tr.flush()
mock_requests.post.assert_called_once_with(
url, data=encoded_frame, timeout=None,
headers={'Content-Length': '20',
'Content-Type': 'application/x-frugal',
'Content-Transfer-Encoding': 'base64',
'User-Agent': 'Python/TBaseHttpTransport'})
resp = tr.read(10)
self.assertEqual(b'', resp)
def test_write_limit_exceeded(self, mock_requests):
url = 'http://localhost:8080/frugal'
resp = Mock(status_code=200)
buff = bytearray(4)
pack_into('!I', buff, 0, 0)
resp.content = b64encode(buff)
mock_requests.post.return_value = resp
tr = THttpTransport(url, request_capacity=5)
data = b'helloworld'
with self.assertRaises(TTransportException) as cm:
tr.write(data)
self.assertEqual(TTransportExceptionType.REQUEST_TOO_LARGE,
cm.exception.type)
self.assertFalse(mock_requests.post.called)
| 1.875
| 2
|
lib/cappconfig.py
|
Aziroshin/mntools
| 0
|
12779998
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
#=======================================================================================
# Imports
#=======================================================================================
import sys
import os
from lib.configutils import *
#=======================================================================================
# Library
#=======================================================================================
#==========================================================
class DefaultsConfigSetup(ConfigSetup):
#=============================
"""'ConfigSetup' for general defaults for cappman."""
#=============================
def __init__(self):
super().__init__()
self.addOption(ConfigOption(varName="cappConfigDirPath", configName="cappconfigdir", optionTypes=[ConfigOptionCanonicalizedFilePathType()], enforceAssignment=True))
self.addOption(\
ConfigOption(varName="pluginDirPaths",\
configName="plugindirs",\
defaultValue="[\""+os.path.realpath(os.path.join(os.path.dirname(sys.argv[0]), "plugins")+"\"]"),\
optionTypes=[ConfigOptionListType(merge=True),\
ConfigOptionCanonicalizedFilePathType()]))
#==========================================================
class Defaults(Namespace):
#=============================
"""Capp Manager default config."""
#=============================
def __init__(self):
super().__init__()
# Hard coded defaults & other vital values.
self.execPath = sys.argv[0]
self.execDirPath = os.path.realpath(os.path.dirname(self.execPath))
self.distConfigDirPath = os.path.join(self.execDirPath, "config")
self.distPluginDirPath = os.path.join(self.execDirPath, "plugins")
self.distCappmanConfigPath = os.path.join(self.distConfigDirPath, "cappman.conf")
self.pluginDirNames = {\
"cappExtensions": "cappextensions",\
"callFlavors": "cappflavors",\
"cappLibs": "capplibs",\
"languages": "languages"}
# Configured defaults.
config = DefaultsConfigSetup().getConfig(configFilePaths=[self.distCappmanConfigPath])
self.cappConfigDirPath = config.cappConfigDirPath
#print("[DEBUG] [cappconfig.py.Defaults]"[self.distPluginDirPath]+config.pluginDirPaths)
self.pluginDirPaths = [self.distPluginDirPath]+config.pluginDirPaths
#==========================================================
class BasicCappConfigSetup(ConfigSetup):
#=============================
"""Basic capp information needed to decide which capp flavor we're dealing with, and other
information that's universal across all flavors."""
#=============================
def __init__(self, configFilePaths=[]):
super().__init__(configFilePaths)
self.addOption(ConfigOption(varName="cappFlavorName", configName="cappflavor", category="main", enforceAssignment=True))
self.addOption(ConfigOption(varName="name", configName="name", category="main", enforceAssignment=True))
#==========================================================
class BasicFlavorConfigSetup(ConfigSetup):
def __init__(self):
super().__init__()
self.addOption(ConfigOption(varName="cappLibName", configName="capplib", category="main", enforceAssignment=True))
| 1.6875
| 2
|
marueditor_debug.py
|
Marusoftware/Marutools
| 0
|
12779999
|
#! /usr/bin/python3
import subprocess
import time
import sys
import os
subprocess.Popen(["./marueditor.py","--debug"])
while 1:
time.sleep(1)
| 1.609375
| 2
|
gefolge_web/event/model.py
|
dasgefolge/gefolge.org
| 2
|
12780000
|
<filename>gefolge_web/event/model.py
import datetime
import itertools
import flask # PyPI: Flask
import icalendar # PyPI: icalendar
import jinja2 # PyPI: Jinja2
import pytz # PyPI: pytz
import class_key # https://github.com/fenhl/python-class-key
import lazyjson # https://github.com/fenhl/lazyjson
import peter # https://github.com/dasgefolge/peter-discord
import gefolge_web.login
import gefolge_web.person
import gefolge_web.util
EVENTS_ROOT = gefolge_web.util.BASE_PATH / 'event'
ORGA_ROLES = ['Abrechnung', 'Buchung', 'Essen', 'Programm', 'Schlüssel']
SILVESTER_CHANNEL = 387264349678338049
class EventGuest(gefolge_web.person.Guest):
def __init__(self, event, guest_id):
self.event = event
self.snowflake = int(guest_id) # not actually a snowflake but uses this variable name for compatibility with the DiscordPerson class
if not any('via' in person and person['id'] == self.snowflake for person in event.data.get('menschen', [])):
raise ValueError('Es gibt keinen Gast mit dieser ID.')
def __eq__(self, other):
return isinstance(other, EventGuest) and self.event == other.event and self.snowflake == other.snowflake
def __hash__(self):
return hash((self.event, self.snowflake))
def __html__(self):
return jinja2.escape(str(self))
def __repr__(self):
return f'gefolge_web.event.model.EventGuest({self.event!r}, {self.snowflake!r})'
def __str__(self):
return self.event.attendee_data(self)['name'].value()
@property
def url_part(self):
return str(self.snowflake)
class EventMeta(type):
def __iter__(self):
# iterating over the Event class yields all events
return iter(sorted(
Event(event_path.stem)
for event_path in EVENTS_ROOT.iterdir()
))
@class_key.class_key()
class Event(metaclass=EventMeta):
def __init__(self, event_id):
self.event_id = event_id
self.data.value() # make sure event exists
def __html__(self):
return jinja2.Markup('<a href="{}">{}</a>'.format(flask.url_for('event_page', event=self.event_id), self))
@property
def __key__(self):
return self.start is None, self.start, self.end is None, self.end, self.event_id
def __repr__(self):
return 'gefolge_web.event.model.Event({!r})'.format(self.event_id)
def __str__(self):
return self.data.get('name', self.event_id)
@property
def anzahlung(self):
if 'anzahlung' in self.data:
if self.data['anzahlung'].value() is None:
return None
return gefolge_web.util.Euro(self.data['anzahlung'].value())
else:
return None
@property
def anzahlung_total(self):
"""Die Summe der bisher gezahlten Anzahlungen."""
if self.anzahlung is None:
return None
return sum((
gefolge_web.util.Euro(self.attendee_data(person).get('anzahlung', self.anzahlung.value))
for person in self.signups
), gefolge_web.util.Euro())
def attendee_data(self, person):
if 'menschen' in self.data:
for iter_data in self.data['menschen']:
if iter_data['id'].value() == person.snowflake:
return iter_data
@property
def ausfall(self):
if 'ausfall' in self.data:
return gefolge_web.util.Euro(self.data['ausfall'].value())
else:
return gefolge_web.util.Euro()
@property
def ausfall_date(self):
if 'ausfallDate' in self.data:
return gefolge_web.util.parse_iso_date(self.data['ausfallDate'].value())
@property
def calendar(self):
import gefolge_web.event.programm
return sorted(itertools.chain((
[gefolge_web.event.programm.CalendarEvent(
self, 'neujahr',
text='Neujahr',
html='Neujahr',
start=self.timezone.localize(datetime.datetime(self.end.year, 1, 1), is_dst=None),
end=self.timezone.localize(datetime.datetime(self.end.year, 1, 1, 1), is_dst=None)
)] if self.end.year > self.start.year else []
), (
[gefolge_web.event.programm.CalendarEvent(
self, 'endreinigung',
text='Endreinigung',
html='Endreinigung',
start=self.end - datetime.timedelta(hours=2),
end=self.end
)] if self.location is not None and not self.location.is_online and 'host' not in self.location.data else []
), itertools.chain.from_iterable(
programmpunkt.calendar_events
for programmpunkt in self.programm
)))
def can_edit(self, editor, profile):
if editor.is_admin:
return True # always allow the admin to edit since they have write access to the database anyway
editor_data = self.attendee_data(editor)
if editor_data is None:
            # people who are not signed up may not edit anything
return False
if len(editor_data.get('orga', [])) > 0:
            # orga members may edit everyone
return True
if gefolge_web.util.now(self.timezone) > self.end:
            # after the event has ended, only the orga may still edit
return False
if editor == profile:
            # people may edit their own profile
return True
if self.proxy(profile) == editor:
            # guest profiles may be edited by their proxies
return True
return False
def capacity(self, night):
if 'capacity' in self.data:
if isinstance(self.data['capacity'].value(), dict):
return self.data['capacity']['{:%Y-%m-%d}'.format(night)].value()
else:
return self.data['capacity'].value()
else:
return self.location.capacity
@property
def channel(self):
return self.data.get('channel', SILVESTER_CHANNEL)
def confirm_guest_signup(self, guest, *, message):
gefolge_web.util.log('eventConfirmSignup', {
'event': self.event_id,
'person': guest.snowflake
})
        self.attendee_data(guest)['signup'] = f'{gefolge_web.util.now(self.timezone):%Y-%m-%dT%H:%M:%S}' #TODO use the date of the bank transfer instead
if guest.is_active and 'role' in self.data:
peter.add_role(guest, self.data['role'].value())
if message:
peter.channel_msg(self.channel, '<@{proxy_snowflake}>: {guest} ist jetzt für {event} angemeldet. Fülle bitte bei Gelegenheit noch das Profil auf <https://gefolge.org/event/{event_id}/mensch/{guest_snowflake}/edit> aus. Außerdem kannst du {guest} auf <https://gefolge.org/event/{event_id}/programm> für Programmpunkte als interessiert eintragen'.format(
proxy_snowflake=self.proxy(guest).snowflake,
guest=f'<@{guest.snowflake}>' if guest.is_active else guest,
event=self,
event_id=self.event_id,
guest_snowflake=guest.snowflake
))
@property
def data(self):
return gefolge_web.util.cached_json(lazyjson.File(EVENTS_ROOT / '{}.json'.format(self.event_id)))
@property
def end(self):
if 'end' in self.data:
return gefolge_web.util.parse_iso_datetime(self.data['end'].value(), tz=self.timezone)
def essen(self, date):
import gefolge_web.event.programm.essen
if date in self.nights:
return gefolge_web.event.programm.essen.Abendessen(self, date)
else:
raise ValueError('Datum liegt außerhalb des event')
def free(self, start=None, end=None):
if end is None:
if start is None:
end = self.end.date()
else:
end = start + datetime.timedelta(days=1)
if start is None:
start = self.start.date()
return min(
self.capacity(night) - len(self.night_signups(night))
for night in gefolge_web.util.date_range(start, end)
)
@property
def guest_signup_block_reason(self):
return self.data.get('guestSignupBlockReason', self.data.get('signupBlockReason'))
@property
def guests(self):
return [
EventGuest(self, person['id']) if person['id'] < 100 else gefolge_web.login.DiscordGuest(person['id'])
for person in self.data.get('menschen', [])
if 'via' in person
]
@property
def location(self):
import gefolge_web.event.location
loc_id = self.data.get('location')
if loc_id is not None:
return gefolge_web.event.location.Location(loc_id)
@property
def menschen(self):
return [
gefolge_web.login.Mensch(person['id'])
for person in self.data.get('menschen', [])
if 'via' not in person
]
def night_going(self, attendee_data, night):
if hasattr(attendee_data, 'snowflake'):
attendee_data = self.attendee_data(attendee_data)
result = attendee_data.get('nights', {}).get('{:%Y-%m-%d}'.format(night), {'going': 'maybe', 'lastUpdated': None})
if isinstance(result, dict):
result = result['going']
return result
def night_log(self, attendee_data, night):
if hasattr(attendee_data, 'snowflake'):
attendee_data = self.attendee_data(attendee_data)
result = attendee_data.get('nights', {}).get('{:%Y-%m-%d}'.format(night), {'going': 'maybe', 'lastUpdated': None})
if isinstance(result, str):
return []
else:
return result.get('log', [])
def night_status_change(self, attendee_data, night):
if hasattr(attendee_data, 'snowflake'):
attendee_data = self.attendee_data(attendee_data)
result = attendee_data.get('nights', {}).get('{:%Y-%m-%d}'.format(night), {'going': 'maybe', 'lastUpdated': None})
if isinstance(result, str):
result = None
else:
result = result['lastUpdated']
if result is not None:
return gefolge_web.util.parse_iso_datetime(result, tz=pytz.utc)
def night_maybes(self, night):
return [
person
for person in self.signups
if self.night_going(person, night) == 'maybe'
]
def night_signups(self, night):
return [
person
for person in self.signups
if self.night_going(person, night) == 'yes'
]
@property
def nights(self):
return gefolge_web.util.date_range(self.start.date(), self.end.date())
def orga(self, aufgabe):
for mensch in self.data.get('menschen', []):
if aufgabe in mensch.get('orga', []):
return gefolge_web.login.Mensch(mensch['id'])
@property
def orga_unassigned(self):
return [role for role in ORGA_ROLES if self.orga(role) is None]
def person(self, snowflake):
if hasattr(snowflake, 'snowflake'):
snowflake = snowflake.snowflake
if snowflake is None:
return None
elif int(snowflake) < 100:
return EventGuest(self, snowflake)
else:
return gefolge_web.login.DiscordPerson(snowflake)
@property
def programm(self):
import gefolge_web.event.programm
import gefolge_web.event.programm.essen
import gefolge_web.event.programm.magic
try:
import werewolf_web # extension for Werewolf games, closed-source to allow the admin to make relevant changes before a game without giving away information to players
except ImportError:
werewolf_web = None
return sorted(itertools.chain((
gefolge_web.event.programm.Programmpunkt(self, name)
for name in self.data.get('programm', {})
if name not in {'custom-magic-draft', 'rtww'} # special, already listed below
), ([] if self.start is None or (self.location is not None and self.location.is_online) else (
gefolge_web.event.programm.essen.Abendessen(self, date)
for date in self.nights
)), (
# Before Lore Seeker was discontinued, Custom Magic Drafts were a regular part of every event.
# Now the repo is no longer cloned on the server where gefolge.org runs, so showing them for future events would cause errors.
[] if self.start is None or self.start >= pytz.utc.localize(datetime.datetime(2021, 1, 1)) or self.event_id in gefolge_web.event.programm.magic.config().get('skippedEvents', []) else [gefolge_web.event.programm.magic.CustomMagicDraft(self)]
), (
[] if werewolf_web is None or not werewolf_web.Setup(self).data_path.parent.exists() else [werewolf_web.RealtimeWerewolf(self)]
)))
def proxy(self, guest):
"""The person who invited this guest to this event. Also called “via”. `None` if `guest` is not a guest."""
if guest.is_guest:
return gefolge_web.login.Mensch(self.attendee_data(guest)['via'].value())
@property
def rooms(self):
if self.location is None:
return None
return self.location.rooms_for(self)
def signup(self, mensch, anzahlung=None):
gefolge_web.util.log('eventConfirmSignup', {
'event': self.event_id,
'person': mensch.snowflake,
'anzahlung': None if anzahlung is None else anzahlung.value
})
if 'menschen' not in self.data:
self.data['menschen'] = []
person_data = {
'id': mensch.snowflake,
'signup': '{:%Y-%m-%dT%H:%M:%S}'.format(gefolge_web.util.now(self.timezone))
}
if anzahlung is not None:
person_data['anzahlung'] = anzahlung.value
self.data['menschen'].append(person_data)
if 'role' in self.data:
peter.add_role(mensch, self.data['role'].value())
if self.orga('Abrechnung').is_treasurer:
peter.channel_msg(self.channel, '<@{}>: du bist jetzt für {} angemeldet. Du kannst dich auf <https://gefolge.org/event/{}/programm> für Programmpunkte als interessiert eintragen'.format(mensch.snowflake, self, self.event_id))
else:
peter.channel_msg(self.channel, '<@{}>: du bist jetzt für {} angemeldet. Fülle bitte bei Gelegenheit noch dein Profil auf <https://gefolge.org/event/{}/me/edit> aus. Außerdem kannst du dich auf <https://gefolge.org/event/{}/programm> für Programmpunkte als interessiert eintragen'.format(mensch.snowflake, self, self.event_id, self.event_id))
@property
def signup_block_reason(self):
return self.data.get('signupBlockReason')
@property
def signups(self):
"""Returns everyone who has completed signup, including guests, in order of signup."""
result = {
guest: self.attendee_data(guest)['signup'].value()
for guest in self.guests
if 'signup' in self.attendee_data(guest)
}
result.update({
mensch: self.attendee_data(mensch)['signup'].value()
for mensch in self.menschen
})
return [person for person, signup in sorted(result.items(), key=lambda kv: kv[1])]
@property
def start(self):
if 'start' in self.data:
return gefolge_web.util.parse_iso_datetime(self.data['start'].value(), tz=self.timezone)
@property
def timezone(self):
if 'timezone' in self.data:
return pytz.timezone(self.data['timezone'].value())
elif self.location is not None:
return self.location.timezone
else:
return pytz.timezone('Europe/Berlin')
def to_ical(self):
result = icalendar.Event()
result.add('summary', str(self))
result.add('dtstart', self.start) #TODO add support for personal start time based on profile
result.add('dtend', self.end) #TODO add support for personal end time based on profile
#TODO date created
#TODO date last modified
result.add('uid', '<EMAIL>'.format(self.event_id))
result.add('location', self.location.address)
result.add('url', flask.url_for('event_page', event=self.event_id, _external=True))
return result
def transaction(self, mensch):
return self.transactions()[mensch] #TODO include guests
def transactions(self):
details = {person: [] for person in self.signups}
# Anzahlung
for person in self.signups:
if 'anzahlung' in self.attendee_data(person):
anzahlung = self.attendee_data(person)['anzahlung'].value()
else:
anzahlung = self.anzahlung.value
if anzahlung != 0:
details[person].append({
'amount': anzahlung,
'label': 'Anzahlung',
'type': 'flat'
})
raise NotImplementedError() #TODO populate details
return {
person: {
'amount': sum(detail['amount'] for detail in details),
'details': details,
'event': self.event_id,
'time': '{:%Y-%m-%dT%H:%M:%SZ}'.format(gefolge_web.util.now(pytz.utc)),
'type': 'eventAbrechnung'
}
for person, details in details.items()
}
def travel_with(self, person, travel):
"""Helper method since Jinja doesn't have while loops"""
seen = set()
while self.attendee_data(person)[travel].get('type') == 'with':
person = self.person(self.attendee_data(person)[travel]['with'].value())
if person in seen:
return person
else:
seen.add(person)
return person
@property
def url_part(self):
return self.event_id
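# Illustrative use of the EventMeta metaclass above: iterating over the Event class
# itself yields every event, sorted by start/end date.
#
#   upcoming = [
#       event for event in Event
#       if event.end is not None and event.end > gefolge_web.util.now(event.timezone)
#   ]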
| 2.1875
| 2
|
logitch/config.py
|
thomaserlang/logitch
| 0
|
12780001
|
import os, yaml
config = {
'debug': False,
'user': '',
'token': '',
'sql_url': '',
'client_id': '',
'client_secret': '',
'cookie_secret': '',
'redirect_uri': '',
'web_port': 8001,
'irc': {
'host': 'irc.chat.twitch.tv',
'port': 6697,
'use_ssl': True,
},
'pubsub_url': 'wss://pubsub-edge.twitch.tv',
'discord': {
'token': None,
'email': None,
'password': None,
'bot': True,
},
'logging': {
'level': 'warning',
'path': None,
'max_size': 100 * 1000 * 1000,# ~ 95 mb
'num_backups': 10,
},
'mysql': {
'host': '127.0.0.1',
'port': 3306,
'user': 'root',
'password': '',
'database': 'logitch',
},
}
def load(path=None):
default_paths = [
'~/logitch.yaml',
'./logitch.yaml',
'../logitch.yaml',
'../../logitch.yaml',
'/etc/logitch/logitch.yaml',
'/etc/logitch.yaml',
]
if not path:
path = os.environ.get('LOGITCH_CONFIG', None)
if not path:
for p in default_paths:
p = os.path.expanduser(p)
if os.path.isfile(p):
path = p
break
if not path:
raise Exception('No config file specified.')
if not os.path.isfile(path):
raise Exception('Config: "{}" could not be found.'.format(path))
with open(path) as f:
data = yaml.load(f)
for key in data:
if key in config:
if isinstance(config[key], dict):
config[key].update(data[key])
else:
config[key] = data[key]
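# Illustrative minimal logitch.yaml (keys mirror the defaults above; nested
# sections such as `logging` or `mysql` are shallow-merged into the defaults):
#
#   user: my_bot_account
#   token: "<twitch oauth token>"
#   sql_url: "<database connection url>"
#   logging:
#     level: info
#     path: /var/log/logitch/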
| 2.21875
| 2
|
decoder/decoder.py
|
Richard-Kirby/strobe
| 0
|
12780002
|
import config.config as config
# Decoder class for use with a rotary encoder.
class decoder:
"""Class to decode mechanical rotary encoder pulses."""
def __init__(self, pi, rot_gpioA, rot_gpioB, switch_gpio, rotation_callback, switch_callback):
"""
Instantiate the class with the pi and gpios connected to
rotary encoder contacts A and B. The common contact
should be connected to ground. The callback is
called when the rotary encoder is turned. It takes
one parameter which is +1 for clockwise and -1 for
counterclockwise.
EXAMPLE
import time
import pigpio
import rotary_encoder
pos = 0
def callback(way):
global pos
pos += way
print("pos={}".format(pos))
pi = config.pigpio.pi()
decoder = rotary_encoder.decoder(pi, 7, 8, callback)
time.sleep(300)
decoder.cancel()
pi.stop()
"""
self.pi = pi
self.rot_gpioA = rot_gpioA
self.rot_gpioB = rot_gpioB
self.rot_callback = rotation_callback
self.sw_callback = switch_callback
self.levA = 0
self.levB = 0
self.lastGpio = None
# Setting up rotary encoder, including callback.
self.pi.set_mode(rot_gpioA, config.pigpio.INPUT)
self.pi.set_mode(rot_gpioB, config.pigpio.INPUT)
self.pi.set_pull_up_down(rot_gpioA, config.pigpio.PUD_UP)
self.pi.set_pull_up_down(rot_gpioB, config.pigpio.PUD_UP)
self.cbA = self.pi.callback(rot_gpioA, config.pigpio.EITHER_EDGE, self._pulse)
self.cbB = self.pi.callback(rot_gpioB, config.pigpio.EITHER_EDGE, self._pulse)
# Setting up switch of rotary encoder.
self.pi.set_mode(switch_gpio, config.pigpio.INPUT)
self.pi.set_mode(switch_gpio, config.pigpio.INPUT)
self.pi.set_pull_up_down(switch_gpio, config.pigpio.PUD_UP)
self.pi.set_pull_up_down(switch_gpio, config.pigpio.PUD_UP)
self.switch_cb = self.pi.callback(switch_gpio, config.pigpio.RISING_EDGE, self._switch_toggle)
# Handles the switch part of the rotary encoder.
def _switch_toggle(self, gpio, level, tick):
self.sw_callback()
def _pulse(self, gpio, level, tick):
"""
Decode the rotary encoder pulse.
+---------+ +---------+ 0
| | | |
A | | | |
| | | |
+---------+ +---------+ +----- 1
+---------+ +---------+ 0
| | | |
B | | | |
| | | |
----+ +---------+ +---------+ 1
"""
if gpio == self.rot_gpioA:
self.levA = level
else:
self.levB = level;
if gpio != self.lastGpio: # debounce
self.lastGpio = gpio
if gpio == self.rot_gpioA and level == 1:
if self.levB == 1:
self.rot_callback(1)
elif gpio == self.rot_gpioB and level == 1:
if self.levA == 1:
self.rot_callback(-1)
def cancel(self):
"""
Cancel the rotary encoder decoder.
"""
self.cbA.cancel()
self.cbB.cancel()
if __name__ == "__main__":
import time
import pigpio
import rotary_encoder
pos = 0
def callback(way):
global pos
pos += way
print("pos={}".format(pos))
pi = pigpio.pi()
decoder = rotary_encoder.decoder(pi, 2, 4, callback)
time.sleep(300)
decoder.cancel()
pi.stop()
| 3.421875
| 3
|
sheetsync/__init__.py
|
guykisel/SheetSync
| 1
|
12780003
|
# -*- coding: utf-8 -*-
"""
sheetsync
~~~~~~~~~
A library to synchronize data with a google spreadsheet, with support for:
- Creating new spreadsheets. Including by copying template sheets.
- Call-back functions when rows are added/updated/deleted.
- Protected columns.
- Extending formulas to new rows.
:copyright: (c) 2014 by <NAME>.
:license: MIT, see LICENSE.txt for more details.
"""
from version import __version__
import logging
import httplib2 # pip install httplib2
from datetime import datetime
import json
# import latest google api python client.
import apiclient.errors # pip install --upgrade google-api-python-client
import apiclient.discovery
from oauth2client.client import OAuth2WebServerFlow, OAuth2Credentials, AccessTokenRefreshError
# import the excellent gspread library.
import gspread # pip install --upgrade gspread
from gspread import SpreadsheetNotFound, WorksheetNotFound
import dateutil.parser # pip install python-dateutil
logger = logging.getLogger('sheetsync')
MAX_BATCH_LEN = 500 # Google's limit is 1MB or 1000 batch entries.
DELETE_ME_FLAG = ' (DELETED)'
DEFAULT_WORKSHEET_NAME = 'Sheet1'
def ia_credentials_helper(client_id, client_secret,
credentials_cache_file="credentials.json",
cache_key="default"):
"""Helper function to manage a credentials cache during testing.
This function attempts to load and refresh a credentials object from a
json cache file, using the cache_key and client_id as a lookup.
If this isn't found then it starts an OAuth2 authentication flow, using
the client_id and client_secret and if successful, saves those to the local
cache. See :ref:`helper`.
Args:
client_id (str): Google Drive API client id string for an installed app
client_secret (str): The corresponding client secret.
credentials_cache_file (str): Filepath to the json credentials cache file
cache_key (str): Optional string to allow multiple credentials for a client
to be stored in the cache.
Returns:
OAuth2Credentials: A google api credentials object. As described here:
https://developers.google.com/api-client-library/python/guide/aaa_oauth
"""
def _load_credentials(key):
with open(credentials_cache_file, 'rb') as inf:
cache = json.load(inf)
cred_json = cache[key]
return OAuth2Credentials.from_json(cred_json)
def _save_credentials(key, credentials):
cache = {}
try:
with open(credentials_cache_file, 'rb') as inf:
cache = json.load(inf)
except (IOError, ValueError), e:
pass
cache[key] = credentials.to_json()
with open(credentials_cache_file, 'wb') as ouf:
json.dump(cache, ouf)
credentials_key = "%s/%s/%s" % (client_id, client_secret, cache_key)
try:
credentials = _load_credentials(credentials_key)
if credentials.access_token_expired:
http = httplib2.Http()
credentials.refresh(http)
except (IOError,
ValueError,
KeyError,
AccessTokenRefreshError), e:
# Check https://developers.google.com/drive/scopes for all available scopes
OAUTH_SCOPE = ('https://www.googleapis.com/auth/drive '+
'https://spreadsheets.google.com/feeds')
# Redirect URI for installed apps
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
# Run through the OAuth flow and retrieve credentials
flow = OAuth2WebServerFlow(client_id, client_secret, OAUTH_SCOPE,
redirect_uri=REDIRECT_URI)
authorize_url = flow.step1_get_authorize_url()
print('Go to the following link in your browser:\n' + authorize_url)
code = raw_input('Enter verification code: ').strip()
credentials = flow.step2_exchange(code)
_save_credentials(credentials_key, credentials)
return credentials
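# Illustrative call (the client id/secret are the installed-app credentials from
# your Google API console project; values below are placeholders):
#
#   credentials = ia_credentials_helper('CLIENT_ID.apps.googleusercontent.com',
#                                       'CLIENT_SECRET',
#                                       credentials_cache_file='creds.json')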
def _is_google_fmt_date(text):
frags = text.split('/')
if len(frags) != 3:
return False
if not all(frag.isdigit() for frag in frags):
return False
frags[0] = frags[0].zfill(2)
frags[1] = frags[1].zfill(2)
try:
date = datetime.strptime('-'.join(frags), '%m-%d-%Y')
return date
except:
return False
def google_equivalent(text1, text2):
# Google spreadsheets modify some characters, and anything that looks like
# a date. So this function will return true if text1 would equal text2 if
# both were input into a google cell.
lines1 = [l.replace('\t',' ').strip() for l in text1.splitlines()]
lines2 = [l.replace('\t',' ').strip() for l in text2.splitlines()]
if len(lines1) != len(lines2):
return False
if len(lines1) == 0 and len(lines2) == 0:
return True
if len(lines1) > 1:
for a,b in zip(lines1, lines2):
if a != b:
return False
# Multiline string matches on every line.
return True
elif lines1[0] == lines2[0]:
# Single line string.. that matches.
return True
# Might be dates.
text1 = lines1[0]
text2 = lines2[0]
if _is_google_fmt_date(text1) or _is_google_fmt_date(text2):
try:
date1 = dateutil.parser.parse(text1)
date2 = dateutil.parser.parse(text2)
if date1 == date2:
return True
except ValueError:
# Couldn't parse one of the dates.
pass
return False
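# Illustrative behaviour of google_equivalent():
#
#   google_equivalent('3/7/2014', '03/07/2014')  # True  (both parse to the same date)
#   google_equivalent('a\tb', 'a b')             # True  (tabs are treated as spaces)
#   google_equivalent('foo', 'bar')              # False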
class MissingSheet(Exception):
pass
class CorruptHeader(Exception):
pass
class BadDataFormat(Exception):
pass
class DuplicateRows(Exception):
pass
class UpdateResults(object):
""" A lightweight counter object that holds statistics about number of
updates made after using the 'sync' or 'inject' method.
Attributes:
added (int): Number of rows added
changed (int): Number of rows changed
nochange (int): Number of rows that were not modified.
deleted (int): Number of rows deleted (which will always be 0 when using
the 'inject' function)
"""
def __init__(self):
self.added = 0
self.changed = 0
self.deleted = 0
self.nochange = 0
def __str__(self):
r = 'Added: %s Changed: %s Deleted: %s No Change: %s' % (
self.added, self.changed, self.deleted, self.nochange)
return r
class Row(dict):
def __init__(self, row_num):
self.row_num = row_num
self.db = {}
dict.__init__(self)
def __setitem__(self, key, cell):
dict.__setitem__(self, key, cell)
self.db[key] = cell.value
def cell_list(self):
for cell in self.itervalues():
yield cell
def is_empty(self):
return all((val is None or val == '') for val in self.db.itervalues())
class Header(object):
def __init__(self):
self.col_to_header = {}
self.header_to_col = {}
def reset(self):
self.col_to_header = {}
self.header_to_col = {}
def col_lookup(self, col_ix):
return self.col_to_header.get(col_ix)
def header_lookup(self, header):
return self.header_to_col.get(header)
def set(self, col, header):
if header in self.header_to_col and self.header_to_col[header] != col:
raise CorruptHeader("'%s' was found twice in row %s of %s" % (header,
self.header_row_ix, self.sheet_name))
if col in self.col_to_header and self.col_to_header[col] != header:
raise CorruptHeader("Header for column '%s' changed while running" % col)
self.col_to_header[col] = header
self.header_to_col[header] = col
@property
def headers_in_order(self):
col_header_list = self.col_to_header.items()
col_header_list.sort(lambda x,y: cmp(x[0],y[0]))
return [header for col,header in col_header_list]
@property
def last_column(self):
if self.col_to_header:
return max(self.col_to_header.keys())
return 0
@property
def first_column(self):
if self.col_to_header:
return min(self.col_to_header.keys())
return 0
@property
def columns(self):
return self.col_to_header.keys()
def __contains__(self, header):
return (header in self.header_to_col)
class Sheet(object):
""" Represents a single worksheet within a google spreadsheet.
This class tracks the google connection, the reference to the worksheet, as
well as options controlling the structure of the data in the worksheet.. for
example:
* Which row is used as the table header
* What header names should be used for the key column(s)
* Whether some columns are protected from overwriting
Attributes:
document_key (str): The spreadsheet's document key assigned by google
drive. If you are using sheetsync to create a spreadsheet then use
this attribute to saved the document_key, and make sure you pass
it as a parameter in subsequent calls to __init__
document_name (str): The title of the google spreadsheet document
document_href (str): The HTML href for the google spreadsheet document
"""
def __init__(self,
credentials=None,
document_key=None, document_name=None,
worksheet_name=None,
# Behavior modifiers
key_column_headers=None,
header_row_ix=1,
formula_ref_row_ix=None,
flag_deletes=True,
protected_fields=None,
# Document creation behavior
template_key=None, template_name=None,
folder_key=None, folder_name=None):
"""Creates a worksheet object (also creating a new Google sheet doc if required)
Args:
credentials (OAuth2Credentials): Credentials object returned by the
google authorization server. Described in detail in this article:
https://developers.google.com/api-client-library/python/guide/aaa_oauth
For testing and development consider using the `ia_credentials_helper`
helper function
document_key (Optional) (str): Document key for the existing spreadsheet to
sync data to. More info here:
https://productforums.google.com/forum/#!topic/docs/XPOR9bTTS50
If this is not provided sheetsync will use document_name to try and
find the correct spreadsheet.
document_name (Optional) (str): The name of the spreadsheet document to
access. If this is not found it will be created. If you know
the document_key then using that is faster and more reliable.
worksheet_name (str): The name of the worksheet inside the spreadsheet
that data will be synced to. If omitted then the default name
"Sheet1" will be used, and a matching worksheet created if
necessary.
key_column_headers (Optional) (list of str): Data in the key column(s) uniquely
identifies a row in your data. So, for example, if your data is
indexed by a single username string, that you want to store in a
column with the header 'Username', you would pass this:
key_column_headers=['Username']
However, sheetsync also supports component keys. Python dictionaries can
use tuples as keys, for example if you had a tuple key like
this:
('Tesla', 'Model-S', '2013')
You can make the column meanings clear by passing in a list of
three key_column_headers:
['Make', 'Model', 'Year']
If no value is given, then the default behavior is to name the
column "Key"; or "Key-1", "Key-2", ... if your data dictionaries
keys are tuples.
header_row_ix (Optional) (int): The row number we expect to see column headers
in. Defaults to 1 (the very top row).
formula_ref_row_ix (Optional) (int): If you want formulas to be added to some
cells when inserting new rows then use a formula reference row.
See :ref:`formulas` for an example use.
flag_deletes (Optional) (bool): Specify if deleted rows should only be flagged
for deletion. By default sheetsync does not delete rows of data, it
just marks that they are deleted by appending the string
" (DELETED)" to key values. If you pass in the value "False" then
rows of data will be deleted by the sync method if they are not
found in the input data. Note, use the inject method if you only want
to add or modify data to in a worksheet.
            protected_fields (Optional) (list of str): A list of fields (column
                headers) that contain protected data. sheetsync will only write to
                cells in these columns if they are blank. This can be useful if you
                are expecting users of the spreadsheet to collaborate on the document
                and edit values in certain columns (e.g. modifying a "Test result"
                column from "PENDING" to "PASSED") and don't want to overwrite
                their edits.
template_key (Optional) (str): This optional key references the spreadsheet
that will be copied if a new spreadsheet needs to be created.
This is useful for copying over formatting, a specific header
order, or apps-script functions. See :ref:`templates`.
template_name (Optional) (str): As with template_key but the name of the
template spreadsheet. If known, using the template_key will be
faster.
            folder_key (Optional) (str): This optional key references the folder that
                a newly created spreadsheet will be moved to, if a new spreadsheet
                needs to be created.
            folder_name (Optional) (str): Like folder_key, this parameter specifies the
                optional folder that a spreadsheet will be created in (if
                required). If a folder matching the name cannot be found, sheetsync
                will attempt to create it.
"""
# Record connection settings, and create a connection.
self.credentials = credentials
self._drive_service = None
self._gspread_client = None
self._sheet = None # Gspread sheet instance.
self._worksheet = None # Gspread worksheet instance.
# Find or create the Google spreadsheet document
document = self._find_document(document_key, document_name)
if document is None:
if document_name is None:
raise ValueError("Must specify a document_name")
# We need to create the document
template = self._find_document(template_key, template_name)
if template is None and template_name is not None:
raise ValueError("Could not find template: %s" % template_name)
self.folder = self._find_or_create_folder(folder_key, folder_name)
document = self._create_new_or_copy(source_doc=template,
target_name=document_name,
folder=self.folder)
if not document:
raise Exception("Could not create doc '%s'." % document_name)
# Create attribute for document key
self.document_key = document['id']
self.document_name = document['title']
self.document_href = document['alternateLink']
# Find or create the worksheet
if worksheet_name is None:
logger.info("Using the default worksheet name")
worksheet_name = DEFAULT_WORKSHEET_NAME
self.worksheet_name = worksheet_name
# Store off behavioural settings for interacting with the worksheet.
if key_column_headers is None:
logger.info("No key column names. Will use 'Key'; or 'Key-1', 'Key-2' etc..")
key_column_headers = []
self.key_column_headers = key_column_headers
self.header_row_ix = header_row_ix
self.formula_ref_row_ix = formula_ref_row_ix
self.flag_delete_mode = flag_deletes
self.protected_fields = (protected_fields or [])
# Cache batch operations to write efficiently
self._batch_request = None
self._batch_href = None
# Track headers and reference formulas
self.header = Header()
self._get_or_create_headers()
self.header_to_ref_formula = {}
self.read_ref_formulas()
@property
def sheet(self):
# Finds and returns a gspread.Spreadsheet object
if self._sheet:
return self._sheet
self._sheet = self.gspread_client.open_by_key(self.document_key)
return self._sheet
@property
def worksheet(self):
# Finds (or creates) then returns a gspread.Worksheet object
if self._worksheet:
return self._worksheet
try:
try:
self._worksheet = self.sheet.worksheet(self.worksheet_name)
except WorksheetNotFound:
logger.info("Not found. Creating worksheet '%s'", self.worksheet_name)
self._worksheet = self.sheet.add_worksheet(title=self.worksheet_name,
rows=20, cols=10)
except Exception, e:
logger.exception("Failed to find or create worksheet: %s. %s",
self.worksheet_name, e)
raise e
return self._worksheet
@property
def gspread_client(self):
if self._gspread_client:
return self._gspread_client
self._gspread_client = gspread.authorize(self.credentials)
self._gspread_client.login()
return self._gspread_client
@property
def drive_service(self):
if self._drive_service:
return self._drive_service
http = httplib2.Http()
if self.credentials.access_token_expired:
logger.info('Refreshing expired credentials')
self.credentials.refresh(http)
logger.info('Creating drive service')
http = self.credentials.authorize(http)
drive_service = apiclient.discovery.build('drive', 'v2', http=http)
# Cache the drive_service object for future calls.
self._drive_service = drive_service
return drive_service
def _create_new_or_copy(self,
target_name=None,
source_doc=None,
folder=None,
sheet_description="new"):
if target_name is None:
raise KeyError("Must specify a name for the new document")
body = {'title': target_name }
if folder:
body['parents'] = [{'kind' : 'drive#parentReference',
'id' : folder['id'],
'isRoot' : False }]
drive_service = self.drive_service
if source_doc is not None:
logger.info("Copying spreadsheet.")
try:
                logger.debug("Copy request body: %s (source id: %s)",
                             body, source_doc['id'])
new_document = drive_service.files().copy(fileId=source_doc['id'], body=body).execute()
except Exception, e:
logger.exception("gdata API error. %s", e)
raise e
else:
# Create new blank spreadsheet.
logger.info("Creating blank spreadsheet.")
body['mimeType'] = 'application/vnd.google-apps.spreadsheet'
try:
new_document = drive_service.files().insert(body=body).execute()
except Exception, e:
logger.exception("gdata API error. %s", e)
raise e
logger.info("Created %s spreadsheet with ID '%s'",
sheet_description,
new_document.get('id'))
return new_document
def _find_or_create_folder(self, folder_key=None, folder_name=None):
drive_service = self.drive_service
# Search by folder key.. raise Exception if not found.
if folder_key is not None:
try:
folder_rsrc = drive_service.files().get(fileId=folder_key).execute()
except apiclient.errors.HttpError, e:
                # XXX: WRONG... this probably returns a 404 if not found, which is not an error.
logger.exception("Google API error: %s", e)
raise e
if not folder_rsrc:
raise KeyError("Folder with key %s was not found." % folder_key)
return folder_rsrc
if not folder_name:
return None
# Search by folder name.
try:
name_query = drive_service.files().list(
q=("title='%s' and trashed=false and "
"mimeType='application/vnd.google-apps.folder'") %
folder_name.replace("'","\\'")
).execute()
items = name_query['items']
if len(items) == 1:
return items[0]
elif len(items) > 1:
raise KeyError("%s folders found named: %s" % (len(items), folder_name))
except Exception, e:
logger.exception("Google API error. %s", e)
raise e
logger.info("Creating a new folder named: '%s'", folder_name)
try:
new_folder_rsrc = drive_service.files().insert(
body={ 'mimeType' : 'application/vnd.google-apps.folder',
'title' : folder_name }).execute()
except Exception, e:
logger.exception("Google API error. %s", e)
raise e
return new_folder_rsrc
def _find_document(self, doc_key=None, doc_name=None):
# Find the document by key and raise "KeyError" if not found.
# Otherwise search by document_name
drive_service = self.drive_service
if doc_key is not None:
logger.debug("Finding document by key.")
try:
doc_rsrc = drive_service.files().get(fileId=doc_key).execute()
except Exception, e:
logger.exception("gdata API error. %s", e)
raise e
if doc_rsrc is None:
raise KeyError("Could not find document with key: %s" % doc_key)
return doc_rsrc
if doc_name is None:
return None
try:
name_query = drive_service.files().list(
q=("title='%s' and trashed=false and "
"mimeType='application/vnd.google-apps.spreadsheet'") %
doc_name.replace("'","\\'")
).execute()
matches = name_query['items']
except Exception, e:
logger.exception("gdata API error. %s", e)
raise e
if len(matches) == 1:
return matches[0]
if len(matches) > 1:
raise KeyError("Too many matches for doc named '%s'" % doc_name)
return None
def _extends(self, rows=None, columns=None):
# Resizes the sheet if needed, to match the given
# number of rows and/or columns
new_rows, new_cols = None, None
if rows is not None and rows > self.worksheet.row_count:
# Need to add new rows to the spreadsheet.
new_rows = rows
if columns is not None and columns > self.worksheet.col_count:
new_cols = columns
if new_rows or new_cols:
try:
self.worksheet.resize(rows=new_rows, cols=new_cols)
except Exception, e:
logger.exception("Error resizing worksheet. %s", e)
raise e
def _write_cell(self, cell):
# Creates a batch_update if required, and adds the passed cell
# to it. Then tests if a flush_writes call is required (when the
# batch write might be close to the 1MB limit)
if not self._batch_request:
self._batch_request = []
logger.debug("_write_cell: Adding batch update")
self._batch_request.append(cell)
if len(self._batch_request) > MAX_BATCH_LEN:
self._flush_writes()
def _flush_writes(self):
# Write current batch_updates to google sheet.
if self._batch_request:
logger.info("_flush_writes: Writing %s cell writes",
len(self._batch_request))
try:
self.worksheet.update_cells(self._batch_request)
except Exception, e:
logger.exception("gdata API error. %s", e)
raise e
# Now check the response code.
#for entry in resp.entry:
# if entry.batch_status.code != '200':
# error = "gdata API error. %s - %s" % (entry.batch_status.code,
# entry.batch_status.reason)
# logger.error("Batch update failed: %s", error)
# raise Exception(error)
self._batch_request = []
def _cell_feed(self, row=None, max_row=None, further_rows=False, # XXX: REFACTOR
col=None, max_col=None, further_cols=False,
return_empty=False):
# Fetches cell data for a given row, and all following rows if
# further_rows is True. If no row is given, all cells are returned.
params = {}
if row is not None:
params['min-row'] = str(row)
if max_row is not None:
params['max-row'] = str(max_row)
elif not further_rows:
params['max-row'] = str(row)
if col is not None:
params['min-col'] = str(col)
if max_col is not None:
params['max-col'] = str(max_col)
elif not further_cols:
params['max-col'] = str(col)
if (params['min-col'] == params['max-col'] and
params['min-col'] == '0'):
return []
if return_empty:
params['return-empty'] = "true"
logger.info("getting cell feed")
try:
feed = self.gspread_client.get_cells_feed(self.worksheet, params=params)
# Bit of a hack to rip out Gspread's xml parsing.
cfeed = [gspread.Cell(self, elem) for elem in
feed.findall(gspread.client._ns('entry'))]
except Exception, e:
logger.exception("gspread error. %s", e)
raise e
return cfeed
def read_ref_formulas(self):
self.header_to_ref_formula = {}
if self.formula_ref_row_ix:
for cell in self._cell_feed(row=self.formula_ref_row_ix):
ref_formula = cell.input_value
header = self.header.col_lookup(cell.col)
if header and ref_formula.startswith("="):
self.header_to_ref_formula[header] = ref_formula
def _get_or_create_headers(self, required_headers=[]):
# Reads the header row, adds missing headers if required.
self.header.reset()
for cell in self._cell_feed(row=self.header_row_ix):
self.header.set(cell.col, cell.value)
headers_to_add = []
for key_field in self.key_column_headers:
if key_field not in self.header:
headers_to_add.append(key_field)
# Write new headers in alphabetical order.
sorted_required_headers = list(required_headers)
sorted_required_headers.sort()
for header in sorted_required_headers:
if header not in self.header:
headers_to_add.append(header)
if not headers_to_add:
return
target_cols = self.header.last_column + len(headers_to_add)
self._extends(columns=target_cols)
cells_list = self._cell_feed(row=self.header_row_ix,
return_empty=True)
for cell in cells_list:
if not headers_to_add:
break
if not cell.value:
header = headers_to_add.pop(0)
cell.value = header
self.header.set(cell.col, header)
self._write_cell(cell)
if headers_to_add:
raise CorruptHeader("Error adding headers")
self._flush_writes()
def backup(self, backup_name, folder_key=None, folder_name=None):
"""Copies the google spreadsheet to the backup_name and folder specified.
Args:
backup_name (str): The name of the backup document to create.
folder_key (Optional) (str): The key of a folder that the new copy will
be moved to.
folder_name (Optional) (str): Like folder_key, references the folder to move a
backup to. If the folder can't be found, sheetsync will create it.
"""
folder = self._find_or_create_folder(folder_key, folder_name)
drive_service = self.drive_service
try:
source_rsrc = drive_service.files().get(fileId=self.document_key).execute()
except Exception, e:
logger.exception("Google API error. %s", e)
raise e
backup = self._create_new_or_copy(source_doc=source_rsrc,
target_name=backup_name,
folder=folder,
sheet_description="backup")
backup_key = backup['id']
return backup_key
def _yield_rows(self, cells_feed):
cur_row = None
for cell in cells_feed:
if cell.row <= self.header_row_ix:
# Never yield the header from this function to avoid overwrites
continue
if self.formula_ref_row_ix and cell.row == self.formula_ref_row_ix:
# Never yield the formula ref row to avoid overwrites
continue
if cur_row is None or cur_row.row_num != cell.row:
if cur_row is not None:
yield cur_row
# Make a new row.
cur_row = Row(cell.row)
if cell.col in self.header.columns:
cur_row[self.header.col_lookup(cell.col)] = cell
if cur_row is not None:
yield cur_row
def data(self, as_cells=False):
""" Reads the worksheet and returns an indexed dictionary of the
row objects.
For example:
        >>> print sheet.data()
{'<NAME>': {'Color': 'Pink', 'Performer': '<NAME>'}, 'Kermit': {'Color': 'Green', 'Performer': '<NAME>'}}
"""
sheet_data = {}
self.max_row = max(self.header_row_ix, self.formula_ref_row_ix)
all_cells = self._cell_feed(row=self.max_row+1,
further_rows=True,
col=self.header.first_column,
max_col=self.header.last_column,
return_empty=True)
for wks_row in self._yield_rows(all_cells):
if wks_row.row_num not in sheet_data and not wks_row.is_empty():
sheet_data[wks_row.row_num] = wks_row
all_rows = sheet_data.keys()
if all_rows:
self.max_row = max(all_rows)
# Now index by key_tuple
indexed_sheet_data = {}
for row, wks_row in sheet_data.iteritems():
# Make the key tuple
if len(self.key_column_headers) == 0:
# Are there any default key column headers?
if "Key" in wks_row:
logger.info("Assumed key column's header is 'Key'")
self.key_column_headers = ['Key']
elif "Key-1" in wks_row:
self.key_column_headers = [h for h in wks_row.keys()
if h.startswith("Key-") and h.split("-")[1].isdigit()]
logger.info("Assumed key column headers were: %s",
self.key_column_headers)
else:
raise Exception("Unable to read spreadsheet. Specify"
"key_column_headers when initializing Sheet object.")
key_list = []
for key_hdr in self.key_column_headers:
key_val = wks_row.db.get(key_hdr,"")
if key_val.startswith("'"):
key_val = key_val[1:]
key_list.append(key_val)
key_tuple = tuple(key_list)
if all(k == "" for k in key_tuple):
continue
if as_cells:
indexed_sheet_data[key_tuple] = wks_row
else:
if len(key_tuple) == 1:
key_tuple = key_tuple[0]
indexed_sheet_data[key_tuple] = wks_row.db
return indexed_sheet_data
@property
def key_length(self):
return len(self.key_column_headers)
#--------------------------------------------------------------------------
# Update the worksheet to match the raw_data, calling
# the row_change_callback for any adds/deletes/fieldchanges.
#
# Read the data to build a list of required headers and
# check the keys are valid tuples.
# sync and update.
#--------------------------------------------------------------------------
def sync(self, raw_data, row_change_callback=None):
""" Equivalent to the inject method but will delete rows from the
google spreadsheet if their key is not found in the input (raw_data)
dictionary.
Args:
raw_data (dict): See inject method
row_change_callback (Optional) (func): See inject method
Returns:
UpdateResults (object): See inject method
"""
return self._update(raw_data, row_change_callback, delete_rows=True)
def inject(self, raw_data, row_change_callback=None):
""" Use this function to add rows or update existing rows in the
spreadsheet.
Args:
raw_data (dict): A dictionary of dictionaries. Where the keys of the
outer dictionary uniquely identify each row of data, and the inner
dictionaries represent the field,value pairs for a row of data.
row_change_callback (Optional) (func): A callback function that you
can use to track changes to rows on the spreadsheet. The
row_change_callback function must take four parameters like so:
                    row_change_callback(row_key,
                                        row_dict_before,
                                        row_dict_after,
                                        list_of_changed_keys)
Returns:
UpdateResults (object): A simple counter object providing statistics
about the changes made by sheetsync.
"""
return self._update(raw_data, row_change_callback, delete_rows=False)
def _update(self, raw_data, row_change_callback=None, delete_rows=False):
required_headers = set()
logger.debug("In _update. Checking for bad keys and missing headers")
fixed_data = {}
missing_raw_keys = set()
for key, row_data in raw_data.iteritems():
if not isinstance(key, tuple):
key = (str(key),)
else:
key = tuple([str(k) for k in key])
if len(self.key_column_headers) == 0:
# Pick default key_column_headers.
if len(key) == 1:
self.key_column_headers = ["Key"]
else:
self.key_column_headers = ["Key-%s" % i for i in range(1,len(key)+1)]
# Cast row_data values to unicode strings.
fixed_data[key] = dict([(k,unicode(v)) for (k,v) in row_data.items()])
missing_raw_keys.add(key)
if len(key) != self.key_length:
raise BadDataFormat("Key %s does not match key field headers %s" % (key,
self.key_column_headers))
required_headers.update( set(row_data.keys()) )
self._get_or_create_headers(required_headers)
results = UpdateResults()
# Check for changes and deletes.
for key_tuple, wks_row in self.data(as_cells=True).iteritems():
if key_tuple in fixed_data:
# This worksheet row is in the fixed_data, might be a change or no-change.
raw_row = fixed_data[key_tuple]
different_fields = []
for header, raw_value in raw_row.iteritems():
sheet_val = wks_row.db.get(header, "")
if not google_equivalent(raw_value, sheet_val):
logger.debug("Identified different field '%s' on %s: %s != %s", header, key_tuple, sheet_val, raw_value)
different_fields.append(header)
if different_fields:
if self._change_row(key_tuple,
wks_row,
raw_row,
different_fields,
row_change_callback):
results.changed += 1
else:
results.nochange += 1
missing_raw_keys.remove( key_tuple )
elif delete_rows:
# This worksheet row is not in fixed_data and needs deleting.
if self.flag_delete_mode:
# Just mark the row as deleted somehow (strikethrough)
if not self._is_flagged_delete(key_tuple, wks_row):
logger.debug("Flagging row %s for deletion (key %s)",
wks_row.row_num, key_tuple)
if row_change_callback:
row_change_callback(key_tuple, wks_row.db,
None, self.key_column_headers[:])
self._delete_flag_row(key_tuple, wks_row)
results.deleted += 1
else:
# Hard delete. Actually delete the row's data.
logger.debug("Deleting row: %s for key %s",
wks_row.row_num, key_tuple)
if row_change_callback:
row_change_callback(key_tuple, wks_row.db,
None, wks_row.db.keys())
self._log_change(key_tuple, "Deleted entry.")
self._delete_row(key_tuple, wks_row)
results.deleted += 1
if missing_raw_keys:
            # Add rows for keys in raw_data that are missing from the worksheet.
self._extends(rows=(self.max_row+len(missing_raw_keys)))
empty_cells_list = self._cell_feed(row=self.max_row+1,
col=self.header.first_column,
max_col=self.header.last_column,
further_rows=True,
return_empty=True)
iter_empty_rows = self._yield_rows(empty_cells_list)
while missing_raw_keys:
wks_row = iter_empty_rows.next()
key_tuple = missing_raw_keys.pop()
logger.debug("Adding new row: %s", str(key_tuple))
raw_row = fixed_data[key_tuple]
results.added += 1
if row_change_callback:
row_change_callback(key_tuple, None,
raw_row, raw_row.keys())
self._insert_row(key_tuple, wks_row, raw_row)
self._flush_writes()
return results
def _log_change(self, key_tuple, description, old_val="", new_val=""):
def truncate(text, length=18):
if len(text) <= length:
return text
return text[:(length-3)] + "..."
if old_val or new_val:
logger.debug("%s: %s '%s'-->'%s'", ".".join(key_tuple),
description, truncate(old_val), truncate(new_val))
else:
logger.debug("%s: %s", ".".join(key_tuple), description)
def _is_flagged_delete(self, key_tuple, wks_row):
# Ideally we'd use strikethrough to indicate deletes - but Google api
# doesn't allow access to get or set formatting.
for key in key_tuple:
if key.endswith(DELETE_ME_FLAG):
return True
return False
def _delete_flag_row(self, key_tuple, wks_row):
for cell in wks_row.cell_list():
if self.header.col_lookup(cell.col) in self.key_column_headers:
# Append the DELETE_ME_FLAG
cell.value = "%s%s" % (cell.value,DELETE_ME_FLAG)
self._write_cell(cell)
self._log_change(key_tuple, "Deleted entry")
def _delete_row(self, key_tuple, wks_row):
for cell in wks_row.cell_list():
cell.value = ''
self._write_cell(cell)
def _get_value_for_column(self, key_tuple, raw_row, col):
# Given a column, and a row dictionary.. returns the value
# of the field corresponding with that column.
try:
header = self.header.col_lookup(col)
except KeyError:
logger.error("Unexpected: column %s has no header", col)
return ""
if header in self.key_column_headers:
key_dict = dict(zip(self.key_column_headers, key_tuple))
key_val = key_dict[header]
if key_val.isdigit() and not key_val.startswith('0'):
# Do not prefix integers so that the key column can be sorted
# numerically.
return key_val
return "'%s" % key_val
elif header in raw_row:
return raw_row[header]
elif header in self.header_to_ref_formula:
return self.header_to_ref_formula[header]
return ""
def _insert_row(self, key_tuple, wks_row, raw_row):
for cell in wks_row.cell_list():
if cell.col in self.header.columns:
value = self._get_value_for_column(key_tuple, raw_row, cell.col)
logger.debug("Batching write of %s", value[:50])
cell.value = value
self._write_cell(cell)
logger.debug("Inserting row %s with batch operation.", wks_row.row_num)
self._log_change(key_tuple, "Added entry")
self.max_row += 1
def _change_row(self, key_tuple, wks_row,
raw_row, different_fields,
row_change_callback):
changed_fields = []
for cell in wks_row.cell_list():
if cell.col not in self.header.columns:
continue
header = self.header.col_lookup(cell.col)
if header in different_fields:
raw_val = raw_row[header]
sheet_val = wks_row.db.get(header,"")
if (header in self.protected_fields) and sheet_val != "":
# Do not overwrite this protected field.
continue
cell.value = raw_val
self._write_cell(cell)
changed_fields.append(header)
self._log_change(key_tuple, ("Updated %s" % header),
old_val=sheet_val, new_val=raw_val)
if row_change_callback:
row_change_callback(key_tuple, wks_row.db, raw_row, changed_fields)
return changed_fields
| 2.453125
| 2
|
asyncapi_schema_pydantic/v2_3_0/schema.py
|
albertnadal/asyncapi-schema-pydantic
| 0
|
12780004
|
<reponame>albertnadal/asyncapi-schema-pydantic
from typing import Optional
from pydantic import Extra
from .json_schema import JsonSchemaObject
from .external_documentation import ExternalDocumentation
class Schema(JsonSchemaObject):
"""
The Schema Object allows the definition of input and output data types.
These types can be objects, but also primitives and arrays. This object
is a superset of the JSON Schema Specification Draft 07.
Further information about the properties can be found in JSON Schema Core
and JSON Schema Validation. Unless stated otherwise, the property definitions
follow the JSON Schema specification as referenced here.
The AsyncAPI Schema Object is a JSON Schema vocabulary which extends JSON
Schema Core and Validation vocabularies.
"""
discriminator: Optional[str] = None
"""
Adds support for polymorphism. The discriminator is the schema property name
that is used to differentiate between other schema that inherit this schema.
The property name used MUST be defined at this schema and it MUST be in the
required property list. When used, the value MUST be the name of this schema
or any schema that inherits it.
"""
externalDocs: Optional[ExternalDocumentation] = None
"""
Additional external documentation for this schema.
"""
deprecated: Optional[bool] = False
"""
Specifies that a schema is deprecated and SHOULD be transitioned out of usage.
Default value is false.
"""
class Config:
extra = Extra.forbid
schema_extra = {
"examples": [
{
"type": "string",
"format": "email"
}
]
}
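# Illustrative usage (a sketch; it assumes the inherited JsonSchemaObject model
# defines the standard JSON Schema fields used below, e.g. `type` and `format`):
#
#   schema = Schema(type="string", format="email", deprecated=False)
#   print(schema.dict(exclude_none=True))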
| 2.203125
| 2
|
pyscnet/BuildNet/__init__.py
|
MingBit/SCNetEnrich
| 5
|
12780005
|
<gh_stars>1-10
from .gne_dockercaller import rundocker
from .gne_synchrony import get_synchrony
| 1.117188
| 1
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fipClearVirtualLinksFcf_template.py
|
OpenIxia/ixnetwork_restpy
| 20
|
12780006
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FipClearVirtualLinksFcf(Base):
__slots__ = ()
_SDM_NAME = 'fipClearVirtualLinksFcf'
_SDM_ATT_MAP = {
'HeaderFipVersion': 'fipClearVirtualLinksFcf.header.fipVersion-1',
'HeaderFipReserved': 'fipClearVirtualLinksFcf.header.fipReserved-2',
'FipProtocolCodeFipKeepaliveVirtualLink': 'fipClearVirtualLinksFcf.header.fipOperation.fipProtocolCode.fipKeepaliveVirtualLink-3',
'FipOperationFipOperationReserved1': 'fipClearVirtualLinksFcf.header.fipOperation.fipOperationReserved1-4',
'FipSubcodeFipSubcode02h': 'fipClearVirtualLinksFcf.header.fipOperation.fipSubcode.fipSubcode02h-5',
'FipOperationFipDescriptorListLength': 'fipClearVirtualLinksFcf.header.fipOperation.fipDescriptorListLength-6',
'FipOperationFipFp': 'fipClearVirtualLinksFcf.header.fipOperation.fipFp-7',
'FipOperationFipSp': 'fipClearVirtualLinksFcf.header.fipOperation.fipSp-8',
'FipOperationFipReserved2': 'fipClearVirtualLinksFcf.header.fipOperation.fipReserved2-9',
'FipOperationFipABit': 'fipClearVirtualLinksFcf.header.fipOperation.fipABit-10',
'FipOperationFipSBit': 'fipClearVirtualLinksFcf.header.fipOperation.fipSBit-11',
'FipOperationFipFBit': 'fipClearVirtualLinksFcf.header.fipOperation.fipFBit-12',
'FipMacAddressDescriptorFipMacAddressDescriptorType': 'fipClearVirtualLinksFcf.header.fipDescriptors.fipSelectFipDescriptor.fipMacAddressDescriptor.fipMacAddressDescriptorType-13',
'FipMacAddressDescriptorFipMacAddressDescriptorLength': 'fipClearVirtualLinksFcf.header.fipDescriptors.fipSelectFipDescriptor.fipMacAddressDescriptor.fipMacAddressDescriptorLength-14',
'FipMacAddressDescriptorFipMacAddressDescriptorValue': 'fipClearVirtualLinksFcf.header.fipDescriptors.fipSelectFipDescriptor.fipMacAddressDescriptor.fipMacAddressDescriptorValue-15',
'FipNameIdentifierDescriptorFipNameIdentifierDescriptorType': 'fipClearVirtualLinksFcf.header.fipDescriptors.fipSelectFipDescriptor.fipNameIdentifierDescriptor.fipNameIdentifierDescriptorType-16',
'FipNameIdentifierDescriptorFipNameIdentifierDescriptorLength': 'fipClearVirtualLinksFcf.header.fipDescriptors.fipSelectFipDescriptor.fipNameIdentifierDescriptor.fipNameIdentifierDescriptorLength-17',
'FipNameIdentifierDescriptorFipNameIdentifierDescriptorReserved': 'fipClearVirtualLinksFcf.header.fipDescriptors.fipSelectFipDescriptor.fipNameIdentifierDescriptor.fipNameIdentifierDescriptorReserved-18',
'FipNameIdentifierDescriptorFipNameIdentifierDescriptorValue': 'fipClearVirtualLinksFcf.header.fipDescriptors.fipSelectFipDescriptor.fipNameIdentifierDescriptor.fipNameIdentifierDescriptorValue-19',
'FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorType': 'fipClearVirtualLinksFcf.header.fipDescriptors.fipSelectFipDescriptor.fipVxPortIdentificationDescriptor.fipVxPortIdentificationDescriptorType-20',
'FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorLength': 'fipClearVirtualLinksFcf.header.fipDescriptors.fipSelectFipDescriptor.fipVxPortIdentificationDescriptor.fipVxPortIdentificationDescriptorLength-21',
'FipVxPortIdentificationDescriptorFipVxPortIdentificationMacDescriptorAddress': 'fipClearVirtualLinksFcf.header.fipDescriptors.fipSelectFipDescriptor.fipVxPortIdentificationDescriptor.fipVxPortIdentificationMacDescriptorAddress-22',
'FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorReserved': 'fipClearVirtualLinksFcf.header.fipDescriptors.fipSelectFipDescriptor.fipVxPortIdentificationDescriptor.fipVxPortIdentificationDescriptorReserved-23',
'FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorAddressIdentifier': 'fipClearVirtualLinksFcf.header.fipDescriptors.fipSelectFipDescriptor.fipVxPortIdentificationDescriptor.fipVxPortIdentificationDescriptorAddressIdentifier-24',
'FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorValue': 'fipClearVirtualLinksFcf.header.fipDescriptors.fipSelectFipDescriptor.fipVxPortIdentificationDescriptor.fipVxPortIdentificationDescriptorValue-25',
}
def __init__(self, parent, list_op=False):
super(FipClearVirtualLinksFcf, self).__init__(parent, list_op)
@property
def HeaderFipVersion(self):
"""
Display Name: Version
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderFipVersion']))
@property
def HeaderFipReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderFipReserved']))
@property
def FipProtocolCodeFipKeepaliveVirtualLink(self):
"""
Display Name: Keep Alive/Clear Virtual Links
Default Value: 0x0003
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipProtocolCodeFipKeepaliveVirtualLink']))
@property
def FipOperationFipOperationReserved1(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipOperationReserved1']))
@property
def FipSubcodeFipSubcode02h(self):
"""
Display Name: Subcode 02h
Default Value: 0x02
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipSubcodeFipSubcode02h']))
@property
def FipOperationFipDescriptorListLength(self):
"""
Display Name: FIP Descriptor List Length
Default Value: 10
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipDescriptorListLength']))
@property
def FipOperationFipFp(self):
"""
Display Name: FP
Default Value: 1
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipFp']))
@property
def FipOperationFipSp(self):
"""
Display Name: SP
Default Value: 1
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipSp']))
@property
def FipOperationFipReserved2(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipReserved2']))
@property
def FipOperationFipABit(self):
"""
Display Name: A bit
Default Value: 0
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipABit']))
@property
def FipOperationFipSBit(self):
"""
Display Name: S bit
Default Value: 0
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipSBit']))
@property
def FipOperationFipFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipFBit']))
@property
def FipMacAddressDescriptorFipMacAddressDescriptorType(self):
"""
Display Name: MAC Address Descriptor Type
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipMacAddressDescriptorFipMacAddressDescriptorType']))
@property
def FipMacAddressDescriptorFipMacAddressDescriptorLength(self):
"""
Display Name: MAC Address Descriptor Length
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipMacAddressDescriptorFipMacAddressDescriptorLength']))
@property
def FipMacAddressDescriptorFipMacAddressDescriptorValue(self):
"""
Display Name: MAC Address Descriptor Value
Default Value: 00:EE:00:00:00:00
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipMacAddressDescriptorFipMacAddressDescriptorValue']))
@property
def FipNameIdentifierDescriptorFipNameIdentifierDescriptorType(self):
"""
Display Name: Name_Identifier Descriptor Type
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipNameIdentifierDescriptorFipNameIdentifierDescriptorType']))
@property
def FipNameIdentifierDescriptorFipNameIdentifierDescriptorLength(self):
"""
Display Name: Name_Identifier Descriptor Length
Default Value: 3
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipNameIdentifierDescriptorFipNameIdentifierDescriptorLength']))
@property
def FipNameIdentifierDescriptorFipNameIdentifierDescriptorReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipNameIdentifierDescriptorFipNameIdentifierDescriptorReserved']))
@property
def FipNameIdentifierDescriptorFipNameIdentifierDescriptorValue(self):
"""
Display Name: Name_Identifier Descriptor Value
Default Value: 0x0000000000000000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipNameIdentifierDescriptorFipNameIdentifierDescriptorValue']))
@property
def FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorType(self):
"""
Display Name: Vx_Port Identification Descriptor Type
Default Value: 11
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorType']))
@property
def FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorLength(self):
"""
Display Name: Vx_Port Identification Descriptor Length
Default Value: 5
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorLength']))
@property
def FipVxPortIdentificationDescriptorFipVxPortIdentificationMacDescriptorAddress(self):
"""
Display Name: Vx_Port Identification Descriptor MAC Address
Default Value: 00:00:00:00:00:01
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipVxPortIdentificationDescriptorFipVxPortIdentificationMacDescriptorAddress']))
@property
def FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorReserved']))
@property
def FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorAddressIdentifier(self):
"""
Display Name: Vx_Port Identification Descriptor Address Identifier
Default Value: 0x000001
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorAddressIdentifier']))
@property
def FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorValue(self):
"""
Display Name: Vx_Port Identification Descriptor Value
Default Value: 0x0000000000000001
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipVxPortIdentificationDescriptorFipVxPortIdentificationDescriptorValue']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| 1.703125
| 2
|
finding_dallin/game/director.py
|
JosephRS409/cse210-project
| 0
|
12780007
|
import arcade
from game.title_view import Title
from game.player import Player
from game import constants
class Director():
def __init__(self):
"""Directs the game"""
self.window = arcade.Window(
constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT, constants.SCREEN_TITLE)
self.main()
self.player = Player()
# Track the current state of what key is pressed
def main(self):
""" Main method """
# Class for displaying the screen.
start_view = Title()
self.window.show_view(start_view)
arcade.run()
| 2.9375
| 3
|
__main__.py
|
kocsob/tornado-template
| 0
|
12780008
|
import os
import tornado.httpserver
import tornado.ioloop
import tornado.log
import tornado.web
from tornado.options import define, options, parse_command_line
import config
import handlers.web
import handlers.api
class Application(tornado.web.Application):
def __init__(self, debug):
routes = [
# Web handlers
(r"/", handlers.web.ExampleWebHandler),
# API handlers
(r"/api", handlers.api.ExampleApiHandler),
# Public files: JS, CSS, images and favicon.ico
(r'/public/(.*)', tornado.web.StaticFileHandler, {
'path' : os.path.join(os.path.dirname(__file__), "public")
}),
(r'/(favicon\.ico)', tornado.web.StaticFileHandler, {
"path": os.path.join(os.path.dirname(__file__), "public", "images")
})
]
settings = {
"template_path": os.path.join(os.path.dirname(__file__), "templates"),
"debug": debug,
"cookie_secret": config.cookie_secret,
"xsrf_cookies": True,
"login_url": '/login'
}
tornado.web.Application.__init__(self, routes, **settings)
@property
def logger(self):
return tornado.log.app_log
if __name__=='__main__':
define('port', default = config.port, help = 'port', type = int)
define('debug', default = False, help = 'run in debug mode', type = bool)
parse_command_line()
app = Application(options.debug)
app.logger.info('Starting %s on 0.0.0.0:%s' % ('tornado skeleton', options.port))
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
| 2.25
| 2
|
bower-license-parser.py
|
ewen/bower-license-parser
| 0
|
12780009
|
#!/usr/bin/env python
import sys
import json
import csv
json_input = json.load(sys.stdin)
csv_output = csv.writer(sys.stdout)
csv_output.writerow(['Library', 'URL', 'License'])
for package_name, data in json_input.items():
name = package_name.split('@')[0]
url = ''
if 'homepage' in data:
if type(data['homepage']) is str:
url = data['homepage']
elif 'repository' in data:
if type(data['repository']) is str:
url = data['repository']
elif type(data['repository']) is dict:
url = data['repository']['url']
csv_output.writerow([name, url, ';'.join(data['licenses'])])
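# Illustrative usage (a sketch; the input file name is hypothetical). The script
# reads license JSON on stdin and writes CSV to stdout:
#
#   python bower-license-parser.py < licenses.json > licenses.csv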
| 3.1875
| 3
|
src/ao3_beautifulsoup/history.py
|
jezaven/ao3-history
| 5
|
12780010
|
<reponame>jezaven/ao3-history<filename>src/ao3_beautifulsoup/history.py
# -*- encoding: utf-8
from datetime import datetime
import collections
import itertools
import re
from bs4 import BeautifulSoup, Tag
import requests
ReadingHistoryItem = collections.namedtuple(
'ReadingHistoryItem', ['work_id', 'title', 'authors', 'fandoms', \
'ships', 'characters', 'tags', 'summary', 'rating', 'warnings', \
'categories', 'iswip', 'language', 'words', 'comments', 'kudos', 'bookmarks', \
'hits', 'last_read'])
class User(object):
def __init__(self, username, password=<PASSWORD>, sess=None):
self.username = username
if sess == None:
sess = requests.Session()
if password != None:
req = sess.get('https://archiveofourown.org')
soup = BeautifulSoup(req.text, features='html.parser')
authenticity_token = soup.find('input', {'name': 'authenticity_token'})['value']
req = sess.post('https://archiveofourown.org/users/login', params={
'authenticity_token': authenticity_token,
'user[login]': username,
'user[password]': password,
})
# Unfortunately AO3 doesn't use HTTP status codes to communicate
# results -- it's a 200 even if the login fails.
if 'Please try again' in req.text:
raise RuntimeError(
'Error logging in to AO3; is your password correct?')
self.sess = sess
def __repr__(self):
return '%s(username=%r)' % (type(self).__name__, self.username)
def _lookup_stat(self, li_tag, class_name, default=None):
"""Returns the value of a stat."""
# The stats are stored in a series of divs of the form
#
# <dd class="[field_name]">[field_value]</div>
#
dd_tag = li_tag.find('dd', attrs={'class': class_name})
if dd_tag is None:
return default
# if 'tags' in dd_tag.attrs['class']:
# return self._lookup_list_stat(dd_tag=dd_tag)
return dd_tag.contents[0]
def reading_history(self):
"""Returns information about works in the user's reading history.
This requires the user to turn on the Viewing History feature.
"""
api_url = (
'https://archiveofourown.org/users/%s/readings?page=%%d' %
self.username)
for page_no in itertools.count(start=1):
print("PAGE NUM: " + str(page_no))
req = self.sess.get(api_url % page_no)
soup = BeautifulSoup(req.text, features='html.parser')
# The entries are stored in a list of the form:
#
# <ol class="reading work index group">
# <li id="work_12345" class="reading work blurb group">
# ...
# </li>
# <li id="work_67890" class="reading work blurb group">
# ...
# </li>
# ...
# </ol>
#
ol_tag = soup.find('ol', attrs={'class': 'reading'})
for li_tag in ol_tag.find_all('li', attrs={'class': 'blurb'}):
try:
work_id = li_tag.attrs['id'].replace('work_', '')
# print("id: " + work_id)
heading = li_tag.find('h4', attrs={'class': 'heading'})
if (heading.text == "Mystery Work"):
continue
title = heading.find('a').text
# print(title)
authors = []
for author in li_tag.find_all('a', attrs={'rel': 'author'}):
authors.append(author.text)
# print(authors)
fandoms_tag = li_tag.find('h5', attrs={'class': 'fandoms'})
fandoms = []
for fandom_a in fandoms_tag.find_all('a', attrs={'class': 'tag'}):
fandoms.append(fandom_a.text)
# print(fandoms)
ships = []
for ship in li_tag.find_all('li', attrs={'class': 'relationships'}):
ships.append(ship.text)
# print(ships)
characters = []
for character in li_tag.find_all('li', attrs={'class': 'characters'}):
characters.append(character.text)
# print(characters)
addl_tags = []
for tag in li_tag.find_all('li', attrs={'class': 'freeforms'}):
addl_tags.append(tag.text)
# print(addl_tags)
blockquote = li_tag.find('blockquote', attrs={'class': 'summary'})
if (blockquote == None):
summary = "none"
else:
summary = blockquote.renderContents().decode('utf8').strip()
# print(summary)
rating = li_tag.find('span', attrs={'class': 'rating'}).attrs['title']
# print("rating: " + rating)
warnings = li_tag.find('span', attrs={'class': 'warnings'}).attrs['title'].split(", ")
# print(warnings)
categories = li_tag.find('span', attrs={'class': 'category'}).attrs['title'].split(", ")
# print(categories)
iswip_tag = li_tag.find('span', attrs={'class': 'iswip'}).attrs['title']
if (iswip_tag == "Work in Progress"):
iswip = True
else:
iswip = False
# print("iswip: " + str(iswip))
language = self._lookup_stat(li_tag, 'language', 0)
# print("language: " + str(language))
words = int(self._lookup_stat(li_tag, 'words', 0).replace(',', ''))
# print("words: " + str(words))
# chapters = self._lookup_stat(li_tag, 'chapters', 0)
# print("chapters: " + str(chapters)) # needs a lot of work
comments_tag = self._lookup_stat(li_tag, 'comments', 0)
if (comments_tag == 0):
comments = 0
else:
comments = int(comments_tag.text)
# print("comments: " + str(comments))
kudos_tag = self._lookup_stat(li_tag, 'kudos', 0)
if (kudos_tag == 0):
kudos = 0
else:
kudos = int(kudos_tag.text)
# print("kudos: " + str(kudos))
bookmark_tag = self._lookup_stat(li_tag, 'bookmarks', 0)
if (bookmark_tag == 0):
bookmarks = 0
else:
bookmarks = bookmark_tag.text
# print("bookmarks: " + str(bookmrks))
hits = int(self._lookup_stat(li_tag, 'hits', 0))
# print("hits: " + str(hits))
# Within the <li>, the last viewed date is stored as
#
# <h4 class="viewed heading">
# <span>Last viewed:</span> 24 Dec 2012
#
# (Latest version.)
#
# Viewed once
# </h4>
#
h4_tag = li_tag.find('h4', attrs={'class': 'viewed'})
date_str = re.search(
r'[0-9]{1,2} [A-Z][a-z]+ [0-9]{4}',
h4_tag.contents[2]).group(0)
date = datetime.strptime(date_str, '%d %b %Y').date()
yield ReadingHistoryItem(work_id, title, authors, fandoms, \
ships, characters, addl_tags, summary, rating, warnings, \
categories, iswip, language, words, comments, kudos, bookmarks, \
hits, date)
except KeyError:
# A deleted work shows up as
#
# <li class="deleted reading work blurb group">
#
# There's nothing that we can do about that, so just skip
# over it.
if 'deleted' in li_tag.attrs['class']:
pass
else:
raise
# The pagination button at the end of the page is of the form
#
# <li class="next" title="next"> ... </li>
#
# If there's another page of results, this contains an <a> tag
# pointing to the next page. Otherwise, it contains a <span>
# tag with the 'disabled' class.
try:
next_button = soup.find('li', attrs={'class': 'next'})
if next_button.find('span', attrs={'class': 'disabled'}):
break
except:
# In case of absence of "next"
break
| 3.03125
| 3
|
apps/Servers/data_tables_views.py
|
ulibn/BlueXolo
| 21
|
12780011
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Q
from django_datatables_view.base_datatable_view import BaseDatatableView
from apps.Servers.models import TemplateServer, ServerProfile, Parameters
class ServerTemplatesListJson(LoginRequiredMixin, BaseDatatableView):
model = TemplateServer
columns = ['name', 'description', 'pk']
order_columns = ['name', 'description', 'pk']
max_display_length = 200
def filter_queryset(self, qs):
search = self.request.GET.get(u'search[value]', None)
if search:
qs = qs.filter(
Q(name__icontains=search) |
Q(description__icontains=search)
)
return qs
class ServerProfilesListJson(LoginRequiredMixin, BaseDatatableView):
model = ServerProfile
columns = ['name', 'description', 'pk']
order_columns = ['name', 'description', 'pk']
max_display_length = 100
def filter_queryset(self, qs):
search = self.request.GET.get(u'search[value]', None)
if search:
qs = qs.filter(name__icontains=search)
return qs
class ParametersListJson(LoginRequiredMixin, BaseDatatableView):
model = Parameters
columns = ['name', 'category', 'pk']
order_columns = ['name', 'category', 'pk']
max_display_length = 200
def filter_queryset(self, qs):
search = self.request.GET.get(u'search[value]', None)
if search:
qs = qs.filter(
Q(name__icontains=search) |
Q(category__icontains=search)
)
return qs
| 1.984375
| 2
|
PyExercises - CeV - Mundo 3/Exercises (35 - 43)/Ex 41/ex 41.py
|
PatrickAMenezes/PyExercises-CursoEmVideo-Mundo3
| 0
|
12780012
|
<reponame>PatrickAMenezes/PyExercises-CursoEmVideo-Mundo3
from Functions import readint, readfloat
num_int = readint.readint()
num_float = readfloat.readfloat()
print(f'The integer entered was {num_int} and the float was {num_float}.')
| 3.484375
| 3
|
src/utils/config.py
|
rachelkberryman/flower_detection
| 1
|
12780013
|
# %% Packages
import json
from dotmap import DotMap
# %% Functions
def get_config_from_json(json_file):
with open(json_file, "r") as config_file:
config_dict = json.load(config_file)
config = DotMap(config_dict)
return config
def process_config(json_file):
config = get_config_from_json(json_file)
return config
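# Illustrative usage (a sketch; the path and config keys are hypothetical):
#
#   config = process_config("configs/config.json")
#   print(config.trainer.num_epochs)  # DotMap allows attribute-style access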
| 2.5625
| 3
|
microserver.py
|
ronrest/model_nanny
| 0
|
12780014
|
<filename>microserver.py
"""
TODO: Add description of this script
TODO: Add license
"""
from __future__ import print_function, division, unicode_literals
import os
import pickle
from flask import Flask
from flask import render_template, url_for
app = Flask(__name__)
# SETTINGS
VALIDATION_METRIC = "valid_acc"
TRAIN_METRIC = "train_acc"
MODELS_DIR = "../models"
PORT = 8080
HOST = "0.0.0.0"
THREADED = True
DEBUG = False # Put the server in debug mode?
# DANGER! Setting to True makes the system vulnerable to
# attack. Should not be used on a publicly accessible
# IP address.
# Remember to turn it back to False as soon as you are done.
# TODO: For some reason it requires numpy to open the pickles.
# I want to get rid of this dependency.
# TODO: Chose which metrics to show on the models page plots using a dict, eg:
# {"Accuracies over time":
# {
# "train": "train_acc",
# "valid": VALIDATION_METRIC,
# },
# "Loss over time":
# {
# "train": "train_loss",
# "valid": "valid_loss",
# }
# }
# This should perhaps be put in a config file, so user does not have to
# mess around with the python code here.
# TODO: make use of a file `training.txt` that lets you know what file/files
# is/are currently in the process of being trained.
# The existence of the file means it is training.
# The absence of the file will mean it is no longer training.
# Put some icon on the index, and model page to indicate it is training
# TODO: Use a more responsive html style. Text looks huge on desktop but tiny
# on mobile
# TODO: Make use of log files in model directory, and have a text panel that
# Allows you to scroll through all the logs of the model.
def pickle2obj(file):
with open(file, mode="rb") as fileObj:
obj = pickle.load(fileObj)
return obj
def get_train_status_file(model_name):
try:
with open(os.path.join(MODELS_DIR, model_name, "train_status.txt"), mode="r") as f:
return f.read().strip()
except:
return ""
def get_best_score(model_name):
try:
with open(os.path.join(MODELS_DIR, model_name, "best_score.txt"), mode="r") as f:
contents = f.read().strip()
return float(contents)
except ValueError:
print("WARNING! Incorrect best score file format for model", model_name)
print("- Expected a value that could be converted to a float.")
print('- Instead got: "{}"'.format(contents))
return 0
except IOError:
return 0
def get_evals_dict(model_name):
try:
pickle_file = os.path.join(MODELS_DIR, model_name, "evals.pickle")
evals = pickle2obj(pickle_file)
except:
print("WARNING: Could not load {} \n - Returning blank evals dict instead".format(pickle_file))
evals = {VALIDATION_METRIC: [], TRAIN_METRIC:[]}
return evals
@app.route("/")
def index():
model_names = os.listdir(MODELS_DIR)
# TODO: use a better sorting method. Does not do too well with filenames
    # containing numbers that are not fixed length and preceded by 0s
model_names.sort(key=lambda item: item.lower()) # Put in alphabetical order
scores = [get_best_score(model) for model in model_names]
statuses = [get_train_status_file(model) for model in model_names]
# Sort by score in descending order
model_score_statuses = sorted(zip(model_names,scores, statuses), key=lambda x: x[1], reverse=True)
return render_template('index.html',
style_path=url_for('static', filename='style.css'),
model_score_statuses=model_score_statuses)
@app.route('/models/<model_name>')
def model_page(model_name):
score = get_best_score(model_name)
evals = get_evals_dict(model_name)
return render_template('model.html',
style_path=url_for('static', filename='style.css'),
model_name=model_name,
x=list(range(len(evals[VALIDATION_METRIC]))),
acc_plot_title="Accuracy over time",
train_acc=evals.get(TRAIN_METRIC, []),
valid_acc=evals.get(VALIDATION_METRIC, []),
loss_plot_title="Loss over time",
train_loss=evals.get("train_loss", []),
valid_loss=evals.get("valid_loss", []),
)
app.run(host=HOST, port=PORT, threaded=THREADED, debug=DEBUG)
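# Illustrative usage (a sketch): with MODELS_DIR (../models) containing one
# directory per model, each optionally holding best_score.txt, train_status.txt
# and evals.pickle, run
#
#   python microserver.py
#
# then browse to http://localhost:8080/ for the ranked index, or to
# /models/<model_name> for an individual model's accuracy and loss plots.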
| 2.359375
| 2
|
cactus/skeleton.py
|
hzdg/Cactus
| 0
|
12780015
|
<reponame>hzdg/Cactus<filename>cactus/skeleton.py
data = """
<KEY>
ICAg8N3E/wM4o00/AMgAAA==
"""
| 0.902344
| 1
|
tests/base/test_view.py
|
bluetyson/discretize
| 0
|
12780016
|
<reponame>bluetyson/discretize
from __future__ import print_function
import unittest
import numpy as np
import matplotlib.pyplot as plt
import discretize
from discretize import Tests, utils
import pytest
np.random.seed(16)
TOL = 1e-1
class Cyl3DView(unittest.TestCase):
def setUp(self):
self.mesh = discretize.CylMesh([10, 4, 12])
def test_incorrectAxesWarnings(self):
# axes aren't polar
fig, ax = plt.subplots(1, 1)
# test z-slice
with pytest.warns(UserWarning):
self.mesh.plotGrid(slice='z', ax=ax)
# axes aren't right shape
with pytest.warns(UserWarning):
self.mesh.plotGrid(slice='both', ax=ax)
self.mesh.plotGrid(ax=ax)
# this should be fine
self.mesh.plotGrid(slice='theta', ax=ax)
fig, ax = plt.subplots(2, 1)
# axes are right shape, but not polar
with pytest.warns(UserWarning):
self.mesh.plotGrid(slice='both', ax=ax)
self.mesh.plotGrid(ax=ax)
# these should be fine
self.mesh.plotGrid()
ax0 = plt.subplot(121, projection='polar')
ax1 = plt.subplot(122)
self.mesh.plotGrid(slice='z', ax=ax0) # plot z only
self.mesh.plotGrid(slice='theta', ax=ax1) # plot theta only
self.mesh.plotGrid(slice='both', ax=[ax0, ax1]) # plot both
self.mesh.plotGrid(slice='both', ax=[ax1, ax0]) # plot both
self.mesh.plotGrid(ax=[ax1, ax0]) # plot both
def test_plotImage(self):
with self.assertRaises(Exception):
self.mesh.plotImage(np.random.rand(self.mesh.nC))
if __name__ == '__main__':
unittest.main()
| 2.296875
| 2
|
canopy/io/adat/errors.py
|
SomaLogic/Canopy
| 7
|
12780017
|
class AdatReadError(Exception):
pass
| 1.101563
| 1
|
alipay/aop/api/domain/InsCoverage.py
|
snowxmas/alipay-sdk-python-all
| 213
|
12780018
|
<filename>alipay/aop/api/domain/InsCoverage.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class InsCoverage(object):
def __init__(self):
self._coverage_name = None
self._coverage_no = None
self._effect_end_time = None
self._effect_start_time = None
self._iop = None
self._iop_premium = None
self._premium = None
self._sum_insured = None
@property
def coverage_name(self):
return self._coverage_name
@coverage_name.setter
def coverage_name(self, value):
self._coverage_name = value
@property
def coverage_no(self):
return self._coverage_no
@coverage_no.setter
def coverage_no(self, value):
self._coverage_no = value
@property
def effect_end_time(self):
return self._effect_end_time
@effect_end_time.setter
def effect_end_time(self, value):
self._effect_end_time = value
@property
def effect_start_time(self):
return self._effect_start_time
@effect_start_time.setter
def effect_start_time(self, value):
self._effect_start_time = value
@property
def iop(self):
return self._iop
@iop.setter
def iop(self, value):
self._iop = value
@property
def iop_premium(self):
return self._iop_premium
@iop_premium.setter
def iop_premium(self, value):
self._iop_premium = value
@property
def premium(self):
return self._premium
@premium.setter
def premium(self, value):
self._premium = value
@property
def sum_insured(self):
return self._sum_insured
@sum_insured.setter
def sum_insured(self, value):
self._sum_insured = value
def to_alipay_dict(self):
params = dict()
if self.coverage_name:
if hasattr(self.coverage_name, 'to_alipay_dict'):
params['coverage_name'] = self.coverage_name.to_alipay_dict()
else:
params['coverage_name'] = self.coverage_name
if self.coverage_no:
if hasattr(self.coverage_no, 'to_alipay_dict'):
params['coverage_no'] = self.coverage_no.to_alipay_dict()
else:
params['coverage_no'] = self.coverage_no
if self.effect_end_time:
if hasattr(self.effect_end_time, 'to_alipay_dict'):
params['effect_end_time'] = self.effect_end_time.to_alipay_dict()
else:
params['effect_end_time'] = self.effect_end_time
if self.effect_start_time:
if hasattr(self.effect_start_time, 'to_alipay_dict'):
params['effect_start_time'] = self.effect_start_time.to_alipay_dict()
else:
params['effect_start_time'] = self.effect_start_time
if self.iop:
if hasattr(self.iop, 'to_alipay_dict'):
params['iop'] = self.iop.to_alipay_dict()
else:
params['iop'] = self.iop
if self.iop_premium:
if hasattr(self.iop_premium, 'to_alipay_dict'):
params['iop_premium'] = self.iop_premium.to_alipay_dict()
else:
params['iop_premium'] = self.iop_premium
if self.premium:
if hasattr(self.premium, 'to_alipay_dict'):
params['premium'] = self.premium.to_alipay_dict()
else:
params['premium'] = self.premium
if self.sum_insured:
if hasattr(self.sum_insured, 'to_alipay_dict'):
params['sum_insured'] = self.sum_insured.to_alipay_dict()
else:
params['sum_insured'] = self.sum_insured
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = InsCoverage()
if 'coverage_name' in d:
o.coverage_name = d['coverage_name']
if 'coverage_no' in d:
o.coverage_no = d['coverage_no']
if 'effect_end_time' in d:
o.effect_end_time = d['effect_end_time']
if 'effect_start_time' in d:
o.effect_start_time = d['effect_start_time']
if 'iop' in d:
o.iop = d['iop']
if 'iop_premium' in d:
o.iop_premium = d['iop_premium']
if 'premium' in d:
o.premium = d['premium']
if 'sum_insured' in d:
o.sum_insured = d['sum_insured']
return o
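
# --- Illustrative usage sketch (not part of the generated SDK file) ---
# A minimal, hedged example of the to_alipay_dict / from_alipay_dict round
# trip. The field values below are made up purely for demonstration.
def _ins_coverage_roundtrip_sketch():
    cov = InsCoverage()
    cov.coverage_name = 'Example coverage'
    cov.coverage_no = 'CVG-0001'
    cov.premium = '100.00'
    params = cov.to_alipay_dict()                 # plain dict, ready for the API layer
    restored = InsCoverage.from_alipay_dict(params)
    return restored.coverage_name, restored.premium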
| 1.914063
| 2
|
pommermanLearn/models/pommer_q_embedding_rnn.py
|
FrankPfirmann/playground
| 0
|
12780019
|
from typing import Callable
import numpy as np
import torch
import torch.nn as nn
from util.data import transform_observation
class PommerQEmbeddingRNN(nn.Module):
def __init__(self, embedding_model):
super(PommerQEmbeddingRNN, self).__init__()
self.embedding_model = embedding_model
self.memory = []
self.steps = 10
        # Stacked LSTM: wrap the per-step LSTMs in an nn.ModuleList so their
        # parameters are registered with the module (a plain Python list would
        # hide them from the optimizer and from .to(device)).
        self.rnn = nn.ModuleList(nn.LSTM(64, 64) for _ in range(self.steps))
self.linear = nn.Sequential(
nn.Flatten(),
nn.ReLU(),
nn.Linear(in_features=64, out_features=6),
nn.Softmax(dim=-1)
)
def forward(self, obs):
while len(self.memory) >= self.steps:
self.memory.pop(0)
while len(self.memory) != self.steps:
self.memory.append(obs)
# x=obs[0] # Board Embedding
x = None
h = None
for obs_n, rnn_n in zip(self.memory, self.rnn):
x_n = obs_n[0]
x, h = rnn_n(x_n, h)
x = self.linear(x).squeeze()
return x
def get_transformer(self) -> Callable:
"""
Return a callable for input transformation.
The callable should take a ``dict`` containing data of a single
observation from the Pommerman environment and return a ``list``
of individual numpy arrays that can be used later as an input
value in the ``forward()`` function.
"""
def transformer(obs: dict) -> list:
planes = transform_observation(obs, p_obs=True, centralized=True)
planes = np.array(planes, dtype=np.float32)
# Generate embedding
# flattened = planes.flatten()
# flattened = torch.tensor(flattened, device=torch.device('cpu')) # TODO: Make 'cpu' variable
X = torch.tensor(planes, device=torch.device('cpu')).unsqueeze(0)
board_embedding = self.embedding_model.forward(X)
board_embedding = board_embedding.detach().numpy()
return [
board_embedding
]
return transformer
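
# --- Illustrative usage sketch (not part of the original module) ---
# A hedged, minimal example of how this network might be wired up. The dummy
# embedding model, the 64-dimensional embedding size, and the unbatched LSTM
# input are assumptions made only for this sketch (recent PyTorch versions
# accept 2D, unbatched LSTM inputs); the real embedding model and Pommerman
# observations come from elsewhere in this project.
def _pommer_q_embedding_rnn_sketch():
    class _DummyEmbedding(nn.Module):
        def forward(self, x):
            # pretend to produce a 64-dimensional board embedding
            return torch.zeros(x.shape[0], 64)

    net = PommerQEmbeddingRNN(_DummyEmbedding())
    board_embedding = torch.zeros(1, 64)   # stand-in for the transformer output
    q_values = net([board_embedding])      # forward() expects a list per observation
    return q_values.shape                  # one Q-value per action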
| 2.671875
| 3
|
text_replace/presets/relative_url_prefixer.py
|
Salaah01/text-replace
| 0
|
12780020
|
<reponame>Salaah01/text-replace
"""Prepends some text to a relative URL."""
try:
import config # noqa: F401
except ModuleNotFoundError:
from . import config # noqa: F401
from replace import replace
def relative_url_prefixer(filePath: str, newText: str) -> None:
"""Prepends some text to a relative URL."""
replace(
filePath,
r'(?<!(\/|<|\w|:))((\/)(\w{0,}))(?<!(\/))',
f'{newText}\\2',
False
)
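
# --- Illustrative sketch (not part of the original preset) ---
# A hedged demonstration of the pattern used above, applied with re.sub so the
# behaviour can be inspected without touching a file. The prefix URL and the
# HTML snippet are made-up examples; actual file rewriting is delegated to the
# replace() helper above.
def _relative_url_prefixer_regex_sketch() -> str:
    import re
    pattern = r'(?<!(\/|<|\w|:))((\/)(\w{0,}))(?<!(\/))'
    html = '<a href="/about">About</a>'
    # expected result: '<a href="https://example.com/about">About</a>'
    return re.sub(pattern, 'https://example.com\\2', html)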
| 2.6875
| 3
|
potosnail.py
|
spe301/Potosnail
| 0
|
12780021
|
<filename>potosnail.py
import pandas as pd
import numpy as np
from math import log
from sklearn.svm import SVC, SVR
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.cluster import KMeans
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, plot_tree
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, AdaBoostClassifier, AdaBoostRegressor, GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import confusion_matrix, roc_curve, auc, plot_confusion_matrix
from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelBinarizer, OneHotEncoder
from sklearn.pipeline import Pipeline
from matplotlib import pyplot as plt
from sklearn.feature_selection import SelectKBest
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.decomposition import PCA
import os
import random
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from tensorflow.keras import layers, models
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
from tensorflow.keras.regularizers import L1, L2
import warnings
warnings.filterwarnings('ignore')
import seaborn as sns
from pytrends.request import TrendReq
import plotly.graph_objects as go
import plotly.express as px
from scipy.signal import find_peaks
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa import ar_model, stattools, arima_model
import datetime
import tweepy
from textblob import TextBlob
class MachineLearning:
def CompareModels(self, X, y, task):
        '''returns the out-of-the-box accuracy of several sklearn models'''
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
n = len(np.unique(y))
if task == 'classification':
if n == 2:
methods = [KNeighborsClassifier(), GaussianNB(), DecisionTreeClassifier(), RandomForestClassifier(),
AdaBoostClassifier(), GradientBoostingClassifier(), LogisticRegression(),
SVC()]
strs = ['KNN', 'NB', 'DT', 'RF', 'AB', 'GB', 'Log', 'SVM']
else:
methods = [KNeighborsClassifier(), DecisionTreeClassifier(), RandomForestClassifier(),
AdaBoostClassifier(), GradientBoostingClassifier(), SVC()]
                strs = ['KNN', 'DT', 'RF', 'AB', 'GB', 'SVM']
if task == 'regression':
methods = [LinearRegression(), KNeighborsRegressor(), DecisionTreeRegressor(), RandomForestRegressor(),
AdaBoostRegressor(), GradientBoostingRegressor(), SVR()]
strs = ['Lin', 'KNN', 'DT', 'RF', 'AB', 'GB', 'SVM']
train_acc = []
test_acc = []
for i in range(len(methods)):
model = methods[i].fit(X_train, y_train)
train_acc.append(model.score(X_train, y_train))
test_acc.append(model.score(X_test, y_test))
c1 = pd.DataFrame(strs)
c2 = pd.DataFrame(train_acc)
c3 = pd.DataFrame(test_acc)
results = pd.concat([c1, c2, c3], axis='columns')
results.columns = ['Model', 'train_acc', 'test_acc']
return results
def GetGrids(self, X, y, task):
al = Algorithms()
        n = len(np.unique(y))  # number of classes; binary problems also get NB and LogisticRegression grids
neighbors1, neighbors2, neighbors3 = al.Neighbors(X, y, 'classification')
knc = {'n_neighbors': [5, neighbors1, neighbors2, neighbors3],
'weights': ['uniform', 'distance'], 'p': [1, 2]}
gnb = {'var_smoothing': [1e-13, 1e-11, 1e-9, 1e-7, 1e-5]}
dtc = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2, 5, 8, None]}
rfc = {'n_estimators': al.Estimators(X, 50), 'criterion': ['gini', 'entropy'],
'max_depth': [2, 5, 8, None]}
abc = {'n_estimators': al.Estimators(X, 50), 'learning_rate': [0.1, 0.5, 1], 'algorithm': ['SAMME', 'SAMME.R']}
gbc = {'learning_rate': [0.1, 0.5, 1], 'n_estimators': al.Estimators(X, 100),
'criterion': ['friedman_mse', 'mse', 'mae']}
lor = {'penalty': ['l1', 'l2', None], 'C': [0.1, 0.5, 1, 2, 10],
'fit_intercept': [True, False]}
svc = {'kernel': ['linear', 'rbf'], 'gamma': ['scale', 'auto'],
'C': [0.1, 1, 10]}
lir = {'fit_intercept': [True, False]}
knr = {'n_neighbors': al.Neighbors(X, y, 'regression')}
dtr = {'max_depth': [2, 5, 8, None], 'criterion': ['mse', 'mae', 'poisson'],
'splitter': ['best', 'random']}
rfr = {'n_estimators': al.Estimators(X, 50), 'criterion': ['mse', 'mae'],
'max_depth': [2, 5, 8, None]}
abr = {'n_estimators': al.Estimators(X, 50), 'learning_rate': [0.1, 0.5, 1],
'loss': ['linear', 'square', 'exponential']}
gbr = {'learning_rate': [0.1, 0.5, 1], 'n_estimators': al.Estimators(X, 100),
'criterion': ['friedman_mse', 'mse', 'mae']}
svr = {'kernel': ['linear', 'rbf'], 'gamma': ['scale', 'auto'],
'C': [0.1, 1, 10]}
if task == 'regression':
grid = {'lir': [lir, LinearRegression()], 'knr': [knr, KNeighborsRegressor()],
'dtr': [dtr, DecisionTreeRegressor()], 'rfr': [rfr, RandomForestRegressor()],
'abr': [abr, AdaBoostRegressor()], 'gbr': [gbr, GradientBoostingRegressor()],
'svr': [svr, SVR()]}
if task == 'classification':
grid = {'knc': [knc, KNeighborsClassifier()], 'dtc': [dtc, DecisionTreeClassifier()],
'rfc': [rfc, RandomForestClassifier()], 'abc': [abc, AdaBoostClassifier()],
'gbc': [gbc, GradientBoostingClassifier()], 'svc': [svc, SVC()]}
if n == 2:
grid['gnb'] = [gnb, GaussianNB()]
grid['lor'] = [lor, LogisticRegression()]
return grid
    def Optimize(self, model, parameters, X, y, metric='accuracy'): #make verbose a kwarg
        '''facilitates a gridsearch on any sklearn model'''
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
        try:
            return GridSearchCV(model, parameters, cv=3, scoring=metric, n_jobs=-1, verbose=2).fit(X_train, y_train).best_estimator_
        except:
            # fall back to the estimator's default scorer if the requested metric is unsupported
            return GridSearchCV(model, parameters, cv=3, n_jobs=-1, verbose=2).fit(X_train, y_train).best_estimator_
def SeeModel(self, model, parameters, data, target_str, task, ensemble=False):
        '''runs a gridsearch on a sklearn model and evaluates model performance'''
dh = DataHelper()
ev = Evaluater()
ml = MachineLearning()
        train, test = dh.HoldOut(data)
        X = train.drop([target_str], axis='columns')
        y = train[target_str]
        mod = ml.Optimize(model, parameters, X, y)
        score = ev.ScoreModel(mod, X, y)
Xval = test.drop([target_str], axis='columns')
yval = test[target_str]
if task == 'regression':
results = ev.EvaluateRegressor(mod, X, Xval, y, yval)
return mod, score, results
if task == 'classification':
fit = mod.fit(X, y)
cm = ev.BuildConfusion(fit, Xval, yval)
if ensemble == True:
importance = ev.GetImportance(mod, X, y)
else:
importance = None
return mod, score, fit, cm, importance
    def ClusterIt(self, data, clusters):
        '''clusters the data with KMeans and appends the predicted cluster labels'''
k = KMeans(n_clusters=clusters).fit(data)
pred = k.predict(data)
centers = k.cluster_centers_
X2 = pd.DataFrame(data)
y = pd.DataFrame(pred)
y.columns = ['cluster']
results = pd.concat([X2, y], axis='columns')
return results
def Cluster2D(self, data, clusters):
dh = DataHelper()
ml = MachineLearning()
reduced = dh.ScaleData('pca', data, dim=2)
return ml.ClusterIt(reduced, clusters)
    def AMC(self, X, y, task):
        '''Automated Model Comparison: fits each candidate estimator and returns the [param_grid, model] pair with the best training score'''
        gmps = MachineLearning().GetGrids(X, y, task)
        methods = list(gmps.keys())
        results = {}
for method in methods:
model = gmps[method][1]
model.fit(X, y)
score = model.score(X, y)
results[score] = gmps[method]
return results[max(results)]
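
# --- Illustrative sketch (not part of the original library) ---
# A hedged, minimal example of MachineLearning.CompareModels on a tiny
# synthetic binary-classification set. The data below is made up purely for
# demonstration; the function is defined but never called on import.
def _compare_models_sketch():
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.rand(200, 4), columns=['f1', 'f2', 'f3', 'f4'])
    y = pd.Series((X['f1'] + X['f2'] > 1).astype(int))
    # returns a DataFrame with one row per estimator and its train/test accuracy
    return MachineLearning().CompareModels(X, y, 'classification')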
class DeepLearning:
def DeepTabularRegression(self, nodes, activation, regularizer, stacking, dropout, nlayers, closer, loss, optimizer, y_var_str):
'''Builds a FeedForward net that does regression on tabular data'''
output_dim = 1
oa = 'linear'
model = models.Sequential()
if regularizer == 'L1':
model.add(layers.Dense(nodes, activation=activation, kernel_regularizer=L1(0.005)))
if regularizer == 'L2':
model.add(layers.Dense(nodes, activation=activation, kernel_regularizer=L2(0.005)))
if regularizer == None:
model.add(layers.Dense(nodes, activation=activation))
if stacking == True:
model.add(layers.Dense(nodes, activation=activation))
if dropout == True:
model.add(layers.Dropout(0.5))
if nlayers > 2:
model.add(layers.Dense(int(nodes/2), activation=activation))
if nlayers > 3:
model.add(layers.Dense(int(nodes/4), activation=activation))
if nlayers > 4:
for i in range(4, nlayers):
model.add(layers.Dense(int(nodes/4), activation=activation))
if closer == True:
model.add(layers.Dense(2, activation=activation))
model.add(layers.Dense(output_dim, activation=oa))
model.compile(loss=loss, optimizer=optimizer)
return model
def DeepTabularClassification(self, output_dim, nodes, activation, regularizer, stacking, dropout, nlayers, closer, loss, optimizer):
'''Builds a FeedForward net that does classification on tabular data'''
if output_dim == 2:
oa = 'sigmoid'
else:
oa = 'softmax'
model = models.Sequential()
if regularizer == 'L1':
model.add(layers.Dense(nodes, activation=activation, kernel_regularizer=L1(0.005)))
if regularizer == 'L2':
model.add(layers.Dense(nodes, activation=activation, kernel_regularizer=L2(0.005)))
if regularizer == None:
model.add(layers.Dense(nodes, activation=activation))
if stacking == True:
model.add(layers.Dense(nodes, activation=activation))
if dropout == True:
model.add(layers.Dropout(0.5))
if nlayers > 2:
model.add(layers.Dense(int(nodes/2), activation=activation))
if nlayers > 3:
model.add(layers.Dense(int(nodes/4), activation=activation))
if nlayers > 4:
for i in range(4, nlayers):
model.add(layers.Dense(int(nodes/4), activation=activation))
if closer == True:
model.add(layers.Dense(output_dim*2, activation=activation))
model.add(layers.Dense(output_dim, activation=oa))
model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
return model
def PipeIt(self, scaler, model, X, y):
'''an sklearn pipeline that returns the train and test score with scaled data'''
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
pipe = Pipeline([('scaler', scaler), ('model', model)]).fit(X_train, y_train)
return "Training: {}, Validation: {}".format(pipe.score(X_train, y_train), pipe.score(X_test, y_test))
def FastNN(self, task, loss, output_dim=None, nodes=64, activation='relu', regularizer=None, stacking=False, dropout=False, nlayers=4, closer=False, optimizer='adam'):
'''Build a FeedForward Network without filling in a bunch of parameters'''
dl = DeepLearning()
if task == 'regression':
model = dl.DeepTabularRegression(nodes, activation, regularizer, stacking, dropout, nlayers, closer, loss, optimizer, 'target')
if task == 'classification':
output_dim = output_dim
model = dl.DeepTabularClassification(output_dim, nodes, activation, regularizer, stacking, dropout, nlayers, closer, loss, optimizer)
return model
def RNN(self, output_dim, embedding, nodes, activation, regularizer, stacking, dropout, optimizer, method, bidirectional):
'''builds a neural network with LSTM or GRU layer(s)'''
al = Algorithms()
if output_dim > 16:
pen = output_dim*2
else:
pen = 16
if output_dim == 2:
oa = 'sigmoid'
loss = 'binary_crossentropy'
if output_dim == 1:
oa = 'linear'
loss = 'MSE'
if output_dim >= 3:
oa = 'softmax'
loss = 'categorical_crossentropy'
model = models.Sequential()
model.add(layers.Embedding(embedding, nodes))
if method == 'LSTM':
if regularizer == None:
if bidirectional == False:
model.add(layers.LSTM(nodes, activation=activation, return_sequences=stacking))
else:
model.add(layers.Bidirectional(layers.LSTM(nodes, activation=activation, return_sequences=stacking)))
if regularizer == 'L1':
if bidirectional == False:
model.add(layers.LSTM(nodes, activation=activation, kernel_regularizer=L1(0.005), return_sequences=stacking))
else:
model.add(layers.Bidirectional(layers.LSTM(nodes, activation=activation, return_sequences=stacking)))
if regularizer == 'L2':
if bidirectional == False:
model.add(layers.LSTM(nodes, activation=activation, kernel_regularizer=L2(0.005), return_sequences=stacking))
else:
model.add(layers.Bidirectional(layers.LSTM(nodes, activation=activation, return_sequences=stacking)))
if method == 'GRU':
if regularizer == None:
if bidirectional == False:
model.add(layers.GRU(nodes, activation=activation, return_sequences=stacking))
else:
model.add(layers.Bidirectional(layers.GRU(nodes, activation=activation, return_sequences=stacking)))
if regularizer == 'L1':
if bidirectional == False:
model.add(layers.GRU(nodes, activation=activation, kernel_regularizer=L1(0.005), return_sequences=stacking))
else:
model.add(layers.Bidirectional(layers.GRU(nodes, activation=activation, return_sequences=stacking)))
if regularizer == 'L2':
if bidirectional == False:
model.add(layers.GRU(nodes, activation=activation, kernel_regularizer=L2(0.005), return_sequences=stacking))
else:
model.add(layers.Bidirectional(layers.GRU(nodes, activation=activation, return_sequences=stacking)))
if dropout == True:
model.add(layers.Dropout(0.5))
if stacking == True:
nodes = nodes//2
if method == 'LSTM':
if bidirectional == False:
model.add(layers.LSTM(nodes, activation=activation, return_sequences=stacking))
else:
model.add(layers.Bidirectional(layers.LSTM(nodes, activation=activation, return_sequences=stacking)))
if method == 'GRU':
if bidirectional == False:
model.add(layers.GRU(nodes, activation=activation, return_sequences=stacking))
else:
model.add(layers.Bidirectional(layers.GRU(nodes, activation=activation, return_sequences=stacking)))
p = al.Powers(nodes)
for i in range(p):
nodes /= 2
nodes = int(nodes)
if nodes > 16:
if nodes > output_dim:
model.add(layers.Dense(nodes, activation=activation))
model.add(layers.Dense(pen, activation=activation))
model.add(layers.Dense(output_dim, activation=oa))
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
return model
def FastRNN(self, output_dim, embedding, nodes=64, activation='tanh', regularizer=None, stacking=False, dropout=None, optimizer='adam', method='GRU', bidirectional=True):
'''Build a Recurrent Network without filling in a bunch of parameters'''
dl = DeepLearning()
return dl.RNN(output_dim, embedding, nodes, activation, regularizer, stacking, dropout, optimizer, method, bidirectional)
    def CNN(self, output_dim, base_filters, kernel_size, activation, nblocks, pool, dropout, closer, optimizer, metrics):
        '''Builds a Convolutional Network'''
        if output_dim == 2:
            oa = 'sigmoid'
            loss = 'binary_crossentropy'
        elif output_dim == 1:
            oa = 'linear'
            loss = 'MSE'
        else:
            oa = 'softmax'
            loss = 'categorical_crossentropy'
model = models.Sequential()
model.add(layers.Conv2D(base_filters, (kernel_size, kernel_size), activation=activation))
model.add(layers.MaxPooling2D(pool, pool))
if nblocks > 1:
for i in range(nblocks-1):
model.add(layers.Conv2D(base_filters*2, (kernel_size, kernel_size), activation=activation))
model.add(layers.MaxPooling2D(pool, pool))
model.add(layers.Flatten())
if dropout == True:
model.add(layers.Dropout(0.5))
model.add(layers.Dense(base_filters*2, activation=activation))
if base_filters/2 >= output_dim*2:
model.add(layers.Dense(base_filters/2, activation=activation))
if closer == True:
model.add(layers.Dense(output_dim*2, activation=activation))
else:
if base_filters/2 < output_dim*2:
model.add(layers.Dense(base_filters/2, activation=activation))
model.add(layers.Dense(output_dim, activation=oa))
model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
return model
def FastCNN(self, output_dim, base_filters=32, kernel_size=3, activation='relu', nblocks=3, pool=2, dropout=False, closer=False, optimizer='adam', metrics='accuracy'):
'''Build a Convolutional Network without filling in a bunch of parameters'''
dl = DeepLearning()
return dl.CNN(output_dim, base_filters, kernel_size, activation, nblocks, pool, dropout, closer, optimizer, metrics)
def TestDL(self, params, func, task, X, y, batch_size=64, epochs=50, cv=3, patience=5):
'''wraps the keras wrapper functions and GridSearchCV into a simple one liner, one line plus the parameter grid'''
early_stopping = [EarlyStopping(patience=patience), ModelCheckpoint(filepath='model.{epoch:02d}-{val_loss:.2f}.h5')]
if task == 'classification':
k = KerasClassifier(func)
if task == 'regression':
k = KerasRegressor(func)
grid = GridSearchCV(k, params, cv=cv)
grid.fit(X, y, batch_size=batch_size, epochs=epochs, validation_split=0.2, callbacks=early_stopping)
return grid
def CollectPerformance(self, params, func, X, y, epochs=50, batch_size=32, patience=5, regression=False):
'''puts model training results from a gridsearch into a DataFrame'''
early_stopping = [EarlyStopping(patience=patience), ModelCheckpoint(filepath='model.{epoch:02d}-{val_loss:.2f}.h5')]
n = list(params.keys())
fits = len(params[n[0]])*len(params[n[1]])*len(params[n[2]])*len(params[n[3]])*len(params[n[4]])*len(params[n[5]])*len(params[n[6]])*len(params[n[7]])*len(params[n[8]])*len(params[n[9]])
        print('performing {} total fits ...'.format(fits))
lst1 = []
lst2 = []
lst3 = []
lst4 = []
lst5 = []
lst6 = []
lst7 = []
lst8 = []
lst9 = []
lst10 = []
acc = []
loss = []
val_acc = []
val_loss = []
epics = []
bs = []
progress = 0
for i in range(len(params[n[0]])):
var1 = params[n[0]][i]
for i in range(len(params[n[1]])):
var2 = params[n[1]][i]
for i in range(len(params[n[2]])):
var3 = params[n[2]][i]
for i in range(len(params[n[3]])):
var4 = params[n[3]][i]
for i in range(len(params[n[4]])):
var5 = params[n[4]][i]
for i in range(len(params[n[5]])):
var6 = params[n[5]][i]
for i in range(len(params[n[6]])):
var7 = params[n[6]][i]
for i in range(len(params[n[7]])):
var8 = params[n[7]][i]
for i in range(len(params[n[8]])):
var9 = params[n[8]][i]
for i in range(len(params[n[9]])):
var10 = params[n[9]][i]
lst1.append(var1)
lst2.append(var2)
lst3.append(var3)
lst4.append(var4)
lst5.append(var5)
lst6.append(var6)
lst7.append(var7)
lst8.append(var8)
lst9.append(var9)
lst10.append(var10)
                                                # build the model from the exact combination recorded above
                                                history = func(var1, var2, var3, var4, var5,
                                                               var6, var7, var8, var9, var10).fit(X, y, batch_size=batch_size, epochs=epochs, validation_split=0.2, callbacks=early_stopping)
progress += 1
print('{} of {} fits complete!'.format(progress, fits))
if regression == False:
acc.append(history.history['accuracy'][-1])
loss.append(history.history['loss'][-1])
if regression == False:
val_acc.append(history.history['val_accuracy'][-1])
val_loss.append(history.history['val_loss'][-1])
epics.append(len(history.history['loss']))
bs.append(batch_size)
if regression == False:
results = {n[0] : lst1, n[1] : lst2, n[2] : lst3, n[3] : lst4,
n[4] : lst5, n[5] : lst6, n[6] : lst7, n[7] : lst8,
n[9]: lst10, 'epochs': epics, 'batch_size': bs, n[8] : lst9,
'accuracy': acc, 'loss': loss, 'val_accuracy': val_acc, 'val_loss': val_loss}
else:
results = {n[0] : lst1, n[1] : lst2, n[2] : lst3, n[3] : lst4,
n[4] : lst5, n[5] : lst6, n[6] : lst7, n[7] : lst8,
n[9]: lst10, 'epochs': epics, 'batch_size': bs, n[8] : lst9,
'loss': loss, 'val_loss': val_loss}
df = pd.DataFrame(results)
df.columns = list(results.keys())
return df
def ClassifyImage(self, model_dir, image, classes):
'''uses a pretrained model to classify an individual image'''
model = models.load_model(model_dir)
plt.imshow(image[0])
clsix = int(round(model.predict(image)[0][0]))
pred = classes[clsix]
return pred, model.predict(image)[0][0]
def ClassifyText(self, model_dir, text_str, pad):
'''uses a pretrained model to classify an individual body of text'''
model = models.load_model(model_dir)
text = [text_str]
t = Tokenizer()
t.fit_on_texts(text)
tokens = t.texts_to_sequences(text)
tokens2 = pad_sequences(tokens, maxlen=pad)
result = model.predict(tokens2)
return result
def MulticlassOutput(self, labels):
'''output preprocessing'''
enc = OneHotEncoder()
y = labels.reshape(-1, 1)
oh = enc.fit_transform(y).toarray()
return oh
def ModelReadyText1(self, text, labels, pad):
'''converts text into tokenized sequences'''
t = Tokenizer()
t.fit_on_texts(text)
tokens = t.texts_to_sequences(text)
tokens2 = pad_sequences(tokens, maxlen=pad)
dl = DeepLearning()
y = dl.MulticlassOutput(np.array(labels))
return tokens2, y
def ModelReadyText2(self, text, labels, num_words):
'''converts text into one-hot-encoded vectors'''
dl = DeepLearning()
text = list(text)
t = Tokenizer(num_words=num_words)
t.fit_on_texts(text)
oh = t.texts_to_matrix(text)
y = dl.MulticlassOutput(np.array(labels))
return oh, y
def ModelReadyPixles(self, directories, classes, target_size=(150, 150)):
'''gets images ready to be fed into a CNN'''
n_classes = len(classes)
if n_classes == 2:
class_mode = 'binary'
else:
class_mode = 'categorical'
idg = ImageDataGenerator(rescale=1./255)
lens = []
n = len(directories)
for i in range(2, n):
lens.append(len(os.listdir(directories[i])))
bstr = sum(lens[:n_classes])
bste = sum(lens[n_classes:])
trig = idg.flow_from_directory(batch_size=bstr, directory=directories[0], shuffle=True, target_size=target_size, class_mode=class_mode)
teig = idg.flow_from_directory(batch_size=bste, directory=directories[1], shuffle=True, target_size=target_size, class_mode=class_mode)
tri, trl = next(trig)
tei, tel = next(teig)
dl = DeepLearning()
tray = dl.MulticlassOutput(trl)
tey = dl.MulticlassOutput(tel)
return tri, tei, tray, tey
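
# --- Illustrative sketch (not part of the original library) ---
# A hedged example of DeepLearning.FastNN building (but not training) a small
# feed-forward classifier for a hypothetical 3-class problem. The loss name
# and output dimension are chosen only for illustration.
def _fast_nn_sketch():
    model = DeepLearning().FastNN('classification', 'categorical_crossentropy', output_dim=3)
    return model  # an untrained, compiled keras Sequential model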
class DataHelper:
def SmoteIt(self, X, y, yvar='target'):
        '''naive random oversampling of the minority class; expects a binary target'''
info = dict(pd.Series(y).value_counts())
info2 = {y:x for x,y in info.items()}
less = info2[min(info.values())]
mx = max(info2)
mn = min(info2)
ix = mx - mn
df = pd.DataFrame(X)
df[yvar] = list(y)
        if ix<=mn:
            df2 = df.loc[df[yvar]==less][:ix]
            df3 = pd.concat([df, df2])
else:
ix = mx % mn
df1 = df.loc[df[yvar]==less]
df2 = df1[:ix]
dfs = [df2]
for i in range((mx // mn)-1):
dfs.append(df1)
df4 = pd.concat(dfs)
df3 = pd.concat([df, df4])
return df3
def AMF(self, X):
        '''automatically filters out features with high multicollinearity'''
vif_scores = pd.DataFrame(DataHelper().VifIt(X))
vif_scores.columns = ['vif']
vif_scores['fname'] = list(X.columns)
try:
df = X[list(vif_scores.loc[vif_scores['vif'] < 5.5]['fname'])]
except:
            return 'VIF filtering failed; make sure all features are numeric'
return df
def HoldOut(self, data):
        '''puts 10% of the data into a separate dataset for testing purposes'''
train, test = train_test_split(data, test_size=0.1)
return train, test
def MakeNewDF(self, X, y, k):
'''drops less important features with sklearn's SelectKBest'''
selector = SelectKBest(k=k).fit(X, y)
mask = selector.get_support()
selected = []
for i in range(len(mask)):
if mask[i] == True:
selected.append(X.columns[i])
df = pd.DataFrame(selector.transform(X))
df.columns = selected
return df
def ScaleData(self, strategy, X, dim=None): #fix column problem
'''Scales data via minmax, standard, mean, or PCA scaling'''
if strategy == 'minmax':
return pd.DataFrame(MinMaxScaler().fit(X).transform(X))
if strategy == 'standard':
return pd.DataFrame(StandardScaler().fit(X).transform(X))
if strategy == 'pca':
try:
return pd.DataFrame(PCA(n_components=dim).fit_transform(X))
except:
return 'please pass an integer for dim'
def VifIt(self, X):
        '''returns VIF scores to help prevent multicollinearity'''
vif = pd.Series([variance_inflation_factor(X.values, i)
for i in range(X.shape[1])],
index=X.columns)
return vif
def MakeDirs(self, train_dir, test_dir, classes, data_type='images'):
'''makes filepaths, intended for loading in image data'''
classes.reverse()
rc = classes
if data_type == 'images':
dirs = [train_dir, test_dir]
for c in rc:
dirs.append(os.path.join(train_dir, c))
for i in range(len(classes)):
dirs.append(os.path.join(test_dir, rc[i]))
return dirs
def GetCats(self, X):
'''returns an array of booleans with 'True' indicating that a feature is categorical'''
feats = list(X.columns)
boolys = []
for feat in feats:
boolys.append(list(np.unique(X[feat])) == [0.0, 1.0])
return np.array(boolys)
def Scrape(self, url):
        '''Scrapes a Wikipedia article'''
source = urlopen(url).read()
soup = BeautifulSoup(source, 'lxml')
text = soup.findAll('p')
article = ''
for i in range(len(text)):
segment = text[i].text
article += segment.replace('\n', '').replace('\'', '').replace(')', '')
article = article.lower()
clean = re.sub("([\(\[]).*?([\)\]])", '', article)
clean2 = re.sub(r'\[(^)*\]', '', clean)
return clean
def GetVocab(self, df, data_str):
'''returns the vocab size in a text corpus'''
words = []
for i in range(len(df)):
word_lst = list(df[data_str])[i].replace('\n', ' ').split(' ')
for word in word_lst:
words.append(word.replace('.', '').replace(',', '').replace(' ', '').replace('"', '').replace(':', '').replace(';', '').replace('!', ''))
return len(np.unique(words))
def Binarize(self, df, columns_list):
for col in columns_list:
booly = list(df[col].apply(lambda x: x==df[col][0], False))
inty = list(map(int, booly))
df[col] = inty
return df
def OHE(self, series):
ohe = OneHotEncoder()
oh = pd.DataFrame(ohe.fit_transform(np.array(series).reshape(-1, 1)).toarray())
oh.columns = list(np.unique(series))
return oh
def Stars2Binary(self, series):
thresh = int(round(max(series)/2))
booly = list(series.apply(lambda x: x>=thresh, True))
return list(map(int, booly))
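
# --- Illustrative sketch (not part of the original library) ---
# A hedged example of DataHelper.SmoteIt oversampling the minority class of a
# tiny made-up binary dataset until both classes have equal counts.
def _smote_it_sketch():
    X = pd.DataFrame({'a': range(10), 'b': range(10, 20)})
    y = pd.Series([0] * 7 + [1] * 3)
    balanced = DataHelper().SmoteIt(X, y, yvar='target')
    return balanced['target'].value_counts()  # expected: 7 rows of each class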
class Evaluater:
def ScoreModel(self, model, X, y):
'''returns accuracies for any sklearn model'''
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
algo = model.fit(X_train, y_train)
return 'Training: {}, Validation: {}'.format(algo.score(X_train, y_train), algo.score(X_test, y_test))
def BuildConfusion(self, fitted_model, Xval, yval, cmap='Blues'):
'''returns a Confusion Matrix given a pretrained sklearn model'''
return plot_confusion_matrix(fitted_model, Xval, yval, cmap=cmap)
def BuildConfusionDL(self, model, X, y, normalize='true', cmap='Blues'):
'''displays a confusion matrix to evaluate a deep learning classifier'''
yreal = y.argmax(axis=1)
pred = model.predict(X)
prediction = pred.argmax(axis=1)
cm = confusion_matrix(yreal, prediction, normalize=normalize)
plot = sns.heatmap(cm, annot=True, cmap=cmap);
plot.set_ylabel('True')
plot.set_xlabel('Predict')
return plot
def BuildTree(self, tree):
'''a copy of plot_tree() from sklearn'''
try:
return plot_tree(tree)
except:
return 'Please pass a fitted model from the tree class'
def GetCoefficients(self, model, X, y):
'''returns coefficients from a sklearn model'''
try:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
model.fit(X_train, y_train)
return 'coefficients: {}'.format(model.coef_)
except:
return 'Please pass LinearRegression, LogisticRegression, or an SVM with a linear kernel'
def GetImportance(self, model, X, y):
'''returns feature importances from an ensemble sklearn model'''
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
model.fit(X_train, y_train)
try:
FI = model.feature_importances_
n_features = X_train.shape[1]
plt.figure(figsize=(8,8))
plt.barh(range(n_features), FI, align='center')
plt.yticks(np.arange(n_features), X_train.columns.values)
plt.xlabel('Feature importance')
plt.ylabel('Feature')
except:
return 'Please pass an ensemble class'
def EvaluateRegressor(self, model, X, Xval, y, yval):
        '''returns an error table, the mean squared error, and accuracy of a sklearn regression model'''
model.fit(X, y)
pred = model.predict(Xval)
n = len(Xval)
a = list(yval)
e = []
pe = []
RMSE = 0
for i in range(n):
e.append(abs(pred[i] - a[i]))
RMSE += ((a[i] - pred[i])**2) / n
if pred[i] > a[i]:
pe.append(a[i]/pred[i])
else:
pe.append(100 - ((pred[i]/a[i])*100))
p = pd.DataFrame(pred)
a = pd.DataFrame(a)
e = pd.DataFrame(e)
pe = pd.DataFrame(pe)
results = pd.concat([p, a, e, pe], axis='columns')
results.columns = ['predicted', 'actual', 'error', '%error']
score = model.score(Xval, yval)*100
return results, RMSE, round(score, 2)
def BinaryCBA(self, model, X, y, rate, total_cost, discount, quiet=True):
        '''estimates the cost and benefit of using the model to make decisions'''
cm = confusion_matrix(y, model.predict(X))
tn = cm[0][0]
fp = cm[0][1]
fn = cm[1][0]
tp = cm[1][1]
cost = (fp*discount)+(fn*total_cost)
benefit = tp*((rate+total_cost)-discount)
if quiet == False:
return 'the cost is ${} and the benefit is ${}'.format(cost, benefit)
else:
return cost, benefit
def DoCohen(self, group1, group2):
'''calculates Cohen's D between 2 population samples'''
n1 = len(group1)
sd1 = np.std(group1)
n2 = len(group2)
sd2 = np.std(group2)
num = (n1 - 1)*(sd1**2) + (n2 - 1)*(sd2**2)
denom = (n1 + n2)-2
pooled_sd = np.sqrt(num/denom)
numerator = abs(np.mean(group1) - np.mean(group2))
return numerator/pooled_sd
def ViewAccuracy(self, history):
'''plots a model's accuracy throughout training'''
plt.plot(list(range(len(history.history['accuracy']))), history.history['accuracy'], label='train');
plt.plot(list(range(len(history.history['accuracy']))), history.history['val_accuracy'], label='val');
plt.legend(loc='best')
plt.xlabel('epochs')
plt.ylabel('accuracy')
return None
def ViewLoss(self, history):
'''plots a model's loss throughout training'''
plt.plot(list(range(len(history.history['loss']))), history.history['loss'], label='train');
plt.plot(list(range(len(history.history['loss']))), history.history['val_loss'], label='val');
plt.legend(loc='best')
plt.xlabel('epochs')
plt.ylabel('loss')
return None
def AUC(self, model, Xval, yval):
'''displays AUC to evaluate a classification model'''
pred = model.predict(Xval)
fpr, tpr, threshold = roc_curve(yval, pred)
return auc(fpr, tpr)
def ACE(self, fitted_model, metric, Xval, yval, merged=True):
'''Automated Classifier Evaluation'''
pred = fitted_model.predict(Xval)
cm = confusion_matrix(yval, pred)
if metric == 'accuracy':
score1 = fitted_model.score(Xval, yval)
if metric == 'recall':
score1 = cm[1][1] / (cm[1][0] + cm[1][1])
if metric == 'precision':
            score1 = cm[1][1] / (cm[1][1] + cm[0][1])  # precision = TP / (TP + FP)
return score1
def PipeIt(self, scaler, model, X, y, quiet=False):
'''an sklearn pipeline that returns the train and test score with scaled data'''
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
pipe = Pipeline([('scaler', scaler), ('model', model)]).fit(X_train, y_train)
if quiet == True:
return pipe.score(X_test, y_test)
else:
return "Training: {}, Validation: {}".format(pipe.score(X_train, y_train), pipe.score(X_test, y_test))
def InspectTree(self, tree, X, y, forest=False):
'''inspects a decision tree or random forrest'''
ev = Evaluater()
clf = tree.fit(X, y)
if forest == True:
try:
return ev.GetImportance(clf, X, y)
except:
return 'please pass a RandomForestClassifier or other ensemble model for "tree"'
else:
try:
return ev.BuildTree(clf)
except:
return 'please pass a DecisionTreeClassifier or other tree model for "tree"'
def BinaryBarGraph(self, df, opt1, opt2, var):
yes = len(df.loc[df[var] == opt1])/len(df)
no = 1 - yes
X = [opt1, opt2]
y = [yes, no]
sns.barplot(X, y);
plt.show()
return 'The sample is {}% {} and {}% {}'.format(int(round(yes*100)), opt1, int(round(no*100)), opt2)
def GetTopN(self, df, var, n):
dict(df[var].value_counts())
X = list({v: k for k, v in dict(df[var].value_counts()).items()}.values())[:n]
y = list({v: k for k, v in dict(df[var].value_counts()).items()}.keys())[:n]
sns.barplot(X, y);
plt.show()
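
# --- Illustrative sketch (not part of the original library) ---
# A hedged example of Evaluater.DoCohen computing Cohen's d for two small
# made-up samples; the numbers carry no meaning beyond the demonstration.
def _do_cohen_sketch():
    group1 = [2.1, 2.5, 2.8, 3.0, 3.2]
    group2 = [3.9, 4.1, 4.4, 4.6, 5.0]
    return Evaluater().DoCohen(group1, group2)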
class Algorithms:
def GetDC(self, y):
vc = list(dict(pd.Series(y.argmax(axis=1)).value_counts()).values())
return max(vc) / sum(vc)
def Powers(self, n):
        '''returns floor(log2(n)), i.e. how many times n can be halved'''
k = int(log(n, 2))
return k
def Neighbors(self, X, y, task):
'''returns n_neighbors to test in a KNN'''
if task == 'classification':
nclasses = len(np.unique(y))
sizes = list(y.value_counts())
neighbors1 = int(min(sizes)/10)
neighbors2 = int(min(sizes)/nclasses)
neighbors3 = int(neighbors1/2)
return neighbors1, neighbors2, neighbors3
if task == 'regression':
n = int(0.05 * len(X))
return list(range(1, n))
def Estimators(self, X, default):
'''returns range of n_estimators to test'''
step = int(len(X)/100)
tests = [default]
for i in range(1, 11):
tests.append(i*step)
return tests
def GetMetric(self, y, fn):
'''determines if the metric should be accuracy or recall'''
total = len(y)
sizes = list(y.value_counts())
if max(sizes) > total*0.55:
if fn == False:
metric = 'recall'
if fn == True:
metric = 'precision'
else:
metric = 'accuracy'
return metric
def PickScaler(self, X, y, model):
'''decides how to scale data'''
ev = Evaluater()
n_features = len(list(X.columns))
if n_features >= 256:
return 'pca'
else:
ss = ev.PipeIt(StandardScaler(), model, X, y, quiet=True)
mm = ev.PipeIt(MinMaxScaler(), model, X, y, quiet=True)
if ss >= mm:
return 'standard'
else:
return 'minmax'
def ReduceTo(self, X):
        '''divides the number of columns by 5 and converts it to an int'''
return int(len(list(X.columns))/5)
def ToTry(self, X):
'''returns a list of k features to select from'''
n_features = len(list(X.columns))
if n_features >= 15:
k = int(round(n_features/3))
step = int(round(k/3))
k_features = [k]
for i in range(3):
k += step
k_features.append(k)
else:
k_features = [n_features-1, n_features-2, n_features-3, n_features-4]
return k_features
def Imbalanced(self, y):
'''determines if a dataset is imbalanced'''
total = len(y)
sizes = list(y.value_counts())
if max(sizes) > total*0.55:
return True
else:
return False
def ScoreClf(self, model, metric, X, y):
'''Quantifies the quality of a sklearn classifier'''
ev = Evaluater()
if metric == 'accuracy':
score = model.score(X, y)
else:
cm = confusion_matrix(y, model.predict(X))
if metric == 'recall':
s1 = cm[1][1] / (cm[1][1]+cm[1][0])
s2 = cm[0][0] / (cm[0][0]+cm[0][1])
if metric == 'precision':
                s1 = cm[1][1] / (cm[1][1]+cm[0][1])  # precision = TP / (TP + FP)
s2 = cm[1][1] / (cm[1][1]+cm[1][0])
if s2 > 0.52:
score = s1
else:
score = (s1*0.5) + (ev.AUC(model, X, y)*0.5)
return score
def Nodes(self, X, generation=False):
if generation == True:
if len(X) < 1000000:
return [256, 512, 1024]
else:
return [512, 1024, 2048]
else:
if len(X) > 500000:
return [128, 256, 512]
if len(X) > 100000:
return [64, 128, 256]
else:
return [32, 64, 128]
def Epochs(self, structured):
if structured == True:
return list(range(25, 300, 25))
else:
return list(range(5, 60, 5))
def BatchSize(self, X):
if len(X) > 1000000:
return [256, 512, 1024]
if len(X) > 500000:
return [128, 256, 512]
if len(X) > 200000:
return [64, 128, 256]
if len(X) > 100000:
return [64, 128]
if len(X) > 50000:
return [32, 64, 128]
if len(X) > 10000:
return [32, 64]
if len(X) > 1000:
return [16, 32, 64]
else:
return [8, 16, 32, 64]
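
# --- Illustrative sketch (not part of the original library) ---
# A hedged example of the Algorithms heuristics: candidate batch sizes for a
# made-up 5,000-row dataset and the epoch schedule used for unstructured data.
def _algorithms_sketch():
    al = Algorithms()
    return al.BatchSize(list(range(5000))), al.Epochs(False)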
class Wrappers:
def TCP(self, X, y):
        '''fully preprocesses labeled text data and finds the vocab size, specific to classification problems'''
dl = DeepLearning()
avg = 0
words = []
for i in range(len(X)):
avg += len(list(X)[i].split(' '))/len(X)
vocab = len(set(','.join(X).split(' ')))
pad = int(avg)
text, labels = dl.ModelReadyText1(X, y, pad)
return text, labels, vocab
def TRP(self, X, y):
        '''fully preprocesses labeled text data and finds the vocab size, specific to regression problems'''
dl = DeepLearning()
avg = 0
words = []
for i in range(len(X)):
avg += len(list(X)[i].split(' '))/len(X)
word_lst = list(X)[i].replace('\n', ' ').split(' ')
for word in word_lst:
words.append(word.replace('.', '').replace(',', '').replace(' ', '').replace('"', '').replace(':', '').replace(';', '').replace('!', ''))
pad = int(avg)
text, _ = dl.ModelReadyText1(X, y, pad)
return text, np.array(y), len(np.unique(words))
def Vanilla(self, df, target_str, task):
'''returns the best vanilla model and a corresponding parameter grid'''
dh = DataHelper()
ml = MachineLearning()
train, test = dh.HoldOut(df)
X = train.drop([target_str], axis='columns')
Xval = test.drop([target_str], axis='columns')
y = train[target_str]
yval = test[target_str]
mg = ml.AMC(X, y, task)
grid = mg[0]
vanilla = mg[1]
return vanilla, grid, X, Xval, y, yval
def SmoteStack(self, model, grid, X, y, Xval, yval, yvar='target', metric='recall'):
dh = DataHelper()
ev = Evaluater()
ml = MachineLearning()
df = dh.SmoteIt(X, y, yvar=yvar)
X2 = df.drop([yvar], axis='columns')
y2 = df[yvar]
tm = ml.Optimize(model, grid, X2, y2)
score = ev.ACE(tm, metric, Xval, yval)
return model, score, X2, Xval, y2, yval
def FeatureEngineering(self, X, Xval, y, yval, model, grid, task, target_str, metric='accuracy', optimize=True):
al = Algorithms()
dh = DataHelper()
ev = Evaluater()
ml = MachineLearning()
try_list = al.ToTry(X)
results = {}
for num in try_list:
train = dh.MakeNewDF(X, y, num).drop([target_str], axis='columns')
test = Xval[list(train.columns)]
if optimize == True:
fm = ml.Optimize(model, grid, train, y)
else:
fm = model.fit(X, y)
if task == 'classification':
score = ev.ACE(fm, metric, test, yval)
if task == 'regression':
score = ev.EvaluateRegressor(fm, train, test, y, yval)[2]
train[target_str] = list(y)
test[target_str] = list(yval)
results[score] = [fm, train, test, score]
return results[max(results)]
def RegLoop(self, df, target_str, quiet=True):
task = 'regression'
results = {}
wr = Wrappers()
ev = Evaluater()
ml = MachineLearning()
al = Algorithms()
dh = DataHelper()
vanilla, grid, X, Xval, y, yval = wr.Vanilla(df, target_str, task)
model1 = vanilla.fit(X, y)
score1 = ev.EvaluateRegressor(model1, X, Xval, y, yval)[2]
results[score1] = [model1, X, y, Xval, yval, None, None]
if quiet == False:
print('{}% accuracy, untuned model, raw data'.format(score1))
model2 = ml.Optimize(vanilla, grid, X, y)
score2 = ev.EvaluateRegressor(model2, X, Xval, y, yval)[2]
results[score2] = [model2, X, y, Xval, yval, None, None]
if quiet == False:
print('{}% accuracy, tuned model, raw data'.format(score2))
scaler = al.PickScaler(X, y, vanilla)
if scaler == 'pca':
dim = al.ReduceTo(X)
else:
dim = None
X3 = dh.ScaleData(scaler, X, dim=dim)
Xv3 = dh.ScaleData(scaler, Xval, dim=dim)
model3 = ml.Optimize(vanilla, grid, X3, y)
        score3 = ev.EvaluateRegressor(model3, X3, Xv3, y, yval)[2]
results[score3] = [model3, X3, y, Xv3, yval, scaler, dim]
if quiet == False:
print('{}% accuracy, tuned model, data is scaled with {} scaler'.format(score3, scaler))
if scaler == 'pca':
                print('features have been reduced to {}'.format(dim))
try:
vdf = pd.DataFrame(dh.VifIt(X3.drop([target_str], axis='columns')))
except:
vdf = pd.DataFrame(dh.VifIt(X3))
vdf.columns = ['vif']
cols = list(vdf.loc[vdf['vif']<5.5].index)
X4 = X3[cols]
Xv4 = Xv3[cols]
model4 = ml.Optimize(vanilla, grid, X4, y)
        score4 = ev.EvaluateRegressor(model4, X4, Xv4, y, yval)[2]
if quiet == False:
print('{}% accuracy, tuned model, features have been reduced to {}'.format(score4, len(cols)))
results[score4] = [model4, X4, y, Xv4, yval, scaler, dim]
if len(list(X4.columns)) < 10:
return results
else:
FE = wr.FeatureEngineering(X4, Xv4, y, yval, vanilla, grid, task, target_str)
results[FE[3]] = [FE[0], FE[1], FE[2], scaler, dim]
if quiet == False:
                print('{}% accuracy, tuned model, features have been reduced to {}'.format(FE[3], len(list(FE[1].columns))))
return results[max(results)]
def ClfLoop(self, df, target_str, fn=False, quiet=True):
task ='classification'
wr = Wrappers()
ev = Evaluater()
al = Algorithms()
ml = MachineLearning()
dh = DataHelper()
results = {}
vanilla, grid, X, Xval, y, yval = wr.Vanilla(df, target_str, task)
metric = 'precision'
model1 = vanilla.fit(X, y)
metric = al.GetMetric(y, fn=fn)
score1 = ev.ACE(model1, metric, Xval, yval)
results[score1] = [model1, X, Xval, y, yval, None, None]
if quiet==False:
print('raw data, baseline model: {}'.format(score1))
model2 = ml.Optimize(vanilla, grid, X, y)
score2 = ev.ACE(model2, metric, Xval, yval)
results[score2] = [model2, X, Xval, y, yval, None, None]
if quiet==False:
print('raw data, tuned model: {}'.format(score2))
scaler = al.PickScaler(X, y, model2)
if scaler == 'pca':
dim = al.ReduceTo(X)
else:
dim = None
X3 = dh.ScaleData(scaler, X, dim=dim)
Xval3 = dh.ScaleData(scaler, Xval, dim=dim)
model3 = ml.Optimize(vanilla, grid, X3, y)
score3 = ev.ACE(model3, metric, Xval3, yval)
results[score3] = [model3, X3, Xval3, y, yval, scaler, dim]
if quiet==False:
if scaler == 'pca':
print('data has been reduced to {} features with pca'.format(dim))
else:
print('data has been scaled with {} scaler'.format(scaler))
if al.Imbalanced(y) == True:
print('Smoting!!')
print()
model4, score4, X4, Xval3, y4, yval = wr.SmoteStack(vanilla, grid, X3, y, Xval3, yval, yvar=target_str)
state = 'smoted'
else:
state = 'not smoted'
model4, score4, X4, y4 = model3, score3, X3, y
df4 = X4
df4[target_str] = list(y4)
results[score4] = [model4, X4, Xval3, y4, yval, scaler, dim]
if quiet==False:
print('data is {}, tuned model: {}'.format(state, score4))
FE = wr.FeatureEngineering(X4, Xval3, y4, yval, vanilla, grid, task, target_str, metric=metric)
model5 = FE[0]
score5 = FE[3]
results[score5] = [model5, FE[1], FE[2], scaler, dim]
if quiet==False:
print('data is {}, tuned model, using {} features: {}'.format(state, FE[1].shape[1], score5))
return results[max(results)]
def WrapML(self, df, target_str, task, fn=False, quiet=True):
wr = Wrappers()
if task == 'regression':
results = wr.RegLoop(df, target_str, quiet=quiet)
if task == 'classification':
results = wr.ClfLoop(df, target_str, fn=fn, quiet=quiet)
return results
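
# --- Illustrative sketch (not part of the original library) ---
# A hedged example of Wrappers.TCP tokenizing a tiny made-up labelled corpus.
# It returns the padded token matrix, the one-hot labels, and the vocab size.
def _tcp_sketch():
    docs = ['the cat sat on the mat', 'dogs chase cats', 'birds can fly']
    labels = [0, 1, 0]
    text, onehot, vocab = Wrappers().TCP(docs, labels)
    return text.shape, onehot.shape, vocab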
class Stats:
def PDF(self, X, bins=100):
'''plots a probability density function'''
        hist, bins = np.histogram(X, bins=bins, density=True)  # 'normed' was removed from NumPy; 'density' is the equivalent
bin_centers = (bins[1:]+bins[:-1])*0.5
plt.plot(bin_centers, hist)
def Z(self, sample, x):
mu = np.mean(sample)
st = np.std(sample)
try:
return list((x-mu)/st)[0]
except:
return (x-mu)/st
def IQR(self, values):
arr = np.array(values)
uq = np.quantile(arr, 0.75)
lq = np.quantile(arr, 0.25)
return uq - lq
def SqDev(self, values):
'''measures variance in a dataset'''
avg = np.mean(values)
result = 0
n = len(values)-1
for num in values:
result += (num - avg)**2
return result/n
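
# --- Illustrative sketch (not part of the original library) ---
# A hedged example of the Stats helpers on a small made-up sample: the z-score
# of the largest value and the interquartile range.
def _stats_sketch():
    sample = [4, 8, 15, 16, 23, 42]
    return Stats().Z(sample, 42), Stats().IQR(sample)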
class Datasets:
def load_wikipedia(self):
df = pd.read_csv('https://raw.githubusercontent.com/spe301/AI-generated-AI/main/Data/Wikipedia.csv')
X, y, vocab = Wrappers().TCP(df['Text'], df['AI'])
dataset = {'data': X, 'target': y, 'classes': ['Human', 'AI'], 'vocab': vocab}
return dataset
def load_twitter(self):
df = pd.read_csv('https://raw.githubusercontent.com/spe301/AI-generated-AI/main/Data/Twitter.csv')
X, y, vocab = Wrappers().TCP(df['tweet'], df['is_there_an_emotion_directed_at_a_brand_or_product'])
dataset = {'data': X, 'target': y, 'classes': ['negative', 'neutral', 'positive'], 'vocab': vocab}
return dataset
def load_rnn(self):
df = pd.read_csv('https://raw.githubusercontent.com/spe301/AI-generated-AI/main/Data/NLP8.csv')
try:
df = df.drop(['Unnamed: 0'], axis='columns')
except:
pass
return df
def load_wikipedia2k(self):
df = pd.read_csv('https://raw.githubusercontent.com/spe301/Wikipedia-Capstone/main/Data/Wikipedia2k.csv')
X, y, vocab = Wrappers().TCP(df['Text'], df['AI'])
        dataset = {'data': X, 'target': y, 'classes': ['Human', 'AI'], 'vocab': vocab}
return dataset
def load_wikipedia11k(self):
H = pd.read_csv('')[['Text', 'Human', 'AI']]
A = pd.read_csv('https://raw.githubusercontent.com/spe301/Wikipedia-Capstone/main/Data/Wikipedia2k.csv')
AI = A.loc[A['AI'] == 1]
df = pd.concat([H, A])
X, y, vocab = Wrappers().TCP(df['Text'], df['AI'])
dataset = {'data': X}
class DataBuilder:
def Generate(self, X, y, params, task, ts=150, epochs=50, batch_size=32, patience=5, regression=False):
'''facilitates the Data Collection Process'''
dl = DeepLearning()
wr = Wrappers()
if task == 'NLP':
func = dl.RNN
if task == 'CV':
func = dl.CNN
if task == 'TC':
func = dl.DeepTabularClassification
if task == 'TR':
func = dl.DeepTabularRegression
try:
df = dl.CollectPerformance(params, func, X, y, epochs=epochs, batch_size=batch_size, patience=patience, regression=regression)
except:
df = dl.CollectPerformance(params, func, X, y.reshape(-1), epochs=epochs, batch_size=batch_size, patience=patience, regression=regression)
size = []
n_features = []
batch = []
depth = []
dominances = []
for i in range(len(df)):
size.append(X.shape[0])
n_features.append(X.shape[1])
dominances.append(max(pd.DataFrame(y).value_counts())/len(y))
if task == 'CV':
depth.append(X.shape[3])
df['len_dataset'] = size
df['n_features'] = n_features
if regression == False:
df['dominant_class'] = dominances
if task == 'CV':
df['thickness'] = depth
return df
def ResultsDL(self, X, y, params, task, ts=150, epochs=50, batch_size=32, patience=5, regression=False):
'''facilitates the Data Collection Process'''
if task == 'NLP':
if regression==False:
X2, y2, _ = Wrappers().TCP(X, y)
else:
X2, y2, _ = Wrappers().TRP(X, y)
func = DeepLearning().RNN
if task == 'CV':
X2, _, y2, _ = DeepLearning().ModelReadyPixles(X, y, target_size=(ts, ts))
func = DeepLearning().CNN
if task == 'TC':
X2 = np.array(X)
y2 = DeepLearning().MulticlassOutput(np.array(y))
func = DeepLearning().DeepTabularClassification
if task == 'TR':
X2 = np.array(X)
y2 = np.array(y)
func = DeepLearning().DeepTabularRegression
try:
df = DeepLearning().CollectPerformance(params, func, X2, y2, epochs=epochs, batch_size=batch_size, patience=patience, regression=regression)
except:
df = DeepLearning().CollectPerformance(params, func, X2, y2.reshape(-1), epochs=epochs, batch_size=batch_size, patience=patience, regression=regression)
size = []
n_features = []
batch = []
depth = []
dominances = []
for i in range(len(df)):
size.append(X2.shape[0])
n_features.append(X2.shape[1])
dominances.append(max(pd.DataFrame(y2).value_counts())/len(y2))
if task == 'CV':
depth.append(X2.shape[3])
df['len_dataset'] = size
df['n_features'] = n_features
if regression == False:
df['dominant_class'] = dominances
if task == 'CV':
df['thickness'] = depth
return df
def ResultsNLP(self, df, gridding=False):
        '''gets the nlp results dataset ready for modeling'''
        dh = DataHelper()
df['regularizer'] = df['regularizer'].fillna('None')
df['stacking'] = df['stacking'].astype(int)
df['dropout'] = df['dropout'].astype(int)
df['bidirectional'] = df['bidirectional'].astype(int)
act = dh.OHE(df['activation'])
reg = dh.OHE(df['regularizer'])
opt = dh.OHE(df['optimizer'])
method = dh.OHE(df['method'])
df = df.drop(['activation', 'regularizer', 'optimizer', 'method'], axis='columns')
df = pd.concat([df, act, reg, opt, method], axis='columns')
if gridding == True:
return df
df['val_loss'] = df['val_loss'].fillna(max(df['val_loss']))
df['loss'] = df['loss'].fillna(max(df['loss']))
kpi_list = ['accuracy', 'loss', 'val_accuracy', 'val_loss']
kpi = df[kpi_list]
scores = []
for i in range(len(df)):
ts = (1 - (kpi['loss'][i] / max(kpi['loss'])) + kpi['accuracy'][i])/2
vs = (1 - (kpi['val_loss'][i] / max(kpi['val_loss'])) + kpi['val_accuracy'][i])/2
score = (ts+vs) - abs(ts-vs)
scores.append(score)
df2 = df.drop(kpi_list, axis='columns')
df2['quality'] = scores
return df2
def BuildCombos(self, params, len_dataset, n_features, dominant_class):
'''puts all possible gridsearch combinations in a dataframe'''
n = list(params.keys())
lst1 = []
lst2 = []
lst3 = []
lst4 = []
lst5 = []
lst6 = []
lst7 = []
lst8 = []
lst9 = []
lst10 = []
lst11 = []
lst12 = []
for i in range(len(params[n[0]])):
var1 = params[n[0]][i]
for i in range(len(params[n[1]])):
var2 = params[n[1]][i]
for i in range(len(params[n[2]])):
var3 = params[n[2]][i]
for i in range(len(params[n[3]])):
var4 = params[n[3]][i]
for i in range(len(params[n[4]])):
var5 = params[n[4]][i]
for i in range(len(params[n[5]])):
var6 = params[n[5]][i]
for i in range(len(params[n[6]])):
var7 = params[n[6]][i]
for i in range(len(params[n[7]])):
var8 = params[n[7]][i]
for i in range(len(params[n[8]])):
var9 = params[n[8]][i]
for i in range(len(params[n[9]])):
var10 = params[n[9]][i]
for i in range(len(params[n[10]])):
var11 = params[n[10]][i]
for i in range(len(params[n[11]])):
var12 = params[n[11]][i]
lst1.append(var1)
lst2.append(var2)
lst3.append(var3)
lst4.append(var4)
lst5.append(var5)
lst6.append(var6)
lst7.append(var7)
lst8.append(var8)
lst9.append(var9)
lst10.append(var10)
lst11.append(var11)
lst12.append(var12)
df = pd.DataFrame(lst1)
df.columns = [n[0]]
df[n[1]] = lst2
df[n[2]] = lst3
df[n[3]] = lst4
df[n[4]] = lst5
df[n[5]] = lst6
df[n[6]] = lst7
df[n[7]] = lst8
df[n[8]] = lst9
df[n[9]] = lst10
df[n[10]] = lst11
df[n[11]] = lst12
df['len_dataset'] = [len_dataset] * len(df)
df['n_features'] = [n_features] * len(df)
df['dominant_class'] = [dominant_class] * len(df)
return df
class Models:
def BuildML(self, net_type):
wr = Wrappers()
dh = DataHelper()
ml = MachineLearning()
db = DataBuilder()
ds = Datasets()
if net_type == 'RNN':
df = ds.load_rnn()
df2 = db.ResultsNLP(df)
vanilla, grid, X, Xval, y, yval = wr.Vanilla(df2, 'quality', 'regression')
Xs = dh.ScaleData('standard', X)
model = ml.Optimize(vanilla, grid, Xs, y)
return model
def BuildRNN(self, model, vocab_dim, X, y, quiet=True):
db = DataBuilder()
dh = DataHelper()
dl = DeepLearning()
al = Algorithms()
gr = Grids()
len_dataset = X.shape[0]
n_features = X.shape[1]
dominant_class = al.GetDC(y)
grid = gr.GridRNN(X, y, vocab_dim)
combos = db.ResultsNLP(db.BuildCombos(grid, len_dataset, n_features, dominant_class), gridding=True)
c2 = dh.ScaleData('standard', combos)
combos['quality'] = model.predict(c2)
best = combos.loc[combos['quality'] == max(combos['quality'])].iloc[0]
res = dict(best)
if res['relu'] == 1:
activation = 'relu'
if res['tanh'] == 1:
activation = 'tanh'
if res['L1'] == 1:
regularizer = 'L1'
if res['L2'] == 1:
regularizer = 'L2'
if res['None'] == 1:
regularizer = 'None'
if res['adam'] == 1:
optimizer = 'adam'
if res['rmsprop'] == 1:
optimizer = 'rmsprop'
if res['sgd'] == 1:
optimizer = 'sgd'
if res['GRU'] == 1:
method = 'GRU'
if res['LSTM'] == 1:
method = 'LSTM'
dm = dl.RNN(int(res['output_dim']), int(res['embedding']), int(res['nodes']), activation, regularizer,
bool(res['stacking']), bool(res['dropout']), optimizer, method, bool(res['bidirectional']))
epochs = int(res['epochs'])
bs = int(res['batch_size'])
try:
history = dm.fit(X, y, epochs=epochs, batch_size=bs, validation_split=0.2)
except:
history = dm.fit(X, y.reshape(-1), epochs=epochs, batch_size=bs, validation_split=0.2)
if quiet == False:
print(res)
print(Evaluater().ViewLoss(history))
try:
print(Evaluater().ViewAccuracy(history))
except:
pass
return dm
class Grids:
def GridRNN(self, X, y, v):
al = Algorithms()
the_grid = {'output_dim': [y.shape[1]], 'embedding': [v],
'nodes': al.Nodes(X, generation=False), 'activation': ['relu', 'tanh'],
'regularizer': ['L1', None, 'L2'], 'stacking': [True, False],
'dropout': [True, False], 'optimizer': ['adam', 'rmsprop', 'sgd'], 'method': ['LSTM', 'GRU'],
'bidirectional': [True, False], 'epochs': al.Epochs(False),
'batch_size': al.BatchSize(X)}
return the_grid
def GridNN(self, X, y, task):
al = Algorithms()
o = y.shape[1]
if task == 'classification':
if o == 2:
lf = 'binary_crossentropy'
else:
lf = 'categorical_crossentropy'
the_grid = {'output_dim': [o], 'nodes': al.Nodes(X), 'activation': ['relu', 'tanh'], 'regularizer': [None, 'L1', 'L2'], 'stacking': [False, True],
'dropout': [False, True], 'nlayers': list(range(3, 8)), 'closer': [True, False], 'loss_func': [lf], 'optimizer': ['adam', 'rmsprop', 'sgd']}
if task == 'regression':
the_grid = {'nodes': al.Nodes(X), 'activation': ['relu', 'tanh'], 'regularizer': [None, 'L1', 'L2'], 'stacking': [False, True], 'dropout': [False, True],
'nlayers': list(range(3, 8)), 'closer': [False, True], 'loss_func': ['MSE', 'MAE'], 'optimizer': ['rmsprop', 'adam', 'sgd'], 'y_col': ['string']}
return the_grid
def GridCNN(self, X, y):
al = Algorithms()
if len(X) > 500000:
ks = 5
pool = 5
else:
ks = 3
pool = 2
the_grid = {'output_dim': [y.shape[1]], 'base_filters': al.Nodes(X), 'kernel_size': [ks], 'activation': ['relu', 'tanh'], 'nblocks': [2, 3, 4, 5],
'pool':[pool], 'dropout': [False, True], 'closer': [True, False], 'optimizer': ['adam', 'rmsprop', 'sgd'], 'metrics': ['accuracy']}
return the_grid
class Google:
def GetReport(self, keywords, span='today 5-y', geo='', quiet=True):
'''observe a search term's popularity in the past 5 years'''
pytrends = TrendReq(hl='en-US', tz=360)
pytrends.build_payload(keywords, cat=0, timeframe=span, geo=geo, gprop='')
ts = pytrends.interest_over_time().drop(['isPartial'], axis='columns')
if quiet == False:
print(ts.plot())
return ts
def Collect(self, keyword, quiet=True):
row = {}
tsdf = Google().BuildTS(keyword)
row['term'] = keyword
current_popularity = list(tsdf[keyword][:260])[-1]
row['current_popularity'] = current_popularity
row['change_3mo'] = '{}%'.format(round(((tsdf[keyword][271] - current_popularity) / current_popularity) * 100, 1))
row['change_6mo'] = '{}%'.format(round(((tsdf[keyword][283] - current_popularity) / current_popularity) * 100, 1))
row['change_9mo'] = '{}%'.format(round(((tsdf[keyword][295] - current_popularity) / current_popularity) * 100, 1))
row['change_12mo'] = '{}%'.format(round(((tsdf[keyword][307] - current_popularity) / current_popularity) * 100, 1))
row['change_24mo'] = '{}%'.format(round(((tsdf[keyword][355] - current_popularity) / current_popularity) * 100, 1))
try:
row['popularity_2y'] = round((((tsdf[keyword][355] - current_popularity) / current_popularity) + 1) * current_popularity)
except:
row['popularity_2y'] = round(tsdf[keyword][355])
sentiment, subjectivity, sentiments = Twitter().AnalyzeTwitter(keyword)
row['sentiment'] = round(sentiment, 2)
row['subjectivity'] = round(subjectivity, 2)
row['sentiments_std'] = round(np.std(sentiments), 2)
if quiet == True:
return row
else:
return tsdf, row
def CollectLoop(self, terms_list):
        '''tells us how the popularity of a given list of search terms is expected to change'''
df = pd.DataFrame(Google().Collect(terms_list[0]), index=[0])
for term in terms_list[1:]:
temp = pd.DataFrame(Google().Collect(term), index=[0])
df = pd.concat([df, temp])
return df.reset_index().drop(['index'], axis='columns')
def PlotOne(self, keyword):
'''the output a user gets when looking at one term'''
ts, results = Google().Collect(keyword, quiet=False)
subj = results['subjectivity']
obj = 1 - subj
X = ['%subjective', '%objective']
y = [subj, obj]
X2 = ['sentiment']
y2 = results['sentiment']
if results['popularity_2y'] > results['current_popularity']:
future = 'increase'
else:
future = 'decrease'
fig = go.Figure(go.Indicator(
domain = {'x': [0, 1], 'y': [0, 1]},
value = results['sentiment'],
mode = "gauge+number",
title = {'text': "Sentiment of '{}' based on tweets".format(keyword)},
gauge = {'axis': {'range': [-1, 1]},
'steps' : [
{'range': [-1, 0], 'color': "red"},
{'range': [0, 1], 'color': "lightgreen"}]}))
fig.show()
fig = go.Figure(go.Indicator(
domain = {'x': [0, 1], 'y': [0, 1]},
value = results['subjectivity'],
mode = "gauge+number",
title = {'text': "Subjectivity of '{}' based on tweets".format(keyword)},
gauge = {'axis': {'range': [0, 1]},
'steps' : [
{'range': [0, 0.5], 'color': "yellow"},
{'range': [0.5, 1], 'color': "blue"}]}))
fig.show()
fig = px.line(ts, x='index', y=keyword, range_y=[0, 100])
fig.show()
def PlotMany(self, keywords):
df = Google().CollectLoop(keywords)
fig = px.bar(df, x='term', y='current_popularity', color='sentiment', range_y=[0, 100])
fig.show()
for i in range(len(keywords)):
ser = Google().Collect(keywords[i], quiet=False)[0]
fig = px.line(ser, x='index', y=keywords[i], range_y=[0, 100])
fig.show()
def GetPeaks(self, ser):
kw = list(ser.columns)[0]
        variance = 0
        for i in range(len(ser)):
            variance += abs(np.mean(ser)[0] - ser.iloc[i][0])
        delta = abs(np.mean(ser.iloc[235:])[0] - np.mean(ser.iloc[:27])[0])
        si = variance/delta
x = np.array(list(Google().GetReport([kw])[kw]))
peaks, _ = find_peaks(x, prominence=10, distance=52)
return peaks, si
def CheckSeasonality(self, ser, quiet=True):
peaks, si = Google().GetPeaks(ser)
n_peaks = len(peaks)
if quiet == False:
print(peaks, si)
if si > 250:
if n_peaks < 3:
return False
else:
return True
else:
if n_peaks > 4:
return True
else:
if Google().CovidCheck(peaks) == False:
return False
else:
p1 = len(Google().GetPeaks(ser.loc[:np.datetime64('2020-03-08')])[0])
p2 = len(Google().GetPeaks(ser.loc[np.datetime64('2020-07-26'):])[0])
return p1+p2 > 4
def BuildTS(self, keyword):
ser = Google().GetReport([keyword])
s = Google().CheckSeasonality(ser)
if s == True:
            my_order = (2,1,2) #probably wrong, also needs to be programmatic
            my_seasonal_order = (2, 1, 2, 52) #probably wrong, also needs to be programmatic
model = SARIMAX(ser, order=my_order, seasonal_order=my_seasonal_order).fit()
pred = model.predict(start=len(ser), end=356)
ser_ = pd.DataFrame(ser)
pred_ = pd.DataFrame(pred)
pred_.columns = [keyword]
ser_.columns = [keyword]
return pd.concat([ser_, pred_]).reset_index()
if s == False:
model = ar_model.AutoReg(ser, lags=4).fit()
pred = model.predict(start=len(ser), end=356)
ser_ = pd.DataFrame(ser)
pred_ = pd.DataFrame(pred)
pred_.columns = [keyword]
ser_.columns = [keyword]
return pd.concat([ser_, pred_]).reset_index()
def PredictSearches(self, to_predict):
if type(to_predict) == str:
return Google().PlotOne(to_predict)
if type(to_predict) == list:
if len(to_predict) == 1:
return Google().PlotOne(to_predict[0])
else:
return Google().PlotMany(to_predict)
def CovidCheck(self, peaks):
today = datetime.date.today().strftime("%Y-%m-%d")
delta = np.datetime64(today) - np.datetime64('2021-03-28')
delta = int(delta.astype(int)/7)
peaks = np.array(peaks)
spike = np.array(list(range(205-delta, 226-delta)))
affected = np.intersect1d(spike, peaks)
regular = 0
try:
for peak in affected:
                if peak - 52 in list(peaks):  # a matching peak ~52 weeks (one year) earlier
regular += 1
except:
return False
return len(affected)!=0 and regular==0
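# Hedged usage sketch (illustrative only, not part of the original module; running it requires
# pytrends, tweepy credentials, plotly and network access):
#   Google().PredictSearches('data science')           # one term  -> sentiment/subjectivity gauges + forecast line
#   Google().PredictSearches(['python', 'julia'])      # many terms -> popularity bar chart + per-term forecasts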
class Twitter:
def AnalyzeTwitter(self, keyword):
'''find the average sentimental value and subjectivity of a given search term'''
c1 = 'aHXduTrDkva3ItY52tUtYVPvA'
c2 = 'Qs6d4oNT3zXxDqOhita7IG07CfAJGceoqIs1sGuA4OURlbLP6d'
a1 = '1181578611171762177-sGQaj7E9fpWi2aEB3MfWL4nTRovXYk'
a2 = '<KEY>'
auth = tweepy.OAuthHandler(c1, c2)
auth.set_access_token(a1, a2)
api = tweepy.API(auth)
topic = api.search(keyword)
sent = 0
sub = 0
sents = []
for i in range(len(topic)):
tweet = topic[i]._json['text'].replace('@', '')
blob = TextBlob(tweet)
sents.append(blob.sentiment[0])
sent += blob.sentiment[0]/len(topic)
sub += blob.sentiment[1]/len(topic)
return sent, sub, sents
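# Hedged usage sketch (illustrative; requires valid Twitter API credentials and network access):
#   sentiment, subjectivity, per_tweet = Twitter().AnalyzeTwitter('data science')
#   # sentiment and subjectivity are averages over the fetched tweets; per_tweet is the list of
#   # per-tweet polarity scores that Google.Collect() summarises with np.std above.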
| 1.90625
| 2
|
expense/__version__.py
|
mahasak/bearlord
| 0
|
12780022
|
<reponame>mahasak/bearlord<gh_stars>0
__version__ = "0.0.1"
__name__ = "expense-tracker"
__description__ = "Python API Wrapper for the Airtable API"
__author__ = "<NAME>"
__authoremail__ = "<EMAIL>"
__license__ = "The MIT License (MIT)"
__copyright__ = "Copyright 2020 <NAME>"
| 0.820313
| 1
|
Implicit/auth_server/Implicit_auth_server.py
|
YungYanix/auth-server-sample
| 63
|
12780023
|
<reponame>YungYanix/auth-server-sample
import json
#import ssl
import urllib.parse as urlparse
from auth import (authenticate_user_credentials, generate_access_token,
verify_client_info, JWT_LIFE_SPAN)
from flask import Flask, redirect, render_template, request
from urllib.parse import urlencode
app = Flask(__name__)
@app.route('/auth')
def auth():
# Describe the access request of the client and ask user for approval
client_id = request.args.get('client_id')
redirect_url = request.args.get('redirect_url')
if None in [ client_id, redirect_url ]:
return json.dumps({
"error": "invalid_request"
}), 400
if not verify_client_info(client_id, redirect_url):
return json.dumps({
"error": "invalid_client"
})
return render_template('Implicit_grant_access.html',
client_id = client_id,
redirect_url = redirect_url)
def process_redirect_url(redirect_url, new_entries):
# Prepare the redirect URL
url_parts = list(urlparse.urlparse(redirect_url))
queries = dict(urlparse.parse_qsl(url_parts[4]))
queries.update(new_entries)
url_parts[4] = urlencode(queries)
url = urlparse.urlunparse(url_parts)
return url
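# Hedged example of the helper above (the callback URL and token value are placeholders, not real data):
#   process_redirect_url('https://client.example/cb?state=xyz', {'access_token': 'abc123'})
#   -> 'https://client.example/cb?state=xyz&access_token=abc123'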
@app.route('/signin', methods = ['POST'])
def signin():
# Issues authorization code
username = request.form.get('username')
password = request.form.get('password')
client_id = request.form.get('client_id')
redirect_url = request.form.get('redirect_url')
if None in [username, password, client_id, redirect_url]:
return json.dumps({
"error": "invalid_request"
}), 400
if not verify_client_info(client_id, redirect_url):
return json.dumps({
"error": "invalid_client"
})
if not authenticate_user_credentials(username, password):
return json.dumps({
'error': 'access_denied'
}), 401
access_token = generate_access_token()
print(process_redirect_url(redirect_url, {"1":"2"}))
return redirect(process_redirect_url(redirect_url, {
'access_token': access_token,
'token_type': 'JWT',
'expires_in': JWT_LIFE_SPAN
}), code = 303)
if __name__ == '__main__':
#context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
#context.load_cert_chain('domain.crt', 'domain.key')
#app.run(port = 5000, debug = True, ssl_context = context)
app.run(port = 5001, debug = True)
| 2.625
| 3
|
example/search/faceset_create.py
|
FacePlusPlus/facepp-python-demo
| 10
|
12780024
|
<filename>example/search/faceset_create.py
# coding: utf-8
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import requests
from config import API_KEY, API_SECRET, FACESET_CREATE_PATH
display_name = 'hello'
outer_id = 'fpp-demo'
tags = 'group1'
face_tokens = 'e877e8176667e12d11a5eae94937b4fa,0252a884dc2a8fd0c15360a3b0c9aee5,<KEY>'
user_data = ''
force_merge = 0
def call_api():
data = {
'api_key': API_KEY,
'api_secret': API_SECRET,
'display_name': display_name,
'outer_id': outer_id,
'tags': tags,
'face_tokens': face_tokens,
'user_data': user_data,
'force_merge': force_merge
}
resp = requests.post(FACESET_CREATE_PATH, data=data).json()
print(resp)
if __name__ == "__main__":
call_api()
| 2.21875
| 2
|
assembler.py
|
Solomon1999/kivystudio
| 1
|
12780025
|
<filename>assembler.py
from kivy.uix.boxlayout import BoxLayout
from kivy.core.window import Window
from kivy.clock import Clock
import os
import sys
import traceback
from kivystudio.parser import emulate_file
from kivystudio.widgets.filemanager import filemanager
from kivystudio.components.screens import AndroidPhoneScreen
from kivystudio.components.topmenu import TopMenu
from kivystudio.components.codeplace import CodePlace
from kivystudio.components.sibebar import SideBar
from kivystudio.components.terminal import TerminalSpace
from kivystudio.components.emulator_area import emulator_area
class Assembly(BoxLayout):
pass
def add_new_tab(path):
print(path)
code_place.add_code_tab(filename=path)
def open_folder(*a):
print(a)
def key_down(self, *args):
if args[0] == 114 and args[3] == ['ctrl']: # emulate file Ctrl+R
Clock.schedule_once(lambda dt: emulate_file(emulator_area.emulation_file))
elif args[0] == 107 and args[3] == ['ctrl']: # Ctrl K pressed
pass
elif args[0] == 111 and args[3] == ['ctrl']: # open file Ctrl+O
filemanager.open_file(path='/root',callback=add_new_tab)
elif args[0] == 110 and args[3] == ['ctrl']: # new file Ctrl+N
code_place.add_code_tab(tab_type='new_file')
Window.bind(on_key_down=key_down)
project_dir = 'test_project'
main_file = os.path.join(project_dir, 'main.py')
kv_file = os.path.join(project_dir, 'main.kv')
sys.path.append(project_dir)
code_place = CodePlace()
code_place.add_code_tab(tab_type='welcome')
# code_place.add_code_tab(filename=main_file)
# code_place.add_code_tab(filename=kv_file)
emulator_area = emulator_area()
Assembler = Assembly()
Assembler.ids.box.add_widget(SideBar())
Assembler.ids.box.add_widget(code_place)
Assembler.ids.box.add_widget(emulator_area)
| 2.1875
| 2
|
api/wecubek8s/wecubek8s/apps/plugin/utils.py
|
WeBankPartners/wecube-plugins-kubernetes
| 6
|
12780026
|
<filename>api/wecubek8s/wecubek8s/apps/plugin/utils.py
# coding=utf-8
from __future__ import absolute_import
import logging
import re
from talos.core import config
from talos.core.i18n import _
from wecubek8s.common import exceptions
CONF = config.CONF
LOG = logging.getLogger(__name__)
def escape_name(name):
'''
lowercase RFC 1123 name must consist of lower case alphanumeric characters,
'-' or '.', and must start and end with an alphanumeric character
(e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')
'''
rule = r'[^.a-z0-9]'
return re.sub(rule, '-', name.lower())
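# Hedged example (the input name is made up for illustration): after lowercasing, every
# character outside [.a-z0-9] is replaced by '-':
#   escape_name('My_App 01') -> 'my-app-01'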
def convert_tag(items):
labels = {}
for tag in items:
labels[tag['name']] = tag['value']
return labels
def convert_pod_ports(item):
# convert pod ports
# eg. 12, 23:32, 45::UDP, 678:876:TCP
rets = []
parts = re.split(r',|;', item)
for part in parts:
if part.strip():
part = part.strip()
map_parts = part.split(':', 2)
ret = {'containerPort': int(map_parts[0]), 'protocol': 'TCP'}
if len(map_parts) >= 1:
rets.append(ret)
if len(map_parts) >= 2:
if map_parts[1]:
ret['hostPort'] = int(map_parts[1])
if len(map_parts) >= 3:
ret['protocol'] = map_parts[2]
return rets
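# Hedged example of the port string format documented above (illustrative input):
#   convert_pod_ports('12, 23:32, 45::UDP')
#   -> [{'containerPort': 12, 'protocol': 'TCP'},
#       {'containerPort': 23, 'protocol': 'TCP', 'hostPort': 32},
#       {'containerPort': 45, 'protocol': 'UDP'}]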
def convert_service_port(items):
# convert service port
rets = []
fields = ['name', 'protocol', 'port', 'targetPort', 'nodePort']
for item in items:
ret = {}
for field in fields:
if item.get(field):
ret[field] = item[field]
if field in ('port', 'targetPort', 'nodePort'):
ret[field] = int(ret[field])
rets.append(ret)
return rets
def convert_env(items):
# convert envs
rets = []
for idx, item in enumerate(items):
if 'valueRef' not in item:
item['valueRef'] = None
if 'valueFrom' not in item or not item['valueFrom']:
item['valueFrom'] = 'value'
if item['valueRef'] is None and item['valueFrom'] != 'value':
raise exceptions.ValidationError(attribute='envs[%s]' % (idx + 1),
msg=_('valueRef is NULL, while valueFrom is %(value)s') %
{'value': item['valueFrom']})
if item['valueFrom'] == 'value':
rets.append({'name': item['name'], 'value': item['value']})
elif item['valueFrom'] == 'configMap':
rets.append({
'name': item['name'],
'valueFrom': {
'configMapKeyRef': {
'name': item['valueRef']['name'],
'key': item['valueRef']['value']
}
}
})
elif item['valueFrom'] == 'secretKey':
rets.append({
'name': item['name'],
'valueFrom': {
'secretKeyRef': {
'name': item['valueRef']['name'],
'key': item['valueRef']['value']
}
}
})
elif item['valueFrom'] == 'fieldRef':
rets.append({'name': item['name'], 'valueFrom': {'fieldRef': {'fieldPath': item['valueRef']['name']}}})
return rets
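# Hedged example (the env/configMap names are made up): a configMap-backed variable is
# rewritten into the k8s valueFrom/configMapKeyRef form:
#   convert_env([{'name': 'DB_HOST', 'valueFrom': 'configMap',
#                 'valueRef': {'name': 'app-config', 'value': 'db.host'}}])
#   -> [{'name': 'DB_HOST', 'valueFrom': {'configMapKeyRef': {'name': 'app-config', 'key': 'db.host'}}}]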
def convert_volume(items):
# convert volumes
volumes = []
mounts = []
for item in items:
volume_name = item['name']
volume_type = item['type']
volume_type_spec = item.get('typeSpec', None) or {}
volume_mount_path = item['mountPath']
volume_read_only = item.get('readOnly', None) or False
volume_type_spec_new = {}
volumes.append({'name': volume_name, volume_type: volume_type_spec_new})
if volume_type == 'configMap':
volume_type_spec_new['name'] = volume_type_spec['name']
elif volume_type == 'secret':
volume_type_spec_new['secretName'] = volume_type_spec['name']
elif volume_type == 'hostPath':
volume_type_spec_new['path'] = volume_type_spec['path']
volume_type_spec_new['type'] = volume_type_spec.get('type', None) or ''
elif volume_type == 'emptyDir':
volume_type_spec_new['medium'] = volume_type_spec.get('medium', '') or ''
if 'sizeLimit' in volume_type_spec and volume_type_spec['sizeLimit']:
volume_type_spec_new['sizeLimit'] = volume_type_spec['sizeLimit']
elif volume_type == 'nfs':
volume_type_spec_new['server'] = volume_type_spec['server']
volume_type_spec_new['path'] = volume_type_spec['path']
volume_type_spec_new['readOnly'] = volume_read_only
elif volume_type == 'persistentVolumeClaim':
volume_type_spec_new['claimName'] = volume_type_spec['name']
volume_type_spec_new['readOnly'] = volume_read_only
mounts.append({'name': volume_name, 'mountPath': volume_mount_path, 'readOnly': volume_read_only})
return volumes, mounts
def convert_resource_limit(cpu, memory):
ret = {'limits': {}, 'requests': {}}
if cpu:
ret['limits'].setdefault('cpu', cpu)
ret['requests'].setdefault('cpu', cpu)
if memory:
ret['limits'].setdefault('memory', memory)
ret['requests'].setdefault('memory', memory)
return ret
def parse_image_url(image_url):
'''parse image_url
eg.
ccr.ccs.tencentyun.com:5555/webankpartners/platform-core:v2.9.0
ccr.ccs.tencentyun.com:5555/platform-core:v2.9.0
ccr.ccs.tencentyun.com:5555/webankpartners/platform-core
ccr.ccs.tencentyun.com/webankpartners/platform-core:v2.9.0
ccr.ccs.tencentyun.com/platform-core:v2.9.0
ccr.ccs.tencentyun.com/platform-core
webankpartners/platform-core:v2.9.0
webankpartners/platform-core
platform-core:v2.9.0
platform-core
minio/mc
ccr.ccs.tencentyun.com:5555/a.b.c.namespace/d.e.f.name:tag1
ccr.ccs.tencentyun.com:5555/a.b.c.namespace/d.e.f.name
ccr.ccs.tencentyun.com/a.b.c.namespace/d.e.f.name:tag1
ccr.ccs.tencentyun.com/a.b.c.namespace/d.e.f.name
'''
url_rule = r'^((?P<server>([a-zA-Z0-9]+(\.[-_a-zA-Z0-9]+)+?))(:(?P<port>\d+))?/)?((?P<namespace>([-_a-zA-Z0-9]+?))/)?(?P<image>([-_a-zA-Z0-9]+?))(:(?P<tag>[-_.a-zA-Z0-9]+))?$'
# private url like: server/namespace/image[:tag]
private_url_rule = r'^((?P<server>([a-zA-Z0-9]+(\.[-_a-zA-Z0-9]+)+?))(:(?P<port>\d+))?/)((?P<namespace>([-_.a-zA-Z0-9]+?))/)(?P<image>([-_.a-zA-Z0-9]+?))(:(?P<tag>[-_.a-zA-Z0-9]+))?$'
ret = re.search(url_rule, image_url)
if ret:
server_with_port = None
if ret['server']:
if ret['port']:
server_with_port = '%s:%s' % (ret['server'], ret['port'])
else:
server_with_port = ret['server']
return server_with_port, ret['namespace'], ret['image'], ret['tag']
else:
ret = re.search(private_url_rule, image_url)
if ret:
server_with_port = None
if ret['port']:
server_with_port = '%s:%s' % (ret['server'], ret['port'])
else:
server_with_port = ret['server']
return server_with_port, ret['namespace'], ret['image'], ret['tag']
return None, None, None, None
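# Hedged example, reusing one of the docstring URLs above: the regex should decompose it as
#   parse_image_url('ccr.ccs.tencentyun.com:5555/webankpartners/platform-core:v2.9.0')
#   -> ('ccr.ccs.tencentyun.com:5555', 'webankpartners', 'platform-core', 'v2.9.0')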
def convert_container(images, envs, vols, resource_limit):
containers = []
container_template = {
'name': '',
'image': '',
'imagePullPolicy': 'IfNotPresent',
'ports': [],
'env': envs,
'volumeMounts': vols,
'resources': resource_limit
}
for image_info in images:
container = container_template.copy()
registry_server, registry_namespace, image_name, image_tag = parse_image_url(image_info['name'].strip())
container['name'] = image_name
container['image'] = image_info['name'].strip()
container['ports'] = convert_pod_ports(image_info.get('ports', ''))
containers.append(container)
return containers
def convert_registry_secret(k8s_client, images, namespace, username, password):
rets = []
for image_info in images:
registry_server, registry_namespace, image_name, image_tag = parse_image_url(image_info['name'].strip())
if registry_server:
name = ''
name = registry_server + '#' + username
name = escape_name(name)
k8s_client.ensure_registry_secret(name, namespace, registry_server, username, password)
rets.append({'name': name})
return rets
def convert_affinity(strategy, tag_key, tag_value):
if strategy == 'anti-host-preferred':
return {
'podAntiAffinity': {
'preferredDuringSchedulingIgnoredDuringExecution': [{
'weight': 100,
'podAffinityTerm': {
'labelSelector': {
'matchExpressions': [{
'key': tag_key,
'operator': 'In',
'values': [tag_value]
}]
},
'topologyKey': 'kubernetes.io/hostname'
}
}]
}
}
elif strategy == 'anti-host-required':
return {
'podAntiAffinity': {
'requiredDuringSchedulingIgnoredDuringExecution': [{
'labelSelector': {
'matchExpressions': [{
'key': tag_key,
'operator': 'In',
'values': [tag_value]
}]
},
'topologyKey': 'kubernetes.io/hostname'
}]
}
}
return {}
| 2.078125
| 2
|
imagetyperzapi2/imagetyperzapi.py
|
imagetyperz/bypasscaptcha
| 0
|
12780027
|
<filename>imagetyperzapi2/imagetyperzapi.py<gh_stars>0
# Imagetyperz captcha API
# -----------------------
# requests lib
try:
from requests import session
except:
raise Exception('requests package not installed, try with: \'pip2.7 install requests\'')
import os
from base64 import b64encode
# endpoints
# -------------------------------------------------------------------------------------------
CAPTCHA_ENDPOINT = 'http://captchatypers.com/Forms/UploadFileAndGetTextNEW.ashx'
RECAPTCHA_SUBMIT_ENDPOINT = 'http://captchatypers.com/captchaapi/UploadRecaptchaV1.ashx'
RECAPTCHA_RETRIEVE_ENDPOINT = 'http://captchatypers.com/captchaapi/GetRecaptchaText.ashx'
BALANCE_ENDPOINT = 'http://captchatypers.com/Forms/RequestBalance.ashx'
BAD_IMAGE_ENDPOINT = 'http://captchatypers.com/Forms/SetBadImage.ashx'
CAPTCHA_ENDPOINT_CONTENT_TOKEN = 'http://captchatypers.com/Forms/UploadFileAndGetTextNEWToken.ashx'
CAPTCHA_ENDPOINT_URL_TOKEN = 'http://captchatypers.com/Forms/FileUploadAndGetTextCaptchaURLToken.ashx'
RECAPTCHA_SUBMIT_ENDPOINT_TOKEN = 'http://captchatypers.com/captchaapi/UploadRecaptchaToken.ashx'
RECAPTCHA_RETRIEVE_ENDPOINT_TOKEN = 'http://captchatypers.com/captchaapi/GetRecaptchaTextToken.ashx'
BALANCE_ENDPOINT_TOKEN = 'http://captchatypers.com/Forms/RequestBalanceToken.ashx'
BAD_IMAGE_ENDPOINT_TOKEN = 'http://captchatypers.com/Forms/SetBadImageToken.ashx'
# user agent used in requests
# ---------------------------
USER_AGENT = 'pythonAPI1.0'
# Captcha class
# -------------------------------
class Captcha:
def __init__(self, response):
self._captcha_id = ''
self._text = ''
self.parse_response(response)
# parse response from API, into id and text
def parse_response(self, response):
s = response.split('|')
# we have a captcha only with ID and response
if len(s) < 2:
raise Exception('cannot parse response from server: {}'.format(response))
        # normally we would split into exactly two parts, but the captcha text itself may contain '|'
self._captcha_id = s[0]
self._text = '|'.join(s[1:]) # save text
@property
def text(self):
return self._text
@property
def captcha_id(self):
return self._captcha_id
# Recaptcha class
# ---------------------------------
class Recaptcha:
def __init__(self, captcha_id):
self._captcha_id = captcha_id
self._response = ''
# set response
def set_response(self, response):
self._response = response
@property
def captcha_id(self):
return self._captcha_id
@property
def response(self):
return self._response
# API class
# -----------------------------------------
class ImageTyperzAPI:
def __init__(self, access_token, affiliate_id = 0, timeout = 120):
self._access_token = access_token
self._affiliate_id = affiliate_id
# empty by default
self._username = ''
self._password = ''
self._timeout = timeout
self._session = session() # init a new session
self._normal_captcha = None # save last solved captcha
self._recaptcha = None
self._error = None # keep track of last error
self._headers = { # use this user agent
'User-Agent' : USER_AGENT
}
# set username and password
def set_user_password(self, username, password):
self._username = username
self._password = password
# solve normal captcha
def solve_captcha(self, image_path, case_sensitive = False):
data = {}
# if username is given, do it with user otherwise token
if self._username:
data['username'] = self._username
data['password'] = <PASSWORD>
url = CAPTCHA_ENDPOINT
if not os.path.exists(image_path): raise Exception('captcha image does not exist: {}'.format(image_path))
# read image/captcha
with open(image_path, 'rb') as f:
image_data = b64encode(f.read())
else:
if image_path.lower().startswith('http'):
url = CAPTCHA_ENDPOINT_URL_TOKEN
image_data = image_path
else:
url = CAPTCHA_ENDPOINT_CONTENT_TOKEN
# check if image/file exists
if not os.path.exists(image_path): raise Exception('captcha image does not exist: {}'.format(image_path))
# read image/captcha
with open(image_path, 'rb') as f:
image_data = b64encode(f.read())
# set token
data['token'] = self._access_token
# init dict params (request params)
data['action'] = 'UPLOADCAPTCHA'
data['chkCase'] = '1' if case_sensitive else '0'
data['file'] = image_data
if self._affiliate_id:
data['affiliateid'] = self._affiliate_id
# make request with all data
response = self._session.post(url, data=data,
headers=self._headers,
timeout=self._timeout)
response_text = response.text.encode('utf-8') # get text from response
# check if we got an error
# -------------------------------------------------------------
        if 'ERROR:' in response_text and len(response_text.split('|')) != 2:
response_err = response_text.split('ERROR:')[1].strip()
self._error = response_err
raise Exception(response_err) # raise Ex
c = Captcha(response_text) # init captcha from response
self._normal_captcha = c # save last captcha to obj
return c.text
# submit recaptcha to system
# SET PROXY AS WELL
# -------------------
# ----------------------------------
# ------------------------------
def submit_recaptcha(self, page_url, sitekey, proxy = None):
# check if page_url and sitekey are != None
if not page_url: raise Exception('provide a valid page_url')
if not sitekey: raise Exception('provide a valid sitekey')
data = {} # create data obj here, we might need it for proxy
if self._username:
data['username'] = self._username
data['password'] = self._password
url = RECAPTCHA_SUBMIT_ENDPOINT
else:
data['token'] = self._access_token
url = RECAPTCHA_SUBMIT_ENDPOINT_TOKEN
# check proxy and set dict (request params) accordingly
if proxy: # if proxy is given, check proxytype
# we have both proxy and type at this point
data['proxy'] = proxy
data['proxytype'] = 'HTTP'
# init dict params (request params)
data['action'] = 'UPLOADCAPTCHA'
data['pageurl'] = page_url
data['googlekey'] = sitekey
if self._affiliate_id:
data['affiliateid'] = self._affiliate_id
# make request with all data
response = self._session.post(url, data=data,
headers=self._headers, timeout=self._timeout)
response_text = response.text.encode('utf-8') # get text from response
# check if we got an error
# -------------------------------------------------------------
        if 'ERROR:' in response_text and len(response_text.split('|')) != 2:
response_err = response_text.split('ERROR:')[1].strip()
self._error = response_err
raise Exception(response_err) # raise Ex
self._recaptcha = Recaptcha(response_text) # init recaptcha obj with captcha_id (which is in the resp)
return self._recaptcha.captcha_id # return the ID
# retrieve recaptcha
def retrieve_recaptcha(self, captcha_id = None):
        # if captcha id is not specified, use the ID of the last captcha submitted
        if not captcha_id:
            if not self._recaptcha: raise Exception('no recaptcha was submitted previously, submit a captcha'
' first or give captcha_id as argument') # raise it
captcha_id = self._recaptcha.captcha_id
# create params dict (multipart)
data = {
'action': 'GETTEXT',
'captchaid': captcha_id
}
if self._username:
data['username'] = self._username
data['password'] = self._password
url = RECAPTCHA_RETRIEVE_ENDPOINT
else:
data['token'] = self._access_token
url = RECAPTCHA_RETRIEVE_ENDPOINT_TOKEN
# make request with all data
response = self._session.post(url, data=data,
headers=self._headers, timeout=self._timeout)
response_text = response.text.encode('utf-8') # get text from response
# check if we got an error
# -------------------------------------------------------------
        if 'ERROR:' in response_text and len(response_text.split('|')) != 2:
response_err = response_text.split('ERROR:')[1].strip()
# if error is different than NOT_DECODED, save it to obj
if response_err != 'NOT_DECODED': self._error = response_err
raise Exception(response_err) # raise Ex
self._recaptcha.set_response(response_text) # set response to recaptcha obj
return response_text # return response
# check if captcha is still being decoded
def in_progress(self, captcha_id = None):
try:
self.retrieve_recaptcha(captcha_id) # retrieve captcha
return False # captcha got decoded
except Exception, ex:
if 'NOT_DECODED' in str(ex): # if NOT_DECODED in response, it's 'OK'
return True
raise # raise Exception if different error
# get account balance
def account_balance(self):
data = {}
if self._username:
url = BALANCE_ENDPOINT
data["username"] = self._username
data["password"] = <PASSWORD>
else:
url = BALANCE_ENDPOINT_TOKEN
data["token"] = self._access_token
data["action"] = "REQUESTBALANCE"
data["submit"] = "Submit"
response = self._session.post(url, data=data,
headers=self._headers, timeout=self._timeout)
response_text = response.text.encode('utf-8')
# check if we have an error
if 'ERROR:' in response_text:
response_err = response_text.split('ERROR:')[1].strip() # split the string
self._error = response_err # save error to obj
raise Exception(response_err) # raise
return '${}'.format(response_text) # we don't, return balance
# set captcha bad, if given id, otherwise set the last one
def set_captcha_bad(self, captcha_id = ''):
# check if we have solved a captcha
if not self._normal_captcha and not captcha_id: raise Exception('no captcha id given and no captcha solved before')
if not captcha_id: captcha_id = self._normal_captcha.captcha_id # if no captcha id given, use last id
data = {
"action": "SETBADIMAGE",
"imageid": captcha_id,
"submit": "Submissssst"
}
if self._username:
data["username"] = self._username
data["password"] = <PASSWORD>
url = BAD_IMAGE_ENDPOINT
else:
data["token"] = self._access_token
url = BAD_IMAGE_ENDPOINT_TOKEN
# make request
response = self._session.post(url, data=data,
headers=self._headers, timeout=self._timeout)
response_text = response.text.encode('utf-8')
# check if we have an error
if 'ERROR:' in response_text:
response_err = response_text.split('ERROR:')[1].strip() # split the string
self._error = response_err # save error to obj
raise Exception(response_err) # raise
return response_text # we don't, return balance
# get last captcha text
@property
def captcha_text(self):
if not self._normal_captcha: return '' # if captcha is not set yet, return nothing
return self._normal_captcha.text # return text
# get last captcha id
@property
def captcha_id(self):
if not self._normal_captcha: return '' # if captcha is not set yet, return nothing
return self._normal_captcha.captcha_id # return id
# get last recaptcha id
@property
def recaptcha_id(self):
if not self._recaptcha: return '' # return none if none
return self._recaptcha.captcha_id # return id
# get last recaptcha response
@property
def recaptcha_response(self):
if not self._recaptcha: return '' # return nothing if not set yet
return self._recaptcha.response # return response
# get last error
@property
def error(self):
return self._error
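# Hedged usage sketch (illustrative only; the token and file name below are placeholders):
#   api = ImageTyperzAPI('ACCESS_TOKEN_HERE')
#   print(api.account_balance())              # balance as a '$...' string
#   print(api.solve_captcha('captcha.png'))   # solved text of a local captcha image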
| 2.1875
| 2
|
raspi/Camera_and_LTE/LTE_stream.py
|
blake-shaffer/avionics
| 3
|
12780028
|
# This script does the following:
# 1) Record H264 Video using PiCam at a configurable maximum bitrate (bitrate_max, 100 kbps below)
# 2) Stream video data to a local BytesIO object
# 3) Send raw data over LTE
# 4) Store raw data to an onboard file
# 5) Clears BytesIO object after network stream and file store
# 6) Interrupts and ends recording after 'record_time' seconds
# Author: <NAME>
# Last Edited: 10/2/19
# Libraries
# -> picamera -> PiCamera: Enables pi cam interfacing and settings manipulation
# -> picamera -> CircularIO: Allows for a circular buffer (if we want one)
# -> threading: enables timer interrupt
# -> io -> BytesIO : local file-like object that camera streams to
# -> socket: allows for UDP socket and message sending
# -> Hologram.HologramCloud -> HologramCloud: LTE API to send data over an LTE network
from picamera import PiCamera
from picamera import CircularIO
from io import BytesIO
from Hologram.HologramCloud import HologramCloud
import threading
import socket
import time
import os
#======================= Global Variables and Objects =================
#Global Variables
lte = True
wifi = not(lte)
record_file = 'buffer_recording.h264' #on-board file video is stored to
bitrate_max = 100000 # bits per second
record_time = 8 # Time in seconds that the recording runs for
record_chunk = 0.1 #chunk size in seconds video object is broken into and sent
frame_rate = 15 #camera frame rate
interrupt_bool = False #global interrupt flag that ends recording/program
store_and_send_bool = False #global interrupt flag that initiates sending and storing of camera data
#ensures chunk size is not smaller than one frame
if record_chunk < 1/frame_rate:
record_chunk = 1/frame_rate
#Camera Settings
camera = PiCamera()
camera.resolution = (320, 240)
camera.framerate = frame_rate
if lte:
#LTE Network Streaming
credentials = {'devicekey': '<KEY>'}
hologram = HologramCloud(credentials, network='cellular')
if wifi:
#Wifi UDP Network Streaming
STREAM_IP = '127.0.0.1'
STREAM_PORT = 4000
send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#========================= Functions =================================
def interrupt_func():
#Interrupt function that ends camera streaming and program
global interrupt_bool
interrupt_bool = True
print("Program Timer up")
def store_interrupt_func():
#interrupt function that initiates sending and storing camera data
global store_and_send_bool
store_and_send_bool = True
#threading.Timer(record_chunk, store_interrupt_func).start()
def send_network(msg):
if lte:
#Sends data over LTE
msg_err = hologram.sendMessage(msg, timeout = 1)
if wifi:
#Sends data over Wifi UDP
send_sock.sendto(msg, (STREAM_IP, STREAM_PORT))
#======================== Video Streaming and Recording ============
loop_cnt = 0.0
cnt = 0
#camera.start_preview()
#=================== Stores to local BytesIO then sends========================
# MOST EFFICENT AND TEST-PROVEN METHOD
if lte:
#Initialize LTE Network Connection
connected = 0
while not(connected == 1):
os.system("sudo hologram network disconnect")
if connected == 0:
print("Not Connected (%d)\n -> Connecting"%(connected))
hologram.network.connect(timeout = 10)
else:
print("Trying to Reconnect (%d)"%(connected))
hologram.network.disconnect()
hologram.network.connect(timeout = 10)
connected = hologram.network.getConnectionStatus()
print("Connected!")
#Initialize local stream object
stream = BytesIO()
#stream = CircularIO(int((10*bitrate_max*record_chunk)/8))
#Open and/or create onboard file to store to
camera_file_handle = open(record_file, 'wb+')
#Begin Pi Cam recording
camera.start_recording(stream, format='h264', bitrate=bitrate_max)
print("Beginning Program")
#Start timer threads
threading.Timer(record_time, interrupt_func).start()
threading.Timer(record_chunk, store_interrupt_func).start()
loop_sum = 0
comms_sum = 0
store_sum = 0
random_cnt = 0
program_start = time.time()
#Main Program Loop
while not(interrupt_bool):
#camera.wait_recording(record_chunk)
if (store_and_send_bool):
threading.Timer(record_chunk, store_interrupt_func).start()
loop_start = time.time()
#executes when record_chunk thread times out
        #controls how often data is ported over the network and to file
#change 'record_chunk' to vary time and data size
#Reset global interrupt flag
store_and_send_bool = False
#Send bytes-like date over the Network (UDP)
comms_start = time.time()
send_network(stream.getvalue())
comms_sum += (time.time()-comms_start)
#Store bytes-like data to file
store_start = time.time()
camera_file_handle.write(stream.getvalue())
store_sum += (time.time()-store_start)
#Clear local file-like object
stream.truncate(0)
stream.seek(0)
#[Optional] Print Diagnostic printout
cnt+=1
print("Sent and Saved Chunk #%d | Loop Time: %f"%(cnt, (time.time()-loop_start)))
loop_sum+=(time.time() - loop_start)
#======================================================================================
#End Recording and Tidy Up
total_time = time.time() - program_start
print("Ending Recording")
camera.stop_recording()
print("Closing Video File")
camera_file_handle.close()
print("Program Time: %fs"%(total_time))
print("Process Time: %fs | Process Usage: %f%%"%(loop_sum, (loop_sum*100)/total_time))
print("\tComms: %fs | %f%%\n\tStore: %fs | %f%%"%(comms_sum, (comms_sum*100)/loop_sum, store_sum,(store_sum*100)/loop_sum))
#camera.stop_preview()
| 3.046875
| 3
|
weasyprint/tests/w3_test_suite/web.py
|
Smylers/WeasyPrint
| 0
|
12780029
|
# coding: utf8
"""
weasyprint.tests.w3_test_suite.web
----------------------------------
A simple web application to run and inspect the results of
the W3C CSS 2.1 Test Suite.
See http://test.csswg.org/suites/css2.1/20110323/
:copyright: Copyright 2011-2012 <NAME> and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
import os.path
import lxml.html
# Don’t try to import Flask on Python 3
from weasyprint import HTML, CSS
def split(something):
return something.split(',') if something else []
def read_testinfo(suite_directory):
with open(os.path.join(suite_directory, '..', 'testinfo.data')) as fd:
lines = iter(fd)
next(lines) # skip labels
for line in lines:
test_id, references, title, flags, links, _, _, assertion = \
line.strip(' \n').split('\t')
yield dict(
test_id=test_id,
assertion=assertion,
title=title,
flags=split(flags),
links=split(links),
references=split(references),
)
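# Hedged sketch of how one tab-separated testinfo.data row maps through the parser above
# (the field values are invented for illustration):
#   't0101-c1-basic-00\t\tBasic test\tinteract\tsyndata.html#values\t\t\tSimple assertion'
#   -> {'test_id': 't0101-c1-basic-00', 'references': [], 'title': 'Basic test',
#       'flags': ['interact'], 'links': ['syndata.html#values'], 'assertion': 'Simple assertion'}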
def read_chapter(filename, tests_by_link):
url_prefix = 'http://www.w3.org/TR/CSS21/'
for link in lxml.html.parse(filename).xpath(
'//th/a[starts-with(@href, "%s")]' % url_prefix):
url = link.get('href')[len(url_prefix):]
if url in tests_by_link:
yield (
link.text_content().strip(),
link.get('href'),
tests_by_link[url]
)
def read_toc(suite_directory, tests_by_link):
filename = os.path.join(suite_directory, 'toc.html')
for link in lxml.html.parse(filename).xpath('//table//a[@href]'):
filename = os.path.join(suite_directory, link.get('href'))
sections = list(read_chapter(filename, tests_by_link))
if sections:
num = sum(len(tests) for _, _, tests in sections)
yield (link.text_content().strip(), sections, num)
def prepare_test_data(suite_directory):
tests = {}
tests_by_link = {}
for test in read_testinfo(suite_directory):
for link in test['links']:
tests[test['test_id']] = test
tests_by_link.setdefault(link, []).append(test)
return list(read_toc(suite_directory, tests_by_link)), tests
def run(suite_directory):
from flask import (
Flask, render_template, abort, send_from_directory, safe_join)
chapters, tests = prepare_test_data(suite_directory)
app = Flask(__name__)
app.jinja_env.globals['len'] = len
@app.route('/')
def toc():
return render_template('toc.html',
chapters=enumerate(chapters, 1), total=len(tests))
@app.route('/chapter<int:chapter_num>/')
def chapter(chapter_num):
try:
title, sections, _ = chapters[chapter_num - 1]
except IndexError:
abort(404)
return render_template('chapter.html',
chapter_num=chapter_num, chapter=title,
sections=enumerate(sections, 1))
@app.route('/chapter<int:chapter_num>/section<int:section_num>/')
def section(chapter_num, section_num):
try:
chapter, sections, _ = chapters[chapter_num - 1]
title, url, tests = sections[section_num - 1]
except IndexError:
abort(404)
return render_template('section.html', **locals())
default_stylesheet = CSS(string='''
@page { margin: 20px; size: 680px }
body { margin: 0 }
''')
@app.route('/test/<test_id>/')
@app.route('/chapter<int:chapter_num>/section<int:section_num>/test<int:test_index>/')
def run_test(chapter_num=None, section_num=None, test_index=None,
test_id=None):
if test_id is None:
try:
chapter, sections, _ = chapters[chapter_num - 1]
title, url, tests = sections[section_num - 1]
test = tests[test_index - 1]
previous_index = test_index - 1
next_index = test_index + 1 if test_index < len(tests) else None
except IndexError:
abort(404)
else:
test = dict(test_id=test_id)
from pygments import highlight
from pygments.lexers import HtmlLexer
from pygments.formatters import HtmlFormatter
filename = safe_join(suite_directory, test['test_id'] + '.htm')
with open(filename, 'rb') as fd:
source = fd.read().decode('utf8')
formatter = HtmlFormatter(linenos='inline')
source = highlight(source, HtmlLexer(), formatter)
css = formatter.get_style_defs('.highlight')
return render_template('run_test.html', **locals())
@app.route('/render/<path:test_id>')
def render(test_id):
document = HTML(
safe_join(suite_directory, test_id + '.htm'),
encoding='utf8',
).render(stylesheets=[default_stylesheet], enable_hinting=True)
pages = [
'data:image/png;base64,' + document.copy([page]).write_png(
)[0].encode('base64').replace('\n', '')
for page in document.pages]
return render_template('render.html', **locals())
@app.route('/test-data/<path:filename>')
def test_data(filename):
return send_from_directory(suite_directory, filename)
app.run(debug=True)
if __name__ == '__main__':
run(os.path.expanduser('~/css2.1_test_suite/20110323/html4/'))
| 2.78125
| 3
|
mlbozone_retrosheet_extraction.py
|
cat-astrophic/MLBozone
| 0
|
12780030
|
# This script parses MLB data from retrosheet and creates a dataframe
# Importing required modules
import pandas as pd
import glob
# Defining username + directory
username = ''
filepath = 'C:/Users/' + username + '/Documents/Data/mlbozone/'
# Create a list of all files in the raw_data subfolder
files = []
for file in glob.glob(filepath + 'raw_data/*'):
files.append(file)
# Declaring some data storage
attendance = []
dates = []
dblh = []
dow = []
stadia = []
dn = []
scorea = []
scoreh = []
team = [] # home team
ateam = [] # away team
gtype = [] # game type
# Main loop
for file in files:
print('Extracting data from ' + file + '.......')
data = pd.read_csv(file, header = None)
attendance = attendance + data[[17]][17].to_list()
dates = dates + data[[0]][0].to_list()
dblh = dblh + data[[1]][1].to_list()
dow = dow + data[[2]][2].to_list()
stadia = stadia + data[[16]][16].to_list()
dn = dn + data[[12]][12].to_list()
team = team + data[[6]][6].to_list()
ateam = ateam + data[[3]][3].to_list()
gt = file[-8:-4]
try:
gt = int(gt)
gt = 'REGS'
except:
pass
gtype = gtype + [gt]*len(data)
if file[-8:] == 'GLAS.TXT':
scorea = scorea + [None]*len(data)
scoreh = scoreh + [None]*len(data)
else:
scorea = scorea + data[[9]][9].to_list()
scoreh = scoreh + data[[10]][10].to_list()
# Compute home winner in non all-star games
winner = [int(scoreh[i] - scorea[i] > 0) if scoreh[i] != None and scorea[i] != None else None for i in range(len(scoreh))]
# Updating FLO to MIA for consistency
team = ['MIA' if t == 'FLO' else t for t in team]
# Creating a dataframe
attendance = pd.Series(attendance, name = 'Attendance')
dates = pd.Series(dates, name = 'Date')
dblh = pd.Series(dblh, name = 'Doubleheader')
dow = pd.Series(dow, name = 'Day')
stadia = pd.Series(stadia, name = 'Stadium')
dn = pd.Series(dn, name = 'Time')
scorea = pd.Series(scorea, name = 'Score_Away')
scoreh = pd.Series(scoreh, name = 'Score_Home')
winner = pd.Series(winner, name = 'Winner')
team = pd.Series(team, name = 'Home_Team')
ateam = pd.Series(ateam, name = 'Away_Team')
gtype = pd.Series(gtype, name = 'Type')
df = pd.concat([attendance, dates, dblh, dow, stadia, dn, scorea, scoreh, winner, team, ateam, gtype], axis = 1)
# Subset to remove non-season data (playoffs, etc.) that are outside of the window
df = df[df.Date > 20100000].reset_index(drop = True)
df = df[df.Date < 20200000].reset_index(drop = True)
# Create a 2010-2016 sample indicator and add to df
subsamp = [1 if d < 20170000 else 0 for d in df.Date]
df = pd.concat([df, pd.Series(subsamp, name = 'SAMPLE')], axis = 1)
# Subset to remove non-standard stadiums
parks = list(df.Stadium.unique())
counts = [len(df[df.Stadium == p]) for p in parks]
keeps = [p for p in parks if counts[parks.index(p)] > 100]
df = df[df.Stadium.isin(keeps)].reset_index(drop = True)
# Save df
df.to_csv(filepath + 'mlb_data.csv', index = False)
| 2.828125
| 3
|
test/run_pub_actor.py
|
F2011B/pyzac
| 0
|
12780031
|
<gh_stars>0
from pyzac import *
@pyzac_decorator(pub_addr="tcp://127.0.0.1:2000")
def publisher():
return 20
@pyzac_decorator(pub_addr="tcp://127.0.0.1:2001")
def publishertwo():
return 3
publisher()
publishertwo()
| 2.140625
| 2
|
tornado/setup.py
|
oberhamsi/FrameworkBenchmarks
| 1
|
12780032
|
<reponame>oberhamsi/FrameworkBenchmarks
from os.path import expanduser
from os import kill
import subprocess
import sys
import time
python = expanduser('~/FrameworkBenchmarks/installs/py2/bin/python')
cwd = expanduser('~/FrameworkBenchmarks/tornado')
def start(args, logfile, errfile):
subprocess.Popen(
python + " server.py --port=8080 --mongo=%s --logging=error" % (args.database_host,),
shell=True, cwd=cwd, stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
for line in subprocess.check_output(["ps", "aux"]).splitlines():
if 'server.py --port=8080' in line:
pid = int(line.split(None,2)[1])
kill(pid, 9)
return 0
if __name__ == '__main__':
class DummyArg:
database_host = 'localhost'
start(DummyArg(), sys.stderr, sys.stderr)
time.sleep(1)
stop(sys.stderr, sys.stderr)
| 2.234375
| 2
|
settings.py
|
mr-karan/FinalYearCSE
| 3
|
12780033
|
<reponame>mr-karan/FinalYearCSE
import os
user_agent = 'when is my cakeday by /u/avinassh'
scopes = ['identity']
app_key = os.environ['APP_KEY']
app_secret = os.environ['APP_SECRET']
refresh_token = os.environ['REFRESH_TOKEN']
access_token = os.environ['ACCESS_TOKEN']
| 1.484375
| 1
|
scripts/land_sea_percentages.py
|
markmuetz/cosar_analysis
| 0
|
12780034
|
<gh_stars>0
# coding: utf-8
import iris
if __name__ == '__main__':
basedir = '/home/markmuetz/mirrors/rdf/um10.9_runs/archive/u-au197/land_sea_mask'
land_mask_cube = iris.load_cube(f'{basedir}/qrparm.mask')
land_mask = land_mask_cube.data[53:92, :]
# Same as in COSAR
num_cells = land_mask.shape[0] * land_mask.shape[1]
sea_frac = (land_mask == 0).sum() / num_cells
land_frac = (land_mask == 1).sum() / num_cells
print(f'Sea percentage: {sea_frac * 100:.2f} %')
print(f'Land percentage: {land_frac * 100:.2f} %')
| 2.1875
| 2
|
backend/everpro/competition_tracking/api/competition_track.py
|
Ascensiony/EverPro-Intelligence-APIs
| 1
|
12780035
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from six.moves.urllib.parse import urlencode, quote
from nltk.tokenize import RegexpTokenizer
from bs4 import BeautifulSoup
import re
import json
import time
tokenizer = RegexpTokenizer(r"[a-zA-Z\s\d]")
options = webdriver.ChromeOptions()
options.add_argument("--incognito")
options.add_argument("--headless")
options.add_argument("--disable-extensions")
options.add_argument("start-maximized")
zone_to_url = {
"US": "https://www.amazon.com/",
"UK": "https://www.amazon.co.uk/",
"IN": "https://www.amazon.co.in/",
}
def _identity_seller1(soup: BeautifulSoup):
search_area = soup.find("div", {"id": "mbc-sold-by-1"})
if search_area is None:
raise Exception("ID1 not found")
try:
search_area = search_area.find("span", {"class": "mbcMerchantName"})
if search_area is not None:
return search_area.text.strip()
except Exception as e:
print(e)
print("Could not retrieve the seller name open the website and debug1")
return ""
raise Exception("Wow1")
def _identity_seller2(soup: BeautifulSoup):
search_area = soup.find("a", {"id": "bylineInfo"})
if search_area is None:
raise Exception("ID2 not found")
try:
if search_area is not None:
return search_area.text.strip() # [10:-6].strip()
except Exception as e:
print(e)
print("Could not retrieve the seller name open the website and debug2")
return ""
raise Exception("Wow2")
def _identity_seller3(soup: BeautifulSoup):
search_area = soup.find("div", {"id": "merchant-info"})
if search_area is None:
raise Exception("ID3 not found")
try:
search_area = soup.find("a", {"id": "sellerProfileTriggerId"})
if search_area is not None:
return search_area.text.strip()
except Exception as e:
print(e)
print("Could not retrieve the seller name open the website and debug3")
return ""
return ""
def identity_seller(soup: BeautifulSoup):
try:
return _identity_seller1(soup)
except Exception as e:
print(e)
try:
return _identity_seller2(soup)
except Exception as e:
print(e)
try:
return _identity_seller3(soup)
except Exception as e:
print(e)
return ""
return ""
def get_stock_info(soup: BeautifulSoup):
try:
stock_info = soup.find("div", {"id": "availability_feature_div"})
if stock_info is None:
raise Exception("availability_feature_div ID not found")
stock_info = "".join(tokenizer.tokenize(stock_info.text.strip().lower()))
stock_info = " ".join(stock_info.split())
except Exception as e:
stock_info = soup.find("div", {"id": "availability"})
return stock_info.text.strip()
try:
if stock_info is not None:
return stock_info
except Exception as e:
pass
return ""
def get_stars_reviews(soup: BeautifulSoup):
try:
search_area = soup.find("div", "a-row")
try:
st = search_area.find("i")["class"][-1]
except Exception as e:
print(str(e) + " Stars could not be retrieved")
st = ""
try:
rev = search_area.find("span", "a-color-link").text.strip()
except Exception as e:
print(str(e) + " Reviews could not be retrieved")
rev = ""
return st, rev
except Exception as e:
print(str(e) + " Stars and reviews could not be retrieved")
return "", ""
return "", ""
def get_price_from_carousel(soup: BeautifulSoup):
try:
return soup.find("div", "a-row a-color-price").text.strip()
except Exception as e:
print(str(e) + " Price from Carousel could not be retrieved")
return ""
return ""
def extract_details(link: str, tracker):
browserdriver = webdriver.Chrome(options=options, executable_path=r"./chromedriver")
browserdriver.get(link)
timeout = 15
try:
wait = WebDriverWait(browserdriver, timeout)
except TimeoutException as e:
print(e)
print("Timeout")
browserdriver.quit()
return {}
try:
content = browserdriver.page_source
soup = BeautifulSoup(content, "html.parser")
browserdriver.quit()
product_title = soup.find("span", {"id": "productTitle"}).text.strip()
seller = str(identity_seller(soup)).strip()
if seller is None:
return dict()
if seller == tracker:
return dict()
stock_info = get_stock_info(soup)
return dict(
product_title=product_title,
seller=seller,
stock_info=stock_info,
)
except Exception as e:
browserdriver.quit()
print(e)
return {}
def parse_carousel(soup: BeautifulSoup, tracker):
skipped = 0
r = []
for data in soup.findChildren("li", recursive=False):
try:
asin = data.find(
"div", "a-section sp_offerVertical p13n-asin sp_ltr_offer"
)["data-asin"]
link = data.find("a", "a-link-normal")["href"]
details = extract_details(link, tracker)
if details is None or details == dict():
print("SKIPPING DUE TO SAME SELLER")
skipped += 1
time.sleep(7)
continue
stars, rev = get_stars_reviews(data)
price = get_price_from_carousel(data)
details["stars"] = stars
details["reviews"] = rev
details["price"] = price
print(asin)
print(link)
print(details)
print()
r.append(dict(asin=asin, linkToProduct=link, details=details))
time.sleep(7)
except Exception as e:
skipped += 1
print(e)
continue
print(f"SKIPPED COMPETITORS - {skipped}")
return r
def lookup_similar_products(soup: BeautifulSoup, tracker):
    search_area = None
    try:
        search_area = soup.find("div", {"id": "sp_detail"})
        try:
            search_area = search_area.find("ol", "a-carousel")
        except Exception as e:
            print(str(e) + " carousel not found inside sp_detail")
    except Exception as e:
        print(str(e) + " sp_detail not found")
    if search_area is None:
        # fall back to the second sponsored-products block only if the first one gave nothing
        try:
            search_area = soup.find("div", {"id": "sp_detail2"})
            try:
                search_area = search_area.find("ol", "a-carousel")
            except Exception as e:
                print(str(e) + " carousel not found inside sp_detail2")
        except Exception as e:
            print(str(e) + " sp_detail2 not found")
            return []
    try:
        if search_area is not None:
            return parse_carousel(search_area, tracker)
    except Exception as e:
        print(str(e) + " failed to parse the similar-products carousel")
        return []
    return []
def track_for_product(soup: BeautifulSoup, asin, zone, search):
try:
seller_name = identity_seller(soup)
if seller_name is None:
return "NA"
print(seller_name)
tracker = seller_name
tracking = lookup_similar_products(soup, tracker)
return dict(
tracker_firm=tracker,
tracking=tracking,
asin=asin,
zone=zone,
search=search,
)
except Exception as e:
print(e)
print("IN track_for_product")
return {}
return {}
def amazon_track_competition(asin: str, zone: str, *args, **kwargs):
browserdriver = webdriver.Chrome(options=options, executable_path=r"./chromedriver")
search = zone_to_url[zone] + "dp/" + asin
browserdriver.get(search)
print(search)
print()
timeout = 15
try:
wait = WebDriverWait(browserdriver, timeout)
except TimeoutException as e:
print(e)
print("Timeout")
browserdriver.quit()
return {}
try:
content = browserdriver.page_source
soup = BeautifulSoup(content, "html.parser")
browserdriver.quit()
return track_for_product(soup, asin, zone, search)
except Exception as e:
browserdriver.quit()
print(e)
print("IN amazon_track_competition")
return {}
# print(amazon_track_competition("B07Y", "US"))
# amazon_track_competition("B07YWS4QTH", "US")
# amazon_track_competition("B07WSHWNH8", "IN")
# amazon_track_competition("B01MTQ5M7B", "IN")
# amazon_track_competition("B081KBXT5N", "US")
# amazon_track_competition("B07N39NDDB", "UK")
| 2.6875
| 3
|
final-project/audioquiz/views.py
|
krishna-vasudev/lightify
| 0
|
12780036
|
from django.shortcuts import render,redirect
from .models import QuesModel
from django.http import JsonResponse
# Create your views here.
def audioquiz(request):
quiz=QuesModel.objects.all()
if request.method == 'POST':
print(request.POST)
score = 0
wrong = 0
correct = 0
total = 0
for q in quiz:
total += 1
print(request.POST.get(q.question))
print(q.ans)
print()
if q.ans == request.POST.get(q.question):
score += 10
correct += 1
else:
wrong += 1
percent = score/(total*10) * 100
context = {
'score': score,
'time': request.POST.get('timer'),
'correct': correct,
'wrong': wrong,
'percent': percent,
'total': total
}
return render(request, 'results.html', context)
return render(request,'aqz.html',{'quiz':quiz})
def getquiz(request):
quiz=QuesModel.objects.all()
quiz=list(quiz.values())
for item in quiz:
del item['ans']
return JsonResponse({"quiz":quiz})
def results(request):
pass
| 2.515625
| 3
|
quadpy/ball/tools.py
|
gdmcbain/quadpy
| 1
|
12780037
|
# -*- coding: utf-8 -*-
#
from math import pi
import numpy
from .. import helpers
def show(scheme, backend="mpl"):
"""Displays scheme for 3D ball quadrature.
"""
helpers.backend_to_function[backend](
scheme.points,
scheme.weights,
volume=4.0 / 3.0 * pi,
edges=[],
balls=[((0.0, 0.0, 0.0), 1.0)],
)
return
def integrate(f, center, radius, rule, dot=numpy.dot):
center = numpy.array(center)
rr = numpy.multiply.outer(radius, rule.points)
rr = numpy.swapaxes(rr, 0, -2)
ff = numpy.array(f((rr + center).T))
return numpy.array(radius) ** 3 * dot(ff, rule.weights)
| 3.234375
| 3
|
tests/matchers/test_StringMatcher.py
|
javierseixas/pyJsonAssert
| 0
|
12780038
|
import unittest
from pyjsonassert.matchers import StringMatcher
class TestStringMatcher(unittest.TestCase):
string = "asfasdf"
number_as_string = "12"
number = 12
float = 12.2
boolean = False
def test_should_identify_an_string(self):
assert StringMatcher.match(self.string) is True
def test_should_consider_string_a_number_that_comes_as_string_type(self):
assert StringMatcher.match(self.number_as_string) is True
def test_should_return_false_if_varible_is_not_and_string(self):
assert StringMatcher.match(self.number) is False
assert StringMatcher.match(self.float) is False
assert StringMatcher.match(self.boolean) is False
| 3.25
| 3
|
bsp/ls1cdev/key.py
|
penghongguang/loong
| 15
|
12780039
|
from machine import Pin
led1 = Pin(("LED1", 52), Pin.OUT_PP)
led2 = Pin(("LED2", 53), Pin.OUT_PP)
key1 = Pin(("KEY1", 85), Pin.IN, Pin.PULL_UP)
key2 = Pin(("KEY2", 86), Pin.IN, Pin.PULL_UP)
while True:
if key1.value():
led1.value(1)
else:
led1.value(0)
if key2.value():
led2.value(1)
else:
led2.value(0)
| 3.28125
| 3
|
02/premier.py
|
alexprengere/PythonExercises
| 23
|
12780040
|
<reponame>alexprengere/PythonExercises
#!/usr/bin/env python
import sys
def is_prime(n):
"""Returns True is n is prime, False otherwise.
>>> is_prime(4)
False
>>> is_prime(7)
True
"""
if n <= 1:
return False
for i in xrange(2, n):
if n % i == 0:
# i divides n, n is not prime
return False
return True
def yield_prime_factors(n, primes):
"""Yields the prime factors for n. Primes are second argument.
>>> primes = [2, 3, 5, 7]
>>> list(yield_prime_factors(12, primes))
[2, 2, 3]
>>> list(yield_prime_factors(7, primes))
[7]
"""
# n is divided by its own factors
for prime in primes:
if prime > n:
break
while n % prime == 0:
yield prime
n = n / prime
def _test():
"""Test comments."""
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
if len(sys.argv) != 3:
print 'Usage: python %s inf sup' % sys.argv[0]
exit(1)
inf = int(sys.argv[1])
sup = int(sys.argv[2])
# List of prime numbers <= sup, e.g. [2, 3, 5, ...]
primes = [p for p in xrange(1 + sup) if is_prime(p)]
# Dict of prime factors: {12 : [2, 2, 3], ...}
factors = {}
for p in xrange(inf, sup + 1):
factors[p] = list(yield_prime_factors(p, primes))
print "Prime numbers between %s and %s:" % (inf, sup)
print '\n'.join([str(p) for p in primes if inf <= p <= sup])
for p in sorted(factors.keys()):
if len(factors[p]) > 1:
# p is not prime
print "Prime factors of number %s are %s" % (p, factors[p])
| 4.375
| 4
|
yeast_ubi/gpmdb_ptm_bot.py
|
RonBeavis/StandardsAndPractices
| 0
|
12780041
|
<gh_stars>0
#
# Copyright © 2021 <NAME>
# Licensed under Apache License, Version 2.0, January 2004
#
# Takes a list of protein accession numbers in a file and generates
# a list of lysine residues that have been observed with ubiquitination
# and the number of times the ubiquitination has been observed.
import sys
import requests
import re
import json
import os
#obtain the protein sequence for the protein identified by _l
def get_protein(_l):
url = 'http://rest.thegpm.org/1/protein/sequence/acc=%s' % (_l)
session = requests.session()
try:
r = session.get(url,timeout=20)
except requests.exceptions.RequestException as e:
print(e)
return None
try:
values = json.loads(r.text)
except:
return None
return values[0]
# generate a frequency histogram of ubiquitinylation observations and return the values
def get_list(_l):
#get the protein sequence to sanity check the results
seq = list(get_protein(_l))
#create a session for a REST request
session = requests.session()
values = {} #will hold the information retrieved by the REST request
#formulate a URL to request information about ubiquitinylation for the protein identified by _l
url = 'http://gpmdb.thegpm.org/1/peptide/uf/acc=%s&pos=1-%i&w=n' % (_l,len(seq))
try:
r = session.get(url,timeout=20)
except requests.exceptions.RequestException as e:
print(e)
return None
try:
values = json.loads(r.text)
except:
return None
rvalues = {} #cleaned up results
#run through the results of the REST request and clean them up
for v in values:
try:
if values[v] > 0 and seq[int(v)-1] == 'K':
rvalues[int(v)] = values[v]
except:
continue
return rvalues;
#deal with command line arguments
if len(sys.argv) < 2:
print('gpmdb_ptm_bot.py FILENAME')
exit()
#file contains a list of accession numbers
filename = sys.argv[1]
accs = [l.strip() for l in open(filename,'r')]
for acc in accs:
ls = get_list(acc)
print(acc)
for v in sorted(ls):
print('%i\t%i' % (v,ls[v]))
| 3.015625
| 3
|
inventory/suppliers/admin.py
|
cnobile2012/inventory
| 10
|
12780042
|
# -*- coding: utf-8 -*-
#
# inventory/suppliers/admin.py
#
"""
Supplier Admin
"""
__docformat__ = "restructuredtext en"
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from inventory.common.admin_mixins import UserAdminMixin, UpdaterFilter
from .models import Supplier
#
# SupplierAdmin
#
@admin.register(Supplier)
class SupplierAdmin(UserAdminMixin, admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('public_id', 'project', 'name', 'stype',
'address_01', 'address_02', 'city', 'subdivision',
'postal_code', 'country', 'phone', 'fax', 'email',
'url', 'language', 'timezone',)}),
(_('Status'), {'classes': ('collapse',),
'fields': ('active', 'creator', 'created', 'updater',
'updated',)}),
)
readonly_fields = ('public_id', 'creator', 'created', 'updater',
'updated',)
list_display = ('name', 'public_id', 'stype', 'phone', 'email',
'url_producer', 'project', 'updater_producer', 'active',)
list_editable = ('stype', 'active',)
search_fields = ('project__name', 'country__country', 'city',
'region__region', 'region__region_code',)
list_filter = ('stype', 'active', 'project__name', UpdaterFilter,)
ordering = ('name',)
# class Media:
# js = ('vendor/js.cookie-2.0.4.min.js',
# 'js/inheritance.js',
# 'js/regions.js',)
| 1.921875
| 2
|
blackpixels.py
|
Pranit18De/Removing-Hand-Written-Annotations
| 2
|
12780043
|
<reponame>Pranit18De/Removing-Hand-Written-Annotations
import cv2
import numpy
from matplotlib import pyplot as plt
from PIL import Image
# read the binarized page image as a single-channel greyscale array
src = cv2.imread('imgbin.jpg', 0)
arr=numpy.ndarray.tolist(src)
bp=[]
l1=[]
arr1=[]
for i in range(len(arr)):
count=0
for j in range(len(arr[i])):
if arr[i][j]<=10:
count+=1
bp.append(count)
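# A vectorized equivalent of the per-row black-pixel count above (a sketch,
# assuming the same darkness threshold of 10 on the greyscale array `src`):
#
#   bp = (src <= 10).sum(axis=1).tolist()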
f=open("bpinrow.txt", "w")
for i in range(len(bp)):
if bp[i]<50:
l1.append(numpy.ndarray.tolist(src[i:i+1][0]))
f.write("Row " + str(i) + ": " + str(bp[i]) + "\n")
f.close()
arr1 = numpy.array(l1)
plt.imshow(arr1, cmap='gray', interpolation='none')
# save the figure before plt.show(), otherwise an empty canvas is written
plt.savefig("imageblk.jpg", dpi=500)
plt.show()
# scipy.misc.toimage has been removed from SciPy; PIL's Image.fromarray is the equivalent
img = Image.fromarray(arr1.astype(numpy.uint8))
img.save("annotations.png")
| 2.4375
| 2
|
sidekick/models/__init__.py
|
cybera/netbox_sidekick
| 1
|
12780044
|
<reponame>cybera/netbox_sidekick
from .accounting import AccountingSource # noqa: F401
from .accounting import AccountingSourceCounter # noqa: F401
from .accounting import AccountingProfile # noqa: F401
from .accounting import BandwidthProfile # noqa: F401
from .networkservice import LogicalSystem, RoutingType # noqa: F401
from .networkservice import NetworkServiceType # noqa: F401
from .networkservice import NetworkService # noqa: F401
from .networkservice import NetworkServiceDevice # noqa: F401
from .networkservice import NetworkServiceL3 # noqa: F401
from .networkservice import NetworkServiceL2 # noqa: F401
from .networkservice import NetworkServiceGroup # noqa: F401
from .nic import NIC # noqa: F401
| 1.257813
| 1
|
datawire/views/api/collections.py
|
arc64/datawi.re
| 2
|
12780045
|
<filename>datawire/views/api/collections.py
from flask import Blueprint # , request
from flask_login import current_user
from apikit import obj_or_404, jsonify, Pager, request_data
from datawire.model import Collection, db
from datawire import authz
blueprint = Blueprint('collections', __name__)
@blueprint.route('/api/1/collections', methods=['GET'])
def index():
q = Collection.all_by_user(current_user)
data = Pager(q).to_dict()
results = []
for lst in data.pop('results'):
ldata = lst.to_dict()
ldata['permissions'] = {
'write': authz.collection_write(lst.id)
}
results.append(ldata)
data['results'] = results
return jsonify(data)
@blueprint.route('/api/1/collections', methods=['POST', 'PUT'])
def create():
authz.require(authz.logged_in())
data = request_data()
data['creator'] = current_user
lst = Collection.create(data, current_user)
db.session.commit()
return view(lst.id)
@blueprint.route('/api/1/collections/<id>', methods=['GET'])
def view(id):
authz.require(authz.collection_read(id))
coll = obj_or_404(Collection.by_id(id))
data = coll.to_dict()
data['can_write'] = authz.collection_write(id)
return jsonify(data)
@blueprint.route('/api/1/collections/<id>', methods=['POST', 'PUT'])
def update(id):
authz.require(authz.collection_write(id))
coll = obj_or_404(Collection.by_id(id))
coll.update(request_data(), current_user)
db.session.add(coll)
db.session.commit()
return view(id)
@blueprint.route('/api/1/collections/<id>', methods=['DELETE'])
def delete(id):
authz.require(authz.collection_write(id))
coll = obj_or_404(Collection.by_id(id))
coll.delete()
db.session.commit()
return jsonify({'status': 'ok'})
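# A minimal registration sketch (assumption: a Flask application object named
# `app` is created elsewhere in the datawire package; only the blueprint
# defined above is taken from this module):
#
#   from datawire.views.api.collections import blueprint as collections_api
#   app.register_blueprint(collections_api)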
| 2.21875
| 2
|
vestlus/forms/__init__.py
|
lehvitus/vestlus
| 12
|
12780046
|
<reponame>lehvitus/vestlus
# vestlus:forms
from .channel import ChannelForm
from .message import MessageForm
from .message import PrivateMessageForm
from .message import GroupMessageForm
from .membership import MembershipForm
| 1.117188
| 1
|
partners_histo.py
|
ccicconetti/cordis-scripts
| 2
|
12780047
|
<gh_stars>1-10
#!/usr/bin/python
import csv
import argparse
parser = argparse.ArgumentParser(
description='Parse the H2020 cordis csv file and produce statistics about participants',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--input", type=str,
default='cordis-h2020projects.csv',
help="input file")
parser.add_argument("--output", type=str,
default='output.dat',
help="input file")
parser.add_argument("--count", type=str,
default='',
help="count the number of projects of a given participant")
parser.add_argument("--histo", action="store_true",
default=False,
help="print the histogram of projects by participant")
parser.add_argument("--no_names", action="store_true",
default=False,
help="do not print the participant names")
parser.add_argument("--no_country", action="store_true",
default=False,
help="do not distinguish participants based on country")
parser.add_argument("--delimiter", type=str,
default=';',
help="delimiter")
args = parser.parse_args()
if args.count and args.histo:
raise Exception("Options --count and --histo are incompatible")
num_rows = 0
participants = dict()
with open(args.input, 'rb') as csvfile:
cordis = csv.reader(csvfile, delimiter=args.delimiter, quotechar='"')
for row in cordis:
num_rows += 1
name = '{} ({})'.format(row[16], row[17]) \
if not args.no_country \
else row[16]
if name not in participants:
participants[name] = 0
participants[name] = participants[name] + 1
        # row[18] holds the participant names and row[19] their countries,
        # both as ';'-separated lists aligned by position
        for part, country in zip(row[18].split(';'), row[19].split(';')):
            if len(part) == 0:
                continue
            name = '{} ({})'.format(part, country) \
                if not args.no_country \
                else part
            if name not in participants:
                participants[name] = 0
            participants[name] += 1
if args.count:
    print participants.get(args.count, 0)
elif args.histo:
histo_values = [1, 2, 3, 4, 5, 10, 50, 100, 500]
histo_values_upper = histo_values[1:]
histo_values_upper.append(num_rows)
histo = dict()
for h in histo_values:
histo[h] = 0
for part, cnt in sorted(participants.items(), key=lambda x: x[1], reverse=False):
for lhs, rhs in zip(histo_values, histo_values_upper):
if lhs <= cnt < rhs:
histo[lhs] += 1
with open(args.output, 'wb') as outfile:
for k, v in sorted(histo.items(), key=lambda x: x[0], reverse=False):
outfile.write('{} {}\n'.format(k, v))
else:
print ('total number of projects: {}\n'
'total number of participants: {}').format(num_rows, len(participants))
with open(args.output, 'wb') as outfile:
for part, cnt in sorted(participants.items(), key=lambda x: x[1], reverse=True):
if args.no_names:
outfile.write('{}\n'.format(cnt))
else:
outfile.write('{};{}\n'.format(part, cnt))
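#
# Example invocations (a sketch; the file names and the participant string are
# hypothetical, and the script expects Python 2):
#
#   ./partners_histo.py --histo --output histo.dat
#   ./partners_histo.py --count "SOME UNIVERSITY (DE)"
#
# The second form prints how many H2020 projects list that participant,
# keyed as "NAME (COUNTRY)" unless --no_country is given.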
| 3.453125
| 3
|
app.py
|
cerule7/exchange-website
| 0
|
12780048
|
#----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
from flask import Flask, render_template, request
from flask_basicauth import BasicAuth
# from flask.ext.sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
from forms import *
from pymongo import MongoClient
from alg import *
from language_data import *
#----------------------------------------------------------------------------#
# App Config
#----------------------------------------------------------------------------#
app = Flask(__name__)
app.config.from_object('config')
app.config['BASIC_AUTH_USERNAME'] = 'langadmin'
app.config['BASIC_AUTH_PASSWORD'] = '<PASSWORD>'
basic_auth = BasicAuth(app)
#open mongodb connection
client = MongoClient("mongodb+srv://admin:rutgers1@studentinfo-eoita.azure.mongodb.net/test?retryWrites=true")
db = client.test
#db = SQLAlchemy(app)
# Automatically tear down SQLAlchemy.
'''
@app.teardown_request
def shutdown_session(exception=None):
db_session.remove()
'''
# Login required decorator.
'''
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('login'))
return wrap
'''
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route('/')
def home():
return render_template('pages/home.html')
@app.route('/about')
def about():
return render_template('pages/about.html')
@app.route('/signup', methods=['POST', 'GET'])
def signup():
if request.method == 'POST': #successful form post
results = request.get_json()
db.inventory.insert(results) #load form results into mongodb
update()
return render_template('pages/register.html')
@app.route('/learn_data')
@basic_auth.required
def gen_learn_chart():
gen_learn_pie()
return render_template('learn-count-pie.html')
@app.route('/share_data')
@basic_auth.required
def gen_share_chart():
gen_share_pie()
return render_template('share-count-pie.html')
@app.route('/view_students', methods=['GET', 'PUT'])
@basic_auth.required
def students():
if request.method == 'PUT':
results = request.get_json()
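        # the first element of the JSON payload is a header object whose
        # 'status' field names the bulk action (pair, remove, correct,
        # incorrect or delete) to apply to the remaining student records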
header = results.pop(0)
if header['status'] == 'pair':
s1 = results[0]['name']
s2 = results[1]['name']
db.inventory.update(
{"name": s1},
{"$set": {"partner": s2}}
)
db.inventory.update(
{"name": s2},
{"$set": {"partner": s1}}
)
if header['status'] == 'remove':
for r in results:
db.inventory.update(
{"name": r['name']},
{"$set": {"partner": "None"}}
)
if header['status'] == 'correct':
for r in results:
db.inventory.update(
{"name": r['name']},
{"$set": {"placement": "Correct"}}
)
if header['status'] == 'incorrect':
for r in results:
db.inventory.update(
{"name": r['name']},
{"$set": {"placement": "Incorrect"}}
)
if header['status'] == 'delete':
for r in results:
db.inventory.delete_one(
{"name": r['name']}
)
update()
rows = db.inventory.find({})
rowslist = []
for r in rows:
llist = [r['ll1'], r['ll2'], r['ll3']]
llist[:] = [x for x in llist if x != 'None']
llist.sort()
slist = [r['sl1'], r['sl2'], r['sl3']]
slist[:] = [x for x in slist if x != 'None']
slist.sort()
student = {
'name': r['name'],
'year': r['year'],
'ruid': r['ruid'],
'email': r['email'],
'learn_langs': make_string(llist),
'share_langs': make_string(slist),
'partner': r['partner'],
'placement': r['placement']
}
rowslist.append(student)
return render_template('pages/students.html', rows=rowslist)
def make_string(langs):
    # join the language names into a single comma-separated string
    return ', '.join(langs)
@app.route('/make_pairs', methods=['POST', 'GET'])
@basic_auth.required
def make():
if request.method == 'POST':
results = request.get_json()
for r in results:
            # pair strings look like "Alice & Bob"; split on '&' and trim the whitespace
            names = [n.strip() for n in r['names'].split('&')]
            s1 = names[0]
            s2 = names[1]
db.inventory.update(
{"name": s1},
{"$set": {"partner": s2}}
)
db.inventory.update(
{"name": s2},
{"$set": {"partner": s1}}
)
pairlist = []
pairs = make_pairs()
for pair in pairs:
langs = pair.language1 + " & " + pair.language2
names = pair.student1.name + " & " + pair.student2.name
p = {
'languages' : langs,
'names' : names,
'prof1' : pair.prof1,
'prof2' : pair.prof2
}
pairlist.append(p)
return render_template('pages/pairs.html', pairs=pairlist)
@app.route('/register')
def register():
form = RegisterForm(request.form)
return render_template('forms/register.html', form=form)
@app.route('/login', methods=['GET'])
def login():
form = LoginForm(request.form)
return render_template('forms/login.html', form=form)
@app.route('/forgot')
def forgot():
form = ForgotForm(request.form)
return render_template('forms/forgot.html', form=form)
# Error handlers
@app.errorhandler(500)
def internal_error(error):
#db_session.rollback()
return render_template('errors/500.html'), 500
@app.errorhandler(404)
def not_found_error(error):
return render_template('errors/404.html'), 404
if not app.debug:
file_handler = FileHandler('error.log')
file_handler.setFormatter(
Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
)
app.logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info('errors')
def update():
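    # backfill documents that are missing optional fields (second/third
    # learn/share languages, partner, ratings, placement) so the views above
    # can rely on every key being present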
db.inventory.update_many(
{"$or" : [
{"ll2": {"$exists": False}},
{"ll2": None},
]},
{"$set": {"ll2": "None", "lp2": "None", "ll3": "None", "lp3": "None"}}
)
db.inventory.update_many(
{"$or" : [
{"ll3": {"$exists": False}},
{"ll3": None},
]},
{"$set": {"ll3": "None", "lp3": "None"}}
)
db.inventory.update_many(
{"$or" : [
{"sl2": {"$exists": False}},
{"sl2": None},
]},
{"$set": {"sl2": "None", "sp2": "None", "sl3": "None", "sp3": "None"}}
)
db.inventory.update_many(
{"$or" : [
{"sl3": {"$exists": False}},
{"sl3": None},
]},
{"$set": {"sl3": "None", "sp3": "None"}}
)
db.inventory.update_many(
{"$or" : [
{"partner": {"$exists": False}},
{"partner": None},
]},
{"$set": {"partner": "None"}}
)
db.inventory.update_many(
{"$or" : [
{"rate1": {"$exists": False}},
{"rate1": None},
]},
{"$set": {"rate1": "3"}}
)
db.inventory.update_many(
{"$or" : [
{"rate2": {"$exists": False}},
{"rate2": None},
]},
{"$set": {"rate2": "3"}}
)
db.inventory.update_many(
{"$or" : [
{"placement": {"$exists": False}},
{"placement": None},
]},
{"$set": {"placement": "Unverified"}}
)
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
if __name__ == '__main__':
app.run()
# Or specify port manually:
'''
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
'''
| 2.5
| 2
|
src/backend/apps/media/__init__.py
|
Vixx-X/ati-project
| 0
|
12780049
|
<reponame>Vixx-X/ati-project<filename>src/backend/apps/media/__init__.py<gh_stars>0
"""
Media module
"""
from flask import Blueprint
bp = Blueprint("media", __name__)
from . import urls
| 1.546875
| 2
|
src/base/exceptions.py
|
system-design-2/user-service
| 0
|
12780050
|
from rest_framework import exceptions, status
from rest_framework.views import Response, exception_handler
def custom_exception_handler(exc, context):
    # Call REST framework's default exception handler first to get the standard error response.
    response = exception_handler(exc, context)
    # if DRF did not produce a response (e.g. a database IntegrityError or other
    # non-API exception), fall back to a generic 500 with the exception message
    if response is None:
        response = Response({'message': str(exc)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    return response
class UnprocessableEntity(exceptions.APIException):
status_code = 422
    default_detail = 'Cannot process the provided data'
default_code = 'unprocessable_entity'
class BadRequest(exceptions.APIException):
status_code = 400
default_detail = 'Bad Request'
default_code = 'bad_request'
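# A minimal wiring sketch (assumptions: this module is importable as
# base.exceptions and the project configures Django REST framework settings):
#
#   REST_FRAMEWORK = {
#       'EXCEPTION_HANDLER': 'base.exceptions.custom_exception_handler',
#   }
#
# A view can then raise the custom exceptions directly, e.g.:
#
#   raise UnprocessableEntity('Cannot process the provided data')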
| 2.484375
| 2
|