# ==== src/cogs/embeds.py (StarbuckBarista/StatBot, MIT) ====
import discord
cooldownEmbed = discord.Embed.from_dict({
    "type": "rich",
    "title": "Cooldown",
    "description": ":x: That command is currently on cooldown :x:",
    "color": 15158332,
    "author": {"name": "Error",
               "icon_url": "https://discordemoji.com/assets/emoji/5316_Error_512x512_by_DW.png"},
})
rateLimitEmbed = discord.Embed.from_dict({
    "type": "rich",
    "title": "Rate Limit",
    "description": ":x: The Minecraft API is currently on cooldown :x:",
    "color": 15158332,
    "author": {"name": "Error",
               "icon_url": "https://discordemoji.com/assets/emoji/5316_Error_512x512_by_DW.png"},
})
invalidCategory = discord.Embed.from_dict({
    "type": "rich",
    "title": "Invalid Category",
    "description": ":x: The category you provided is invalid :x:",
    "color": 15158332,
    "author": {"name": "Error",
               "icon_url": "https://discordemoji.com/assets/emoji/5316_Error_512x512_by_DW.png"},
})
invalidIdentifier = discord.Embed.from_dict({
    "type": "rich",
    "title": "Invalid Identifier",
    "description": ":x: The identifier you provided is invalid :x:",
    "color": 15158332,
    "author": {"name": "Error",
               "icon_url": "https://discordemoji.com/assets/emoji/5316_Error_512x512_by_DW.png"},
})
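# Example usage from a command handler (a sketch, not part of the original cog;
# `ctx` is assumed to be a discord.py Context):
#   await ctx.send(embed=cooldownEmbed)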
# ==== El masgono.py (jayounghoyos/My-Scripts, MIT) ====
#nombre = input("¿Cual es tu nombre?")
#nombre = nombre.upper()
#print(nombre)
#nombre = input("¿Cual es tu nombre?")
#nombre = nombre.capitalize()
#print(nombre)
#nombre = input("¿Cual es tu nombre?")
#nombre = nombre.lower()
#print(nombre)
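# Active version below: read the name, then trim surrounding whitespace.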
nombre = input("¿Cual es tu nombre?")
nombre = nombre.strip()
print(nombre)
# ==== buildroot/support/testing/tests/package/test_perl_dbd_mysql.py (bramkragten/operating-system, Apache-2.0) ====
from tests.package.test_perl import TestPerlBase
class TestPerlDBDmysql(TestPerlBase):
"""
package:
DBD-mysql XS
direct dependencies:
DBI XS
"""
config = TestPerlBase.config + \
"""
BR2_PACKAGE_PERL=y
BR2_PACKAGE_PERL_DBD_MYSQL=y
"""
    def test_run(self):
        """Boot the target and check that the DBI and DBD::mysql modules load."""
self.login()
self.module_test("DBI")
self.module_test("DBD::mysql")
# ==== tomorrow_pdf_converter/t_parser/tomorrow_parser.py (sebwarnke/tomorrow-bank-statement-converter, MIT) ====
import re
from py_pdf_parser.components import PDFDocument
from py_pdf_parser.components import PDFElement
from tomorrow_pdf_converter.t_parser.statement import Statement
from tomorrow_pdf_converter.t_parser.transaction import Transaction
closing_element_text = "ZUSAMMENFASSUNG"
# This regex matches the headline of each per day transaction section in a Tomorrow Document
date_sep_regex = r"^(MONTAG|DIENSTAG|MITTWOCH|DONNERSTAG|FREITAG|SAMSTAG|SONNTAG),\s(\d{1,2}\.\s(JANUAR|FEBRUAR|MÄRZ|APRIL|MAI|JUNI|JULI|AUGUST|SEPTEMBER|OKTOBER|NOVEMBER|DEZEMBER)\s\d{4})$"
# This regex matches the date string of the above headline.
date_regex = r"^\D*(\d{1,2})\.\s(JANUAR|FEBRUAR|MÄRZ|APRIL|MAI|JUNI|JULI|AUGUST|SEPTEMBER|OKTOBER|NOVEMBER|DEZEMBER)\s(\d{4})$"
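# Illustrative sanity check (the sample headline below is assumed, not taken
# from a real statement):
#   >>> bool(re.match(date_sep_regex, "MONTAG, 3. AUGUST 2020"))
#   True
#   >>> re.search(date_regex, "3. AUGUST 2020").groups()
#   ('3', 'AUGUST', '2020')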
def extract_purpose(section):
if section.elements.__len__() >= 5:
purpose = section.elements.__getitem__(section.elements.__len__() - 1).text()
else:
purpose = None
return purpose
def extract_transaction_type(section):
return section.elements.filter_by_regex("Überweisung|Kartenzahlung|Lastschrift").extract_single_element().text()
def extract_contact(section):
return section.elements.__getitem__(0).text()
def extract_amount(section):
    amount = section.elements.filter_by_regex(r"^[+-](((?:[1-9]\d{0,2}(,\d{3})*)|0)?\.\d{1,2})\s€$") \
.extract_single_element().text()
return amount.strip("+€ ").replace(",", "")
def extract_iban_bic(section):
iban_bic = section.elements.filter_by_text_contains("IBAN").extract_single_element().text()
    match_iban = re.search(r"[A-Z]{2}[0-9]{2}(?:[ ]?[0-9]{4}){4}(?!(?:[ ]?[0-9]){3})(?:[ ]?[0-9]{1,2})?", iban_bic)
    match_bic = re.search(r"BIC:\s(\w+)", iban_bic)
if match_iban is not None and match_bic is not None:
return match_iban.group(0), match_bic.group(0)
else:
return None
def extract_date(section):
return section.name[0:10]
class TomorrowParser:
document: PDFDocument
closing_element: PDFElement
date_section_unique_names = []
transaction_section_unique_names = []
statement: Statement
def __init__(self, pdf_document):
self.document = pdf_document
self.statement = Statement()
def run(self):
self.ignore_footers()
self.find_closing_element()
self.create_date_sections()
self.create_transaction_sections()
self.parse_transaction_sections()
return self.statement
def find_closing_element(self):
print("Filtering for Closing Elements with text: " + closing_element_text)
element_list = self.document.elements.filter_by_text_equal(closing_element_text)
if element_list.__len__() > 1:
print(
"Found more than one element with text ['" + closing_element_text + "']. Expected only one to be the closing element.")
exit(-1)
else:
            self.closing_element = element_list.extract_single_element()
# When this method returns we identified and created all sections referring to a date a transaction took place.
# Each section was assigned a unique name which we stored.
def create_date_sections(self):
print("Creating Sections per Date")
date_section_elements = self.document.elements.filter_by_regex(date_sep_regex)
# iterate length of ElementList
for i in range(date_section_elements.__len__()):
name = convert_to_iso_date(date_section_elements.__getitem__(i))
# for the last element of ElementList
if i == date_section_elements.__len__() - 1:
unique_name = self.document.sectioning.create_section(name, date_section_elements.__getitem__(i),
self.closing_element, False).unique_name
# ... for all other elements
else:
unique_name = self.document.sectioning.create_section(name, date_section_elements.__getitem__(i),
date_section_elements.__getitem__(i + 1),
False).unique_name
self.date_section_unique_names.append(unique_name)
print("-- Date Sections found: " + str(len(self.date_section_unique_names)))
# When this method returns we identified and created all sections referring to an individual transaction.
# Each section was assigned a unique name which we stored.
def create_transaction_sections(self):
print("Creating Transaction Sections")
purpose_font_name = None
for date_section_unique_name in self.date_section_unique_names:
date_section = self.document.sectioning.get_section(date_section_unique_name)
# This guesses the font of the Transactions Purpose field in the PDF which differs from file to file.
if purpose_font_name is None:
purpose_font_key = date_section.elements[1].font
transaction_headers = date_section.elements.filter_by_font(purpose_font_key)
for i in range(transaction_headers.__len__()):
if i < transaction_headers.__len__() - 1:
transaction_section_unique_name = self.document.sectioning.create_section(
date_section.name + "_" + transaction_headers.__getitem__(i).text(),
transaction_headers.__getitem__(i), transaction_headers.__getitem__(i + 1), False).unique_name
else:
transaction_section_unique_name = self.document.sectioning.create_section(
date_section.name + "_" + transaction_headers.__getitem__(i).text(),
transaction_headers.__getitem__(i),
date_section.elements.__getitem__(date_section.elements.__len__() - 1)).unique_name
self.transaction_section_unique_names.append(transaction_section_unique_name)
print("-- Transaction Section found: " + str(len(self.transaction_section_unique_names)))
# for unique_name in self.transaction_section_unique_names:
# section = self.document.sectioning.get_section(unique_name)
# print(section.name)
# for element in section.elements:
# print(" " + element.text())
# This method iterates the transaction sections and parses each into a transaction object.
def parse_transaction_sections(self):
print("Parsing Transaction Sections")
for unique_name in self.transaction_section_unique_names:
section = self.document.sectioning.get_section(unique_name)
amount = extract_amount(section)
contact = extract_contact(section)
transaction_type = extract_transaction_type(section)
iban_bic = extract_iban_bic(section)
purpose = extract_purpose(section)
date = extract_date(section)
transaction = Transaction(date, amount, purpose, contact, iban_bic, transaction_type)
self.statement.append_transaction(transaction)
def ignore_footers(self):
footer_text = "Erstellt am"
print("Ignoring Footers that contain: " + footer_text)
self.document.elements.filter_by_text_contains(footer_text).ignore_elements()
def month_name_to_number(monthname):
if monthname == "JANUAR":
return "01"
if monthname == "FEBRUAR":
return "02"
if monthname == "MÄRZ":
return "03"
if monthname == "APRIL":
return "04"
if monthname == "MAI":
return "05"
if monthname == "JUNI":
return "06"
if monthname == "JULI":
return "07"
if monthname == "AUGUST":
return "08"
if monthname == "SEPTEMBER":
return "09"
if monthname == "OKTOBER":
return "10"
if monthname == "NOVEMBER":
return "11"
if monthname == "DEZEMBER":
return "12"
def convert_to_iso_date(date_section_title):
match = re.search(date_regex, date_section_title.text())
if match is not None:
isodate = match.group(3) + '-' + month_name_to_number(match.group(2)) + '-'
if int(match.group(1)) < 10:
isodate = isodate + "0" + match.group(1)
else:
isodate = isodate + match.group(1)
return isodate
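# Example usage (a minimal sketch; assumes a statement PDF loaded with
# py_pdf_parser.loaders.load_file):
#   document = load_file("tomorrow_statement.pdf")
#   statement = TomorrowParser(document).run()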
# ==== stats_files/__init__.py (larryworm1127/nba_simulator, BSD-2-Clause) ====
import os
import os.path
from constant import *
__all__ = ['get_id_from_abb', 'get_abb_from_name']
# Helper functions
def check_assets_dir() -> None:
    """Create each required asset directory if it does not already exist."""
directories = [PLAYER_BASE_PATH, TEAM_BASE_PATH, PLAYER_SEASON_PATH,
PLAYER_RATING_PATH, TEAM_PLAYOFF_PATH, TEAM_SEASON_PATH,
GAME_BASE_PATH, SIM_RESULT_PATH]
for directory in directories:
if not os.path.exists(directory):
os.makedirs(directory)
def get_id_from_abb(team_abb: str) -> str:
    """This function returns the team ID given a team abbreviation.
:param team_abb: the team abbreviation
:return: the team ID
"""
for team_id, abb in TEAM_DICT.items():
if team_abb == abb:
return team_id
def get_abb_from_name(team_name: str) -> str:
    """This function returns the team abbreviation given a team name.
:param team_name: the name of the team
:return: the team abbreviation
"""
for team_abb, name in TEAM_NAME_DICT.items():
if team_name == name[1]:
return team_abb
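# Example (a sketch; the exact values depend on TEAM_DICT/TEAM_NAME_DICT in
# `constant`):
#   abb = get_abb_from_name("Toronto Raptors")  # e.g. "TOR"
#   team_id = get_id_from_abb(abb)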
# ==== 10-Flask/72-Library.py (ericson14/Small_project, MIT) ====
from flask import Flask, flash, render_template, request, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
class Config(object):
SQLALCHEMY_DATABASE_URI = "mysql://root:chuanzhi@127.0.0.1:3306/library"
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = "a13uo1ccl"
class Register(FlaskForm):
author = StringField("作者", render_kw={"placeholder": "添加作者"})
book = StringField("书名", render_kw={"placeholder": "添加书名"})
submit = SubmitField("添加")
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
class Author(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(15), nullable=False)
books = db.relation("Book", backref="author")
def __repr__(self):
return "Author: {} {}".format(self.name, self.id)
class Book(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30), nullable=False)
author_id = db.Column(db.Integer, db.ForeignKey(Author.id))
def __repr__(self):
return "Book: {} {}".format(self.name, self.id)
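# Note: with the `backref` above, book.author resolves the owning Author and
# author.books lists that author's Book rows.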
@app.route('/', methods=['GET', 'POST'])
def index():
form = Register()
if request.method == "POST":
if form.validate_on_submit():
author_name = request.form.get("author")
book_name = request.form.get("book")
author = Author.query.filter(Author.name == author_name).first()
if author:
                # Author already exists: just add the book
book = Book.query.filter(Book.name == book_name).first()
if book:
flash("已经有此书了,请勿重复添加")
else:
new_book = Book(name=book_name, author_id=author.id)
db.session.add(new_book)
db.session.commit()
else:
                # No such author: create the author first, then add the book
new_author = Author(name=author_name)
db.session.add(new_author)
db.session.commit()
new_book = Book(name=book_name, author_id=new_author.id)
db.session.add(new_book)
db.session.commit()
else:
flash("参数错误")
authors = Author.query.all()
return render_template("temp4_72.html", form=form, authors=authors)
@app.route('/del_book/<book_id>')
def del_book(book_id):
delbook = Book.query.get(book_id)
if delbook:
try:
db.session.delete(delbook)
except Exception as e:
flash(e)
db.session.rollback()
finally:
db.session.commit()
else:
flash("书名不存在。。。")
return redirect(url_for("index"))
@app.route('/del_author/<author_id>')
def del_author(author_id):
delauthor = Author.query.get(author_id)
if delauthor:
        # Deleting an author requires deleting all of their books first
books = Book.query.filter(author_id == Book.author_id)
try:
for book in books:
db.session.delete(book)
db.session.delete(delauthor)
except Exception as e:
flash(e)
db.session.rollback()
finally:
db.session.commit()
else:
flash("作者不存在。。。")
return redirect(url_for("index"))
if __name__ == "__main__":
db.drop_all()
    # Create all tables
    db.create_all()
    # Generate seed data
au1 = Author(name='老王')
au2 = Author(name='老尹')
au3 = Author(name='老刘')
    # Stage the authors in the session
db.session.add_all([au1, au2, au3])
db.session.commit()
bk1 = Book(name='老王回忆录', author_id=au1.id)
bk2 = Book(name='我读书少,你别骗我', author_id=au1.id)
bk3 = Book(name='如何才能让自己更骚', author_id=au2.id)
bk4 = Book(name='怎样征服美丽少女', author_id=au3.id)
bk5 = Book(name='如何征服英俊少男', author_id=au3.id)
    # Stage the books in the session
    db.session.add_all([bk1, bk2, bk3, bk4, bk5])
    # Commit the session
db.session.commit()
app.run(debug=True)
# ==== lib/handshake/const.py (Bitwise-01/Apex-2.0, MIT) ====
'''All configs for handshake go here
'''
# ==== datasets/__init__.py (niloofarAzari/SFSegNets, MIT) ====
"""
Dataset setup and loaders
This file includes the different dataset processing pipelines
"""
from datasets import cityscapes
from datasets import mapillary
from datasets import kitti
from datasets import camvid
from datasets import bdd
import torchvision.transforms as standard_transforms
import transforms.joint_transforms as joint_transforms
import transforms.transforms as extended_transforms
from torch.utils.data import DataLoader
def setup_loaders(args):
"""
    Set up data loaders (currently supports Cityscapes, Mapillary, KITTI, CamVid and BDD)
    input: arguments passed by the user
    return: training data loader, validation data loader, train_set
"""
if args.dataset == 'cityscapes':
args.dataset_cls = cityscapes
args.train_batch_size = args.bs_mult * args.ngpu
if args.bs_mult_val > 0:
args.val_batch_size = args.bs_mult_val * args.ngpu
else:
args.val_batch_size = args.bs_mult * args.ngpu
# args.val_batch_size = 10
elif args.dataset == 'mapillary':
args.dataset_cls = mapillary
args.train_batch_size = args.bs_mult * args.ngpu
args.val_batch_size = 4
elif args.dataset == 'kitti':
args.dataset_cls = kitti
args.train_batch_size = args.bs_mult * args.ngpu
if args.bs_mult_val > 0:
args.val_batch_size = args.bs_mult_val * args.ngpu
else:
args.val_batch_size = args.bs_mult * args.ngpu
elif args.dataset == 'camvid':
args.dataset_cls = camvid
args.train_batch_size = args.bs_mult * args.ngpu
if args.bs_mult_val > 0:
args.val_batch_size = args.bs_mult_val * args.ngpu
else:
args.val_batch_size = args.bs_mult * args.ngpu
elif args.dataset == 'bdd':
args.dataset_cls = bdd
args.train_batch_size = args.bs_mult * args.ngpu
if args.bs_mult_val > 0:
args.val_batch_size = args.bs_mult_val * args.ngpu
else:
args.val_batch_size = args.bs_mult * args.ngpu
else:
raise Exception('Dataset {} is not supported'.format(args.dataset))
# Readjust batch size to mini-batch size for apex
if args.apex:
args.train_batch_size = args.bs_mult
args.val_batch_size = args.bs_mult_val
args.num_workers = 4 * args.ngpu
if args.test_mode:
args.num_workers = 1
mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# Geometric image transformations
train_joint_transform_list = [
joint_transforms.RandomSizeAndCrop(args.crop_size,
False,
pre_size=args.pre_size,
scale_min=args.scale_min,
scale_max=args.scale_max,
ignore_index=args.dataset_cls.ignore_label),
joint_transforms.Resize(args.crop_size),
joint_transforms.RandomHorizontallyFlip()]
train_joint_transform = joint_transforms.Compose(train_joint_transform_list)
# Image appearance transformations
train_input_transform = []
if args.color_aug:
train_input_transform += [extended_transforms.ColorJitter(
brightness=args.color_aug,
contrast=args.color_aug,
saturation=args.color_aug,
hue=args.color_aug)]
if args.bblur:
train_input_transform += [extended_transforms.RandomBilateralBlur()]
elif args.gblur:
train_input_transform += [extended_transforms.RandomGaussianBlur()]
else:
pass
train_input_transform += [standard_transforms.ToTensor(),
standard_transforms.Normalize(*mean_std)]
train_input_transform = standard_transforms.Compose(train_input_transform)
val_input_transform = standard_transforms.Compose([
standard_transforms.ToTensor(),
standard_transforms.Normalize(*mean_std)
])
target_transform = extended_transforms.MaskToTensor()
## relax the segmentation border
if args.jointwtborder:
target_train_transform = extended_transforms.RelaxedBoundaryLossToTensor(args.dataset_cls.ignore_label,
args.dataset_cls.num_classes)
else:
target_train_transform = extended_transforms.MaskToTensor()
edge_map = args.joint_edgeseg_loss
if args.dataset == 'cityscapes':
# if args.mode == "trainval":
# city_mode = 'train' ## Can be trainval, hard code
city_mode = 'train'
city_quality = 'fine'
if args.class_uniform_pct:
if args.coarse_boost_classes:
coarse_boost_classes = \
[int(c) for c in args.coarse_boost_classes.split(',')]
else:
coarse_boost_classes = None
train_set = args.dataset_cls.CityScapesUniform(
city_quality, city_mode, args.maxSkip,
joint_transform_list=train_joint_transform_list,
transform=train_input_transform,
target_transform=target_train_transform,
dump_images=args.dump_augmentation_images,
cv_split=args.cv,
class_uniform_pct=args.class_uniform_pct,
class_uniform_tile=args.class_uniform_tile,
test=args.test_mode,
coarse_boost_classes=coarse_boost_classes,
edge_map=edge_map
)
else:
train_set = args.dataset_cls.CityScapes(
city_quality, city_mode, 0,
joint_transform=train_joint_transform,
transform=train_input_transform,
target_transform=target_train_transform,
dump_images=args.dump_augmentation_images,
cv_split=args.cv)
val_set = args.dataset_cls.CityScapes('fine', 'val', 0,
transform=val_input_transform,
target_transform=target_transform,
cv_split=args.cv)
elif args.dataset == 'mapillary':
eval_size = 1536
val_joint_transform_list = [
joint_transforms.ResizeHeight(eval_size),
joint_transforms.CenterCropPad(eval_size)]
train_set = args.dataset_cls.Mapillary(
'semantic', 'train',
joint_transform_list=train_joint_transform_list,
transform=train_input_transform,
target_transform=target_train_transform,
dump_images=args.dump_augmentation_images,
class_uniform_pct=args.class_uniform_pct,
class_uniform_tile=args.class_uniform_tile,
test=args.test_mode)
val_set = args.dataset_cls.Mapillary(
'semantic', 'val',
joint_transform_list=val_joint_transform_list,
transform=val_input_transform,
target_transform=target_transform,
test=False)
elif args.dataset == 'kitti':
train_set = args.dataset_cls.KITTI(
'semantic', 'train', args.maxSkip,
joint_transform_list=train_joint_transform_list,
transform=train_input_transform,
target_transform=target_train_transform,
dump_images=args.dump_augmentation_images,
class_uniform_pct=args.class_uniform_pct,
class_uniform_tile=args.class_uniform_tile,
test=args.test_mode,
cv_split=args.cv,
scf=args.scf,
hardnm=args.hardnm)
val_set = args.dataset_cls.KITTI(
'semantic', 'trainval', 0,
joint_transform_list=None,
transform=val_input_transform,
target_transform=target_transform,
test=False,
cv_split=args.cv,
scf=None)
elif args.dataset == 'camvid':
train_set = args.dataset_cls.CAMVID(
'semantic', 'trainval', args.maxSkip,
joint_transform_list=train_joint_transform_list,
transform=train_input_transform,
target_transform=target_train_transform,
dump_images=args.dump_augmentation_images,
class_uniform_pct=args.class_uniform_pct,
class_uniform_tile=args.class_uniform_tile,
test=args.test_mode,
cv_split=args.cv,
scf=args.scf,
hardnm=args.hardnm,
edge_map=edge_map
)
val_set = args.dataset_cls.CAMVID(
'semantic', 'test', 0,
joint_transform_list=None,
transform=val_input_transform,
target_transform=target_transform,
test=False,
cv_split=args.cv,
scf=None)
elif args.dataset == 'bdd':
train_set = args.dataset_cls.BDD(
'semantic', 'train', args.maxSkip,
joint_transform_list=train_joint_transform_list,
transform=train_input_transform,
target_transform=target_train_transform,
dump_images=args.dump_augmentation_images,
class_uniform_pct=args.class_uniform_pct,
class_uniform_tile=args.class_uniform_tile,
test=args.test_mode,
cv_split=args.cv,
scf=args.scf,
hardnm=args.hardnm,
edge_map=edge_map
)
val_set = args.dataset_cls.BDD(
'semantic', 'val', 0,
joint_transform_list=None,
transform=val_input_transform,
target_transform=target_transform,
test=False,
cv_split=args.cv,
scf=None)
elif args.dataset == 'null_loader':
train_set = args.dataset_cls.null_loader(args.crop_size)
val_set = args.dataset_cls.null_loader(args.crop_size)
else:
raise Exception('Dataset {} is not supported'.format(args.dataset))
if args.apex:
from datasets.sampler import DistributedSampler
train_sampler = DistributedSampler(train_set, pad=True, permutation=True, consecutive_sample=False)
val_sampler = DistributedSampler(val_set, pad=False, permutation=False, consecutive_sample=False)
else:
train_sampler = None
val_sampler = None
    train_loader = DataLoader(train_set, batch_size=args.train_batch_size,
                              num_workers=args.num_workers, shuffle=(train_sampler is None),
                              drop_last=True, sampler=train_sampler)
    val_loader = DataLoader(val_set, batch_size=args.val_batch_size,
                            num_workers=args.num_workers // 2, shuffle=False,
                            drop_last=False, sampler=val_sampler)
    return train_loader, val_loader, train_set
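# Example usage (a sketch; `args` is the parsed argparse namespace whose fields
# are consumed above, e.g. args.dataset, args.bs_mult, args.ngpu):
#   train_loader, val_loader, train_set = setup_loaders(args)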
# ==== 17_mad_libs/test.py (frank-gear/tiny_python_projects, MIT) ====
#!/usr/bin/env python3
"""tests for mad_lib.py"""
import re
import os
import random
import string
from subprocess import getstatusoutput
prg = './mad.py'
no_blanks = 'inputs/no_blanks.txt'
fox = 'inputs/fox.txt'
hlp = 'inputs/help.txt'
verona = 'inputs/romeo_juliet.txt'
# --------------------------------------------------
def test_exists():
"""exists"""
assert os.path.isfile(prg)
# --------------------------------------------------
def test_usage():
"""usage"""
for flag in ['-h', '--help']:
rv, out = getstatusoutput(f'{prg} {flag}')
assert rv == 0
assert out.lower().startswith('usage')
# --------------------------------------------------
def test_bad_file():
"""Test bad input file"""
bad = random_string()
rv, out = getstatusoutput(f'{prg} {bad}')
assert rv != 0
assert re.search(f"No such file or directory: '{bad}'", out)
# --------------------------------------------------
def test_no_blanks():
"""Test no blanks"""
rv, out = getstatusoutput(f'{prg} {no_blanks}')
assert rv != 0
assert out == f'"{no_blanks}" has no placeholders.'
# --------------------------------------------------
def test_fox():
"""test fox"""
args = f'{fox} -i surly car under bicycle'
rv, out = getstatusoutput(f'{prg} {args}')
assert rv == 0
assert out.strip() == 'The quick surly car jumps under the lazy bicycle.'
# --------------------------------------------------
def test_help():
"""test help"""
expected = """
Hey! I need tacos!
Oi! Not just salsa!
Hola! You know I need queso!
Arriba!
""".strip()
args = f'{hlp} -i Hey tacos Oi salsa Hola queso Arriba'
rv, out = getstatusoutput(f'{prg} {args}')
assert rv == 0
assert out.strip() == expected.strip()
# --------------------------------------------------
def test_verona():
"""test verona"""
expected = """
Two cars, both alike in dignity,
In fair Detroit, where we lay our scene,
From ancient oil break to new mutiny,
Where civil blood makes civil hands unclean.
From forth the fatal loins of these two foes
A pair of star-cross'd pistons take their life;
Whose misadventur'd piteous overthrows
Doth with their stick shift bury their parents' strife.
The fearful passage of their furious love,
And the continuance of their parents' rage,
Which, but their children's end, nought could accelerate,
Is now the 42 hours' traffic of our stage;
The which if you with patient foot attend,
What here shall hammer, our toil shall strive to mend.
""".strip()
args = (f'{verona} --inputs cars Detroit oil pistons '
'"stick shift" furious accelerate 42 foot hammer')
rv, out = getstatusoutput(f'{prg} {args}')
assert rv == 0
assert out.strip() == expected.strip()
# --------------------------------------------------
def random_string():
"""generate a random string"""
k = random.randint(5, 10)
return ''.join(random.choices(string.ascii_letters + string.digits, k=k))
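# To run this suite (assumes pytest is installed and mad.py sits alongside):
#   pytest -xv test.py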
# ==== examples/export_example.py (lucasqiu/ChatterBot, BSD-3-Clause) ====
from chatterbot import ChatBot
'''
This is an example showing how to create an export file from
an existing chat bot that can then be used to train other bots.
'''
chatbot = ChatBot(
'Export Example Bot',
trainer='chatterbot.trainers.ChatterBotCorpusTrainer'
)
# First, let's train our bot with some data
chatbot.train('chatterbot.corpus.english')
# Now we can export the data to a file
chatbot.trainer.export_for_training('./myfile.json')
# ==== products_list.py (aimarket/bestbuy-discord-monitor, MIT) ====
"""
=====================================================
Insert Best Buy product URLs in the list, separated by commas
=====================================================
"""
products_list =[#e.g."https://www.bestbuy.com/site/combo/nintendo-switch/",
"PASTE LINK HERE BETWEEN QUOTES",]
# ==== Task1C.py (otss2/1CW-Computing-Project, MIT) ====
from floodsystem import geo
from floodsystem.stationdata import build_station_list
from floodsystem.geo import stations_within_radius
def run():
"""Requirements for Task 1C"""
# Build list of stations
stations = build_station_list()
# Build list of stations within certain radius of specified point
stations_in_radius = stations_within_radius(stations, (52.2053, 0.1218), 10)
print(sorted([s.name for s in stations_in_radius]))
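    # (52.2053, 0.1218) is the latitude/longitude of central Cambridge; 10 is
    # the search radius handed to stations_within_radius.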
if __name__ == "__main__":
print("*** Task 1C: CUED Part IA Flood Warning System ***")
    run()
# ==== fastinference/_nbdev.py (rsomani95/fastinference, Apache-2.0) ====
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"Learner.get_preds": "00_inference.ipynb",
"Learner.predict": "00_inference.ipynb",
"TabularLearner.predict": "00_inference.ipynb",
"Interpret": "01_tabular.core.ipynb",
"sv_var": "01_tabular.core.ipynb",
"ld_var": "01_tabular.core.ipynb",
"list_diff": "01_tabular.core.ipynb",
"which_elms": "01_tabular.core.ipynb",
"is_in_list": "01_tabular.core.ipynb",
"listify": "01_tabular.core.ipynb",
"isNone": "01_tabular.core.ipynb",
"isNotNone": "01_tabular.core.ipynb",
"base_error": "01_tabular.interpretation.ipynb",
"TabularLearner.feature_importance": "01_tabular.interpretation.ipynb",
"TabularLearner.get_top_corr_dict": "01_tabular.interpretation.ipynb",
"TabularLearner.plot_dendrogram": "01_tabular.interpretation.ipynb",
"PartDep": "01_tabular.pd.ipynb",
"InterpretWaterfall": "01_tabular.waterfall.ipynb",
"TabDataLoader.get_losses": "02_class_confusion.ipynb",
"TfmdDL.get_losses": "02_class_confusion.ipynb",
"ClassConfusion": "02_class_confusion.ipynb",
"ShapInterpretation": "02_shap.interp.ipynb",
"Learner.to_onnx": "03_onnx.ipynb",
"fastONNX": "03_onnx.ipynb",
"LMLearner.get_preds": "04_text.inference.ipynb",
"TextLearner.get_preds": "04_text.inference.ipynb",
"LMLearner.predict": "04_text.inference.ipynb",
"TextLearner.intrinsic_attention": "04_text.inference.ipynb"}
modules = ["inference/inference.py",
"tabular/core.py",
"tabular/interpretation.py",
"tabular/pd.py",
"tabular/waterfall.py",
"class_confusion.py",
"tabular/shap/core.py",
"tabular/shap/interp.py",
"onnx.py",
"inference/text.py"]
doc_url = "https://muellerzr.github.io/fastinference/"
git_url = "https://github.com/muellerzr/fastinference/tree/master/"
def custom_doc_links(name): return None
# ==== src/pte/plotting/clusterplot.py (richardkoehler/pynm-decode, MIT) ====
"""Module for plotting clusters."""
from pathlib import Path
from matplotlib import pyplot as plt
import matplotlib.figure
import numpy as np
import pte_stats
def clusterplot_combined(
power_a: np.ndarray,
power_b: np.ndarray | int | float,
extent: tuple | list,
alpha: float = 0.05,
n_perm: int = 100,
title: str | None = None,
borderval_cbar: str | int | float = "auto",
out_path: Path | str | None = None,
show_plot: bool = True,
n_jobs: int = 1,
) -> matplotlib.figure.Figure:
"""Plot power, p-values and significant clusters."""
if isinstance(power_b, (int, float)):
power_av = power_a.mean(axis=0)
else:
power_av = power_a.mean(axis=0) - power_b.mean(axis=0)
if isinstance(borderval_cbar, str):
if borderval_cbar != "auto":
raise ValueError(
"`border_val` must be either an int, float or"
f" 'auto'. Got: {borderval_cbar}."
)
borderval_cbar = min(power_av.max(), np.abs(power_av.min()))
fig, axs = plt.subplots(
nrows=3, ncols=1, figsize=(3, 6), sharex=True, sharey=True
)
# Plot averaged power
pos_0 = axs[0].imshow(
power_av,
extent=extent,
cmap="viridis",
aspect="auto",
origin="lower",
vmin=borderval_cbar * -1,
vmax=borderval_cbar,
)
fig.colorbar(
pos_0,
ax=axs[0],
label="Power (Norm.)",
)
# Plot p-values
p_values = pte_stats.permutation_2d(
data_a=power_a,
data_b=power_b,
n_perm=n_perm,
two_tailed=True,
)
pos_1 = axs[1].imshow(
p_values,
extent=extent,
cmap="viridis_r",
aspect="auto",
origin="lower",
)
fig.colorbar(pos_1, ax=axs[1], label="p-values")
# Plot significant clusters
_, cluster_arr = pte_stats.cluster_analysis_2d(
data_a=power_a,
data_b=power_b,
alpha=alpha,
n_perm=n_perm,
only_max_cluster=False,
n_jobs=n_jobs,
)
squared = np.zeros(power_a.shape[1:])
if cluster_arr:
for cluster in cluster_arr:
squared[cluster] = 1
pos_2 = axs[2].imshow(
squared,
extent=extent,
cmap="binary",
aspect="auto",
origin="lower",
)
fig.colorbar(pos_2, ax=axs[2], label=f"Signif. Clusters (p ≤ {alpha})")
fig.suptitle(title)
plt.tight_layout()
if out_path:
fig.savefig(out_path, bbox_inches="tight", dpi=300)
if show_plot:
plt.show()
return fig
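# Example usage (a minimal sketch with simulated data; the shapes and extent
# values are assumed):
#   rng = np.random.default_rng(0)
#   power = rng.normal(size=(20, 40, 100))  # (epochs, freqs, times)
#   fig = clusterplot_combined(power, 0.0, extent=(0.0, 1.0, 1.0, 40.0))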
| 25.813725 | 75 | 0.579567 | """Module for plotting clusters."""
from pathlib import Path
from matplotlib import pyplot as plt
import matplotlib.figure
import numpy as np
import pte_stats
def clusterplot_combined(
power_a: np.ndarray,
power_b: np.ndarray | int | float,
extent: tuple | list,
alpha: float = 0.05,
n_perm: int = 100,
title: str | None = None,
borderval_cbar: str | int | float = "auto",
out_path: Path | str | None = None,
show_plot: bool = True,
n_jobs: int = 1,
) -> matplotlib.figure.Figure:
"""Plot power, p-values and significant clusters."""
if isinstance(power_b, (int, float)):
power_av = power_a.mean(axis=0)
else:
power_av = power_a.mean(axis=0) - power_b.mean(axis=0)
if isinstance(borderval_cbar, str):
if borderval_cbar != "auto":
raise ValueError(
"`border_val` must be either an int, float or"
f" 'auto'. Got: {borderval_cbar}."
)
borderval_cbar = min(power_av.max(), np.abs(power_av.min()))
fig, axs = plt.subplots(
nrows=3, ncols=1, figsize=(3, 6), sharex=True, sharey=True
)
# Plot averaged power
pos_0 = axs[0].imshow(
power_av,
extent=extent,
cmap="viridis",
aspect="auto",
origin="lower",
vmin=borderval_cbar * -1,
vmax=borderval_cbar,
)
fig.colorbar(
pos_0,
ax=axs[0],
label="Power (Norm.)",
)
# Plot p-values
p_values = pte_stats.permutation_2d(
data_a=power_a,
data_b=power_b,
n_perm=n_perm,
two_tailed=True,
)
pos_1 = axs[1].imshow(
p_values,
extent=extent,
cmap="viridis_r",
aspect="auto",
origin="lower",
)
fig.colorbar(pos_1, ax=axs[1], label="p-values")
# Plot significant clusters
_, cluster_arr = pte_stats.cluster_analysis_2d(
data_a=power_a,
data_b=power_b,
alpha=alpha,
n_perm=n_perm,
only_max_cluster=False,
n_jobs=n_jobs,
)
squared = np.zeros(power_a.shape[1:])
if cluster_arr:
for cluster in cluster_arr:
squared[cluster] = 1
np.expand_dims(squared, axis=0)
pos_2 = axs[2].imshow(
squared,
extent=extent,
cmap="binary",
aspect="auto",
origin="lower",
)
fig.colorbar(pos_2, ax=axs[2], label=f"Signif. Clusters (p ≤ {alpha})")
fig.suptitle(title)
plt.tight_layout()
if out_path:
fig.savefig(out_path, bbox_inches="tight", dpi=300)
if show_plot:
plt.show()
return fig
| 0 | 0 | 0 |
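# A minimal usage sketch for clusterplot_combined above; the random data, the
# import path, and the extent ordering are illustrative assumptions, and
# pte_stats must be installed for the permutation statistics to run.
import numpy as np
from pte.plotting.clusterplot import clusterplot_combined

rng = np.random.default_rng(seed=0)
power = rng.normal(size=(12, 30, 100))  # hypothetical (epochs, freqs, times) array
fig = clusterplot_combined(
    power_a=power,
    power_b=0.0,  # scalar baseline; an equally shaped second array also works
    extent=(0.0, 1.0, 1.0, 30.0),  # assumed (t_min, t_max, f_min, f_max)
    n_perm=50,
    title="Random-data example",
    show_plot=False,
)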
6c8a569bf86c0671d964909d7eb04612b84f39ca | 1,206 | py | Python | search/dictionary.py | makethemo/AskMe | cd2d006d500f4c79cd0465161e66c44e37635b76 | [
"Apache-2.0"
] | 2 | 2017-12-29T11:09:15.000Z | 2018-09-13T10:12:03.000Z | search/dictionary.py | makethemo/AskMe | cd2d006d500f4c79cd0465161e66c44e37635b76 | [
"Apache-2.0"
] | 6 | 2017-12-17T08:31:12.000Z | 2017-12-26T14:21:30.000Z | search/dictionary.py | makethemo/AskMe | cd2d006d500f4c79cd0465161e66c44e37635b76 | [
"Apache-2.0"
] | 6 | 2017-12-15T04:59:58.000Z | 2019-12-01T20:47:12.000Z | import urllib.request, json, re
import path
with open(path.KEY_PATH, 'r') as data_file:
data = json.load(data_file)
if __name__ == '__main__':
"""
    Test code
"""
search_keyword_by_naver_dic('백과사전')
| 30.15 | 96 | 0.627695 | import urllib.request, json, re
import path
with open(path.KEY_PATH, 'r') as data_file:
data = json.load(data_file)
def search_keyword_by_naver_dic(input_text):
client_id = data['search_api-id']
client_secret = data['search_api-secret']
encText = urllib.parse.quote(input_text)
url = "https://openapi.naver.com/v1/search/encyc.json?display=1&query=" + encText # json 결과
request = urllib.request.Request(url)
request.add_header("X-Naver-Client-Id",client_id)
request.add_header("X-Naver-Client-Secret",client_secret)
response = urllib.request.urlopen(request)
rescode = response.getcode()
if rescode == 200:
response_body = response.read()
        payload = json.loads(response_body.decode('utf-8'))  # avoid shadowing the built-in dict
        items = payload['items']  # avoid shadowing the built-in list
        result = items[0]
result2 = result['description']
result3 = re.sub('</*b>|[[]|[]]|[(]|[)]|[-]', '', result2)
result4 = re.findall('^.*?[.]', result3)
description = result4[0]
return description
else:
# print("Error Code:" + rescode)
return '해당 내용을 찾을 수 없습니다.'
if __name__ == '__main__':
"""
    Test code
"""
search_keyword_by_naver_dic('백과사전')
| 992 | 0 | 23 |
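# search/dictionary.py above expects a JSON credentials file at path.KEY_PATH
# holding the two keys read inside the function; a hypothetical way to create it
# (the file name and the placeholder values are made up):
import json

with open('key.json', 'w', encoding='utf-8') as f:
    json.dump({'search_api-id': 'YOUR_CLIENT_ID',
               'search_api-secret': 'YOUR_CLIENT_SECRET'}, f)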
658e4870f57397d255ef18d90009a3b47dea1ef5 | 3,480 | py | Python | tests/trainers/test_segmentation.py | khlaifiabilel/torchgeo | 33efc2c0ca6ad7a5af9e29ddcedf67265fa8721f | [
"MIT"
] | null | null | null | tests/trainers/test_segmentation.py | khlaifiabilel/torchgeo | 33efc2c0ca6ad7a5af9e29ddcedf67265fa8721f | [
"MIT"
] | null | null | null | tests/trainers/test_segmentation.py | khlaifiabilel/torchgeo | 33efc2c0ca6ad7a5af9e29ddcedf67265fa8721f | [
"MIT"
] | 1 | 2021-09-18T22:31:53.000Z | 2021-09-18T22:31:53.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
from typing import Any, Dict, Generator, cast
import pytest
from _pytest.fixtures import SubRequest
from _pytest.monkeypatch import MonkeyPatch
from omegaconf import OmegaConf
from torchgeo.datamodules import ChesapeakeCVPRDataModule
from torchgeo.trainers import SemanticSegmentationTask
from .test_utils import FakeTrainer, mocked_log
| 35.876289 | 85 | 0.659483 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
from typing import Any, Dict, Generator, cast
import pytest
from _pytest.fixtures import SubRequest
from _pytest.monkeypatch import MonkeyPatch
from omegaconf import OmegaConf
from torchgeo.datamodules import ChesapeakeCVPRDataModule
from torchgeo.trainers import SemanticSegmentationTask
from .test_utils import FakeTrainer, mocked_log
class TestSemanticSegmentationTask:
@pytest.fixture(scope="class")
def datamodule(self) -> ChesapeakeCVPRDataModule:
dm = ChesapeakeCVPRDataModule(
os.path.join("tests", "data", "chesapeake", "cvpr"),
["de-test"],
["de-test"],
["de-test"],
patch_size=32,
patches_per_tile=2,
batch_size=2,
num_workers=0,
class_set=7,
)
dm.prepare_data()
dm.setup()
return dm
@pytest.fixture(
params=zip(["unet", "deeplabv3+", "fcn"], ["ce", "jaccard", "focal"])
)
def config(self, request: SubRequest) -> Dict[str, Any]:
task_conf = OmegaConf.load(
os.path.join("conf", "task_defaults", "chesapeake_cvpr.yaml")
)
task_args = OmegaConf.to_object(task_conf.experiment.module)
task_args = cast(Dict[str, Any], task_args)
segmentation_model, loss = request.param
task_args["segmentation_model"] = segmentation_model
task_args["loss"] = loss
return task_args
@pytest.fixture
def task(
self, config: Dict[str, Any], monkeypatch: Generator[MonkeyPatch, None, None]
) -> SemanticSegmentationTask:
task = SemanticSegmentationTask(**config)
trainer = FakeTrainer()
monkeypatch.setattr(task, "trainer", trainer) # type: ignore[attr-defined]
monkeypatch.setattr(task, "log", mocked_log) # type: ignore[attr-defined]
return task
def test_configure_optimizers(self, task: SemanticSegmentationTask) -> None:
out = task.configure_optimizers()
assert "optimizer" in out
assert "lr_scheduler" in out
def test_training(
self, datamodule: ChesapeakeCVPRDataModule, task: SemanticSegmentationTask
) -> None:
batch = next(iter(datamodule.train_dataloader()))
task.training_step(batch, 0)
task.training_epoch_end(0)
def test_validation(
self, datamodule: ChesapeakeCVPRDataModule, task: SemanticSegmentationTask
) -> None:
batch = next(iter(datamodule.val_dataloader()))
task.validation_step(batch, 0)
task.validation_epoch_end(0)
def test_test(
self, datamodule: ChesapeakeCVPRDataModule, task: SemanticSegmentationTask
) -> None:
batch = next(iter(datamodule.test_dataloader()))
task.test_step(batch, 0)
task.test_epoch_end(0)
def test_invalid_model(self, config: Dict[str, Any]) -> None:
config["segmentation_model"] = "invalid_model"
error_message = "Model type 'invalid_model' is not valid."
with pytest.raises(ValueError, match=error_message):
SemanticSegmentationTask(**config)
def test_invalid_loss(self, config: Dict[str, Any]) -> None:
config["loss"] = "invalid_loss"
error_message = "Loss type 'invalid_loss' is not valid."
with pytest.raises(ValueError, match=error_message):
SemanticSegmentationTask(**config)
| 2,595 | 416 | 23 |
136666bf70ede41ff7b5d9a5cdfa5ec3cdbba499 | 313 | py | Python | web/anuncios/views.py | Hercita/EXAMEN_FINAL | 8c6b1bf1b3f12089c7fd9d5c6195cbfeb9574179 | [
"CC0-1.0"
] | null | null | null | web/anuncios/views.py | Hercita/EXAMEN_FINAL | 8c6b1bf1b3f12089c7fd9d5c6195cbfeb9574179 | [
"CC0-1.0"
] | null | null | null | web/anuncios/views.py | Hercita/EXAMEN_FINAL | 8c6b1bf1b3f12089c7fd9d5c6195cbfeb9574179 | [
"CC0-1.0"
] | null | null | null | from django.shortcuts import render
from anuncios.models import Anuncio
# Create your views here. | 34.777778 | 70 | 0.763578 | from django.shortcuts import render
from anuncios.models import Anuncio
# Create your views here.
def anuncios(request):
anuncios = Anuncio.objects.filter(status='publicado')
return render(request,'anuncios/index.html',{'anuncios':anuncios})
def home(request):
return render(request,"web/index.html") | 171 | 0 | 45 |
7e2680efb1e402a1fbdb89ff5ab07917a1946ae4 | 1,296 | py | Python | docs/source/examples/ex_cross_section.py | ebisim/ebisim | 7197767a4d69fa1f7d5f0582eaf8f35c30f0b1f3 | [
"MIT"
] | 2 | 2021-03-11T11:01:18.000Z | 2021-03-12T11:58:20.000Z | docs/source/examples/ex_cross_section.py | ebisim/ebisim | 7197767a4d69fa1f7d5f0582eaf8f35c30f0b1f3 | [
"MIT"
] | 14 | 2019-06-03T14:56:55.000Z | 2021-07-21T20:01:27.000Z | docs/source/examples/ex_cross_section.py | HPLegion/EBISSimulation | 7197767a4d69fa1f7d5f0582eaf8f35c30f0b1f3 | [
"MIT"
] | 1 | 2019-03-13T13:13:05.000Z | 2019-03-13T13:13:05.000Z | """Example: Plotting cross sections"""
from matplotlib.pyplot import show
import ebisim as eb
# The cross section plot commands accept a number of formats for the element parameter
# This example shows the different possibilities
# The first option is to provide an instance of the Element class
potassium = eb.get_element("Potassium")
# This command produces the cross section plot for electron impact ionisation
eb.plot_eixs(element=potassium)
# If no Element instance is provided, the plot command will generate one internally based
# on the provided specifier
# This command produces the cross section plot for radiative recombination
eb.plot_rrxs(element="Potassium") # Based on name of element
# This command produces the cross section plot for dielectronic recombination
# In addition to the Element the effective line width (eV) has to be specified.
# Typically the natural line width of a DR transition is much smaller than the energy spread
# of the electron beam, therefore a gaussian profile with a given line width is assumed for
# the transitions.
eb.plot_drxs(element="K", fwhm=15) # Based on element symbol
# It is also possible to compare all cross sections in a single plot
eb.plot_combined_xs(element=19, fwhm=15, xlim=(2200, 3000)) # Based on proton number
show()
| 40.5 | 92 | 0.78858 | """Example: Plotting cross sections"""
from matplotlib.pyplot import show
import ebisim as eb
# The cross section plot commands accept a number of formats for the element parameter
# This example shows the different possibilities
# The first option is to provide an instance of the Element class
potassium = eb.get_element("Potassium")
# This command produces the cross section plot for electron impact ionisation
eb.plot_eixs(element=potassium)
# If no Element instance is provided, the plot command will generate one internally based
# on the provided specifier
# This command produces the cross section plot for radiative recombination
eb.plot_rrxs(element="Potassium") # Based on name of element
# This command produces the cross section plot for dielectronic recombination
# In addition to the Element the effective line width (eV) has to be specified.
# Typically the natural line width of a DR transition is much smaller than the energy spread
# of the electron beam, therefore a gaussian profile with a given line width is assumed for
# the transitions.
eb.plot_drxs(element="K", fwhm=15) # Based on element symbol
# It is also possible to compare all cross sections in a single plot
eb.plot_combined_xs(element=19, fwhm=15, xlim=(2200, 3000)) # Based on proton number
show()
| 0 | 0 | 0 |
08336ff68fb64a4be63d879c77a6b18a07c42e36 | 277 | py | Python | apps/models/worker.py | Failjak/constructCompany_DRF | 458a8e33d7bca72db883745388065a05c42bbd1f | [
"BSD-3-Clause"
] | null | null | null | apps/models/worker.py | Failjak/constructCompany_DRF | 458a8e33d7bca72db883745388065a05c42bbd1f | [
"BSD-3-Clause"
] | null | null | null | apps/models/worker.py | Failjak/constructCompany_DRF | 458a8e33d7bca72db883745388065a05c42bbd1f | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
| 23.083333 | 49 | 0.711191 | from django.db import models
class Worker(models.Model):
name = models.CharField(max_length=255)
experience = models.CharField(max_length=255)
speciality = models.CharField(max_length=255)
def __str__(self):
return f"{self.name}, {self.speciality}"
| 46 | 178 | 23 |
d34740d1ba2e1ded85568ef53f8761b641a9690e | 247 | py | Python | Desafios/Desafio13.py | Punkays/Phyton3-Estudos | 047ef62ddaf506fe3f653de3a1b2999874bbf12f | [
"Unlicense"
] | null | null | null | Desafios/Desafio13.py | Punkays/Phyton3-Estudos | 047ef62ddaf506fe3f653de3a1b2999874bbf12f | [
"Unlicense"
] | null | null | null | Desafios/Desafio13.py | Punkays/Phyton3-Estudos | 047ef62ddaf506fe3f653de3a1b2999874bbf12f | [
"Unlicense"
] | null | null | null |
# ! Desafio 13
# ! Write a program that reads an employee's salary and shows the new salary with a 15% raise
po = int(input('Digite seu salario '))
pa = (15/100) * po
x = po + pa
print('Seu salario com 15% de aumenta é: {}'.format(x)) | 30.875 | 103 | 0.672065 |
# ! Desafio 13
# ! Write a program that reads an employee's salary and shows the new salary with a 15% raise
po = int(input('Digite seu salario '))
pa = (15/100) * po
x = po + pa
print('Seu salario com 15% de aumenta é: {}'.format(x)) | 0 | 0 | 0 |
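# Worked example for Desafio13 above: a salary of 1000 gives pa = (15/100) * 1000 = 150.0,
# so the program prints the new salary x = 1000 + 150.0 = 1150.0.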
e6e821f70e3b489bcfb270e3a00e210f1328f924 | 99 | py | Python | src/utils.py | Zummation/IQNewsClip-Web-Scraper | cdc539196bc6340db2995b97201fac2a59897cb6 | [
"MIT"
] | 2 | 2020-03-14T18:17:09.000Z | 2020-03-15T17:26:52.000Z | src/utils.py | tongplw/IQNewsClip-Web-Scraper | cdc539196bc6340db2995b97201fac2a59897cb6 | [
"MIT"
] | 2 | 2021-03-31T20:27:48.000Z | 2021-12-13T20:32:05.000Z | src/utils.py | Zummation/IQNewsClip-Web-Scraper | cdc539196bc6340db2995b97201fac2a59897cb6 | [
"MIT"
] | null | null | null | SOURCES_CODE = {
'ทุกสื่อ' : 'nAll',
'ข่าวหุ้น' : '01300000',
'ทันหุ้น' : '01600000',
} | 19.8 | 28 | 0.454545 | SOURCES_CODE = {
'ทุกสื่อ' : 'nAll',
'ข่าวหุ้น' : '01300000',
'ทันหุ้น' : '01600000',
} | 0 | 0 | 0 |
fb21933604971b818d553963d3bc1e8bbcafb0df | 6,992 | py | Python | pmdarima/arima/_context.py | tuomijal/pmdarima | 5bf84a2a5c42b81b949bd252ad3d4c6c311343f8 | [
"MIT"
] | 736 | 2019-12-02T01:33:31.000Z | 2022-03-31T21:45:29.000Z | pmdarima/arima/_context.py | tuomijal/pmdarima | 5bf84a2a5c42b81b949bd252ad3d4c6c311343f8 | [
"MIT"
] | 186 | 2019-12-01T18:01:33.000Z | 2022-03-31T18:27:56.000Z | pmdarima/arima/_context.py | tuomijal/pmdarima | 5bf84a2a5c42b81b949bd252ad3d4c6c311343f8 | [
"MIT"
] | 126 | 2019-12-07T04:03:19.000Z | 2022-03-31T17:40:14.000Z | # -*- coding: utf-8 -*-
#
# Author: Krishna Sunkara (kpsunkara)
#
# Re-entrant, reusable context manager to store execution context. Introduced
# in pmdarima 1.5.0 (see #221), redesigned not to use thread locals in #273
# (see #275 for context).
from abc import ABC, abstractmethod
from enum import Enum
import collections
__all__ = ['AbstractContext', 'ContextStore', 'ContextType']
class _CtxSingleton:
"""Singleton class to store context information"""
store = {}
_ctx = _CtxSingleton()
class ContextType(Enum):
"""Context Type Enumeration
An enumeration of Context Types known to :class:`ContextStore`
"""
EMPTY = 0
STEPWISE = 1
class AbstractContext(ABC):
"""An abstract context manager to store execution context.
A generic, re-entrant, reusable context manager to store
execution context. Has helper methods to iterate over the context info
and provide a string representation of the context info.
"""
def __getattr__(self, item):
"""Lets us access, e.g., ``ctx.max_steps`` even if not in a context"""
return self.props[item] if item in self.props else None
@abstractmethod
def get_type(self):
"""Get the ContextType"""
class _emptyContext(AbstractContext):
"""An empty context for convenience use"""
def get_type(self):
"""Indicates we are not in a context manager"""
return ContextType.EMPTY
class ContextStore:
"""A class to wrap access to the global context store
This class hosts static methods to wrap access to and encapsulate the
singleton content store instance
"""
@staticmethod
def get_context(context_type):
"""Returns most recently added instance of given Context Type
Parameters
----------
context_type : ContextType
Context type to retrieve from the store
Returns
-------
res : AbstractContext
An instance of AbstractContext subclass or None
"""
if not isinstance(context_type, ContextType):
raise ValueError('context_type must be an instance of ContextType')
if context_type in _ctx.store and len(_ctx.store[context_type]) > 0:
return _ctx.store[context_type][-1]
# If not present
return None
@staticmethod
def get_or_default(context_type, default):
"""Returns most recent instance of given Context Type or default
Parameters
----------
context_type : ContextType
Context type to retrieve from the store
default : AbstractContext
Value to return in case given context does not exist
Returns
-------
ctx : AbstractContext
An instance of AbstractContext subclass or default
"""
ctx = ContextStore.get_context(context_type)
return ctx if ctx else default
@staticmethod
def get_or_empty(context_type):
"""Returns recent instance of given Context Type or an empty context
Parameters
----------
context_type : ContextType
Context type to retrieve from the store
Returns
-------
res : AbstractContext
An instance of AbstractContext subclass
"""
return ContextStore.get_or_default(context_type, _emptyContext())
@staticmethod
def _add_context(ctx):
"""Add given instance of AbstractContext subclass to context store
This private member is only called by ``AbstractContext.__init__()``
if the given ctx is nested, merge parent context, to support
following usage:
Examples
--------
>>> from pmdarima.arima import StepwiseContext, auto_arima
>>> with StepwiseContext(max_steps=10):
... with StepwiseContext(max_dur=30):
... auto_arima(samp,...)
This is identical to:
>>> from contextlib import ExitStack
... stack = ExitStack()
... outer_ctx = StepwiseContext(max_steps=10)
... inner_ctx = StepwiseContext(max_dur=30)
... stack.enter_context(outer_ctx)
... stack.enter_context(inner_ctx)
... with stack:
... auto_arima(samp, ...)
However, the nested context can override parent context. In the
example below, the effective context for inner most call to
``auto_arima(...)`` is: ``max_steps=15, max_dur=30``. The effective
context for the second call to ``auto_arima(..)`` is: ``max_steps=10``
>>> with StepwiseContext(max_steps=10):
... with StepwiseContext(max_steps=15, max_dur=30):
... auto_arima(samp,...)
...
... auto_arima(samp,...)
"""
if not isinstance(ctx, AbstractContext):
            raise ValueError('ctx must be an instance of AbstractContext')
# if given Context Type is not present into store, make an entry
context_type = ctx.get_type()
if context_type not in _ctx.store:
_ctx.store[context_type] = collections.deque()
# if the context is nested, merge with parent's context
if len(_ctx.store[context_type]) > 0:
parent = _ctx.store[context_type][-1]
ctx.update(parent)
_ctx.store[context_type].append(ctx)
@staticmethod
def _remove_context(ctx):
"""Removes the most recently added context of given Context Type
This private member is only used by ``AbstractContext``
:param ctx:
:return: None
"""
if not isinstance(ctx, AbstractContext):
            raise ValueError('ctx must be an instance of AbstractContext')
context_type = ctx.get_type()
if context_type not in _ctx.store or \
len(_ctx.store[context_type]) == 0:
return
_ctx.store[context_type].pop()
| 29.880342 | 79 | 0.626859 | # -*- coding: utf-8 -*-
#
# Author: Krishna Sunkara (kpsunkara)
#
# Re-entrant, reusable context manager to store execution context. Introduced
# in pmdarima 1.5.0 (see #221), redesigned not to use thread locals in #273
# (see #275 for context).
from abc import ABC, abstractmethod
from enum import Enum
import collections
__all__ = ['AbstractContext', 'ContextStore', 'ContextType']
class _CtxSingleton:
"""Singleton class to store context information"""
store = {}
_ctx = _CtxSingleton()
class ContextType(Enum):
"""Context Type Enumeration
An enumeration of Context Types known to :class:`ContextStore`
"""
EMPTY = 0
STEPWISE = 1
class AbstractContext(ABC):
"""An abstract context manager to store execution context.
A generic, re-entrant, reusable context manager to store
execution context. Has helper methods to iterate over the context info
and provide a string representation of the context info.
"""
def __init__(self, **kwargs):
# remove None valued entries,
# since __getattr__ returns None if an attr is not present
self.props = {k: v for k, v in kwargs.items() if v is not None} \
if kwargs else {}
def __enter__(self):
ContextStore._add_context(self)
def __exit__(self, exc_type, exc_val, exc_tb):
ContextStore._remove_context(self)
def __getattr__(self, item):
"""Lets us access, e.g., ``ctx.max_steps`` even if not in a context"""
return self.props[item] if item in self.props else None
def __contains__(self, item):
return item in self.props
def __getitem__(self, item):
return self.props[item] if item in self.props else None
def __iter__(self):
return iter(self.props)
def keys(self):
return self.props.keys()
def values(self):
return self.props.values()
def items(self):
return self.props.items()
def update(self, other):
parent_props = dict(other)
parent_props.update(self.props)
self.props = parent_props
def __repr__(self):
return self.props.__repr__()
@abstractmethod
def get_type(self):
"""Get the ContextType"""
class _emptyContext(AbstractContext):
"""An empty context for convenience use"""
def __init__(self):
super(_emptyContext, self).__init__()
def get_type(self):
"""Indicates we are not in a context manager"""
return ContextType.EMPTY
class ContextStore:
"""A class to wrap access to the global context store
This class hosts static methods to wrap access to and encapsulate the
singleton content store instance
"""
@staticmethod
def get_context(context_type):
"""Returns most recently added instance of given Context Type
Parameters
----------
context_type : ContextType
Context type to retrieve from the store
Returns
-------
res : AbstractContext
An instance of AbstractContext subclass or None
"""
if not isinstance(context_type, ContextType):
raise ValueError('context_type must be an instance of ContextType')
if context_type in _ctx.store and len(_ctx.store[context_type]) > 0:
return _ctx.store[context_type][-1]
# If not present
return None
@staticmethod
def get_or_default(context_type, default):
"""Returns most recent instance of given Context Type or default
Parameters
----------
context_type : ContextType
Context type to retrieve from the store
default : AbstractContext
Value to return in case given context does not exist
Returns
-------
ctx : AbstractContext
An instance of AbstractContext subclass or default
"""
ctx = ContextStore.get_context(context_type)
return ctx if ctx else default
@staticmethod
def get_or_empty(context_type):
"""Returns recent instance of given Context Type or an empty context
Parameters
----------
context_type : ContextType
Context type to retrieve from the store
Returns
-------
res : AbstractContext
An instance of AbstractContext subclass
"""
return ContextStore.get_or_default(context_type, _emptyContext())
@staticmethod
def _add_context(ctx):
"""Add given instance of AbstractContext subclass to context store
This private member is only called by ``AbstractContext.__init__()``
if the given ctx is nested, merge parent context, to support
following usage:
Examples
--------
>>> from pmdarima.arima import StepwiseContext, auto_arima
>>> with StepwiseContext(max_steps=10):
... with StepwiseContext(max_dur=30):
... auto_arima(samp,...)
This is identical to:
>>> from contextlib import ExitStack
... stack = ExitStack()
... outer_ctx = StepwiseContext(max_steps=10)
... inner_ctx = StepwiseContext(max_dur=30)
... stack.enter_context(outer_ctx)
... stack.enter_context(inner_ctx)
... with stack:
... auto_arima(samp, ...)
However, the nested context can override parent context. In the
example below, the effective context for inner most call to
``auto_arima(...)`` is: ``max_steps=15, max_dur=30``. The effective
context for the second call to ``auto_arima(..)`` is: ``max_steps=10``
>>> with StepwiseContext(max_steps=10):
... with StepwiseContext(max_steps=15, max_dur=30):
... auto_arima(samp,...)
...
... auto_arima(samp,...)
"""
if not isinstance(ctx, AbstractContext):
            raise ValueError('ctx must be an instance of AbstractContext')
# if given Context Type is not present into store, make an entry
context_type = ctx.get_type()
if context_type not in _ctx.store:
_ctx.store[context_type] = collections.deque()
# if the context is nested, merge with parent's context
if len(_ctx.store[context_type]) > 0:
parent = _ctx.store[context_type][-1]
ctx.update(parent)
_ctx.store[context_type].append(ctx)
@staticmethod
def _remove_context(ctx):
"""Removes the most recently added context of given Context Type
This private member is only used by ``AbstractContext``
:param ctx:
:return: None
"""
if not isinstance(ctx, AbstractContext):
            raise ValueError('ctx must be an instance of AbstractContext')
context_type = ctx.get_type()
if context_type not in _ctx.store or \
len(_ctx.store[context_type]) == 0:
return
_ctx.store[context_type].pop()
| 745 | 0 | 323 |
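# A minimal sketch of how the ContextStore above merges nested contexts; the
# DemoContext subclass is hypothetical and stands in for pmdarima's real
# StepwiseContext, the ContextType.STEPWISE implementation.
from pmdarima.arima._context import AbstractContext, ContextStore, ContextType

class DemoContext(AbstractContext):
    """Toy context carrying arbitrary keyword properties."""
    def get_type(self):
        return ContextType.STEPWISE

with DemoContext(max_steps=10):
    with DemoContext(max_dur=30):
        ctx = ContextStore.get_or_empty(ContextType.STEPWISE)
        # the inner context inherited max_steps from its parent
        assert ctx.max_steps == 10 and ctx.max_dur == 30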
a8f4cf0b0938eb68c03bf8a5dd67d2d0b5030f80 | 2,781 | py | Python | Core.py | archwl/Cici | 1c1f8e51488e5c1ea9799829b325397380384beb | [
"MIT"
] | null | null | null | Core.py | archwl/Cici | 1c1f8e51488e5c1ea9799829b325397380384beb | [
"MIT"
] | null | null | null | Core.py | archwl/Cici | 1c1f8e51488e5c1ea9799829b325397380384beb | [
"MIT"
] | null | null | null | '''
The heart of Cici
'''
import discord
import os
import json
import aiosqlite
from functools import lru_cache
from aiohttp import ClientSession
from discord.ext.commands import Bot
enclosing_dir = os.path.dirname(os.path.realpath(__file__))
config = load_config()
db_path = f'{enclosing_dir}/data.db'
bot = Cici(
command_prefix=get_prefix,
description='Cici',
intents=discord.Intents.all()
)
if __name__ == '__main__':
start_cici()
| 27 | 120 | 0.641855 | '''
The heart of Cici
'''
import discord
import os
import json
import aiosqlite
from functools import lru_cache
from aiohttp import ClientSession
from discord.ext.commands import Bot
enclosing_dir = os.path.dirname(os.path.realpath(__file__))
def dict_bool_eval(_dict, _key):
    return _key in _dict and bool(_dict[_key])  # evaluate the value's truthiness, not the key's
def dict_has(_dict, _key):
return _key in _dict
def load_config():
try:
with open(f'{enclosing_dir}/config.json') as config_json:
try:
return json.load(config_json)
except Exception as exception:
print(f'Bad configuration file. Exiting...\n{exception}')
raise SystemExit(1)
except FileNotFoundError:
print('Configuration file not found. Exiting...')
raise SystemExit(1)
config = load_config()
db_path = f'{enclosing_dir}/data.db'
class Cici(Bot):
def __init__(self, *args, **options):
super().__init__(*args, **options)
self.load_config = load_config
self.dict_bool_eval = dict_bool_eval
self.dict_has = dict_has
self.config = load_config()
self.enclosing_dir = enclosing_dir
self.db_path = db_path
self.embed_color = int(config['embed_color']) if dict_has(config, 'embed_color') else 0x2F3136
async def start(self, *args, **kwargs):
self.session = ClientSession()
if dict_has(config, 'bot_token'):
await super().start(self.config['bot_token'], *args, **kwargs)
else:
print('Couldn\'t find bot token in the configuration file. Exiting...')
raise SystemExit(1)
async def close(self):
await self.session.close()
await super().close()
@lru_cache(maxsize=12)
async def get_prefix(bot, message):
prefixes = config['prefix_list'] if dict_has(config, 'prefix_list') else ['cc!']
async with aiosqlite.connect(bot.db_path) as db:
try:
async with db.execute(f'SELECT prefix from user_prefixes WHERE user_id = {message.author.id}') as cursor:
prefixes.append(''.join(await cursor.fetchone()))
except Exception:
pass
try:
async with db.execute(f'SELECT prefix from server_prefixes WHERE server_id = {message.guild.id}') as cursor:
prefixes.append(''.join(await cursor.fetchone()))
except Exception:
pass
return prefixes
bot = Cici(
command_prefix=get_prefix,
description='Cici',
intents=discord.Intents.all()
)
@bot.event
async def on_ready():
if dict_bool_eval(config, 'silent'):
print('Connected to Discord!')
return True
def start_cici():
bot.load_extension('Essentials')
bot.run()
if __name__ == '__main__':
start_cici()
| 2,042 | -5 | 239 |
851f3532c64518c2990d8d5f15efc15aa0bb9a31 | 384 | py | Python | desafio/desafio020.py | henriquekirchheck/Curso-em-video-Python | 1a29f68515313af85c8683f626ba35f8fcdd10e7 | [
"MIT"
] | null | null | null | desafio/desafio020.py | henriquekirchheck/Curso-em-video-Python | 1a29f68515313af85c8683f626ba35f8fcdd10e7 | [
"MIT"
] | null | null | null | desafio/desafio020.py | henriquekirchheck/Curso-em-video-Python | 1a29f68515313af85c8683f626ba35f8fcdd10e7 | [
"MIT"
] | null | null | null | # The same teacher from the previous challenge wants to raffle the order in which the students will present. Write a program that reads the names of four students and shows the drawn order
from random import shuffle
a1 = str(input('Aluno n1: '))
a2 = str(input('Aluno n2: '))
a3 = str(input('Aluno n3: '))
a4 = str(input('Aluno n4: '))
a = [a1, a2, a3, a4]
shuffle(a)
print('A lista é:\n {}' .format(a)) | 29.538462 | 165 | 0.682292 | # The same teacher from the previous challenge wants to raffle the order in which the students will present. Write a program that reads the names of four students and shows the drawn order
from random import shuffle
a1 = str(input('Aluno n1: '))
a2 = str(input('Aluno n2: '))
a3 = str(input('Aluno n3: '))
a4 = str(input('Aluno n4: '))
a = [a1, a2, a3, a4]
shuffle(a)
print('A lista é:\n {}' .format(a)) | 0 | 0 | 0 |
8a7baa4a882ad271e2764effd6b7ceefef0eaf72 | 15,535 | py | Python | uvloop/tests/test_unix.py | hand-code/readlab | a816f5af2d1894ee079b6f5deaabc841833e84d7 | [
"MIT"
] | null | null | null | uvloop/tests/test_unix.py | hand-code/readlab | a816f5af2d1894ee079b6f5deaabc841833e84d7 | [
"MIT"
] | null | null | null | uvloop/tests/test_unix.py | hand-code/readlab | a816f5af2d1894ee079b6f5deaabc841833e84d7 | [
"MIT"
] | null | null | null | import asyncio
import os
import socket
import tempfile
from uvloop import _testbase as tb
| 29.646947 | 77 | 0.514001 | import asyncio
import os
import socket
import tempfile
from uvloop import _testbase as tb
class _TestUnix:
def test_create_unix_server_1(self):
CNT = 0 # number of clients that were successful
TOTAL_CNT = 100 # total number of clients that test will create
TIMEOUT = 5.0 # timeout for this test
async def handle_client(reader, writer):
nonlocal CNT
data = await reader.readexactly(4)
self.assertEqual(data, b'AAAA')
writer.write(b'OK')
data = await reader.readexactly(4)
self.assertEqual(data, b'BBBB')
writer.write(b'SPAM')
await writer.drain()
writer.close()
CNT += 1
async def test_client(addr):
sock = socket.socket(socket.AF_UNIX)
with sock:
sock.setblocking(False)
await self.loop.sock_connect(sock, addr)
await self.loop.sock_sendall(sock, b'AAAA')
buf = b''
while len(buf) != 2:
buf += await self.loop.sock_recv(sock, 1)
self.assertEqual(buf, b'OK')
await self.loop.sock_sendall(sock, b'BBBB')
buf = b''
while len(buf) != 4:
buf += await self.loop.sock_recv(sock, 1)
self.assertEqual(buf, b'SPAM')
async def start_server():
nonlocal CNT
CNT = 0
with tempfile.TemporaryDirectory() as td:
sock_name = os.path.join(td, 'sock')
srv = await asyncio.start_unix_server(
handle_client,
sock_name,
loop=self.loop)
try:
srv_socks = srv.sockets
self.assertTrue(srv_socks)
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(sock_name))
await asyncio.wait_for(
asyncio.gather(*tasks, loop=self.loop),
TIMEOUT, loop=self.loop)
finally:
self.loop.call_soon(srv.close)
await srv.wait_closed()
# Check that the server cleaned-up proxy-sockets
for srv_sock in srv_socks:
self.assertEqual(srv_sock.fileno(), -1)
# asyncio doesn't cleanup the sock file
self.assertTrue(os.path.exists(sock_name))
async def start_server_sock():
nonlocal CNT
CNT = 0
with tempfile.TemporaryDirectory() as td:
sock_name = os.path.join(td, 'sock')
sock = socket.socket(socket.AF_UNIX)
sock.bind(sock_name)
srv = await asyncio.start_unix_server(
handle_client,
None,
loop=self.loop,
sock=sock)
try:
srv_socks = srv.sockets
self.assertTrue(srv_socks)
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(sock_name))
await asyncio.wait_for(
asyncio.gather(*tasks, loop=self.loop),
TIMEOUT, loop=self.loop)
finally:
self.loop.call_soon(srv.close)
await srv.wait_closed()
# Check that the server cleaned-up proxy-sockets
for srv_sock in srv_socks:
self.assertEqual(srv_sock.fileno(), -1)
# asyncio doesn't cleanup the sock file
self.assertTrue(os.path.exists(sock_name))
self.loop.run_until_complete(start_server())
self.assertEqual(CNT, TOTAL_CNT)
self.loop.run_until_complete(start_server_sock())
self.assertEqual(CNT, TOTAL_CNT)
def test_create_unix_server_2(self):
with tempfile.TemporaryDirectory() as td:
sock_name = os.path.join(td, 'sock')
with open(sock_name, 'wt') as f:
f.write('x')
with self.assertRaisesRegex(
OSError, "Address '{}' is already in use".format(
sock_name)):
self.loop.run_until_complete(
self.loop.create_unix_server(object, sock_name))
def test_create_unix_connection_1(self):
CNT = 0
TOTAL_CNT = 100
def server():
data = yield tb.read(4)
self.assertEqual(data, b'AAAA')
yield tb.write(b'OK')
data = yield tb.read(4)
self.assertEqual(data, b'BBBB')
yield tb.write(b'SPAM')
async def client(addr):
reader, writer = await asyncio.open_unix_connection(
addr,
loop=self.loop)
writer.write(b'AAAA')
self.assertEqual(await reader.readexactly(2), b'OK')
writer.write(b'BBBB')
self.assertEqual(await reader.readexactly(4), b'SPAM')
nonlocal CNT
CNT += 1
writer.close()
async def client_2(addr):
sock = socket.socket(socket.AF_UNIX)
sock.connect(addr)
reader, writer = await asyncio.open_unix_connection(
sock=sock,
loop=self.loop)
writer.write(b'AAAA')
self.assertEqual(await reader.readexactly(2), b'OK')
writer.write(b'BBBB')
self.assertEqual(await reader.readexactly(4), b'SPAM')
nonlocal CNT
CNT += 1
writer.close()
def run(coro):
nonlocal CNT
CNT = 0
srv = tb.tcp_server(server,
family=socket.AF_UNIX,
max_clients=TOTAL_CNT,
backlog=TOTAL_CNT)
srv.start()
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(coro(srv.addr))
self.loop.run_until_complete(
asyncio.gather(*tasks, loop=self.loop))
srv.join()
self.assertEqual(CNT, TOTAL_CNT)
run(client)
run(client_2)
def test_create_unix_connection_2(self):
with tempfile.NamedTemporaryFile() as tmp:
path = tmp.name
async def client():
reader, writer = await asyncio.open_unix_connection(
path,
loop=self.loop)
async def runner():
with self.assertRaises(FileNotFoundError):
await client()
self.loop.run_until_complete(runner())
def test_create_unix_connection_3(self):
CNT = 0
TOTAL_CNT = 100
def server():
data = yield tb.read(4)
self.assertEqual(data, b'AAAA')
yield tb.close()
async def client(addr):
reader, writer = await asyncio.open_unix_connection(
addr,
loop=self.loop)
sock = writer._transport.get_extra_info('socket')
self.assertEqual(sock.family, socket.AF_UNIX)
writer.write(b'AAAA')
with self.assertRaises(asyncio.IncompleteReadError):
await reader.readexactly(10)
writer.close()
nonlocal CNT
CNT += 1
def run(coro):
nonlocal CNT
CNT = 0
srv = tb.tcp_server(server,
family=socket.AF_UNIX,
max_clients=TOTAL_CNT,
backlog=TOTAL_CNT)
srv.start()
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(coro(srv.addr))
self.loop.run_until_complete(
asyncio.gather(*tasks, loop=self.loop))
srv.join()
self.assertEqual(CNT, TOTAL_CNT)
run(client)
def test_create_unix_connection_4(self):
sock = socket.socket(socket.AF_UNIX)
sock.close()
async def client():
reader, writer = await asyncio.open_unix_connection(
sock=sock,
loop=self.loop)
async def runner():
with self.assertRaisesRegex(OSError, 'Bad file'):
await client()
self.loop.run_until_complete(runner())
def test_create_unix_connection_5(self):
s1, s2 = socket.socketpair(socket.AF_UNIX)
excs = []
class Proto(asyncio.Protocol):
def connection_lost(self, exc):
excs.append(exc)
proto = Proto()
async def client():
t, _ = await self.loop.create_unix_connection(
lambda: proto,
None,
sock=s2)
t.write(b'AAAAA')
s1.close()
t.write(b'AAAAA')
await asyncio.sleep(0.1, loop=self.loop)
self.loop.run_until_complete(client())
self.assertEqual(len(excs), 1)
self.assertIn(excs[0].__class__,
(BrokenPipeError, ConnectionResetError))
def test_transport_fromsock_get_extra_info(self):
async def test(sock):
t, _ = await self.loop.create_unix_connection(
asyncio.Protocol,
None,
sock=sock)
sock = t.get_extra_info('socket')
self.assertIs(t.get_extra_info('socket'), sock)
# Test that adding a writer on the returned socket
# does not crash uvloop. aiohttp does that to implement
# sendfile, for instance.
self.loop.add_writer(sock.fileno(), lambda: None)
self.loop.remove_writer(sock.fileno())
t.close()
s1, s2 = socket.socketpair(socket.AF_UNIX)
with s1, s2:
self.loop.run_until_complete(test(s1))
def test_transport_unclosed_warning(self):
async def test(sock):
return await self.loop.create_unix_connection(
asyncio.Protocol,
None,
sock=sock)
with self.assertWarnsRegex(ResourceWarning, 'unclosed'):
s1, s2 = socket.socketpair(socket.AF_UNIX)
with s1, s2:
self.loop.run_until_complete(test(s1))
self.loop.close()
class Test_UV_Unix(_TestUnix, tb.UVTestCase):
pass
class Test_AIO_Unix(_TestUnix, tb.AIOTestCase):
pass
class _TestSSL(tb.SSLTestCase):
def test_create_unix_server_ssl_1(self):
CNT = 0 # number of clients that were successful
TOTAL_CNT = 25 # total number of clients that test will create
TIMEOUT = 5.0 # timeout for this test
A_DATA = b'A' * 1024 * 1024
B_DATA = b'B' * 1024 * 1024
sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
client_sslctx = self._create_client_ssl_context()
clients = []
async def handle_client(reader, writer):
nonlocal CNT
data = await reader.readexactly(len(A_DATA))
self.assertEqual(data, A_DATA)
writer.write(b'OK')
data = await reader.readexactly(len(B_DATA))
self.assertEqual(data, B_DATA)
writer.writelines([b'SP', bytearray(b'A'), memoryview(b'M')])
await writer.drain()
writer.close()
CNT += 1
async def test_client(addr):
fut = asyncio.Future(loop=self.loop)
def prog():
try:
yield tb.starttls(client_sslctx)
yield tb.connect(addr)
yield tb.write(A_DATA)
data = yield tb.read(2)
self.assertEqual(data, b'OK')
yield tb.write(B_DATA)
data = yield tb.read(4)
self.assertEqual(data, b'SPAM')
yield tb.close()
except Exception as ex:
self.loop.call_soon_threadsafe(fut.set_exception, ex)
else:
self.loop.call_soon_threadsafe(fut.set_result, None)
client = tb.tcp_client(prog, family=socket.AF_UNIX)
client.start()
clients.append(client)
await fut
async def start_server():
with tempfile.TemporaryDirectory() as td:
sock_name = os.path.join(td, 'sock')
srv = await asyncio.start_unix_server(
handle_client,
sock_name,
ssl=sslctx,
loop=self.loop)
try:
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(sock_name))
await asyncio.wait_for(
asyncio.gather(*tasks, loop=self.loop),
TIMEOUT, loop=self.loop)
finally:
self.loop.call_soon(srv.close)
await srv.wait_closed()
with self._silence_eof_received_warning():
self.loop.run_until_complete(start_server())
self.assertEqual(CNT, TOTAL_CNT)
for client in clients:
client.stop()
def test_create_unix_connection_ssl_1(self):
CNT = 0
TOTAL_CNT = 25
A_DATA = b'A' * 1024 * 1024
B_DATA = b'B' * 1024 * 1024
sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
client_sslctx = self._create_client_ssl_context()
def server():
yield tb.starttls(
sslctx,
server_side=True)
data = yield tb.read(len(A_DATA))
self.assertEqual(data, A_DATA)
yield tb.write(b'OK')
data = yield tb.read(len(B_DATA))
self.assertEqual(data, B_DATA)
yield tb.write(b'SPAM')
yield tb.close()
async def client(addr):
reader, writer = await asyncio.open_unix_connection(
addr,
ssl=client_sslctx,
server_hostname='',
loop=self.loop)
writer.write(A_DATA)
self.assertEqual(await reader.readexactly(2), b'OK')
writer.write(B_DATA)
self.assertEqual(await reader.readexactly(4), b'SPAM')
nonlocal CNT
CNT += 1
writer.close()
def run(coro):
nonlocal CNT
CNT = 0
srv = tb.tcp_server(server,
family=socket.AF_UNIX,
max_clients=TOTAL_CNT,
backlog=TOTAL_CNT)
srv.start()
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(coro(srv.addr))
self.loop.run_until_complete(
asyncio.gather(*tasks, loop=self.loop))
srv.join()
self.assertEqual(CNT, TOTAL_CNT)
with self._silence_eof_received_warning():
run(client)
class Test_UV_UnixSSL(_TestSSL, tb.UVTestCase):
pass
class Test_AIO_UnixSSL(_TestSSL, tb.AIOTestCase):
pass
| 14,859 | 145 | 434 |
7d9b7b39866f25631ff6a01f00fdb1f171abec72 | 185 | py | Python | importing_questions.py | mohamadaref/telegram_bot | 80eaa1da29c8c2f2990f26315b667d5a85e57d14 | [
"MIT"
] | 1 | 2019-09-19T07:05:05.000Z | 2019-09-19T07:05:05.000Z | importing_questions.py | mohamadaref/telegram_bot | 80eaa1da29c8c2f2990f26315b667d5a85e57d14 | [
"MIT"
] | null | null | null | importing_questions.py | mohamadaref/telegram_bot | 80eaa1da29c8c2f2990f26315b667d5a85e57d14 | [
"MIT"
] | null | null | null | import numpy
import xlsxwriter
from pandas import read_excel
import xlrd
import pandas
data = read_excel('data/questions_and_choices.xlsx')
print(data.columns[2])
# print(data.head())
| 18.5 | 52 | 0.8 | import numpy
import xlsxwriter
from pandas import read_excel
import xlrd
import pandas
data = read_excel('data/questions_and_choices.xlsx')
print(data.columns[2])
# print(data.head())
| 0 | 0 | 0 |
c798ab997c91a4a93420dcf67a916c96f8b5f5a0 | 774 | py | Python | booking/reservation/models.py | dcti77/booking | ed335fa1ab19bb1dc2f76bda0ac04cbc56860837 | [
"MIT"
] | null | null | null | booking/reservation/models.py | dcti77/booking | ed335fa1ab19bb1dc2f76bda0ac04cbc56860837 | [
"MIT"
] | null | null | null | booking/reservation/models.py | dcti77/booking | ed335fa1ab19bb1dc2f76bda0ac04cbc56860837 | [
"MIT"
] | null | null | null | from django.db import models
from hotels.models import Hotel
from users.models import User
| 40.736842 | 116 | 0.75323 | from django.db import models
from hotels.models import Hotel
from users.models import User
class Reservation(models.Model):
number_of_person = models.PositiveIntegerField()
number_of_nights = models.PositiveIntegerField()
    booking_date = models.DateField() # in the form this behaves like a CharField and does not offer a date picker
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
hotel = models.ForeignKey(Hotel, on_delete=models.CASCADE, null=True)
    card = models.DecimalField(max_digits=16, decimal_places=0, null=True) # the users/User model has the same field
    valid_thru = models.DateField(null=True) # the users/User model has the same field
finish = models.BooleanField(null=True)
def __str__(self):
return "Booking"
| 22 | 719 | 23 |
be77cd52b02ff2d6a2dc8fa807cacf01e47a6fc8 | 2,555 | py | Python | tweepy_setup.py | FeXd/saskvaccine | 9df8b25f262cf7666534da9735365e9a0a5c16b9 | [
"MIT"
] | 2 | 2021-07-15T23:19:20.000Z | 2021-07-19T04:52:08.000Z | tweepy_setup.py | FeXd/saskvaccine | 9df8b25f262cf7666534da9735365e9a0a5c16b9 | [
"MIT"
] | 7 | 2021-05-04T14:03:32.000Z | 2021-05-19T20:22:53.000Z | tweepy_setup.py | FeXd/saskwildfire | 06926a672a82a4d8bf9a1004d2d8d754c6ac3508 | [
"MIT"
] | 1 | 2021-04-29T19:01:05.000Z | 2021-04-29T19:01:05.000Z | # TWEEPY DOCUMENTATION - https://docs.tweepy.org/en/latest/
# AUTHENTICATION TUTORIAL - https://docs.tweepy.org/en/latest/auth_tutorial.html
import tweepy
# YOU MUST HAVE A TWITTER DEVELOPER ACCOUNT TO USE
# https://developer.twitter.com/en/apply-for-access
# All new developers must apply for a developer account to access the Twitter developer platform.
# Once approved, you can begin to use our new Twitter API v2, or our v1.1 standard and premium APIs.
# CONSUMER KEYS - available under Projects & Apps > Standalone Apps > Your App > Keys and tokens
# Do not commit the tokens to git. These should also be copied to the .env file
consumer_key = ''
consumer_secret = ''
# AUTHENTICATE WITH TWITTER
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# STEP 1 - GET OAUTH TOKENS - enable if we need to get OAuth tokens
if False:
# Get the URL that we need to visit
try:
redirect_url = auth.get_authorization_url()
except tweepy.TweepError:
print('Error! Failed to get request token.')
# Token for convenience
request_token = auth.request_token['oauth_token']
# Print out both and exit - copy paste them appropriately below and disable
print(redirect_url)
print(request_token)
# Should look something like:
# https://api.twitter.com/oauth/authorize?oauth_token=ABC123
# ABC123
# Next, manually visit the redirect_url page and authorize for desired twitter account
# After authorizing your app, you can get your oauth_verifier from your address bar
# Should look something like:
# https://website.com/?oauth_token=ABC123&oauth_verifier=XYZ987
exit()
verifier = 'XYZ987'
request_token = {'oauth_token': 'ABC123', 'oauth_token_secret': verifier}
# STEP 2 - GET OAUTH TOKENS - enable after completing step 1
if False:
auth.request_token = request_token
try:
auth.get_access_token(verifier)
except tweepy.TweepError:
print('Error! Failed to get access token.')
# Print out access_token and access_token_secret
print('access_token', auth.access_token)
print('access_token_secret', auth.access_token_secret)
# Copy the access_token and access_token_secret and place in the .env file
exit()
# YOU ARE DONE - You should be able to run main.py successfully
# Feel free to paste tokens below, disable STEP 1 and STEP 2 and play with the Tweepy API below
# API v1.1 Reference: https://docs.tweepy.org/en/latest/api.html
access_token = ''
access_token_secret = ''
auth.set_access_token(access_token, access_token_secret)
| 35.985915 | 100 | 0.739726 | # TWEEPY DOCUMENTATION - https://docs.tweepy.org/en/latest/
# AUTHENTICATION TUTORIAL - https://docs.tweepy.org/en/latest/auth_tutorial.html
import tweepy
# YOU MUST HAVE A TWITTER DEVELOPER ACCOUNT TO USE
# https://developer.twitter.com/en/apply-for-access
# All new developers must apply for a developer account to access the Twitter developer platform.
# Once approved, you can begin to use our new Twitter API v2, or our v1.1 standard and premium APIs.
# CONSUMER KEYS - available under Projects & Apps > Standalone Apps > Your App > Keys and tokens
# Do not commit the tokens to git. These should also be copied to the .env file
consumer_key = ''
consumer_secret = ''
# AUTHENTICATE WITH TWITTER
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# STEP 1 - GET OAUTH TOKENS - enable if we need to get OAuth tokens
if False:
# Get the URL that we need to visit
try:
redirect_url = auth.get_authorization_url()
except tweepy.TweepError:
print('Error! Failed to get request token.')
# Token for convenience
request_token = auth.request_token['oauth_token']
# Print out both and exit - copy paste them appropriately below and disable
print(redirect_url)
print(request_token)
# Should look something like:
# https://api.twitter.com/oauth/authorize?oauth_token=ABC123
# ABC123
# Next, manually visit the redirect_url page and authorize for desired twitter account
# After authorizing your app, you can get your oauth_verifier from your address bar
# Should look something like:
# https://website.com/?oauth_token=ABC123&oauth_verifier=XYZ987
exit()
verifier = 'XYZ987'
request_token = {'oauth_token': 'ABC123', 'oauth_token_secret': verifier}
# STEP 2 - GET OAUTH TOKENS - enable after completing step 1
if False:
auth.request_token = request_token
try:
auth.get_access_token(verifier)
except tweepy.TweepError:
print('Error! Failed to get access token.')
# Print out access_token and access_token_secret
print('access_token', auth.access_token)
print('access_token_secret', auth.access_token_secret)
# Copy the access_token and access_token_secret and place in the .env file
exit()
# YOU ARE DONE - You should be able to run main.py successfully
# Feel free to paste tokens below, disable STEP 1 and STEP 2 and play with the Tweepy API below
# API v1.1 Reference: https://docs.tweepy.org/en/latest/api.html
access_token = ''
access_token_secret = ''
auth.set_access_token(access_token, access_token_secret)
| 0 | 0 | 0 |
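# A short follow-up sketch for the tutorial above: once real tokens are pasted
# in, the authenticated v1.1 API can be exercised like this (the placeholder
# strings and the status text are made up).
import tweepy

auth = tweepy.OAuthHandler('CONSUMER_KEY', 'CONSUMER_SECRET')
auth.set_access_token('ACCESS_TOKEN', 'ACCESS_TOKEN_SECRET')
api = tweepy.API(auth)
print(api.verify_credentials().screen_name)  # confirm the credentials work
api.update_status('Hello from Tweepy!')      # post a tweet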
440492b8f388bb021a08b3a12eeeba79ddbb9da9 | 497 | py | Python | tests/plugins/test_tvtoya.py | Billy2011/streamlink | 5f99ec52e0a9c315aeee00b96287edc45adaccd3 | [
"BSD-2-Clause"
] | 1 | 2019-09-14T10:19:47.000Z | 2019-09-14T10:19:47.000Z | tests/plugins/test_tvtoya.py | Billy2011/streamlink | 5f99ec52e0a9c315aeee00b96287edc45adaccd3 | [
"BSD-2-Clause"
] | 1 | 2018-07-12T18:18:05.000Z | 2018-07-12T18:18:05.000Z | tests/plugins/test_tvtoya.py | Billy2011/streamlink | 5f99ec52e0a9c315aeee00b96287edc45adaccd3 | [
"BSD-2-Clause"
] | null | null | null | from streamlink.plugins.tvtoya import TVToya
from tests.plugins import PluginCanHandleUrl
| 23.666667 | 56 | 0.609658 | from streamlink.plugins.tvtoya import TVToya
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlTVRPlus(PluginCanHandleUrl):
__plugin__ = TVToya
should_match = [
"http://tvtoya.pl/player/live",
"https://tvtoya.pl/player/live",
]
should_not_match = [
"http://tvtoya.pl",
"http://tvtoya.pl/",
"http://tvtoya.pl/live",
"https://tvtoya.pl",
"https://tvtoya.pl/",
"https://tvtoya.pl/live",
]
| 0 | 383 | 23 |
a4e209109d8d5dd870aa66013b5052a6c51c4ad2 | 710 | py | Python | qutipy/channels/amplitude_damping_channel.py | sumeetkhatri/QuTIPy | ca2a3344c1caa818504425496ea37278d80b1c44 | [
"Apache-2.0"
] | 19 | 2020-11-11T13:00:22.000Z | 2022-03-14T11:18:04.000Z | qutipy/channels/amplitude_damping_channel.py | sumeetkhatri/QuTIPy | ca2a3344c1caa818504425496ea37278d80b1c44 | [
"Apache-2.0"
] | null | null | null | qutipy/channels/amplitude_damping_channel.py | sumeetkhatri/QuTIPy | ca2a3344c1caa818504425496ea37278d80b1c44 | [
"Apache-2.0"
] | 1 | 2022-03-03T15:20:15.000Z | 2022-03-03T15:20:15.000Z | '''
This code is part of QuTIpy.
(c) Copyright Sumeet Khatri, 2021
This code is licensed under the Apache License, Version 2.0. You may
obtain a copy of this license in the LICENSE.txt file in the root directory
of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
Any modifications or derivative works of this code must retain this
copyright notice, and modified files need to carry a notice indicating
that they have been altered from the originals.
'''
import numpy as np
def amplitude_damping_channel(gamma):
'''
Generates the amplitude damping channel.
'''
A1=np.array([[1,0],[0,np.sqrt(1-gamma)]])
A2=np.array([[0,np.sqrt(gamma)],[0,0]])
return [A1,A2] | 24.482759 | 75 | 0.719718 | '''
This code is part of QuTIpy.
(c) Copyright Sumeet Khatri, 2021
This code is licensed under the Apache License, Version 2.0. You may
obtain a copy of this license in the LICENSE.txt file in the root directory
of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
Any modifications or derivative works of this code must retain this
copyright notice, and modified files need to carry a notice indicating
that they have been altered from the originals.
'''
import numpy as np
def amplitude_damping_channel(gamma):
'''
Generates the amplitude damping channel.
'''
A1=np.array([[1,0],[0,np.sqrt(1-gamma)]])
A2=np.array([[0,np.sqrt(gamma)],[0,0]])
return [A1,A2] | 0 | 0 | 0 |
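# A quick check of the Kraus operators returned above: for any gamma in [0, 1]
# they must satisfy the completeness relation A1^† A1 + A2^† A2 = I; the import
# path below is inferred from the file location.
import numpy as np
from qutipy.channels.amplitude_damping_channel import amplitude_damping_channel

A1, A2 = amplitude_damping_channel(0.3)
assert np.allclose(A1.conj().T @ A1 + A2.conj().T @ A2, np.eye(2))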
8b63b4e72522669569bfe16afef4e18f0b84f63c | 152 | py | Python | prod.py | paramsingh/socially-awkward | 176f74dcc8c5797ee1f6ea6267963392250287dd | [
"MIT"
] | 7 | 2019-03-11T05:45:42.000Z | 2019-12-06T07:31:37.000Z | prod.py | adarshrana205/socially-awkward | 176f74dcc8c5797ee1f6ea6267963392250287dd | [
"MIT"
] | null | null | null | prod.py | adarshrana205/socially-awkward | 176f74dcc8c5797ee1f6ea6267963392250287dd | [
"MIT"
] | 2 | 2019-05-08T15:30:36.000Z | 2019-11-30T20:28:53.000Z | from social.db import create_tables, drop_tables
from social.webserver import create_app
if True:
drop_tables()
create_tables()
app = create_app()
| 19 | 48 | 0.789474 | from social.db import create_tables, drop_tables
from social.webserver import create_app
if True:
drop_tables()
create_tables()
app = create_app()
| 0 | 0 | 0 |
a4ce58e7e0a7a1c8d846dc8d519ee3df18e70778 | 1,451 | py | Python | muted/system/cmd_move.py | ann884511/mule | ddc3da77188fe25f346ba0c341a83cd5ad94143e | [
"MIT"
] | 1 | 2019-06-06T07:21:09.000Z | 2019-06-06T07:21:09.000Z | muted/system/cmd_move.py | ann884511/mule | ddc3da77188fe25f346ba0c341a83cd5ad94143e | [
"MIT"
] | null | null | null | muted/system/cmd_move.py | ann884511/mule | ddc3da77188fe25f346ba0c341a83cd5ad94143e | [
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import List
from typing import Type
from component.exit import Exit
from component.role import Role
from component.room import Room
from event.event import Event
from message.message import Message
from system.channel import Channel
from logcat.logcat import LogCat
# cmd_move.py
| 28.45098 | 78 | 0.678153 |
from __future__ import annotations
from typing import List
from typing import Type
from component.exit import Exit
from component.role import Role
from component.room import Room
from event.event import Event
from message.message import Message
from system.channel import Channel
from logcat.logcat import LogCat
class CmdMove:
@LogCat.log_func
def __init__(self, servant: Type[Handler]):
self._servant = servant
servant.on(Event.CMD_EAST, self._on_cmd_move)
servant.on(Event.CMD_ABBR_EAST, self._on_cmd_move)
servant.on(Event.CMD_NORTH, self._on_cmd_move)
servant.on(Event.CMD_ABBR_NORTH, self._on_cmd_move)
servant.on(Event.CMD_SOUTH, self._on_cmd_move)
servant.on(Event.CMD_ABBR_SOUTH, self._on_cmd_move)
servant.on(Event.CMD_WEST, self._on_cmd_move)
servant.on(Event.CMD_ABBR_WEST, self._on_cmd_move)
@LogCat.log_func
def _on_cmd_move(
self, e: Event, entity: str = '', args: List[str] = []
) -> None:
role = Role.instance(entity)
exit = Exit.instance(role.room)
room = exit.to(e.type[0])
if None != room:
Room.instance(role.room).leave(entity)
Room.instance(room).enter(entity)
role.enter(room)
Event.trigger(Event(Event.CMD_LOOK, self._servant, entity=entity))
else:
Channel.to_role(entity, Message.TEXT, f' 這裡沒有出口')
# cmd_move.py
| 1,019 | 88 | 23 |
d244fa1afe08ae18b5f6d9733ac478ea0a0df46a | 11,670 | py | Python | cupcake2/ice2/IceUtils2.py | fanglu01/cDNA_Cupcake | 60f56dc291661a2b84e40b64d469fba658889c34 | [
"BSD-3-Clause-Clear"
] | 1 | 2018-09-21T06:20:50.000Z | 2018-09-21T06:20:50.000Z | cupcake2/ice2/IceUtils2.py | fanglu01/cDNA_Cupcake | 60f56dc291661a2b84e40b64d469fba658889c34 | [
"BSD-3-Clause-Clear"
] | null | null | null | cupcake2/ice2/IceUtils2.py | fanglu01/cDNA_Cupcake | 60f56dc291661a2b84e40b64d469fba658889c34 | [
"BSD-3-Clause-Clear"
] | null | null | null |
import numpy as np
from pbtranscript.Utils import execute
from pbtranscript.ice.IceUtils import alignment_has_large_nonmatch, HitItem, eval_blasr_alignment
from pbtranscript.io import LA4IceReader, BLASRM5Reader
gcon2_py = "ice_pbdagcon2.py"
def sanity_check_gcon2():
"""Sanity check gcon."""
cmd = gcon2_py + " --help"
errmsg = gcon2_py + " is not installed."
execute(cmd=cmd, errmsg=errmsg)
return gcon2_py
def alignment_missed_start_end_less_than_threshold(r, max_missed_start, max_missed_end,
full_missed_start, full_missed_end):
"""
Check that whichever is the shorter one, must be close to fully mapped
(subject to full_missed_start/end)
and the longer one is allowed to have more missed start/end
(subject to max_missed_start/end)
"""
assert max_missed_start >= full_missed_start and max_missed_end >= full_missed_end
# which ever is the shorter one, must be fully mapped
missed_start_1 = r.qStart
missed_start_2 = r.sStart
missed_end_1 = (r.qLength - r.qEnd)
missed_end_2 = (r.sLength - r.sEnd)
if r.qLength > r.sLength:
missed_start_1, missed_start_2 = missed_start_2, missed_start_1
missed_end_1, missed_end_2 = missed_end_2, missed_end_1
# the smaller one must be close to fully mapped
if (missed_start_1 > full_missed_start) or \
(missed_end_1 > full_missed_end) or \
(missed_start_2 > max_missed_start) or \
(missed_end_2 > max_missed_end):
return False
return True
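# Worked example with hypothetical numbers and the default thresholds used by
# blasr_against_ref2 below (max_missed_start=200, max_missed_end=50,
# full_missed_start=50, full_missed_end=30): a query of length 1000 aligned over
# qStart=10..qEnd=995 against a longer subject of length 2000 aligned over
# sStart=150..sEnd=1980 keeps the shorter sequence (the query) within the strict
# full_missed_* limits (10 <= 50 and 5 <= 30) and the longer one within the looser
# max_missed_* limits (150 <= 200 and 20 <= 50), so the function returns True.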
def blasr_against_ref2(output_filename, is_FL, sID_starts_with_c,
qver_get_func, qvmean_get_func, qv_prob_threshold=.03,
ece_penalty=1, ece_min_len=20, same_strand_only=True,
max_missed_start=200, max_missed_end=50,
full_missed_start=50, full_missed_end=30):
"""
Excluding criteria:
(1) self hit
(2) opposite strand hit (should already be in the same orientation;
can override with <same_strand_only> set to False)
(3) less than 90% aligned or more than 50 bp missed
qver_get_func --- should be basQV.basQVcacher.get() or
.get_smoothed(), or can just pass in
                       lambda x, y: 1. to ignore QV
"""
with BLASRM5Reader(output_filename) as reader:
for r in reader:
missed_q = r.qStart + r.qLength - r.qEnd
missed_t = r.sStart + r.sLength - r.sEnd
if sID_starts_with_c:
# because all consensus should start with
# c<cluster_index>
assert r.sID.startswith('c')
if r.sID.find('/') > 0:
r.sID = r.sID.split('/')[0]
if r.sID.endswith('_ref'):
# probably c<cid>_ref
cID = int(r.sID[1:-4])
else:
cID = int(r.sID[1:])
else:
cID = r.sID
# self hit, useless!
# opposite strand not allowed!
if (cID == r.qID or (r.strand == '-' and same_strand_only)):
yield HitItem(qID=r.qID, cID=cID)
continue
# regardless if whether is full-length (is_FL)
# the query MUST be mapped fully (based on full_missed_start/end)
            if r.qStart > full_missed_start or (r.qLength-r.qEnd) > full_missed_end:
                yield HitItem(qID=r.qID, cID=cID)
                continue  # mirror daligner_against_ref2: avoid falling through and yielding twice
# full-length case: allow up to max_missed_start bp of 5' not aligned
# and max_missed_end bp of 3' not aligned
# non-full-length case: not really tested...don't use
if is_FL and not alignment_missed_start_end_less_than_threshold(r,\
max_missed_start, max_missed_end, full_missed_start, full_missed_end):
yield HitItem(qID=r.qID, cID=cID)
else:
cigar_str, ece_arr = eval_blasr_alignment(
record=r,
qver_get_func=qver_get_func,
qvmean_get_func=qvmean_get_func,
sID_starts_with_c=sID_starts_with_c,
qv_prob_threshold=qv_prob_threshold)
if alignment_has_large_nonmatch(ece_arr,
ece_penalty, ece_min_len):
yield HitItem(qID=r.qID, cID=cID)
else:
yield HitItem(qID=r.qID, cID=cID,
qStart=r.qStart, qEnd=r.qEnd,
missed_q=missed_q * 1. / r.qLength,
missed_t=missed_t * 1. / r.sLength,
fakecigar=cigar_str,
ece_arr=ece_arr)
def daligner_against_ref2(query_dazz_handler, target_dazz_handler, la4ice_filename,
is_FL, sID_starts_with_c,
qver_get_func, qvmean_get_func, qv_prob_threshold=.03,
ece_penalty=1, ece_min_len=20, same_strand_only=True, no_qv_or_aln_checking=False,
max_missed_start=200, max_missed_end=50,
full_missed_start=50, full_missed_end=30):
"""
Excluding criteria:
(1) self hit
(2) opposite strand hit (should already be in the same orientation;
can override with <same_strand_only> set to False)
(3) less than 90% aligned or more than 50 bp missed
Parameters:
query_dazz_handler - query dazz handler in DalignRunner
target_dazz_handler - target dazz handler in DalignRunner
la4ice_filename - la4ice output of DalignRunner
qver_get_func - returns a list of qvs of (read, qvname)
e.g. basQV.basQVcacher.get() or .get_smoothed()
qvmean_get_func - which returns mean QV of (read, qvname)
"""
for r in LA4IceReader(la4ice_filename):
missed_q = r.qStart + r.qLength - r.qEnd
missed_t = r.sStart + r.sLength - r.sEnd
r.qID = query_dazz_handler[r.qID].split(' ')[0]
r.sID = target_dazz_handler[r.sID].split(' ')[0]
if sID_starts_with_c:
# because all consensus should start with
# c<cluster_index>
assert r.sID.startswith('c')
if r.sID.find('/') > 0:
r.sID = r.sID.split('/')[0]
if r.sID.endswith('_ref'):
# probably c<cid>_ref
cID = int(r.sID[1:-4])
else:
cID = int(r.sID[1:])
else:
cID = r.sID
# self hit, useless!
# opposite strand not allowed!
if (cID == r.qID or (r.strand == '-' and same_strand_only)):
yield HitItem(qID=r.qID, cID=cID)
continue
# regardless if whether is full-length (is_FL)
# the query MUST be mapped fully (based on full_missed_start/end)
#print "r.qStart:", r.qID, r.sID, r.qStart, full_missed_start, (r.qLength-r.qEnd), full_missed_end, r.qStart > full_missed_start or (r.qLength-r.qEnd) > full_missed_end
if r.qStart > full_missed_start or (r.qLength-r.qEnd) > full_missed_end:
yield HitItem(qID=r.qID, cID=cID)
continue
# this is used for partial_uc/nFL reads only
# simply accepts hits from daligner for the nFL partial hits
# testing shows that it does not affect much the Quiver consensus calling
if no_qv_or_aln_checking:
yield HitItem(qID=r.qID, cID=cID,
qStart=r.qStart, qEnd=r.qEnd,
missed_q=missed_q * 1. / r.qLength,
missed_t=missed_t * 1. / r.sLength,
fakecigar=1,
ece_arr=1)
continue
# full-length case: allow up to 200bp of 5' not aligned
# and 50bp of 3' not aligned
if (is_FL and not alignment_missed_start_end_less_than_threshold(r, \
max_missed_start, max_missed_end, full_missed_start, full_missed_end)):
yield HitItem(qID=r.qID, cID=cID)
else:
cigar_str, ece_arr = eval_blasr_alignment(
record=r,
qver_get_func=qver_get_func,
sID_starts_with_c=sID_starts_with_c,
qv_prob_threshold=qv_prob_threshold,
qvmean_get_func=qvmean_get_func)
#else: # don't use QV, just look at alignment
if alignment_has_large_nonmatch(ece_arr, ece_penalty, ece_min_len):
yield HitItem(qID=r.qID, cID=cID)
else:
yield HitItem(qID=r.qID, cID=cID,
qStart=r.qStart, qEnd=r.qEnd,
missed_q=missed_q * 1. / r.qLength,
missed_t=missed_t * 1. / r.sLength,
fakecigar=cigar_str,
ece_arr=ece_arr)
def possible_merge2(r, ece_penalty, ece_min_len,
max_missed_start=200, max_missed_end=50,
full_missed_start=50, full_missed_end=30):
"""
r --- BLASRM5Record
Criteria:
(1) identity >= 90% and same strand
(2) check criteria for how much is allowed to differ on the
5' / 3' ends
Note: one must be fully mapped (allowing only a small portion to be unmapped)
while the other can have <max_missed_start>/<max_missed_end>
"""
if r.sID == r.qID or r.identity < 90 or r.strand == '-':
return False
if not alignment_missed_start_end_less_than_threshold(r, max_missed_start, max_missed_end,
full_missed_start, full_missed_end):
return False
arr = np.array([(x == '*') * 1 for x in r.alnStr])
if alignment_has_large_nonmatch(ece_arr=arr,
penalty=ece_penalty,
min_len=ece_min_len):
return False
return True
def cid_with_annotation2(cid, expected_acc=None):
"""Given a cluster id, return cluster id with human readable annotation.
e.g., c0 --> c0 isoform=c0
c0/89/3888 -> c0/89/3888 isoform=c0;full_length_coverage=89;isoform_length=3888;expected_accuracy=0.99
c0/f89p190/3888 -> c0/f89p190/3888 isoform=c0;full_length_coverage=89;non_full_length_coverage=190;isoform_length=3888;expected_accuracy=0.99
"""
fields = cid.split('/')
short_id, fl_coverage, nfl_coverage, seq_len = None, None, None, None
if len(fields) != 1 and len(fields) != 3:
raise ValueError("Not able to process isoform id: {cid}".format(cid=cid))
short_id = fields[0]
if len(fields) == 3:
seq_len = fields[2]
if "f" in fields[1]:
if "p" in fields[1]: # f89p190
fl_coverage = fields[1].split('p')[0][1:]
nfl_coverage = fields[1].split('p')[1]
else: # f89
fl_coverage = fields[1][1:]
else:
fl_coverage = fields[1]
annotations = ["isoform={short_id}".format(short_id=short_id)]
if fl_coverage is not None:
annotations.append("full_length_coverage={fl}".format(fl=fl_coverage))
if nfl_coverage is not None:
annotations.append("non_full_length_coverage={nfl}".format(nfl=nfl_coverage))
if seq_len is not None:
annotations.append("isoform_length={l}".format(l=seq_len))
if expected_acc is not None:
annotations.append("expected_accuracy={0:.3f}".format(expected_acc))
return "{cid} {annotation}".format(cid=cid, annotation=";".join(annotations)) | 42.904412 | 176 | 0.587147 |
| 0 | 0 | 0
9b54fcc5d7450ffdf85d29ea20f7756ca615e32d | 10,045 | py | Python | cvinspector/ml/plot.py | levanhieu-git/cv-inspector | d7e83732a688485587f795cc296a405639c5073d | [
"Apache-2.0"
] | 2 | 2021-02-20T23:45:38.000Z | 2021-03-07T01:47:45.000Z | cvinspector/ml/plot.py | UCI-Networking-Group/cv-inspector | 86334e1dce06b4530ebd22ef6d7a047fb728ed1a | [
"Apache-2.0"
] | null | null | null | cvinspector/ml/plot.py | UCI-Networking-Group/cv-inspector | 86334e1dce06b4530ebd22ef6d7a047fb728ed1a | [
"Apache-2.0"
] | 2 | 2021-02-20T23:28:06.000Z | 2021-11-29T12:33:29.000Z |
# Copyright (c) 2021 Hieu Le and the UCI Networking Group
# <https://athinagroup.eng.uci.edu>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import datetime
import json
import logging
from cvinspector.common.dommutation_utils import NODES_ADDED, NODES_REMOVED, \
ATTRIBUTE_CHANGED, TEXT_CHANGED, DOM_CONTENT_LOADED
from cvinspector.common.dommutation_utils import get_nodes_added_key, get_nodes_removed_key
from cvinspector.common.utils import ABP_BLOCKED_ELEMENT, ERR_BLOCKED_BY_CLIENT, JSON_DOMMUTATION_KEY, \
JSON_WEBREQUEST_KEY
from cvinspector.common.utils import ANTICV_ANNOTATION_PREFIX
logger = logging.getLogger(__name__)
# logger.setLevel("DEBUG")
TIME_KEY = "time"
TIME_KEY__WR = "requestTime"
| 38.049242 | 106 | 0.601294 |
# Copyright (c) 2021 Hieu Le and the UCI Networking Group
# <https://athinagroup.eng.uci.edu>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import datetime
import json
import logging
from cvinspector.common.dommutation_utils import NODES_ADDED, NODES_REMOVED, \
ATTRIBUTE_CHANGED, TEXT_CHANGED, DOM_CONTENT_LOADED
from cvinspector.common.dommutation_utils import get_nodes_added_key, get_nodes_removed_key
from cvinspector.common.utils import ABP_BLOCKED_ELEMENT, ERR_BLOCKED_BY_CLIENT, JSON_DOMMUTATION_KEY, \
JSON_WEBREQUEST_KEY
from cvinspector.common.utils import ANTICV_ANNOTATION_PREFIX
logger = logging.getLogger(__name__)
# logger.setLevel("DEBUG")
TIME_KEY = "time"
TIME_KEY__WR = "requestTime"
def auto_bin_time_series_csv(json_file_path,
json_file_path__wr,
output_file_name,
time_step_ms,
max_seconds_later=None,
auto_determine_time=True):
def _by_time(event):
return event[TIME_KEY]
def _by_time_wr(event):
return event["event"][TIME_KEY__WR]
def _get_range_key(event, range_keys, event_key=TIME_KEY):
event_time = event.get(event_key)
if event_time is None:
event_time = event["event"][event_key]
for index, range_step in enumerate(range_keys):
next_time_index = index + 1
if next_time_index == len(range_keys):
# we are at the last step, so only check if event_time is larger
if range_step <= event_time:
return range_step
else:
# if within current step and next step
if range_step <= event_time <= range_keys[next_time_index]:
return range_step
def _write_header(csv_writer):
header = [
"bin_norm", "Date", "blocked", "web_req_blocked", "elem_blocked",
"snippet_blocked", "nodes_added", "nodes_removed",
"attribute_changed", "text_changed", "dom_content_loaded",
"iframe_src_changed", "iframe_blocked", "total_changes"
]
csv_writer.writerow(header)
output_file_opened = open(output_file_name + ".csv", 'w')
csvwriter = csv.writer(output_file_opened)
_write_header(csvwriter)
dom_json_content = None
wr_json_content = None
# read in DOM Mutation file
with open(json_file_path, 'r') as json_file:
dom_json_content = json.load(json_file)
try:
dom_events = dom_json_content[JSON_DOMMUTATION_KEY]
except KeyError:
logger.debug("DOM JSON has no content")
return
dom_events_filtered = [x for x in dom_events if TIME_KEY in x]
# read in WebRequest file
with open(json_file_path__wr, 'r') as json_file__wr:
wr_json_content = json.load(json_file__wr)
wr_events = []
try:
wr_events = wr_json_content[JSON_WEBREQUEST_KEY]
except KeyError:
logger.debug("WR has no content")
    wr_events_filtered = [
        x for x in wr_events if TIME_KEY__WR in x.get("event", {})
    ]
# sort by time
dom_events_filtered.sort(key=_by_time)
# sort by time
wr_events_filtered.sort(key=_by_time_wr)
# bin by time step
    first_time = 0  # default to zero just in case there are no events
    last_time = 0   # likewise, so the clamping below never hits an unbound name
    first_time__wr = 0
if len(dom_events_filtered) > 0:
first_time = dom_events_filtered[0][TIME_KEY]
last_time = dom_events_filtered[-1][TIME_KEY]
if len(wr_events_filtered) > 0:
first_time__wr = wr_events_filtered[0]["event"][TIME_KEY__WR]
# if first_time__wr is smaller and not zero
if first_time__wr != 0 and first_time > first_time__wr:
first_time = first_time__wr
if max_seconds_later is not None and not auto_determine_time:
last_time = first_time + (max_seconds_later * 1000)
    else:
        # add extra 2 seconds
        last_time = last_time + (2 * 1000)
        if max_seconds_later is not None:  # guard: the parameter defaults to None
            last_time_based_on_max = first_time + (max_seconds_later * 1000)
            if last_time > last_time_based_on_max:
                last_time = last_time_based_on_max
range_keys = list(range(int(first_time), int(last_time), time_step_ms))
# build dict to hold range_key --> [events]
events_binned = dict()
for k in range_keys:
events_binned.setdefault(k, [])
for event in dom_events_filtered:
event_key = _get_range_key(event, range_keys)
events_binned[event_key].append(event)
for event in wr_events_filtered:
event_key = _get_range_key(event, range_keys, event_key=TIME_KEY__WR)
events_binned[event_key].append(event)
verify_event_count = 0
bin_norm = 0
bin_with_time_step = 0
for bin_key in events_binned.keys():
bin_size = len(events_binned.get(bin_key))
event_time_iso = datetime.datetime.fromtimestamp(bin_key /
1000).isoformat()
verify_event_count += bin_size
block_count = 0
not_block_count = 0
attribute_changed_count = 0
iframe_src_attribute_changed_count = 0
iframe_blocked = 0
text_changed_count = 0
node_add_count = 0
node_remove_count = 0
wr_block_count = 0
elem_blocked_count = 0
snippet_blocked_count = 0
dom_content_loaded = 0 # we expect at most one event here
for event in events_binned.get(bin_key):
event_item = event.get("event")
if event_item is None:
continue
event_type = event.get("type")
if event_type == "event":
event_type = event_item.get("type")
elif event_type != "onErrorOccurred":
continue
if event_type is None:
continue
is_blocked = False
if event_type == DOM_CONTENT_LOADED:
dom_content_loaded += 1
if event_type == NODES_ADDED:
for key, defining_text, is_text_node, is_snippet_blocked in get_nodes_added_key(
event_item):
# snippet blocks are considerd to be a node added, so count it only as a snippet block
if is_snippet_blocked:
is_blocked = True
snippet_blocked_count += 1
elif is_text_node:
text_changed_count += 1
else:
node_add_count += 1
if event_type == NODES_REMOVED:
for key, defining_text, is_text_node, _ in get_nodes_removed_key(
event_item):
if is_text_node:
text_changed_count += 1
else:
node_remove_count += 1
if event_type == ATTRIBUTE_CHANGED:
target_type = event_item["targetType"] or ""
target_type = target_type.lower()
# we ignore events that we purposely made to mark hidden elements
if ANTICV_ANNOTATION_PREFIX in event_item["attribute"]:
continue
# here we don't count the block event as a real attribute change
if event_item["attribute"] == ABP_BLOCKED_ELEMENT:
is_blocked = True
elem_blocked_count += 1
if "iframe" in target_type:
iframe_blocked += 1
else:
new_value = event_item["newValue"] or ""
# mark as iframe_src_attribute_changed event as well
if event_item["attribute"] == "src" and \
"iframe" in target_type and len(new_value) > 0:
iframe_src_attribute_changed_count += 1
attribute_changed_count += 1
if event_type == TEXT_CHANGED:
text_changed_count += 1
if event_type == "onErrorOccurred":
details = event_item.get("details")
if details:
try:
details_json = json.loads(details)
if ERR_BLOCKED_BY_CLIENT in details_json.get("error"):
is_blocked = True
wr_block_count += 1
else:
continue
except Exception as e:
logger.debug("Could not parse details json")
logger.debug(e)
continue
else:
continue
# keep track of blocked or not
if is_blocked:
block_count += 1
else:
not_block_count += 1
total_changes = node_add_count + node_remove_count + attribute_changed_count + text_changed_count
# write row
csvwriter.writerow([
bin_norm, event_time_iso, block_count, wr_block_count,
elem_blocked_count, snippet_blocked_count, node_add_count,
node_remove_count, attribute_changed_count, text_changed_count,
dom_content_loaded, iframe_src_attribute_changed_count,
iframe_blocked, total_changes
])
# update bin_norm
bin_norm += time_step_ms
bin_with_time_step += time_step_ms
# close file
output_file_opened.close()
| 8,783 | 0 | 23 |
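# Editor's sketch: auto_bin_time_series_csv assigns each event to the
# fixed-width time bin whose start it falls in. The same rule in isolation
# (bin_for is my name, not part of the module):
def bin_for(timestamp_ms, first_ms, step_ms):
    # Snap the timestamp down to the start of its bin.
    return first_ms + ((timestamp_ms - first_ms) // step_ms) * step_ms

assert bin_for(1250, 1000, 100) == 1200
assert bin_for(999, 0, 250) == 750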
41d7e5553a035c100c44ea647d52f055658d3c3c | 3,534 | py | Python | models/gaussian_mixture.py | ketozhang/statistical-methods-on-sne-ia-luminosity-evolution | 868c34eef7612375bec9c535c108240b57aedf40 | [
"MIT"
] | null | null | null | models/gaussian_mixture.py | ketozhang/statistical-methods-on-sne-ia-luminosity-evolution | 868c34eef7612375bec9c535c108240b57aedf40 | [
"MIT"
] | null | null | null | models/gaussian_mixture.py | ketozhang/statistical-methods-on-sne-ia-luminosity-evolution | 868c34eef7612375bec9c535c108240b57aedf40 | [
"MIT"
] | null | null | null |
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn import mixture
from tqdm import tqdm
# def save(self):
# assert self.gmms is not None, "No results can be saved before fit is ran."
# # # Save GMM object
# # with self.results_fpath.open("wb") as f:
# # pickle.dump(self.gmms, f)
# # print(f"Saved successful {self.results_fpath}")
# # Save GMM params
# params = self.get_params()
# params.to_csv(self.params_fpath, index=False)
# print(f"Saved successful {self.params_fpath}")
# def load(self, fpath=None):
# # Load GMM object
# fpath = fpath or self.results_fpath
# with fpath.open("rb") as f:
# self.gmms = pickle.load(f)
# def get_results(self):
# return self.gmms
# def get_params(self):
# params = {
# "y": [],
# "mu_x": [],
# "sigma_x": [],
# "weights_x": []
# }
# for y, gmm in self.gmms.items():
# params["y"].append(y)
# params["mu_x"].append(gmm.means_)
# params["sigma_x"].append(gmm.covariances_)
# params["weights_x"].append(gmm.weights_)
# params["mu_x"] = np.array(params["mu_x"]).reshape(-1, self.k)
# params["sigma_x"] = np.array(params["sigma_x"])
# params["weights_x"] = np.array(params["sigma_x"])
# params = pd.DataFrame(
# np.hstack(
# (params["y"], params["mu_x"], params["sigma_x"], params["weights_x"])),
# columns=(
# "y",
# [f"mean{i}" for i in range(1, self.k + 1)] +
# [f"sigma{i}" for i in range(1, self.k + 1)] +
# [f"weight{i}" for i in range(1, self.k + 1)]
# ),
# )
# return params
# params = np.genfromtxt(self.params_fpath, delimiter=",")
# if format == "numpy":
# return params
# elif format == "dataframe":
# k = self.k
# # mu_x = params[:, :k]
# # sigma_x = params[:, k:2*k]
# # weights_x = params[:, 2*k:3*k]
# # Save the GMM parameters
# params_df = pd.DataFrame(params, )
# return params_df
| 33.028037 | 110 | 0.526882 |
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn import mixture
from tqdm import tqdm
class GaussianMixture:
def __init__(self, name, k=3):
self.name = name
self.params_fpath = Path(
f"results/gmm_age_posterior_fit_params_{self.name}.csv")
self.results_fpath = Path(
f"results/gmm_age_posterior_fit_results_{self.name}.pkl")
self.k = k
    def fit_age_posteriors(self, age_df, **kwargs):
        # Fit one GMM per supernova: collect the age samples for each snid
        # and fit them independently, returning {snid: params}.
# snids = age_df.index.unique()
# results = {}
series = age_df["age"].groupby("snid").apply(list)
snid_gmm_params = {}
for snid, age_posterior in tqdm(series.iteritems(), total=len(series), desc="Fitting age posteriors"):
params = self.fit(age_posterior, **kwargs)
snid_gmm_params[snid] = params
return snid_gmm_params
    def fit(self, x, **kwargs):
        # Fit a k-component spherical GMM to the 1-D samples in x and
        # flatten the fitted means/sigmas/weights into a dict.
x = np.asarray(x)
gmm = mixture.GaussianMixture(
n_components=self.k, covariance_type="spherical", **kwargs)
gmm.fit(x.reshape(len(x), -1))
params = {}
for i in range(self.k):
params[f"mean{i}"] = gmm.means_.reshape(self.k)[i]
params[f"sigma{i}"] = np.sqrt(gmm.covariances_[i])
params[f"weight{i}"] = gmm.weights_[i]
return params
# def save(self):
# assert self.gmms is not None, "No results can be saved before fit is ran."
# # # Save GMM object
# # with self.results_fpath.open("wb") as f:
# # pickle.dump(self.gmms, f)
# # print(f"Saved successful {self.results_fpath}")
# # Save GMM params
# params = self.get_params()
# params.to_csv(self.params_fpath, index=False)
# print(f"Saved successful {self.params_fpath}")
# def load(self, fpath=None):
# # Load GMM object
# fpath = fpath or self.results_fpath
# with fpath.open("rb") as f:
# self.gmms = pickle.load(f)
# def get_results(self):
# return self.gmms
# def get_params(self):
# params = {
# "y": [],
# "mu_x": [],
# "sigma_x": [],
# "weights_x": []
# }
# for y, gmm in self.gmms.items():
# params["y"].append(y)
# params["mu_x"].append(gmm.means_)
# params["sigma_x"].append(gmm.covariances_)
# params["weights_x"].append(gmm.weights_)
# params["mu_x"] = np.array(params["mu_x"]).reshape(-1, self.k)
# params["sigma_x"] = np.array(params["sigma_x"])
# params["weights_x"] = np.array(params["sigma_x"])
# params = pd.DataFrame(
# np.hstack(
# (params["y"], params["mu_x"], params["sigma_x"], params["weights_x"])),
# columns=(
# "y",
# [f"mean{i}" for i in range(1, self.k + 1)] +
# [f"sigma{i}" for i in range(1, self.k + 1)] +
# [f"weight{i}" for i in range(1, self.k + 1)]
# ),
# )
# return params
# params = np.genfromtxt(self.params_fpath, delimiter=",")
# if format == "numpy":
# return params
# elif format == "dataframe":
# k = self.k
# # mu_x = params[:, :k]
# # sigma_x = params[:, k:2*k]
# # weights_x = params[:, 2*k:3*k]
# # Save the GMM parameters
# params_df = pd.DataFrame(params, )
# return params_df
| 1,115 | 1 | 103 |
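# Editor's usage sketch for the GaussianMixture wrapper above; the sample
# data is invented, but the calls match the class's signatures:
import numpy as np
gm = GaussianMixture(name='demo', k=2)
ages = np.concatenate([np.random.normal(3, 0.5, 500),
                       np.random.normal(8, 1.0, 500)])
params = gm.fit(ages)
# params maps 'mean0', 'sigma0', 'weight0', 'mean1', ... to fitted values.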
4aeaad0033430980c4e86745e3f3e093ed186f79 | 298 | py | Python | D2/frontend.py | slzjw26/learn_Pthon | 9c4053ec1ea4c32a01fa2658499d8e53a4a532f3 | [
"MIT"
] | null | null | null | D2/frontend.py | slzjw26/learn_Pthon | 9c4053ec1ea4c32a01fa2658499d8e53a4a532f3 | [
"MIT"
] | null | null | null | D2/frontend.py | slzjw26/learn_Pthon | 9c4053ec1ea4c32a01fa2658499d8e53a4a532f3 | [
"MIT"
] | null | null | null |
import socket
addr = ('127.0.0.1', 3001)
if __name__ == '__main__':
    run()
| 16.555556 | 60 | 0.54698 |
import socket
def run():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(addr)
while True:
msg = input('>>> ')
if not msg:
break
        sock.sendall(msg.encode() + b'\n')  # sendall: a bare send() may write only part of the buffer
addr = ('127.0.0.1', 3001)
if __name__ == '__main__':
    run()
| 193 | 0 | 23
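# Editor's sketch: a minimal line-oriented server to pair with the client
# above, which expects something listening on 127.0.0.1:3001 (names mine):
import socket

def serve(addr=('127.0.0.1', 3001)):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(addr)
    srv.listen(1)
    conn, _ = srv.accept()
    with conn:
        while True:
            data = conn.recv(4096)
            if not data:  # client closed the connection
                break
            print(data.decode().rstrip())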
e9f541c4372bbabb25cb45bef26a3567de31269b | 4,059 | py | Python | iac-aws-cdk/machine-learning/machine_learning/constructs/ml_lib_construct.py | alpha2phi/serverless | 23cc98a8de0970fa873ffc783ba1a72c7b54eecd | [
"MIT"
] | null | null | null | iac-aws-cdk/machine-learning/machine_learning/constructs/ml_lib_construct.py | alpha2phi/serverless | 23cc98a8de0970fa873ffc783ba1a72c7b54eecd | [
"MIT"
] | null | null | null | iac-aws-cdk/machine-learning/machine_learning/constructs/ml_lib_construct.py | alpha2phi/serverless | 23cc98a8de0970fa873ffc783ba1a72c7b54eecd | [
"MIT"
] | null | null | null |
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_iam as iam
from aws_cdk import core
class MLLib(core.Construct):
"""Install machine learning libraries."""
| 35.295652 | 171 | 0.515398 |
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_iam as iam
from aws_cdk import core
class MLLib(core.Construct):
"""Install machine learning libraries."""
def __init__(
self,
scope: core.Construct,
id: str,
vpc,
ec2_instance_type: str,
efs_share,
efs_sg,
**kwargs
) -> None:
super().__init__(scope, id, **kwargs)
# User data
user_data_part_01 = ("""#!/bin/bash
set -ex
EFS_MNT="/machine_learning"
ML_LIB_HOME="${EFS_MNT}"
EFS_USER_ID=1000
sudo yum -y install python3
sudo yum -y install amazon-efs-utils
sudo mkdir -p ${EFS_MNT}
"""
)
user_data_part_02 = f"sudo mount -t efs -o tls {efs_share.file_system_id}:/ /machine_learning"
user_data_part_03 = ("""
sudo mkdir -p ${ML_LIB_HOME}
cd ${ML_LIB_HOME}
sudo pip3 install --no-cache-dir -U -t ${ML_LIB_HOME}/libs https://download.pytorch.org/whl/cpu/torch-1.7.1%2Bcpu-cp37-cp37m-linux_x86_64.whl
sudo pip3 install --no-cache-dir -U -t ${ML_LIB_HOME}/libs https://download.pytorch.org/whl/cpu/torchvision-0.8.2%2Bcpu-cp37-cp37m-linux_x86_64.whl
sudo chown -R ${EFS_USER_ID}:${EFS_USER_ID} ${ML_LIB_HOME}
"""
)
user_data = user_data_part_01 + user_data_part_02 + user_data_part_03
# Get the latest AMI from AWS SSM
linux_ami = ec2.AmazonLinuxImage(
generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2)
# Get the latest AMI
amzn_linux_ami = ec2.MachineImage.latest_amazon_linux(
generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2)
# EC2 Instance Role
instance_role = iam.Role(
self, "ml-lib-instance-role",
assumed_by=iam.ServicePrincipal(
"ec2.amazonaws.com"),
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name(
"AmazonSSMManagedInstanceCore"
),
iam.ManagedPolicy.from_aws_managed_policy_name(
"AWSXRayDaemonWriteAccess"
)
]
)
# EC2 instance
self.ec2_instance = ec2.Instance(
self,
"mllib-ec2-instance",
instance_type=ec2.InstanceType(
instance_type_identifier=f"{ec2_instance_type}"),
instance_name="mllib-ec2-instance",
machine_image=amzn_linux_ami,
vpc=vpc,
vpc_subnets=ec2.SubnetSelection(
subnet_type=ec2.SubnetType.PUBLIC
),
block_devices=[
ec2.BlockDevice(
device_name="/dev/xvda",
volume=ec2.BlockDeviceVolume.ebs(
volume_size=10
)
)
],
role=instance_role,
user_data=ec2.UserData.custom(
user_data)
)
self.ec2_instance.add_security_group(efs_sg)
# Outputs
output_0 = core.CfnOutput(
self,
"ec2-instance-ip",
value=f"http://{self.ec2_instance.instance_private_ip}",
description=f"Private IP address of the server"
)
output_1 = core.CfnOutput(
self,
"ec2-instance-login-url",
value=(
f"https://console.aws.amazon.com/ec2/v2/home?region="
f"{core.Aws.REGION}"
f"#Instances:search="
f"{self.ec2_instance.instance_id}"
f";sort=instanceId"
),
description=f"Login to the instance using Systems Manager"
)
| 3,860 | 0 | 27 |
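# Editor's sketch of wiring MLLib into a CDK (v1) stack; vpc, efs_share and
# efs_sg stand for resources the surrounding app is assumed to create:
from aws_cdk import core

class MLStack(core.Stack):
    def __init__(self, scope, id, vpc, efs_share, efs_sg, **kwargs):
        super().__init__(scope, id, **kwargs)
        MLLib(self, 'ml-lib', vpc=vpc, ec2_instance_type='t3.large',
              efs_share=efs_share, efs_sg=efs_sg)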
b38a3e998daf6965786239516a3b3274009b5428 | 4,337 | py | Python | oom_lister.py | aebm/tools | 4c7f5acc857f34069c5fed855fbdb4f4fab421b5 | [
"BSD-2-Clause"
] | 1 | 2015-02-16T08:35:39.000Z | 2015-02-16T08:35:39.000Z | oom_lister.py | aebm/tools | 4c7f5acc857f34069c5fed855fbdb4f4fab421b5 | [
"BSD-2-Clause"
] | null | null | null | oom_lister.py | aebm/tools | 4c7f5acc857f34069c5fed855fbdb4f4fab421b5 | [
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# List the process ordered by oom score
from __future__ import print_function
import argparse
from codecs import decode
from operator import itemgetter
from os import listdir
from os.path import join
import sys
try:
from string import maketrans
except ImportError:
# we are using python3 so this is a str static method
maketrans = str.maketrans
PROC = '/proc'
SCORE = 'oom_score'
CMD = 'cmdline'
STATUS = 'status'
HEADERS = {
'pid': 'PID',
'score': 'SCORE',
'cmd': 'CMD',
}
if __name__ == '__main__':
main()
| 32.856061 | 78 | 0.613327 |
#!/usr/bin/env python
# List the process ordered by oom score
from __future__ import print_function
import argparse
from codecs import decode
from operator import itemgetter
from os import listdir
from os.path import join
import sys
try:
from string import maketrans
except ImportError:
# we are using python3 so this is a str static method
maketrans = str.maketrans
PROC = '/proc'
SCORE = 'oom_score'
CMD = 'cmdline'
STATUS = 'status'
HEADERS = {
'pid': 'PID',
'score': 'SCORE',
'cmd': 'CMD',
}
def print_vanished(pid):
print('PID {pid} vanished'.format(pid=pid), file=sys.stderr)
def has_vanished(elem):
return elem[1] is None or elem[2] is None
def get_decoded_str(s):
if not isinstance(s, str):
s = decode(s)
return s
def get_pid_info(pid):
thread_cmd = '[{thread_name}]'
score = None
cmd = None
# We don't want exceptions showing up
try:
# Had to split the with so it can work with older pythons
with open(join(PROC, pid, SCORE), 'rb') as f_s:
with open(join(PROC, pid, CMD), 'rb') as f_cmd:
score = f_s.readline()
# decode string so we can use translate string operation
cmd = get_decoded_str(f_cmd.readline())
if not cmd:
# a kernel thread?
with open(join(PROC, pid, STATUS), 'rb') as f_status:
_, thread_name = f_status.readline().rstrip().split()
# decode string so we can use translate string operation
thread_name = get_decoded_str(thread_name)
cmd = thread_cmd.format(thread_name=thread_name)
except IOError:
return (int(pid), None, None)
return (int(pid), int(score), cmd)
def get_lengths(elem):
pid, score, _ = elem
return(len(str(pid)), len(str(score)))
def printer(no_headers, pid_length, score_length):
trans_table = maketrans('\x00', ' ')
f_str = '{{pid:{pid}}} {{score:{score}}} {{cmd}}'.format(
pid=pid_length,
score=score_length)
def _printer(info):
try:
if not no_headers:
print(f_str.format(**HEADERS))
for pid, score, cmd in info:
# change null chars for spaces
cmd = cmd.translate(trans_table).rstrip()
print(f_str.format(pid=pid, score=score, cmd=cmd))
except IOError:
# If we are printing to a pipe and is closed swallow the exception
pass
return _printer
def main():
parser = argparse.ArgumentParser(
description='List processes ordered by oom_score')
parser.add_argument(
'-v',
action='store_true',
help='Verbose mode')
parser.add_argument(
'--no-headers',
action='store_true',
help='Don\'t print headers')
args = parser.parse_args()
if args.v:
print('Get pids from /proc', file=sys.stderr)
pids = (pid for pid in listdir(PROC) if pid.isdigit())
if args.v:
print('Get info from /proc', file=sys.stderr)
processes_info = [get_pid_info(pid) for pid in pids]
del pids
if args.v:
print('Removing stale process info', file=sys.stderr)
[print_vanished(e[0]) for e in processes_info if has_vanished(e)]
processes_info = [e for e in processes_info if not has_vanished(e)]
if args.v:
print('Calculating fields lengths for formatting', file=sys.stderr)
lengths = []
if not args.no_headers:
lengths = [(len(HEADERS['pid']), len(HEADERS['score']))]
else:
lengths = [(0, 0)]
if processes_info:
lengths.extend([get_lengths(elem) for elem in processes_info])
# see zip documentation on how to unzip a list
# https://docs.python.org/3/library/functions.html#zip
# for each tuple position the max value
# self trolling ^_^
    max_lengths = list(map(max, *lengths))  # materialize: a bare map object cannot be indexed on Python 3
if args.v:
print('Lengths are pid: {pid} score: {score}'.format(
pid=max_lengths[0],
score=max_lengths[1]),
file=sys.stderr)
f_printer = printer(args.no_headers, *max_lengths)
if args.v:
print('Printing results', file=sys.stderr)
f_printer(sorted(processes_info, key=itemgetter(1), reverse=True))
if __name__ == '__main__':
main()
| 3,615 | 0 | 161 |
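# Editor's sketch: the core read oom_lister performs for each pid, in
# isolation (Linux only; pid 1 is just an example):
from os.path import join
with open(join('/proc', '1', 'oom_score'), 'rb') as f:
    print(int(f.readline()))  # kernel "badness" score: higher = killed first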
a6da254ca4dedd3b588a47f333cdf7c050bbefa4 | 2,163 | py | Python | templet/__init__.py | a-sk/templet | 11d2232d90abd04b4fab8312a9dae47b7ebff2cc | [
"0BSD"
] | null | null | null | templet/__init__.py | a-sk/templet | 11d2232d90abd04b4fab8312a9dae47b7ebff2cc | [
"0BSD"
] | null | null | null | templet/__init__.py | a-sk/templet | 11d2232d90abd04b4fab8312a9dae47b7ebff2cc | [
"0BSD"
] | null | null | null |
import os
import jinja2
import shutil
from . import utils
from codecs import open
__all__ = ['handle_project']
def copy(src, dst):
"""Copy file or directory from src to dst"""
if os.path.isfile(src):
shutil.copy(src, dst)
elif os.path.isdir(src):
shutil.copytree(src, dst)
return dst
def expand_template(template_content, template_data):
"""Expand template using jinja2 template engine"""
return jinja2.Template(template_content).render(template_data)
def maybe_rename(src, template_data):
"""Rename file or directory if it's name contains expandable variables
Here we use Jinja2 {{%s}} syntax.
:return: bool. `True` if rename happend, `False` otherwise.
"""
new_path = expand_vars_in_file_name(src, template_data)
if new_path != src:
shutil.move(src, new_path)
return True
return False
def expand_vars_in_file(filepath, template_data, ignore_file_list):
"""Expand variables in file"""
if utils.match(filepath, ignore_file_list):
return
with open(filepath, encoding='utf8') as fp:
file_contents = expand_template(fp.read(), template_data)
with open(filepath, 'w', encoding='utf8') as f:
f.write(file_contents)
def expand_vars_in_file_name(filepath, template_data):
"""Expand variables in file/directory path"""
return expand_template(filepath, template_data)
def handle_project(src, dst, template_data, ignore_file_list):
"""Main templet library function, does all the work.
First copy template directory to current working path, renaming it
to `PROJECT_NAME`.
Then expand variables in directories names.
And in the end, expand variables in files names and it's content.
"""
copy(src, dst)
for root, dirs, files in os.walk(dst):
for d in dirs:
dirpath = os.path.join(root, d)
maybe_rename(dirpath, template_data)
for f in files:
filepath = os.path.join(root, f)
if os.path.isfile(filepath):
expand_vars_in_file(filepath, template_data, ignore_file_list)
maybe_rename(filepath, template_data)
| 30.9 | 78 | 0.684697 |
| 0 | 0 | 0
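# Editor's usage sketch for handle_project above; the paths and template
# data are invented for illustration:
handle_project(
    src='templates/python-package',
    dst='my_project',
    template_data={'PROJECT_NAME': 'my_project', 'author': 'me'},
    ignore_file_list=['*.png', '*.ico'],
)
# Every jinja2 '{{ ... }}' occurrence in directory names, file names and
# file contents under dst gets expanded with template_data.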
35a00aa5d15733e03cb27d2e50c905e13d0a60ec | 3,496 | py | Python | ias/annotation/split_inputs_into_groups.py | olga-clarifai/clarifai-python-grpc | c1d45ea965f781de5ccf682b142049c7628d0480 | [
"Apache-2.0"
] | null | null | null | ias/annotation/split_inputs_into_groups.py | olga-clarifai/clarifai-python-grpc | c1d45ea965f781de5ccf682b142049c7628d0480 | [
"Apache-2.0"
] | null | null | null | ias/annotation/split_inputs_into_groups.py | olga-clarifai/clarifai-python-grpc | c1d45ea965f781de5ccf682b142049c7628d0480 | [
"Apache-2.0"
] | null | null | null |
import argparse
from tqdm import tqdm
import numpy as np
# Import in the Clarifai gRPC based objects needed
from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2
from google.protobuf.struct_pb2 import Struct
# Construct the communications channel and the object stub to call requests on.
channel = ClarifaiChannel.get_json_channel()
stub = service_pb2_grpc.V2Stub(channel)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Split inputs into groups for labeling.")
parser.add_argument('--api_key',
default='',
required=True,
help="API key of the application.")
parser.add_argument('--num_labelers',
default=20,
type=int,
required=True,
help="Total number of available labelers.")
parser.add_argument('--per_group',
default=5,
type=int,
help="Number of labelers per group")
args = parser.parse_args()
args.metadata = (('authorization', 'Key {}'.format(args.api_key)),)
    main(args)
| 30.4 | 89 | 0.659611 |
import argparse
from tqdm import tqdm
import numpy as np
# Import in the Clarifai gRPC based objects needed
from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2
from google.protobuf.struct_pb2 import Struct
# Construct the communications channel and the object stub to call requests on.
channel = ClarifaiChannel.get_json_channel()
stub = service_pb2_grpc.V2Stub(channel)
def process_response(response):
if response.status.code != status_code_pb2.SUCCESS:
print("There was an error with your request!")
print(f"\tDescription: {response.status.description}")
print(f"\tDetails: {response.status.details}")
raise Exception(f"Request failed, status code: {response.status.code}")
def get_input_ids(args):
print("Retrieving inputs...")
input_ids = []
last_id = ''
while True:
# Make request
req = service_pb2.StreamInputsRequest(per_page=1000, last_id=last_id)
response = stub.StreamInputs(req, metadata=args.metadata)
process_response(response)
# Process inputs
if len(response.inputs) == 0:
break
else:
for input in response.inputs:
input_ids.append(input.id)
# Set id for next stream
last_id = response.inputs[-1].id
print(f"Total of {len(input_ids)} inputs retrieved")
return input_ids
def split_into_groups(args, input_ids):
n_groups = args.num_labelers // args.per_group # TODO: consult Michael
split = np.array_split(input_ids, n_groups)
print(f"Inputs were split in {n_groups} groups")
return split
def add_groups_to_metadata(args, split):
for i, group in enumerate(split):
print(f"Processing group {i+1}...")
# Add group to metadata and patch each input in the group
for input_id in tqdm(group, total=len(group)):
input_metadata = Struct()
input_metadata.update({"group": str(i+1)})
response = stub.PatchInputs(
service_pb2.PatchInputsRequest(
action="merge",
inputs=[
resources_pb2.Input(
id=input_id,
data=resources_pb2.Data(metadata=input_metadata)
)
]
),
metadata=args.metadata
)
process_response(response)
def main(args):
print("----- Spliting inputs into groups for labeling task scheduling -----")
# Fetch ids of inputs in the app
input_ids = get_input_ids(args)
# Split inputs into groups depending on input parameters
split = split_into_groups(args, input_ids)
# Patch inputs to add groups to metadata
add_groups_to_metadata(args, split)
print("Done!")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Split inputs into groups for labeling.")
parser.add_argument('--api_key',
default='',
required=True,
help="API key of the application.")
parser.add_argument('--num_labelers',
default=20,
type=int,
required=True,
help="Total number of available labelers.")
parser.add_argument('--per_group',
default=5,
type=int,
help="Number of labelers per group")
args = parser.parse_args()
args.metadata = (('authorization', 'Key {}'.format(args.api_key)),)
    main(args)
| 2,056 | 0 | 115
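# Editor's sketch of the grouping rule in split_into_groups: with 20
# labelers and 5 per group, inputs land in 4 near-equal chunks:
import numpy as np
ids = ['input-{}'.format(i) for i in range(10)]
groups = np.array_split(ids, 20 // 5)
print([len(g) for g in groups])  # [3, 3, 2, 2]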
86f63653c9bbf365713034022c26402915ef2e28 | 268 | py | Python | 1 ano/estatisticas/tabela-corelacao-list.py | ThiagoPereira232/tecnico-informatica | 6b55ecf34501b38052943acf1b37074e3472ce6e | [
"MIT"
] | 1 | 2021-09-24T16:26:04.000Z | 2021-09-24T16:26:04.000Z | 1 ano/estatisticas/tabela-corelacao-list.py | ThiagoPereira232/tecnico-informatica | 6b55ecf34501b38052943acf1b37074e3472ce6e | [
"MIT"
] | null | null | null | 1 ano/estatisticas/tabela-corelacao-list.py | ThiagoPereira232/tecnico-informatica | 6b55ecf34501b38052943acf1b37074e3472ce6e | [
"MIT"
] | null | null | null |
sx = sy = sxy = sx2 = sy2 = xy = c = 0
vx = [0,0,0]
vy = [0,0,0]
while(c<3):
vx[c] = int(input("Digite um valor de x: "))
c+=1
for x in vx:
sx += x
c=0
while(c<3):
vy[c] = int(input("Digite um valor de y: "))
c+=1
for y in vy:
sy += y
    xy = x*y
| 14.888889 | 48 | 0.473881 |
| 0 | 0 | 0
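# Editor's sketch: the unused accumulators above (sxy, sx2, sy2) point at a
# Pearson correlation table; the finished computation would look like this
# (pearson_r is my name):
def pearson_r(xs, ys):
    n = len(xs)
    sx, sy = sum(xs), sum(ys)
    sxy = sum(x * y for x, y in zip(xs, ys))
    sx2 = sum(x * x for x in xs)
    sy2 = sum(y * y for y in ys)
    num = n * sxy - sx * sy
    den = ((n * sx2 - sx ** 2) * (n * sy2 - sy ** 2)) ** 0.5
    return num / den

print(pearson_r([1, 2, 3], [2, 4, 6]))  # 1.0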
f02bdc1535be765b32c70280f5165c80663629f2 | 160 | py | Python | dted/records/__init__.py | bbonenfant/dted | 5ae5055b6da65ce728bb282daa96bc0c58a65779 | [
"MIT"
] | 7 | 2021-07-02T00:06:33.000Z | 2021-12-22T17:32:14.000Z | dted/records/__init__.py | bbonenfant/dted | 5ae5055b6da65ce728bb282daa96bc0c58a65779 | [
"MIT"
] | 2 | 2021-11-04T16:57:52.000Z | 2022-02-21T20:58:39.000Z | dted/records/__init__.py | bbonenfant/dted | 5ae5055b6da65ce728bb282daa96bc0c58a65779 | [
"MIT"
] | null | null | null | """ Simplified imports for the records module. """
from .acc import AccuracyDescription
from .dsi import DataSetIdentification
from .uhl import UserHeaderLabel
| 32 | 50 | 0.8125 | """ Simplified imports for the records module. """
from .acc import AccuracyDescription
from .dsi import DataSetIdentification
from .uhl import UserHeaderLabel
| 0 | 0 | 0 |
3e1a09246d8adbd4f02a1b19b24c26d39101c349 | 2,001 | py | Python | bigfastapi/subscription.py | danieliheonu/bigfastapi | 483554776195c9f38bb46ba719b613360eda1028 | [
"MIT"
] | 1 | 2022-03-20T21:46:05.000Z | 2022-03-20T21:46:05.000Z | bigfastapi/subscription.py | danieliheonu/bigfastapi | 483554776195c9f38bb46ba719b613360eda1028 | [
"MIT"
] | null | null | null | bigfastapi/subscription.py | danieliheonu/bigfastapi | 483554776195c9f38bb46ba719b613360eda1028 | [
"MIT"
] | null | null | null |
from typing import List
from bigfastapi import db
from uuid import uuid4
from bigfastapi.models import subscription_model
from bigfastapi.schemas import subscription_schema
from bigfastapi.db.database import get_db
import sqlalchemy.orm as _orm
import fastapi as _fastapi
from fastapi import APIRouter
from fastapi.responses import JSONResponse
from fastapi.param_functions import Depends
from fastapi import APIRouter, HTTPException, status
import fastapi
app = APIRouter(tags=["Subscription"])
@app.get("/subscriptions/{org_Id}", response_model=subscription_schema.ResponseList)
@app.post('/subscriptions', response_model=subscription_schema.ResponseSingle)
# ///
# SERVICE LAYER
| 35.732143 | 101 | 0.768616 |
from typing import List
from bigfastapi import db
from uuid import uuid4
from bigfastapi.models import subscription_model
from bigfastapi.schemas import subscription_schema
from bigfastapi.db.database import get_db
import sqlalchemy.orm as _orm
import fastapi as _fastapi
from fastapi import APIRouter
from fastapi.responses import JSONResponse
from fastapi.param_functions import Depends
from fastapi import APIRouter, HTTPException, status
import fastapi
app = APIRouter(tags=["Subscription"])
@app.get("/subscriptions/{org_Id}", response_model=subscription_schema.ResponseList)
async def indexSubPerOrg(org_Id: str, db: _orm.Session = _fastapi.Depends(get_db)):
subs = await getSubs(org_Id, db)
return buildSuccessRess(list(map(subscription_schema.SubcriptionBase.from_orm, subs)),
'subscription list', True)
@app.post('/subscriptions', response_model=subscription_schema.ResponseSingle)
async def subscribe(
subscription: subscription_schema._SubBAse,
db: _orm.Session = _fastapi.Depends(get_db)
):
createdSubscrcription = await createSub(subscription, db)
return buildSuccessRess(createdSubscrcription, 'subscription', False)
# ///
# SERVICE LAYER
async def getSubs(org_Id: str, db: _orm.Session):
return db.query(subscription_model.Subscription).filter(
subscription_model.Subscription.organization_id == org_Id).all()
async def createSub(sub: subscription_schema.CreateSubscription, db: _orm.Session):
subObject = subscription_model.Subscription(
id=uuid4().hex, plan=sub.plan, organization_id=sub.organization_id, is_paid=True)
db.add(subObject)
db.commit()
db.refresh(subObject)
return subObject
def buildSuccessRess(resData, type: str, isList: bool):
if isList:
return subscription_schema.ResponseList(status='success', resource_type=type, data=resData)
else:
return subscription_schema.ResponseSingle(status='success', resource_type=type, data=resData)
| 1,197 | 0 | 113 |
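# Editor's sketch: exercising the subscription router with FastAPI's test
# client; assumes the project's database is configured, and 'org-1' is a
# placeholder organization id:
from fastapi import FastAPI
from fastapi.testclient import TestClient

api = FastAPI()
api.include_router(app)  # `app` is the APIRouter defined above
client = TestClient(api)
response = client.get('/subscriptions/org-1')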
439ab55cf003d81c6a3b2776e36bb7731408f67e | 2,106 | py | Python | test/ledger_test.py | zachgarwood/credit-card | 8b6973fe2c58f54a1e135cb455a81228bbece4f5 | [
"MIT"
] | null | null | null | test/ledger_test.py | zachgarwood/credit-card | 8b6973fe2c58f54a1e135cb455a81228bbece4f5 | [
"MIT"
] | null | null | null | test/ledger_test.py | zachgarwood/credit-card | 8b6973fe2c58f54a1e135cb455a81228bbece4f5 | [
"MIT"
] | null | null | null |
from credit_card.ledger import Ledger
import luhn
import unittest
| 36.947368 | 77 | 0.57075 |
from credit_card.ledger import Ledger
import luhn
import unittest
class LedgerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.valid_account_number = luhn.append('12345')
def setUp(self):
self.ledger = Ledger()
def test_commands_exist(self):
for command_name in ['add', 'charge', 'credit']:
self.assertTrue(hasattr(Ledger, command_name),
'Ledger class does not contain ' + command_name)
def test_add(self):
self.ledger.add('Test', self.valid_account_number, '$1000')
self.assertIn('Test', self.ledger.accounts)
self.assertDictEqual(
{'balance': 0, 'limit': 1000, 'is_valid': True},
self.ledger.accounts['Test'],
'The limit or number validation was not set and/or '
'initialized to 0'
)
self.ledger.add('Invalid account', '12345', '$0')
self.assertFalse(self.ledger.accounts['Invalid account']['is_valid'])
def test_charge(self):
self.ledger.add('Test', self.valid_account_number, '$1000')
self.ledger.charge('Test', '$500')
self.assertEqual(500,
self.ledger.accounts['Test']['balance'],
'The account balance was not increased')
self.ledger.charge('Test', '$501')
self.assertNotEqual(1001,
self.ledger.accounts['Test']['balance'],
'The account balance increased beyond the limit')
def test_credit(self):
self.ledger.add('Test', self.valid_account_number, '$1000')
self.ledger.charge('Test', '$500')
self.ledger.credit('Test', '$100')
self.assertEqual(400, # 500 - 100
self.ledger.accounts['Test']['balance'],
'The account balance was not decreased')
self.ledger.credit('Test', '$401')
self.assertEqual(-1,
self.ledger.accounts['Test']['balance'],
'The account balance did not go into the negative')
| 1,822 | 194 | 23 |
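# Editor's sketch of the luhn helpers the tests rely on: append() adds a
# check digit, verify() validates a full number:
import luhn
number = luhn.append('12345')  # '123455'
assert luhn.verify(number)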
adafa7d9696042ee8c4a949ce2940d28ea46a07c | 28,305 | py | Python | app/views/user.py | viaict/viaduct | 1faec7e123c3fae7e8dbe1a354ad27b68f2a8cef | [
"MIT"
] | 11 | 2015-04-23T21:57:56.000Z | 2019-04-28T12:48:58.000Z | app/views/user.py | viaict/viaduct | 1faec7e123c3fae7e8dbe1a354ad27b68f2a8cef | [
"MIT"
] | 1 | 2016-10-05T14:10:58.000Z | 2016-10-05T14:12:23.000Z | app/views/user.py | viaict/viaduct | 1faec7e123c3fae7e8dbe1a354ad27b68f2a8cef | [
"MIT"
] | 3 | 2016-10-05T14:00:42.000Z | 2019-01-16T14:33:43.000Z |
# -*- coding: utf-8 -*-
import re
from csv import writer
from datetime import datetime
from flask import Blueprint
from flask import flash, redirect, render_template, request, url_for, abort, \
session
from flask_babel import _
from flask_login import current_user, login_user, logout_user, login_required
from io import StringIO
from app import db, login_manager, get_locale
from app.decorators import require_role, response_headers
from app.exceptions.base import ResourceNotFoundException, \
AuthorizationException, ValidationException, BusinessRuleException
from app.forms import init_form
from app.forms.user import (EditUserForm, EditUserInfoForm, SignUpForm,
SignInForm, ResetPasswordForm, RequestPassword,
ChangePasswordForm, EditUvALinkingForm)
from app.models.activity import Activity
from app.models.custom_form import CustomFormResult, CustomForm
from app.models.education import Education
from app.models.user import User
from app.roles import Roles
from app.service import password_reset_service, user_service, \
role_service, file_service, saml_service
from app.utils import copernica
from app.utils.google import HttpError
from app.utils.user import UserAPI
blueprint = Blueprint('user', __name__)
@login_manager.user_loader
def view_single(user_id):
"""
View user for admins and edit for admins and users.
User is passed based on routes below.
"""
user = user_service.get_user_by_id(user_id)
user.avatar = UserAPI.avatar(user)
user.groups = UserAPI.get_groups_for_user_id(user)
user.groups_amount = len(user.groups)
if "gravatar" in user.avatar:
user.avatar = user.avatar + "&s=341"
    # Get all activity entries from these forms, order by start_time of
# activity.
activities = Activity.query.join(CustomForm).join(CustomFormResult). \
filter(CustomFormResult.owner_id == user_id and
CustomForm.id == CustomFormResult.form_id and
Activity.form_id == CustomForm.id)
user.activities_amount = activities.count()
new_activities = activities \
.filter(Activity.end_time > datetime.today()).distinct() \
.order_by(Activity.start_time)
old_activities = activities \
.filter(Activity.end_time < datetime.today()).distinct() \
.order_by(Activity.start_time.desc())
can_write = role_service.user_has_role(current_user, Roles.USER_WRITE)
return render_template('user/view_single.htm', user=user,
new_activities=new_activities,
old_activities=old_activities,
can_write=can_write)
@blueprint.route('/users/view/self/', methods=['GET'])
@login_required
@blueprint.route('/users/view/<int:user_id>/', methods=['GET'])
@require_role(Roles.USER_READ)
@login_required
@blueprint.route('/users/remove_avatar/<int:user_id>/', methods=['DELETE'])
@login_required
@require_role(Roles.USER_WRITE)
def edit(user_id, form_cls):
"""
Create user for admins and edit for admins and users.
User and form type are passed based on routes below.
"""
if user_id:
user = user_service.get_user_by_id(user_id)
user.avatar = user_service.user_has_avatar(user_id)
else:
user = User()
form = init_form(form_cls, obj=user)
form.new_user = user.id == 0
# Add education.
educations = Education.query.all()
form.education_id.choices = [(e.id, e.name) for e in educations]
if form.validate_on_submit():
# Only new users need a unique email.
query = User.query.filter(User.email == form.email.data)
if user_id:
query = query.filter(User.id != user_id)
if query.count() > 0:
            flash(_('A user with this e-mail address already exists.'),
'danger')
return edit_page()
# Because the user model is constructed to have an ID of 0 when it is
        # initialized without an email address provided, reinitialize the user
        # with a default string for email address, so that it will get a unique
# ID when committed to the database.
if not user_id:
user = User('_')
# TODO Move this into the service call.
try:
user.update_email(form.email.data.strip())
except HttpError as e:
if e.resp.status == 404:
flash(_('According to Google this email does not exist. '
'Please use an email that does.'), 'danger')
return edit_page()
raise e
# Note: student id is updated separately.
user.first_name = form.first_name.data.strip()
user.last_name = form.last_name.data.strip()
user.locale = form.locale.data
if role_service.user_has_role(current_user, Roles.USER_WRITE):
user.has_paid = form.has_paid.data
user.honorary_member = form.honorary_member.data
user.favourer = form.favourer.data
user.disabled = form.disabled.data
user.alumnus = form.alumnus.data
user.education_id = form.education_id.data
user.birth_date = form.birth_date.data
user.study_start = form.study_start.data
user.receive_information = form.receive_information.data
user.phone_nr = form.phone_nr.data.strip()
user.address = form.address.data.strip()
user.zip = form.zip.data.strip()
user.city = form.city.data.strip()
user.country = form.country.data.strip()
db.session.add(user)
db.session.commit()
avatar = request.files.get('avatar')
if avatar:
user_service.set_avatar(user.id, avatar)
if user_id:
copernica.update_user(user)
            flash(_('Profile successfully updated'))
else:
copernica.update_user(user, subscribe=True)
            flash(_('Profile successfully created'))
if current_user.id == user_id:
return redirect(url_for('user.view_single_self'))
else:
return redirect(url_for('user.view_single_user', user_id=user.id))
return edit_page()
@blueprint.route('/users/edit/<int:user_id>/student-id-linking',
methods=['GET', 'POST'])
@login_required
@require_role(Roles.USER_WRITE)
@blueprint.route('/users/edit/self/', methods=['GET', 'POST'])
@login_required
@blueprint.route('/users/create/', methods=['GET', 'POST'])
@blueprint.route('/users/edit/<int:user_id>', methods=['GET', 'POST'])
@login_required
@require_role(Roles.USER_WRITE)
@blueprint.route('/sign-up/', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
@blueprint.route('/sign-up/manual/', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
@blueprint.route('/sign-up/process-saml-response/', methods=['GET', 'POST'])
@saml_service.ensure_data_cleared
@blueprint.route('/sign-in/', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
@blueprint.route('/sign-in/process-saml-response/', methods=['GET'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
@blueprint.route('/sign-in/confirm-student-id/', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
@blueprint.route('/sign-out/')
@blueprint.route('/process-account-linking')
@saml_service.ensure_data_cleared
@blueprint.route('/request_password/', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
def request_password():
"""Create a ticket and send a email with link to reset_password page."""
if current_user.is_authenticated:
return redirect(url_for('user.view_single_self'))
form = RequestPassword(request.form)
if form.validate_on_submit():
try:
password_reset_service.create_password_ticket(form.email.data)
flash(_('An email has been sent to %(email)s with further '
'instructions.', email=form.email.data), 'success')
return redirect(url_for('home.home'))
except ResourceNotFoundException:
flash(_('%(email)s is unknown to our system.',
email=form.email.data), 'danger')
return render_template('user/request_password.htm', form=form)
@blueprint.route('/reset_password/<string:hash_>', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
def reset_password(hash_):
"""
    Reset form consisting of two fields, password and password_repeat.
    Checks whether the hash in the URL is found in the database and the
    timestamp has not expired.
"""
try:
ticket = password_reset_service.get_valid_ticket(hash_)
except ResourceNotFoundException:
flash(_('No valid ticket found'), 'danger')
return redirect(url_for('user.request_password'))
form = ResetPasswordForm(request.form)
if form.validate_on_submit():
password_reset_service.reset_password(ticket, form.password.data)
flash(_('Your password has been updated.'), 'success')
return redirect(url_for('user.sign_in'))
return render_template('user/reset_password.htm', form=form)
@blueprint.route("/users/<int:user_id>/password/", methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
@blueprint.route('/users/', methods=['GET'])
@require_role(Roles.USER_READ)
@blueprint.route('/users/export', methods=['GET'])
@require_role(Roles.USER_READ)
@blueprint.route('/users/avatar/<int:user_id>/', methods=['GET'])
@login_required
| 36.103316 | 79 | 0.634446 | # -*- coding: utf-8 -*-
import re
from csv import writer
from datetime import datetime
from flask import Blueprint
from flask import flash, redirect, render_template, request, url_for, abort, \
session
from flask_babel import _
from flask_login import current_user, login_user, logout_user, login_required
from io import StringIO
from app import db, login_manager, get_locale
from app.decorators import require_role, response_headers
from app.exceptions.base import ResourceNotFoundException, \
AuthorizationException, ValidationException, BusinessRuleException
from app.forms import init_form
from app.forms.user import (EditUserForm, EditUserInfoForm, SignUpForm,
SignInForm, ResetPasswordForm, RequestPassword,
ChangePasswordForm, EditUvALinkingForm)
from app.models.activity import Activity
from app.models.custom_form import CustomFormResult, CustomForm
from app.models.education import Education
from app.models.user import User
from app.roles import Roles
from app.service import password_reset_service, user_service, \
role_service, file_service, saml_service
from app.utils import copernica
from app.utils.google import HttpError
from app.utils.user import UserAPI
blueprint = Blueprint('user', __name__)
@login_manager.user_loader
def load_user(user_id):
# The hook used by the login manager to get the user from the database by
# user ID.
return user_service.get_user_by_id(user_id)
def view_single(user_id):
"""
View user for admins and edit for admins and users.
User is passed based on routes below.
"""
user = user_service.get_user_by_id(user_id)
user.avatar = UserAPI.avatar(user)
user.groups = UserAPI.get_groups_for_user_id(user)
user.groups_amount = len(user.groups)
if "gravatar" in user.avatar:
user.avatar = user.avatar + "&s=341"
    # Get all activity entries from these forms, order by start_time of
# activity.
activities = Activity.query.join(CustomForm).join(CustomFormResult). \
filter(CustomFormResult.owner_id == user_id and
CustomForm.id == CustomFormResult.form_id and
Activity.form_id == CustomForm.id)
user.activities_amount = activities.count()
new_activities = activities \
.filter(Activity.end_time > datetime.today()).distinct() \
.order_by(Activity.start_time)
old_activities = activities \
.filter(Activity.end_time < datetime.today()).distinct() \
.order_by(Activity.start_time.desc())
can_write = role_service.user_has_role(current_user, Roles.USER_WRITE)
return render_template('user/view_single.htm', user=user,
new_activities=new_activities,
old_activities=old_activities,
can_write=can_write)
@blueprint.route('/users/view/self/', methods=['GET'])
@login_required
def view_single_self():
return view_single(current_user.id)
@blueprint.route('/users/view/<int:user_id>/', methods=['GET'])
@require_role(Roles.USER_READ)
@login_required
def view_single_user(user_id):
return view_single(user_id=user_id)
@blueprint.route('/users/remove_avatar/<int:user_id>/', methods=['DELETE'])
@login_required
@require_role(Roles.USER_WRITE)
def remove_avatar(user_id=None):
user = user_service.get_user_by_id(user_id)
if current_user.is_anonymous or current_user.id != user_id:
return "", 403
user_service.remove_avatar(user.id)
return "", 200
def edit(user_id, form_cls):
"""
Create user for admins and edit for admins and users.
User and form type are passed based on routes below.
"""
if user_id:
user = user_service.get_user_by_id(user_id)
user.avatar = user_service.user_has_avatar(user_id)
else:
user = User()
form = init_form(form_cls, obj=user)
form.new_user = user.id == 0
# Add education.
educations = Education.query.all()
form.education_id.choices = [(e.id, e.name) for e in educations]
def edit_page():
is_admin = role_service.user_has_role(current_user, Roles.USER_WRITE)
return render_template('user/edit.htm', form=form, user=user,
is_admin=is_admin)
if form.validate_on_submit():
# Only new users need a unique email.
query = User.query.filter(User.email == form.email.data)
if user_id:
query = query.filter(User.id != user_id)
if query.count() > 0:
            flash(_('A user with this e-mail address already exists.'),
'danger')
return edit_page()
# Because the user model is constructed to have an ID of 0 when it is
        # initialized without an email address provided, reinitialize the user
        # with a default string for email address, so that it will get a unique
# ID when committed to the database.
if not user_id:
user = User('_')
# TODO Move this into the service call.
try:
user.update_email(form.email.data.strip())
except HttpError as e:
if e.resp.status == 404:
flash(_('According to Google this email does not exist. '
'Please use an email that does.'), 'danger')
return edit_page()
raise e
# Note: student id is updated separately.
user.first_name = form.first_name.data.strip()
user.last_name = form.last_name.data.strip()
user.locale = form.locale.data
if role_service.user_has_role(current_user, Roles.USER_WRITE):
user.has_paid = form.has_paid.data
user.honorary_member = form.honorary_member.data
user.favourer = form.favourer.data
user.disabled = form.disabled.data
user.alumnus = form.alumnus.data
user.education_id = form.education_id.data
user.birth_date = form.birth_date.data
user.study_start = form.study_start.data
user.receive_information = form.receive_information.data
user.phone_nr = form.phone_nr.data.strip()
user.address = form.address.data.strip()
user.zip = form.zip.data.strip()
user.city = form.city.data.strip()
user.country = form.country.data.strip()
db.session.add(user)
db.session.commit()
avatar = request.files.get('avatar')
if avatar:
user_service.set_avatar(user.id, avatar)
if user_id:
copernica.update_user(user)
            flash(_('Profile successfully updated'))
else:
copernica.update_user(user, subscribe=True)
            flash(_('Profile successfully created'))
if current_user.id == user_id:
return redirect(url_for('user.view_single_self'))
else:
return redirect(url_for('user.view_single_user', user_id=user.id))
return edit_page()
@blueprint.route('/users/edit/<int:user_id>/student-id-linking',
methods=['GET', 'POST'])
@login_required
@require_role(Roles.USER_WRITE)
def edit_student_id_linking(user_id):
user = user_service.get_user_by_id(user_id)
form = EditUvALinkingForm(request.form, obj=user)
# Fix student_id_confirmed not being set...
if request.method == 'GET':
form.student_id_confirmed.data = user.student_id_confirmed
def edit_page():
return render_template('user/edit_student_id.htm',
user=user, form=form)
if form.validate_on_submit():
if not form.student_id.data:
user_service.remove_student_id(user)
elif form.student_id_confirmed.data:
other_user = user_service.find_user_by_student_id(
form.student_id.data)
if other_user is not None and other_user != user:
error = _('The UvA account corresponding with this student ID '
'is already linked to another user '
'(%(name)s - %(email)s). Please unlink the account '
'from the other user first before linking it '
'to this user.', name=other_user.name,
email=other_user.email)
form.student_id_confirmed.errors.append(error)
return edit_page()
user_service.set_confirmed_student_id(user, form.student_id.data)
else:
user_service.set_unconfirmed_student_id(user, form.student_id.data)
flash(_('Student ID information saved.'), 'success')
return redirect(url_for('.edit_user', user_id=user_id))
return edit_page()
@blueprint.route('/users/edit/self/', methods=['GET', 'POST'])
@login_required
def edit_self():
return edit(current_user.id, EditUserInfoForm)
@blueprint.route('/users/create/', methods=['GET', 'POST'])
@blueprint.route('/users/edit/<int:user_id>', methods=['GET', 'POST'])
@login_required
@require_role(Roles.USER_WRITE)
def edit_user(user_id=None):
return edit(user_id, EditUserForm)
@blueprint.route('/sign-up/', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
def sign_up():
return render_template('user/sign_up_chooser.htm')
@blueprint.route('/sign-up/manual/', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
def sign_up_manual():
# Redirect the user to the index page if he or she has been authenticated
# already.
if current_user.is_authenticated:
return redirect(url_for('home.home'))
form = SignUpForm(request.form)
# Add education.
educations = Education.query.all()
form.education_id.choices = [(e.id, e.name) for e in educations]
if form.validate_on_submit():
try:
user = user_service.register_new_user(
email=form.email.data,
password=form.password.data,
first_name=form.first_name.data,
last_name=form.last_name.data,
student_id=form.student_id.data,
education_id=form.education_id.data,
birth_date=form.birth_date.data,
study_start=form.study_start.data,
receive_information=form.receive_information.data,
phone_nr=form.phone_nr.data,
address=form.address.data,
zip_=form.zip.data,
city=form.city.data,
country=form.country.data,
locale=get_locale())
login_user(user)
            flash(_('Welcome %(name)s! Your profile has been successfully '
'created and you have been logged in!',
name=current_user.first_name), 'success')
return redirect(url_for('home.home'))
except BusinessRuleException:
flash(_('A user with this e-mail address already exists'),
'danger')
return render_template('user/sign_up.htm', form=form)
@blueprint.route('/sign-up/process-saml-response/', methods=['GET', 'POST'])
@saml_service.ensure_data_cleared
def sign_up_saml_response():
redir_url = saml_service.get_redirect_url(url_for('home.home'))
# Redirect the user to the index page if he or she has been authenticated
# already.
if current_user.is_authenticated:
# End the sign up session when it is still there somehow
if saml_service.sign_up_session_active():
saml_service.end_sign_up_session()
return redirect(redir_url)
if saml_service.sign_up_session_active():
# Delete the old sign up session when
# the user re-authenticates
if saml_service.user_is_authenticated():
saml_service.end_sign_up_session()
# Otherwise, refresh the timeout timestamp of the session
else:
saml_service.update_sign_up_session_timestamp()
form = SignUpForm(request.form)
# Add education.
educations = Education.query.all()
form.education_id.choices = [(e.id, e.name) for e in educations]
if not saml_service.sign_up_session_active():
if not saml_service.user_is_authenticated():
flash(_('Authentication failed. Please try again.'), 'danger')
return redirect(redir_url)
if not saml_service.user_is_student():
flash(_('You must authenticate with a student '
'UvA account to register.'), 'danger')
return redirect(redir_url)
if saml_service.uid_is_linked_to_other_user():
flash(_('There is already an account linked to this UvA account. '
'If you are sure that this is a mistake please send '
'an email to the board.'), 'danger')
return redirect(redir_url)
# Start a new sign up session and pre-fill the form
saml_service.start_sign_up_session()
saml_service.fill_sign_up_form_with_saml_attributes(
form)
# When we encounter a GET request but a session is already active,
# this means that the user did a refresh without submitting the form.
# We redirect him/her to the SAML sign up, since otherwise all
# pre-filled data would be gone.
elif request.method == 'GET':
return redirect(url_for('saml.sign_up'))
else:
# Make sure that it is not possible to change the student id
form.student_id.data = \
saml_service.get_sign_up_session_linking_student_id()
if form.validate_on_submit():
try:
user = user_service.register_new_user(
email=form.email.data,
password=form.password.data,
first_name=form.first_name.data,
last_name=form.last_name.data,
student_id=form.student_id.data,
education_id=form.education_id.data,
birth_date=form.birth_date.data,
study_start=form.study_start.data,
receive_information=form.receive_information.data,
phone_nr=form.phone_nr.data,
address=form.address.data,
zip_=form.zip.data,
city=form.city.data,
country=form.country.data,
locale=get_locale(),
link_student_id=True)
login_user(user)
saml_service.end_sign_up_session()
            flash(_('Welcome %(name)s! Your profile has been successfully '
'created and you have been logged in!',
name=current_user.first_name), 'success')
return redirect(redir_url)
except BusinessRuleException:
flash(_('A user with this e-mail address already exists'),
'danger')
return render_template('user/sign_up.htm', form=form,
disable_student_id=True)
@blueprint.route('/sign-in/', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
def sign_in():
# Redirect the user to the index page if he or she has been authenticated
# already.
if current_user.is_authenticated:
return redirect(url_for('home.home'))
form = SignInForm(request.form)
if form.validate_on_submit():
try:
user = user_service.get_user_by_login(form.email.data,
form.password.data)
# Notify the login manager that the user has been signed in.
login_user(user)
next_ = request.args.get("next", '')
if next_ and next_.startswith("/"):
return redirect(next_)
# If referer is empty for some reason (browser policy, user entered
# address in address bar, etc.), use empty string
referer = request.headers.get('Referer', '')
denied = (re.match(
r'(?:https?://[^/]+)%s$' % (url_for('user.sign_in')),
referer) is not None)
denied_from = session.get('denied_from')
if not denied:
if referer:
return redirect(referer)
elif denied_from:
return redirect(denied_from)
return redirect(url_for('home.home'))
except ResourceNotFoundException:
flash(_(
'It appears that this account does not exist. Try again, or '
'contact the website administration at '
'ict (at) svia (dot) nl.'))
except AuthorizationException:
flash(_('Your account has been disabled, you are not allowed '
'to log in'), 'danger')
except ValidationException:
flash(_('The password you entered appears to be incorrect.'),
'danger')
return render_template('user/sign_in.htm', form=form,
show_uvanetid_login=True)
@blueprint.route('/sign-in/process-saml-response/', methods=['GET'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
def sign_in_saml_response():
has_redirected = False
redir_url = saml_service.get_redirect_url(url_for('home.home'))
try:
# Redirect the user to the index page if he or she has been
# authenticated already.
if current_user.is_authenticated:
return redirect(redir_url)
if not saml_service.user_is_authenticated():
flash(_('Authentication failed. Please try again.'), 'danger')
return redirect(redir_url)
try:
user = saml_service.get_user_by_uid(needs_confirmed=False)
if user.student_id_confirmed:
login_user(user)
else:
has_redirected = True
return redirect(url_for('user.sign_in_confirm_student_id'))
except (ResourceNotFoundException, ValidationException):
flash(_('There is no via account linked to this UvA account. '
'On this page you can create a new via account that '
'is linked to your UvA account.'))
has_redirected = True
return redirect(url_for('user.sign_up_saml_response'))
return redirect(redir_url)
finally:
# Only clear the SAML data when we did not redirect to the sign up page
if not has_redirected:
saml_service.clear_saml_data()
@blueprint.route('/sign-in/confirm-student-id/', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
def sign_in_confirm_student_id():
redir_url = saml_service.get_redirect_url(url_for('home.home'))
# Redirect the user if he or she has been authenticated already.
if current_user.is_authenticated:
return redirect(redir_url)
if not saml_service.user_is_authenticated():
flash(_('Authentication failed. Please try again.'), 'danger')
return redirect(redir_url)
student_id = saml_service.get_uid_from_attributes()
try:
users = user_service.get_all_users_with_unconfirmed_student_id(
student_id)
except ResourceNotFoundException:
saml_service.clear_saml_data()
return redirect(url_for('user.sign_in'))
form = SignInForm(request.form)
if form.validate_on_submit():
try:
user = user_service.get_user_by_login(form.email.data,
form.password.data)
if user.student_id != student_id:
flash(_('This account\'s student ID does not match '
'student ID \'%(student_id)s\'',
student_id=student_id), 'danger')
else:
user_service.set_confirmed_student_id(user, student_id)
flash(_('Your account is now linked to this UvA account.'),
'success')
# Notify the login manager that the user has been signed in.
login_user(user)
saml_service.clear_saml_data()
return redirect(redir_url)
except ResourceNotFoundException:
flash(_(
'It appears that this account does not exist. Try again, '
'or contact the website administration at '
'ict (at) svia (dot) nl.'))
except AuthorizationException:
flash(_('Your account has been disabled, you are not allowed '
'to log in'), 'danger')
except ValidationException:
flash(_('The password you entered appears to be incorrect.'),
'danger')
if len(users) == 1:
user = users[0]
[local_part, domain_part] = user.email.split('@')
local_part_hidden = re.sub(r'(?<=.{2}).(?=.{2})', '*', local_part)
domain_part_hidden = re.sub(r'(?<=.{2}).(?=.{3})', '*', domain_part)
email_hidden = '{}@{}'.format(local_part_hidden, domain_part_hidden)
flash(_('The student ID \'%(student_id)s\' corresponds with via '
'account %(email)s, but is not yet confirmed. Please login '
'with your email address and password to prove that you own '
'this account and to confirm your student ID.',
student_id=student_id, email=email_hidden))
else:
flash(_('The student ID \'%(student_id)s\' corresponds with '
'multiple via accounts, but is not yet confirmed. '
'Please login with your email address and password to prove '
'that you own one of these accounts and '
'to confirm your student ID.', student_id=student_id))
return render_template('user/sign_in.htm', form=form,
show_uvanetid_login=False)
@blueprint.route('/sign-out/')
def sign_out():
# Notify the login manager that the user has been signed out.
logout_user()
referer = request.headers.get('Referer')
if referer:
return redirect(referer)
return redirect(url_for('home.home'))
@blueprint.route('/process-account-linking')
@saml_service.ensure_data_cleared
def process_account_linking_saml_response():
redir_url = saml_service.get_redirect_url(url_for('home.home'))
# Check whether a user is linking his/her own account
    # or someone is linking another account
linking_current_user = saml_service.is_linking_user_current_user()
if not current_user.is_authenticated:
if linking_current_user:
flash(_('You need to be logged in to link your account.'),
'danger')
else:
flash(_('You need to be logged in to link an account.'), 'danger')
return redirect(redir_url)
if not saml_service.user_is_authenticated():
flash(_('Authentication failed. Please try again.'), 'danger')
return redirect(redir_url)
if linking_current_user:
try:
saml_service.link_uid_to_user(current_user)
flash(_('Your account is now linked to this UvA account.'),
'success')
except BusinessRuleException:
flash(_('There is already an account linked to this UvA account. '
'If you are sure that this is a mistake please send '
'an email to the board.'), 'danger')
else:
try:
link_user = saml_service.get_linking_user()
saml_service.link_uid_to_user(link_user)
flash(_('The account is now linked to this UvA account.'),
'success')
except BusinessRuleException:
flash(_('There is already an account linked to this UvA account.'),
'danger')
except ResourceNotFoundException:
# Should not happen normally
flash(_('Could not find the user to link this UvA account to.'),
'danger')
return redirect(redir_url)
@blueprint.route('/request_password/', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
def request_password():
"""Create a ticket and send a email with link to reset_password page."""
if current_user.is_authenticated:
return redirect(url_for('user.view_single_self'))
form = RequestPassword(request.form)
if form.validate_on_submit():
try:
password_reset_service.create_password_ticket(form.email.data)
flash(_('An email has been sent to %(email)s with further '
'instructions.', email=form.email.data), 'success')
return redirect(url_for('home.home'))
except ResourceNotFoundException:
flash(_('%(email)s is unknown to our system.',
email=form.email.data), 'danger')
return render_template('user/request_password.htm', form=form)
@blueprint.route('/reset_password/<string:hash_>', methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
def reset_password(hash_):
"""
    Reset form consisting of two fields, password and password_repeat.
    Checks whether the hash in the URL is found in the database and the
    timestamp has not expired.
"""
try:
ticket = password_reset_service.get_valid_ticket(hash_)
except ResourceNotFoundException:
flash(_('No valid ticket found'), 'danger')
return redirect(url_for('user.request_password'))
form = ResetPasswordForm(request.form)
if form.validate_on_submit():
password_reset_service.reset_password(ticket, form.password.data)
flash(_('Your password has been updated.'), 'success')
return redirect(url_for('user.sign_in'))
return render_template('user/reset_password.htm', form=form)
@blueprint.route("/users/<int:user_id>/password/", methods=['GET', 'POST'])
@response_headers({"X-Frame-Options": "SAMEORIGIN"})
def change_password(user_id):
if (user_id is not None and current_user.id != user_id and
not role_service.user_has_role(current_user, Roles.USER_WRITE)):
abort(403)
form = ChangePasswordForm()
if form.validate_on_submit():
if user_service.validate_password(current_user,
form.current_password.data):
user_service.set_password(current_user.id,
form.password.data)
flash(_("Your password has successfully been changed."))
return redirect(url_for("home.home"))
else:
form.current_password.errors.append(
_("Your current password does not match."))
return render_template("user/change_password.htm", form=form)
@blueprint.route('/users/', methods=['GET'])
@require_role(Roles.USER_READ)
def view():
return render_template('user/view.htm')
@blueprint.route('/users/export', methods=['GET'])
@require_role(Roles.USER_READ)
def user_export():
users = User.query.all()
si = StringIO()
cw = writer(si)
cw.writerow([c.name for c in User.__mapper__.columns])
for user in users:
cw.writerow([getattr(user, c.name) for c in User.__mapper__.columns])
return si.getvalue().strip('\r\n')
@blueprint.route('/users/avatar/<int:user_id>/', methods=['GET'])
@login_required
def view_avatar(user_id=None):
can_read = False
# Unpaid members cannot view other avatars
if current_user.id != user_id and not current_user.has_paid:
return abort(403)
# A user can always view his own avatar
if current_user.id == user_id:
can_read = True
# group rights
if role_service.user_has_role(current_user, Roles.USER_READ) \
or role_service.user_has_role(current_user, Roles.USER_WRITE) \
or role_service.user_has_role(current_user, Roles.ACTIVITY_WRITE):
can_read = True
if not can_read:
return abort(403)
if not user_service.user_has_avatar(user_id):
return abort(404)
user = user_service.get_user_by_id(user_id)
avatar_file = file_service.get_file_by_id(user.avatar_file_id)
fn = 'user_avatar_' + str(user.id)
content = file_service.get_file_content(avatar_file)
headers = file_service.get_file_content_headers(
avatar_file, display_name=fn)
return content, headers
| 18,236 | 0 | 445 |
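Aside: sign_in_confirm_student_id above masks the account e-mail with two
lookaround regexes. A stdlib-only illustration (the sample address is made up):

import re

local_part, domain_part = 'johndoe@example.com'.split('@')
# Mask every character that has at least 2 characters before it and at least
# 2 (local part) or 3 (domain part) characters after it.
local_hidden = re.sub(r'(?<=.{2}).(?=.{2})', '*', local_part)
domain_hidden = re.sub(r'(?<=.{2}).(?=.{3})', '*', domain_part)
print('{}@{}'.format(local_hidden, domain_hidden))  # jo***oe@ex******com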
7cd721416725ef1208462ff24b2c9f5e109b8a42 | 2,163 | py | Python | ua_model/MapFromWtoT.py | lh7326/UA_model | 009418dd94d3b7f9289a09858ba38cb5bb9129c5 | [
"Unlicense"
] | null | null | null | ua_model/MapFromWtoT.py | lh7326/UA_model | 009418dd94d3b7f9289a09858ba38cb5bb9129c5 | [
"Unlicense"
] | null | null | null | ua_model/MapFromWtoT.py | lh7326/UA_model | 009418dd94d3b7f9289a09858ba38cb5bb9129c5 | [
"Unlicense"
] | null | null | null | from ua_model.functions import z_minus_its_reciprocal
from ua_model.utils import validate_branch_point_positions
| 40.055556 | 114 | 0.661581 | from ua_model.functions import z_minus_its_reciprocal
from ua_model.utils import validate_branch_point_positions
class MapFromWtoT:
def __init__(self, t_0: float, t_in: float) -> None:
"""
Initialize the coordinate map object.
This object represents the function that maps from the complex w-plane
in which we will construct the model onto the complex t-plane, where t
is the square of the off-the-mass-shell momentum carried by the virtual photon.
        Note that we use the signature +---, so t > 0 corresponds to time-like momenta.
More precisely, the model is constructed on a four-sheeted Riemann surface, each sheet
having the same t-coordinates but different W-coordinates. The map from W to t maps the complex plane
onto itself in 4-to-1 fashion. The left half of the unit disk (center at the origin) is mapped
onto the first (physical) sheet, the right half of the disk is mapped onto the second sheet and
rest of the left half-plane and the rest of the right half-plane are mapped onto the remaining two sheets.
The mapping is:
t = t_0 - 4 * (t_in - t_0) / (W - 1/W)**2
Here t_0 is the lowest branch point and t_in (t_in > t_0) is a phenomenological constant
that determines the position of the second, effective, branch point.
Args:
t_0 (float): a positive number corresponding to the value of t at the lowest branch point
t_in (float): a positive number larger than t_0 (a phenomenological constant)
"""
self._validate_parameters(t_0, t_in)
self.t_0 = t_0
self.t_in = t_in
self._t_in_minus_t_0 = t_in - t_0
def __call__(self, w: complex) -> complex:
"""
Returns the value of t corresponding to the argument.
Args:
w (complex):
Returns:
complex
"""
return self.t_0 - 4.0 * self._t_in_minus_t_0 / (z_minus_its_reciprocal(w) ** 2)
@staticmethod
def _validate_parameters(t_0: float, t_in: float) -> None:
validate_branch_point_positions(t_0, t_in)
| 88 | 1,938 | 23 |
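Aside: a minimal numerical check of the W-to-t map above, inlining w - 1/w so
the sketch does not depend on the ua_model package (the branch-point values
are made up):

t_0, t_in = 0.078, 1.0  # t_in > t_0 > 0

def t_of_w(w: complex) -> complex:
    # t = t_0 - 4 * (t_in - t_0) / (w - 1/w)**2
    return t_0 - 4 * (t_in - t_0) / (w - 1 / w) ** 2

print(t_of_w(1e-9))  # ~ t_0: as w -> 0, (w - 1/w)**2 -> infinity
print(t_of_w(1j))    # = t_in: (i - 1/i)**2 = (2i)**2 = -4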
3c7cdbfcd83c35c91f14b26208ed5f97476086be | 186 | py | Python | apps.py | tobiasbartel/servicium-machine_manager | 1f4e88afd26ed31b1b4264cb0928fe9bd08033c5 | [
"MIT"
] | null | null | null | apps.py | tobiasbartel/servicium-machine_manager | 1f4e88afd26ed31b1b4264cb0928fe9bd08033c5 | [
"MIT"
] | null | null | null | apps.py | tobiasbartel/servicium-machine_manager | 1f4e88afd26ed31b1b4264cb0928fe9bd08033c5 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.apps import AppConfig
| 20.666667 | 40 | 0.790323 | from __future__ import unicode_literals
from django.apps import AppConfig
class MachineManagerConfig(AppConfig):
name = 'machine_manager'
verbose_name = 'The Machine Manager'
| 0 | 87 | 23 |
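Aside: an AppConfig like the one above only takes effect once the app is
registered in the project settings; a typical entry would look like the
following (hypothetical settings module, not part of this repo):

INSTALLED_APPS = [
    'django.contrib.admin',
    'machine_manager.apps.MachineManagerConfig',  # points at the class above
]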
527cdfc1040b4f82213159107c8b458fd0c8c948 | 7,280 | py | Python | perturbations/perturbations.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | perturbations/perturbations.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | perturbations/perturbations.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Introduces differentiation via perturbations.
Example of usage:
@perturbed
def sign_or(x, axis=-1):
s = tf.cast((tf.sign(x) + 1) / 2.0, dtype=tf.bool)
result = tf.math.reduce_any(s, axis=axis)
return tf.cast(result, dtype=x.dtype) * 2.0 - 1.0
Then sign_or is differentiable (unlike what it seems).
It is possible to specify the parameters of the perturbations using:
@perturbed(num_samples=1000, sigma=0.1, noise='gumbel')
...
The decorator can also be used directly as a function, for example:
soft_argsort = perturbed(tf.argsort, num_samples=200, sigma=0.01)
"""
import functools
from typing import Tuple
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
_GUMBEL = 'gumbel'
_NORMAL = 'normal'
SUPPORTED_NOISES = (_GUMBEL, _NORMAL)
def sample_noise_with_gradients(
noise, shape):
"""Samples a noise tensor according to a distribution with its gradient.
Args:
noise: (str) a type of supported noise distribution.
shape: tf.Tensor<int>, the shape of the tensor to sample.
Returns:
A tuple Tensor<float>[shape], Tensor<float>[shape] that corresponds to the
sampled noise and the gradient of log the underlying probability
distribution function. For instance, for a gaussian noise (normal), the
gradient is equal to the noise itself.
Raises:
ValueError in case the requested noise distribution is not supported.
See perturbations.SUPPORTED_NOISES for the list of supported distributions.
"""
if noise not in SUPPORTED_NOISES:
raise ValueError('{} noise is not supported. Use one of [{}]'.format(
noise, SUPPORTED_NOISES))
if noise == _GUMBEL:
sampler = tfp.distributions.Gumbel(0.0, 1.0)
samples = sampler.sample(shape)
gradients = 1 - tf.math.exp(-samples)
elif noise == _NORMAL:
sampler = tfp.distributions.Normal(0.0, 1.0)
samples = sampler.sample(shape)
gradients = samples
return samples, gradients
def perturbed(func=None,
num_samples = 1000,
sigma = 0.05,
noise = _NORMAL,
batched = True):
"""Turns a function into a differentiable one via perturbations.
The input function has to be the solution to a linear program for the trick
to work. For instance the maximum function, the logical operators or the ranks
can be expressed as solutions to some linear programs on some polytopes.
If this condition is violated though, the result would not hold and there is
no guarantee on the validity of the obtained gradients.
This function can be used directly or as a decorator.
Args:
func: the function to be turned into a perturbed and differentiable one.
Four I/O signatures for func are currently supported:
If batched is True,
(1) input [B, D1, ..., Dk], output [B, D1, ..., Dk], k >= 1
(2) input [B, D1, ..., Dk], output [B], k >= 1
If batched is False,
(3) input [D1, ..., Dk], output [D1, ..., Dk], k >= 1
(4) input [D1, ..., Dk], output [], k >= 1.
num_samples: the number of samples to use for the expectation computation.
sigma: the scale of the perturbation.
noise: a string representing the noise distribution to be used to sample
perturbations.
batched: whether inputs to the perturbed function will have a leading batch
dimension (True) or consist of a single example (False). Defaults to True.
Returns:
    a function that has the same signature as func but can be backpropagated.
"""
# This is a trick to have the decorator work both with and without arguments.
if func is None:
return functools.partial(
perturbed, num_samples=num_samples, sigma=sigma, noise=noise,
batched=batched)
@functools.wraps(func)
return wrapper
| 39.351351 | 80 | 0.675275 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Introduces differentiation via perturbations.
Example of usage:
@perturbed
def sign_or(x, axis=-1):
s = tf.cast((tf.sign(x) + 1) / 2.0, dtype=tf.bool)
result = tf.math.reduce_any(s, axis=axis)
return tf.cast(result, dtype=x.dtype) * 2.0 - 1.0
Then sign_or is differentiable (unlike what it seems).
It is possible to specify the parameters of the perturbations using:
@perturbed(num_samples=1000, sigma=0.1, noise='gumbel')
...
The decorator can also be used directly as a function, for example:
soft_argsort = perturbed(tf.argsort, num_samples=200, sigma=0.01)
"""
import functools
from typing import Tuple
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
_GUMBEL = 'gumbel'
_NORMAL = 'normal'
SUPPORTED_NOISES = (_GUMBEL, _NORMAL)
def sample_noise_with_gradients(
noise, shape):
"""Samples a noise tensor according to a distribution with its gradient.
Args:
noise: (str) a type of supported noise distribution.
shape: tf.Tensor<int>, the shape of the tensor to sample.
Returns:
A tuple Tensor<float>[shape], Tensor<float>[shape] that corresponds to the
sampled noise and the gradient of log the underlying probability
distribution function. For instance, for a gaussian noise (normal), the
gradient is equal to the noise itself.
Raises:
ValueError in case the requested noise distribution is not supported.
See perturbations.SUPPORTED_NOISES for the list of supported distributions.
"""
if noise not in SUPPORTED_NOISES:
raise ValueError('{} noise is not supported. Use one of [{}]'.format(
noise, SUPPORTED_NOISES))
if noise == _GUMBEL:
sampler = tfp.distributions.Gumbel(0.0, 1.0)
samples = sampler.sample(shape)
gradients = 1 - tf.math.exp(-samples)
elif noise == _NORMAL:
sampler = tfp.distributions.Normal(0.0, 1.0)
samples = sampler.sample(shape)
gradients = samples
return samples, gradients
def perturbed(func=None,
num_samples = 1000,
sigma = 0.05,
noise = _NORMAL,
batched = True):
"""Turns a function into a differentiable one via perturbations.
The input function has to be the solution to a linear program for the trick
to work. For instance the maximum function, the logical operators or the ranks
can be expressed as solutions to some linear programs on some polytopes.
If this condition is violated though, the result would not hold and there is
no guarantee on the validity of the obtained gradients.
This function can be used directly or as a decorator.
Args:
func: the function to be turned into a perturbed and differentiable one.
Four I/O signatures for func are currently supported:
If batched is True,
(1) input [B, D1, ..., Dk], output [B, D1, ..., Dk], k >= 1
(2) input [B, D1, ..., Dk], output [B], k >= 1
If batched is False,
(3) input [D1, ..., Dk], output [D1, ..., Dk], k >= 1
(4) input [D1, ..., Dk], output [], k >= 1.
num_samples: the number of samples to use for the expectation computation.
sigma: the scale of the perturbation.
noise: a string representing the noise distribution to be used to sample
perturbations.
batched: whether inputs to the perturbed function will have a leading batch
dimension (True) or consist of a single example (False). Defaults to True.
Returns:
    a function that has the same signature as func but can be backpropagated.
"""
# This is a trick to have the decorator work both with and without arguments.
if func is None:
return functools.partial(
perturbed, num_samples=num_samples, sigma=sigma, noise=noise,
batched=batched)
@functools.wraps(func)
def wrapper(input_tensor, *args, **kwargs):
@tf.custom_gradient
def forward(input_tensor, *args, **kwargs):
"""The differentiation by perturbation core routine."""
original_input_shape = tf.shape(input_tensor)
if batched:
tf.debugging.assert_rank_at_least(
input_tensor, 2, 'Batched inputs must have at least rank two')
else: # Adds dummy batch dimension internally.
input_tensor = tf.expand_dims(input_tensor, 0)
input_shape = tf.shape(input_tensor) # [B, D1, ... Dk], k >= 1
perturbed_input_shape = tf.concat([[num_samples], input_shape], axis=0)
noises = sample_noise_with_gradients(noise, perturbed_input_shape)
additive_noise, noise_gradient = tuple(
[tf.cast(noise, dtype=input_tensor.dtype) for noise in noises])
perturbed_input = tf.expand_dims(input_tensor, 0) + sigma * additive_noise
# [N, B, D1, ..., Dk] -> [NB, D1, ..., Dk].
flat_batch_dim_shape = tf.concat([[-1], input_shape[1:]], axis=0)
perturbed_input = tf.reshape(perturbed_input, flat_batch_dim_shape)
# Calls user-defined function in a perturbation agnostic manner.
perturbed_output = func(perturbed_input, *args, **kwargs)
# [NB, D1, ..., Dk] -> [N, B, D1, ..., Dk].
perturbed_input = tf.reshape(perturbed_input, perturbed_input_shape)
# Either
# (Default case): [NB, D1, ..., Dk] -> [N, B, D1, ..., Dk]
# or
# (Full-reduce case) [NB] -> [N, B]
perturbed_output_shape = tf.concat(
[[num_samples], [-1], tf.shape(perturbed_output)[1:]], axis=0)
perturbed_output = tf.reshape(perturbed_output, perturbed_output_shape)
forward_output = tf.reduce_mean(perturbed_output, axis=0)
if not batched: # Removes dummy batch dimension.
forward_output = forward_output[0]
def grad(dy):
"""Compute the gradient of the expectation via integration by parts."""
output, noise_grad = perturbed_output, noise_gradient
# Adds dummy feature/channel dimension internally.
if perturbed_input.shape.rank > output.shape.rank:
dy = tf.expand_dims(dy, axis=-1)
output = tf.expand_dims(output, axis=-1)
# Adds dummy batch dimension internally.
if not batched:
dy = tf.expand_dims(dy, axis=0)
# Flattens [D1, ..., Dk] to a single feat dim [D].
flatten = lambda t: tf.reshape(t, (tf.shape(t)[0], tf.shape(t)[1], -1))
dy = tf.reshape(dy, (tf.shape(dy)[0], -1)) # (B, D)
output = flatten(output) # (N, B, D)
noise_grad = flatten(noise_grad) # (N, B, D)
g = tf.einsum('nbd,nb->bd', noise_grad,
tf.einsum('nbd,bd->nb', output, dy))
g /= sigma * num_samples
return tf.reshape(g, original_input_shape)
return forward_output, grad
return forward(input_tensor, *args, **kwargs)
return wrapper
| 2,897 | 0 | 24 |
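Aside: a short sketch of the decorator above in use (requires TensorFlow 2;
the wrapped function and sample values are illustrative, not from the repo).
The gradient of a perturbed max spreads over the entries like a softmax
instead of a hard one-hot:

import tensorflow.compat.v2 as tf
from perturbations import perturbed  # assumed import path for the file above

pert_max = perturbed(lambda x: tf.math.reduce_max(x, axis=-1),
                     num_samples=1000, sigma=0.5)

x = tf.Variable([[1.0, 4.0, 2.0]])
with tf.GradientTape() as tape:
    y = tf.math.reduce_sum(pert_max(x))
print(tape.gradient(y, x))  # soft weights, largest on the arg-max entry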
017a6dbdee29e9ad4500fe1c410997633e58509c | 4,744 | py | Python | brainfuck.py | yvbbrjdr/brainfuck | eac66b19653bfd8d8172bd67dd95bc5b1c520358 | [
"BSD-3-Clause"
] | null | null | null | brainfuck.py | yvbbrjdr/brainfuck | eac66b19653bfd8d8172bd67dd95bc5b1c520358 | [
"BSD-3-Clause"
] | null | null | null | brainfuck.py | yvbbrjdr/brainfuck | eac66b19653bfd8d8172bd67dd95bc5b1c520358 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import readline
import sys
if __name__ == '__main__':
main()
| 27.264368 | 73 | 0.493255 | #!/usr/bin/env python3
import readline
import sys
class LinkedList(object):
def __init__(self, value=0, left=None, right=None):
self.value, self._left, self._right = value, left, right
@property
def left(self):
if not self._left:
self._left = LinkedList(right=self)
return self._left
@property
def right(self):
if not self._right:
self._right = LinkedList(left=self)
return self._right
def has_left(self):
return self._left != None
def has_right(self):
return self._right != None
class Environment(object):
def __init__(self):
self.reset()
def reset(self):
self.ptr, self.buf = LinkedList(), ''
def nontrivial(self):
return self.ptr.value != 0
def left(self):
self.ptr = self.ptr.left
def right(self):
self.ptr = self.ptr.right
def inc(self):
self.ptr.value = (self.ptr.value + 1) % 256
def dec(self):
self.ptr.value = (self.ptr.value + 255) % 256
def output(self):
print(chr(self.ptr.value), end='')
sys.stdout.flush()
def input(self):
try:
if not self.buf:
self.buf = input()
self.buf += '\n'
except EOFError:
self.buf = '\0'
self.ptr.value, self.buf = ord(self.buf[0]), self.buf[1:]
def dump(self):
temp = self.ptr
while temp.has_left():
temp = temp.left
while temp.has_right():
if temp == self.ptr:
print('[%d]' % (temp.value), end=' ')
else:
print(temp.value, end=' ')
temp = temp.right
if temp == self.ptr:
print('[%d]' % (temp.value))
else:
print(temp.value)
def read_line(start=True):
return input('bf> ' if start else ' ')
def complete_statement(statement):
nested = 0
for c in statement:
if c == '[':
nested += 1
elif c == ']':
nested -= 1
assert nested >= 0, 'invalid statement'
return nested == 0
def exec_brainfuck(statement, env):
i, length = 0, len(statement)
while i < length:
if statement[i] == '<':
env.left()
elif statement[i] == '>':
env.right()
elif statement[i] == '+':
env.inc()
elif statement[i] == '-':
env.dec()
elif statement[i] == '.':
env.output()
elif statement[i] == ',':
env.input()
elif statement[i] == '[':
nested = 1
for j in range(i + 1, length):
if statement[j] == '[':
nested += 1
elif statement[j] == ']':
nested -= 1
if not nested:
break
while env.nontrivial():
exec_brainfuck(statement[i + 1:j], env)
i = j
elif statement[i] == '`':
env.reset()
print('Environment reset.')
elif statement[i] == '?':
env.dump()
i += 1
def interactive(env=Environment()):
buf = ''
while True:
try:
buf += read_line(buf == '')
buf = ''.join([c for c in buf if c in '<>+-.,[]`?'])
if complete_statement(buf):
exec_brainfuck(buf, env)
buf = ''
except AssertionError as e:
print('BrainfuckError:', e)
buf = ''
except KeyboardInterrupt:
print('\nKeyboardInterrupt')
buf = ''
except EOFError:
print()
return
def main():
import argparse
parser = argparse.ArgumentParser(description='Brainfuck Interpreter')
parser.add_argument('-load', '-i', action='store_true',
help='run file interactively')
parser.add_argument('file', nargs='?',
type=str, default=None,
help='Brainfuck file to run')
args = parser.parse_args()
if args.file is not None:
with open(args.file) as f:
statement = f.read()
statement = ''.join([c for c in statement if c in '<>+-.,[]`?'])
env = Environment()
try:
assert complete_statement(statement), 'invalid statement'
exec_brainfuck(statement, env)
except AssertionError as e:
print('BrainfuckError:', e)
except KeyboardInterrupt:
print('\nKeyboardInterrupt')
except EOFError:
print()
if args.load:
interactive(env)
else:
interactive()
if __name__ == '__main__':
main()
| 4,053 | 171 | 430 |
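Aside: a non-interactive use of the interpreter above (the program text is an
illustration; arithmetic: 8 * 8 + 1 = 65 = ord('A')):

env = Environment()
exec_brainfuck('++++++++[>++++++++<-]>+.', env)  # prints: A
env.dump()                                       # prints: 0 [65]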
c619b957308a9704e7ea134f17078a37e3126ee2 | 1,278 | py | Python | cashews/utils/split_hash.py | AIGeneratedUsername/cashews | c4af2807053956f75662966a23e0af024c1a64a9 | [
"MIT"
] | null | null | null | cashews/utils/split_hash.py | AIGeneratedUsername/cashews | c4af2807053956f75662966a23e0af024c1a64a9 | [
"MIT"
] | null | null | null | cashews/utils/split_hash.py | AIGeneratedUsername/cashews | c4af2807053956f75662966a23e0af024c1a64a9 | [
"MIT"
] | null | null | null | import hashlib
import zlib
algorithms = [
zlib.crc32,
]
try:
import xxhash
except ImportError:
pass
else:
algorithms.extend(
[xxhash.xxh3_64_intdigest, xxhash.xxh64_intdigest, xxhash.xxh3_128_intdigest, xxhash.xxh32_intdigest]
)
def get_hashes(key: str, k: int, max_i: int):
"""
    return a set of bit indexes for the given value (key), e.g. {23, 45, 15}
"""
assert max_i >= k
indexes = set()
for i in range(k):
ii = i % len(algorithms)
value = algorithms[ii](f"{key}_{i}".encode()) % max_i
        # On collision, bump the salt i until an unused index is found.
        while value in indexes:
            i += 1
            value = algorithms[ii](f"{key}_{i}".encode()) % max_i
indexes.add(value)
return indexes
# str_hash = str(_get_string_int_hash(key))
# indexes = set()
# for _hash in _split_string_for_chunks(str_hash, k):
# value = int(_hash) % max_i
# while value in indexes:
# value += 1
# indexes.add(value)
# return indexes
| 24.576923 | 109 | 0.618936 | import hashlib
import zlib
algorithms = [
zlib.crc32,
]
try:
import xxhash
except ImportError:
pass
else:
algorithms.extend(
[xxhash.xxh3_64_intdigest, xxhash.xxh64_intdigest, xxhash.xxh3_128_intdigest, xxhash.xxh32_intdigest]
)
def get_hashes(key: str, k: int, max_i: int):
"""
    return a set of bit indexes for the given value (key), e.g. {23, 45, 15}
"""
assert max_i >= k
indexes = set()
for i in range(k):
ii = i % len(algorithms)
value = algorithms[ii](f"{key}_{i}".encode()) % max_i
        # On collision, bump the salt i until an unused index is found.
        while value in indexes:
            i += 1
            value = algorithms[ii](f"{key}_{i}".encode()) % max_i
indexes.add(value)
return indexes
# str_hash = str(_get_string_int_hash(key))
# indexes = set()
# for _hash in _split_string_for_chunks(str_hash, k):
# value = int(_hash) % max_i
# while value in indexes:
# value += 1
# indexes.add(value)
# return indexes
def _get_string_int_hash(key):
return int(hashlib.sha256(key.encode("utf-8")).hexdigest(), 16)
def _split_string_for_chunks(value: str, chunks: int):
chunk_size = len(value) // chunks
return (value[i : i + chunk_size] for i in range(0, chunk_size * chunks, chunk_size))
| 238 | 0 | 46 |
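Aside: get_hashes above reads like the index-derivation step of a Bloom
filter: k distinct bit positions below max_i per key. A minimal filter on top
of it (the import path is assumed):

from cashews.utils.split_hash import get_hashes  # assumed module path

SIZE, K = 1024, 3  # filter size in bits, hash indexes per key
bits = bytearray(SIZE)

def add(key: str) -> None:
    for idx in get_hashes(key, K, SIZE):
        bits[idx] = 1

def maybe_contains(key: str) -> bool:
    # False means definitely absent; True means present or a false positive.
    return all(bits[idx] for idx in get_hashes(key, K, SIZE))

add('user:42')
print(maybe_contains('user:42'))  # True
print(maybe_contains('user:43'))  # almost certainly False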
f34719933d24ba37658714aac1ca06c1727c2e84 | 1,517 | py | Python | common/xrd-ui-tests-python/tests/xroad_local_group/XroadEditDescriptionLocalGroup.py | nordic-institute/X-Road-tests | e030661a0ad8ceab74dd8122b751e88025a3474a | [
"MIT"
] | 1 | 2019-02-09T00:16:54.000Z | 2019-02-09T00:16:54.000Z | common/xrd-ui-tests-python/tests/xroad_local_group/XroadEditDescriptionLocalGroup.py | nordic-institute/X-Road-tests | e030661a0ad8ceab74dd8122b751e88025a3474a | [
"MIT"
] | 1 | 2018-06-06T08:33:32.000Z | 2018-06-06T08:33:32.000Z | common/xrd-ui-tests-python/tests/xroad_local_group/XroadEditDescriptionLocalGroup.py | nordic-institute/X-Road-tests | e030661a0ad8ceab74dd8122b751e88025a3474a | [
"MIT"
] | 3 | 2018-07-09T08:51:00.000Z | 2020-07-23T18:40:24.000Z | from __future__ import absolute_import
import unittest
from main.maincontroller import MainController
from tests.xroad_local_group import xroad_local_group
class XroadEditDescriptionLocalGroup(unittest.TestCase):
"""
UC SERVICE_28 Edit the Description of a Local Group
RIA URL: https://jira.ria.ee/browse/XT-285, https://jira.ria.ee/browse/XTKB-155
Depends on finishing other test(s): None
Requires helper scenarios: None
X-Road version: 6.16.0
"""
# except:
# main.log('XroadEditDescriptionLocalGroup: Failed to edit the description of a local group')
# main.save_exception_data()
# assert False
# finally:
# '''Test teardown'''
# main.tearDown()
| 33.711111 | 101 | 0.674357 | from __future__ import absolute_import
import unittest
from main.maincontroller import MainController
from tests.xroad_local_group import xroad_local_group
class XroadEditDescriptionLocalGroup(unittest.TestCase):
"""
UC SERVICE_28 Edit the Description of a Local Group
RIA URL: https://jira.ria.ee/browse/XT-285, https://jira.ria.ee/browse/XTKB-155
Depends on finishing other test(s): None
Requires helper scenarios: None
X-Road version: 6.16.0
"""
def __init__(self, methodName='test_add_sub_to_member'):
unittest.TestCase.__init__(self, methodName)
def test_add_sub_to_member(self):
main = MainController(self)
'''Set test name and number'''
main.test_number = 'SERVICE_28'
main.test_name = self.__class__.__name__
main.log('TEST: UC SERVICE_28 Edit the Description of a Local Group')
main.url = main.config.get('ss2.host')
main.username = main.config.get('ss2.user')
main.password = main.config.get('ss2.pass')
#
# try:
'''Open webdriver'''
main.reload_webdriver(main.url, main.username, main.password)
'''Run the test'''
test_func = xroad_local_group.test_edit_local_group_description()
test_func(main)
# except:
# main.log('XroadEditDescriptionLocalGroup: Failed to edit the description of a local group')
# main.save_exception_data()
# assert False
# finally:
# '''Test teardown'''
# main.tearDown()
| 736 | 0 | 53 |
6505cbe7bfb95fe462ee263d231946f3e0a79924 | 1,053 | py | Python | horses/ex3.py | octonion/betting | d3be4dc3c3d2f5aa77006e5c9f388c1b79414efb | [
"MIT"
] | 26 | 2017-04-03T01:45:27.000Z | 2021-09-22T12:11:31.000Z | horses/ex3.py | octonion/betting | d3be4dc3c3d2f5aa77006e5c9f388c1b79414efb | [
"MIT"
] | null | null | null | horses/ex3.py | octonion/betting | d3be4dc3c3d2f5aa77006e5c9f388c1b79414efb | [
"MIT"
] | 7 | 2017-04-07T17:42:23.000Z | 2021-12-21T21:58:22.000Z | #!/usr/bin/env python
import numpy as np
import pandas as pd
# First example
# Win probability vector
p = np.array([0.25,0.50,0.25])
# Decimal odds
d = np.array([5.0,1.5,2.0])
# Implied probabilities
i = np.array([])
for j in range(len(d)):
i = np.append(i,1/d[j])
race = pd.DataFrame()
race['p'] = p
race['d'] = d
race['i'] = i
r = np.array([])
for j in range(len(p)):
r = np.append(r,p[j]*d[j])
race['r'] = r
result = race
result['bet'] = False
R = 1.0
pt = 0.0
it = 0.0
while True:
found = False
for j, row in result.iterrows():
# Equivalent
# if (row['r']>(1-pt-row['p'])/(1-it-row['i'])) and not(row['bet']):
if (row['r']>R) and not(row['bet']):
result.at[j,'bet'] = True
pt = pt+row['p']
it = it+row['i']
R = (1-pt)/(1-it)
found = True
break
if not(found):
break
#R = (1-pt)/(1-it)
result['f'] = 0.0
for j, row in result.iterrows():
if (row['bet']):
result.at[j,'f'] = row['p']-R*row['i']
print(result)
| 16.714286 | 75 | 0.508072 | #!/usr/bin/env python
import numpy as np
import pandas as pd
# First example
# Win probability vector
p = np.array([0.25,0.50,0.25])
# Decimal odds
d = np.array([5.0,1.5,2.0])
# Implied probabilities
i = np.array([])
for j in range(len(d)):
i = np.append(i,1/d[j])
race = pd.DataFrame()
race['p'] = p
race['d'] = d
race['i'] = i
r = np.array([])
for j in range(len(p)):
r = np.append(r,p[j]*d[j])
race['r'] = r
result = race
result['bet'] = False
R = 1.0
pt = 0.0
it = 0.0
while True:
found = False
for j, row in result.iterrows():
# Equivalent
# if (row['r']>(1-pt-row['p'])/(1-it-row['i'])) and not(row['bet']):
if (row['r']>R) and not(row['bet']):
result.at[j,'bet'] = True
pt = pt+row['p']
it = it+row['i']
R = (1-pt)/(1-it)
found = True
break
if not(found):
break
#R = (1-pt)/(1-it)
result['f'] = 0.0
for j, row in result.iterrows():
if (row['bet']):
result.at[j,'f'] = row['p']-R*row['i']
print(result)
| 0 | 0 | 0 |
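Aside: the selection loop above matches the textbook Kelly solution for a race
of mutually exclusive outcomes (an interpretation of the code, not a claim
from the source). With win probabilities p_j, decimal odds d_j and implied
probabilities i_j = 1/d_j, horses join the betting set S while their expected
return r_j = p_j d_j exceeds the reserve rate

    R(S) = \frac{1 - \sum_{j \in S} p_j}{1 - \sum_{j \in S} i_j},

and the optimal fraction staked on each j \in S is

    f_j = p_j - R(S) \, i_j,

which is exactly result['f'] = row['p'] - R * row['i'] in the script.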
23460522f672aa7690f5ad2b26797b3327b5e9d5 | 2,376 | py | Python | python/src/aoc/year2020/day22.py | ocirne/adventofcode | ea9b5f1b48a04284521e85c96b420ed54adf55f0 | [
"Unlicense"
] | 1 | 2021-02-16T21:30:04.000Z | 2021-02-16T21:30:04.000Z | python/src/aoc/year2020/day22.py | ocirne/adventofcode | ea9b5f1b48a04284521e85c96b420ed54adf55f0 | [
"Unlicense"
] | null | null | null | python/src/aoc/year2020/day22.py | ocirne/adventofcode | ea9b5f1b48a04284521e85c96b420ed54adf55f0 | [
"Unlicense"
] | null | null | null | from collections import deque
from aoc.util import load_example, load_input
def part1(lines):
"""
>>> part1(load_example(__file__, '22'))
306
"""
cards1, cards2 = prepare_data(lines)
while True:
turn(cards1, cards2)
if len(cards1) == 0:
return calc_score(cards2)
if len(cards2) == 0:
return calc_score(cards1)
def part2(lines):
"""
>>> part2(load_example(__file__, '22'))
291
"""
cards1, cards2 = prepare_data(lines)
winner, cards = game(cards1, cards2)
score = calc_score(cards)
return score
if __name__ == "__main__":
data = load_input(__file__, 2020, "22")
print(part1(data))
print(part2(data))
| 24.244898 | 84 | 0.547559 | from collections import deque
from aoc.util import load_example, load_input
def prepare_data(lines):
cards1 = deque()
cards2 = deque()
player = None
for line in lines:
if line.isspace():
continue
elif line.startswith("Player"):
player = int(line.split()[1].split(":")[0])
else:
num = int(line)
if player == 1:
cards1.append(num)
else:
cards2.append(num)
return cards1, cards2
def turn(cards1, cards2):
val1 = cards1.popleft()
val2 = cards2.popleft()
if val1 > val2:
cards1.append(val1)
cards1.append(val2)
else:
cards2.append(val2)
cards2.append(val1)
def calc_score(cards):
return sum((i + 1) * cards.pop() for i in range(len(cards)))
def part1(lines):
"""
>>> part1(load_example(__file__, '22'))
306
"""
cards1, cards2 = prepare_data(lines)
while True:
turn(cards1, cards2)
if len(cards1) == 0:
return calc_score(cards2)
if len(cards2) == 0:
return calc_score(cards1)
def game(cards1, cards2):
history_cards1 = {}
history_cards2 = {}
while True:
val1 = cards1.popleft()
val2 = cards2.popleft()
if val1 <= len(cards1) and val2 <= len(cards2):
winner, _ = game(deque(list(cards1)[:val1]), deque(list(cards2)[:val2]))
elif val1 > val2:
winner = 1
else:
winner = 2
if winner == 1:
cards1.append(val1)
cards1.append(val2)
else:
cards2.append(val2)
cards2.append(val1)
if tuple(cards1) in history_cards1:
return 1, cards1
if tuple(cards2) in history_cards2:
return 1, cards1
if len(cards1) == 0:
return 2, cards2
if len(cards2) == 0:
return 1, cards1
history_cards1[tuple(cards1)] = True
history_cards2[tuple(cards2)] = True
def part2(lines):
"""
>>> part2(load_example(__file__, '22'))
291
"""
cards1, cards2 = prepare_data(lines)
winner, cards = game(cards1, cards2)
score = calc_score(cards)
return score
if __name__ == "__main__":
data = load_input(__file__, 2020, "22")
print(part1(data))
print(part2(data))
| 1,560 | 0 | 92 |
48dd705525e5cb03a8447d6256771c4073a6a921 | 5,160 | py | Python | bvc/admin/member_command.py | Vayel/GUCEM-BVC | e5645dec332756d3c9db083abf2c8f3625a10d4d | [
"WTFPL"
] | 2 | 2016-09-23T18:02:40.000Z | 2017-04-28T18:35:59.000Z | bvc/admin/member_command.py | Vayel/GUCEM-BVC | e5645dec332756d3c9db083abf2c8f3625a10d4d | [
"WTFPL"
] | 82 | 2016-09-26T14:38:31.000Z | 2018-02-12T18:47:12.000Z | bvc/admin/member_command.py | Vayel/GUCEM-BVC | e5645dec332756d3c9db083abf2c8f3625a10d4d | [
"WTFPL"
] | null | null | null | from smtplib import SMTPException
from django.contrib import admin, messages
from django.utils.timezone import now
from .site import admin_site
from .individual_command import IndividualCommandAdmin
from .. import models, forms
admin_site.register(models.MemberCommand, MemberCommandAdmin)
| 36.083916 | 140 | 0.611822 | from smtplib import SMTPException
from django.contrib import admin, messages
from django.utils.timezone import now
from .site import admin_site
from .individual_command import IndividualCommandAdmin
from .. import models, forms
class MemberCommandAdmin(IndividualCommandAdmin):
list_display = ['id', 'member', 'datetime_placed', 'amount', 'price',
'payment_type', 'state',]
ordering = ['datetime_placed']
search_fields = ('member__user__first_name', 'member__user__last_name', 'amount')
fields = forms.member_command.MemberCommandAdminForm.Meta.fields
form = forms.member_command.MemberCommandAdminForm
def get_readonly_fields(self, request, instance=None):
if instance: # Editing an existing object
fields = self.fields + []
if (instance.state == models.command.PLACED_STATE or
instance.state == models.command.TO_BE_PREPARED_STATE or
instance.state == models.command.RECEIVED_STATE or
instance.state == models.command.PREPARED_STATE):
fields.remove('amount')
return fields
return self.readonly_fields or []
def get_inline_actions(self, request, obj=None):
actions = super().get_inline_actions(request, obj)
if obj is None:
return actions
if obj.state == models.command.PREPARED_STATE:
actions.extend(('sell_by_check', 'sell_by_cash'))
if obj.state in (models.command.SOLD_STATE, models.command.TO_BE_BANKED_STATE):
actions.append('cancel_sale')
if obj.state == models.command.SOLD_STATE:
actions.append('add_to_bank_deposit')
if obj.state == models.command.TO_BE_BANKED_STATE and obj.payment_type not in obj.AUTO_BANKED_PAYMENT_TYPES:
actions.append('remove_from_bank_deposit')
return actions
def price(self, instance):
return instance.price
price.short_description = "Prix"
def sell(self, request, cmd, payment_type):
try:
cmd.sell(payment_type)
except models.command.InvalidState:
self.message_user(
request,
"La commande {} n'est pas dans le bon état pour être vendue.".format(cmd),
level=messages.ERROR
)
except SMTPException as e:
self.message_user(
request,
"Une erreur est survenue en envoyant le mail : " + str(e),
level=messages.ERROR
)
def get_sell_by_check_label(self, obj):
return "Vendre par chèque"
def sell_by_check(self, request, cmd, parent_obj=None):
self.sell(request, cmd, models.command.CHECK_PAYMENT)
def get_sell_by_cash_label(self, obj):
return "Vendre par espèces"
def sell_by_cash(self, request, cmd, parent_obj=None):
self.sell(request, cmd, models.command.CASH_PAYMENT)
def get_cancel_sale_label(self, obj):
return "Annuler la vente"
def cancel_sale(self, request, queryset):
for cmd in queryset:
try:
cmd.cancel_sale()
except models.command.InvalidState:
self.message_user(
request,
"La commande {} n'est pas dans le bon état pour en annuler la vente.".format(cmd),
level=messages.ERROR
)
def get_add_to_bank_deposit_label(self, obj):
return "Intégrer au dépôt"
def add_to_bank_deposit(self, request, cmd, parent_obj=None):
try:
cmd.add_to_bank_deposit()
except models.command.InvalidState:
self.message_user(
request,
"La commande {} n'est pas dans le bon état pour être déposée en banque.".format(cmd),
level=messages.ERROR
)
except SMTPException as e:
self.message_user(
request,
"Une erreur est survenue en envoyant le mail : " + str(e),
level=messages.ERROR
)
def get_remove_from_bank_deposit_label(self, obj):
return "Enlever du dépôt"
def remove_from_bank_deposit(self, request, cmd, parent_obj=None):
try:
cmd.remove_from_bank_deposit()
except models.command.InvalidState:
self.message_user(
request,
"La commande {} n'est pas dans le bon état pour être retirée d'un dépôt en banque.".format(cmd),
level=messages.ERROR
)
except models.command.InvalidPaymentType:
self.message_user(
request,
"La commande {} ne peut être retirée du dépôt en banque du fait de son type de paiment ({}).".format(cmd, cmd.payment_type),
level=messages.ERROR
)
except SMTPException as e:
self.message_user(
request,
"Une erreur est survenue en envoyant le mail : " + str(e),
level=messages.ERROR
)
admin_site.register(models.MemberCommand, MemberCommandAdmin)
| 4,056 | 809 | 23 |
f91ba4319af8d4d6061e2ed8acad51dfeee3c456 | 4,030 | py | Python | lc0001_two_sum.py | yiyinghsieh/python-algorithms-data-structures | 26879343d3d83ca431946f8a92def2bc8672f807 | [
"BSD-2-Clause"
] | null | null | null | lc0001_two_sum.py | yiyinghsieh/python-algorithms-data-structures | 26879343d3d83ca431946f8a92def2bc8672f807 | [
"BSD-2-Clause"
] | null | null | null | lc0001_two_sum.py | yiyinghsieh/python-algorithms-data-structures | 26879343d3d83ca431946f8a92def2bc8672f807 | [
"BSD-2-Clause"
] | null | null | null | """Leetcode 1. Two Sum
Easy
URL: https://leetcode.com/problems/two-sum/
Given an array of integers, return indices of the two numbers such that
they add up to a specific target.
You may assume that each input would have exactly one solution, and you
may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
"""
if __name__ == '__main__':
main()
| 31.24031 | 398 | 0.44268 | """Leetcode 1. Two Sum
Easy
URL: https://leetcode.com/problems/two-sum/
Given an array of integers, return indices of the two numbers such that
they add up to a specific target.
You may assume that each input would have exactly one solution, and you
may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
"""
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
l = len(nums)
l_half = l // 2
num_sort = sorted(nums)
if target < num_sort[l_half]:
for i in range(l):
for ii in range(l):
if i == ii:
continue
if num_sort[i] + num_sort[ii] == target:
newnum = [num_sort[i], num_sort[ii]]
ans = []
for ii in newnum:
for i, c in enumerate(nums):
if ii == c and i not in ans:
ans += [i]
break
return sorted(ans)
elif target > num_sort[l_half] * 2:
for i in range(l):
for ii in range(l):
if i == ii:
continue
if num_sort[-i] + num_sort[-ii] == target:
newnum = [num_sort[-i]] + [num_sort[-ii]]
ans = []
for ii in newnum:
for i, c in enumerate(nums):
if ii == c and i not in ans:
ans += [i]
break
return sorted(ans)
else:
for i in range(l):
for ii in range(l):
if i == ii:
continue
if num_sort[i] + num_sort[ii] == target:
newnum = [num_sort[i]] + [num_sort[ii]]
ans = []
for ii in newnum:
for i, c in enumerate(nums):
if ii == c and i not in ans:
ans += [i]
break
return sorted(ans)
# NOTE: this redefinition shadows the sorting-based Solution above, so only
# this brute-force version is visible to main().
class Solution(object):
def twoSum(self, nums, target):
n = len(nums)
for i in range(n - 1):
for j in range(i + 1, n):
if nums[i] + nums[j] == target:
return [i, j]
class Solution1(object):
def twoSum(self, nums, target):
dic = {}
for i, c in enumerate(nums):
if c in dic:
return [dic[c], i]
dic[target - c] = i
def main():
# Output: [0, 1]
nums = [2, 7, 11, 15]
target = 9
print(Solution().twoSum(nums, target))
# Output: [1, 2]
nums = [2, 5, 7, 7, 11]
target = 12
print(Solution().twoSum(nums, target))
# Output: [0, 1]
nums = [3, 3]
target = 6
print(Solution().twoSum(nums, target))
# Output: [1, 2]
nums = [3, 2, 4]
target = 6
print(Solution().twoSum(nums, target))
# Output: [3, 8]
nums = [3, 2, 4, 6, 7, 4, 3, 6, 9, 9]
target = 15
print(Solution1().twoSum(nums, target))
# Output: [28, 45]
nums = [230,863,916,585,981,404,316,785,88,12,70,435,384,778,887,755,740,337,86,92,325,422,815,650,920,125,277,336,221,847,168,23,677,61,400,136,874,363,394,199,863,997,794,587,124,321,212,957,764,173,314,422,927,783,930,282,306,506,44,926,691,568,68,730,933,737,531,180,414,751,28,546,60,371,493,370,527,387,43,541,13,457,328,227,652,365,430,803,59,858,538,427,583,368,375,173,809,896,370,789]
target = 542
print(Solution1().twoSum(nums, target))
if __name__ == '__main__':
main()
| 1,357 | 2,082 | 144 |
180588a9d2adcb91708df4f0c52060906cdde8cf | 5,811 | py | Python | DPGAnalysis/Skims/python/muonTagProbeFilters_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | DPGAnalysis/Skims/python/muonTagProbeFilters_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | DPGAnalysis/Skims/python/muonTagProbeFilters_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
#from HLTrigger.HLTfilters.hltHighLevel_cfi import *
#exoticaMuHLT = hltHighLevel
#Define the HLT path to be used.
#exoticaMuHLT.HLTPaths =['HLT_L1MuOpen']
#exoticaMuHLT.TriggerResultsTag = cms.InputTag("TriggerResults","","HLT8E29")
#Define the HLT quality cut
#exoticaHLTMuonFilter = cms.EDFilter("HLTSummaryFilter",
# summary = cms.InputTag("hltTriggerSummaryAOD","","HLT8E29"), # trigger summary
# member = cms.InputTag("hltL3MuonCandidates","","HLT8E29"), # filter or collection
# cut = cms.string("pt>0"), # cut on trigger object
# minN = cms.int32(0) # min. # of passing objects needed
# )
#Define the Reco quality cut
from SimGeneral.HepPDTESSource.pythiapdt_cfi import *
# Make the charged candidate collections from tracks
allTracks = cms.EDProducer("TrackViewCandidateProducer",
src = cms.InputTag("generalTracks"),
particleType = cms.string('mu+'),
cut = cms.string('pt > 0'),
filter = cms.bool(True)
)
staTracks = cms.EDProducer("TrackViewCandidateProducer",
src = cms.InputTag("standAloneMuons","UpdatedAtVtx"),
particleType = cms.string('mu+'),
cut = cms.string('pt > 0.5 && abs(d0) < 2.0 && abs(vz) < 25.0'),
filter = cms.bool(True)
)
# Make the input candidate collections
tagCands = cms.EDFilter("MuonRefSelector",
src = cms.InputTag("muons"),
cut = cms.string('isGlobalMuon > 0 && pt > 1.0 && abs(eta) < 2.1'),
filter = cms.bool(True)
)
# Standalone muon tracks (probes)
staCands = cms.EDFilter("RecoChargedCandidateRefSelector",
src = cms.InputTag("staTracks"),
cut = cms.string('pt > 0.5 && abs(eta) < 2.1'),
filter = cms.bool(True)
)
# Tracker muons (to be matched)
tkProbeCands = cms.EDFilter("RecoChargedCandidateRefSelector",
src = cms.InputTag("allTracks"),
cut = cms.string('pt > 0.5'),
filter = cms.bool(True)
)
# Match track and stand alone candidates
# to get the passing probe candidates
TkStaMap = cms.EDProducer("TrivialDeltaRViewMatcher",
src = cms.InputTag("tkProbeCands"),
distMin = cms.double(0.15),
matched = cms.InputTag("staCands"),
filter = cms.bool(True)
)
# Use the producer to get a list of matched candidates
TkStaMatched = cms.EDProducer("RecoChargedCandidateMatchedProbeMaker",
Matched = cms.untracked.bool(True),
ReferenceSource = cms.untracked.InputTag("staCands"),
ResMatchMapSource = cms.untracked.InputTag("TkStaMap"),
CandidateSource = cms.untracked.InputTag("tkProbeCands"),
filter = cms.bool(True)
)
TkStaUnmatched = cms.EDProducer("RecoChargedCandidateMatchedProbeMaker",
Matched = cms.untracked.bool(False),
ReferenceSource = cms.untracked.InputTag("staCands"),
ResMatchMapSource = cms.untracked.InputTag("TkStaMap"),
CandidateSource = cms.untracked.InputTag("tkProbeCands"),
filter = cms.bool(True)
)
# Make the tag probe association map
JPsiMMTagProbeMap = cms.EDProducer("TagProbeMassProducer",
MassMaxCut = cms.untracked.double(4.5),
TagCollection = cms.InputTag("tagCands"),
MassMinCut = cms.untracked.double(1.5),
ProbeCollection = cms.InputTag("tkProbeCands"),
PassingProbeCollection = cms.InputTag("TkStaMatched")
)
JPsiMMTPFilter = cms.EDFilter("TagProbeMassEDMFilter",
tpMapName = cms.string('JPsiMMTagProbeMap')
)
ZMMTagProbeMap = cms.EDProducer("TagProbeMassProducer",
MassMaxCut = cms.untracked.double(120.0),
TagCollection = cms.InputTag("tagCands"),
MassMinCut = cms.untracked.double(50.0),
ProbeCollection = cms.InputTag("tkProbeCands"),
PassingProbeCollection = cms.InputTag("TkStaMatched")
)
ZMMTPFilter = cms.EDFilter("TagProbeMassEDMFilter",
tpMapName = cms.string('ZMMTagProbeMap')
)
#Define group sequence, using HLT/Reco quality cut.
#exoticaMuHLTQualitySeq = cms.Sequence()
tagProbeSeq = cms.Sequence(allTracks+staTracks*tagCands+tkProbeCands+staCands*TkStaMap*TkStaMatched)
muonJPsiMMRecoQualitySeq = cms.Sequence(
#exoticaMuHLT+
tagProbeSeq+JPsiMMTagProbeMap+JPsiMMTPFilter
)
muonZMMRecoQualitySeq = cms.Sequence(
#exoticaMuHLT+
tagProbeSeq+ZMMTagProbeMap+ZMMTPFilter
)
| 47.243902 | 102 | 0.514025 | import FWCore.ParameterSet.Config as cms
#from HLTrigger.HLTfilters.hltHighLevel_cfi import *
#exoticaMuHLT = hltHighLevel
#Define the HLT path to be used.
#exoticaMuHLT.HLTPaths =['HLT_L1MuOpen']
#exoticaMuHLT.TriggerResultsTag = cms.InputTag("TriggerResults","","HLT8E29")
#Define the HLT quality cut
#exoticaHLTMuonFilter = cms.EDFilter("HLTSummaryFilter",
# summary = cms.InputTag("hltTriggerSummaryAOD","","HLT8E29"), # trigger summary
# member = cms.InputTag("hltL3MuonCandidates","","HLT8E29"), # filter or collection
# cut = cms.string("pt>0"), # cut on trigger object
# minN = cms.int32(0) # min. # of passing objects needed
# )
#Define the Reco quality cut
from SimGeneral.HepPDTESSource.pythiapdt_cfi import *
# Make the charged candidate collections from tracks
allTracks = cms.EDProducer("TrackViewCandidateProducer",
src = cms.InputTag("generalTracks"),
particleType = cms.string('mu+'),
cut = cms.string('pt > 0'),
filter = cms.bool(True)
)
staTracks = cms.EDProducer("TrackViewCandidateProducer",
src = cms.InputTag("standAloneMuons","UpdatedAtVtx"),
particleType = cms.string('mu+'),
cut = cms.string('pt > 0.5 && abs(d0) < 2.0 && abs(vz) < 25.0'),
filter = cms.bool(True)
)
# Make the input candidate collections
tagCands = cms.EDFilter("MuonRefSelector",
src = cms.InputTag("muons"),
cut = cms.string('isGlobalMuon > 0 && pt > 1.0 && abs(eta) < 2.1'),
filter = cms.bool(True)
)
# Standalone muon tracks (probes)
staCands = cms.EDFilter("RecoChargedCandidateRefSelector",
src = cms.InputTag("staTracks"),
cut = cms.string('pt > 0.5 && abs(eta) < 2.1'),
filter = cms.bool(True)
)
# Tracker muons (to be matched)
tkProbeCands = cms.EDFilter("RecoChargedCandidateRefSelector",
src = cms.InputTag("allTracks"),
cut = cms.string('pt > 0.5'),
filter = cms.bool(True)
)
# Match track and stand alone candidates
# to get the passing probe candidates
TkStaMap = cms.EDProducer("TrivialDeltaRViewMatcher",
src = cms.InputTag("tkProbeCands"),
distMin = cms.double(0.15),
matched = cms.InputTag("staCands"),
filter = cms.bool(True)
)
# Use the producer to get a list of matched candidates
TkStaMatched = cms.EDProducer("RecoChargedCandidateMatchedProbeMaker",
Matched = cms.untracked.bool(True),
ReferenceSource = cms.untracked.InputTag("staCands"),
ResMatchMapSource = cms.untracked.InputTag("TkStaMap"),
CandidateSource = cms.untracked.InputTag("tkProbeCands"),
filter = cms.bool(True)
)
TkStaUnmatched = cms.EDProducer("RecoChargedCandidateMatchedProbeMaker",
Matched = cms.untracked.bool(False),
ReferenceSource = cms.untracked.InputTag("staCands"),
ResMatchMapSource = cms.untracked.InputTag("TkStaMap"),
CandidateSource = cms.untracked.InputTag("tkProbeCands"),
filter = cms.bool(True)
)
# Make the tag probe association map
JPsiMMTagProbeMap = cms.EDProducer("TagProbeMassProducer",
MassMaxCut = cms.untracked.double(4.5),
TagCollection = cms.InputTag("tagCands"),
MassMinCut = cms.untracked.double(1.5),
ProbeCollection = cms.InputTag("tkProbeCands"),
PassingProbeCollection = cms.InputTag("TkStaMatched")
)
JPsiMMTPFilter = cms.EDFilter("TagProbeMassEDMFilter",
tpMapName = cms.string('JPsiMMTagProbeMap')
)
ZMMTagProbeMap = cms.EDProducer("TagProbeMassProducer",
MassMaxCut = cms.untracked.double(120.0),
TagCollection = cms.InputTag("tagCands"),
MassMinCut = cms.untracked.double(50.0),
ProbeCollection = cms.InputTag("tkProbeCands"),
PassingProbeCollection = cms.InputTag("TkStaMatched")
)
ZMMTPFilter = cms.EDFilter("TagProbeMassEDMFilter",
tpMapName = cms.string('ZMMTagProbeMap')
)
#Define group sequence, using HLT/Reco quality cut.
#exoticaMuHLTQualitySeq = cms.Sequence()
tagProbeSeq = cms.Sequence(allTracks+staTracks*tagCands+tkProbeCands+staCands*TkStaMap*TkStaMatched)
muonJPsiMMRecoQualitySeq = cms.Sequence(
#exoticaMuHLT+
tagProbeSeq+JPsiMMTagProbeMap+JPsiMMTPFilter
)
muonZMMRecoQualitySeq = cms.Sequence(
#exoticaMuHLT+
tagProbeSeq+ZMMTagProbeMap+ZMMTPFilter
)
| 0 | 0 | 0 |
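This fragment is a tag-and-probe selection: well-identified global muons (tagCands) are paired with standalone-muon probes, the ΔR < 0.15 match (TkStaMap/TkStaMatched) defines the passing probes, and the two TagProbeMassProducer instances keep pairs in the J/ψ (1.5–4.5 GeV) and Z (50–120 GeV) mass windows ahead of the corresponding mass filters. A minimal driver using one of the exported sequences might look like the sketch below (assumes a CMSSW environment with a source already configured; the process and path names are illustrative):

import FWCore.ParameterSet.Config as cms

process = cms.Process("TagProbeSkim")
process.load("DPGAnalysis.Skims.muonTagProbeFilters_cff")  # this fragment
process.zmmPath = cms.Path(process.muonZMMRecoQualitySeq)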
4f780b26748d3d14409285105a1dc80ae0eaa03b | 2,315 | py | Python | mmnist/flags.py | thomassutter/MoPoE | 477a441ecb6c735a0b8af4d643fe3ac04c58171f | [
"MIT"
] | 3 | 2021-05-06T18:29:09.000Z | 2022-01-13T03:23:25.000Z | mmnist/flags.py | thomassutter/MoPoE | 477a441ecb6c735a0b8af4d643fe3ac04c58171f | [
"MIT"
] | 1 | 2022-02-02T07:49:59.000Z | 2022-02-16T08:16:20.000Z | mmnist/flags.py | thomassutter/MoPoE | 477a441ecb6c735a0b8af4d643fe3ac04c58171f | [
"MIT"
] | 2 | 2021-05-13T02:20:42.000Z | 2022-03-30T04:05:43.000Z | from utils.BaseFlags import parser as parser
parser.add_argument('--dataset', type=str, default='MMNIST', help="name of the dataset")
parser.add_argument('--style_dim', type=int, default=0, help="style dimensionality") # TODO: use modality-specific style dimensions?
parser.add_argument('--num_classes', type=int, default=10, help="number of classes on which the data set trained")
parser.add_argument('--len_sequence', type=int, default=8, help="length of sequence")
parser.add_argument('--img_size_m1', type=int, default=28, help="img dimension (width/height)")
parser.add_argument('--num_channels_m1', type=int, default=1, help="number of channels in images")
parser.add_argument('--img_size_m2', type=int, default=32, help="img dimension (width/height)")
parser.add_argument('--num_channels_m2', type=int, default=3, help="number of channels in images")
parser.add_argument('--dim', type=int, default=64, help="number of classes on which the data set trained")
parser.add_argument('--data_multiplications', type=int, default=1, help="number of pairs per sample")
parser.add_argument('--num_hidden_layers', type=int, default=1, help="number of channels in images")
parser.add_argument('--likelihood', type=str, default='laplace', help="output distribution")
# data
parser.add_argument('--unimodal-datapaths-train', nargs="+", type=str, help="directories where training data is stored")
parser.add_argument('--unimodal-datapaths-test', nargs="+", type=str, help="directories where test data is stored")
parser.add_argument('--pretrained-classifier-paths', nargs="+", type=str, help="paths to pretrained classifiers")
# multimodal
parser.add_argument('--subsampled_reconstruction', default=True, help="subsample reconstruction path")
parser.add_argument('--include_prior_expert', action='store_true', default=False, help="factorized_representation")
# weighting of loss terms
parser.add_argument('--div_weight', type=float, default=None, help="default weight divergence per modality, if None use 1/(num_mods+1).")
parser.add_argument('--div_weight_uniform_content', type=float, default=None, help="default weight divergence term prior, if None use (1/num_mods+1)")
# annealing
parser.add_argument('--kl_annealing', type=int, default=0, help="number of kl annealing steps; 0 if no annealing should be done")
| 72.34375 | 150 | 0.766739 | from utils.BaseFlags import parser as parser
parser.add_argument('--dataset', type=str, default='MMNIST', help="name of the dataset")
parser.add_argument('--style_dim', type=int, default=0, help="style dimensionality") # TODO: use modality-specific style dimensions?
parser.add_argument('--num_classes', type=int, default=10, help="number of classes on which the data set trained")
parser.add_argument('--len_sequence', type=int, default=8, help="length of sequence")
parser.add_argument('--img_size_m1', type=int, default=28, help="img dimension (width/height)")
parser.add_argument('--num_channels_m1', type=int, default=1, help="number of channels in images")
parser.add_argument('--img_size_m2', type=int, default=32, help="img dimension (width/height)")
parser.add_argument('--num_channels_m2', type=int, default=3, help="number of channels in images")
parser.add_argument('--dim', type=int, default=64, help="number of classes on which the data set trained")
parser.add_argument('--data_multiplications', type=int, default=1, help="number of pairs per sample")
parser.add_argument('--num_hidden_layers', type=int, default=1, help="number of channels in images")
parser.add_argument('--likelihood', type=str, default='laplace', help="output distribution")
# data
parser.add_argument('--unimodal-datapaths-train', nargs="+", type=str, help="directories where training data is stored")
parser.add_argument('--unimodal-datapaths-test', nargs="+", type=str, help="directories where test data is stored")
parser.add_argument('--pretrained-classifier-paths', nargs="+", type=str, help="paths to pretrained classifiers")
# multimodal
parser.add_argument('--subsampled_reconstruction', default=True, help="subsample reconstruction path")
parser.add_argument('--include_prior_expert', action='store_true', default=False, help="factorized_representation")
# weighting of loss terms
parser.add_argument('--div_weight', type=float, default=None, help="default weight divergence per modality, if None use 1/(num_mods+1).")
parser.add_argument('--div_weight_uniform_content', type=float, default=None, help="default weight divergence term prior, if None use (1/num_mods+1)")
# annealing
parser.add_argument('--kl_annealing', type=int, default=0, help="number of kl annealing steps; 0 if no annealing should be done")
| 0 | 0 | 0 |
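Because argparse maps dashed option names to underscores on the returned namespace, --unimodal-datapaths-train is read back as args.unimodal_datapaths_train, and each nargs="+" option collects a list. A small illustration (assumes the shared parser from utils.BaseFlags adds no required flags of its own; paths are made up):

args = parser.parse_args(
    '--unimodal-datapaths-train data/m0 data/m1 --div_weight 0.2'.split())
print(args.unimodal_datapaths_train)  # ['data/m0', 'data/m1']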
bc6fc0ca9eaa7cd7fb08e399ba1a092687d4edbe | 127 | py | Python | src/model/__init__.py | JulienGrv/puremvc-python-demo-PySide-employeeadmin | b076493ac34254e665b485259b0a7122fa9cfde4 | [
"BSD-3-Clause"
] | 4 | 2017-08-26T10:18:10.000Z | 2020-07-28T19:50:54.000Z | src/model/__init__.py | JulienGrv/puremvc-python-demo-PySide-employeeadmin | b076493ac34254e665b485259b0a7122fa9cfde4 | [
"BSD-3-Clause"
] | null | null | null | src/model/__init__.py | JulienGrv/puremvc-python-demo-PySide-employeeadmin | b076493ac34254e665b485259b0a7122fa9cfde4 | [
"BSD-3-Clause"
] | 3 | 2020-09-22T12:17:14.000Z | 2021-07-16T12:28:18.000Z | # -*- coding: utf-8 -*-
from . import enum
from . import vo
from .RoleProxy import RoleProxy
from .UserProxy import UserProxy
| 18.142857 | 32 | 0.724409 | # -*- coding: utf-8 -*-
from . import enum
from . import vo
from .RoleProxy import RoleProxy
from .UserProxy import UserProxy
| 0 | 0 | 0 |
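The initializer simply re-exports the two proxies (the PureMVC model actors), which is what lets client code write, for instance, from model import RoleProxy, UserProxy instead of naming the submodules explicitly.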
c6f889fca298ca9f280665437bb50151326a96bf | 3,253 | py | Python | Assignments/Assignment 03/kmeans.py | linbo0518/CSE-6363-Machine-Learning | 29eef2629748af8af6116ea7f9c5c51e7190cdaf | [
"MIT"
] | null | null | null | Assignments/Assignment 03/kmeans.py | linbo0518/CSE-6363-Machine-Learning | 29eef2629748af8af6116ea7f9c5c51e7190cdaf | [
"MIT"
] | null | null | null | Assignments/Assignment 03/kmeans.py | linbo0518/CSE-6363-Machine-Learning | 29eef2629748af8af6116ea7f9c5c51e7190cdaf | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
| 37.390805 | 79 | 0.559484 | import numpy as np
import matplotlib.pyplot as plt
class KMeans:
def __init__(self, k, tol=1e-5, max_iter=1000):
assert ((k > 1) and
isinstance(k, int)), "k should be a integer and greater than 1"
self._k = k
self._centroids = None
self._tol = tol
self._max_iter = max_iter
def fit(self, x):
if not self._centroids:
self._centroids = self._init_centroids(x, self._k)
self._centroids, labels = self._kmeans(x, self._centroids, self._tol,
self._max_iter)
loss_matrix = np.zeros((len(x), len(self._centroids)))
for idx, centroid in enumerate(self._centroids):
loss_matrix[:, idx] = self._compute_dist(x, centroid)
loss = np.min(loss_matrix, axis=1)
return self._centroids, labels, loss.sum()
def predict(self, x):
dist_matrix = np.zeros((len(x), len(self._centroids)))
for idx, centroid in enumerate(self._centroids):
dist_matrix[:, idx] = self._compute_dist(x, centroid)
labels = np.argmin(dist_matrix, axis=1)
return labels
def plot(self, x, labels, centroids):
for pred in np.unique(labels):
plt.plot(x[labels == pred][:, 0], x[labels == pred][:, 1], 'o')
for centroid in centroids:
plt.plot(centroid[0], centroid[1], '+k')
plt.title("Predict label with centroids")
plt.show()
def _init_centroids(self, x, n):
centroids = list()
indexes = list()
indexes.append(np.random.randint(len(x)))
centroid_1 = x[indexes[-1]]
centroids.append(centroid_1)
dist_array = self._compute_dist(x, centroid_1).tolist()
indexes.append(np.argmax(dist_array))
centroids.append(x[indexes[-1]])
if n > 2:
for _ in range(2, n):
dist_array = np.zeros(len(x))
for centroid in centroids:
dist_array += self._compute_dist(x, centroid)
idx = np.argmax(dist_array)
while (idx in indexes):
dist_array[idx] = 0
idx = np.argmax(dist_array)
indexes.append(idx)
centroids.append(x[indexes[-1]])
return centroids
def _compute_dist(self, x, centroid):
return np.sum(np.square(x - centroid), axis=1)
    def _l2_norm(self, x):
        # Returns the squared L2 norm of the flattened input.
        x = np.ravel(x, order='K')
        return np.dot(x, x)
def _kmeans(self, x, centroids, tol, max_iter):
labels = np.zeros(len(x))
for _ in range(max_iter):
dist_matrix = np.zeros((len(x), len(centroids)))
for idx, centroid in enumerate(centroids):
dist_matrix[:, idx] = self._compute_dist(x, centroid)
labels = np.argmin(dist_matrix, axis=1)
new_centroids = np.zeros_like(centroids)
for idx in range(len(centroids)):
new_centroids[idx] = np.mean(x[labels == idx], axis=0)
shifts = self._l2_norm(new_centroids - centroids)
if shifts <= tol:
break
else:
centroids = new_centroids
return centroids, labels
| 2,970 | -8 | 239 |
bb65a80afcb52dea901b2b0e0af77a55b90966d4 | 2,394 | py | Python | hyp3lib/file_subroutines.py | washreve/hyp3-lib | 5e7f11f1de9576a519b25fb56ccdb40e72ca9982 | [
"BSD-3-Clause"
] | null | null | null | hyp3lib/file_subroutines.py | washreve/hyp3-lib | 5e7f11f1de9576a519b25fb56ccdb40e72ca9982 | [
"BSD-3-Clause"
] | null | null | null | hyp3lib/file_subroutines.py | washreve/hyp3-lib | 5e7f11f1de9576a519b25fb56ccdb40e72ca9982 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function, absolute_import, division, unicode_literals
import errno
import glob
import os
import re
import zipfile
from hyp3lib.execute import execute
def prepare_files(csv_file):
"""Download granules and unzip granules
Given a CSV file of granule names, download the granules and unzip them,
removing the zip files as we go. Note: This will unzip and REMOVE ALL ZIP
FILES in the current directory.
"""
cmd = "get_asf.py %s" % csv_file
execute(cmd)
os.rmdir("download")
for myfile in os.listdir("."):
if ".zip" in myfile:
try:
zip_ref = zipfile.ZipFile(myfile, 'r')
zip_ref.extractall(".")
zip_ref.close()
            except Exception:
print("Unable to unzip file {}".format(myfile))
else:
print("WARNING: {} not recognized as a zip file".format(myfile))
def get_file_list():
"""
Return a list of file names and file dates, including all SAFE
directories, found in the current directory, sorted by date.
"""
files = []
filenames = []
filedates = []
# Set up the list of files to process
i = 0
for myfile in os.listdir("."):
if ".SAFE" in myfile and os.path.isdir(myfile):
t = re.split('_+', myfile)
m = [myfile, t[4][0:15]]
files.append(m)
i += 1
print('Found %s files to process' % i)
files.sort(key=lambda row: row[1])
print(files)
for i in range(len(files)):
filenames.append(files[i][0])
filedates.append(files[i][1])
return filenames, filedates
def mkdir_p(path):
"""
Make parent directories as needed and no error if existing. Works like `mkdir -p`.
"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
| 26.021739 | 86 | 0.587719 | from __future__ import print_function, absolute_import, division, unicode_literals
import errno
import glob
import os
import re
import zipfile
from hyp3lib.execute import execute
def prepare_files(csv_file):
"""Download granules and unzip granules
Given a CSV file of granule names, download the granules and unzip them,
removing the zip files as we go. Note: This will unzip and REMOVE ALL ZIP
FILES in the current directory.
"""
cmd = "get_asf.py %s" % csv_file
execute(cmd)
os.rmdir("download")
for myfile in os.listdir("."):
if ".zip" in myfile:
try:
zip_ref = zipfile.ZipFile(myfile, 'r')
zip_ref.extractall(".")
zip_ref.close()
            except Exception:
print("Unable to unzip file {}".format(myfile))
else:
print("WARNING: {} not recognized as a zip file".format(myfile))
def get_file_list():
"""
Return a list of file names and file dates, including all SAFE
directories, found in the current directory, sorted by date.
"""
files = []
filenames = []
filedates = []
# Set up the list of files to process
i = 0
for myfile in os.listdir("."):
if ".SAFE" in myfile and os.path.isdir(myfile):
t = re.split('_+', myfile)
m = [myfile, t[4][0:15]]
files.append(m)
i += 1
print('Found %s files to process' % i)
files.sort(key=lambda row: row[1])
print(files)
for i in range(len(files)):
filenames.append(files[i][0])
filedates.append(files[i][1])
return filenames, filedates
def get_dem_tile_list():
tile_list = None
for myfile in glob.glob("DEM/*.tif"):
tile = os.path.basename(myfile)
if tile_list:
tile_list = tile_list + ", " + tile
else:
tile_list = tile
if tile_list:
print("Found DEM tile list of {}".format(tile_list))
return tile_list
else:
print("Warning: no DEM tile list created")
return None
def mkdir_p(path):
"""
Make parent directories as needed and no error if existing. Works like `mkdir -p`.
"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
| 406 | 0 | 23 |
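get_file_list sorts on field 4 of the underscore-split directory name, which for Sentinel-1 SAFE granules is the 15-character acquisition start timestamp. For an illustrative granule name:

import re
t = re.split('_+', 'S1A_IW_SLC__1SDV_20200103T170700_20200103T170727_030639_0382D5_DADA.SAFE')
print(t[4][0:15])  # -> 20200103T170700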
c2c9c13177f4b25d82b251702e09592343de443f | 2,229 | py | Python | blogs/models.py | AgnosticMe/phleeb | 48f85048d2db5d16d243feee2f84a961682a0f4d | [
"MIT"
] | null | null | null | blogs/models.py | AgnosticMe/phleeb | 48f85048d2db5d16d243feee2f84a961682a0f4d | [
"MIT"
] | null | null | null | blogs/models.py | AgnosticMe/phleeb | 48f85048d2db5d16d243feee2f84a961682a0f4d | [
"MIT"
] | null | null | null | from django.db import models
from django.conf import settings
from django.utils.text import slugify
import uuid
# Create your models here.
| 28.21519 | 102 | 0.680574 | from django.db import models
from django.conf import settings
from django.utils.text import slugify
import uuid
# Create your models here.
class Category(models.Model):
title = models.CharField(max_length=50)
slug = models.SlugField(editable=False)
def save(self, *args, **kwargs):
self.slug = f'{slugify(self.title)}--{uuid.uuid4()}'
super(Category, self).save(*args, **kwargs)
def __str__(self):
return self.title
def blog_count(self):
return self.blogs.all().count()
class Tag(models.Model):
title = models.CharField(max_length=50)
slug = models.SlugField(editable=False)
def save(self, *args, **kwargs):
self.slug = slugify(self.title)
super(Tag, self).save(*args, **kwargs)
def __str__(self):
return self.title
def blog_count(self):
return self.blogs.all().count()
class Blog(models.Model):
title = models.CharField(max_length=150)
content = models.TextField()
publishing_date = models.DateTimeField(auto_now_add=True)
image = models.ImageField(upload_to='uploads/')
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
slug = models.SlugField(editable=False)
category = models.ForeignKey(Category, on_delete=models.CASCADE, blank=True, related_name='blogs')
tag = models.ManyToManyField(Tag, related_name='blogs', blank=True)
slider_blog = models.BooleanField(default=False)
hit = models.PositiveIntegerField(default=0)
def comment_count(self):
return self.comments.all().count()
def save(self, *args, **kwargs):
self.slug = f'{slugify(self.title)}--{uuid.uuid4()}'
super(Blog, self).save(*args, **kwargs)
def __str__(self):
return self.title
def blog_tags(self):
return ', '.join(str(tag) for tag in self.tag.all())
class Comment(models.Model):
blog = models.ForeignKey(Blog, on_delete=models.CASCADE, related_name='comments')
name = models.CharField(max_length=100)
email = models.EmailField(max_length=150)
content = models.TextField()
publishing_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.blog.title
| 625 | 1,365 | 92 |
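One design consequence worth noting: Blog.save() and Category.save() rebuild the slug as slugify(title) plus a fresh uuid4() on every save, so a blog's slug — and any URL built from it — changes each time the object is updated; only Tag.save() produces a stable slug. Shape of the generated value (the uuid shown is illustrative):

blog.save()
blog.slug  # e.g. 'my-first-post--1b9d6bcd-bbfd-4b2d-9b5d-ab8dfbbd4bed'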
7ef705119c51114baa303c658e5e9a73bfa03b60 | 6,988 | py | Python | examples/precision_recall_at_k1.py | linksboy/Surprise | 0cad490b6009dcdb67776b90437df97d859fbc8e | [
"BSD-3-Clause"
] | null | null | null | examples/precision_recall_at_k1.py | linksboy/Surprise | 0cad490b6009dcdb67776b90437df97d859fbc8e | [
"BSD-3-Clause"
] | null | null | null | examples/precision_recall_at_k1.py | linksboy/Surprise | 0cad490b6009dcdb67776b90437df97d859fbc8e | [
"BSD-3-Clause"
] | null | null | null | """
This module illustrates how to compute Precision at k and Recall at k metrics.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import defaultdict
import time
import datetime
import random
import numpy as np
import six
from tabulate import tabulate
from surprise import Dataset
from surprise.model_selection import cross_validate
from surprise.model_selection import KFold
from surprise import NormalPredictor
from surprise import BaselineOnly
from surprise import KNNBasic
from surprise import KNNWithMeans
from surprise import KNNBaseline
from surprise import SVD
from surprise import SVDpp
from surprise import NMF
from surprise import SlopeOne
from surprise import CoClustering
from surprise.model_selection import train_test_split
classes = (SVD, SVDpp, NMF, SlopeOne, KNNBasic, KNNWithMeans, KNNBaseline,
CoClustering, BaselineOnly, NormalPredictor)
# ugly dict to map algo names and datasets to their markdown links in the table
stable = 'http://surprise.readthedocs.io/en/stable/'
LINK = {'SVD': '[{}]({})'.format('SVD',
stable +
'matrix_factorization.html#surprise.prediction_algorithms.matrix_factorization.SVD'),
'SVDpp': '[{}]({})'.format('SVD++',
stable +
'matrix_factorization.html#surprise.prediction_algorithms.matrix_factorization.SVDpp'),
'NMF': '[{}]({})'.format('NMF',
stable +
'matrix_factorization.html#surprise.prediction_algorithms.matrix_factorization.NMF'),
'SlopeOne': '[{}]({})'.format('Slope One',
stable +
'slope_one.html#surprise.prediction_algorithms.slope_one.SlopeOne'),
'KNNBasic': '[{}]({})'.format('k-NN',
stable +
'knn_inspired.html#surprise.prediction_algorithms.knns.KNNBasic'),
'KNNWithMeans': '[{}]({})'.format('Centered k-NN',
stable +
'knn_inspired.html#surprise.prediction_algorithms.knns.KNNWithMeans'),
'KNNBaseline': '[{}]({})'.format('k-NN Baseline',
stable +
'knn_inspired.html#surprise.prediction_algorithms.knns.KNNBaseline'),
'CoClustering': '[{}]({})'.format('Co-Clustering',
stable +
'co_clustering.html#surprise.prediction_algorithms.co_clustering.CoClustering'),
'BaselineOnly': '[{}]({})'.format('Baseline',
stable +
'basic_algorithms.html#surprise.prediction_algorithms.baseline_only.BaselineOnly'),
'NormalPredictor': '[{}]({})'.format('Random',
stable +
'basic_algorithms.html#surprise.prediction_algorithms.random_pred.NormalPredictor'),
'ml-100k': '[{}]({})'.format('Movielens 100k',
'http://grouplens.org/datasets/movielens/100k'),
'ml-1m': '[{}]({})'.format('Movielens 1M',
'http://grouplens.org/datasets/movielens/1m'),
}
def precision_recall_at_k(predictions, k=10, threshold=3.5):
'''Return precision and recall at k metrics for each user.'''
# First map the predictions to each user.
user_est_true = defaultdict(list)
for uid, _, true_r, est, _ in predictions:
user_est_true[uid].append((est, true_r))
precisions = dict()
recalls = dict()
for uid, user_ratings in user_est_true.items():
# Sort user ratings by estimated value
user_ratings.sort(key=lambda x: x[0], reverse=True)
# Number of relevant items
n_rel = sum((true_r >= threshold) for (_, true_r) in user_ratings)
# Number of recommended items in top k
n_rec_k = sum((est >= threshold) for (est, _) in user_ratings[:k])
# Number of relevant and recommended items in top k
n_rel_and_rec_k = sum(((true_r >= threshold) and (est >= threshold))
for (est, true_r) in user_ratings[:k])
# Precision@K: Proportion of recommended items that are relevant
precisions[uid] = n_rel_and_rec_k / n_rec_k if n_rec_k != 0 else 1
# Recall@K: Proportion of relevant items that are recommended
recalls[uid] = n_rel_and_rec_k / n_rel if n_rel != 0 else 1
return precisions, recalls
dataset = 'ml-100k'
data = Dataset.load_builtin('ml-100k')
kf = KFold(n_splits=5)
trainset,testset = train_test_split(data,test_size=.75)
'''
for trainset, testset in kf.split(data):
algo.fit(trainset)
predictions = algo.test(testset)
precisions, recalls = precision_recall_at_k(predictions, k=5, threshold=4)
# Precision and recall can then be averaged over all users
prec = sum(p for p in precisions.values()) / len(precisions)
recall = sum(rec for rec in recalls.values()) / len(recalls)
f1 = 2 * prec * recall / (prec + recall)
print(prec)
print(recall)
print(f1)
'''
table = []
for klass in classes:
start = time.time()
    # `classes` holds the algorithm classes themselves, so instantiate the
    # current one directly; comparing a class object to a string like 'SVD'
    # would never be true.
    algo = klass()
#cv_time = str(datetime.timedelta(seconds=int(time.time() - start)))
algo.fit(trainset)
predictions = algo.test(testset)
precisions, recalls = precision_recall_at_k(predictions, k=5, threshold=4)
# Precision and recall can then be averaged over all users
prec = sum(p for p in precisions.values()) / len(precisions)
recall = sum(rec for rec in recalls.values()) / len(recalls)
f1 = 2 * prec * recall / (prec + recall)
link = LINK[klass.__name__]
new_line = [link, prec, recall, f1]
print(tabulate([new_line], tablefmt="pipe")) # print current algo perf
table.append(new_line)
header = [LINK[dataset],
'Precision',
'Recall',
          'F1'  # no 'Time' entry: the cv_time column is commented out above
]
print(tabulate(table, header, tablefmt="pipe")) | 41.595238 | 130 | 0.576989 | """
This module illustrates how to compute Precision at k and Recall at k metrics.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import defaultdict
import time
import datetime
import random
import numpy as np
import six
from tabulate import tabulate
from surprise import Dataset
from surprise.model_selection import cross_validate
from surprise.model_selection import KFold
from surprise import NormalPredictor
from surprise import BaselineOnly
from surprise import KNNBasic
from surprise import KNNWithMeans
from surprise import KNNBaseline
from surprise import SVD
from surprise import SVDpp
from surprise import NMF
from surprise import SlopeOne
from surprise import CoClustering
from surprise.model_selection import train_test_split
classes = (SVD, SVDpp, NMF, SlopeOne, KNNBasic, KNNWithMeans, KNNBaseline,
CoClustering, BaselineOnly, NormalPredictor)
# ugly dict to map algo names and datasets to their markdown links in the table
stable = 'http://surprise.readthedocs.io/en/stable/'
LINK = {'SVD': '[{}]({})'.format('SVD',
stable +
'matrix_factorization.html#surprise.prediction_algorithms.matrix_factorization.SVD'),
'SVDpp': '[{}]({})'.format('SVD++',
stable +
'matrix_factorization.html#surprise.prediction_algorithms.matrix_factorization.SVDpp'),
'NMF': '[{}]({})'.format('NMF',
stable +
'matrix_factorization.html#surprise.prediction_algorithms.matrix_factorization.NMF'),
'SlopeOne': '[{}]({})'.format('Slope One',
stable +
'slope_one.html#surprise.prediction_algorithms.slope_one.SlopeOne'),
'KNNBasic': '[{}]({})'.format('k-NN',
stable +
'knn_inspired.html#surprise.prediction_algorithms.knns.KNNBasic'),
'KNNWithMeans': '[{}]({})'.format('Centered k-NN',
stable +
'knn_inspired.html#surprise.prediction_algorithms.knns.KNNWithMeans'),
'KNNBaseline': '[{}]({})'.format('k-NN Baseline',
stable +
'knn_inspired.html#surprise.prediction_algorithms.knns.KNNBaseline'),
'CoClustering': '[{}]({})'.format('Co-Clustering',
stable +
'co_clustering.html#surprise.prediction_algorithms.co_clustering.CoClustering'),
'BaselineOnly': '[{}]({})'.format('Baseline',
stable +
'basic_algorithms.html#surprise.prediction_algorithms.baseline_only.BaselineOnly'),
'NormalPredictor': '[{}]({})'.format('Random',
stable +
'basic_algorithms.html#surprise.prediction_algorithms.random_pred.NormalPredictor'),
'ml-100k': '[{}]({})'.format('Movielens 100k',
'http://grouplens.org/datasets/movielens/100k'),
'ml-1m': '[{}]({})'.format('Movielens 1M',
'http://grouplens.org/datasets/movielens/1m'),
}
def precision_recall_at_k(predictions, k=10, threshold=3.5):
'''Return precision and recall at k metrics for each user.'''
# First map the predictions to each user.
user_est_true = defaultdict(list)
for uid, _, true_r, est, _ in predictions:
user_est_true[uid].append((est, true_r))
precisions = dict()
recalls = dict()
for uid, user_ratings in user_est_true.items():
# Sort user ratings by estimated value
user_ratings.sort(key=lambda x: x[0], reverse=True)
# Number of relevant items
n_rel = sum((true_r >= threshold) for (_, true_r) in user_ratings)
# Number of recommended items in top k
n_rec_k = sum((est >= threshold) for (est, _) in user_ratings[:k])
# Number of relevant and recommended items in top k
n_rel_and_rec_k = sum(((true_r >= threshold) and (est >= threshold))
for (est, true_r) in user_ratings[:k])
# Precision@K: Proportion of recommended items that are relevant
precisions[uid] = n_rel_and_rec_k / n_rec_k if n_rec_k != 0 else 1
# Recall@K: Proportion of relevant items that are recommended
recalls[uid] = n_rel_and_rec_k / n_rel if n_rel != 0 else 1
return precisions, recalls
dataset = 'ml-100k'
data = Dataset.load_builtin('ml-100k')
kf = KFold(n_splits=5)
trainset,testset = train_test_split(data,test_size=.75)
'''
for trainset, testset in kf.split(data):
algo.fit(trainset)
predictions = algo.test(testset)
precisions, recalls = precision_recall_at_k(predictions, k=5, threshold=4)
# Precision and recall can then be averaged over all users
prec = sum(p for p in precisions.values()) / len(precisions)
recall = sum(rec for rec in recalls.values()) / len(recalls)
f1 = 2 * prec * recall / (prec + recall)
print(prec)
print(recall)
print(f1)
'''
table = []
for klass in classes:
start = time.time()
    # `classes` holds the algorithm classes themselves, so instantiate the
    # current one directly; comparing a class object to a string like 'SVD'
    # would never be true.
    algo = klass()
#cv_time = str(datetime.timedelta(seconds=int(time.time() - start)))
algo.fit(trainset)
predictions = algo.test(testset)
precisions, recalls = precision_recall_at_k(predictions, k=5, threshold=4)
# Precision and recall can then be averaged over all users
prec = sum(p for p in precisions.values()) / len(precisions)
recall = sum(rec for rec in recalls.values()) / len(recalls)
f1 = 2 * prec * recall / (prec + recall)
link = LINK[klass.__name__]
new_line = [link, prec, recall, f1]
print(tabulate([new_line], tablefmt="pipe")) # print current algo perf
table.append(new_line)
header = [LINK[dataset],
'Precision',
'Recall',
          'F1'  # no 'Time' entry: the cv_time column is commented out above
]
print(tabulate(table, header, tablefmt="pipe")) | 0 | 0 | 0 |
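A worked micro-example of the metric code: suppose one user has five predictions whose (est, true_r) pairs, sorted by est, are (4.8, 5), (4.5, 2), (4.2, 4), (3.0, 5), (2.0, 1), with k = 5 and threshold = 4. Then n_rec_k = 3 (estimates ≥ 4), n_rel = 3 (true ratings ≥ 4) and n_rel_and_rec_k = 2, giving precision@5 = 2/3 and recall@5 = 2/3; the per-user values are then averaged and combined into F1 exactly as in the loop above.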
88938f5fc4d946d37ec35056f1accb58851cb47b | 268 | py | Python | tests/urls.py | emihir0/django-schema-graph | 0c8280c035f47a7c736b8feb05d4dd1084b27979 | [
"MIT"
] | 316 | 2020-02-17T13:47:47.000Z | 2022-03-29T20:25:36.000Z | tests/urls.py | emihir0/django-schema-graph | 0c8280c035f47a7c736b8feb05d4dd1084b27979 | [
"MIT"
] | 20 | 2020-02-23T07:59:36.000Z | 2021-05-11T03:39:17.000Z | tests/urls.py | emihir0/django-schema-graph | 0c8280c035f47a7c736b8feb05d4dd1084b27979 | [
"MIT"
] | 11 | 2020-03-02T17:31:31.000Z | 2022-03-17T05:55:50.000Z | from schema_graph.views import Schema
try:
# Django 2+:
from django.urls import path
urlpatterns = [path("", Schema.as_view())]
except ImportError:
# Django < 2:
from django.conf.urls import url
urlpatterns = [url(r"^$", Schema.as_view())]
| 19.142857 | 48 | 0.649254 | from schema_graph.views import Schema
try:
# Django 2+:
from django.urls import path
urlpatterns = [path("", Schema.as_view())]
except ImportError:
# Django < 2:
from django.conf.urls import url
urlpatterns = [url(r"^$", Schema.as_view())]
| 0 | 0 | 0 |
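The try/except ImportError above is the usual way to keep a single urls.py working across Django versions: path() only exists from Django 2.0, so older installs fall back to the regex-based url(). On Django 2+ the same regex route could equivalently use re_path:

from django.urls import re_path
urlpatterns = [re_path(r"^$", Schema.as_view())]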
91694b23d82988fe89e472b7e5b8180d121cbc54 | 126 | py | Python | classes/threads/AbstractFuzzerRawThread.py | AntonKuzminRussia/web-scout | 5b8fed2c5917c9ecc210052703a65f1204f4b347 | [
"MIT"
] | 6 | 2017-10-11T18:56:05.000Z | 2019-09-29T21:45:05.000Z | classes/threads/AbstractFuzzerRawThread.py | AntonKuzminRussia/web-scout | 5b8fed2c5917c9ecc210052703a65f1204f4b347 | [
"MIT"
] | 3 | 2021-03-31T19:17:30.000Z | 2021-12-13T20:16:23.000Z | classes/threads/AbstractFuzzerRawThread.py | AntonKuzminRussia/web-scout | 5b8fed2c5917c9ecc210052703a65f1204f4b347 | [
"MIT"
] | null | null | null | from classes.threads.AbstractRawThread import AbstractRawThread
| 18 | 63 | 0.849206 | from classes.threads.AbstractRawThread import AbstractRawThread
class AbstractFuzzerRawThread(AbstractRawThread):
pass
| 0 | 37 | 23 |
673fb86942882dba6620bbf891fa8f412acbf676 | 4,123 | py | Python | utils/train_eval.py | squarefaceyao/CAU-No.455-Lab | 25ee9c063eeb426236911e0637da39d92f492cb6 | [
"MIT"
] | null | null | null | utils/train_eval.py | squarefaceyao/CAU-No.455-Lab | 25ee9c063eeb426236911e0637da39d92f492cb6 | [
"MIT"
] | null | null | null | utils/train_eval.py | squarefaceyao/CAU-No.455-Lab | 25ee9c063eeb426236911e0637da39d92f492cb6 | [
"MIT"
] | null | null | null | import random
from torch import tensor
from sklearn.model_selection import StratifiedKFold
import torch
import numpy as np
import time
import matplotlib.pyplot as plt
| 40.421569 | 92 | 0.611205 | import random
from torch import tensor
from sklearn.model_selection import StratifiedKFold
import torch
import numpy as np
import time
import matplotlib.pyplot as plt
def k_fold(data, folds):
skf = StratifiedKFold(folds, shuffle=True, random_state=random.randint(1,999))
test_indices, train_indices = [], []
for _, idx in skf.split(torch.zeros(data.x.shape[0]), data.y):
test_indices.append(torch.from_numpy(idx).to(torch.long))
val_indices = [test_indices[i - 1] for i in range(folds)]
for i in range(folds):
train_mask = torch.ones(data.x.shape[0], dtype=torch.bool)
train_mask[test_indices[i]] = 0
train_mask[val_indices[i]] = 0
train_indices.append(train_mask.nonzero(as_tuple=False).view(-1))
return train_indices, test_indices, val_indices
def cross_validation_with_val_set(data,model,args,transform):
train_losses, val_aucs, test_aucs, durations = [], [], [], []
folds = args.folds
for fold, (train_idx, test_idx,
val_idx) in enumerate(zip(*k_fold(data, folds))):
print(f"{fold+1} fold train")
split = {
'train_idx': np.array(train_idx),
'val_idx': np.array(val_idx),
'test_idx': np.array(test_idx)}
allmask = {}
for name in ['train', 'val', 'test']:
idx = split[f'{name}_idx']
idx = torch.from_numpy(idx).to(torch.long)
mask = torch.zeros(data.num_nodes, dtype=torch.bool)
mask[idx] = True
allmask[f'{name}_mask'] = mask
data.train_mask = allmask['train_mask']
data.val_mask = allmask['val_mask']
data.test_mask = allmask['test_mask']
train_data, val_data, test_data = transform(data) # Explicitly transform data.
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
file_time = time.perf_counter()
torch.save(test_data, f"./save_model/{file_time}_{fold + 1}_fold_test_data.pt")
def train():
model.train()
optimizer.zero_grad()
z = model.encode(train_data.x, train_data.edge_index)
loss = model.recon_loss(z, train_data.pos_edge_label_index)
loss.backward()
optimizer.step()
return float(loss)
@torch.no_grad()
def test(data):
model.eval()
z = model.encode(data.x, data.edge_index)
return model.test(z, data.pos_edge_label_index, data.neg_edge_label_index)
t_start = time.perf_counter()
for epoch in range(1, args.epochs + 1):
los = train()
train_losses.append(los)
auc, ap = test(test_data)
test_aucs.append(auc)
if epoch % args.epochs == 0:
print('Epoch: {:03d}, Test AUC: {:.4f}, AP: {:.4f}'.format(epoch, auc, ap))
t_end = time.perf_counter()
durations.append(t_end - t_start)
torch.save(model.state_dict(),f"./save_model/{file_time}_AUC_{auc:.4f}.pt")
# fig = plt.figure(figsize=(10, 8))
        # plt.plot(range(1, len(train_losses) + 1), train_losses, label='Validation loss')
# # find position of lowest validation loss
# minposs = train_losses.index(min(train_losses)) + 1
# plt.axvline(minposs, linestyle='--', color='r', label='Early Stopping Checkpoint')
# plt.xlabel('epochs')
# plt.ylabel('values')
# plt.grid(True)
# plt.legend()
# plt.tight_layout()
# plt.show()
# plt.close()
loss, auc, duration = tensor(train_losses), tensor(test_aucs), tensor(durations)
loss, auc = loss.view(folds, args.epochs), auc.view(folds, args.epochs)
loss, argmin = loss.min(dim=1)
auc = auc[torch.arange(folds, dtype=torch.long), argmin]
loss_mean = loss.mean().item()
auc_mean = auc.mean().item()
auc_std = auc.std().item()
duration_mean = duration.mean().item()
print('Train Loss: {:.4f}, Test AUC: {:.4f} ± {:.3f}, Duration: {:.4f}'.
format(loss_mean, auc_mean, auc_std, duration_mean))
return loss_mean, auc_mean, auc_std | 3,910 | 0 | 46 |
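cross_validation_with_val_set expects model to expose the torch_geometric GAE/VGAE interface (encode, recon_loss, test) and transform to return a (train, val, test) triple the way RandomLinkSplit does; it also assumes a data object with x, y and num_nodes, an args namespace providing folds, lr and epochs, and an existing ./save_model/ directory. A minimal wiring sketch (encoder sizes illustrative):

import torch
from torch_geometric.nn import GAE, GCNConv
from torch_geometric.transforms import RandomLinkSplit
from utils.train_eval import cross_validation_with_val_set

class Encoder(torch.nn.Module):
    def __init__(self, in_dim, hidden=64, out=32):
        super().__init__()
        self.conv1 = GCNConv(in_dim, hidden)
        self.conv2 = GCNConv(hidden, out)
    def forward(self, x, edge_index):
        return self.conv2(self.conv1(x, edge_index).relu(), edge_index)

transform = RandomLinkSplit(num_val=0.1, num_test=0.2, is_undirected=True)
model = GAE(Encoder(data.num_features))  # `data` and `args` prepared elsewhere
cross_validation_with_val_set(data, model, args, transform)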
cf757e1b3f00a63f7fb17f67fc6f30d5107cab47 | 1,918 | py | Python | botctl/botmod.py | wizeline/botctl | 85f69f7fa463246661823c9686e6550d4b4ca03e | [
"MIT"
] | null | null | null | botctl/botmod.py | wizeline/botctl | 85f69f7fa463246661823c9686e6550d4b4ca03e | [
"MIT"
] | null | null | null | botctl/botmod.py | wizeline/botctl | 85f69f7fa463246661823c9686e6550d4b4ca03e | [
"MIT"
] | 1 | 2020-10-13T16:30:05.000Z | 2020-10-13T16:30:05.000Z | from botctl.client import BotClientCommand
from botctl.common import command_callback, execute_subcommand
class InstallConversationCommand(BotClientCommand):
"""Usage:
$ botmod update-conversation {BOT_NAME} < CONVERSATION_FILE.json
"""
__commandname__ = 'botmod'
expects_input = True
@command_callback
class InstallIntegrationCommand(BotClientCommand):
"""Usage:
$ botmod install-integration {BOT_NAME} {INTEGRATION_NAME} < CONFIG.json
"""
__commandname__ = 'botmod'
expects_input = True
@command_callback
class InstallNLP(BotClientCommand):
"""Usage:
$ botmod install-nlp {BOT_NAME} < NLP_CONFIG.json
"""
__commandname__ = 'botmod'
expects_input = True
@command_callback
class InstallChannelCommand(BotClientCommand):
"""Usage:
$ botmod install-channel {BOT_NAME}.{CHANNEL} < CHANNEL_CONFIG.json
"""
__commandname__ = 'botmod'
expects_input = True
@command_callback
| 27.4 | 76 | 0.667883 | from botctl.client import BotClientCommand
from botctl.common import command_callback, execute_subcommand
class InstallConversationCommand(BotClientCommand):
"""Usage:
$ botmod update-conversation {BOT_NAME} < CONVERSATION_FILE.json
"""
__commandname__ = 'botmod'
expects_input = True
@command_callback
def __call__(self, bot_name):
self.client.post_conversation(bot_name, self.input)
return 0
class InstallIntegrationCommand(BotClientCommand):
"""Usage:
$ botmod install-integration {BOT_NAME} {INTEGRATION_NAME} < CONFIG.json
"""
__commandname__ = 'botmod'
expects_input = True
@command_callback
def __call__(self, bot_name, integration_name):
self.client.install_bot_integration(bot_name,
integration_name,
self.input)
return 0
class InstallNLP(BotClientCommand):
"""Usage:
$ botmod install-nlp {BOT_NAME} < NLP_CONFIG.json
"""
__commandname__ = 'botmod'
expects_input = True
@command_callback
def __call__(self, bot_name):
self.client.install_nlp(bot_name, self.input)
return 0
class InstallChannelCommand(BotClientCommand):
"""Usage:
$ botmod install-channel {BOT_NAME}.{CHANNEL} < CHANNEL_CONFIG.json
"""
__commandname__ = 'botmod'
expects_input = True
@command_callback
def __call__(self, bot_and_channel):
bot_name, channel_name = bot_and_channel.split('.')
self.client.install_channel(bot_name, channel_name, self.input)
return 0
def main():
callbacks = {
'install-conversation': InstallConversationCommand,
'install-integration': InstallIntegrationCommand,
'install-channel': InstallChannelCommand,
'install-nlp': InstallNLP
}
return execute_subcommand('botmod', **callbacks)
| 812 | 0 | 127 |
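In this row the filter removed every undocumented __call__ body and main() (812 characters, per the counter), leaving the dangling @command_callback decorators in the first copy — only the original_content copy is valid Python. Per the surviving docstrings and the callbacks dict in main(), each subcommand reads its JSON payload from stdin, e.g.:

$ botmod install-conversation my-bot < conversation.json
$ botmod install-channel my-bot.slack < channel_config.json

(Note that the first class's docstring still says update-conversation while main() registers the command as install-conversation.)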
2f1b3ce4c6aaa18904fc121599adf8339ace9dc4 | 919 | py | Python | src/greenery/greenery/v1_test.py | AdeebNqo/sublimegen | 1e25281b2d739c72fa501b67da6b271d0601fa2c | [
"Apache-2.0",
"MIT"
] | 1 | 2021-05-24T03:23:13.000Z | 2021-05-24T03:23:13.000Z | src/greenery/greenery/v1_test.py | AdeebNqo/sublimegen | 1e25281b2d739c72fa501b67da6b271d0601fa2c | [
"Apache-2.0",
"MIT"
] | 1 | 2015-04-15T09:02:38.000Z | 2015-04-15T09:02:38.000Z | src/greenery/greenery/v1_test.py | AdeebNqo/sublimegen | 1e25281b2d739c72fa501b67da6b271d0601fa2c | [
"Apache-2.0",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
if __name__ == "__main__":
import os
import sys
# If you run tests in-place (instead of using py.test), ensure local version is tested!
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import greenery.v1 as greenery
if __name__ == "__main__":
test_v1()
| 28.71875 | 88 | 0.688792 | # -*- coding: utf-8 -*-
if __name__ == "__main__":
import os
import sys
# If you run tests in-place (instead of using py.test), ensure local version is tested!
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import greenery.v1 as greenery
def test_v1():
regex = greenery.parse( "a*b" )
assert isinstance( regex, greenery.lego )
machine = regex.fsm()
assert isinstance( machine, greenery.fsm )
regexstr = str( regex )
assert regexstr == "a*b"
assert machine.accepts( "aaab" )
gen = regex.strings()
assert next(gen) == "b"
assert next(gen) == "ab"
# ensure that the full lego interface has been imported under v1
astarb = greenery.conc(greenery.mult(greenery.charclass("a"), greenery.star),
greenery.mult(greenery.charclass("b"), greenery.one))
gen = astarb.strings()
assert next(gen) == "b"
assert next(gen) == "ab"
if __name__ == "__main__":
test_v1()
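# Hedged extra check (assumes the same v1 API exercised above):
#   assert not greenery.parse("a*b").fsm().accepts("aa")   # no trailing "b"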
| 580 | 0 | 23 |
498fdc7edf4475ef8f1309a0bf0b50a27af47861 | 485 | py | Python | PYTHON-CURSO EM VIDEO/Desafio 042.py | JaumVitor/HOMEWORK-PYTHON | aff564ac61802c7417d7280a73c1ed4a98978ed3 | [
"Apache-2.0"
] | null | null | null | PYTHON-CURSO EM VIDEO/Desafio 042.py | JaumVitor/HOMEWORK-PYTHON | aff564ac61802c7417d7280a73c1ed4a98978ed3 | [
"Apache-2.0"
] | null | null | null | PYTHON-CURSO EM VIDEO/Desafio 042.py | JaumVitor/HOMEWORK-PYTHON | aff564ac61802c7417d7280a73c1ed4a98978ed3 | [
"Apache-2.0"
] | null | null | null | l1 = int ( input ('Primeiro lado : '))
l2 = int ( input('Segundo lado : '))
l3 = int ( input ('Terceiro lado : '))
if l1 + l2 > l3 and l2 + l3 > l1 and l3 + l1 > l2:
print('Pode formar triangulo')
if l1 == l2 == l3:
print('Triangulo Equilatero')
elif (l1 == l2 and l2 != l3) or (l2 == l3 and l3 != l1) or (l1 == l3 and l3 != l2):
        print ('Triangulo Isósceles')
else:
        print ('Triangulo Escaleno')
else:
print('Não pode formar triangulo')
| 30.3125 | 87 | 0.550515 | l1 = int ( input ('Primeiro lado : '))
l2 = int ( input('Segundo lado : '))
l3 = int ( input ('Terceiro lado : '))
if l1 + l2 > l3 and l2 + l3 > l1 and l3 + l1 > l2:
print('Pode formar triangulo')
if l1 == l2 == l3:
print('Triangulo Equilatero')
elif (l1 == l2 and l2 != l3) or (l2 == l3 and l3 != l1) or (l1 == l3 and l3 != l2):
        print ('Triangulo Isósceles')
else:
        print ('Triangulo Escaleno')
else:
print('Não pode formar triangulo')
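# Sample session (illustrative): sides 7, 7, 7 print "Pode formar triangulo"
# and "Triangulo Equilatero"; sides 1, 2, 9 print "Não pode formar triangulo".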
| 0 | 0 | 0 |
e97691b2682d90e2f9b018904897717347ed9b40 | 30,775 | py | Python | mirage/libs/ble_utils/sniffle.py | tlechien/mirage | cff987c52e11447064df1f8631593a9baa749fac | [
"MIT"
] | 123 | 2019-11-20T19:53:23.000Z | 2022-03-07T19:51:03.000Z | mirage/libs/ble_utils/sniffle.py | FlorentGaltier/mirage | e25c0d1042fa02b1dff4058b240567f45fd64b4b | [
"MIT"
] | 23 | 2019-10-22T13:53:34.000Z | 2022-03-22T22:22:55.000Z | mirage/libs/ble_utils/sniffle.py | FlorentGaltier/mirage | e25c0d1042fa02b1dff4058b240567f45fd64b4b | [
"MIT"
] | 25 | 2019-11-15T12:13:48.000Z | 2021-12-22T00:21:15.000Z | from serial import Serial,SerialException
from serial.tools.list_ports import comports
from threading import Lock
from queue import Queue
import time,random,struct
from base64 import b64encode, b64decode
from binascii import Error as BAError
from mirage.libs.ble_utils.constants import *
from mirage.libs.ble_utils.scapy_sniffle_layers import *
from mirage.libs import io,utils,wireless
class SniffleDevice(wireless.Device):
'''
	This device allows to communicate with a Sniffle device in order to sniff the Bluetooth Low Energy protocol.
The corresponding interfaces are : ``sniffleX`` (e.g. "sniffle0")
The following capabilities are actually supported :
+-------------------------------------------+----------------+
| Capability | Available ? |
+===========================================+================+
| SCANNING | yes |
+-------------------------------------------+----------------+
| ADVERTISING | yes |
+-------------------------------------------+----------------+
| SNIFFING_ADVERTISEMENTS | yes |
+-------------------------------------------+----------------+
| SNIFFING_NEW_CONNECTION | yes |
+-------------------------------------------+----------------+
| SNIFFING_EXISTING_CONNECTION | no |
+-------------------------------------------+----------------+
| JAMMING_CONNECTIONS | no |
+-------------------------------------------+----------------+
| JAMMING_ADVERTISEMENTS | no |
+-------------------------------------------+----------------+
| INJECTING | no |
+-------------------------------------------+----------------+
| MITMING_EXISTING_CONNECTION | no |
+-------------------------------------------+----------------+
| HIJACKING_MASTER | no |
+-------------------------------------------+----------------+
| HIJACKING_SLAVE | no |
+-------------------------------------------+----------------+
| INITIATING_CONNECTION | yes |
+-------------------------------------------+----------------+
| RECEIVING_CONNECTION | no |
+-------------------------------------------+----------------+
| COMMUNICATING_AS_MASTER | yes |
+-------------------------------------------+----------------+
| COMMUNICATING_AS_SLAVE | no |
+-------------------------------------------+----------------+
| HCI_MONITORING | no |
+-------------------------------------------+----------------+
'''
sharedMethods = [
"getFirmwareVersion",
"getDeviceIndex",
"setCRCChecking",
"setChannel",
"getChannel",
"getConnections",
"switchConnection",
"getCurrentConnection",
"getCurrentHandle",
"isConnected",
"updateConnectionParameters",
"setAddress",
"getAddress",
"setAdvertising",
"setAdvertisingParameters",
"setScanningParameters",
"sniffNewConnections",
"sniffAdvertisements",
"setSweepingMode",
"setScan",
"setScanInterval",
"isSynchronized",
"getAccessAddress",
"getCrcInit",
"getChannelMap",
"getHopInterval",
"getHopIncrement",
]
@classmethod
def findSniffleSniffers(cls,index=None):
'''
This class method allows to find a specific Sniffle device, by providing the device's index.
If no index is provided, it returns a list of every devices found.
If no device has been found, None is returned.
:param index: device's index
:type index: int
:return: string indicating the device
:rtype: str
:Example:
		>>> SniffleDevice.findSniffleSniffers(0)
		'/dev/ttyACM0'
		>>> SniffleDevice.findSniffleSniffers()
['/dev/ttyACM0','/dev/ttyACM1']
'''
		# Match the Sniffle dongle either by its hwid string (older pyserial
		# versions yield plain tuples) or by its USB VID/PID.
		sniffleList = sorted([i[0] for i in comports() if
				(isinstance(i,tuple) and "VID:PID=0451:BEF3" in i[-1]) or
				(i.vid == 0x0451 and i.pid == 0xBEF3)
				])
if index is None:
return sniffleList
else:
try:
sniffle = sniffleList[index]
except IndexError:
return None
return sniffle
return None
def isConnected(self):
'''
This method returns a boolean indicating if the device is connected.
:return: boolean indicating if the device is connected
:rtype: bool
:Example:
>>> device.isConnected()
True
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.connected
def getAccessAddress(self):
'''
This method returns the access address actually in use.
:return: access address
:rtype: int
:Example:
>>> hex(device.getAccessAddress())
'0xe5e296e9'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.accessAddress
def getCrcInit(self):
'''
This method returns the CRCInit actually in use.
:return: CRCInit
:rtype: int
:Example:
>>> hex(device.getCrcInit())
'0x0bd54a'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.crcInit
def getChannelMap(self):
'''
This method returns the Channel Map actually in use.
:return: Channel Map
:rtype: int
:Example:
>>> hex(device.getChannelMap())
'0x1fffffffff'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.channelMap
def getHopInterval(self):
'''
This method returns the Hop Interval actually in use.
:return: Hop Interval
:rtype: int
:Example:
>>> device.getHopInterval()
36
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.hopInterval
def getHopIncrement(self):
'''
This method returns the Hop Increment actually in use.
:return: Hop Increment
:rtype: int
:Example:
>>> device.getHopIncrement()
11
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.hopIncrement
def isSynchronized(self):
'''
This method indicates if the sniffer is actually synchronized with a connection.
:return: boolean indicating if the sniffer is synchronized
:rtype: bool
:Example:
>>> device.isSynchronized()
True
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.synchronized
def getDeviceIndex(self):
'''
This method returns the index of the current Sniffle device.
:return: device's index
:rtype: int
:Example:
>>> device.getDeviceIndex()
0
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.index
def getFirmwareVersion(self):
'''
This method returns the firmware version of the current Sniffle device.
:return: firmware version
		:rtype: tuple of int
:Example:
>>> device.getFirmwareVersion()
(1,5)
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
version = (1,5)
return version
def setCRCChecking(self,enable=True):
'''
This method enables CRC Checking.
:param enable: boolean indicating if CRC Checking must be enabled
:type enable: bool
:Example:
>>> device.setCRCChecking(enable=True) # CRC Checking enabled
>>> device.setCRCChecking(enable=False) # CRC Checking disabled
.. warning::
Sniffle calculates the CRC directly in the firmware, so this command is ignored. It is present in order to provide a similar API to Ubertooth.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.crcEnabled = enable
def setScanInterval(self,seconds=1):
'''
This method allows to provide the scan interval (in second).
:param seconds: number of seconds to wait between two channels
:type seconds: float
:Example:
>>> device.setScanInterval(seconds=1)
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.scanInterval = seconds
def setScan(self,enable=True):
'''
This method enables or disables the scanning mode. It allows to change the channel according to the scan interval parameter.
:param enable: boolean indicating if the scanning mode must be enabled
:type enable: bool
:Example:
>>> device.setScan(enable=True) # scanning mode enabled
>>> device.setScan(enable=False) # scanning mode disabled
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if enable:
self.sniffAdvertisements()
if self.scanThreadInstance is None:
self.scanThreadInstance = wireless.StoppableThread(target=self._scanThread)
self.scanThreadInstance.start()
else:
self.scanThreadInstance.stop()
self.scanThreadInstance = None
def getCurrentHandle(self):
'''
This method returns the connection Handle actually in use.
If no connection is established, its value is equal to -1.
:return: connection Handle
:rtype: int
.. warning::
			This method always returns 1; it allows to provide the same API as the HCI Device.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return 1
def getConnections(self):
'''
This method returns a list of couple (connection handle / address) representing the connections actually established.
		A connection is described by a dictionary containing a handle and an access address : ``{"handle":1, "address":"0x12345678"}``
:return: list of connections established
:rtype: list of dict
:Example:
>>> device.getConnections()
[{'handle':1, 'address':'0x12345678'}]
.. warning::
			The connection handle is always 1; it allows to provide the same API as the HCI Device.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return [{"address":"0x{:08x}".format(self.accessAddress),"handle":1}]
def getCurrentConnection(self):
'''
This method returns the access address associated to the current connection. If no connection is established, it returns None.
:return: access address of the current connection
:rtype: str
:Example:
>>> device.getCurrentConnection()
'0x12345678'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return "0x{:08x}".format(self.accessAddress)
def switchConnection(self,address):
'''
		This method is only provided to expose the same API as an HCI Device; it actually has no effect.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
io.fail("Switching connection not allowed with Sniffle Device !")
def setAdvertisingParameters(self,type = "ADV_IND",destAddr = "00:00:00:00:00:00",data = b"",intervalMin = 200, intervalMax = 210, daType='public', oaType='public'):
'''
This method sets advertising parameters according to the data provided.
It will mainly be used by *ADV_IND-like* packets.
:param type: type of advertisement (*available values :* "ADV_IND", "ADV_DIRECT_IND", "ADV_SCAN_IND", "ADV_NONCONN_IND", "ADV_DIRECT_IND_LOW")
:type type: str
		:param destAddr: destination address (it will be used if needed)
		:type destAddr: str
:param data: data included in the payload
:type data: bytes
:param intervalMin: minimal interval
:type intervalMin: int
:param intervalMax: maximal interval
:type intervalMax: int
:param daType: string indicating the destination address type ("public" or "random")
:type daType: str
		:param oaType: string indicating the origin address type ("public" or "random")
		:type oaType: str
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if type == "ADV_IND":
self.advType = ADV_IND
elif type == "ADV_DIRECT_IND":
self.advType = ADV_DIRECT_IND
elif type == "ADV_SCAN_IND":
self.advType = ADV_SCAN_IND
elif type == "ADV_NONCONN_IND":
self.advType = ADV_NONCONN_IND
elif type == "ADV_DIRECT_IND_LOW":
self.advType = ADV_DIRECT_IND_LOW
else:
io.fail("Advertisements type not recognized, using ADV_IND.")
self.advType = ADV_IND
self.destAddress = None if destAddr == "00:00:00:00:00:00" else destAddr
advData = data
self.advDataLength = len(data) if len(data) <= 31 else 31
if isinstance(data,list):
advData = b""
for i in data:
advData += bytes(i)
data = advData
if isinstance(data,bytes):
advData = b""
if len(data) > 31:
advData = data[:31]
else:
advData = data+(31 - len(data))*b"\x00"
self.advData = advData
self.destAddressType = daType
self.addressType = oaType
self.intervalMin = intervalMin
self.intervalMax = intervalMax
def setScanningParameters(self, data=b""):
'''
This method sets scanning parameters according to the data provided.
It will mainly be used by *SCAN_RESP* packets.
:param data: data to use in *SCAN_RESP*
:type data: bytes
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.scanDataLength = len(data) if len(data) <= 31 else 31
advData = data
if isinstance(data,list):
advData = b""
for i in data:
advData += bytes(i)
data = advData
if isinstance(data,bytes):
advData = b""
if len(data) > 31:
advData = data[:31]
else:
advData = data+(31 - len(data))*b"\x00"
self.scanData = advData
def setSweepingMode(self,enable=True,sequence=[37,38,39]):
'''
This method allows to enable or disable the Sweeping mode. It allows to provide a subset of advertising channels to monitor sequentially.
:param enable: boolean indicating if the Sweeping mode is enabled.
:type enable: bool
:param sequence: sequence of channels to use
:type sequence: list of int
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.sweepingMode = enable
if enable:
if 37 not in sequence or 38 not in sequence or 39 not in sequence:
io.warning("Sniffle doesn't support the sweeping mode with a subset of channels: all three advertising channels are selected.")
self.sweepingSequence = [37,38,39]
def sniffAdvertisements(self,address='FF:FF:FF:FF:FF:FF',channel=None):
'''
This method starts the advertisement sniffing mode.
:param address: selected address - if not provided, no filter is applied (format : "1A:2B:3C:4D:5E:6F")
:type address: str
:param channel: selected channel - if not provided, channel 37 is selected
:type channel: int
:Example:
>>> device.sniffAdvertisements()
>>> device.sniffAdvertisements(channel=38)
>>> device.sniffAdvertisements(address="1A:2B:3C:4D:5E:6F")
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.sniffingMode = BLESniffingMode.ADVERTISEMENT
self.lastTarget = address
self._setFilter(advertisementsOnly=True)
if self.sweepingMode:
self._enableHop()
else:
if channel is None:
channel = 37
self._setConfiguration(channel = channel, accessAddress = 0x8E89BED6, phyMode = "1M", crcInit=0x555555)
if address.upper() == "FF:FF:FF:FF:FF:FF":
self._setMACFilter(mac=None)
else:
self._setMACFilter(mac=address)
def sniffNewConnections(self,address='FF:FF:FF:FF:FF:FF',channel=None):
'''
This method starts the new connections sniffing mode.
:param address: selected address - if not provided, no filter is applied (format : "1A:2B:3C:4D:5E:6F")
:type address: str
:param channel: selected channel - if not provided, channel 37 is selected
:type channel: int
:Example:
>>> device.sniffNewConnections()
>>> device.sniffNewConnections(channel=38)
>>> device.sniffNewConnections(address="1A:2B:3C:4D:5E:6F")
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.sniffingMode = BLESniffingMode.NEW_CONNECTION
self.lastTarget = address
self._setFilter(advertisementsOnly=False)
if self.sweepingMode:
self._enableHop()
else:
if channel is None:
channel = 37
self._setConfiguration(channel = channel, accessAddress = 0x8E89BED6, phyMode = "1M", crcInit=0x555555)
if address.upper() == "FF:FF:FF:FF:FF:FF":
self._setMACFilter(mac=None)
else:
self._setMACFilter(mac=address)
def updateConnectionParameters(self,minInterval=0, maxInterval=0, latency=0, timeout=0,minCe=0, maxCe=0xFFFF):
'''
This method allows to update connection parameters according to the data provided.
It will mainly be used if an incoming BLEConnectionParameterUpdateRequest is received.
:param minInterval: minimal interval
:type minInterval: int
:param maxInterval: maximal interval
:type maxInterval: int
:param latency: connection latency
:type latency: int
:param timeout: connection timeout
:type timeout: int
:param minCe: minimum connection event length
:type minCe: int
:param maxCe: maximum connection event length
:type maxCe: int
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
pass
def getAddressMode(self):
'''
This method returns the address mode currently in use.
:return: address mode ("public" or "random")
:rtype: str
:Example:
>>> device.getAddressMode()
'public'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.addressType
def setAddress(self,address,random=False):
'''
This method allows to modify the BD address and the BD address type of the device, if it is possible.
:param address: new BD address
:type address: str
:param random: boolean indicating if the address is random
:type random: bool
:return: boolean indicating if the operation was successful
:rtype: bool
:Example:
>>> device.setAddress("11:22:33:44:55:66")
True
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.address = address.upper()
self.addressType = "random" if random else "public"
return True
def getAddress(self):
'''
This method returns the actual BD address of the device.
:return: str indicating the BD address
:rtype: str
:Example:
>>> device.getAddress()
'1A:2B:3C:4D:5E:6F'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.address.upper()
def setAdvertising(self,enable=True):
'''
This method enables or disables the advertising mode.
:param enable: boolean indicating if the advertising mode must be enabled
:type enable: bool
:Example:
>>> device.setAdvertising(enable=True) # advertising mode enabled
>>> device.setAdvertising(enable=False) # advertising mode disabled
.. warning::
Please note that if no advertising and scanning data has been provided before this function call, nothing will be advertised. You have to set the scanning Parameters and the advertising Parameters before calling this method.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if enable:
self._setConfiguration(channel = 37, accessAddress = 0x8E89BED6, phyMode = "1M", crcInit=0x555555)
self._setPauseWhenDone(True)
self._setFilter(advertisementsOnly=True)
self._setMACFilter(mac=None)
self._setAddress(address=self.address,addressType=0x01 if self.addressType == "random" else 0x00)
self._setAdvertisingInterval(interval=self.intervalMin)
self._advertise(bytes([self.advDataLength])+self.advData,bytes([self.scanDataLength])+self.scanData)
else:
self._reset()
def getChannel(self):
'''
This method returns the channel actually in use.
:return: channel in use
:rtype: int
:Example:
>>> device.getChannel()
37
>>> device.setChannel(channel=38)
>>> device.getChannel()
38
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.channel
def setChannel(self, channel=37):
'''
This method changes the channel actually in use by the provided channel.
:param channel: new channel
:type channel: int
:Example:
>>> device.getChannel()
37
>>> device.setChannel(channel=38)
>>> device.getChannel()
38
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if channel is not None and channel != self.channel:
self._setConfiguration(channel = channel, accessAddress = 0x8E89BED6, phyMode = "1M", crcInit=0x555555)
| 27.875906 | 227 | 0.668692 | from serial import Serial,SerialException
from serial.tools.list_ports import comports
from threading import Lock
from queue import Queue
import time,random,struct
from base64 import b64encode, b64decode
from binascii import Error as BAError
from mirage.libs.ble_utils.constants import *
from mirage.libs.ble_utils.scapy_sniffle_layers import *
from mirage.libs import io,utils,wireless
class SniffleDevice(wireless.Device):
'''
	This device allows to communicate with a Sniffle device in order to sniff the Bluetooth Low Energy protocol.
The corresponding interfaces are : ``sniffleX`` (e.g. "sniffle0")
The following capabilities are actually supported :
+-------------------------------------------+----------------+
| Capability | Available ? |
+===========================================+================+
| SCANNING | yes |
+-------------------------------------------+----------------+
| ADVERTISING | yes |
+-------------------------------------------+----------------+
| SNIFFING_ADVERTISEMENTS | yes |
+-------------------------------------------+----------------+
| SNIFFING_NEW_CONNECTION | yes |
+-------------------------------------------+----------------+
| SNIFFING_EXISTING_CONNECTION | no |
+-------------------------------------------+----------------+
| JAMMING_CONNECTIONS | no |
+-------------------------------------------+----------------+
| JAMMING_ADVERTISEMENTS | no |
+-------------------------------------------+----------------+
| INJECTING | no |
+-------------------------------------------+----------------+
| MITMING_EXISTING_CONNECTION | no |
+-------------------------------------------+----------------+
| HIJACKING_MASTER | no |
+-------------------------------------------+----------------+
| HIJACKING_SLAVE | no |
+-------------------------------------------+----------------+
| INITIATING_CONNECTION | yes |
+-------------------------------------------+----------------+
| RECEIVING_CONNECTION | no |
+-------------------------------------------+----------------+
| COMMUNICATING_AS_MASTER | yes |
+-------------------------------------------+----------------+
| COMMUNICATING_AS_SLAVE | no |
+-------------------------------------------+----------------+
| HCI_MONITORING | no |
+-------------------------------------------+----------------+
'''
sharedMethods = [
"getFirmwareVersion",
"getDeviceIndex",
"setCRCChecking",
"setChannel",
"getChannel",
"getConnections",
"switchConnection",
"getCurrentConnection",
"getCurrentHandle",
"isConnected",
"updateConnectionParameters",
"setAddress",
"getAddress",
"setAdvertising",
"setAdvertisingParameters",
"setScanningParameters",
"sniffNewConnections",
"sniffAdvertisements",
"setSweepingMode",
"setScan",
"setScanInterval",
"isSynchronized",
"getAccessAddress",
"getCrcInit",
"getChannelMap",
"getHopInterval",
"getHopIncrement",
]
@classmethod
def findSniffleSniffers(cls,index=None):
'''
This class method allows to find a specific Sniffle device, by providing the device's index.
If no index is provided, it returns a list of every devices found.
If no device has been found, None is returned.
:param index: device's index
:type index: int
:return: string indicating the device
:rtype: str
:Example:
		>>> SniffleDevice.findSniffleSniffers(0)
		'/dev/ttyACM0'
		>>> SniffleDevice.findSniffleSniffers()
['/dev/ttyACM0','/dev/ttyACM1']
'''
		# Match the Sniffle dongle either by its hwid string (older pyserial
		# versions yield plain tuples) or by its USB VID/PID.
		sniffleList = sorted([i[0] for i in comports() if
				(isinstance(i,tuple) and "VID:PID=0451:BEF3" in i[-1]) or
				(i.vid == 0x0451 and i.pid == 0xBEF3)
				])
if index is None:
return sniffleList
else:
try:
sniffle = sniffleList[index]
except IndexError:
return None
return sniffle
return None
def isUp(self):
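		# The device is usable once the serial port is open and init() has run.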
return self.sniffle is not None and self.ready
def _setAccessAddress(self,accessAddress=None):
self.accessAddress = accessAddress
def _setCrcInit(self,crcInit=None):
self.crcInit = crcInit
def _setChannelMap(self,channelMap=None):
self.channelMap = channelMap
def _setHopInterval(self,hopInterval=None):
self.hopInterval = hopInterval
def _getHopInterval(self):
return self.hopInterval
def _setHopIncrement(self,hopIncrement):
self.hopIncrement = hopIncrement
def _getHopIncrement(self):
return self.hopIncrement
def _getChannelMap(self):
return self.channelMap
def _getAccessAddress(self):
return self.accessAddress
def _getCrcInit(self):
return self.crcInit
def _sendCommand(self,command):
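		# Sniffle UART framing: one length byte plus the scapy-built command,
		# base64-encoded and CRLF-terminated.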
cmd = SniffleCommand()/command
#cmd.show()
size = (len(bytes(cmd)) + 3) // 3
uartCommand = b64encode(bytes([size]) + bytes(cmd))
self.lock.acquire()
self.sniffle.write(uartCommand+b"\r\n")
self.lock.release()
def _setPauseWhenDone(self, enabled=False):
command = SnifflePauseWhenDoneCommand(pause_when_done=1 if enabled else 0)
self._sendCommand(command)
def _initCommand(self):
self.sniffle.write(b'@@@@@@@@\r\n')
def _setConfiguration(self,channel = 37, accessAddress = 0x8E89BED6, phyMode = "1M", crcInit=0x555555):
self.channel = channel
command = SniffleSetConfigurationCommand(channel=channel, access_address=accessAddress,phy_mode=phyMode, crc_init=crcInit)
self._sendCommand(command)
def _setMACFilter(self,mac=None):
if mac is None or mac.upper() == "FF:FF:FF:FF:FF:FF":
pkt = SniffleDisableMACFilterCommand()
else:
pkt = SniffleEnableMACFilterCommand(address=mac)
self._sendCommand(pkt)
def _enableHop(self):
command = SniffleEnableAdvertisementsHoppingCommand()
self._sendCommand(command)
def _reset(self):
command = SniffleResetCommand()
self._sendCommand(command)
def _setAddress(self,address,addressType='public'):
command = SniffleSetAddressCommand(address=address, address_type=addressType)
self._sendCommand(command)
def _setAdvertisingInterval(self,interval=200):
command = SniffleAdvertiseIntervalCommand(interval=interval)
self._sendCommand(command)
def _advertise(self,advertisingData=b"",scanRspData=b""):
command = SniffleAdvertiseCommand(adv_data=advertisingData,scan_resp_data=scanRspData)
self._sendCommand(command)
def _setFilter(self,advertisementsOnly=False):
command = SniffleFollowCommand(follow="advertisements_only" if advertisementsOnly else "all")
self._sendCommand(command)
def _sendConnectionRequest(self, address="00:00:00:00:00:00", addressType="public"):
accessAddress = random.randint(0,(2**32)-1)
crcInit = random.randint(0,(2**24)-1)
channelMap = 0x1fffffffff
hopIncrement = 5
hopInterval = 24
command = SniffleConnectCommand(
address_type=0x00 if addressType == "public" else 0x01,
address=address,
AA=accessAddress,
crc_init=crcInit,
win_size=3,
win_offset=random.randint(5,15),
interval=hopInterval,
latency=1,
timeout=50,
chM=channelMap,
SCA=0,
hop=hopIncrement
)
self._setAccessAddress(accessAddress)
self._setCrcInit(crcInit)
self._setChannelMap(channelMap)
self._setHopInterval(hopInterval)
self._setHopIncrement(hopIncrement)
self._sendCommand(command)
def _initiateConnection(self, address="00:00:00:00:00:00", addressType="public"):
self._reset()
self._setConfiguration(channel = 37, accessAddress = 0x8E89BED6, phyMode = "1M", crcInit=0x555555)
self._setPauseWhenDone(True)
self._setFilter(advertisementsOnly=True)
self._setMACFilter(mac=None)
self._setAddress(address=self.address,addressType=0x01 if self.addressType == "random" else 0x00)
self._sendConnectionRequest(address,addressType)
def _flush(self):
self.lock.acquire()
self.sniffle.flush()
self.lock.release()
def _transmit(self,pkt):
command = SniffleTransmitCommand(ble_payload=pkt[BTLE_DATA:])
self._sendCommand(command)
def _enterListening(self):
self.isListening = True
def _exitListening(self):
self.isListening = False
def _isListening(self):
return self.isListening
def isConnected(self):
'''
This method returns a boolean indicating if the device is connected.
:return: boolean indicating if the device is connected
:rtype: bool
:Example:
>>> device.isConnected()
True
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.connected
def getAccessAddress(self):
'''
This method returns the access address actually in use.
:return: access address
:rtype: int
:Example:
>>> hex(device.getAccessAddress())
'0xe5e296e9'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.accessAddress
def getCrcInit(self):
'''
This method returns the CRCInit actually in use.
:return: CRCInit
:rtype: int
:Example:
>>> hex(device.getCrcInit())
'0x0bd54a'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.crcInit
def getChannelMap(self):
'''
This method returns the Channel Map actually in use.
:return: Channel Map
:rtype: int
:Example:
>>> hex(device.getChannelMap())
'0x1fffffffff'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.channelMap
def getHopInterval(self):
'''
This method returns the Hop Interval actually in use.
:return: Hop Interval
:rtype: int
:Example:
>>> device.getHopInterval()
36
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.hopInterval
def getHopIncrement(self):
'''
This method returns the Hop Increment actually in use.
:return: Hop Increment
:rtype: int
:Example:
>>> device.getHopIncrement()
11
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.hopIncrement
def isSynchronized(self):
'''
This method indicates if the sniffer is actually synchronized with a connection.
:return: boolean indicating if the sniffer is synchronized
:rtype: bool
:Example:
>>> device.isSynchronized()
True
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.synchronized
def getDeviceIndex(self):
'''
This method returns the index of the current Sniffle device.
:return: device's index
:rtype: int
:Example:
>>> device.getDeviceIndex()
0
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.index
def getFirmwareVersion(self):
'''
This method returns the firmware version of the current Sniffle device.
:return: firmware version
		:rtype: tuple of int
:Example:
>>> device.getFirmwareVersion()
(1,5)
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
version = (1,5)
return version
def setCRCChecking(self,enable=True):
'''
This method enables CRC Checking.
:param enable: boolean indicating if CRC Checking must be enabled
:type enable: bool
:Example:
>>> device.setCRCChecking(enable=True) # CRC Checking enabled
>>> device.setCRCChecking(enable=False) # CRC Checking disabled
.. warning::
Sniffle calculates the CRC directly in the firmware, so this command is ignored. It is present in order to provide a similar API to Ubertooth.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.crcEnabled = enable
def setScanInterval(self,seconds=1):
'''
This method allows to provide the scan interval (in second).
:param seconds: number of seconds to wait between two channels
:type seconds: float
:Example:
>>> device.setScanInterval(seconds=1)
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.scanInterval = seconds
def _scanThread(self):
self.setChannel(37)
utils.wait(seconds=self.scanInterval)
self.setChannel(38)
utils.wait(seconds=self.scanInterval)
self.setChannel(39)
utils.wait(seconds=self.scanInterval)
def setScan(self,enable=True):
'''
This method enables or disables the scanning mode. It allows to change the channel according to the scan interval parameter.
:param enable: boolean indicating if the scanning mode must be enabled
:type enable: bool
:Example:
>>> device.setScan(enable=True) # scanning mode enabled
>>> device.setScan(enable=False) # scanning mode disabled
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if enable:
self.sniffAdvertisements()
if self.scanThreadInstance is None:
self.scanThreadInstance = wireless.StoppableThread(target=self._scanThread)
self.scanThreadInstance.start()
else:
self.scanThreadInstance.stop()
self.scanThreadInstance = None
def getCurrentHandle(self):
'''
This method returns the connection Handle actually in use.
If no connection is established, its value is equal to -1.
:return: connection Handle
:rtype: int
.. warning::
			This method always returns 1; it allows to provide the same API as the HCI Device.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return 1
def getConnections(self):
'''
This method returns a list of couple (connection handle / address) representing the connections actually established.
		A connection is described by a dictionary containing a handle and an access address : ``{"handle":1, "address":"0x12345678"}``
:return: list of connections established
:rtype: list of dict
:Example:
>>> device.getConnections()
[{'handle':1, 'address':'0x12345678'}]
.. warning::
			The connection handle is always 1; it allows to provide the same API as the HCI Device.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return [{"address":"0x{:08x}".format(self.accessAddress),"handle":1}]
def getCurrentConnection(self):
'''
This method returns the access address associated to the current connection. If no connection is established, it returns None.
:return: access address of the current connection
:rtype: str
:Example:
>>> device.getCurrentConnection()
'0x12345678'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return "0x{:08x}".format(self.accessAddress)
def switchConnection(self,address):
'''
		This method is only provided to expose the same API as an HCI Device; it actually has no effect.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
io.fail("Switching connection not allowed with Sniffle Device !")
def close(self):
self.lock.acquire()
self.sniffle.close()
self.sniffle = None
self.lock.release()
def setAdvertisingParameters(self,type = "ADV_IND",destAddr = "00:00:00:00:00:00",data = b"",intervalMin = 200, intervalMax = 210, daType='public', oaType='public'):
'''
This method sets advertising parameters according to the data provided.
It will mainly be used by *ADV_IND-like* packets.
:param type: type of advertisement (*available values :* "ADV_IND", "ADV_DIRECT_IND", "ADV_SCAN_IND", "ADV_NONCONN_IND", "ADV_DIRECT_IND_LOW")
:type type: str
		:param destAddr: destination address (it will be used if needed)
		:type destAddr: str
:param data: data included in the payload
:type data: bytes
:param intervalMin: minimal interval
:type intervalMin: int
:param intervalMax: maximal interval
:type intervalMax: int
:param daType: string indicating the destination address type ("public" or "random")
:type daType: str
		:param oaType: string indicating the origin address type ("public" or "random")
		:type oaType: str
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if type == "ADV_IND":
self.advType = ADV_IND
elif type == "ADV_DIRECT_IND":
self.advType = ADV_DIRECT_IND
elif type == "ADV_SCAN_IND":
self.advType = ADV_SCAN_IND
elif type == "ADV_NONCONN_IND":
self.advType = ADV_NONCONN_IND
elif type == "ADV_DIRECT_IND_LOW":
self.advType = ADV_DIRECT_IND_LOW
else:
io.fail("Advertisements type not recognized, using ADV_IND.")
self.advType = ADV_IND
self.destAddress = None if destAddr == "00:00:00:00:00:00" else destAddr
advData = data
self.advDataLength = len(data) if len(data) <= 31 else 31
if isinstance(data,list):
advData = b""
for i in data:
advData += bytes(i)
data = advData
if isinstance(data,bytes):
advData = b""
if len(data) > 31:
advData = data[:31]
else:
advData = data+(31 - len(data))*b"\x00"
self.advData = advData
self.destAddressType = daType
self.addressType = oaType
self.intervalMin = intervalMin
self.intervalMax = intervalMax
def setScanningParameters(self, data=b""):
'''
This method sets scanning parameters according to the data provided.
It will mainly be used by *SCAN_RESP* packets.
:param data: data to use in *SCAN_RESP*
:type data: bytes
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.scanDataLength = len(data) if len(data) <= 31 else 31
advData = data
if isinstance(data,list):
advData = b""
for i in data:
advData += bytes(i)
data = advData
if isinstance(data,bytes):
advData = b""
if len(data) > 31:
advData = data[:31]
else:
advData = data+(31 - len(data))*b"\x00"
self.scanData = advData
def _recv(self):
self.lock.acquire()
if self.sniffle is not None:
try:
self.receptionBuffer = self.sniffle.readline()
except:
self.receptionBuffer = b""
self.lock.release()
if self.receptionBuffer[-1:] == b"\n":
try:
data = b64decode(self.receptionBuffer.rstrip())
return SniffleResponse(data)
except:
return None
def recv(self):
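		# Poll one base64-framed message from the UART; track connection state
		# from CONNECT_REQ / LL_TERMINATE_IND and wrap payloads in BTLE_PPI.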
self._enterListening()
pkt = self._recv()
self._exitListening()
timestamp = time.time()
ts_sec = int(timestamp)
ts_usec = int((timestamp - ts_sec)*1000000)
if pkt is not None:
if pkt.response_type == 0x10 and BTLE_DATA in pkt.ble_payload:
pass
packet = pkt.ble_payload if hasattr(pkt, "ble_payload") else None
if packet is not None and BTLE_CONNECT_REQ in packet or hasattr(packet,"PDU_type") and packet.PDU_type == 5:
self._setAccessAddress(struct.unpack(">I",struct.pack("<I",packet.AA))[0])
self._setCrcInit(struct.unpack(">I",b"\x00" + struct.pack('<I',packet.crc_init)[:3])[0])
self._setChannelMap(packet.chM)
self._setHopInterval(packet.interval)
self._setHopIncrement(packet.hop)
self.synchronized = True
if packet is not None and BTLE_DATA in packet and packet.LLID == 3 and packet.opcode == 0x02:
self.synchronized = False
self._setAccessAddress(None)
self._setCrcInit(None)
self._setChannelMap(None)
self._setHopInterval(None)
self._setHopIncrement(None)
if pkt.response_type == 0x10 and hasattr(pkt, "ble_payload"):
return BTLE_PPI(
btle_channel=pkt.channel,
btle_clkn_high=ts_sec,
btle_clk_100ns=ts_usec,
rssi_max=pkt.rssi,
rssi_min=pkt.rssi,
rssi_avg=pkt.rssi,
rssi_count=1)/pkt.ble_payload
elif pkt.response_type == 0x13:
if pkt.state == 0x06: # "MASTER"
self.connected = True
io.info('Connection established !')
elif self.connected:
self.connected = False
io.fail('Connection lost !')
self._setAccessAddress(None)
self._setCrcInit(None)
self._setChannelMap(None)
self._setHopInterval(None)
self._setHopIncrement(None)
return (BTLE_PPI(btle_channel=0,
btle_clkn_high=ts_sec,
btle_clk_100ns=ts_usec,
rssi_max=0,
rssi_min=0,
rssi_avg=0,
rssi_count=1)/BTLE_DATA()/BTLE_CTRL()/LL_TERMINATE_IND(code=0x24))
else:
pass
#io.warning(" [DEBUG:"+str(timestamp)+"|"+(pkt.message.decode("latin-1") if hasattr(pkt,"message") else "?")+"]")
def setSweepingMode(self,enable=True,sequence=[37,38,39]):
'''
This method allows to enable or disable the Sweeping mode. It allows to provide a subset of advertising channels to monitor sequentially.
:param enable: boolean indicating if the Sweeping mode is enabled.
:type enable: bool
:param sequence: sequence of channels to use
:type sequence: list of int
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.sweepingMode = enable
if enable:
if 37 not in sequence or 38 not in sequence or 39 not in sequence:
io.warning("Sniffle doesn't support the sweeping mode with a subset of channels: all three advertising channels are selected.")
self.sweepingSequence = [37,38,39]
def sniffAdvertisements(self,address='FF:FF:FF:FF:FF:FF',channel=None):
'''
This method starts the advertisement sniffing mode.
:param address: selected address - if not provided, no filter is applied (format : "1A:2B:3C:4D:5E:6F")
:type address: str
:param channel: selected channel - if not provided, channel 37 is selected
:type channel: int
:Example:
>>> device.sniffAdvertisements()
>>> device.sniffAdvertisements(channel=38)
>>> device.sniffAdvertisements(address="1A:2B:3C:4D:5E:6F")
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.sniffingMode = BLESniffingMode.ADVERTISEMENT
self.lastTarget = address
self._setFilter(advertisementsOnly=True)
if self.sweepingMode:
self._enableHop()
else:
if channel is None:
channel = 37
self._setConfiguration(channel = channel, accessAddress = 0x8E89BED6, phyMode = "1M", crcInit=0x555555)
if address.upper() == "FF:FF:FF:FF:FF:FF":
self._setMACFilter(mac=None)
else:
self._setMACFilter(mac=address)
def sniffNewConnections(self,address='FF:FF:FF:FF:FF:FF',channel=None):
'''
This method starts the new connections sniffing mode.
:param address: selected address - if not provided, no filter is applied (format : "1A:2B:3C:4D:5E:6F")
:type address: str
:param channel: selected channel - if not provided, channel 37 is selected
:type channel: int
:Example:
>>> device.sniffNewConnections()
>>> device.sniffNewConnections(channel=38)
>>> device.sniffNewConnections(address="1A:2B:3C:4D:5E:6F")
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.sniffingMode = BLESniffingMode.NEW_CONNECTION
self.lastTarget = address
self._setFilter(advertisementsOnly=False)
if self.sweepingMode:
self._enableHop()
else:
if channel is None:
channel = 37
self._setConfiguration(channel = channel, accessAddress = 0x8E89BED6, phyMode = "1M", crcInit=0x555555)
if address.upper() == "FF:FF:FF:FF:FF:FF":
self._setMACFilter(mac=None)
else:
self._setMACFilter(mac=address)
def updateConnectionParameters(self,minInterval=0, maxInterval=0, latency=0, timeout=0,minCe=0, maxCe=0xFFFF):
'''
This method allows to update connection parameters according to the data provided.
It will mainly be used if an incoming BLEConnectionParameterUpdateRequest is received.
:param minInterval: minimal interval
:type minInterval: int
:param maxInterval: maximal interval
:type maxInterval: int
:param latency: connection latency
:type latency: int
:param timeout: connection timeout
:type timeout: int
:param minCe: minimum connection event length
:type minCe: int
:param maxCe: maximum connection event length
:type maxCe: int
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
pass
def getAddressMode(self):
'''
This method returns the address mode currently in use.
:return: address mode ("public" or "random")
:rtype: str
:Example:
>>> device.getAddressMode()
'public'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.addressType
def setAddress(self,address,random=False):
'''
This method allows to modify the BD address and the BD address type of the device, if it is possible.
:param address: new BD address
:type address: str
:param random: boolean indicating if the address is random
:type random: bool
:return: boolean indicating if the operation was successful
:rtype: bool
:Example:
>>> device.setAddress("11:22:33:44:55:66")
True
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.address = address.upper()
self.addressType = "random" if random else "public"
return True
def getAddress(self):
'''
This method returns the actual BD address of the device.
:return: str indicating the BD address
:rtype: str
:Example:
>>> device.getAddress()
'1A:2B:3C:4D:5E:6F'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.address.upper()
def setAdvertising(self,enable=True):
'''
This method enables or disables the advertising mode.
:param enable: boolean indicating if the advertising mode must be enabled
:type enable: bool
:Example:
>>> device.setAdvertising(enable=True) # advertising mode enabled
>>> device.setAdvertising(enable=False) # advertising mode disabled
.. warning::
Please note that if no advertising and scanning data has been provided before this function call, nothing will be advertised. You have to set the scanning Parameters and the advertising Parameters before calling this method.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if enable:
self._setConfiguration(channel = 37, accessAddress = 0x8E89BED6, phyMode = "1M", crcInit=0x555555)
self._setPauseWhenDone(True)
self._setFilter(advertisementsOnly=True)
self._setMACFilter(mac=None)
self._setAddress(address=self.address,addressType=0x01 if self.addressType == "random" else 0x00)
self._setAdvertisingInterval(interval=self.intervalMin)
self._advertise(bytes([self.advDataLength])+self.advData,bytes([self.scanDataLength])+self.scanData)
else:
self._reset()
def getChannel(self):
'''
This method returns the channel actually in use.
:return: channel in use
:rtype: int
:Example:
>>> device.getChannel()
37
>>> device.setChannel(channel=38)
>>> device.getChannel()
38
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.channel
def setChannel(self, channel=37):
'''
This method changes the channel actually in use by the provided channel.
:param channel: new channel
:type channel: int
:Example:
>>> device.getChannel()
37
>>> device.setChannel(channel=38)
>>> device.getChannel()
38
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if channel is not None and channel != self.channel:
self._setConfiguration(channel = channel, accessAddress = 0x8E89BED6, phyMode = "1M", crcInit=0x555555)
def send(self,pkt):
		if BTLE_CONNECT_REQ in pkt:  # connection request
self._initiateConnection(pkt.AdvA,("public" if pkt.RxAdd == 0 else "random" ))
else:
self._transmit(pkt)
def init(self):
if self.sniffle is not None:
self.capabilities = ["SNIFFING_ADVERTISEMENTS", "SNIFFING_NEW_CONNECTION","SCANNING","ADVERTISING","COMMUNICATING_AS_MASTER","INITIATING_CONNECTION"]
self.lastTarget = "FF:FF:FF:FF:FF:FF"
self.lock = Lock()
self.isListening = False
self.crcEnabled = True
self.receptionBuffer = b""
self.packetCounter = 1
self.synchronized = False
self.scanMode = False
self.connected = False
self.sweepingMode = False
self.sweepingSequence = []
self.intervalMin = 200
self.intervalMax = 210
self.addressType = 'public'
self.destAddressType = 'public'
self.advData = b""
self.advDataLength = 0
self.scanData = b""
self.scanDataLength = 0
self.address = "11:22:33:44:55:66"
self.destAddress = "FF:FF:FF:FF:FF:FF"
self.sniffingMode = BLESniffingMode.NEW_CONNECTION
version = self.getFirmwareVersion()
io.success("Sniffle device "+("#"+str(self.index) if isinstance(self.index,int) else str(self.index))+
" successfully instantiated (firmware version : "+str(version[0])+"."+str(version[1])+")")
self.channel = None
self.setScanInterval(seconds=2)
self.scanThreadInstance = None
self._flush()
self._reset()
self.ready = True
def __init__(self,interface):
super().__init__(interface=interface)
customPort = None
if "sniffle" == interface:
self.index = 0
self.interface = "sniffle0"
elif "sniffle" == interface[:7]:
if ":" in interface:
fields = interface.split(":")
customPort = fields[1]
self.index = customPort
else:
self.index = int(interface.split("sniffle")[1])
self.interface = interface
if customPort is None:
self.sniffle = SniffleDevice.findSniffleSniffers(self.index)
else:
self.sniffle = customPort
if self.sniffle is not None:
try:
self.sniffle = Serial(port = self.sniffle, baudrate=2000000, timeout=0.01)
self.ready = False
except SerialException:
io.fail("Serial communication not ready !")
self.ready = False
				self.sniffle = None
else:
io.fail("No Sniffle Sniffer device found !")
self.ready = False
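# Hedged usage sketch (illustrative; assumes a dongle enumerated as "sniffle0"
# and the surrounding Mirage emitter/receiver plumbing, which is omitted here):
#
#   device = SniffleDevice(interface="sniffle0")
#   device.init()
#   if device.isUp():
#       device.sniffAdvertisements(channel=37)
#       pkt = device.recv()   # a BTLE_PPI-wrapped scapy packet, or None
#       device.close()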
| 8,460 | 0 | 863 |
eab9e5108ae651e1fab0ae9b20fb7d94dba94f01 | 584 | py | Python | ibsng/handler/invoice/get_invoice_profile_by_group_name.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 6 | 2018-03-06T10:16:36.000Z | 2021-12-05T12:43:10.000Z | ibsng/handler/invoice/get_invoice_profile_by_group_name.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 3 | 2018-03-06T10:27:08.000Z | 2022-01-02T15:21:27.000Z | ibsng/handler/invoice/get_invoice_profile_by_group_name.py | ParspooyeshFanavar/pyibsng | d48bcf4f25e3f23461528bf0ff8870cc3d537444 | [
"MIT"
] | 3 | 2018-01-06T16:28:31.000Z | 2018-09-17T19:47:19.000Z | """Get invoice profile by group name API method."""
from ibsng.handler.handler import Handler
class getInvoiceProfileByGroupName(Handler):
"""Get invoice profile by group name method class."""
def control(self):
"""Validate inputs after setup method.
:return: None
:rtype: None
"""
self.is_valid(self.group_name, str)
def setup(self, group_name):
"""Setup required parameters.
:param str group_name: ibsng group name
:return: None
:rtype: None
"""
self.group_name = group_name
| 23.36 | 57 | 0.623288 | """Get invoice profile by group name API method."""
from ibsng.handler.handler import Handler
class getInvoiceProfileByGroupName(Handler):
"""Get invoice profile by group name method class."""
def control(self):
"""Validate inputs after setup method.
:return: None
:rtype: None
"""
self.is_valid(self.group_name, str)
def setup(self, group_name):
"""Setup required parameters.
:param str group_name: ibsng group name
:return: None
:rtype: None
"""
self.group_name = group_name
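# Hedged usage sketch (the handler is normally built and dispatched by the
# pyibsng client plumbing; the group name below is a placeholder):
#
#   handler.setup(group_name="resellers")
#   handler.control()   # raises if group_name is not a str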
| 0 | 0 | 0 |
1f6b559dea637c0d916447233f91601b295b9eac | 1,369 | py | Python | main.py | falseen/shadowsocks-kivy | c39ef797226221091ae95d67fa92605c45355c0e | [
"Apache-2.0"
] | null | null | null | main.py | falseen/shadowsocks-kivy | c39ef797226221091ae95d67fa92605c45355c0e | [
"Apache-2.0"
] | null | null | null | main.py | falseen/shadowsocks-kivy | c39ef797226221091ae95d67fa92605c45355c0e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, \
with_statement
from kivy.app import App
from kivy.clock import Clock
from kivy.core.text import LabelBase
from kivy.core.window import Window
from kivy.utils import get_color_from_hex
from time import strftime
import time
from shadowsocks import local
import ss_local
import threading
from multiprocessing import Process
if __name__ == '__main__':
Window.clearcolor = get_color_from_hex('#45818e')
LabelBase.register(name='Roboto',
fn_regular='res/Roboto-Thin.ttf',
fn_bold='res/Roboto-Medium.ttf')
LabelBase.register(name='simsun',
fn_regular='res/simsun.ttc')
ShadowsocksApp().run()
| 26.326923 | 78 | 0.663988 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, \
with_statement
from kivy.app import App
from kivy.clock import Clock
from kivy.core.text import LabelBase
from kivy.core.window import Window
from kivy.utils import get_color_from_hex
from time import strftime
import time
from shadowsocks import local
import ss_local
import threading
from multiprocessing import Process
class start_ss_local():
    """Launch the shadowsocks local client in a separate child process."""
    @staticmethod
    def run():
        Process(target=ss_local.main).start()
class ShadowsocksApp(App):
sw_started = False
sw_seconds = 0
def on_start(self):
pass
def update(self, nap):
pass
def start_stop(self):
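        # Toggle the button label; note that the proxy process is (re)spawned
        # on every press, including presses that switch the label back to Start.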
self.root.ids.start_stop.text = 'Start' if self.sw_started else 'Stop'
self.sw_started = not self.sw_started
start_ss_local.run()
def reset(self):
if self.sw_started:
self.root.ids.start_stop.text = 'Start'
self.sw_started = False
self.sw_seconds = 0
if __name__ == '__main__':
Window.clearcolor = get_color_from_hex('#45818e')
LabelBase.register(name='Roboto',
fn_regular='res/Roboto-Thin.ttf',
fn_bold='res/Roboto-Medium.ttf')
LabelBase.register(name='simsun',
fn_regular='res/simsun.ttc')
ShadowsocksApp().run()
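# Hedged note: the Kivy .kv layout (not shown in this file) is assumed to
# define the 'start_stop' widget id used above; each press of the button
# forks ss_local.main into a new child process via multiprocessing.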
| 354 | 157 | 72 |
71b43229ebca1d17c0b88b25e927ec69f9a632c9 | 2,567 | py | Python | cpu_load_generator/__main__.py | sirtyman/CPULoadGenerator | 9088ae1a9b1101f8ce7be6ed1df02bad16743bc9 | [
"MIT"
] | 4 | 2021-07-30T23:21:42.000Z | 2021-11-23T11:13:45.000Z | cpu_load_generator/__main__.py | sirtyman/CPULoadGenerator | 9088ae1a9b1101f8ce7be6ed1df02bad16743bc9 | [
"MIT"
] | null | null | null | cpu_load_generator/__main__.py | sirtyman/CPULoadGenerator | 9088ae1a9b1101f8ce7be6ed1df02bad16743bc9 | [
"MIT"
] | 1 | 2021-09-26T13:13:37.000Z | 2021-09-26T13:13:37.000Z | import sys
import argparse
import psutil
from cpu_load_generator import load_all_cores, load_single_core, from_profile
def parse_args(parser):
"""Parse input parameters.
param parser: ArgumentParser object
"""
parser.add_argument('-p', '--path_to_profile_json', type=str, default="",
help='Path to CPU load profile json file. Using with any other arguments is disallowed!')
parser.add_argument('-l', '--cpu_load', type=float, help='Cpu target load.')
parser.add_argument('-d', '--duration', type=int, help='Duration of the load in seconds. Should be higher than 0.')
parser.add_argument('-c', '--cpu_core', type=int, default=0,
help='Select the CPU number on which generate the load. Default is 0.')
parser.set_defaults()
args = parser.parse_args()
return args
def input_error_handler(args, parser):
    """Handle input errors.
    param args: parsed input arguments
    type args: object
    param parser: ArgumentParser instance, used to print help on invalid input
    type parser: object
    """
    cpu_count = psutil.cpu_count()
    if args.path_to_profile_json == "":
        if not args.cpu_core < cpu_count:
            parser.print_help()
            raise ValueError('Core to load should not be higher than {}!'.format(cpu_count - 1))
        if args.duration <= 0:
            parser.print_help()
            raise ValueError('The load duration must be higher than 0!')
        if not 0 < args.cpu_load <= 1.0:
            parser.print_help()
            raise ValueError('CPU load should be a fraction of 1. Range (0; 1].')
    else:
        input_arguments = sys.argv[1:]
        if ('-c' in input_arguments or '--cpu_core' in input_arguments) and \
           ('-p' in input_arguments or '--path_to_profile_json' in input_arguments):
            parser.print_help()
            raise ValueError("Using any other argument in conjunction with path_to_profile_json is disallowed!")
        if args.duration is not None or args.cpu_load is not None:
            parser.print_help()
            raise ValueError("Using any other argument in conjunction with path_to_profile_json is disallowed!")
def main():
"""The main package entry point."""
parser = argparse.ArgumentParser()
args = parse_args(parser)
    input_error_handler(args, parser)
if args.path_to_profile_json != "":
from_profile(args.path_to_profile_json)
else:
if args.cpu_core >= 0:
load_single_core(args.cpu_core, args.duration, args.cpu_load)
else:
load_all_cores(args.duration, args.cpu_load)
if __name__ == "__main__":
main()
| 33.776316 | 119 | 0.64589 | import sys
import argparse
import psutil
from cpu_load_generator import load_all_cores, load_single_core, from_profile
def parse_args(parser):
"""Parse input parameters.
param parser: ArgumentParser object
"""
parser.add_argument('-p', '--path_to_profile_json', type=str, default="",
help='Path to CPU load profile json file. Using with any other arguments is disallowed!')
parser.add_argument('-l', '--cpu_load', type=float, help='Cpu target load.')
parser.add_argument('-d', '--duration', type=int, help='Duration of the load in seconds. Should be higher than 0.')
parser.add_argument('-c', '--cpu_core', type=int, default=0,
help='Select the CPU number on which generate the load. Default is 0.')
parser.set_defaults()
args = parser.parse_args()
return args
def input_error_handler(parser, args):
    """Handle input errors.
    :param parser: ArgumentParser object, used to print usage on error
    :param args: parsed input arguments
    :type args: object
    """
    cpu_count = psutil.cpu_count()
    if args.path_to_profile_json == "":
        if not args.cpu_core < cpu_count:
            parser.print_help()
            raise ValueError('Core to load should not be higher than {}!'.format(cpu_count - 1))
        if args.duration is None or args.duration <= 0:
            parser.print_help()
            raise ValueError('The load duration must be higher than 0!')
        if args.cpu_load is None or not 0 < args.cpu_load <= 1.0:
            parser.print_help()
            raise ValueError('CPU load should be a fraction of 1. Range (0; 1].')
    else:
        input_arguments = sys.argv[1:]
        if ('-c' in input_arguments or '--cpu_core' in input_arguments) and \
                ('-p' in input_arguments or '--path_to_profile_json' in input_arguments):
            parser.print_help()
            raise ValueError("Using any other arguments in conjunction with path_to_profile_json is disallowed!")
        if args.duration is not None or args.cpu_load is not None:
            parser.print_help()
            raise ValueError("Using any other arguments in conjunction with path_to_profile_json is disallowed!")
def main():
"""The main package entry point."""
parser = argparse.ArgumentParser()
args = parse_args(parser)
    input_error_handler(parser, args)
if args.path_to_profile_json != "":
from_profile(args.path_to_profile_json)
else:
        # A negative core index falls through to loading every core.
        if args.cpu_core >= 0:
load_single_core(args.cpu_core, args.duration, args.cpu_load)
else:
load_all_cores(args.duration, args.cpu_load)
if __name__ == "__main__":
main()
| 0 | 0 | 0 |
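# A minimal usage sketch for the package above (not a definitive recipe): the
# CLI flags come from parse_args(), e.g. `python -m cpu_load_generator -l 0.5 -d 10 -c 0`,
# and the call below mirrors main()'s positional invocation. Assumes the
# cpu_load_generator package is installed.
from cpu_load_generator import load_single_core
load_single_core(0, 10, 0.5)  # core 0, 10 seconds, 50% target load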
8879c47f6a35c0727c1560ed74ee6004f696c5b4 | 551 | py | Python | designs/GcdUnit/testbench/generate_test_vectors.py | jbrzozo24/mflowgen | fe168e1ea2311feb35588333aa5d7d7c6ba79625 | [
"BSD-3-Clause"
] | 53 | 2020-11-05T20:13:03.000Z | 2022-03-31T14:51:56.000Z | designs/GcdUnit/testbench/generate_test_vectors.py | jbrzozo24/mflowgen | fe168e1ea2311feb35588333aa5d7d7c6ba79625 | [
"BSD-3-Clause"
] | 27 | 2020-11-04T19:52:38.000Z | 2022-03-17T17:11:01.000Z | designs/GcdUnit/testbench/generate_test_vectors.py | jbrzozo24/mflowgen | fe168e1ea2311feb35588333aa5d7d7c6ba79625 | [
"BSD-3-Clause"
] | 26 | 2020-11-02T18:43:57.000Z | 2022-03-31T14:52:52.000Z | from random import random
import math
import numpy as np
import binascii
import struct
num_vectors = 100
f = open("test_vectors.txt", "w")
def get_hex(x):
    """Return a 16-bit value as four hex characters."""
    return str(binascii.hexlify(struct.pack('>H', x)))[2:6]  # H is for unsigned short -- 16 bits
i = 0
while (i < num_vectors):
a = np.uint16(math.floor(random() * (2**16 - 1)))
b = np.uint16(math.floor(random() * (2**16 - 1)))
if (a != 0) and (b != 0):
c = math.gcd(a, b)
f.write(str(get_hex(c)) + '_' + str(get_hex(a)) + '_' + str(get_hex(b)) + '\n')
i = i + 1
f.close()
| 22.958333 | 94 | 0.593466 | from random import random
import math
import numpy as np
import binascii
import struct
num_vectors = 100
f = open("test_vectors.txt", "w")
def get_hex(x):
    """Return a 16-bit value as four hex characters."""
    return str(binascii.hexlify(struct.pack('>H', x)))[2:6]  # H is for unsigned short -- 16 bits
i = 0
while (i < num_vectors):
a = np.uint16(math.floor(random() * (2**16 - 1)))
b = np.uint16(math.floor(random() * (2**16 - 1)))
if (a != 0) and (b != 0):
c = math.gcd(a, b)
f.write(str(get_hex(c)) + '_' + str(get_hex(a)) + '_' + str(get_hex(b)) + '\n')
i = i + 1
f.close()
| 90 | 0 | 23 |
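# A minimal consumer sketch for the vectors generated above, assuming
# test_vectors.txt has been produced: each line holds gcd, a and b as three
# underscore-separated 4-digit hex fields.
import math
with open("test_vectors.txt") as fp:
    for line in fp:
        c, a, b = (int(field, 16) for field in line.strip().split('_'))
        assert c == math.gcd(a, b)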
14a2c2afb9c59044a6c39bbd0a8f0ba276f110ad | 4,110 | py | Python | src/pyhf_benchmark/plot.py | pyhf/pyhf-benchmark | bc0f91253e8d6d4dbc7205cabf0ec7a9d5402dcf | [
"Apache-2.0"
] | 3 | 2020-05-22T22:50:22.000Z | 2020-06-02T16:28:37.000Z | src/pyhf_benchmark/plot.py | pyhf/pyhf-benchmark | bc0f91253e8d6d4dbc7205cabf0ec7a9d5402dcf | [
"Apache-2.0"
] | 30 | 2020-06-02T16:22:27.000Z | 2020-08-20T04:55:59.000Z | src/pyhf_benchmark/plot.py | pyhf/pyhf-benchmark | bc0f91253e8d6d4dbc7205cabf0ec7a9d5402dcf | [
"Apache-2.0"
] | 1 | 2020-07-28T02:32:58.000Z | 2020-07-28T02:32:58.000Z | import json
import pandas as pd
import time
import matplotlib.pyplot as plt
ylabels = [
"CPU Utilization (%)",
"Disk I/O Utilization (%)",
"Process CPU Threads In Use",
"Network Traffic (bytes)",
"System Memory Utilization (%)",
"Process Memory Available (non-swap) (MB)",
"Process Memory In Use (non-swap) (MB)",
"Process Memory \n In Use (non-swap) (%)",
"GPU Utilization (%)",
"GPU Memory Allocated (%)",
"GPU Time Spent Accessing Memory (%)",
"GPU Temp (℃)",
]
columns = [
"system.cpu",
"system.disk",
"system.proc.cpu.threads",
["network.sent", "system.network.recv"],
"system.memory",
"system.proc.memory.availableMB",
"system.proc.memory.rssMB",
"system.proc.memory.percent",
"system.gpu.0.gpu",
"system.gpu.0.memory",
"system.gpu.0.memoryAllocated",
"system.gpu.0.temp",
]
filenames = [
"CPU_Utilization.png",
"Disk_IO_Utilization.png",
"CPU_Threads.png",
"Network_Traffic.png",
"Memory_Utilization.png",
"Proc_Memory_available.png",
"Proc_Memory_MB.png",
"Proc_Memory_Percent.png",
"GPU_Utilization.png",
"GPU_Memory_Allocated.png",
"GPU_Memory_Time.png",
"GPU_Temp.png",
]
| 29.568345 | 84 | 0.605596 | import json
import pandas as pd
import time
import matplotlib.pyplot as plt
ylabels = [
"CPU Utilization (%)",
"Disk I/O Utilization (%)",
"Process CPU Threads In Use",
"Network Traffic (bytes)",
"System Memory Utilization (%)",
"Process Memory Available (non-swap) (MB)",
"Process Memory In Use (non-swap) (MB)",
"Process Memory \n In Use (non-swap) (%)",
"GPU Utilization (%)",
"GPU Memory Allocated (%)",
"GPU Time Spent Accessing Memory (%)",
"GPU Temp (℃)",
]
columns = [
"system.cpu",
"system.disk",
"system.proc.cpu.threads",
["network.sent", "system.network.recv"],
"system.memory",
"system.proc.memory.availableMB",
"system.proc.memory.rssMB",
"system.proc.memory.percent",
"system.gpu.0.gpu",
"system.gpu.0.memory",
"system.gpu.0.memoryAllocated",
"system.gpu.0.temp",
]
filenames = [
"CPU_Utilization.png",
"Disk_IO_Utilization.png",
"CPU_Threads.png",
"Network_Traffic.png",
"Memory_Utilization.png",
"Proc_Memory_available.png",
"Proc_Memory_MB.png",
"Proc_Memory_Percent.png",
"GPU_Utilization.png",
"GPU_Memory_Allocated.png",
"GPU_Memory_Time.png",
"GPU_Temp.png",
]
def load(directory_name):
    """Load a run's events.jsonl (waiting up to 60 s for it to appear) into a DataFrame."""
path = directory_name / "events.jsonl"
output_dic = {}
clock = 0
while not path.exists():
clock += 1
time.sleep(1)
if clock >= 60:
            raise FileNotFoundError(f"{path} is not found!")
with path.open("r") as json_file:
json_list = list(json_file)
for json_str in json_list:
item = json.loads(json_str)
for key in item.keys():
output_dic.setdefault(key, []).append(item[key])
return pd.DataFrame.from_dict(output_dic)
def load_all(directory_name):
list_of_paths = directory_name.glob("*")
contents = []
backends = []
for path in list_of_paths:
if path.is_dir():
backends.append(str(path)[str(path).rfind("_") + 1 :])
contents.append(load(path))
return contents, backends
def subplot(y_label, column, output, directory, filename):
    """Plot one tracked metric against runtime and save it as a PNG."""
fig, ax = plt.subplots()
x_value = output["_runtime"]
if y_label == "Network Traffic (bytes)":
y_value1 = output.get(column[0], [0] * len(x_value))
y_value2 = output.get(column[1], [0] * len(x_value))
ax.plot(x_value, y_value1, ls="--", label="send")
ax.plot(x_value, y_value2, label="recv")
ax.legend(loc="upper left")
else:
y_value = output.get(column, [0] * len(x_value))
ax.plot(x_value, y_value)
ax.set_xlabel("Time (minutes)")
ax.set_ylabel(y_label)
ax.grid()
fig.savefig(directory / filename)
def subplot_comb(y_label, column, outputs, backends, directory, filename):
fig, ax = plt.subplots()
ax.set_xlabel("Time (minutes)")
ax.set_ylabel(y_label)
ax.grid()
for i, output in enumerate(outputs):
x_value = output["_runtime"]
if y_label == "Network Traffic (bytes)":
y_value1 = output.get(column[0], [0] * len(x_value))
y_value2 = output.get(column[1], [0] * len(x_value))
ax.plot(x_value, y_value1, ls="--", label=backends[i] + "_send")
ax.plot(x_value, y_value2, label=backends[i] + "_recv")
else:
y_value = outputs[i].get(column, [0] * len(x_value))
ax.plot(x_value, y_value, label=backends[i])
ax.legend(loc="upper left")
fig.savefig(directory / filename)
def plot(directory):
    """Render one figure per tracked metric for a single run directory."""
output = load(directory)
idx = 0
while idx < len(ylabels):
subplot(ylabels[idx], columns[idx], output, directory, filenames[idx])
if not "system.gpu.0.gpu" in output and idx >= 7:
break
idx += 1
def plot_comb(directory):
    """Render combined figures overlaying every backend run found in `directory`."""
outputs, backends = load_all(directory)
idx = 0
while idx < len(ylabels):
subplot_comb(
ylabels[idx], columns[idx], outputs, backends, directory, filenames[idx]
)
if not "system.gpu.0.gpu" in outputs[0] and idx >= 7:
break
idx += 1
| 2,738 | 0 | 138 |
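# A minimal driver sketch for the plotting helpers above, assuming the module
# is importable as pyhf_benchmark.plot (per the repo path) and that each run
# directory holds an events.jsonl with `_runtime` plus the system.* columns.
from pathlib import Path
from pyhf_benchmark.plot import plot, plot_comb
plot(Path("runs/example_run"))  # hypothetical path; writes one PNG per metric
plot_comb(Path("runs"))         # overlays every backend subdirectory found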
b853897f1d9e921de0b68dcf2f3f769919affd42 | 4,616 | py | Python | server.py | jseppanen/textpile | 0726961d4f841c664165f4ca9346b4e8029d3bff | [
"Unlicense"
] | null | null | null | server.py | jseppanen/textpile | 0726961d4f841c664165f4ca9346b4e8029d3bff | [
"Unlicense"
] | null | null | null | server.py | jseppanen/textpile | 0726961d4f841c664165f4ca9346b4e8029d3bff | [
"Unlicense"
] | null | null | null | from flask import Flask, request
from flask import Flask, request, session, g, jsonify, \
    redirect, url_for, abort, render_template, flash
import json
app=Flask(__name__)
app.config.from_object(__name__)
app.config.update(DATABASE=app.root_path + '/data/textpile.db')
# log errors in production to uwsgi.log
app.config.update(PROPAGATE_EXCEPTIONS=True)
app.config.from_pyfile(app.root_path + '/config/flask-server.conf')
@app.route('/')
@app.route('/stats')
@app.route('/most_relevant')
@app.route('/tagged/<tag>')
@app.route('/random')
@app.route('/doc/<int:doc_id>', methods=['GET', 'POST'])
@app.route('/login', methods=['GET', 'POST'])
@app.route('/logout')
@app.teardown_appcontext
if __name__=='__main__':
app.run(host='0.0.0.0')
| 30.773333 | 75 | 0.595537 | from flask import Flask, request
from flask import Flask, request, session, g, jsonify, \
    redirect, url_for, abort, render_template, flash
import json
app=Flask(__name__)
app.config.from_object(__name__)
app.config.update(DATABASE=app.root_path + '/data/textpile.db')
# log errors in production to uwsgi.log
app.config.update(PROPAGATE_EXCEPTIONS=True)
app.config.from_pyfile(app.root_path + '/config/flask-server.conf')
@app.route('/')
def index():
if not session.get('logged_in'):
return redirect(url_for('login'))
return render_template('index.html')
@app.route('/stats')
def stats():
results = {}
db = get_db()
sql = '''
SELECT COUNT(*) FROM doc
'''
cur = db.execute(sql)
results['num_docs'] = cur.fetchone()[0]
sql = '''
SELECT tag, COUNT(*)
FROM doc_label JOIN label USING (label_id)
GROUP BY 1
'''
    for tag, num in db.execute(sql):
results['num_' + tag] = num
sql = '''
SELECT value FROM meta WHERE key='last_updated'
'''
cur = db.execute(sql)
results['last_updated'] = cur.fetchone()[0]
return jsonify(results=results)
@app.route('/most_relevant')
def most_relevant():
db = get_db()
offset = request.args.get('offset', 0, type=int)
num = request.args.get('num', 10, type=int)
sql = '''
SELECT a.doc_id, a.relevance, a.explain_json,
b.title, b.body, b.url, b.published_date
FROM doc_relevance a JOIN doc b USING (doc_id)
ORDER BY a.relevance DESC LIMIT ? OFFSET ?
'''
cur = db.execute(sql, [num, offset])
    res = [dict(row) for row in cur.fetchall()]
    for r in res:
        r['explain'] = json.loads(r.pop('explain_json'))
    return jsonify(results=res)
@app.route('/tagged/<tag>')
def tagged(tag):
db = get_db()
offset = request.args.get('offset', 0, type=int)
num = request.args.get('num', 10, type=int)
sql = '''
SELECT doc_id, title, body, url, published_date
FROM doc_label
JOIN doc USING (doc_id)
JOIN label USING (label_id)
WHERE tag = ?
ORDER BY published_date DESC LIMIT ? OFFSET ?
'''
cur = db.execute(sql, [tag, num, offset])
    res = [dict(row) for row in cur.fetchall()]
    return jsonify(results=res)
@app.route('/random')
def random():
db = get_db()
offset = request.args.get('offset', 0, type=int)
num = request.args.get('num', 10, type=int)
sql = '''
SELECT a.doc_id, a.title, a.body, a.url, a.published_date
FROM doc a
LEFT JOIN doc_label b USING (doc_id)
WHERE b.doc_id IS NULL
ORDER BY RANDOM() LIMIT ? OFFSET ?
'''
cur = db.execute(sql, [num, offset])
    res = [dict(row) for row in cur.fetchall()]
    return jsonify(results=res)
@app.route('/doc/<int:doc_id>', methods=['GET', 'POST'])
def doc(doc_id):
db = get_db()
if request.method=='POST':
label = request.form['label']
cur = db.execute('SELECT label_id FROM label WHERE tag = ?',
[label])
label_id, = cur.fetchone()
db.execute('INSERT INTO doc_label (doc_id, label_id) VALUES (?,?)',
[doc_id, label_id])
db.execute('DELETE FROM doc_relevance WHERE doc_id = ?',
[doc_id])
db.commit()
return jsonify()
else:
cur = db.execute('SELECT * FROM doc WHERE doc_id = ?',
[doc_id])
doc = cur.fetchone()
if doc is None:
return jsonify(error='Doc not found: %d' % doc_id), 404
return jsonify(**dict(doc))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('index'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('index'))
def get_db():
    """Open (and cache on `g`) a SQLite connection with row access by name."""
if not hasattr(g, 'db_conn'):
g.db_conn = sqlite3.connect(app.config['DATABASE'])
g.db_conn.row_factory = sqlite3.Row
return g.db_conn
@app.teardown_appcontext
def close_db(error):
if hasattr(g, 'db_conn'):
g.db_conn.close()
if __name__=='__main__':
app.run(host='0.0.0.0')
| 3,625 | 0 | 221 |
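# A smoke-test sketch for the Flask app above, run in the same module as `app`
# via Flask's built-in test client (no real server needed); the credentials are
# placeholders for whatever config/flask-server.conf defines.
client = app.test_client()
client.post('/login', data={'username': 'user', 'password': 'secret'})
print(client.get('/stats').get_json())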
63d72521d6704d4bc2abbd417a787c80caf86200 | 1,491 | py | Python | rx/concurrency/scheduleditem.py | daliclass/RxPY | d3ff1b72963fd08341807986d49480351015165e | [
"MIT"
] | null | null | null | rx/concurrency/scheduleditem.py | daliclass/RxPY | d3ff1b72963fd08341807986d49480351015165e | [
"MIT"
] | null | null | null | rx/concurrency/scheduleditem.py | daliclass/RxPY | d3ff1b72963fd08341807986d49480351015165e | [
"MIT"
] | null | null | null | from datetime import datetime
from typing import Generic, Optional
from rx.core import typing
from rx.disposable import SingleAssignmentDisposable
from .schedulerbase import SchedulerBase
| 33.133333 | 86 | 0.669349 | from datetime import datetime
from typing import Generic, Optional
from rx.core import typing
from rx.disposable import SingleAssignmentDisposable
from .schedulerbase import SchedulerBase
class ScheduledItem(Generic[typing.TState]):  # pylint: disable=unsubscriptable-object
    """A scheduled unit of work: an action plus its state and due time.

    Items compare by due time so they can be kept in a priority queue.
    """
def __init__(self,
scheduler: SchedulerBase,
state: Optional[typing.TState],
action: typing.ScheduledAction,
duetime: datetime
) -> None:
self.scheduler: SchedulerBase = scheduler
self.state: Optional[typing.TState] = state
self.action: typing.ScheduledAction = action
self.duetime: datetime = duetime
self.disposable: SingleAssignmentDisposable = SingleAssignmentDisposable()
    def invoke(self) -> None:
        """Invoke the scheduled action and keep the disposable it returns."""
ret = self.scheduler.invoke_action(self.action, state=self.state)
self.disposable.disposable = ret
def cancel(self) -> None:
"""Cancels the work item by disposing the resource returned by
invoke_core as soon as possible."""
self.disposable.dispose()
    def is_cancelled(self) -> bool:
        """Return True if this work item has already been cancelled."""
return self.disposable.is_disposed
def __lt__(self, other: 'ScheduledItem') -> bool:
return self.duetime < other.duetime
def __gt__(self, other: 'ScheduledItem') -> bool:
return self.duetime > other.duetime
def __eq__(self, other: 'ScheduledItem') -> bool:
return self.duetime == other.duetime
| 869 | 408 | 23 |
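# An ordering sketch for ScheduledItem: because __lt__/__gt__ compare duetime,
# items can sit directly in a heap-based priority queue. Assumes RxPY with this
# module is importable; the scheduler and state arguments are stubbed with None
# since only the ordering is exercised here.
import heapq
from datetime import datetime, timedelta
from rx.concurrency.scheduleditem import ScheduledItem
now = datetime.now()
heap = [ScheduledItem(None, None, lambda sched, state: None, now + timedelta(seconds=s))
        for s in (5, 1, 3)]
heapq.heapify(heap)  # orders by __lt__, i.e. by due time
assert heapq.heappop(heap).duetime == now + timedelta(seconds=1)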
9bf588687f45bf8bcc4b0da77a026b295a433874 | 7,253 | py | Python | src/Utils.py | borismus/lightning | e33173a1d9905004bbb789d36c8622cd577b6b40 | [
"Apache-2.0"
] | 14 | 2015-01-10T12:40:16.000Z | 2021-11-08T10:21:58.000Z | src/Utils.py | borismus/lightning | e33173a1d9905004bbb789d36c8622cd577b6b40 | [
"Apache-2.0"
] | null | null | null | src/Utils.py | borismus/lightning | e33173a1d9905004bbb789d36c8622cd577b6b40 | [
"Apache-2.0"
] | 3 | 2017-03-16T00:08:26.000Z | 2018-07-16T04:55:20.000Z | import datetime
from dateutil import parser
from itertools import tee
from jinja2 import Template, Environment, FileSystemLoader
import os
import re
import shutil
import time
import warnings
import yaml
DEFAULT_TIME = datetime.time(9, 0)
MAX_SLUG_LENGTH = 30
def ComputePermalink(type_name, slug, created_date, permalink_template='{{slug}}'):
"""Returns the permalink for the given item."""
permalink_data = {'slug': slug}
# If there's date information associated, include it in the permalink data.
if created_date:
permalink_data = dict(permalink_data.items())
permalink_data.update(created_date.GetDict().items())
return RenderTemplateString(permalink_template, permalink_data)
def ParseSnip(content):
"""Return the snippet based on the content."""
found = content.find('<!--more-->')
if found >= 0:
return content[:found]
def ParseDate(date):
"""Gets the permalink parameters based on the item's info."""
try:
if type(date) == str:
date_string = date
date = parser.parse(date_string)
#warnings.warn('Parsed %s into %s.' % (date_string, date))
dt = datetime.datetime.combine(date, DEFAULT_TIME)
return Date(dt)
except TypeError as e:
warnings.warn('Failed to parse date: %s.' % e)
return None
def GuessDate(path):
"""Based on the filesystem structure (eg. blah/2014/09/20/foo-bar.md),
extracts the date."""
    regex = r'.*/([0-9]{4})/([0-9]{2})/([0-9]{2})/.*'
match = re.match(regex, path)
if match:
date_tuple = map(int, match.groups())
date = datetime.datetime(*date_tuple)
return ParseDate(date)
def GuessType(path, mappings):
"""Return the type based on the path. The site config provides automatic
mappings based on path."""
for type_path, type_name in mappings.items():
if path.find(type_path) >= 0:
return type_name
def GuessSlugFromPath(path):
"""Returns the slug."""
if path.endswith('index.md'):
# If it ends with index, get the second last path component.
return path.split('/')[-2]
else:
# Otherwise, just get the filename.
return path.split('/')[-1].split('.')[0]
def GuessSlugFromTitle(title):
"""Return an automatically generated slug from title. Turn spaces into dashes,
lowercase everything, limit length."""
lower = title.lower()
slug = lower.replace(' ', '-')
slug = ''.join([c for c in slug if IsValidChar(c)])
slug = re.sub("-+", "-", slug)
return slug
def FindSplitIndices(lines):
"""Given some lines representing a markdown file with multiple entries in it,
find each split point."""
# Code lines: T if any text, N if new line, D if divider.
coded_lines = [CodeLine(line) for line in lines]
coded = ''.join(coded_lines)
#warnings.warn(coded)
    # Look for patterns of NTD in the coded lines string. If such a pattern is
# found, output the index.
return [m.start() for m in re.finditer('NTD', coded)]
def Pairwise(iterable):
"""Returns a pairwise iterated list."""
a, b = tee(iterable)
next(b, None)
return list(zip(a, b))
def DeletePath(path):
"""Remove file or directory at path."""
if os.path.isfile(path):
os.unlink(path)
else:
shutil.rmtree(path)
def FixBrokenLinks(content, permalink):
"""Given content (HTML or RSS), this will make all relative links into
absolute ones referring to the permalink."""
links = re.findall(r'<a href="(.+?)"', content, re.DOTALL) + \
re.findall(r'<img src="(.+?)"', content, re.DOTALL) + \
re.findall(r'<audio src="(.+?)"', content, re.DOTALL) + \
re.findall(r'<video src="(.+?)"', content, re.DOTALL)
# If the links are relative, make them absolute.
for link in links:
# If it doesn't have http or / at the beginning, it's a relative URL.
if not link.startswith('/') and not link.startswith('http') and not \
link.startswith('mailto'):
# If they are relative, rewrite them using the permalink
absolute_link = os.path.join(permalink, link)
content = content.replace(link, absolute_link)
#warnings.warn('Making relative link %s into absolute %s.' % (link,
# absolute_link))
return content
def FormatWikiLinks(html):
"""Given an html file, convert [[WikiLinks]] into *WikiLinks* just to ease
readability."""
wikilink = re.compile(r'\[\[(?:[^|\]]*\|)?([^\]]+)\]\]')
return wikilink.sub(r'*\1*', html)
def ResolveWikiLinks(html):
"""Given an html file, convert [[WikiLinks]] into links to the personal wiki:
<a href="https://z3.ca/WikiLinks">WikiLinks</a>"""
wikilink = re.compile(r'\[\[(?:[^|\]]*\|)?([^\]]+)\]\]')
return wikilink.sub(linkify, html)
| 28.003861 | 83 | 0.653247 | import datetime
from dateutil import parser
from itertools import tee
from jinja2 import Template, Environment, FileSystemLoader
import os
import re
import shutil
import time
import warnings
import yaml
DEFAULT_TIME = datetime.time(9, 0)
MAX_SLUG_LENGTH = 30
class Date:
    """Thin wrapper around datetime with the formats the site templates need."""
def __init__(self, datetime):
self.datetime = datetime
self.date_format = '%b'
def GetDict(self):
dt = self.datetime
return {
'year': dt.year,
'month': dt.month,
'month_name': dt.strftime('%b'),
'day': dt.day,
'unix': self.Unix(),
}
def Unix(self):
return int(time.mktime(self.datetime.timetuple()))
def Format(self, date_format=None):
if not date_format:
date_format = self.date_format
return self.datetime.strftime(date_format)
def Rfc(self):
dt = self.datetime
if dt.tzinfo is None:
suffix = "-00:00"
else:
suffix = dt.strftime("%z")
suffix = suffix[:-2] + ":" + suffix[-2:]
return dt.strftime("%Y-%m-%dT%H:%M:%S") + suffix
def SetDateFormat(self, date_format):
self.date_format = date_format
def __sub__(self, to_subtract):
return self.Unix() - to_subtract.Unix()
@staticmethod
def Now():
return Date(datetime.datetime.now())
def ComputePermalink(type_name, slug, created_date, permalink_template='{{slug}}'):
"""Returns the permalink for the given item."""
permalink_data = {'slug': slug}
# If there's date information associated, include it in the permalink data.
if created_date:
permalink_data = dict(permalink_data.items())
permalink_data.update(created_date.GetDict().items())
return RenderTemplateString(permalink_template, permalink_data)
def ParseSnip(content):
"""Return the snippet based on the content."""
found = content.find('<!--more-->')
if found >= 0:
return content[:found]
def ParseDate(date):
"""Gets the permalink parameters based on the item's info."""
try:
if type(date) == str:
date_string = date
date = parser.parse(date_string)
#warnings.warn('Parsed %s into %s.' % (date_string, date))
dt = datetime.datetime.combine(date, DEFAULT_TIME)
return Date(dt)
except TypeError as e:
warnings.warn('Failed to parse date: %s.' % e)
return None
def GuessDate(path):
"""Based on the filesystem structure (eg. blah/2014/09/20/foo-bar.md),
extracts the date."""
    regex = r'.*/([0-9]{4})/([0-9]{2})/([0-9]{2})/.*'
match = re.match(regex, path)
if match:
date_tuple = map(int, match.groups())
date = datetime.datetime(*date_tuple)
return ParseDate(date)
def GuessType(path, mappings):
"""Return the type based on the path. The site config provides automatic
mappings based on path."""
for type_path, type_name in mappings.items():
if path.find(type_path) >= 0:
return type_name
def GuessSlugFromPath(path):
"""Returns the slug."""
if path.endswith('index.md'):
# If it ends with index, get the second last path component.
return path.split('/')[-2]
else:
# Otherwise, just get the filename.
return path.split('/')[-1].split('.')[0]
def GuessSlugFromTitle(title):
"""Return an automatically generated slug from title. Turn spaces into dashes,
lowercase everything, limit length."""
def IsValidChar(c):
return c.isalnum() or c == '-'
lower = title.lower()
slug = lower.replace(' ', '-')
slug = ''.join([c for c in slug if IsValidChar(c)])
slug = re.sub("-+", "-", slug)
return slug
def RenderTemplateString(template_string, data):
template = Template(template_string)
return template.render(data)
def RenderTemplate(template_root, filename, data):
env = Environment(loader=FileSystemLoader(template_root))
try:
template = env.get_template(filename)
except Exception:
raise Exception('Failed to find template %s.' % filename)
try:
out = template.render(data)
except Exception as e:
raise Exception('Failed to render template %s: "%s".' % (filename, e))
return out
def FindSplitIndices(lines):
"""Given some lines representing a markdown file with multiple entries in it,
find each split point."""
def CodeLine(line):
if line == '\n':
return 'N'
        elif re.match(r'\w', line):
return 'T'
elif re.match('^===+$', line):
return 'D'
else:
return '?'
# Code lines: T if any text, N if new line, D if divider.
coded_lines = [CodeLine(line) for line in lines]
coded = ''.join(coded_lines)
#warnings.warn(coded)
    # Look for patterns of NTD in the coded lines string. If such a pattern is
# found, output the index.
return [m.start() for m in re.finditer('NTD', coded)]
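# For example (hypothetical input): the lines of a file holding two entries,
#     ["Title A\n", "====\n", "\n", "body\n", "\n", "Title B\n", "====\n", "\n"]
# code to "TDNTNTDN", and re.finditer('NTD', ...) reports index 4 -- the blank
# line before the second entry's title/divider pair, i.e. the split point.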
def GetYamlMetadata(lines):
    """Parse the title line and leading key/value YAML block of an entry."""
    # Ignore empty leading lines (deleting from a list while enumerating it
    # would skip entries).
    while lines and lines[0] == '\n':
        del lines[0]
# Extract the title.
title = lines[0].strip()
# Get the key: value pairs after the title.
separator_index = lines.index('\n')
yaml_lines = lines[2:separator_index]
data = yaml.load(''.join(yaml_lines), Loader=yaml.SafeLoader) or {}
data['title'] = title
return data
def Pairwise(iterable):
"""Returns a pairwise iterated list."""
a, b = tee(iterable)
next(b, None)
return list(zip(a, b))
def CopyAndOverwrite(from_path, to_path):
if os.path.exists(to_path):
shutil.rmtree(to_path)
shutil.copytree(from_path, to_path)
def DeletePath(path):
"""Remove file or directory at path."""
if os.path.isfile(path):
os.unlink(path)
else:
shutil.rmtree(path)
def FixBrokenLinks(content, permalink):
"""Given content (HTML or RSS), this will make all relative links into
absolute ones referring to the permalink."""
links = re.findall(r'<a href="(.+?)"', content, re.DOTALL) + \
re.findall(r'<img src="(.+?)"', content, re.DOTALL) + \
re.findall(r'<audio src="(.+?)"', content, re.DOTALL) + \
re.findall(r'<video src="(.+?)"', content, re.DOTALL)
# If the links are relative, make them absolute.
for link in links:
# If it doesn't have http or / at the beginning, it's a relative URL.
if not link.startswith('/') and not link.startswith('http') and not \
link.startswith('mailto'):
# If they are relative, rewrite them using the permalink
absolute_link = os.path.join(permalink, link)
content = content.replace(link, absolute_link)
#warnings.warn('Making relative link %s into absolute %s.' % (link,
# absolute_link))
return content
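# For example (hypothetical values):
#     FixBrokenLinks('<img src="cover.png">', 'http://example.com/post/')
# returns '<img src="http://example.com/post/cover.png">', while absolute,
# rooted and mailto links pass through unchanged.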
def FormatWikiLinks(html):
"""Given an html file, convert [[WikiLinks]] into *WikiLinks* just to ease
readability."""
wikilink = re.compile(r'\[\[(?:[^|\]]*\|)?([^\]]+)\]\]')
return wikilink.sub(r'*\1*', html)
def ResolveWikiLinks(html):
"""Given an html file, convert [[WikiLinks]] into links to the personal wiki:
<a href="https://z3.ca/WikiLinks">WikiLinks</a>"""
wikilink = re.compile(r'\[\[(?:[^|\]]*\|)?([^\]]+)\]\]')
def linkify(match):
wiki_root = 'https://z3.ca'
wiki_name = match.group(1).replace('\n', ' ')
wiki_slug = wiki_name.replace(' ', '_')
return f'<a class="wiki" href="{wiki_root}/{wiki_slug}">{wiki_name}</a>'
return wikilink.sub(linkify, html)
def StripHtmlTags(html):
    """Replace HTML tags and newlines with single spaces."""
    return re.sub(r'<[^<]+?>|\n', ' ', html)
| 2,199 | 205 | 211 |
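# An end-to-end sketch of the slug/permalink helpers above, assuming this
# module is importable (jinja2 and python-dateutil installed); the inputs are
# illustrative only.
slug = GuessSlugFromTitle("Hello, World -- Again!")  # -> 'hello-world-again'
date = ParseDate("2014-09-20")
print(ComputePermalink('post', slug, date, '{{year}}/{{month}}/{{slug}}'))
# -> 2014/9/hello-world-again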
cb74534e016f34167f118377909713df89e18936 | 775 | py | Python | phasespace/__init__.py | zfit/tfphasespace | 7131fbd687cb5dd1ce53fd4a590dcfd5d1af572f | [
"BSD-3-Clause"
] | null | null | null | phasespace/__init__.py | zfit/tfphasespace | 7131fbd687cb5dd1ce53fd4a590dcfd5d1af572f | [
"BSD-3-Clause"
] | 6 | 2019-02-27T20:06:27.000Z | 2019-03-12T14:02:51.000Z | phasespace/__init__.py | zfit/tfphasespace | 7131fbd687cb5dd1ce53fd4a590dcfd5d1af572f | [
"BSD-3-Clause"
] | null | null | null | """Top-level package for TensorFlow PhaseSpace."""
import sys
if sys.version_info < (3, 8):
from importlib_metadata import PackageNotFoundError, version
else:
from importlib.metadata import PackageNotFoundError, version
try:
__version__ = version("phasespace")
except PackageNotFoundError:
pass
__author__ = """Albert Puig Navarro"""
__email__ = "apuignav@gmail.com"
__maintainer__ = "zfit"
__credits__ = ["Jonas Eschle <Jonas.Eschle@cern.ch>"]
__all__ = ["nbody_decay", "GenParticle", "random"]
import tensorflow as tf
from . import random
from .phasespace import GenParticle, nbody_decay
def _set_eager_mode():
    """Run TF functions eagerly when the PHASESPACE_EAGER env var is non-empty."""
    import os
    is_eager = bool(os.environ.get("PHASESPACE_EAGER"))
    tf.config.run_functions_eagerly(is_eager)
_set_eager_mode()
| 21.527778 | 64 | 0.748387 | """Top-level package for TensorFlow PhaseSpace."""
import sys
if sys.version_info < (3, 8):
from importlib_metadata import PackageNotFoundError, version
else:
from importlib.metadata import PackageNotFoundError, version
try:
__version__ = version("phasespace")
except PackageNotFoundError:
pass
__author__ = """Albert Puig Navarro"""
__email__ = "apuignav@gmail.com"
__maintainer__ = "zfit"
__credits__ = ["Jonas Eschle <Jonas.Eschle@cern.ch>"]
__all__ = ["nbody_decay", "GenParticle", "random"]
import tensorflow as tf
from . import random
from .phasespace import GenParticle, nbody_decay
def _set_eager_mode():
    """Run TF functions eagerly when the PHASESPACE_EAGER env var is non-empty."""
import os
is_eager = bool(os.environ.get("PHASESPACE_EAGER"))
tf.config.run_functions_eagerly(is_eager)
_set_eager_mode()
| 118 | 0 | 23 |
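# Eager mode is decided by an environment variable read at import time, so the
# flag must be set before the first `import phasespace`; any non-empty string
# enables eager execution. A minimal sketch:
import os
os.environ["PHASESPACE_EAGER"] = "1"
import phasespace  # _set_eager_mode() runs during import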
f4d3dcf126c0d1c97934fcc6149e7d3ce53b7189 | 369 | py | Python | src/format2csv.py | mutanthost/lnav | 55727284de202a9ebfde0f3f9861ef6af1d04c52 | [
"BSD-2-Clause"
] | 1 | 2020-12-16T08:55:30.000Z | 2020-12-16T08:55:30.000Z | src/format2csv.py | Sifor/lnav | 92f28f1174f1c26408d7b3dd548b80d54adad6f4 | [
"BSD-2-Clause"
] | 3 | 2015-09-30T22:25:05.000Z | 2015-10-01T00:05:46.000Z | src/format2csv.py | Sifor/lnav | 92f28f1174f1c26408d7b3dd548b80d54adad6f4 | [
"BSD-2-Clause"
] | null | null | null |
import csv
import sys
import json
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 20.5 | 69 | 0.593496 |
import csv
import sys
import json
def main(args):
    """Convert an lnav format JSON file into CSV rows of (title, key, description)."""
    with open(args[1]) as fp, open(args[2], 'w', newline='') as out_fp:
        out = csv.writer(out_fp)
        format_dict = json.load(fp)
        for key in sorted(format_dict):
            value = format_dict[key]
            out.writerow((value['title'], key, value['description']))
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 254 | 0 | 23 |
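# An input/usage sketch for the converter above (field names taken from the
# writerow call; file names are hypothetical):
#     formats.json: {"access_log": {"title": "Access Log",
#                                   "description": "Web server access log"}}
#     python format2csv.py formats.json formats.csv
# produces one CSV row per format: Access Log,access_log,Web server access log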
53031de8e25d045372bd5aab09f83ec7f6172d75 | 18,408 | py | Python | vel/rl/buffers/tests/test_circular_buffer_backend.py | galatolofederico/vel | 0473648cffb3f34fb784d12dbb25844ab58ffc3c | [
"MIT"
] | 273 | 2018-09-01T08:54:34.000Z | 2022-02-02T13:22:51.000Z | vel/rl/buffers/tests/test_circular_buffer_backend.py | braincorp/vel | bdf9d9eb6ed66278330e8cbece307f6e63ce53c6 | [
"MIT"
] | 47 | 2018-08-17T11:27:08.000Z | 2022-03-11T23:26:55.000Z | vel/rl/buffers/tests/test_circular_buffer_backend.py | braincorp/vel | bdf9d9eb6ed66278330e8cbece307f6e63ce53c6 | [
"MIT"
] | 37 | 2018-10-11T22:56:57.000Z | 2020-10-06T19:53:05.000Z | import gym
import gym.spaces
import numpy as np
import numpy.testing as nt
import pytest
from vel.exceptions import VelException
from vel.rl.buffers.backend.circular_buffer_backend import CircularBufferBackend
def get_half_filled_buffer():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 1), dtype=np.uint8)
action_space = gym.spaces.Discrete(4)
buffer = CircularBufferBackend(20, observation_space, action_space)
v1 = np.ones(4).reshape((2, 2, 1))
for i in range(10):
buffer.store_transition(v1 * (i+1), 0, float(i)/2, False)
return buffer
def get_filled_buffer():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 1), dtype=np.uint8)
action_space = gym.spaces.Discrete(4)
buffer = CircularBufferBackend(20, observation_space, action_space)
v1 = np.ones(4).reshape((2, 2, 1))
for i in range(30):
buffer.store_transition(v1 * (i+1), 0, float(i)/2, False)
return buffer
def get_filled_buffer1x1():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2,), dtype=int)
action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=float)
buffer = CircularBufferBackend(20, observation_space=observation_space, action_space=action_space)
v1 = np.ones(2).reshape((2,))
a1 = np.arange(2).reshape((2,))
for i in range(30):
item = v1.copy()
item[0] *= (i+1)
item[1] *= 10 * (i+1)
buffer.store_transition(item, a1 * i, float(i)/2, False)
return buffer
def get_filled_buffer2x2():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2), dtype=int)
action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2, 2), dtype=float)
buffer = CircularBufferBackend(20, observation_space=observation_space, action_space=action_space)
v1 = np.ones(4).reshape((2, 2))
a1 = np.arange(4).reshape((2, 2))
for i in range(30):
item = v1.copy()
item[0] *= (i+1)
item[1] *= 10 * (i+1)
buffer.store_transition(item, a1 * i, float(i)/2, False)
return buffer
def get_filled_buffer3x3():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 2), dtype=int)
action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2, 2, 2), dtype=float)
buffer = CircularBufferBackend(20, observation_space=observation_space, action_space=action_space)
v1 = np.ones(8).reshape((2, 2, 2))
a1 = np.arange(8).reshape((2, 2, 2))
for i in range(30):
item = v1.copy()
item[0] *= (i+1)
item[1] *= 10 * (i+1)
buffer.store_transition(item, i * a1, float(i)/2, False)
return buffer
def get_filled_buffer1x1_history():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 1), dtype=int)
action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=float)
buffer = CircularBufferBackend(20, observation_space=observation_space, action_space=action_space)
v1 = np.ones(2).reshape((2, 1))
a1 = np.arange(2).reshape((2,))
for i in range(30):
item = v1.copy()
item[0] *= (i+1)
item[1] *= 10 * (i+1)
buffer.store_transition(item, a1 * i, float(i)/2, False)
return buffer
def get_filled_buffer2x2_history():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 1), dtype=int)
action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2, 2), dtype=float)
buffer = CircularBufferBackend(20, observation_space=observation_space, action_space=action_space)
v1 = np.ones(4).reshape((2, 2, 1))
a1 = np.arange(4).reshape((2, 2))
for i in range(30):
item = v1.copy()
item[0] *= (i+1)
item[1] *= 10 * (i+1)
buffer.store_transition(item, a1 * i, float(i)/2, False)
return buffer
def get_filled_buffer3x3_history():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 2, 1), dtype=int)
action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2, 2, 2), dtype=float)
buffer = CircularBufferBackend(20, observation_space=observation_space, action_space=action_space)
v1 = np.ones(8).reshape((2, 2, 2, 1))
a1 = np.arange(8).reshape((2, 2, 2))
for i in range(30):
item = v1.copy()
item[0] *= (i+1)
item[1] *= 10 * (i+1)
buffer.store_transition(item, i * a1, float(i)/2, False)
return buffer
def get_filled_buffer_extra_info():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 1), dtype=np.uint8)
action_space = gym.spaces.Discrete(4)
buffer = CircularBufferBackend(20, observation_space, action_space, extra_data={
'neglogp': np.zeros(20, dtype=float)
})
v1 = np.ones(4).reshape((2, 2, 1))
for i in range(30):
buffer.store_transition(v1 * (i+1), 0, float(i)/2, False, extra_info={'neglogp': i / 30.0})
return buffer
def get_filled_buffer_with_dones():
""" Return simple preinitialized buffer with some done's in there """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 1), dtype=np.uint8)
action_space = gym.spaces.Discrete(4)
buffer = CircularBufferBackend(20, observation_space, action_space)
v1 = np.ones(4).reshape((2, 2, 1))
done_set = {2, 5, 10, 13, 18, 22, 28}
for i in range(30):
if i in done_set:
buffer.store_transition(v1 * (i+1), 0, float(i)/2, True)
else:
buffer.store_transition(v1 * (i+1), 0, float(i)/2, False)
return buffer
def test_simple_get_frame():
""" Check if get_frame returns frames from a buffer partially full """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 1), dtype=np.uint8)
action_space = gym.spaces.Discrete(4)
buffer = CircularBufferBackend(20, observation_space, action_space)
v1 = np.ones(4).reshape((2, 2, 1))
v2 = v1 * 2
v3 = v1 * 3
buffer.store_transition(v1, 0, 0, False)
buffer.store_transition(v2, 0, 0, False)
buffer.store_transition(v3, 0, 0, False)
assert np.all(buffer.get_frame(0, 4).max(0).max(0) == np.array([0, 0, 0, 1]))
assert np.all(buffer.get_frame(1, 4).max(0).max(0) == np.array([0, 0, 1, 2]))
assert np.all(buffer.get_frame(2, 4).max(0).max(0) == np.array([0, 1, 2, 3]))
with pytest.raises(VelException):
buffer.get_frame(3, 4)
with pytest.raises(VelException):
buffer.get_frame(4, 4)
def test_full_buffer_get_frame():
""" Check if get_frame returns frames for full buffer """
buffer = get_filled_buffer()
nt.assert_array_equal(buffer.get_frame(0, 4).max(0).max(0), np.array([18, 19, 20, 21]))
nt.assert_array_equal(buffer.get_frame(1, 4).max(0).max(0), np.array([19, 20, 21, 22]))
nt.assert_array_equal(buffer.get_frame(9, 4).max(0).max(0), np.array([27, 28, 29, 30]))
with pytest.raises(VelException):
buffer.get_frame(10, 4)
with pytest.raises(VelException):
buffer.get_frame(11, 4)
with pytest.raises(VelException):
buffer.get_frame(12, 4)
nt.assert_array_equal(buffer.get_frame(13, 4).max(0).max(0), np.array([11, 12, 13, 14]))
nt.assert_array_equal(buffer.get_frame(19, 4).max(0).max(0), np.array([17, 18, 19, 20]))
def test_full_buffer_get_future_frame():
""" Check if get_frame_with_future works with full buffer """
buffer = get_filled_buffer()
nt.assert_array_equal(buffer.get_frame_with_future(0, 4)[1].max(0).max(0), np.array([19, 20, 21, 22]))
nt.assert_array_equal(buffer.get_frame_with_future(1, 4)[1].max(0).max(0), np.array([20, 21, 22, 23]))
with pytest.raises(VelException):
buffer.get_frame_with_future(9, 4)
with pytest.raises(VelException):
buffer.get_frame_with_future(10, 4)
with pytest.raises(VelException):
buffer.get_frame_with_future(11, 4)
with pytest.raises(VelException):
buffer.get_frame_with_future(12, 4)
nt.assert_array_equal(buffer.get_frame_with_future(13, 4)[1].max(0).max(0), np.array([12, 13, 14, 15]))
nt.assert_array_equal(buffer.get_frame_with_future(19, 4)[1].max(0).max(0), np.array([18, 19, 20, 21]))
def test_buffer_filling_size():
""" Check if buffer size is properly updated when we add items """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 1), dtype=np.uint8)
action_space = gym.spaces.Discrete(4)
buffer = CircularBufferBackend(20, observation_space, action_space)
v1 = np.ones(4).reshape((2, 2, 1))
assert buffer.current_size == 0
buffer.store_transition(v1, 0, 0, False)
buffer.store_transition(v1, 0, 0, False)
assert buffer.current_size == 2
for i in range(30):
buffer.store_transition(v1 * (i+1), 0, float(i)/2, False)
assert buffer.current_size == buffer.buffer_capacity
def test_get_frame_with_dones():
""" Check if get_frame works properly in case there are multiple sequences in buffer """
buffer = get_filled_buffer_with_dones()
nt.assert_array_equal(buffer.get_frame(0, 4).max(0).max(0), np.array([0, 0, 20, 21]))
nt.assert_array_equal(buffer.get_frame(1, 4).max(0).max(0), np.array([0, 20, 21, 22]))
nt.assert_array_equal(buffer.get_frame(2, 4).max(0).max(0), np.array([20, 21, 22, 23]))
nt.assert_array_equal(buffer.get_frame(3, 4).max(0).max(0), np.array([0, 0, 0, 24]))
nt.assert_array_equal(buffer.get_frame(8, 4).max(0).max(0), np.array([26, 27, 28, 29]))
nt.assert_array_equal(buffer.get_frame(9, 4).max(0).max(0), np.array([0, 0, 0, 30]))
with pytest.raises(VelException):
buffer.get_frame(10, 4)
nt.assert_array_equal(buffer.get_frame(11, 4).max(0).max(0), np.array([0, 0, 0, 12]))
nt.assert_array_equal(buffer.get_frame(12, 4).max(0).max(0), np.array([0, 0, 12, 13]))
def test_get_frame_future_with_dones():
""" Check if get_frame_with_future works properly in case there are multiple sequences in buffer """
buffer = get_filled_buffer_with_dones()
nt.assert_array_equal(buffer.get_frame_with_future(0, 4)[1].max(0).max(0), np.array([0, 20, 21, 22]))
nt.assert_array_equal(buffer.get_frame_with_future(1, 4)[1].max(0).max(0), np.array([20, 21, 22, 23]))
nt.assert_array_equal(buffer.get_frame_with_future(2, 4)[1].max(0).max(0), np.array([21, 22, 23, 0]))
nt.assert_array_equal(buffer.get_frame_with_future(3, 4)[1].max(0).max(0), np.array([0, 0, 24, 25]))
nt.assert_array_equal(buffer.get_frame_with_future(8, 4)[1].max(0).max(0), np.array([27, 28, 29, 0]))
with pytest.raises(VelException):
buffer.get_frame_with_future(9, 4)
with pytest.raises(VelException):
buffer.get_frame_with_future(10, 4)
nt.assert_array_equal(buffer.get_frame_with_future(11, 4)[1].max(0).max(0), np.array([0, 0, 12, 13]))
nt.assert_array_equal(buffer.get_frame_with_future(12, 4)[1].max(0).max(0), np.array([0, 12, 13, 14]))
def test_get_batch():
""" Check if get_batch works properly for buffers """
buffer = get_filled_buffer_with_dones()
batch = buffer.get_transitions(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8]), history_length=4)
obs = batch['observations']
act = batch['actions']
rew = batch['rewards']
obs_tp1 = batch['observations_next']
dones = batch['dones']
nt.assert_array_equal(dones, np.array([False, False, True, False, False, False, False, False, True]))
nt.assert_array_equal(obs.max(1).max(1), np.array([
[0, 0, 20, 21],
[0, 20, 21, 22],
[20, 21, 22, 23],
[0, 0, 0, 24],
[0, 0, 24, 25],
[0, 24, 25, 26],
[24, 25, 26, 27],
[25, 26, 27, 28],
[26, 27, 28, 29],
]))
nt.assert_array_equal(act, np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]))
nt.assert_array_equal(rew, np.array([10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0]))
nt.assert_array_equal(obs_tp1.max(1).max(1), np.array([
[0, 20, 21, 22],
[20, 21, 22, 23],
[21, 22, 23, 0],
[0, 0, 24, 25],
[0, 24, 25, 26],
[24, 25, 26, 27],
[25, 26, 27, 28],
[26, 27, 28, 29],
[27, 28, 29, 0],
]))
with pytest.raises(VelException):
buffer.get_transitions(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), history_length=4)
def test_sample_and_get_batch():
""" Check if batch sampling works properly """
buffer = get_filled_buffer_with_dones()
for i in range(100):
indexes = buffer.sample_batch_transitions(batch_size=5, history_length=4)
batch = buffer.get_transitions(indexes, history_length=4)
obs = batch['observations']
act = batch['actions']
rew = batch['rewards']
obs_tp1 = batch['observations_next']
dones = batch['dones']
assert obs.shape[0] == 5
assert act.shape[0] == 5
assert rew.shape[0] == 5
assert obs_tp1.shape[0] == 5
assert dones.shape[0] == 5
def test_storing_extra_info():
""" Make sure additional information are stored and recovered properly """
buffer = get_filled_buffer_extra_info()
batch = buffer.get_transitions(np.array([0, 1, 2, 17, 18, 19]), history_length=4)
nt.assert_equal(batch['neglogp'][0], 20.0/30)
nt.assert_equal(batch['neglogp'][1], 21.0/30)
nt.assert_equal(batch['neglogp'][2], 22.0/30)
nt.assert_equal(batch['neglogp'][3], 17.0/30)
nt.assert_equal(batch['neglogp'][4], 18.0/30)
nt.assert_equal(batch['neglogp'][5], 19.0/30)
def test_sample_rollout_half_filled():
""" Test if sampling rollout is correct and returns proper results """
buffer = get_half_filled_buffer()
indexes = []
for i in range(1000):
rollout_idx = buffer.sample_batch_trajectories(rollout_length=5, history_length=4)
rollout = buffer.get_trajectories(index=rollout_idx, rollout_length=5, history_length=4)
assert rollout['observations'].shape[0] == 5 # Rollout length
assert rollout['observations'].shape[-1] == 4 # History length
indexes.append(rollout_idx)
assert np.min(indexes) == 4
assert np.max(indexes) == 8
with pytest.raises(VelException):
buffer.sample_batch_trajectories(rollout_length=10, history_length=4)
rollout_idx = buffer.sample_batch_trajectories(rollout_length=9, history_length=4)
rollout = buffer.get_trajectories(index=rollout_idx, rollout_length=9, history_length=4)
assert rollout_idx == 8
nt.assert_array_equal(rollout['rewards'], np.array([
0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4.
]))
def test_sample_rollout_filled():
""" Test if sampling rollout is correct and returns proper results """
buffer = get_filled_buffer()
indexes = []
for i in range(1000):
rollout_idx = buffer.sample_batch_trajectories(rollout_length=5, history_length=4)
rollout = buffer.get_trajectories(index=rollout_idx, rollout_length=5, history_length=4)
assert rollout['observations'].shape[0] == 5 # Rollout length
assert rollout['observations'].shape[-1] == 4 # History length
indexes.append(rollout_idx)
assert np.min(indexes) == 0
assert np.max(indexes) == 19
with pytest.raises(VelException):
buffer.sample_batch_trajectories(rollout_length=17, history_length=4)
max_rollout = buffer.sample_batch_trajectories(rollout_length=16, history_length=4)
rollout = buffer.get_trajectories(max_rollout, rollout_length=16, history_length=4)
assert max_rollout == 8
assert np.sum(rollout['rewards']) == pytest.approx(164.0, 1e-5)
| 34.929791 | 111 | 0.648848 | import gym
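# The suite above runs under pytest; a single case can be exercised directly,
# e.g. (assuming vel, gym and numpy are installed):
#     pytest vel/rl/buffers/tests/test_circular_buffer_backend.py -k test_get_frame_with_dones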
import gym.spaces
import numpy as np
import numpy.testing as nt
import pytest
from vel.exceptions import VelException
from vel.rl.buffers.backend.circular_buffer_backend import CircularBufferBackend
def get_half_filled_buffer():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 1), dtype=np.uint8)
action_space = gym.spaces.Discrete(4)
buffer = CircularBufferBackend(20, observation_space, action_space)
v1 = np.ones(4).reshape((2, 2, 1))
for i in range(10):
buffer.store_transition(v1 * (i+1), 0, float(i)/2, False)
return buffer
def get_filled_buffer():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 1), dtype=np.uint8)
action_space = gym.spaces.Discrete(4)
buffer = CircularBufferBackend(20, observation_space, action_space)
v1 = np.ones(4).reshape((2, 2, 1))
for i in range(30):
buffer.store_transition(v1 * (i+1), 0, float(i)/2, False)
return buffer
def get_filled_buffer1x1():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2,), dtype=int)
action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=float)
buffer = CircularBufferBackend(20, observation_space=observation_space, action_space=action_space)
v1 = np.ones(2).reshape((2,))
a1 = np.arange(2).reshape((2,))
for i in range(30):
item = v1.copy()
item[0] *= (i+1)
item[1] *= 10 * (i+1)
buffer.store_transition(item, a1 * i, float(i)/2, False)
return buffer
def get_filled_buffer2x2():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2), dtype=int)
action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2, 2), dtype=float)
buffer = CircularBufferBackend(20, observation_space=observation_space, action_space=action_space)
v1 = np.ones(4).reshape((2, 2))
a1 = np.arange(4).reshape((2, 2))
for i in range(30):
item = v1.copy()
item[0] *= (i+1)
item[1] *= 10 * (i+1)
buffer.store_transition(item, a1 * i, float(i)/2, False)
return buffer
def get_filled_buffer3x3():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 2), dtype=int)
action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2, 2, 2), dtype=float)
buffer = CircularBufferBackend(20, observation_space=observation_space, action_space=action_space)
v1 = np.ones(8).reshape((2, 2, 2))
a1 = np.arange(8).reshape((2, 2, 2))
for i in range(30):
item = v1.copy()
item[0] *= (i+1)
item[1] *= 10 * (i+1)
buffer.store_transition(item, i * a1, float(i)/2, False)
return buffer
def get_filled_buffer1x1_history():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 1), dtype=int)
action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=float)
buffer = CircularBufferBackend(20, observation_space=observation_space, action_space=action_space)
v1 = np.ones(2).reshape((2, 1))
a1 = np.arange(2).reshape((2,))
for i in range(30):
item = v1.copy()
item[0] *= (i+1)
item[1] *= 10 * (i+1)
buffer.store_transition(item, a1 * i, float(i)/2, False)
return buffer
def get_filled_buffer2x2_history():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 1), dtype=int)
action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2, 2), dtype=float)
buffer = CircularBufferBackend(20, observation_space=observation_space, action_space=action_space)
v1 = np.ones(4).reshape((2, 2, 1))
a1 = np.arange(4).reshape((2, 2))
for i in range(30):
item = v1.copy()
item[0] *= (i+1)
item[1] *= 10 * (i+1)
buffer.store_transition(item, a1 * i, float(i)/2, False)
return buffer
def get_filled_buffer3x3_history():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 2, 1), dtype=int)
action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2, 2, 2), dtype=float)
buffer = CircularBufferBackend(20, observation_space=observation_space, action_space=action_space)
v1 = np.ones(8).reshape((2, 2, 2, 1))
a1 = np.arange(8).reshape((2, 2, 2))
for i in range(30):
item = v1.copy()
item[0] *= (i+1)
item[1] *= 10 * (i+1)
buffer.store_transition(item, i * a1, float(i)/2, False)
return buffer
def get_filled_buffer_extra_info():
""" Return simple preinitialized buffer """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 1), dtype=np.uint8)
action_space = gym.spaces.Discrete(4)
buffer = CircularBufferBackend(20, observation_space, action_space, extra_data={
'neglogp': np.zeros(20, dtype=float)
})
v1 = np.ones(4).reshape((2, 2, 1))
for i in range(30):
buffer.store_transition(v1 * (i+1), 0, float(i)/2, False, extra_info={'neglogp': i / 30.0})
return buffer
def get_filled_buffer_with_dones():
""" Return simple preinitialized buffer with some done's in there """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 1), dtype=np.uint8)
action_space = gym.spaces.Discrete(4)
buffer = CircularBufferBackend(20, observation_space, action_space)
v1 = np.ones(4).reshape((2, 2, 1))
done_set = {2, 5, 10, 13, 18, 22, 28}
for i in range(30):
if i in done_set:
buffer.store_transition(v1 * (i+1), 0, float(i)/2, True)
else:
buffer.store_transition(v1 * (i+1), 0, float(i)/2, False)
return buffer
def test_simple_get_frame():
""" Check if get_frame returns frames from a buffer partially full """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 1), dtype=np.uint8)
action_space = gym.spaces.Discrete(4)
buffer = CircularBufferBackend(20, observation_space, action_space)
v1 = np.ones(4).reshape((2, 2, 1))
v2 = v1 * 2
v3 = v1 * 3
buffer.store_transition(v1, 0, 0, False)
buffer.store_transition(v2, 0, 0, False)
buffer.store_transition(v3, 0, 0, False)
assert np.all(buffer.get_frame(0, 4).max(0).max(0) == np.array([0, 0, 0, 1]))
assert np.all(buffer.get_frame(1, 4).max(0).max(0) == np.array([0, 0, 1, 2]))
assert np.all(buffer.get_frame(2, 4).max(0).max(0) == np.array([0, 1, 2, 3]))
with pytest.raises(VelException):
buffer.get_frame(3, 4)
with pytest.raises(VelException):
buffer.get_frame(4, 4)
def test_full_buffer_get_frame():
""" Check if get_frame returns frames for full buffer """
buffer = get_filled_buffer()
nt.assert_array_equal(buffer.get_frame(0, 4).max(0).max(0), np.array([18, 19, 20, 21]))
nt.assert_array_equal(buffer.get_frame(1, 4).max(0).max(0), np.array([19, 20, 21, 22]))
nt.assert_array_equal(buffer.get_frame(9, 4).max(0).max(0), np.array([27, 28, 29, 30]))
with pytest.raises(VelException):
buffer.get_frame(10, 4)
with pytest.raises(VelException):
buffer.get_frame(11, 4)
with pytest.raises(VelException):
buffer.get_frame(12, 4)
nt.assert_array_equal(buffer.get_frame(13, 4).max(0).max(0), np.array([11, 12, 13, 14]))
nt.assert_array_equal(buffer.get_frame(19, 4).max(0).max(0), np.array([17, 18, 19, 20]))
def test_full_buffer_get_future_frame():
""" Check if get_frame_with_future works with full buffer """
buffer = get_filled_buffer()
nt.assert_array_equal(buffer.get_frame_with_future(0, 4)[1].max(0).max(0), np.array([19, 20, 21, 22]))
nt.assert_array_equal(buffer.get_frame_with_future(1, 4)[1].max(0).max(0), np.array([20, 21, 22, 23]))
with pytest.raises(VelException):
buffer.get_frame_with_future(9, 4)
with pytest.raises(VelException):
buffer.get_frame_with_future(10, 4)
with pytest.raises(VelException):
buffer.get_frame_with_future(11, 4)
with pytest.raises(VelException):
buffer.get_frame_with_future(12, 4)
nt.assert_array_equal(buffer.get_frame_with_future(13, 4)[1].max(0).max(0), np.array([12, 13, 14, 15]))
nt.assert_array_equal(buffer.get_frame_with_future(19, 4)[1].max(0).max(0), np.array([18, 19, 20, 21]))
def test_buffer_filling_size():
""" Check if buffer size is properly updated when we add items """
observation_space = gym.spaces.Box(low=0, high=255, shape=(2, 2, 1), dtype=np.uint8)
action_space = gym.spaces.Discrete(4)
buffer = CircularBufferBackend(20, observation_space, action_space)
v1 = np.ones(4).reshape((2, 2, 1))
assert buffer.current_size == 0
buffer.store_transition(v1, 0, 0, False)
buffer.store_transition(v1, 0, 0, False)
assert buffer.current_size == 2
for i in range(30):
buffer.store_transition(v1 * (i+1), 0, float(i)/2, False)
assert buffer.current_size == buffer.buffer_capacity
def test_get_frame_with_dones():
""" Check if get_frame works properly in case there are multiple sequences in buffer """
buffer = get_filled_buffer_with_dones()
nt.assert_array_equal(buffer.get_frame(0, 4).max(0).max(0), np.array([0, 0, 20, 21]))
nt.assert_array_equal(buffer.get_frame(1, 4).max(0).max(0), np.array([0, 20, 21, 22]))
nt.assert_array_equal(buffer.get_frame(2, 4).max(0).max(0), np.array([20, 21, 22, 23]))
nt.assert_array_equal(buffer.get_frame(3, 4).max(0).max(0), np.array([0, 0, 0, 24]))
nt.assert_array_equal(buffer.get_frame(8, 4).max(0).max(0), np.array([26, 27, 28, 29]))
nt.assert_array_equal(buffer.get_frame(9, 4).max(0).max(0), np.array([0, 0, 0, 30]))
with pytest.raises(VelException):
buffer.get_frame(10, 4)
nt.assert_array_equal(buffer.get_frame(11, 4).max(0).max(0), np.array([0, 0, 0, 12]))
nt.assert_array_equal(buffer.get_frame(12, 4).max(0).max(0), np.array([0, 0, 12, 13]))
def test_get_frame_future_with_dones():
""" Check if get_frame_with_future works properly in case there are multiple sequences in buffer """
buffer = get_filled_buffer_with_dones()
nt.assert_array_equal(buffer.get_frame_with_future(0, 4)[1].max(0).max(0), np.array([0, 20, 21, 22]))
nt.assert_array_equal(buffer.get_frame_with_future(1, 4)[1].max(0).max(0), np.array([20, 21, 22, 23]))
nt.assert_array_equal(buffer.get_frame_with_future(2, 4)[1].max(0).max(0), np.array([21, 22, 23, 0]))
nt.assert_array_equal(buffer.get_frame_with_future(3, 4)[1].max(0).max(0), np.array([0, 0, 24, 25]))
nt.assert_array_equal(buffer.get_frame_with_future(8, 4)[1].max(0).max(0), np.array([27, 28, 29, 0]))
with pytest.raises(VelException):
buffer.get_frame_with_future(9, 4)
with pytest.raises(VelException):
buffer.get_frame_with_future(10, 4)
nt.assert_array_equal(buffer.get_frame_with_future(11, 4)[1].max(0).max(0), np.array([0, 0, 12, 13]))
nt.assert_array_equal(buffer.get_frame_with_future(12, 4)[1].max(0).max(0), np.array([0, 12, 13, 14]))
def test_get_batch():
""" Check if get_batch works properly for buffers """
buffer = get_filled_buffer_with_dones()
batch = buffer.get_transitions(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8]), history_length=4)
obs = batch['observations']
act = batch['actions']
rew = batch['rewards']
obs_tp1 = batch['observations_next']
dones = batch['dones']
nt.assert_array_equal(dones, np.array([False, False, True, False, False, False, False, False, True]))
nt.assert_array_equal(obs.max(1).max(1), np.array([
[0, 0, 20, 21],
[0, 20, 21, 22],
[20, 21, 22, 23],
[0, 0, 0, 24],
[0, 0, 24, 25],
[0, 24, 25, 26],
[24, 25, 26, 27],
[25, 26, 27, 28],
[26, 27, 28, 29],
]))
nt.assert_array_equal(act, np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]))
nt.assert_array_equal(rew, np.array([10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0]))
nt.assert_array_equal(obs_tp1.max(1).max(1), np.array([
[0, 20, 21, 22],
[20, 21, 22, 23],
[21, 22, 23, 0],
[0, 0, 24, 25],
[0, 24, 25, 26],
[24, 25, 26, 27],
[25, 26, 27, 28],
[26, 27, 28, 29],
[27, 28, 29, 0],
]))
with pytest.raises(VelException):
buffer.get_transitions(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), history_length=4)
def test_sample_and_get_batch():
""" Check if batch sampling works properly """
buffer = get_filled_buffer_with_dones()
for i in range(100):
indexes = buffer.sample_batch_transitions(batch_size=5, history_length=4)
batch = buffer.get_transitions(indexes, history_length=4)
obs = batch['observations']
act = batch['actions']
rew = batch['rewards']
obs_tp1 = batch['observations_next']
dones = batch['dones']
assert obs.shape[0] == 5
assert act.shape[0] == 5
assert rew.shape[0] == 5
assert obs_tp1.shape[0] == 5
assert dones.shape[0] == 5
def test_storing_extra_info():
""" Make sure additional information are stored and recovered properly """
buffer = get_filled_buffer_extra_info()
batch = buffer.get_transitions(np.array([0, 1, 2, 17, 18, 19]), history_length=4)
nt.assert_equal(batch['neglogp'][0], 20.0/30)
nt.assert_equal(batch['neglogp'][1], 21.0/30)
nt.assert_equal(batch['neglogp'][2], 22.0/30)
nt.assert_equal(batch['neglogp'][3], 17.0/30)
nt.assert_equal(batch['neglogp'][4], 18.0/30)
nt.assert_equal(batch['neglogp'][5], 19.0/30)
def test_sample_rollout_half_filled():
""" Test if sampling rollout is correct and returns proper results """
buffer = get_half_filled_buffer()
indexes = []
for i in range(1000):
rollout_idx = buffer.sample_batch_trajectories(rollout_length=5, history_length=4)
rollout = buffer.get_trajectories(index=rollout_idx, rollout_length=5, history_length=4)
assert rollout['observations'].shape[0] == 5 # Rollout length
assert rollout['observations'].shape[-1] == 4 # History length
indexes.append(rollout_idx)
assert np.min(indexes) == 4
assert np.max(indexes) == 8
with pytest.raises(VelException):
buffer.sample_batch_trajectories(rollout_length=10, history_length=4)
rollout_idx = buffer.sample_batch_trajectories(rollout_length=9, history_length=4)
rollout = buffer.get_trajectories(index=rollout_idx, rollout_length=9, history_length=4)
assert rollout_idx == 8
nt.assert_array_equal(rollout['rewards'], np.array([
0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4.
]))
def test_sample_rollout_filled():
""" Test if sampling rollout is correct and returns proper results """
buffer = get_filled_buffer()
indexes = []
for i in range(1000):
rollout_idx = buffer.sample_batch_trajectories(rollout_length=5, history_length=4)
rollout = buffer.get_trajectories(index=rollout_idx, rollout_length=5, history_length=4)
assert rollout['observations'].shape[0] == 5 # Rollout length
assert rollout['observations'].shape[-1] == 4 # History length
indexes.append(rollout_idx)
assert np.min(indexes) == 0
assert np.max(indexes) == 19
with pytest.raises(VelException):
buffer.sample_batch_trajectories(rollout_length=17, history_length=4)
max_rollout = buffer.sample_batch_trajectories(rollout_length=16, history_length=4)
rollout = buffer.get_trajectories(max_rollout, rollout_length=16, history_length=4)
assert max_rollout == 8
assert np.sum(rollout['rewards']) == pytest.approx(164.0, 1e-5)
def test_buffer_flexible_obs_action_sizes():
b1x1 = get_filled_buffer1x1()
b2x2 = get_filled_buffer2x2()
b3x3 = get_filled_buffer3x3()
nt.assert_array_almost_equal(b1x1.get_frame(0), np.array([21, 210]))
nt.assert_array_almost_equal(b2x2.get_frame(0), np.array([[21, 21], [210, 210]]))
nt.assert_array_almost_equal(b3x3.get_frame(0), np.array([[[21, 21], [21, 21]], [[210, 210], [210, 210]]]))
nt.assert_array_almost_equal(b1x1.get_transition(0, 0)['actions'], np.array([0, 20]))
nt.assert_array_almost_equal(b2x2.get_transition(0, 0)['actions'], np.array([[0, 20], [40, 60]]))
nt.assert_array_almost_equal(b3x3.get_transition(0, 0)['actions'], np.array(
[[[0, 20], [40, 60]],
[[80, 100], [120, 140]]]
))
with pytest.raises(AssertionError):
b1x1.get_frame(0, history_length=2)
with pytest.raises(AssertionError):
b1x1.get_transition(0, history_length=2)
with pytest.raises(AssertionError):
b2x2.get_frame(0, history_length=2)
with pytest.raises(AssertionError):
b2x2.get_transition(0, history_length=2)
with pytest.raises(AssertionError):
b3x3.get_frame(0, history_length=2)
with pytest.raises(AssertionError):
b3x3.get_transition(0, history_length=2)
def test_buffer_flexible_obs_action_sizes_with_history():
b1x1 = get_filled_buffer1x1_history()
b2x2 = get_filled_buffer2x2_history()
b3x3 = get_filled_buffer3x3_history()
nt.assert_array_almost_equal(b1x1.get_frame(0, history_length=2), np.array([[20, 21], [200, 210]]))
nt.assert_array_almost_equal(b2x2.get_frame(0, history_length=2), np.array(
[[[20, 21], [20, 21]], [[200, 210], [200, 210]]]
))
nt.assert_array_almost_equal(b3x3.get_frame(0, history_length=2), np.array(
[[[[20, 21], [20, 21]], [[20, 21], [20, 21]]], [[[200, 210], [200, 210]], [[200, 210], [200, 210]]]]
))
nt.assert_array_almost_equal(
b1x1.get_transition(0, history_length=2)['observations_next'], np.array([[21, 22], [210, 220]])
)
nt.assert_array_almost_equal(b2x2.get_transition(0, history_length=2)['observations_next'], np.array(
[[[21, 22], [21, 22]], [[210, 220], [210, 220]]]
))
nt.assert_array_almost_equal(b3x3.get_transition(0, history_length=2)['observations_next'], np.array(
[[[[21, 22], [21, 22]], [[21, 22], [21, 22]]],
[[[210, 220], [210, 220]], [[210, 220], [210, 220]]]]
))
| 2,422 | 0 | 46 |
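# The tests above exercise a frame-stacking replay buffer end to end. For
# reference, a compact sketch of the API they rely on -- the fixture helpers
# and the buffer class are assumed to come from the test module / vel library:
buffer = get_filled_buffer_with_dones()
frame = buffer.get_frame(0, 4)                      # one stacked observation
pair = buffer.get_frame_with_future(1, 4)           # tests read pair[1] for the future frame
batch = buffer.get_transitions(np.array([0, 1, 2]), history_length=4)
idx = buffer.sample_batch_transitions(batch_size=5, history_length=4)
roll_idx = buffer.sample_batch_trajectories(rollout_length=5, history_length=4)
rollout = buffer.get_trajectories(index=roll_idx, rollout_length=5, history_length=4)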
8e2b0b06e771b67acac4db18531307d55d685e93 | 731 | py | Python | tests/ledsolid.py | eddiecarbin/riverapp | 4971ace1113021c3d4f3f0eee6db6de2c5f2f26f | [
"MIT"
] | null | null | null | tests/ledsolid.py | eddiecarbin/riverapp | 4971ace1113021c3d4f3f0eee6db6de2c5f2f26f | [
"MIT"
] | null | null | null | tests/ledsolid.py | eddiecarbin/riverapp | 4971ace1113021c3d4f3f0eee6db6de2c5f2f26f | [
"MIT"
] | null | null | null | import adafruit_fancyled.adafruit_fancyled as fancy
import board
import adafruit_dotstar
import time
num_leds = 1085
spread = 5
# Declare a DotStar object on the hardware SPI pins (SCK/MOSI) with num_leds pixels, no auto-write.
# Set brightness to max because we'll be using FancyLED's brightness control.
pixels = adafruit_dotstar.DotStar(board.SCK, board.MOSI, num_leds, brightness=1.0,
auto_write=False)
offset = 0 # Positional offset into color palette to get it to 'spin'
blue = fancy.CRGB(0.0, 0.0, 1.0) # Blue
red = fancy.CRGB(1.0, 0.0, 1.0) # Pink
yellow = fancy.CRGB(1.0, 1.0, 0.0) # Yellow
pixels.fill(blue.pack())  # pack() converts the 0.0-1.0 CRGB to the 0-255 ints DotStar expects
pixels.show()
time.sleep(3)
# pixels.
while True:
pass
#pixels.show()
| 22.84375 | 82 | 0.682627 | import adafruit_fancyled.adafruit_fancyled as fancy
import board
import adafruit_dotstar
import time
num_leds = 1085
spread = 5
# Declare a DotStar object on the hardware SPI pins (SCK/MOSI) with num_leds pixels, no auto-write.
# Set brightness to max because we'll be using FancyLED's brightness control.
pixels = adafruit_dotstar.DotStar(board.SCK, board.MOSI, num_leds, brightness=1.0,
auto_write=False)
offset = 0 # Positional offset into color palette to get it to 'spin'
blue = fancy.CRGB(0.0, 0.0, 1.0) # Blue
red = fancy.CRGB(1.0, 0.0, 1.0) # Pink
yellow = fancy.CRGB(1.0, 1.0, 0.0) # Yellow
pixels.fill(blue.pack())  # pack() converts the 0.0-1.0 CRGB to the 0-255 ints DotStar expects
pixels.show()
time.sleep(3)
# pixels.
while True:
pass
#pixels.show()
| 0 | 0 | 0 |
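# The unused `offset`/`spread` variables and the "spin" comment above suggest a
# palette animation was planned. A minimal sketch of that idea with FancyLED's
# palette_lookup (the palette contents here are an assumption):
palette = [blue, red, yellow]
while True:
    for i in range(num_leds):
        color = fancy.palette_lookup(palette, offset + i / num_leds)
        pixels[i] = color.pack()
    pixels.show()
    offset += 0.01  # advance the palette position so the colours 'spin'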
9c9ecdb788d6aab8e3681809ec53711d47a2b508 | 696 | py | Python | Python3/2163.py | Di-Ca-N/URI-Online-Judge | 160797b534fe8c70e719b1ea41690157dbdbb52e | [
"MIT"
] | null | null | null | Python3/2163.py | Di-Ca-N/URI-Online-Judge | 160797b534fe8c70e719b1ea41690157dbdbb52e | [
"MIT"
] | null | null | null | Python3/2163.py | Di-Ca-N/URI-Online-Judge | 160797b534fe8c70e719b1ea41690157dbdbb52e | [
"MIT"
] | null | null | null | l, c = [int(x) for x in input().split()]
matriz = [[0 for _ in range(c)] for _ in range(l)]
possible_sabers = []
for i in range(l):
for j, v in enumerate(input().split()):
v = int(v)
matriz[i][j] = v
if v == 42:
possible_sabers.append((i, j))
final = (0, 0)
pattern = [
(-1 , -1), (-1, 0), (-1, 1),
(0, -1), (0, 1),
(1, -1), (1, 0), (1, 1),
]
# A cell containing 42 is the saber only if all eight of its neighbours
# exist and hold 7; the for-else runs `else` only when no break happened.
for a, b in possible_sabers:
    for x, y in pattern:
        adx = a + x
        ady = b + y
        if 0 <= adx < l and 0 <= ady < c:
            if matriz[adx][ady] != 7:
                break
        else:
            # a neighbour falls outside the grid: reject this candidate
            break
    else:
        final = a + 1, b + 1  # answer is reported in 1-based coordinates
print(*final)
| 19.333333 | 50 | 0.418103 | l, c = [int(x) for x in input().split()]
matriz = [[0 for _ in range(c)] for _ in range(l)]
possible_sabers = []
for i in range(l):
for j, v in enumerate(input().split()):
v = int(v)
matriz[i][j] = v
if v == 42:
possible_sabers.append((i, j))
final = (0, 0)
pattern = [
(-1 , -1), (-1, 0), (-1, 1),
(0, -1), (0, 1),
(1, -1), (1, 0), (1, 1),
]
# A cell containing 42 is the saber only if all eight of its neighbours
# exist and hold 7; the for-else runs `else` only when no break happened.
for a, b in possible_sabers:
    for x, y in pattern:
        adx = a + x
        ady = b + y
        if 0 <= adx < l and 0 <= ady < c:
            if matriz[adx][ady] != 7:
                break
        else:
            # a neighbour falls outside the grid: reject this candidate
            break
    else:
        final = a + 1, b + 1  # answer is reported in 1-based coordinates
print(*final)
| 0 | 0 | 0 |
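# A tiny worked example of the neighbourhood rule above, on a hardcoded grid
# instead of stdin (the helper name `is_saber` is illustrative):
def is_saber(grid, a, b):
    neighbours = [(a + x, b + y) for x, y in pattern]
    return all(
        0 <= i < len(grid) and 0 <= j < len(grid[0]) and grid[i][j] == 7
        for i, j in neighbours
    )

demo = [[7, 7, 7],
        [7, 42, 7],
        [7, 7, 7]]
# the 42 at row 1, col 1 is fully ringed by 7s -> reported as (2, 2) in 1-based output
assert is_saber(demo, 1, 1)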
5ae007e1e9f393e3b87d6d6d37f79e5a2c9e6e21 | 870 | py | Python | python/121-130/Word Ladder II.py | KaiyuWei/leetcode | fd61f5df60cfc7086f7e85774704bacacb4aaa5c | [
"MIT"
] | 150 | 2015-04-04T06:53:49.000Z | 2022-03-21T13:32:08.000Z | python/121-130/Word Ladder II.py | yizhu1012/leetcode | d6fa443a8517956f1fcc149c8c4f42c0ad93a4a7 | [
"MIT"
] | 1 | 2015-04-13T15:15:40.000Z | 2015-04-21T20:23:16.000Z | python/121-130/Word Ladder II.py | yizhu1012/leetcode | d6fa443a8517956f1fcc149c8c4f42c0ad93a4a7 | [
"MIT"
] | 64 | 2015-06-30T08:00:07.000Z | 2022-01-01T16:44:14.000Z | # @param start, a string
# @param end, a string
# @param dict, a set of string
# @return a list of lists of string | 39.545455 | 61 | 0.51954 | import collections
import string


class Solution:
# @param start, a string
# @param end, a string
# @param dict, a set of string
# @return a list of lists of string
    def findLadders(self, start, end, dict):
        # Breadth-first search from `start`: at each level try every
        # one-letter substitution, and record the set of predecessor
        # words for every newly discovered word.
        level = {start}
        parents = collections.defaultdict(set)
        while level and end not in parents:
            next_level = collections.defaultdict(set)
            for node in level:
                for char in string.ascii_lowercase:
                    for i in range(len(start)):
                        n = node[:i]+char+node[i+1:]
                        if n in dict and n not in parents:
                            next_level[n].add(node)
            level = next_level
            parents.update(next_level)
        # Expand every shortest ladder by walking the parent links
        # backwards from `end` until the sequences reach `start`.
        res = [[end]]
        while res and res[0][0] != start:
            res = [[p]+r for r in res for p in parents[r[0]]]
        return res | 698 | -6 | 48
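# A quick usage sketch with the classic LeetCode example (word list from the
# problem statement; result order may differ between runs):
ladders = Solution().findLadders(
    "hit", "cog", {"hot", "dot", "dog", "lot", "log", "cog"})
# -> [['hit', 'hot', 'dot', 'dog', 'cog'], ['hit', 'hot', 'lot', 'log', 'cog']]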
db3868f6d228aef06bb456c448e5b41b0532474d | 1,867 | py | Python | geom2d/open_interval.py | Seeker1911/Mechanics | 59e7c9e06afd07ffa0a9f1bfde49afde74493d5b | [
"MIT"
] | null | null | null | geom2d/open_interval.py | Seeker1911/Mechanics | 59e7c9e06afd07ffa0a9f1bfde49afde74493d5b | [
"MIT"
] | null | null | null | geom2d/open_interval.py | Seeker1911/Mechanics | 59e7c9e06afd07ffa0a9f1bfde49afde74493d5b | [
"MIT"
] | null | null | null | from .nums import are_close_enough
class OpenInterval:
"""
An open interval is one where both ends aren't included.
For example, the range (2, 7) includes every number between
its two ends, 2 and 7, but the ends are excluded.
"""
@property
def length(self):
"""
Length of the interval: end - start.
:return: length
"""
return self.end - self.start
def contains(self, value: float):
"""
Tests whether this interval contains a given value or not.
:param value: `float` number
:return: is the value contained in the interval?
"""
return self.start < value < self.end
def overlaps_interval(self, other):
"""
Tests whether this and other interval overlap.
:param other: `OpenInterval`
:return: `bool` do intervals overlap?
"""
if are_close_enough(self.start, other.start) and \
are_close_enough(self.end, other.end):
return True
return self.contains(other.start) \
or self.contains(other.end) \
or other.contains(self.start) \
or other.contains(self.end)
def compute_overlap_with(self, other):
"""
Given two overlapping ranges, computes the range of their
overlap.
If the ranges don't overlap, `None` is returned.
:param other: `OpenRange`
:return: ranges overlap
"""
if not self.overlaps_interval(other):
return None
return OpenInterval(
max(self.start, other.start),
min(self.end, other.end)
)
| 27.455882 | 66 | 0.577933 | from .nums import are_close_enough
class OpenInterval:
"""
An open interval is one where both ends aren't included.
For example, the range (2, 7) includes every number between
its two ends, 2 and 7, but the ends are excluded.
"""
def __init__(self, start: float, end: float):
if start > end:
raise ValueError('start should be smaller than end')
self.start = start
self.end = end
@property
def length(self):
"""
Length of the interval: end - start.
:return: length
"""
return self.end - self.start
def contains(self, value: float):
"""
Tests whether this interval contains a given value or not.
:param value: `float` number
:return: is the value contained in the interval?
"""
return self.start < value < self.end
def overlaps_interval(self, other):
"""
Tests whether this and other interval overlap.
:param other: `OpenInterval`
:return: `bool` do intervals overlap?
"""
if are_close_enough(self.start, other.start) and \
are_close_enough(self.end, other.end):
return True
return self.contains(other.start) \
or self.contains(other.end) \
or other.contains(self.start) \
or other.contains(self.end)
def compute_overlap_with(self, other):
"""
Given two overlapping ranges, computes the range of their
overlap.
If the ranges don't overlap, `None` is returned.
:param other: `OpenRange`
:return: ranges overlap
"""
if not self.overlaps_interval(other):
return None
return OpenInterval(
max(self.start, other.start),
min(self.end, other.end)
)
| 163 | 0 | 27 |
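# A short usage sketch of the class above:
a = OpenInterval(2, 7)
b = OpenInterval(5, 9)
assert a.contains(3) and not a.contains(7)   # both ends are excluded
assert a.overlaps_interval(b)
overlap = a.compute_overlap_with(b)          # -> OpenInterval(5, 7)
assert (overlap.start, overlap.end, overlap.length) == (5, 7, 2)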
7a6de29b85379c597024e69a5ca7c498d689a942 | 112 | py | Python | src/Python/Utilities/VTKVersion.py | ajpmaclean/vtk-examples | 1a55fc8c6af67a3c07791807c7d1ec0ab97607a2 | [
"Apache-2.0"
] | 81 | 2020-08-10T01:44:30.000Z | 2022-03-23T06:46:36.000Z | src/Python/Utilities/VTKVersion.py | ajpmaclean/vtk-examples | 1a55fc8c6af67a3c07791807c7d1ec0ab97607a2 | [
"Apache-2.0"
] | 2 | 2020-09-12T17:33:52.000Z | 2021-04-15T17:33:09.000Z | src/Python/Utilities/VTKVersion.py | ajpmaclean/vtk-examples | 1a55fc8c6af67a3c07791807c7d1ec0ab97607a2 | [
"Apache-2.0"
] | 27 | 2020-08-17T07:09:30.000Z | 2022-02-15T03:44:58.000Z | #!/usr/bin/env python
from vtkmodules.vtkCommonCore import vtkVersion
print(vtkVersion.GetVTKSourceVersion())
| 18.666667 | 47 | 0.821429 | #!/usr/bin/env python
from vtkmodules.vtkCommonCore import vtkVersion
print(vtkVersion.GetVTKSourceVersion())
| 0 | 0 | 0 |
425d34508bda8b701517672d8013d51c79b3ba4c | 348 | py | Python | pyswarms/backend/__init__.py | fluencer/pyswarms | 52d69e48b64055638e67297536cd2d654ba073d6 | [
"MIT"
] | 1 | 2019-03-07T06:41:43.000Z | 2019-03-07T06:41:43.000Z | pyswarms/backend/__init__.py | fluencer/pyswarms | 52d69e48b64055638e67297536cd2d654ba073d6 | [
"MIT"
] | null | null | null | pyswarms/backend/__init__.py | fluencer/pyswarms | 52d69e48b64055638e67297536cd2d654ba073d6 | [
"MIT"
] | null | null | null | """
The :code:`pyswarms.backend` module abstracts various operations
for swarm optimization: generating boundaries, updating positions, etc.
You can use the methods implemented here to build your own PSO implementations.
"""
from .generators import *
from .operators import *
from .swarms import *
__all__ = ["generators", "operators", "swarms"]
| 29 | 79 | 0.767241 | """
The :code:`pyswarms.backend` module abstracts various operations
for swarm optimization: generating boundaries, updating positions, etc.
You can use the methods implemented here to build your own PSO implementations.
"""
from .generators import *
from .operators import *
from .swarms import *
__all__ = ["generators", "operators", "swarms"]
| 0 | 0 | 0 |
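# A minimal sketch of building on this backend, following the library's own
# tutorial API (create_swarm / compute_pbest); treat the exact signatures as
# assumptions against your installed pyswarms version:
import numpy as np
import pyswarms.backend as P

options = {"c1": 0.5, "c2": 0.3, "w": 0.9}
swarm = P.create_swarm(n_particles=20, dimensions=2, options=options)
swarm.current_cost = np.sum(swarm.position ** 2, axis=1)   # toy sphere objective
swarm.pbest_pos = swarm.position.copy()
swarm.pbest_cost = swarm.current_cost.copy()
swarm.pbest_pos, swarm.pbest_cost = P.compute_pbest(swarm)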
b43fb477ca4ff9ec7cdf6cf7cfad1e288cfc796b | 4,170 | py | Python | deeprobust/image/preprocessing/APE-GAN.py | shixiongjing/DeepRobust | 20328ffef0330726437c56a04514a7f8a5296e53 | [
"MIT"
] | 647 | 2020-02-08T02:13:21.000Z | 2022-03-31T07:44:00.000Z | deeprobust/image/preprocessing/APE-GAN.py | shixiongjing/DeepRobust | 20328ffef0330726437c56a04514a7f8a5296e53 | [
"MIT"
] | 77 | 2020-03-21T11:27:30.000Z | 2022-03-23T10:55:53.000Z | deeprobust/image/preprocessing/APE-GAN.py | shixiongjing/DeepRobust | 20328ffef0330726437c56a04514a7f8a5296e53 | [
"MIT"
] | 139 | 2020-03-04T00:25:12.000Z | 2022-03-21T15:45:29.000Z | import os
import argparse
import torch
import torch.nn
from torch.utils.data import TensorDataset
import torch.backends.cudnn as cudnn
if __name__ == "__main__":
    args = get_args()
main(args)
| 32.578125 | 100 | 0.583453 | import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset
import torch.backends.cudnn as cudnn
class Generator(nn.Module):
def __init__(self, in_ch):
super(Generator, self).__init__()
self.conv1 = nn.Conv2d(in_ch, 64, 4, stride=2, padding=1)
self.bn1 = nn.BatchNorm2d(64)
self.conv2 = nn.Conv2d(64, 128, 4, stride=2, padding=1)
self.bn2 = nn.BatchNorm2d(128)
self.deconv3 = nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1)
self.bn3 = nn.BatchNorm2d(64)
self.deconv4 = nn.ConvTranspose2d(64, in_ch, 4, stride=2, padding=1)
def forward(self, x):
h = F.leaky_relu(self.bn1(self.conv1(x)))
h = F.leaky_relu(self.bn2(self.conv2(h)))
h = F.leaky_relu(self.bn3(self.deconv3(h)))
h = F.tanh(self.deconv4(h))
return h
class Discriminator(nn.Module):
def __init__(self, in_ch):
super(Discriminator, self).__init__()
self.conv1 = nn.Conv2d(in_ch, 64, 3, stride=2)
self.conv2 = nn.Conv2d(64, 128, 3, stride=2)
self.bn2 = nn.BatchNorm2d(128)
self.conv3 = nn.Conv2d(128, 256, 3, stride=2)
self.bn3 = nn.BatchNorm2d(256)
if in_ch == 1:
self.fc4 = nn.Linear(1024, 1)
else:
self.fc4 = nn.Linear(2304, 1)
def forward(self, x):
h = F.leaky_relu(self.conv1(x))
h = F.leaky_relu(self.bn2(self.conv2(h)))
h = F.leaky_relu(self.bn3(self.conv3(h)))
h = F.sigmoid(self.fc4(h.view(h.size(0), -1)))
return h
def main(args):
    # Channel count: assumed from the dataset flag (1 for MNIST, 3 otherwise)
    C = 1 if args.data == "mnist" else 3
    #Initialize GAN model
    G = Generator(in_ch=C).cuda()
    D = Discriminator(in_ch=C).cuda()
    #Initialize optimizers (lr comes from the CLI arguments)
    opt_G = torch.optim.Adam(G.parameters(), lr=args.lr, betas=(0.5, 0.999))
    opt_D = torch.optim.Adam(D.parameters(), lr=args.lr, betas=(0.5, 0.999))
loss_bce = nn.BCELoss()
loss_mse = nn.MSELoss()
cudnn.benchmark = True
#Initialize DataLoader
train_data = torch.load("./adv_data.tar")
train_data = TensorDataset(train_data["normal"], train_data["adv"])
train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True)
#Start Training
for i in range(args.epochs):
        G.train()
gen_loss, dis_loss, n = 0, 0, 0
for x, x_adv in train_loader:
current_size = x.size(0)
x, x_adv = x.cuda(), x_adv.cuda()
#Train Discriminator
t_real = torch.ones(current_size).cuda()
t_fake = torch.zeros(current_size).cuda()
y_real = D(x).squeeze()
x_fake = G(x_adv)
y_fake = D(x_fake).squeeze()
loss_D = loss_bce(y_real, t_real) + loss_bce(y_fake, t_fake)
opt_D.zero_grad()
loss_D.backward()
opt_D.step()
# Train G
for _ in range(2):
x_fake = G(x_adv)
y_fake = D(x_fake).squeeze()
loss_G = args.alpha * loss_mse(x_fake, x) + args.beta * loss_bce(y_fake, t_real)
opt_G.zero_grad()
loss_G.backward()
opt_G.step()
            gen_loss += loss_G.item() * x.size(0)
            dis_loss += loss_D.item() * x.size(0)
n += x.size(0)
print("epoch:{}, LossG:{:.3f}, LossD:{:.3f}".format(i, gen_loss / n, dis_loss / n))
torch.save({"generator": G.state_dict(), "discriminator": D.state_dict()},
os.path.join(args.checkpoint, "{}.tar".format(i + 1)))
G.eval()
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--data", type=str, default="mnist")
parser.add_argument("--lr", type=float, default=0.0002)
parser.add_argument("--epochs", type=int, default=2)
parser.add_argument("--alpha", type=float, default=0.7)
parser.add_argument("--beta", type=float, default=0.3)
parser.add_argument("--checkpoint", type=str, default="./checkpoint/test")
args = parser.parse_args()
return args
if __name__ == "__main__":
    args = get_args()
main(args)
| 3,757 | 16 | 200 |
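# main() above loads "./adv_data.tar" as a dict of two aligned tensors.
# A sketch of producing a compatible dummy file (the 1x28x28 MNIST-like
# shape is an assumption):
import torch

normal = torch.rand(128, 1, 28, 28)
adv = (normal + 0.1 * torch.randn_like(normal)).clamp(0.0, 1.0)
torch.save({"normal": normal, "adv": adv}, "./adv_data.tar")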
8bd28e53f6fd361bbb2c90baea93e5f56ae137e1 | 20,293 | py | Python | clickhouse_integration.py | millecodex/SEM | 39aedb8934fdf3c84d63d0cad444229979433a7a | [
"MIT"
] | 2 | 2022-01-06T01:53:14.000Z | 2022-01-06T06:29:35.000Z | clickhouse_integration.py | millecodex/SEM | 39aedb8934fdf3c84d63d0cad444229979433a7a | [
"MIT"
] | null | null | null | clickhouse_integration.py | millecodex/SEM | 39aedb8934fdf3c84d63d0cad444229979433a7a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
'''
# W O R K F L O W #
1. download github data in native clickhouse format (74.6 gb, ~10hours to download)
2. clickhouse server must be running
see: https://clickhouse.tech/docs/en/getting-started/install/
>sudo service clickhouse-server start (may need sudo -u japple)
>clickhouse-client
# Insert the database into clickhouse
3. create the db tables:
>CREATE TABLE github_events ...
see https://github-sql.github.io/explorer/#install-clickhouse
4. Insert the DB file into clickhouse <E:\Documents\Clickhouse Github data\github_events_v2.native.xz>
5. run code here to connect to clickhouse client and manipulate data
#
# Note the clickhouse driver (python) communicates with the clickhouse server via a native TCP/IP protocol
# that ships data as typed values; this will cause problems when INSERT-ing into a DB, however I don't see
# this as an issue
'''
# In[83]:
from sqlalchemy import create_engine
from clickhouse_driver import Client
# dependencies
# >ipython-sql
# install by command prompt:
# >conda install -yc conda-forge ipython-sql
client = Client('localhost')
# In[ ]:
# load CSV file into dataframe
# get test dataframe with different repos
# loop through dataframe
# pull repo
# build query
# run query
# write to dataframe
# In[ ]:
import pandas as pd
# not yet needed here
import time
import math
# In[ ]:
# Read CSV file into DataFrame df
# 200_repos_ready.csv has no index, CMC_id is in first column
# NaN is assigned to empty cells
dfs = pd.read_csv('200_repos.csv', index_col=0)
# In[ ]:
df = dfs[['repo','forge']].copy()
# In[ ]:
# subset dataframes for testing
# use .copy() as slicing will not allow for assignment
df10 = df.iloc[:10].copy()
df33 = df.iloc[:33].copy()
# In[ ]:
query_stars_L = '''
SELECT
count()
FROM github_events
WHERE event_type = 'WatchEvent'
AND repo_name ='''
query_stars_R = '''
GROUP BY action
'''
repo = '''
'HuobiGroup/huobi-eco-chain'
'''
# In[203]:
query_test_noStars = '''
SELECT
count()
FROM github_events
WHERE event_type = 'WatchEvent'
AND repo_name =
'millecodex/SEM'
GROUP BY action
'''
# In[ ]:
query2 = '''
SELECT
count()
FROM github_events
WHERE event_type = 'WatchEvent'
AND repo_name =
'HuobiGroup/huobi-eco-chain'
GROUP BY action
'''
# In[206]:
res=client.execute(query_test_noStars)
if not res: print('not')
# In[ ]:
# test query that returns empty list (no results)
if not res: print('not')
# In[ ]:
# Write a function for this
#
# initialize new column to null/None
df['stars']=None
# iterate the dataframe as follows:
'''
loop through dataframe
pull repo
build query
run query
update dataframe
'''
for row in df.itertuples():
# only github for now as client is connected to github_events DB
if row.forge == 'github':
stars = 0
repo = row.repo
# skip the NaN repos
if type(repo) == str:
query = query_stars_L + '\''+repo+'\'' + query_stars_R
stars = client.execute(query)
# query returns a tuple of list elements accessible by [first list][first item]
# no stars returns an empty list
if not stars:
df.at[row.Index, 'stars'] = 0
else: df.at[row.Index, 'stars'] = stars[0][0]
# In[ ]:
# write update to 200_copy_stars.csv
# note beginning of script: pd.read_csv('200_repos_ready.csv', index_col=0)
df.to_csv('200_stars.csv', encoding='utf-8', index=1)
df
# In[ ]:
# Read in 200_repos.csv
# has no index, CMC_id is in first column
dfr = pd.read_csv('200_repos.csv', index_col=0)
# new df with only 2 columns
# 'CMC_id' as index is maintained
df = dfr[['repo','forge']].copy()
# In[ ]:
query_forks_L = '''
SELECT
count() AS forks
FROM github_events
WHERE event_type = 'ForkEvent' AND repo_name =
'''
query_forks_R = '''
'curvefi/curve-dao-contracts/tree/master/doc'
'''
query_forks = query_forks_L + query_forks_R
query_forks
# In[ ]:
result=client.execute(query_forks)
print(result)
# In[ ]:
# Write a function for this
#
# initialize new column to null/None
# might not be necessary
df['forks']=None
# iterate the dataframe as follows:
'''
loop through dataframe
pull repo
build query
run query
update dataframe
'''
for row in df.itertuples():
# only github for now as client is connected to github_events DB
if row.forge == 'github':
forks = 0
repo = row.repo
# skip the NaN repos
if type(repo) == str:
query = query_forks_L + '\''+repo+'\''
forks = client.execute(query)
# query returns a tuple of list elements accessible by [first list][first item]
# no forks returns an empty list
if not forks:
df.at[row.Index, 'forks'] = 0
else: df.at[row.Index, 'forks'] = forks[0][0]
# In[ ]:
# write update to 200_forks.csv
df.to_csv('200_forks.csv', encoding='utf-8', index=1)
df
# In[ ]:
# merge two csv files into one
# 1. 200_stars.csv
# 2. 200_forks.csv
#
# might prefer to append the new column? merge seems a bit cumbersome?
#
# has no index, CMC_id is in first column
dfs = pd.read_csv('200_stars.csv', index_col=0)
#dfsm = dfs[['stars']].copy()
dff = pd.read_csv('200_forks.csv', index_col=0)
#dffm = dff[['forks']].copy()
#
# In[ ]:
# 'CMC_id' is the key, however 'repo', and 'forge' are also merged
# to prevent duplicate columns
# -> might be unnecessary?
dfm = pd.merge(dfs,dff,on=['CMC_id','repo','forge'])
# In[ ]:
# write update to 200_merged.csv
dfm.to_csv('200_merged.csv', encoding='utf-8', index=1)
# In[91]:
# AUTHORS query:
# A most-recent three-month average
# excluding current month because it is in progress
# modify for static clickhouse data which stops at 2020-12-07
# >>created_at >= dateSub(MONTH, 6,toStartOfMonth(now())) AND
# >>created_at < dateSub(MONTH, 3,toStartOfMonth(now()))
#
QUERY_AUTHORS = '''
SELECT
ROUND( SUM(authors) / COUNT(month), 2) AS average
FROM
(
SELECT
uniq(actor_login) AS authors,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE event_type IN ('PullRequestEvent', 'IssuesEvent', 'IssueCommentEvent', 'PullRequestReviewCommentEvent') AND
repo_name = 'bitcoin/bitcoin' AND
created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())
GROUP BY month, year
ORDER BY year DESC, month DESC
)'''
query_authors_L = '''
SELECT
ROUND( SUM(authors) / COUNT(month), 2) AS average
FROM
(
SELECT
uniq(actor_login) AS authors,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE event_type IN ('PullRequestEvent', 'IssuesEvent', 'IssueCommentEvent', 'PullRequestReviewCommentEvent') AND
repo_name =
'''
q_repo='bitcoin/bitcoin'
query_authors_R = '''AND
/*created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())*/
created_at >= dateSub(MONTH, 6,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 3,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)'''
query_authors=query_authors_L + '\'' + q_repo + '\'' + query_authors_R
# In[99]:
# Read in 200_repos.csv
dfr = pd.read_csv('200_repos.csv', index_col=0)
# new df with only 2 columns
# 'CMC_id' as index is maintained
df = dfr[['repo','forge']].copy()
#dfs = df[0:20].copy()
# In[92]:
res=client.execute(QUERY_AUTHORS)
res
# In[ ]:
print(QUERY_AUTHORS)
# In[93]:
print(query_authors)
# In[101]:
for row in df.itertuples():
# only github for now as client is connected to github_events DB
if row.forge == 'github':
#forks = 0
repo = row.repo
# skip the NaN repos
if type(repo) == str:
query = query_authors_L + '\'' + repo + '\'' + query_authors_R
authors = client.execute(query)
# query returns a tuple of list elements accessible by [first list][first item]
# average of no authors returns a nan
            if math.isnan(authors[0][0]):
df.at[row.Index, 'authors'] = 0
else: df.at[row.Index, 'authors'] = authors[0][0]
# In[ ]:
# In[104]:
# write update to 200_authors.csv
df.to_csv('200_authors.csv', encoding='utf-8', index=1)
# In[105]:
# update MERGED sheet with new data
# 'CMC_id' is the key, however 'repo', and 'forge' are also merged
# to prevent duplicate columns
df_temp = pd.read_csv('200_merged.csv', index_col=0)
dfm = pd.merge(df_temp,df,on=['CMC_id','repo','forge'])
dfm.to_csv('200_merged.csv', encoding='utf-8', index=1)
# In[106]:
print(client.execute('SELECT created_at FROM github_events ORDER by created_at DESC LIMIT 10'))
# In[161]:
# COMMITS query:
# A most-recent three-month average
# excluding current month because it is in progress
#
# modify for static clickhouse data which stops at 2020-12-07:
# >>created_at >= dateSub(MONTH, 6,toStartOfMonth(now())) AND
# >>created_at < dateSub(MONTH, 3,toStartOfMonth(now()))
#
# note: there will be moderate timezone discrepancies, especially
# when calculating near the first of the month
#
QUERY_COMMITS = '''
SELECT ROUND( SUM(sum_push_distinct) / COUNT(month), 2) AS average
FROM
(
SELECT SUM(push_distinct_size) AS sum_push_distinct,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE repo_name = 'bitcoin/bitcoin' AND
event_type = 'PushEvent' AND
/*created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())*/
created_at >= dateSub(MONTH, 7,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 4,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)
'''
query_commits_L ='''
SELECT ROUND( SUM(sum_push_distinct) / COUNT(month), 2) AS average
FROM
(
SELECT SUM(push_distinct_size) AS sum_push_distinct,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE repo_name =
'''
q_repo='bitcoin/bitcoin'
query_commits_R = '''
AND
event_type = 'PushEvent' AND
/*created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())*/
created_at >= dateSub(MONTH, 7,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 4,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)
'''
query_commits=query_commits_L + '\'' + q_repo + '\'' + query_commits_R
# In[163]:
res=client.execute(query_commits)
res
# In[199]:
# Read in 200_repos.csv
dfr = pd.read_csv('200_repos.csv', index_col=0)
# new df with only 2 columns
# 'CMC_id' as index is maintained
df = dfr[['repo','forge']].copy()
# In[181]:
query_test_zero='''
SELECT ROUND( SUM(sum_push_distinct) / COUNT(month), 2) AS average
FROM
(
SELECT SUM(push_distinct_size) AS sum_push_distinct,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE repo_name = 'Uniswap/uniswap-v2-core' AND
event_type = 'PushEvent' AND
/*created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())*/
created_at >= dateSub(MONTH, 6,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 3,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)'''
res=client.execute(query_test_zero)
res
# In[ ]:
import math
if math.isnan(res[0][0]): print('not')
else: print('dunno')
# In[200]:
for row in df.itertuples():
# only github for now as client is connected to github_events DB
if row.forge == 'github':
#forks = 0
repo = row.repo
# skip the NaN repos
if type(repo) == str:
query = query_commits_L + '\'' + repo + '\'' + query_commits_R
result = client.execute(query)
# query returns a tuple of list elements accessible by [first list][first item]
# average of no commits returns a nan
if math.isnan(result[0][0]):
df.at[row.Index, 'commits'] = 0
else: df.at[row.Index, 'commits'] = result[0][0]
# In[202]:
# write update to 200_commits.csv
df.to_csv('200_commits.csv', encoding='utf-8', index=1)
# In[168]:
# update MERGED sheet with new data
# 'CMC_id' is the key, however 'repo', and 'forge' are also merged
# to prevent duplicate columns
df_temp = pd.read_csv('200_merged.csv', index_col=0)
dfm = pd.merge(df_temp,df,on=['CMC_id','repo','forge'])
dfm.to_csv('200_merged.csv', encoding='utf-8', index=1)
# In[207]:
# total COMMENTS includes all commenting activity
# any comments counts as activity and increase engagement
# there are 3 event_type comment events:
# >CommitCommentEvent
# >IssueCommentEvent
# >CommitCommentEvent
#
'''
/* View distribution of comments*/
SELECT
uniq(comment_id) AS total_comments,
uniqIf(comment_id, event_type = 'PullRequestReviewCommentEvent') AS pr_comments,
uniqIf(comment_id, event_type = 'IssueCommentEvent') AS issue_comments,
uniqIf(comment_id, event_type = 'CommitCommentEvent') AS commit_comments,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE
repo_name = 'bitcoin/bitcoin' AND
toYear(created_at) >= 2020
GROUP BY month, year
ORDER BY year DESC, month DESC
'''
# only Sept/Oct/Nov 2020 #
QUERY_COMMENTS='''
SELECT ROUND( SUM(total) / COUNT(month), 2) AS average
FROM
(
SELECT
(
uniqIf(comment_id, event_type = 'PullRequestReviewCommentEvent')+
uniqIf(comment_id, event_type = 'IssueCommentEvent')+
uniqIf(comment_id, event_type = 'CommitCommentEvent') ) AS total,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE
repo_name = 'bitcoin/bitcoin' AND
/*created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())*/
created_at >= dateSub(MONTH, 7,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 4,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)
'''
query_L='''
SELECT ROUND( SUM(total) / COUNT(month), 2) AS average
FROM
(
SELECT
(
uniqIf(comment_id, event_type = 'PullRequestReviewCommentEvent')+
uniqIf(comment_id, event_type = 'IssueCommentEvent')+
uniqIf(comment_id, event_type = 'CommitCommentEvent') ) AS total,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE
repo_name =
'''
query_R='''
AND
/*created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())*/
created_at >= dateSub(MONTH, 7,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 4,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)
'''
# In[209]:
res=client.execute(QUERY_COMMENTS)
res
# In[212]:
#
query_L='''
SELECT ROUND( SUM(total) / COUNT(month), 2) AS average
FROM
(
SELECT
(
uniqIf(comment_id, event_type = 'PullRequestReviewCommentEvent')+
uniqIf(comment_id, event_type = 'IssueCommentEvent')+
uniqIf(comment_id, event_type = 'CommitCommentEvent') ) AS total,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE
repo_name =
'''
query_R='''
AND
/*created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())*/
created_at >= dateSub(MONTH, 7,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 4,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)
'''
'''
@column name of the column to be added to the dataframe
@query_L
@query_R
@df dataframe
'''
# In[213]:
# Read in 200_repos.csv
dfr = pd.read_csv('200_repos.csv', index_col=0)
# new df with only 2 columns
# 'CMC_id' as index is maintained
df = dfr[['repo','forge']].copy()
# In[217]:
runQuery('comments',query_L,query_R,df)
# In[220]:
# update MERGED sheet with new data
# 'CMC_id' is the key, however 'repo', and 'forge' are also merged
# to prevent duplicate columns
df_temp = pd.read_csv('200_merged.csv', index_col=0)
dfm = pd.merge(df_temp,df,on=['CMC_id','repo','forge'])
dfm.to_csv('200_merged.csv', encoding='utf-8', index=1)
# In[224]:
# view all PR activity sorted into: opened, closed, reopened
'''
SELECT COUNT() AS total,
SUM(action = 'opened') AS opened,
SUM(action = 'closed') AS closed,
SUM(action = 'reopened') AS reopened,
toYear(created_at) AS year,
toMonth(created_at) AS month
FROM github_events
WHERE repo_name = 'bitcoin/bitcoin' AND
toYear(created_at) >= '2019' AND
event_type = 'PullRequestEvent'
GROUP BY month, year
ORDER BY year DESC, month DESC
'''
'''
SELECT
ROUND( SUM(opened) / COUNT(month), 2) AS average
FROM
(
SELECT
SUM(action = 'opened') AS opened,
toYear(created_at) AS year,
toMonth(created_at) AS month
FROM github_events
WHERE repo_name = 'bitcoin/bitcoin' AND
event_type = 'PullRequestEvent' AND
created_at >= dateSub(MONTH, 7,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 4,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)
'''
query_L='''
SELECT
ROUND( SUM(opened) / COUNT(month), 2) AS average
FROM
(
SELECT
SUM(action = 'opened') AS opened,
toYear(created_at) AS year,
toMonth(created_at) AS month
FROM github_events
WHERE repo_name =
'''
query_R='''
AND
event_type = 'PullRequestEvent' AND
created_at >= dateSub(MONTH, 7,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 4,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)
'''
# In[225]:
# Read in 200_repos.csv
dfr = pd.read_csv('200_repos.csv', index_col=0)
# new df with only 2 columns
# 'CMC_id' as index is maintained
df = dfr[['repo','forge']].copy()
runQuery('PR_open',query_L,query_R,df)
# In[226]:
# update MERGED sheet with new data
# 'CMC_id' is the key, however 'repo', and 'forge' are also merged
# to prevent duplicate columns
df_temp = pd.read_csv('200_merged.csv', index_col=0)
dfm = pd.merge(df_temp,df,on=['CMC_id','repo','forge'])
dfm.to_csv('200_merged.csv', encoding='utf-8', index=1)
# In[227]:
dfm
# In[234]:
import sys,time
#criticality (again)
# >>>>!!!!
# minor problem here with ETC double-IDs...
# !!!!>>>>
# Read in 200_repos.csv
dfr = pd.read_csv('200_repos.csv', index_col=0)
# new df with only 2 columns
# 'CMC_id' as index is maintained
df = dfr[['source_code','forge']].copy()
dfc = pd.read_csv('Project_Criticality_all.csv')
for row in df.itertuples():
# only search for strings; floats (NaN) are skipped
if isinstance(row.source_code, str):
url = str(row.source_code)
# loop through df2 (criticality) looking for source code url
for row2 in dfc.itertuples():
if url == row2.url:
                df.at[row.Index, 'criticality'] = row2.criticality_score
break
sys.stdout.write(".")
sys.stdout.flush()
# In[246]:
# update MERGED sheet with new data
# 'CMC_id' is the key, however 'repo', and 'forge' are also merged
# to prevent duplicate columns
df.drop(columns=['source_code'], inplace=True)
df_temp = pd.read_csv('200_merged.csv', index_col=0)
dfm = pd.merge(df_temp,df,on=['CMC_id','forge'])
dfm.to_csv('200_merged.csv', encoding='utf-8', index=1)
# In[ ]:
| 23.679113 | 117 | 0.662544 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
'''
# W O R K F L O W #
1. download github data in native clickhouse format (74.6 gb, ~10hours to download)
2. clickhouse server must be running
see: https://clickhouse.tech/docs/en/getting-started/install/
>sudo service clickhouse-server start (may need sudo -u japple)
>clickhouse-client
# Insert the database into clickhouse
3. create the db tables:
>CREATE TABLE github_events ...
see https://github-sql.github.io/explorer/#install-clickhouse
4. Insert the DB file into clickhouse <E:\Documents\Clickhouse Github data\github_events_v2.native.xz>
5. run code here to connect to clickhouse client and manipulate data
#
# Note the clickhouse driver (python) communicates with the clickhouse server via a native TCP/IP protocol
# that ships data as typed values; this will cause problems when INSERT-ing into a DB, however I don't see
# this as an issue
'''
# In[83]:
from sqlalchemy import create_engine
from clickhouse_driver import Client
# dependencies
# >ipython-sql
# install by command prompt:
# >conda install -yc conda-forge ipython-sql
client = Client('localhost')
# In[ ]:
# load CSV file into dataframe
# get test dataframe with different repos
# loop through dataframe
# pull repo
# build query
# run query
# write to dataframe
# In[ ]:
import pandas as pd
# not yet needed here
import time
import math
# In[ ]:
# Read CSV file into DataFrame df
# 200_repos_ready.csv has no index, CMC_id is in first column
# NaN is assigned to empty cells
dfs = pd.read_csv('200_repos.csv', index_col=0)
# In[ ]:
df = dfs[['repo','forge']].copy()
# In[ ]:
# subset dataframes for testing
# use .copy() as slicing will not allow for assignment
df10 = df.iloc[:10].copy()
df33 = df.iloc[:33].copy()
# In[ ]:
query_stars_L = '''
SELECT
count()
FROM github_events
WHERE event_type = 'WatchEvent'
AND repo_name ='''
query_stars_R = '''
GROUP BY action
'''
repo = '''
'HuobiGroup/huobi-eco-chain'
'''
# In[203]:
query_test_noStars = '''
SELECT
count()
FROM github_events
WHERE event_type = 'WatchEvent'
AND repo_name =
'millecodex/SEM'
GROUP BY action
'''
# In[ ]:
query2 = '''
SELECT
count()
FROM github_events
WHERE event_type = 'WatchEvent'
AND repo_name =
'HuobiGroup/huobi-eco-chain'
GROUP BY action
'''
# In[206]:
res=client.execute(query_test_noStars)
if not res: print('not')
# In[ ]:
# test query that returns empty list (no results)
if not res: print('not')
# In[ ]:
# Write a function for this
#
# initialize new column to null/None
df['stars']=None
# iterate the dataframe as follows:
'''
loop through dataframe
pull repo
build query
run query
update dataframe
'''
for row in df.itertuples():
# only github for now as client is connected to github_events DB
if row.forge == 'github':
stars = 0
repo = row.repo
# skip the NaN repos
if type(repo) == str:
query = query_stars_L + '\''+repo+'\'' + query_stars_R
stars = client.execute(query)
# query returns a tuple of list elements accessible by [first list][first item]
# no stars returns an empty list
if not stars:
df.at[row.Index, 'stars'] = 0
else: df.at[row.Index, 'stars'] = stars[0][0]
# In[ ]:
# write update to 200_copy_stars.csv
# note beginning of script: pd.read_csv('200_repos_ready.csv', index_col=0)
df.to_csv('200_stars.csv', encoding='utf-8', index=1)
df
# In[ ]:
# Read in 200_repos.csv
# has no index, CMC_id is in first column
dfr = pd.read_csv('200_repos.csv', index_col=0)
# new df with only 2 columns
# 'CMC_id' as index is maintained
df = dfr[['repo','forge']].copy()
# In[ ]:
query_forks_L = '''
SELECT
count() AS forks
FROM github_events
WHERE event_type = 'ForkEvent' AND repo_name =
'''
query_forks_R = '''
'curvefi/curve-dao-contracts/tree/master/doc'
'''
query_forks = query_forks_L + query_forks_R
query_forks
# In[ ]:
result=client.execute(query_forks)
print(result)
# In[ ]:
# Write a function for this
#
# initialize new column to null/None
# might not be necessary
df['forks']=None
# iterate the dataframe as follows:
'''
loop through dataframe
pull repo
build query
run query
update dataframe
'''
for row in df.itertuples():
# only github for now as client is connected to github_events DB
if row.forge == 'github':
forks = 0
repo = row.repo
# skip the NaN repos
if type(repo) == str:
query = query_forks_L + '\''+repo+'\''
forks = client.execute(query)
# query returns a tuple of list elements accessible by [first list][first item]
# no forks returns an empty list
if not forks:
df.at[row.Index, 'forks'] = 0
else: df.at[row.Index, 'forks'] = forks[0][0]
# In[ ]:
# write update to 200_forks.csv
df.to_csv('200_forks.csv', encoding='utf-8', index=1)
df
# In[ ]:
# merge two csv files into one
# 1. 200_stars.csv
# 2. 200_forks.csv
#
# might prefer to append the new column? merge seems a bit cumbersome?
#
# has no index, CMC_id is in first column
dfs = pd.read_csv('200_stars.csv', index_col=0)
#dfsm = dfs[['stars']].copy()
dff = pd.read_csv('200_forks.csv', index_col=0)
#dffm = dff[['forks']].copy()
#
# In[ ]:
# 'CMC_id' is the key, however 'repo', and 'forge' are also merged
# to prevent duplicate columns
# -> might be unnecessary?
dfm = pd.merge(dfs,dff,on=['CMC_id','repo','forge'])
# In[ ]:
# write update to 200_merged.csv
dfm.to_csv('200_merged.csv', encoding='utf-8', index=1)
# In[91]:
# AUTHORS query:
# A most-recent three-month average
# excluding current month because it is in progress
# modify for static clickhouse data which stops at 2020-12-07
# >>created_at >= dateSub(MONTH, 6,toStartOfMonth(now())) AND
# >>created_at < dateSub(MONTH, 3,toStartOfMonth(now()))
#
QUERY_AUTHORS = '''
SELECT
ROUND( SUM(authors) / COUNT(month), 2) AS average
FROM
(
SELECT
uniq(actor_login) AS authors,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE event_type IN ('PullRequestEvent', 'IssuesEvent', 'IssueCommentEvent', 'PullRequestReviewCommentEvent') AND
repo_name = 'bitcoin/bitcoin' AND
created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())
GROUP BY month, year
ORDER BY year DESC, month DESC
)'''
query_authors_L = '''
SELECT
ROUND( SUM(authors) / COUNT(month), 2) AS average
FROM
(
SELECT
uniq(actor_login) AS authors,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE event_type IN ('PullRequestEvent', 'IssuesEvent', 'IssueCommentEvent', 'PullRequestReviewCommentEvent') AND
repo_name =
'''
q_repo='bitcoin/bitcoin'
query_authors_R = '''AND
/*created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())*/
created_at >= dateSub(MONTH, 6,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 3,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)'''
query_authors=query_authors_L + '\'' + q_repo + '\'' + query_authors_R
# In[99]:
# Read in 200_repos.csv
dfr = pd.read_csv('200_repos.csv', index_col=0)
# new df with only 2 columns
# 'CMC_id' as index is maintained
df = dfr[['repo','forge']].copy()
#dfs = df[0:20].copy()
# In[92]:
res=client.execute(QUERY_AUTHORS)
res
# In[ ]:
print(QUERY_AUTHORS)
# In[93]:
print(query_authors)
# In[101]:
for row in df.itertuples():
# only github for now as client is connected to github_events DB
if row.forge == 'github':
#forks = 0
repo = row.repo
# skip the NaN repos
if type(repo) == str:
query = query_authors_L + '\'' + repo + '\'' + query_authors_R
authors = client.execute(query)
# query returns a tuple of list elements accessible by [first list][first item]
# average of no authors returns a nan
            if math.isnan(authors[0][0]):
df.at[row.Index, 'authors'] = 0
else: df.at[row.Index, 'authors'] = authors[0][0]
# In[ ]:
# In[104]:
# write update to 200_authors.csv
df.to_csv('200_authors.csv', encoding='utf-8', index=1)
# In[105]:
# update MERGED sheet with new data
# 'CMC_id' is the key, however 'repo', and 'forge' are also merged
# to prevent duplicate columns
df_temp = pd.read_csv('200_merged.csv', index_col=0)
dfm = pd.merge(df_temp,df,on=['CMC_id','repo','forge'])
dfm.to_csv('200_merged.csv', encoding='utf-8', index=1)
# In[106]:
print(client.execute('SELECT created_at FROM github_events ORDER by created_at DESC LIMIT 10'))
# In[161]:
# COMMITS query:
# A most-recent three-month average
# excluding current month because it is in progress
#
# modify for static clickhouse data which stops at 2020-12-07:
# >>created_at >= dateSub(MONTH, 6,toStartOfMonth(now())) AND
# >>created_at < dateSub(MONTH, 3,toStartOfMonth(now()))
#
# note: there will be moderate timezone discrepancies, especially
# when calculating near the first of the month
#
QUERY_COMMITS = '''
SELECT ROUND( SUM(sum_push_distinct) / COUNT(month), 2) AS average
FROM
(
SELECT SUM(push_distinct_size) AS sum_push_distinct,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE repo_name = 'bitcoin/bitcoin' AND
event_type = 'PushEvent' AND
/*created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())*/
created_at >= dateSub(MONTH, 7,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 4,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)
'''
query_commits_L ='''
SELECT ROUND( SUM(sum_push_distinct) / COUNT(month), 2) AS average
FROM
(
SELECT SUM(push_distinct_size) AS sum_push_distinct,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE repo_name =
'''
q_repo='bitcoin/bitcoin'
query_commits_R = '''
AND
event_type = 'PushEvent' AND
/*created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())*/
created_at >= dateSub(MONTH, 7,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 4,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)
'''
query_commits=query_commits_L + '\'' + q_repo + '\'' + query_commits_R
# In[163]:
res=client.execute(query_commits)
res
# In[199]:
# Read in 200_repos.csv
dfr = pd.read_csv('200_repos.csv', index_col=0)
# new df with only 2 columns
# 'CMC_id' as index is maintained
df = dfr[['repo','forge']].copy()
# In[181]:
query_test_zero='''
SELECT ROUND( SUM(sum_push_distinct) / COUNT(month), 2) AS average
FROM
(
SELECT SUM(push_distinct_size) AS sum_push_distinct,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE repo_name = 'Uniswap/uniswap-v2-core' AND
event_type = 'PushEvent' AND
/*created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())*/
created_at >= dateSub(MONTH, 6,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 3,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)'''
res=client.execute(query_test_zero)
res
# In[ ]:
import math
if math.isnan(res[0][0]): print('not')
else: print('dunno')
# In[200]:
for row in df.itertuples():
# only github for now as client is connected to github_events DB
if row.forge == 'github':
#forks = 0
repo = row.repo
# skip the NaN repos
if type(repo) == str:
query = query_commits_L + '\'' + repo + '\'' + query_commits_R
result = client.execute(query)
# query returns a tuple of list elements accessible by [first list][first item]
# average of no commits returns a nan
if math.isnan(result[0][0]):
df.at[row.Index, 'commits'] = 0
else: df.at[row.Index, 'commits'] = result[0][0]
# In[202]:
# write update to 200_commits.csv
df.to_csv('200_commits.csv', encoding='utf-8', index=1)
# In[168]:
# update MERGED sheet with new data
# 'CMC_id' is the key, however 'repo', and 'forge' are also merged
# to prevent duplicate columns
df_temp = pd.read_csv('200_merged.csv', index_col=0)
dfm = pd.merge(df_temp,df,on=['CMC_id','repo','forge'])
dfm.to_csv('200_merged.csv', encoding='utf-8', index=1)
# In[207]:
# total COMMENTS includes all commenting activity
# any comments counts as activity and increase engagement
# there are 3 event_type comment events:
# >CommitCommentEvent
# >IssueCommentEvent
# >CommitCommentEvent
#
'''
/* View distribution of comments*/
SELECT
uniq(comment_id) AS total_comments,
uniqIf(comment_id, event_type = 'PullRequestReviewCommentEvent') AS pr_comments,
uniqIf(comment_id, event_type = 'IssueCommentEvent') AS issue_comments,
uniqIf(comment_id, event_type = 'CommitCommentEvent') AS commit_comments,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE
repo_name = 'bitcoin/bitcoin' AND
toYear(created_at) >= 2020
GROUP BY month, year
ORDER BY year DESC, month DESC
'''
# only Sept/Oct/Nov 2020 #
QUERY_COMMENTS='''
SELECT ROUND( SUM(total) / COUNT(month), 2) AS average
FROM
(
SELECT
(
uniqIf(comment_id, event_type = 'PullRequestReviewCommentEvent')+
uniqIf(comment_id, event_type = 'IssueCommentEvent')+
uniqIf(comment_id, event_type = 'CommitCommentEvent') ) AS total,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE
repo_name = 'bitcoin/bitcoin' AND
/*created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())*/
created_at >= dateSub(MONTH, 7,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 4,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)
'''
query_L='''
SELECT ROUND( SUM(total) / COUNT(month), 2) AS average
FROM
(
SELECT
(
uniqIf(comment_id, event_type = 'PullRequestReviewCommentEvent')+
uniqIf(comment_id, event_type = 'IssueCommentEvent')+
uniqIf(comment_id, event_type = 'CommitCommentEvent') ) AS total,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE
repo_name =
'''
query_R='''
AND
/*created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())*/
created_at >= dateSub(MONTH, 7,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 4,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)
'''
# In[209]:
res=client.execute(QUERY_COMMENTS)
res
# In[212]:
#
query_L='''
SELECT ROUND( SUM(total) / COUNT(month), 2) AS average
FROM
(
SELECT
(
uniqIf(comment_id, event_type = 'PullRequestReviewCommentEvent')+
uniqIf(comment_id, event_type = 'IssueCommentEvent')+
uniqIf(comment_id, event_type = 'CommitCommentEvent') ) AS total,
toMonth(created_at) AS month,
toYear(created_at) AS year
FROM github_events
WHERE
repo_name =
'''
query_R='''
AND
/*created_at >= dateSub(MONTH, 3,toStartOfMonth(now())) AND
created_at < toStartOfMonth(now())*/
created_at >= dateSub(MONTH, 7,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 4,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)
'''
'''
@column name of the column to be added to the dataframe
@query_L
@query_R
@df dataframe
'''
def runQuery(column_name, query_L, query_R, df):
for row in df.itertuples():
# only github for now as client is connected to github_events DB
if row.forge == 'github':
repo = row.repo
# skip the NaN repos
if type(repo) == str:
query = query_L + '\'' + repo + '\'' + query_R
result = client.execute(query)
# query returns a tuple of list elements accessible by [first list][first item]
# average of zero returns a nan
if math.isnan(result[0][0]):
df.at[row.Index, column_name] = 0
else: df.at[row.Index, column_name] = result[0][0]
return 'dataframe updated'
# In[213]:
# Read in 200_repos.csv
dfr = pd.read_csv('200_repos.csv', index_col=0)
# new df with only 2 columns
# 'CMC_id' as index is maintained
df = dfr[['repo','forge']].copy()
# In[217]:
runQuery('comments',query_L,query_R,df)
# In[220]:
# update MERGED sheet with new data
# 'CMC_id' is the key, however 'repo', and 'forge' are also merged
# to prevent duplicate columns
df_temp = pd.read_csv('200_merged.csv', index_col=0)
dfm = pd.merge(df_temp,df,on=['CMC_id','repo','forge'])
dfm.to_csv('200_merged.csv', encoding='utf-8', index=1)
# In[224]:
# view all PR activity sorted into: opened, closed, reopened
'''
SELECT COUNT() AS total,
SUM(action = 'opened') AS opened,
SUM(action = 'closed') AS closed,
SUM(action = 'reopened') AS reopened,
toYear(created_at) AS year,
toMonth(created_at) AS month
FROM github_events
WHERE repo_name = 'bitcoin/bitcoin' AND
toYear(created_at) >= '2019' AND
event_type = 'PullRequestEvent'
GROUP BY month, year
ORDER BY year DESC, month DESC
'''
'''
SELECT
ROUND( SUM(opened) / COUNT(month), 2) AS average
FROM
(
SELECT
SUM(action = 'opened') AS opened,
toYear(created_at) AS year,
toMonth(created_at) AS month
FROM github_events
WHERE repo_name = 'bitcoin/bitcoin' AND
event_type = 'PullRequestEvent' AND
created_at >= dateSub(MONTH, 7,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 4,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)
'''
query_L='''
SELECT
ROUND( SUM(opened) / COUNT(month), 2) AS average
FROM
(
SELECT
SUM(action = 'opened') AS opened,
toYear(created_at) AS year,
toMonth(created_at) AS month
FROM github_events
WHERE repo_name =
'''
query_R='''
AND
event_type = 'PullRequestEvent' AND
created_at >= dateSub(MONTH, 7,toStartOfMonth(now())) AND
created_at < dateSub(MONTH, 4,toStartOfMonth(now()))
GROUP BY month, year
ORDER BY year DESC, month DESC
)
'''
# In[225]:
# Read in 200_repos.csv
dfr = pd.read_csv('200_repos.csv', index_col=0)
# new df with only 2 columns
# 'CMC_id' as index is maintained
df = dfr[['repo','forge']].copy()
runQuery('PR_open',query_L,query_R,df)
# In[226]:
# update MERGED sheet with new data
# 'CMC_id' is the key, however 'repo', and 'forge' are also merged
# to prevent duplicate columns
df_temp = pd.read_csv('200_merged.csv', index_col=0)
dfm = pd.merge(df_temp,df,on=['CMC_id','repo','forge'])
dfm.to_csv('200_merged.csv', encoding='utf-8', index=1)
# In[227]:
dfm
# In[234]:
import sys,time
#criticality (again)
# >>>>!!!!
# minor problem here with ETC double-IDs...
# !!!!>>>>
# Read in 200_repos.csv
dfr = pd.read_csv('200_repos.csv', index_col=0)
# new df with only 2 columns
# 'CMC_id' as index is maintained
df = dfr[['source_code','forge']].copy()
dfc = pd.read_csv('Project_Criticality_all.csv')
for row in df.itertuples():
# only search for strings; floats (NaN) are skipped
if isinstance(row.source_code, str):
url = str(row.source_code)
# loop through df2 (criticality) looking for source code url
for row2 in dfc.itertuples():
if url == row2.url:
                df.at[row.Index, 'criticality'] = row2.criticality_score
break
sys.stdout.write(".")
sys.stdout.flush()
# In[246]:
# update MERGED sheet with new data
# 'CMC_id' is the key, however 'repo', and 'forge' are also merged
# to prevent duplicate columns
df.drop(columns=['source_code'], inplace=True)
df_temp = pd.read_csv('200_merged.csv', index_col=0)
dfm = pd.merge(df_temp,df,on=['CMC_id','forge'])
dfm.to_csv('200_merged.csv', encoding='utf-8', index=1)
# In[ ]:
| 712 | 0 | 22 |
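# The cells above splice repo names into SQL by hand ("\'" + repo + "\'").
# clickhouse-driver can substitute parameters instead, which avoids quoting
# bugs -- a sketch using the same stars query and the `client` from the top
# of the notebook:
query_stars_param = '''
SELECT count()
FROM github_events
WHERE event_type = 'WatchEvent'
  AND repo_name = %(repo)s
GROUP BY action
'''
stars = client.execute(query_stars_param, {'repo': 'bitcoin/bitcoin'})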
5a41a079fa01bdce1298f17000c10bcb99c2d05e | 650 | py | Python | projecteuler/35.py | diofeher/ctf-writeups | b82eaae064fe5339c69892dd084e0f1915ca8bb5 | [
"MIT"
] | 8 | 2018-12-30T06:49:29.000Z | 2021-06-30T22:37:54.000Z | projecteuler/35.py | diofeher/ctf-writeups | b82eaae064fe5339c69892dd084e0f1915ca8bb5 | [
"MIT"
] | null | null | null | projecteuler/35.py | diofeher/ctf-writeups | b82eaae064fe5339c69892dd084e0f1915ca8bb5 | [
"MIT"
] | 2 | 2020-03-10T11:04:54.000Z | 2020-10-13T12:34:16.000Z |
print len([i for i in range(1000000) if is_circular_prime(i)]) | 29.545455 | 70 | 0.589231 | def all_perms(string):
if len(string) <=1:
yield string
else:
for perm in all_perms(string[1:]):
for i in range(len(perm)+1):
yield perm[:i] + string[0:1] + perm[i:]
def is_prime(num):
if num==0 or num==1: return False
if num==2: return True
    for i in range(2, num // 2 + 1):  # include num//2 itself so that 4, 6, ... are caught
if not num%i:
return False
return True
def rotations(num):
    # Circular primes are defined by digit rotations (197 -> 971 -> 719),
    # not by arbitrary permutations, so only the rotations are checked.
    digits = str(num)
    return [digits[i:] + digits[:i] for i in range(len(digits))]
def is_circular_prime(num):
    return all(is_prime(int(rotation)) for rotation in rotations(num))
print len([i for i in range(1000000) if is_circular_prime(i)]) | 507 | 0 | 72 |
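# Sanity check: the thirteen circular primes below 100 are
# 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97.
assert len([i for i in range(100) if is_circular_prime(i)]) == 13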
fe7df07ae582ecb28a22bb4068c6fdbc91e4af97 | 461 | py | Python | src/soopervisor/validate.py | ploomber/ci-for-ds | 4103edc1ba38468ec8e8cb2759a80ab25c5237ca | [
"Apache-2.0"
] | null | null | null | src/soopervisor/validate.py | ploomber/ci-for-ds | 4103edc1ba38468ec8e8cb2759a80ab25c5237ca | [
"Apache-2.0"
] | 2 | 2020-08-08T20:59:05.000Z | 2020-08-08T21:00:42.000Z | src/soopervisor/validate.py | ploomber/ci-for-ds | 4103edc1ba38468ec8e8cb2759a80ab25c5237ca | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from soopervisor.exceptions import MissingConfigurationFileError
| 23.05 | 70 | 0.711497 | from pathlib import Path
from soopervisor.exceptions import MissingConfigurationFileError
def pprint(collection):
return ', '.join(f"'{element}'" for element in sorted(collection))
def keys(expected, actual, error):
missing = set(expected) - set(actual)
if missing:
raise ValueError(f'{error}: {pprint(missing)}')
def config_file_exists():
if not Path('soopervisor.yaml').is_file():
raise MissingConfigurationFileError()
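# Minimal hedged usage sketch with a hypothetical config dict: keys() raises
# when any expected name is missing from what was actually loaded.
try:
    keys(expected={'backend', 'repository'},
         actual={'backend': 'aws-batch'},
         error='missing keys in soopervisor.yaml')
except ValueError as error:
    print(error)  # missing keys in soopervisor.yaml: 'repository'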
| 298 | 0 | 69 |
0c8290bb0212368acaee06be213fefeb5e7a8307 | 1,678 | py | Python | libhoney/fields.py | mveitas/libhoney-py | a89b51679fc2c4ba3f326c9ebd1e8e3c3ee7b24a | [
"Apache-2.0"
] | 18 | 2016-08-09T15:51:44.000Z | 2021-07-08T20:29:41.000Z | libhoney/fields.py | mveitas/libhoney-py | a89b51679fc2c4ba3f326c9ebd1e8e3c3ee7b24a | [
"Apache-2.0"
] | 60 | 2016-09-28T21:18:44.000Z | 2022-03-07T15:28:48.000Z | libhoney/fields.py | mveitas/libhoney-py | a89b51679fc2c4ba3f326c9ebd1e8e3c3ee7b24a | [
"Apache-2.0"
] | 27 | 2016-09-28T18:09:49.000Z | 2022-03-21T16:30:54.000Z | import inspect
import json
from libhoney.internal import json_default_handler
class FieldHolder:
'''A FieldHolder is the generalized class that stores fields and dynamic
fields. It should not be used directly; only through the subclasses'''
def __add__(self, other):
'''adding two field holders merges the data with other overriding
any fields they have in common'''
self._data.update(other._data)
self._dyn_fields.update(other._dyn_fields)
return self
def __eq__(self, other):
'''two FieldHolders are equal if their datasets are equal'''
return ((self._data, self._dyn_fields) ==
(other._data, other._dyn_fields))
def __ne__(self, other):
'''two FieldHolders are equal if their datasets are equal'''
return not self.__eq__(other)
def is_empty(self):
'''returns true if there is no data in this FieldHolder'''
return len(self._data) == 0
def __str__(self):
'''returns a JSON blob of the fields in this holder'''
return json.dumps(self._data, default=json_default_handler)
| 32.269231 | 77 | 0.638856 | import inspect
import json
from libhoney.internal import json_default_handler
class FieldHolder:
'''A FieldHolder is the generalized class that stores fields and dynamic
fields. It should not be used directly; only through the subclasses'''
def __init__(self):
self._data = {}
self._dyn_fields = set()
def __add__(self, other):
'''adding two field holders merges the data with other overriding
any fields they have in common'''
self._data.update(other._data)
self._dyn_fields.update(other._dyn_fields)
return self
def __eq__(self, other):
'''two FieldHolders are equal if their datasets are equal'''
return ((self._data, self._dyn_fields) ==
(other._data, other._dyn_fields))
def __ne__(self, other):
'''two FieldHolders are equal if their datasets are equal'''
return not self.__eq__(other)
def add_field(self, name, val):
self._data[name] = val
def add_dynamic_field(self, fn):
if not inspect.isroutine(fn):
raise TypeError("add_dynamic_field requires function argument")
self._dyn_fields.add(fn)
def add(self, data):
try:
for k, v in data.items():
self.add_field(k, v)
except AttributeError:
raise TypeError("add requires a dict-like argument")
def is_empty(self):
'''returns true if there is no data in this FieldHolder'''
return len(self._data) == 0
def __str__(self):
'''returns a JSON blob of the fields in this holder'''
return json.dumps(self._data, default=json_default_handler)
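# Hedged usage sketch of the merge semantics documented in __add__ above:
base = FieldHolder()
base.add({'service': 'api', 'region': 'eu'})
override = FieldHolder()
override.add_field('region', 'us')
base += override   # shared keys take the right-hand holder's value
print(base)        # {"service": "api", "region": "us"}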
| 437 | 0 | 108 |
79d2bd4d366c5b184e4b92ef62f09bd3857a29c1 | 886 | py | Python | chap05/Filter.py | viekie/basic_deeplearning | 6c9e55cd621504da3d7ea1627e6783c9819a1916 | [
"Apache-2.0"
] | 3 | 2017-05-23T08:11:44.000Z | 2017-09-25T11:17:57.000Z | chap05/Filter.py | viekie/basic_deeplearning | 6c9e55cd621504da3d7ea1627e6783c9819a1916 | [
"Apache-2.0"
] | null | null | null | chap05/Filter.py | viekie/basic_deeplearning | 6c9e55cd621504da3d7ea1627e6783c9819a1916 | [
"Apache-2.0"
] | 1 | 2017-06-19T03:36:40.000Z | 2017-06-19T03:36:40.000Z | #!/usr/bin/env python
# -*- coding:utf8 -*-
# Power by viekie. 2017-05-27 09:23:04
import numpy as np
| 26.848485 | 65 | 0.576749 | #!/usr/bin/env python
# -*- coding:utf8 -*-
# Power by viekie. 2017-05-27 09:23:04
import numpy as np
class Filter(object):
def __init__(self, width, height, depth):
        '''
        Initialize the filter: store its width/height/depth, draw small random
        weights, and zero the bias and both gradients.
        '''
self.width = width
self.height = height
self.depth = depth
self.weights = np.random.uniform(-0.0001, 0.0001,
(depth, height, width))
self.bais = 0.0
self.weights_gradient = np.zeros(self.weights.shape)
self.bais_gradient = 0.0
def get_weights(self):
return self.weights
def get_bais(self):
return self.bais
def update(self, learning_rate):
self.weights += learning_rate * self.weights_gradient
self.bais += learning_rate * self.bais_gradient
def __str__(self):
return 'weight: %s, bais: %s' % (self.weights, self.bais)
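# Hedged usage sketch: a 3x3 filter over a depth-16 input volume; gradients are
# assumed to be filled in by a conv layer's backward pass before update().
f = Filter(width=3, height=3, depth=16)
print(f.get_weights().shape)  # (16, 3, 3) -- (depth, height, width)
f.update(learning_rate=0.01)  # applies the (currently zero) gradients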
| 244 | 526 | 23 |
4124f814bb874e6d320bf5c4cc0c448b4e14dbff | 104 | py | Python | project/server/worker.py | ardikabs/flask-server-template | e1dfb33323cc89f6163d604007263b73ec5b6e12 | [
"MIT"
] | 1 | 2019-01-15T10:33:04.000Z | 2019-01-15T10:33:04.000Z | project/server/worker.py | ardikabs/flask-server-template | e1dfb33323cc89f6163d604007263b73ec5b6e12 | [
"MIT"
] | null | null | null | project/server/worker.py | ardikabs/flask-server-template | e1dfb33323cc89f6163d604007263b73ec5b6e12 | [
"MIT"
] | null | null | null |
import os
from server import make_worker
worker = make_worker(os.getenv("FLASK_CONFIG") or "default",) | 20.8 | 61 | 0.778846 |
import os
from server import make_worker
worker = make_worker(os.getenv("FLASK_CONFIG") or "default",) | 0 | 0 | 0 |
d78b5d82b00d97040fcf7dda289d99db9079a7de | 2,192 | py | Python | unit_tests/test_files_check_ovn_db_connections.py | przemeklal/charm-ovn-central | ccb45c3d2ddfe089f299e8ebb79303dd0a705011 | [
"Apache-2.0"
] | null | null | null | unit_tests/test_files_check_ovn_db_connections.py | przemeklal/charm-ovn-central | ccb45c3d2ddfe089f299e8ebb79303dd0a705011 | [
"Apache-2.0"
] | null | null | null | unit_tests/test_files_check_ovn_db_connections.py | przemeklal/charm-ovn-central | ccb45c3d2ddfe089f299e8ebb79303dd0a705011 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from charms_openstack import test_utils
import check_ovn_db_connections as check
import nagios_plugin3 as nagios
| 39.854545 | 74 | 0.708485 | # Copyright 2021 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from charms_openstack import test_utils
import check_ovn_db_connections as check
import nagios_plugin3 as nagios
class TestCheckOVNDBConnections(test_utils.PatchHelper):
@mock.patch("os.path.exists")
def test_parse_output_does_not_exist(self, mock_exists):
mock_exists.return_value = False
self.assertRaises(nagios.UnknownError, check.parse_output)
@mock.patch("os.path.exists")
def test_parse_output_permission_error(self, mock_exists):
mock_exists.return_value = True
mock_file = mock.mock_open()
mock_file.side_effect = PermissionError
with mock.patch("builtins.open", mock_file) as mocked_open:
mocked_open.side_effect = PermissionError()
self.assertRaises(nagios.UnknownError, check.parse_output)
@mock.patch("os.path.exists")
def test_parse_output_alert(self, mock_exists):
mock_exists.return_value = True
mock_file = mock.mock_open(read_data="CRITICAL: fake error")
with mock.patch("builtins.open", mock_file):
self.assertRaises(nagios.UnknownError, check.parse_output)
@mock.patch("os.path.exists")
def test_parse_output_ok(self, mock_exists):
mock_exists.return_value = True
mock_file = mock.mock_open(
read_data="OK: OVN DB connections are normal"
)
with mock.patch("builtins.open", mock_file):
# it shouldn't raise any exceptions
try:
check.parse_output()
except Exception as e:
self.fail("exception raised: {}".format(e))
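# Hedged standalone sketch of the builtins.open mocking pattern used above:
# mock.mock_open builds a file-like mock whose read() returns read_data.
def _sketch_mock_open_pattern():
    patched = mock.mock_open(read_data="OK: OVN DB connections are normal")
    with mock.patch("builtins.open", patched):
        with open("/any/path") as handle:  # path is arbitrary; open is patched
            assert handle.read().startswith("OK")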
| 1,176 | 278 | 23 |
33f1868840dcae764bf333e380d375fb955c1acd | 201 | py | Python | Jupyter/AOBW.py | MooersLab/jupyterlabpymolpysnipsplus | b886750d63372434df53d4d6d7cdad6cb02ae4e7 | [
"MIT"
] | null | null | null | Jupyter/AOBW.py | MooersLab/jupyterlabpymolpysnipsplus | b886750d63372434df53d4d6d7cdad6cb02ae4e7 | [
"MIT"
] | null | null | null | Jupyter/AOBW.py | MooersLab/jupyterlabpymolpysnipsplus | b886750d63372434df53d4d6d7cdad6cb02ae4e7 | [
"MIT"
] | null | null | null | # Description: Run the AOBW function from the pymolshortcuts.py file to generate photorealistic effect in grayscale.
# Source: placeHolder
"""
cmd.do('cmd.do("AOBW")')
"""
cmd.do('cmd.do("AOBW")')
| 22.333333 | 117 | 0.706468 | # Description: Run the AOBW function from the pymolshortcuts.py file to generate photorealistic effect in grayscale.
# Source: placeHolder
"""
cmd.do('cmd.do("AOBW")')
"""
cmd.do('cmd.do("AOBW")')
| 0 | 0 | 0 |
58e2f18c29d37b8dcfd792252b792fe87df612cc | 348 | py | Python | year2021/python/day6/day6.py | 3schwartz/AdventOfCode | 32f259c4e20c3c4834718411f1053b6a11f71c86 | [
"MIT"
] | null | null | null | year2021/python/day6/day6.py | 3schwartz/AdventOfCode | 32f259c4e20c3c4834718411f1053b6a11f71c86 | [
"MIT"
] | null | null | null | year2021/python/day6/day6.py | 3schwartz/AdventOfCode | 32f259c4e20c3c4834718411f1053b6a11f71c86 | [
"MIT"
] | null | null | null | from year2021.python.day6.day6_func import *
inputStr = open('../../data/day6_data.txt').readline()
fishes = FishCreator.initFishes(inputStr)
spawn = FishSpawn()
numberOfFishes80Days = spawn.spawn(fishes, 80)
print(f"Part 1: {numberOfFishes80Days}")
numberOfFishes256Days = spawn.spawn(fishes, 256)
print(f"Part 2: {numberOfFishes256Days}") | 21.75 | 54 | 0.755747 | from year2021.python.day6.day6_func import *
inputStr = open('../../data/day6_data.txt').readline()
fishes = FishCreator.initFishes(inputStr)
spawn = FishSpawn()
numberOfFishes80Days = spawn.spawn(fishes, 80)
print(f"Part 1: {numberOfFishes80Days}")
numberOfFishes256Days = spawn.spawn(fishes, 256)
print(f"Part 2: {numberOfFishes256Days}") | 0 | 0 | 0 |
07e263f9a23b2a011649cb1d854e7112ac894597 | 5,529 | py | Python | fastapi/encoders.py | includeamin/fastapi | 988d3c2a820f679bf88e2fc18f4e9614a2633212 | [
"MIT"
] | null | null | null | fastapi/encoders.py | includeamin/fastapi | 988d3c2a820f679bf88e2fc18f4e9614a2633212 | [
"MIT"
] | null | null | null | fastapi/encoders.py | includeamin/fastapi | 988d3c2a820f679bf88e2fc18f4e9614a2633212 | [
"MIT"
] | null | null | null | from enum import Enum
from types import GeneratorType
from typing import Any, Callable, Dict, List, Set, Tuple, Union
from fastapi.logger import logger
from fastapi.utils import PYDANTIC_1
from pydantic import BaseModel
from pydantic.json import ENCODERS_BY_TYPE
SetIntStr = Set[Union[int, str]]
DictIntStrAny = Dict[Union[int, str], Any]
encoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE)
| 34.341615 | 85 | 0.58166 | from enum import Enum
from types import GeneratorType
from typing import Any, Callable, Dict, List, Set, Tuple, Union
from fastapi.logger import logger
from fastapi.utils import PYDANTIC_1
from pydantic import BaseModel
from pydantic.json import ENCODERS_BY_TYPE
SetIntStr = Set[Union[int, str]]
DictIntStrAny = Dict[Union[int, str], Any]
def generate_encoders_by_class_tuples(
type_encoder_map: Dict[Any, Callable]
) -> Dict[Callable, Tuple]:
encoders_by_classes: Dict[Callable, List] = {}
for type_, encoder in type_encoder_map.items():
encoders_by_classes.setdefault(encoder, []).append(type_)
encoders_by_class_tuples: Dict[Callable, Tuple] = {}
for encoder, classes in encoders_by_classes.items():
encoders_by_class_tuples[encoder] = tuple(classes)
return encoders_by_class_tuples
encoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE)
def jsonable_encoder(
obj: Any,
include: Union[SetIntStr, DictIntStrAny] = None,
exclude=None,
by_alias: bool = True,
skip_defaults: bool = None,
exclude_unset: bool = False,
include_none: bool = True,
custom_encoder=None,
sqlalchemy_safe: bool = True,
) -> Any:
if exclude is None:
exclude = set()
if custom_encoder is None:
custom_encoder = {}
if skip_defaults is not None:
logger.warning( # pragma: nocover
"skip_defaults in jsonable_encoder has been deprecated in favor of "
"exclude_unset to keep in line with Pydantic v1, support for it will be "
"removed soon."
)
if include is not None and not isinstance(include, set):
include = set(include)
if exclude is not None and not isinstance(exclude, set):
exclude = set(exclude)
if isinstance(obj, BaseModel):
encoder = getattr(obj.Config, "json_encoders", {})
if custom_encoder:
encoder.update(custom_encoder)
if PYDANTIC_1:
obj_dict = obj.dict(
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=bool(exclude_unset or skip_defaults),
)
else: # pragma: nocover
obj_dict = obj.dict(
include=include,
exclude=exclude,
by_alias=by_alias,
skip_defaults=bool(exclude_unset or skip_defaults),
)
return jsonable_encoder(
obj_dict,
include_none=include_none,
custom_encoder=encoder,
sqlalchemy_safe=sqlalchemy_safe,
)
if isinstance(obj, Enum):
return obj.value
if isinstance(obj, (str, int, float, type(None))):
return obj
if isinstance(obj, dict):
encoded_dict = {}
for key, value in obj.items():
if (
(
not sqlalchemy_safe
or (not isinstance(key, str))
or (not key.startswith("_sa"))
)
and (value is not None or include_none)
and ((include and key in include) or key not in exclude)
):
encoded_key = jsonable_encoder(
key,
by_alias=by_alias,
exclude_unset=exclude_unset,
include_none=include_none,
custom_encoder=custom_encoder,
sqlalchemy_safe=sqlalchemy_safe,
)
encoded_value = jsonable_encoder(
value,
by_alias=by_alias,
exclude_unset=exclude_unset,
include_none=include_none,
custom_encoder=custom_encoder,
sqlalchemy_safe=sqlalchemy_safe,
)
encoded_dict[encoded_key] = encoded_value
return encoded_dict
if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
encoded_list = []
for item in obj:
encoded_list.append(
jsonable_encoder(
item,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
include_none=include_none,
custom_encoder=custom_encoder,
sqlalchemy_safe=sqlalchemy_safe,
)
)
return encoded_list
if custom_encoder:
if type(obj) in custom_encoder:
return custom_encoder[type(obj)](obj)
else:
for encoder_type, encoder in custom_encoder.items():
if isinstance(obj, encoder_type):
return encoder(obj)
if type(obj) in ENCODERS_BY_TYPE:
return ENCODERS_BY_TYPE[type(obj)](obj)
for encoder, classes_tuple in encoders_by_class_tuples.items():
if isinstance(obj, classes_tuple):
return encoder(obj)
errors: List[Exception] = []
try:
data = dict(obj)
except Exception as e:
errors.append(e)
try:
data = vars(obj)
except Exception as e:
errors.append(e)
raise ValueError(errors)
return jsonable_encoder(
data,
by_alias=by_alias,
exclude_unset=exclude_unset,
include_none=include_none,
custom_encoder=custom_encoder,
sqlalchemy_safe=sqlalchemy_safe,
)
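# Hedged usage sketch with a hypothetical Pydantic model: non-JSON-native types
# such as datetime are converted through ENCODERS_BY_TYPE.
if __name__ == "__main__":  # illustration only
    import datetime

    class Item(BaseModel):
        name: str
        created: datetime.datetime

    print(jsonable_encoder(Item(name="x", created=datetime.datetime(2020, 1, 1))))
    # -> {'name': 'x', 'created': '2020-01-01T00:00:00'}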
| 5,059 | 0 | 46 |
ac16b5ae47ccf63f9b247a1c7165aa7cc95d25c3 | 5,116 | py | Python | cnstd/consts.py | breezedeus/cnstd | 57a8171ea706c9e8f665bffc640d90022d43ab8e | [
"Apache-2.0"
] | 266 | 2020-06-02T12:33:50.000Z | 2022-03-31T06:12:46.000Z | cnstd/consts.py | breezedeus/cnstd | 57a8171ea706c9e8f665bffc640d90022d43ab8e | [
"Apache-2.0"
] | 37 | 2020-06-04T13:34:35.000Z | 2022-03-25T07:43:21.000Z | cnstd/consts.py | breezedeus/cnstd | 57a8171ea706c9e8f665bffc640d90022d43ab8e | [
"Apache-2.0"
] | 66 | 2020-06-02T12:33:33.000Z | 2022-03-24T14:22:09.000Z | # coding: utf-8
# Copyright (C) 2021, [Breezedeus](https://github.com/breezedeus).
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pathlib import Path
from typing import Dict, Any
from copy import deepcopy
from collections import OrderedDict
from torchvision.models import (
resnet50,
resnet34,
resnet18,
mobilenet_v3_large,
mobilenet_v3_small,
shufflenet_v2_x1_0,
shufflenet_v2_x1_5,
shufflenet_v2_x2_0,
)
from .__version__ import __version__
# The model version only tracks the first two segments of __version__; a change
# in the third segment means the model is still compatible.
# e.g. for __version__ = '1.0.*', MODEL_VERSION is always '1.0'
MODEL_VERSION = '.'.join(__version__.split('.', maxsplit=2)[:2])
VOCAB_FP = Path(__file__).parent.parent / 'label_cn.txt'
MODEL_CONFIGS: Dict[str, Dict[str, Any]] = {
'db_resnet50': {
'backbone': resnet50,
'backbone_submodule': None,
'fpn_layers': ['layer1', 'layer2', 'layer3', 'layer4'],
'fpn_channels': [256, 512, 1024, 2048],
        'input_shape': (3, 768, 768),  # image size fed to the model after resizing, i.e. the `resized_shape`
'url': None,
},
'db_resnet34': {
'backbone': resnet34,
'backbone_submodule': None,
'fpn_layers': ['layer1', 'layer2', 'layer3', 'layer4'],
'fpn_channels': [64, 128, 256, 512],
'input_shape': (3, 768, 768),
'url': None,
},
'db_resnet18': {
'backbone': resnet18,
'backbone_submodule': None,
'fpn_layers': ['layer1', 'layer2', 'layer3', 'layer4'],
'fpn_channels': [64, 128, 256, 512],
'input_shape': (3, 768, 768),
'url': None,
},
'db_mobilenet_v3': {
'backbone': mobilenet_v3_large,
'backbone_submodule': 'features',
'fpn_layers': ['3', '6', '12', '16'],
'fpn_channels': [24, 40, 112, 960],
'input_shape': (3, 768, 768),
'url': None,
},
'db_mobilenet_v3_small': {
'backbone': mobilenet_v3_small,
'backbone_submodule': 'features',
'fpn_layers': ['1', '3', '8', '12'],
'fpn_channels': [16, 24, 48, 576],
'input_shape': (3, 768, 768),
'url': None,
},
'db_shufflenet_v2': {
'backbone': shufflenet_v2_x2_0,
'backbone_submodule': None,
'fpn_layers': ['maxpool', 'stage2', 'stage3', 'stage4'],
'fpn_channels': [24, 244, 488, 976],
'input_shape': (3, 768, 768),
'url': None,
},
'db_shufflenet_v2_small': {
'backbone': shufflenet_v2_x1_5,
'backbone_submodule': None,
'fpn_layers': ['maxpool', 'stage2', 'stage3', 'stage4'],
'fpn_channels': [24, 176, 352, 704],
'input_shape': (3, 768, 768),
'url': None,
},
'db_shufflenet_v2_tiny': {
'backbone': shufflenet_v2_x1_0,
'backbone_submodule': None,
'fpn_layers': ['maxpool', 'stage2', 'stage3', 'stage4'],
'fpn_channels': [24, 116, 232, 464],
'input_shape': (3, 768, 768),
'url': None,
},
}
root_url = (
'https://beiye-model.oss-cn-beijing.aliyuncs.com/models/cnstd/%s/' % MODEL_VERSION
)
# name: (epochs, url)
# free models
FREE_MODELS = OrderedDict(
{
'db_resnet34': {
'model_epoch': 41,
'fpn_type': 'pan',
'url': root_url + 'db_resnet34-pan.zip',
},
'db_resnet18': {
'model_epoch': 34,
'fpn_type': 'pan',
'url': root_url + 'db_resnet18-pan.zip',
},
'db_mobilenet_v3': {
'model_epoch': 47,
'fpn_type': 'pan',
'url': root_url + 'db_mobilenet_v3-pan.zip',
},
'db_mobilenet_v3_small': {
'model_epoch': 37,
'fpn_type': 'pan',
'url': root_url + 'db_mobilenet_v3_small-pan.zip',
},
'db_shufflenet_v2': {
'model_epoch': 41,
'fpn_type': 'pan',
'url': root_url + 'db_shufflenet_v2-pan.zip',
},
'db_shufflenet_v2_small': {
'model_epoch': 34,
'fpn_type': 'pan',
'url': root_url + 'db_shufflenet_v2_small-pan.zip',
},
}
)
# paid models
PAID_MODELS = OrderedDict(
{
'db_shufflenet_v2_tiny': {
'model_epoch': 48,
'fpn_type': 'pan',
'url': root_url + 'db_shufflenet_v2_tiny-pan.zip',
},
}
)
AVAILABLE_MODELS = deepcopy(FREE_MODELS)
AVAILABLE_MODELS.update(PAID_MODELS)
| 31.195122 | 86 | 0.581704 | # coding: utf-8
# Copyright (C) 2021, [Breezedeus](https://github.com/breezedeus).
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pathlib import Path
from typing import Dict, Any
from copy import deepcopy
from collections import OrderedDict
from torchvision.models import (
resnet50,
resnet34,
resnet18,
mobilenet_v3_large,
mobilenet_v3_small,
shufflenet_v2_x1_0,
shufflenet_v2_x1_5,
shufflenet_v2_x2_0,
)
from .__version__ import __version__
# The model version only tracks the first two segments of __version__; a change
# in the third segment means the model is still compatible.
# e.g. for __version__ = '1.0.*', MODEL_VERSION is always '1.0'
MODEL_VERSION = '.'.join(__version__.split('.', maxsplit=2)[:2])
VOCAB_FP = Path(__file__).parent.parent / 'label_cn.txt'
MODEL_CONFIGS: Dict[str, Dict[str, Any]] = {
'db_resnet50': {
'backbone': resnet50,
'backbone_submodule': None,
'fpn_layers': ['layer1', 'layer2', 'layer3', 'layer4'],
'fpn_channels': [256, 512, 1024, 2048],
        'input_shape': (3, 768, 768),  # image size fed to the model after resizing, i.e. the `resized_shape`
'url': None,
},
'db_resnet34': {
'backbone': resnet34,
'backbone_submodule': None,
'fpn_layers': ['layer1', 'layer2', 'layer3', 'layer4'],
'fpn_channels': [64, 128, 256, 512],
'input_shape': (3, 768, 768),
'url': None,
},
'db_resnet18': {
'backbone': resnet18,
'backbone_submodule': None,
'fpn_layers': ['layer1', 'layer2', 'layer3', 'layer4'],
'fpn_channels': [64, 128, 256, 512],
'input_shape': (3, 768, 768),
'url': None,
},
'db_mobilenet_v3': {
'backbone': mobilenet_v3_large,
'backbone_submodule': 'features',
'fpn_layers': ['3', '6', '12', '16'],
'fpn_channels': [24, 40, 112, 960],
'input_shape': (3, 768, 768),
'url': None,
},
'db_mobilenet_v3_small': {
'backbone': mobilenet_v3_small,
'backbone_submodule': 'features',
'fpn_layers': ['1', '3', '8', '12'],
'fpn_channels': [16, 24, 48, 576],
'input_shape': (3, 768, 768),
'url': None,
},
'db_shufflenet_v2': {
'backbone': shufflenet_v2_x2_0,
'backbone_submodule': None,
'fpn_layers': ['maxpool', 'stage2', 'stage3', 'stage4'],
'fpn_channels': [24, 244, 488, 976],
'input_shape': (3, 768, 768),
'url': None,
},
'db_shufflenet_v2_small': {
'backbone': shufflenet_v2_x1_5,
'backbone_submodule': None,
'fpn_layers': ['maxpool', 'stage2', 'stage3', 'stage4'],
'fpn_channels': [24, 176, 352, 704],
'input_shape': (3, 768, 768),
'url': None,
},
'db_shufflenet_v2_tiny': {
'backbone': shufflenet_v2_x1_0,
'backbone_submodule': None,
'fpn_layers': ['maxpool', 'stage2', 'stage3', 'stage4'],
'fpn_channels': [24, 116, 232, 464],
'input_shape': (3, 768, 768),
'url': None,
},
}
root_url = (
'https://beiye-model.oss-cn-beijing.aliyuncs.com/models/cnstd/%s/' % MODEL_VERSION
)
# name: (epochs, url)
# free models
FREE_MODELS = OrderedDict(
{
'db_resnet34': {
'model_epoch': 41,
'fpn_type': 'pan',
'url': root_url + 'db_resnet34-pan.zip',
},
'db_resnet18': {
'model_epoch': 34,
'fpn_type': 'pan',
'url': root_url + 'db_resnet18-pan.zip',
},
'db_mobilenet_v3': {
'model_epoch': 47,
'fpn_type': 'pan',
'url': root_url + 'db_mobilenet_v3-pan.zip',
},
'db_mobilenet_v3_small': {
'model_epoch': 37,
'fpn_type': 'pan',
'url': root_url + 'db_mobilenet_v3_small-pan.zip',
},
'db_shufflenet_v2': {
'model_epoch': 41,
'fpn_type': 'pan',
'url': root_url + 'db_shufflenet_v2-pan.zip',
},
'db_shufflenet_v2_small': {
'model_epoch': 34,
'fpn_type': 'pan',
'url': root_url + 'db_shufflenet_v2_small-pan.zip',
},
}
)
# paid models
PAID_MODELS = OrderedDict(
{
'db_shufflenet_v2_tiny': {
'model_epoch': 48,
'fpn_type': 'pan',
'url': root_url + 'db_shufflenet_v2_tiny-pan.zip',
},
}
)
AVAILABLE_MODELS = deepcopy(FREE_MODELS)
AVAILABLE_MODELS.update(PAID_MODELS)
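# Hedged usage sketch: resolving the architecture config and download info for
# one of the registered detector names.
_name = 'db_mobilenet_v3'
_arch = MODEL_CONFIGS[_name]        # backbone / FPN layout and input shape
_release = AVAILABLE_MODELS[_name]  # model_epoch, fpn_type and download url
# _arch['input_shape'] -> (3, 768, 768); _release['url'] ends with '-pan.zip'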
| 0 | 0 | 0 |
f46c961efad3e34687cc99723c14dc0969fb0567 | 3,055 | py | Python | Read and play bag files from realsense camera/read_bag.py | afiretony/UAV_utilities | 517b3eb9bb985dbf9ad334e96dbf8f1b4713bb14 | [
"MIT"
] | null | null | null | Read and play bag files from realsense camera/read_bag.py | afiretony/UAV_utilities | 517b3eb9bb985dbf9ad334e96dbf8f1b4713bb14 | [
"MIT"
] | null | null | null | Read and play bag files from realsense camera/read_bag.py | afiretony/UAV_utilities | 517b3eb9bb985dbf9ad334e96dbf8f1b4713bb14 | [
"MIT"
] | null | null | null | #####################################################
## Read bag from file ##
#####################################################
# First import library
import pyrealsense2 as rs
# Import Numpy for easy array manipulation
import numpy as np
import sys
np.set_printoptions(threshold=sys.maxsize)
# Import OpenCV for easy image rendering
import cv2
# Import argparse for command-line options
import argparse
# Import os.path for file path manipulation
import os.path
# Create object for parsing command-line options
parser = argparse.ArgumentParser(description="Read recorded bag file and display depth stream in jet colormap.\
Remember to change the stream fps and format to match the recorded.")
# Add argument which takes path to a bag file as an input
parser.add_argument("-i", "--input", type=str, help="Path to the bag file")
# Parse the command line arguments to an object
args = parser.parse_args()
# Safety if no parameter have been given
if not args.input:
print("No input paramater have been given.")
print("For help type --help")
exit()
# Check if the given file have bag extension
if os.path.splitext(args.input)[1] != ".bag":
print("The given file is not of correct file format.")
print("Only .bag files are accepted")
exit()
try:
# Create pipeline
pipeline = rs.pipeline()
# Create a config object
config = rs.config()
# Tell config that we will use a recorded device from file to be used by the pipeline through playback.
rs.config.enable_device_from_file(config, args.input)
# Configure the pipeline to stream the depth stream
# Change this parameters according to the recorded bag file resolution
config.enable_stream(rs.stream.depth, rs.format.z16, 30)
# Start streaming from file
pipeline.start(config)
# Create opencv window to render image in
cv2.namedWindow("Depth Stream", cv2.WINDOW_AUTOSIZE)
# Create colorizer object
colorizer = rs.colorizer()
# Streaming loop
count = 0
while True:
# Get frameset of depth
frames = pipeline.wait_for_frames()
# Get depth frame
depth_frame = frames.get_depth_frame()
# Colorize depth frame to jet colormap
depth_color_frame = colorizer.colorize(depth_frame)
# Convert depth_frame to numpy array to render image in opencv
depth_color_image = np.asanyarray(depth_color_frame.get_data())
npdepth_frame = np.asanyarray(depth_frame.get_data())
print(npdepth_frame.shape)
# Render image in opencv window
cv2.imshow("Depth Stream", depth_color_image)
key = cv2.waitKey(1)
# if pressed escape exit program
if key == 27:
# cv2.destroyAllWindows()
break
if count == 210:
print(npdepth_frame)
cv2.waitKey(0)
break
count += 1
finally:
pass | 32.849462 | 112 | 0.632733 | #####################################################
## Read bag from file ##
#####################################################
# First import library
import pyrealsense2 as rs
# Import Numpy for easy array manipulation
import numpy as np
import sys
np.set_printoptions(threshold=sys.maxsize)
# Import OpenCV for easy image rendering
import cv2
# Import argparse for command-line options
import argparse
# Import os.path for file path manipulation
import os.path
# Create object for parsing command-line options
parser = argparse.ArgumentParser(description="Read recorded bag file and display depth stream in jet colormap.\
Remember to change the stream fps and format to match the recorded.")
# Add argument which takes path to a bag file as an input
parser.add_argument("-i", "--input", type=str, help="Path to the bag file")
# Parse the command line arguments to an object
args = parser.parse_args()
# Safety if no parameter have been given
if not args.input:
print("No input paramater have been given.")
print("For help type --help")
exit()
# Check if the given file have bag extension
if os.path.splitext(args.input)[1] != ".bag":
print("The given file is not of correct file format.")
print("Only .bag files are accepted")
exit()
try:
# Create pipeline
pipeline = rs.pipeline()
# Create a config object
config = rs.config()
# Tell config that we will use a recorded device from file to be used by the pipeline through playback.
rs.config.enable_device_from_file(config, args.input)
# Configure the pipeline to stream the depth stream
# Change this parameters according to the recorded bag file resolution
config.enable_stream(rs.stream.depth, rs.format.z16, 30)
# Start streaming from file
pipeline.start(config)
# Create opencv window to render image in
cv2.namedWindow("Depth Stream", cv2.WINDOW_AUTOSIZE)
# Create colorizer object
colorizer = rs.colorizer()
# Streaming loop
count = 0
while True:
# Get frameset of depth
frames = pipeline.wait_for_frames()
# Get depth frame
depth_frame = frames.get_depth_frame()
# Colorize depth frame to jet colormap
depth_color_frame = colorizer.colorize(depth_frame)
# Convert depth_frame to numpy array to render image in opencv
depth_color_image = np.asanyarray(depth_color_frame.get_data())
npdepth_frame = np.asanyarray(depth_frame.get_data())
print(npdepth_frame.shape)
# Render image in opencv window
cv2.imshow("Depth Stream", depth_color_image)
key = cv2.waitKey(1)
# if pressed escape exit program
if key == 27:
# cv2.destroyAllWindows()
break
if count == 210:
print(npdepth_frame)
cv2.waitKey(0)
break
count += 1
finally:
pass | 0 | 0 | 0 |
5d7e531e587fcf22ca5052858a16906e530ac1ec | 186 | py | Python | tests/unit/test_mixin.py | matthewgdv/miscutils | f605ded914e355214533b06e7a768272409769c0 | [
"MIT"
] | null | null | null | tests/unit/test_mixin.py | matthewgdv/miscutils | f605ded914e355214533b06e7a768272409769c0 | [
"MIT"
] | null | null | null | tests/unit/test_mixin.py | matthewgdv/miscutils | f605ded914e355214533b06e7a768272409769c0 | [
"MIT"
] | null | null | null | # import pytest
| 13.285714 | 38 | 0.645161 | # import pytest
class TestReprMixin:
pass
class TestCopyMixin:
def test_copy(self): # synced
assert True
def test_deepcopy(self): # synced
assert True
| 62 | 7 | 99 |