# === File: setup.py | repo: PaulDodd/pynomial | license: Apache-2.0 ===
import sys
from setuptools import setup, find_packages
setup(
name='pynomial',
version='0.0.0',
packages=find_packages(),
author='Paul M Dodd',
author_email='pdodd@umich.edu',
description="python package for combinatorial problems",
url="https://github.com/PaulDodd/pynomial.git",
    install_requires=[],  # runtime dependencies go here; none needed yet
)
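# For local development this would typically be installed in editable mode
# (standard pip workflow, not something this file itself prescribes):
#   pip install -e .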
# === File: src/seedbox/migrations/versions/35099fc974d2_.py | repo: nailgun/seedbox | license: Apache-2.0 ===
"""empty message
Revision ID: 35099fc974d2
Revises: ae00e7974dca
Create Date: 2017-05-19 17:01:48.878196
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '35099fc974d2'
down_revision = 'ae00e7974dca'
branch_labels = None
depends_on = None
def upgrade():
op.drop_column('node', 'wipe_root_disk_next_boot')
op.drop_column('node', 'root_partition_size_sectors')
op.drop_column('node', 'root_disk')
def downgrade():
op.add_column('node', sa.Column('root_disk', sa.VARCHAR(length=80), autoincrement=False, nullable=False))
op.add_column('node', sa.Column('root_partition_size_sectors', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('node', sa.Column('wipe_root_disk_next_boot', sa.BOOLEAN(), autoincrement=False, nullable=False))
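# With a configured alembic.ini, this revision would be applied or rolled back
# through Alembic's CLI (shown for orientation; not part of the migration itself):
#   alembic upgrade 35099fc974d2
#   alembic downgrade ae00e7974dca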
# === File: searchNMin2.py | repo: plguhur/random-sets | license: Apache-2.0 ===
from randomSets import *
import numpy as np
winners = [1,5,10]
candidates = range(100, 1100, 500)
Nmin = np.zeros((len(winners),len(candidates)))
for i in range(len(winners)):
for j in range(len(candidates)):
print("Nwinner: %i, Ncandidates: %i" % (winners[i], candidates[j]))
#alpha = findMinAlpha(Ncandidates, Nvoters, Ntests = 100, Nsubset = 5, q = 1, alphaMin = 1, epsilon1=0.1, epsilon2=0.1)
Nmin[i,j] = findMinNvoters(candidates[j], Nwinner = winners[i], Nsubset = 5, Ngrades = 5, q = 5000, alpha = 2, Ntests=500)
print("Nmin: %i\n" % Nmin[i,j])
np.savetxt('nmin-winners-1-5-10-candidates-100-to-1000.txt', Nmin)
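# For orientation: Nmin has shape (len(winners), len(candidates)) == (3, 2);
# rows follow the winners list and columns follow the candidates range.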
# === File: drs4Calibration/drs4Calibration_version_0/config.py | repo: fact-project/DrsTemperatureCalibration | license: MIT ===
from drs4Calibration.drs4Calibration_version_0.constants import NRCELL, ROI
# Don't shuffle drsValueTypes:
# the order has to be the same as that of the 'RUNIDs'
# in the drsFiles
drsValueTypes = ['Baseline',
'Gain',
'TriggerOffset']
renamedDrsValueTypes = ['Baseline',
'Gain',
'ROIOffset'] # renamed TriggerOffset
nrCellsPerChid = {'Baseline': NRCELL,
'Gain': NRCELL,
'ROIOffset': ROI}
cutOffErrorFactor = {'Baseline': 2,
'Gain': 2,
'ROIOffset': 2}
hardwareBoundaries = ['2014-05-20 12',
'2015-05-26 12']
# hardwareBoundaries
#
# 20.05.2014 Camera repair, Replacement of Capacitors
# 26.5.2015 Replacement FAD board (crate 2, board 0)
#
# See also 'https://trac.fact-project.org/wiki/Protected/SystemChanges'
# === File: clisk/player/player.py | repo: etano/clisk | license: MIT ===
class Player(object):
"""Player class
Attributes:
name (str): Player name
"""
def __init__(self, name):
"""Initialize player
Args:
name (str): Player name
"""
self.name = name
def place_troops(self, board, n_troops):
"""Place troops on territories
Args:
board (Gameboard): The gameboard
n_troops (int): Number of new troops to deploy
Returns:
(dict(str, int)): Dictionary of territories with number of troops to be deployed
"""
raise NotImplementedError('place_troops not implemented')
def do_attack(self, board):
"""Decide whether or not to continue attacking
Args:
board (Gameboard): The gameboard
Returns:
(bool): Whether or not to continue attacking
"""
raise NotImplementedError('do_attack not implemented')
def attack(self, board):
"""Attack phase
Args:
board (Gameboard): The gameboard
Returns:
(str, str): from_territory, to_territory
"""
raise NotImplementedError('attack not implemented')
def do_move_troops(self, board):
"""Decide whether or not to move troops
Args:
board (Gameboard): The gameboard
Returns:
(bool): Whether or not to move troops
"""
raise NotImplementedError('do_move_troops not implemented')
def move_troops(self, board):
"""Troop movement phase
Args:
board (Gameboard): The gameboard
Returns:
(str, str, int): from_territory, to_territory, n_troops
"""
raise NotImplementedError('move_troops not implemented')
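# A minimal concrete subclass, sketched here to show how the interface above is
# meant to be filled in (illustrative only; not part of the original module):
class PassivePlayer(Player):
    """Trivial strategy: deploys nothing, never attacks, never repositions."""
    def place_troops(self, board, n_troops):
        return {}  # deploy no troops anywhere
    def do_attack(self, board):
        return False  # never start an attack
    def do_move_troops(self, board):
        return False  # never reposition troops
    # attack() and move_troops() are left unimplemented; with the choices above
    # a game loop should never reach them.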
# === File: bai03.py | repo: trietto/python | license: Apache-2.0 ===
n = int(input("Enter a number: "))
d = dict()
for i in range(1, n + 1):
    d[i] = i * i
print(d)
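# Example: entering 3 prints {1: 1, 2: 4, 3: 9}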
# === File: quizme.pyw | repo: dmahugh/quizme | license: MIT ===
"""GUI for taking tests based on quizme-xxx.json files.
"""
import os
import sys
import json
from random import randint
import tkinter as tk
from tkinter import font
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
from widgetrefs import widgets
def center_window(window):
"""Position a window in the center of the screen.
1st parameter = window
"""
window.update_idletasks()
width = window.winfo_screenwidth()
height = window.winfo_screenheight()
size = tuple(int(_) for _ in window.geometry().split('+')[0].split('x'))
offsetx = width/2 - size[0]/2
offsety = height/2 - size[1]/2
window.geometry("%dx%d+%d+%d" % (size + (offsetx, offsety)))
def display_help():
"""Display the help screen.
"""
helpmsg = (
'QuizMe is an interactive tool for testing your ability to answer\n'
'a set of multi-choice questions.\n\n'
'Use the underlined hotkeys 1-5 to select your answer, then\n'
'press C to check your answer. You can also press Enter to\n'
'check your answer.\n\n'
'To select a different question, press P, N, or R for Previous,\n'
'Next or Random. You can also use the left/right arrow keys to\n'
'move through the questions in order.\n\n'
'Have fun!')
messagebox.showinfo('Help', helpmsg)
def display_score():
"""Display the current score (number and percent correct).
"""
totq = len(widgets.questions) # total questions in current topic
totans = widgets.totAnswered # number of questions answered so far
correct = widgets.totCorrect # number of correct answers so far
perc = 0 if totans == 0 else 100*(correct/totans)
title = widgets.topic + ' - {0} total questions'.format(totq)
msg = "You have {0} out of {1} correct for {2:.0f}%."
messagebox.showinfo(title, msg.format(correct, totans, perc))
def display_question():
"""Refresh display for current question.
Note: question-related state info is stored in widgets. properties.
"""
q_num = widgets.currentq # current question#
u_answered = (q_num in widgets.answered) # whether answered
u_answer = widgets.answered.get(q_num, '') # user's answer (if any)
question = widgets.questions[q_num] # current question dict()
q_corrnum = question['correct'] # the correct answer ('1' through '5')
q_corrtext = question['answers'].get(q_corrnum, '') # text of answer
u_correct = (u_answer == q_corrnum) # whether user's answer is correct
widgets.lblHeader.configure(text='Topic:\n'+widgets.topic)
widgets.txtQuestion.config(state="normal")
widgets.txtQuestion.delete(1.0, tk.END)
widgets.txtQuestion.insert(tk.END, question['question'])
widgets.txtQuestion.focus_set() # set focus to the question
widgets.txtQuestion.config(state="disabled")
currentstate = 'disabled' if u_answered else 'normal'
display_radiobuttons(rbstate=currentstate, rbselected=u_answer)
# "correct answer" textbox
widgets.txtCorrect.config(state="normal")
widgets.txtCorrect.delete(1.0, tk.END)
widgets.txtCorrect.config(bg="white")
if u_answered:
if u_correct:
msg = '#' + u_answer + ' is CORRECT - ' + q_corrtext
else:
msg = '#' + u_answer + \
' is INCORRECT - correct answer is #' + \
q_corrnum + ': ' + q_corrtext
widgets.txtCorrect.insert(tk.END, msg)
bgcolor = "#B1ECB1" if u_correct else "#FFC6C5"
else:
bgcolor = 'white' # white background if question not answered yet
widgets.txtCorrect.config(bg=bgcolor)
widgets.txtCorrect.config(state="disabled")
widgets.txtExplanation.config(state="normal")
widgets.txtExplanation.delete(1.0, tk.END)
if u_answered:
widgets.txtExplanation.insert(tk.END, question.get('explanation', ''))
widgets.txtExplanation.config(state="disabled")
image = question.get('image', '')
answerimage = question.get('answerimage', '')
displayedimage = answerimage if (u_answered and answerimage) else image
if displayedimage:
displayedimage = 'images/' + displayedimage
# PhotoImage() needs a reference to avoid garbage collection
widgets.image = tk.PhotoImage(file=displayedimage)
widgets.lblImage.configure(image=widgets.image)
else:
        widgets.image = None
        widgets.lblImage.configure(image='')  # '' clears the image; tkinter silently ignores None here
def display_radiobuttons(rbstate='normal', rbselected=''):
"""Set radiobuttons to the answer options for the current question.
state = the state to set the radiobuttons to; 'normal' or 'disabled'
selected = the radiobutton to select (e.g., '1', or '' for none)
"""
question = widgets.questions[widgets.currentq]
    # radiobuttons (answers)
    # note that we hide unused radiobuttons by lowering them in
    # the Z-order so that they're hidden behind widgets.rbframe
    buttons = [widgets.answer1, widgets.answer2, widgets.answer3,
               widgets.answer4, widgets.answer5]
    for number, button in zip('12345', buttons):
        text = question['answers'].get(number, '')
        if text:
            button.configure(text=number + ': ' + text, state=rbstate)
            button.lift(widgets.rbframe)
        else:
            button.configure(text='', state='disabled')
            button.lower(widgets.rbframe)
# select the user's answer (or clear selection if rbselected=='')
widgets.answerSelection.set(rbselected)
def initialize_score():
"""Initialize/reset the total answered and total correct.
"""
widgets.totAnswered = 0
widgets.totCorrect = 0
widgets.answered = dict() # key=question#, value = user's answer
def keystroke_bindings():
"""Assign keyboard shortcuts.
"""
root.bind('1', lambda event: widgets.answerSelection.set('1'))
root.bind('2', lambda event: widgets.answerSelection.set('2'))
root.bind('3', lambda event: widgets.answerSelection.set('3'))
root.bind('4', lambda event: widgets.answerSelection.set('4'))
root.bind('5', lambda event: widgets.answerSelection.set('5'))
root.bind('c', lambda event: save_answer())
root.bind('C', lambda event: save_answer())
root.bind('<Return>', lambda event: save_answer())
root.bind('<Left>', lambda event: move_previous())
root.bind('p', lambda event: move_previous())
root.bind('P', lambda event: move_previous())
root.bind('<Right>', lambda event: move_next())
root.bind('n', lambda event: move_next())
root.bind('N', lambda event: move_next())
root.bind('r', lambda event: move_random())
root.bind('R', lambda event: move_random())
root.bind('s', lambda event: display_score())
root.bind('S', lambda event: display_score())
root.bind('t', lambda event: select_topic(gui=True))
root.bind('T', lambda event: select_topic(gui=True))
root.bind('h', lambda event: display_help())
root.bind('H', lambda event: display_help())
root.bind('<F1>', lambda event: display_help())
root.bind("<Key-Escape>", lambda event: root.quit()) # Esc=quit
def move_next():
"""Move to next question.
"""
qnum_int = int(widgets.currentq) # convert question# to integer
if qnum_int < widgets.totalquestions:
widgets.currentq = str(qnum_int + 1)
display_question()
def move_previous():
"""Move to previous question.
"""
qnum_int = int(widgets.currentq) # convert question# to integer
if qnum_int > 1:
widgets.currentq = str(qnum_int - 1)
display_question()
def move_random():
"""Move to a random question.
"""
# handle the case where all questions have been answered
if len(widgets.answered) == widgets.totalquestions:
topic_completed()
return
# unanswered[] = list of remaining unanswered question numbers
unanswered = []
for qnum in range(1, widgets.totalquestions+1):
if str(qnum) not in widgets.answered:
unanswered.append(str(qnum))
# now we select a random question# from unanswered[]
random_index = randint(0, len(unanswered)-1)
widgets.currentq = str(unanswered[random_index])
display_question()
def pythonw_setup():
"""Handle default folder location if running under pythonw.exe.
The pythonw.exe launcher starts from the Windows System32 folder
as the default location, which isn't typically what's desired.
This function checks whether we're running under pythonw.exe, and
if so sets the default folder to the location of this program.
"""
fullname = sys.executable
nameonly = os.path.split(fullname)[1].split('.')[0].lower()
if nameonly == 'pythonw':
progfolder = os.path.dirname(os.path.realpath(sys.argv[0]))
os.chdir(progfolder)
def read_datafile():
"""Read widgets.dataFile and store questions in widgets.questions.
NOTE: the data file must include questions numbered 1-N with no gaps.
"""
with open(widgets.dataFile, 'r') as jsonfile:
widgets.questions = json.load(jsonfile)
widgets.currentq = '1' # current question#
widgets.totalquestions = len(widgets.questions)
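# Shape of a data file, inferred from the fields this module reads (question
# numbers are the string keys '1'..'N' with no gaps; values below are samples):
# {
#     "1": {
#         "question": "Sample question text?",
#         "answers": {"1": "first choice", "2": "second choice"},
#         "correct": "2",
#         "explanation": "optional text shown after answering",
#         "image": "optional file under images/",
#         "answerimage": "optional file shown after answering"
#     }
# }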
def save_answer():
"""Save the current answer and refresh displayed question.
"""
q_num = widgets.currentq # current question#
question = widgets.questions[q_num] # current question dict()
if q_num in widgets.answered:
return # this question has already been answered
answer = widgets.answerSelection.get()
if not answer or answer not in '12345':
return # an answer has not been selected
# update totals
widgets.totAnswered += 1
if answer == question['correct']:
widgets.totCorrect += 1
widgets.answered[q_num] = answer
# refresh the display based on current status
display_question()
# if all questions have been answered, display score
if len(widgets.answered) == widgets.totalquestions:
topic_completed()
def select_topic(gui=True):
"""Select a topic (.json file).
If gui=True, the quizme app window exists and will be updated if
a topic is selected.
Returns the selected filename (or '' if none selected), and the
global widgets object's properties are updated.
"""
if not gui:
tempwindow = tk.Tk() # create the top-level window
tempwindow.withdraw() # hide the top-level window behind this dialog
newtopic = filedialog.askopenfilename(title='select QuizMe file',
initialdir='data',
filetypes=[('JSON files', '.json')])
if not gui:
tempwindow.destroy() # destroy the temporary top-level window
if newtopic:
# a file was selected
widgets.dataFile = newtopic
# by convention topic name is the part of the filename after '-'
# e.g., filename = quizme-TopicName.json
nameonly = os.path.basename(newtopic)
nameonly = os.path.splitext(nameonly)[0]
widgets.topic = nameonly
widgets.currentq = '1' # start with first topic
widgets.answered = {} # reset answered questions
if gui:
read_datafile()
display_question()
return newtopic
def topic_completed():
"""Topic has been completed, so show score and ask whether to re-start.
"""
messagebox.showwarning(
'Topic Completed',
'You have already answered all of the questions in this topic!')
display_score()
questiontext = 'Do you want to start over with this topic?'
if messagebox.askyesno('Topic Completed', questiontext):
initialize_score()
widgets.currentq = '1'
display_question()
class MainApplication(ttk.Frame):
"""Root application class.
"""
def __init__(self, parent, *args, **kwargs):
ttk.Frame.__init__(self, parent, *args, **kwargs)
self.grid(sticky="nsew")
self.parent = parent
self.parent.title('QuizMe')
self.parent.iconbitmap('quizme.ico')
initialize_score()
self.widgets_create()
display_question() # display first question in selected topic
# customize styles
style = ttk.Style()
style.configure("TButton", font=('Verdana', 12))
style.configure("TRadiobutton", font=('Verdana', 12))
keystroke_bindings()
def widgets_create(self):
"""Create all widgets in the main application window.
"""
# configure resizing behavior
top = self.winfo_toplevel()
top.rowconfigure(1, weight=1)
top.columnconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
self.columnconfigure(0, weight=1)
# create the widgets
self.frm_question = FrameQuestion(self)
self.frm_controls = FrameControls(self)
self.frm_question.grid(row=1, column=0, sticky="w", padx=5, pady=5)
self.frm_controls.grid(row=1, column=1, sticky="w", padx=5, pady=5)
self.parent.columnconfigure(0, weight=1)
widgets.lblImage = tk.Label(self)
widgets.lblImage.place(x=511, y=50, height=300, width=300)
class FrameControls(ttk.Frame):
"""Frame for the controls (buttons).
"""
def __init__(self, parent, *args, **kwargs):
ttk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
widgets.lblHeader = tk.Label(self, text='Topic:\n???',
font=font.Font(family="Verdana", size=12),
bg="#6FD2F4", height=4, width=12)
widgets.lblHeader.pack(fill=tk.Y, padx=10, pady=10, expand=True)
btnpadding = dict(padx=10, pady=5)
ttk.Button(self, underline=0, text="Check Answer",
command=save_answer).pack(**btnpadding)
ttk.Button(self, underline=0, text="Next",
command=move_next).pack(**btnpadding)
ttk.Button(self, underline=0, text="Previous",
command=move_previous).pack(**btnpadding)
ttk.Button(self, underline=0, text="Random",
command=move_random).pack(**btnpadding)
ttk.Button(self, underline=0, text="Score",
command=display_score).pack(**btnpadding)
ttk.Button(self, underline=0, text="Topic",
command=lambda: select_topic(gui=True)).pack(**btnpadding)
ttk.Button(self, underline=0, text="Help",
command=display_help).pack(**btnpadding)
class FrameQuestion(ttk.Frame):
"""Frame for the question, answers, and explanation.
"""
def __init__(self, parent, *args, **kwargs):
ttk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
widgets.txtQuestion = tk.Text(self, height=2, border=0,
font=font.Font(family="Verdana", size=12))
widgets.txtQuestion.pack(anchor=tk.W, padx=5, pady=5, expand=tk.Y)
widgets.txtQuestion.config(state="disabled")
widgets.answerSelection = tk.StringVar()
# create a frame to be used for hiding/showing radiobuttons
widgets.rbframe = tk.Frame(self)
widgets.rbframe.pack(side="top", fill="both", expand=True)
rbops = dict(variable=widgets.answerSelection, underline=0)
packoptions = dict(in_=widgets.rbframe, anchor=tk.W, padx=15, pady=17)
widgets.answer1 = ttk.Radiobutton(self, value='1', text="1:", **rbops)
widgets.answer1.pack(**packoptions)
widgets.answer2 = ttk.Radiobutton(self, value='2', text="2:", **rbops)
widgets.answer2.pack(**packoptions)
widgets.answer3 = ttk.Radiobutton(self, value='3', text="3:", **rbops)
widgets.answer3.pack(**packoptions)
widgets.answer4 = ttk.Radiobutton(self, value='4', text="4:", **rbops)
widgets.answer4.pack(**packoptions)
widgets.answer5 = ttk.Radiobutton(self, value='5', text="5:", **rbops)
widgets.answer5.pack(**packoptions)
widgets.txtCorrect = tk.Text(self, border=0, height=2,
font=('Verdana', 12))
widgets.txtCorrect.pack(anchor=tk.W, padx=5, pady=8, expand=tk.Y)
widgets.txtCorrect.config(state="disabled")
widgets.txtExplanation = tk.Text(
self, height=2, border=0, font=font.Font(family="Verdana", size=12))
widgets.txtExplanation.pack(anchor=tk.W, padx=5, pady=5, expand=tk.Y)
# if running standalone, launch the app
if __name__ == "__main__":
pythonw_setup()
filename = select_topic(gui=False) # pylint: disable=C0103
if not filename:
sys.exit(0)
read_datafile() # read in the selected data file
root = tk.Tk() # pylint: disable=C0103
MainApplication(root)
root.minsize(width=900, height=400)
root.resizable(width=False, height=False) # app window not resizable
root.attributes("-topmost", True) # force app window to top
root.attributes("-topmost", False)
root.focus_force() # give app window focus
center_window(root)
root.mainloop()
# === File: kedro/extras/datasets/pandas/appendable_excel_dataset.py | repo: hfwittmann/kedro | license: Apache-2.0 ===
"""``AppendableExcelDataSet`` loads/saves data from/to a local Excel file opened in append mode.
It uses pandas to handle the Excel file.
"""
from copy import deepcopy
from pathlib import Path, PurePosixPath
from typing import Any, Dict
import pandas as pd
from kedro.io.core import AbstractDataSet, DataSetError
class AppendableExcelDataSet(AbstractDataSet):
"""``AppendableExcelDataSet`` loads/saves data from/to a local Excel file opened in
append mode. It uses pandas to handle the Excel file.
Example adding a catalog entry with
`YAML API <https://kedro.readthedocs.io/en/stable/05_data/\
01_data_catalog.html#using-the-data-catalog-with-the-yaml-api>`_:
.. code-block:: yaml
>>> # AppendableExcelDataSet creates a new sheet for every dataset
>>> # ExcelDataSet restricts one dataset per file as it is overwritten
>>>
>>> preprocessed_companies:
>>> type: pandas.AppendableExcelDataSet
>>> filepath: data/02_intermediate/preprocessed.xlsx # assumes file already exists
>>> save_args:
>>> sheet_name: preprocessed_companies
>>> load_args:
>>> sheet_name: preprocessed_companies
>>>
>>> preprocessed_shuttles:
>>> type: pandas.AppendableExcelDataSet
>>> filepath: data/02_intermediate/preprocessed.xlsx
>>> save_args:
>>> sheet_name: preprocessed_shuttles
>>> load_args:
>>> sheet_name: preprocessed_shuttles
Example using Python API:
::
>>> from kedro.extras.datasets.pandas import AppendableExcelDataSet
>>> from kedro.extras.datasets.pandas import ExcelDataSet
>>> import pandas as pd
>>>
>>> data_1 = pd.DataFrame({'col1': [1, 2], 'col2': [4, 5],
>>> 'col3': [5, 6]})
>>>
>>> data_2 = pd.DataFrame({'col1': [7, 8], 'col2': [5, 7]})
>>>
>>> regular_ds = ExcelDataSet(filepath="/tmp/test.xlsx")
>>> appendable_ds = AppendableExcelDataSet(
>>> filepath="/tmp/test.xlsx",
>>> save_args={"sheet_name": "my_sheet"},
>>> load_args={"sheet_name": "my_sheet"}
>>> )
>>>
>>> regular_ds.save(data_1)
>>> appendable_ds.save(data_2)
>>> reloaded = appendable_ds.load()
>>> assert data_2.equals(reloaded)
"""
DEFAULT_LOAD_ARGS = {"engine": "openpyxl"}
DEFAULT_SAVE_ARGS = {"index": False}
def __init__(
self,
filepath: str,
load_args: Dict[str, Any] = None,
save_args: Dict[str, Any] = None,
) -> None:
"""Creates a new instance of ``AppendableExcelDataSet`` pointing to an existing local
Excel file to be opened in append mode.
Args:
filepath: Filepath in POSIX format to an existing local Excel file.
load_args: Pandas options for loading Excel files.
Here you can find all available arguments:
https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_excel.html
All defaults are preserved, but "engine", which is set to "openpyxl".
save_args: Pandas options for saving Excel files.
Here you can find all available arguments:
https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_excel.html
All defaults are preserved, but "index", which is set to False.
If you would like to specify options for the `ExcelWriter`,
you can include them under "writer" key. Here you can
find all available arguments:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.ExcelWriter.html
Note: `mode` option of `ExcelWriter` is set to `a` and it can not be overridden.
"""
self._filepath = PurePosixPath(filepath)
# Handle default load and save arguments
self._load_args = deepcopy(self.DEFAULT_LOAD_ARGS)
if load_args is not None:
self._load_args.update(load_args)
save_args = deepcopy(save_args) or {}
self._save_args = deepcopy(self.DEFAULT_SAVE_ARGS)
self._writer_args = save_args.pop("writer", {}) # type: Dict[str, Any]
self._writer_args.setdefault("engine", "openpyxl")
if save_args is not None:
self._save_args.update(save_args)
# Use only append mode
self._writer_args["mode"] = "a"
def _describe(self) -> Dict[str, Any]:
return dict(
filepath=self._filepath,
load_args=self._load_args,
save_args=self._save_args,
writer_args=self._writer_args,
)
def _load(self) -> pd.DataFrame:
return pd.read_excel(str(self._filepath), **self._load_args)
def _save(self, data: pd.DataFrame) -> None:
# pylint: disable=abstract-class-instantiated
try:
with pd.ExcelWriter(str(self._filepath), **self._writer_args) as writer:
data.to_excel(writer, **self._save_args)
except FileNotFoundError as exc:
raise DataSetError(
f"`{self._filepath}` Excel file not found. The file cannot be opened in "
f"append mode."
) from exc
def _exists(self) -> bool:
return Path(self._filepath.as_posix()).is_file()
# === File: examples/phobos/tests/test_std_system.py | repo: kinke/autowrap | license: BSD-3-Clause ===
def test_import():
import std_system
# === File: graalpython/benchmarks/src/meso/euler31.py | repo: muellren/graalpython | license: UPL-1.0 / Apache-2.0 / OpenSSL ===
#!/usr/bin/env python
# Copyright 2008-2010 Isaac Gouy
# Copyright (c) 2013, 2014, Regents of the University of California
# Copyright (c) 2017, 2018, Oracle and/or its affiliates.
# All rights reserved.
#
# Revised BSD license
#
# This is a specific instance of the Open Source Initiative (OSI) BSD license
# template http://www.opensource.org/licenses/bsd-license.php
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of "The Computer Language Benchmarks Game" nor the name of
# "The Computer Language Shootout Benchmarks" nor the name "nanobench" nor the
# name "bencher" nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#runas solve()
#unittest.skip recursive generator
#pythran export solve()
# 01/08/14 modified for benchmarking by Wei Zhang
COINS = [1, 2, 5, 10, 20, 50, 100, 200]
# test
def _sum(iterable):
sum = None
for i in iterable:
if sum is None:
sum = i
else:
sum += i
return sum
def balance(pattern):
return _sum(COINS[x]*pattern[x] for x in range(0, len(pattern)))
def gen(pattern, coinnum, num):
coin = COINS[coinnum]
for p in range(0, num//coin + 1):
newpat = pattern[:coinnum] + (p,)
bal = balance(newpat)
if bal > num:
return
elif bal == num:
yield newpat
elif coinnum < len(COINS)-1:
for pat in gen(newpat, coinnum+1, num):
yield pat
def solve(total):
'''
In England the currency is made up of pound, P, and pence, p, and there are eight coins in general circulation:
1p, 2p, 5p, 10p, 20p, 50p, P1 (100p) and P2 (200p).
It is possible to make P2 in the following way:
1 P1 + 1 50p + 2 20p + 1 5p + 1 2p + 3 1p
How many different ways can P2 be made using any number of coins?
'''
return _sum(1 for pat in gen((), 0, total))
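# Sanity check: solve(200) counts the ways to make P2 from the eight coins
# above; the published answer to Project Euler problem 31 is 73682.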
def measure(num):
result = solve(num)
print('total number of different ways: ', result)
def __benchmark__(num=200):
measure(num)
# === File: comicMaker/readComicsOnlineRu.py | repo: Gunjan933/comicMaker | license: MIT ===
from .makeFullPdf import makeFullPdf
from .parseImage import parseImage
from .makePdf import makePdf
import requests,os,os.path,sys,time,json
from bs4 import BeautifulSoup
def readComicsOnlineRu():
while True:
try:
with open('config.json', 'r', encoding="utf-8") as f:
books = json.load(f)
library=[*books['readComicsOnlineRu']]
# print(library)
# return
if not library:
# print("No books found!")
return
# print("List of books >")
# for i in library:
# print (" > '"+i+"' download will start from Chapter-"+books['readComicsOnlineRu'][i])
except:
# raise
# print("No 'config.json' file found!")
# return
continue
break
# if not confirm():
# return
originDirectory=os.getcwd()
os.chdir('..')
if not os.path.exists('comicDownloads'+os.sep):
os.makedirs('comicDownloads'+os.sep)
os.chdir('comicDownloads'+os.sep)
for comicName in library:
incompleteUrl="https://readcomicsonline.ru/comic/"+comicName+"/"
tryAgain=0
while tryAgain==0:
try:
page_response = requests.get(incompleteUrl, timeout=5)
soup = BeautifulSoup(page_response.content, "html.parser")
except:
print("Could not connect, trying again in 5 seconds!")
time.sleep(5)
continue
# os.chdir('..')
# os.chdir('comicMaker'+os.sep)
# readComicsOnlineRu()
# return
tryAgain=1
chapterNum = []
totalChaptersToDownload = 0
for li in soup.findAll('li', attrs={'class':'volume-0'}):
# validChapterNum=li.find('a').contents[0].split("#")[1]
validChapterNum=li.find('a')['href'].split(comicName+"/")[1]
try:
if float(validChapterNum) >= float(books['readComicsOnlineRu'][comicName]):
chapterNum.append(validChapterNum)
totalChaptersToDownload += 1
except:
chapterNum.append(validChapterNum)
totalChaptersToDownload += 1
chapterNum.reverse()
# print(chapterNum)
# return
parentDir=comicName+os.sep
if os.path.exists(parentDir):
print(comicName+" already exists.")
else:
os.makedirs(parentDir)
print(" Opening "+comicName+" >")
os.chdir(parentDir)
if totalChaptersToDownload > 1 :
for i in chapterNum:
books['readComicsOnlineRu'][comicName] = str(i)
tryAgain=0
while tryAgain==0:
try:
with open(originDirectory+os.sep+'config.json', 'w', encoding="utf-8") as file:
json.dump(books, file, indent=4)
except:
continue
tryAgain=1
chapter=i
currentDir=chapter.replace('.','-')+os.sep
if os.path.exists(currentDir):
print(" "+comicName+" > "+chapter.replace('.','-')+" already exists.")
else:
os.makedirs(currentDir)
print(" Opening "+comicName+" > "+chapter+" > ("+str(totalChaptersToDownload)+" Remaining) >")
os.chdir(currentDir)
completeUrl=incompleteUrl+i+"/"
parseImage.readComicsOnlineRu(comicName,completeUrl,chapter)
makePdf.readComicsOnlineRu(chapter)
os.chdir("..")
totalChaptersToDownload -= 1
makeFullPdf.readComicsOnlineRu(comicName)
else:
print(" < "+comicName+" already fully downloaded.")
os.chdir("..")
print(" << Download finished of "+comicName+" <")
os.chdir(originDirectory)
    return
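# Shape of the config.json this reader expects, inferred from the keys it
# accesses above (slug and chapter values are placeholders):
#   {"readComicsOnlineRu": {"<comic-name-slug>": "<chapter to resume from>"}}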
# === File: geoipgen/generate.py | repo: christivn/INDES-devices-scan-engine | license: MIT ===
from . import subnetCal
from . import functions
from random import randint
def IP(cidr):
arr=subnetCal.simpleCalculate(cidr)
min_host=arr[3]
smin_host=min_host.split(".")
max_host=arr[4]
smax_host=max_host.split(".")
ip=""
for i in range(4):
if(smin_host[i]==smax_host[i]):
ip+=smin_host[i]
else:
ip+=str(randint(int(smin_host[i]), int(smax_host[i])))
if(i<3):
ip+="."
return ip
def rangeIP(cidr):
arr=subnetCal.simpleCalculate(cidr)
min_host=arr[3]
max_host=arr[4]
ip_list=functions.ips(min_host, max_host)
return ip_list
def randomCIDR(country):
    with open('geoipgen/ipv4/' + country + '.cidr', 'r') as folder_cidr:
        cidr_lines = folder_cidr.readlines()
    # randint is inclusive at both ends, so the index must be capped at len-1
    return cidr_lines[randint(0, len(cidr_lines) - 1)].strip()
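# Usage sketch (argument values are hypothetical; relies on the assumption,
# used above, that simpleCalculate() returns the min host at index 3 and the
# max host at index 4):
#   IP("192.168.1.0/24")      # one random host address in the block
#   rangeIP("10.0.0.0/30")    # list of every host in the block
#   randomCIDR("US")          # random line from geoipgen/ipv4/US.cidr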
# === File: lib/googlecloudsdk/command_lib/filestore/operations/flags.py | repo: bshaffer/google-cloud-sdk | license: Apache-2.0 ===
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags and helpers for the Cloud Filestore operations commands."""
from __future__ import unicode_literals
OPERATIONS_LIST_FORMAT = """\
table(
name.basename():label=OPERATION_NAME,
name.segment(3):label=LOCATION,
metadata.verb:label=TYPE,
metadata.target.basename(),
done.yesno(yes='DONE', no='RUNNING'):label=STATUS,
metadata.createTime.date():sort=1,
duration(start=metadata.createTime,end=metadata.endTime,precision=0,calendar=false).slice(2:).join("").yesno(no="<1S"):label=DURATION
)"""
# === File: model_agreement.py | repo: ntunlp/coherence-paradigm | license: MIT ===
import sys
import pickle
from krips_alpha import krippendorff_alpha, nominal_metric
def get_model_labels(model_output):
model_labels = []
for x in model_output:
try:
if x['pos_score'] > x['neg_score']:
model_labels.append('0')
elif x['neg_score'] > x['pos_score']:
model_labels.append('1')
else:
print(x, "error")
except KeyError:
if x['pos'] > x['neg']:
model_labels.append('0')
elif x['neg'] > x['pos']:
model_labels.append('1')
else:
print(x, "error")
return model_labels
annotations = pickle.load(open(sys.argv[1], 'rb'))
#print(len(annotations[0]))
alpha = krippendorff_alpha(annotations, nominal_metric)
#print(alpha)
model_output = pickle.load(open(sys.argv[2], 'rb'))
print(len(model_output))
model_labels = get_model_labels(model_output)
annotations.append(model_labels)
print(len(annotations))
model_alpha = krippendorff_alpha(annotations, nominal_metric)
print(model_alpha)
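# Usage sketch (file names are illustrative):
#   python model_agreement.py annotations.pkl model_output.pkl
# argv[1] is a pickled list of per-annotator label lists; argv[2] is a pickled
# list of score dicts with 'pos_score'/'neg_score' (or 'pos'/'neg') keys.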
# === File: lewansoul-lx16a/lewansoul_lx16a.py | repo: christophschnabl/lewansoul-lx16a | license: MIT ===
__all__ = [
'ServoController',
'TimeoutError',
'SERVO_ERROR_OVER_TEMPERATURE',
'SERVO_ERROR_OVER_VOLTAGE',
'SERVO_ERROR_LOCKED_ROTOR',
]
from serial.serialutil import *
from functools import partial
import threading
import logging
SERVO_ID_ALL = 0xfe
SERVO_MOVE_TIME_WRITE = 1
SERVO_MOVE_TIME_READ = 2
SERVO_MOVE_TIME_WAIT_WRITE = 7
SERVO_MOVE_TIME_WAIT_READ = 8
SERVO_MOVE_START = 11
SERVO_MOVE_STOP = 12
SERVO_ID_WRITE = 13
SERVO_ID_READ = 14
SERVO_ANGLE_OFFSET_ADJUST = 17
SERVO_ANGLE_OFFSET_WRITE = 18
SERVO_ANGLE_OFFSET_READ = 19
SERVO_ANGLE_LIMIT_WRITE = 20
SERVO_ANGLE_LIMIT_READ = 21
SERVO_VIN_LIMIT_WRITE = 22
SERVO_VIN_LIMIT_READ = 23
SERVO_TEMP_MAX_LIMIT_WRITE = 24
SERVO_TEMP_MAX_LIMIT_READ = 25
SERVO_TEMP_READ = 26
SERVO_VIN_READ = 27
SERVO_POS_READ = 28
SERVO_OR_MOTOR_MODE_WRITE = 29
SERVO_OR_MOTOR_MODE_READ = 30
SERVO_LOAD_OR_UNLOAD_WRITE = 31
SERVO_LOAD_OR_UNLOAD_READ = 32
SERVO_LED_CTRL_WRITE = 33
SERVO_LED_CTRL_READ = 34
SERVO_LED_ERROR_WRITE = 35
SERVO_LED_ERROR_READ = 36
SERVO_ERROR_OVER_TEMPERATURE = 1
SERVO_ERROR_OVER_VOLTAGE = 2
SERVO_ERROR_LOCKED_ROTOR = 4
def lower_byte(value):
return int(value) % 256
def higher_byte(value):
return int(value / 256) % 256
def word(low, high):
return int(low) + int(high)*256
def clamp(range_min, range_max, value):
return min(range_max, max(range_min, value))
class TimeoutError(RuntimeError):
pass
LOGGER = logging.getLogger('lewansoul.servos.lx16a')
class Servo(object):
def __init__(self, controller, servo_id):
self.__dict__.update({
'_controller': controller,
'servo_id': servo_id,
})
    # Note: Python has no __hasattr__ protocol; hasattr() is answered through
    # __getattr__ below, so attribute checks already delegate to the controller.
    def __getattr__(self, name):
attr = getattr(self._controller, name)
if callable(attr):
attr = partial(attr, self.servo_id)
return attr
class ServoController(object):
def __init__(self, serial, timeout=1):
self._serial = serial
self._timeout = timeout
self._lock = threading.RLock()
    def _command(self, servo_id, command, *params):
length = 3 + len(params)
checksum = 255-((servo_id + length + command + sum(params)) % 256)
        LOGGER.debug('Sending servo control packet: %s', [
            0x55, 0x55, servo_id, length, command, *params, checksum
        ])
        with self._lock:
            # params must be unpacked into the frame; a nested tuple would
            # make bytearray() raise a TypeError
            self._serial.write(bytearray([
                0x55, 0x55, servo_id, length, command, *params, checksum
            ]))
def _wait_for_response(self, servo_id, command, timeout=None):
timeout = Timeout(timeout or self._timeout)
def read(size=1):
self._serial.timeout = timeout.time_left()
data = self._serial.read(size)
if len(data) != size:
raise TimeoutError()
return data
while True:
data = []
data += read(1)
if data[-1] != 0x55:
continue
data += read(1)
if data[-1] != 0x55:
continue
data += read(3)
sid = data[2]
length = data[3]
cmd = data[4]
if length > 7:
LOGGER.error('Invalid length for packet %s', list(data))
continue
data += read(length-3) if length > 3 else []
params = data[5:]
data += read(1)
checksum = data[-1]
if 255-(sid + length + cmd + sum(params)) % 256 != checksum:
LOGGER.error('Invalid checksum for packet %s', list(data))
continue
if cmd != command:
LOGGER.warning('Got unexpected command %s response %s',
cmd, list(data))
continue
if servo_id != SERVO_ID_ALL and sid != servo_id:
LOGGER.warning('Got command response from unexpected servo %s', sid)
continue
            return [sid, cmd] + params  # flatten so callers can index params from position 2
def _query(self, servo_id, command, timeout=None):
with self._lock:
self._command(servo_id, command)
return self._wait_for_response(servo_id, command, timeout=timeout)
def servo(self, servo_id):
return Servo(self, servo_id)
def get_servo_id(self, servo_id=SERVO_ID_ALL, timeout=None):
response = self._query(servo_id, SERVO_ID_READ, timeout=timeout)
return response[2]
def set_servo_id(self, servo_id, new_servo_id):
self._command(servo_id, SERVO_ID_WRITE, new_servo_id)
def move(self, servo_id, position, time=0):
position = clamp(0, 1000, position)
time = clamp(0, 30000, time)
        self._command(
            servo_id, SERVO_MOVE_TIME_WRITE,
            lower_byte(position), higher_byte(position),
            lower_byte(time), higher_byte(time),
        )
def get_prepared_move(self, servo_id, timeout=None):
"""Returns servo position and time tuple"""
response = self._query(servo_id, SERVO_MOVE_TIME_WAIT_READ, timeout=timeout)
return word(response[2], response[3]), word(response[4], response[5])
def move_prepare(self, servo_id, position, time=0):
position = clamp(0, 1000, position)
time = clamp(0, 30000, time)
self._command(
servo_id, SERVO_MOVE_TIME_WAIT_WRITE,
lower_byte(position), higher_byte(position),
lower_byte(time), higher_byte(time),
)
def move_start(self, servo_id=SERVO_ID_ALL):
self._command(servo_id, SERVO_MOVE_START)
def move_stop(self, servo_id=SERVO_ID_ALL):
self._command(servo_id, SERVO_MOVE_STOP)
def get_position_offset(self, servo_id, timeout=None):
response = self._query(servo_id, SERVO_ANGLE_OFFSET_READ, timeout=timeout)
deviation = response[2]
if deviation > 127:
deviation -= 256
return deviation
def set_position_offset(self, servo_id, deviation):
deviation = clamp(-125, 125, deviation)
if deviation < 0:
deviation += 256
self._command(servo_id, SERVO_ANGLE_OFFSET_ADJUST, deviation)
def save_position_offset(self, servo_id):
self._command(servo_id, SERVO_ANGLE_OFFSET_WRITE)
def get_position_limits(self, servo_id, timeout=None):
response = self._query(servo_id, SERVO_ANGLE_LIMIT_READ, timeout=timeout)
return word(response[2], response[3]), word(response[4], response[5])
def set_position_limits(self, servo_id, min_position, max_position):
min_position = clamp(0, 1000, min_position)
max_position = clamp(0, 1000, max_position)
self._command(
servo_id, SERVO_ANGLE_LIMIT_WRITE,
lower_byte(min_position), higher_byte(min_position),
lower_byte(max_position), higher_byte(max_position),
)
def get_voltage_limits(self, servo_id, timeout=None):
response = self._query(servo_id, SERVO_VIN_LIMIT_READ, timeout=timeout)
return word(response[2], response[3]), word(response[4], response[5])
def set_voltage_limits(self, servo_id, min_voltage, max_voltage):
min_voltage = clamp(4500, 12000, min_voltage)
max_voltage = clamp(4500, 12000, max_voltage)
self._command(
servo_id, SERVO_VIN_LIMIT_WRITE,
lower_byte(min_voltage), higher_byte(min_voltage),
lower_byte(max_voltage), higher_byte(max_voltage),
)
def get_max_temperature_limit(self, servo_id, timeout=None):
response = self._query(servo_id, SERVO_TEMP_MAX_LIMIT_READ, timeout=timeout)
return response[2]
def set_max_temperature_limit(self, servo_id, max_temperature):
max_temperature = clamp(50, 100, max_temperature)
self._command(servo_id, SERVO_TEMP_MAX_LIMIT_WRITE, max_temperature)
def get_temperature(self, servo_id, timeout=None):
response = self._query(servo_id, SERVO_TEMP_READ, timeout=timeout)
return response[2]
def get_voltage(self, servo_id, timeout=None):
response = self._query(servo_id, SERVO_VIN_READ, timeout=timeout)
return word(response[2], response[3])
def get_position(self, servo_id, timeout=None):
response = self._query(servo_id, SERVO_POS_READ, timeout=timeout)
position = word(response[2], response[3])
if position > 32767:
position -= 65536
return position
def get_mode(self, servo_id, timeout=None):
response = self._query(servo_id, SERVO_OR_MOTOR_MODE_READ, timeout=timeout)
return response[2]
def get_motor_speed(self, servo_id, timeout=None):
response = self._query(servo_id, SERVO_OR_MOTOR_MODE_READ, timeout=timeout)
if response[2] != 1:
return 0
speed = word(response[4], response[5])
if speed > 32767:
speed -= 65536
return speed
def set_servo_mode(self, servo_id):
self._command(
servo_id, SERVO_OR_MOTOR_MODE_WRITE, 0, 0, 0, 0,
)
def set_motor_mode(self, servo_id, speed=0):
speed = clamp(-1000, 1000, speed)
if speed < 0:
speed += 65536
self._command(
servo_id, SERVO_OR_MOTOR_MODE_WRITE, 1, 0,
lower_byte(speed), higher_byte(speed),
)
def is_motor_on(self, servo_id, timeout=None):
response = self._query(servo_id, SERVO_LOAD_OR_UNLOAD_READ, timeout=timeout)
return response[2] == 1
def motor_on(self, servo_id):
self._command(servo_id, SERVO_LOAD_OR_UNLOAD_WRITE, 1)
def motor_off(self, servo_id):
self._command(servo_id, SERVO_LOAD_OR_UNLOAD_WRITE, 0)
def is_led_on(self, servo_id, timeout=None):
response = self._query(servo_id, SERVO_LED_CTRL_READ, timeout=timeout)
return response[2] == 0
def led_on(self, servo_id):
self._command(servo_id, SERVO_LED_CTRL_WRITE, 0)
def led_off(self, servo_id):
self._command(servo_id, SERVO_LED_CTRL_WRITE, 1)
def get_led_errors(self, servo_id, timeout=None):
response = self._query(servo_id, SERVO_LED_ERROR_READ, timeout=timeout)
return response[2]
def set_led_errors(self, servo_id, error):
error = clamp(0, 7, error)
self._command(servo_id, SERVO_LED_ERROR_WRITE, error)
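# Minimal usage sketch. The device path, baud rate and servo ID below are
# assumptions for illustration, not values mandated by this module:
#
#   import serial
#   port = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)
#   controller = ServoController(port)
#   servo = controller.servo(1)   # proxy bound to servo ID 1
#   servo.move(500, time=1000)    # mid-range position over one second
#   print(servo.get_position())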
# === File: Basic Data Structures/string/leet_551_StudentAttendanceRecordI.py | repo: rush2catch/algorithms-leetcode | license: MIT ===
# Problem: Student Attendance Record I
# Difficulty: Easy
# Category: String
# Leetcode 551: https://leetcode.com/problems/student-attendance-record-i/#/description
# Description:
"""
You are given a string representing an attendance record for a student.
The record only contains the following three characters:
'A' : Absent.
'L' : Late.
'P' : Present.
A student could be rewarded if his attendance record doesn't contain
more than one 'A' (absent) or more than two continuous 'L' (late).
You need to return whether the student could be rewarded according to his attendance record.
Example 1:
Input: "PPALLP"
Output: True
Example 2:
Input: "PPALLL"
Output: False
"""
class Solution(object):
def check_record(self, s):
absent = True
late = True
abscn = 0
i = 0
while i < len(s) and absent and late:
if abscn > 1:
absent = False
if s[i] == 'A':
abscn += 1
if s[i] == 'L' and i + 3 <= len(s) and set(s[i:i+3]) == {'L'}:
late = False
i += 1
if abscn > 1:
absent = False
return absent and late
obj = Solution()
s1 = 'PPALLP'
s2 = 'PPALLL'
s3 = 'ALLLPPPLLPL'
s4 = 'AA'
print(obj.check_record(s1))
print(obj.check_record(s2))
print(obj.check_record(s3))
print(obj.check_record(s4))
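# Hand-traced expected output: True, False, False, False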
# === File: etc/check-python.py | repo: maxzheng/auto-update | license: MIT ===
#!/usr/bin/env python
import argparse
import os
import platform
import re
import shutil
import subprocess
import sys
SUPPORTED_VERSIONS = ('3.6', '3.7')
IS_DEBIAN = platform.system() == 'Linux' and os.path.exists('/etc/debian_version')
IS_OLD_UBUNTU = (IS_DEBIAN and os.path.exists('/etc/lsb-release')
and re.search('RELEASE=1[46]', open('/etc/lsb-release').read()))
IS_MACOS = platform.system() == 'Darwin'
SUDO = 'sudo ' if os.getuid() else ''
parser = argparse.ArgumentParser(description='Check and fix Python installation')
parser.add_argument('--autofix', action='store_true', help='Automatically fix any problems found')
parser.add_argument('--version', default=SUPPORTED_VERSIONS[0], choices=SUPPORTED_VERSIONS,
help='Python version to check')
args = parser.parse_args()
PY_VERSION = args.version
AUTOFIX = args.autofix
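# The "python -" phrasing in the messages below reflects how this script is
# meant to be run, e.g. piped from curl (URL is illustrative):
#   curl -s https://<host>/check-python.py | python - --version 3.7 --autofix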
def check_sudo():
if not run('which sudo', return_output=True):
error('! sudo is not installed.')
print(' Please ask an administrator to install it and run this again.')
sys.exit(1)
def check_apt():
os.environ['DEBIAN_FRONTEND'] = 'noninteractive'
run(SUDO + 'apt-get install -y apt-utils', return_output=True)
def check_curl():
if not run('which curl', return_output=True):
error('! curl is not installed.')
if IS_DEBIAN:
raise AutoFixSuggestion('To install, run', SUDO + 'apt-get install -y curl')
sys.exit(1)
def check_python():
py3_path = run('which python' + PY_VERSION, return_output=True)
if not py3_path:
error('! Python ' + PY_VERSION + ' is not installed.')
if '--version' not in sys.argv:
print(' autopip supports Python {}.'.format(', '.join(SUPPORTED_VERSIONS))
+ ' To check a different version, re-run using "python - --version x.y"')
if IS_OLD_UBUNTU:
raise AutoFixSuggestion('To install, run',
(SUDO + 'apt-get update',
SUDO + 'apt-get install -y software-properties-common',
SUDO + 'add-apt-repository -y ppa:deadsnakes/ppa',
SUDO + 'apt-get update',
SUDO + 'apt-get install -y python' + PY_VERSION))
elif IS_DEBIAN:
raise AutoFixSuggestion('To install, run',
(SUDO + 'apt-get update', SUDO + 'apt-get install -y python' + PY_VERSION))
elif IS_MACOS:
raise AutoFixSuggestion('To install, run', 'brew install python')
print(' Please install Python ' + PY_VERSION
+ ' per http://docs.python-guide.org/en/latest/starting/installation/')
sys.exit(1)
def check_pip():
if not run('which pip3', return_output=True):
error('! pip3 is not installed.')
if IS_DEBIAN:
raise AutoFixSuggestion('To install, run', SUDO + 'apt-get install -y python3-pip')
elif IS_MACOS:
raise AutoFixSuggestion('To install, run', 'curl -s https://bootstrap.pypa.io/get-pip.py | '
+ SUDO + 'python' + PY_VERSION)
print(' If your package repo has a *-pip package for Python ' + PY_VERSION
+ ', then installing it from there is recommended.')
print(' To install directly, run: curl -s https://bootstrap.pypa.io/get-pip.py | '
+ SUDO + 'python' + PY_VERSION)
sys.exit(1)
version_full = run('pip3 --version', return_output=True)
if 'python ' + PY_VERSION not in version_full:
print(' ' + version_full.strip())
error('! pip3 is pointing to another Python version and not Python ' + PY_VERSION)
if '--version' not in sys.argv:
print(' autopip supports Python {}.'.format(', '.join(SUPPORTED_VERSIONS))
+ ' To check a different version, re-run using "python - --version x.y"')
raise AutoFixSuggestion('To re-install for Python ' + PY_VERSION + ', run',
'curl -s https://bootstrap.pypa.io/get-pip.py | ' + SUDO + 'python' + PY_VERSION)
version_str = version_full.split()[1]
version = tuple(map(_int_or, version_str.split('.', 2)))
if version < (9, 0, 3):
error('! Version is', version_str + ', but should be 9.0.3+')
raise AutoFixSuggestion('To upgrade, run', SUDO + 'pip3 install pip==9.0.3')
def check_venv():
test_venv_path = '/tmp/check-python-venv-{}'.format(os.getpid())
try:
try:
run('python' + PY_VERSION + ' -m venv ' + test_venv_path, stderr=subprocess.STDOUT, return_output=True,
raises=True)
except Exception:
error('! Could not create virtual environment.')
if IS_DEBIAN:
raise AutoFixSuggestion('To install, run', SUDO + 'apt-get install -y python' + PY_VERSION + '-venv')
print(' Please make sure Python venv package is installed.')
sys.exit(1)
finally:
shutil.rmtree(test_venv_path, ignore_errors=True)
try:
try:
run('virtualenv --python python' + PY_VERSION + ' ' + test_venv_path, stderr=subprocess.STDOUT,
return_output=True,
raises=True)
except Exception as e:
if run('which virtualenv', return_output=True):
error('! Could not create virtual environment.')
print(' ' + str(e))
sys.exit(1)
else:
error('! virtualenv is not installed.')
raise AutoFixSuggestion('To install, run', SUDO + 'pip3 install virtualenv')
finally:
shutil.rmtree(test_venv_path, ignore_errors=True)
def check_setuptools():
try:
version_str = run('python' + PY_VERSION + ' -m easy_install --version', return_output=True, raises=True)
except Exception:
error('! setuptools is not installed.')
raise AutoFixSuggestion('To install, run', SUDO + 'pip3 install setuptools')
version_str = version_str.split()[1]
version = tuple(map(_int_or, version_str.split('.')))
if version < (39,):
error('! Version is', version_str + ', but should be 39+')
raise AutoFixSuggestion('To upgrade, run', SUDO + 'pip3 install -U setuptools')
def check_wheel():
try:
version_str = run('python' + PY_VERSION + ' -m wheel version ', return_output=True, raises=True)
except Exception:
error('! wheel is not installed.')
raise AutoFixSuggestion('To install, run', SUDO + 'pip3 install wheel')
version_str = version_str.split()[1]
version = tuple(map(_int_or, version_str.split('.')))
if version < (0, 31):
error('! Version is', version_str + ', but should be 0.31+')
raise AutoFixSuggestion('To upgrade, run', SUDO + 'pip3 install -U wheel')
def check_python_dev():
include_path = run('python' + PY_VERSION
+ ' -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())"',
return_output=True)
if not include_path:
error('! Failed to get Python include path, so not sure if Python dev package is installed')
if IS_DEBIAN:
raise AutoFixSuggestion('To install, run', SUDO + ' apt-get install -y python' + PY_VERSION + '-dev')
sys.exit(1)
python_h = os.path.join(include_path.strip(), 'Python.h')
if not os.path.exists(python_h):
error('! Python dev package is not installed as', python_h, 'does not exist')
if IS_DEBIAN:
raise AutoFixSuggestion('To install, run', SUDO + 'apt-get install -y python' + PY_VERSION + '-dev')
sys.exit(1)
def run(cmd, return_output=False, raises=False, **kwargs):
print('+ ' + str(cmd))
if '"' in cmd or '|' in cmd:
kwargs['shell'] = True
elif isinstance(cmd, str):
cmd = cmd.split()
check_call = subprocess.check_output if return_output else subprocess.check_call
try:
output = check_call(cmd, **kwargs)
if isinstance(output, bytes):
output = output.decode('utf-8')
return output
except Exception:
if return_output and not raises:
return
else:
raise
def _int_or(value):
try:
return int(value)
except Exception:
return value
def error(*msg):
msg = ' '.join(map(str, msg))
echo(msg, color=None if AUTOFIX else 'red')
def echo(msg, color=None):
if sys.stdout.isatty() and color:
if color == 'red':
color = '\033[0;31m'
elif color == 'green':
color = '\033[92m'
msg = color + msg + '\033[0m'
print(msg)
class AutoFixSuggestion(Exception):
def __init__(self, instruction, cmd):
super(AutoFixSuggestion, self).__init__(instruction)
self.cmd = cmd
checks = [check_python, check_pip, check_venv, check_setuptools, check_wheel, check_python_dev]
if AUTOFIX:
checks.insert(0, check_curl)
if IS_DEBIAN:
checks.insert(0, check_apt)
if SUDO:
checks.insert(0, check_sudo)
try:
last_fix = None
for check in checks:
print('Checking ' + check.__name__.split('_', 1)[1].replace('_', ' '))
while True:
try:
check()
break
except AutoFixSuggestion as e:
cmds = e.cmd if isinstance(e.cmd, tuple) else (e.cmd,)
if AUTOFIX:
if cmds == last_fix:
error('! Failed to fix automatically, so you gotta fix it yourself.')
sys.exit(1)
else:
for cmd in cmds:
run(cmd, return_output=True, raises=True)
last_fix = cmds
else:
print(' ' + str(e) + ': ' + ' && '.join(cmds) + '\n')
print('# Run the above suggested command(s) manually and then re-run to continue checking,')
print(' or re-run using "python - --autofix" to run all suggested commands automatically.')
sys.exit(1)
print('')
except Exception as e:
error('!', str(e))
sys.exit(1)
except KeyboardInterrupt:
sys.exit(1)
echo('Python is alive and well. Good job!', color='green')
| 34.730897 | 117 | 0.583604 | 161 | 0.015401 | 0 | 0 | 0 | 0 | 0 | 0 | 3,215 | 0.307538 |
653e20753803cf3d8c774a1a90f5c5407a146bd4 | 3,463 | py | Python | pnet/measure.py | changshuowang/PersistenceNetwork | 519aa3b4a123091ae6cc3cf619182b5be54fcac3 | [
"ISC"
] | 1 | 2020-01-20T06:44:14.000Z | 2020-01-20T06:44:14.000Z | pnet/measure.py | changshuowang/PersistenceNetwork | 519aa3b4a123091ae6cc3cf619182b5be54fcac3 | [
"ISC"
] | null | null | null | pnet/measure.py | changshuowang/PersistenceNetwork | 519aa3b4a123091ae6cc3cf619182b5be54fcac3 | [
"ISC"
] | null | null | null | import numpy as np
from sklearn.metrics import average_precision_score as ap
from sklearn.metrics import roc_auc_score
"""
each row is an instance
each column is the prediction of a class
"""
def _score_to_rank(score_list):
rank_array = np.zeros([len(score_list)])
score_array = np.array(score_list)
idx_sorted = (-score_array).argsort()
rank_array[idx_sorted] = np.arange(len(score_list))+1
rank_list = rank_array.tolist()
return rank_list
# For clip evaluation
def auc_y_classwise(Y_target, Y_score):
"""
Y_target: list of lists. {0, 1}
real labels
Y_score: list of lists. real values
prediction values
"""
# Y_target = np.squeeze(np.array(Y_target))
# Y_score = np.squeeze(np.array(Y_score))
Y_target = np.array(Y_target)
Y_score = np.array(Y_score)
auc_list = roc_auc_score(Y_target, Y_score, average=None)
return auc_list
def ap_y_classwise(Y_target, Y_score):
"""
Y_target: list of lists. {0, 1}
real labels
Y_score: list of lists. real values
prediction values
"""
# Y_target = np.squeeze(np.array(Y_target))
# Y_score = np.squeeze(np.array(Y_score))
Y_target = np.array(Y_target)
Y_score = np.array(Y_score)
ap_list = ap(Y_target, Y_score, average=None)
return ap_list
def auc(Y_target, Y_score):
"""
Y_target: list of lists. {0, 1}
real labels
Y_score: list of lists. real values
prediction values
"""
Y_target = np.array(Y_target)
Y_score = np.array(Y_score)
auc_list = []
for i in range(Y_score.shape[1]):
try:
auc = roc_auc_score(Y_target[:, i], Y_score[:, i])
except:
continue
auc_list.append(auc)
return auc_list
def mean_auc(Y_target, Y_score):
auc_list = auc(Y_target, Y_score)
mean_auc = np.mean(auc_list)
return mean_auc
def mean_auc_y(Y_target, Y_score):
'''
along y-axis
'''
return mean_auc(Y_target, Y_score)
def mean_auc_x(Y_target, Y_score):
'''
along x-axis
'''
return mean_auc(np.array(Y_target).T, np.array(Y_score).T)
def mean_average_precision(Y_target, Y_score):
"""
mean average precision
raw-based operation
Y_target: list of lists. {0, 1}
real labels
Y_score: list of lists. real values
prediction values
"""
p = float(len(Y_target))
temp_sum = 0
for y_target, y_score in zip(Y_target, Y_score):
y_target = np.array(y_target)
y_score = np.array(y_score)
if (y_target == 0).all() or (y_target == 1).all():
p -= 1
continue
idx_target = np.nonzero(y_target > 0)[0]
n_target = float(len(idx_target))
rank_list = np.array(_score_to_rank(y_score))
target_rank_list = rank_list[idx_target]
temp_sum_2 = 0
for target_rank in target_rank_list:
mm = sum([1 for ii in idx_target
if rank_list[ii] <= target_rank])/float(target_rank)
temp_sum_2 += mm
temp_sum += temp_sum_2/n_target
measure = temp_sum/p
return measure
def map(Y_target, Y_score):
return mean_average_precision(Y_target, Y_score)
def map_x(Y_target, Y_score):
return mean_average_precision(Y_target, Y_score)
def map_y(Y_target, Y_score):
return mean_average_precision(np.array(Y_target).T,
np.array(Y_score).T)
| 24.913669 | 74 | 0.636442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 905 | 0.261334 |
653e59fcbdd6ab10a8f9cfbeb5d59f5f53315a37 | 6,114 | py | Python | src/trainer/transformations.py | tiborkubik/Robust-Teeth-Detection-in-3D-Dental-Scans-by-Automated-Multi-View-Landmarking | c7d9fa29b3b94ea786da5f4ec11a11520c1b882a | [
"MIT"
] | 2 | 2022-02-20T23:45:47.000Z | 2022-03-14T07:36:53.000Z | src/trainer/transformations.py | tiborkubik/Robust-Teeth-Detection-in-3D-Dental-Scans-by-Automated-Multi-View-Landmarking | c7d9fa29b3b94ea786da5f4ec11a11520c1b882a | [
"MIT"
] | null | null | null | src/trainer/transformations.py | tiborkubik/Robust-Teeth-Detection-in-3D-Dental-Scans-by-Automated-Multi-View-Landmarking | c7d9fa29b3b94ea786da5f4ec11a11520c1b882a | [
"MIT"
] | null | null | null | """
:filename transformations.py
:author Tibor Kubik
:email xkubik34@stud.fit.vutbr.cz
from
Classes of custom transformations that are applied during the training as additional augmentation of the depth maps.
"""
import torch
import random
import numpy as np
import torch.nn.functional as F
from random import randrange
from skimage.transform import resize, warp, AffineTransform
class Normalize(object):
"""Normalization of a depth map in the value of [0, 1] for each pixel."""
def __init__(self, input_type):
self.input_type = input_type
def __call__(self, sample):
if self.input_type == 'geom':
image, landmarks, label = sample['image'], sample['landmarks'], sample['label']
mean, std = image.mean([1, 2]), image.std([1, 2])
# TODO?
return {'image': image,
'landmarks': landmarks,
'label': label}
class ToTensor(object):
"""Transformation of a training sample into a torch tensor instance."""
def __init__(self, input_type):
self.input_type = input_type
def __call__(self, sample):
image, landmarks, label = sample['image'], sample['landmarks'], sample['label']
image = torch.from_numpy(image.copy())
if self.input_type != 'depth+geom':
image = image.unsqueeze(1)
image = image.permute(1, 0, 2)
else:
image = image.permute(2, 0, 1)
landmarks = np.asarray(landmarks)
landmarks = torch.from_numpy(landmarks.copy())
return {'image': image,
'landmarks': landmarks,
'label': label}
class Resize(object):
"""Resizing of the input sample into provided dimensions."""
def __init__(self, width, height, input_type='image'):
assert isinstance(width, int)
assert isinstance(height, int)
self.width = width
self.height = height
self.type = input_type
def __call__(self, sample):
image, landmarks, label = sample['image'], sample['landmarks'], sample['label']
resized_landmarks = landmarks.copy()
if self.type == 'image':
image = resize(image, (self.height, self.width), anti_aliasing=True)
if self.type == 'landmarks':
resized_landmarks = []
for landmark in landmarks:
landmark_resized = resize(landmark, (self.height, self.width), anti_aliasing=True)
resized_landmarks.append(landmark_resized)
return {'image': image,
'landmarks': resized_landmarks,
'label': label}
class RandomTranslating(object):
"""Randomly translate the input sample from range [-10 px, 10 px] with provided probability."""
def __init__(self, p=0.5):
assert isinstance(p, float)
self.p = p
def __call__(self, sample):
image, landmarks, label = sample['image'], sample['landmarks'], sample['label']
translated_landmarks = landmarks.copy()
if np.random.rand(1) < self.p:
n1 = randrange(-10, 10)
n2 = randrange(-10, 10)
t = AffineTransform(translation=(n1, n2))
image = warp(image, t.inverse)
translated_landmarks = []
for landmark in landmarks:
translated_landmarks.append(warp(landmark, t.inverse))
return {'image': image,
'landmarks': translated_landmarks,
'label': label}
class RandomScaling(object):
"""Randomly scales the input sample with scale index from range [0.90, 1.10] with provided probability."""
def __init__(self, p=0.5):
assert isinstance(p, float)
self.p = p
def __call__(self, sample):
image, landmarks, label = sample['image'], sample['landmarks'], sample['label']
scaled_landmarks = landmarks.copy()
if np.random.rand(1) < self.p:
n = random.uniform(0.90, 1.10)
t = AffineTransform(scale=(n, n))
image = warp(image, t.inverse)
scaled_landmarks = []
for landmark in landmarks:
scaled_landmarks.append(warp(landmark, t.inverse))
return {'image': image,
'landmarks': scaled_landmarks,
'label': label}
class RandomRotation(object):
"""Randomly rotates the input sample from range [−11.25 deg, 11.25 deg] with provided probability."""
def __init__(self, p=0.5):
assert isinstance(p, float)
self.p = p
def __call__(self, sample):
image, landmarks, label = sample['image'], sample['landmarks'], sample['label']
rnd_num1 = randrange(-32, -6)
rnd_num2 = randrange(6, 32)
rnd_num = random.choice([rnd_num1, rnd_num2])
if np.random.rand(1) < self.p:
rotated_image = self.rotate(x=image.unsqueeze(0).type(torch.FloatTensor), theta=np.pi/rnd_num)
rotated_landmarks = []
for _, landmark in enumerate(landmarks):
rotated_landmark = self.rotate(x=landmark.unsqueeze(0).unsqueeze(0).type(torch.FloatTensor), theta=np.pi/rnd_num)
rotated_landmarks.append(rotated_landmark.squeeze(0))
result = torch.cat(rotated_landmarks, dim=0)
return {'image': rotated_image.squeeze(0),
'landmarks': result,
'label': label}
return {'image': image,
'landmarks': landmarks,
'label': label}
@staticmethod
def get_rotation_matrix(theta):
"""Returns a tensor rotation matrix with given theta value."""
theta = torch.tensor(theta)
return torch.tensor([[torch.cos(theta), -torch.sin(theta), 0],
[torch.sin(theta), torch.cos(theta), 0]])
def rotate(self, x, theta):
rot_mat = self.get_rotation_matrix(theta)[None, ...].repeat(x.shape[0], 1, 1)
grid = F.affine_grid(rot_mat, x.size(), align_corners=False)
x = F.grid_sample(x, grid, align_corners=False)
return x
| 31.678756 | 129 | 0.596663 | 5,700 | 0.931982 | 0 | 0 | 300 | 0.049052 | 0 | 0 | 1,173 | 0.191792 |
653f3d126c9950c17fb6dd172757205541017a4a | 164 | py | Python | solutions/python3/1009.py | sm2774us/amazon_interview_prep_2021 | f580080e4a6b712b0b295bb429bf676eb15668de | [
"MIT"
] | 42 | 2020-08-02T07:03:49.000Z | 2022-03-26T07:50:15.000Z | solutions/python3/1009.py | ajayv13/leetcode | de02576a9503be6054816b7444ccadcc0c31c59d | [
"MIT"
] | null | null | null | solutions/python3/1009.py | ajayv13/leetcode | de02576a9503be6054816b7444ccadcc0c31c59d | [
"MIT"
] | 40 | 2020-02-08T02:50:24.000Z | 2022-03-26T15:38:10.000Z | class Solution:
def bitwiseComplement(self, N: int, M = 0, m = 0) -> int:
return N ^ M if M and M >= N else self.bitwiseComplement(N, M + 2 ** m, m + 1) | 54.666667 | 86 | 0.579268 | 164 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
653f571cf7026f415159e6338a4dc99110c41d1a | 5,691 | py | Python | cappuccino.py | y-tsutsu/cappuccino | 11212565c064a8e65f5858d2ea457b5cd0389799 | [
"MIT"
] | 1 | 2017-08-15T11:47:37.000Z | 2017-08-15T11:47:37.000Z | cappuccino.py | y-tsutsu/cappuccino | 11212565c064a8e65f5858d2ea457b5cd0389799 | [
"MIT"
] | 1 | 2021-06-02T10:32:27.000Z | 2021-06-02T10:32:27.000Z | cappuccino.py | y-tsutsu/cappuccino | 11212565c064a8e65f5858d2ea457b5cd0389799 | [
"MIT"
] | null | null | null | import random
import shutil
import sys
from argparse import ArgumentParser
from os import path
from pathlib import Path
from threading import Thread
from PySide6 import __version__ as PySideVer
from PySide6.QtCore import (Property, QCoreApplication, QObject, Qt, QTimer,
Signal, Slot)
from PySide6.QtCore import __version__ as QtVer
from PySide6.QtGui import QGuiApplication, QIcon
from PySide6.QtQml import QQmlApplicationEngine
from downloader import Downloader
DOUNLOAD_COUNT = 100
MIN_SIZE = (300, 300)
IMAGE_INTERVAL = 20000
IMAGES_DIR_NAME = path.join(path.abspath(path.dirname(sys.argv[0])), 'images')
DEFAULT_KEYWORD = '女性ヘアカタログロング'
class MainModel(QObject):
is_download_changed = Signal(bool)
def __init__(self, is_download, dirname, parent=None):
super().__init__(parent)
self.__is_download = is_download
self.__dirname = dirname
@Property(bool, notify=is_download_changed)
def is_download(self):
return self.__is_download
@is_download.setter
def is_download(self, value):
if self.__is_download != value:
self.__is_download = value
self.is_download_changed.emit(self.__is_download)
@Slot()
def clear(self):
shutil.rmtree(self.__dirname)
def on_download_completed(self):
self.is_download = False
class DownloaderModel(QObject):
prog_value_changed = Signal(int)
prog_max_changed = Signal(int)
download_completed = Signal()
def __init__(self, download_keyword, dirname, parent=None):
super().__init__(parent)
self.__prog_value = 0
self.__download_keyword = download_keyword
self.__dirname = dirname
self.__downloader = Downloader(self.progress_download_callback)
@Property(int, notify=prog_value_changed)
def prog_value(self):
return self.__prog_value
@prog_value.setter
def prog_value(self, value):
if self.__prog_value != value:
self.__prog_value = value
self.prog_value_changed.emit(self.__prog_value)
@Property(int, notify=prog_max_changed)
def prog_max(self):
return DOUNLOAD_COUNT
@Slot()
def start_download(self):
def _inner(keyword, dirname):
self.__downloader.download_images(keyword, dirname, DOUNLOAD_COUNT, MIN_SIZE)
self.download_completed.emit()
th = Thread(target=_inner, args=(self.__download_keyword, self.__dirname))
th.setDaemon(True)
th.start()
def progress_download_callback(self, progress):
self.prog_value = progress
class ImageViewerModel(QObject):
image_url_changed = Signal(str)
def __init__(self, dirname, parent=None):
super().__init__(parent)
self.__image_url = ''
self.__dirname = dirname
self.__image_list = []
self.__timer = QTimer(self)
self.__timer.setInterval(IMAGE_INTERVAL)
self.__timer.timeout.connect(self.on_timeout)
@Property(str, notify=image_url_changed)
def image_url(self):
return self.__image_url
@image_url.setter
def image_url(self, value):
if self.__image_url != value:
self.__image_url = value
self.image_url_changed.emit(self.__image_url)
@Slot()
def start_view(self):
self.init_image_list()
self.random_set_image()
self.__timer.start()
def init_image_list(self):
self.__image_list = [str(x) for x in Path(self.__dirname).iterdir() if x.is_file()]
def random_set_image(self):
if not self.__image_list:
return
image = random.choice(self.__image_list)
self.__image_list.remove(image)
self.image_url = f'file:///{path.join(self.__dirname, image).replace(path.sep, "/")}'
def on_timeout(self):
if not self.__image_list:
self.init_image_list()
self.random_set_image()
def exist_images():
return path.isdir(IMAGES_DIR_NAME) and any([x.is_file() for x in Path(IMAGES_DIR_NAME).iterdir()])
def initialize_qt():
sys.argv += ['--style', 'Material']
QGuiApplication.setAttribute(Qt.AA_EnableHighDpiScaling)
QCoreApplication.setAttribute(Qt.AA_UseHighDpiPixmaps)
QCoreApplication.setAttribute(Qt.AA_UseOpenGLES)
def resource_path(relative):
if hasattr(sys, '_MEIPASS'):
return path.join(sys._MEIPASS, relative)
return path.join(path.abspath('.'), relative)
def main():
print(f'PySide6=={PySideVer} Qt=={QtVer}')
parser = ArgumentParser(description='cappuccino: Simple image viewer with download')
parser.add_argument('download_keyword', nargs='?', default='', help='image keyword to download')
args = parser.parse_args()
download_keyword = args.download_keyword
if not download_keyword and not exist_images():
download_keyword = DEFAULT_KEYWORD
initialize_qt()
app = QGuiApplication(sys.argv)
app.setWindowIcon(QIcon(resource_path('cappuccino.ico')))
is_download = download_keyword != ''
mmodel = MainModel(is_download, IMAGES_DIR_NAME)
dmodel = DownloaderModel(download_keyword, IMAGES_DIR_NAME)
imodel = ImageViewerModel(IMAGES_DIR_NAME)
dmodel.download_completed.connect(mmodel.on_download_completed)
engine = QQmlApplicationEngine()
engine.rootContext().setContextProperty('mmodel', mmodel)
engine.rootContext().setContextProperty('dmodel', dmodel)
engine.rootContext().setContextProperty('imodel', imodel)
engine.load(f'file:///{resource_path("qml/Main.qml")}')
if not engine.rootObjects():
sys.exit(-1)
sys.exit(app.exec())
if __name__ == '__main__':
main()
| 29.795812 | 102 | 0.692497 | 3,259 | 0.570453 | 0 | 0 | 1,486 | 0.260109 | 0 | 0 | 371 | 0.06494 |
654050d7cccb1b2c34b6283890ccaab142889563 | 48,459 | py | Python | lofti_gaia/lofti.py | logan-pearce/lofti_gaia | b88940553669a134a461bb052843afa0dad4d71f | [
"BSD-3-Clause"
] | 2 | 2020-05-06T07:39:56.000Z | 2020-07-23T16:39:55.000Z | lofti_gaia/lofti.py | logan-pearce/lofti_gaia | b88940553669a134a461bb052843afa0dad4d71f | [
"BSD-3-Clause"
] | null | null | null | lofti_gaia/lofti.py | logan-pearce/lofti_gaia | b88940553669a134a461bb052843afa0dad4d71f | [
"BSD-3-Clause"
] | 2 | 2020-07-25T15:43:52.000Z | 2021-05-18T20:20:55.000Z | import astropy.units as u
import numpy as np
from lofti_gaia.loftitools import *
from lofti_gaia.cFunctions import calcOFTI_C
#from loftitools import *
import pickle
import time
import matplotlib.pyplot as plt
# Astroquery throws some warnings we can ignore:
import warnings
warnings.filterwarnings("ignore")
'''This module obtains measurements from Gaia EDR3 (Gaia DR2 is also available as a secondary option) and runs the LOFTI Gaia/OFTI
wide stellar binary orbit fitting technique.
'''
class Fitter(object):
'''Initialize the Fitter object for the binary system, and compute observational constraints
to be used in the orbit fit. User must provide Gaia source ids, tuples of mass estimates for
both objects, specify the number of desired orbits in posterior sample. Fit will be
for object 2 relative to object 1.
Attributes are tuples of (value,uncertainty) unless otherwise indicated. Attributes
with astropy units are retrieved from Gaia archive, attributes without units are
computed from Gaia values. All relative values are for object 2 relative to object 1.
Args:
sourceid1, sourceid2 (int): Gaia source ids for the two objects, fit will be for motion of \
object 2 relative to object 1
        mass1, mass2 (tuple, flt): tuples of mass estimates for objects 1 and 2, of the form (value, uncertainty)
Norbits (int): Number of desired orbits in posterior sample. Default = 100000
results_filename (str): Filename for fit results files. If none, results will be written to files \
named FitResults.yr.mo.day.hr.min.s
astrometry (dict): User-supplied astrometric measurements. Must be dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates". May be same as the rv table. \
Sep, deltaRA, and deltaDEC must be in arcseconds, PA in degrees, dates in decimal years. \
Default = None
user_rv (dict): User-supplied radial velocity measurements. Must be dictionary or table or pandas dataframe with\
column names "rv,rverr,rv_dates". May be same as the astrometry table. Default = None.
catalog (str): name of Gaia catalog to query. Default = 'gaiaedr3.gaia_source'
ruwe1, ruwe2 (flt): RUWE value from Gaia archive
ref_epoch (flt): reference epoch in decimal years. For Gaia DR2 this is 2015.5, for Gaia EDR3 it is 2016.0
plx1, plx2 (flt): parallax from Gaia in mas
RA1, RA2 (flt): right ascension from Gaia; RA in deg, uncertainty in mas
Dec1, Dec2 (flt): declination from Gaia; Dec in deg, uncertainty in mas
pmRA1, pmRA2 (flt): proper motion in RA in mas yr^-1 from Gaia
pmDec1, pmDec2 (flt): proper motion in DEC in mas yr^-1 from Gaia
rv1, rv2 (flt, optional): radial velocity in km s^-1 from Gaia
        rv (flt, optional): radial velocity of object 2 relative to object 1, if both are present in Gaia
plx (flt): weighted mean parallax for the binary system in mas
distance (flt): distance of system in pc, computed from Gaia parallax using method \
of Bailer-Jones et. al 2018.
deltaRA, deltaDec (flt): relative separation in RA and Dec directions, in mas
pmRA, pmDec (flt): relative proper motion in RA/Dec directions in km s^-1
sep (flt): total separation vector in mas
        pa (flt): position angle of separation vector in degrees from North
sep_au (flt): separation in AU
sep_km (flt): separation in km
total_vel (flt): total velocity vector in km s^-1. If RV is available for both, \
this is the 3d velocity vector; if not it is just the plane of sky velocity.
total_planeofsky_vel (flt): total velocity in the plane of sky in km s^-1. \
In the absence of RV this is equivalent to the total velocity vector.
deltaGmag (flt): relative contrast in Gaia G magnitude. Does not include uncertainty.
        inflateProperMotionError (flt): optional factor by which to multiply the default Gaia proper motion errors.
Written by Logan Pearce, 2020
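    Example:
        A minimal usage sketch (the source ids and masses below are
        placeholders, not measurements of a real system):

            from lofti_gaia.lofti import Fitter, FitOrbit
            fitter = Fitter(1234567890123456789, 1234567890123456788,  # hypothetical source ids
                            mass1 = (1.0, 0.05), mass2 = (0.5, 0.05),
                            Norbits = 100)
            orbits = FitOrbit(fitter)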
'''
def __init__(self, sourceid1, sourceid2, mass1, mass2, Norbits = 100000, \
results_filename = None,
astrometry = None,
user_rv = None,
catalog = 'gaiaedr3.gaia_source',
inflateProperMotionError=1
):
self.sourceid1 = sourceid1
self.sourceid2 = sourceid2
try:
self.mass1 = mass1[0]
self.mass1err = mass1[1]
self.mass2 = mass2[0]
self.mass2err = mass2[1]
self.mtot = [self.mass1 + self.mass2, np.sqrt((self.mass1err**2) + (self.mass2err**2))]
except:
raise ValueError('Masses must be tuples of (value,error), ex: mass1 = (1.0,0.05)')
self.Norbits = Norbits
if not results_filename:
self.results_filename = 'FitResults.'+time.strftime("%Y.%m.%d.%H.%M.%S")+'.txt'
self.stats_filename = 'FitResults.Stats.'+time.strftime("%Y.%m.%d.%H.%M.%S")+'.txt'
else:
self.results_filename = results_filename
self.stats_filename = results_filename+'.Stats.txt'
self.astrometry = False
# check if user supplied astrometry:
if astrometry is not None:
# if so, set astrometric flag to True:
self.astrometry = True
# store observation dates:
self.astrometric_dates = astrometry['dates']
# if in sep/pa, convert to ra/dec:
if 'sep' in astrometry:
try:
astr_ra = [MonteCarloIt([astrometry['sep'][i],astrometry['seperr'][i]]) * \
np.sin(np.radians(MonteCarloIt([astrometry['pa'][i],astrometry['paerr'][i]]))) \
for i in range(len(astrometry['sep']))]
astr_dec = [MonteCarloIt([astrometry['sep'][i],astrometry['seperr'][i]]) * \
np.cos(np.radians(MonteCarloIt([astrometry['pa'][i],astrometry['paerr'][i]]))) \
for i in range(len(astrometry['sep']))]
self.astrometric_ra = np.array([
[np.mean(astr_ra[i]) for i in range(len(astrometry['sep']))],
[np.std(astr_ra[i]) for i in range(len(astrometry['sep']))]
])
self.astrometric_dec = np.array([
[np.mean(astr_dec[i]) for i in range(len(astrometry['sep']))],
[np.std(astr_dec[i]) for i in range(len(astrometry['sep']))]
])
except:
raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"')
elif 'ra' in astrometry:
# else store the ra/dec as attributes:
try:
self.astrometric_ra = np.array([astrometry['ra'], astrometry['raerr']])
self.astrometric_dec = np.array([astrometry['dec'], astrometry['decerr']])
except:
raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"')
else:
raise ValueError('Astrometry keys not recognized. Please provide dictionary or table or pandas dataframe with\
column names "sep,seperr,pa,paerr,dates" or "ra,raerr,dec,decerr,dates"')
# Check if user supplied rv:
self.use_user_rv = False
if user_rv is not None:
# set user rv flag to true:
self.use_user_rv = True
try:
# set attributes; multiply rv by -1 due to difference in coordinate systems:
self.user_rv = np.array([user_rv['rv']*-1,user_rv['rverr']])
self.user_rv_dates = np.array(user_rv['rv_dates'])
except:
raise ValueError('RV keys not recognized. Please use column names "rv,rverr,rv_dates"')
self.catalog = catalog
# Get Gaia measurements, compute needed constraints, and add to object:
self.PrepareConstraints(catalog=self.catalog,inflateFactor=inflateProperMotionError)
def edr3ToICRF(self,pmra,pmdec,ra,dec,G):
''' Corrects for biases in proper motion. The function is from https://arxiv.org/pdf/2103.07432.pdf
Args:
pmra,pmdec (float): proper motion
ra, dec (float): right ascension and declination
G (float): G magnitude
Written by Sam Christian, 2021
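        Example:
            An illustrative call (all values below are placeholders):
                pmra_c, pmdec_c = self.edr3ToICRF(2.0, -3.0, 150.0, -20.0, 12.1)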
'''
        if G>=13:
            return pmra, pmdec
def sind(x):
return np.sin(np.radians(x))
def cosd(x):
return np.cos(np.radians(x))
table1="""
0.0 9.0 9.0 9.5 9.5 10.0 10.0 10.5 10.5 11.0 11.0 11.5 11.5 11.75 11.75 12.0 12.0 12.25 12.25 12.5 12.5 12.75 12.75 13.0
18.4 33.8 -11.3 14.0 30.7 -19.4 12.8 31.4 -11.8 13.6 35.7 -10.5 16.2 50.0 2.1 19.4 59.9 0.2 21.8 64.2 1.0 17.7 65.6 -1.9 21.3 74.8 2.1 25.7 73.6 1.0 27.3 76.6 0.5
34.9 68.9 -2.9 """
table1 = np.fromstring(table1,sep=" ").reshape((12,5)).T
Gmin = table1[0]
Gmax = table1[1]
        #pick the appropriate omegaXYZ for the source's magnitude:
omegaX = table1[2][(Gmin<=G)&(Gmax>G)][0]
omegaY = table1[3][(Gmin<=G)&(Gmax>G)][0]
omegaZ = table1[4][(Gmin<=G)&(Gmax>G)][0]
pmraCorr = -1*sind(dec)*cosd(ra)*omegaX -sind(dec)*sind(ra)*omegaY + cosd(dec)*omegaZ
pmdecCorr = sind(ra)*omegaX -cosd(ra)*omegaY
return pmra-pmraCorr/1000., pmdec-pmdecCorr/1000.
def PrepareConstraints(self, rv=False, catalog='gaiaedr3.gaia_source', inflateFactor=1.):
        '''Retrieves parameters for both objects from the Gaia EDR3 archive, computes system attributes,
and assigns them to the Fitter object class.
Args:
rv (bool): flag for handling the presence or absence of RV measurements for both objects \
in EDR3. Gets set to True if both objects have Gaia RV measurements. Default = False
catalog (str): name of Gaia catalog to query. Default = 'gaiaedr3.gaia_source'
inflateFactor (flt): Factor by which to inflate the errors on Gaia proper motions to \
account for improper uncertainty estimates. Default = 1.0
Written by Logan Pearce, 2020
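        Example:
            Called automatically when the Fitter object is created; to query
            Gaia DR2 instead of EDR3, for instance:
                self.PrepareConstraints(catalog = 'gaiadr2.gaia_source')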
'''
from astroquery.gaia import Gaia
deg_to_mas = 3600000.
mas_to_deg = 1./3600000.
# Retrieve astrometric solution from Gaia EDR3
job = Gaia.launch_job("SELECT * FROM "+catalog+" WHERE source_id = "+str(self.sourceid1))
j = job.get_results()
job = Gaia.launch_job("SELECT * FROM "+catalog+" WHERE source_id = "+str(self.sourceid2))
k = job.get_results()
if catalog == 'gaiadr2.gaia_source':
# Retrieve RUWE from RUWE catalog for both sources and add to object state:
job = Gaia.launch_job("SELECT * FROM gaiadr2.ruwe WHERE source_id = "+str(self.sourceid1))
jruwe = job.get_results()
job = Gaia.launch_job("SELECT * FROM gaiadr2.ruwe WHERE source_id = "+str(self.sourceid2))
kruwe = job.get_results()
self.ruwe1 = jruwe['ruwe'][0]
self.ruwe2 = kruwe['ruwe'][0]
else:
# EDR3 contains ruwe in the main catalog:
self.ruwe1 = j['ruwe'][0]
self.ruwe2 = k['ruwe'][0]
# Check RUWE for both objects and warn if too high:
if self.ruwe1>1.2 or self.ruwe2>1.2:
print('''WARNING: RUWE for one or more of your solutions is greater than 1.2. This indicates
that the source might be an unresolved binary or experiencing acceleration
during the observation. Orbit fit results may not be trustworthy.''')
# reference epoch:
self.ref_epoch = j['ref_epoch'][0]
# parallax:
self.plx1 = [j[0]['parallax']*u.mas, j[0]['parallax_error']*u.mas]
self.plx2 = [k[0]['parallax']*u.mas, k[0]['parallax_error']*u.mas]
# RA/DEC
self.RA1 = [j[0]['ra']*u.deg, j[0]['ra_error']*mas_to_deg*u.deg]
self.RA2 = [k[0]['ra']*u.deg, k[0]['ra_error']*mas_to_deg*u.deg]
self.Dec1 = [j[0]['dec']*u.deg, j[0]['dec_error']*mas_to_deg*u.deg]
self.Dec2 = [k[0]['dec']*u.deg, k[0]['dec_error']*mas_to_deg*u.deg]
# Proper motions
pmRACorrected1,pmDecCorrected1 = self.edr3ToICRF(j[0]['pmra'],j[0]['pmdec'],j[0]['ra'],j[0]['dec'],j[0]["phot_g_mean_mag"])
pmRACorrected2,pmDecCorrected2 = self.edr3ToICRF(k[0]['pmra'],k[0]['pmdec'],k[0]['ra'],k[0]['dec'],k[0]["phot_g_mean_mag"])
self.pmRA1 = [pmRACorrected1*u.mas/u.yr, j[0]['pmra_error']*u.mas/u.yr*inflateFactor]
self.pmRA2 = [pmRACorrected2*u.mas/u.yr, k[0]['pmra_error']*u.mas/u.yr*inflateFactor]
self.pmDec1 = [pmDecCorrected1*u.mas/u.yr, j[0]['pmdec_error']*u.mas/u.yr*inflateFactor]
self.pmDec2 = [pmDecCorrected2*u.mas/u.yr, k[0]['pmdec_error']*u.mas/u.yr*inflateFactor]
# See if both objects have RV's in DR2:
if catalog == 'gaiaedr3.gaia_source':
key = 'dr2_radial_velocity'
error_key = 'dr2_radial_velocity_error'
elif catalog == 'gaiadr2.gaia_source':
key = 'radial_velocity'
error_key = 'radial_velocity_error'
if type(k[0][key]) == np.float64 and type(j[0][key]) == np.float64 or type(k[0][key]) == np.float32 and type(j[0][key]) == np.float32:
rv = True
self.rv1 = [j[0][key]*u.km/u.s,j[0][error_key]*u.km/u.s]
self.rv2 = [k[0][key]*u.km/u.s,k[0][error_key]*u.km/u.s]
rv1 = MonteCarloIt(self.rv1)
rv2 = MonteCarloIt(self.rv2)
self.rv = [ -np.mean(rv2-rv1) , np.std(rv2-rv1) ] # km/s
            # negative to reflect change in coordinate system from RV measurements to lofti
# pos RV = towards observer in this coord system
else:
self.rv = [0,0]
# weighted mean of parallax values:
plx = np.average([self.plx1[0].value,self.plx2[0].value], weights = [self.plx1[1].value,self.plx2[1].value])
plxerr = np.max([self.plx1[1].value,self.plx2[1].value])
self.plx = [plx,plxerr] # mas
self.distance = distance(*self.plx) # pc
# Compute separations of component 2 relative to 1:
r1 = MonteCarloIt(self.RA1)
r2 = MonteCarloIt(self.RA2)
d1 = MonteCarloIt(self.Dec1)
d2 = MonteCarloIt(self.Dec2)
ra = (r2*deg_to_mas - r1*deg_to_mas) * np.cos(np.radians(np.mean([self.Dec1[0].value,self.Dec2[0].value])))
dec = ((d2 - d1)*u.deg).to(u.mas).value
self.deltaRA = [np.mean(ra),np.std(ra)] # mas
self.deltaDec = [np.mean(dec),np.std(dec)] # mas
# compute relative proper motion:
pr1 = MonteCarloIt(self.pmRA1)
pr2 = MonteCarloIt(self.pmRA2)
pd1 = MonteCarloIt(self.pmDec1)
pd2 = MonteCarloIt(self.pmDec2)
pmRA = [np.mean(pr2 - pr1), np.std(pr2-pr1)] # mas/yr
pmDec = [np.mean(pd2 - pd1), np.std(pd2 - pd1)] # mas/yr
self.pmRA = masyr_to_kms(pmRA,self.plx) # km/s
self.pmDec = masyr_to_kms(pmDec,self.plx) # km/s
# Compute separation/position angle:
r, p = to_polar(r1,r2,d1,d2)
self.sep = tuple([np.mean(r).value, np.std(r).value]) # mas
self.pa = tuple([np.mean(p).value, np.std(p).value]) # deg
self.sep_au = tuple([((self.sep[0]/1000)*self.distance[0]), ((self.sep[1]/1000)*self.distance[0])])
self.sep_km = tuple([ self.sep_au[0]*u.au.to(u.km) , self.sep_au[1]*u.au.to(u.km)])
# compute total velocities:
if rv:
self.total_vel = [ add_in_quad([self.pmRA[0],self.pmDec[0],self.rv[0]]) ,
add_in_quad([self.pmRA[1],self.pmDec[1],self.rv[1]]) ] # km/s
self.total_planeofsky_vel = [ add_in_quad([self.pmRA[0],self.pmDec[0]]) ,
add_in_quad([self.pmRA[1],self.pmDec[1]]) ] # km/s
else:
self.total_vel = [ add_in_quad([self.pmRA[0],self.pmDec[0]]) ,
add_in_quad([self.pmRA[1],self.pmDec[1]]) ] # km/s
self.total_planeofsky_vel = self.total_vel.copy() # km/s
# compute deltamag:
self.deltaGmag = j[0]['phot_g_mean_mag'] - k[0]['phot_g_mean_mag']
class FitOrbit(object):
''' Object for performing an orbit fit. Takes attributes from Fitter class.
ex: orbits = FitOrbit(fitterobject)
Args:
fitterobject (Fitter object): Fitter object initialized from the Fitter class
write_stats (bool): If True, write out summary statistics of orbit sample at \
conclusion of fit. Default = True.
write_results (bool): If True, write out the fit results to a pickle file \
in addition to the text file created during the fit. Default = True.
deltaRA, deltaDec (flt): relative separation in RA and Dec directions, in mas
pmRA, pmDec (flt): relative proper motion in RA/Dec directions in km s^-1
        rv (flt, optional): radial velocity of object 2 relative to object 1, if both are present in Gaia EDR3
mtot_init (flt): initial total system mass in Msun from user input
distance (flt): distance of system in pc, computed from Gaia parallax using method of Bailer-Jones et. al 2018.
sep (flt): separation vector in mas
        pa (flt): position angle of separation vector in degrees from North
ref_epoch (flt): epoch of the measurement, 2016.0 for Gaia EDR3 and 2015.5 for Gaia DR2.
Norbits (int): number of desired orbit samples
write_stats (bool): if True, write summary of sample statistics to human-readable file at end of run. Default = True
        write_results (bool): if True, write the accepted orbit sample to a pickle file at the end of the run \
            (accepted orbits are also appended to the results text file as the fit progresses). RECOMMENDED. Default = True
        results_filename (str): name of file for saving pickled results to disk. If not supplied, \
            default name is FitResults.y.mo.d.h.m.s.pkl, saved in same directory as fit was run.
        stats_filename (str): name of file for saving human-readable file of stats of sample results. If not supplied, \
            default name is FitResults.Stats.y.mo.d.h.m.s.txt, saved in same directory as fit was run.
run_time (flt): run time for the last fit. astropy units object
Written by Logan Pearce, 2020
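    Example:
        A sketch of retrieving fit results (assumes fitterobject was built as
        in the Fitter example):

            orbits = FitOrbit(fitterobject)
            sample = orbits.results.orbits        # Norbits x 13 array of accepted orbits
            print(orbits.results.stats.sma.mean)  # mean semi-major axis in arcsec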
'''
def __init__(self, fitterobject, write_stats = True, write_results = True, python_version=False, \
use_pm_cross_term = False, corr_coeff = None):
# establish fit parameters:
self.deltaRA = fitterobject.deltaRA
self.deltaDec = fitterobject.deltaDec
self.pmRA = fitterobject.pmRA
self.pmDec = fitterobject.pmDec
self.rv = fitterobject.rv
self.mtot_init = fitterobject.mtot
self.distance = fitterobject.distance
self.sep = fitterobject.sep
self.pa = fitterobject.pa
self.ref_epoch = fitterobject.ref_epoch
self.Norbits = fitterobject.Norbits
self.write_results = write_results
self.write_stats = write_stats
self.results_filename = fitterobject.results_filename
self.stats_filename = fitterobject.stats_filename
self.astrometry = fitterobject.astrometry
if self.astrometry:
self.astrometric_ra = fitterobject.astrometric_ra
self.astrometric_dec = fitterobject.astrometric_dec
self.astrometric_dates = fitterobject.astrometric_dates
self.use_user_rv = fitterobject.use_user_rv
if self.use_user_rv:
self.user_rv = fitterobject.user_rv
self.user_rv_dates = fitterobject.user_rv_dates
# run orbit fitter:
self.fitorbit(python_fitOFTI=python_version, use_pm_cross_term = use_pm_cross_term, corr_coeff = corr_coeff)
def fitorbit(self, save_results_every_X_loops = 100, python_fitOFTI=False, use_pm_cross_term = False, corr_coeff = None):
'''Run the OFTI fitting run on the Fitter object. Called when FitOrbit object
is created.
Args:
            save_results_every_X_loops (int): on every Xth loop, save status of the \
                orbit sample arrays to a pickle file, if write_results = True (Default). \
                Not currently applied; accepted orbits are appended to the results text file as they are found.
            python_fitOFTI (bool): If True, fit using python only without using C Kepler's equation solver. Default = False
            use_pm_cross_term (bool): If True, include the proper motion correlation cross term in the Chi^2 computation \
                Default = False
            corr_coeff (flt): correlation coefficient between the RA and Dec proper motions, used when \
                use_pm_cross_term = True. Default = None
Written by Logan Pearce, 2020
'''
# write header:
print('Saving orbits in',self.results_filename)
k = open(self.results_filename, 'w')
output_file_header = '# sma [arcsec] period [yrs] orbit phase t_0 [yr] ecc incl [deg]\
argp [deg] lan [deg] m_tot [Msun] dist [pc] chi^2 ln(prob) ln(randn)'
k.write(output_file_header + "\n")
k.close()
import time as tm
########### Perform initial run to get initial chi-squared: #############
# Draw random orbits:
#parameters = a,T,const,to,e,i,w,O,m1,dist
numSamples = 10000
parameters_init = draw_samples(numSamples, self.mtot_init, self.distance, self.ref_epoch)
# Compute positions and velocities:
if(python_fitOFTI):
X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot,parameters=calc_OFTI(parameters_init,self.ref_epoch,self.sep,self.pa)
else:
returnArray = np.zeros((19,numSamples))
returnArray = calcOFTI_C(parameters_init,self.ref_epoch,self.sep,self.pa,returnArray.copy())
X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot = returnArray[0:9]
parameters = returnArray[9:]
# Compute chi squared:
if self.rv[0] != 0:
model = np.array([Y,X,Ydot,Xdot,Zdot])
data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec, self.rv])
else:
model = np.array([Y,X,Ydot,Xdot])
data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec])
chi2 = ComputeChi2(data,model)
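        # Optionally subtract the RA/Dec proper motion correlation cross term
        # (corr_coeff is the user-supplied correlation coefficient between the
        # two proper motion components, e.g. Gaia's pmra/pmdec correlation):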
if use_pm_cross_term:
chi2 -= ( 2 * corr_coeff * (data[2][0] - model[2]) * (data[3][0] - model[3]) ) / (data[2][1] * data[3][1])
if self.astrometry:
p = parameters.copy()
a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9]
chi2_astr = np.zeros(10000)
# Calculate predicted positions at astr observation dates for each orbit:
for j in range(self.astrometric_ra.shape[1]):
# for each date, compute XYZ for each 10000 trial orbit. We can
# skip scale and rotate because that was accomplished in the calc_OFTI call above.
X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.astrometric_dates[j])
# Place astrometry into data array where: data[0][0]=ra obs, data[0][1]=ra err, etc:
data = np.array([self.astrometric_ra[:,j], self.astrometric_dec[:,j]])
                # place corresponding predicted positions at that date for each trial orbit in mas:
model = np.array([Y1*1000,X1*1000])
# compute chi2 for trial orbits at that date and add to the total chi2 sum:
chi2_astr += ComputeChi2(data,model)
chi2 = chi2 + chi2_astr
if self.use_user_rv:
p = parameters.copy()
a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9]
chi2_rv = np.zeros(10000)
for j in range(self.user_rv.shape[1]):
# compute ecc anomaly at that date:
X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.user_rv_dates[j])
# compute velocities at that ecc anom:
Xdot,Ydot,Zdot = calc_velocities(a,T,to,e,i,w,O,dist,E1)
# compute chi2:
chi2_rv += ComputeChi2(np.array([self.user_rv[:,j]]),np.array([Zdot]))
chi2 = chi2 + chi2_rv
        print('initial chi min',np.nanmin(chi2))
self.chi_min = np.nanmin(chi2)
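        # (AcceptOrReject keeps an orbit with probability exp(-(chi2 - chi_min)/2):
        # lnprob = -(chi2 - chi_min)/2 is compared against the log of a uniform
        # random draw, as in the re-evaluation steps below.)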
# Accept/reject:
accepted, lnprob, lnrand = AcceptOrReject(chi2,self.chi_min)
# count number accepted:
number_orbits_accepted = np.size(accepted)
# tack on chi2, log probability, log random unif number to parameters array:
parameters = np.concatenate((parameters,chi2[None,:],lnprob[None,:],lnrand[None,:]), axis = 0)
# transpose:
parameters=np.transpose(parameters)
# write results to file:
k = open(self.results_filename, 'a')
for params in parameters[accepted]:
string = ' '.join([str(p) for p in params])
k.write(string + "\n")
k.close()
###### start loop ########
# initialize:
loop_count = 0
start=tm.time()
while number_orbits_accepted < self.Norbits:
# Draw random orbits:
numSamples = 10000
parameters_init = draw_samples(numSamples, self.mtot_init, self.distance, self.ref_epoch)
# Compute positions and velocities and new parameters array with scaled and rotated values:
if(python_fitOFTI):
X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot,parameters=calc_OFTI(parameters_init,self.ref_epoch,self.sep,self.pa)
else:
returnArray = np.zeros((19,numSamples))
returnArray = calcOFTI_C(parameters_init,self.ref_epoch,self.sep,self.pa,returnArray.copy())
X,Y,Z,Xdot,Ydot,Zdot,Xddot,Yddot,Zddot = returnArray[0:9]
parameters = returnArray[9:]
returnArray = None
# compute chi2 for orbits using Gaia observations:
if self.rv[0] != 0:
model = np.array([Y,X,Ydot,Xdot,Zdot])
data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec, self.rv])
else:
model = np.array([Y,X,Ydot,Xdot])
data = np.array([self.deltaRA, self.deltaDec, self.pmRA, self.pmDec])
chi2 = ComputeChi2(data,model)
if use_pm_cross_term:
                chi2 -= ( 2 * corr_coeff * (data[2][0] - model[2]) * (data[3][0] - model[3]) ) / (data[2][1] * data[3][1])
# add user astrometry if given:
if self.astrometry:
p = parameters.copy()
a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9]
chi2_astr = np.zeros(10000)
# Calculate predicted positions at astr observation dates for each orbit:
for j in range(self.astrometric_ra.shape[1]):
# for each date, compute XYZ for each 10000 trial orbit. We can
# skip scale and rotate because that was accomplished in the calc_OFTI call above.
X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.astrometric_dates[j])
# Place astrometry into data array where: data[0][0]=ra obs, data[0][1]=ra err, etc:
data = np.array([self.astrometric_ra[:,j], self.astrometric_dec[:,j]])
                    # place corresponding predicted positions at that date for each trial orbit in mas:
model = np.array([Y1*1000,X1*1000])
# compute chi2 for trial orbits at that date and add to the total chi2 sum:
chi2_astr += ComputeChi2(data,model)
chi2 = chi2 + chi2_astr
# add user rv if given:
if self.use_user_rv:
p = parameters.copy()
a,T,const,to,e,i,w,O,m1,dist = p[0],p[1],p[2],p[3],p[4],p[5],p[6],p[7],p[8],p[9]
chi2_rv = np.zeros(10000)
for j in range(self.user_rv.shape[1]):
# compute ecc anomaly at that date:
X1,Y1,Z1,E1 = calc_XYZ(a,T,to,e,i,w,O,self.user_rv_dates[j])
# compute velocities at that ecc anom:
Xdot,Ydot,Zdot = calc_velocities(a,T,to,e,i,w,O,dist,E1)
# compute chi2:
chi2_rv += ComputeChi2(np.array([self.user_rv[:,j]]),np.array([Zdot]))
chi2 = chi2 + chi2_rv
# Accept/reject:
accepted, lnprob, lnrand = AcceptOrReject(chi2,self.chi_min)
if np.size(accepted) == 0:
pass
else:
# count num accepted
number_orbits_accepted += np.size(accepted)
parameters = np.concatenate((parameters,chi2[None,:],lnprob[None,:],lnrand[None,:]), axis = 0)
parameters=np.transpose(parameters)
k = open(self.results_filename, 'a')
for params in parameters[accepted]:
string = ' '.join([str(p) for p in params])
k.write(string + "\n")
k.close()
if np.nanmin(chi2) < self.chi_min:
# If there is a new min chi2:
self.chi_min = np.nanmin(chi2)
#print('found new chi min:',self.chi_min)
# re-evaluate to accept/reject with new chi_min:
if number_orbits_accepted != 0:
dat = np.loadtxt(open(self.results_filename,"r"),delimiter=' ',ndmin=2)
lnprob = -(dat[:,10]-self.chi_min)/2.0
dat[:,11] = lnprob
accepted_retest = np.where(lnprob > dat[:,12])
q = open(self.results_filename, 'w')
q.write(output_file_header + "\n")
for data in dat[accepted_retest]:
string = ' '.join([str(d) for d in data])
q.write(string + "\n")
q.close()
dat2 = np.loadtxt(open(self.results_filename,"r"),delimiter=' ',ndmin=2)
number_orbits_accepted=dat2.shape[0]
loop_count += 1
#print('loop count',loop_count)
update_progress(number_orbits_accepted,self.Norbits)
# one last accept/reject with final chi_min value:
dat = np.loadtxt(open(self.results_filename,"r"),delimiter=' ',ndmin=2)
lnprob = -(dat[:,10]-self.chi_min)/2.0
dat[:,11] = lnprob
accepted_retest = np.where(lnprob > dat[:,12])
q = open(self.results_filename, 'w')
q.write(output_file_header + "\n")
for data in dat[accepted_retest]:
string = ' '.join([str(d) for d in data])
q.write(string + "\n")
q.close()
# when finished, upload results and store in object:
dat = np.loadtxt(open(self.results_filename,"r"),delimiter=' ',ndmin=2)
number_orbits_accepted=dat.shape[0]
print('Final Norbits:', number_orbits_accepted)
# intialise results object and store accepted orbits:
if self.rv[0] != 0:
self.results = Results(orbits = dat, limit_lan = False, limit_aop = False)
else:
self.results = Results(orbits = dat, limit_lan = True, limit_aop = False)
self.results.Update(self.results.orbits)
# pickle dump the results attribute:
if self.write_results:
self.results.SaveResults(self.results_filename.replace(".txt", ".pkl"), write_text_file = False)
stop = tm.time()
self.results.run_time = (stop - start)*u.s
# compute stats and write to file:
self.results.stats = Stats(orbits = self.results.orbits, write_to_file = self.write_stats, filename = self.stats_filename)
class Results(object):
'''A class for storing and manipulating the results of the orbit fit.
Args:
orbits (Norbits x 13 array): array of accepted orbits from \
OFTI fit in the same order as the following attributes
sma (1 x Norbits array): semi-major axis in arcsec
period (1 x Norbits array): period in years
orbit_fraction (1 x Norbits array): fraction of orbit past periastron \
            passage the observation (2016) occurred on. Values: [0,1)
t0 (1 x Norbits array): date of periastron passage in decimal years
ecc (1 x Norbits array): eccentricity
inc (1 x Norbits array): inclination relative to plane of the sky in deg
        aop (1 x Norbits array): argument of periastron in deg
lan (1 x Norbits array): longitude of ascending node in deg
mtot (1 x Norbits array): total system mass in Msun
distance (1 x Norbits array): distance to system in parsecs
chi2 (1 x Norbits array): chi^2 value for the orbit
lnprob (1 x Norbits array): log probability of orbit
lnrand (1 x Norbits array): log of random "dice roll" for \
orbit acceptance
limit_aop, limit_lan (bool): In the absence of radial velocity info, \
there is a degeneracy between arg of periastron and long of ascending \
node. Common practice is to limit one to the interval [0,180] deg. \
By default, lofti limits lan to this interval if rv = False. The user can \
choose to limit aop instead by setting limit_aop = True, limit_lan = False. \
The orbits[:,6] (aop) and orbits[:,7] (lan) arrays preserve the original values. \
Written by Logan Pearce, 2020
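    Example:
        A sketch of inspecting a finished fit (assumes fitorbit is a completed
        FitOrbit object):

            results = fitorbit.results
            fig = results.PlotHists()
            fig.savefig('posterior_hists.png')  # filename is a placeholder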
'''
def __init__(self, orbits = [], limit_aop = False, limit_lan = True):
self.orbits = orbits
self.limit_lan = limit_lan
self.limit_aop = limit_aop
def Update(self, orbits):
'''Take elements of the "orbits" attribute and populate
the orbital element attributes
Args:
orbits (arr): orbits array from Results class
Written by Logan Pearce, 2020
'''
self.sma = orbits[:,0]
self.period = orbits[:,1]
self.orbit_fraction = orbits[:,2]
self.t0 = orbits[:,3]
self.ecc = orbits[:,4]
self.inc = orbits[:,5]
self.aop = orbits[:,6]
if self.limit_aop:
self.aop = limit_to_180deg(self.aop)
self.lan = orbits[:,7] % 360
if self.limit_lan:
self.lan = limit_to_180deg(self.lan)
self.mtot = orbits[:,8]
self.distance = orbits[:,9]
self.chi2 = orbits[:,10]
self.lnprob = orbits[:,11]
self.lnrand = orbits[:,12]
def SaveResults(self, filename, write_text_file = False, text_filename = None):
'''Save the orbits and orbital parameters attributes in a pickle file
Args:
filename (str): filename for pickle file
write_text_file (bool): if True, also write out the accepted orbits to a \
human readable text file
            text_filename (str): if write_text_file = True, specify filename for text file
Written by Logan Pearce, 2020
'''
pickle.dump(self, open( filename, "wb" ) )
# write results to file:
if write_text_file:
k = open(text_filename, 'a')
for params in self.orbits:
string = ' '.join([str(p) for p in params])
k.write(string + "\n")
k.close()
def LoadResults(self, filename, append = False):
'''Read in the orbits and orbital parameters attributes from a pickle file
Args:
filename (str): filename of pickle file to load
append (bool): if True, append read in orbit samples to another Results \
object. Default = False.
Written by Logan Pearce, 2020
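        Example (filename is a placeholder):
            results = Results()
            results.LoadResults('FitResults.2020.01.01.12.00.00.pkl')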
'''
results_in = pickle.load( open( filename, "rb" ) )
if append == False:
self.orbits = results_in.orbits
self.Update(self.orbits)
else:
self.orbits = np.vstack((self.orbits,results_in.orbits))
self.Update(self.orbits)
# plotting results:
def PlotHists(self):
'''Plot 1-d histograms of orbital elements 'sma','ecc','inc','aop','lan','t0' from fit results.
Written by Logan Pearce, 2020
'''
        if len(self.sma) < 50:
bins = 50
else:
bins = 'fd'
fig = plt.figure(figsize=(30, 5.5))
params = np.array([self.sma,self.ecc,self.inc,self.aop,self.lan,self.t0])
names = np.array(['sma','ecc','inc','aop','lan','t0'])
for i in range(len(params)):
ax = plt.subplot2grid((1,len(params)), (0,i))
plt.hist(params[i],bins=bins,edgecolor='none',alpha=0.8)
plt.tick_params(axis='both', left=False, top=False, right=False, bottom=True, \
labelleft=False, labeltop=False, labelright=False, labelbottom=True)
plt.xticks(rotation=45, fontsize = 20)
plt.xlabel(names[i], fontsize = 25)
plt.tight_layout()
return fig
def PlotOrbits(self, color = True, colorbar = True, ref_epoch = 2016.0, size = 100, plot3d = False, cmap = 'viridis',xlim=False,ylim=False):
'''Plot a random selection of orbits from the sample in the plane of the sky.
Args:
color (bool): if True, plot orbit tracks using a colormap scale to orbit fraction (phase) \
                past the reference epoch (2016.0). If False, orbit tracks will be black. Default = True
            colorbar (bool): if True and color = True, plot colorbar for orbit phase
            ref_epoch (flt): reference epoch for drawing orbits. Default = 2016.0
            size (int): Number of orbits to plot. Default = 100
plot3d (bool): If True, return a plot of orbits in 3D space. Default = False
cmap (str): colormap for orbit phase plot
Written by Logan Pearce, 2020
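        Example (assumes results is a populated Results object):
            fig = results.PlotOrbits(size = 50, plot3d = True)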
'''
# Random selection of orbits to plot:
if len(self.sma) > size:
# if there are more orbits than desired size, randomly select orbits from
# the posterior sample:
ind = np.random.choice(range(0,len(self.sma)),replace=False,size=size)
else:
# if there are fewer orbits than desired size, take all of them:
ind = np.random.choice(range(0,len(self.sma)),replace=False,size=len(self.sma))
from numpy import tan, arctan, sqrt, cos, sin, arccos
# label for colormap axis:
colorlabel = 'Phase'
# create figure:
fig = plt.figure(figsize = (7.5, 6.))
plt.grid(ls=':')
# invert X axis for RA:
plt.gca().invert_xaxis()
        if plot3d:
            # registers the 3d projection (needed for older matplotlib versions):
            from mpl_toolkits.mplot3d import Axes3D
            # Make 3d axis object:
            ax = fig.add_subplot(111, projection='3d')
# plot central star:
ax.scatter(0,0,0,color='orange',marker='*',s=300,zorder=10)
ax.set_zlabel('Z (")',fontsize=20)
else:
# plot central star:
plt.scatter(0,0,color='orange',marker='*',s=300,zorder=10)
# For each orbit in the random selection from the posterior samples:
for a,T,to,e,i,w,O in zip(self.sma[ind],self.period[ind],self.t0[ind],self.ecc[ind],np.radians(self.inc[ind]),\
np.radians(self.aop[ind]),np.radians(self.lan[ind])):
# define an array of times along orbit:
times = np.linspace(ref_epoch,ref_epoch+T,5000)
X,Y,Z = np.array([]),np.array([]),np.array([])
E = np.array([])
# Compute X,Y,Z positions for each time:
for t in times:
n = (2*np.pi)/T
M = n*(t-to)
nextE = [danby_solve(eccentricity_anomaly, varM,vare, 0.001) for varM,vare in zip([M],[e])]
E = np.append(E,nextE)
r1 = a*(1.-e*cos(E))
f1 = sqrt(1.+e)*sin(E/2.)
f2 = sqrt(1.-e)*cos(E/2.)
f = 2.*np.arctan2(f1,f2)
r = (a*(1.-e**2))/(1.+(e*cos(f)))
X1 = r * ( cos(O)*cos(w+f) - sin(O)*sin(w+f)*cos(i) )
Y1 = r * ( sin(O)*cos(w+f) + cos(O)*sin(w+f)*cos(i) )
Z1 = r * sin(w+f) * sin(i)
X,Y,Z = np.append(X,X1),np.append(Y,Y1),np.append(Z,Z1)
# Plot the X,Y(Z) positions:
if not plot3d:
if color:
plt.scatter(Y,X,c=((times-ref_epoch)/T),cmap=cmap,s=3,lw=0)
plt.gca().set_aspect('equal', adjustable='datalim')
else:
plt.plot(Y,X, color='black',alpha=0.3)
plt.gca().set_aspect('equal', adjustable='datalim')
if plot3d:
if color:
ax.scatter(Y,X,Z,c=((times-ref_epoch)/T),cmap=cmap,s=3,lw=0)
else:
ax.plot(Y,X,Z, color='black',alpha=0.3)
# plot colorbar:
if not plot3d:
if color:
if colorbar == True:
cb = plt.colorbar().set_label(colorlabel, fontsize=20)
plt.gca().tick_params(labelsize=14)
plt.ylabel('Dec (")',fontsize=20)
plt.xlabel('RA (")',fontsize=20)
plt.gca().tick_params(labelsize=14)
if(xlim):
plt.xlim(xlim)
if(ylim):
plt.ylim(ylim)
return fig
def PlotSepPA(self, ref_epoch = 2016.0, size = 100, timespan = [20,20], orbitcolor = 'skyblue'):
'''Plot a random selection of orbits from the sample in separation and position angle as
a function of time.
Args:
            ref_epoch (flt): reference epoch for drawing orbits. Default = 2016.0
            size (int): Number of orbits to plot. Default = 100
timespan (tuple, int): number of years before [0] and after [1] the ref epoch to \
plot sep and pa
orbitcolor (str): color to use to plot the orbits
Written by Logan Pearce, 2020
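        Example (assumes results is a populated Results object):
            # plot 10 years before and 50 years after the reference epoch:
            fig = results.PlotSepPA(size = 50, timespan = [10, 50])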
'''
# Random selection of orbits to plot:
if len(self.sma) > size:
# if there are more orbits than desired size, randomly select orbits from
# the posterior sample:
ind = np.random.choice(range(0,len(self.sma)),replace=False,size=size)
else:
# if there are fewer orbits than desired size, take all of them:
ind = np.random.choice(range(0,len(self.sma)),replace=False,size=len(self.sma))
from numpy import tan, arctan, sqrt, cos, sin, arccos
# make figure
fig = plt.figure(figsize = (8, 10))
# define subplots:
plt.subplot(2,1,1)
plt.gca().tick_params(labelsize=14)
plt.grid(ls=':')
# define times to compute sep/pa:
tmin,tmax = ref_epoch - timespan[0],ref_epoch + timespan[1]
t = np.linspace(tmin,tmax,2000)
date_ticks = np.arange(tmin,tmax,10)
# for each selected orbit from the sample:
for a,T,to,e,i,w,O in zip(self.sma[ind],self.period[ind],self.t0[ind],self.ecc[ind],np.radians(self.inc[ind]),\
np.radians(self.aop[ind]),np.radians(self.lan[ind])):
X = np.array([])
Y = np.array([])
# compute X,Y at each time point:
X1,Y1 = orbits_for_plotting(a,T,to,e,i,w,O,t)
X = np.append(X, X1)
Y = np.append(Y,Y1)
# compute sep:
r=np.sqrt((X**2)+(Y**2))
# plot sep in mas:
plt.plot(t,r*1000,color=orbitcolor,alpha=0.5)
plt.ylabel(r'$\rho$ (mas)',fontsize=20)
        # next subplot:
plt.subplot(2,1,2)
plt.grid(ls=':')
# for each selected orbit from the sample:
for a,T,to,e,i,w,O in zip(self.sma[ind],self.period[ind],self.t0[ind],self.ecc[ind],np.radians(self.inc[ind]),\
np.radians(self.aop[ind]),np.radians(self.lan[ind])):
X = np.array([])
Y = np.array([])
X1,Y1 = orbits_for_plotting(a,T,to,e,i,w,O,t)
X = np.append(X, X1)
Y = np.append(Y,Y1)
            # compute position angle in degrees east of north:
theta=np.arctan2(X,-Y)
theta=(np.degrees(theta)+270.)%360
# plot it:
plt.plot(t,theta,color=orbitcolor,alpha=0.5)
plt.ylabel(r'P.A. (deg)',fontsize=19)
plt.xlabel('Years',fontsize=19)
plt.gca().tick_params(labelsize=14)
plt.tight_layout()
return fig
class Stats(object):
'''A class for storing and manipulating the statistics of the results of the orbit fit.
For every parameter, there is a series of stats computed and saved as stats.param.stat
Examples:
stats.sma.mean = mean of semimajor axis
stats.ecc.ci68 = 68% confidence interval for eccentricity
stats.aop.std = standard deviation of arg of periastron
Args:
orbits (Norbits x 13 array): array of accepted orbits from \
OFTI fit in the same order as the following attributes
param.mean (flt): mean of parameter computed using np.mean
param.median (flt): np.median of parameter
param.mode (flt): mode of parameter
param.std (flt): standard deviation from np.std
param.ci68 (tuple,flt): 68% minimum credible interval of form (lower bound, upper bound)
param.ci95 (tuple,flt): 95% minimum credible interval
        write_to_file (bool): If True, write stats to a human-readable text file.
        filename (str): filename for saving stats file. If not supplied, default \
            name is FitResults.Stats.y.mo.d.h.m.s.txt, saved in same directory as fit was run.
Written by Logan Pearce, 2020
'''
def __init__(self, orbits = [], write_to_file = False, filename = None):
self.orbits = orbits
# Compute stats on parameter arrays and save as attributes:
self.sma = StatsSubclass(self.orbits[:,0])
self.period = StatsSubclass(self.orbits[:,1])
self.orbit_fraction = StatsSubclass(self.orbits[:,2])
self.t0 = StatsSubclass(self.orbits[:,3])
self.ecc = StatsSubclass(self.orbits[:,4])
self.inc = StatsSubclass(self.orbits[:,5])
self.aop = StatsSubclass(self.orbits[:,6])
self.lan = StatsSubclass(self.orbits[:,7])
self.mtot = StatsSubclass(self.orbits[:,8])
self.distance = StatsSubclass(self.orbits[:,9])
if write_to_file:
params = np.array([self.sma,self.period,self.orbit_fraction,self.t0,self.ecc,self.inc,\
self.aop,self.lan,self.mtot,self.distance])
names = np.array(['sma','period','orbit fraction','t0','ecc','inc','aop','lan','mtot','distance'])
if not filename:
filename = 'FitResults.Stats.'+time.strftime("%Y.%m.%d.%H.%M.%S")+'.txt'
k = open(filename, 'w')
string = 'Parameter Mean Median Mode Std 68% Min Cred Int 95% Min Cred Int'
k.write(string + "\n")
for i in range(len(params)):
string = make_parameter_string(params[i],names[i])
k.write(string + "\n")
k.close()
class StatsSubclass(Stats):
'''Subclass for computing and storing statistics
Args:
array (arr): array for which to compute statistics
'''
def __init__(self, array):
self.mean,self.median,self.mode,self.std,self.ci68,self.ci95 = compute_statistics(array)
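# Usage sketch (added example; `orbits` is the Norbits x 13 array of accepted OFTI samples):
# stats = Stats(orbits=orbits, write_to_file=True)
# print(stats.sma.mean, stats.ecc.ci68)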
| 49.650615 | 170 | 0.585278 | 47,896 | 0.988341 | 0 | 0 | 0 | 0 | 0 | 0 | 20,399 | 0.420936 |
65411b851445a48f4001d24e9d3426293a327569 | 243 | py | Python | src/data_curation/dataset_manager/views.py | NovaSBE-DSKC/retention-evaluation | 5b68b9282f0b5479a9dc5238faef68067c76b861 | ["MIT"] | null | null | null | src/data_curation/dataset_manager/views.py | NovaSBE-DSKC/retention-evaluation | 5b68b9282f0b5479a9dc5238faef68067c76b861 | ["MIT"] | null | null | null | src/data_curation/dataset_manager/views.py | NovaSBE-DSKC/retention-evaluation | 5b68b9282f0b5479a9dc5238faef68067c76b861 | ["MIT"] | null | null | null | from src.data_curation.dataset_manager import get_meta
import pandas as pd
def view_all():
return get_meta()
def get_docs(id):
df = get_meta()
docs = df[df["id"] == id]["docs"].iloc[0]
df = pd.read_json(docs)
return df
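# Usage sketch (added example; assumes the metadata frame has 'id' and JSON-encoded 'docs' columns):
# docs_df = get_docs("some-dataset-id")  # "some-dataset-id" is a hypothetical id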
| 17.357143 | 54 | 0.654321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.041152 |
6541203743362ff0dff952a51d471197ab93e9cb | 109 | py | Python | deid/model/layers/__init__.py | KavishBhatia/deid-training-data | 9d586cd7f52d929b2571028365587d3f96e44caa | ["MIT"] | 15 | 2018-10-28T13:57:55.000Z | 2022-01-03T07:25:04.000Z | deid/model/layers/__init__.py | KavishBhatia/deid-training-data | 9d586cd7f52d929b2571028365587d3f96e44caa | ["MIT"] | 7 | 2019-04-29T13:55:52.000Z | 2021-12-13T19:51:30.000Z | deid/model/layers/__init__.py | KavishBhatia/deid-training-data | 9d586cd7f52d929b2571028365587d3f96e44caa | ["MIT"] | 3 | 2019-08-01T19:02:37.000Z | 2021-01-08T09:12:25.000Z | from .gradient_reversal import GradientReversal
from .noise import Noise, AdditiveNoise, MultiplicativeNoise
| 36.333333 | 60 | 0.87156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
654539319f4d10a8899c207b7e590fdb33eef877 | 2,344 | py | Python | db_pool/mysql/base.py | GiftLee/devops | 1d63ee32c3e9519131bf5f6f2cda19675a1ca833 | ["MIT"] | 300 | 2019-07-30T03:01:29.000Z | 2022-03-14T05:51:41.000Z | db_pool/mysql/base.py | nice555/devops | fe09e6a3165d92dcd2dc67a0df3c2e47c6accc41 | ["MIT"] | 27 | 2019-11-04T08:41:52.000Z | 2021-09-27T07:51:41.000Z | db_pool/mysql/base.py | nice555/devops | fe09e6a3165d92dcd2dc67a0df3c2e47c6accc41 | ["MIT"] | 124 | 2019-08-21T03:32:15.000Z | 2022-03-14T05:51:43.000Z | # -*- coding: utf-8 -*-
"""
Reading the source of django.db.backends.mysql.base.py shows that Django does not use
a connection pool when connecting to MySQL: every database operation opens a brand-new
connection and closes it once the query finishes. Worse, setting the CONN_MAX_AGE
parameter per the official Django docs (which is meant to reuse connections) means each
new connection is no longer closed after its query, and instead keeps occupying a slot.
Under high concurrency this easily triggers "too many connections" errors. Hence this
module rewrites the MySQL backend to provide connection pooling.
"""
from django.core.exceptions import ImproperlyConfigured
import queue
import threading
try:
import MySQLdb as Database
except ImportError as err:
raise ImproperlyConfigured(
'Error loading MySQLdb module.\n'
'Did you install mysqlclient?'
) from err
from django.db.backends.mysql.base import *
from django.db.backends.mysql.base import DatabaseWrapper as _DatabaseWrapper
DEFAULT_DB_POOL_SIZE = 5
class DatabaseWrapper(_DatabaseWrapper):
"""
    Never set the CONN_MAX_AGE connection parameter when using this backend; otherwise
    connections are not quickly released back to the pool after use, and the pool blocks.
"""
connect_pools = {}
pool_size = None
mutex = threading.Lock()
def get_new_connection(self, conn_params):
with self.mutex:
            # read the DB_POOL_SIZE parameter from the DATABASES configuration dict
if not self.pool_size:
self.pool_size = self.settings_dict.get('DB_POOL_SIZE') or DEFAULT_DB_POOL_SIZE
if self.alias not in self.connect_pools:
self.connect_pools[self.alias] = ConnectPool(conn_params, self.pool_size)
return self.connect_pools[self.alias].get_connection()
def _close(self):
with self.mutex:
            # overrides the original close method: release the connection back to the pool when the query finishes
if self.connection is not None:
with self.wrap_database_errors:
return self.connect_pools[self.alias].release_connection(self.connection)
class ConnectPool(object):
def __init__(self, conn_params, pool_size):
self.conn_params = conn_params
self.pool_size = pool_size
self.connect_count = 0
self.connects = queue.Queue()
def get_connection(self):
if self.connect_count < self.pool_size:
self.connect_count = self.connect_count + 1
return Database.connect(**self.conn_params)
conn = self.connects.get()
try:
            # check that the connection is still alive; removing this check is faster, but keeping it is recommended
conn.ping()
except Exception:
conn = Database.connect(**self.conn_params)
return conn
def release_connection(self, conn):
self.connects.put(conn)
| 32.109589 | 95 | 0.677901 | 1,825 | 0.652252 | 0 | 0 | 0 | 0 | 0 | 0 | 980 | 0.35025 |
6546238c5cc99f6434344bc2dc8680356df8f817 | 34,758 | py | Python | flowsa/flowbyfunctions.py | modelearth/flowsa | d4dcf5ef8764b4ef895080a54d0546668daf0e1a | ["CC0-1.0"] | 13 | 2020-04-10T20:43:00.000Z | 2022-02-12T09:00:56.000Z | flowsa/flowbyfunctions.py | modelearth/flowsa | d4dcf5ef8764b4ef895080a54d0546668daf0e1a | ["CC0-1.0"] | 93 | 2020-03-05T14:22:42.000Z | 2022-03-30T20:33:06.000Z | flowsa/flowbyfunctions.py | modelearth/flowsa | d4dcf5ef8764b4ef895080a54d0546668daf0e1a | ["CC0-1.0"] | 16 | 2020-05-03T13:54:59.000Z | 2022-01-06T16:42:53.000Z | # flowbyfunctions.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Helper functions for flowbyactivity and flowbysector data
"""
import pandas as pd
import numpy as np
from esupy.dqi import get_weighted_average
import flowsa
from flowsa.common import fbs_activity_fields, US_FIPS, get_state_FIPS, \
get_county_FIPS, update_geoscale, log, load_source_catalog, \
load_sector_length_crosswalk, flow_by_sector_fields, fbs_fill_na_dict, \
fbs_collapsed_default_grouping_fields, flow_by_sector_collapsed_fields, \
fbs_collapsed_fill_na_dict, fba_activity_fields, \
fips_number_key, flow_by_activity_fields, fba_fill_na_dict, datasourcescriptspath, \
find_true_file_path, flow_by_activity_mapped_fields, fba_mapped_default_grouping_fields
from flowsa.dataclean import clean_df, replace_strings_with_NoneType, \
replace_NoneType_with_empty_cells, standardize_units
def create_geoscale_list(df, geoscale, year='2015'):
"""
Create a list of FIPS associated with given geoscale
    :param df: FlowBySector or FlowByActivity df
:param geoscale: 'national', 'state', or 'county'
:param year: str, year of FIPS, defaults to 2015
:return: list of relevant FIPS
"""
# filter by geoscale depends on Location System
fips = []
if geoscale == "national":
fips.append(US_FIPS)
elif df['LocationSystem'].str.contains('FIPS').any():
# all_FIPS = read_stored_FIPS()
if geoscale == "state":
state_FIPS = get_state_FIPS(year)
fips = list(state_FIPS['FIPS'])
elif geoscale == "county":
county_FIPS = get_county_FIPS(year)
fips = list(county_FIPS['FIPS'])
return fips
def filter_by_geoscale(df, geoscale):
"""
Filter flowbyactivity by FIPS at the given scale
:param df: Either flowbyactivity or flowbysector
:param geoscale: string, either 'national', 'state', or 'county'
:return: filtered flowbyactivity or flowbysector
"""
fips = create_geoscale_list(df, geoscale)
df = df[df['Location'].isin(fips)].reset_index(drop=True)
if len(df) == 0:
log.error("No flows found in the flow dataset at the %s scale", geoscale)
else:
return df
def agg_by_geoscale(df, from_scale, to_scale, groupbycols):
"""
Aggregate a df by geoscale
:param df: flowbyactivity or flowbysector df
:param from_scale: str, geoscale to aggregate from ('national', 'state', 'county')
:param to_scale: str, geoscale to aggregate to ('national', 'state', 'county')
    :param groupbycols: flowbyactivity or flowbysector default groupby columns
:return: df, at identified to_scale geographic level
"""
# use from scale to filter by these values
df = filter_by_geoscale(df, from_scale).reset_index(drop=True)
df = update_geoscale(df, to_scale)
fba_agg = aggregator(df, groupbycols)
return fba_agg
def aggregator(df, groupbycols):
"""
    Aggregates the 'FlowAmount' column of a flowbyactivity or flowbysector df and generates
    weighted-average values, weighted by FlowAmount, for the numeric data-quality columns
:param df: df, Either flowbyactivity or flowbysector
:param groupbycols: list, Either flowbyactivity or flowbysector columns
:return: df, with aggregated columns
"""
# reset index
df = df.reset_index(drop=True)
# tmp replace null values with empty cells
df = replace_NoneType_with_empty_cells(df)
# drop columns with flowamount = 0
df = df[df['FlowAmount'] != 0]
# list of column headers, that if exist in df, should be aggregated using the weighted avg fxn
possible_column_headers = ('Spread', 'Min', 'Max', 'DataReliability', 'TemporalCorrelation',
'GeographicalCorrelation', 'TechnologicalCorrelation',
'DataCollection')
# list of column headers that do exist in the df being aggregated
column_headers = [e for e in possible_column_headers if e in df.columns.values.tolist()]
df_dfg = df.groupby(groupbycols).agg({'FlowAmount': ['sum']})
# run through other columns creating weighted average
for e in column_headers:
df_dfg[e] = get_weighted_average(df, e, 'FlowAmount', groupbycols)
df_dfg = df_dfg.reset_index()
df_dfg.columns = df_dfg.columns.droplevel(level=1)
# if datatypes are strings, ensure that Null values remain NoneType
df_dfg = replace_strings_with_NoneType(df_dfg)
return df_dfg
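# Example call (a sketch; groups a mapped FBA by its default grouping fields):
# agg_df = aggregator(fba, fba_mapped_default_grouping_fields)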
def sector_ratios(df, sectorcolumn):
"""
Determine ratios of the less aggregated sectors within a more aggregated sector
:param df: A df with sector columns
:param sectorcolumn: 'SectorConsumedBy' or 'SectorProducedBy'
:return: df, with 'FlowAmountRatio' column
"""
# drop any null rows (can occur when activities are ranges)
df = df[~df[sectorcolumn].isnull()]
# find the longest length sector
length = max(df[sectorcolumn].apply(lambda x: len(str(x))).unique())
# for loop in reverse order longest length naics minus 1 to 2
# appends missing naics levels to df
sec_ratios = []
for i in range(length, 3, -1):
# subset df to sectors with length = i and length = i + 1
df_subset = df.loc[df[sectorcolumn].apply(lambda x: len(x) == i)]
# create column for sector grouping
df_subset = df_subset.assign(Sector_group=df_subset[sectorcolumn].apply(lambda x: x[0:i-1]))
# subset df to create denominator
df_denom = df_subset[['FlowAmount', 'Location', 'Sector_group']]
df_denom = df_denom.groupby(['Location', 'Sector_group'],
as_index=False).agg({"FlowAmount": sum})
df_denom = df_denom.rename(columns={"FlowAmount": "Denominator"})
# merge the denominator column with fba_w_sector df
ratio_df = df_subset.merge(df_denom, how='left')
# calculate ratio
ratio_df.loc[:, 'FlowAmountRatio'] = ratio_df['FlowAmount'] / ratio_df['Denominator']
ratio_df = ratio_df.drop(columns=['Denominator', 'Sector_group']).reset_index()
sec_ratios.append(ratio_df)
# concat list of dataframes (info on each page)
df_w_ratios = pd.concat(sec_ratios, sort=True).reset_index(drop=True)
return df_w_ratios
def sector_aggregation(df_load, group_cols):
"""
Function that checks if a sector length exists, and if not, sums the less aggregated sector
:param df_load: Either a flowbyactivity df with sectors or a flowbysector df
:param group_cols: columns by which to aggregate
:return: df, with aggregated sector values
"""
# determine if activities are sector-like, if aggregating a df with a 'SourceName'
sector_like_activities = False
if 'SourceName' in df_load.columns:
# load source catalog
cat = load_source_catalog()
# for s in pd.unique(flowbyactivity_df['SourceName']):
s = pd.unique(df_load['SourceName'])[0]
# load catalog info for source
src_info = cat[s]
sector_like_activities = src_info['sector-like_activities']
# ensure None values are not strings
df = replace_NoneType_with_empty_cells(df_load)
    # if activities are sector-like, drop them from the df and from the groupby columns;
    # they are added back in at the end as copies of the sector columns
if sector_like_activities:
group_cols = [e for e in group_cols if e not in
('ActivityProducedBy', 'ActivityConsumedBy')]
# subset df
df_cols = [e for e in df.columns if e not in
('ActivityProducedBy', 'ActivityConsumedBy')]
df = df[df_cols]
# find the longest length sector
length = df[[fbs_activity_fields[0], fbs_activity_fields[1]]].apply(
lambda x: x.str.len()).max().max()
length = int(length)
# for loop in reverse order longest length naics minus 1 to 2
# appends missing naics levels to df
for i in range(length - 1, 1, -1):
# subset df to sectors with length = i and length = i + 1
df_subset = df.loc[df[fbs_activity_fields[0]].apply(lambda x: i + 1 >= len(x) >= i) |
df[fbs_activity_fields[1]].apply(lambda x: i + 1 >= len(x) >= i)]
# create a list of i digit sectors in df subset
sector_subset = df_subset[
['Location', fbs_activity_fields[0],
fbs_activity_fields[1]]].drop_duplicates().reset_index(drop=True)
df_sectors = sector_subset.copy()
df_sectors.loc[:, 'SectorProducedBy'] = \
df_sectors['SectorProducedBy'].apply(lambda x: x[0:i])
df_sectors.loc[:, 'SectorConsumedBy'] = \
df_sectors['SectorConsumedBy'].apply(lambda x: x[0:i])
sector_list = df_sectors.drop_duplicates().values.tolist()
# create a list of sectors that are exactly i digits long
# where either sector column is i digits in length
df_existing_1 = \
sector_subset.loc[(sector_subset['SectorProducedBy'].apply(lambda x: len(x) == i)) |
(sector_subset['SectorConsumedBy'].apply(lambda x: len(x) == i))]
# where both sector columns are i digits in length
df_existing_2 = \
sector_subset.loc[(sector_subset['SectorProducedBy'].apply(lambda x: len(x) == i)) &
(sector_subset['SectorConsumedBy'].apply(lambda x: len(x) == i))]
# concat existing dfs
df_existing = pd.concat([df_existing_1, df_existing_2], sort=False)
existing_sectors = df_existing.drop_duplicates().dropna().values.tolist()
# list of sectors of length i that are not in sector list
missing_sectors = [e for e in sector_list if e not in existing_sectors]
if len(missing_sectors) != 0:
# new df of sectors that start with missing sectors.
# drop last digit of the sector and sum flows
# set conditions
agg_sectors_list = []
for q, r, s in missing_sectors:
c1 = df_subset['Location'] == q
c2 = df_subset[fbs_activity_fields[0]].apply(lambda x: x[0:i] == r)
c3 = df_subset[fbs_activity_fields[1]].apply(lambda x: x[0:i] == s)
# subset data
agg_sectors_list.append(df_subset.loc[c1 & c2 & c3])
agg_sectors = pd.concat(agg_sectors_list, sort=False)
agg_sectors = agg_sectors.loc[
(agg_sectors[fbs_activity_fields[0]].apply(lambda x: len(x) > i)) |
(agg_sectors[fbs_activity_fields[1]].apply(lambda x: len(x) > i))]
agg_sectors.loc[:, fbs_activity_fields[0]] = agg_sectors[fbs_activity_fields[0]].apply(
lambda x: x[0:i])
agg_sectors.loc[:, fbs_activity_fields[1]] = agg_sectors[fbs_activity_fields[1]].apply(
lambda x: x[0:i])
# aggregate the new sector flow amounts
agg_sectors = aggregator(agg_sectors, group_cols)
# append to df
agg_sectors = replace_NoneType_with_empty_cells(agg_sectors)
df = df.append(agg_sectors, sort=False).reset_index(drop=True)
# manually modify non-NAICS codes that might exist in sector
df.loc[:, 'SectorConsumedBy'] = np.where(df['SectorConsumedBy'].isin(['F0', 'F01']),
'F010', df['SectorConsumedBy']) # domestic/household
df.loc[:, 'SectorProducedBy'] = np.where(df['SectorProducedBy'].isin(['F0', 'F01']),
'F010', df['SectorProducedBy']) # domestic/household
# drop any duplicates created by modifying sector codes
df = df.drop_duplicates()
    # if activities are sector-like, set the activity columns as copies of the sector columns
if sector_like_activities:
df = df.assign(ActivityProducedBy=df['SectorProducedBy'])
df = df.assign(ActivityConsumedBy=df['SectorConsumedBy'])
# reindex columns
df = df.reindex(df_load.columns, axis=1)
# replace null values
df = replace_strings_with_NoneType(df)
return df
def sector_disaggregation(df):
"""
    function to disaggregate sectors when a parent NAICS maps to exactly one child NAICS
    at the next level down; fills in the missing NAICS 5 and NAICS 6 codes
:param df: A FBS df, must have sector columns
:return: A FBS df with values for the missing naics5 and naics6
"""
# ensure None values are not strings
df = replace_NoneType_with_empty_cells(df)
# load naics 2 to naics 6 crosswalk
cw_load = load_sector_length_crosswalk()
# for loop min length to 6 digits, where min length cannot be less than 2
length = df[[fbs_activity_fields[0], fbs_activity_fields[1]]].apply(
lambda x: x.str.len()).min().min()
if length < 2:
length = 2
# appends missing naics levels to df
for i in range(length, 6):
sector_merge = 'NAICS_' + str(i)
sector_add = 'NAICS_' + str(i+1)
# subset the df by naics length
cw = cw_load[[sector_merge, sector_add]]
# only keep the rows where there is only one value in sector_add for a value in sector_merge
cw = cw.drop_duplicates(subset=[sector_merge], keep=False).reset_index(drop=True)
sector_list = cw[sector_merge].values.tolist()
# subset df to sectors with length = i and length = i + 1
df_subset = df.loc[df[fbs_activity_fields[0]].apply(lambda x: i + 1 >= len(x) >= i) |
df[fbs_activity_fields[1]].apply(lambda x: i + 1 >= len(x) >= i)]
# create new columns that are length i
df_subset = df_subset.assign(SectorProduced_tmp=
df_subset[fbs_activity_fields[0]].apply(lambda x: x[0:i]))
df_subset = df_subset.assign(SectorConsumed_tmp=
df_subset[fbs_activity_fields[1]].apply(lambda x: x[0:i]))
# subset the df to the rows where the tmp sector columns are in naics list
df_subset_1 = df_subset.loc[(df_subset['SectorProduced_tmp'].isin(sector_list)) &
(df_subset['SectorConsumed_tmp'] == "")]
df_subset_2 = df_subset.loc[(df_subset['SectorProduced_tmp'] == "") &
(df_subset['SectorConsumed_tmp'].isin(sector_list))]
df_subset_3 = df_subset.loc[(df_subset['SectorProduced_tmp'].isin(sector_list)) &
(df_subset['SectorConsumed_tmp'].isin(sector_list))]
# concat existing dfs
df_subset = pd.concat([df_subset_1, df_subset_2, df_subset_3], sort=False)
# drop all rows with duplicate temp values, as a less aggregated naics exists
# list of column headers, that if exist in df, should be
# aggregated using the weighted avg fxn
possible_column_headers = ('Flowable', 'FlowName', 'Unit', 'Context',
'Compartment', 'Location', 'Year',
'SectorProduced_tmp', 'SectorConsumed_tmp')
# list of column headers that do exist in the df being subset
cols_to_drop = [e for e in possible_column_headers if e
in df_subset.columns.values.tolist()]
df_subset = df_subset.drop_duplicates(subset=cols_to_drop,
keep=False).reset_index(drop=True)
# merge the naics cw
new_naics = pd.merge(df_subset, cw[[sector_merge, sector_add]],
how='left', left_on=['SectorProduced_tmp'], right_on=[sector_merge])
new_naics = new_naics.rename(columns={sector_add: "SPB"})
new_naics = new_naics.drop(columns=[sector_merge])
new_naics = pd.merge(new_naics, cw[[sector_merge, sector_add]],
how='left', left_on=['SectorConsumed_tmp'], right_on=[sector_merge])
new_naics = new_naics.rename(columns={sector_add: "SCB"})
new_naics = new_naics.drop(columns=[sector_merge])
# drop columns and rename new sector columns
new_naics = new_naics.drop(columns=["SectorProducedBy", "SectorConsumedBy",
"SectorProduced_tmp", "SectorConsumed_tmp"])
new_naics = new_naics.rename(columns={"SPB": "SectorProducedBy",
"SCB": "SectorConsumedBy"})
# append new naics to df
new_naics['SectorConsumedBy'] = new_naics['SectorConsumedBy'].replace({np.nan: ""})
new_naics['SectorProducedBy'] = new_naics['SectorProducedBy'].replace({np.nan: ""})
new_naics = replace_NoneType_with_empty_cells(new_naics)
df = pd.concat([df, new_naics], sort=True)
# replace blank strings with None
df = replace_strings_with_NoneType(df)
return df
def assign_fips_location_system(df, year_of_data):
"""
Add location system based on year of data. County level FIPS change over the years.
:param df: df with FIPS location system
:param year_of_data: str, year of data pulled
:return: df, with 'LocationSystem' column values
"""
if year_of_data >= '2015':
df.loc[:, 'LocationSystem'] = 'FIPS_2015'
elif '2013' <= year_of_data < '2015':
df.loc[:, 'LocationSystem'] = 'FIPS_2013'
elif '2010' <= year_of_data < '2013':
df.loc[:, 'LocationSystem'] = 'FIPS_2010'
elif year_of_data < '2010':
log.warning(
"Missing FIPS codes from crosswalk for %s. Assigning to FIPS_2010", year_of_data)
df.loc[:, 'LocationSystem'] = 'FIPS_2010'
return df
def collapse_fbs_sectors(fbs):
"""
    Collapses the SectorProducedBy/SectorConsumedBy columns into a single column named
    "Sector", based on identified rules for each FlowType
:param fbs: df, a standard FlowBySector (format)
:return: df, FBS with single Sector column
"""
# ensure correct datatypes and order
fbs = clean_df(fbs, flow_by_sector_fields, fbs_fill_na_dict)
# collapse the FBS sector columns into one column based on FlowType
fbs.loc[fbs["FlowType"] == 'TECHNOSPHERE_FLOW', 'Sector'] = fbs["SectorConsumedBy"]
fbs.loc[fbs["FlowType"] == 'WASTE_FLOW', 'Sector'] = fbs["SectorProducedBy"]
fbs.loc[(fbs["FlowType"] == 'WASTE_FLOW') & (fbs['SectorProducedBy'].isnull()),
'Sector'] = fbs["SectorConsumedBy"]
fbs.loc[(fbs["FlowType"] == 'ELEMENTARY_FLOW') & (fbs['SectorProducedBy'].isnull()),
'Sector'] = fbs["SectorConsumedBy"]
fbs.loc[(fbs["FlowType"] == 'ELEMENTARY_FLOW') & (fbs['SectorConsumedBy'].isnull()),
'Sector'] = fbs["SectorProducedBy"]
fbs.loc[(fbs["FlowType"] == 'ELEMENTARY_FLOW') &
(fbs['SectorConsumedBy'].isin(['F010', 'F0100', 'F01000'])) &
(fbs['SectorProducedBy'].isin(['22', '221', '2213', '22131', '221310'])),
'Sector'] = fbs["SectorConsumedBy"]
# drop sector consumed/produced by columns
fbs_collapsed = fbs.drop(columns=['SectorProducedBy', 'SectorConsumedBy'])
# aggregate
fbs_collapsed = aggregator(fbs_collapsed, fbs_collapsed_default_grouping_fields)
# sort dataframe
fbs_collapsed = clean_df(fbs_collapsed, flow_by_sector_collapsed_fields,
fbs_collapsed_fill_na_dict)
fbs_collapsed = fbs_collapsed.sort_values(['Sector', 'Flowable',
'Context', 'Location']).reset_index(drop=True)
return fbs_collapsed
def return_activity_from_scale(df, provided_from_scale):
"""
Determine the 'from scale' used for aggregation/df subsetting for each activity combo in a df
:param df: flowbyactivity df
:param provided_from_scale: str, The scale to use specified in method yaml
:return: df, FBA with column indicating the "from" geoscale to use for each row
"""
# determine the unique combinations of activityproduced/consumedby
unique_activities = unique_activity_names(df)
# filter by geoscale
fips = create_geoscale_list(df, provided_from_scale)
df_sub = df[df['Location'].isin(fips)]
# determine unique activities after subsetting by geoscale
unique_activities_sub = unique_activity_names(df_sub)
    # return df of the difference between unique_activities and unique_activities_sub
    df_missing = dataframe_difference(unique_activities, unique_activities_sub, which='left_only')
    # return df of the similarities between unique_activities and unique_activities_sub
df_existing = dataframe_difference(unique_activities, unique_activities_sub, which='both')
df_existing = df_existing.drop(columns='_merge')
df_existing['activity_from_scale'] = provided_from_scale
# for loop through geoscales until find data for each activity combo
if provided_from_scale == 'national':
geoscales = ['state', 'county']
elif provided_from_scale == 'state':
geoscales = ['county']
    elif provided_from_scale == 'county':
        # county is the finest scale available, so there is no finer scale to fall back on
        geoscales = []
        log.info('No data - skipping')
if len(df_missing) > 0:
for i in geoscales:
# filter by geoscale
fips_i = create_geoscale_list(df, i)
df_i = df[df['Location'].isin(fips_i)]
# determine unique activities after subsetting by geoscale
unique_activities_i = unique_activity_names(df_i)
# return df of the difference between unique_activities subset and
# unique_activities for geoscale
df_missing_i = dataframe_difference(unique_activities_sub,
unique_activities_i, which='right_only')
df_missing_i = df_missing_i.drop(columns='_merge')
df_missing_i['activity_from_scale'] = i
# return df of the similarities between unique_activities and unique_activities2
df_existing_i = dataframe_difference(unique_activities_sub,
unique_activities_i, which='both')
# append unique activities and df with defined activity_from_scale
unique_activities_sub = \
unique_activities_sub.append(df_missing_i[[fba_activity_fields[0],
fba_activity_fields[1]]])
df_existing = df_existing.append(df_missing_i)
df_missing = dataframe_difference(df_missing[[fba_activity_fields[0],
fba_activity_fields[1]]],
df_existing_i[[fba_activity_fields[0],
fba_activity_fields[1]]],
which=None)
return df_existing
def subset_df_by_geoscale(df, activity_from_scale, activity_to_scale):
"""
Subset a df by geoscale or agg to create data specified in method yaml
:param df: df, FBA format
:param activity_from_scale: str, identified geoscale by which to subset or
aggregate from ('national', 'state', 'county')
:param activity_to_scale: str, identified geoscale by which to subset or
aggregate to ('national', 'state', 'county')
:return: df, FBA, subset or aggregated to a single geoscale for all rows
"""
# method of subset dependent on LocationSystem
if df['LocationSystem'].str.contains('FIPS').any():
df = df[df['LocationSystem'].str.contains('FIPS')].reset_index(drop=True)
# determine 'activity_from_scale' for use in df geoscale subset, by activity
modified_from_scale = return_activity_from_scale(df, activity_from_scale)
# add 'activity_from_scale' column to df
df2 = pd.merge(df, modified_from_scale)
# list of unique 'from' geoscales
unique_geoscales =\
modified_from_scale['activity_from_scale'].drop_duplicates().values.tolist()
if len(unique_geoscales) > 1:
log.info('Dataframe has a mix of geographic levels: %s', ', '.join(unique_geoscales))
# to scale
if fips_number_key[activity_from_scale] > fips_number_key[activity_to_scale]:
to_scale = activity_to_scale
else:
to_scale = activity_from_scale
df_subset_list = []
# subset df based on activity 'from' scale
for i in unique_geoscales:
df3 = df2[df2['activity_from_scale'] == i]
# if desired geoscale doesn't exist, aggregate existing data
# if df is less aggregated than allocation df, aggregate
# fba activity to allocation geoscale
if fips_number_key[i] > fips_number_key[to_scale]:
log.info("Aggregating subset from %s to %s", i, to_scale)
df_sub = agg_by_geoscale(df3, i, to_scale, fba_mapped_default_grouping_fields)
# else filter relevant rows
else:
log.info("Subsetting %s data", i)
df_sub = filter_by_geoscale(df3, i)
df_subset_list.append(df_sub)
df_subset = pd.concat(df_subset_list, ignore_index=True)
# only keep cols associated with FBA mapped
df_subset = clean_df(df_subset, flow_by_activity_mapped_fields,
fba_fill_na_dict, drop_description=False)
# right now, the only other location system is for Statistics Canada data
else:
df_subset = df.copy()
return df_subset
def unique_activity_names(fba_df):
"""
Determine the unique activity names in a df
:param fba_df: a flowbyactivity df
:return: df with ActivityProducedBy and ActivityConsumedBy columns
"""
activities = fba_df[[fba_activity_fields[0], fba_activity_fields[1]]]
unique_activities = activities.drop_duplicates().reset_index(drop=True)
return unique_activities
def dataframe_difference(df1, df2, which=None):
"""
Find rows which are different between two DataFrames
:param df1: df, FBA or FBS
:param df2: df, FBA or FBS
:param which: 'both', 'right_only', 'left_only'
:return: df, comparison of data in the two dfs
"""
comparison_df = df1.merge(df2,
indicator=True,
how='outer')
if which is None:
diff_df = comparison_df[comparison_df['_merge'] != 'both']
else:
diff_df = comparison_df[comparison_df['_merge'] == which]
return diff_df
def estimate_suppressed_data(df, sector_column, naics_level, sourcename):
"""
Estimate data suppression, by equally allocating parent NAICS values to child NAICS
:param df: df with sector columns
:param sector_column: str, column to estimate suppressed data for
:param naics_level: numeric, indicate at what NAICS length to base
        estimated suppressed data off (2 - 5)
:param sourcename: str, sourcename
:return: df, with estimated suppressed data
"""
# exclude nonsectors
df = replace_NoneType_with_empty_cells(df)
# find the longest length sector
max_length = max(df[sector_column].apply(lambda x: len(str(x))).unique())
# loop through starting at naics_level, use most detailed level possible to save time
for i in range(naics_level, max_length):
# create df of i length
df_x = df.loc[df[sector_column].apply(lambda x: len(x) == i)]
# create df of i + 1 length
df_y = df.loc[df[sector_column].apply(lambda x: len(x) == i + 1)]
# create temp sector columns in df y, that are i digits in length
df_y = df_y.assign(s_tmp=df_y[sector_column].apply(lambda x: x[0:i]))
# create list of location and temp activity combos that contain a 0
missing_sectors_df = df_y[df_y['FlowAmount'] == 0]
missing_sectors_list = missing_sectors_df[['Location',
's_tmp']].drop_duplicates().values.tolist()
# subset the y df
if len(missing_sectors_list) != 0:
# new df of sectors that start with missing sectors.
# drop last digit of the sector and sum flows set conditions
suppressed_list = []
for q, r, in missing_sectors_list:
c1 = df_y['Location'] == q
c2 = df_y['s_tmp'] == r
# subset data
suppressed_list.append(df_y.loc[c1 & c2])
suppressed_sectors = pd.concat(suppressed_list, sort=False, ignore_index=True)
# add column of existing allocated data for length of i
suppressed_sectors['alloc_flow'] =\
suppressed_sectors.groupby(['Location', 's_tmp'])['FlowAmount'].transform('sum')
# subset further so only keep rows of 0 value
suppressed_sectors_sub = suppressed_sectors[suppressed_sectors['FlowAmount'] == 0]
# add count
suppressed_sectors_sub = \
suppressed_sectors_sub.assign(sector_count=
suppressed_sectors_sub.groupby(
['Location', 's_tmp']
)['s_tmp'].transform('count'))
# merge suppressed sector subset with df x
df_m = pd.merge(df_x,
suppressed_sectors_sub[['Class', 'Compartment', 'FlowType',
'FlowName', 'Location', 'LocationSystem',
'Unit', 'Year', sector_column, 's_tmp',
'alloc_flow', 'sector_count']],
left_on=['Class', 'Compartment', 'FlowType', 'FlowName',
'Location', 'LocationSystem', 'Unit', 'Year', sector_column],
right_on=['Class', 'Compartment', 'FlowType', 'FlowName',
'Location', 'LocationSystem', 'Unit', 'Year', 's_tmp'],
how='right')
# drop any rows where flowamount is none
df_m = df_m[~df_m['FlowAmount'].isna()]
# calculate estimated flows by subtracting the flow
# amount already allocated from total flow of
# sector one level up and divide by number of sectors with suppressed data
df_m.loc[:, 'FlowAmount'] = \
(df_m['FlowAmount'] - df_m['alloc_flow']) / df_m['sector_count']
# only keep the suppressed sector subset activity columns
df_m = df_m.drop(columns=[sector_column + '_x', 's_tmp', 'alloc_flow', 'sector_count'])
df_m = df_m.rename(columns={sector_column + '_y': sector_column})
# reset activity columns
if load_source_catalog()[sourcename]['sector-like_activities']:
df_m = df_m.assign(ActivityProducedBy=df_m['SectorProducedBy'])
df_m = df_m.assign(ActivityConsumedBy=df_m['SectorConsumedBy'])
# drop the existing rows with suppressed data and append the new estimates from fba df
modified_df =\
pd.merge(df, df_m[['FlowName', 'Location', sector_column]],
indicator=True,
how='outer').query('_merge=="left_only"').drop('_merge', axis=1)
df = pd.concat([modified_df, df_m], ignore_index=True)
df_w_estimated_data = replace_strings_with_NoneType(df)
return df_w_estimated_data
def collapse_activity_fields(df):
"""
The 'activityconsumedby' and 'activityproducedby' columns from the
allocation dataset do not always align with
the dataframe being allocated. Generalize the allocation activity column.
:param df: df, FBA used to allocate another FBA
:return: df, single Activity column
"""
df = replace_strings_with_NoneType(df)
activity_consumed_list = df['ActivityConsumedBy'].drop_duplicates().values.tolist()
activity_produced_list = df['ActivityProducedBy'].drop_duplicates().values.tolist()
# if an activity field column is all 'none', drop the column and
# rename renaming activity columns to generalize
if all(v is None for v in activity_consumed_list):
df = df.drop(columns=['ActivityConsumedBy', 'SectorConsumedBy'])
df = df.rename(columns={'ActivityProducedBy': 'Activity',
'SectorProducedBy': 'Sector'})
elif all(v is None for v in activity_produced_list):
df = df.drop(columns=['ActivityProducedBy', 'SectorProducedBy'])
df = df.rename(columns={'ActivityConsumedBy': 'Activity',
'SectorConsumedBy': 'Sector'})
else:
log.error('Cannot generalize dataframe')
# drop other columns
df = df.drop(columns=['ProducedBySectorType', 'ConsumedBySectorType'])
return df
def dynamically_import_fxn(data_source_scripts_file, function_name):
"""
Dynamically import a function and call on that function
:param data_source_scripts_file: str, file name where function is found
:param function_name: str, name of function to import and call on
:return: a function
"""
# if a file does not exist modify file name, dropping ext after last underscore
data_source_scripts_file = find_true_file_path(datasourcescriptspath,
data_source_scripts_file,
'py')
df = getattr(__import__(f"{'flowsa.data_source_scripts.'}{data_source_scripts_file}",
fromlist=function_name), function_name)
return df
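# Usage sketch (added example; the file and function names are illustrative):
# fxn = dynamically_import_fxn('usgs_water', 'usgs_fba_data_cleanup')
# fba = fxn(fba)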
def load_fba_w_standardized_units(datasource, year, **kwargs):
"""
Standardize how a FBA is loaded for allocation purposes when generating a FBS.
Important to immediately convert the df units to standardized units.
:param datasource: string, FBA source name
:param year: int, year of data
:param kwargs: optional parameters include flowclass, geographic_level,
and download_if_missing
:return: fba df with standardized units
"""
    # determine if any additional parameters are required to load a Flow-By-Activity
# add parameters to dictionary if exist in method yaml
fba_dict = {}
if 'flowclass' in kwargs:
fba_dict['flowclass'] = kwargs['flowclass']
if 'geographic_level' in kwargs:
fba_dict['geographic_level'] = kwargs['geographic_level']
# load the allocation FBA
fba = flowsa.getFlowByActivity(datasource, year, **fba_dict).reset_index(drop=True)
# ensure df loaded correctly/has correct dtypes
fba = clean_df(fba, flow_by_activity_fields, fba_fill_na_dict)
# convert to standardized units
fba = standardize_units(fba)
return fba
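# Example call (a sketch; the source name and year are illustrative):
# fba = load_fba_w_standardized_units('USGS_NWIS_WU', 2015, flowclass='Water')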
| 46.97027 | 100 | 0.638328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14,751 | 0.424392 |
6547bff0921c0fa8bf1ae4180062d6c76818b672 | 3,553 | py | Python | stochastic_hill_climbing/src/AdalineSGD.py | Wookhwang/Machine-Learning | 8eaf8517057d4beb3272081cb2f2092687123f3d | ["Apache-2.0"] | null | null | null | stochastic_hill_climbing/src/AdalineSGD.py | Wookhwang/Machine-Learning | 8eaf8517057d4beb3272081cb2f2092687123f3d | ["Apache-2.0"] | 1 | 2020-01-19T10:14:41.000Z | 2020-01-19T10:14:41.000Z | stochastic_hill_climbing/src/AdalineSGD.py | Wookhwang/Machine-Learning | 8eaf8517057d4beb3272081cb2f2092687123f3d | ["Apache-2.0"] | null | null | null | import numpy as np
# gradient descent is already implemented in tensorflow.
# A stochastic linear-neuron classifier updates the weights a little for each training sample.
class AdalineSGD(object):
"""Adaptive Linear Neuron 분류기
매개변수
------------
eta : float
학습률 (0.0과 1.0 사이)
n_iter : int
훈련 데이터셋 반복 횟수
shuffle : bool (default true)
True로 설정하면 같은 반복이 되지 않도록 에포크마다 훈련 데이터를 섞습니다.
random_state : int
가중치 무작위 초기화를 위한 난수 생성기 시드
속성
-----------
w_ : 1d-array
학습된 가중치
cost_ : list
에포크마다 누적된 비용 함수의 제곱합
"""
    def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):
        self.eta = eta
        self.n_iter = n_iter
        self.shuffle = shuffle
        self.random_state = random_state
        self.w_initialize = False
def fit(self, X, y):
"""훈련 데이터 학습
매개변수
----------
X : {array-like}, shape = [n_samples, n_features]
n_samples개의 샘플과 n_features개의 특성으로 이루어진 훈련 데이터
y : array-like, shape = [n_samples]
타깃값
반환값
-------
self : object
"""
# 언제나 그렇듯 fit()은 분류기 훈련용
# 대신에 확률적 분류기에서는 _initialized_weights()을 사용해 행 갯수 만큼 가중치를 초기화
self._initialized_weights(X.shape[1])
self.cost_ = []
for i in range(self.n_iter):
# _suffle()을 사용해서 훈련 데이터를 섞어줌. True일 때만
if self.suffle:
X, y = self._suffle(X, y)
cost = []
# 가중치 업데이트 하고, cost도 update해준다.
for xi, target in zip(X, y):
cost.append(self._update_weights(xi, target))
# 평균 비용을 구해서 cost_ 리스트에 추가시켜준다.
avg_cost = sum(cost) / len(y)
self.cost_.append(avg_cost)
return self
    # partial_fit() is implemented for online learning,
    # used when the training set is continuously updated with new data
    def partial_fit(self, X, y):
        """Fit to the training data without re-initializing the weights."""
        if not self.w_initialize:
            self._initialized_weights(X.shape[1])
        if y.ravel().shape[0] > 1:
            for xi, target in zip(X, y):
                self._update_weights(xi, target)
        else:
            self._update_weights(X, y)
        return self
    def _shuffle(self, X, y):
        """Shuffle the training data"""
        # permutation() generates a random sequence of non-repeating indices, one per element of y;
        # the sequence is used as an index to shuffle the feature matrix and the class-label vector
        r = self.rgen.permutation(len(y))
        return X[r], y[r]
    def _initialized_weights(self, m):
        """Initialize the weights to small random numbers"""
self.rgen = np.random.RandomState(self.random_state)
self.w_ = self.rgen.normal(loc=0.0, scale=0.01, size=1+m)
self.w_initialize = True
def _update_weights(self, xi, target):
"""아달린 학습 규칙을 적용하여 가중치를 업데이트"""
output = self.activation(self.net_input(xi))
error = (target - output)
self.w_[1:] += self.eta * xi.dot(error)
self.w_[0] += self.eta * error
cost = 0.5 * error ** 2
return cost
def net_input(self, X):
"""최종 입력 계산"""
return np.dot(X, self.w_[1:]) + self.w_[0]
    # simple identity function (used to represent the flow of information through a single-layer network)
def activation(self, X):
"""선형 활성화 계산"""
return X
def predict(self, X):
"""단위 계산 함수를 사용하여 클래스 테이블을 반환합니다."""
return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)
# the model flows from the input features to the net input, then the activation, then the output
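# A minimal self-test (added example; the toy data below is illustrative):
if __name__ == '__main__':
    rng = np.random.RandomState(1)
    X = rng.randn(100, 2)
    # linearly separable labels in {-1, 1}:
    y = np.where(X[:, 0] + X[:, 1] > 0.0, 1, -1)
    ada = AdalineSGD(eta=0.01, n_iter=15, shuffle=True, random_state=1)
    ada.fit(X, y)
    print('final average cost:', ada.cost_[-1])
    print('training accuracy:', (ada.predict(X) == y).mean())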
| 30.110169 | 77 | 0.509992 | 4,363 | 0.954078 | 0 | 0 | 0 | 0 | 0 | 0 | 2,504 | 0.547562 |
65489ab1059af5d3af74f3912af4a5da4c39124a | 383 | py | Python | las1.2.py | Theskill19/sweetpotato | 7cb46c412f400bcd51838db365038a766cf593cd | ["CC0-1.0"] | null | null | null | las1.2.py | Theskill19/sweetpotato | 7cb46c412f400bcd51838db365038a766cf593cd | ["CC0-1.0"] | null | null | null | las1.2.py | Theskill19/sweetpotato | 7cb46c412f400bcd51838db365038a766cf593cd | ["CC0-1.0"] | null | null | null | #2. The user enters a time in seconds.
# Convert the time to hours, minutes and seconds, and print it in hh:mm:ss format.
# Use string formatting.
time = int(input("Enter the time in seconds "))
hours = time // 3600
minutes = (time - hours * 3600) // 60
seconds = time - (hours * 3600 + minutes * 60)
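# (added note) divmod is an equivalent, arguably clearer way to do the same split:
# hours, rem = divmod(time, 3600)
# minutes, seconds = divmod(rem, 60)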
print(f"Время в формате чч:мм:сс {hours} : {minutes} : {seconds}") | 42.555556 | 75 | 0.681462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.738007 |
654ad9f66cc00d76dc7800cff09e9d14c95e20e3 | 2,259 | py | Python | tpdatasrc/tpgamefiles/rules/char_class/class020_archmage.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | ["MIT"] | null | null | null | tpdatasrc/tpgamefiles/rules/char_class/class020_archmage.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | ["MIT"] | null | null | null | tpdatasrc/tpgamefiles/rules/char_class/class020_archmage.py | edoipi/TemplePlus | f0e552289822fea908f16daa379fa568b1bd286d | ["MIT"] | null | null | null | from toee import *
import char_class_utils
###################################################
def GetConditionName():
return "Archmage"
def GetSpellCasterConditionName():
return "Archmage Spellcasting"
def GetCategory():
return "Core 3.5 Ed Prestige Classes"
def GetClassDefinitionFlags():
return CDF_CoreClass
def GetClassHelpTopic():
return "TAG_ARCHMAGES"
classEnum = stat_level_archmage
###################################################
class_feats = {
}
class_skills = (skill_alchemy, skill_concentration, skill_craft, skill_knowledge_all, skill_profession, skill_search, skill_spellcraft)
def IsEnabled():
return 1
def GetHitDieType():
return 4
def GetSkillPtsPerLevel():
return 2
def GetBabProgression():
return base_attack_bonus_type_non_martial
def IsFortSaveFavored():
return 0
def IsRefSaveFavored():
return 0
def IsWillSaveFavored():
return 1
def GetSpellListType():
return spell_list_type_arcane
def GetSpellSourceType():
return spell_source_type_arcane
def IsClassSkill(skillEnum):
return char_class_utils.IsClassSkill(class_skills, skillEnum)
def IsClassFeat(featEnum):
return char_class_utils.IsClassFeat(class_feats, featEnum)
def GetClassFeats():
return class_feats
def IsAlignmentCompatible( alignment):
return 1
def CanCastArcaneLvl7(obj):
# TODO: generalize (to support other arcane classes)
if obj.stat_level_get(stat_level_sorcerer) >= 14:
return 1
if obj.stat_level_get(stat_level_wizard) >= 13:
return 1
def HasSpellFocusInTwoSchool( obj ):
sf1 = 0
for p in range(feat_spell_focus_abjuration, feat_spell_focus_transmutation+1):
if obj.has_feat(p):
sf1 = p
break
if sf1 == 0:
return 0
sf2 = 0
for p in range(feat_spell_focus_abjuration, feat_spell_focus_transmutation+1):
if obj.has_feat(p) and p != sf1:
sf2 = p
break
if sf2 == 0:
return 0
return 1
def ObjMeetsPrereqs( obj ):
return 0 # WIP
	# skill ranks (only Spellcraft, since Knowledge (arcana) isn't implemented in ToEE)
if obj.skill_ranks_get(skill_spellcraft) < 15:
return 0
if (not obj.has_feat(feat_skill_focus_spellcraft)):
return 0
if (not CanCastArcaneLvl7(obj)):
return 0
if (not HasSpellFocusInTwoSchool(obj)):
return 0
return 1 | 20.916667 | 135 | 0.736609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 357 | 0.158035 |
654b5be42b94507090bb99be14ad14d6bad404c8 | 427 | py | Python | src/bananas/drf/errors.py | beshrkayali/django-bananas | 8e832ca91287c5b3eed5af8de948c67fd026c4b9 | ["MIT"] | 26 | 2015-04-07T12:18:26.000Z | 2021-07-23T18:05:52.000Z | src/bananas/drf/errors.py | beshrkayali/django-bananas | 8e832ca91287c5b3eed5af8de948c67fd026c4b9 | ["MIT"] | 55 | 2016-10-25T08:13:50.000Z | 2022-03-04T12:53:24.000Z | src/bananas/drf/errors.py | beshrkayali/django-bananas | 8e832ca91287c5b3eed5af8de948c67fd026c4b9 | ["MIT"] | 16 | 2015-10-13T10:11:59.000Z | 2021-11-11T12:30:32.000Z | from rest_framework import status
from rest_framework.exceptions import APIException
class PreconditionFailed(APIException):
status_code = status.HTTP_412_PRECONDITION_FAILED
default_detail = "An HTTP precondition failed"
default_code = "precondition_failed"
class BadRequest(APIException):
status_code = status.HTTP_400_BAD_REQUEST
default_detail = "Validation failed"
default_code = "bad_request"
| 28.466667 | 53 | 0.800937 | 336 | 0.786885 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.192037 |
654c820b6644777c47304297e45e886b1326f0ee | 11,749 | py | Python | tools/train_net.py | luzhenyv/sesame | 5cef18c45192f7886b1c3c0096327e0095663adc | ["Apache-2.0"] | null | null | null | tools/train_net.py | luzhenyv/sesame | 5cef18c45192f7886b1c3c0096327e0095663adc | ["Apache-2.0"] | null | null | null | tools/train_net.py | luzhenyv/sesame | 5cef18c45192f7886b1c3c0096327e0095663adc | ["Apache-2.0"] | null | null | null | """Train a video classification model."""
import torch
import sesame.models.losses as losses
import sesame.models.optimizer as optim
import sesame.utils.distributed as du
import sesame.utils.misc as misc
import sesame.utils.metrics as metrics
import sesame.utils.logger as logger
from sesame.datasets.mixup import MixUp
log = logger.get_logger(__name__)
def train_epoch(
train_loader,
model,
optimizer,
scaler,
train_meter,
cur_epoch,
cfg,
writer=None,
):
"""
Perform the video training for one epoch.
Args:
train_loader (loader): video training loader.
model (model): the video model to train.
optimizer (optim): the optimizer to perform optimization on the model's parameters.
train_meter (TrainMeter): training meters to log the training performance.
cur_epoch (int): current epoch of training.
cfg (CfgNode): configs. Details can be found in sesame/config/defaults.py
        writer (TensorboardWriter, optional): TensorboardWriter object to write the Tensorboard log.
"""
# Enable train mode.
model.train()
train_meter.iter_tic()
data_size = cfg.TRAIN.STEPS_PER_EPOCH if cfg.TRAIN.STEPS_PER_EPOCH else len(train_loader)
if cfg.MIXUP.ENABLE:
mixup_fn = MixUp(
mixup_alpha=cfg.MIXUP.ALPHA,
cutmix_alpha=cfg.MIXUP.CUTMIX_ALPHA,
mix_prob=cfg.MIXUP.PROB,
switch_prob=cfg.MIXUP.SWITCH_PROB,
label_smoothing=cfg.MIXUP.LABEL_SMOOTH_VALUE,
num_classes=cfg.MODEL.NUM_CLASSES,
)
for cur_iter, (inputs, labels, _, meta) in enumerate(train_loader):
# Transfer the data to the current GPU device.
if cfg.NUM_GPUS:
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda()
for key, val in meta.items():
if isinstance(val, (list,)):
for i in range(len(val)):
val[i] = val[i].cuda(non_blocking=True)
else:
meta[key] = val.cuda(non_blocking=True)
# Update the learning rate.
lr = optim.get_epoch_lr(cur_epoch + float(cur_iter) / data_size, cfg)
optim.set_lr(optimizer, lr)
train_meter.data_toc()
if cfg.MIXUP.ENABLE:
samples, labels = mixup_fn(inputs[0], labels)
inputs[0] = samples
with torch.cuda.amp.autocast(enabled=cfg.TRAIN.MIXED_PRECISION):
if cfg.DETECTION.ENABLE:
preds = model(inputs, meta["boxes"])
else:
preds = model(inputs)
# Explicitly declare reduction to mean.
loss_fun = losses.get_loss_func(cfg.MODEL.LOSS_FUNC)(reduction="mean")
# Compute the loss
loss = loss_fun(preds, labels)
# check Nan Loss
misc.check_nan_losses(loss)
# Perform the backward pass.
optimizer.zero_grad()
scaler.scale(loss).backward()
# Unscales the gradients of optimizer's assigned params in-place
scaler.unscale_(optimizer)
# Clip gradients if necessary
if cfg.SOLVER.CLIP_GRAD_VAL:
torch.nn.utils.clip_grad_value_(
model.parameters(), cfg.SOLVER.CLIP_GRAD_VAL
)
elif cfg.SOLVER.CLIP_GRAD_L2NORM:
torch.nn.utils.clip_grad_norm_(
model.parameters(), cfg.SOLVER.CLIP_GRAD_L2NORM
)
# Update the parameters.
scaler.step(optimizer)
scaler.update()
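        # (added note) with MixUp the targets are soft two-hot vectors: recover the
        # dominant hard label and fold the second mixed class's prediction score into
        # the first, so the top-k error computation below works on hard labels.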
if cfg.MIXUP.ENABLE:
_top_max_k_vals, top_max_k_inds = torch.topk(
labels, 2, dim=1, largest=True, sorted=True
)
idx_top1 = torch.arange(labels.shape[0]), top_max_k_inds[:, 0]
idx_top2 = torch.arange(labels.shape[0]), top_max_k_inds[:, 1]
preds = preds.detach()
preds[idx_top1] += preds[idx_top2]
preds[idx_top2] = 0.0
labels = top_max_k_inds[:, 0]
if cfg.DETECTION.ENABLE:
if cfg.NUM_GPUS > 1:
loss = du.all_reduce([loss])[0]
loss = loss.item()
# Update and log stats.
train_meter.update_stats(None, None, None, loss, lr)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{"Train/loss": loss, "Train/lr": lr},
global_step=data_size * cur_epoch + cur_iter,
)
else:
top1_err, top5_err = None, None
if cfg.DATA.MULTI_LABEL:
# Gather all the predictions across all the devices.
if cfg.NUM_GPUS > 1:
[loss] = du.all_reduce([loss])
loss = loss.item()
else:
# Compute the errors.
num_topks_correct = metrics.topks_correct(preds, labels, [1, 5])
top1_err, top5_err = [
(1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
]
# Gather all the predictions across all the devices.
if cfg.NUM_GPUS > 1:
loss, top1_err, top5_err = du.all_reduce(
[loss, top1_err, top5_err]
)
# Copy the stats from GPU to CPU (sync point).
loss, top1_err, top5_err = (
loss.item(),
top1_err.item(),
top5_err.item(),
)
# Update and log stats.
train_meter.update_stats(
top1_err,
top5_err,
loss,
lr,
inputs[0].size(0)
* max(
cfg.NUM_GPUS, 1
), # If running on CPU (cfg.NUM_GPUS == 1), use 1 to represent 1 CPU.
)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{
"Train/loss": loss,
"Train/lr": lr,
"Train/Top1_err": top1_err,
"Train/Top5_err": top5_err,
},
global_step=data_size * cur_epoch + cur_iter,
)
train_meter.iter_toc() # measure allreduce for this meter
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
if cur_iter > data_size:
break
# Log epoch stats.
train_meter.log_epoch_stats(cur_epoch)
train_meter.reset()
@torch.no_grad()
def eval_epoch(val_loader, model, val_meter, cur_epoch, cfg, writer=None):
"""
Evaluate the model on the val set.
Args:
val_loader (loader): data loader to provide validation data.
model (model): model to evaluate the performance.
val_meter (ValMeter): meter instance to record and calculate the metrics.
cur_epoch (int): number of the current epoch of training.
        cfg (CfgNode): configs. Details can be found in
            sesame/config/defaults.py
        writer (TensorboardWriter, optional): TensorboardWriter object
            to write the Tensorboard log.
"""
# Evaluation mode enabled. The running stats would not be updated.
model.eval()
val_meter.iter_tic()
data_size = cfg.TRAIN.VALIDATION_STEPS if cfg.TRAIN.VALIDATION_STEPS else len(val_loader)
for cur_iter, (inputs, labels, _, meta) in enumerate(val_loader):
if cfg.NUM_GPUS:
            # Transfer the data to the current GPU device.
if isinstance(inputs, (list,)):
for i in range(len(inputs)):
inputs[i] = inputs[i].cuda(non_blocking=True)
else:
inputs = inputs.cuda(non_blocking=True)
labels = labels.cuda()
for key, val in meta.items():
if isinstance(val, (list,)):
for i in range(len(val)):
val[i] = val[i].cuda(non_blocking=True)
else:
meta[key] = val.cuda(non_blocking=True)
val_meter.data_toc()
if cfg.DETECTION.ENABLE:
# Compute the predictions.
preds = model(inputs, meta["boxes"])
ori_boxes = meta["ori_boxes"]
metadata = meta["metadata"]
if cfg.NUM_GPUS:
preds = preds.cpu()
ori_boxes = ori_boxes.cpu()
metadata = metadata.cpu()
if cfg.NUM_GPUS > 1:
preds = torch.cat(du.all_gather_unaligned(preds), dim=0)
ori_boxes = torch.cat(du.all_gather_unaligned(ori_boxes), dim=0)
metadata = torch.cat(du.all_gather_unaligned(metadata), dim=0)
val_meter.iter_toc()
# Update and log stats.
val_meter.update_stats(preds, ori_boxes, metadata)
else:
preds = model(inputs)
if cfg.DATA.MULTI_LABEL:
if cfg.NUM_GPUS > 1:
preds, labels = du.all_gather([preds, labels])
else:
# Compute the errors.
num_topks_correct = metrics.topks_correct(preds, labels, [1, 5])
# Combine the errors across the GPUs.
top1_err, top5_err = [
(1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct
]
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point).
top1_err, top5_err = top1_err.item(), top5_err.item()
val_meter.iter_toc()
# Update and log stats.
val_meter.update_stats(
top1_err,
top5_err,
inputs[0].size(0)
* max(
cfg.NUM_GPUS, 1
), # If running on CPU (cfg.NUM_GPUS == 1), use 1 to represent 1 CPU.
)
# write to tensorboard format if available.
if writer is not None:
writer.add_scalars(
{"Val/Top1_err": top1_err, "Val/Top5_err": top5_err},
global_step=len(val_loader) * cur_epoch + cur_iter,
)
val_meter.update_predictions(preds, labels)
val_meter.log_iter_stats(cur_epoch, cur_iter)
val_meter.iter_tic()
if cur_iter > data_size:
break
# Log epoch stats.
val_meter.log_epoch_stats(cur_epoch)
# write to tensorboard format if available.
if writer is not None:
if cfg.DETECTION.ENABLE:
writer.add_scalars(
{"Val/mAP": val_meter.full_map}, global_step=cur_epoch
)
else:
all_preds = [pred.clone().detach() for pred in val_meter.all_preds]
all_labels = [
label.clone().detach() for label in val_meter.all_labels
]
if cfg.NUM_GPUS:
all_preds = [pred.cpu() for pred in all_preds]
all_labels = [label.cpu() for label in all_labels]
writer.plot_eval(
preds=all_preds, labels=all_labels, global_step=cur_epoch
)
val_meter.reset()
def build_trainer(cfg):
    # placeholder stub: model/optimizer/loader construction is not implemented yet
    pass
def train(cfg):
    # placeholder stub: the full training loop (wiring train_epoch/eval_epoch across
    # epochs) is not implemented yet
    pass
| 35.929664 | 97 | 0.544302 | 0 | 0 | 0 | 0 | 4,796 | 0.408205 | 0 | 0 | 2,498 | 0.212614 |
e8da2871c87cf2076690059ef9b906d674072b5a | 846 | py | Python | setup.py | stshrive/pycense | 5bfd1b7b6b326a5592f58d621ee596c6c1d8a490 | ["MIT"] | null | null | null | setup.py | stshrive/pycense | 5bfd1b7b6b326a5592f58d621ee596c6c1d8a490 | ["MIT"] | 5 | 2018-09-15T23:40:11.000Z | 2018-10-05T22:57:13.000Z | setup.py | stshrive/pycense | 5bfd1b7b6b326a5592f58d621ee596c6c1d8a490 | ["MIT"] | 1 | 2018-10-04T23:43:42.000Z | 2018-10-04T23:43:42.000Z | import os
import setuptools
VERSION = "1.0.0a1+dev"
INSTALL_REQUIRES = [
'pip-licenses',
]
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research'
]
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setuptools.setup(
name='pycense',
version="0.1.0",
description='Python package license inspector.',
long_description=read('README.md'),
license='MIT',
author='Microsoft Corporation',
author_email='stshrive@microsoft.com', # TODO: not one person :)
url='https://github.com/stshrive/pycense',
zip_safe=True,
classifiers=CLASSIFIERS,
entry_points = {'console_scripts': ['pycense=pycense.__main__:__main__']},
packages=['pycense',],
install_requires=INSTALL_REQUIRES
)
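# (added note) after installation the console script is available as `pycense`,
# dispatching to pycense.__main__:__main__ per the entry_points above.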
| 24.171429 | 78 | 0.682033 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 369 | 0.43617 |
e8db35ba967d9e65ca9d8b55e48764bb1fce3b61 | 101 | py | Python | parents/admin.py | joseph0919/Student_Management_Django | 085e839a86ac574f5ebe83a4911c5808841f50cd | ["MIT"] | null | null | null | parents/admin.py | joseph0919/Student_Management_Django | 085e839a86ac574f5ebe83a4911c5808841f50cd | ["MIT"] | null | null | null | parents/admin.py | joseph0919/Student_Management_Django | 085e839a86ac574f5ebe83a4911c5808841f50cd | ["MIT"] | null | null | null | from django.contrib import admin
from parents.models import Guardian
admin.site.register(Guardian)
| 16.833333 | 35 | 0.831683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e8db7b8afe28efde2a5b3d53186f27fb42108a8d | 1,912 | py | Python | src/test/tests/hybrid/missingdata.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | ["BSD-3-Clause"] | 226 | 2018-12-29T01:13:49.000Z | 2022-03-30T19:16:31.000Z | src/test/tests/hybrid/missingdata.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | ["BSD-3-Clause"] | 5,100 | 2019-01-14T18:19:25.000Z | 2022-03-31T23:08:36.000Z | src/test/tests/hybrid/missingdata.py | visit-dav/vis | c08bc6e538ecd7d30ddc6399ec3022b9e062127e | ["BSD-3-Clause"] | 84 | 2019-01-24T17:41:50.000Z | 2022-03-10T10:01:46.000Z | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: missingdata.py
#
# Tests: missing data
#
# Programmer: Brad Whitlock
# Date: Thu Jan 19 09:49:15 PST 2012
#
# Modifications:
#
# ----------------------------------------------------------------------------
def SetTheView():
v = GetView2D()
v.viewportCoords = (0.02, 0.98, 0.25, 1)
SetView2D(v)
def test0(datapath):
TestSection("Missing data")
OpenDatabase(pjoin(datapath,"earth.nc"))
AddPlot("Pseudocolor", "height")
DrawPlots()
SetTheView()
Test("missingdata_0_00")
ChangeActivePlotsVar("carbon_particulates")
Test("missingdata_0_01")
ChangeActivePlotsVar("seatemp")
Test("missingdata_0_02")
ChangeActivePlotsVar("population")
Test("missingdata_0_03")
# Pick on higher zone numbers to make sure pick works.
PickByNode(domain=0, element=833621)
TestText("missingdata_0_04", GetPickOutput())
DeleteAllPlots()
def test1(datapath):
TestSection("Expressions and missing data")
OpenDatabase(pjoin(datapath,"earth.nc"))
DefineScalarExpression("meaningless", "carbon_particulates + seatemp")
AddPlot("Pseudocolor", "meaningless")
DrawPlots()
SetTheView()
Test("missingdata_1_00")
DeleteAllPlots()
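    # Render an RGB vector expression with a Truecolor plot.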
DefineVectorExpression("color", "color(red,green,blue)")
AddPlot("Truecolor", "color")
DrawPlots()
ResetView()
SetTheView()
Test("missingdata_1_01")
DefineVectorExpression("color2", "color(population*0.364,green,blue)")
ChangeActivePlotsVar("color2")
v1 = GetView2D()
v1.viewportCoords = (0.02, 0.98, 0.02, 0.98)
v1.windowCoords = (259.439, 513.299, 288.93, 540) #25.466)
SetView2D(v1)
Test("missingdata_1_02")
def main():
datapath = data_path("netcdf_test_data")
test0(datapath)
test1(datapath)
main()
Exit()
| 26.555556 | 78 | 0.619247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 845 | 0.441946 |
e8dc347e2e8914b12566e7ce88c38a00dd0fd08d | 540 | py | Python | data_input/__init__.py | uabalabadubdub/ppcd-pec4 | b9b1dfae84fd987c4e9b4ea09c0197ef746b30d7 | [
"CC0-1.0"
] | null | null | null | data_input/__init__.py | uabalabadubdub/ppcd-pec4 | b9b1dfae84fd987c4e9b4ea09c0197ef746b30d7 | [
"CC0-1.0"
] | null | null | null | data_input/__init__.py | uabalabadubdub/ppcd-pec4 | b9b1dfae84fd987c4e9b4ea09c0197ef746b30d7 | [
"CC0-1.0"
] | null | null | null | from pathlib import Path
current_path = Path(".")
datafolder = current_path / "data"
imagefolder = current_path / "images"
if not datafolder.exists():
print(f"Creating {datafolder}/...")
datafolder.mkdir()
if not imagefolder.exists():
print(f"Creating {imagefolder}/...")
imagefolder.mkdir()
print("Scaning root folder...")
for child in current_path.iterdir():
if child.name == 'data.zip':
print(f"Moving {child.name} to data/...")
child.replace(datafolder / child.name)
print("Setup DONE!")
print()
| 24.545455 | 49 | 0.668519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.287037 |
e8dd9680732e2d43b71a4834c51c94a4f70393dd | 4,521 | py | Python | displacy_service_tests/test_server.py | mjfox3/spacy-api-docker | 8622ae1cc3e18835d4675432f9d286794dd380f5 | [
"MIT"
] | null | null | null | displacy_service_tests/test_server.py | mjfox3/spacy-api-docker | 8622ae1cc3e18835d4675432f9d286794dd380f5 | [
"MIT"
] | 1 | 2019-11-08T14:39:55.000Z | 2019-11-08T14:39:55.000Z | displacy_service_tests/test_server.py | mjfox3/spacy-api-docker | 8622ae1cc3e18835d4675432f9d286794dd380f5 | [
"MIT"
] | 1 | 2019-11-07T14:15:37.000Z | 2019-11-07T14:15:37.000Z | import falcon.testing
import pytest
import json
from displacy_service.server import APP, MODELS
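# All tests below run against the first model configured for the service.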
model = MODELS[0]
@pytest.fixture()
def api():
return falcon.testing.TestClient(APP)
def test_deps(api):
result = api.simulate_post(
path='/dep',
body='{{"text": "This is a test.", "model": "{model}", "collapse_punctuation": false, "collapse_phrases": false}}'.format(model=model)
)
result = json.loads(result.text)
words = [w['text'] for w in result['words']]
assert words == ["This", "is", "a", "test", "."]
def test_ents(api):
result = api.simulate_post(
path='/ent',
body='{{"text": "What a great company Google is.", "model": "{model}"}}'.format(model=model))
ents = json.loads(result.text)
assert ents == [
{"start": 21, "end": 27, "type": "ORG", "text": "Google"}]
def test_sents(api):
sentences = api.simulate_post(
path='/sents',
body='{{"text": "This a test that should split into sentences! This is the second. Is this the third?", "model": "{model}"}}'.format(model=model)
)
assert sentences.json == ['This a test that should split into sentences!', 'This is the second.', 'Is this the third?']
def test_sents_dep(api):
sentence_parse = api.simulate_post(
path='/sents_dep',
body='{{"text": "This a test that should split into sentences! This is the second. Is this the third?", "model": "{model}", "collapse_punctuation": false, "collapse_phrases": false}}'.format(model=model)
)
sentences = [sp["sentence"] for sp in sentence_parse.json]
assert sentences == [
"This a test that should split into sentences!",
"This is the second.",
"Is this the third?",
]
words = [[w["text"] for w in sp["dep_parse"]["words"]] for sp in sentence_parse.json]
assert words == [
["This", "a", "test", "that", "should", "split", "into", "sentences", "!"],
["This", "is", "the", "second", "."],
["Is", "this", "the", "third", "?"],
]
arcs = [[arc for arc in sp['dep_parse']['arcs']] for sp in sentence_parse.json]
assert arcs == [[{'start': 0, 'end': 2, 'label': 'det', 'text': 'This', 'dir': 'left'},
{'start': 1, 'end': 2, 'label': 'det', 'text': 'a', 'dir': 'left'},
{'start': 2, 'end': 2, 'label': 'ROOT', 'text': 'test', 'dir': 'root'},
{'start': 3, 'end': 5, 'label': 'nsubj', 'text': 'that', 'dir': 'left'},
{'start': 4, 'end': 5, 'label': 'aux', 'text': 'should', 'dir': 'left'},
{'start': 2, 'end': 5, 'label': 'relcl', 'text': 'split', 'dir': 'right'},
{'start': 5, 'end': 6, 'label': 'prep', 'text': 'into', 'dir': 'right'},
{'start': 6, 'end': 7, 'label': 'pobj', 'text': 'sentences', 'dir': 'right'},
{'start': 2, 'end': 8, 'label': 'punct', 'text': '!', 'dir': 'right'}],
[{'start': 9, 'end': 10, 'label': 'nsubj', 'text': 'This', 'dir': 'left'},
{'start': 10, 'end': 10, 'label': 'ROOT', 'text': 'is', 'dir': 'root'},
{'start': 11, 'end': 12, 'label': 'det', 'text': 'the', 'dir': 'left'},
{'start': 10, 'end': 12, 'label': 'attr', 'text': 'second', 'dir': 'right'},
{'start': 10, 'end': 13, 'label': 'punct', 'text': '.', 'dir': 'right'}],
[{'start': 14, 'end': 14, 'label': 'ROOT', 'text': 'Is', 'dir': 'root'},
{'start': 14, 'end': 15, 'label': 'nsubj', 'text': 'this', 'dir': 'right'},
{'start': 16, 'end': 17, 'label': 'det', 'text': 'the', 'dir': 'left'},
{'start': 14, 'end': 17, 'label': 'attr', 'text': 'third', 'dir': 'right'},
{'start': 14, 'end': 18, 'label': 'punct', 'text': '?', 'dir': 'right'}]]
@pytest.mark.parametrize('endpoint, expected_message', [
('/dep', 'Dependency parsing failed'),
('/ent', 'Text parsing failed'),
('/sents', 'Sentence tokenization failed'),
('/sents_dep', 'Sentence tokenization and Dependency parsing failed'),
])
def test_bad_model_error_handling(endpoint, expected_message, api):
response = api.simulate_post(
path=endpoint,
body='{"text": "Here is some text for testing.", "model": "fake_model"}'
)
assert expected_message == response.json['title']
assert "Can't find model 'fake_model'." in response.json["description"]
| 47.09375 | 211 | 0.523114 | 0 | 0 | 0 | 0 | 674 | 0.149082 | 0 | 0 | 2,138 | 0.472904 |
e8df842f0c6982487b7ca3ace562a894cc9d1940 | 606 | py | Python | exposures/generate_passwords.py | jarnoln/exposures | bbae3f79078048d25b77e178db6c0801ffe9f97e | [
"MIT"
] | null | null | null | exposures/generate_passwords.py | jarnoln/exposures | bbae3f79078048d25b77e178db6c0801ffe9f97e | [
"MIT"
] | null | null | null | exposures/generate_passwords.py | jarnoln/exposures | bbae3f79078048d25b77e178db6c0801ffe9f97e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import random
import argparse
def generate_passwords(password_file_path):
    # Lowercase letters, digits, and a few symbols for the generated secret key.
    chars = 'abcdefghijklmnopqrstuvwxyz0123456789_-!*'
    secret_key = ''.join(random.SystemRandom().choice(chars) for _ in range(50))
    with open(password_file_path, 'w') as password_file:
        password_file.write("SECRET_KEY = '%s'\n" % secret_key)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('password_file_path', help='Where password file will be placed')
args = parser.parse_args()
generate_passwords(args.password_file_path)
| 31.894737 | 88 | 0.737624 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.255776 |
e8e12c70a26b28e73712420fd03691434cb4267c | 13,354 | py | Python | adversarial-transfer-nlp/CW_attack.py | AI-secure/Uncovering-the-Connections-BetweenAdversarial-Transferability-and-Knowledge-Transferability | a2fb10f56618c6d6dd1638967d59c4a83ffa1c05 | [
"CC0-1.0"
] | 8 | 2021-06-18T10:32:27.000Z | 2022-01-16T06:46:25.000Z | adversarial-transfer-nlp/CW_attack.py | AI-secure/Does-Adversairal-Transferability-Indicate-Knowledge-Transferability | a2fb10f56618c6d6dd1638967d59c4a83ffa1c05 | [
"CC0-1.0"
] | 2 | 2021-08-25T15:14:12.000Z | 2022-02-09T23:55:46.000Z | adversarial-transfer-nlp/CW_attack.py | AI-secure/Does-Adversairal-Transferability-Indicate-Knowledge-Transferability | a2fb10f56618c6d6dd1638967d59c4a83ffa1c05 | [
"CC0-1.0"
] | null | null | null | import sys
import torch
import numpy as np
from torch import optim
from util import args
class CarliniL2:
def __init__(self, targeted=True, search_steps=None, max_steps=None, cuda=True, debug=False, num_classes=14):
self.debug = debug
self.targeted = targeted
self.num_classes = num_classes
self.confidence = args.confidence # FIXME need to find a good value for this, 0 value used in paper not doing much...
self.initial_const = args.const # bumped up from default of .01 in reference code
self.binary_search_steps = search_steps or 1
self.repeat = self.binary_search_steps >= 10
self.max_steps = max_steps or args.max_steps
self.abort_early = True
self.cuda = cuda
self.mask = None
self.batch_info = None
self.wv = None
self.seq = None
self.seq_len = None
self.init_rand = False # an experiment, does a random starting point help?
def _compare(self, output, target):
if not isinstance(output, (float, int, np.int64)):
output = np.copy(output)
# if self.targeted:
# output[target] -= self.confidence
# else:
# output[target] += self.confidence
output = np.argmax(output)
if self.targeted:
return output == target
else:
return output != target
def _compare_untargeted(self, output, target):
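        # In targeted mode, a near-miss (one class id away) also counts as a match.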
if not isinstance(output, (float, int, np.int64)):
output = np.copy(output)
# if self.targeted:
# output[target] -= self.confidence
# else:
# output[target] += self.confidence
output = np.argmax(output)
if self.targeted:
return output == target + 1 or output == target - 1
else:
return output != target
def _loss(self, output, target, dist, scale_const):
# compute the probability of the label class versus the maximum other
real = (target * output).sum(1)
other = ((1. - target) * output - target * 10000.).max(1)[0]
if self.targeted:
# if targeted, optimize for making the other class most likely
loss1 = torch.clamp(other - real + self.confidence, min=0.) # equiv to max(..., 0.)
else:
# if non-targeted, optimize for making this class least likely.
loss1 = torch.clamp(real - other + self.confidence, min=0.) # equiv to max(..., 0.)
loss1 = torch.sum(scale_const * loss1)
loss2 = dist.sum()
if args.debug_cw:
print("loss 1:", loss1.item(), " loss 2:", loss2.item())
loss = loss1 + loss2
return loss
def _optimize(self, optimizer, model, input_var, modifier_var, target_var, scale_const_var, input_token=None):
# apply modifier and clamp resulting image to keep bounded from clip_min to clip_max
batch_adv_sent = []
if self.mask is None:
# not word-level attack
input_adv = modifier_var + input_var
output = model(input_adv)
input_adv = model.get_embedding()
input_var = input_token
seqback = model.get_seqback()
batch_adv_sent = seqback.adv_sent.copy()
seqback.adv_sent = []
# input_adv = self.itereated_var = modifier_var + self.itereated_var
else:
# word level attack
input_adv = modifier_var * self.mask + self.itereated_var
# input_adv = modifier_var * self.mask + input_var
for i in range(input_adv.size(0)):
# for batch size
new_word_list = []
add_start = self.batch_info['add_start'][i]
add_end = self.batch_info['add_end'][i]
if add_end < 0:
add_end = len(input_adv[i]) - 1
for j in range(add_start, add_end):
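                    # Snap the perturbed embedding back to the nearest real word vector (L2).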
new_placeholder = input_adv[i, j].data
temp_place = new_placeholder.expand_as(self.wv)
new_dist = torch.norm(temp_place - self.wv.data, 2, -1)
_, new_word = torch.min(new_dist, 0)
new_word_list.append(new_word.item())
# input_adv.data[j, i] = self.wv[new_word.item()].data
input_adv.data[i, j] = self.itereated_var.data[i, j] = self.wv[new_word.item()].data
del temp_place
batch_adv_sent.append(new_word_list)
output = model(self.seq, self.batch_info['segment_ids'], self.batch_info['input_mask'], inputs_embeds=input_adv)
if args.debug_cw:
print("output:", batch_adv_sent)
print("input_adv:", input_adv)
print("output:", output)
adv_seq = torch.tensor(self.seq)
for bi, (add_start, add_end) in enumerate(zip(self.batch_info['add_start'], self.batch_info['add_end'])):
adv_seq.data[bi, add_start:add_end] = torch.LongTensor(batch_adv_sent)
print("out:", adv_seq)
print("out embedding:", model.bert.embeddings.word_embeddings(adv_seq))
out = model(adv_seq, self.seq_len)['pred']
print("out:", out)
def reduce_sum(x, keepdim=True):
# silly PyTorch, when will you get proper reducing sums/means?
for a in reversed(range(1, x.dim())):
x = x.sum(a, keepdim=keepdim)
return x
def l1_dist(x, y, keepdim=True):
d = torch.abs(x - y)
return reduce_sum(d, keepdim=keepdim)
def l2_dist(x, y, keepdim=True):
d = (x - y) ** 2
return reduce_sum(d, keepdim=keepdim)
# distance to the original input data
if args.l1:
dist = l1_dist(input_adv, input_var, keepdim=False)
else:
dist = l2_dist(input_adv, input_var, keepdim=False)
loss = self._loss(output, target_var, dist, scale_const_var)
if args.debug_cw:
print(loss)
optimizer.zero_grad()
if input_token is None:
loss.backward()
else:
loss.backward(retain_graph=True)
torch.nn.utils.clip_grad_norm_([modifier_var], args.clip)
# print(modifier_var)
optimizer.step()
# print(modifier_var)
# modifier_var.data -= 2 * modifier_var.grad.data
# modifier_var.grad.data.zero_()
loss_np = loss.item()
dist_np = dist.data.cpu().numpy()
output_np = output.data.cpu().numpy()
input_adv_np = input_adv.data.cpu().numpy()
return loss_np, dist_np, output_np, input_adv_np, batch_adv_sent
def run(self, model, input, target, batch_idx=0, batch_size=None, input_token=None):
if batch_size is None:
batch_size = input.size(0) # ([length, batch_size, nhim])
# set the lower and upper bounds accordingly
lower_bound = np.zeros(batch_size)
scale_const = np.ones(batch_size) * self.initial_const
upper_bound = np.ones(batch_size) * 1e10
# python/numpy placeholders for the overall best l2, label score, and adversarial image
o_best_l2 = [1e10] * batch_size
o_best_score = [-1] * batch_size
o_best_logits = {}
if input_token is None:
best_attack = input.cpu().detach().numpy()
o_best_attack = input.cpu().detach().numpy()
else:
best_attack = input_token.cpu().detach().numpy()
o_best_attack = input_token.cpu().detach().numpy()
self.o_best_sent = {}
self.best_sent = {}
# setup input (image) variable, clamp/scale as necessary
input_var = torch.tensor(input, requires_grad=False)
self.itereated_var = torch.tensor(input_var)
# setup the target variable, we need it to be in one-hot form for the loss function
target_onehot = torch.zeros(target.size() + (self.num_classes,))
# print(target_onehot.size())
if self.cuda:
target_onehot = target_onehot.cuda()
target_onehot.scatter_(1, target.unsqueeze(1), 1.)
target_var = torch.tensor(target_onehot, requires_grad=False)
# setup the modifier variable, this is the variable we are optimizing over
modifier = torch.zeros(input_var.size()).float().cuda()
if self.cuda:
modifier = modifier.cuda()
modifier_var = torch.tensor(modifier, requires_grad=True)
optimizer = optim.Adam([modifier_var], lr=args.lr)
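        # Outer loop: binary search over scale_const, trading perturbation size against attack success.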
for search_step in range(self.binary_search_steps):
if args.debug_cw:
print('Batch: {0:>3}, search step: {1}'.format(batch_idx, search_step))
print('Const:')
for i, x in enumerate(scale_const):
print(i, x)
best_l2 = [1e10] * batch_size
best_score = [-1] * batch_size
best_logits = {}
# The last iteration (if we run many steps) repeat the search once.
if self.repeat and search_step == self.binary_search_steps - 1:
scale_const = upper_bound
scale_const_tensor = torch.from_numpy(scale_const).float()
if self.cuda:
scale_const_tensor = scale_const_tensor.cuda()
scale_const_var = torch.tensor(scale_const_tensor, requires_grad=False)
for step in range(self.max_steps):
# perform the attack
if self.mask is None:
if args.decreasing_temp:
cur_temp = args.temp - (args.temp - 0.1) / (self.max_steps - 1) * step
model.set_temp(cur_temp)
if args.debug_cw:
print("temp:", cur_temp)
else:
model.set_temp(args.temp)
loss, dist, output, adv_img, adv_sents = self._optimize(
optimizer,
model,
input_var,
modifier_var,
target_var,
scale_const_var,
input_token)
for i in range(batch_size):
target_label = target[i]
output_logits = output[i]
output_label = np.argmax(output_logits)
di = dist[i]
if self.debug:
if step % 100 == 0:
print('{0:>2} dist: {1:.5f}, output: {2:>3}, {3:5.3}, target {4:>3}'.format(
i, di, output_label, output_logits[output_label], target_label))
if di < best_l2[i] and self._compare_untargeted(output_logits, target_label):
# if self._compare(output_logits, target_label):
if self.debug:
print('{0:>2} best step, prev dist: {1:.5f}, new dist: {2:.5f}'.format(
i, best_l2[i], di))
best_l2[i] = di
best_score[i] = output_label
best_logits[i] = output_logits
best_attack[i] = adv_img[i]
self.best_sent[i] = adv_sents[i]
if di < o_best_l2[i] and self._compare(output_logits, target_label):
# if self._compare(output_logits, target_label):
if self.debug:
print('{0:>2} best total, prev dist: {1:.5f}, new dist: {2:.5f}'.format(
i, o_best_l2[i], di))
o_best_l2[i] = di
o_best_score[i] = output_label
o_best_logits[i] = output_logits
o_best_attack[i] = adv_img[i]
self.o_best_sent[i] = adv_sents[i]
sys.stdout.flush()
# end inner step loop
# adjust the constants
batch_failure = 0
batch_success = 0
for i in range(batch_size):
if self._compare(o_best_score[i], target[i]) and o_best_score[i] != -1:
batch_success += 1
if args.debug_cw:
print(self.o_best_sent[i])
print(o_best_score[i])
print(o_best_logits[i])
elif self._compare_untargeted(best_score[i], target[i]) and best_score[i] != -1:
o_best_l2[i] = best_l2[i]
o_best_score[i] = best_score[i]
o_best_attack[i] = best_attack[i]
self.o_best_sent[i] = self.best_sent[i]
if args.debug_cw:
print(self.o_best_sent[i])
print(o_best_score[i])
print(o_best_logits[i])
batch_success += 1
else:
batch_failure += 1
print('Num failures: {0:2d}, num successes: {1:2d}\n'.format(batch_failure, batch_success))
sys.stdout.flush()
# end outer search loop
return o_best_attack
| 44.962963 | 126 | 0.543582 | 13,261 | 0.993036 | 0 | 0 | 0 | 0 | 0 | 0 | 2,242 | 0.16789 |
e8e227fac1aeb6cb15d17a60f96b91194af13f7f | 639 | py | Python | api/cueSearch/migrations/0005_searchcardtemplate_connectiontype.py | cuebook/CueSearch | 8bf047de273b27bba41b8bf4e266aac1eee7f81a | [
"Apache-2.0"
] | 3 | 2022-02-10T17:00:19.000Z | 2022-03-29T14:31:25.000Z | api/cueSearch/migrations/0005_searchcardtemplate_connectiontype.py | cuebook/CueSearch | 8bf047de273b27bba41b8bf4e266aac1eee7f81a | [
"Apache-2.0"
] | null | null | null | api/cueSearch/migrations/0005_searchcardtemplate_connectiontype.py | cuebook/CueSearch | 8bf047de273b27bba41b8bf4e266aac1eee7f81a | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.5 on 2022-02-18 08:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("dataset", "0001_initial"),
("cueSearch", "0004_auto_20220217_0217"),
]
operations = [
migrations.AddField(
model_name="searchcardtemplate",
name="connectionType",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="dataset.connectiontype",
),
),
]
| 24.576923 | 61 | 0.57277 | 513 | 0.802817 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.259781 |
e8e29b7b8972a06149daa7c111affc519cbc1ff4 | 629 | py | Python | FormulaAccordingPrint.py | FreeBirdsCrew/Brainstorming_Codes | 9d06216cd0772ce56586acff2c240a210b94ba1f | [
"Apache-2.0"
] | 1 | 2020-12-11T10:24:08.000Z | 2020-12-11T10:24:08.000Z | FormulaAccordingPrint.py | FreeBirdsCrew/Brainstorming_Codes | 9d06216cd0772ce56586acff2c240a210b94ba1f | [
"Apache-2.0"
] | null | null | null | FormulaAccordingPrint.py | FreeBirdsCrew/Brainstorming_Codes | 9d06216cd0772ce56586acff2c240a210b94ba1f | [
"Apache-2.0"
] | null | null | null | """
Write a program that calculates and prints the value according to the given formula:
Q = Square root of [(2 * C * D)/H]
Following are the fixed values of C and H:
C is 50. H is 30.
D is the variable whose values should be input to your program in a comma-separated sequence.
Example
Let us assume the following comma separated input sequence is given to the program:
100,150,180
The output of the program should be:
18,22,24
"""
import math
c=50
h=30
value = []
items = [x for x in input().split(',')]  # Python 3 input(); use raw_input() on Python 2
for d in items:
value.append(str(int(round(math.sqrt(2*c*float(d)/h)))))
print(','.join(value)) | 28.590909 | 94 | 0.694754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 449 | 0.713831 |
e8e2e1e3ca7f9bd9067ac2608c856319f1c4cab7 | 675 | py | Python | src/django_sys_indicator/utils.py | marksweb/django-sys-indicator | 7318a7df8a6cda613a999d1129d00a181403cbaf | [
"MIT"
] | 1 | 2022-02-12T15:33:53.000Z | 2022-02-12T15:33:53.000Z | src/django_sys_indicator/utils.py | marksweb/django-sys-indicator | 7318a7df8a6cda613a999d1129d00a181403cbaf | [
"MIT"
] | null | null | null | src/django_sys_indicator/utils.py | marksweb/django-sys-indicator | 7318a7df8a6cda613a999d1129d00a181403cbaf | [
"MIT"
] | null | null | null | from __future__ import annotations
from django.template.loader import render_to_string
from .conf import settings
def django_sys_indicator_tag() -> str:
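    """Render the system-indicator snippet with the configured label and colours."""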
template_name = 'django_sys_indicator/system_indicator.html'
try:
color, border_color = settings.SYSTEM_INDICATOR_COLORS[
settings.SYSTEM_INDICATOR_COLOR
]
except KeyError:
# Invalid colour chosen
color, border_color = settings.SYSTEM_INDICATOR_COLORS['red']
return render_to_string(
template_name,
{
'label': settings.SYSTEM_INDICATOR_LABEL,
'color': color,
'border_color': border_color,
}
)
| 25.961538 | 69 | 0.668148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.148148 |
e8e431aaa0e0c0342df0906099d3bfe584c4dc40 | 262 | py | Python | MUNDO 1/ex023.py | athavus/Curso-em-video-Python-3 | a32be95adbccfcbe512a1ed30d3859141a230b5e | [
"MIT"
] | 1 | 2020-11-12T14:03:32.000Z | 2020-11-12T14:03:32.000Z | MUNDO 1/ex023.py | athavus/Curso-em-video-Python-3 | a32be95adbccfcbe512a1ed30d3859141a230b5e | [
"MIT"
] | null | null | null | MUNDO 1/ex023.py | athavus/Curso-em-video-Python-3 | a32be95adbccfcbe512a1ed30d3859141a230b5e | [
"MIT"
] | 1 | 2021-01-05T22:18:46.000Z | 2021-01-05T22:18:46.000Z | n1 = int(input('Enter a number between 0 and 9999: '))
u = n1 // 1 % 10     # units digit
d = n1 // 10 % 10    # tens digit
c = n1 // 100 % 10   # hundreds digit
m = n1 // 1000 % 10  # thousands digit
print(f'Analyzing the number {n1}')
print(f'units: {u}')
print(f'tens: {d}')
print(f'hundreds: {c}')
print(f'thousands: {m}')
| 23.818182 | 53 | 0.549618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.462121 |
e8e44f8a42a8c9211af1456c9b87394460144341 | 3,366 | py | Python | openapi_server/controllers/instance_metadata_controller.py | eugenegesdisc/gmuedr | e8b3e5c7b8d18421d875f0f6f778a37a6d8ec3fd | [
"MIT"
] | null | null | null | openapi_server/controllers/instance_metadata_controller.py | eugenegesdisc/gmuedr | e8b3e5c7b8d18421d875f0f6f778a37a6d8ec3fd | [
"MIT"
] | null | null | null | openapi_server/controllers/instance_metadata_controller.py | eugenegesdisc/gmuedr | e8b3e5c7b8d18421d875f0f6f778a37a6d8ec3fd | [
"MIT"
] | null | null | null | from typing import List, Dict
from aiohttp import web
from openapi_server.models.edr_feature_collection_geo_json import EdrFeatureCollectionGeoJSON
from openapi_server.models.exception import Exception
from openapi_server.models.one_ofobjectobject import OneOfobjectobject
from openapi_server import util
async def list_data_instance_locations(request: web.Request, collection_id, instance_id, bbox=None, datetime=None, limit=None) -> web.Response:
"""List available location identifers for the instance
List the locations available for the instance of the collection
:param collection_id: Identifier (id) of a specific collection
:type collection_id: str
:param instance_id: Identifier (id) of a specific instance of a collection
:type instance_id: str
:param bbox: Only features that have a geometry that intersects the bounding box are selected. The bounding box is provided as four or six numbers, depending on whether the coordinate reference system includes a vertical axis (height or depth): * Lower left corner, coordinate axis 1 * Lower left corner, coordinate axis 2 * Minimum value, coordinate axis 3 (optional) * Upper right corner, coordinate axis 1 * Upper right corner, coordinate axis 2 * Maximum value, coordinate axis 3 (optional) The coordinate reference system of the values is specified by the `crs` query parameter. If the `crs` query parameter is not defined the coordinate reference system is defined by the default `crs` for the query type. If a default `crs` has not been defined the values will be assumed to be in the WGS 84 longitude/latitude (http://www.opengis.net/def/crs/OGC/1.3/CRS84) coordinate reference system. For WGS 84 longitude/latitude the values are in most cases the sequence of minimum longitude, minimum latitude, maximum longitude and maximum latitude. However, in cases where the box spans the antimeridian the first value (west-most box edge) is larger than the third value (east-most box edge). If the vertical axis is included, the third and the sixth number are the bottom and the top of the 3-dimensional bounding box. If a feature has multiple spatial geometry properties, it is the decision of the server whether only a single spatial geometry property is used to determine the extent or all relevant geometries.
:type bbox: dict | bytes
:param datetime: Either a date-time or an interval, open or closed. Date and time expressions adhere to RFC 3339. Open intervals are expressed using double-dots. Examples: * A date-time: \"2018-02-12T23:20:50Z\" * A closed interval: \"2018-02-12T00:00:00Z/2018-03-18T12:31:12Z\" * Open intervals: \"2018-02-12T00:00:00Z/..\" or \"../2018-03-18T12:31:12Z\" Only features that have a temporal property that intersects the value of `datetime` are selected. If a feature has multiple temporal properties, it is the decision of the server whether only a single temporal property is used to determine the extent or all relevant temporal properties.
:type datetime: str
:param limit: The optional limit parameter limits the number of results that are presented in the response document. Minimum = 1. Maximum = 10000. Default = 10.
:type limit: int
"""
# bbox = .from_dict(bbox)
return web.Response(status=200)
| 116.068966 | 1,557 | 0.778372 | 0 | 0 | 0 | 0 | 0 | 0 | 3,057 | 0.9082 | 2,872 | 0.853238 |
e8e693322ca4748ac11e7ad6f26ec9749c3ce95e | 904 | py | Python | Python Notebook/Python files/data_utility.py | wilfy9249/Capstone-Fall-18 | 832632eb00a10240e0ad16c364449d5020814c83 | [
"MIT"
] | 2 | 2018-10-24T21:32:17.000Z | 2019-02-19T21:15:29.000Z | Python Notebook/Python files/data_utility.py | wilfy9249/Capstone-Fall-18 | 832632eb00a10240e0ad16c364449d5020814c83 | [
"MIT"
] | null | null | null | Python Notebook/Python files/data_utility.py | wilfy9249/Capstone-Fall-18 | 832632eb00a10240e0ad16c364449d5020814c83 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import os
# In[2]:
#function to get current directory
def getCurrentDirectory():
listDirectory = os.listdir('../')
return listDirectory
# In[3]:
#function to read csv file
def readCsvFile(path):
crimes_original = pd.read_csv(path, low_memory=False)
return crimes_original
# In[4]:
#function to filter Data
def filterData(data,column,value):
    filtered = data.loc[data[column] == value]
    return filtered
# In[5]:
#function to get count of a value
def getCount(data,column,columnName):
data_count = pd.DataFrame({columnName:data.groupby(column).size()}).reset_index()
return data_count
# In[7]:
#function to sort
def sortValue(data,column,ascBoolean):
sorted_data = data.sort_values(column,ascending = ascBoolean)
return sorted_data
# In[ ]:
| 14.580645 | 85 | 0.692478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.255531 |
e8e88d99dce51ba1201b6f79edf2263f1faa8d1a | 125 | py | Python | microtbs_rl/algorithms/common/__init__.py | alex-petrenko/simple-reinforcement-learning | d0da1d9026d1f05e2552d08e56fbe58ad869fafd | [
"MIT"
] | 8 | 2018-03-05T05:13:39.000Z | 2021-02-27T03:12:05.000Z | microtbs_rl/algorithms/common/__init__.py | alex-petrenko/simple-reinforcement-learning | d0da1d9026d1f05e2552d08e56fbe58ad869fafd | [
"MIT"
] | null | null | null | microtbs_rl/algorithms/common/__init__.py | alex-petrenko/simple-reinforcement-learning | d0da1d9026d1f05e2552d08e56fbe58ad869fafd | [
"MIT"
] | 4 | 2018-09-04T04:44:26.000Z | 2021-07-22T06:34:51.000Z | from microtbs_rl.algorithms.common.agent import AgentLearner
from microtbs_rl.algorithms.common.loops import run_policy_loop
| 41.666667 | 63 | 0.888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
e8e961a8029a5dbe75bc908fc0b55465398ece35 | 806 | py | Python | retrieve/GetTotalEarnings.py | TRiCAM-Lab/task-effort | 69d072470359a20dfb3eb5df84bf1331a48e59f2 | [
"MIT"
] | null | null | null | retrieve/GetTotalEarnings.py | TRiCAM-Lab/task-effort | 69d072470359a20dfb3eb5df84bf1331a48e59f2 | [
"MIT"
] | 1 | 2022-03-25T19:16:43.000Z | 2022-03-25T19:16:43.000Z | retrieve/GetTotalEarnings.py | TRiCAM-Lab/task-effort | 69d072470359a20dfb3eb5df84bf1331a48e59f2 | [
"MIT"
] | null | null | null |
import sys
from collections.abc import Mapping
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
# Use a service account
cred = credentials.Certificate('./service-account-key.json')
firebase_admin.initialize_app(cred)
db = firestore.client()
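# Each stdin line holds a document id to look up in the db_pilot_test collection.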
for row in sys.stdin:
id = row.strip()
sub = db.document(u'db_pilot_test', id).get()
if sub.exists:
#print (f'it exists, {sub.id}')
#print(f'{sub.id}, {sub.to_dict().get("totalEarnings") or 0}')
try:
sub.to_dict().get("totalEarnings")
except AttributeError:
print (f'Attribute error, {sub.id}')
else:
print(f'{sub.id}, {sub.to_dict().get("totalEarnings")}')
#else:
#print (f'nope it does not, {sub.id}')
| 24.424242 | 70 | 0.636476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 296 | 0.367246 |
e8eaacdbf20c0fc2ed919b5008d2aa81872089dc | 192 | py | Python | pyGAE/handlers/ManageSubscriptionHandler.py | analyticstraining/pycocms | 29d7c3eea9377495bcafd8b8c62016c21c1a74a7 | [
"MIT"
] | null | null | null | pyGAE/handlers/ManageSubscriptionHandler.py | analyticstraining/pycocms | 29d7c3eea9377495bcafd8b8c62016c21c1a74a7 | [
"MIT"
] | null | null | null | pyGAE/handlers/ManageSubscriptionHandler.py | analyticstraining/pycocms | 29d7c3eea9377495bcafd8b8c62016c21c1a74a7 | [
"MIT"
] | null | null | null | from BaseHandler import BaseHandler, user_required
class ManageSubscriptionHandler(BaseHandler):
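    # Only signed-in users reach get(), enforced by the user_required decorator.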
@user_required
def get(self):
self.render_template('manage_subscription.html') | 32 | 56 | 0.786458 | 140 | 0.729167 | 0 | 0 | 90 | 0.46875 | 0 | 0 | 26 | 0.135417 |
e8ec71dfe68f78e0bbd64c46510e470c4242fa2e | 1,228 | py | Python | aiphysim/models/spacetime.py | perovai/deepkoopman | eb6de915f5ea1f20b47cb3a22a384f55c30f0558 | [
"MIT"
] | null | null | null | aiphysim/models/spacetime.py | perovai/deepkoopman | eb6de915f5ea1f20b47cb3a22a384f55c30f0558 | [
"MIT"
] | 10 | 2021-07-07T09:24:33.000Z | 2021-09-27T14:32:59.000Z | aiphysim/models/spacetime.py | perovai/deepkoopman | eb6de915f5ea1f20b47cb3a22a384f55c30f0558 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
class SpaceTime(nn.Module):
    def __init__(self, opts):
        super().__init__()  # nn.Module setup must run before any submodules are registered
        # TODO: Add things like no. of hidden layers to opts
        pass
class LSTM(nn.Module):
# This class is largely derived from
# https://stackabuse.com/time-series-prediction-using-lstm-with-pytorch-in-python on 20210701.
def __init__(self, input_size=2, hidden_layer_size=100, output_size=2):
# param input_size: number of components in input vector
# param output_size: number of components in output vector
# param hidden_layer_size: number of components in hidden layer
super().__init__()
self.hidden_layer_size = hidden_layer_size
self.lstm = nn.LSTM(input_size, hidden_layer_size)
self.linear = nn.Linear(hidden_layer_size, output_size)
self.hidden_cell = (
torch.zeros(1, 1, self.hidden_layer_size),
torch.zeros(1, 1, self.hidden_layer_size),
)
def forward(self, input_seq):
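        # nn.LSTM expects (seq_len, batch, features); the sequence is fed as a batch of one.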
lstm_out, self.hidden_cell = self.lstm(
input_seq.view(len(input_seq), 1, -1), self.hidden_cell
)
predictions = self.linear(lstm_out.view(len(input_seq), -1))
return predictions[-1]
| 34.111111 | 98 | 0.666124 | 1,187 | 0.966612 | 0 | 0 | 0 | 0 | 0 | 0 | 359 | 0.292345 |
e8ed02dac89d480ead9705b1ad919290dfc731c8 | 934 | py | Python | fabfile/text.py | nprapps/austin | 45237e878260678bbeb57801e798b89e67ad4e0b | [
"MIT"
] | 7 | 2015-01-26T16:02:49.000Z | 2015-04-01T12:37:52.000Z | fabfile/text.py | nprapps/austin | 45237e878260678bbeb57801e798b89e67ad4e0b | [
"MIT"
] | 272 | 2015-01-26T16:37:22.000Z | 2016-04-04T17:08:55.000Z | fabfile/text.py | nprapps/austin | 45237e878260678bbeb57801e798b89e67ad4e0b | [
"MIT"
] | 4 | 2015-03-05T00:38:17.000Z | 2021-02-23T10:26:28.000Z | #!/usr/bin/env python
"""
Commands related to syncing copytext from Google Docs.
"""
from fabric.api import task
from termcolor import colored
import app_config
from etc.gdocs import GoogleDoc
@task(default=True)
def update():
"""
Downloads a Google Doc as an Excel file.
"""
    if app_config.COPY_GOOGLE_DOC_URL is None:
        print(colored('You have set COPY_GOOGLE_DOC_URL to None. If you want to use a Google Sheet, set COPY_GOOGLE_DOC_URL to the URL of your sheet in app_config.py', 'blue'))
        return
else:
doc = {}
url = app_config.COPY_GOOGLE_DOC_URL
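        # The document key appears either as a ?key= query parameter or inside a /d/<key>/ path.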
if 'key' in url:
bits = url.split('key=')
bits = bits[1].split('&')
doc['key'] = bits[0]
else:
bits = url.split('/d/')
bits = bits[1].split('/')
doc['key'] = bits[0]
g = GoogleDoc(**doc)
g.get_auth()
g.get_document()
| 24.578947 | 175 | 0.586724 | 0 | 0 | 0 | 0 | 735 | 0.786938 | 0 | 0 | 321 | 0.343683 |
e8ed68a76b6810bfc7416102a15fd740faaea0ec | 4,699 | py | Python | program.py | jaesik817/programmable-agents_tensorflow | b64d1774803c585e87aa9769beadde31e18f8ea4 | [
"MIT"
] | 39 | 2017-09-25T02:01:18.000Z | 2019-06-18T15:17:53.000Z | program.py | jsikyoon/programmable-agents_tensorflow | b64d1774803c585e87aa9769beadde31e18f8ea4 | [
"MIT"
] | 5 | 2017-09-22T00:40:09.000Z | 2018-05-07T15:11:11.000Z | program.py | jsikyoon/programmable-agents_tensorflow | b64d1774803c585e87aa9769beadde31e18f8ea4 | [
"MIT"
] | 10 | 2017-09-25T06:49:12.000Z | 2019-06-18T10:17:03.000Z | import tensorflow as tf
import numpy as np
import math
# Parameter
order_num=2;
class Program:
def __init__(self,sess,state_dim,obj_num,fea_size,Theta,program_order,postfix):
self.sess = sess;
self.state_dim = state_dim;
self.fea_size=fea_size;
self.obj_num=obj_num;
self.order_num=order_num;
self.Theta=Theta;
self.program_order=program_order;
self.postfix=postfix;
self.p = self.compile_order();
def compile_order(self):
self.Theta=tf.reshape(self.Theta,[-1,self.obj_num,6]);
self.Theta=tf.transpose(self.Theta,perm=[0,2,1]);
self.Theta=tf.unstack(self.Theta,6,1);
# temporary ordering
p_1=tf.multiply(self.Theta[0],self.Theta[3]);
p_1=p_1+self.Theta[5];
p_2=tf.multiply(self.Theta[1],self.Theta[3]);
p_2=p_2+self.Theta[5];
p_3=tf.multiply(self.Theta[0],self.Theta[4]);
p_3=p_3+self.Theta[5];
p_4=tf.multiply(self.Theta[1],self.Theta[4]);
p_4=p_4+self.Theta[5];
program_order2=tf.unstack(self.program_order,(self.obj_num-1),1);
p=tf.multiply(tf.stack([program_order2[0]]*(self.obj_num),1),p_1)+tf.multiply(tf.stack([program_order2[1]]*(self.obj_num),1),p_2)+tf.multiply(tf.stack([program_order2[2]]*(self.obj_num),1),p_3)+tf.multiply(tf.stack([program_order2[3]]*(self.obj_num),1),p_4);
# Currently tf.cond makes problems
"""
program_order2=tf.unstack(self.program_order,self.order_num,1);
for i in range(self.order_num):
program_order2[i]=tf.unstack(program_order2[i],3,1);
for i in range(self.order_num):
for k in range(9):
for l in range(k+1,9):
# not=1, and=2, or=3
p=tf.cond(tf.equal(program_order2[i][0],1)&tf.equal(program_order2[i][1],k),lambda:1-self.Theta[k],lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],1)&tf.equal(program_order2[i][1],-1),lambda:1-p,lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],2)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:tf.multiply(self.Theta[k],self.Theta[l]),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],2)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],-1),lambda:tf.multiply(self.Theta[k],p),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],3)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:self.Theta[k]+self.Theta[l]-tf.multiply(self.Theta[k],self.Theta[l]),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],3)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:self.Theta[k]+p-tf.multiply(self.Theta[k],p),lambda:p);
"""
return p;
def run_target_nets(self,Theta,program_order):
Theta=tf.reshape(Theta,[-1,self.obj_num,6]);
Theta=tf.transpose(Theta,perm=[0,2,1]);
Theta=tf.unstack(Theta,6,1);
# temporary ordering
p_1=tf.multiply(Theta[0],Theta[3]);
p_1=p_1+Theta[5];
p_2=tf.multiply(Theta[1],Theta[3]);
p_2=p_2+Theta[5];
p_3=tf.multiply(Theta[0],Theta[4]);
p_3=p_3+Theta[5];
p_4=tf.multiply(Theta[1],Theta[4]);
p_4=p_4+Theta[5];
program_order2=tf.unstack(program_order,(self.obj_num-1),1);
p=tf.multiply(tf.stack([program_order2[0]]*(self.obj_num),1),p_1)+tf.multiply(tf.stack([program_order2[1]]*(self.obj_num),1),p_2)+tf.multiply(tf.stack([program_order2[2]]*(self.obj_num),1),p_3)+tf.multiply(tf.stack([program_order2[3]]*(self.obj_num),1),p_4);
# Currently tf.cond makes problems
"""
# Currently tf.cond makes problems
program_order2=tf.unstack(program_order,self.order_num,1);
for i in range(self.order_num):
program_order2[i]=tf.unstack(program_order2[i],3,1);
for i in range(self.order_num):
for k in range(9):
for l in range(k+1,9):
# not=1, and=2, or=3
p=tf.cond(tf.equal(program_order2[i][0],1)&tf.equal(program_order2[i][1],k),lambda:1-Theta[k],lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],1)&tf.equal(program_order2[i][1],-1),lambda:1-p,lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],2)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:tf.multiply(Theta[k],Theta[l]),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],2)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],-1),lambda:tf.multiply(Theta[k],p),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],3)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:Theta[k]+Theta[l]-tf.multiply(Theta[k],Theta[l]),lambda:p);
p=tf.cond(tf.equal(program_order2[i][0],3)&tf.equal(program_order2[i][1],k)&tf.equal(program_order2[i][2],l),lambda:Theta[k]+p-tf.multiply(Theta[k],p),lambda:p);
"""
return p;
| 53.397727 | 262 | 0.668227 | 4,615 | 0.982124 | 0 | 0 | 0 | 0 | 0 | 0 | 2,624 | 0.558417 |
e8eead32e4d9ed6fcdbc15a39026a6dc60b3a888 | 3,161 | py | Python | ism_pkg/tools/opt_gaussian.py | endsley/stochastic_ISM | 6438eb17bd391e8698e989156acb9b786c8f7299 | [
"MIT"
] | 1 | 2021-11-04T06:30:22.000Z | 2021-11-04T06:30:22.000Z | ism_pkg/tools/opt_gaussian.py | endsley/stochastic_ISM | 6438eb17bd391e8698e989156acb9b786c8f7299 | [
"MIT"
] | null | null | null | ism_pkg/tools/opt_gaussian.py | endsley/stochastic_ISM | 6438eb17bd391e8698e989156acb9b786c8f7299 | [
"MIT"
] | 2 | 2021-11-04T06:30:24.000Z | 2022-02-07T02:04:14.000Z | #!/usr/bin/env python
import sys
import matplotlib
import numpy as np
import random
import itertools
import socket
import sklearn.metrics
from scipy.optimize import minimize
from scipy.optimize import Bounds
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils.random import sample_without_replacement
np.set_printoptions(precision=4)
np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(linewidth=300)
np.set_printoptions(suppress=True)
class opt_gaussian():
	def __init__(self, X, Y, Y_kernel='linear', σₓ=None, Γ=None): # X = data matrix, Y = labels
ń = X.shape[0]
ð = X.shape[1]
self.Y_kernel = Y_kernel
if ń > 300: # Down sample first
samples = sample_without_replacement(n_population=ń, n_samples=300)
X = X[samples,:]
Y = Y[samples]
ń = X.shape[0]
Γ = None
if Y_kernel == 'linear':
σᵧ = 1
if Γ is None:
self.Ⅱᵀ = np.ones((ń,ń))
ńᒾ = ń*ń
Yₒ = OneHotEncoder(categories='auto', sparse=False).fit_transform(np.reshape(Y,(len(Y),1)))
self.Kᵧ = Kᵧ = Yₒ.dot(Yₒ.T)
ṉ = np.sum(Kᵧ)
HKᵧ = self.Kᵧ - np.mean(self.Kᵧ, axis=0) # equivalent to Γ = Ⲏ.dot(Kᵧ).dot(Ⲏ)
self.Γ = HKᵧH = (HKᵧ.T - np.mean(HKᵧ.T, axis=0)).T
else:
self.Γ = Γ
elif Y_kernel == 'Gaussian':
Ðᵧ = sklearn.metrics.pairwise.pairwise_distances(Y)
σᵧ = np.median(Ðᵧ)
self.Ðᵧᒾ = (-Ðᵧ*Ðᵧ)/2
Ðₓ = sklearn.metrics.pairwise.pairwise_distances(X)
if σₓ is None:
σₓ = np.median(Ðₓ)
self.Ðₓᒾ = (-Ðₓ*Ðₓ)/2
self.σ = [σₓ, σᵧ]
def minimize_H(self):
self.result = minimize(self.ℍ, self.σ, method='L-BFGS-B', options={'gtol': 1e-5, 'disp': False}, bounds=Bounds(0.05, 100000))
if self.result.x[0] < 0.01:
self.result.x[0] = 0.01
def ℍ(self, σ):
[σₓ, σᵧ] = σ
Kₓ = np.exp(self.Ðₓᒾ/(σₓ*σₓ))
if self.Y_kernel == 'linear':
Γ = self.Γ
elif self.Y_kernel == 'Gaussian':
Kᵧ = np.exp(self.Ðᵧᒾ/(σᵧ*σᵧ))
HKᵧ = Kᵧ - np.mean(Kᵧ, axis=0) # equivalent to Γ = Ⲏ.dot(Kᵧ).dot(Ⲏ)
Γ = HKᵧH = (HKᵧ.T - np.mean(HKᵧ.T, axis=0)).T
loss = -np.sum(Kₓ*Γ)
return loss
def get_opt_σ(X,Y, Y_kernel='Gaussian'):
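	# Search kernel widths by minimizing the negative HSIC objective with L-BFGS-B.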
optimizer = opt_gaussian(X,Y, Y_kernel=Y_kernel)
optimizer.minimize_H()
return optimizer.result
def get_opt_σ_via_random(X,Y, Y_kernel='Gaussian'):
optimizer = opt_gaussian(X,Y, Y_kernel=Y_kernel)
opt = 0
opt_σ = 0
for m in range(1000):
σ = (7*np.random.rand(2)).tolist()
new_opt = -optimizer.ℍ(σ)
if opt < new_opt:
opt = new_opt
opt_σ = σ
print('Random Result ')
print('\tbest_σ : ', opt_σ)
print('\tmax_HSIC : ' , opt)
if __name__ == "__main__":
data_name = 'wine'
X = np.loadtxt('../dataset/' + data_name + '.csv', delimiter=',', dtype=np.float64)
Y = np.loadtxt('../dataset/' + data_name + '_label.csv', delimiter=',', dtype=np.int32)
X = preprocessing.scale(X)
optimized_results = get_opt_σ(X,Y, Y_kernel='linear')
best_σ = optimized_results.x
max_HSIC = -optimized_results.fun
print('Optimized Result ')
print('\tbest_σ [σₓ, σᵧ]: ', best_σ)
print('\tmax_HSIC : ' , max_HSIC)
optimized_results = get_opt_σ_via_random(X,Y, Y_kernel='linear')
| 25.288 | 127 | 0.660234 | 1,797 | 0.534662 | 0 | 0 | 0 | 0 | 0 | 0 | 460 | 0.136864 |
e8eedc51b24c6143d7853efa95a31479c5ffbbd9 | 2,645 | py | Python | tests/commands/test_generate.py | pedrovelho/camp | 98105c9054b8db3377cb6a06e7b5451b97c6c285 | [
"MIT"
] | null | null | null | tests/commands/test_generate.py | pedrovelho/camp | 98105c9054b8db3377cb6a06e7b5451b97c6c285 | [
"MIT"
] | null | null | null | tests/commands/test_generate.py | pedrovelho/camp | 98105c9054b8db3377cb6a06e7b5451b97c6c285 | [
"MIT"
] | 1 | 2019-02-05T08:49:41.000Z | 2019-02-05T08:49:41.000Z | #
# CAMP
#
# Copyright (C) 2017, 2018 SINTEF Digital
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
from unittest import TestCase
from camp.commands import Command, Generate
class DefaultValuesAreCorrect(TestCase):
def test_given_no_working_directory(self):
command_line = "generate --all"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertEqual(command.working_directory,
Generate.DEFAULT_WORKING_DIRECTORY)
    def test_given_no_coverage_option(self):
command_line = "generate -d my/directory"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertEqual(command.only_coverage,
Generate.DEFAULT_COVERAGE)
class ShortOptionsAreAccepted(TestCase):
def test_given_working_directory(self):
command_line = "generate --d my/test/directory"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertEqual(command.working_directory,
"my/test/directory")
def test_given_only_coverage(self):
command_line = "generate --c"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertTrue(command.only_coverage)
def test_given_all_configurations(self):
command_line = "generate --a"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertFalse(command.only_coverage)
class LongOptionsAreAccepted(TestCase):
def test_given_working_directory(self):
command_line = "generate --directory my/test/directory"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertEqual(command.working_directory,
"my/test/directory")
def test_given_only_coverage(self):
command_line = "generate --coverage"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertTrue(command.only_coverage)
def test_given_all_configurations(self):
command_line = "generate --all"
command = Command.extract_from(command_line.split())
self.assertIsInstance(command, Generate)
self.assertFalse(command.only_coverage)
| 25.190476 | 63 | 0.689981 | 2,354 | 0.889981 | 0 | 0 | 0 | 0 | 0 | 0 | 409 | 0.154631 |
e8eedebb919e5d37cf8c9784ea6066d77842d262 | 4,757 | py | Python | create_cpu_config.py | ttungl/HeteroArchGen4M2S | 85752243dc38b3d44207b6b200d6ebc88c2be8ea | [
"MIT"
] | 5 | 2018-04-29T15:33:01.000Z | 2021-11-17T11:41:45.000Z | create_cpu_config.py | berwingan/HeteroArchGen4M2S | 4922711cdfe20d120dc525a6c7cb8cd2230cba09 | [
"MIT"
] | null | null | null | create_cpu_config.py | berwingan/HeteroArchGen4M2S | 4922711cdfe20d120dc525a6c7cb8cd2230cba09 | [
"MIT"
] | 7 | 2017-07-28T02:29:59.000Z | 2021-11-20T12:01:40.000Z | #!/usr/bin/env python
# ===========================================================================
# Copyright 2017 `Tung Thanh Le`
# Email: ttungl at gmail dot com
# Heterogeneous Architecture Configurations Generator for Multi2Sim simulator
# (aka, `HeteroArchGen4M2S`)
# `HeteroArchGen4M2S` is free software, which is freely to be
# redistributed and modified it under the terms of
# the GNU General Public License as published by
# the Free Software Foundation.
# For more details `http://www.gnu.org/licenses`
# `HeteroArchGen4M2S` is written to help you configure M2S
# easily, but non-warranty and non-mechantability.
# ============================================================================
#
# `create_cpuconfig` is part of M2S configuration files.
# ==========================================================
# Description: This generates `x86_cpuconfig` file for M2S
# Input:
# Output:
# Note: Each core can contain several threads.
# ==========================================================
# E.g.,
# num_of_cores = 16 : number of cores in the CPUs
# num_of_threads = 1 : number of threads in each core
# ROB_size = 128 : number of in-flight instructions allowed
# pipelines_size = 4: decode/dispatch/issue/commit width
# bimod_size = 4096 : Size of local predictor (larger size means less aliasing in history table)
# bpred_size = 1024 : Size of global predictor (larger size means longer global history register)
# ==========================================================
import math # to roundup the float numbers.
# benchmark, fast_forward: binary flag, enables fastforward past sequential portion of benchmark
def create_cpuconfig( num_of_cores,
cpu_frequency,
num_of_threads,
ROB_size,
pipelines_size,
bimod_size,
bpred_size):
# Check inputs validation
assert(num_of_cores>=4), "Error! Number of CPU cores must be at least 4."
assert(num_of_threads>=0), "Error! Number of threads should be at least zero."
# Adapted the additional parameters from M2StoMcPAT of Caleb (Univ. Maryland College Park)
IQ_ratio = 0.4; # size of instruction (issue) queue w.r.t. ROB
LSQ_ratio = 0.5; # size of LSQ w.r.t. ROB
RF_ratio = 1; # size of register file w.r.t. ROB
RF_int_ratio = 0.666666;# (2/3) ratio of int vs FP regissters in the RF
Fetch_Queue_size = 64; # queue holding instructions fetched from I$ waiting to be decoded
history_size = 8; # size of the local histroy table entries
# File name
f = open('configs/x86_cpuconfig', 'w');
# General
f.write("[ General ]\n");
f.write(" Cores = %0.f\n" % num_of_cores);
f.write(" Threads = %0.f\n" % num_of_threads);
f.write(" Frequency = %0.f\n" % cpu_frequency);
f.write("\n");
# Pipeline
f.write("[ Pipeline ]\n");
f.write(" DecodeWidth = %0.f\n" % pipelines_size);
f.write(" DispatchWidth = %0.f\n" % pipelines_size);
f.write(" IssueWidth = %0.f\n" % pipelines_size);
f.write(" CommitWidth = %0.f\n" % pipelines_size);
f.write("\n");
# Queues
f.write("[ Queues ]\n");
f.write(" FetchQueueSize = %0.f\n" % Fetch_Queue_size);
f.write(" RobSize = %0.f\n" % ROB_size);
f.write(" IqSize = %0.f\n" % (IQ_ratio*ROB_size));
f.write(" LsqSize = %0.f\n" % (LSQ_ratio*ROB_size));
f.write(" RfIntSize = %0.f\n" % (RF_ratio*(RF_int_ratio)*ROB_size));
f.write(" RfFpSize = %0.f\n" % (RF_ratio*(1-RF_int_ratio)*ROB_size));
f.write("\n");
# FunctionalUnits
f.write("[ FunctionalUnits ]\n");
f.write(" IntAdd.Count = %0.f\n" % pipelines_size);
f.write(" IntMult.Count = %0.f\n" % (pipelines_size/4));
f.write(" IntDiv.Count = %0.f\n" % math.ceil(pipelines_size/8+0.55)); # added 0.55 to roundup the float number.
f.write(" EffAddr.Count = %0.f\n" % pipelines_size);
f.write(" Logic.Count = %0.f\n" % pipelines_size);
f.write(" FpSimple.Count = %0.f\n" % pipelines_size);
f.write(" FpAdd.Count = %0.f\n" % pipelines_size);
f.write(" FpMult.Count = %0.f\n" % (pipelines_size/4));
f.write(" FpDiv.Count = %0.f\n" % math.ceil(pipelines_size/8+0.55)); # added 0.55 to roundup the float number.
f.write(" FpComplex.Count = %0.f\n" % math.ceil(pipelines_size/8+0.55)); # added 0.55 to roundup the float number.
f.write("\n");
# BranchPredictor
f.write("[ BranchPredictor ]\n");
f.write(" Kind = Combined\n");
f.write(" Bimod.Size = %0.f\n" % bimod_size);
f.write(" Choice.Size = %0.f\n" % bimod_size);
f.write(" TwoLevel.L1Size = %0.f\n" % bpred_size);
f.write(" TwoLevel.L2Size = 1\n");
f.write(" TwoLevel.HistorySize = %0.f\n" % history_size);
f.write(" BTB.Sets = 1024\n");
f.write(" BTB.Assoc = 1");
# close
f.close();
## Tested
# def main():
# create_cpuconfig(16, 1, 128, 4, 4096, 1024, 1);
# print "This %s file is just executed!" % __file__
# if __name__ == "__main__": main()
| 41.008621 | 115 | 0.634433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,268 | 0.686988 |
e8ef256cd90a45f756f9b6bac7615546b4352096 | 7,475 | py | Python | app/wall_e/models/canvas_api.py | dbwebb-se/umbridge | 76dd1a15f2f481c1fc3819990ab41e20ca65afe3 | [
"MIT"
] | null | null | null | app/wall_e/models/canvas_api.py | dbwebb-se/umbridge | 76dd1a15f2f481c1fc3819990ab41e20ca65afe3 | [
"MIT"
] | 12 | 2021-09-07T12:11:31.000Z | 2022-03-22T10:05:03.000Z | app/wall_e/models/canvas_api.py | dbwebb-se/umbridge | 76dd1a15f2f481c1fc3819990ab41e20ca65afe3 | [
"MIT"
] | null | null | null | """
"""
import os
from flask import current_app
from canvasapi import submission, requester
from app.wall_e.models.requester import Requester
from app.settings import settings
class Canvas(Requester):
"""
Model class for wall_e.fetch
"""
def __init__(self, base_url, api_token, course_id, course_name):
super().__init__(base_url, api_token)
self.course_id = course_id
self._course_name = course_name
self._config = settings.get_course_map()
self.set_assignments_and_users()
def set_assignments_and_users(self):
""" Caches assignments and students in a course """
self.users = self.get_users_in_course()
current_app.logger.debug(f"Course {self._course_name} has the following users: {self.users}")
self.assignments = self.get_assignments()
current_app.logger.debug(f"Course {self._course_name} has the following assignments: {self.assignments}")
def get_users_in_course(self):
"""
Returns users in course by course_id
"""
data = self.request_get_paging(
f"/api/v1/courses/{self.course_id}/users?page={{page}}&per_page=100")
return data
def get_assignments(self):
"""
Return assignments
based on course_id
"""
return self._request_get(
f"/api/v1/courses/{self.course_id}/assignments?per_page=100").json()
def get_course(self):
"""
Return a single course
based on course_id
"""
return self._request_get(f"/api/v1/courses/{self.course_id}").json()
def users_and_acronyms(self):
"""
Returns users in course by course_id
"""
formatted_users = {}
for u in self.users:
try:
formatted_users[u["id"]] = u["login_id"].split("@")[0]
except TypeError:
current_app.logger.error(f"could not extract acronym for user {u}")
return formatted_users
def get_user_by_acronym(self, acronym):
"""
Returns a single user in course
by acronym and course_id
"""
return [
u for u in self.users if "login_id" in u and acronym in u["login_id"]
][0]
def get_assignment_by_name(self, name):
"""
Return a single assignment
based on name
"""
return [a for a in self.assignments if a["name"] == name][0]
def get_assignment_name_by_id(self, assignment_id):
"""
Return a single assignment
based on its id
"""
for assignment in self.assignments:
if assignment["id"] == assignment_id:
name = self._config[self._course_name]['canvas_name_to_assignment'].get(
assignment["name"],
assignment["name"]
)
current_app.logger.debug(f"Found the name {name} for assignment {assignment['name']}")
return name
current_app.logger.error(f"could not find a matching assignment id to {assignment['id']}")
return None
def get_gradeable_submissions(self):
"""
Return gradeable submissions
based on assignment_id
"""
# submitted = all assignments that has not been graded on canvas
submissions = self.request_get_paging(
f"/api/v1/courses/{self.course_id}/students/submissions?page={{page}}&per_page=100", payload={
"student_ids": ["all"],
"workflow_state": ["submitted"],
}
)
current_app.logger.info(f"Course {self._course_name} has {len(submissions)} submissions")
current_app.logger.debug(f"Course {self._course_name} has the following submissions: {submissions}")
try:
ignore = self._config[self._course_name]['ignore_assignments']
except KeyError:
ignore = self._config['default']['ignore_assignments']
if ignore:
submissions = [
s for s in submissions if self.get_assignment_name_by_id(s['assignment_id']) not in ignore
]
return submissions
class Grader(Requester):
"""
Model class for wall_e.grade
"""
def __init__(self, base_url, api_token):
super().__init__(base_url, api_token)
def grade_submission(self, sub, url):
"""
Grade submission
"""
passed_comment = "Testerna har passerat. En rättare kommer läsa din redovisningstext, kolla på koden och sätta betyg."
failed_comment = "Tyvärr gick något fel i testerna. Läs igenom loggfilen för att se vad som gick fel. Lös felet och gör en ny inlämning."
error_comment = "Något gick fel i umbridge, kontakta kursansvarig."
respons = self.send_zip_archive(sub)
if respons is not None:
if sub.grade.lower() == "pg":
feedback = passed_comment
elif sub.grade.lower() == "ux":
feedback = failed_comment
else:
feedback = error_comment
id_ = respons["id"]
uuid = respons["uuid"]
feedback_text = (
"Automatiska rättningssystemet 'Umbridge' har gått igenom din inlämning.\n\n"
f"{feedback}\n\n"
f"Loggfilen för alla tester kan du se via följande länk: {url}/results/feedback/{sub.uuid}-{sub.id}\n\n"
f"Du kan inspektera filerna som användes vid rättning via följande länk: {url}/results/inspect/{id_}/{uuid}\n\n"
"Kontakta en av de kursansvariga om resultatet är felaktigt."
)
else:
feedback_text = (
"Automatiska rättningssystemet 'Umbridge' har gått igenom din inlämning.\n\n"
f"Umbridge kunde inte hitta filerna efter rättningen, försök göra en ny inlämning. Om det inte hjälper, kontakta kursansvarig.\n\n"
f"Loggfilen för alla tester kan du se via följande länk: {url}/results/feedback/{sub.uuid}-{sub.id}\n\n"
)
payload = {
"comment": {
"text_comment": feedback_text,
},
"submission": {
"posted_grade": sub.grade
}
}
current_app.logger.debug(f"Set grade {sub.grade} for {sub.user_acronym} in assignment {sub.assignment_id}")
self._request_put(
f"/api/v1/courses/{sub.course_id}/assignments/{sub.assignment_id}/submissions/{sub.user_id}",
payload=payload)
def send_zip_archive(self, sub):
"""
Sends archive as a comment
"""
file_name = sub.zip_file_path
r = requester.Requester(self._url, self._key)
s = submission.Submission(
r, attributes={
"course_id": sub.course_id,
"assignment_id": sub.assignment_id,
"user_id": sub.user_id
})
current_app.logger.debug(f"Sending zip as comment to {sub.user_acronym} in assignment {sub.assignment_id}")
try:
respons = s.upload_comment(file_name)
except IOError:
current_app.logger.error(f"File {file_name} is missing, can't upload file for {sub.user_acronym} in {sub.assignment_name}.")
sub.grade = "U"
return None
current_app.logger.debug(f"zip respons: {respons}")
os.remove(file_name)
return respons[1]
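# Illustrative usage sketch (added, not part of the original module). Assumes a
# `sub` object carrying course_id/assignment_id/user_id/grade/uuid/zip_file_path
# as used by the methods above; the URLs are placeholders.
#
# grader = Grader("https://canvas.example.com", api_token)
# grader.grade_submission(sub, url="https://umbridge.example.com")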
| 34.447005 | 147 | 0.597458 | 7,328 | 0.975766 | 0 | 0 | 0 | 0 | 0 | 0 | 3,242 | 0.431691 |
e8f09f4acc1e578e6bbb291e4f8a3a87b3a0b297 | 908 | py | Python | chat/views.py | xiaoqiao99/chat | ca65ed25fbc277828390b890a50ecadf4675cfb4 | [
"MIT"
] | 2 | 2019-06-21T10:30:18.000Z | 2019-07-12T07:46:25.000Z | chat/views.py | xiaoqiao99/chat | ca65ed25fbc277828390b890a50ecadf4675cfb4 | [
"MIT"
] | 8 | 2020-06-05T19:56:53.000Z | 2022-03-11T23:41:44.000Z | chat/views.py | xiaoqiao99/chat | ca65ed25fbc277828390b890a50ecadf4675cfb4 | [
"MIT"
] | 3 | 2020-03-13T03:22:40.000Z | 2020-07-03T03:03:02.000Z | from django.shortcuts import render
# Create your views here.
# chat/views.py
from django.shortcuts import render
from django.utils.safestring import mark_safe
import json
from chat.models import Room
def index(request):
# from channels.layers import get_channel_layer
# from asgiref.sync import async_to_sync
# channel_layer = get_channel_layer()
# async_to_sync(channel_layer.group_send)(
# "chat_lobby",
# {
# 'type': 'chat.message',
# 'message': "6666666yyyyy66666666"
# }
# )
    # r = Room.objects.filter(id=46).update(name="33333333")  # if the signal uses post_save, this update will not trigger the signal mechanism
r = Room()
r.name = "xiao"
r.label = "qq "
r.save()
return render(request, 'chat/index.html', {})
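# Illustrative sketch (added, not part of the original app): the group_send in
# the commented block above dispatches to a consumer method named after the
# message type; a matching channels consumer could look roughly like this
# (class and handler names are assumptions):
#
# class ChatConsumer(WebsocketConsumer):
#     def chat_message(self, event):  # handles {'type': 'chat.message', ...}
#         self.send(text_data=json.dumps({'message': event['message']}))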
def room(request, room_name):
return render(request, 'chat/room.html', {
'room_name_json': mark_safe(json.dumps(room_name))
}) | 27.515152 | 93 | 0.654185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 511 | 0.536765 |
e8f180504d0fc36abc7d16119e4914419e1e1ee4 | 806 | py | Python | src/app/db.py | jorwalk/fastapi-crud-async | 000705ac977673d316a5fac2ce98adac23768b6e | [
"MIT"
] | null | null | null | src/app/db.py | jorwalk/fastapi-crud-async | 000705ac977673d316a5fac2ce98adac23768b6e | [
"MIT"
] | null | null | null | src/app/db.py | jorwalk/fastapi-crud-async | 000705ac977673d316a5fac2ce98adac23768b6e | [
"MIT"
] | null | null | null | import os
from sqlalchemy import (Column, DateTime, Integer, MetaData, String, Text, Table,
create_engine)
from sqlalchemy.sql import func
from databases import Database
DATABASE_URL = os.getenv("DATABASE_URL")
# SQLAlchemy
engine = create_engine(DATABASE_URL)
metadata = MetaData()
movies = Table(
"movies",
metadata,
Column("id", Integer, primary_key=True),
Column("release_year", String()),
Column("title", String()),
Column("origin_ethnicity", String()),
Column("director", String()),
Column("cast", String()),
Column("genre", String()),
Column("wiki_page", String()),
Column("plot", Text()),
Column("created_date", DateTime, default=func.now(), nullable=False),
)
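# --- Illustrative usage sketch (added, not part of the original module) ---
# Typical async insert/select with the `databases` query builder; the helper
# names below are assumptions for illustration. `database` is defined at the
# bottom of this module and resolves at call time.
async def add_movie(payload: dict) -> int:
    # Insert one row and return the new primary key
    query = movies.insert().values(**payload)
    return await database.execute(query=query)


async def get_all_movies() -> list:
    # Fetch every row from the movies table
    query = movies.select()
    return await database.fetch_all(query=query)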
# databases query builder
database = Database(DATABASE_URL) | 26 | 81 | 0.672457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.193548 |
e8f1a3ca50bcfed7f7af218f87aa8a2b2ac30b0f | 3,224 | py | Python | python/greshunkel.py | qpfiffer/38-Moths | e7985b3ee655bca965c37b8fefde6c6ad70fdd4f | [
"MIT"
] | 14 | 2015-02-23T07:16:36.000Z | 2021-05-05T11:49:57.000Z | python/greshunkel.py | qpfiffer/futility | e7985b3ee655bca965c37b8fefde6c6ad70fdd4f | [
"MIT"
] | 19 | 2015-03-21T22:14:28.000Z | 2017-01-25T17:07:21.000Z | python/greshunkel.py | qpfiffer/futility | e7985b3ee655bca965c37b8fefde6c6ad70fdd4f | [
"MIT"
] | 3 | 2015-02-23T07:17:08.000Z | 2015-03-23T18:05:34.000Z | from ctypes import cdll, c_char_p, c_size_t, c_void_p, Union,\
LittleEndianStructure, c_char, POINTER,\
c_int, byref, CFUNCTYPE, create_string_buffer
class greshunkel_var(Union):
_fields_ = [
("str", c_char * 513),
("arr", c_void_p),
("sub_ctext", c_void_p)
]
GshklFilterFunc = CFUNCTYPE(c_char_p, c_char_p)
lib38moths = cdll.LoadLibrary("lib38moths.so")
lib38moths.gshkl_init_context.restype = c_void_p
lib38moths.gshkl_add_array.argtypes = [c_void_p, c_char_p]
lib38moths.gshkl_add_array.restype = greshunkel_var
lib38moths.gshkl_add_string_to_loop.argtypes = [POINTER(greshunkel_var), c_char_p]
lib38moths.gshkl_add_int_to_loop.argtypes = [POINTER(greshunkel_var), c_int]
lib38moths.gshkl_add_filter.argtypes = [c_void_p, c_char_p, GshklFilterFunc, c_void_p]
lib38moths.gshkl_render.restype = c_char_p
def _add_item_to_greshunkel_loop(ctext, loop, value):
if isinstance(value, str):
lib38moths.gshkl_add_string_to_loop(byref(loop), c_char_p(value.encode()))
elif isinstance(value, int):
lib38moths.gshkl_add_int_to_loop(byref(loop), value)
elif hasattr(value, '__call__'):
raise Exception("Don't really know what you're trying to do here.")
elif isinstance(value, list) or isinstance(value, tuple):
raise Exception("Cannot add loops to loops right now. Use subcontexts.")
elif isinstance(value, dict):
sub_ctext = lib38moths.gshkl_init_context()
lib38moths.gshkl_add_sub_context_to_loop(byref(loop), sub_ctext)
for sub_key, sub_value in value.items():
_add_item_to_greshunkel_context(sub_ctext, sub_key, sub_value)
def _add_item_to_greshunkel_context(ctext, key, value):
if isinstance(value, str):
lib38moths.gshkl_add_string(ctext, c_char_p(key.encode()), c_char_p(value.encode()))
elif isinstance(value, int):
lib38moths.gshkl_add_int(ctext, c_char_p(key.encode()), c_size_t(value))
elif isinstance(value, list):
converted_key = c_char_p(key.encode())
new_loop = lib38moths.gshkl_add_array(ctext, converted_key)
for subitem in value:
_add_item_to_greshunkel_loop(ctext, new_loop, subitem)
elif hasattr(value, '__call__'):
lib38moths.gshkl_add_filter(ctext, c_char_p(key.encode()), value, None)
elif isinstance(value, dict):
sub_ctext = lib38moths.gshkl_init_context()
lib38moths.gshkl_add_sub_context(ctext, c_char_p(key.encode()), sub_ctext)
for sub_key, sub_value in value.items():
_add_item_to_greshunkel_context(sub_ctext, sub_key, sub_value)
class Context(object):
def __init__(self, context_dict):
self.gshkl_ctext = lib38moths.gshkl_init_context()
for key, value in context_dict.items():
_add_item_to_greshunkel_context(self.gshkl_ctext, key, value)
class Template(object):
def __init__(self, template):
self.template = template
def render(self, context):
template_c_string = c_char_p(self.template.encode())
ret_c_str = lib38moths.gshkl_render(context.gshkl_ctext, template_c_string, len(self.template), None)
return ret_c_str.decode()
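# Illustrative usage sketch (added, not part of the original module). Requires
# lib38moths.so on the loader path; the template text below is an assumption
# about greshunkel's syntax, shown only to illustrate the Context/Template API.
#
# ctext = Context({"name": "world", "numbers": [1, 2, 3]})
# tmpl = Template("Hello xXx name xXx!")
# print(tmpl.render(ctext))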
| 45.408451 | 109 | 0.715261 | 729 | 0.226117 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.049938 |
e8f205c39839ed18e131eea74299ddd64618a420 | 11,443 | py | Python | objects/paginators.py | fossabot/KiwiBot | cd077ce742775ed69ea7a14796c967ed878e1abc | [
"MIT"
] | null | null | null | objects/paginators.py | fossabot/KiwiBot | cd077ce742775ed69ea7a14796c967ed878e1abc | [
"MIT"
] | null | null | null | objects/paginators.py | fossabot/KiwiBot | cd077ce742775ed69ea7a14796c967ed878e1abc | [
"MIT"
] | null | null | null | from collections import OrderedDict
from asyncio import TimeoutError, wait
from concurrent.futures import FIRST_COMPLETED
import time
from discord import TextChannel, DMChannel
from discord.errors import Forbidden, NotFound
from objects.context import Context
class PaginatorABC:
def __init__(self, bot, looped=True, timeout=180, additional_time=20):
self.bot = bot
self.looped = looped
self.timeout = timeout
self.additional_time = additional_time
self.index = 0
self._pages = []
self.current_page = {}
self.events = OrderedDict()
self.target_users = []
self.closed = False
def add_page(self, **kwargs):
page = kwargs
self._pages.append(page)
if len(self._pages) == 1:
self.current_page = page
def switch_to_next_page(self):
if self.index == len(self._pages) - 1:
if not self.looped:
return self.current_page
self.index = 0
else:
self.index += 1
return self._pages[self.index]
def switch_to_prev_page(self):
if self.index == 0:
if not self.looped:
return self.current_page
self.index = len(self._pages) - 1
else:
self.index -= 1
return self._pages[self.index]
def switch_to_page(self, index):
if len(self._pages) > index and index >= 0:
self.index = index
else:
self.index = 0
self.current_page = self._pages[self.index]
return self.current_page
async def init_reactions(self, force=False):
if len(self._pages) <= 1 and not force:
self.closed = True
return
try:
for emoji in self.events.keys():
await self.target_message.add_reaction(emoji)
except Exception:
pass
async def _reaction_add_callback(self, reaction, user):
await self.events[str(reaction)](reaction, user)
try:
await self.target_message.remove_reaction(reaction, user)
except NotFound:
self.closed = True
except Exception:
pass
async def _reaction_remove_callback(self, reaction, user):
await self.events[str(reaction)](reaction, user)
async def run(self, target, **kwargs):
"""
        Runs a paginator session
        parameters:
        :target:
            Message or Context object to attach the paginator to
        :target_user: (default: None, or ctx author if a ctx is passed as target)
            user to wait for actions from. Can be a User or Member object
        :target_users: (default: [])
            list of users to wait for actions from. Can be a list of User or Member objects
        :force_run: (default: False)
            force run the paginator even if pages are missing
        :events: (default: {})
            dict of events to wait for as keys and their callbacks as values
            !event keys should be lambda functions creating the actual coroutine on call!
            callbacks are coroutines receiving the event result(s)
"""
if isinstance(target, Context):
self.target_message = await target.send(**self.current_page)
if self.target_message is None:
return await self.cleanup()
target_user = kwargs.pop('target_user', target.author)
else:
self.target_message = target
target_user = kwargs.pop('target_user', None)
target_users = kwargs.pop('target_users', [])
force_run = kwargs.pop('force_run', False)
events = kwargs.pop('events', {})
if target_user is None and len(target_users) == 0:
raise ValueError('No user objects passed')
if target_user is not None:
if len(target_users) != 0:
raise ValueError('Use either target_user or target_users, not both')
target_users.append(target_user)
self.target_users = target_users
def check(reaction, user):
return all((
any(user == u for u in target_users),
reaction.message.id == self.target_message.id,
str(reaction.emoji) in self.events
))
self.start_time = time.time()
time_left = self.timeout
manage_messages_permission = \
self.target_message.guild and self.target_message.channel.permissions_for(self.target_message.guild.me).manage_messages
await self.init_reactions(force=force_run)
while time_left >= 0 and not self.closed:
reaction_add_event = self.bot.wait_for('reaction_add', check=check)
_events = { l(): c for l, c in events.items() }
_events[reaction_add_event] = self._reaction_add_callback
if not manage_messages_permission:
reaction_remove_event = self.bot.wait_for('reaction_remove', check=check)
_events[reaction_remove_event] = self._reaction_remove_callback
done, _ = await wait(
_events.keys(), loop=self.bot.loop,
timeout=time_left, return_when=FIRST_COMPLETED
)
if not done:
# timeout
break
else:
for task in done:
cb = _events[task._coro]
task_result = task.result()
if task_result is None:
continue
if type(task_result) is tuple:
results = task_result
else:
results = [task_result]
await cb(*results)
self.start_time += self.additional_time
time_left = self.timeout - (time.time() - self.start_time)
await self.cleanup()
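    # Illustrative note (added): an extra event, per the docstring above, would
    # be wired in roughly like this (all names below are assumptions):
    #
    #     message_event = lambda: bot.wait_for('message', check=my_check)
    #     await paginator.run(ctx, events={message_event: my_callback})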
async def cleanup(self):
try:
await self.target_message.clear_reactions()
except Exception:
pass
self.closed = True
def __len__(self):
return len(self._pages)
class Paginator(PaginatorABC):
"""
Basic paginator class.
Requires PermissionAddReactions to work
"""
def __init__(self, *args,
emoji_go_left='◀', emoji_go_right='▶',
emoji_use_index='🔢', **kwargs
):
super().__init__(*args, **kwargs)
self.events[emoji_go_left] = self.on_go_left
self.events[emoji_use_index] = self.on_use_index
self.events[emoji_go_right] = self.on_go_right
async def on_go_left(self, reaction, user):
if not self.looped and self.index == 0:
return
await self.bot.edit_message(
self.target_message, **self.switch_to_prev_page())
async def on_go_right(self, reaction, user):
if not self.looped and self.index == len(self._pages) - 1:
return
await self.bot.edit_message(
self.target_message, **self.switch_to_next_page())
async def on_use_index(self, reaction, user):
index_request_message = None
index_response_message = None
def check(message):
return all((
message.author == user,
message.channel == self.target_message.channel,
message.content.isdigit()
))
try:
index_request_message = await self.target_message.channel.send('Please, send number of page you want to go')
index_response_message = await self.bot.wait_for('message', timeout=10, check=check)
index = int(index_response_message.content) - 1
if index != self.index:
await self.bot.edit_message(self.target_message, **self.switch_to_page(index))
except TimeoutError:
pass
finally:
if index_request_message is not None:
await index_request_message.delete()
if index_response_message is not None:
try:
await index_response_message.delete()
except Exception:
pass
class SelectionPaginator(Paginator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.choice = None
self.num_elements = 0
async def _check_choice(self, msg):
if await self.check_choice(msg):
self.closed = True
await self.on_valid_choice(msg)
else:
await self.on_invalid_choice(msg)
async def check_choice(self, msg):
return msg.content.isdigit() and 0 < int(msg.content) <= self.num_elements
async def on_invalid_choice(self, msg):
pass
async def on_valid_choice(self, msg):
self.choice = int(msg.content)
await self.bot.delete_message(msg)
async def run(self, target, num_elements, **kwargs):
self.num_elements = num_elements
def check(msg):
return all((
                msg.author in self.target_users,
msg.channel == target.channel
))
message_event_lambda = lambda: self.bot.wait_for('message', check=check)
await super().run(
target,
events={ message_event_lambda: self._check_choice }, **kwargs
)
return self.choice
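# Illustrative usage sketch (added, not part of the original module): letting a
# user pick one of `n` listed elements; `ctx` is a command Context (assumption).
#
# selector = SelectionPaginator(bot)
# selector.add_page(content="1. foo\n2. bar\n3. baz")
# choice = await selector.run(ctx, num_elements=3)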
class UpdatingPaginator(PaginatorABC):
def __init__(self, *args, emoji_update='🆕', emoji_go_back='🔙', timeout=60, additional_time=30, **kwargs):
super().__init__(
*args, timeout=timeout, additional_time=additional_time, **kwargs)
self.events[emoji_update] = self.on_update
self.emoji_go_back = emoji_go_back
self.backup_pages = []
self.first_page_switch = True
self.last_time_popped = False
async def run(self, target, update_func, **kwargs):
self.update_func = update_func
self.update_args = kwargs.pop('update_args', ())
self.update_kwargs = kwargs.pop('update_kwargs', {})
fields = await self.get_fields()
if not fields:
return
self.add_page(**fields)
await super().run(target, force_run=True, **kwargs)
async def on_update(self, reaction, user):
fields = await self.get_fields()
if not fields:
return
await self.bot.edit_message(self.target_message, **fields)
if self.first_page_switch:
self.first_page_switch = False
self.events[self.emoji_go_back] = self.on_go_back
await self.init_reactions(force=True)
self.last_time_popped = False
async def on_go_back(self, reaction, user):
if not self.last_time_popped:
self.backup_pages.pop()
if len(self.backup_pages) > 1:
fields = self.backup_pages.pop()
await self.bot.edit_message(self.target_message, **fields)
else:
await self.bot.edit_message(
self.target_message, **self.backup_pages[0])
self.last_time_popped = True
async def get_fields(self):
try:
fields = await self.update_func(
self, *self.update_args, **self.update_kwargs)
fields = {} if fields is None else fields
except Exception:
fields = {}
self.backup_pages.append(fields)
return fields | 31.874652 | 131 | 0.589094 | 11,182 | 0.976082 | 0 | 0 | 0 | 0 | 8,562 | 0.747381 | 1,159 | 0.10117 |
e8f4043d5536bdca2c37406c6cd15241be633a78 | 21,362 | py | Python | tests/test_managedblockchain/test_managedblockchain_proposalvotes.py | junelife/moto | e61d794cbc9c18b06c11014da666e25f3fce637b | [
"Apache-2.0"
] | 1 | 2021-12-12T04:23:06.000Z | 2021-12-12T04:23:06.000Z | tests/test_managedblockchain/test_managedblockchain_proposalvotes.py | junelife/moto | e61d794cbc9c18b06c11014da666e25f3fce637b | [
"Apache-2.0"
] | 2 | 2018-08-07T10:47:18.000Z | 2018-08-08T15:13:04.000Z | tests/test_managedblockchain/test_managedblockchain_proposalvotes.py | junelife/moto | e61d794cbc9c18b06c11014da666e25f3fce637b | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
import os
import boto3
import sure # noqa
from freezegun import freeze_time
from unittest import SkipTest
from moto import mock_managedblockchain, settings
from . import helpers
@mock_managedblockchain
def test_vote_on_proposal_one_member_total_yes():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
# Create proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# List proposal votes
response = conn.list_proposal_votes(NetworkId=network_id, ProposalId=proposal_id)
response["ProposalVotes"][0]["MemberId"].should.equal(member_id)
# Get proposal details - should be APPROVED
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["Status"].should.equal("APPROVED")
response["Proposal"]["YesVoteCount"].should.equal(1)
response["Proposal"]["NoVoteCount"].should.equal(0)
response["Proposal"]["OutstandingVoteCount"].should.equal(0)
@mock_managedblockchain
def test_vote_on_proposal_one_member_total_no():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
# Create proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
# Vote no
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="NO",
)
# List proposal votes
response = conn.list_proposal_votes(NetworkId=network_id, ProposalId=proposal_id)
response["ProposalVotes"][0]["MemberId"].should.equal(member_id)
# Get proposal details - should be REJECTED
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["Status"].should.equal("REJECTED")
response["Proposal"]["YesVoteCount"].should.equal(0)
response["Proposal"]["NoVoteCount"].should.equal(1)
response["Proposal"]["OutstandingVoteCount"].should.equal(0)
@mock_managedblockchain
def test_vote_on_proposal_yes_greater_than():
conn = boto3.client("managedblockchain", region_name="us-east-1")
votingpolicy = {
"ApprovalThresholdPolicy": {
"ThresholdPercentage": 50,
"ProposalDurationInHours": 24,
"ThresholdComparator": "GREATER_THAN",
}
}
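    # Note (added): with ThresholdComparator GREATER_THAN at 50%, one YES out of
    # two members is exactly 50% and thus insufficient, so a single NO from the
    # other member is enough to reject the proposal below.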
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get the invitation
response = conn.list_invitations()
invitation_id = response["Invitations"][0]["InvitationId"]
# Create the member
response = conn.create_member(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember2", "admin", "Admin12345", False, "Test Member 2"
),
)
member_id2 = response["MemberId"]
# Create another proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes with member 1
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
# Vote no with member 2
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id2,
Vote="NO",
)
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["Status"].should.equal("REJECTED")
@mock_managedblockchain
def test_vote_on_proposal_no_greater_than():
conn = boto3.client("managedblockchain", region_name="us-east-1")
votingpolicy = {
"ApprovalThresholdPolicy": {
"ThresholdPercentage": 50,
"ProposalDurationInHours": 24,
"ThresholdComparator": "GREATER_THAN",
}
}
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get the invitation
response = conn.list_invitations()
invitation_id = response["Invitations"][0]["InvitationId"]
# Create the member
response = conn.create_member(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember2", "admin", "Admin12345", False, "Test Member 2"
),
)
member_id2 = response["MemberId"]
# Create another proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote no with member 1
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="NO",
)
# Vote no with member 2
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id2,
Vote="NO",
)
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("REJECTED")
@mock_managedblockchain
def test_vote_on_proposal_expiredproposal():
if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true":
raise SkipTest("Cant manipulate time in server mode")
votingpolicy = {
"ApprovalThresholdPolicy": {
"ThresholdPercentage": 50,
"ProposalDurationInHours": 1,
"ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO",
}
}
conn = boto3.client("managedblockchain", region_name="us-east-1")
with freeze_time("2015-01-01 12:00:00"):
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
with freeze_time("2015-02-01 12:00:00"):
# Vote yes - should set status to expired
response = conn.vote_on_proposal.when.called_with(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
).should.throw(
Exception,
"Proposal {0} is expired and you cannot vote on it.".format(proposal_id),
)
# Get proposal details - should be EXPIRED
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["Status"].should.equal("EXPIRED")
@mock_managedblockchain
def test_vote_on_proposal_status_check():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
    # Invite and create 2 more members (propose + vote yes, then accept the invitations)
for counter in range(2, 4):
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
memberidlist = [None, None, None]
memberidlist[0] = member_id
for counter in range(2, 4):
# Get the invitation
response = conn.list_invitations()
invitation_id = helpers.select_invitation_id_for_network(
response["Invitations"], network_id, "PENDING"
)[0]
# Create the member
response = conn.create_member(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember" + str(counter),
"admin",
"Admin12345",
False,
"Test Member " + str(counter),
),
)
member_id = response["MemberId"]
memberidlist[counter - 1] = member_id
# Should be no more pending invitations
response = conn.list_invitations()
pendinginvs = helpers.select_invitation_id_for_network(
response["Invitations"], network_id, "PENDING"
)
pendinginvs.should.have.length_of(0)
# Create another proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes with member 1
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=memberidlist[0],
Vote="YES",
)
# Vote yes with member 2
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=memberidlist[1],
Vote="YES",
)
# Get proposal details - now approved (2 yes, 1 outstanding)
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("APPROVED")
# Should be one pending invitation
response = conn.list_invitations()
pendinginvs = helpers.select_invitation_id_for_network(
response["Invitations"], network_id, "PENDING"
)
pendinginvs.should.have.length_of(1)
# Vote with member 3 - should throw an exception and not create a new invitation
response = conn.vote_on_proposal.when.called_with(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=memberidlist[2],
Vote="YES",
).should.throw(Exception, "and you cannot vote on it")
# Should still be one pending invitation
response = conn.list_invitations()
pendinginvs = helpers.select_invitation_id_for_network(
response["Invitations"], network_id, "PENDING"
)
pendinginvs.should.have.length_of(1)
@mock_managedblockchain
def test_vote_on_proposal_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.vote_on_proposal.when.called_with(
NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
ProposalId="p-ABCDEFGHIJKLMNOP0123456789",
VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789",
Vote="YES",
).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_vote_on_proposal_badproposal():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
response = conn.vote_on_proposal.when.called_with(
NetworkId=network_id,
ProposalId="p-ABCDEFGHIJKLMNOP0123456789",
VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789",
Vote="YES",
).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_vote_on_proposal_badmember():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
response = conn.vote_on_proposal.when.called_with(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789",
Vote="YES",
).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_vote_on_proposal_badvote():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
response = conn.vote_on_proposal.when.called_with(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="FOO",
).should.throw(Exception, "Invalid request body")
@mock_managedblockchain
def test_vote_on_proposal_alreadyvoted():
conn = boto3.client("managedblockchain", region_name="us-east-1")
votingpolicy = {
"ApprovalThresholdPolicy": {
"ThresholdPercentage": 50,
"ProposalDurationInHours": 24,
"ThresholdComparator": "GREATER_THAN",
}
}
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get the invitation
response = conn.list_invitations()
invitation_id = response["Invitations"][0]["InvitationId"]
# Create the member
response = conn.create_member(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember2", "admin", "Admin12345", False, "Test Member 2"
),
)
# Create another proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
# Vote yes with member 1
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Vote yes with member 1 again
response = conn.vote_on_proposal.when.called_with(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
).should.throw(
Exception,
"Member {0} has already voted on proposal {1}.".format(member_id, proposal_id),
)
@mock_managedblockchain
def test_list_proposal_votes_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.list_proposal_votes.when.called_with(
NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
ProposalId="p-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_list_proposal_votes_badproposal():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.list_proposal_votes.when.called_with(
NetworkId=network_id, ProposalId="p-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found")
| 31.836066 | 87 | 0.679805 | 0 | 0 | 0 | 0 | 21,101 | 0.987782 | 0 | 0 | 4,962 | 0.232282 |
e8f4751164f0af5c0a64d647c72d41648acdaa32 | 752 | py | Python | docs/Others/Python_leetcode/Codes/1.Two_Sum.py | mheanng/PythonNote | e3e5ede07968fab0a45f6ac4db96e62092c17026 | [
"Apache-2.0"
] | 2 | 2020-04-09T05:56:23.000Z | 2021-03-25T18:42:36.000Z | docs/Others/Python_leetcode/Codes/1.Two_Sum.py | mheanng/PythonNote | e3e5ede07968fab0a45f6ac4db96e62092c17026 | [
"Apache-2.0"
] | 22 | 2020-04-09T06:09:14.000Z | 2021-01-06T01:05:32.000Z | docs/Others/Python_leetcode/Codes/1.Two_Sum.py | mheanng/PythonNote | e3e5ede07968fab0a45f6ac4db96e62092c17026 | [
"Apache-2.0"
] | 6 | 2020-03-09T07:19:21.000Z | 2021-01-05T23:23:42.000Z | """
给定一个整数数组 nums 和一个目标值 target,请你在该数组中找出和为目标值的那 两个 整数,并返回他们的数组下标。
你可以假设每种输入只会对应一个答案。但是,你不能重复利用这个数组中同样的元素。
示例:
给定 nums = [2, 7, 11, 15], target = 9
因为 nums[0] + nums[1] = 2 + 7 = 9
所以返回 [0, 1]
"""
# %%
class Solution:
def twoSum(self, nums, target):
S = set(nums)
for num in S:
pre = target - num
if num == pre:
if nums.count(num) > 1:
index1 = nums.index(num)
return [index1, nums.index(num, index1+1)]
elif pre in S:
return [nums.index(num), nums.index(target - num)]
nums = [2, 2, 7, 11, 15]
target = 9
print(Solution().twoSum(nums, target))
nums = [2, 5, 5, 11]
target = 10
print(Solution().twoSum(nums, target))
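# Alternative one-pass sketch (added for comparison, not part of the original):
# a dict of value -> index gives O(n) time and returns first-seen indices.
def two_sum_one_pass(nums, target):
    seen = {}
    for i, num in enumerate(nums):
        # If the complement was seen earlier, we have our pair
        if target - num in seen:
            return [seen[target - num], i]
        seen[num] = i

print(two_sum_one_pass([2, 7, 11, 15], 9))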
| 22.117647 | 66 | 0.542553 | 395 | 0.417107 | 0 | 0 | 0 | 0 | 0 | 0 | 396 | 0.418163 |
e8f4b23ed19c18a99bdef84f2585aee923db8769 | 3,306 | py | Python | pyathena/util/rebin.py | changgoo/pyathena-1 | c461ac3390d773537ce52393e3ebf68a3282aa46 | [
"MIT"
] | 1 | 2019-10-03T13:59:14.000Z | 2019-10-03T13:59:14.000Z | pyathena/util/rebin.py | changgoo/pyathena-1 | c461ac3390d773537ce52393e3ebf68a3282aa46 | [
"MIT"
] | 3 | 2020-09-23T23:36:17.000Z | 2022-01-11T06:16:56.000Z | pyathena/util/rebin.py | changgoo/pyathena-1 | c461ac3390d773537ce52393e3ebf68a3282aa46 | [
"MIT"
] | 2 | 2019-06-10T04:26:16.000Z | 2019-12-04T22:27:02.000Z | from __future__ import print_function
import numpy as np
def rebin_xyz(arr, bin_factor, fill_value=None):
"""
Function to rebin masked 3d array.
Parameters
----------
arr : ndarray
Masked or unmasked 3d numpy array. Shape is assumed to be (nz, ny, nx).
bin_factor : int
binning factor
fill_value: float
If arr is a masked array, fill masked elements with fill_value.
If *None*, masked elements will be neglected in calculating average.
Default value is *None*.
Return
------
arr_rebin: ndarray
Smaller size, (averaged) 3d array. Shape is assumed to be
(nz//bin_factor, ny//bin_factor, nx//bin_factor)
"""
if bin_factor == 1:
return arr
# number of cells in the z-direction and xy-direction
nz0 = arr.shape[0]
ny0 = arr.shape[1]
nx0 = arr.shape[2]
# size of binned array
nz1 = nz0 // bin_factor
ny1 = ny0 // bin_factor
nx1 = nx0 // bin_factor
if np.ma.is_masked(arr) and fill_value is not None:
np.ma.set_fill_value(arr, fill_value)
arr = arr.filled()
# See
# https://stackoverflow.com/questions/4624112/grouping-2d-numpy-array-in-average/4624923#4624923
return arr.reshape([nz1, nz0//nz1, ny1, ny0//ny1, nx1, nx0//nx1]).mean(axis=-1).mean(axis=3).mean(axis=1)
def rebin_xy(arr, bin_factor, fill_value=None):
"""
Function to rebin masked 3d array in the x-y dimension.
Parameters
----------
arr : ndarray
Masked or unmasked 3d numpy array. Shape is assumed to be (nz, ny, nx).
bin_factor : int
binning factor
fill_value: float
If arr is a masked array, fill masked elements with fill_value.
If *None*, masked elements will be neglected in calculating average.
Default value is *None*.
Return
------
arr_rebin: ndarray
        Smaller size, (averaged) 3d array. Shape will be
(nz, ny//bin_factor, nx//bin_factor)
"""
if bin_factor == 1:
return arr
# number of cells in the z-direction and xy-direction
nz = arr.shape[0]
ny0 = arr.shape[1]
nx0 = arr.shape[2]
# size of binned array
ny1 = ny0 // bin_factor
nx1 = nx0 // bin_factor
if np.ma.is_masked(arr) and fill_value is not None:
np.ma.set_fill_value(arr, fill_value)
arr = arr.filled()
# See
# https://stackoverflow.com/questions/4624112/grouping-2d-numpy-array-in-average/4624923#4624923
return arr.reshape([nz, ny1, ny0//ny1, nx1, nx0//nx1]).mean(axis=-1).mean(axis=2)
if __name__ == '__main__':
# Test of rebin_xy
mask = True
# Define test data
big = np.ma.array([[5, 5, 1, 2],
[5, 5, 2, 1],
[2, 1, 1, 1],
[2, 1, 1, 1]])
if mask:
big.mask = [[1, 1, 0, 0],
[0, 1, 1, 1],
[1, 0, 1, 0],
[1, 1, 1, 0]]
big = np.tile(big, (1, 1, 1))
    small1 = rebin_xy(big, 2, fill_value=0.0)  # fixed: the function defined above is rebin_xy
    small2 = rebin_xy(big, 2, fill_value=None)
print('Original array\n', big)
print('With fill value 0.0\n', small1)
print('Without fill value\n', small2)
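    # Added demo (illustrative): rebin_xyz averages bin_factor^3 blocks; here a
    # 2x2x2 cube collapses to a single averaged cell.
    cube = np.arange(8.0).reshape(2, 2, 2)
    print('rebin_xyz of a 2x2x2 cube\n', rebin_xyz(cube, 2))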
| 28.747826 | 109 | 0.578947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,686 | 0.509982 |
e8f589ed3e76934af8053deea1546715e9acd247 | 9,095 | py | Python | fn_utilities/fn_utilities/components/utilities_shell_command.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | 1 | 2020-08-25T03:43:07.000Z | 2020-08-25T03:43:07.000Z | fn_utilities/fn_utilities/components/utilities_shell_command.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | 1 | 2019-07-08T16:57:48.000Z | 2019-07-08T16:57:48.000Z | fn_utilities/fn_utilities/components/utilities_shell_command.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2018. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import os
import logging
import time
import shlex
import subprocess
import json
import chardet
import winrm
import re
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from resilient_circuits.template_functions import render
class FunctionComponent(ResilientComponent):
"""Component that implements Resilient function 'shell_command"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.options = opts.get("fn_utilities", {})
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.options = opts.get("fn_utilities", {})
@function("utilities_shell_command")
def _shell_command_function(self, event, *args, **kwargs):
"""Function: Runs a shell command."""
try:
# Get the function parameters:
shell_command = kwargs.get('shell_command') # text
shell_remote = kwargs.get("shell_remote") # boolean
shell_param1 = kwargs.get("shell_param1") # text
shell_param2 = kwargs.get("shell_param2") # text
shell_param3 = kwargs.get("shell_param3") # text
log = logging.getLogger(__name__)
log.info("shell_command: %s", shell_command)
log.info("shell_remote: %s", shell_remote)
log.info("shell_param1: %s", shell_param1)
log.info("shell_param2: %s", shell_param2)
log.info("shell_param3: %s", shell_param3)
# Options keys are lowercase, so the shell command name needs to be lowercase
if shell_command:
shell_command = shell_command.lower()
# Escape the input parameters
escaping = self.options.get("shell_escaping", "sh")
escaped_args = {
"shell_param1": render(u"{{shell_param1|%s}}" % escaping, kwargs),
"shell_param2": render(u"{{shell_param2|%s}}" % escaping, kwargs),
"shell_param3": render(u"{{shell_param3|%s}}" % escaping, kwargs)
}
# If running a remote script, get the remote computer and the remote command
if shell_remote:
colon_split = shell_command.split(':')
if len(colon_split) != 2:
raise ValueError("Remote commands must be of the format remote_command_name:remote_computer_name, "
"'%s' was specified" % shell_command)
else:
shell_command = colon_split[0].strip()
if self.options.get(colon_split[1]) is None:
raise ValueError('The remote computer %s is not configured' % colon_split[1])
else:
remote = self.options.get(colon_split[1]).strip()
if remote.startswith('(') and remote.endswith(')'):
remote = remote[1:-1]
else:
raise ValueError('Remote computer configurations must be wrapped in parentheses (), '
"%s was specfied" % remote)
# Get remote credentials
remote_config = re.split(':|@', remote)
if len(remote_config) != 3:
raise ValueError('Remote machine %s must be of the format username:password@server, '
"'%s' was specified" % remote)
else:
remote_user = remote_config[0]
remote_password = remote_config[1]
remote_server = remote_config[2]
# Check if command is configured
if shell_command not in self.options:
if ':' in shell_command:
raise ValueError("Syntax for a remote command '%s' was used but remote_shell was set to False"
% shell_command)
raise ValueError('%s command not configured' % shell_command)
shell_command_base = self.options[shell_command].strip()
# Remote commands must wrap a path with []
if shell_command_base.startswith('[') and shell_command_base.endswith(']'):
if shell_remote:
extension = shell_command_base[1:-1].strip().split('.')[-1]
if extension not in self.options.get('remote_powershell_extensions'):
raise ValueError("The specified file must be have extension %s but %s was specified" %
(str(self.options.get('remote_powershell_extensions')), extension))
# Format shell parameters
shell_command_base = shell_command_base[1:-1].strip()
if shell_param1:
shell_command_base = shell_command_base + ' "{{shell_param1}}"'
else:
shell_command_base = shell_command_base + ' $null'
if shell_param2:
shell_command_base = shell_command_base + ' "{{shell_param2}}"'
else:
shell_command_base = shell_command_base + ' $null'
if shell_param3:
shell_command_base = shell_command_base + ' "{{shell_param3}}"'
else:
shell_command_base = shell_command_base + ' $null'
else:
raise ValueError("A remote command '%s' was specified but shell_remote was set to False"
% shell_command)
elif shell_remote:
raise ValueError('A remote command must specify a remote path wrapped in square brackets [], '
"'%s' was specified" % shell_command)
if shell_command_base.startswith('(') and shell_command_base.endswith(')') and not shell_remote:
raise ValueError('Please specify a valid shell command that is not wrapped in parentheses or brackets'
'when shell_remote is False')
commandline = render(shell_command_base, escaped_args)
if shell_remote:
session = winrm.Session(remote_server,
auth=(remote_user, remote_password),
transport=self.options.get('remote_auth_transport'))
tstart = time.time()
if escaping == "sh":
r = session.run_cmd(commandline)
elif escaping == "ps":
r = session.run_ps(commandline)
retcode = r.status_code
stdoutdata = r.std_out
stderrdata = r.std_err
tend = time.time()
else:
commandline = os.path.expandvars(commandline)
# Set up the environment
env = os.environ.copy()
# Execute the command line process (NOT in its own shell)
cmd = shlex.split(commandline, posix=True)
tstart = time.time()
call = subprocess.Popen(cmd,
shell=False,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
env=env)
stdoutdata, stderrdata = call.communicate()
retcode = call.returncode
tend = time.time()
encoding = chardet.detect(stdoutdata)["encoding"] or "utf-8"
result = stdoutdata.decode(encoding)
result_json = None
try:
# Let's see if the output can be decoded as JSON
result_json = json.loads(result)
except:
pass
output = stderrdata.decode(encoding)
output_json = None
try:
# Let's see if the output can be decoded as JSON
output_json = json.loads(output)
except:
pass
results = {
"commandline": commandline,
"start": int(tstart * 1000.0),
"end": int(tend * 1000.0),
"elapsed": int((tend - tstart) * 1000.0),
"exitcode": retcode, # Nonzero exit code indicates error
"stdout": result,
"stderr": output,
"stdout_json": result_json, # May be null
"stderr_json": output_json # May be null
}
yield FunctionResult(results)
except Exception:
yield FunctionError()
| 45.934343 | 119 | 0.534469 | 8,639 | 0.949863 | 8,092 | 0.88972 | 8,305 | 0.913139 | 0 | 0 | 2,417 | 0.26575 |
e8f5e5aaaf237abae1b7ee4f6b5a71282972a181 | 428 | py | Python | snapmerge/home/migrations/0010_project_email.py | R4356th/smerge | 2f2a6a4acfe3903ed4f71d90537f7277248e8b59 | [
"MIT"
] | 13 | 2018-07-16T09:59:55.000Z | 2022-01-27T19:07:17.000Z | snapmerge/home/migrations/0010_project_email.py | R4356th/smerge | 2f2a6a4acfe3903ed4f71d90537f7277248e8b59 | [
"MIT"
] | 55 | 2018-07-16T12:17:58.000Z | 2022-03-17T16:10:30.000Z | snapmerge/home/migrations/0010_project_email.py | R4356th/smerge | 2f2a6a4acfe3903ed4f71d90537f7277248e8b59 | [
"MIT"
] | 4 | 2019-10-10T20:16:49.000Z | 2021-03-12T07:15:50.000Z | # Generated by Django 2.0.1 on 2018-07-26 12:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0009_auto_20180726_1214'),
]
operations = [
migrations.AddField(
model_name='project',
name='email',
field=models.EmailField(blank=True, max_length=254, null=True, verbose_name='Email'),
),
]
| 22.526316 | 97 | 0.61215 | 335 | 0.78271 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.235981 |
e8f70cb35f8f35780ab43f567c03a983bf3f8b23 | 5,512 | py | Python | othello.py | edjacob25/Intelligent-Systems | ae24c7e1039c98cd2940a5a813aae27fabadb99f | [
"MIT"
] | null | null | null | othello.py | edjacob25/Intelligent-Systems | ae24c7e1039c98cd2940a5a813aae27fabadb99f | [
"MIT"
] | null | null | null | othello.py | edjacob25/Intelligent-Systems | ae24c7e1039c98cd2940a5a813aae27fabadb99f | [
"MIT"
] | null | null | null | from games import (GameState, Game, query_player, random_player,
alphabeta_player, play_game, minimax_decision,
alphabeta_full_search, alphabeta_search)
class Othello(Game):
"""Juega Othello en un tablero 8 x 8, con Max (primer jugador) jugando con
las fichas negras. Un estado tiene el jugador al que le toca jugar, una
utilidad en caché, una lista de movimientos en la forma de una lista de
posiciones (x, y), y un tablero, en la forma de un diccionario de entradas
{(x, y): Jugador}, donde Jugador es 'B' para las fichas blancas y 'N' para
las fichas negras."""
def __init__(self):
moves = [(x, y) for x in range(1, 9) for y in range(1, 9)]
moves.remove((4, 4))
moves.remove((4, 5))
moves.remove((5, 4))
moves.remove((5, 5))
        # 'N' always moves first
self.initial = GameState(to_move='N', utility=0,
board={(4, 4):'B',(4, 5):'N',
(5, 4):'N',(5, 5):'B'}, moves=moves)
def actions(self, state):
"""Los movimientos legales son las posiciones que se alinean con otra
pieza del mismo color del jugador que mueve formando una segmento de
línea recta que contenga al menos una pieza del contrincante."""
return self.legal_moves(state.moves, state.board, state.to_move)
def legal_moves(self, smoves, board, player):
"""Determina los movimientos legales de un jugador sobre un tablero"""
moves = []
for (x, y) in smoves:
if self.change_dir(x, y, -1, -1, board, player) \
or self.change_dir(x, y, -1, 0, board, player) \
or self.change_dir(x, y, -1, 1, board, player) \
or self.change_dir(x, y, 0, -1, board, player) \
or self.change_dir(x, y, 0, 1, board, player) \
or self.change_dir(x, y, 1, -1, board, player) \
or self.change_dir(x, y, 1, 0, board, player) \
or self.change_dir(x, y, 1, 1, board, player):
moves.append((x, y))
return moves
def change_dir(self, x, y, xd, yd, board, player):
"""Determina una dirección que cambiará el color de al menos una
pieza del contrincante"""
def find_player(x, y):
"""Determina si se encuentra otra pieza del jugador en una
dirección"""
x += xd
y += yd
while (x, y) in board:
if board[(x, y)] == player:
return True
else:
x += xd
y += yd
return False
x1 = x + xd
y1 = y + yd
if (x1, y1) in board and player != board[(x1, y1)]:
return find_player(x1, y1)
else:
return False
def result(self, state, move):
def change_player(xd, yd):
"""Cambia las piezas al color del jugador en una dirección"""
x, y = move
if self.change_dir(x, y, xd, yd, board, player):
x += xd
y += yd
while (x, y) in board:
if board[(x, y)] == player: return
else:
board[(x, y)] = player
x += xd
y += yd
board = state.board.copy()
moves = list(state.moves)
player = state.to_move
if move in self.actions(state):
board[move] = player
change_player(-1, -1)
change_player(-1, 0)
change_player(-1, 1)
change_player(0, -1)
change_player(0, 1)
change_player(1, -1)
change_player(1, 0)
change_player(1, 1)
moves.remove(move)
return GameState(to_move=('B' if player == 'N' else 'N'),
utility=self.compute_utility(board, move, player),
board=board, moves=moves)
else:
return GameState(to_move=('B' if player == 'N' else 'N'),
utility=self.compute_utility(board, move, player),
board=board, moves=moves)
def utility(self, state, player):
"Return the value to player; 1 for win, -1 for loss, 0 otherwise."
return state.utility if player == 'N' else -state.utility
def terminal_test(self, state):
"Un estado es terminal si ninguno de los jugadores tiene acciones"
laN = len(self.legal_moves(state.moves, state.board, 'N'))
laB = len(self.legal_moves(state.moves, state.board, 'B'))
return (laN + laB) == 0
def display(self, state):
"""despliega el estado del tablero de juego"""
board = state.board
print(' ', end=' ')
for y in range(1, 9): print(y, end=' ')
print()
for x in range(1, 9):
print(x, end=' ')
for y in range(1, 9):
print(board.get((x, y), '.'), end=' ')
print()
def compute_utility(self, board, move, player):
"""Regresa la diferencia entre el número de piezas de 'N' y
el número de piezas de 'B'"""
pieces = list(board.values())
return pieces.count('N') - pieces.count('B')
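# Illustrative usage sketch (added, not part of the original module); assumes
# the aima-python style `games` helpers imported above are available. Note that
# a full 8x8 game with alphabeta_player may be very slow without a depth cutoff.
#
# if __name__ == '__main__':
#     othello = Othello()
#     utility = play_game(othello, alphabeta_player, random_player)
#     print("Winner:", 'N' if utility > 0 else 'B')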
| 40.529412 | 80 | 0.502903 | 5,314 | 0.962681 | 0 | 0 | 0 | 0 | 0 | 0 | 1,321 | 0.239312 |
e8fb499b8bff3a7d1f52fd19a5495fe47b6684e5 | 2,165 | py | Python | bot/lib/controller/GetCommentTask.py | nullwriter/ig-actor | a089107657ccdf11ba213160c4cc5d3690cecd76 | [
"MIT"
] | null | null | null | bot/lib/controller/GetCommentTask.py | nullwriter/ig-actor | a089107657ccdf11ba213160c4cc5d3690cecd76 | [
"MIT"
] | null | null | null | bot/lib/controller/GetCommentTask.py | nullwriter/ig-actor | a089107657ccdf11ba213160c4cc5d3690cecd76 | [
"MIT"
] | null | null | null | import time
import re
from FileLogger import FileLogger as FL
import datetime
class GetCommentTask:
def __init__(self, task, name="extract-comment"):
self.task = task
self.name = name
self.comments = []
self.log = FL('Extracted Comments {:%Y-%m-%d %H:%M:%S}.txt'.format(datetime.datetime.now()))
def init_task(self):
hash_index = 0
loop = True
max_index = len(self.task.hashtags)
next_max_id = ""
while loop:
self.task.check_ops_limit()
current_hash = self.task.hashtags[hash_index]
self.task.api.getHashtagFeed(current_hash, maxid=next_max_id)
print ""
print "CURRENT HASHTAG = " + current_hash
print ""
ig_media = self.task.api.LastJson
if "next_max_id" not in ig_media:
print "####### Changing hashtag #######"
hash_index += 1
next_max_id = ""
if hash_index >= max_index - 1:
break
else:
next_max_id = self.do_task(ig_media)
def do_task(self, ig_media):
last_max_id = ig_media['next_max_id']
if "ranked_items" in ig_media:
key = "ranked_items"
else:
key = "items"
for ig in ig_media[key]:
self.task.api.getMediaComments(ig["id"])
for c in reversed(self.task.api.LastJson['comments']):
txt = c['text']
if self.check_string(txt):
self.comments.append(txt)
print "Comment = " + txt.encode('utf-8', 'ignore').decode('utf-8')
self.log.add_to_file(txt=txt)
self.task.task_count += 1
time.sleep(1)
time.sleep(self.task.get_time_delay())
return last_max_id
"""""
Checks if string doesnt contain special non-english characters, @, or Follow Me.
"""""
def check_string(self,str):
pattern = re.compile("^(?!follow|followme)[\s\w\d\?><;,\{\}\[\]\-_\+=!\#\$%^&\*\|\']*$")
return pattern.match(str)
| 28.116883 | 100 | 0.525635 | 2,083 | 0.962125 | 0 | 0 | 0 | 0 | 0 | 0 | 405 | 0.187067 |
e8fc53ef376367f7c8b17273ac9b30e8bcf26788 | 4,981 | py | Python | scripts/mc_counting_same_origin.py | jonassagild/Track-to-Track-Fusion | 6bb7fbe6a6e2d9a2713c47f211899226485eee79 | [
"MIT"
] | 4 | 2021-06-16T19:33:56.000Z | 2022-03-14T06:47:41.000Z | scripts/mc_counting_same_origin.py | jonassagild/Track-to-Track-Fusion | 6bb7fbe6a6e2d9a2713c47f211899226485eee79 | [
"MIT"
] | 2 | 2021-06-08T16:18:45.000Z | 2021-11-25T09:38:08.000Z | scripts/mc_counting_same_origin.py | jonassagild/Track-to-Track-Fusion | 6bb7fbe6a6e2d9a2713c47f211899226485eee79 | [
"MIT"
] | 4 | 2020-09-28T04:54:17.000Z | 2021-10-15T15:58:38.000Z | """
script to run mc sims on the three associations techniques when the tracks origin are equal. Used to calculate the
total number of correctly associating tracks and total # falsly not associating tracks from the same target.
"""
import numpy as np
from stonesoup.types.state import GaussianState
from data_association.CountingAssociator import CountingAssociator
from data_association.bar_shalom_hypothesis_associators import HypothesisTestDependenceAssociator, \
HypothesisTestIndependenceAssociator
from trackers.kf_dependent_fusion_async_sensors import KalmanFilterDependentFusionAsyncSensors
from utils import open_object
from utils.scenario_generator import generate_scenario_3
start_seed = 0
end_seed = 5 # normally 500
num_mc_iterations = end_seed - start_seed
# params
save_fig = False
# scenario parameters
sigma_process_list = [0.3] # [0.05, 0.05, 0.05, 0.5, 0.5, 0.5, 3, 3, 3]
sigma_meas_radar_list = [50] # [5, 30, 200, 5, 30, 200, 5, 30, 200]
sigma_meas_ais_list = [10] # [10] * 9
radar_meas_rate = 1 # relevant radar meas rates: 1
ais_meas_rate_list = [6] # relevant AIS meas rates: 2 - 12
timesteps = 200
# associator params
association_distance_threshold = 10
consecutive_hits_confirm_association = 3
consecutive_misses_end_association = 2
# dicts to store final results for printing in a latex friendly way
Pc_overall = {} # Pc is the percentage of correctly associating tracks that originate from the same target
something_else_overall = {}
stats = []
for sigma_process, sigma_meas_radar, sigma_meas_ais, ais_meas_rate in zip(sigma_process_list, sigma_meas_radar_list,
sigma_meas_ais_list, ais_meas_rate_list):
for seed in range(start_seed, end_seed):
# generate scenario
generate_scenario_3(seed=seed, permanent_save=False, radar_meas_rate=radar_meas_rate,
ais_meas_rate=ais_meas_rate, sigma_process=sigma_process,
sigma_meas_radar=sigma_meas_radar, sigma_meas_ais=sigma_meas_ais,
timesteps=timesteps)
folder = "temp" # temp instead of seed, as it is not a permanent save
# load ground truth and the measurements
data_folder = "../scenarios/scenario3/" + folder + "/"
ground_truth = open_object.open_object(data_folder + "ground_truth.pk1")
measurements_radar = open_object.open_object(data_folder + "measurements_radar.pk1")
measurements_ais = open_object.open_object(data_folder + "measurements_ais.pk1")
# load start_time
start_time = open_object.open_object(data_folder + "start_time.pk1")
# prior
initial_covar = np.diag([sigma_meas_radar * sigma_meas_ais, sigma_meas_radar * sigma_process,
sigma_meas_radar * sigma_meas_ais, sigma_meas_radar * sigma_process]) ** 2
prior = GaussianState([1, 1.1, -1, 0.9], initial_covar, timestamp=start_time)
kf_dependent_fusion = KalmanFilterDependentFusionAsyncSensors(start_time, prior,
sigma_process_radar=sigma_process,
sigma_process_ais=sigma_process,
sigma_meas_radar=sigma_meas_radar,
sigma_meas_ais=sigma_meas_ais)
tracks_fused_dependent, tracks_radar, tracks_ais = kf_dependent_fusion.track_async(
start_time, measurements_radar, measurements_ais, fusion_rate=1)
# use the CountingAssociator to evaluate whether the tracks are associated
associator = CountingAssociator(association_distance_threshold, consecutive_hits_confirm_association,
consecutive_misses_end_association)
num_correct_associations = 0
num_false_mis_associations = 0
for i in range(1, len(tracks_radar)):
# use the associator to check the association
associated = associator.associate_tracks(tracks_radar[:i], tracks_ais[:i])
if associated:
num_correct_associations += 1
else:
num_false_mis_associations += 1
# save the number of correct associations and false mis associations in a dict
stats_individual = {'seed': seed, 'num_correct_associations': num_correct_associations,
'num_false_mis_associations': num_false_mis_associations}
stats.append(stats_individual)
# todo count the number of associations that turn out to be correct
# calc the #correct_associations and #false_mis_associations
tot_num_correct_associations = sum([stat['num_correct_associations'] for stat in stats])
tot_num_false_mis_associations = sum([stat['num_false_mis_associations'] for stat in stats])
print("")
| 49.81 | 116 | 0.682995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,295 | 0.259988 |
e8fc72a77fdd416f9afae8ddad6132491cc5fabf | 4,405 | py | Python | c4/system/history.py | Brewgarten/c4-system-manager | 6fdec33ced4b1cb32d82a24cd168447a899b7e10 | [
"MIT"
] | null | null | null | c4/system/history.py | Brewgarten/c4-system-manager | 6fdec33ced4b1cb32d82a24cd168447a899b7e10 | [
"MIT"
] | 1 | 2017-10-17T21:51:40.000Z | 2017-10-17T21:51:40.000Z | c4/system/history.py | Brewgarten/c4-system-manager | 6fdec33ced4b1cb32d82a24cd168447a899b7e10 | [
"MIT"
] | null | null | null | """
Copyright (c) IBM 2015-2017. All Rights Reserved.
Project name: c4-system-manager
This project is licensed under the MIT License, see LICENSE
"""
from abc import ABCMeta, abstractmethod
class DeviceHistory(object):
"""
Device manager history
"""
__metaclass__ = ABCMeta
@abstractmethod
def add(self, node, name, status, ttl=None):
"""
Add status for device manager with specified name on specified node
:param node: node name
:type node: str
:param name: device manager name
:type name: str
:param status: status
:type status: :class:`DeviceManagerStatus`
:param ttl: time to live (in seconds), infinite by default
:type ttl: int
"""
@abstractmethod
def get(self, node, name, limit=None):
"""
Get status history for device manager with specified name on specified node
:param node: node name
:type node: str
:param name: device manager name
:type name: str
:param limit: number of statuses to return
:type limit: int
:returns: list of history entries
:rtype: [:class:`Entry`]
"""
@abstractmethod
def getAll(self):
"""
Get status history for all device managers on all nodes
:returns: list of history entries
:rtype: [:class:`Entry`]
"""
@abstractmethod
def getLatest(self, node, name):
"""
Get latest status for device manager with specified name on specified node
:param node: node name
:type node: str
:param name: device manager name
:type name: str
:returns: history entry
:rtype: :class:`Entry`
"""
@abstractmethod
def remove(self, node=None, name=None):
"""
Remove status history for device managers with specified names on specified nodes.
node and name:
remove history for specific device on a specific node
node and no name
remove history for all devices on a specific node
no node and name
remove history for specific device on all nodes
no node and no name
remove history for all devices on all nodes
:param node: node name
:type node: str
:param name: device manager name
:type name: str
"""
class Entry(object):
"""
History entry with timestamp and status information
:param timestamp: datetime instance
:type timestamp: :class:`Datetime`
:param status: status
:type status: :class:`SystemManagerStatus` or :class:`DeviceManagerStatus`
"""
def __init__(self, timestamp, status):
self.timestamp = timestamp
self.status = status
class NodeHistory(object):
"""
System manager history
"""
__metaclass__ = ABCMeta
@abstractmethod
def add(self, node, status, ttl=None):
"""
Add status for system manager with on specified node
:param node: node name
:type node: str
:param status: status
:type status: :class:`SystemManagerStatus`
:param ttl: time to live (in seconds), infinite by default
:type ttl: int
"""
@abstractmethod
def get(self, node, limit=None):
"""
Get status history for system manager on specified node
:param node: node name
:type node: str
:param limit: number of statuses to return
:type limit: int
:returns: list of history entries
:rtype: [:class:`Entry`]
"""
@abstractmethod
def getAll(self):
"""
Get status history for all system managers on all nodes
:returns: list of history entries
:rtype: [:class:`Entry`]
"""
@abstractmethod
def getLatest(self, node):
"""
Get latest status for system manager on specified node
:param node: node name
:type node: str
:returns: history entry
:rtype: :class:`Entry`
"""
@abstractmethod
def remove(self, node=None):
"""
Remove status history for system managers on specified nodes.
node:
remove history for specific node
no node
remove history for all nodes
:param node: node name
:type node: str
"""
| 26.065089 | 90 | 0.595687 | 4,208 | 0.955278 | 0 | 0 | 3,568 | 0.809989 | 0 | 0 | 3,444 | 0.781839 |
e8fe259271609942f6b02a56e3c6e11a5d2eed49 | 1,060 | py | Python | Modules/Helpers/Bomb/SerialNumber.py | cweeks12/KTANE-Solver | b162bd107c0ac90a892659461819214772a04f9d | [
"MIT"
] | null | null | null | Modules/Helpers/Bomb/SerialNumber.py | cweeks12/KTANE-Solver | b162bd107c0ac90a892659461819214772a04f9d | [
"MIT"
] | null | null | null | Modules/Helpers/Bomb/SerialNumber.py | cweeks12/KTANE-Solver | b162bd107c0ac90a892659461819214772a04f9d | [
"MIT"
] | null | null | null | class SerialNumber:
def __init__(self, serialNumber):
        if len(serialNumber) != 6:
raise ValueError('Serial Number must be 6 digits long')
self._serialNumber = serialNumber
def __str__(self):
return 'S/N: {}'.format(self._serialNumber)
def __repr__(self):
return 'SerialNumber: {}'.format(self._serialNumber)
def getSerialNumber(self):
return self._serialNumber
def containsVowel(self):
VOWELS = ['a', 'e', 'i', 'o', 'u']
        for character in self._serialNumber:
            # Compare case-insensitively, since serial numbers may be uppercase
            if character.lower() in VOWELS:
return True
return False
def lastDigitOdd(self):
try:
lastDigitValue = int(self._serialNumber[-1])
except ValueError:
return False
return lastDigitValue % 2 == 1
def lastDigitEven(self):
try:
lastDigitValue = int(self._serialNumber[-1])
except ValueError:
return False
return lastDigitValue % 2 == 0
| 27.894737 | 68 | 0.565094 | 1,058 | 0.998113 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.074528 |
e8ff4205f8efbe6454dce1e931b810fa2782cb7c | 5,876 | py | Python | zkfarmer/utils.py | artarik/zkfarmer | e1297bd27347ac8a5333b4b10aaae3868f457222 | [
"MIT"
] | null | null | null | zkfarmer/utils.py | artarik/zkfarmer | e1297bd27347ac8a5333b4b10aaae3868f457222 | [
"MIT"
] | null | null | null | zkfarmer/utils.py | artarik/zkfarmer | e1297bd27347ac8a5333b4b10aaae3868f457222 | [
"MIT"
] | null | null | null | import json
import operator
import logging
import re
import time
from socket import socket, AF_INET, SOCK_DGRAM
from functools import reduce
logger = logging.getLogger(__name__)
def ip():
"""Find default IP"""
ip = None
s = socket(AF_INET, SOCK_DGRAM)
try:
s.connect(('239.255.0.0', 9))
ip = s.getsockname()[0]
    except OSError:  # 'socket' here is the imported class, which has no 'error' attribute
raise RuntimeError("Cannot determine host IP")
finally:
del s
return ip
def serialize(data):
try:
if type(data) != dict:
raise TypeError('Must be a dict')
return json.dumps(data)
except Exception as e:
logger.warn('Cannot serialize: %s [%s]', data, e)
return '{}'
def unserialize(serialized):
if not serialized:
return {}
try:
data = json.loads(serialized)
if type(data) != dict:
raise TypeError('Not a dict')
return data
except Exception as e:
logger.warn('Cannot unserialize: %s [%s]', serialized, e)
return {}
def dict_get_path(the_dict, path):
try:
return reduce(operator.getitem, [the_dict] + path.split('.'))
    except (KeyError, IndexError, TypeError):
return None
def dict_set_path(the_dict, path, value):
current = the_dict
for component in path.split('.')[:-1]:
if component not in current or type(current[component]) != dict:
current[component] = {}
current = current[component]
current[path.split('.')[-1]] = value
def dict_filter(the_dict, field_or_fields=None):
if field_or_fields is None:
return the_dict
elif type(field_or_fields) == list:
fields = {}
for f in field_or_fields:
fields[f] = dict_get_path(the_dict, f)
return fields
elif isinstance(field_or_fields, str):
return dict_get_path(the_dict, field_or_fields)
else:
raise TypeError('Invalid type for field path: %s' % type(field_or_fields))
def get_operator(op):
try:
return {"==": operator.eq,
"=": operator.eq,
"!=": operator.ne,
">=": operator.ge,
"<=": operator.le,
">": operator.gt,
"<": operator.lt}[op]
except KeyError:
raise ValueError('Unknown operator: %s' % op)
def match_predicates(predicates, the_dict):
for predicate in predicates:
m1, m2 = (dict_get_path(the_dict, predicate['path']), predicate['value'])
if m1 is None and m2 is not None:
return False
        try:
            # Convert both operands or neither, so a failed conversion of one
            # side never leaves the pair with mixed types
            int(m1)
            int(m2)
            m1 = int(m1)
            m2 = int(m2)
        except (ValueError, TypeError):
            pass
if not predicate['op'](m1, m2):
return False
return True
def create_filter(filters):
if not filters:
return lambda a_dict: True
predicates = []
for f in filters.replace(' ', '').split(','):
predicate = {}
match = re.split('(!?[^><!=]+)(?:(>=|<=|!=|=|<|>)(.*))?', f, 2)
predicate['path'] = match[1]
if match[2]:
predicate['op'] = get_operator(match[2])
predicate['value'] = match[3]
else:
# predicate with not operator/value means "fields exists"
if predicate['path'][0] == '!':
predicate['path'] = predicate['path'][1:]
predicate['op'] = operator.is_
else:
predicate['op'] = operator.is_not
predicate['value'] = None
predicates.append(predicate)
return lambda the_dict: match_predicates(predicates, the_dict)
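# Example filter strings (illustrative): "enabled" keeps dicts where the
# 'enabled' path exists, "!enabled" keeps dicts where it does not, and
# "weight>=10,zone=eu" combines numeric and equality predicates with AND.
#   matcher = create_filter("weight>=10,zone=eu")
#   matcher({"weight": "12", "zone": "eu"})  # -> True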
class ColorizingStreamHandler(logging.StreamHandler):
"""Provide a nicer logging output to error output with colors"""
def __init__(self):
self.colors = ['black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white']
self.color_map = dict([(x, self.colors.index(x)) for x in self.colors])
self.level_map = {
logging.DEBUG: (None, 'blue', " DBG"),
logging.INFO: (None, 'green', "INFO"),
logging.WARNING: (None, 'yellow', "WARN"),
logging.ERROR: (None, 'red', " ERR"),
logging.CRITICAL: ('red', 'white', "CRIT")
}
self.csi = '\x1b['
self.reset = '\x1b[0m'
@property
def is_tty(self):
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty()
def colorize(self, message, record):
        if record.levelno in self.level_map:
            # Unpack the colour scheme; the level label string is truthy,
            # so the bold parameter below is always added
            bg, fg, bold = self.level_map[record.levelno]
            params = []
if bg in self.color_map:
params.append(str(self.color_map[bg] + 40))
if fg in self.color_map:
params.append(str(self.color_map[fg] + 30))
if bold:
params.append('1')
if params:
message = ''.join((self.csi, ';'.join(params),
'm', message, self.reset))
return message
def format(self, record):
message = logging.StreamHandler.format(self, record)
# Build the prefix
params = []
levelno = record.levelno
if levelno not in self.level_map:
levelno = logging.WARNING
bg, fg, level = self.level_map[levelno]
if bg in self.color_map:
params.append(str(self.color_map[bg] + 40))
if fg in self.color_map:
params.append(str(self.color_map[fg] + 30))
params.append("1m")
level = "[%s]" % level
return "\n".join(["%s %s: %s" % (
time.strftime("%Y-%m-%dT%H:%M:%S"),
self.is_tty and params and ''.join((self.csi, ';'.join(params),
level, self.reset)) or level,
line)
for line in message.split('\n')])
| 31.934783 | 96 | 0.536079 | 2,257 | 0.384105 | 0 | 0 | 120 | 0.020422 | 0 | 0 | 704 | 0.119809 |
e8ff9e109d1e3411f4ef5d970014c1908546fefe | 6,168 | py | Python | support/update_dht_servers.py | sonofmom/ton-zabbix-scripts | b43471d058873c5ba78a92fa79d334380df5f6fc | [
"MIT"
] | null | null | null | support/update_dht_servers.py | sonofmom/ton-zabbix-scripts | b43471d058873c5ba78a92fa79d334380df5f6fc | [
"MIT"
] | null | null | null | support/update_dht_servers.py | sonofmom/ton-zabbix-scripts | b43471d058873c5ba78a92fa79d334380df5f6fc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import argparse
import Libraries.arguments as ar
import Libraries.tools.general as gt
import Libraries.tools.zabbix as zt
import Classes.AppConfig as AppConfig
import requests
import copy
def run():
description = 'Fetches list of dht servers from network config and performs sync with zabbix'
parser = argparse.ArgumentParser(formatter_class = argparse.RawDescriptionHelpFormatter,
description = description)
ar.set_standard_args(parser, "other")
cfg = AppConfig.AppConfig(parser.parse_args())
stats = {
"nodes": 0,
"hosts_known": 0,
"hosts_updated": 0,
"hosts_added": 0,
"hosts_disabled": 0
}
cfg.log.log(os.path.basename(__file__), 3, "Fetching network config.")
try:
rs = requests.get(cfg.config["configs"]["global_public"]).json()
except Exception as e:
cfg.log.log(os.path.basename(__file__), 1, "Could not retrieve network config: " + str(e))
sys.exit(1)
if len(rs["dht"]["static_nodes"]["nodes"]) > 0:
nodes = {}
# We identify DHT nodes by ip:port combination
#
for element in rs["dht"]["static_nodes"]["nodes"]:
nodes["{}.{}".format(gt.dec2ip(element["addr_list"]["addrs"][0]["ip"]),element["addr_list"]["addrs"][0]["port"])] = element
else:
cfg.log.log(os.path.basename(__file__), 1, "Network config contains no nodes")
sys.exit(1)
stats["nodes"] = len(nodes)
cfg.log.log(os.path.basename(__file__), 3, "Retrieved {} DHT servers.".format(stats["nodes"]))
cfg.log.log(os.path.basename(__file__), 3, "Fetching list of hosts in zabbix.")
rs = zt.fetch_hosts(cfg, [cfg.config["mapping"]["groups"]["ton_public_dht_servers"]])
if rs is None:
cfg.log.log(os.path.basename(__file__), 1, "Could not fetch list of hosts.")
sys.exit(1)
# Again, we identify hosts by ip:port
hdata = {}
for element in rs:
port = next((chunk for chunk in element["macros"] if chunk["macro"] == "{$DHT.PORT}"), None)
if port:
hdata["{}.{}".format(element["interfaces"][0]["ip"], port["value"])] = element
stats["hosts_known"] = len(hdata)
cfg.log.log(os.path.basename(__file__), 3, "Retrieved {} hosts.".format(stats["hosts_known"]))
# Scan nodes from network config, add or update key as needed
#
for element in nodes:
        if element not in hdata:
            # 2130706433 == 127.0.0.1; skip loopback placeholder entries
            if nodes[element]["addr_list"]["addrs"][0]["ip"] != 2130706433:
cfg.log.log(os.path.basename(__file__), 3, "Adding node {}.".format(element))
rs = add_node(cfg,nodes[element])
if not rs:
cfg.log.log(os.path.basename(__file__), 1, "Could not add host.")
sys.exit(1)
stats["hosts_added"] += 1
else:
host = copy.deepcopy(hdata[element])
key = next((chunk for chunk in host["macros"] if chunk["macro"] == "{$DHT.KEY}"), None)
if not key or key["value"] != nodes[element]["id"]["key"]:
zt.set_macro(host["macros"], "{$DHT.KEY}", str(nodes[element]["id"]["key"]))
if host != hdata[element]:
cfg.log.log(os.path.basename(__file__), 3, "Updating node {}.".format(element))
zt.update_host(cfg, host, hdata[element])
stats["hosts_updated"] += 1
# Scan nodes from zabbix, remove if unknown
#
for host in hdata:
if host not in nodes:
zt.delete_host(cfg, hdata[host])
sys.exit(0)
def add_node(cfg, server_data):
cfg.log.log(os.path.basename(__file__), 3, "Adding host with KEY {}".format(server_data["id"]["key"]))
groups = [
cfg.config["mapping"]["groups"]["ton_public_dht_servers"]
]
templates = [
cfg.config["mapping"]["templates"]["ton_dht_server"]
]
payload = {
"jsonrpc": "2.0",
"method": "host.create",
"params": {
"host": "TON DHT node {}.{}".format(gt.dec2ip(server_data["addr_list"]["addrs"][0]["ip"]),server_data["addr_list"]["addrs"][0]["port"]),
"interfaces":
[
{
"type": 1,
"main": 1,
"useip": 1,
"ip": gt.dec2ip(server_data["addr_list"]["addrs"][0]["ip"]),
"dns": "",
"port": "10050"
}
],
"tags": [
{
"tag": "c_network",
"value": cfg.config["net"]
},
{
"tag": "c_stage",
"value": "prod"
},
{
"tag": "c_origin",
"value": "dht_sync"
}
],
"macros":
[
{
"macro": "{$DHT.KEY}",
"value": server_data["id"]["key"]
},
{
"macro": "{$DHT.PORT}",
"value": str(server_data["addr_list"]["addrs"][0]["port"])
},
{
"macro": "{$UPDATED}",
"value": str(gt.get_timestamp())
}
],
"groups": [],
"templates": []
},
"auth": cfg.config["zabbix"]["api_token"],
"id": 1
}
for element in groups:
payload["params"]["groups"].append({"groupid": element})
for element in templates:
payload["params"]["templates"].append({"templateid": element})
rs = zt.execute_api_query(cfg, payload)
if not rs:
cfg.log.log(os.path.basename(__file__), 1, "Failed to add host with KEY {}".format(server_data["id"]["key"]))
sys.exit(1)
return rs["result"]["hostids"][0]
if __name__ == '__main__':
run()
| 35.245714 | 148 | 0.50989 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,710 | 0.277237 |
330000bbbc4cd1509f76a97c2471648387f8c603 | 332 | py | Python | src/data/dbConnection.py | leonardoleyva/api-agenda-uas | 697740a0a3feebb2ada01133db020fcf5127e1de | [
"MIT"
] | 1 | 2022-03-13T02:28:29.000Z | 2022-03-13T02:28:29.000Z | src/data/dbConnection.py | leonardoleyva/api-agenda-uas | 697740a0a3feebb2ada01133db020fcf5127e1de | [
"MIT"
] | null | null | null | src/data/dbConnection.py | leonardoleyva/api-agenda-uas | 697740a0a3feebb2ada01133db020fcf5127e1de | [
"MIT"
] | null | null | null | from firebase_admin import firestore
from google.cloud.firestore_v1.base_client import BaseClient
from ..core.app import App
class DBConnection(App):
def __init__(self) -> None:
super().__init__()
self.__db: BaseClient = firestore.client()
def getDBInstance(self) -> BaseClient:
return self.__db
| 23.714286 | 60 | 0.710843 | 203 | 0.611446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3300e6cf675f4b79e58bd1692aff8eee2b9eee77 | 124 | py | Python | math/PowerModPower.py | silvioedu/HackerRank-Python-Practice | e31ebe49d431c0a23fed0cd67a6984e2b0b7a260 | [
"MIT"
] | null | null | null | math/PowerModPower.py | silvioedu/HackerRank-Python-Practice | e31ebe49d431c0a23fed0cd67a6984e2b0b7a260 | [
"MIT"
] | null | null | null | math/PowerModPower.py | silvioedu/HackerRank-Python-Practice | e31ebe49d431c0a23fed0cd67a6984e2b0b7a260 | [
"MIT"
] | null | null | null | if __name__ == '__main__':
    a, b, m = int(input()), int(input()), int(input())
print(pow(a,b))
print(pow(a,b,m)) | 31 | 53 | 0.532258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.080645 |
3301f92fef2ea95eab5a3e90a808b11c54276e49 | 273 | py | Python | test_autolens/integration/tests/imaging/lens_only/mock_nlo/lens_light__hyper_bg_noise.py | PyJedi/PyAutoLens | bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7 | [
"MIT"
] | 1 | 2020-04-06T20:07:56.000Z | 2020-04-06T20:07:56.000Z | test_autolens/integration/tests/imaging/lens_only/mock_nlo/lens_light__hyper_bg_noise.py | PyJedi/PyAutoLens | bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7 | [
"MIT"
] | null | null | null | test_autolens/integration/tests/imaging/lens_only/mock_nlo/lens_light__hyper_bg_noise.py | PyJedi/PyAutoLens | bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7 | [
"MIT"
] | null | null | null | from test_autolens.integration.tests.imaging.lens_only import lens_light__hyper_bg_noise
from test_autolens.integration.tests.imaging.runner import run_a_mock
class TestCase:
def _test__lens_light__hyper_bg_noise(self):
run_a_mock(lens_light__hyper_bg_noise)
| 34.125 | 88 | 0.849817 | 111 | 0.406593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
33026e3d3385ab0a61779b22eebf7f1ae1b53d97 | 3,047 | py | Python | pyleecan/Methods/Slot/HoleM51/_comp_point_coordinate.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 95 | 2019-01-23T04:19:45.000Z | 2022-03-17T18:22:10.000Z | pyleecan/Methods/Slot/HoleM51/_comp_point_coordinate.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 366 | 2019-02-20T07:15:08.000Z | 2022-03-31T13:37:23.000Z | pyleecan/Methods/Slot/HoleM51/_comp_point_coordinate.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 74 | 2019-01-24T01:47:31.000Z | 2022-02-25T05:44:42.000Z | from numpy import exp, pi, cos, sin, tan
from ....Functions.Geometry.inter_line_circle import inter_line_circle
def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : HoleM51
A HoleM51 object
Returns
-------
point_dict: dict
A dict of the slot coordinates
"""
Rext = self.get_Rext()
# comp point coordinate (in complex)
alpha = self.comp_alpha()
Wslot = 2 * sin(self.W1 / 2) * (Rext - self.H1)
L = 0.5 * (Wslot - self.W0) / cos(alpha) # ||P2,P5||
# Center of the hole
Z0 = Rext - self.H0
Z2 = Z0 + 1j * self.W0 / 2
Z25 = Z0 - 1j * self.W0 / 2
Z15 = Z25 - self.H2
Z1 = Z2 - 1j * self.W2
Z26 = Z1 - 1j * self.W3
Z12 = Z2 - self.H2
Z13 = Z12 - 1j * self.W2
Z14 = Z13 - 1j * self.W3
Z11 = Z12 + 1j * tan(alpha / 2) * self.H2
Z16 = Z15 - 1j * tan(alpha / 2) * self.H2
# Draw the left side with center P2, and X axis =(P2,P5), Y axis=(P2,P10)
Z3 = self.W4 * exp(1j * (pi / 2 - alpha)) + Z2
Z4 = (self.W4 + self.W5) * exp(1j * (pi / 2 - alpha)) + Z2
Z5 = (Rext - self.H1) * exp(1j * self.W1 / 2)
Z10 = (1j * self.H2) * exp(1j * (pi / 2 - alpha)) + Z2
Z9 = (1j * self.H2 + self.W4) * exp(1j * (pi / 2 - alpha)) + Z2
Z8 = (1j * self.H2 + self.W4 + self.W5) * exp(1j * (pi / 2 - alpha)) + Z2
Z7 = (1j * self.H2 + L) * exp(1j * (pi / 2 - alpha)) + Z2
# Draw the right side with center P25, X axis (P25,P23), Y axis(P25,P17)
Z24 = self.W6 * exp(-1j * (pi / 2 - alpha)) + Z25
Z23 = (self.W6 + self.W7) * exp(-1j * (pi / 2 - alpha)) + Z25
Z22 = (Rext - self.H1) * exp(-1j * self.W1 / 2)
Z17 = (-1j * self.H2) * exp(-1j * (pi / 2 - alpha)) + Z25
Z18 = (-1j * self.H2 + self.W6) * exp(-1j * (pi / 2 - alpha)) + Z25
Z19 = (-1j * self.H2 + self.W6 + self.W7) * exp(-1j * (pi / 2 - alpha)) + Z25
Z20 = (-1j * self.H2 + L) * exp(-1j * (pi / 2 - alpha)) + Z25
# Z6 is the intersection of the line [Z7,Z10] and Circle centre
# (0,0) radius Rext - H1
Zint = inter_line_circle(Z7, Z10, Rext - self.H1)
# Select the point with Re(Z) > 0
if Zint[0].real > 0:
Z6 = Zint[0]
else:
Z6 = Zint[1]
Z21 = Z6.conjugate()
point_dict = dict()
point_dict["Z1"] = Z1
point_dict["Z2"] = Z2
point_dict["Z3"] = Z3
point_dict["Z4"] = Z4
point_dict["Z5"] = Z5
point_dict["Z6"] = Z6
point_dict["Z7"] = Z7
point_dict["Z8"] = Z8
point_dict["Z9"] = Z9
point_dict["Z10"] = Z10
point_dict["Z11"] = Z11
point_dict["Z12"] = Z12
point_dict["Z13"] = Z13
point_dict["Z14"] = Z14
point_dict["Z15"] = Z15
point_dict["Z16"] = Z16
point_dict["Z17"] = Z17
point_dict["Z18"] = Z18
point_dict["Z19"] = Z19
point_dict["Z20"] = Z20
point_dict["Z21"] = Z21
point_dict["Z22"] = Z22
point_dict["Z23"] = Z23
point_dict["Z24"] = Z24
point_dict["Z25"] = Z25
point_dict["Z26"] = Z26
return point_dict
| 31.091837 | 81 | 0.535609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 678 | 0.222514 |
3302f95944549893e6c718830b8f06c614895c10 | 8,700 | py | Python | Python/cs611python.py | david145/CS6112018 | 7a74c239bf5157507594157b5871c9d0c70fcc23 | [
"MIT"
] | null | null | null | Python/cs611python.py | david145/CS6112018 | 7a74c239bf5157507594157b5871c9d0c70fcc23 | [
"MIT"
] | 1 | 2018-10-29T17:41:08.000Z | 2018-10-29T17:41:08.000Z | Python/cs611python.py | david145/CS6112018 | 7a74c239bf5157507594157b5871c9d0c70fcc23 | [
"MIT"
] | null | null | null | print("\n")
print("PythonExercises-v2 by David Bochan")
print("\n")
print("=== EXERCISE 1 ===")
print("\n")
print("(a) 5 / 3 = " + str(5 / 3))
print("=> with python3 you can receive a float even if you divide two \
integers")
print("\n")
print("(b) 5 % 3 = " + str(5 % 3))
print("=> % is the modulus which divides left hand operand by right hand \
operand and returns remainder")
print("\n")
print("(c) 5.0 / 3 = " + str(5.0 / 3))
print("=> outputs a float number.. there is no difference if a plain 5 or 5.0 \
is used")
print("\n")
print("(d) 5 / 3.0 = " + str(5 / 3.0))
print("=> outputs a float number.. there is no difference if a plain 3 or 3.0 \
is used")
print("\n")
print("(e) 5.2 % 3 = " + str(5.2 % 3))
print("=> % is the modulus which divides left hand operand by right hand \
operand and returns remainder")
print("\n")
print("=== EXERCISE 2 ===")
print("\n")
print("(a) 2000.3 ** 200 = ...")
try:
print(str(2000.3 ** 200))
except OverflowError as e:
print("=> The python3 interpreter throws a OverflowError " + str(e))
print("\n")
print("(b) 1.0 + 1.0 - 1.0 = " + str(1.0 + 1.0 - 1.0))
print("=> Addition and substraction of float values which results in another \
float value")
print("\n")
print("(c) 1.0 + 1.0e20 - 1.0e20 = " + str(1.0 + 1.0e20 - 1.0e20))
print("=> 1.0 + 1.0e20 is rounded as close as possible, which is 1.0e20 and \
after substraction of it again it results in 0.0")
print("\n")
print("=== EXERCISE 3 ===")
print("\n")
print("(a) float(123) = " + str(float(123)))
print("=> Takes the integer value 123 as input and casts it to the float \
value 123.0")
print("\n")
print("(b) float('123') = " + str(float('123')))
print("=> Takes the string '123' as input and casts it to the float value \
123.0")
print("\n")
print("(c) float('123.23') = " + str(float('123.23')))
print("=> Takes the string '123.23' as input and casts it to the float value \
123.23")
print("\n")
print("(d) int(123.23) = " + str(int(123.23)))
print("=> Takes the float 123.23 as input and casts it to the integer value \
123")
print("\n")
print("(e) int('123.23') = ...")
try:
int('123.23')
except ValueError as e:
print("=> The int() function can't cast a string to float to int and thus \
throws a ValueError (" + str(e) + ")")
print("\n")
print("(f) int(float('123.23')) = " + str(int(float(123.23))))
print("=> As we cast the string to float first, we can use it as a input to \
the int() function and receive a integer")
print("\n")
print("(g) str(12) = " + str(12))
print("=> Takes the integer 12 as input and casts it to the string '12'")
print("\n")
print("(h) str(12.2) = " + str(12.2))
print("=> Takes the float 12.2 as input and casts it to the string '12.2'")
print("\n")
print("(i) bool('a') = " + str(bool('a')))
print("=> Because an actual value (the character 'a') is passed to the bool() \
function, True is returned")
print("\n")
print("(j) bool(0) = " + str(bool(0)))
print("=> The boolean value False equals 0 in python, thus False is returned")
print("\n")
print("(k) bool(0.1) = " + str(bool(0.1)))
print("=> Because a value != 0 is provided in the bool() function, \
it returns True")
print("\n")
print("=== EXERCISE 4 ===")
print("\n")
print("range(5) = {}".format(range(5)))
print("=> range(5) returns a sequence of integers from 0 to 4. for i in \
range(5) is consequently iterating over the sequence of integers")
print("\n")
print("type(range(5)) = {}".format(type(range(5))))
print("=> The type function returns an object's class. For range(5) the class \
range is returned")
print("\n")
print("=== EXERCISE 5 ===")
print("\n")
def div_by_number(numbers_list, max_found):
number_found = 0
x = 1
while number_found < max_found:
for number in numbers_list:
if x % number == 0:
print(x)
number_found = number_found + 1
x = x + 1
numbers_list = [5, 7, 11]
print("div_by_number({}, 20)\n".format(numbers_list))
div_by_number(numbers_list, 20)
print("\n")
print("=== EXERCISE 6 ===")
print("\n")
print("(a) & (b)\n")
def is_prime(n):
if n <= 3:
return n > 1
elif n % 2 == 0 or n % 3 == 0:
return False
i = 5
while i * i <= n:
if n % i == 0 or n % (i + 2) == 0:
return False
i = i + 6
return True
print("is_prime(0) = {}\n".format(is_prime(0)))
print("is_prime(1) = {}\n".format(is_prime(1)))
print("is_prime(3) = {}\n".format(is_prime(3)))
print("is_prime(7) = {}\n".format(is_prime(7)))
print("is_prime(8) = {}\n".format(is_prime(8)))
print("is_prime(112331) = {}".format(is_prime(112331)))
def primes_up_to(n):
primes = []
for i in range(0, n):
if is_prime(i):
primes.append(i)
return primes
print("\n(c) primes_up_to(100) = {}".format(primes_up_to(100)))
def first_primes(n):
primes = []
i = 0
while len(primes) < n:
if is_prime(i):
primes.append(i)
i = i + 1
return primes
print("\n(d) first_primes(12) = {}".format(first_primes(12)))
print("\n")
print("=== EXERCISE 7 ===")
print("\n")
print("(a) print_elements(elements_list)\n")
def print_elements(elements):
for element in elements:
print(element)
elements_list = [12, "abc", 92.2, "hello"]
print_elements(elements_list)
print("\n(b) print_elements_reverse(elements_list)\n")
def print_elements_reverse(elements):
for element in elements[::-1]:
print(element)
print_elements_reverse(elements_list)
print("\n(c) len_elements(elements_list)\n")
def len_elements(elements):
count = 0
for _ in elements:
count = count + 1
return count
print("len_elements(elements_list) = {}".format(len_elements(elements_list)))
print("\n")
print("=== EXERCISE 8 ===")
a = [12, "abc", 92.2, "hello"]
print("\n")
print("(a) a = {}".format(a))
print("\n(b) b = a")
b = a
print("\n(c) b[1] = 'changed'")
b[1] = "changed"
print("\n(d) a = {}".format(a))
print("=> b is binding to the same object as a, so when b[1] was changed \
a[1] also shows the change")
print("\n(e) c = a[:]")
c = a[:]
print("\n(f) c[2] = 'also changed'")
c[2] = "also changed"
print("\n(g) a = {}".format(a))
print("=> A copy of the list a was created with a[:] and assigned to c, thus \
a[2] did not change when c[2] changed")
def set_first_elem_to_zero(l):
if len(l) > 0:
l[0] = 0
return l
numbers = [12, 21, 214, 3]
print("\n...")
print("\nnumbers = {}".format(numbers))
print("set_first_elem_to_zero(numbers) = \
{}".format(set_first_elem_to_zero(numbers)))
print("numbers = {}".format(numbers))
print("=> The original list also changed, even though we did not assign \
the returned list to it (same binding)")
print("\n")
print("=== EXERCISE 9 ===")
elements = [[1,3], [3,6]]
print("\n")
print("elements = {}".format(elements))
flat_list = lambda l: [element for sublist in l for element in sublist]
print("flat_list(elements) = {}".format(flat_list(elements)))
print("\n")
print("=== EXERCISE 10 ===")
import matplotlib.pyplot as plt
import numpy as np
t = np.arange(0.0, 2.0, 0.01)
s = np.sin(t - 2) ** 2 * np.e ** (-t ** 2)
fig, ax = plt.subplots()
ax.plot(t, s)
ax.set(xlabel='x', ylabel='y',
title='Exercise 10')
plt.show()
print("\n")
print("See Figure_1.png")
print("\n")
print("=== EXERCISE 11 ===")
def product_iteration(numbers):
product = 0
if len(numbers) > 0:
product = numbers.pop()
for number in numbers:
product = product * number
return product
from functools import reduce
def product_recursive(numbers):
if len(numbers) > 0:
return reduce((lambda x, y: x * y), numbers)
else:
return 0
numbers = [21, 12, 10, 128, 2]
empty_list = []
print("\n")
print("product_iteration(numbers) = {}".format(product_iteration(numbers)))
print("product_iteration(empty_list) = \
{}".format(product_iteration(empty_list)))
numbers = [21, 12, 10, 128, 2]
print("\n")
print("product_recursive(numbers) = {}".format(product_recursive(numbers)))
print("product_recursive(empty_list) = \
{}".format(product_recursive(empty_list)))
print("\n")
print("=== EXERCISE 12 ===")
print("\n\nGood to know!")
print("\n")
print("=== EXERCISE 13 ===")
def read_file(filename):
with open(filename, 'r') as myfile:
data=myfile.read().replace('\n', '')
return data
file_content = read_file("emails.txt")
print("\n\nread_file('emails.txt')\n\n{}".format(file_content))
import re
def extract_email(string):
match = re.findall(r'[\w\.-]+@[\w\.-]+\.\w+', string)
return match
print("\nextract_email(file_content)\
\n\n{}".format(extract_email(file_content))) | 23.138298 | 79 | 0.608046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,031 | 0.463333 |
33030065b5b2083f8a15967babebd9c665e0b507 | 5,774 | py | Python | src/usgsgeomag.py | jake9wi/spaceweather | a0e08033c3dc34835f8d5d46c6263b5f4b5f0877 | [
"MIT"
] | null | null | null | src/usgsgeomag.py | jake9wi/spaceweather | a0e08033c3dc34835f8d5d46c6263b5f4b5f0877 | [
"MIT"
] | null | null | null | src/usgsgeomag.py | jake9wi/spaceweather | a0e08033c3dc34835f8d5d46c6263b5f4b5f0877 | [
"MIT"
] | null | null | null | """Plot USGS geomag data."""
import argparse
import pathlib as pl
import datetime as dt
import matplotlib; matplotlib.use('cairo')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import requests
import pandas as pd
import funcs
funcs.check_cwd(pl.Path.cwd())
DTG_FMT = '%j:%H'
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("--one", action='store_true',
help="Make one day graph.",
)
group.add_argument("--two", action='store_true',
help="Make two day graph.",
)
group.add_argument("--three", action='store_true',
help="Make three day graph.",
)
args = parser.parse_args()
if args.one:
deltaT = dt.timedelta(hours=24)
elif args.two:
deltaT = dt.timedelta(hours=48)
elif args.three:
deltaT = dt.timedelta(hours=72)
else:
raise Exception('Option three, two, or one must be present.')
now = dt.datetime.utcnow().isoformat(timespec='minutes')
end = (dt.datetime.utcnow() - deltaT).isoformat(timespec='minutes')
def get_dst():
"""Retirve DST data."""
col_width = [
(0, 23),
(24, 27),
(32, 40),
]
colnames = [
'dtg',
'doy',
'dst',
]
payload = {
'id': 'USGS',
'elements': 'UX4',
'format': 'iaga2002',
'sampling_period': '60',
'type': 'variation',
'endtime': now,
'starttime': end,
}
url = 'https://geomag.usgs.gov/ws/data/'
r = requests.get(url, params=payload)
r.raise_for_status()
tmp = pl.Path('./src/aaa.tmp')
tmp.write_text(r.text)
data = pd.read_fwf(
tmp,
colspecs=col_width,
header=None,
names=colnames,
skiprows=21,
na_values='99999.00',
parse_dates=[0],
)
tmp.unlink()
return data
def get_bou():
"""Retrive H data for Boulder."""
col_width = [
(0, 23),
(24, 27),
(32, 40),
]
colnames = [
'dtg',
'doy',
'H',
]
payload = {
'id': 'BOU',
'elements': 'H',
'format': 'iaga2002',
'sampling_period': '60',
'type': 'variation',
'endtime': now,
'starttime': end,
}
r = requests.get('https://geomag.usgs.gov/ws/data/', params=payload)
r.raise_for_status()
tmp = pl.Path('./src/aaa.tmp')
tmp.write_text(r.text)
data = pd.read_fwf(
tmp,
colspecs=col_width,
header=None,
names=colnames,
skiprows=21,
na_values='99999.00',
parse_dates=[0],
)
tmp.unlink()
return data
def get_frd():
"""Retrive H data for Fredricksburg."""
col_width = [
(0, 23),
(24, 27),
(32, 40),
]
colnames = [
'dtg',
'doy',
'H',
]
payload = {
'id': 'FRD',
'elements': 'H',
'format': 'iaga2002',
'sampling_period': '60',
'type': 'variation',
'endtime': now,
'starttime': end,
}
r = requests.get('https://geomag.usgs.gov/ws/data/', params=payload)
r.raise_for_status()
tmp = pl.Path('./src/aaa.tmp')
tmp.write_text(r.text)
data = pd.read_fwf(
tmp,
colspecs=col_width,
header=None,
names=colnames,
skiprows=21,
na_values='99999.00',
parse_dates=[0],
)
tmp.unlink()
return data
dst = get_dst()
bou = get_bou()
frd = get_frd()
###
plt.style.use(r'./src/my_style')
fig = plt.figure(
num=1,
figsize=(10, 20),
tight_layout=False,
constrained_layout=True,
)
fig.suptitle("USGS Mag")
ax0 = plt.subplot2grid((2, 2), (0, 0), rowspan=1, colspan=2)
ax1 = plt.subplot2grid((2, 2), (1, 0), rowspan=1, colspan=1)
ax2 = plt.subplot2grid((2, 2), (1, 1), rowspan=1, colspan=1)
ax0.plot(
dst['dtg'],
dst['dst'],
lw=0.8,
)
ax0.axhline(y=0)
ax0.set_title("DST")
ax0.set_xlabel("Time (DoY:Hr)")
ax0.set_ylabel("DST (nT)")
if dst['dst'].max() <= 0:
dstmax = 10
else:
dstmax = dst['dst'].max() + 1
ax0.set_ylim(
[
dst['dst'].min() - 1,
dstmax,
],
)
ax0.yaxis.set_major_formatter(mticker.FormatStrFormatter('% 1.2f'))
ax0.xaxis.set_major_formatter(mdates.DateFormatter(DTG_FMT))
ax0.xaxis.set_minor_formatter(mdates.DateFormatter(DTG_FMT))
ax0.grid(b=True, axis='x', which='Major', lw=0.8)
ax1.scatter(
bou['dtg'],
bou['H'],
s=2,
)
ax1.set_title("H (Bou)")
ax1.set_xlabel("Time (DoY:Hr)")
ax1.set_ylabel("H (nT)")
ax1.set_ylim(
[
bou['H'].min() - 1,
bou['H'].max() + 1,
],
)
ax1.yaxis.set_major_formatter(mticker.FormatStrFormatter('% 1.0f'))
ax1.xaxis.set_major_formatter(mdates.DateFormatter(DTG_FMT))
ax1.xaxis.set_minor_formatter(mdates.DateFormatter(DTG_FMT))
# ax1.xaxis.set_major_locator(mdates.WeekdayLocator(interval=10))
ax1.grid(b=True, axis='x', which='Major', lw=0.8)
for label in ax1.xaxis.get_ticklabels():
label.set_rotation(45)
ax2.scatter(
frd['dtg'],
frd['H'],
s=2,
)
ax2.set_title("H (Frd)")
ax2.set_xlabel("Time (DoY:Hr)")
ax2.set_ylabel("H (nT)")
ax2.set_ylim(
[
frd['H'].min() - 1,
frd['H'].max() + 1,
],
)
ax2.yaxis.set_major_formatter(mticker.FormatStrFormatter('% 1.0f'))
ax2.xaxis.set_major_formatter(mdates.DateFormatter(DTG_FMT))
ax2.xaxis.set_minor_formatter(mdates.DateFormatter(DTG_FMT))
# ax2.xaxis.set_major_locator(mdates.WeekdayLocator(interval=10))
ax2.grid(b=True, axis='x', which='Major', lw=0.8)
for label in ax2.xaxis.get_ticklabels():
label.set_rotation(45)
fig.savefig('./web/img/usgsmag.svg')
plt.close(1)
| 20.621429 | 72 | 0.574991 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,222 | 0.211638 |
3304cda4bb7181483694fd293ce4ad5249b9bc1e | 2,178 | py | Python | checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | null | null | null | checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | null | null | null | checkov/terraform/checks/resource/kubernetes/PodSecurityContext.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | null | null | null | from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
class PodSecurityContext(BaseResourceCheck):
def __init__(self):
# CIS-1.5 5.7.3
name = "Apply security context to your pods and containers"
# Security context can be set at pod or container level.
id = "CKV_K8S_29"
supported_resources = ['kubernetes_pod', 'kubernetes_deployment', 'kubernetes_daemonset']
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf) -> CheckResult:
if "spec" not in conf:
self.evaluated_keys = [""]
return CheckResult.FAILED
spec = conf['spec'][0]
if spec.get("container"):
containers = spec.get("container")
for idx, container in enumerate(containers):
if type(container) != dict:
return CheckResult.UNKNOWN
if not container.get("security_context"):
self.evaluated_keys = ["spec/[0]/container/{idx}"]
return CheckResult.FAILED
return CheckResult.PASSED
if spec.get("template") and isinstance(spec.get("template"), list):
template = spec.get("template")[0]
if template.get("spec") and isinstance(template.get("spec"), list):
temp_spec = template.get("spec")[0]
if temp_spec.get("container"):
containers = temp_spec.get("container")
for idx, container in enumerate(containers):
if type(container) != dict:
return CheckResult.UNKNOWN
if not container.get("security_context"):
self.evaluated_keys = ["spec/[0]/template/[0]/spec/[0]/container/{idx}"]
return CheckResult.FAILED
return CheckResult.PASSED
return CheckResult.FAILED
check = PodSecurityContext()
| 41.09434 | 106 | 0.596878 | 1,991 | 0.914141 | 0 | 0 | 0 | 0 | 0 | 0 | 412 | 0.189164 |
33050cf010c98c8d6ee47a3c7fc7081460e0df92 | 345 | py | Python | hardware/demo_i2c.py | leeehuang/MaixPy_scripts | 79a5485ec983e67bb8861305a52418b29e0dc205 | [
"MIT"
] | null | null | null | hardware/demo_i2c.py | leeehuang/MaixPy_scripts | 79a5485ec983e67bb8861305a52418b29e0dc205 | [
"MIT"
] | null | null | null | hardware/demo_i2c.py | leeehuang/MaixPy_scripts | 79a5485ec983e67bb8861305a52418b29e0dc205 | [
"MIT"
] | null | null | null | from machine import I2C
# i2c = I2C(I2C.I2C0, freq=100000, scl=28, sda=29) # hardware i2c
i2c = I2C(I2C.I2C3, freq=100000, scl=28, sda=29) # software i2c
devices = i2c.scan()
print(devices)
for device in devices:
i2c.writeto(device, b'123')
i2c.readfrom(device, 3)
# tmp = bytearray(6)
# i2c.readfrom_into(device, tmp, True)
| 23 | 65 | 0.672464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.414493 |
3305691c62826956ff8ed131b9e4f86523e06726 | 1,431 | py | Python | repos/system_upgrade/common/models/selinux.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/common/models/selinux.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | 1 | 2022-03-07T15:34:11.000Z | 2022-03-07T15:35:15.000Z | repos/system_upgrade/common/models/selinux.py | sm00th/leapp-repository | 1c171ec3a5f9260a3c6f84a9b15cad78a875ac61 | [
"Apache-2.0"
] | null | null | null | from leapp.models import fields, Model
from leapp.topics import SystemInfoTopic, TransactionTopic
class SELinuxModule(Model):
"""SELinux module in cil including priority"""
topic = SystemInfoTopic
name = fields.String()
priority = fields.Integer()
content = fields.String()
# lines removed due to content invalid on RHEL 8
removed = fields.List(fields.String())
class SELinuxModules(Model):
"""
List of selinux modules that are not part of distribution policy
modules - list of custom policy modules (priority != 100,200)
templates - List of installed udica templates
"""
topic = SystemInfoTopic
modules = fields.List(fields.Model(SELinuxModule))
templates = fields.List(fields.Model(SELinuxModule))
class SELinuxCustom(Model):
"""SELinux customizations returned by semanage export"""
topic = SystemInfoTopic
commands = fields.List(fields.String())
removed = fields.List(fields.String())
class SELinuxRequestRPMs(Model):
"""
SELinux related RPM packages that need to be present after upgrade
    Listed packages provide types that were used in policy
customizations (to_install), or the corresponding policy
was installed on RHEL-7 installation with priority 200
(to_keep).
"""
topic = TransactionTopic
to_keep = fields.List(fields.String(), default=[])
to_install = fields.List(fields.String(), default=[])
| 31.108696 | 70 | 0.716981 | 1,321 | 0.923131 | 0 | 0 | 0 | 0 | 0 | 0 | 625 | 0.436758 |
33057f162f9d778470bc3e8176490e3594ced17b | 76 | py | Python | dateinfer/__init__.py | avishai-o/dateinfer | 894fe26b3b60c94d003f2ae0a55f1d9f1e40cf80 | [
"Apache-2.0"
] | null | null | null | dateinfer/__init__.py | avishai-o/dateinfer | 894fe26b3b60c94d003f2ae0a55f1d9f1e40cf80 | [
"Apache-2.0"
] | null | null | null | dateinfer/__init__.py | avishai-o/dateinfer | 894fe26b3b60c94d003f2ae0a55f1d9f1e40cf80 | [
"Apache-2.0"
] | null | null | null | __author__ = 'jeffrey.starr@ztoztechnologies.com'
from .infer import infer
| 19 | 49 | 0.802632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.473684 |
330597e751b125d41e61a3b1d6607b8ceee7379c | 21,902 | py | Python | annotator_web.py | j20100/Seg_Annotator | 49b2806be9450c901cf4977633a4ec29b3b6bdca | [
"CC-BY-4.0"
] | null | null | null | annotator_web.py | j20100/Seg_Annotator | 49b2806be9450c901cf4977633a4ec29b3b6bdca | [
"CC-BY-4.0"
] | null | null | null | annotator_web.py | j20100/Seg_Annotator | 49b2806be9450c901cf4977633a4ec29b3b6bdca | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import base64
from bson import ObjectId
import datetime
from flask import Flask, Markup, Response, abort, current_app, escape, flash, \
    make_response, redirect, render_template, request, url_for
from flask_login import LoginManager, UserMixin, current_user, login_required, \
login_user, logout_user
from werkzeug.utils import secure_filename
from functools import update_wrapper, wraps
from gridfs import GridFS
from jinja2 import evalcontextfilter
from binascii import a2b_base64
from OpenSSL import SSL
from flask import session
from flask_socketio import SocketIO, emit
import json
import hashlib
import pandas as pd
import pymongo
import re
import subprocess
import threading
import time
import uuid
import urllib.parse
import webcolors
import os
import glob
from flask_cors import CORS
curr_annotated_img = []
def hash_password(password):
"""This function hashes the password with SHA256 and a random salt"""
salt = uuid.uuid4().hex
return hashlib.sha256(salt.encode() + password.encode()).hexdigest() + ':' + salt
def check_password(hashed_password, user_password):
"""This function checks a password against a SHA256:salt entry"""
password, salt = hashed_password.split(':')
return password == hashlib.sha256(salt.encode() + user_password.encode()).hexdigest()
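# Example round trip (illustrative values only):
#   hashed = hash_password('secret')      # e.g. '5e88...:9f31...'
#   check_password(hashed, 'secret')      # -> True
#   check_password(hashed, 'wrong')       # -> False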
def admin_required(func):
"""Function wrapper to allow only logged in admins to access the page."""
@wraps(func)
    def decorated_function(*args, **kwargs):
        # Anonymous users have no is_admin(), so check authentication first
        if not current_user.is_authenticated or not current_user.is_admin():
            return redirect(url_for('bad_permissions'))
return func(*args, **kwargs)
return decorated_function
# Load default configuration from local file
with open('config.json') as config:
conf = argparse.Namespace(**json.load(config))
# Argument parser strings
app_description = "annotator Website Application\n\n" \
"All information can be found at https://github.com/seg_annotator.\n" \
"Modify file 'config.json' to edit the application's configuration.\n" \
"There are other command line arguments that can be used:"
help_host = "Hostname of the Flask app. Default: {0}".format(conf.app_host)
help_port = "Port of the Flask app. Default: {0}".format(conf.app_port)
help_debug = "Start Flask app in debug mode. Default: {0}".format(conf.debug)
# Set up the command-line arguments
parser = argparse.ArgumentParser(description=app_description,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-H', '--app_host', help=help_host, default=conf.app_host)
parser.add_argument('-P', '--app_port', help=help_port, default=conf.app_port)
parser.add_argument('-D', '--debug', dest='debug', action='store_true', help=help_debug)
parser.set_defaults(debug=conf.debug)
# Update default configs with command line args
args = parser.parse_args()
conf.__dict__.update(args.__dict__)
# Get MongoDB Database Client
client = pymongo.MongoClient()
annotator = client['annotator']
fs = GridFS(annotator)
# Validate MongoDB is started, else exit
try:
client.server_info()
except pymongo.errors.ServerSelectionTimeoutError:
print('MongoDB is not started. Restart it before launching the web app again.')
quit()
# Create Flask Application
app = Flask(__name__)
CORS(app)
app.secret_key = uuid.uuid4().hex # Required to use log in and session manager
login_manager = LoginManager()
login_manager.init_app(app)
# ROS variable
ros_pid = None
socketio = SocketIO(app)
@socketio.on('disconnect')
def disconnect_user():
print('DISCONNECTING USER')
# user_logs = list(annotator.logs.find().skip((annotator.logs).count() - 1))
# user = user_logs[-1]
# annotator.logs.update_one(user, {'$set' : { 'stop_time' : time.time()}})
logout_user()
# session.pop(app.secret_key, None)
# User class
class User(UserMixin):
"""User Class making DB-stored parameters accessible from HTML templates."""
def __init__(self, username):
self.username = username
user = annotator.credentials.find_one({'username': username})
self.admin = user['admin']
self.nb_images = user['nb_images']
def get_id(self):
return self.username
def is_admin(self):
return self.admin
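# flask_login also exposes the logged-in user to Jinja templates as
# 'current_user', e.g. {{ current_user.username }} or
# {% if current_user.is_admin() %} ... {% endif %}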
# Login Manager Configuration
@login_manager.user_loader
def load_user(user_id):
return User(user_id)
@login_manager.unauthorized_handler
def unauthorized_callback():
return redirect('/login?next=' + request.path)
# Application routes
@app.route('/')
def go_home():
return redirect(url_for('home'))
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
next_page = request.args.get('next')
username = request.form['username']
password = request.form['password']
user = annotator.credentials.find_one({'username': username})
if user and check_password(user['password'], password):
if user['active']: # Inactived users should not be able to log in
login_user(User(username))
annotator.credentials.update_one(user, {'$set':
{'last_login' : time.time()}})
# If an admin logs in and there is at least one inactived user, show it
if user['admin'] and annotator.credentials.find_one({'active': False}):
flash('At least one user account has to be activated', 'info')
return redirect(url_for('manage_users'))
annotator.logs.insert_one({'start_time' : time.time(),
'username' : username,
'stop_time' : 0,
'nb_images' : 0})
return redirect(next_page or url_for('home'))
else:
flash('Account not yet activated by an administrator', 'warning')
else:
flash('Invalid credentials', 'danger')
return render_template('login.html')
else:
return render_template('login.html')
@app.route('/logout')
@login_required
def logout():
user_logs = list(annotator.logs.find().skip((annotator.logs).count() - 1))
user = user_logs[-1]
annotator.logs.update_one(user, {'$set' : { 'stop_time' : time.time()}})
logout_user()
return redirect(url_for('home'))
@app.route('/create_account', methods=['GET', 'POST'])
def create_account():
if request.method == 'POST':
next = request.args.get('next')
username = request.form['username'].strip()
password = request.form['password']
password_confirm = request.form['password_confirm']
if not password:
flash('Password cannot be empty', 'danger')
return render_template('create_account.html')
if password != password_confirm:
flash('Both password entries do not match', 'danger')
return render_template('create_account.html')
if not username.replace('_', '').isalnum():
# Only allow letters, numbers and underscore characters in usernames
flash('Invalid username (letters, numbers and underscores only)', 'danger')
return render_template('create_account.html')
user = annotator.credentials.find_one({'username': username})
if user or not username: # Check if username is not empty or already taken
flash('Username not available', 'danger')
return render_template('create_account.html')
active = False
admin = False
# If this is the first user to register, make it active and admin
if not annotator.credentials.find_one():
active = True
admin = True
flash('First account created, activated and is administrator, congratulations!', 'success')
# Create a new user account
annotator.credentials.insert_one({'username': username,
'password': hash_password(password),
'active': active,
'nb_images' : 0,
'admin': admin})
flash('Account created successfully', 'success')
return redirect(url_for('login'))
else:
return render_template('create_account.html')
@app.route('/change_password', methods=['GET', 'POST'])
def change_password():
if request.method == 'POST':
username = request.form['username']
old_password = request.form['old_password']
new_password = request.form['new_password']
user = annotator.credentials.find_one({'username': username})
if user and check_password(user['password'], old_password):
if not new_password:
flash('Password cannot be empty', 'danger')
return render_template('change_password.html')
# Modify password
annotator.credentials.update_one(user, {'$set': {
'password': hash_password(new_password)}})
flash('Password changed successfully', 'success')
return redirect(url_for('login'))
else:
flash('Invalid credentials', 'danger')
return render_template('change_password.html')
else:
return render_template('change_password.html')
@app.route('/home')
def home():
return render_template('index.html')
def sortKeyFunc(s):
    """Sort key: the numeric index embedded in an annotation file name."""
    # e.g. "static/data/annotations/img_label_12.png" -> 12
    t = s.split('/')
    k = t[3].split('.')
    parts = k[0].split('_')
    return int(parts[2])
@app.route('/load_new_img', methods=['POST'])
def uploader_new_img():
    """Return the next 'image:annotation' path pair not being annotated yet."""
    if request.method == 'POST':
        global curr_annotated_img
        directory = "static/data/annotations/"
        searchlabel = os.path.join(directory, "*.png")
        # Machine-specific dataset descriptor kept from the original setup
        # (loaded for reference; not used further here)
        with open('/home/jonathan/Seg_Annotator/static/data/dataset.json') as f:
            data = json.load(f)
        fileslabel = glob.glob(searchlabel)
        fileslabel.sort(key=sortKeyFunc)
        # Skip annotations another session is already working on
        i = 0
        while i < len(fileslabel) and fileslabel[i] in curr_annotated_img:
            i = i + 1
        if i == len(fileslabel):
            abort(404)  # every annotation is already checked out
        newImgAnnot = fileslabel[i]
        # The raw image lives under 'images/' with the same file name
        t = newImgAnnot.split('/')
        newImg = t[0] + "/" + t[1] + "/images/" + t[3]
        send = newImg + ":" + newImgAnnot
        curr_annotated_img.append(newImgAnnot)
        return send
@app.route('/uploader', methods=['POST'])
def uploader_file():
    """Receive an annotated image as a base64 data URL and save it to disk."""
    if request.method == 'POST':
        pic = request.form['file']
        username = request.form['username']
        filename = request.form['filename']
        # Parse the data-URL header, e.g. "data:image/png;base64,...."
        up = urllib.parse.urlparse(pic)
        head, data = up.path.split(',', 1)
        bits = head.split(';')
        mime_type = bits[0] if bits[0] else 'text/plain'
        charset, b64 = 'ASCII', False
        for bit in bits:
            if bit.startswith('charset='):
                charset = bit[8:]
            elif bit == 'base64':
                b64 = True
        binary_data = a2b_base64(data)
        # Drop any previous correction of the same image before saving
        directory = "static/data/annotations/"
        for item in os.listdir(directory):
            if item.startswith(filename):
                os.remove(os.path.join(directory, item))
        timestr = time.strftime("%Y%m%d-%H%M%S")
        with open(directory + filename + "_corrected_" + timestr, 'wb') as f:
            f.write(binary_data)
        # Update the per-user total and the current session's image counter
        user = annotator.credentials.find_one({'username': username})
        user_logs = list(annotator.logs.find().skip(annotator.logs.count() - 1))
        user_stats = user_logs[-1]
        annotator.logs.update_one(user_stats,
                                  {'$set': {'nb_images': user_stats['nb_images'] + 1}})
        annotator.credentials.update_one(user,
                                         {'$set': {'nb_images': user['nb_images'] + 1}})
        return "Done sending images"
@app.route('/updater', methods=['POST'])
def updater_URL():
    """Resolve an annotation URL to the latest corrected file on disk."""
    if request.method == 'POST':
        annotURL = request.form["URL"]
        directory = "static/data/annotations/"
        realURL = "NONE"
        # annotURL[25:] strips the leading "/static/data/annotations/" prefix
        for item in os.listdir(directory):
            if item.startswith(annotURL[25:]):
                realURL = item
        return "static/data/annotations/" + realURL
@app.route('/annotator')
@login_required
def annotator_edit():
username = current_user.get_id()
return render_template('annotator.html', username=username)
@app.route('/dataset')
@login_required
def dataset():
username = current_user.get_id()
return render_template('dataset.html', username=username)
@app.route('/logs')
@admin_required
def logs():
logs = list(annotator.logs.find())
return render_template('logs.html', logs=logs)
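# Minimal sketch of the 'valid_protocol' helper used below, assuming each
# protocol's logs live in their own MongoDB database; the original helper is
# not defined in this file.
def valid_protocol(name):
    """Return True if 'name' matches an existing MongoDB database."""
    return name in client.list_database_names()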
@app.route('/logs/<protocol>')
def log_highlights(protocol):
    """Show step counts and the raw protocol for one protocol database."""
    if not valid_protocol(protocol):
        return redirect(url_for('logs'))
    # Get database of current protocol
    db = client[protocol]
    started = db.steps.count()
    done = db.steps.count({'end': {'$exists': True}})
    info = db.protocol.find_one()
    json_protocol = {}
    if info:
        # Pretty print the raw protocol
        json_protocol = json.dumps(info['protocol'], indent=4, sort_keys=True)
    return render_template('log_highlights.html', active='Highlights',
                           protocol=protocol, json_protocol=json_protocol,
                           started=started, done=done, db=db)
@app.route('/logs/delete/<id>')
@login_required
@admin_required
def delete_logs(id):
    """Delete a single log entry, identified by its ObjectId."""
    annotator.logs.delete_one({'_id': ObjectId(id)})
    flash("Entry {0} deleted successfully".format(id), 'info')
    return redirect(url_for('logs'))
@app.route('/manage_users')
@login_required
@admin_required
def manage_users():
user_list = list(annotator.credentials.find())
return render_template('manage_users.html', users=user_list)
@app.route('/manage_users/activate/<username>')
@login_required
@admin_required
def activate_user(username):
"""Activate a user account."""
user = annotator.credentials.find_one({'username': username})
if not user['active']:
annotator.credentials.update_one(user, {'$set': {'active': True}})
flash("User {0} activated successfully".format(username), 'success')
else:
flash("User {0} is already active".format(username), 'warning')
return redirect(url_for('manage_users'))
@app.route('/manage_users/demote/<username>')
@login_required
@admin_required
def demote_user(username):
"""Remove admin privileges of another administrator."""
user = annotator.credentials.find_one({'username': username})
if current_user.get_id() == username:
flash('Cannot revert yourself to standard user', 'danger')
elif user:
if user['admin']:
annotator.credentials.update_one(user, {'$set': {'admin': False}})
flash("User {0} reverted to standard user successfully".format(username), 'info')
else:
flash("User {0} is already a standard user".format(username), 'warning')
else:
flash("Cannot revert unknown user {0} to standard user".format(username), 'warning')
return redirect(url_for('manage_users'))
@app.route('/manage_users/promote/<username>')
@login_required
@admin_required
def promote_user(username):
"""Give admin privileges from a normal user."""
user = annotator.credentials.find_one({'username': username})
if user:
if user['admin']:
flash("User {0} is already an administrator".format(username), 'warning')
else:
annotator.credentials.update_one(user, {'$set': {'admin': True}})
flash("User {0} promoted to administrator successfully".format(username), 'info')
else:
flash("Cannot promote unknown user {0} to administrator".format(username), 'warning')
return redirect(url_for('manage_users'))
@app.route('/manage_users/delete/<username>')
@login_required
@admin_required
def delete_user(username):
"""Delete a user account that is not yours."""
user = annotator.credentials.find_one({'username': username})
if current_user.get_id() == username:
flash('Cannot delete yourself', 'danger')
elif user:
annotator.credentials.delete_one(user)
flash("User {0} deleted successfully".format(username), 'info')
else:
flash("Cannot delete unknown user {0}".format(username), 'warning')
return redirect(url_for('manage_users'))
@app.route('/bad_permissions')
def bad_permissions():
"""Function called if a normal user tries to get to an admin reserved page."""
return render_template('bad_permissions.html')
@app.errorhandler(404)
def page_not_found(error):
"""This method handles all unexisting route requests."""
return render_template('404.html'), 404
# Add objects that can be called from the Jinja2 HTML templates
@app.template_filter()
@evalcontextfilter
def nl2br(eval_ctx, value):
"""Converts new lines to paragraph breaks in HTML."""
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
result = '\n\n'.join('<p>%s</p>' % p.replace('\n', '<br>\n') \
for p in _paragraph_re.split(escape(value)))
result = result.replace(' ', ' ')
if eval_ctx.autoescape:
result = Markup(result)
return result
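# Template usage (illustration only): the filter above is applied as
# {{ some_text | nl2br }} in a Jinja2 template; 'some_text' is a placeholder.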
def crossdomain(origin=None, methods=None, headers=None, max_age=21600,
attach_to_all=True, automatic_options=True):
"""Decorator function that allows crossdomain requests.
Courtesy of
https://blog.skyred.fi/articles/better-crossdomain-snippet-for-flask.html
"""
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
    # use str (Python 3) rather than the Python 2-only 'basestring'
    if headers is not None and not isinstance(headers, str):
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, str):
        origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
""" Determines which methods are allowed
"""
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
"""The decorator function
"""
def wrapped_function(*args, **kwargs):
"""Caries out the actual cross domain code
"""
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
h['Access-Control-Allow-Credentials'] = 'true'
h['Access-Control-Allow-Headers'] = \
"Origin, X-Requested-With, Content-Type, Accept, Authorization"
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
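# Illustration only: how the decorator above is applied to a route.
# '/api/ping' is a hypothetical endpoint added purely for demonstration.
@app.route('/api/ping')
@crossdomain(origin='*')
def api_ping():
    """Tiny endpoint showing the crossdomain decorator in use."""
    return 'pong'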
def convert_ts(ts):
"""Convert timestamp to human-readable string"""
return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H:%M:%S')
def format_sidebar(name, icon, url):
"""
Used to generate HTML line for sidebar in layout.html.
- name is the name of the tab
- icon is the glyphicon name
"""
current_url = request.path.split('/')[1]
active = ' class="active"' if url == current_url else ''
html = '<li{0}><a href="/{1}"><i style="float:left; margin-right: 14px;">' \
'<span class="glyphicon glyphicon-{2}"></span></i>{3}' \
'</a></li>'.format(active, url, icon, name)
return Markup(html)
# Make some variables and functions available from Jinja2 HTML templates
app.jinja_env.globals.update(conf=conf,
force_type = Markup('onselect="return false" ' \
'onpaste="return false" ' \
'oncopy="return false" ' \
'oncut="return false" ' \
'ondrag="return false" ' \
'ondrop="return false" ' \
'autocomplete=off'),
format_sidebar=format_sidebar,
convert_ts=convert_ts)
# Start the application
if __name__ == '__main__':
#context = SSL.Context(SSL.TLSv1_2_METHOD)
#context.use_privatekey_file('host.key')
#context.use_certificate_file('host.cert')
socketio.run(app, host=conf.app_host, port=int(conf.app_port), ssl_context=('cert.pem', 'key.pem'))
| 34.98722 | 103 | 0.630079 | 421 | 0.019222 | 0 | 0 | 14,075 | 0.642635 | 0 | 0 | 6,932 | 0.316501 |
3306b230b7b452f85e6bb142239de2540a26cb53 | 72 | py | Python | marrow/interface/__init__.py | marrow/interface | 4d60f9fc16e949c5da3b3756c77d60fe84f0ed2d | [
"MIT"
] | 2 | 2016-03-22T15:21:48.000Z | 2017-02-21T23:52:46.000Z | marrow/interface/__init__.py | marrow/interface | 4d60f9fc16e949c5da3b3756c77d60fe84f0ed2d | [
"MIT"
] | 1 | 2019-01-21T22:09:00.000Z | 2019-01-21T22:09:00.000Z | marrow/interface/__init__.py | marrow/interface | 4d60f9fc16e949c5da3b3756c77d60fe84f0ed2d | [
"MIT"
] | 2 | 2015-12-21T03:24:06.000Z | 2016-11-10T15:19:27.000Z | from .meta import Interface
from .release import version as __version__
| 24 | 43 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3307dce436fdb7329f82703b2fd7a200624c248e | 11,934 | py | Python | apps/steam.py | nestorcalvo/Buencafe_dashboard | e2fbe5dfc5b679ab4d27acea2a23e3dfdeb2699c | [
"MIT"
] | null | null | null | apps/steam.py | nestorcalvo/Buencafe_dashboard | e2fbe5dfc5b679ab4d27acea2a23e3dfdeb2699c | [
"MIT"
] | null | null | null | apps/steam.py | nestorcalvo/Buencafe_dashboard | e2fbe5dfc5b679ab4d27acea2a23e3dfdeb2699c | [
"MIT"
] | null | null | null | import dash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import pandas as pd
import plotly.express as px
import plotly.graph_objs as go
from datetime import date
import dash_loading_spinners as dls
from dash.dependencies import Input, Output, ClientsideFunction, State
from app import app
import requests
features = ["Screw Speed", "Gas Flow Rate", "Steam Pressure", "Oven-Home Temperature",
"Water Temperature", "Oxygen_pct", "Oven-Home Pressure", "Combustion Air Pressure",
"Temperature before prear", "Temperature after prear", "Burner Position", "Burner_pct",
"Borra Flow Rate_kgh", "Cisco Flow Rate_kgh"]
cardtab_1 = dbc.Card([
html.Div(
id='output-container-date-picker-range',
className="month-container"
),
dls.Hash(
dcc.Graph(id="graph-steam", className = "graph-card"),
size = 160,
speed_multiplier = 0.8,
debounce = 200
)
])
cardtab_2 = dbc.Card([
html.Div(
id='output-container-date-picker-range',
className="month-container"
),
dls.Hash(
dcc.Graph(id="graph-distribution", className = "graph-card"),
size = 160,
speed_multiplier = 0.8,
debounce = 200
)
])
card_3 = dbc.Card(
[
dbc.Col([
dbc.Col([
html.P(
"Select date range that you want to see:"
),
dcc.DatePickerRange(
id='my-date-picker-range',
min_date_allowed=date(2020, 10, 1),
max_date_allowed=date(2021, 6, 30),
initial_visible_month=date(2020, 10, 1),
end_date=date(2021, 6, 30),
clearable=True,
with_portal=True,
month_format="MMMM, YYYY",
number_of_months_shown=3
)
]),
html.Hr(),
dbc.Col([
html.P(
"Select the data frequency:"
),
dbc.RadioItems(
id='frequency-radioitems',
labelStyle={"display": "inline-block"},
options= [
{"label": "Daily", "value": "data_daily"},
{"label": "Hourly", "value": "data_hourly"}
], value= "data_daily",
style= {"color": "black"}
)
])
])
])
card_4 = dbc.Card([
dbc.Col([
dbc.FormGroup([
dbc.Label("Y - Axis"),
dcc.Dropdown(
id="y-variable",
options=[{
"label": col,
"value": col
} for col in features],
value="Gas Flow Rate",
),
]),
html.H6("Efficiency Range"),
dcc.RangeSlider(
id='slider-efficiency',
min=0,
max=1.00,
step=0.01,
value=[0, 1.00]
),
html.P(id='range-efficiency')
])
])
card_5 = dbc.Card([
html.Div(
id='output-container-date-picker-range',
className="month-container"
),
dls.Hash(
dcc.Graph(id="graph-comparison", className = "graph-card"),
size = 160,
speed_multiplier = 0.8,
debounce = 200
)
])
layout= [
html.Div([
html.Img(
src = "/assets/images/Buencafe-logo.png",
className = "corr-icon"
),
html.H2(
"Steam Analytics",
className = "content-title"
),
html.Div(children=[
html.Div([
dbc.Tabs([
dbc.Tab(cardtab_1, label="Time series"),
dbc.Tab(cardtab_2, label="Distribution"),
],
id="card-tabs",
card=True,
active_tab="tab-1",
),
card_3,
], className = "graph_col_1"),
html.Div(children =[
card_4,
card_5
], className = "data_col_2")
], className = "wrapper__steam-data")
],className = "wrapper__steam"),
]
@app.callback(
Output('graph-steam','figure'),
[Input('my-date-picker-range', 'start_date'),
Input('my-date-picker-range', 'end_date'),
Input('frequency-radioitems', 'value')]
)
def update_figure(start_date, end_date, value_radio):
# if value_radio == "data_daily":
# data = pd.read_csv("data/data_interpolate_daily.csv", parse_dates=["Time"])
# data.set_index(["Time"], inplace=True)
# elif value_radio == "data_hourly":
# data = pd.read_csv("data/data_interpolate_hourly.csv", parse_dates=["Time"])
# data.set_index(["Time"], inplace=True)
try:
if value_radio == "data_daily":
query = "SELECT * FROM daily"
payload = {
"query": query
}
petition = requests.post('https://k8nmzco6tb.execute-api.us-east-1.amazonaws.com/dev/data',payload)
test_var = petition.json()['body']
data = pd.DataFrame(test_var)
data['Time'] = pd.to_datetime(data['Time']).dt.date.astype("datetime64[ns]")
# print("Llegada ", data2['Time'].value_counts())
data.set_index(["Time"], inplace=True)
elif value_radio == "data_hourly":
query = "SELECT * FROM hourly"
payload = {
"query": query
}
petition = requests.post('https://k8nmzco6tb.execute-api.us-east-1.amazonaws.com/dev/data',payload)
test_var = petition.json()['body']
data = pd.DataFrame(test_var)
data['Time'] = pd.to_datetime(data['Time'])
data.set_index(["Time"], inplace=True)
fig = go.Figure()
fig.add_trace(go.Scatter(
x = data.loc[start_date: end_date].index,
y = data.loc[start_date: end_date]["Steam Flow Rate"],
mode = "lines",
name = "Steam"
))
fig.update_layout(title = 'Steam Generation',
xaxis_title='Date',
yaxis_title='Steam (Kg/hour)',
transition_duration=500,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)')
return fig
    except Exception:
fig = go.Figure()
fig.update_layout(title = 'Steam Generation',
xaxis_title='Date',
yaxis_title='Steam (Kg/hour)',
transition_duration=500,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)')
return fig
@app.callback(
Output('graph-distribution','figure'),
[Input('my-date-picker-range', 'start_date'),
Input('my-date-picker-range', 'end_date')]
)
def update_figure2(start_date, end_date):
try:
query = "SELECT * FROM daily"
payload = {
"query": query
}
petition = requests.post('https://k8nmzco6tb.execute-api.us-east-1.amazonaws.com/dev/data',payload)
test_var = petition.json()['body']
df = pd.DataFrame(test_var)
df['Time'] = pd.to_datetime(df['Time']).dt.date.astype("datetime64[ns]")
# print("Llegada ", data2['Time'].value_counts())
df.set_index(["Time"], inplace=True)
fig = px.histogram(df.loc[start_date: end_date], x="Steam Flow Rate", nbins=100)
fig.update_layout(title = 'Steam Flow Rate Distribution',
xaxis_title='Steam (Kg/hour)',
yaxis_title='Count',
transition_duration=500,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)')
return fig
    except Exception:
fig = px.histogram()
fig.update_layout(title = 'Steam Flow Rate Distribution',
xaxis_title='Steam (Kg/hour)',
yaxis_title='Count',
transition_duration=500,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)')
return fig
@app.callback(
[Output("graph-comparison", "figure"),
Output("range-efficiency", "children")],
[Input("y-variable", "value"),
Input("slider-efficiency", "value"),]
)
def update_figure3(feature, efficiency):
try:
query = "SELECT * FROM hourly"
payload = {
"query": query
}
petition = requests.post('https://k8nmzco6tb.execute-api.us-east-1.amazonaws.com/dev/data',payload)
test_var = petition.json()['body']
df2 = pd.DataFrame(test_var)
        # keep full timestamps: this callback queries the hourly table, and
        # truncating to dates (as the daily callbacks do) would collapse rows
        df2['Time'] = pd.to_datetime(df2['Time'])
        df2.set_index(["Time"], inplace=True)
fig = px.scatter(
x = df2[(df2['Efficiency'] < efficiency[1]) & (df2['Efficiency'] > efficiency[0])]["Steam Flow Rate"],
y = df2[(df2['Efficiency'] < efficiency[1]) & (df2['Efficiency'] > efficiency[0])][feature]
)
fig.update_layout(title = 'Steam Flow Rate Comparison',
xaxis_title= 'Steam (Kg/hour)',
yaxis_title= feature,
transition_duration= 500,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)')
range_efficiency = str(efficiency[0]) + " - " + str(efficiency[1])
return fig, range_efficiency
    except Exception:
fig = px.scatter()
fig.update_layout(title = 'Steam Flow Rate Comparison',
xaxis_title= 'Steam (Kg/hour)',
yaxis_title= feature,
transition_duration= 500,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)')
range_efficiency = str(efficiency[0]) + " - " + str(efficiency[1])
return fig, range_efficiency | 35.730539 | 114 | 0.483576 | 0 | 0 | 0 | 0 | 6,353 | 0.532345 | 0 | 0 | 3,916 | 0.328138 |
33080713e8553a0893a3ef2c3f6b5b130877799e | 9,755 | py | Python | ibeatles/step1/data_handler.py | indudhiman/bragg-edge | 56af0a448534ef9cb5428879ba900e194dc05db2 | [
"MIT"
] | null | null | null | ibeatles/step1/data_handler.py | indudhiman/bragg-edge | 56af0a448534ef9cb5428879ba900e194dc05db2 | [
"MIT"
] | null | null | null | ibeatles/step1/data_handler.py | indudhiman/bragg-edge | 56af0a448534ef9cb5428879ba900e194dc05db2 | [
"MIT"
] | null | null | null | import sys
import os
import glob
import pprint
import numpy as np
try:
import PyQt4.QtGui as QtGui
from PyQt4.QtGui import QFileDialog
except ImportError:
import PyQt5.QtGui as QtGui
from PyQt5.QtWidgets import QFileDialog
from ibeatles.utilities.load_files import LoadFiles, LoadTimeSpectra
from ibeatles.utilities.file_handler import FileHandler
from ibeatles.step1.time_spectra_handler import TimeSpectraHandler
class DataHandler(object):
user_canceled = False
def __init__(self, parent=None, data_type='sample'):
self.parent = parent
self.data_type = data_type
self.list_ui = {'sample': {'list': self.parent.ui.list_sample,
'folder': self.parent.ui.sample_folder},
'ob': {'list': self.parent.ui.list_open_beam,
'folder': self.parent.ui.open_beam_folder},
'normalized': {'list': self.parent.ui.list_normalized,
'folder': self.parent.ui.normalized_folder},
'time_spectra': {'text': self.parent.ui.time_spectra,
'text2': self.parent.ui.time_spectra_2,
'folder': self.parent.ui.time_spectra_folder,
'folder2': self.parent.ui.time_spectra_folder_2}}
def retrieve_files(self, data_type='sample'):
"""
type = ['sample', 'ob', 'normalized', 'time_spectra']
"""
self.data_type = data_type
mydialog = FileDialog()
mydialog.setDirectory(self.parent.sample_folder)
mydialog.exec_()
try:
selectedFiles = mydialog.filesSelected()
if selectedFiles:
if len(selectedFiles) == 1:
if os.path.isdir(selectedFiles[0]):
self.load_directory(selectedFiles[0])
else:
self.load_files(selectedFiles[0])
else:
self.load_files(selectedFiles)
if (data_type == 'sample') or (data_type == 'normalized'):
self.retrieve_time_spectra()
self.load_time_spectra()
else:
self.user_canceled = True
except TypeError:
self.user_canceled = True
# inform user here that the folder is empty !
# FIXME
return
# calculate mean data array for normalization tab
if data_type == 'sample':
_data = self.parent.data_metadata['sample']['data']
normalization_mean_data = np.mean(_data, axis=0)
self.parent.data_metadata['normalization']['data'] = normalization_mean_data
def load_time_spectra(self):
if self.data_type == 'normalized':
o_time_handler = TimeSpectraHandler(parent = self.parent, normalized_tab=True)
else:
o_time_handler = TimeSpectraHandler(parent = self.parent)
o_time_handler.load()
o_time_handler.calculate_lambda_scale()
tof_array = o_time_handler.tof_array
lambda_array = o_time_handler.lambda_array
if self.data_type == 'sample':
self.parent.data_metadata['time_spectra']['data'] = tof_array
self.parent.data_metadata['time_spectra']['lambda'] = lambda_array
else: #normalized
self.parent.data_metadata['time_spectra']['normalized_data'] = tof_array
self.parent.data_metadata['time_spectra']['normalized_lambda'] = lambda_array
def retrieve_time_spectra(self, auto_load=True):
if auto_load:
if self.data_type == 'sample':
folder = self.parent.data_metadata['sample']['folder']
else:
folder = self.parent.data_metadata['normalized']['folder']
o_time_spectra = LoadTimeSpectra(folder = folder, auto_load=auto_load)
if o_time_spectra.file_found:
time_spectra = o_time_spectra.time_spectra
base_time_spectra = FileHandler.get_base_filename(time_spectra)
folder_name = FileHandler.get_parent_folder(time_spectra)
self.parent.time_spectra_folder = os.path.dirname(time_spectra)
if self.data_type == 'sample':
self.list_ui['time_spectra']['text'].setText(base_time_spectra)
self.list_ui['time_spectra']['folder'].setText(folder_name)
elif self.data_type == 'normalized':
self.parent.data_metadata['time_spectra']['full_file_name'] = time_spectra
self.list_ui['time_spectra']['text2'].setText(base_time_spectra)
self.list_ui['time_spectra']['folder2'].setText(folder_name)
self.parent.data_metadata['time_spectra']['normalized_folder'] = folder_name
self.parent.time_spectra_normalized_folder = os.path.dirname(time_spectra)
else:
if self.data_type == 'sample':
folder = self.parent.data_metadata['time_spectra']['folder']
else:
folder = self.parent.data_metadata['time_spectra']['normalized_folder']
time_spectra_name_format = '*_Spectra.txt'
            # note: under PyQt5, getOpenFileName returns a (filename, filter)
            # tuple, so the str() wrapper here matches the PyQt4 signature only
            file_name = str(QFileDialog.getOpenFileName(caption = "Select the Time Spectra File",
directory = folder,
filter = "Txt ({});;All (*.*)".format(time_spectra_name_format)))
if file_name:
folder_name = FileHandler.get_parent_folder(file_name)
base_file_name = FileHandler.get_base_filename(file_name)
self.parent.time_spectra_folder = os.path.dirname(file_name)
if self.data_type == 'sample':
self.list_ui['time_spectra']['text'].setText(base_file_name)
self.list_ui['time_spectra']['folder'].setText(folder_name)
self.parent.data_metadata['time_spectra']['folder'] = folder_name
elif self.data_type == 'normalized':
self.parent.data_metadata['time_spectra']['full_file_name'] = file_name
self.list_ui['time_spectra']['text2'].setText(base_file_name)
self.list_ui['time_spectra']['folder2'].setText(folder_name)
self.parent.data_metadata['time_spectra']['normalized_folder'] = folder_name
self.parent.time_spectra_normalized_folder = os.path.dirname(file_name)
def load_directory(self, folder):
list_files = glob.glob(folder + '/*.*')
if len(list_files) == 0:
raise TypeError
image_type = self.get_image_type(list_files)
o_load_image = LoadFiles(parent = self.parent,
image_ext = image_type,
folder = folder)
self.populate_list_widget(o_load_image)
self.parent.data_files[self.data_type] = o_load_image.list_of_files
self.parent.data_metadata[self.data_type]['folder'] = o_load_image.folder
self.parent.sample_folder = os.path.dirname(os.path.dirname(o_load_image.folder))
self.parent.data_metadata[self.data_type]['data'] = o_load_image.image_array
def populate_list_widget(self, o_loader):
list_of_files = o_loader.list_of_files
_list_ui = self.list_ui[self.data_type]['list']
_list_ui.clear()
for _row, _file in enumerate(list_of_files):
_item = QtGui.QListWidgetItem(_file)
_list_ui.insertItem(_row, _item)
_folder = o_loader.folder
self.folder = _folder
_parent_folder = FileHandler.get_parent_folder(_folder)
self.list_ui[self.data_type]['folder'].setText(_parent_folder)
def load_files(self, list_of_files):
image_type = self.get_image_type(list_of_files)
o_load_image = LoadFiles(parent = self.parent,
image_ext = image_type,
list_of_files = list_of_files)
self.populate_list_widget(o_load_image)
self.parent.data_files[self.data_type] = o_load_image.list_of_files
self.parent.data_metadata[self.data_type]['folder'] = o_load_image.folder
#self.parent.data_metadata[self.data_type]['data'] = o_load_image.data
self.parent.data_metadata[self.data_type]['data'] = o_load_image.image_array
    def get_image_type(self, list_of_files):
        # inspect the first file; indexing [1] would fail for a single file
        raw_file, ext = os.path.splitext(list_of_files[0])
        return ext
class FileDialog(QFileDialog):
selectedFiles = []
def __init__(self, *args):
QtGui.QFileDialog.__init__(self, *args)
self.setOption(self.DontUseNativeDialog, False)
self.setFileMode(self.ExistingFiles)
btns = self.findChildren(QtGui.QPushButton)
self.openBtn = [x for x in btns if 'open' in str(x.text()).lower()][0]
self.openBtn.clicked.disconnect()
self.openBtn.clicked.connect(self.openClicked)
self.tree = self.findChild(QtGui.QTreeView)
def openClicked(self):
inds = self.tree.selectionModel().selectedIndexes()
files = []
for i in inds:
if i.column() == 0:
files.append(os.path.join(str(self.directory().absolutePath()),str(i.data().toString())))
self.selectedFiles = files
self.close()
def filesSelected(self):
return self.selectedFiles | 44.747706 | 123 | 0.598462 | 9,321 | 0.95551 | 0 | 0 | 0 | 0 | 0 | 0 | 1,161 | 0.119016 |
330bc2029c1246f778fe532317958ef2c30db80a | 10,719 | py | Python | touca/_case.py | trytouca/touca-python | dab4bb6760a173952b63ea14fd4bc30c3877744e | [
"Apache-2.0"
] | 11 | 2021-06-29T04:51:28.000Z | 2022-03-22T05:58:44.000Z | touca/_case.py | trytouca/touca-python | dab4bb6760a173952b63ea14fd4bc30c3877744e | [
"Apache-2.0"
] | null | null | null | touca/_case.py | trytouca/touca-python | dab4bb6760a173952b63ea14fd4bc30c3877744e | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Touca, Inc. Subject to Apache-2.0 License.
from ._types import IntegerType, VectorType, ToucaType
from datetime import datetime, timedelta
from enum import Enum
from typing import Dict, Tuple
class ResultCategory(Enum):
""" """
Check = 1
Assert = 2
class ResultEntry:
"""
Wrapper around a given ``ToucaType`` value that includes the category
it should belong to.
We are intentionally not using ``@dataclass`` to ensure the core library
has no dependency on ``dataclasses`` module. This may change in the future.
"""
def __init__(self, typ: ResultCategory, val: ToucaType):
"""
Creates an entry given its value and the category it should belong to.
:param typ: type of the entry
:param val: value of the entry
"""
self.typ = typ
self.val = val
class Case:
""" """
def __init__(self, **kwargs):
self._meta = kwargs
self._results: Dict[str, ResultEntry] = dict()
self._tics: Dict[str, datetime] = dict()
self._tocs: Dict[str, datetime] = dict()
def check(self, key: str, value: ToucaType):
"""
Logs a given value as a test result for the declared test case
and associates it with the specified key.
:param key: name to be associated with the logged test result
:param value: value to be logged as a test result
"""
self._results[key] = ResultEntry(typ=ResultCategory.Check, val=value)
def assume(self, key: str, value: ToucaType):
"""
Logs a given value as an assertion for the declared test case
and associates it with the specified key.
:param key: name to be associated with the logged test result
:param value: value to be logged as a test result
"""
self._results[key] = ResultEntry(typ=ResultCategory.Assert, val=value)
def add_array_element(self, key: str, value: ToucaType):
"""
Adds a given value to a list of results for the declared
test case which is associated with the specified key.
Could be considered as a helper utility function.
This method is particularly helpful to log a list of items as they
are found:
.. code-block:: python
for number in numbers:
if is_prime(number):
touca.add_array_element("prime numbers", number)
touca.add_hit_count("number of primes")
This pattern can be considered as a syntactic sugar for the following
alternative:
.. code-block:: python
primes = []
for number in numbers:
if is_prime(number):
primes.append(number)
if primes:
touca.check("prime numbers", primes)
touca.check("number of primes", len(primes))
        The items added to the list are not required to be of the same type.
        The following code is acceptable:
        .. code-block:: python
            touca.add_array_element("prime numbers", 42)
            touca.add_array_element("prime numbers", "forty three")
:raises RuntimeError:
if specified key is already associated with
a test result which was not iterable
:param key: name to be associated with the logged test result
:param value: element to be appended to the array
:see also: :py:meth:`~check`
"""
if key not in self._results:
self._results[key] = ResultEntry(typ=ResultCategory.Check, val=VectorType())
vec = self._results.get(key)
if vec.typ is not ResultCategory.Check or not isinstance(vec.val, VectorType):
raise RuntimeError("specified key has a different type")
vec.val.add(value)
def add_hit_count(self, key: str):
"""
Increments value of key every time it is executed.
creates the key with initial value of one if it does not exist.
Could be considered as a helper utility function.
This method is particularly helpful to track variables whose values
are determined in loops with indeterminate execution cycles:
.. code-block:: python
for number in numbers:
if is_prime(number):
touca.add_array_element("prime numbers", number)
touca.add_hit_count("number of primes")
This pattern can be considered as a syntactic sugar for the following
alternative:
.. code-block:: python
primes = []
for number in numbers:
if is_prime(number):
primes.append(number)
if primes:
touca.check("prime numbers", primes)
touca.check("number of primes", len(primes))
:raises RuntimeError:
if specified key is already associated with
a test result which was not an integer
:param key: name to be associated with the logged test result
:see also: :py:meth:`~check`
"""
if key not in self._results:
self._results[key] = ResultEntry(
typ=ResultCategory.Check, val=IntegerType(1)
)
return
value = self._results.get(key)
if value.typ is not ResultCategory.Check or not isinstance(
value.val, IntegerType
):
raise RuntimeError("specified key has a different type")
value.val._value += 1
def add_metric(self, key: str, milliseconds: int):
"""
Adds an already obtained measurements to the list of captured
performance benchmarks.
Useful for logging a metric that is measured without using this SDK.
:param key: name to be associated with this performance benchmark
:param milliseconds: duration of this measurement in milliseconds
"""
value = datetime.now()
self._tics[key] = value
self._tocs[key] = value + timedelta(microseconds=milliseconds * 1000)
def start_timer(self, key: str):
"""
Starts timing an event with the specified name.
Measurement of the event is only complete when function
:py:meth:`~stop_timer` is later called for the specified name.
:param key: name to be associated with the performance metric
"""
self._tics[key] = datetime.now()
def stop_timer(self, key: str):
"""
Stops timing an event with the specified name.
Expects function :py:meth:`~start_timer` to have been called previously
with the specified name.
:param key: name to be associated with the performance metric
"""
if key in self._tics:
self._tocs[key] = datetime.now()
def _metrics(self) -> Tuple[str, ToucaType]:
for key, tic in self._tics.items():
if key not in self._tocs:
continue
            # total_seconds() covers durations longer than one second; the
            # .microseconds attribute alone would drop the seconds component
            diff = (self._tocs.get(key) - tic).total_seconds() * 1000
yield key, IntegerType(int(diff))
def _metadata(self) -> Dict[str, str]:
return {
"teamslug": self._meta.get("team") or "unknown",
"testsuite": self._meta.get("suite") or "unknown",
"version": self._meta.get("version") or "unknown",
"testcase": self._meta.get("name") or "unknown",
"builtAt": datetime.now().isoformat(),
}
def json(self):
return {
"metadata": self._metadata(),
"results": [
{"key": k, "value": v.val.json()}
for k, v in self._results.items()
if v.typ is ResultCategory.Check
],
"assertions": [
{"key": k, "value": v.val.json()}
for k, v in self._results.items()
if v.typ is ResultCategory.Assert
],
"metrics": [{"key": k, "value": v.json()} for k, v in self._metrics()],
}
def serialize(self) -> bytearray:
from flatbuffers import Builder
import touca._schema as schema
dicts = {
ResultCategory.Check: schema.ResultType.Check,
ResultCategory.Assert: schema.ResultType.Assert,
}
builder = Builder(1024)
metadata = {k: builder.CreateString(v) for k, v in self._metadata().items()}
schema.MetadataStart(builder)
schema.MetadataAddTeamslug(builder, metadata.get("teamslug"))
schema.MetadataAddTestsuite(builder, metadata.get("testsuite"))
schema.MetadataAddVersion(builder, metadata.get("version"))
schema.MetadataAddTestcase(builder, metadata.get("testcase"))
schema.MetadataAddBuiltAt(builder, metadata.get("builtAt"))
fbs_metadata = schema.MetadataEnd(builder)
result_entries = []
for k, v in self._results.items():
            fbs_key = builder.CreateString(k)
fbs_value = v.val.serialize(builder)
schema.ResultStart(builder)
schema.ResultAddKey(builder, fbs_key)
schema.ResultAddValue(builder, fbs_value)
schema.ResultAddTyp(builder, dicts.get(v.typ))
result_entries.append(schema.ResultEnd(builder))
schema.ResultsStartEntriesVector(builder, len(result_entries))
for item in reversed(result_entries):
builder.PrependUOffsetTRelative(item)
fbs_result_entries = builder.EndVector()
schema.ResultsStart(builder)
schema.ResultsAddEntries(builder, fbs_result_entries)
fbs_results = schema.ResultsEnd(builder)
metric_entries = []
for k, v in self._metrics():
            fbs_key = builder.CreateString(k)
fbs_value = v.serialize(builder)
schema.MetricStart(builder)
schema.MetricAddKey(builder, fbs_key)
schema.MetricAddValue(builder, fbs_value)
metric_entries.append(schema.MetricEnd(builder))
schema.MetricsStartEntriesVector(builder, len(metric_entries))
for item in reversed(metric_entries):
builder.PrependUOffsetTRelative(item)
fbs_metric_entries = builder.EndVector()
schema.MetricsStart(builder)
schema.MetricsAddEntries(builder, fbs_metric_entries)
fbs_metrics = schema.MetricsEnd(builder)
schema.MessageStart(builder)
schema.MessageAddMetadata(builder, fbs_metadata)
schema.MessageAddResults(builder, fbs_results)
schema.MessageAddMetrics(builder, fbs_metrics)
fbs_message = schema.MessageEnd(builder)
builder.Finish(fbs_message)
return builder.Output()
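if __name__ == "__main__":
    # Minimal usage sketch, not part of the library; run with
    # `python -m touca._case`. The team/suite/version/name values are made up.
    case = Case(team="acme", suite="students", version="1.0", name="alice")
    case.start_timer("lookup")
    case.check("gpa", IntegerType(4))
    case.add_hit_count("records")
    case.stop_timer("lookup")
    print(case.json())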
| 35.376238 | 88 | 0.612184 | 10,500 | 0.979569 | 265 | 0.024722 | 0 | 0 | 0 | 0 | 4,960 | 0.46273 |
330bcb8284fec7ff83c5b51b2973e481ae9b2a12 | 1,376 | py | Python | app/main/models/utils.py | tmeftah/e-invoice | 7cfe31e9391eb60ab3d06f0055bd2f1e9a524971 | [
"MIT"
] | 2 | 2019-06-10T19:30:06.000Z | 2020-04-30T01:05:04.000Z | app/main/models/utils.py | tmeftah/e-invoice | 7cfe31e9391eb60ab3d06f0055bd2f1e9a524971 | [
"MIT"
] | null | null | null | app/main/models/utils.py | tmeftah/e-invoice | 7cfe31e9391eb60ab3d06f0055bd2f1e9a524971 | [
"MIT"
] | 3 | 2019-01-23T21:37:29.000Z | 2020-04-08T13:22:29.000Z | from datetime import datetime
import sqlalchemy as sa
from flask_sqlalchemy import Model
from sqlalchemy import ForeignKey
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from app.main.extensions import db
class BaseMixin(Model):
def save_to_db(self):
try:
db.session.add(self)
db.session.commit()
        except Exception:
db.session.rollback()
raise
def delete_from_db(self):
try:
db.session.delete(self)
db.session.commit()
        except Exception:
db.session.rollback()
raise
class UserMixin(BaseMixin, Model):
@declared_attr
def createdAt(cls):
return sa.Column(sa.DateTime, default=datetime.utcnow)
@declared_attr
def updateAt(cls):
return sa.Column(sa.DateTime)
@declared_attr
def createdBy_id(cls):
return sa.Column(sa.Integer, ForeignKey('users.id'),
nullable=False)
@declared_attr
def updatedBy_id(cls):
return sa.Column(sa.Integer, ForeignKey('users.id'))
@declared_attr
def createdBy(cls):
return relationship(
'UserModel', foreign_keys=[cls.createdBy_id])
@declared_attr
def updatedBy(cls):
return relationship(
'UserModel', foreign_keys=[cls.updatedBy_id])
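# Illustrative sketch only (not part of the app): a concrete model that picks
# up the audit columns from UserMixin. 'NoteModel' and the 'notes' table are
# hypothetical, and the foreign keys assume the existing 'users' table.
class NoteModel(UserMixin, db.Model):
    __tablename__ = 'notes'
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.String(255))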
| 23.322034 | 62 | 0.634448 | 1,117 | 0.811773 | 0 | 0 | 671 | 0.487645 | 0 | 0 | 42 | 0.030523 |
330cecf3fdced7629c465eecd0baccde4887eb04 | 759 | py | Python | source/tides.py | agstub/viscoelastic-glines | 924230b2c79b6ec90177718417d26d7bf62c4b50 | [
"MIT"
] | null | null | null | source/tides.py | agstub/viscoelastic-glines | 924230b2c79b6ec90177718417d26d7bf62c4b50 | [
"MIT"
] | 2 | 2021-12-16T20:27:35.000Z | 2021-12-20T17:43:21.000Z | source/tides.py | agstub/viscoelastic-glines | 924230b2c79b6ec90177718417d26d7bf62c4b50 | [
"MIT"
] | null | null | null | #-------------------------------------------------------------------------------
# This function defines the sea level change timeseries for marine ice sheet problem.
# *Default = sinusoidal tidal cycle if 'tides' with 1m amplitude if 'tides' turned 'on', OR...
# = zero if 'tides' turned 'off'
#-------------------------------------------------------------------------------
import numpy as np
from params import t_final,nt_per_year,tides
def sl_change(t):
if tides == 'on':
SLC = np.sin(4*np.pi*t/(3.154e7/12.0/30.0)) # tidal frequency of 2 per day
else:
        SLC = 0.0 # no sea level change for the long-time marine problem
return SLC
| 44.647059 | 94 | 0.44664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 465 | 0.612648 |
330e0026ec6f48bff70e4f7fa2738cc955a1b78d | 1,397 | py | Python | nucleotidefrequencies.py | TaliaferroLab/AnalysisScripts | 3df37d2f8fca9bc402afe5ea870c42200fca1ed3 | [
"MIT"
] | null | null | null | nucleotidefrequencies.py | TaliaferroLab/AnalysisScripts | 3df37d2f8fca9bc402afe5ea870c42200fca1ed3 | [
"MIT"
] | null | null | null | nucleotidefrequencies.py | TaliaferroLab/AnalysisScripts | 3df37d2f8fca9bc402afe5ea870c42200fca1ed3 | [
"MIT"
] | 1 | 2021-10-30T07:37:19.000Z | 2021-10-30T07:37:19.000Z | #Usage: python nucleotidefrequencies.py <fasta file> <output file>
#Output is tab delimited frequencies of A, G, C, U
from Bio import SeqIO
import sys
def getfreqs(fasta):
freqs = [] #[afreq, gfreq, cfreq, ufreq]
a = 0
u = 0
c = 0
g = 0
tot = 0
for record in SeqIO.parse(fasta, 'fasta'):
seq = str(record.seq.transcribe().upper())
a += seq.count('A')
u += seq.count('U')
c += seq.count('C')
g += seq.count('G')
tot += len(seq)
freqs = [a/float(tot), g/float(tot), c/float(tot), u/float(tot)]
return freqs
def getfreqs_boxplot(fasta, outfile, classid):
freqs = {} # {seqname : [A,G,C,U]}
for record in SeqIO.parse(fasta, 'fasta'):
seq = str(record.seq.transcribe().upper())
seqname = record.id
tot = float(len(seq))
if tot < 100:
continue
a = seq.count('A') / tot
u = seq.count('U') / tot
c = seq.count('C') / tot
g = seq.count('G') / tot
freqs[seqname] = [str(a),str(g),str(c),str(u)]
outfh = open(outfile, 'w')
outfh.write(('\t').join(['A','G','C','U','Class']) + '\n')
for seq in freqs:
outfh.write(('\t').join([freqs[seq][0], freqs[seq][1], freqs[seq][2], freqs[seq][3], classid]) + '\n')
outfh.close()
#outfh = open(sys.argv[2], 'w')
#freqs = getfreqs(sys.argv[1])
#freqs = [str(freq) for freq in freqs]
#outfh.write(('\t').join(freqs) + '\t' + sys.argv[3] + '\n')
#outfh.close()
if __name__ == '__main__':
    getfreqs_boxplot(sys.argv[1], sys.argv[2], sys.argv[3]) | 26.865385 | 104 | 0.602004 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 417 | 0.298497 |
330e3ca54e42e429e7b117d2305fd38be4387ed0 | 17 | py | Python | pytuber/version.py | tefra/pytube.fm | d8e8d5dfe928497f69d208df4c21b049a726dbda | [
"MIT"
] | 8 | 2019-01-27T00:52:20.000Z | 2021-07-15T15:57:19.000Z | pytuber/version.py | tefra/pytube.fm | d8e8d5dfe928497f69d208df4c21b049a726dbda | [
"MIT"
] | 22 | 2019-01-25T14:57:08.000Z | 2021-12-13T19:55:04.000Z | pytuber/version.py | tefra/pytube.fm | d8e8d5dfe928497f69d208df4c21b049a726dbda | [
"MIT"
] | 4 | 2019-02-17T09:56:30.000Z | 2021-04-17T17:53:13.000Z | version = "20.1"
| 8.5 | 16 | 0.588235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.352941 |
330e71b3b4bc8bea5b484ce2931ef4411a75120b | 2,311 | py | Python | 2d-lin_sep.py | rzepinskip/optimization-svm | 9682980e19d5fc9f09353aa1284e86874e954aec | [
"MIT"
] | null | null | null | 2d-lin_sep.py | rzepinskip/optimization-svm | 9682980e19d5fc9f09353aa1284e86874e954aec | [
"MIT"
] | 2 | 2020-01-16T21:35:43.000Z | 2020-03-24T18:02:41.000Z | 2d-lin_sep.py | rzepinskip/optimization-svm | 9682980e19d5fc9f09353aa1284e86874e954aec | [
"MIT"
] | null | null | null | import numpy as np
from matplotlib import pyplot as plt
from optsvm.svm import SVM
x_neg = np.array([[3, 4], [1, 4], [2, 3]])
y_neg = np.array([-1, -1, -1])
x_pos = np.array([[6, -1], [7, -1], [5, -3]])
y_pos = np.array([1, 1, 1])
x1 = np.linspace(-10, 10)
x = np.vstack((np.linspace(-10, 10), np.linspace(-10, 10)))
# Data for the next section
X = np.vstack((x_neg, x_pos))
y = np.concatenate((y_neg, y_pos))
# Plot
fig = plt.figure(figsize=(10, 10))
plt.scatter(x_neg[:, 0], x_neg[:, 1], marker="x", color="r", label="Negative -1")
plt.scatter(x_pos[:, 0], x_pos[:, 1], marker="o", color="b", label="Positive +1")
plt.plot(x1, x1 - 3, color="darkblue")
plt.plot(x1, x1 - 7, linestyle="--", alpha=0.3, color="b")
plt.plot(x1, x1 + 1, linestyle="--", alpha=0.3, color="r")
plt.xlim(-2, 12)
plt.ylim(-7, 7)
plt.xticks(np.arange(0, 10, step=1))
plt.yticks(np.arange(-5, 5, step=1))
# Lines
plt.axvline(0, color="black", alpha=0.5)
plt.axhline(0, color="black", alpha=0.5)
plt.plot([2, 6], [3, -1], linestyle="-", color="darkblue", alpha=0.5)
plt.plot([4, 6], [1, 1], [6, 6], [1, -1], linestyle=":", color="darkblue", alpha=0.5)
plt.plot(
[0, 1.5], [0, -1.5], [6, 6], [1, -1], linestyle=":", color="darkblue", alpha=0.5
)
# Annotations
plt.annotate(s="$A \ (6,-1)$", xy=(5, -1), xytext=(6, -1.5))
plt.annotate(
s="$B \ (2,3)$", xy=(2, 3), xytext=(2, 3.5)
) # , arrowprops = {'width':.2, 'headwidth':8})
plt.annotate(s="$2$", xy=(5, 1.2), xytext=(5, 1.2))
plt.annotate(s="$2$", xy=(6.2, 0.5), xytext=(6.2, 0.5))
plt.annotate(s="$2\sqrt{2}$", xy=(4.5, -0.5), xytext=(4.5, -0.5))
plt.annotate(s="$2\sqrt{2}$", xy=(2.5, 1.5), xytext=(2.5, 1.5))
plt.annotate(s="$w^Tx + b = 0$", xy=(8, 4.5), xytext=(8, 4.5))
plt.annotate(
s="$(\\frac{1}{4},-\\frac{1}{4}) \\binom{x_1}{x_2}- \\frac{3}{4} = 0$",
xy=(7.5, 4),
xytext=(7.5, 4),
)
plt.annotate(s="$\\frac{3}{\sqrt{2}}$", xy=(0.5, -1), xytext=(0.5, -1))
# Labels and show
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.legend(loc="lower right")
plt.show()
svm = SVM(C=10)
svm.fit(X, y)
# Display results
print("---Our results")
print("w = ", svm.w_.flatten())
print("b = ", svm.b_)
from sklearn.svm import SVC
clf = SVC(C=10, kernel="linear")
clf.fit(X, y.ravel())
print("---SVM library")
print("w = ", clf.coef_)
print("b = ", clf.intercept_)
| 30.012987 | 85 | 0.568585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 508 | 0.219818 |
330ecd444c0b51c103fe8429f61156a567b6cff4 | 3,574 | py | Python | src/decorators/location_decorator.py | AAU-PSix/canary | 93b07d23cd9380adc03a6aa1291a13eaa3b3008c | [
"MIT"
] | null | null | null | src/decorators/location_decorator.py | AAU-PSix/canary | 93b07d23cd9380adc03a6aa1291a13eaa3b3008c | [
"MIT"
] | null | null | null | src/decorators/location_decorator.py | AAU-PSix/canary | 93b07d23cd9380adc03a6aa1291a13eaa3b3008c | [
"MIT"
] | null | null | null |
from typing import List, Dict
from ts.c_syntax import CSyntax
from ts import Tree
from cfa import CFANode, CFA, CFAEdge
from cfa import LocalisedCFA, LocalisedNode
from .tweet_handler import TweetHandler
from .decoration_strategy import StandardDecorationStrategy, DecorationStrategy
from .conversion_strategy import ConversionStrategy
class LocationDecorator():
    """Decorates a CFA with source locations extracted from location tweets."""
def __init__(
self,
tree: Tree,
conversion_strategy: ConversionStrategy = None,
tweet_handler: TweetHandler = None,
decoration_strategy: DecorationStrategy = None
) -> None:
self.tree: Tree = tree
self._syntax = CSyntax()
self.tweet_handler = tweet_handler if tweet_handler is not None else TweetHandler(self.tree)
self.decoration_strategy = decoration_strategy if decoration_strategy is not None else StandardDecorationStrategy(self.tweet_handler)
self.edge_converter = conversion_strategy if conversion_strategy is not None else ConversionStrategy()
    def map_node_to_location(self, cfa: CFA[CFANode]) -> Dict[str, CFANode]:
        location_tweets = self.tweet_handler.get_all_location_tweet_nodes(cfa)
        # maps each location text to its tweet node, as built in the loop below
        result: Dict[str, CFANode] = dict()
for tweet in location_tweets:
location = self.tweet_handler.extract_location_text_from_tweet(tweet.node)
result[location] = tweet
return result
def decorate(self, cfa: CFA[CFANode]) -> LocalisedCFA:
localised_cfa: LocalisedCFA = self.convert_cfa_to_localised(cfa)
# Step 1: Seed locations at tweet
self.decoration_strategy.decorate_initial_locations(localised_cfa)
# Step 2: Propagate seeds downwards
frontier: List[LocalisedNode] = list()
visited: List[LocalisedNode] = list()
frontier.append(localised_cfa.root)
while len(frontier) > 0:
cfa_node = frontier.pop(-1)
location = cfa_node.location
visited.append(cfa_node)
for edge in localised_cfa.outgoing_edges(cfa_node):
self.decoration_strategy.decorate_frontier(frontier, visited, location, edge)
        # Step 3: Fix constructs whose location tweet comes after them
for cfa_node in localised_cfa.nodes:
# Case 1: Switch cases propagation
if self._syntax.is_switch_case(cfa_node.node):
outgoings = localised_cfa.outgoing(cfa_node)
# We can assume that each case is followed by a location tweet
cfa_node.location = outgoings[0].location
return localised_cfa
def convert_cfa_to_localised(self, cfa: CFA[CFANode]) -> LocalisedCFA:
# Step 1: Convert all CFANodes to Localised CFA Nodes (CFANode -> Localised CFA Node)
converted_nodes: Dict[CFANode, LocalisedNode] = dict()
for cfa_node in cfa.nodes:
converted_nodes[cfa_node] = LocalisedNode(cfa_node.node)
localised_cfa = LocalisedCFA(
converted_nodes[cfa.root]
)
# Step 2: Reconstruct all edges
converted_edges: List[CFAEdge[CFANode]] = list()
for cfa_node in cfa.nodes:
self.edge_converter.convert_edges(
cfa.outgoing_edges(cfa_node),
converted_edges,
localised_cfa,
converted_nodes
)
self.edge_converter.convert_edges(
cfa.ingoing_edges(cfa_node),
converted_edges,
localised_cfa,
converted_nodes
)
return localised_cfa | 40.613636 | 141 | 0.665921 | 3,236 | 0.905428 | 0 | 0 | 0 | 0 | 0 | 0 | 330 | 0.092334 |
330f0c3baa6b73293fcd4fabbaa5457656f166bd | 22,550 | py | Python | pydicom_ext/pydicom_series.py | shinaji/pydicom_ext | bc3d716eb488589ba5906a0722474682987dafb8 | [
"MIT"
] | null | null | null | pydicom_ext/pydicom_series.py | shinaji/pydicom_ext | bc3d716eb488589ba5906a0722474682987dafb8 | [
"MIT"
] | null | null | null | pydicom_ext/pydicom_series.py | shinaji/pydicom_ext | bc3d716eb488589ba5906a0722474682987dafb8 | [
"MIT"
] | null | null | null | # dicom_series.py
"""
By calling the function read_files with a directory name or list
of files as an argument, a list of DicomSeries instances can be
obtained. A DicomSeries object has some attributes that give
information about the serie (such as shape, sampling, suid) and
has an info attribute, which is a pydicom.DataSet instance containing
information about the first dicom file in the serie. The data can
be obtained using the get_pixel_array() method, which produces a
3D numpy array if there are multiple files in the serie.
This module can deal with gated data, in which case a DicomSeries
instance is created for each 3D volume.
"""
from __future__ import print_function
#
# Copyright (c) 2010 Almar Klein
# This file is released under the pydicom license.
# See the file LICENSE included with the pydicom distribution, also
# available at https://github.com/pydicom/pydicom
#
# I (Almar) performed some test to loading a series of data
# in two different ways: loading all data, and deferring loading
# the data. Both ways seem equally fast on my system. I have to
# note that results can differ quite a lot depending on the system,
# but still I think this suggests that deferred reading is in
# general not slower. I think deferred loading of the pixel data
# can be advantageous because maybe not all data of all series
# is needed. Also it simply saves memory, because the data is
# removed from the Dataset instances.
# In the few result below, cold means reading for the first time,
# warm means reading 2nd/3d/etc time.
# - Full loading of data, cold: 9 sec
# - Full loading of data, warm: 3 sec
# - Deferred loading of data, cold: 9 sec
# - Deferred loading of data, warm: 3 sec
import os
import sys
import time
import gc
import pydicom
from pydicom.sequence import Sequence
from pydicom import compat
# Try importing numpy
try:
import numpy as np
have_numpy = True
except ImportError:
np = None # NOQA
have_numpy = False
# Helper functions and classes
class ProgressBar(object):
""" To print progress to the screen.
"""
def __init__(self, char='-', length=20):
self.char = char
self.length = length
self.progress = 0.0
self.nbits = 0
self.what = ''
def Start(self, what=''):
""" Start(what='')
Start the progress bar, displaying the given text first.
        Make sure not to print anything until after calling
        Finish(). Messages can be printed while displaying
        progress by using PrintMessage().
"""
self.what = what
self.progress = 0.0
self.nbits = 0
sys.stdout.write(what + " [")
def Stop(self, message=""):
""" Stop the progress bar where it is now.
Optionally print a message behind it."""
delta = int(self.length - self.nbits)
sys.stdout.write(" " * delta + "] " + message + "\n")
def Finish(self, message=""):
""" Finish the progress bar, setting it to 100% if it
was not already. Optionally print a message behind the bar.
"""
delta = int(self.length - self.nbits)
sys.stdout.write(self.char * delta + "] " + message + "\n")
def Update(self, newProgress):
""" Update progress. Progress is given as a number
between 0 and 1.
"""
self.progress = newProgress
required = self.length * (newProgress)
delta = int(required - self.nbits)
if delta > 0:
sys.stdout.write(self.char * delta)
self.nbits += delta
def PrintMessage(self, message):
""" Print a message (for example a warning).
The message is printed behind the progress bar,
and a new bar is started.
"""
self.Stop(message)
self.Start(self.what)
def _dummyProgressCallback(progress):
""" A callback to indicate progress that does nothing. """
pass
_progressBar = ProgressBar()
def _progressCallback(progress):
""" The default callback for displaying progress. """
if isinstance(progress, compat.string_types):
_progressBar.Start(progress)
_progressBar._t0 = time.time()
elif progress is None:
dt = time.time() - _progressBar._t0
_progressBar.Finish('%2.2f seconds' % dt)
else:
_progressBar.Update(progress)
def _listFiles(files, path):
"""List all files in the directory, recursively. """
for item in os.listdir(path):
item = os.path.join(path, item)
if os.path.isdir(item):
_listFiles(files, item)
else:
files.append(item)
def _splitSerieIfRequired(serie, series):
""" _splitSerieIfRequired(serie, series)
    Split the serie into multiple series if this is required.
    The choice is based on examining the image position relative to
    the previous image. If it differs too much, it is assumed
    that there is a new dataset. This can happen for example in
    unsplit gated CT data.
"""
# Sort the original list and get local name
serie._sort()
L = serie._datasets
# Init previous slice
ds1 = L[0]
# Check whether we can do this
if "ImagePositionPatient" not in ds1:
return
# Initialize a list of new lists
L2 = [[ds1]]
# Init slice distance estimate
distance = 0
for index in range(1, len(L)):
# Get current slice
ds2 = L[index]
# Get positions
pos1 = float(ds1.ImagePositionPatient[2])
pos2 = float(ds2.ImagePositionPatient[2])
# Get distances
newDist = abs(pos1 - pos2)
# deltaDist = abs(firstPos-pos2)
# If the distance deviates more than 2x from what we've seen,
# we can agree it's a new dataset.
if distance and newDist > 2.1 * distance:
L2.append([])
distance = 0
else:
# Test missing file
if distance and newDist > 1.5 * distance:
print('Warning: missing file after "%s"' % ds1.filename)
distance = newDist
# Add to last list
L2[-1].append(ds2)
# Store previous
ds1 = ds2
# Split if we should
if len(L2) > 1:
# At what position are we now?
i = series.index(serie)
# Create new series
series2insert = []
for L in L2:
newSerie = DicomSeries(serie.suid, serie._showProgress)
newSerie._datasets = Sequence(L)
series2insert.append(newSerie)
# Insert series and remove self
for newSerie in reversed(series2insert):
series.insert(i, newSerie)
series.remove(serie)
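# Worked example of the split heuristic above: for slice positions
# 0, 2, 4, 60, 62, 64 the jump from 4 to 60 exceeds 2.1 * 2, so the serie is
# split into two volumes at that point.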
pixelDataTag = pydicom.tag.Tag(0x7fe0, 0x0010)
def _getPixelDataFromDataset(ds):
""" Get the pixel data from the given dataset. If the data
was deferred, make it deferred again, so that memory is
preserved. Also applies RescaleSlope and RescaleIntercept
if available. """
# Get original element
el = dict.__getitem__(ds, pixelDataTag)
# Get data
data = np.array(ds.pixel_array)
# Remove data (mark as deferred)
dict.__setitem__(ds, pixelDataTag, el)
del ds._pixel_array
# Obtain slope and offset
slope = 1
offset = 0
needFloats = False
needApplySlopeOffset = False
if 'RescaleSlope' in ds:
needApplySlopeOffset = True
slope = ds.RescaleSlope
if 'RescaleIntercept' in ds:
needApplySlopeOffset = True
offset = ds.RescaleIntercept
if int(slope) != slope or int(offset) != offset:
needFloats = True
if not needFloats:
slope, offset = int(slope), int(offset)
# Apply slope and offset
if needApplySlopeOffset:
# Maybe we need to change the datatype?
if data.dtype in [np.float32, np.float64]:
pass
elif needFloats:
data = data.astype(np.float32)
else:
# Determine required range
minReq, maxReq = data.min(), data.max()
minReq = min(
[minReq, minReq * slope + offset, maxReq * slope + offset])
maxReq = max(
[maxReq, minReq * slope + offset, maxReq * slope + offset])
# Determine required datatype from that
dtype = None
if minReq < 0:
# Signed integer type
maxReq = max([-minReq, maxReq])
if maxReq < 2**7:
dtype = np.int8
elif maxReq < 2**15:
dtype = np.int16
elif maxReq < 2**31:
dtype = np.int32
else:
dtype = np.float32
else:
# Unsigned integer type
if maxReq < 2**8:
dtype = np.uint8
elif maxReq < 2**16:
dtype = np.uint16
elif maxReq < 2**32:
dtype = np.uint32
else:
dtype = np.float32
# Change datatype
if dtype != data.dtype:
data = data.astype(dtype)
# Apply slope and offset
data *= slope
data += offset
# Done
return data
# The public functions and classes
def read_files(path, showProgress=False, readPixelData=False, force=False):
""" read_files(path, showProgress=False, readPixelData=False)
Reads dicom files and returns a list of DicomSeries objects, which
contain information about the data, and can be used to load the
image or volume data.
The parameter "path" can also be a list of files or directories.
If the callable "showProgress" is given, it is called with a single
argument to indicate the progress. The argument is a string when a
progress is started (indicating what is processed). A float indicates
    progress updates. The parameter is None when the progress is finished.
When "showProgress" is True, a default callback is used that writes
to stdout. By default, no progress is shown.
if readPixelData is True, the pixel data of all series is read. By
default the loading of pixeldata is deferred until it is requested
using the DicomSeries.get_pixel_array() method. In general, both
methods should be equally fast.
"""
# Init list of files
files = []
# Obtain data from the given path
if isinstance(path, compat.string_types):
# Make dir nice
basedir = os.path.abspath(path)
# Check whether it exists
if not os.path.isdir(basedir):
raise ValueError('The given path is not a valid directory.')
# Find files recursively
_listFiles(files, basedir)
elif isinstance(path, (tuple, list)):
# Iterate over all elements, which can be files or directories
for p in path:
if os.path.isdir(p):
_listFiles(files, os.path.abspath(p))
elif os.path.isfile(p):
files.append(p)
else:
print("Warning, the path '%s' is not valid." % p)
else:
raise ValueError('The path argument must be a string or list.')
# Set default progress callback?
if showProgress is True:
showProgress = _progressCallback
if not hasattr(showProgress, '__call__'):
showProgress = _dummyProgressCallback
# Set defer size
deferSize = 16383 # 128**2-1
if readPixelData:
deferSize = None
# Gather file data and put in DicomSeries
series = {}
count = 0
showProgress('Loading series information:')
for filename in files:
# Skip DICOMDIR files
if filename.count("DICOMDIR"):
continue
# Try loading dicom ...
try:
dcm = pydicom.read_file(filename, deferSize, force=force)
except pydicom.filereader.InvalidDicomError:
continue # skip non-dicom file
except Exception as why:
if showProgress is _progressCallback:
_progressBar.PrintMessage(str(why))
else:
print('Warning:', why)
continue
# Get SUID and register the file with an existing or new series object
try:
suid = dcm.SeriesInstanceUID
except AttributeError:
continue # some other kind of dicom file
if suid not in series:
series[suid] = DicomSeries(suid, showProgress)
series[suid]._append(dcm)
# Show progress (note that we always start with a 0.0)
showProgress(float(count) / len(files))
count += 1
# Finish progress
showProgress(None)
# Make a list and sort, so that the order is deterministic
series = list(series.values())
series.sort(key=lambda x: x.suid)
# Split series if necessary
for serie in reversed([serie for serie in series]):
_splitSerieIfRequired(serie, series)
# Finish all series
showProgress('Analysing series')
series_ = []
for i in range(len(series)):
try:
series[i]._finish()
series_.append(series[i])
except Exception:
pass # Skip serie (probably report-like file without pixels)
showProgress(float(i + 1) / len(series))
showProgress(None)
return series_
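def _example_progress_callback(progress):
    """ Sketch of a user-supplied callback for read_files(showProgress=...):
    a str starts a stage, a float in [0, 1] reports an update, None finishes.
    """
    if isinstance(progress, compat.string_types):
        print('started:', progress)
    elif progress is None:
        print('done')
    else:
        print('%3.0f%%' % (progress * 100))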
class DicomSeries(object):
""" DicomSeries
This class represents a serie of dicom files that belong together.
If these are multiple files, they represent the slices of a volume
(like for CT or MRI). The actual volume can be obtained using loadData().
Information about the data can be obtained using the info attribute.
"""
# To create a DicomSeries object, start by making an instance and
# append files using the "_append" method. When all files are
# added, call "_sort" to sort the files, and then "_finish" to evaluate
# the data, perform some checks, and set the shape and sampling
# attributes of the instance.
def __init__(self, suid, showProgress):
# Init dataset list and the callback
self._datasets = Sequence()
self._showProgress = showProgress
# Init props
self._suid = suid
self._info = None
self._shape = None
self._sampling = None
@property
def suid(self):
""" The Series Instance UID. """
return self._suid
@property
def shape(self):
""" The shape of the data (nz, ny, nx).
If None, the serie contains a single dicom file. """
return self._shape
@property
def sampling(self):
""" The sampling (voxel distances) of the data (dz, dy, dx).
If None, the serie contains a single dicom file. """
return self._sampling
@property
def info(self):
""" A DataSet instance containing the information as present in the
first dicomfile of this serie. """
return self._info
@property
def description(self):
""" A description of the dicom series. Used fields are
PatientName, shape of the data, SeriesDescription,
and ImageComments.
"""
info = self.info
# If no info available, return simple description
if info is None:
return "DicomSeries containing %i images" % len(self._datasets)
fields = []
# Give patient name
if 'PatientName' in info:
fields.append("" + info.PatientName)
# Also add dimensions
if self.shape:
tmp = [str(d) for d in self.shape]
fields.append('x'.join(tmp))
# Try adding more fields
if 'SeriesDescription' in info:
fields.append("'" + info.SeriesDescription + "'")
if 'ImageComments' in info:
fields.append("'" + info.ImageComments + "'")
# Combine
return ' '.join(fields)
def __repr__(self):
adr = hex(id(self)).upper()
data_len = len(self._datasets)
return "<DicomSeries with %i images at %s>" % (data_len, adr)
def get_pixel_array(self):
""" get_pixel_array()
Get (load) the data that this DicomSeries represents, and return
it as a numpy array. If this serie contains multiple images, the
resulting array is 3D, otherwise it's 2D.
If RescaleSlope and RescaleIntercept are present in the dicom info,
the data is rescaled using these parameters. The data type is chosen
depending on the range of the (rescaled) data.
"""
# Can we do this?
if not have_numpy:
msg = "The Numpy package is required to use get_pixel_array.\n"
raise ImportError(msg)
# It's easy if no file or if just a single file
if len(self._datasets) == 0:
            raise ValueError('Series does not contain any files.')
        elif len(self._datasets) == 1:
            return _getPixelDataFromDataset(self._datasets[0])
# Check info
if self.info is None:
raise RuntimeError("Cannot return volume if series not finished.")
# Set callback to update progress
showProgress = self._showProgress
# Init data (using what the dicom packaged produces as a reference)
        ds = self._datasets[0]
        first_slice = _getPixelDataFromDataset(ds)
        vol = np.zeros(self.shape, dtype=first_slice.dtype)
        vol[0] = first_slice
# Fill volume
showProgress('Loading data:')
ll = self.shape[0]
for z in range(1, ll):
ds = self._datasets[z]
vol[z] = _getPixelDataFromDataset(ds)
showProgress(float(z) / ll)
# Finish
showProgress(None)
# Done
gc.collect()
return vol
def _append(self, dcm):
""" _append(dcm)
Append a dicomfile (as a pydicom.dataset.FileDataset) to the series.
"""
self._datasets.append(dcm)
    def _sort(self):
        """ _sort()
        Sort the datasets by instance number.
        """
        # Relies on the private _list attribute of pydicom's Sequence
        self._datasets._list.sort(key=lambda k: k.InstanceNumber)
def _finish(self):
""" _finish()
Evaluate the series of dicom files. Together they should make up
a volumetric dataset. This means the files should meet certain
conditions. Also some additional information has to be calculated,
such as the distance between the slices. This method sets the
attributes for "shape", "sampling" and "info".
This method checks:
* that there are no missing files
* that the dimensions of all images match
* that the pixel spacing of all images match
"""
# The datasets list should be sorted by instance number
L = self._datasets
if len(L) == 0:
return
elif len(L) < 2:
# Set attributes
ds = self._datasets[0]
self._info = self._datasets[0]
self._shape = [ds.Rows, ds.Columns]
self._sampling = [
float(ds.PixelSpacing[0]), float(ds.PixelSpacing[1])
]
return
# Get previous
ds1 = L[0]
# Init measures to calculate average of
distance_sum = 0.0
# Init measures to check (these are in 2D)
dimensions = ds1.Rows, ds1.Columns
# row, column
sampling = float(ds1.PixelSpacing[0]), float(ds1.PixelSpacing[1])
for index in range(len(L)):
            # On the first iteration ds1 and ds2 are the same; this does not
            # affect the distance calculation
# Get current
ds2 = L[index]
# Get positions
pos1 = float(ds1.ImagePositionPatient[2])
pos2 = float(ds2.ImagePositionPatient[2])
# Update distance_sum to calculate distance later
distance_sum += abs(pos1 - pos2)
# Test measures
dimensions2 = ds2.Rows, ds2.Columns
sampling2 = float(ds2.PixelSpacing[0]), float(ds2.PixelSpacing[1])
            if dimensions != dimensions2:
                # We cannot produce a volume if the dimensions do not match
                raise ValueError('Dimensions of slices do not match.')
if sampling != sampling2:
# We can still produce a volume, but we should notify the user
msg = 'Warning: sampling does not match.'
if self._showProgress is _progressCallback:
_progressBar.PrintMessage(msg)
else:
print(msg)
# Store previous
ds1 = ds2
        # Build a new info dataset by copying every element of the first
        # file except the pixel data itself (tag 0x7fe0, 0x0010)
info = pydicom.dataset.Dataset()
firstDs = self._datasets[0]
for key in firstDs.keys():
if key != (0x7fe0, 0x0010):
el = firstDs[key]
info.add_new(el.tag, el.VR, el.value)
# Finish calculating average distance
# (Note that there are len(L)-1 distances)
distance_mean = distance_sum / (len(L) - 1)
# Store information that is specific for the serie
self._shape = [len(L), ds2.Rows, ds2.Columns]
self._sampling = [distance_mean, float(ds2.PixelSpacing[0]),
float(ds2.PixelSpacing[1])]
# Store
self._info = info
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print("Expected a single argument: a directory with dicom files in it")
else:
adir = sys.argv[1]
t0 = time.time()
all_series = read_files(adir, None, False)
print("Summary of each series:")
for series in all_series:
print(series.description)
| 32.823872 | 80 | 0.587095 | 10,259 | 0.454945 | 0 | 0 | 1,614 | 0.071574 | 0 | 0 | 10,048 | 0.445588 |
3310fadeca0f260ba9d2c8cc23013446b8c2b5d0 | 1,216 | py | Python | tutorials/poplar/tut3_vertices/test/test_tut3.py | xihuaiwen/chinese_bert | 631afbc76c40b0ac033be2186e717885246f446c | [
"MIT"
] | null | null | null | tutorials/poplar/tut3_vertices/test/test_tut3.py | xihuaiwen/chinese_bert | 631afbc76c40b0ac033be2186e717885246f446c | [
"MIT"
] | null | null | null | tutorials/poplar/tut3_vertices/test/test_tut3.py | xihuaiwen/chinese_bert | 631afbc76c40b0ac033be2186e717885246f446c | [
"MIT"
] | null | null | null | # Copyright 2020 Graphcore Ltd.
from pathlib import Path
import pytest
# NOTE: The import below is dependent on 'pytest.ini' in the root of
# the repository
from examples_tests.test_util import SubProcessChecker
working_path = Path(__file__).parent
class TestBuildAndRun(SubProcessChecker):
def setUp(self):
        ''' Compile the "start here" and "complete" versions of the tutorial code '''
self.run_command("make clean", working_path, [])
self.run_command("make all", working_path, [])
def tearDown(self):
self.run_command("make clean", working_path, [])
@pytest.mark.category1
def test_run_start_here(self):
        ''' Check that the "start here" version of the tutorial code runs '''
self.run_command("./tut3_start_here",
working_path,
["Program complete"])
@pytest.mark.category1
def test_run_complete(self):
        ''' Check that the "complete" version of the tutorial code runs '''
self.run_command("../complete/tut3_complete",
working_path.parent.joinpath("complete"),
["Program complete",
"v2: {7,6,4.5,2.5}"])
| 32 | 81 | 0.626645 | 962 | 0.791118 | 0 | 0 | 611 | 0.502467 | 0 | 0 | 465 | 0.382401 |
33119609fa28c4ed894cccea42375ff194c70e59 | 4,719 | py | Python | mboxstats/mboxstats.py | ruettet/mailboxstatistics | ba1296ca441ab646fc92b9bcc5962a8ccf532f19 | [
"Apache-2.0"
] | null | null | null | mboxstats/mboxstats.py | ruettet/mailboxstatistics | ba1296ca441ab646fc92b9bcc5962a8ccf532f19 | [
"Apache-2.0"
] | null | null | null | mboxstats/mboxstats.py | ruettet/mailboxstatistics | ba1296ca441ab646fc92b9bcc5962a8ccf532f19 | [
"Apache-2.0"
] | null | null | null | import codecs
import locale
from mailbox import mbox
from re import sub
from re import compile
from re import IGNORECASE
from datetime import datetime
from collections import Counter
class MailboxStatistics(object):
def __init__(self):
""" Generic MailboxStatistics object that contains the statistics calculations. """
self.mailbox = []
def get_number_of_mails(self):
return len(self.mailbox)
def __get_from_values(self):
return [message['from'] for message in self.mailbox]
def get_from_value_counts(self):
"""
:return: A Counter object with the frequency of all possible 'from' values.
"""
return Counter(self.__get_from_values())
def __get_to_values(self):
return [message['to'] for message in self.mailbox]
def get_to_value_counts(self):
"""
        :return: A Counter object with the frequency of all possible 'to' values.
"""
return Counter(self.__get_to_values())
def get_number_of_mails_per_hour(self):
"""
:return: A Counter object with the amount of mails received per hour.
"""
return Counter([message['sent'].strftime('%Y-%m-%d %H') for message in self.mailbox])
def get_number_of_mails_per_hour_of_day(self):
"""
:return: A Counter object with the total amount of mail per hour of the day [00 - 23].
"""
return Counter([message['sent'].strftime('%H') for message in self.mailbox])
def get_number_of_mails_per_day(self):
"""
:return: A Counter object with the amount of mail per day.
"""
return Counter([message['sent'].strftime('%Y-%m-%d') for message in self.mailbox])
def get_number_of_mails_per_weekday(self):
"""
        :return: A Counter object with the amount of mails per weekday [0 - 6], Monday being 0.
"""
return Counter([message['sent'].weekday() for message in self.mailbox])
def __get_subject_tokens(self):
return [token for message in self.mailbox for token in message['subject'].split()]
def get_subject_token_frequencies(self):
"""
:return: A Counter object with the frequencies of the (lowercased) tokens in the subject line.
"""
return Counter(self.__get_subject_tokens())
# TODO def get_subject_token_frequencies_by_from_values(self):
# TODO def get_igraph(self):
# TODO def get_igraph_edgelist(self):
# TODO def filter_mailbox_on_to_values(self, to_values=[], operator='or', strict=True):
# TODO def filter_mailbox_on_from_value(self, from_value=compile('', IGNORECASE)):
class OutlookMailboxStatistics(MailboxStatistics):
def __init__(self, path_to_mbox_file, mbox_file_encoding, mbox_file_datetime_locale, mbox_file_datetime_format):
""" Parses the text file that results from saving (multiple) messages in MS Outlook as text.
:param path_to_mbox_file: full path to mbox file
:param mbox_file_encoding: encoding of mbox file, for MS Outlook, this is typically latin1
:param mbox_file_datetime_locale: locale to be used for parsing the datetime field
:param mbox_file_datetime_format: format of the datetime string for parsing
"""
MailboxStatistics.__init__(self)
with codecs.open(path_to_mbox_file, 'r', mbox_file_encoding) as mailbox_file:
self.raw_messages = mailbox_file.read().split("From:\t")
for raw_message in self.raw_messages:
if len(raw_message.split('\n')) > 5:
message = {'from': raw_message.split('\n')[0].strip()}
to_line = compile('To:\t(.+?)\r\n').findall(raw_message)
message['to'] = [sub('[\'"]', '', item).strip()
for item in to_line[0].split(';')] if len(to_line) > 0 else []
locale.setlocale(locale.LC_ALL, mbox_file_datetime_locale)
message['sent'] = datetime.strptime(compile('Sent:\t(.+?)\r\n').findall(raw_message)[0],
mbox_file_datetime_format)
locale.resetlocale()
subject_line = compile('Subject:\t(.+?)\r\n').findall(raw_message)
message['subject'] = sub('(re|fw):', '', subject_line[0].lower()) if len(subject_line) > 0 else 'NA'
self.mailbox.append(message)
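# A hedged usage sketch (path, encoding, locale and datetime format are
# assumptions; use the values matching your Outlook export):
#
#   stats = OutlookMailboxStatistics('mails.txt', 'latin1', 'nl_NL',
#                                    '%A %d %B %Y %H:%M')
#   print(stats.get_number_of_mails())
#   print(stats.get_from_value_counts().most_common(5))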
# class GMailMailboxStatistics(MailboxStatistics):
# def __init__(self, path_to_mbox_file):
# """ Parses the mbox file that results from a gmail export.
# :param path_to_mbox_file: full path to the downloaded mbox file
# """
# MailboxStatistics.__init__(self)
# self.mailbox = mbox(path_to_mbox_file) | 43.293578 | 116 | 0.647171 | 4,184 | 0.886629 | 0 | 0 | 0 | 0 | 0 | 0 | 2,038 | 0.431871 |
331242fc9eb2f4e6ef5b97e6f92e0e0380cba84e | 3,787 | py | Python | utils.py | Project-VULMA/street-vulma | 9aa8170c2342926657189b399943c674a2ac7919 | [
"MIT"
] | null | null | null | utils.py | Project-VULMA/street-vulma | 9aa8170c2342926657189b399943c674a2ac7919 | [
"MIT"
] | null | null | null | utils.py | Project-VULMA/street-vulma | 9aa8170c2342926657189b399943c674a2ac7919 | [
"MIT"
] | null | null | null | from PIL import Image
import imagehash
from dotenv import load_dotenv
load_dotenv()
import google_streetview.api
import os
from shapely.geometry import shape, Polygon, Point
def get_point_photo(coords, download_folder):
    # Capture each point from four headings (0, 90, 180 and 270 degrees);
    # the four parameter sets only differed in 'heading', so build them in a loop
    for heading in ('0', '90', '180', '270'):
        # Define parameters for street view api
        params = [{
            'size': '640x640',  # max 640x640 pixels
            'location': coord,
            'fov': '90',
            'heading': heading,
            'pitch': '0',
            'key': os.getenv("GOOGLE_API_KEY")
        } for coord in coords]
        # Create a results object
        results = google_streetview.api.results(params)
        # Download images to a per-heading directory
        results.download_links(download_folder + "_" + heading)
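# A hedged usage sketch (the coordinate is made up; GOOGLE_API_KEY must be
# available in the environment / .env file):
#
#   get_point_photo(['41.1171,16.8719'], os.path.join('downloads', 'demo'))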
# NOTE: probably a bug while saving different polygons
def convert_to_linestring(data, progress_bar):
    features = data['features']
    j = 0
    progr = 0
    for feature in features:
        geometry = feature['geometry']
        coords = []
        string_coords = []
        multipoly = shape(geometry)
        progr += 1
        # .geoms is required to iterate a MultiPolygon on shapely >= 2.0
        for pol in multipoly.geoms:
            for i in list(pol.exterior.coords):
                point = (round(Point(i).y, 4), round(Point(i).x, 4))
                if point in coords:  # skip coordinates we have already seen
                    continue
                coords.append(point)
                string_coords.append('{},{}'.format(point[0], point[1]))
                j += 1
        progress_bar.UpdateBar((progr / len(features)) * 100)
        get_point_photo(string_coords, os.path.join('downloads', str(j)))
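# A hedged usage sketch (assumes `data` is a parsed GeoJSON FeatureCollection
# of MultiPolygons and `progress_bar` exposes a PySimpleGUI-style UpdateBar):
#
#   import json
#   with open('areas.geojson') as f:
#       data = json.load(f)
#   convert_to_linestring(data, progress_bar)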
def get_images_folder(root_folder):
return [
os.path.join(
os.getcwd(), f
) for f in os.listdir(
root_folder) if f.endswith("_0") or f.endswith("_90") or f.endswith("_180") or f.endswith("_270")]
def remove_duplicates(img_folder, img_format='.jpg'):
# Get images in folder
images = [img for img in os.listdir(img_folder) if img.endswith(img_format)]
# Get a dict containing image's hashes
hashes = { image: imagehash.average_hash(Image.open(os.path.join(img_folder, image))) for image in images }
# Get a list of images to remove (TODO this should be refactored)
to_remove = []
for i in hashes.keys():
for j in hashes.keys():
if hashes[i] == hashes[j] and i != j:
if i in to_remove or j in to_remove:
continue
to_remove.append(j)
to_remove = list(set(to_remove))
for index in to_remove:
os.remove(os.path.join(img_folder, index))
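# A hedged usage sketch (the folder name is an assumption; see the
# per-heading download folders produced by get_point_photo above):
#
#   remove_duplicates('downloads_0', img_format='.jpg')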
# remove empty dirs
def remove_empty_dir(path):
    try:
        # os.rmdir only succeeds on empty directories, so check emptiness first
        if not os.listdir(path):
            os.rmdir(path)
    except OSError as e:
        print('unremoved - {}'.format(e))
def remove_empty_dirs(path):
for root, dirnames, filenames in os.walk(path, topdown=False):
for dirname in dirnames:
remove_empty_dir(os.path.realpath(os.path.join(root, dirname)))
| 27.845588 | 108 | 0.691841 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 955 | 0.252179 |
3314a88348ec3d3aad1f983f402dff14af15d941 | 1,044 | py | Python | 19/solution.py | studiosi/AoC2015 | 371c0ca7a48cb1e53c487f35bc3b3533cc9d8a7e | [
"MIT",
"Unlicense"
] | null | null | null | 19/solution.py | studiosi/AoC2015 | 371c0ca7a48cb1e53c487f35bc3b3533cc9d8a7e | [
"MIT",
"Unlicense"
] | null | null | null | 19/solution.py | studiosi/AoC2015 | 371c0ca7a48cb1e53c487f35bc3b3533cc9d8a7e | [
"MIT",
"Unlicense"
] | null | null | null | import re
changes = {}
chain = ""
lines = open('input.txt').readlines()
moreChanges = True
for l in lines:
l = l.strip()
if moreChanges and l != "":
x = l.split("=>")
if x[0].strip() not in changes.keys():
changes[x[0].strip()] = []
changes[x[0].strip()].append(x[1].strip())
elif (not moreChanges) and l != "":
chain += l
elif l == "":
moreChanges = False
# Part 1: apply every replacement at every match position and count the
# distinct molecules produced (a set handles deduplication)
distinct = set()
for k in changes.keys():
    p = re.compile(k)
    for i in changes[k]:
        for m in p.finditer(chain):
            distinct.add(chain[:m.start()] + i + chain[m.end():])
print(len(distinct))
# Part 2
x = "".join(open('input.txt').readlines())
molecule = chain[::-1]
reps = {m[1][::-1]: m[0][::-1] for m in re.findall(r'(\w+) => (\w+)', x)}
def rep(x):
return reps[x.group()]
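# Part 2 runs the productions in reverse: with the molecule and the rules
# both reversed, greedily rewrite each product back into its source until
# only the starting electron 'e' remains. Greedy reduction happens to
# terminate on Advent of Code inputs; for an arbitrary rule set it could
# dead-end, in which case a common (hedged) fallback is to shuffle the
# rules and restart.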
count = 0
while molecule != 'e':
molecule = re.sub('|'.join(reps.keys()), rep, molecule, 1)
count += 1
print(count) | 23.727273 | 73 | 0.509579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.071839 |
3315429c0aa928801a1243413b1efad540cf1405 | 1,102 | py | Python | core/migrations/0002_meetup.py | hatsem78/django_docker_nginex_nginx_gunicorn | 15cb7d2d9ecfd2a2f9bf054997a35903c2ee0ce3 | [
"MIT"
] | null | null | null | core/migrations/0002_meetup.py | hatsem78/django_docker_nginex_nginx_gunicorn | 15cb7d2d9ecfd2a2f9bf054997a35903c2ee0ce3 | [
"MIT"
] | null | null | null | core/migrations/0002_meetup.py | hatsem78/django_docker_nginex_nginx_gunicorn | 15cb7d2d9ecfd2a2f9bf054997a35903c2ee0ce3 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.16 on 2020-09-29 23:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Meetup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('date', models.DateTimeField(help_text='Date start Meetup')),
('description', models.TextField(blank=True)),
('count_beer', models.IntegerField(default=0, help_text='Count Beer')),
('maximum_temperature', models.FloatField(blank=True, default=0, help_text='Maximum Temperature')),
('count_participants', models.IntegerField(default=0, help_text='Count Participants')),
('direction', models.CharField(max_length=350)),
],
options={
'verbose_name_plural': 'Meetup',
},
),
]
| 36.733333 | 115 | 0.579855 | 1,008 | 0.914701 | 0 | 0 | 0 | 0 | 0 | 0 | 274 | 0.248639 |
33156382baf687b9fd221a48d7bcf67c2edeac93 | 4,124 | py | Python | src/attendance/card_reader.py | JakubBatel/Attendance-Recorder | 18d7d019d0284b7fcccf5bbbfc450ba70c922fc2 | [
"MIT"
] | null | null | null | src/attendance/card_reader.py | JakubBatel/Attendance-Recorder | 18d7d019d0284b7fcccf5bbbfc450ba70c922fc2 | [
"MIT"
] | null | null | null | src/attendance/card_reader.py | JakubBatel/Attendance-Recorder | 18d7d019d0284b7fcccf5bbbfc450ba70c922fc2 | [
"MIT"
] | null | null | null | from .resources.config import config
from .utils import reverse_endianness
from abc import ABC
from abc import abstractmethod
from logging import getLogger
from logging import Logger
from time import sleep
from typing import Final
import re
import serial
class ICardReader(ABC):
"""Class representation of card reader."""
@abstractmethod
def read_card(self, raise_if_no_data: bool = False) -> str:
"""Read one card and returns the data as a hex string.
Args:
raise_if_no_data: If True the NoDataException is raised if no data are present.
Returns:
Hex string representation of the data.
Raises:
NoDataException: If raise_if_no_data is set to True and no data was read.
"""
pass
class InvalidDataException(Exception):
"""Exception used when card data are not valid."""
def __init__(self, message):
"""Init exception with message.
Args:
message: Error message.
"""
super().__init__(message)
class NoDataException(Exception):
"""Exception used when no card data was read."""
def __init__(self, message):
"""Init exception with message.
Args:
message: Error message.
"""
super().__init__(message)
class CardReader(ICardReader):
"""Class representation of physical card reader.
It reads data from physical card reader using serial communication.
It is configured using config file (config.ini in resources folder).
"""
INIT_BYTE: Final = b'\x02'
CARD_SIZE: Final = 10
PORT: Final = config['CardReader']['devPath']
BAUDRATE: Final = int(config['CardReader']['baudrate'])
PARITY: Final = getattr(serial, config['CardReader']['parity'])
STOPBITS: Final = getattr(serial, config['CardReader']['stopbits'])
BYTESIZE: Final = getattr(serial, config['CardReader']['bytesize'])
TIMEOUT: Final = float(config['CardReader']['timeout'])
CARD_REGEX: Final = re.compile('^[0-9a-f]{10}$')
def __init__(self):
"""Init logger and create new Serial object for serial communication based on configuration."""
self.logger: Logger = getLogger(__name__)
self._port = serial.Serial(
port=CardReader.PORT,
baudrate=CardReader.BAUDRATE,
parity=CardReader.PARITY,
stopbits=CardReader.STOPBITS,
bytesize=CardReader.BYTESIZE,
timeout=CardReader.TIMEOUT
)
def read_card(self, raise_if_no_data: bool = False) -> str:
"""Read one card using serial communication.
        This method blocks until card data are read. If a serial read times
        out with no data, the read is retried 0.5 seconds later.
Args:
raise_if_no_data: If true the NoDataException is raised if no data are present.
Returns:
Hex string representation of card data.
Raises:
NoDataException: If raise_if_no_data is set to True and no data was read.
InvalidDataException: If card data are corrupted.
"""
while True:
byte = self._port.read()
if byte == b'':
self.logger.debug('No card data.')
if raise_if_no_data:
raise NoDataException('No card data was read.')
else:
sleep(0.5)
continue
if byte != CardReader.INIT_BYTE:
self.logger.debug('Invalid initial sequence.')
continue
data = self._port.read(CardReader.CARD_SIZE)
card: str = reverse_endianness(data.decode('ascii'))
if not CardReader.CARD_REGEX.match(card):
self.logger.debug('Incomplete or corrupted data.')
raise InvalidDataException(
'Card data are invalid - incomplete or corrupted data.')
self.logger.info(card + ' was read')
while self._port.read() != b'':
continue # consume all residual data
return card
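# A hedged usage sketch (assumes config.ini points at a valid serial device):
#
#   reader = CardReader()
#   card_id = reader.read_card()  # blocks until a card is presented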
| 31.242424 | 103 | 0.617119 | 3,855 | 0.934772 | 0 | 0 | 446 | 0.108147 | 0 | 0 | 1,872 | 0.453928 |
331608bfaa1bbeecfc3dd18a2fde05596fb4e203 | 2,754 | py | Python | medios/diarios/diario.py | miglesias91/dicenlosmedios | 1f8867cd09689006f35447ad8540359d9429b518 | [
"MIT"
] | 1 | 2020-10-20T20:50:51.000Z | 2020-10-20T20:50:51.000Z | medios/diarios/diario.py | miglesias91/dicenlosmedios | 1f8867cd09689006f35447ad8540359d9429b518 | [
"MIT"
] | 8 | 2021-03-19T01:17:28.000Z | 2022-03-02T14:57:48.000Z | medios/diarios/diario.py | miglesias91/dicenlosmedios | 1f8867cd09689006f35447ad8540359d9429b518 | [
"MIT"
] | null | null | null | import dateutil.parser  # the parser submodule must be imported explicitly
import yaml
import feedparser as fp
import newspaper as np
from medios.medio import Medio
from medios.diarios.noticia import Noticia
from bd.entidades import Kiosco
class Diario(Medio):
def __init__(self, etiqueta):
Medio.__init__(self, etiqueta)
self.noticias = []
self.feeds = {}
self.feed_noticias = ""
self.categorias = []
self.configurar()
def configurar(self):
with open('medios/diarios/config.yaml', 'r') as stream:
try:
config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
for diario in config['diarios']:
if diario['tag'] != self.etiqueta:
continue
if 'feed_noticias' in diario:
self.feed_noticias = diario['feed_noticias']
if 'categorias' in diario:
self.categorias = diario['categorias']
if 'feeds' in diario:
self.categorias = []
for feed in diario['feeds']:
self.feeds[feed['tag']] = feed['url']
self.categorias.append(feed['tag'])
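    # A hedged sketch of the expected medios/diarios/config.yaml shape
    # (keys taken from the parsing above; the values are made up):
    #
    #   diarios:
    #     - tag: ejemplo
    #       feed_noticias: https://example.com/rss
    #       categorias: [politica, economia]
    #     - tag: otro
    #       feeds:
    #         - tag: politica
    #           url: https://example.com/rss/politica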
def leer(self):
kiosco = Kiosco()
print("leyendo '" + self.etiqueta + "'...")
for tag, url_feed in self.feeds.items():
for url_noticia, fecha in self.reconocer_urls_y_fechas_noticias(url_feed=url_feed):
                if kiosco.bd.noticias.find(filter={'diario':self.etiqueta, 'url':url_noticia}).count() > 0:  # if the article (url) already exists, skip the download
continue
noticia = self.nueva_noticia(url=url_noticia, categoria=tag, diario=self.etiqueta)
if noticia == None:
continue
if noticia.fecha == None:
noticia.fecha = fecha
self.noticias.append(noticia)
def limpiar_texto(self, texto):
return texto
def reconocer_urls_y_fechas_noticias(self, url_feed):
urls_y_fechas = []
for entrada in fp.parse(url_feed).entries:
fecha = self.parsear_fecha(entrada)
urls_y_fechas.append((entrada.link, fecha))
return urls_y_fechas
def nueva_noticia(self, url, categoria, diario):
articulo = np.Article(url=url, language='es')
try:
articulo.download()
articulo.parse()
except:
return None
return Noticia(fecha=articulo.publish_date, url=url, diario=diario, categoria=categoria, titulo=articulo.title, texto=self.limpiar_texto(articulo.text))
def parsear_fecha(self, entrada):
return dateutil.parser.parse(entrada.published) | 34.860759 | 160 | 0.579521 | 2,570 | 0.933188 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.076253 |
3316542a2058418ad1159222b80cb45ab969c4ba | 1,230 | py | Python | yocto/poky/bitbake/lib/bb/ui/crumbs/hobcolor.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 16 | 2017-01-17T15:20:43.000Z | 2021-03-19T05:45:14.000Z | yocto/poky/bitbake/lib/bb/ui/crumbs/hobcolor.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 415 | 2016-12-20T17:20:45.000Z | 2018-09-23T07:59:23.000Z | yocto/poky/bitbake/lib/bb/ui/crumbs/hobcolor.py | jxtxinbing/ops-build | 9008de2d8e100f3f868c66765742bca9fa98f3f9 | [
"Apache-2.0"
] | 10 | 2016-12-20T13:24:50.000Z | 2021-03-19T05:46:43.000Z | #
# BitBake Graphical GTK User Interface
#
# Copyright (C) 2012 Intel Corporation
#
# Authored by Shane Wang <shane.wang@intel.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
class HobColors:
WHITE = "#ffffff"
PALE_GREEN = "#aaffaa"
ORANGE = "#eb8e68"
PALE_RED = "#ffaaaa"
GRAY = "#aaaaaa"
LIGHT_GRAY = "#dddddd"
SLIGHT_DARK = "#5f5f5f"
DARK = "#3c3b37"
BLACK = "#000000"
PALE_BLUE = "#53b8ff"
DEEP_RED = "#aa3e3e"
KHAKI = "#fff68f"
OK = WHITE
RUNNING = PALE_GREEN
WARNING = ORANGE
ERROR = PALE_RED
| 31.538462 | 73 | 0.662602 | 447 | 0.363415 | 0 | 0 | 0 | 0 | 0 | 0 | 870 | 0.707317 |