hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
142f6d81b4cdb9236e603b9d2db42775743f83e2 | 5,831 | py | Python | src/encoded/audit/dataset.py | Lattice-Data/encoded | 94bb4f7cb51970523715e0598d84699a28f90861 | [
"MIT"
] | null | null | null | src/encoded/audit/dataset.py | Lattice-Data/encoded | 94bb4f7cb51970523715e0598d84699a28f90861 | [
"MIT"
] | 10 | 2020-07-22T20:16:15.000Z | 2021-06-16T19:17:44.000Z | src/encoded/audit/dataset.py | Lattice-Data/encoded | 94bb4f7cb51970523715e0598d84699a28f90861 | [
"MIT"
] | null | null | null | from snovault import (
AuditFailure,
audit_checker,
)
from .formatter import (
audit_link,
path_to_text,
)
def audit_contributor_institute(value, system):
    """Audit that every contributor on a Dataset has an institute_name.

    Checks both the 'corresponding_contributors' and 'contributors' lists
    and yields one ERROR-level AuditFailure per list that contains users
    missing an institute_name.

    Args:
        value: the Dataset object (dict) being audited.
        system: audit system context (unused by this check).

    Yields:
        AuditFailure: 'no contributor institute' errors.
    """
    if value['status'] in ['deleted']:
        return

    def _missing_institute(prop):
        # uuids of users in value[prop] that lack an institute_name
        return [user.get('uuid')
                for user in value.get(prop, [])
                if not user.get('institute_name')]

    # The original duplicated this loop for each list; iterate instead.
    for prop in ['corresponding_contributors', 'contributors']:
        need_inst = _missing_institute(prop)
        if need_inst:
            detail = ('Dataset {} contains {} {} that do not have an institute_name.'.format(
                audit_link(path_to_text(value['@id']), value['@id']),
                prop,
                ', '.join(need_inst)
                )
            )
            yield AuditFailure('no contributor institute', detail, level='ERROR')
    return
def audit_contributor_email(value, system):
    """Yield an ERROR when a corresponding contributor has no email address."""
    if value['status'] in ['deleted']:
        return
    missing = []
    for contributor in value.get('corresponding_contributors', []):
        if not contributor.get('email'):
            missing.append(contributor.get('uuid'))
    if missing:
        dataset_link = audit_link(path_to_text(value['@id']), value['@id'])
        detail = ('Dataset {} contains corresponding_contributors {} that do not have an email.'.format(
            dataset_link,
            ', '.join(missing)
            )
        )
        yield AuditFailure('no corresponding email', detail, level='ERROR')
    return
def audit_contributor_lists(value, system):
    """Yield an ERROR when a user appears in both contributor lists."""
    if value['status'] in ['deleted']:
        return
    if 'contributors' in value and 'corresponding_contributors' in value:
        overlap = [user.get('uuid')
                   for user in value['corresponding_contributors']
                   if user in value.get('contributors')]
        if overlap:
            detail = ('Dataset {} contains duplicated contributors {}.'.format(
                audit_link(path_to_text(value['@id']), value['@id']),
                ', '.join(overlap)
                )
            )
            yield AuditFailure('duplicated contributors', detail, level='ERROR')
    return
def audit_dataset_no_raw_files(value, system):
    """Yield an ERROR when a Dataset has no available raw sequence files.

    A file counts as raw data when its primary @type is 'RawSequenceFile'
    and the file is actually available (no_file_available is falsy).
    """
    if value['status'] in ['deleted']:
        return
    # any() short-circuits on the first raw file; the original scanned the
    # whole list and compared with `!= True`.
    raw_data = any(
        f['@type'][0] == 'RawSequenceFile' and not f['no_file_available']
        for f in value.get('original_files', [])
    )
    if not raw_data:
        detail = ('Dataset {} does not contain any raw sequence files.'.format(
            audit_link(path_to_text(value['@id']), value['@id'])
            )
        )
        yield AuditFailure('no raw data', detail, level='ERROR')
    return
def audit_dataset_dcp_required_properties(value, system):
    """Flag Dataset properties the DCP requires or strongly encourages."""
    if value['status'] in ['deleted']:
        return
    dataset_link = audit_link(path_to_text(value['@id']), value['@id'])
    required = ['dataset_title', 'description', 'funding_organizations']
    for prop in required:
        if prop not in value:
            detail = ('Dataset {} does not have {}, required by the DCP.'.format(
                dataset_link,
                prop
                )
            )
            yield AuditFailure('missing DCP-required field', detail, level='ERROR')
    encouraged = ['corresponding_contributors', 'contributors']
    for prop in encouraged:
        if prop not in value:
            detail = ('Dataset {} does not have {}, strongly encouraged by the DCP.'.format(
                dataset_link,
                prop
                )
            )
            yield AuditFailure('missing DCP-encouraged field', detail, level='ERROR')
    return
def audit_experiment_released_with_unreleased_files(value, system):
    '''
    A released experiment should not have unreleased files
    '''
    if value['status'] != 'released':
        return
    if 'original_files' not in value:
        return
    # Statuses considered settled for a released dataset's files.
    settled = ('released', 'deleted', 'revoked', 'replaced', 'archived')
    for f in value['original_files']:
        if f['status'] in settled:
            continue
        detail = ('Released dataset {} contains file {} that has not been released.'.format(
            audit_link(path_to_text(value['@id']), value['@id']),
            audit_link(path_to_text(f['@id']), f['@id'])
            )
        )
        yield AuditFailure('mismatched file status', detail, level='INTERNAL_ACTION')
    return
# Maps audit names to the Dataset audit generator functions that need the
# 'original_files' / contributor frames embedded (see audit_experiment below).
function_dispatcher_with_files = {
    'audit_contributor_institute': audit_contributor_institute,
    'audit_contributor_email': audit_contributor_email,
    'audit_contributor_lists': audit_contributor_lists,
    'audit_dataset_no_raw_files': audit_dataset_no_raw_files,
    'audit_dataset_dcp_required_properties': audit_dataset_dcp_required_properties,
    'audit_released_with_unreleased_files': audit_experiment_released_with_unreleased_files
}
@audit_checker('Dataset',
               frame=['original_files',
                      'corresponding_contributors',
                      'contributors'])
def audit_experiment(value, system):
    """Run every registered Dataset audit and yield its failures."""
    # Iterate the dispatcher's values directly instead of indexing by key.
    for audit_function in function_dispatcher_with_files.values():
        yield from audit_function(value, system)
    return
| 35.993827 | 113 | 0.601784 | 0 | 0 | 5,082 | 0.871549 | 344 | 0.058995 | 0 | 0 | 1,854 | 0.317956 |
1431085ed453d7690fe84e1c128524430df1f728 | 5,990 | py | Python | psatlib/__init__.py | nie93/psatlib | 19947658fb4162f325e729eab086121947d8306a | [
"MIT"
] | null | null | null | psatlib/__init__.py | nie93/psatlib | 19947658fb4162f325e729eab086121947d8306a | [
"MIT"
] | 2 | 2018-06-19T20:53:13.000Z | 2018-11-01T20:11:45.000Z | psatlib/__init__.py | nie93/psatlib | 19947658fb4162f325e729eab086121947d8306a | [
"MIT"
] | null | null | null | """
psatlib -- An imported library designed for PSAT running with Python scripts.
Created by Zhijie Nie (nie@ieee.org)
Created on: 06/11/2018
Last Modified on: 10/01/2018
"""
__name__ = "psatlib"
__version__ = "0.1"
__author__ = "Zhijie Nie"
__author_email__ = "nie@ieee.org"
__copyright__ = "Copyright (c) 2018 Zhijie Nie"
__description__ = "psatlib is an imported library designed for PSAT running with Python scripts"
import sys

# Select the PSAT Python binding that matches the running interpreter.
# Only bindings for Python 2.5, 2.7 and 3.4 are shipped with PSAT; any
# other version leaves the API names undefined.
if sys.version_info[0] == 2:
    if sys.version_info[1] == 5:
        from psat_python25 import *
    elif sys.version_info[1] == 7:
        from psat_python27 import *
elif sys.version_info[0] == 3:
    from psat_python34 import *

# Shared error object passed to every PSAT API call.
error = psat_error()
# Sets customized PSAT solving environment variables
def set_psat_env(algo='NR', iter=20, failopt=0, flat=False, msgdisabled=False):
    """Configure the PSAT powerflow solution environment.

    Args:
        algo: solution algorithm name, e.g. 'NR' for Newton-Raphson.
        iter: maximum solver iterations.
        failopt: value forwarded to set_solution_failure_option.
        flat: when True, enable a flat voltage start.
        msgdisabled: when True, suppress engine messages.
    """
    commands = [
        r'SetSolutionAlgorithm:%s' % algo,
        r'SetSolutionParameter:MaxIterations;%d' % iter,
        r'SetSolutionParameter:SolutionTolerance;1',
    ]
    if flat:
        commands.append(r'SetSolutionParameter:FlatStart;FLAT')
    for command in commands:
        psat_command(command, error)
    set_solution_failure_option(failopt)
    disable_engine_messages(msgdisabled)
    return
# Returns True if the two components are at the same bus
def samebus(c1, c2):
    """Return True when components c1 and c2 share the same bus number."""
    # The comparison already yields a bool; no explicit if/else needed.
    return c1.bus == c2.bus
# Solves the powerflow if the case is not solved
def solve_if_not_solved(flat=False):
    """Run a Newton-Raphson powerflow if the loaded case is unsolved.

    Args:
        flat: when True, start the solution from a flat voltage profile.

    Returns:
        int: the PSAT solution status after the attempt (1 == solved).
    """
    # Status 0 means the imported case has no powerflow solution yet.
    if get_solution_status() == 0:
        if flat:
            psat_command(r'SetSolutionParameter:FlatStart;FLAT', error)
        psat_msg('Imported case is not solved, initializing powerflow solution using NR method.')
        psat_command(r'SetSolutionAlgorithm:NR',error)
        psat_command(r'Solve',error)
        if get_solution_status() != 1:
            psat_msg('Returned: Powerflow solution failed, initial powerflow case returned')
    return get_solution_status()
# Displays components information
def disp_comp_msg(ct):
    """Print a one-line summary for every component of type ct in 'mainsub'.

    Only load (ctype.ld) and generator (ctype.gen) components are rendered;
    other component types are skipped.
    """
    f = psat_comp_id(ct,1,'')
    more = get_next_comp('mainsub',f,error)
    while more == True:
        msg_str = None
        if f.type == ctype.ld:
            c = get_load_dat(f,error)
            msg_str = '%9s AT BUS #%3d (AREA #%2d), {P:%8.2f, Q:%8.2f}' \
                       %('LOAD', c.bus, c.area, c.mw, c.mvar)
        elif f.type == ctype.gen:
            c = get_gen_dat(f,error)
            msg_str = '%9s AT BUS #%3d, {P:%8.2f, Q:%8.2f} ST:%d' \
                       %('GENERATOR', c.bus, c.mw, c.mvar, c.status)
        # Fix: previously msg_str was unbound (UnboundLocalError) when the
        # component type was neither load nor generator.
        if msg_str is not None:
            psat_msg(msg_str)
        more = get_next_comp('mainsub',f,error)
# Returns the summation of fixed shunt reactive power injection (will be replaced by get_fxsh_prop())
def get_sum_fxshmvar(subsys):
    """Sum the mvar of every fixed shunt component in the given subsystem."""
    total_mvar = 0.
    comp = psat_comp_id(ctype.fxsh, 1, '')
    found = get_next_comp(subsys, comp, error)
    while found == True:
        total_mvar += get_fx_shunt_dat(comp, error).mvar
        found = get_next_comp(subsys, comp, error)
    return total_mvar
# Displays a list in PSAT message tab
def disp_list(l, transpose=0):
    """Write each row of list l to the PSAT message tab, optionally transposed."""
    rows = list(zip(*l)) if transpose else l
    for row in rows:
        psat_msg(str(row))
# Returns a list of components' ids of specified type whose bus numbers are in the list of bn
def get_comp_id(ct, bn):
    """Collect component ids of type ct attached to any bus number in bn."""
    if type(bn) != list:
        bn = [bn]
    wanted = set(bn)
    matches = []
    comp = psat_comp_id(ct, 1, '')
    more = get_next_comp('mainsub', comp, error)
    while more == True:
        # NOTE(review): the same psat_comp_id object is appended on every
        # match; if get_next_comp mutates it in place all entries alias the
        # final state -- confirm against the PSAT binding (original did the
        # same).
        if wanted & {comp.bus, comp.bus2, comp.bus3, comp.bus4}:
            matches.append(comp)
        more = get_next_comp('mainsub', comp, error)
    return matches
# Returns a list of psat component data according to psat_comp_id
def get_comp_dat(cid):
    """Fetch the PSAT data object for each component id in cid.

    Args:
        cid: a psat_comp_id or a list of them.

    Returns:
        list: data objects in the same order as cid; ids with an
        unrecognized type are skipped (same as the original elif chain).
    """
    # Dispatch table replaces an 18-branch if/elif chain.  Built per call
    # because ctype and the getters come from the PSAT binding's star import.
    getters = {
        ctype.bs: get_bus_dat,
        ctype.gen: get_gen_dat,
        ctype.ld: get_load_dat,
        ctype.fxsh: get_fix_shunt_dat,
        ctype.swsh: get_sw_shunt_dat,
        ctype.ln: get_line_dat,
        ctype.fxtr: get_fx_trans_dat,
        ctype.ultc: get_2w_trans_dat,
        ctype.twtr: get_3w_trans_dat,
        # fxsc and vrsc intentionally share the same getter (as original).
        ctype.fxsc: get_fx_sercomp_dat,
        ctype.vrsc: get_fx_sercomp_dat,
        ctype.stcpr: get_stcpr_dat,
        ctype.dcbs: get_dcbus_dat,
        ctype.cnvrtr: get_converter_dat,
        ctype.vsc: get_vsc_dat,
        ctype.dcln: get_dcline_dat,
        ctype.dcbrkr: get_dcbrkr_dat,
        ctype.zseq: get_z_seq_coupling_dat,
    }
    if type(cid) != list:
        cid = [cid]
    c = []
    for comp in cid:
        getter = getters.get(comp.type)
        if getter is not None:
            c.append(getter(comp, error))
    return c
# Returns a list of flow on transmission lines
def get_branch_flow(brnum):
    # TODO: unimplemented stub -- 'bf' is built but never returned.
    bf = []
    return
# Returns a list of flow on fixed transformers
def get_fxtr_flow(brnum):
    # TODO: unimplemented stub.
    return
# Returns a list of flow on adjustable (two-winding) transformers
def get_ultc_flow(brnum):
    # TODO: unimplemented stub.
    return
# Returns a list of flow on three-winding transformers
def get_twtr_flow(brnum):
    # TODO: unimplemented stub.
    return
# Generates TSAT files for transient stability analysis (psb,dyr,swi,mon)
def generate_tsa(path,psb,dyr,swi,mon):
    # TODO: unimplemented stub.
    return
# Generates VSAT files for transient stability analysis (psb,dyr,swi,mon)
def generate_vsa():
    # TODO: unimplemented stub.
    return
| 34.825581 | 101 | 0.635225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,699 | 0.283639 |
1431e9db188b5d696eaec5acf37de7154e8339f9 | 3,001 | py | Python | webcam_OrbitalTimmer.py | JarvisSan22/Python_webcam | 1c90e9173257f134f34cf094b947d99a745ab2ae | [
"MIT"
] | null | null | null | webcam_OrbitalTimmer.py | JarvisSan22/Python_webcam | 1c90e9173257f134f34cf094b947d99a745ab2ae | [
"MIT"
] | null | null | null | webcam_OrbitalTimmer.py | JarvisSan22/Python_webcam | 1c90e9173257f134f34cf094b947d99a745ab2ae | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import math
import datetime
from datetime import timedelta as Delta
# Display size constants -- appear unused except by a commented-out resize
# further down; verify before removing.
h=300
w=300
# Capture from the default webcam (device 0).
cap = cv2.VideoCapture(0)
# Center of the "sun" circle (pixels) and its radius.
SUN_LOC=(200,70)
SUN_RSIZE=20
# Initial gap between the sun's edge and a satellite's orbit.
ORBITAL_R=10
def Orbiral(frame, Centerloc, orbit_r, size_r, phi, color):
    """Draw a filled circle of radius size_r at angle phi (degrees) on an
    orbit of radius orbit_r around Centerloc, and return the frame."""
    angle = np.deg2rad(phi)
    cx = Centerloc[0] + int(orbit_r * np.cos(angle))
    cy = Centerloc[1] + int(orbit_r * np.sin(angle))
    return cv2.circle(frame, (cx, cy), size_r, color, -1)
# Satellite drawing radius, current orbital angle, and per-frame increment.
ORBITAL_RSIZE=3
ORBITAL_PHI=0
ORBITAL_DPHI=1 #0.5deg delta
dr=(SUN_RSIZE+ORBITAL_R) #*(orbitdphi) #*np.pi/180)
orbitloc=(SUN_LOC[0],SUN_LOC[1]+SUN_RSIZE+ORBITAL_R)
# Count of satellites spawned so far.
satsn=0
#2021/05/06 Window priority
print(cv2.WND_PROP_FULLSCREEN)
cv2.namedWindow("Frame", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Frame",cv2.WND_PROP_FULLSCREEN,0)
Start_Time=datetime.datetime.today()
# Seconds between spawning new satellites.
Delta_T=60
#Sat_Time_Space=Delta(minutes=1)
Sat_Time_Space=Delta(seconds=Delta_T)
# satellite index -> {"Time", "Phi_Offset", "Sat_Radius"}
Sat_dic={}
Poff=180
Roff=0
#mins=time.minute
# Main capture/draw loop: spawn one satellite every Delta_T seconds and
# animate all satellites around the "sun"; press 'q' to quit.
while True:
    _, frame = cap.read()
    frame_time=datetime.datetime.today()
    if frame_time >= Sat_Time_Space+Start_Time:
        # Interval elapsed: register a new satellite at the next offset.
        Start_Time=frame_time
        dr=(SUN_RSIZE+ORBITAL_R)
        Sat_dic[satsn]={"Time":Start_Time,"Phi_Offset":Poff,"Sat_Radius":dr}
        print("New Sat added")
        print(Sat_dic[satsn])
        Poff-=30
        satsn+=1
        if Poff <=-180:
            # Ring is full: reset the offset and widen the next orbit.
            Poff=180
            ORBITAL_R+=5
        print(frame_time)
    #frame = cv2.resize(frame,(h,w))
    if(frame is None):
        continue
    # Draw the sun (filled red circle in BGR).
    frame = cv2.circle(frame,SUN_LOC,SUN_RSIZE, (0,0,250), -1)
    #Satn to frame
    # frame=cv2.putText(frame,str(satsn),(SUN_LOC[0]-15,SUN_LOC[1]+15),
    #                   cv2.FONT_HERSHEY_PLAIN,3,(255,255,255))
    if satsn:
        for n,sat in Sat_dic.items():
            frame=Orbiral(frame,SUN_LOC,sat["Sat_Radius"],ORBITAL_RSIZE,ORBITAL_PHI-sat["Phi_Offset"],(0,0,255))
            #for offphi in range(-180,180,satsn):
            #if n==satsn:
            #    for R_OFF, fadeSeconds in zip(np.linspace(ORBITAL_RSIZE,1,ORBITAL_RSIZE),np.linspace(0,Delta//2,int(ORBITAL_RSIZE))):
            #        if frame_time >= Sat_Time_Space+fadeSeconds:
            #            print("Fade:",R_OFF)
            #            frame=Orbiral(frame,SUN_LOC,sat["Sat_Radius"],ORBITAL_RSIZE-int(R_OFF),ORBITAL_PHI-sat["Phi_Offset"],(255,0,255))
            #   else:
            #frame=Orbiral(frame,SUN_LOC,sat["Sat_Radius"],ORBITAL_RSIZE,ORBITAL_PHI-sat["Phi_Offset"],(0,0,255))
    # Advance the shared orbital angle and wrap at a full revolution.
    ORBITAL_PHI+=ORBITAL_DPHI
    if ORBITAL_PHI>=360:
        ORBITAL_PHI=0
    #Line
    #img = cv2.line(frame,logoloc,orbitloc,(255,0,0),5)
    cv2.imshow('Frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the VideoCapture object
cap.release()
cv2.destroyAllWindows()
| 28.311321 | 139 | 0.618794 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,014 | 0.336095 |
1433a45ce0d16e3b1c57cbfb15267e409f99e3b0 | 2,913 | py | Python | spyder/plugins/outlineexplorer/tests/test_outline_explorer_editor.py | suokunlong/spyder | 2d5d450fdcef232fb7f38e7fefc27f0e7f704c9a | [
"MIT"
] | 3 | 2019-09-27T21:00:00.000Z | 2021-03-07T23:28:32.000Z | spyder/plugins/outlineexplorer/tests/test_outline_explorer_editor.py | jastema/spyder | 0ef48ea227c53f57556cd8002087dc404b0108b0 | [
"MIT"
] | 3 | 2020-10-13T21:15:23.000Z | 2020-10-13T21:15:24.000Z | spyder/plugins/outlineexplorer/tests/test_outline_explorer_editor.py | jastema/spyder | 0ef48ea227c53f57556cd8002087dc404b0108b0 | [
"MIT"
] | 2 | 2021-04-30T01:18:22.000Z | 2021-09-19T06:31:42.000Z | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""Tests for editor and outline explorer interaction."""
# Test library imports
import pytest
# Local imports
from spyder.plugins.outlineexplorer.widgets import OutlineExplorerWidget
from spyder.plugins.outlineexplorer.editor import OutlineExplorerProxyEditor
from spyder.plugins.outlineexplorer.api import OutlineExplorerData
from spyder.utils.qthelpers import qapplication
from spyder.plugins.editor.widgets.codeeditor import CodeEditor
class testBlock():
    """Minimal stand-in for a Qt text block: only blockNumber() is needed."""

    def __init__(self, line_number):
        # Qt block numbers are zero-based; editor lines are one-based.
        self._line = line_number - 1

    def blockNumber(self):
        """Return the zero-based block (line) number."""
        return self._line
text = ('# test file\n'
'class a():\n'
' self.b = 1\n'
' print(self.b)\n'
' \n'
' def some_method(self):\n'
' self.b = 3')
expected_oe_list = [
OutlineExplorerData(
testBlock(2), 'class a():', 0,
OutlineExplorerData.CLASS, 'a'),
OutlineExplorerData(
testBlock(6), ' def some_method(self):', 4,
OutlineExplorerData.FUNCTION, 'some_method')
]
@pytest.fixture()
def editor_outline_explorer_bot():
    """setup editor and outline_explorer.

    Returns a (editor, outline explorer widget, outline proxy) tuple with
    the sample `text` already loaded and the proxy wired to the widget.
    """
    # A QApplication must exist before any widget is created.
    app = qapplication()
    editor = CodeEditor(parent=None)
    editor.setup_editor(language='Python')
    outlineexplorer = OutlineExplorerWidget(editor)
    editor.set_text(text)
    editor.oe_proxy = OutlineExplorerProxyEditor(editor, "test.py")
    # update/clear are False so the tree keeps its initial state for asserts.
    outlineexplorer.set_current_editor(editor.oe_proxy,
                                       update=False,
                                       clear=False)
    outlineexplorer.setEnabled(True)
    return editor, outlineexplorer, editor.oe_proxy
def test_editor_outline_explorer(editor_outline_explorer_bot):
    """Test basic interaction between outline_explorer and editor."""
    editor, outline_explorer, oe_proxy = editor_outline_explorer_bot
    assert outline_explorer
    # Assert proxy
    assert oe_proxy == outline_explorer.treewidget.current_editor
    assert len(outline_explorer.treewidget.editor_items) == 1
    # Assert root item
    file_root = outline_explorer.treewidget.editor_items[id(editor)]
    assert file_root.text(0) == oe_proxy.fname
    # Assert OEData
    oedata = oe_proxy.outlineexplorer_data_list()
    for left, right in zip(oedata, expected_oe_list):
        a = right.__dict__
        b = left.__dict__
        # 'color' is assigned by the widget at runtime; neutralize it, and
        # compare blocks by number only, before comparing the full dicts.
        b['color'] = None
        assert a['block'].blockNumber() == b['block'].blockNumber()
        a['block'] = None
        b['block'] = None
        assert a == b
    # Assert Treewidget Items
    items = outline_explorer.treewidget.get_items()
    oedata_texts = [oe.def_name for oe in expected_oe_list]
    for item, oe_item in zip(items, oedata_texts):
        assert item.text(0) == oe_item
# Allow running this test module directly, outside a pytest invocation.
if __name__ == "__main__":
    pytest.main()
1435d19388b06f46cd61f4a44ed68dc45546446f | 3,105 | py | Python | challonge/attachment.py | Parapheen/achallonge | 8f05a0832cd0209536475aa9e91a5e71071512a0 | [
"MIT"
] | 3 | 2017-03-27T18:02:02.000Z | 2021-09-20T02:44:21.000Z | challonge/attachment.py | Parapheen/achallonge | 8f05a0832cd0209536475aa9e91a5e71071512a0 | [
"MIT"
] | 6 | 2017-05-28T11:08:25.000Z | 2021-09-20T03:12:36.000Z | challonge/attachment.py | Parapheen/achallonge | 8f05a0832cd0209536475aa9e91a5e71071512a0 | [
"MIT"
] | 8 | 2017-03-30T18:35:13.000Z | 2021-08-01T10:53:54.000Z | from .helpers import FieldHolder, assert_or_raise
class Attachment(metaclass=FieldHolder):
    """ representation of a Challonge match attachment """

    # Fields exposed by the FieldHolder metaclass.  NOTE(review): the
    # metaclass presumably generates the `_id` / `_match_id` attributes and
    # the `_get_from_dict` helper used below -- confirm in .helpers.
    _fields = ['id', 'match_id', 'user_id', 'description',
               'url', 'original_file_name', 'created_at',
               'updated_at', 'asset_file_name', 'asset_content_type',
               'asset_file_size', 'asset_url']

    def __init__(self, connection, json_def, **kwargs):
        self.connection = connection
        self._refresh_from_json(json_def)
        self._tournament_id = kwargs.get('tournament_id', 0)

    def _refresh_from_json(self, json_def):
        # Update our fields from an API payload, if it contains one.
        if 'match_attachment' in json_def:
            self._get_from_dict(json_def['match_attachment'])

    @staticmethod
    def prepare_params(asset, url: str, description: str):
        """Build the request params dict; asset takes precedence over url."""
        assert_or_raise(asset is not None or url is not None or description is not None,
                        ValueError,
                        'One of the following must not be None: asset, url, description')
        params = {}
        if asset is not None:
            params.update({'asset': asset})
        elif url is not None:
            params.update({'url': url})
        if description is not None:
            params.update({'description': description})
        return params

    async def _change(self, asset=None, url: str = None, description: str = None):
        # PUT the changed fields, then refresh this object from the response.
        params = Attachment.prepare_params(asset, url, description)
        res = await self.connection('PUT',
                                    'tournaments/{}/matches/{}/attachments/{}'.format(self._tournament_id, self._match_id, self._id),
                                    'match_attachment',
                                    **params)
        self._refresh_from_json(res)

    async def change_url(self, url: str, description: str = None):
        """ change the url of that attachment

        |methcoro|

        Args:
            url: url you want to change
            description: *optional* description for your attachment

        Raises:
            ValueError: url must not be None
            APIException

        """
        await self._change(url=url, description=description)

    async def change_text(self, text: str):
        """ change the text / description of that attachment

        |methcoro|

        Args:
            text: content you want to add / modify (description)

        Raises:
            ValueError: text must not be None
            APIException

        """
        await self._change(description=text)

    # Alias kept for API compatibility.
    change_description = change_text

    async def change_file(self, file_path: str, description: str = None):
        """ change the file of that attachment

        |methcoro|

        Warning:
            |unstable|

        Args:
            file_path: path to the file you want to add / modify
            description: *optional* description for your attachment

        Raises:
            ValueError: file_path must not be None
            APIException

        """
        with open(file_path, 'rb') as f:
            # Fix: the description argument was previously accepted but
            # silently dropped; forward it like change_url does.
            await self._change(asset=f.read(), description=description)
1436e076256d16b106fecbd6efa5e663549ff699 | 737 | py | Python | tests/cupy_tests/array_api_tests/test_sorting_functions.py | pri1311/cupy | 415be9536582ba86dbbb3e98bc14db4877a242c6 | [
"MIT"
] | null | null | null | tests/cupy_tests/array_api_tests/test_sorting_functions.py | pri1311/cupy | 415be9536582ba86dbbb3e98bc14db4877a242c6 | [
"MIT"
] | null | null | null | tests/cupy_tests/array_api_tests/test_sorting_functions.py | pri1311/cupy | 415be9536582ba86dbbb3e98bc14db4877a242c6 | [
"MIT"
] | 1 | 2022-03-21T20:19:12.000Z | 2022-03-21T20:19:12.000Z | import pytest
from cupy import array_api as xp
@pytest.mark.parametrize(
    "obj, axis, expected",
    [
        ([0, 0], -1, [0, 1]),
        ([0, 1, 0], -1, [1, 0, 2]),
        ([[0, 1], [1, 1]], 0, [[1, 0], [0, 1]]),
        ([[0, 1], [1, 1]], 1, [[1, 0], [0, 1]]),
    ],
)
@pytest.mark.skipif(
    # Unconditionally skipped until the upstream issue is resolved:
    # https://github.com/cupy/cupy/issues/5701
    True, reason="Sorting functions miss arguments kind and order")
def test_stable_desc_argsort(obj, axis, expected):
    """
    Indices respect relative order of a descending stable-sort

    See https://github.com/numpy/numpy/issues/20778
    """
    x = xp.asarray(obj)
    out = xp.argsort(x, axis=axis, stable=True, descending=True)
    assert xp.all(out == xp.asarray(expected))
1437f6b2872552388cca334ea76cadd246a03878 | 6,406 | py | Python | drip/api/subscribers.py | willjohnson/drip-python | 13e2836b5acb7a822b0e1f9884e3249d37734cef | [
"MIT"
] | 5 | 2019-04-11T19:32:14.000Z | 2020-08-03T21:58:55.000Z | drip/api/subscribers.py | willjohnson/drip-python | 13e2836b5acb7a822b0e1f9884e3249d37734cef | [
"MIT"
] | 7 | 2019-03-19T03:54:49.000Z | 2021-12-09T21:53:28.000Z | drip/api/subscribers.py | willjohnson/drip-python | 13e2836b5acb7a822b0e1f9884e3249d37734cef | [
"MIT"
] | 1 | 2021-01-11T21:51:51.000Z | 2021-01-11T21:51:51.000Z | from typing import TYPE_CHECKING
from drip.utils import json_list, json_object, raise_response
if TYPE_CHECKING:
from requests import Session
class Subscribers:
    """Subscriber endpoints of the Drip REST API.

    Mixin expecting a `session` attribute that exposes HTTP verb methods
    (get/post/delete) bound to the API base URL.
    """

    session: 'Session'

    @json_object('subscribers')
    def create_or_update_subscriber(self, email, marshall=True, **options):
        """
        create_or_update_subscriber(email,
            new_email=None, user_id=None, time_zone='Etc/UTC', lifetime_value=None, ip_address=None,
            tags=None, remove_tags=None, prospect=True, base_lead_score=30, eu_consent=None, eu_consent_message=None, marshall=True)

        Update a subscriber or create it if it doesn't exist.

        Arguments:
            email {str} -- Person's email address

        Call Options:
            new_email {str} -- Update the subscriber's email address, taking precedence over 'email' while creating (default: {None})
            user_id {str} -- A custom unique identifier (default: {None})
            time_zone {str} -- Timezone (default: {'Etc/UTC'})
            lifetime_value {int} -- LifeTime Value, in cents (default: {None})
            ip_address {str} -- IP Address (default: {None})
            custom_fields {Mapping[str, Any]} -- Dictionary of custom fields and their values (default: {None})
            tags {Iterable[str]} -- List of tags to apply (default: {None})
            remove_tags {Iterable[str]} -- List of tags to remove (default: {None})
            prospect {bool} -- Person is a Prospect (default: {True})
            base_lead_score {int} -- Starting leadscore (default: {0})
            eu_consent {str} -- Status of consent for GDPR: granted, denied (default: {None})
            eu_consent_message {str} -- Message that was consented to (default: {None})

        Other Keyword Arguments:
            marshall {bool} -- Unpack the Response object (default: {True})

        Returns:
            Response -- API Response, or the marshalled Subscriber object
        """
        payload = {
            'email': email,
        }
        payload.update(options)
        return self.session.post('subscribers', json={'subscribers': [payload, ]})

    @json_list('subscribers')
    def subscribers(self, marshall=True, **params):
        """
        subscribers(page=0, per_page=100, marshall=True)

        List all subscribers. Supports pagination and filtering.

        Call Parameters:
            page {int} -- Page to get, or 0 for all pages (default: {0})
            per_page {int} -- Number of objects to get on each page (default: {100})
            tags {Iterable[str]} -- List of tags to filter by (default: {None})
            subscribed_before {str} -- Include only people created before this date, Eg. "2016-01-01T00:00:00Z" (default: {None})
            subscribed_after {str} -- Include only people after before this date, Eg. "2016-01-01T00:00:00Z" (default: {None})

        Other Keyword Arguments:
            marshall {bool} -- Unpack the Response object (default: {True})

        Returns:
            Response -- API Response, or the marshalled List of Subscriber objects
        """
        return self.session.get('subscribers', params=params)

    @json_object('subscribers')
    def subscriber(self, email, marshall=True):
        """
        subscriber(email, marshall=True)

        Get a subscriber.

        Arguments:
            email {str} -- Person's email address

        Other Keyword Arguments:
            marshall {bool} -- Unpack the Response object (default: {True})

        Returns:
            Response -- API Response, or the marshalled Subscriber object
        """
        return self.session.get(f'subscribers/{email}')

    @json_object('subscribers')
    def unsubscribe(self, email, marshall=True, **params):
        """
        unsubscribe(email, campaign_id=None, marshall=True)

        Unsubscribe a subscriber from all campaigns, or optionally one specific campaign.

        Arguments:
            email {str} -- Person's email address

        Call Parameters:
            campaign_id {int} -- Campaign from which to remove the subscriber (default: {None})

        Other Keyword Arguments:
            marshall {bool} -- Unpack the Response object (default: {True})

        Returns:
            Response -- API Response, or the marshalled Subscriber object
        """
        return self.session.post(f'subscribers/{email}/remove', params=params)

    @json_object('subscribers')
    def unsubscribe_from_all(self, email, marshall=True):
        """
        unsubscribe_from_all(email, marshall=True)

        Unsubscribe a subscriber from all campaigns.

        Arguments:
            email {str} -- Person's email address

        Other Keyword Arguments:
            marshall {bool} -- Unpack the Response object (default: {True})

        Returns:
            Response -- API Response, or the marshalled Subscriber object
        """
        return self.session.post(f'subscribers/{email}/unsubscribe_all')

    @raise_response()
    def delete_subscriber(self, email):
        """
        delete_subscriber(email)

        Delete a subscriber.

        Arguments:
            email {str} -- Person's email address

        Returns:
            Response -- API Response
        """
        return self.session.delete(f'subscribers/{email}')

    # NOTE: a stale, fully commented-out duplicate implementation of
    # subscribers() that previously trailed this class has been removed.
143877dd2c390f5b0dc8e5729d0cf5ded4ef1c91 | 2,154 | py | Python | ldapauth/main/main.py | trutty/python-ldap | d656548641d2d3e45e868bb6ce1585cffe3c6848 | [
"CC0-1.0"
] | null | null | null | ldapauth/main/main.py | trutty/python-ldap | d656548641d2d3e45e868bb6ce1585cffe3c6848 | [
"CC0-1.0"
] | null | null | null | ldapauth/main/main.py | trutty/python-ldap | d656548641d2d3e45e868bb6ce1585cffe3c6848 | [
"CC0-1.0"
] | null | null | null | from flask import Flask, request
from flask_httpauth import HTTPBasicAuth
from auth_handler import AuthHandler
from cache import Cache
from os import environ
from yaml import safe_load
import logging
from connection_provider import ConnectionProvider
# init logging
logging.basicConfig(format='[%(asctime)s] [%(levelname)s] %(message)s', level=logging.DEBUG)
# Init flask app
app = Flask(__name__)
auth = HTTPBasicAuth()
# Basic cache: successful logins are remembered for 8 hours.
CACHE_KEY_EXPIRATION_SECONDS = 60 * 60 * 8  # 8 hours
cache = Cache(CACHE_KEY_EXPIRATION_SECONDS)
# Init LDAP config -- the mounted config file maps config keys to LDAP
# parameter sets and lists the servers under 'ldapServers'.
logging.info("Reading config.yaml")
with open("/config/config.yaml", 'r') as stream:
    config = safe_load(stream)
# Create the AuthHandler instance; manager credentials come from the
# environment (a missing variable raises KeyError at startup by design).
logging.info("Initializing authentication handler")
authHandler = AuthHandler(
    environ['LDAP_MANAGER_BINDDN'],
    environ["LDAP_MANAGER_PASSWORD"],
    ConnectionProvider(config['ldapServers'])
)
@auth.verify_password
def login(username, password):
    """Validate HTTP Basic credentials against LDAP, with a local cache.

    The LDAP configuration to use is selected by the 'Ldap-Config-Key'
    request header.

    Returns:
        bool: True when authentication succeeds, False otherwise.
    """
    # Check if username or password is empty
    if not username or not password:
        return False

    # Get lookup key for config.  Fix: a missing header or unknown key
    # previously raised KeyError (HTTP 500); treat both as auth failure.
    ldap_config_key = request.headers.get('Ldap-Config-Key')
    if not ldap_config_key or ldap_config_key not in config:
        return False

    # Check if authentication was cached
    if cache.validate(username, ldap_config_key, password):
        logging.info("[user=%s, config=%s] authenticated from cache", username, ldap_config_key)
        return True

    # Lookup LDAP config
    ldapParameters = config[ldap_config_key]

    # Validate user
    if authHandler.validate(username, password, ldap_config_key, ldapParameters):
        # Add successful authentication to cache
        cache.set(username, ldap_config_key, password)
        return True

    return False
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
@auth.login_required
def index(path):
    """Catch-all endpoint: reaching it means Basic auth succeeded."""
    return "LDAP Authentication", 200, []
# health endpoint
@app.route('/healthz')
def healthz():
    """Liveness probe: 200 when the core singletons are initialized."""
    healthy = cache is not None and authHandler is not None
    if healthy:
        return "healthy", 200
    return "not healthy", 503
# Main
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger on 0.0.0.0 --
    # confirm this entry point is only used for local development.
    app.run(host='0.0.0.0', port=9000, debug=True)
| 26.268293 | 96 | 0.716806 | 0 | 0 | 0 | 0 | 1,131 | 0.52507 | 0 | 0 | 647 | 0.300371 |
1438db051f7ec3e90a15d96cb90dfbe4c91397a1 | 4,038 | py | Python | Square_game/Square_game_play.py | samutams/Square_game | 8067e856075b8d68eaa3cefc73fe818d545c85b0 | [
"MIT"
] | null | null | null | Square_game/Square_game_play.py | samutams/Square_game | 8067e856075b8d68eaa3cefc73fe818d545c85b0 | [
"MIT"
] | null | null | null | Square_game/Square_game_play.py | samutams/Square_game | 8067e856075b8d68eaa3cefc73fe818d545c85b0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
class Square_game():
    """Simple pygame maze game: steer the player square to the food while
    avoiding the maze walls.
    """
    # Display/render colors (RGB).
    display_col = (255, 255, 255)
    player_col = (0, 0, 0)
    text_col = (255, 0, 0)
    food_col = (0, 0, 255)
    maze_col = (50, 205, 55)
    # Window dimensions and grid cell size, in pixels.
    dis_width = 400
    dis_height = 400
    square_block = 20
    # Game-state flags (reset at the start of each gameLoop()).
    game_over = False
    game_close = False
    game_win = False

    def __init__(self):
        # Build the actors and open the display window.
        self.player = Player(square_block = self.square_block, dis_width = self.dis_width, dis_height = self.dis_height)
        self.maze = Maze(dis_width = self.dis_width, dis_height = self.dis_height, square_block = self.square_block)
        self.food = Food(dis_width = self.dis_width, dis_height = self.dis_height)
        self.dis = pygame.display.set_mode((self.dis_width, self.dis_height))

    def message(self, msg, color, dis):
        """Render a one-line text message near the center of the display."""
        font_style = pygame.font.SysFont(None, 20)
        mesg = font_style.render(msg, True, color)
        dis.blit(mesg, [self.dis_width/4, self.dis_height/2])

    def eval_(self):
        """Update end-of-round flags: wall collision loses, food wins."""
        for i in self.maze.maze_list:
            if i[0] == self.player.x and i[1] == self.player.y:
                self.game_close = True
        if self.player.x == self.food.foodx and self.player.y == self.food.foody:
            self.game_close = True
            self.game_win = True

    def gameLoop(self):
        """Run the main event/draw loop until the player quits."""
        self.game_over = False
        self.game_close = False
        self.game_win = False
        clock = pygame.time.Clock()
        while not self.game_over:
            # End-of-round screen: wait for Q (quit) or P (play again).
            while self.game_close == True:
                if self.game_win == True:
                    self.dis.fill(self.display_col)
                    self.message("You Won! Press Q-Quit or P-Play Again", self.text_col, dis = self.dis)
                    pygame.display.update()
                if self.game_win == False:
                    self.dis.fill(self.display_col)
                    self.message("You Lost! Press Q-Quit or P-Play Again", self.text_col, dis = self.dis)
                    pygame.display.update()
                for event in pygame.event.get():
                    if event.type == pygame.KEYDOWN:
                        if event.key == pygame.K_q:
                            self.game_over = True
                            self.game_close = False
                        if event.key == pygame.K_p:
                            # NOTE(review): replay restarts via a recursive
                            # gameLoop() call, so nested frames accumulate
                            # with each replay — consider a loop-based restart.
                            self.player.reset()
                            self.gameLoop()
            # In-round input handling: arrow keys move, window close quits.
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.game_over = True
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_LEFT:
                        self.player.moveLeft()
                    elif event.key == pygame.K_RIGHT:
                        self.player.moveRight()
                    elif event.key == pygame.K_UP:
                        self.player.moveUp()
                    elif event.key == pygame.K_DOWN:
                        self.player.moveDown()
            # Redraw the scene, evaluate win/lose, and cap the frame rate.
            self.dis.fill(self.display_col)
            self.maze.maze_draw(dis = self.dis, col = self.maze_col)
            self.food.food_draw(dis = self.dis, col = self.food_col, square_block = self.square_block)
            self.player.player_draw(dis = self.dis, col = self.player_col , square_block = self.square_block)
            pygame.display.update()
            self.eval_()
            clock.tick(self.square_block)
        pygame.display.quit()

    def run_game(self):
        """Initialize pygame and start the game loop."""
        pygame.init()
        pygame.display.set_caption('Snake like Game by Tamas')
        # NOTE(review): font_style is created but never used here.
        font_style = pygame.font.SysFont(None, 30)
        self.gameLoop()
import pygame
import time
import random
import numpy as np
import pandas as pd
from Square_game.Maze import *
from Square_game.A_star import *
from Square_game.Player import *
from Square_game.Food import *
| 33.371901 | 159 | 0.540862 | 3,761 | 0.931171 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.047784 |
1439a1471ef42c4ab1b73ed6dc780c31a03941cb | 7,255 | py | Python | gentun/algorithms.py | ahmadmobeen/gentun | c0cbb3eaf458b3e4abeade6b9baf914f461f3727 | [
"Apache-2.0"
] | 2 | 2020-03-23T07:29:33.000Z | 2022-03-16T06:18:37.000Z | gentun/algorithms.py | ahmadmobeen/gentun | c0cbb3eaf458b3e4abeade6b9baf914f461f3727 | [
"Apache-2.0"
] | 1 | 2020-03-25T05:53:15.000Z | 2020-03-25T05:53:15.000Z | gentun/algorithms.py | ahmadmobeen/gentun | c0cbb3eaf458b3e4abeade6b9baf914f461f3727 | [
"Apache-2.0"
] | 3 | 2020-03-11T05:53:43.000Z | 2020-03-24T10:44:21.000Z | #!/usr/bin/env python
"""
Genetic algorithm class
"""
import random
import operator
import pymongo
# NOTE(review): hard-coded MongoDB host/port with no authentication —
# consider moving these to configuration.
db_client=pymongo.MongoClient("223.195.37.85",27017)
db=db_client["binaryCSA"]
# Experiment log collection; CrowSearchAlgorithm.run() pushes one entry
# per iteration.
exp_col=db["experiments"]
class GeneticAlgorithm(object):
    """Evolve a population iteratively to find better
    individuals on each generation. If elitism is set, the
    fittest individual of a generation will be part of the
    next one.
    """

    def __init__(self, population, tournament_size=5, elitism=True):
        self.population = population
        # Training data is shared by every generation's population.
        self.x_train, self.y_train = self.population.get_data()
        self.tournament_size = tournament_size
        self.elitism = elitism
        self.generation = 1

    def get_population_type(self):
        """Return the concrete population class, used to build new generations."""
        return self.population.__class__

    def run(self, max_generations):
        """Evolve the population for max_generations generations."""
        print("Starting genetic algorithm...\n")
        while self.generation <= max_generations:
            self.evolve_population()
            self.generation += 1

    def evolve_population(self):
        """Replace the current population with the next generation."""
        if self.population.get_size() < self.tournament_size:
            raise ValueError("Population size is smaller than tournament size.")
        print("Evaluating generation #{}...".format(self.generation))
        fittest = self.population.get_fittest()
        print("Fittest individual is:")
        print(fittest)
        print("Fitness value is: {}\n".format(round(fittest.get_fitness(), 4)))
        # Start from an empty population of the same concrete type.
        new_population = self.get_population_type()(
            self.population.get_species(), self.x_train, self.y_train, individual_list=[],
            maximize=self.population.get_fitness_criteria()
        )
        # Elitism: carry the current best individual over unchanged.
        if self.elitism:
            new_population.add_individual(self.population.get_fittest())
        # Fill the rest with mutated children of tournament-selected parents.
        while new_population.get_size() < self.population.get_size():
            child = self.tournament_select().reproduce(self.tournament_select())
            child.mutate()
            new_population.add_individual(child)
        self.population = new_population

    def tournament_select(self):
        """Sample tournament_size individuals at random and return the fittest."""
        tournament = self.get_population_type()(
            self.population.get_species(), self.x_train, self.y_train, individual_list=[
                self.population[i] for i in random.sample(range(self.population.get_size()), self.tournament_size)
            ], maximize=self.population.get_fitness_criteria()
        )
        return tournament.get_fittest()
class RussianRouletteGA(GeneticAlgorithm):
    """Simpler genetic algorithm used in the Genetic CNN paper.
    """

    def __init__(self, population, crossover_probability=0.2, mutation_probability=0.8):
        super(RussianRouletteGA, self).__init__(population)
        self.crossover_probability = crossover_probability
        self.mutation_probability = mutation_probability

    def evolve_population(self, eps=1e-15):
        """Build the next generation by fitness-weighted resampling.

        Args:
            eps: Small constant avoiding division by zero when minimizing.
        """
        print("Evaluating generation #{}...".format(self.generation))
        fittest = self.population.get_fittest()
        print("Fittest individual is:")
        print(fittest)
        print("Fitness value is: {}\n".format(round(fittest.get_fitness(), 4)))
        # Russian roulette selection: weights are fitness when maximizing,
        # inverse fitness when minimizing.
        if self.population.get_fitness_criteria():
            weights = [self.population[i].get_fitness() for i in range(self.population.get_size())]
        else:
            weights = [1 / (self.population[i].get_fitness() + eps) for i in range(self.population.get_size())]
        # Shift weights so the worst individual has weight 0; fall back to
        # uniform weights if all individuals are tied.
        min_weight = min(weights)
        weights = [weight - min_weight for weight in weights]
        if sum(weights) == .0:
            weights = [1. for _ in range(self.population.get_size())]
        # Resample (with replacement) a full-size population of copies.
        new_population = self.get_population_type()(
            self.population.get_species(), self.x_train, self.y_train, individual_list=[
                self.population[i].copy() for i in random.choices(
                    range(self.population.get_size()), weights=weights, k=self.population.get_size()
                )
            ], maximize=self.population.get_fitness_criteria()
        )
        # Crossover and mutation
        # NOTE(review): the pairs (i, i + 1) overlap for consecutive i; a
        # step of 2 may have been intended — confirm against the paper.
        for i in range(new_population.get_size() // 2):
            if random.random() < self.crossover_probability:
                new_population[i].crossover(new_population[i + 1])
            else:
                if random.random() < self.mutation_probability:
                    new_population[i].mutate()
                if random.random() < self.mutation_probability:
                    new_population[i + 1].mutate()
        self.population = new_population
class CrowSearchAlgorithm(object):
    """Binary crow search over candidate locations.

    Each iteration, every crow follows a tournament-selected flock member
    until it lands on a location the flock has not explored yet; every
    visited location is recorded in self.explored.
    """

    def __init__(self, flock, tournament_size=5):
        self.flock = flock
        self.x_train, self.y_train = self.flock.get_data()
        self.tournament_size = tournament_size
        self.iteration = 1
        self.max_iterations=0
        # Seed the explored set with the flock's starting locations and
        # report how many of them are distinct.
        self.explored=[crow.get_location() for crow in self.flock]
        unique=[]
        for location in self.explored:
            if location not in unique:
                unique.append(location)
        print(self.explored)
        print(unique)
        print("Flock Size",len(self.explored),";","Unique Crows",len(unique))

    def get_flock_type(self):
        """Return the concrete flock class."""
        return self.flock.__class__

    def run(self, max_iterations,exp_no):
        """Run the search, logging one entry per iteration to MongoDB.

        Args:
            max_iterations: Number of iterations to run.
            exp_no: Experiment number used as the MongoDB document key.
        """
        print("\nStarting Crow Search Algorithm...")
        self.max_iterations=max_iterations
        while self.iteration <= max_iterations:
            exp_col.update_one({"no":exp_no},{"$push":{"iterations":[]}})
            self.release_flock()
            self.iteration += 1

    def release_flock(self):
        """Move every crow to a fresh (unexplored) location for this iteration."""
        if self.flock.get_size() < self.tournament_size:
            raise ValueError("Flock size is smaller than tournament size.")
        print("\nRunning iteration #{}...".format(self.iteration))
        fittest = self.flock.get_fittest()
        print("\nBest performance is {:.8f} by Crow {} on the location :".format(fittest.get_best_fitness(),fittest.get_id()),fittest.get_memory())
        # Skip the move phase on the final iteration.
        if self.iteration <self.max_iterations:
            for i,_ in enumerate(self.flock):
                crow=self.flock[i]
                # Keep following tournament winners until the crow reaches a
                # location nobody has visited yet.
                while crow.get_location() in self.explored:
                    crow.follow(self.tournament_select(crow))
                self.explored.append(crow.get_location())

    def tournament_select(self,crow):
        """Return the best-performing crow from a random sample, excluding *crow*."""
        individual_list=[self.flock[i] for i in random.sample(range(self.flock.get_size()), self.tournament_size) if crow.get_id() !=self.flock[i].get_id()]
        target=max(individual_list, key=operator.methodcaller('get_best_fitness'))
        return target
| 42.928994 | 156 | 0.641351 | 7,042 | 0.970641 | 0 | 0 | 0 | 0 | 0 | 0 | 1,457 | 0.200827 |
1439fdf7a78ef76ce5defaea7b2ef8c4d002969d | 2,867 | py | Python | src/frontend/views.py | AllenAnthes/operationcode-pyback | b501a67eb055aff4c5ed11996de4a351f439bed9 | [
"MIT"
] | 21 | 2018-10-01T19:05:44.000Z | 2021-01-03T02:49:00.000Z | src/frontend/views.py | AllenAnthes/operationcode-pyback | b501a67eb055aff4c5ed11996de4a351f439bed9 | [
"MIT"
] | 5 | 2018-09-17T18:14:34.000Z | 2019-04-29T17:22:58.000Z | src/frontend/views.py | AllenAnthes/operationcode-pyback | b501a67eb055aff4c5ed11996de4a351f439bed9 | [
"MIT"
] | 10 | 2018-10-04T01:34:49.000Z | 2021-04-06T14:24:50.000Z | import json
import logging
from typing import Tuple
import requests
from django.conf import settings
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.views.generic import FormView, TemplateView
from .forms import CodeSchoolForm
logger = logging.getLogger(__name__)
class IndexView(TemplateView):
    """Render the landing page."""
    template_name = 'frontend/index.html'
class CodeschoolFormView(FormView):
    """Display and process the code-school submission form.

    On a valid submission the form is saved and a GitHub issue is filed
    via handle_submission(); the user is redirected to the repo's issue list.
    """
    form_class = CodeSchoolForm
    template_name = 'frontend/codeschool-form.html'
    success_url = f'https://github.com/{settings.GITHUB_REPO}/issues'

    def form_valid(self, form):
        # Persist the submission, then open the GitHub issue.
        form.save()
        handle_submission(form.cleaned_data)
        return super().form_valid(form)

    def form_invalid(self, form):
        # NOTE(review): identical to the inherited behavior; this override
        # could be removed.
        return super().form_invalid(form)
class BotMessagesView(TemplateView):
    """Render the bot messages page."""
    template_name = 'frontend/messages.html'
def get_logo_and_users(logo: InMemoryUploadedFile) -> Tuple[str, str]:
    """Return the uploaded logo's media URL and the GitHub users to notify.

    Non-production environments (DEBUG or PRE_PROD) ping a reduced list.
    """
    sanitized_name = logo.name.replace(' ', '_')
    logo_url = f'{settings.MEDIA_URL}logos/{sanitized_name}'
    is_non_prod = settings.DEBUG or settings.PRE_PROD
    users = '@wimo7083 @AllenAnthes,' if is_non_prod else '@wimo7083 @jhampton @kylemh'
    return logo_url, users
def handle_submission(form: dict):
    """Open a GitHub issue describing a code-school submission.

    Args:
        form: Cleaned data from CodeSchoolForm, unpacked into make_params().
    """
    repo_path = settings.GITHUB_REPO
    url = f"https://api.github.com/repos/{repo_path}/issues"
    # GitHub App authentication via a short-lived JWT.
    headers = {"Authorization": f"Bearer {settings.GITHUB_JWT}"}
    params = make_params(**form)
    res = requests.post(url, headers=headers, data=json.dumps(params))
    logger.info(f'response from github API call {res}')
def make_params(logo, name, url, address1, city, state, zipcode, country, rep_name, rep_email, recaptcha='',
                address2=None, fulltime=False, hardware=False, has_online=False, only_online=False, accredited=False,
                housing=False, mooc=False):
    """Build the GitHub issue payload (title and markdown body).

    recaptcha is accepted because the form posts it, but it is not used here.
    """
    logo_url, notify_users = get_logo_and_users(logo)
    # NOTE(review): logo_url appears unused in the body below — the "logo:"
    # line may be missing its markdown image link; confirm against the
    # rendered issue.
    return ({
        'title': f'New Code School Request: {name}',
        'body': (
            f"Name: {name}\n"
            f"Website: {url}\n"
            f"Full Time: {fulltime}\n"
            f"Hardware Included: {hardware}\n"
            f"Has Online: {has_online}\n"
            f"Only Online: {only_online}\n"
            f"VA Accredited: {accredited}\n"
            f"Housing Included: {housing}\n"
            f"MOOC Only: {mooc}\n"
            f"Address: {address1} {address2}\n"
            f"City: {city}\n"
            f"State: {state}\n"
            f"Country: {country}\n"
            f"Zip: {zipcode}\n\n"
            f"Representative Name: {rep_name}\n"
            f"Representative Email: {rep_email}\n"
            f"logo: \n"
            'This code school is ready to be added/updated:\n'
            f"{notify_users}\n"
            "Please close this issue once you've added/updated the code school."
        )
    })
| 31.505495 | 117 | 0.642832 | 557 | 0.19428 | 0 | 0 | 0 | 0 | 0 | 0 | 1,017 | 0.354726 |
143afa2a3ac5466b576ac87ae8b831db9911e23c | 13,495 | py | Python | bitwise/arithmetic/ADD_SUB.py | jamesjiang52/Bitwise | c71f151d23034b3f9e2a939f637be0eaa16c45c3 | [
"MIT"
] | null | null | null | bitwise/arithmetic/ADD_SUB.py | jamesjiang52/Bitwise | c71f151d23034b3f9e2a939f637be0eaa16c45c3 | [
"MIT"
] | null | null | null | bitwise/arithmetic/ADD_SUB.py | jamesjiang52/Bitwise | c71f151d23034b3f9e2a939f637be0eaa16c45c3 | [
"MIT"
] | null | null | null | """
The following classes are defined:
AdderSubtractor4
AdderSubtractor8
AdderSubtractor16
"""
from .. import wire
from .. import gate
from .. import signal
from . import ADD
Wire = wire.Wire
Bus4 = wire.Bus4
Bus8 = wire.Bus8
Bus16 = wire.Bus16
class AdderSubtractor4:
    """Construct a new 4-bit adder-subtractor.

    Args:
        add_subtract: An object of type Wire. Indicates the operation to carry
            out - 0 for addition, 1 for subtraction.
        a_bus: An object of type Bus4. The first addend, or the minuend.
            a_bus[0] and a_bus[3] are the most and least significant bit,
            respectively. a_bus[0] is the sign bit in subtraction operations.
        b_bus: An object of type Bus4. The second addend, or the subtrahend.
            b_bus[0] and b_bus[3] are the most and least significant bit,
            respectively. b_bus[0] is the sign bit in subtraction operations.
        overflow: An object of type Wire. The overflow indicator of the
            subtractor.
        carry_out: An object of type Wire. The carry-out of the adder.
        sum_bus: An object of type Bus4. The sum of the two addends, or the
            difference between the minuend and the subtrahend. sum_bus[0] and
            sum_bus[3] are the most and least significant bit, respectively.
            sum_bus[0] is the sign bit in subtraction operations.

    Raises:
        TypeError: If either a_bus, b_bus, or sum_bus is not a bus of width 4.
    """
    def __init__(
        self,
        add_subtract,
        a_bus,
        b_bus,
        overflow,
        carry_out,
        sum_bus
    ):
        # Validate all three bus widths in one loop instead of three
        # copy-pasted checks; the (a_bus, b_bus, sum_bus) order preserves the
        # original error-reporting order.
        for bus in (a_bus, b_bus, sum_bus):
            if len(bus.wires) != 4:
                raise TypeError(
                    "Expected bus of width 4, received bus of width "
                    "{0}.".format(len(bus.wires))
                )

        # Internal bus carrying b_bus, conditionally inverted: when
        # add_subtract is 1 the inverter flips B and the same signal feeds
        # the adder's carry-in, completing B's two's complement.
        bus_1 = Bus4(*[Wire() for _ in range(4)])

        not_input_1 = Wire()
        not_input_2 = Wire()
        not_output = Wire()
        and_1_wire = Wire()
        and_2_wire = Wire()

        input_1 = a_bus.wires
        input_2 = b_bus.wires
        output = sum_bus.wires

        signal.ControlledInverter4(add_subtract, b_bus, bus_1)
        ADD.Adder4(add_subtract, a_bus, bus_1, carry_out, sum_bus)

        # Overflow flag: asserted when sign(a)=0, sign(b)=1, sign(sum)=1 or
        # sign(a)=1, sign(b)=0, sign(sum)=0 (sign bits are index 0).
        gate.NOTGate(input_1[0], not_input_1)
        gate.NOTGate(input_2[0], not_input_2)
        gate.NOTGate(output[0], not_output)
        gate.ANDGate3(input_1[0], not_input_2, not_output, and_1_wire)
        gate.ANDGate3(not_input_1, input_2[0], output[0], and_2_wire)
        gate.ORGate2(and_1_wire, and_2_wire, overflow)

        self.add_subtract = add_subtract
        self.a_bus = a_bus
        self.b_bus = b_bus
        self.overflow = overflow
        self.carry_out = carry_out
        self.sum_bus = sum_bus

    def __str__(self):
        # Human-readable dump of every terminal's current value.
        str_ = ""
        str_ += "add_subtract: " + str(self.add_subtract.value) + "\n"
        str_ += "a_bus: " + self.a_bus.__str__() + "\n"
        str_ += "b_bus: " + self.b_bus.__str__() + "\n"
        str_ += "overflow: " + str(self.overflow.value) + "\n"
        str_ += "carry_out: " + str(self.carry_out.value) + "\n"
        str_ += "sum_bus: " + self.sum_bus.__str__()
        return str_

    def __call__(
        self, *,
        add_subtract=None,
        a_bus=None,
        b_bus=None,
        overflow=None,
        carry_out=None,
        sum_bus=None
    ):
        # Convenience setter: assign any subset of terminal values by keyword.
        if add_subtract is not None:
            self.add_subtract.value = add_subtract
        if a_bus is not None:
            self.a_bus.wire_values = a_bus
        if b_bus is not None:
            self.b_bus.wire_values = b_bus
        if overflow is not None:
            self.overflow.value = overflow
        if carry_out is not None:
            self.carry_out.value = carry_out
        if sum_bus is not None:
            self.sum_bus.wire_values = sum_bus
class AdderSubtractor8:
    """Construct a new 8-bit adder-subtractor.

    Args:
        add_subtract: An object of type Wire. Indicates the operation to carry
            out - 0 for addition, 1 for subtraction.
        a_bus: An object of type Bus8. The first addend, or the minuend.
            a_bus[0] and a_bus[7] are the most and least significant bit,
            respectively. a_bus[0] is the sign bit in subtraction operations.
        b_bus: An object of type Bus8. The second addend, or the subtrahend.
            b_bus[0] and b_bus[7] are the most and least significant bit,
            respectively. b_bus[0] is the sign bit in subtraction operations.
        overflow: An object of type Wire. The overflow indicator of the
            subtractor.
        carry_out: An object of type Wire. The carry-out of the adder.
        sum_bus: An object of type Bus8. The sum of the two addends, or the
            difference between the minuend and the subtrahend. sum_bus[0] and
            sum_bus[7] are the most and least significant bit, respectively.
            sum_bus[0] is the sign bit in subtraction operations.

    Raises:
        TypeError: If either a_bus, b_bus, or sum_bus is not a bus of width 8.
    """
    def __init__(
        self,
        add_subtract,
        a_bus,
        b_bus,
        overflow,
        carry_out,
        sum_bus
    ):
        # Validate all three bus widths in one loop instead of three
        # copy-pasted checks; order preserves the original error reporting.
        for bus in (a_bus, b_bus, sum_bus):
            if len(bus.wires) != 8:
                raise TypeError(
                    "Expected bus of width 8, received bus of width "
                    "{0}.".format(len(bus.wires))
                )

        # Internal bus carrying b_bus, conditionally inverted: when
        # add_subtract is 1 the inverter flips B and the same signal feeds
        # the adder's carry-in, completing B's two's complement.
        bus_1 = Bus8(*[Wire() for _ in range(8)])

        not_input_1 = Wire()
        not_input_2 = Wire()
        not_output = Wire()
        and_1_wire = Wire()
        and_2_wire = Wire()

        input_1 = a_bus.wires
        input_2 = b_bus.wires
        output = sum_bus.wires

        signal.ControlledInverter8(add_subtract, b_bus, bus_1)
        ADD.Adder8(add_subtract, a_bus, bus_1, carry_out, sum_bus)

        # Overflow flag: asserted when sign(a)=0, sign(b)=1, sign(sum)=1 or
        # sign(a)=1, sign(b)=0, sign(sum)=0 (sign bits are index 0).
        gate.NOTGate(input_1[0], not_input_1)
        gate.NOTGate(input_2[0], not_input_2)
        gate.NOTGate(output[0], not_output)
        gate.ANDGate3(input_1[0], not_input_2, not_output, and_1_wire)
        gate.ANDGate3(not_input_1, input_2[0], output[0], and_2_wire)
        gate.ORGate2(and_1_wire, and_2_wire, overflow)

        self.add_subtract = add_subtract
        self.a_bus = a_bus
        self.b_bus = b_bus
        self.overflow = overflow
        self.carry_out = carry_out
        self.sum_bus = sum_bus

    def __str__(self):
        # Human-readable dump of every terminal's current value.
        str_ = ""
        str_ += "add_subtract: " + str(self.add_subtract.value) + "\n"
        str_ += "a_bus: " + self.a_bus.__str__() + "\n"
        str_ += "b_bus: " + self.b_bus.__str__() + "\n"
        str_ += "overflow: " + str(self.overflow.value) + "\n"
        str_ += "carry_out: " + str(self.carry_out.value) + "\n"
        str_ += "sum_bus: " + self.sum_bus.__str__()
        return str_

    def __call__(
        self, *,
        add_subtract=None,
        a_bus=None,
        b_bus=None,
        overflow=None,
        carry_out=None,
        sum_bus=None
    ):
        # Convenience setter: assign any subset of terminal values by keyword.
        if add_subtract is not None:
            self.add_subtract.value = add_subtract
        if a_bus is not None:
            self.a_bus.wire_values = a_bus
        if b_bus is not None:
            self.b_bus.wire_values = b_bus
        if overflow is not None:
            self.overflow.value = overflow
        if carry_out is not None:
            self.carry_out.value = carry_out
        if sum_bus is not None:
            self.sum_bus.wire_values = sum_bus
class AdderSubtractor16:
    """Construct a new 16-bit adder-subtractor.

    Args:
        add_subtract: An object of type Wire. Indicates the operation to carry
            out - 0 for addition, 1 for subtraction.
        a_bus: An object of type Bus16. The first addend, or the minuend.
            a_bus[0] and a_bus[15] are the most and least significant bit,
            respectively. a_bus[0] is the sign bit in subtraction operations.
        b_bus: An object of type Bus16. The second addend, or the subtrahend.
            b_bus[0] and b_bus[15] are the most and least significant bit,
            respectively. b_bus[0] is the sign bit in subtraction operations.
        overflow: An object of type Wire. The overflow indicator of the
            subtractor.
        carry_out: An object of type Wire. The carry-out of the adder.
        sum_bus: An object of type Bus16. The sum of the two addends, or the
            difference between the minuend and the subtrahend. sum_bus[0] and
            sum_bus[15] are the most and least significant bit, respectively.
            sum_bus[0] is the sign bit in subtraction operations.

    Raises:
        TypeError: If either a_bus, b_bus, or sum_bus is not a bus of width 16.
    """
    def __init__(
        self,
        add_subtract,
        a_bus,
        b_bus,
        overflow,
        carry_out,
        sum_bus
    ):
        # Validate all three bus widths in one loop instead of three
        # copy-pasted checks; order preserves the original error reporting.
        for bus in (a_bus, b_bus, sum_bus):
            if len(bus.wires) != 16:
                raise TypeError(
                    "Expected bus of width 16, received bus of width "
                    "{0}.".format(len(bus.wires))
                )

        # Internal bus carrying b_bus, conditionally inverted: when
        # add_subtract is 1 the inverter flips B and the same signal feeds
        # the adder's carry-in, completing B's two's complement.
        bus_1 = Bus16(*[Wire() for _ in range(16)])

        not_input_1 = Wire()
        not_input_2 = Wire()
        not_output = Wire()
        and_1_wire = Wire()
        and_2_wire = Wire()

        input_1 = a_bus.wires
        input_2 = b_bus.wires
        output = sum_bus.wires

        signal.ControlledInverter16(add_subtract, b_bus, bus_1)
        ADD.Adder16(add_subtract, a_bus, bus_1, carry_out, sum_bus)

        # Overflow flag: asserted when sign(a)=0, sign(b)=1, sign(sum)=1 or
        # sign(a)=1, sign(b)=0, sign(sum)=0 (sign bits are index 0).
        gate.NOTGate(input_1[0], not_input_1)
        gate.NOTGate(input_2[0], not_input_2)
        gate.NOTGate(output[0], not_output)
        gate.ANDGate3(input_1[0], not_input_2, not_output, and_1_wire)
        gate.ANDGate3(not_input_1, input_2[0], output[0], and_2_wire)
        gate.ORGate2(and_1_wire, and_2_wire, overflow)

        self.add_subtract = add_subtract
        self.a_bus = a_bus
        self.b_bus = b_bus
        self.overflow = overflow
        self.carry_out = carry_out
        self.sum_bus = sum_bus

    def __str__(self):
        # Human-readable dump of every terminal's current value.
        str_ = ""
        str_ += "add_subtract: " + str(self.add_subtract.value) + "\n"
        str_ += "a_bus: " + self.a_bus.__str__() + "\n"
        str_ += "b_bus: " + self.b_bus.__str__() + "\n"
        str_ += "overflow: " + str(self.overflow.value) + "\n"
        str_ += "carry_out: " + str(self.carry_out.value) + "\n"
        str_ += "sum_bus: " + self.sum_bus.__str__()
        return str_

    def __call__(
        self, *,
        add_subtract=None,
        a_bus=None,
        b_bus=None,
        overflow=None,
        carry_out=None,
        sum_bus=None
    ):
        # Convenience setter: assign any subset of terminal values by keyword.
        if add_subtract is not None:
            self.add_subtract.value = add_subtract
        if a_bus is not None:
            self.a_bus.wire_values = a_bus
        if b_bus is not None:
            self.b_bus.wire_values = b_bus
        if overflow is not None:
            self.overflow.value = overflow
        if carry_out is not None:
            self.carry_out.value = carry_out
        if sum_bus is not None:
            self.sum_bus.wire_values = sum_bus
| 32.914634 | 79 | 0.558133 | 13,227 | 0.980141 | 0 | 0 | 0 | 0 | 0 | 0 | 4,479 | 0.331901 |
143b9d9f954626237df01251d000fda5f839d52f | 680 | py | Python | utils/notification/notification_manager.py | sanjusci/google_sheet_reader | c2b11bdb1eb3e2799cdd22aa4ed67f9c86daea56 | [
"MIT"
] | null | null | null | utils/notification/notification_manager.py | sanjusci/google_sheet_reader | c2b11bdb1eb3e2799cdd22aa4ed67f9c86daea56 | [
"MIT"
] | 3 | 2021-02-08T20:27:15.000Z | 2021-06-01T23:18:51.000Z | utils/notification/notification_manager.py | sanjusci/google_sheet_reader | c2b11bdb1eb3e2799cdd22aa4ed67f9c86daea56 | [
"MIT"
] | null | null | null | # Create your service here.
__author__ = "Sanju Sci"
__email__ = "sanju.sci9@gmail.com"
__copyright__ = "Copyright 2019."
from utils.commons import safe_invoke
class NotificationManager(object):
    """Base class for notification back-ends (e.g. e-mail)."""

    def __init__(self, *args, **kwargs):
        # The base class holds no state; subclasses override as needed.
        pass

    def notify(self, *args, **kwargs):
        # No-op in the base class; subclasses implement actual delivery.
        pass

    @staticmethod
    def notify_sync(notif_mgr, *args, **kwargs):
        # Invoke notify() defensively so a delivery failure cannot propagate.
        # NOTE(review): **kwargs is accepted but not forwarded to notify() —
        # confirm whether that is intentional.
        safe_invoke(notif_mgr.notify, *args)

    @classmethod
    def EMAIL(cls, *args, **kwargs):
        # Factory for the e-mail back-end; imported lazily to avoid a
        # circular import at module load time.
        from utils.notification.email.email_manager import EmailManager
        return EmailManager(*args, **kwargs)
class Parameters(object):
    """Placeholder container for notification parameters."""
    pass


# Public API of this module.
__all__ = ["NotificationManager", "Parameters"]
| 20.606061 | 71 | 0.680882 | 462 | 0.679412 | 0 | 0 | 273 | 0.401471 | 0 | 0 | 110 | 0.161765 |
143dbd8a1c5acee5a901ba1feaee07be0801dc4b | 5,901 | py | Python | tests/unit/test_config.py | zaro0508/lambda-budgets | 8eef392edadeae992f91aca0cda8bfb8ecef777e | [
"Apache-2.0"
] | null | null | null | tests/unit/test_config.py | zaro0508/lambda-budgets | 8eef392edadeae992f91aca0cda8bfb8ecef777e | [
"Apache-2.0"
] | 4 | 2020-07-28T20:44:35.000Z | 2021-08-17T01:22:34.000Z | tests/unit/test_config.py | zaro0508/lambda-budgets | 8eef392edadeae992f91aca0cda8bfb8ecef777e | [
"Apache-2.0"
] | 2 | 2020-07-20T18:05:03.000Z | 2020-07-31T20:57:59.000Z | from pathlib import Path
import json
import unittest
from unittest.mock import MagicMock, patch
from budget.config import Config
import yaml
class TestConfig(unittest.TestCase):
    """Unit tests for budget.config.Config: environment loading, YAML
    parsing, and the validating budget_rules/thresholds setters.
    """

    def test_init(self):
        # Full happy-path construction from patched environment variables.
        account_id = '012345678901'
        topic_arn = 'arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE'
        end_user_role_name = 'SomeRoleName'
        parentdir = Path(__file__).parent
        budget_rules = (
            'teams:\n'
            '  \'3412821\':\n'
            '    amount: \'10\'\n'
            '    period: ANNUALLY\n'
            '    unit: USD\n'
            '    community_manager_emails:\n'
            '      - someone@example.org'
        )
        thresholds = (
            'notify_user_only: [25.0, 50.0, 80.0]\n'
            'notify_admins_too: [90.0, 100.0, 110.0]'
        )
        with patch.dict('os.environ', {
            'AWS_ACCOUNT_ID': account_id,
            'NOTIFICATION_TOPIC_ARN': topic_arn,
            'BUDGET_RULES': budget_rules,
            'THRESHOLDS': thresholds,
            'END_USER_ROLE_NAME': end_user_role_name
        }):
            config = Config()
            self.assertEqual(config.account_id, account_id)
            self.assertEqual(config.notification_topic_arn, topic_arn)
            self.assertEqual(config.end_user_role_name, end_user_role_name)
            expected_budget_rules = yaml.safe_load(budget_rules)
            expected_thresholds = yaml.safe_load(thresholds)
            self.assertDictEqual(config.budget_rules, expected_budget_rules)
            self.assertDictEqual(config.thresholds, expected_thresholds)

    def test_get_env_var_present(self):
        # _get_env_var returns the value and queries os.getenv exactly once.
        env_var_value = 'some_value'
        env_var_key = 'SOME_ENV_VAR'
        with patch('os.getenv', MagicMock(return_value=env_var_value)) as mock:
            result = Config._get_env_var(env_var_key)
        expected = env_var_value
        self.assertEqual(result, expected)
        mock.assert_called_once_with(env_var_key)

    def test_get_env_var_missing(self):
        # A missing environment variable raises ValueError with a clear message.
        env_var_key = 'SOME_ENV_VAR'
        with self.assertRaises(ValueError) as context_manager:
            Config._get_env_var(env_var_key)
        expected = (
            'Lambda configuration error: '
            f'missing environment variable {env_var_key}'
        )
        self.assertEqual(str(context_manager.exception), expected)

    def test_load_yaml_happy(self):
        yaml_input = 'foo:\n - bar'
        result = Config._load_yaml(yaml_input)
        expected = {'foo': ['bar']}
        self.assertDictEqual(result, expected)

    def test_load_yaml_invalid(self):
        # Malformed YAML surfaces as an exception naming the config source.
        yaml_input = 'foo:\n - \'bar"'
        config_name = 'test'
        with self.assertRaises(Exception) as context_manager:
            result = Config._load_yaml(yaml_input, config_name=config_name)
        expected = f'There was an error when attempting to load {config_name}'
        self.assertTrue(str(context_manager.exception).startswith(expected))

    @patch.object(Config, "__init__", lambda x: None)
    def test_budget_rules_setter_happy(self):
        # happy path -- the setter will throw an error if the rules don't validate
        budget_rules = {
            'teams': {
                '3412821': {
                    'amount': '10',
                    'period': 'ANNUALLY',
                    'unit': 'USD',
                    'community_manager_emails': [ 'someone@example.org']
                }
            }
        }
        Config().budget_rules = budget_rules

    @patch.object(Config, "__init__", lambda x: None)
    def test_budget_rules_setter_empty(self):
        # An empty config must fail validation with a 'teams' requirement error.
        budget_rules = {}
        with self.assertRaises(Exception) as context_manager:
            Config().budget_rules = budget_rules
        expected = (
            f'There was a configuration validation error: '
            "{'teams': ['required field']}. "
            f'Configuration submitted: {budget_rules}'
        )
        self.assertEqual(str(context_manager.exception), expected)

    @patch.object(Config, "__init__", lambda x: None)
    def test_budget_rules_setter_empty_team(self):
        # A team with no fields reports every required field as missing.
        budget_rules = {
            'teams': {
                '3412821': {}
            }
        }
        with self.assertRaises(Exception) as context_manager:
            Config().budget_rules = budget_rules
        expected = (
            "[{'amount': ['required field'], "
            "'community_manager_emails': ['required field'], "
            "'period': ['required field'], "
            "'unit': ['required field']}]"
        )
        print(str(context_manager.exception))
        self.assertTrue(expected in str(context_manager.exception))

    @patch.object(Config, "__init__", lambda x: None)
    def test_budget_rules_setter_missing_manager(self):
        # At least one community manager e-mail is required per team.
        budget_rules = {
            'teams': {
                '3412821': {
                    'amount': '10',
                    'period': 'ANNUALLY',
                    'unit': 'USD',
                    'community_manager_emails': []
                }
            }
        }
        with self.assertRaises(Exception) as context_manager:
            Config().budget_rules = budget_rules
        expected = "{'community_manager_emails': ['min length is 1']}"
        print(str(context_manager.exception))
        self.assertTrue(expected in str(context_manager.exception))

    @patch.object(Config, "__init__", lambda x: None)
    def test_thresholds_setter_happy(self):
        thresholds = {
            'notify_user_only': [50.0],
            'notify_admins_too': [100.0]
        }
        Config().thresholds = thresholds

    @patch.object(Config, "__init__", lambda x: None)
    def test_thresholds_setter_all_empty(self):
        # Both threshold lists are required.
        thresholds = {}
        with self.assertRaises(Exception) as context_manager:
            Config().thresholds = thresholds
        expected = (
            "{'notify_admins_too': ['required field'], 'notify_user_only': "
            "['required field']}"
        )
        print(str(context_manager.exception))
        self.assertTrue(expected in str(context_manager.exception))

    @patch.object(Config, "__init__", lambda x: None)
    def test_thresholds_setter_empty_list(self):
        # Each threshold list must contain at least one value.
        thresholds = {
            'notify_user_only': [],
            'notify_admins_too': [100.0]
        }
        with self.assertRaises(Exception) as context_manager:
            Config().thresholds = thresholds
        expected = "{'notify_user_only': ['min length is 1']}"
        print(str(context_manager.exception))
        self.assertTrue(expected in str(context_manager.exception))
| 32.070652 | 81 | 0.671073 | 5,756 | 0.975428 | 0 | 0 | 3,174 | 0.537875 | 0 | 0 | 1,539 | 0.260803 |
143fbec953156df9d4526de475d51f26bf5b991c | 54 | py | Python | chap4/4-9.py | StewedChickenwithStats/Answers-to-Python-Crash-Course | 9ffbe02abba5d111f702d920db7932303daf59d4 | [
"MIT"
] | 1 | 2022-02-21T07:05:48.000Z | 2022-02-21T07:05:48.000Z | chap4/4-9.py | StewedChickenwithStats/Answers-to-Python-Crash-Course | 9ffbe02abba5d111f702d920db7932303daf59d4 | [
"MIT"
] | null | null | null | chap4/4-9.py | StewedChickenwithStats/Answers-to-Python-Crash-Course | 9ffbe02abba5d111f702d920db7932303daf59d4 | [
"MIT"
] | null | null | null | cubes=[value**3 for value in range(1,11)]
print(cubes) | 27 | 41 | 0.722222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
14403d9dd0685c366e48890d567b4976cccff5bc | 946 | py | Python | test/test_composer.py | nccr-itmo/FEDOT.Web | 9b6f7b66de277ea34d6d5ed621b99a3f938db61b | [
"BSD-3-Clause"
] | 23 | 2020-12-24T11:05:01.000Z | 2022-03-31T20:29:12.000Z | test/test_composer.py | nccr-itmo/FEDOT.Web | 9b6f7b66de277ea34d6d5ed621b99a3f938db61b | [
"BSD-3-Clause"
] | 42 | 2021-01-11T09:38:31.000Z | 2022-03-25T17:19:05.000Z | test/test_composer.py | nccr-itmo/FEDOT.Web | 9b6f7b66de277ea34d6d5ed621b99a3f938db61b | [
"BSD-3-Clause"
] | 5 | 2021-03-31T04:38:31.000Z | 2022-03-31T20:29:26.000Z | import random
import numpy as np
# Pin both RNGs so composer results are reproducible across test runs.
random.seed(1)
np.random.seed(1)
def test_composer_endpoint(client):
case_id = 'scoring'
history_json = client.get(f'api/composer/{case_id}').json
nodes = history_json['nodes']
nodes_ids = [n['uid'] for n in nodes]
edges = history_json['edges']
targets = [e['target'] for e in history_json['edges']]
sources = [e['source'] for e in history_json['edges']]
assert len(nodes) > 0
assert len(edges) > 0
assert len(nodes_ids) == len(set(nodes_ids))
for node_id in nodes_ids:
assert node_id in targets or node_id in sources or 'pipeline_' in node_id
for edge in edges:
assert edge['source'] in nodes_ids
assert edge['target'] in nodes_ids
assert edge['source'] != edge['target']
reverse_edge = {
'source': edge['target'],
'target': edge['source']
}
assert reverse_edge not in edges
| 26.277778 | 81 | 0.625793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.167019 |
144393dffd51c7f70ab1943e4270d90ddd43598c | 1,447 | py | Python | testClient.py | step305/PiCameraStream_Test | 569b5d63052bfb6d60eddc221692a2dcb78b87fa | [
"MIT"
] | 1 | 2020-12-18T09:59:47.000Z | 2020-12-18T09:59:47.000Z | testClient.py | step305/PiCameraStream_Test | 569b5d63052bfb6d60eddc221692a2dcb78b87fa | [
"MIT"
] | null | null | null | testClient.py | step305/PiCameraStream_Test | 569b5d63052bfb6d60eddc221692a2dcb78b87fa | [
"MIT"
] | null | null | null | import cv2
import urllib
import numpy as np
import multiprocessing as mp
stream = 'http://192.168.53.114:8000/streamLow.mjpg'
stream2 = 'http://192.168.53.114:8001/streamLow.mjpg'
def procImg(str, wind, stop):
bytes = ''
stream = urllib.urlopen(str)
while not stop.is_set():
try:
bytes += stream.read(4096)
a = bytes.find('\xff\xd8')
b = bytes.find('\xff\xd9')
if wind == 'Low':
c = bytes.find('\xff\xaa\xee')
if a != -1 and b != -1:
jpg = bytes[a:b+2]
if wind == 'Low':
if c != -1:
str = bytes[b+2:c]
print(str)
bytes = bytes[c+3:]
else:
bytes = bytes[b+2:]
else:
bytes = bytes[b+2:]
i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
cv2.imshow(wind, i)
cv2.waitKey(1)
if cv2.waitKey(1) == ord('q'):
stop.set()
break
except:
pass
if __name__ == '__main__':
st = mp.Event()
lowProc = mp.Process(target = procImg, args=(stream, 'Low', st))
HighProc = mp.Process(target = procImg, args=(stream2, 'High', st))
lowProc.start()
HighProc.start()
lowProc.join()
HighProc.join()
exit(0)
| 30.787234 | 86 | 0.463718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.107809 |
1444b1b92ae47cbc60d016724b5d44bf1122493f | 901 | py | Python | 2012/AU_Danny.py | Valchris/IEEEXtreme_WorkingAsIntended | c3e04633ce6d9c1a1582081767e8f2090adffa28 | [
"MIT"
] | null | null | null | 2012/AU_Danny.py | Valchris/IEEEXtreme_WorkingAsIntended | c3e04633ce6d9c1a1582081767e8f2090adffa28 | [
"MIT"
] | null | null | null | 2012/AU_Danny.py | Valchris/IEEEXtreme_WorkingAsIntended | c3e04633ce6d9c1a1582081767e8f2090adffa28 | [
"MIT"
] | null | null | null | import sys
import re
line = sys.stdin.readline().strip()
commands = line.split()
if len(commands) > 15: #Too Long
print 'ERROR'
exit(0)
for i in range(0,len(line)): #Missing Spaces
if i % 2 == 1 and line[i] != ' ':
print 'ERROR'
exit(0)
for i in range(0,len(commands) - 2,2): #Repeated Symbol
if commands[i] == commands[i + 2]:
print 'REJECT'
exit(0)
regex_test = re.search("[^RYGPCX ]", line) #Invalid Symbol
if regex_test is not None:
print 'ERROR'
exit(0)
flashing_test1 = re.search("[^R] [PC]", line)
flashing_test2 = re.search("[^ ][PC][^ ]", line)
flashing_test3 = re.search("[PC] [^R]", line)
if flashing_test1 is not None or flashing_test2 is not None or flashing_test3 is not None: #Flashing not surrounded
print 'REJECT'
exit(0)
if line[0] != 'R': #Doesn't start with R
print 'REJECT'
exit(0)
print 'ACCEPT' | 21.97561 | 115 | 0.619312 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.229745 |
144698a559c916923c258db6ccf04e14398014a5 | 1,293 | py | Python | pupil/profiler.py | nigamapurv/Minkowski_planview | 1c887bf0c54b9bdd632b9249cac71b275be8b2c2 | [
"MIT"
] | 1 | 2019-12-12T07:16:05.000Z | 2019-12-12T07:16:05.000Z | pupil/profiler.py | nigamapurv/Minkowski_planview | 1c887bf0c54b9bdd632b9249cac71b275be8b2c2 | [
"MIT"
] | null | null | null | pupil/profiler.py | nigamapurv/Minkowski_planview | 1c887bf0c54b9bdd632b9249cac71b275be8b2c2 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cProfile
import io
import pstats
cache = {}
PROFILE_SWITCH = True
class Profiler:
def __init__(self):
self.profiler = cProfile.Profile()
def profile(self, func, *args, **kwargs):
self.profiler.enable()
result = func(*args, **kwargs)
self.profiler.disable()
with io.StringIO() as string_stream:
profiler_stats = pstats.Stats(self.profiler, stream = string_stream).sort_stats("cumulative")
profiler_stats.print_stats()
print(string_stream.getvalue())
return result
class PassThroughProfiler:
def __init__(self):
pass
def profile(self, func, *args, **kwargs):
return func(*args, **kwargs)
def get_profiler():
if "profiler" not in cache:
if PROFILE_SWITCH:
cache["profiler"] = Profiler()
else:
cache["profiler"] = PassThroughProfiler()
return cache["profiler"]
def profile(func, *args, **kwargs):
profiler = get_profiler()
return profiler.profile(func, *args, **kwargs)
def profileable(func):
def _profile(*args, **kwargs):
return profile(func, *args, **kwargs)
return _profile | 23.509091 | 105 | 0.648879 | 639 | 0.4942 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.040217 |
1446b8c5fba47ab7e1297f04805cc27c8254a77e | 544 | py | Python | dogdog045.py | chikochiko76/anime2021 | 0a7e3dcf6b0ee47a824d4fae6f061154930ce587 | [
"CC0-1.0"
] | null | null | null | dogdog045.py | chikochiko76/anime2021 | 0a7e3dcf6b0ee47a824d4fae6f061154930ce587 | [
"CC0-1.0"
] | null | null | null | dogdog045.py | chikochiko76/anime2021 | 0a7e3dcf6b0ee47a824d4fae6f061154930ce587 | [
"CC0-1.0"
] | null | null | null | class AImage(AShape):
color: any
def __init__(self, width=100, height=None, cx=None, cy=None, image='pet_darui_dog.png'):
AShape.__init__(self, width, height, cx, cy)
if image.startswith('http')
self.pic = Image.open(io.BytesIO(requests.get(image).content))
else:
self.pic = Image.open(image)
def render(self, canvas: ACanvas, frame: int):
ox, oy, w, h = self.bounds()
pic = self.pic.resize((int(w), int(h)))
canvas.image.paste(pic, (int(ox), int(oy)), pic)
| 36.266667 | 92 | 0.595588 | 543 | 0.998162 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.045956 |
1446e34643a51f72b5218098028b06210bd3c768 | 1,898 | py | Python | tests/contrib/aiohttp/conftest.py | discord/dd-trace-py | 3f6bca078e751bf7459fd02b7aff7f96eff0eeb6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/contrib/aiohttp/conftest.py | discord/dd-trace-py | 3f6bca078e751bf7459fd02b7aff7f96eff0eeb6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/contrib/aiohttp/conftest.py | discord/dd-trace-py | 3f6bca078e751bf7459fd02b7aff7f96eff0eeb6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | import aiohttp
import aiohttp_jinja2
import pytest
from ddtrace.contrib.aiohttp.middlewares import trace_app
from ddtrace.contrib.aiohttp_jinja2.patch import patch as patch_jinja2
from ddtrace.internal.utils import version
from ddtrace.pin import Pin
from .app.web import setup_app
if version.parse_version(aiohttp.__version__) < (3, 0, 0):
@pytest.fixture
def aiohttp_client(test_client):
return test_client
@pytest.fixture
def app_tracer(tracer, loop):
app = setup_app()
trace_app(app, tracer)
return app, tracer
@pytest.fixture
def patched_app_tracer(app_tracer):
patch_jinja2()
app, tracer = app_tracer
Pin.override(aiohttp_jinja2, tracer=tracer)
return app, tracer
# When Python 3.5 is dropped, rather do:
# yield app, tracer
# unpatch()
@pytest.fixture
def untraced_app_tracer(tracer, loop):
patch_jinja2()
app = setup_app()
Pin.override(aiohttp_jinja2, tracer=tracer)
return app, tracer
# When Python 3.5 is dropped, rather do:
# yield app, tracer
# unpatch()
else:
@pytest.fixture
async def app_tracer(tracer, loop):
app = setup_app()
trace_app(app, tracer)
return app, tracer
@pytest.fixture
async def patched_app_tracer(app_tracer):
patch_jinja2()
app, tracer = app_tracer
Pin.override(aiohttp_jinja2, tracer=tracer)
return app, tracer
# When Python 3.5 is dropped, rather do:
# yield app, tracer
# unpatch()
@pytest.fixture
async def untraced_app_tracer(tracer, loop):
patch_jinja2()
app = setup_app()
Pin.override(aiohttp_jinja2, tracer=tracer)
return app, tracer
# When Python 3.5 is dropped, rather do:
# yield app, tracer
# unpatch()
| 26 | 70 | 0.643309 | 0 | 0 | 0 | 0 | 1,503 | 0.791886 | 661 | 0.348261 | 280 | 0.147524 |
1446f15e2cee0b0f3b21fb8bde4f8852bd30e37b | 1,074 | py | Python | douban.movie_requests.py | willcod/requests_example | 572826c4d7081f21e99e4661b273e8ad0e67e046 | [
"MIT"
] | null | null | null | douban.movie_requests.py | willcod/requests_example | 572826c4d7081f21e99e4661b273e8ad0e67e046 | [
"MIT"
] | null | null | null | douban.movie_requests.py | willcod/requests_example | 572826c4d7081f21e99e4661b273e8ad0e67e046 | [
"MIT"
] | null | null | null | import requests, json
from bs4 import BeautifulSoup
hds=[{'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'},\
{'User-Agent':'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11'},\
{'User-Agent': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)'}]
url_hot='https://movie.douban.com/j/search_subjects?type=movie&tag=%E7%83%AD%E9%97%A8&page_limit=50&page_start=0'
url_highrate = 'https://movie.douban.com/j/search_subjects?type=movie&tag=%E8%B1%86%E7%93%A3%E9%AB%98%E5%88%86&page_limit=50&page_start=0'
response = requests.get(url_highrate, headers=hds[1])
# print(response.text)
data = json.loads(response.text);
items = data['subjects']
for item in items[:]:
print(item["title"] + ' ' + item['url'])
if False:
movie_resp = requests.get(item['url'], headers=hds[0])
#print(movie_resp.text);
soup = BeautifulSoup(movie_resp.text, 'html.parser')
div = soup.select('strong.ll.rating_num')[0]
print(div.text) | 51.142857 | 138 | 0.68622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 633 | 0.589385 |
14471fb559051a475c6f693ab9f75659647a5b99 | 1,323 | py | Python | app/serialmonitor/forms.py | fretchen/ArduinoMagnetometerWeb | 1d8bee848e488b7136dbae20172bd8785931dee0 | [
"MIT"
] | null | null | null | app/serialmonitor/forms.py | fretchen/ArduinoMagnetometerWeb | 1d8bee848e488b7136dbae20172bd8785931dee0 | [
"MIT"
] | null | null | null | app/serialmonitor/forms.py | fretchen/ArduinoMagnetometerWeb | 1d8bee848e488b7136dbae20172bd8785931dee0 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, HiddenField,IntegerField
from wtforms.validators import DataRequired, NumberRange
class ConnectForm(FlaskForm):
'''
The form for connecting to the Arduino
'''
id = HiddenField('A hidden field');
serial_port = StringField('Connect on port:', validators=[DataRequired()], description = 'Serial port')
name = StringField('Name of the Arduino:', description = 'Name', default = 'Arduino')
submit = SubmitField('Connect')
class UpdateForm(FlaskForm):
'''
The form for connecting to the Arduino
'''
id = HiddenField('A hidden field');
serial_port = StringField('Update to port:', validators=[DataRequired()])
baud_rate = IntegerField('Baudrate:', validators=[ NumberRange(4800,1000000)])
submit = SubmitField('Update connection')
class SerialWaitForm(FlaskForm):
'''
The form for connecting to the Arduino
'''
id = HiddenField('A hidden field');
serial_time = IntegerField('Time between measurements (s):', [DataRequired(), NumberRange(2,300)])
submit = SubmitField('Update waiting time.')
class DisconnectForm(FlaskForm):
'''
The form for disconnecting from the Arduino
'''
id = HiddenField('A hidden field');
submit = SubmitField('Disconnect')
| 34.815789 | 107 | 0.699169 | 1,154 | 0.87226 | 0 | 0 | 0 | 0 | 0 | 0 | 475 | 0.359033 |
14479cf21718a5d4475f47cd6e178f58e9dd330a | 421 | py | Python | degvabank/degvabank/apps/user/signals.py | Vixx-X/DEGVABanck-backend | de413d55b55dba25e89b7f3bc60dfa94e89ddcde | [
"MIT"
] | null | null | null | degvabank/degvabank/apps/user/signals.py | Vixx-X/DEGVABanck-backend | de413d55b55dba25e89b7f3bc60dfa94e89ddcde | [
"MIT"
] | null | null | null | degvabank/degvabank/apps/user/signals.py | Vixx-X/DEGVABanck-backend | de413d55b55dba25e89b7f3bc60dfa94e89ddcde | [
"MIT"
] | 1 | 2022-02-03T03:18:43.000Z | 2022-02-03T03:18:43.000Z | from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import User, EmailDevice
@receiver(post_save, sender=User)
def creating_user_settings(sender, instance, created, raw, **kwargs):
"""Creating the user device for a new User"""
if created and not raw:
EmailDevice.objects.create(user=instance, name=f"personal device for user {instance.pk}", confirmed=True)
| 35.083333 | 113 | 0.760095 | 0 | 0 | 0 | 0 | 296 | 0.703088 | 0 | 0 | 86 | 0.204276 |
144ab5fd23d14f2a4b0d04f079ebb18f914db56c | 2,969 | py | Python | casperlabs_client/commands/transfer_cmd.py | CasperLabs/client-py | 12955d2b88bc439f94a1cc33a063fda0c20ef8ab | [
"Apache-2.0"
] | 2 | 2021-05-12T06:43:45.000Z | 2021-10-02T11:45:41.000Z | casperlabs_client/commands/transfer_cmd.py | CasperLabs/client-py | 12955d2b88bc439f94a1cc33a063fda0c20ef8ab | [
"Apache-2.0"
] | 24 | 2020-06-30T14:55:20.000Z | 2021-01-05T18:18:29.000Z | casperlabs_client/commands/transfer_cmd.py | CasperLabs/client-py | 12955d2b88bc439f94a1cc33a063fda0c20ef8ab | [
"Apache-2.0"
] | 1 | 2020-06-22T15:32:38.000Z | 2020-06-22T15:32:38.000Z | # -*- coding: utf-8 -*-
from casperlabs_client import CasperLabsClient, consts, reformat
from casperlabs_client.commands.common_options import (
FROM_OPTION,
CHAINNAME_OPTION,
DEPENDENCIES_OPTION,
TTL_MILLIS_OPTION,
private_key_option,
WAIT_PROCESSED_OPTION,
TIMEOUT_SECONDS_OPTION,
ALGORITHM_OPTION,
PAYMENT_OPTIONS,
)
from casperlabs_client.decorators import guarded_command
NAME: str = "transfer"
HELP: str = "Transfers funds between accounts"
OPTIONS = [
[
("-a", "--amount"),
dict(
required=True,
default=None,
type=int,
help="Amount of motes to transfer. Note: a mote is the smallest, indivisible unit of a token.",
),
],
[
("-t", "--target-account"),
dict(
required=False,
type=str,
help="base64 or base16 representation of target account's public key",
),
],
[
("--target-purse",),
dict(
required=False,
type=str,
help="base64 or base16 representation of target purse URef",
),
],
[
("--source-purse",),
dict(
required=False,
type=str,
help="base64 or base16 representation of source purse URef",
),
],
FROM_OPTION,
CHAINNAME_OPTION,
DEPENDENCIES_OPTION,
TTL_MILLIS_OPTION,
WAIT_PROCESSED_OPTION,
TIMEOUT_SECONDS_OPTION,
ALGORITHM_OPTION,
private_key_option(required=True),
] + PAYMENT_OPTIONS
@guarded_command
def method(casperlabs_client: CasperLabsClient, args: dict):
deploy_hash = casperlabs_client.transfer(
amount=args.get("amount"),
target_account=args.get("target_account"),
target_purse=args.get("target_purse"),
source_purse=args.get("source_purse"),
from_addr=args.get("from"),
private_key=args.get("private_key"),
ttl_millis=args.get("ttl_millis"),
dependencies=args.get("dependencies"),
chain_name=args.get("chain_name"),
algorithm=args.get("algorithm"),
payment=args.get("payment"),
payment_amount=args.get("payment_amount"),
payment_args=args.get("payment_args"),
payment_hash=args.get("payment_hash"),
payment_name=args.get("payment_name"),
payment_entry_point=args.get("payment_entry_point"),
payment_version=args.get("payment_version"),
payment_package_hash=args.get("payment_package_hash"),
payment_package_name=args.get("payment_package_name"),
)
print(f"Success! Deploy {deploy_hash} deployed")
if args.get("wait_for_processed", False):
deploy_info = casperlabs_client.show_deploy(
deploy_hash,
full_view=False,
wait_for_processed=True,
timeout_seconds=args.get("timeout_seconds", consts.STATUS_TIMEOUT),
)
print(reformat.hexify(deploy_info))
| 30.927083 | 107 | 0.629168 | 0 | 0 | 0 | 0 | 1,410 | 0.474907 | 0 | 0 | 743 | 0.250253 |
144aea4f0502bed2772fcf751e46d1dcc9492dd4 | 203 | py | Python | Chapter 03/Chap03_Example3.10.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | Chapter 03/Chap03_Example3.10.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | Chapter 03/Chap03_Example3.10.py | bpbpublications/Programming-Techniques-using-Python | 49b785f37e95a3aad1d36cef51e219ac56e5e9f0 | [
"MIT"
] | null | null | null | for num in range(1,6):
#code inside for loop
if num == 4:
continue
#code inside for loop
print(num)
#code outside for loop
print("continue statement executed on num = 4")
| 22.555556 | 48 | 0.615764 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.527094 |
144d32396683ecc3b3bc88e006d8084a2755425a | 50,657 | py | Python | src/inv/views.py | Nikhilgupta18/practice-react_django | 4226345a10c528308d13629907952e841621badc | [
"MIT"
] | null | null | null | src/inv/views.py | Nikhilgupta18/practice-react_django | 4226345a10c528308d13629907952e841621badc | [
"MIT"
] | 11 | 2020-09-07T15:48:40.000Z | 2022-03-08T23:06:16.000Z | src/inv/views.py | Nikhilgupta18/practice-react_django | 4226345a10c528308d13629907952e841621badc | [
"MIT"
] | null | null | null | from django.views import View
from django.shortcuts import render
from services.models import Service, ServiceUser, MaterialUser
from account.models import ContactUs
from inv.settings import enable_otp
from account.tasks import send_email
from django.shortcuts import render, redirect
from django.contrib import messages
from university.models import University
from django.db.models import Q
from django.core import serializers
from django.http import JsonResponse, HttpResponse
from rest_framework.views import APIView
from account.models import Country
from django.core.paginator import Paginator
from account.models import Decisions, LatestWhatsappGroup
from account.models import Major
from django.contrib.auth import get_user_model
from account.models import Student, GradProfile
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.mail import EmailMultiAlternatives, send_mail
from university.models import EngineeringGrad, BusinessGrad
import razorpay
from services.models import PaidMaterial, Payment, Statement, PrincetonAccounts, KaplanAccounts, PrincetonGMATAccounts
import os
import pickle
from django.core.files.images import get_image_dimensions
from django.conf import settings
from django.urls import reverse
from django.shortcuts import render, get_object_or_404
from paypal.standard.forms import PayPalPaymentsForm
# Concrete user model resolved from settings.AUTH_USER_MODEL.
User = get_user_model()
# Countries whose universities are surfaced on the public search pages.
countries = ['Canada', 'United Kingdom', 'Germany', 'Australia', 'United States']
class Index(View):
    """Landing page: featured services, top-ranked universities, the
    supported-country list, and a random sample of grad profiles."""

    def get(self, request, *args, **kwargs):
        import random

        top_universities = University.objects.filter(rank__lte=50)[:12]

        # Only profiles that are complete and have a non-default photo
        # qualify for the showcase strip.
        candidate_ids = list(
            GradProfile.objects.filter(
                Q(user__student__complete_profile=True),
                ~Q(user__student__path='default_user.jpg'),
            ).values_list('id', flat=True)
        )
        sample_size = min(len(candidate_ids), 12)
        sampled_ids = random.sample(candidate_ids, sample_size)

        context = {
            'services': Service.objects.all(),
            'univ': top_universities,
            'countries': countries,
            'grad_profiles': GradProfile.objects.filter(id__in=sampled_ids),
        }
        return render(request, template_name="react.html", context=context)
class UniversitySearchPage(View):
    """List universities for one supported country, 24 per page.

    GET and POST render identically (the original duplicated the whole
    body in both handlers); the page number is always read from the
    query string (``?page=``).
    """

    PER_PAGE = 24

    def get(self, request, *args, **kwargs):
        return self._render_country_page(request)

    def post(self, request, *args, **kwargs):
        return self._render_country_page(request)

    def _render_country_page(self, request):
        """Shared GET/POST implementation."""
        # Normalise the URL segment; only whitelisted countries are served.
        country_name = self.kwargs.get('country').title()
        if country_name not in countries:
            return render(request, 'error.html')

        country = Country.objects.filter(name__iexact=country_name).first()
        universities = University.objects.filter(country=country).order_by('rank')

        paginator = Paginator(universities, self.PER_PAGE)
        page_num = request.GET.get('page')
        result = paginator.get_page(page_num)
        try:
            page = paginator.page(page_num)
        except Exception:
            # Missing/invalid page number falls back to the first page.
            page = paginator.page(1)

        context = {
            'countries': countries,
            'universities': universities,
            'country': country_name,
            'page': page,
            'page_count': paginator.num_pages,
            'result': result,
        }
        return render(request, template_name="university/university_search.html", context=context)
class SearchUni(APIView):
    """AJAX autocomplete endpoint for the university search box.

    POST param ``val`` is the partial query; responds with up to 10
    matching names, their slugs, and the match count. Queries shorter
    than 2 characters return empty results.
    """

    def post(self, request, *args, **kwargs):
        val = request.POST.get('val')
        if len(val) < 2:
            # Too short to search meaningfully.
            unis, slugs = [], []
        else:
            # One query fetching (name, slug) pairs instead of the two
            # identical queries the original ran for names and slugs.
            rows = list(
                University.objects.filter(
                    Q(name__search=val) | Q(name__istartswith=val) | Q(name__icontains=val),
                    Q(country__name__in=countries),
                ).order_by('id').values_list('name', 'slug')[:10]
            )
            unis = [name for name, _ in rows]
            slugs = [slug for _, slug in rows]
        return JsonResponse({"unis": unis, 'limit': len(unis), 'slugs': slugs})
#
# class FetchMoreUnis(APIView):
# def post(self, request, *args, **kwargs):
# country_name = request.POST.get('country')
# start = int(request.POST.get('start'))
# end = start + 24
# country = Country.objects.filter(name__iexact=country_name).first()
# if not Country.objects.filter(name__iexact=country_name).exists():
# return JsonResponse({'unis': []})
#
# unis = University.objects.filter(country=country).order_by('rank')[start:end]
# print(unis)
# response = serializers.serialize("json", unis)
# return HttpResponse(response, content_type='application/json')
class PrivacyPolicy(View):
    """Static privacy-policy page."""

    def get(self, request, *args, **kwargs):
        return render(request, "privacy-policy.html", {})
class TermsOfUse(View):
    """Static terms-of-use page."""

    def get(self, request, *args, **kwargs):
        return render(request, "terms-of-use.html", {})
class TestingPage(View):
    """Internal scratch page listing every university."""

    def get(self, request, *args, **kwargs):
        all_universities = list(University.objects.all())
        return render(
            request,
            template_name="testing-page.html",
            context={'universities': all_universities},
        )
class ContactUsView(View):
    """Persist a contact-form submission and notify the site admin by email."""

    def post(self, request, *args, **kwargs):
        name = request.POST.get('name')
        email = request.POST.get('email')
        form_subject = request.POST.get('subject')
        message = request.POST.get('message')

        # Attach the user only when authenticated; omitting the key leaves
        # the model's default in place, matching the anonymous branch.
        record = dict(name=name, email=email, subject=form_subject, message=message)
        if request.user.is_authenticated:
            record['user'] = request.user
        ContactUs.objects.create(**record)

        subject = "Someone is contacting us on YMGrad.com"
        msg = (
            f"Sender's Name: {name}<br>Sender's Contact: {email}"
            f"<br>Subject: {form_subject}<br>Message: {message}"
        )
        if enable_otp:
            send_email.delay(receiver=["mittrayash@gmail.com"], email_message=msg, subject=subject)
        else:
            print("Not sending email because in development mode.")

        messages.success(request, "Your message has reached us. We will get back to you soon.")
        return redirect("/")
class SearchUniversity(View):
    """Free-text university search (``?q=``).

    An exact (case-insensitive) name match redirects straight to the
    university's detail page; otherwise up to 56 fuzzy matches are
    rendered in the search-results template.
    """

    def get(self, request, *args, **kwargs):
        # NOTE(review): assumes ?q= is always present; a missing param
        # raises AttributeError here (same as the original) -- confirm
        # the search form always submits q.
        val = request.GET.get('q').strip()

        # One query for the exact match instead of exists() + first().
        exact = University.objects.filter(name__iexact=val).first()
        if exact is not None:
            return redirect('/university/' + exact.slug)

        # Removed the leftover debug print(unis) from the original.
        unis = University.objects.filter(
            Q(name__search=val) | Q(name__istartswith=val) | Q(name__icontains=val),
            Q(country__name__in=countries),
        ).order_by('id')[:56]

        context = {
            'result': unis,
            'q': val,
            'countries': countries,
        }
        return render(request, 'university/university_search.html', context=context)
def handler404(request, exception=None):
    """Render the shared error template with a 404 status (wired up via the
    project's ``handler404`` setting)."""
    return render(request, 'error.html', status=404)
def handler500(request):
    """Render the shared error template with a 500 status (wired up via the
    project's ``handler500`` setting)."""
    return render(request, 'error.html', status=500)
class AdmitsRejects(View):
    """Browse admission decisions, filterable by target university, major
    and decision type, 30 per page.

    Both GET and POST read the filter values from the query string (the
    search form submits them via GET even on POST). The original
    enumerated all eight filter combinations in an if/elif chain; that is
    replaced by building filter kwargs dynamically.
    """

    PER_PAGE = 30

    @staticmethod
    def _filtered_decisions(university, major, decision_type):
        """Return decisions matching every supplied (non-empty) filter,
        newest first. With no filters this is equivalent to ``all()``."""
        filters = {}
        if university is not None:
            filters['university'] = university
        if major is not None:
            filters['major'] = major
        if decision_type:
            filters['decision_type'] = decision_type
        return Decisions.objects.filter(**filters).order_by('-id')

    def _paginate(self, request, decisions):
        """Paginate ``decisions`` by ``?page=``; invalid pages fall back
        to page 1. Returns (page, num_pages, get_page result)."""
        paginator = Paginator(decisions, self.PER_PAGE)
        page_num = request.GET.get('page')
        result = paginator.get_page(page_num)
        try:
            page = paginator.page(page_num)
        except Exception:
            page = paginator.page(1)
        return page, paginator.num_pages, result

    @staticmethod
    def _base_context():
        """Dropdown option lists shared by both handlers."""
        return {
            'universities': list(University.objects.all().values_list('name', flat=True).order_by('name')),
            'majors': list(Major.objects.all().values_list('name', flat=True).order_by('name')),
        }

    def get(self, request, *args, **kwargs):
        uni_name = request.GET.get('target_university')
        uni = University.objects.filter(name=uni_name).first()
        major = Major.objects.filter(name=request.GET.get('major')).first()
        decision_type = request.GET.get('decision_type')

        decisions = self._filtered_decisions(uni, major, decision_type)
        page, count, result = self._paginate(request, decisions)

        context = self._base_context()
        context.update({
            'page': page,
            'page_count': count,
            'decisions': result,
            'result': result,
            'target_university': uni_name,
            # NOTE(review): this is the Major model instance, not its name
            # string (same as the original) -- the template presumably
            # relies on its __str__; confirm before changing.
            'major': major,
            'decision_type': decision_type,
        })
        return render(request, 'admits_rejects.html', context=context)

    def post(self, request, *args, **kwargs):
        # Mirrors get(), minus the selected-filter echo keys (the original
        # POST handler also omitted them). Filters still come from
        # request.GET, matching the original behavior.
        uni = University.objects.filter(name=request.GET.get('target_university')).first()
        major = Major.objects.filter(name=request.GET.get('major')).first()
        decision_type = request.GET.get('decision_type')

        decisions = self._filtered_decisions(uni, major, decision_type)
        page, count, result = self._paginate(request, decisions)

        context = self._base_context()
        context.update({
            'page': page,
            'page_count': count,
            'decisions': result,
            'result': result,
        })
        return render(request, 'admits_rejects.html', context=context)
class GetSubscribers(APIView):
    """Dump every user to ``subscribers.csv`` and email it to the admin.

    Each row: first name, last name, username, email, and a 0/1 flag for
    whether the user ever purchased the "SOP Drafting" service.
    NOTE(review): fields are not CSV-escaped, so a comma in a name breaks
    the row layout (unchanged from the original format).
    """

    def post(self, request, *args, **kwargs):
        # Pre-fetch buyer ids once so the loop does an O(1) set lookup per
        # user instead of one EXISTS query per user (original was N+1).
        sop_buyer_ids = set(
            ServiceUser.objects.filter(service_name='SOP Drafting').values_list('user_id', flat=True)
        )

        # ``with`` guarantees the handles are closed; the original leaked
        # the read handle after attaching the file.
        with open('subscribers.csv', 'w') as out:
            for user in User.objects.all():
                has_purchased_sop = 1 if user.id in sop_buyer_ids else 0
                out.write(
                    f"{user.first_name},{user.last_name},{user.username},"
                    f"{user.email},{has_purchased_sop}\n"
                )

        with open('subscribers.csv', 'rb') as attachment:
            msg_to_user = EmailMultiAlternatives(
                subject="Latest Subscribers CSV",
                body="PFA",
                from_email='yashmittra@gmail.com',
                to=['mittrayash@gmail.com'],
            )
            msg_to_user.attach_alternative('SUBSCRIBERS', "text/html")
            msg_to_user.attach("subscribers.csv", attachment.read(), 'text/csv')
        msg_to_user.send()
        return JsonResponse({"success": True})
class WhatsappGroup(View):
    """Redirect to the most recently added WhatsApp group invite link."""

    def get(self, request, *args, **kwargs):
        latest_group = LatestWhatsappGroup.objects.first()
        return redirect(latest_group.link)
class AllLinks(View):
    """Link-hub page, currently exposing the latest WhatsApp group link."""

    def get(self, request, *args, **kwargs):
        latest_group = LatestWhatsappGroup.objects.first()
        return render(
            request,
            template_name='all_links.html',
            context={'whatsapp_group_link': latest_group.link},
        )
class ViewPremiumMembers(LoginRequiredMixin, View):
    """Superuser-only dashboard splitting service purchases into pending
    and completed lists (newest first)."""

    def get(self, request, *args, **kwargs):
        if not request.user.is_superuser:
            return render(request, 'error.html', context={})
        context = {
            'pending_users': ServiceUser.objects.filter(pending=True).order_by('-timestamp'),
            'completed_users': ServiceUser.objects.filter(pending=False).order_by('-timestamp'),
        }
        return render(request, template_name='view_premium_members.html', context=context)
class ServiceUserDone(APIView):
    """Mark a service purchase as completed and re-render the admin dashboard.

    NOTE(review): this mutates state on GET; a POST would be more
    appropriate, but the admin page links to it via GET.
    """

    def get(self, request, *args, **kwargs):
        # Restore the superuser gate (it was commented out): this endpoint
        # flips purchases to "done" and must not be open to anonymous
        # users. Matches the check in ViewPremiumMembers.
        if not request.user.is_superuser:
            return render(request, 'error.html', context={})
        # 404 on an unknown id instead of AttributeError/500 from
        # .first() returning None.
        service_user = get_object_or_404(ServiceUser, id=self.kwargs.get('service_user_id'))
        service_user.pending = False
        service_user.save()
        context = {
            'pending_users': ServiceUser.objects.filter(pending=True).order_by('-timestamp'),
            'completed_users': ServiceUser.objects.filter(pending=False).order_by('-timestamp'),
        }
        return render(request, template_name='view_premium_members.html', context=context)
class FilterUnis(View):
    """University finder: match universities to a student's scores and budget.

    GET renders the filter form; POST runs the matching query against
    EngineeringGrad (GRE) or BusinessGrad (GMAT) programs.
    """

    @staticmethod
    def _percentage_to_gpa(percentage):
        """Convert percentage marks to an approximate 4.0-scale GPA.

        Quadratic fit; min() caps the result at 4.0 (the original also had a
        redundant ``if gpa > 4`` check after this cap -- removed).
        """
        return min(((percentage ** 2) / 1104) - (0.05924 * percentage) + 2.491, 4)

    def get(self, request, *args, **kwargs):
        # NOTE(review): this GET handler reads request.POST, which is empty on
        # a normal GET request, so every value below is None.  Presumably the
        # intent was request.GET (or simply an empty form) -- confirm before
        # changing, since the template may rely on the None defaults.
        context = {
            'test': request.POST.get('test'),
            'percentage': request.POST.get('percentage'),
            'lang': request.POST.get('lang'),
            'gre_quant': request.POST.get('gre_quant'),
            'gre_verbal': request.POST.get('gre_verbal'),
            'gmat': request.POST.get('gmat'),
            'ielts_score': request.POST.get('ielts_score'),
            'toefl_score': request.POST.get('toefl_score'),
        }
        return render(request, template_name='filter_unis.html', context=context)

    def post(self, request, *args, **kwargs):
        percentage = float(request.POST.get('percentage'))
        gre_quant = request.POST.get('gre_quant')
        gre_verbal = request.POST.get('gre_verbal')
        gmat = request.POST.get('gmat')
        test = request.POST.get('test')
        lang = request.POST.get('lang')
        ielts = request.POST.get('ielts_score')
        slider1 = int(request.POST.get('slider1'))  # lower budget bound (tuition + living)
        slider2 = int(request.POST.get('slider2'))  # upper budget bound
        toefl = request.POST.get('toefl_score')
        gpa = self._percentage_to_gpa(percentage)
        if percentage < 50:
            messages.error(request, "Your percentage is too low! Unfortunately, we couldn't find any universities for you.")
            return render(request, template_name='filter_unis.html', context=dict())
        context = dict()
        if test == 'gre':
            # Build a GRE score window around the applicant's scores; the
            # window narrows for high scorers and widens for average ones.
            gre_quant = int(gre_quant)
            gre_verbal = int(gre_verbal)
            if gre_quant > 157:
                quant_upper_limit = gre_quant + 6
                quant_lower_limit = gre_quant - 5
            else:
                quant_upper_limit = gre_quant + 10
                quant_lower_limit = gre_quant - 10
            if gre_quant > 167:
                # Near-perfect quant scorers: include universities well below.
                quant_lower_limit = quant_lower_limit - 7
            if gre_verbal > 154:
                verbal_upper_limit = gre_verbal + 6
                verbal_lower_limit = gre_verbal - 12
            else:
                verbal_upper_limit = gre_verbal + 10
                verbal_lower_limit = gre_verbal - 15
            if gre_verbal > 167:
                verbal_lower_limit = verbal_lower_limit - 7
            universities = EngineeringGrad.objects.filter(gre__quant__lte=quant_upper_limit, gre__quant__gte=quant_lower_limit)
            universities = universities.filter(gre__verbal__lte=verbal_upper_limit, gre__verbal__gte=verbal_lower_limit)
        elif test == 'gmat':
            gmat = int(gmat)
            if gmat > 700:
                gmat_upper_limit = gmat + 30
                gmat_lower_limit = gmat - 120
            else:
                gmat_upper_limit = gmat + 50
                gmat_lower_limit = gmat - 100
            universities = BusinessGrad.objects.filter(gmat__lte=gmat_upper_limit, gmat__gte=gmat_lower_limit)
        else:
            # The original code crashed with UnboundLocalError for any other
            # value of ``test``; fail gracefully instead.
            messages.error(request, "Please select a valid test (GRE or GMAT).")
            return render(request, template_name='filter_unis.html', context=dict())
        # Build a GPA window around the converted GPA.
        if gpa > 3.9:
            gpa = gpa - 0.2
        if gpa > 3.5:
            gpa_upper_limit = gpa + 0.2
            gpa_lower_limit = gpa - 0.3
        else:
            gpa_upper_limit = gpa + 0.45
            gpa_lower_limit = gpa - 0.4
        universities = universities.filter(gpa__lte=gpa_upper_limit, gpa__gte=gpa_lower_limit)
        # Language requirement: TOEFL uses a score window, IELTS a minimum.
        if lang == 'toefl':
            toefl = int(toefl)
            if toefl > 110:
                toefl_upper_limit = toefl + 20
                toefl_lower_limit = toefl - 20
            else:
                toefl_upper_limit = toefl + 10
                toefl_lower_limit = toefl - 10
            universities = universities.filter(mean_toefl_score__lte=toefl_upper_limit, mean_toefl_score__gte=toefl_lower_limit)
        else:
            ielts = float(ielts)
            universities = universities.filter(min_ielts_score__lte=ielts)
        # Total annual cost must fall inside the budget sliders.
        from django.db.models import F, Sum, FloatField
        universities = universities.annotate(total_exp=Sum(F('tuition') + F('living_expenses'), output_field=FloatField())).filter(total_exp__lte=slider2, total_exp__gte=slider1)
        universities = universities.order_by('university__rank')
        context['universities'] = universities
        context['result'] = universities
        context['test'] = test
        context['percentage'] = percentage
        context['lang'] = lang
        context['gre_quant'] = gre_quant
        context['gre_verbal'] = gre_verbal
        context['gmat'] = gmat
        context['ielts_score'] = ielts
        context['toefl_score'] = toefl
        return render(request, template_name='filter_unis.html', context=context)
class PaidMaterialView(View):
    """Study-material storefront.

    GET lists available materials (and nags logged-in students about price
    increases); POST verifies a Razorpay payment and fulfils the order --
    either by sharing a Google Drive folder or by emailing account
    credentials for the purchased service.
    """

    # Google Drive folder ids for materials delivered by sharing a folder.
    # The original code had four byte-identical delivery branches differing
    # only in this id.
    DRIVE_FILE_IDS = {
        'magoosh_gre': '1vKTxnTEFRWiEbnmBuU_B2lZhbwcSW_hm',
        'ets_toefl_tests': '1eXY0GOhZ6pN7C7_id57oxByHPUzKTLT2',
        'kaplan_practice_sets': '15EeDQ_bvxUWCdCBoV5FDjblPlH11U_B5',
        'magoosh_ielts': '1QjwfL1EVMIM-p21IvtvWBYdoHUSCmBKc',
    }

    # Shown after every successful fulfilment.
    ORDER_RECEIVED_MSG = "Thank You for your payment. Your order has been received and will be processed soon. Please check your email for further instructions."

    @staticmethod
    def _razorpay_client():
        """Return (key_id, client): live credentials when OTP is enabled, test otherwise.

        NOTE(review): API keys/secrets are hard-coded in source -- move them
        to settings/environment variables.
        """
        if enable_otp:
            key_id = "rzp_live_6MxRlb7ZCO7XaB"
            client = razorpay.Client(auth=(key_id, "fGjvMdo8cs7o48pXou5sa3Y5"))
        else:
            key_id = "rzp_test_QiGwvmuHqNFHk5"
            client = razorpay.Client(auth=(key_id, "v4gHikpMnv2DVK0OK6CQ9Ttm"))
        return key_id, client

    @staticmethod
    def _grant_drive_access(user_email):
        """Share the Drive folder for ``file_id`` (set by caller via partial state) -- see _deliver_drive_material."""
        raise NotImplementedError

    def get(self, request):
        user = request.user
        if not user.is_anonymous:
            # Count page opens; every time the threshold is crossed, reset the
            # counter, raise the threshold, and send a price-increase nudge.
            student = user.student
            student.study_material_page_opens = student.study_material_page_opens + 1
            student.save()
            if student.study_material_page_opens >= student.study_material_email_threshold:
                student.study_material_page_opens = 0
                student.study_material_email_threshold = student.study_material_email_threshold + 2
                student.save()
                msg = "Hi " + user.first_name + ",<br>Notification: <b>The prices for the <a href='https://www.ymgrad.com/study_material/'>study material</a> are bound to increase by tomorrow</b>." \
                      "<br><br>In order to get the material at the current price, please purchase the material today.<br><br>" \
                      "In case you face any issues purchasing the material and would like to use PayPal instead, feel free to reply to this email with the material name and we will arrange that for you." \
                      "<br>Please note that using PayPal will increase the charges by up to $2 depending on the material you wish to purchase.<br><br>We hope to serve you soon.<br><br>Best,<br>YMGrad"
                sub = "Important: Prices about to increase for the study material."
                send_email.delay(receiver=[user.email], email_message=msg, subject=sub)
        material = PaidMaterial.objects.filter(is_available=True).order_by('name')
        # Only the public key is needed by the template; the client object the
        # original built here was never used.
        key_id, _client = self._razorpay_client()
        context = {'key_id': key_id, 'materials': material}
        return render(request, template_name="service/all-material.html", context=context)

    @staticmethod
    def _share_drive_folder(user_email, file_id):
        """Grant ``user_email`` reader access to a Google Drive folder.

        Requires a cached OAuth token in ``token.pickle``; does nothing when
        the token file is missing (matching the original behaviour).
        """
        if not os.path.exists('token.pickle'):
            return
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
        from googleapiclient.discovery import build
        service = build('drive', 'v3', credentials=creds)

        def callback(request_id, response, exception):
            if exception:
                # Handle error
                print(exception)
            else:
                print("Permission Id: %s" % response.get('id'))

        batch = service.new_batch_http_request(callback=callback)
        user_permission = {
            'type': 'user',
            'role': 'reader',
            'emailAddress': user_email
        }
        batch.add(service.permissions().create(
            fileId=file_id,
            body=user_permission,
            fields='id',
            emailMessage="This is a huge package. You may see the folders are empty at first. Please give it an hour to sync on your Google Drive and then download the package."
        ))
        batch.execute()

    def _deliver_drive_material(self, request, user, user_email, material_name, file_id):
        """Fulfil a Drive-delivered material: share with Gmail buyers, otherwise ask for a Gmail address."""
        email_domain = user_email.split('@')[1]
        if email_domain == 'gmail.com':
            messages.success(request, self.ORDER_RECEIVED_MSG)
            self._share_drive_folder(user_email, file_id)
        else:
            # Drive sharing needs a Google account; ask the buyer to reply
            # with a Gmail address.
            messages.info(request,
                          "Thank You for your payment. However, we need a Gmail email ID to process your order. Please check your email.")
            msg = "Hi " + user.first_name + ",<br>We have received your payment for <b>" + material_name + "</b>." \
                  "<br><br>However, we need a GMAIL email ID to give you access to the material.<br><br>" \
                  "<b>Please reply to this email with your GMAIL email ID and we will process your order " \
                  "within the next 8 hours.</b><br>In case you still fail to receive the material, feel " \
                  "free to contact us by replying to this email again.<br><br>Best,<br>YMGrad"
            sub = "Additional Information Needed to give access to the material."
            send_email.delay(receiver=[user_email, "mittrayash@gmail.com"], email_message=msg, subject=sub)

    def post(self, request, *args, **kwargs):
        try:
            user = request.user
            material_name = request.POST.get('material_name')
            material = PaidMaterial.objects.filter(name=material_name).first()
            material_slug = material.slug
            amount = request.POST.get('amount')
            currency = request.POST.get('currency')
            payment_id = request.POST.get('razorpay_payment_id')
            price_inr = material.price_inr
            price_usd = material.price_usd
            key_id, client = self._razorpay_client()
            payment_obj = client.payment.fetch(payment_id)
            user_email = payment_obj['email']
            paid = False
            if currency == "inr":
                if payment_obj['status'] == 'authorized' and int(amount) >= price_inr:
                    paid = True
                    client.payment.capture(payment_id, amount)
                    payment = Payment.objects.create(user=user, payment_id=payment_id, status='Paid', amount=amount, service_name=material_name, currency="INR")
                    student = user.student
                    student.payments.add(payment)
                    MaterialUser.objects.create(user=user,
                                                material_name=material_name,
                                                payment=payment,
                                                )
                    detail = user.first_name + " " + user.last_name + " bought " + material_name + "."
                    Statement.objects.create(type='Credit', detail=detail, amount=price_inr)
            elif currency == "usd":
                if payment_obj['status'] == 'authorized' and int(amount) >= price_usd:
                    paid = True
                    # client.payment.capture(payment_id, amount)  # currency field is required.
                    payment = Payment.objects.create(user=user, payment_id=payment_id, status='Paid', amount=amount, service_name=material_name, currency="USD")
                    student = user.student
                    student.payments.add(payment)
                    MaterialUser.objects.create(user=user,
                                                material_name=material_name,
                                                payment=payment,
                                                )
                    detail = user.first_name + " " + user.last_name + " bought " + material_name + "."
                    # NOTE(review): the ledger entry records price_inr even for
                    # a USD sale (copied from the INR branch) -- confirm whether
                    # the ledger is intentionally INR-only or this should be
                    # price_usd.
                    Statement.objects.create(type='Credit', detail=detail, amount=price_inr)
            if paid:
                material.purchase_times = material.purchase_times + 1
                material.save()
                if material_slug in self.DRIVE_FILE_IDS:
                    self._deliver_drive_material(request, user, user_email, material_name,
                                                 self.DRIVE_FILE_IDS[material_slug])
                elif material_slug == 'kaplan_gre':
                    # Hand out a pre-purchased Kaplan account.
                    account = KaplanAccounts.objects.filter(sold=False).first()
                    sub = "Your Kaplan GRE Online Tests Account"
                    msg = "Hi " + user.first_name + ",<br>We have received your payment for <b>" + material_name + "</b>." \
                          "<br><br>Here are your account details. Please login <a href='https://www.kaptest.com/login'>here</a>.<br><br>" \
                          "<b>Email: </b> " + account.email_id + "<br>" \
                          "<b>Password: </b>" + account.password + "<br><br>" \
                          "Please note that the password is case-sensitive. In case you need help," \
                          " feel free to reply to this email.<br><br>Best,<br>YMGrad"
                    send_email.delay(receiver=[user_email], email_message=msg, subject=sub)
                    account.sold = True
                    account.sold_to = user
                    account.save()
                    messages.success(request, self.ORDER_RECEIVED_MSG)
                    accounts_left = KaplanAccounts.objects.filter(sold=False).count()
                    if accounts_left <= 3:
                        # Low-stock alert to the operator.
                        msg = "Only " + str(accounts_left) + " Kaplan accounts left!!! Reload now!"
                        sub = "Kaplan Accounts sold out ALERT"
                        send_email.delay(receiver=["mittrayash@gmail.com"], email_message=msg, subject=sub)
                        if accounts_left == 0:
                            kaplan = PaidMaterial.objects.filter(slug='kaplan_gre').first()
                            kaplan.is_available = False
                            kaplan.save()
                elif material_slug == 'princeton_gre_tests':
                    account = PrincetonAccounts.objects.filter(sold=False).first()
                    sub = "Your Princeton GRE Online Tests Account"
                    msg = "Hi " + user.first_name + ",<br>We have received your payment for <b>" + material_name + "</b>." \
                          "<br><br>Here are your account details. Please login <a href='https://secure.princetonreview.com/account/signin/?go=http%3a%2f%2fsecure.princetonreview.com%2f'>here</a>.<br><br>" \
                          "<b>Email: </b> " + account.email_id + "<br>" \
                          "<b>Password: </b>" + account.password + "<br><br>" \
                          "Please note that the password is case-sensitive. In case you need help," \
                          " feel free to reply to this email.<br><br>Best,<br>YMGrad"
                    send_email.delay(receiver=[user_email], email_message=msg, subject=sub)
                    account.sold = True
                    # NOTE(review): unlike the Kaplan branch (sold_to=user),
                    # this stores the username string -- presumably the model
                    # fields differ; confirm.
                    account.sold_to = user.username
                    account.save()
                    messages.success(request, self.ORDER_RECEIVED_MSG)
                    accounts_left = PrincetonAccounts.objects.filter(sold=False).count()
                    if accounts_left <= 3:
                        msg = "Only " + str(accounts_left) + " princeton accounts left!!! Reload now!"
                        sub = "Princeton GRE Accounts sold out ALERT"
                        send_email.delay(receiver=["mittrayash@gmail.com"], email_message=msg, subject=sub)
                        if accounts_left == 0:
                            princeton = PaidMaterial.objects.filter(slug='princeton_gre_tests').first()
                            princeton.is_available = False
                            princeton.save()
                elif material_slug == 'princeton_gmat':
                    account = PrincetonGMATAccounts.objects.filter(sold=False).first()
                    # Fix: the original subject said "GRE" for the GMAT product.
                    sub = "Your Princeton GMAT Online Tests Account"
                    msg = "Hi " + user.first_name + ",<br>We have received your payment for <b>" + material_name + "</b>." \
                          "<br><br>Here are your account details. Please login <a href='https://secure.princetonreview.com/account/signin/?go=http%3a%2f%2fsecure.princetonreview.com%2f'>here</a>.<br><br>" \
                          "<b>Email: </b> " + account.email_id + "<br>" \
                          "<b>Password: </b>" + account.password + "<br><br>" \
                          "Please note that the password is case-sensitive. In case you need help," \
                          " feel free to reply to this email.<br><br>Best,<br>YMGrad"
                    send_email.delay(receiver=[user_email], email_message=msg, subject=sub)
                    account.sold = True
                    account.sold_to = user
                    account.save()
                    messages.success(request, self.ORDER_RECEIVED_MSG)
                    accounts_left = PrincetonGMATAccounts.objects.filter(sold=False).count()
                    if accounts_left <= 3:
                        msg = "Only " + str(accounts_left) + " princeton GMAT accounts left!!! Reload now!"
                        sub = "Princeton GMAT Accounts sold out ALERT"
                        send_email.delay(receiver=["mittrayash@gmail.com"], email_message=msg, subject=sub)
                        if accounts_left == 0:
                            # Fix: the original disabled the GRE product
                            # ('princeton_gre_tests') when GMAT accounts ran
                            # out -- a copy-paste bug.
                            princeton = PaidMaterial.objects.filter(slug='princeton_gmat').first()
                            princeton.is_available = False
                            princeton.save()
                elif material_slug == 'grammarly_business':
                    # NOTE(review): shared credentials hard-coded in source.
                    sub = "Your Grammarly Business Account"
                    msg = "Hi " + user.first_name + ",<br>We have received your payment for <b>" + material_name + "</b>." \
                          "<br><br>Here are your account details.<br><br>" \
                          "<b>Email: </b>crystal.g.lewis@gmail.com<br>" \
                          "<b>Password: </b>12131981c<br><br>" \
                          "Please note that the password is case-sensitive. Please do not change the password at any point of time. In case you need help," \
                          " feel free to reply to this email.<br><br>Best,<br>YMGrad"
                    send_email.delay(receiver=[user_email], email_message=msg, subject=sub)
                    messages.success(request, self.ORDER_RECEIVED_MSG)
                elif material_slug == 'usnews':
                    sub = "Your USNews Premium Account"
                    msg = "Hi " + user.first_name + ",<br>We have received your payment for <b>" + material_name + "</b>." \
                          "<br><br>Here are your account details.<br><br>" \
                          "<b>Email: </b>mittrayash@gmail.com<br>" \
                          "<b>Password: </b>Apropos12<br><br>" \
                          "Please note that the password is case-sensitive. Please do not change the password at any point of time. In case you need help," \
                          " feel free to reply to this email.<br><br>Best,<br>YMGrad"
                    send_email.delay(receiver=[user_email], email_message=msg, subject=sub)
                    messages.success(request, self.ORDER_RECEIVED_MSG)
                elif material_slug == 'manhattan_toefl':
                    sub = "Your Manhattan TOEFL Online Access + Ebook"
                    msg = "Hi " + user.first_name + ",<br>We have received your payment for <b>" + material_name + "</b>." \
                          "<br><br>First step: Download the ebook <a href='https://drive.google.com/open?id=15AFBhsKdlCRpEWnTTsSdkYH1CRo3kDxl'>here</a>.<br><br>" \
                          "<br>Second Step: Download epubfilereader for windows to open the ebook." \
                          "<br><br>With the ebook, use the portal with the details below:" \
                          "<br><br>Here are your account details which can be used to log in <a href='https://www.manhattanprep.com/college/studentcenter/'>here</a>.<br><br>" \
                          "<b>Email: </b>Yashmittra4@gmail.com<br>" \
                          "<b>Password: </b>Toefl422019<br><br>" \
                          "Please note that the password is case-sensitive. Please do not change the password at any point of time. In case you need help," \
                          " feel free to reply to this email.<br><br>Best,<br>YMGrad"
                    send_email.delay(receiver=[user_email], email_message=msg, subject=sub)
                    messages.success(request, self.ORDER_RECEIVED_MSG)
        except Exception as e:
            # Broad catch matches the original; anything that goes wrong turns
            # into a user-facing apology.  Should use logging instead of print.
            print(e)
            messages.error(request, "Unfortunately, we are unable to process your request at this time. In case any funds have been deducted from your account, please contact us and we will resolve the issue for you.")
        return redirect("/account/dashboard/")
class Compress(APIView):
    """One-off maintenance endpoint: re-encode every image in a static directory as JPEG."""

    def post(self, request):
        from PIL import Image
        import os

        # NOTE(review): hard-coded developer-machine path; parameterize this
        # before running anywhere else.
        path = "/home/kali/Desktop/Study Abroad/StudyAbroad/static_my_proj/base/"
        for item in os.listdir(path):
            if not os.path.isfile(path + item):
                continue
            im = Image.open(path + item)
            width, height = im.size
            # JPEG cannot store alpha/palette modes.
            if im.mode != 'RGB':
                im = im.convert('RGB')
            f, e = os.path.splitext(path + item)
            new_width = width
            # Fix: PIL expects integer dimensions -- the original passed a
            # float for the height.  Image.ANTIALIAS was removed in Pillow 10;
            # Image.LANCZOS is the same filter under its current name.
            im.thumbnail((new_width, int(new_width * height / width)), Image.LANCZOS)
            # NOTE(review): quality=100 re-encodes at maximum quality, which
            # does not meaningfully compress; lower it to shrink files.
            im.save(f + '.jpg', 'JPEG', quality=100)
        return JsonResponse({'success': True})
class Fix(APIView):
    """One-off maintenance endpoint: normalize Student image paths to a .jpg extension."""

    def post(self, request):
        # Renamed from ``all`` in the original, which shadowed the builtin.
        for student in Student.objects.all():
            path = str(student.path)
            if '.' not in path:
                # Fix: the original raised IndexError on extension-less paths.
                continue
            # Fix: the original split on the FIRST dot, so "photo.v2.png"
            # became "photo.v2" (dropping the real extension).  rsplit keeps
            # dots inside the filename intact.
            stem, ext = path.rsplit('.', 1)
            if ext == 'jpeg' or ext == 'png' or ext == 'JPG':
                ext = 'jpg'
            student.path = stem + '.' + ext
            student.save()
        return JsonResponse({'success': True})
def payment_process(request):
    """Render a PayPal payment form for a fixed USD 100 test transaction."""
    host = request.get_host()

    def absolute(url_name):
        # Absolute http URL on the current host for a named route.
        return 'http://{}{}'.format(host, reverse(url_name))

    form = PayPalPaymentsForm(initial={
        'business': settings.PAYPAL_RECEIVER_EMAIL,
        'amount': '100',
        'item_name': 'Item_Name_xyz',
        'invoice': 'Test Payment Invoice',
        'currency_code': 'USD',
        'notify_url': absolute('paypal-ipn'),
        'return_url': absolute('payment_done'),
        'cancel_return': absolute('payment_canceled'),
    })
    return render(request, 'pets/payment_process.html', {'form': form})
| 48.708654 | 218 | 0.573346 | 47,672 | 0.941074 | 0 | 0 | 0 | 0 | 0 | 0 | 14,805 | 0.29226 |
144e75f0af24909b2f53fb50c3580fc3294a54f6 | 2,171 | py | Python | S4/S4 Library/simulation/situations/visiting/stay_the_night_situation.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Library/simulation/situations/visiting/stay_the_night_situation.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Library/simulation/situations/visiting/stay_the_night_situation.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | from role.role_state import RoleState
from situations.situation_complex import SituationState, SituationStateData
from situations.situation_job import SituationJob
from situations.visiting.visiting_situation_common import VisitingNPCSituation
import services
import sims4.tuning.instances
import sims4.tuning.tunable
import situations.bouncer
import tunable_time
class StayTheNightSituation(VisitingNPCSituation):
INSTANCE_TUNABLES = {'invited_job': sims4.tuning.tunable.TunableTuple(situation_job=SituationJob.TunableReference(description='\n The situation job for the sim spending the night.\n '), staying_role_state=RoleState.TunableReference(description='\n The role state for the sim spending the night.\n ')), 'when_to_leave': tunable_time.TunableTimeOfDay(description='\n The time of day for the invited sim to leave.\n ', default_hour=7)}
@classmethod
def _states(cls):
return (SituationStateData(1, _StayState),)
@classmethod
def _get_tuned_job_and_default_role_state_tuples(cls):
return [(cls.invited_job.situation_job, cls.invited_job.staying_role_state)]
@classmethod
def default_job(cls):
return cls.invited_job.situation_job
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._start_time = None
def start_situation(self):
super().start_situation()
self._start_time = services.time_service().sim_now
self._change_state(_StayState())
def _get_duration(self):
if self._seed.duration_override is not None:
return self._seed.duration_override
time_span = self._start_time.time_till_next_day_time(self.when_to_leave)
return time_span.in_minutes()
sims4.tuning.instances.lock_instance_tunables(StayTheNightSituation, exclusivity=situations.bouncer.bouncer_types.BouncerExclusivityCategory.VISIT, creation_ui_option=situations.situation_types.SituationCreationUIOption.NOT_AVAILABLE, duration=0, _implies_greeted_status=True)
class _StayState(SituationState):
pass
| 48.244444 | 558 | 0.739291 | 1,526 | 0.702902 | 0 | 0 | 325 | 0.149701 | 0 | 0 | 314 | 0.144634 |
144f7ff3eea9576a04434ab76782143398b49093 | 4,588 | py | Python | starter_code/student_utils.py | Harshit4199/Patient-Selection-for-Diabetes-Drug-Testing | c6b7fbae0fe7737185543e958bc3b4c596c0f422 | [
"MIT"
] | 1 | 2021-07-25T17:41:07.000Z | 2021-07-25T17:41:07.000Z | starter_code/student_utils.py | Harshit4199/Patient-Selection-for-Diabetes-Drug-Testing | c6b7fbae0fe7737185543e958bc3b4c596c0f422 | [
"MIT"
] | null | null | null | starter_code/student_utils.py | Harshit4199/Patient-Selection-for-Diabetes-Drug-Testing | c6b7fbae0fe7737185543e958bc3b4c596c0f422 | [
"MIT"
] | 2 | 2020-10-04T23:01:40.000Z | 2020-12-21T18:53:41.000Z | import pandas as pd
import numpy as np
import os
import tensorflow as tf
####### STUDENTS FILL THIS OUT ######
#Question 3
def reduce_dimension_ndc(df, ndc_df):
'''
df: pandas dataframe, input dataset
ndc_df: pandas dataframe, drug code dataset used for mapping in generic names
return:
df: pandas dataframe, output dataframe with joined generic drug name
'''
df1 = pd.merge(df, ndc_df[['Proprietary Name', 'NDC_Code']], left_on='ndc_code', right_on='NDC_Code')
df1['generic_drug_name'] = df1['Proprietary Name']
df1 = df1.drop(['NDC_Code', 'Proprietary Name'], axis=1)
return df1
#Question 4
def select_first_encounter(df):
'''
df: pandas dataframe, dataframe with all encounters
return:
first_encounter_df: pandas dataframe, dataframe with only the first encounter for a given patient
'''
first_encounter_df = df.copy()
first_encounter_df = first_encounter_df.sort_values('encounter_id')
first_encounter_df = (first_encounter_df.drop_duplicates(subset=['encounter_id'], keep='first').drop_duplicates(subset=['patient_nbr'], keep='first'))
return first_encounter_df
#Question 6
def patient_dataset_splitter(df, patient_key='patient_nbr'):
'''
df: pandas dataframe, input dataset that will be split
patient_key: string, column that is the patient id
return:
- train: pandas dataframe,
- validation: pandas dataframe,
- test: pandas dataframe,
'''
df = pd.DataFrame(df)
df = df.iloc[np.random.permutation(len(df))]
unique_values = df[patient_key].unique()
total_values = len(unique_values)
sample_size_60 = round(total_values * (0.6 ))
sample_size_80 = round(total_values * (0.8 ))
train = df[df[patient_key].isin(unique_values[:sample_size_60])].reset_index(drop=True)
validation = df[df[patient_key].isin(unique_values[sample_size_60:sample_size_80])].reset_index(drop=True)
test = df[df[patient_key].isin(unique_values[sample_size_80:])].reset_index(drop=True)
return train, validation, test
#Question 7
def create_tf_categorical_feature_cols(categorical_col_list,
vocab_dir='./diabetes_vocab/'):
'''
categorical_col_list: list, categorical field list that will be transformed with TF feature column
vocab_dir: string, the path where the vocabulary text files are located
return:
output_tf_list: list of TF feature columns
'''
output_tf_list = []
for c in categorical_col_list:
vocab_file_path = os.path.join(vocab_dir, c + "_vocab.txt")
'''
Which TF function allows you to read from a text file and create a categorical feature
You can use a pattern like this below...
tf_categorical_feature_column = tf.feature_column.......
'''
tf_categorical_feature_column = tf.feature_column.categorical_column_with_vocabulary_file(
key=c, vocabulary_file = vocab_file_path, num_oov_buckets=1)
tf_categorical_feature_column = tf.feature_column.indicator_column(tf_categorical_feature_column)
output_tf_list.append(tf_categorical_feature_column)
return output_tf_list
#Question 8
import functools
def normalize_numeric_with_zscore(col, mean, std):
'''
This function can be used in conjunction with the tf feature column for normalization
'''
return (col - mean)/std
def create_tf_numeric_feature(col, MEAN, STD, default_value=0):
'''
col: string, input numerical column name
MEAN: the mean for the column in the training data
STD: the standard deviation for the column in the training data
default_value: the value that will be used for imputing the field
return:
tf_numeric_feature: tf feature column representation of the input field
'''
normalizer = functools.partial(normalize_numeric_with_zscore, mean=MEAN, std=STD)
tf_numeric_feature = tf.feature_column.numeric_column(key=col, default_value = default_value, normalizer_fn=normalizer, dtype=tf.float64)
return tf_numeric_feature
#Question 9
def get_mean_std_from_preds(diabetes_yhat):
'''
diabetes_yhat: TF Probability prediction object
'''
m = '?'
s = '?'
return m, s
# Question 10
def get_student_binary_prediction(df, col):
'''
df: pandas dataframe prediction output dataframe
col: str, probability mean prediction field
return:
student_binary_prediction: pandas dataframe converting input to flattened numpy array and binary labels
'''
return student_binary_prediction
| 37 | 154 | 0.71469 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,200 | 0.479512 |
144fcd8882ebd8111a949df2336133b82d27c08c | 982 | py | Python | _resume/build.py | edublancas/edublancas.github.io | 55990126d2c9530f7317b9c75395b836c5fc5420 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | _resume/build.py | edublancas/edublancas.github.io | 55990126d2c9530f7317b9c75395b836c5fc5420 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | _resume/build.py | edublancas/edublancas.github.io | 55990126d2c9530f7317b9c75395b836c5fc5420 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2019-01-02T22:21:02.000Z | 2019-01-02T22:21:02.000Z | """
Build resume and cv from resume.md and cv.md
"""
import subprocess
import os.path
from jinja2 import Environment, FileSystemLoader
from datetime import date
def get_commit_hash():
    """Return the abbreviated hash of the current HEAD commit."""
    raw = subprocess.check_output('git show --oneline -s', shell=True)
    # First whitespace-separated token of the one-line summary is the hash.
    text = raw.decode('utf-8').replace('\n', '')
    return text.split(' ')[0]
# Render resume.md and cv.md (Jinja2 templates in this directory) into
# tmp_resume.md / tmp_cv.md, stamping each with today's date and the
# current git commit hash.
path = os.path.dirname(os.path.abspath(__file__))
env = Environment(loader=FileSystemLoader(path))
git_hash = get_commit_hash()
now = '{}. {}'.format(date.today().strftime('%b %d, %Y'), git_hash)
# this will remove the jinja2 tags
resume = env.get_template('resume.md').render(referral=True, now=now)
# this will fill the blocks in cv.md in the corresponding parts in resume.md
# also, do not include referral since cv contains all sections
cv = env.get_template('cv.md').render(referral=False, now=now)
# Write the rendered documents next to the templates.
with open(os.path.join(path, 'tmp_resume.md'), 'w') as f:
    f.write(resume)
with open(os.path.join(path, 'tmp_cv.md'), 'w') as f:
    f.write(cv)
| 28.057143 | 76 | 0.706721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 332 | 0.338086 |
145310a82264d9765ef6047c0373076d3bb1a072 | 511 | py | Python | setup.py | SUNET/aws-sns-message-validator | 2d613485ba7324e1f05b9327fa0fc327c25147ad | [
"MIT"
] | null | null | null | setup.py | SUNET/aws-sns-message-validator | 2d613485ba7324e1f05b9327fa0fc327c25147ad | [
"MIT"
] | null | null | null | setup.py | SUNET/aws-sns-message-validator | 2d613485ba7324e1f05b9327fa0fc327c25147ad | [
"MIT"
] | null | null | null | from datetime import datetime
from setuptools import setup, find_packages
# Runtime dependencies of the package.
DEPENDENCIES = [
    'requests',
    'cryptography',
]
# Files excluded from packaging (the example app is not part of the library).
EXCLUDED_PACKAGES = [
    'flask_example.py',
]
# Package metadata; forked from the upstream wlwg/sns-message-validator
# project (see the '+sunet' local version suffix).
setup(
    name='sns-message-validator',
    version='0.0.2+sunet',
    description='Validator for SNS messages.',
    author='https://github.com/wlwg',
    url='https://github.com/wlwg/sns-message-validator',
    python_requires='>=3.7',
    install_requires=DEPENDENCIES,
    packages=find_packages(exclude=EXCLUDED_PACKAGES),
)
| 22.217391 | 56 | 0.702544 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.363992 |
14541e0b1100f11493c9ef593f0ac473ab731bfc | 907 | py | Python | Util/nucleus_lib.py | cao13jf/CShaper | 8fdbd40cb67bab9bfc657c5be652fc3d05f9cd00 | [
"MIT"
] | 3 | 2021-02-19T07:21:20.000Z | 2021-09-11T10:04:58.000Z | Util/nucleus_lib.py | cao13jf/CShaper | 8fdbd40cb67bab9bfc657c5be652fc3d05f9cd00 | [
"MIT"
] | 2 | 2020-11-13T18:56:49.000Z | 2020-12-18T08:01:57.000Z | Util/nucleus_lib.py | cao13jf/CShaper | 8fdbd40cb67bab9bfc657c5be652fc3d05f9cd00 | [
"MIT"
] | 3 | 2020-12-10T03:12:30.000Z | 2021-06-01T15:07:07.000Z | '''
This library is used to incorporate nucleus image information into
cell-segmentation post-processing (e.g. separating cells from cavities).
'''
import numpy as np
def cell_prob_with_nucleus(cell, nucleus):
    '''
    Separate genuine cells from empty cavities using the nucleus channel.
    A labelled region whose nucleus intensities sum to zero is treated as
    a cavity: it is erased from the segmentation and flagged in the hole
    mask.
    :param cell: segmentation result with one integer label per region
                 (0 = background; mutated in place)
    :param nucleus: nucleus RawMemb image resized to match *cell*
    :return cell: segmentation with cavity regions zeroed out
    :return hole: uint8 mask marking the cavity pixels with 1
    '''
    region_labels = np.unique(cell).tolist()
    region_labels.remove(0)
    hole = np.zeros_like(cell, dtype=np.uint8)
    for region_label in region_labels:
        region = (cell == region_label)
        # A region containing no nucleus signal at all is an empty hole,
        # not a cell.
        if nucleus[region].sum() == 0:
            cell[region] = 0
            hole[region] = 1
    return cell, hole
| 31.275862 | 121 | 0.670342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 503 | 0.554576 |
14544423bd130c73df8f39e8904810b8c2efbccd | 5,677 | py | Python | api/src/wt/provider/db/models/statistics.py | sedlar/work-tracking | 78917ff8200829eb674142ce43b503d8e892d7eb | [
"BSD-2-Clause"
] | null | null | null | api/src/wt/provider/db/models/statistics.py | sedlar/work-tracking | 78917ff8200829eb674142ce43b503d8e892d7eb | [
"BSD-2-Clause"
] | null | null | null | api/src/wt/provider/db/models/statistics.py | sedlar/work-tracking | 78917ff8200829eb674142ce43b503d8e892d7eb | [
"BSD-2-Clause"
] | null | null | null | from collections import namedtuple
from typing import List
from decimal import Decimal
from sqlalchemy import select, func
from wt.common import Money, Currency
from wt.costs.expenditures import ExpenditureStatus
from wt.ids import EntityId, EntityType
from wt.provider.db import DbModel
from wt.provider.db.tables import (
OBJECTS_TRACKER_TABLE,
ENTITY_LINKS_TABLE,
ISSUES_TABLE,
PROJECTS_TABLE,
TIMESHEETS_TABLE,
EXPENDITURES_TABLE,
)
from wt.statistics import StatisticsModel, EntityStatistics
# Per-entity pricing inputs: an optional hour-rate override and the
# estimated duration, as loaded from the issues table.
EntityStatisticsData = namedtuple("EntityStatisticsData", ["hour_rate", "estimated_duration"])
class DbStatisticsModel(StatisticsModel, DbModel):
    """SQL-backed statistics provider.

    Aggregates per-entity estimates, burned time, and approved expenditure
    costs for issues within a project.
    """
    def get_project_ids(self, project_id: EntityId):
        """Return the ids of all non-project entities inside *project_id*."""
        query = select(
            [OBJECTS_TRACKER_TABLE.c.id]
        ).where(
            OBJECTS_TRACKER_TABLE.c.project_id == project_id.full_id
        ).where(
            OBJECTS_TRACKER_TABLE.c.type != EntityType.project.value
        )
        result = self._session.execute(query)
        return [
            EntityId(row["id"])
            for row
            in result
        ]
    def get_related_entities_ids(self, entity_id: EntityId):
        """Return ids of entities linked to *entity_id* via the links table."""
        query = select(
            [ENTITY_LINKS_TABLE.c.object_id]
        ).where(
            ENTITY_LINKS_TABLE.c.other_object_id == entity_id.full_id
        )
        result = self._session.execute(query)
        return [
            EntityId(row["object_id"])
            for row
            in result
        ]
    def get_entity_statistics(self, entity_ids: List[EntityId]):
        """Build an EntityStatistics for each id in *entity_ids*.

        All ids must belong to the same project (asserted below); the
        project hour rate is used whenever the entity has no override.
        """
        if not entity_ids:
            return []
        assert len({entity_id.project_id for entity_id in entity_ids}) == 1
        full_ids = [entity_id.full_id for entity_id in entity_ids]
        # All entities are from same project
        default_hour_rate = self._get_project_hour_rate(entity_ids[0].project_id)
        entity_data = self._get_entity_data(full_ids)
        burned_time = self._get_burned_time(full_ids)
        expenditure_costs = self._get_expenditure_costs(full_ids)
        entity_statistics = []
        for entity_id in entity_ids:
            # Entity-level hour rate overrides the project default.
            hour_rate = default_hour_rate
            if entity_data[entity_id].hour_rate:
                hour_rate = entity_data[entity_id].hour_rate
            entity_statistics.append(
                EntityStatistics(
                    estimated_duration=entity_data[entity_id].estimated_duration,
                    hour_rate=hour_rate,
                    # Entities with no timesheets/expenditures default to zero.
                    burned_duration=burned_time.get(entity_id, Decimal(0)),
                    expenditure_costs=expenditure_costs.get(
                        entity_id,
                        Money(
                            amount=Decimal(0),
                            currency=Currency.czk,
                        )
                    ),
                )
            )
        return entity_statistics
    def _get_entity_data(self, entity_ids: List[str]):
        """Load hour-rate override and estimate for each issue id."""
        query = select(
            [
                ISSUES_TABLE.c.hour_rate_amount,
                ISSUES_TABLE.c.hour_rate_currency,
                ISSUES_TABLE.c.estimated_duration,
                ISSUES_TABLE.c.object_id,
            ]
        ).where(
            ISSUES_TABLE.c.object_id.in_(entity_ids)
        )
        result = self._session.execute(query)
        entity_data = {}
        for row in result:
            amount = row["hour_rate_amount"]
            currency = row["hour_rate_currency"]
            # The override is only meaningful when both parts are set.
            hour_rate = None
            if amount is not None and currency:
                hour_rate = Money(
                    amount=amount,
                    currency=currency,
                )
            entity_data[EntityId(row["object_id"])] = EntityStatisticsData(
                hour_rate=hour_rate,
                estimated_duration=row["estimated_duration"]
            )
        return entity_data
    def _get_burned_time(self, entity_ids: List[str]):
        """Sum timesheet durations per entity id."""
        query = select(
            [
                TIMESHEETS_TABLE.c.parent_id,
                func.sum(TIMESHEETS_TABLE.c.duration).label("burned_duration"),
            ]
        ).group_by(
            TIMESHEETS_TABLE.c.parent_id
        ).where(
            TIMESHEETS_TABLE.c.parent_id.in_(entity_ids)
        )
        result = self._session.execute(query)
        return {
            EntityId(row["parent_id"]): row["burned_duration"]
            for row
            in result
        }
    def _get_expenditure_costs(self, entity_ids: List[str]):
        """Sum approved expenditure costs per entity id.

        NOTE(review): sums raw amounts and labels the total CZK — this
        assumes all expenditure rows are stored in CZK; confirm.
        """
        query = select(
            [
                EXPENDITURES_TABLE.c.parent_id,
                func.sum(EXPENDITURES_TABLE.c.cost_amount).label("expenditure_costs"),
            ]
        ).group_by(
            EXPENDITURES_TABLE.c.parent_id
        ).where(
            EXPENDITURES_TABLE.c.parent_id.in_(entity_ids)
        ).where(
            EXPENDITURES_TABLE.c.status == ExpenditureStatus.approved.value
        )
        result = self._session.execute(query)
        return {
            EntityId(row["parent_id"]): Money(
                amount=row["expenditure_costs"],
                currency=Currency.czk,
            )
            for row
            in result
        }
    def _get_project_hour_rate(self, project_id: str):
        """Return the project's default hour rate as Money."""
        query = select(
            [
                PROJECTS_TABLE.c.hour_rate_amount,
                PROJECTS_TABLE.c.hour_rate_currency,
            ]
        ).where(
            PROJECTS_TABLE.c.project_id == project_id
        )
        row = self._session.execute(query).fetchone()
        return Money(
            amount=row["hour_rate_amount"],
            currency=Currency(row["hour_rate_currency"]),
        )
| 33.591716 | 94 | 0.578651 | 5,055 | 0.890435 | 0 | 0 | 0 | 0 | 0 | 0 | 305 | 0.053726 |
14554f3f2e1820159b31b772eade8453bd740328 | 6,232 | py | Python | cp_mri/masks.py | vladpopovici/CP_mri | a198a3f65737be26611713c1d64125de888cc25d | [
"MIT"
] | null | null | null | cp_mri/masks.py | vladpopovici/CP_mri | a198a3f65737be26611713c1d64125de888cc25d | [
"MIT"
] | null | null | null | cp_mri/masks.py | vladpopovici/CP_mri | a198a3f65737be26611713c1d64125de888cc25d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# --------------------------------------------------------------------
# Copyright (c) 2022 Vlad Popovici <popovici@bioxlab.org>
#
# Licensed under the MIT License. See LICENSE file in root folder.
# --------------------------------------------------------------------
# MASKS are single channel images (arrays) storing information about a
# corresponding pixel in the image. A binary mask, for example, indicates
# that the pixels corresponding to "True" values in the mask have a common
# property (e.g. they belong to the same object).
# Here we use PyVIPS for creating and storing large masks (potentially
# larger than available RAM). Also, pyramids are generated from a given
# mask to match the levels of the corresponding MRI.
#
# Usage philosophy: a mask is created to match the image shape (width and
# height) of the image currently processed. This mask is stored as a temporary
# ZARR array, in <tmp_folder>. Upon completion of the processing, the user must
# request that the mask is written to its final destination: either a large
# TIFF file or a ZARR pyramid. The temporary file is automatically deleted
# when the mask is closed (mask object expires).
import numpy as np
from pathlib import Path
import shutil
import zarr
from hashlib import md5
from time import localtime
import tifffile as tif
from skimage.transform import resize
class BinaryMask(object):
    """Large binary mask backed by a temporary on-disk ZARR array.

    The mask is created to match an image's width/height and stored in a
    temporary ZARR array (deleted when the object is destroyed).  The
    final result can be exported as a bigtiff image or a multi-resolution
    ZARR pyramid.
    """
    def __init__(self, shape: dict[str, int],
                 dtype=np.uint8,
                 tmp_folder: str='/tmp',
                 tmp_prefix: str='tmp_binarymask_'):
        """Initialize a binary mask.
        Args:
            shape: (dict) {'width', 'height'} of the mask
            dtype: (np.dtype) type of values stored
            tmp_folder: (str) path where the temp mask is stored
            tmp_prefix: (str) prefix for the temp mask
        """
        self._shape = {'width' : shape['width'], 'height' : shape['height']}
        self._dtype = dtype
        # Maximum representable value of the dtype; used as "white"/set.
        self._white = np.iinfo(self._dtype).max
        if not Path(tmp_folder).exists():
            tmp_folder = '/tmp'
        # Pseudo-unique temp name derived from the current time (md5 used
        # for uniqueness only, not security).
        random_file_name = tmp_prefix + md5(str(localtime()).encode('utf-8')).hexdigest() + '.zarr'
        self._mask_storage_path = Path(tmp_folder) / Path(random_file_name)
        # Chunked so masks larger than RAM remain workable.
        chunks = (min(4096, self._shape['height']), min(4096, self._shape['width']))
        self._mask_storage = zarr.open(str(self._mask_storage_path), mode='w',
                                       shape=(self._shape['height'], self._shape['width']),
                                       chunks=chunks,
                                       dtype=self._dtype)
        self._mask_storage[:] = 0
        return
    def __del__(self):
        # Close backing store and remove the temp .zarr directory.
        # NOTE(review): assumes the store exposes .close() and that __del__
        # runs reliably at interpreter shutdown — confirm for the zarr
        # version in use.
        self._mask_storage.store.close()
        if self._mask_storage_path.exists():
            shutil.rmtree(self._mask_storage_path)
        return
    def get_temp_path(self) -> Path:
        """Return the path of the temporary backing .zarr store."""
        return self._mask_storage_path
    @property
    def mask(self) -> zarr.Array:
        # Direct access to the backing array (writable).
        return self._mask_storage
    def to_image(self, dst_path: Path):
        """Write the mask as a bigtiff image (0 = black, set = white)."""
        self._mask_storage.set_mask_selection(self._mask_storage[:] > 0,
                                              self._white)  # 0 - black, everything else - white
        tif.imwrite(dst_path.with_suffix('.tiff'), self._mask_storage,
                    bigtiff=True, photometric='minisblack',
                    compression='zlib', metadata={'axes': 'CYX'})
        return
    def to_pyramid(self, dst_path: Path,
                   current_level: int,
                   max_level: int,
                   min_level: int=0
                   ):
        """Generate a multi-resolution pyramid from the current mask.
        The pyramid will have the current mask as its <current_level> image, all
        other levels being generated via down-/up-sampling from it. The number
        of levels to be generated is controlled via <min_level> (non-negative) and
        <max_level>.
        Args:
            dst_path: (Path) where to save the pyramid.zarr dataset
            current_level: (int) the level in the pyramid represented by the mask
            max_level: (int) maximum level (smallest image) to be generated
            min_level: (int) minimum level (largest image) to be generated
        """
        min_level = max(min_level, 0)
        # now, find a max_level that does not shrink the image below 128px in either
        # width or height
        max_level = max(current_level, max_level)
        d = min(self._mask_storage.shape)
        l = max_level - current_level
        while d // 2**l < 128 and l >= 0:
            l -= 1
        if l < 0:
            l = 0
        max_level = current_level + l
        # NOTE(review): zarr groups are used as a context manager here;
        # confirm the installed zarr version supports `with` on groups.
        with zarr.open_group(str(dst_path.with_suffix('.zarr')), mode='w') as zroot:
            pyramid_info = []
            # up-sampling:
            # NOTE(review): range(min_level, max_level) never generates the
            # max_level image itself — looks like an off-by-one (should
            # probably be max_level + 1); confirm intended behavior.
            for level in range(min_level, max_level):
                # Levels below current_level are upsampled (factor > 1),
                # levels above are downsampled (factor < 1).
                factor = 2**(current_level - level)
                mask_shape = (int(factor * self._mask_storage.shape[0]),
                              int(factor * self._mask_storage.shape[1]))
                chunks = (min(4096, mask_shape[0]), min(4096, mask_shape[1]))
                new_mask = zroot.create_dataset(str(level),
                                                shape=mask_shape,
                                                chunks=chunks,
                                                dtype=self._mask_storage.dtype)
                if level == current_level:
                    # just copy:
                    new_mask[:] = self._mask_storage[:]
                else:
                    # order=0 (nearest neighbor) keeps the mask binary.
                    new_mask[:] = resize(self._mask_storage[:], mask_shape, order=0,
                                         mode='reflect', anti_aliasing=False)
                new_mask.set_mask_selection(new_mask[:] > 0, self._white)
                pyramid_info.append({
                    'level': level,
                    'width': mask_shape[1],
                    'height' : mask_shape[0],
                    'downsample_factor': 2**(level - min_level)
                })
            zroot.attrs['pyramid'] = pyramid_info
            zroot.attrs['pyramid_desc'] = 'generated for binary mask'
        return
| 41.825503 | 99 | 0.56595 | 4,844 | 0.777279 | 0 | 0 | 77 | 0.012356 | 0 | 0 | 2,527 | 0.405488 |
14557dc9517de3207abf36fd3533f0849a533784 | 25,259 | py | Python | src/gam/gapi/reports.py | GAM-team/GAM | b45ad5dcafc217690afc3c2e7086c1895f036172 | [
"Apache-2.0"
] | 102 | 2022-01-15T22:08:37.000Z | 2022-03-31T16:02:20.000Z | src/gam/gapi/reports.py | GAM-team/GAM | b45ad5dcafc217690afc3c2e7086c1895f036172 | [
"Apache-2.0"
] | 29 | 2022-01-14T20:16:51.000Z | 2022-03-25T15:56:33.000Z | src/gam/gapi/reports.py | GAM-team/GAM | b45ad5dcafc217690afc3c2e7086c1895f036172 | [
"Apache-2.0"
] | 30 | 2022-01-14T22:18:10.000Z | 2022-03-31T17:31:40.000Z | import calendar
import datetime
import re
import sys
from dateutil.relativedelta import relativedelta
import gam
from gam.var import *
from gam import controlflow
from gam import display
from gam import gapi
from gam import utils
from gam.gapi.directory import orgunits as gapi_directory_orgunits
def build():
    """Return a Google API service object for the Admin SDK Reports API."""
    return gam.buildGAPIObject('reports')
# Maps user-supplied report aliases (lowercased, underscores stripped) to
# the canonical report/application names used by the Reports API.
REPORT_CHOICE_MAP = {
    'access': 'access_transparency',
    'accesstransparency': 'access_transparency',
    'calendars': 'calendar',
    'customers': 'customer',
    'doc': 'drive',
    'docs': 'drive',
    'domain': 'customer',
    'enterprisegroups': 'groups_enterprise',
    'google+': 'gplus',
    'group': 'groups',
    'groupsenterprise': 'groups_enterprise',
    'hangoutsmeet': 'meet',
    'logins': 'login',
    'oauthtoken': 'token',
    'tokens': 'token',
    'usage': 'usage',
    'usageparameters': 'usageparameters',
    'users': 'user',
    'useraccounts': 'user_accounts',
}
def showUsageParameters():
    """gam report usageparameters <user|customer> [todrive]

    Discovers which usage-report parameter names are available for the
    user or customer usage report and writes them as a one-column CSV.
    Walks back one day at a time until a date with full data is found.
    """
    rep = build()
    throw_reasons = [
        gapi.errors.ErrorReason.INVALID, gapi.errors.ErrorReason.BAD_REQUEST
    ]
    todrive = False
    if len(sys.argv) == 3:
        controlflow.missing_argument_exit('user or customer',
                                          'report usageparameters')
    report = sys.argv[3].lower()
    titles = ['parameter']
    # Pick the endpoint matching the requested report type.
    if report == 'customer':
        endpoint = rep.customerUsageReports()
        kwargs = {}
    elif report == 'user':
        endpoint = rep.userUsageReport()
        kwargs = {'userKey': gam._get_admin_email()}
    else:
        controlflow.expected_argument_exit('usageparameters',
                                           ['user', 'customer'], report)
    customerId = GC_Values[GC_CUSTOMER_ID]
    if customerId == MY_CUSTOMER:
        customerId = None
    # Start probing from today's date.
    tryDate = datetime.date.today().strftime(YYYYMMDD_FORMAT)
    all_parameters = set()
    # Parse remaining command-line arguments.
    i = 4
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg == 'todrive':
            todrive = True
            i += 1
        else:
            controlflow.invalid_argument_exit(sys.argv[i],
                                              'gam report usageparameters')
    fullDataRequired = ['all']
    # Retry earlier dates until full data is available for some date.
    while True:
        try:
            result = gapi.call(endpoint,
                               'get',
                               throw_reasons=throw_reasons,
                               date=tryDate,
                               customerId=customerId,
                               fields='warnings,usageReports(parameters(name))',
                               **kwargs)
            warnings = result.get('warnings', [])
            usage = result.get('usageReports')
            has_reports = bool(usage)
            fullData, tryDate = _check_full_data_available(
                warnings, tryDate, fullDataRequired, has_reports)
            if fullData < 0:
                print('No usage parameters available.')
                sys.exit(1)
            if has_reports:
                # Collect parameter names from the first usage report.
                for parameter in usage[0]['parameters']:
                    name = parameter.get('name')
                    if name:
                        all_parameters.add(name)
            if fullData == 1:
                break
        except gapi.errors.GapiInvalidError as e:
            # The API told us the last available date; retry with it.
            tryDate = _adjust_date(str(e))
    csvRows = []
    for parameter in sorted(all_parameters):
        csvRows.append({'parameter': parameter})
    display.write_csv_file(csvRows, titles,
                           f'{report.capitalize()} Report Usage Parameters',
                           todrive)
# Value-carrying keys a report parameter dict may use for scalar values;
# checked in order when flattening parameters into CSV cells.
REPORTS_PARAMETERS_SIMPLE_TYPES = [
    'intValue', 'boolValue', 'datetimeValue', 'stringValue'
]
def showUsage():
    """gam report usage <user|customer> [options]

    Collects usage reports for each date in [startdate, enddate]
    (default: last month through today), optionally skipping specific
    dates or weekdays, and writes one CSV row per entity per date.
    """
    rep = build()
    throw_reasons = [
        gapi.errors.ErrorReason.INVALID, gapi.errors.ErrorReason.BAD_REQUEST
    ]
    todrive = False
    if len(sys.argv) == 3:
        controlflow.missing_argument_exit('user or customer', 'report usage')
    report = sys.argv[3].lower()
    titles = ['date']
    # Pick the endpoint matching the requested report type.
    if report == 'customer':
        endpoint = rep.customerUsageReports()
        kwargs = [{}]
    elif report == 'user':
        endpoint = rep.userUsageReport()
        kwargs = [{'userKey': 'all'}]
        titles.append('user')
    else:
        controlflow.expected_argument_exit('usage', ['user', 'customer'],
                                           report)
    customerId = GC_Values[GC_CUSTOMER_ID]
    if customerId == MY_CUSTOMER:
        customerId = None
    parameters = []
    start_date = end_date = orgUnitId = None
    skip_day_numbers = []
    skip_dates = set()
    one_day = datetime.timedelta(days=1)
    # Parse remaining command-line arguments.
    i = 4
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg == 'startdate':
            start_date = utils.get_yyyymmdd(sys.argv[i + 1],
                                            returnDateTime=True)
            i += 2
        elif myarg == 'enddate':
            end_date = utils.get_yyyymmdd(sys.argv[i + 1], returnDateTime=True)
            i += 2
        elif myarg == 'todrive':
            todrive = True
            i += 1
        elif myarg in ['fields', 'parameters']:
            parameters = sys.argv[i + 1].split(',')
            i += 2
        elif myarg == 'skipdates':
            # Accepts single dates and date:date inclusive ranges.
            for skip in sys.argv[i + 1].split(','):
                if skip.find(':') == -1:
                    skip_dates.add(utils.get_yyyymmdd(skip,
                                                      returnDateTime=True))
                else:
                    skip_start, skip_end = skip.split(':', 1)
                    skip_start = utils.get_yyyymmdd(skip_start,
                                                    returnDateTime=True)
                    skip_end = utils.get_yyyymmdd(skip_end, returnDateTime=True)
                    while skip_start <= skip_end:
                        skip_dates.add(skip_start)
                        skip_start += one_day
            i += 2
        elif myarg == 'skipdaysofweek':
            # Weekday abbreviations (mon, tue, ...) -> weekday() numbers.
            skipdaynames = sys.argv[i + 1].split(',')
            dow = [d.lower() for d in calendar.day_abbr]
            skip_day_numbers = [dow.index(d) for d in skipdaynames if d in dow]
            i += 2
        elif report == 'user' and myarg in ['orgunit', 'org', 'ou']:
            _, orgUnitId = gapi_directory_orgunits.getOrgUnitId(sys.argv[i + 1])
            i += 2
        elif report == 'user' and myarg in usergroup_types:
            # One API call per user in the selected group/collection.
            users = gam.getUsersToModify(myarg, sys.argv[i + 1])
            kwargs = [{'userKey': user} for user in users]
            i += 2
        else:
            controlflow.invalid_argument_exit(sys.argv[i],
                                              f'gam report usage {report}')
    if parameters:
        titles.extend(parameters)
        parameters = ','.join(parameters)
    else:
        parameters = None
    # Default range: the month ending today.
    if not end_date:
        end_date = datetime.datetime.now()
    if not start_date:
        start_date = end_date + relativedelta(months=-1)
    if orgUnitId:
        for kw in kwargs:
            kw['orgUnitID'] = orgUnitId
    usage_on_date = start_date
    start_date = usage_on_date.strftime(YYYYMMDD_FORMAT)
    usage_end_date = end_date
    end_date = end_date.strftime(YYYYMMDD_FORMAT)
    start_use_date = end_use_date = None
    csvRows = []
    # Walk one day at a time through the requested range.
    while usage_on_date <= usage_end_date:
        if usage_on_date.weekday() in skip_day_numbers or \
            usage_on_date in skip_dates:
            usage_on_date += one_day
            continue
        use_date = usage_on_date.strftime(YYYYMMDD_FORMAT)
        usage_on_date += one_day
        try:
            for kwarg in kwargs:
                try:
                    usage = gapi.get_all_pages(endpoint,
                                               'get',
                                               'usageReports',
                                               throw_reasons=throw_reasons,
                                               customerId=customerId,
                                               date=use_date,
                                               parameters=parameters,
                                               **kwarg)
                except gapi.errors.GapiBadRequestError:
                    continue
                for entity in usage:
                    row = {'date': use_date}
                    if 'userEmail' in entity['entity']:
                        row['user'] = entity['entity']['userEmail']
                    for item in entity.get('parameters', []):
                        if 'name' not in item:
                            continue
                        name = item['name']
                        # ChromeOS version distribution expands into one
                        # column per version number.
                        if name == 'cros:device_version_distribution':
                            for cros_ver in item['msgValue']:
                                v = cros_ver['version_number']
                                column_name = f'cros:num_devices_chrome_{v}'
                                if column_name not in titles:
                                    titles.append(column_name)
                                row[column_name] = cros_ver['num_devices']
                        else:
                            if not name in titles:
                                titles.append(name)
                            # Use the first scalar value key present.
                            for ptype in REPORTS_PARAMETERS_SIMPLE_TYPES:
                                if ptype in item:
                                    row[name] = item[ptype]
                                    break
                            else:
                                row[name] = ''
                    if not start_use_date:
                        start_use_date = use_date
                    end_use_date = use_date
                    csvRows.append(row)
        except gapi.errors.GapiInvalidError as e:
            # Data not yet available past this date; stop collecting.
            display.print_warning(str(e))
            break
    if start_use_date:
        report_name = f'{report.capitalize()} Usage Report - {start_use_date}:{end_use_date}'
    else:
        report_name = f'{report.capitalize()} Usage Report - {start_date}:{end_date} - No Data'
    display.write_csv_file(csvRows, titles, report_name, todrive)
def showReport():
    """gam report <report> [options]

    Entry point for all 'gam report' subcommands.  Dispatches usage /
    usageparameters to their dedicated handlers, then produces one of:
    a per-user usage CSV, a customer-wide usage CSV, or an activity
    (audit log) CSV for any of the API's application names.
    """
    rep = build()
    throw_reasons = [gapi.errors.ErrorReason.INVALID]
    report = sys.argv[2].lower()
    report = REPORT_CHOICE_MAP.get(report.replace('_', ''), report)
    if report == 'usage':
        showUsage()
        return
    if report == 'usageparameters':
        showUsageParameters()
        return
    # Valid application names come from the discovery document, plus the
    # two usage-report pseudo-reports handled below.
    valid_apps = gapi.get_enum_values_minus_unspecified(
        rep._rootDesc['resources']['activities']['methods']['list']
        ['parameters']['applicationName']['enum']) + ['customer', 'user']
    if report not in valid_apps:
        controlflow.expected_argument_exit('report',
                                           ', '.join(sorted(valid_apps)),
                                           report)
    customerId = GC_Values[GC_CUSTOMER_ID]
    if customerId == MY_CUSTOMER:
        customerId = None
    filters = parameters = actorIpAddress = groupIdFilter = startTime = endTime = eventName = orgUnitId = None
    tryDate = datetime.date.today().strftime(YYYYMMDD_FORMAT)
    to_drive = False
    userKey = 'all'
    fullDataRequired = None
    # Parse remaining command-line arguments.
    i = 3
    while i < len(sys.argv):
        myarg = sys.argv[i].lower()
        if myarg == 'date':
            tryDate = utils.get_yyyymmdd(sys.argv[i + 1])
            i += 2
        elif myarg in ['orgunit', 'org', 'ou']:
            _, orgUnitId = gapi_directory_orgunits.getOrgUnitId(sys.argv[i + 1])
            i += 2
        elif myarg == 'fulldatarequired':
            fullDataRequired = []
            fdr = sys.argv[i + 1].lower()
            if fdr and fdr == 'all':
                fullDataRequired = 'all'
            else:
                fullDataRequired = fdr.replace(',', ' ').split()
            i += 2
        elif myarg == 'start':
            startTime = utils.get_time_or_delta_from_now(sys.argv[i + 1])
            i += 2
        elif myarg == 'end':
            endTime = utils.get_time_or_delta_from_now(sys.argv[i + 1])
            i += 2
        elif myarg == 'event':
            eventName = sys.argv[i + 1]
            i += 2
        elif myarg == 'user':
            userKey = sys.argv[i + 1].lower()
            if userKey != 'all':
                userKey = gam.normalizeEmailAddressOrUID(sys.argv[i + 1])
            i += 2
        elif myarg in ['filter', 'filters']:
            filters = sys.argv[i + 1]
            i += 2
        elif myarg in ['fields', 'parameters']:
            parameters = sys.argv[i + 1]
            i += 2
        elif myarg == 'ip':
            actorIpAddress = sys.argv[i + 1]
            i += 2
        elif myarg == 'groupidfilter':
            groupIdFilter = sys.argv[i + 1]
            i += 2
        elif myarg == 'todrive':
            to_drive = True
            i += 1
        else:
            controlflow.invalid_argument_exit(sys.argv[i], 'gam report')
    if report == 'user':
        # Probe with maxResults=1 until a date with full data is found,
        # then fetch all pages for that date.
        while True:
            try:
                one_page = gapi.call(rep.userUsageReport(),
                                     'get',
                                     throw_reasons=throw_reasons,
                                     date=tryDate,
                                     userKey=userKey,
                                     customerId=customerId,
                                     orgUnitID=orgUnitId,
                                     fields='warnings,usageReports',
                                     maxResults=1)
                warnings = one_page.get('warnings', [])
                has_reports = bool(one_page.get('usageReports'))
                fullData, tryDate = _check_full_data_available(
                    warnings, tryDate, fullDataRequired, has_reports)
                if fullData < 0:
                    print('No user report available.')
                    sys.exit(1)
                if fullData == 0:
                    continue
                page_message = gapi.got_total_items_msg('Users', '...\n')
                usage = gapi.get_all_pages(rep.userUsageReport(),
                                           'get',
                                           'usageReports',
                                           page_message=page_message,
                                           throw_reasons=throw_reasons,
                                           date=tryDate,
                                           userKey=userKey,
                                           customerId=customerId,
                                           orgUnitID=orgUnitId,
                                           filters=filters,
                                           parameters=parameters)
                break
            except gapi.errors.GapiInvalidError as e:
                tryDate = _adjust_date(str(e))
        if not usage:
            print('No user report available.')
            sys.exit(1)
        # One row per user; columns discovered from parameter names.
        titles = ['email', 'date']
        csvRows = []
        for user_report in usage:
            if 'entity' not in user_report:
                continue
            row = {'email': user_report['entity']['userEmail'], 'date': tryDate}
            for item in user_report.get('parameters', []):
                if 'name' not in item:
                    continue
                name = item['name']
                if not name in titles:
                    titles.append(name)
                # Use the first scalar value key present.
                for ptype in REPORTS_PARAMETERS_SIMPLE_TYPES:
                    if ptype in item:
                        row[name] = item[ptype]
                        break
                else:
                    row[name] = ''
            csvRows.append(row)
        display.write_csv_file(csvRows, titles, f'User Reports - {tryDate}',
                               to_drive)
    elif report == 'customer':
        # Same probing strategy as the user report, customer-wide.
        while True:
            try:
                first_page = gapi.call(rep.customerUsageReports(),
                                       'get',
                                       throw_reasons=throw_reasons,
                                       customerId=customerId,
                                       date=tryDate,
                                       fields='warnings,usageReports')
                warnings = first_page.get('warnings', [])
                has_reports = bool(first_page.get('usageReports'))
                fullData, tryDate = _check_full_data_available(
                    warnings, tryDate, fullDataRequired, has_reports)
                if fullData < 0:
                    print('No customer report available.')
                    sys.exit(1)
                if fullData == 0:
                    continue
                usage = gapi.get_all_pages(rep.customerUsageReports(),
                                           'get',
                                           'usageReports',
                                           throw_reasons=throw_reasons,
                                           customerId=customerId,
                                           date=tryDate,
                                           parameters=parameters)
                break
            except gapi.errors.GapiInvalidError as e:
                tryDate = _adjust_date(str(e))
        if not usage:
            print('No customer report available.')
            sys.exit(1)
        # One name/value row per parameter; authorized apps collected
        # separately and appended at the end.
        titles = ['name', 'value', 'client_id']
        csvRows = []
        auth_apps = list()
        for item in usage[0]['parameters']:
            if 'name' not in item:
                continue
            name = item['name']
            if 'intValue' in item:
                value = item['intValue']
            elif 'msgValue' in item:
                if name == 'accounts:authorized_apps':
                    for subitem in item['msgValue']:
                        app = {}
                        for an_item in subitem:
                            if an_item == 'client_name':
                                app['name'] = 'App: ' + \
                                    subitem[an_item].replace('\n', '\\n')
                            elif an_item == 'num_users':
                                app['value'] = f'{subitem[an_item]} users'
                            elif an_item == 'client_id':
                                app['client_id'] = subitem[an_item]
                        auth_apps.append(app)
                    continue
                # Flatten structured values into 'value:count' pairs.
                values = []
                for subitem in item['msgValue']:
                    if 'count' in subitem:
                        mycount = myvalue = None
                        for key, value in list(subitem.items()):
                            if key == 'count':
                                mycount = value
                            else:
                                myvalue = value
                            if mycount and myvalue:
                                values.append(f'{myvalue}:{mycount}')
                        value = ' '.join(values)
                    elif 'version_number' in subitem \
                         and 'num_devices' in subitem:
                        values.append(f'{subitem["version_number"]}:'
                                      f'{subitem["num_devices"]}')
                    else:
                        continue
                    value = ' '.join(sorted(values, reverse=True))
            csvRows.append({'name': name, 'value': value})
        for app in auth_apps:  # put apps at bottom
            csvRows.append(app)
        display.write_csv_file(csvRows,
                               titles,
                               f'Customer Report - {tryDate}',
                               todrive=to_drive)
    else:
        # Activity (audit log) report for the chosen application.
        page_message = gapi.got_total_items_msg('Activities', '...\n')
        activities = gapi.get_all_pages(rep.activities(),
                                        'list',
                                        'items',
                                        page_message=page_message,
                                        applicationName=report,
                                        userKey=userKey,
                                        customerId=customerId,
                                        actorIpAddress=actorIpAddress,
                                        startTime=startTime,
                                        endTime=endTime,
                                        eventName=eventName,
                                        filters=filters,
                                        orgUnitID=orgUnitId,
                                        groupIdFilter=groupIdFilter)
        if activities:
            titles = ['name']
            csvRows = []
            for activity in activities:
                events = activity['events']
                del activity['events']
                activity_row = utils.flatten_json(activity)
                purge_parameters = True
                for event in events:
                    # Lift each parameter into a top-level event key,
                    # normalizing the API's typed value wrappers.
                    for item in event.get('parameters', []):
                        if set(item) == {'value', 'name'}:
                            event[item['name']] = item['value']
                        elif set(item) == {'intValue', 'name'}:
                            if item['name'] in ['start_time', 'end_time']:
                                val = item.get('intValue')
                                if val is not None:
                                    val = int(val)
                                    if val >= 62135683200:
                                        # Convert epoch-since-year-1 values
                                        # to ISO timestamps.
                                        event[item['name']] = \
                                            datetime.datetime.fromtimestamp(
                                                val-62135683200).isoformat()
                            else:
                                event[item['name']] = item['intValue']
                        elif set(item) == {'boolValue', 'name'}:
                            event[item['name']] = item['boolValue']
                        elif set(item) == {'multiValue', 'name'}:
                            event[item['name']] = ' '.join(item['multiValue'])
                        elif item['name'] == 'scope_data':
                            parts = {}
                            for message in item['multiMessageValue']:
                                for mess in message['parameter']:
                                    value = mess.get(
                                        'value',
                                        ' '.join(mess.get('multiValue', [])))
                                    parts[mess['name']] = parts.get(
                                        mess['name'], []) + [value]
                            for part, v in parts.items():
                                if part == 'scope_name':
                                    part = 'scope'
                                event[part] = ' '.join(v)
                        else:
                            purge_parameters = False
                    if purge_parameters:
                        event.pop('parameters', None)
                    row = utils.flatten_json(event)
                    row.update(activity_row)
                    for item in row:
                        if item not in titles:
                            titles.append(item)
                    csvRows.append(row)
            display.sort_csv_titles([
                'name',
            ], titles)
            display.write_csv_file(csvRows, titles,
                                   f'{report.capitalize()} Activity Report',
                                   to_drive)
def _adjust_date(errMsg):
match_date = re.match(
'Data for dates later than (.*) is not yet '
'available. Please check back later', errMsg)
if not match_date:
match_date = re.match('Start date can not be later than (.*)', errMsg)
if not match_date:
controlflow.system_error_exit(4, errMsg)
return str(match_date.group(1))
def _check_full_data_available(warnings, tryDate, fullDataRequired,
                               has_reports):
    """Decide whether full report data exists for tryDate.

    Returns:
      (1, tryDate)      full data is available;
      (0, earlier_date) caller should retry one day earlier;
      (-1, tryDate)     required data will never be available.
    """
    day = datetime.timedelta(days=1)
    probe = datetime.datetime.strptime(tryDate, YYYYMMDD_FORMAT)
    # Without at least one usageReport, step back one day and retry.
    if not has_reports:
        earlier = probe - day
        return (0, earlier.strftime(YYYYMMDD_FORMAT))
    for warning in warnings:
        code = warning['code']
        if code == 'PARTIAL_DATA_AVAILABLE':
            for item in warning['data']:
                # 'docs' partial data is tolerated.
                if item['key'] != 'application' or item['value'] == 'docs':
                    continue
                needed = fullDataRequired is not None and \
                    (fullDataRequired == 'all' or
                     item['value'] in fullDataRequired)
                if needed:
                    earlier = probe - day
                    return (0, earlier.strftime(YYYYMMDD_FORMAT))
        elif code == 'DATA_NOT_AVAILABLE':
            for item in warning['data']:
                if item['key'] != 'application' or item['value'] == 'docs':
                    continue
                if not fullDataRequired or item['value'] in fullDataRequired:
                    return (-1, tryDate)
    return (1, tryDate)
| 42.811864 | 110 | 0.458926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,275 | 0.129657 |
1456065316091e9651e32d09311e3ffec143cd48 | 28 | py | Python | advanced/part13-17_asteroids/src/main.py | Hannah-Abi/python-pro-21 | 2ce32c4bf118054329d19afdf83c50561be1ada8 | [
"MIT"
] | null | null | null | advanced/part13-17_asteroids/src/main.py | Hannah-Abi/python-pro-21 | 2ce32c4bf118054329d19afdf83c50561be1ada8 | [
"MIT"
] | null | null | null | advanced/part13-17_asteroids/src/main.py | Hannah-Abi/python-pro-21 | 2ce32c4bf118054329d19afdf83c50561be1ada8 | [
"MIT"
] | null | null | null | # WRITE YOUR SOLUTION HERE:
| 14 | 27 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.964286 |
14560849915561883889885a3454e4e8c7afc839 | 286 | py | Python | example.py | davehenton/tf-lyrics | 6c2a397df22acdf41a63012aacfcdc7b7eaf1302 | [
"MIT"
] | null | null | null | example.py | davehenton/tf-lyrics | 6c2a397df22acdf41a63012aacfcdc7b7eaf1302 | [
"MIT"
] | null | null | null | example.py | davehenton/tf-lyrics | 6c2a397df22acdf41a63012aacfcdc7b7eaf1302 | [
"MIT"
] | null | null | null | from tflyrics import Poet, LyricsGenerator
# Example: train a Poet model on lyrics from a few artists, then sample
# a 1000-character continuation of the prompt 'Hey '.
artists = ['Bob Dylan', 'Tim Buckley', 'The Beatles']
gen = LyricsGenerator(artists, per_artist=5)
ds = gen.as_dataset(batch_size=4)
p = Poet()
p.train_on(ds, n_epochs=10)
poem = p.generate(start_string='Hey ', n_gen_chars=1000)
print(poem)
| 26 | 56 | 0.741259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.15035 |
1456fcb0edd89c366e30cb0c8a0ffaa3820a7828 | 762 | py | Python | netvisor/schemas/companies/list.py | fastmonkeys/netvisor.py | 1555e06439d69dc7515ebe0ce3dc061c609bc1a1 | [
"MIT"
] | 3 | 2015-08-26T10:08:03.000Z | 2022-03-16T09:31:52.000Z | netvisor/schemas/companies/list.py | fastmonkeys/netvisor.py | 1555e06439d69dc7515ebe0ce3dc061c609bc1a1 | [
"MIT"
] | 6 | 2015-09-10T08:32:21.000Z | 2016-09-19T14:32:07.000Z | netvisor/schemas/companies/list.py | fastmonkeys/netvisor.py | 1555e06439d69dc7515ebe0ce3dc061c609bc1a1 | [
"MIT"
] | 7 | 2015-09-10T08:23:04.000Z | 2019-08-28T17:39:08.000Z | # -*- coding: utf-8 -*-
"""
netvisor.schemas.companies.list
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2013-2016 by Fast Monkeys Oy.
:license: MIT, see LICENSE for more details.
"""
from marshmallow import Schema, fields, post_load
from ..fields import Boolean, List
class CompanySchema(Schema):
    """Deserializes a single company record from the Netvisor API."""
    id = fields.Integer()
    name = fields.String()
    finnish_organization_identifier = fields.String(allow_none=True)
    # Netvisor encodes booleans as the strings '1'/'0'.
    is_active = Boolean(true='1', false='0')
class CompanyListSchema(Schema):
    """Schema for a company-list response; unwraps the 'company' elements."""
    companies = List(
        fields.Nested(CompanySchema, allow_none=True),
        load_from='company'
    )
    @post_load
    def preprocess_company_list(self, input_data):
        """Return the plain list of companies, or [] for an empty response."""
        return input_data['companies'] if input_data else []
| 25.4 | 68 | 0.654856 | 466 | 0.611549 | 0 | 0 | 122 | 0.160105 | 0 | 0 | 228 | 0.299213 |
1457ef0dea7deba4f2c8c70b3f33da68626af688 | 608 | py | Python | merge_delim.py | brendane/miscellaneous_bioinfo_scripts | 91ca3282823495299e4c68aa79bdc1c0225a6d7b | [
"MIT"
] | null | null | null | merge_delim.py | brendane/miscellaneous_bioinfo_scripts | 91ca3282823495299e4c68aa79bdc1c0225a6d7b | [
"MIT"
] | 1 | 2020-09-17T11:14:13.000Z | 2020-09-17T11:14:13.000Z | merge_delim.py | brendane/miscellaneous_bioinfo_scripts | 91ca3282823495299e4c68aa79bdc1c0225a6d7b | [
"MIT"
] | null | null | null | #!/usr/bin/env python2.7
import sys
# Column-bind several two-column (gene, count) files into one tab-delimited
# table on stdout.  Every input file must list the same genes in the same
# order.  NOTE: Python 2 script (uses ``xrange``).
infiles = sys.argv[1:]
data = []   # one list of counts per input file
genes = []  # gene names, taken from the first file
for i, fname in enumerate(infiles):
    sys.stderr.write(fname + '\n')  # progress report
    d = []
    with open(fname, 'rb') as ihandle:
        for j, line in enumerate(ihandle):
            g, c = line.strip().split()
            # every file after the first must agree on the gene ordering
            if i != 0 and g != genes[j]:
                raise Exception('no match')
            if i == 0:
                genes.append(g)
            d.append(c)
    data.append(d)
# emit one row per gene: name followed by one count column per file
out = sys.stdout
for i in xrange(len(genes)):
    out.write(genes[i])
    for d in data:
        out.write('\t' + d[i])
    out.write('\n')
1458c5f022bcbdb49cfdd4c7518d98dccf9754e4 | 1,372 | py | Python | provider/facebook.py | marinewater/pyramid-social-auth | 926f230294ec6b0fdf02a5ed4113073d82a9d18c | [
"MIT"
] | 2 | 2015-02-10T01:19:21.000Z | 2016-07-24T14:40:59.000Z | provider/facebook.py | marinewater/pyramid-social-auth | 926f230294ec6b0fdf02a5ed4113073d82a9d18c | [
"MIT"
] | null | null | null | provider/facebook.py | marinewater/pyramid-social-auth | 926f230294ec6b0fdf02a5ed4113073d82a9d18c | [
"MIT"
] | null | null | null | from provider.base import BaseProvider
class FacebookProvider(BaseProvider):
    """OAuth2 provider for Facebook's Graph API, built on BaseProvider."""

    def __init__(self, client_id, client_secret, name, redirect_uri, state=None):
        """Configure the Facebook OAuth endpoints and delegate to the base class.

        :param client_id: app id issued by Facebook
        :param client_secret: matching app secret
        :param name: display name for this provider
        :param redirect_uri: callback URL registered with Facebook
        :param state: optional opaque anti-CSRF token
        :return:
        """
        endpoints = {
            'authorize': 'https://www.facebook.com/dialog/oauth',
            'token': 'https://graph.facebook.com/oauth/access_token',
            'base': 'https://graph.facebook.com/',
        }
        super().__init__(client_id, client_secret,
                         endpoints['authorize'], endpoints['token'],
                         endpoints['base'], name, redirect_uri, state=state)

    def auth(self, scope=None):
        """Begin the OAuth flow; defaults to the basic profile+email scope."""
        effective_scope = 'email public_profile' if scope is None else scope
        return super().auth(effective_scope)

    def get_user_info(self, info):
        """
        fetch and cache the given info section on first access
        """
        if self.info[info] is None:
            self.info[info] = self.get_info(info)

    def get_user(self):
        """
        retrieve username
        :return: :rtype: string
        """
        self.get_user_info('me')
        return self.info['me']['name']

    def get_email(self):
        """
        retrieve users email address
        :return: :rtype: string
        """
        self.get_user_info('me')
        return self.info['me']['email']
1458dc2ffa19f2e218619cc3c1b9af0b90945664 | 14,757 | py | Python | eratosthenes/preprocessing/color_transforms.py | GO-Eratosthenes/start-code | d40192a482a260676db9ec9b3ece6854c0d8ccf7 | [
"Apache-2.0"
] | 1 | 2021-11-09T14:45:05.000Z | 2021-11-09T14:45:05.000Z | eratosthenes/preprocessing/color_transforms.py | GO-Eratosthenes/start-code | d40192a482a260676db9ec9b3ece6854c0d8ccf7 | [
"Apache-2.0"
] | 4 | 2022-02-22T13:42:42.000Z | 2022-03-24T22:07:24.000Z | eratosthenes/preprocessing/color_transforms.py | GO-Eratosthenes/start-code | d40192a482a260676db9ec9b3ece6854c0d8ccf7 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from .image_transforms import mat_to_gray
def rgb2hcv(Blue, Green, Red):
    """transform red green blue arrays to a hue-chroma-value color space

    Parameters
    ----------
    Blue : np.array, size=(m,n)
        Blue band of satellite image
    Green : np.array, size=(m,n)
        Green band of satellite image
    Red : np.array, size=(m,n)
        Red band of satellite image

    Returns
    -------
    H : np.array, size=(m,n)
        array with dominant frequency (hue angle)
    C : np.array, size=(m,n)
        array with amount of color (chroma)
    V : np.array, size=(m,n)
        luminance

    See also
    --------
    rgb2yiq, rgb2ycbcr, rgb2hsi, rgb2xyz, rgb2lms

    Notes
    -----
    .. [1] Smith, "Putting colors in order", Dr. Dobb's Journal, pp 40, 1993.
    .. [2] Tsai, "A comparative study on shadow compensation of color aerial
       images in invariant color models", IEEE transactions in geoscience and
       remote sensing, vol. 44(6) pp. 1661--1671, 2006.
    """
    # normalize the bands to 0...1, masking out no-data (zero-Blue) pixels
    NanBol = Blue == 0
    Blue = mat_to_gray(Blue, NanBol)
    Green = mat_to_gray(Green, NanBol)
    Red = mat_to_gray(Red, NanBol)  # fixed duplicated "Red = Red = ..." assignment

    V = 0.3*(Red + Green + Blue)
    H = np.arctan2(Red-Blue, np.sqrt(3)*(V-Green))

    # near cos(H)==0 the first chroma estimate is numerically unstable,
    # so switch to the sin-based form there
    IN = abs(np.cos(H)) <= 0.2
    C = np.divide(V-Green, np.cos(H))
    C2 = np.divide(Red-Blue, np.sqrt(3)*np.sin(H))
    C[IN] = C2[IN]
    return H, C, V
def rgb2yiq(Red, Green, Blue):
    """transform red, green, blue to luminance, inphase, quadrature values

    Parameters
    ----------
    Red : np.array, size=(m,n)
        red band of satellite image
    Green : np.array, size=(m,n)
        green band of satellite image
    Blue : np.array, size=(m,n)
        blue band of satellite image

    Returns
    -------
    Y : np.array, size=(m,n)
        luminance
    I : np.array, size=(m,n)
        inphase
    Q : np.array, size=(m,n)
        quadrature

    See also
    --------
    yiq2rgb, rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2xyz, rgb2lms

    Notes
    -----
    .. [1] Gonzalez & Woods "Digital image processing", 1992.
    """
    # NTSC transform written out per channel rather than as a matrix product
    Y = +0.299*Red + 0.587*Green + 0.114*Blue
    I = +0.596*Red - 0.275*Green - 0.321*Blue
    Q = +0.212*Red - 0.523*Green + 0.311*Blue
    return Y, I, Q
def yiq2rgb(Y, I, Q):
    """transform luminance, inphase, quadrature values to red, green, blue

    Parameters
    ----------
    Y : np.array, size=(m,n)
        luminance
    I : np.array, size=(m,n)
        inphase
    Q : np.array, size=(m,n)
        quadrature

    Returns
    -------
    R : np.array, size=(m,n)
        red band
    G : np.array, size=(m,n)
        green band
    B : np.array, size=(m,n)
        blue band

    See also
    --------
    rgb2yiq

    Notes
    -----
    .. [1] Gonzalez & Woods "Digital image processing", 1992.
    """
    # forward NTSC matrix; applying its inverse undoes rgb2yiq
    forward = np.array([(+0.299, +0.587, +0.114),
                        (+0.596, -0.275, -0.321),
                        (+0.212, -0.523, +0.311)])
    backward = np.linalg.inv(forward)
    stacked = np.dstack((Y, I, Q))
    rgb = stacked @ backward.T
    return rgb[..., 0], rgb[..., 1], rgb[..., 2]
def rgb2ycbcr(Red, Green, Blue):
    """transform red, green, blue arrays to luma and chroma values

    Parameters
    ----------
    Red : np.array, size=(m,n)
        red band of satellite image
    Green : np.array, size=(m,n)
        green band of satellite image
    Blue : np.array, size=(m,n)
        blue band of satellite image

    Returns
    -------
    Y : np.array, size=(m,n)
        luma
    Cb : np.array, size=(m,n)
        chroma
    Cr : np.array, size=(m,n)
        chroma

    See also
    --------
    rgb2hcv, rgb2yiq, rgb2hsi, rgb2xyz, rgb2lms

    Notes
    -----
    .. [1] Tsai, "A comparative study on shadow compensation of color aerial
       images in invariant color models", IEEE transactions in geoscience and
       remote sensing, vol. 44(6) pp. 1661--1671, 2006.
    """
    # per-channel linear combinations plus the standard YCbCr offsets
    offsets = np.array([16, 128, 128])/2**8
    Y  = +0.257*Red + 0.504*Green + 0.098*Blue + offsets[0]
    Cb = -0.148*Red - 0.291*Green + 0.439*Blue + offsets[1]
    Cr = +0.439*Red - 0.368*Green - 0.071*Blue + offsets[2]
    return Y, Cb, Cr
def rgb2hsi(Red, Green, Blue):
    """transform red, green, blue arrays to hue, saturation, intensity arrays

    Parameters
    ----------
    Red : np.array, size=(m,n)
        red band of satellite image
    Green : np.array, size=(m,n)
        green band of satellite image
    Blue : np.array, size=(m,n)
        blue band of satellite image

    Returns
    -------
    Hue : np.array, size=(m,n), range=0...1
        Hue
    Sat : np.array, size=(m,n), range=0...1
        Saturation
    Int : np.array, size=(m,n), range=0...1
        Intensity

    See also
    --------
    erdas2hsi, rgb2hcv, rgb2yiq, rgb2ycbcr, rgb2xyz, rgb2lms

    Notes
    -----
    .. [1] Tsai, "A comparative study on shadow compensation of color aerial
       images in invariant color models", IEEE transactions in geoscience and
       remote sensing, vol. 44(6) pp. 1661--1671, 2006.
    .. [2] Pratt, "Digital image processing" Wiley, 1991.
    """
    # rescale any band that is not already within a 0...1 range
    bands = []
    for band in (Red, Green, Blue):
        if np.ptp(band.flatten()) > 1:
            band = mat_to_gray(band)
        bands.append(band)
    Red, Green, Blue = bands

    # rotation of Tsai (2006): intensity axis plus two chroma axes
    root6 = np.sqrt(6)
    tsai = np.array([(1/3, 1/3, 1/3),
                     (-root6/6, -root6/6, -root6/3),
                     (1/root6, -2/root6, 0)])

    hsi = np.einsum('ij,klj->kli', tsai, np.dstack((Red, Green, Blue)))
    Int = hsi[..., 0]
    Sat = np.hypot(hsi[..., 1], hsi[..., 2])
    # angle between the chroma axes, mapped from -.5...+.5 to the 0...1 range
    Hue = np.remainder(np.arctan2(hsi[..., 1], hsi[..., 2]) / np.pi, 1)
    return Hue, Sat, Int
def hsi2rgb(Hue, Sat, Int):
    """inverse HSI transform, handled sector by sector

    Still marked as a TODO upstream; the sector split uses ceil(Hue/3),
    so pixels with Hue == 0 fall in no sector and stay zero.
    """
    channels = {name: np.zeros_like(Hue) for name in 'rgb'}
    sector = np.ceil(Hue/3)
    chroma = 1 + Sat * np.divide(Hue, np.cos(np.radians(60)))

    # (sector id, minimum channel, chroma channel, remainder channel)
    layout = [(1, 'b', 'r', 'g'),   # red-green space
              (2, 'r', 'g', 'b'),   # green-blue space
              (3, 'g', 'b', 'r')]   # blue-red space
    for sec, lo, mid, rest in layout:
        mask = sector == sec
        channels[lo][mask] = np.divide(1 - Sat[mask], 3)
        channels[mid][mask] = np.divide(Int[mask] + chroma[mask], 3)
        channels[rest][mask] = 1 - (channels[mid][mask] + channels[lo][mask])
    return channels['r'], channels['g'], channels['b']
def erdas2hsi(Blue, Green, Red):
    """transform red, green, blue arrays to hue, saturation, intensity arrays

    Parameters
    ----------
    Blue : np.array, size=(m,n)
        blue band of satellite image
    Green : np.array, size=(m,n)
        green band of satellite image
    Red : np.array, size=(m,n)
        red band of satellite image

    Returns
    -------
    Hue : np.array, size=(m,n), float
        hue
    Sat : np.array, size=(m,n), float
        saturation
    Int : np.array, size=(m,n), float
        intensity

    See also
    --------
    rgb2hsi

    Notes
    -----
    .. [1] ERDAS, "User handbook", 2013.
    """
    # rescale any band that is not already within a 0...1 range
    if np.ptp(Red.flatten())>1:
        Red = mat_to_gray(Red)
    if np.ptp(Green.flatten())>1:
        Green = mat_to_gray(Green)
    if np.ptp(Blue.flatten())>1:
        Blue = mat_to_gray(Blue)
    # per-pixel strongest and weakest band
    Stack = np.dstack((Blue, Green, Red))
    min_Stack = np.amin(Stack, axis=2)
    max_Stack = np.amax(Stack, axis=2)
    # intensity: mid-point between the strongest and weakest band
    Int = (max_Stack + min_Stack)/2

    # saturation: normalized band spread, two regimes split at Int = 0.5
    # NOTE(review): the Int==0 entries set to 0 here are overwritten by the
    # Int<=.5 assignment below (0/0 -> nan) -- confirm intended behaviour
    Sat = np.copy(Blue)
    Sat[Int==0] = 0
    Sat[Int<=.5] = (max_Stack[Int<=.5] -
                    min_Stack[Int<=.5]) / (max_Stack[Int<=.5] +
                                           min_Stack[Int<=.5])
    Sat[Int>.5] = (max_Stack[Int>.5] -
                   min_Stack[Int>.5]) / ( 2 - max_Stack[Int>.5] +
                                          min_Stack[Int>.5])
    # hue: position on the color wheel, keyed to whichever band dominates
    Hue = np.copy(Blue)
    Hue[Blue==max_Stack] = (1/6) *(6
                                   + Green[Blue==max_Stack]
                                   - Red[Blue==max_Stack])
    Hue[Green==max_Stack] = (1/6) *(4
                                    + Red[Green==max_Stack]
                                    - Blue[Green==max_Stack])
    Hue[Red==max_Stack] = (1/6) *(2
                                  + Blue[Red==max_Stack]
                                  - Green[Red==max_Stack])
    return Hue, Sat, Int
def rgb2xyz(Red, Green, Blue, method='reinhardt'):
    """transform red, green, blue arrays to XYZ tristimulus values

    Parameters
    ----------
    Red : np.array, size=(m,n)
        red band of satellite image
    Green : np.array, size=(m,n)
        green band of satellite image
    Blue : np.array, size=(m,n)
        blue band of satellite image
    method :
        'reinhardt'
            XYZitu601-1 axis
        'ford'
            D65 illuminant

    Returns
    -------
    X : np.array, size=(m,n)
    Y : np.array, size=(m,n)
    Z : np.array, size=(m,n)

    See also
    --------
    rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2yiq, rgb2lms, xyz2lms

    Notes
    -----
    .. [1] Reinhard et al. "Color transfer between images" IEEE Computer graphics
       and applications vol.21(5) pp.34-41, 2001.
    .. [2] Ford & Roberts. "Color space conversion", pp. 1--31, 1998.
    """
    # pick the conversion matrix for the requested convention
    if method == 'ford':
        matrix = np.array([(0.4124564, 0.3575761, 0.1804375),
                           (0.2126729, 0.7151522, 0.0721750),
                           (0.0193339, 0.1191920, 0.9503041)])
    else:
        matrix = np.array([(0.5141, 0.3239, 0.1604),
                           (0.2651, 0.6702, 0.0641),
                           (0.0241, 0.1228, 0.8444)])

    stacked = np.dstack((Red, Green, Blue))
    xyz = stacked @ matrix.T
    return xyz[..., 0], xyz[..., 1], xyz[..., 2]
def xyz2lms(X, Y, Z):
    """transform XYZ tristimulus arrays to LMS cone-response values

    Parameters
    ----------
    X : np.array, size=(m,n)
        modified XYZitu601-1 axis
    Y : np.array, size=(m,n)
        modified XYZitu601-1 axis
    Z : np.array, size=(m,n)
        modified XYZitu601-1 axis

    Returns
    -------
    L : np.array, size=(m,n)
    M : np.array, size=(m,n)
    S : np.array, size=(m,n)

    See also
    --------
    rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2yiq, rgb2lms

    Notes
    -----
    .. [1] Reinhard et al. "Color transfer between images" IEEE Computer graphics
       and applications vol.21(5) pp.34-41, 2001.
    """
    # XYZ -> LMS matrix of Reinhard et al. (2001); the last row used to be
    # all zeros, which made the S channel identically zero
    N = np.array([(+0.3897, +0.6890, -0.0787),
                  (-0.2298, +1.1834, +0.0464),
                  (+0.0000, +0.0000, +1.0000)])

    XYZ = np.dstack((X, Y, Z))
    LMS = np.einsum('ij,klj->kli', N, XYZ)
    L, M, S = LMS[:, :, 0], LMS[:, :, 1], LMS[:, :, 2]
    return L, M, S
def xyz2lab(X, Y, Z, th=0.008856):
    """transform XYZ tristimulus arrays to CIE L*a*b* values

    Parameters
    ----------
    X : np.array, size=(m,n)
    Y : np.array, size=(m,n)
    Z : np.array, size=(m,n)
    th : float
        linear/cube-root cross-over threshold of the CIE formula

    Returns
    -------
    L : np.array, size=(m,n)
    a : np.array, size=(m,n)
    b : np.array, size=(m,n)

    See also
    --------
    rgb2xyz, xyz2lms, lab2lch

    Notes
    -----
    .. [1] Ford & Roberts. "Color space conversion", pp. 1--31, 1998.
    .. [2] Silva et al. "Near real-time shadow detection and removal in aerial
       motion imagery application" ISPRS journal of photogrammetry and remote
       sensing, vol.140 pp.104--121, 2018.
    """
    Xn, Yn, Zn = 95.047, 100.00, 108.883  # D65 illuminant white point

    def f(tau):
        # CIE helper: cube root above th, linear approximation below.
        # Fixed: previously read the outer X instead of its own argument
        # and mixed <= / < in the mask.
        fx = tau ** (1/3.)
        fx[tau <= th] = 7.787 * tau[tau <= th] + 16. / 116.
        return fx

    YYn = Y / Yn
    # lightness, with the low-luminance linear branch (903.3 = 116/th approx.)
    L = 116 * YYn**(1/3.) - 16
    L[YYn <= th] = 903.3 * YYn[YYn <= th]

    # chromatic axes per the standard CIELAB definition:
    # a opposes X and Y responses, b opposes Y and Z responses
    a = 500 * (f(X / Xn) - f(Y / Yn))
    b = 200 * (f(Y / Yn) - f(Z / Zn))
    return L, a, b
def lab2lch(L, a, b):
    """transform CIE L*a*b* arrays to chroma and hue values

    Parameters
    ----------
    L : np.array, size=(m,n)
    a : np.array, size=(m,n)
    b : np.array, size=(m,n)

    Returns
    -------
    C : np.array, size=(m,n)
        chroma
    h : np.array, size=(m,n), range=0...1
        hue angle, normalized to a full turn

    See also
    --------
    rgb2xyz, xyz2lms, xyz2lab

    Notes
    -----
    .. [1] Ford & Roberts. "Color space conversion", pp. 1--31, 1998.
    .. [2] Silva et al. "Near real-time shadow detection and removal in aerial
       motion imagery application" ISPRS journal of photogrammetry and remote
       sensing, vol.140 pp.104--121, 2018.
    """
    C = np.sqrt( a**2 + b**2)

    # calculate angle, and let it range from 0...1
    # fixed: "x % 2*np.pi / 2*np.pi" parsed as ((x % 2)*pi/2)*pi due to
    # left-to-right precedence; parentheses now give (x mod 2pi) / 2pi
    h = (np.arctan2(b, a) % (2*np.pi)) / (2*np.pi)
    return C, h
def rgb2lms(Red, Green, Blue):
    """transform red, green, blue arrays to LMS cone-response values

    Parameters
    ----------
    Red : np.array, size=(m,n)
        red band of satellite image
    Green : np.array, size=(m,n)
        green band of satellite image
    Blue : np.array, size=(m,n)
        blue band of satellite image

    Returns
    -------
    L : np.array, size=(m,n)
    M : np.array, size=(m,n)
    S : np.array, size=(m,n)

    See also
    --------
    rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2yiq, rgb2xyz, xyz2lms

    Notes
    -----
    .. [1] Reinhard et al. "Color transfer between images", 2001.
    """
    # Reinhard (2001) RGB -> LMS transform, written out per channel
    L = 0.3811*Red + 0.5783*Green + 0.0402*Blue
    M = 0.1967*Red + 0.7244*Green + 0.0782*Blue
    S = 0.0241*Red + 0.1228*Green + 0.8444*Blue
    return L, M, S
def lms2lab(L, M, S):
    """transform L, M, S arrays to lab color space

    Parameters
    ----------
    L : np.array, size=(m,n)
    M : np.array, size=(m,n)
    S : np.array, size=(m,n)

    Returns
    -------
    l : np.array, size=(m,n)
    a : np.array, size=(m,n)
    b : np.array, size=(m,n)

    See also
    --------
    rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2yiq, rgb2xyz, xyz2lms

    Notes
    -----
    .. [1] Reinhard et al. "Color transfer between images", 2001.
    """
    # decorrelating transform of Reinhard (2001): a per-axis scaling
    # composed with sum/difference combinations of the cone channels
    scale = np.diag([1/np.sqrt(3), 1/np.sqrt(6), 1/np.sqrt(2)])
    combine = np.array([(+1., +1., +1.),
                        (+1., +1., -2.),
                        (+1., -1., +0.)])
    transform = scale @ combine

    lab = np.dstack((L, M, S)) @ transform.T
    return lab[..., 0], lab[..., 1], lab[..., 2]
| 27.226937 | 81 | 0.515959 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,448 | 0.572397 |
14590358be50b254390f5fc7eebb4b6ad228a02e | 148 | py | Python | Week3/Hospitalisations_ClassificationCase/import.py | PenelopeCorsica/AppliedML2021 | 1b48fe56553d7e4486e2542cf5d26b5aa6f5b473 | [
"BSD-3-Clause"
] | 13 | 2021-04-20T13:13:51.000Z | 2022-03-28T18:08:07.000Z | Week3/Hospitalisations_ClassificationCase/import.py | PenelopeCorsica/AppliedML2021 | 1b48fe56553d7e4486e2542cf5d26b5aa6f5b473 | [
"BSD-3-Clause"
] | null | null | null | Week3/Hospitalisations_ClassificationCase/import.py | PenelopeCorsica/AppliedML2021 | 1b48fe56553d7e4486e2542cf5d26b5aa6f5b473 | [
"BSD-3-Clause"
] | 8 | 2021-04-16T11:03:42.000Z | 2021-09-29T02:25:15.000Z | import pandas as pd
# Training split: features, plus the target column "y" from a separate file.
X_train = pd.read_csv("X_train.csv")
df_y = pd.read_csv("y_train.csv")
y_train = df_y["y"]
# Held-out split: features only (no labels file is read for it).
X_test = pd.read_csv("X_test.csv")
| 18.5 | 36 | 0.702703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.277027 |
145a12e1b7a4ae3dc30b7b7ae78473fce6b23434 | 1,266 | py | Python | redash/handlers/embed.py | steedos/redash | 46d954843f229d74b5712384e74b4b0fdcef621b | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | redash/handlers/embed.py | steedos/redash | 46d954843f229d74b5712384e74b4b0fdcef621b | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | redash/handlers/embed.py | steedos/redash | 46d954843f229d74b5712384e74b4b0fdcef621b | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2021-07-27T06:27:29.000Z | 2021-07-27T06:27:29.000Z | import logging
from flask import render_template, request, redirect, session, url_for, flash
from flask.ext.restful import abort
from flask_login import current_user, login_required
from redash import models, settings
from redash.wsgi import app
from redash.utils import json_dumps
@app.route('/embed/query/<query_id>/visualization/<visualization_id>', methods=['GET'])
@login_required
def embed(query_id, visualization_id):
    """Render a single query visualization as a standalone, embeddable page.

    Looks up the visualization belonging to the given query, attaches the
    latest cached query result, and renders ``embed.html``.  Aborts with
    404 when the visualization does not exist and with 400 when the query
    has no cached result to show.
    """
    query = models.Query.get_by_id(query_id)
    vis = query.visualizations.where(models.Visualization.id == visualization_id).first()
    qr = {}
    if vis is not None:
        vis = vis.to_dict()
        qr = query.latest_query_data
        if qr is None:
            # a query that never produced a result has nothing to embed
            abort(400, message="No Results for this query")
        else:
            qr = qr.to_dict()
    else:
        abort(404, message="Visualization not found.")
    # JSON-serialized settings consumed by the client-side embed page
    client_config = {}
    client_config.update(settings.COMMON_CLIENT_CONFIG)
    return render_template("embed.html",
                           name=settings.NAME,
                           client_config=json_dumps(client_config),
                           visualization=json_dumps(vis),
                           query_result=json_dumps(qr),
                           analytics=settings.ANALYTICS)
| 33.315789 | 89 | 0.650079 | 0 | 0 | 0 | 0 | 979 | 0.773302 | 0 | 0 | 128 | 0.101106 |
145a2aac257a81ff3e9b25e0317ce229eb2a70b3 | 15,109 | py | Python | test_vs_model_DEMs.py | drewleonard42/CoronaTemps | 210642175e063b39dd1878c2731f15bf34e4225f | [
"BSD-2-Clause"
] | 1 | 2019-07-31T17:27:12.000Z | 2019-07-31T17:27:12.000Z | test_vs_model_DEMs.py | SolarDrew/CoronaTemps | 210642175e063b39dd1878c2731f15bf34e4225f | [
"BSD-2-Clause"
] | null | null | null | test_vs_model_DEMs.py | SolarDrew/CoronaTemps | 210642175e063b39dd1878c2731f15bf34e4225f | [
"BSD-2-Clause"
] | 2 | 2015-07-07T10:31:03.000Z | 2015-10-19T15:42:47.000Z | # -*- coding: utf-8 -*-
"""
Script to produce synthetic AIA data based on arbitrary model DEMs and test the
results of the tempmap code against the model.
Created on Mon Jul 28 16:34:28 2014
@author: Drew Leonard
"""
import numpy as np
from matplotlib import use, rc
use('agg')
rc('savefig', bbox='tight', pad_inches=0.5)
import matplotlib.pyplot as plt
from matplotlib import patches
import sunpy
from sunpy.map import Map
from temperature import TemperatureMap
from utils import gaussian, load_temp_responses
from os import path, makedirs
import subprocess32 as subp
from itertools import product
from skimage import measure
from sys import argv
# Decide whether to assess single-parameter or full-Gaussian method
n_pars = int(argv[1])
# Define which wavelength to use for EM estimation with 1-parameter TMaps
emwlen = str(argv[2])
# Define CoronaTemps home folder and output folder
CThome = path.join(path.expanduser('~'), 'CoronaTemps')
outdir = path.join(CThome, 'validation', '{}pars'.format(n_pars))
tmap_script = path.join(CThome, 'create_tempmap.py')
if not path.exists(outdir): makedirs(outdir)
# Define parameter ranges of the model DEM Gaussians
# (commented-out values are earlier, wider parameter scans)
#temps = np.arange(4.6, 7.405, 0.01)#0.005)
temps = np.arange(5.6, 7.005, 0.01)
widths = np.array([0.01, 0.1, 0.5])#np.arange(0.01, 0.605, 0.005) # Just copying Aschwanden's range here
#heights = 10 ** np.arange(18, 37, 0.1)#0.05)
heights = 10 ** np.arange(20, 35, 0.1)
#print heights
n_temps = len(temps)
n_widths = len(widths)
n_heights = len(heights)
# every (peak temperature, width, height) combination to be modelled
parvals = np.array([i for i in product(temps, widths, heights)])
#print parvals.shape
n_vals = n_temps * n_widths * n_heights
#print n_temps, n_widths, n_heights, n_vals, n_vals * 6
# Create model DEMs and synthetic emission
# emission[c, t, w, h] holds the synthetic counts in AIA channel c for the
# DEM with parameters (temps[t], widths[w], heights[h])
emission = np.zeros((6, n_temps, n_widths, n_heights))
#print emission.shape
logt = np.arange(0, 15.05, 0.05)
resp = load_temp_responses()
delta_t = logt[1] - logt[0]
for p, params in enumerate(parvals):
    # model DEM: a Gaussian in log(T) defined by the current parameters
    dem = gaussian(logt, *params)
    f = resp * dem
    t = np.where(temps == params[0])[0][0]
    w = np.where(widths == params[1])[0][0]
    h = np.where(heights == params[2])[0][0]
    # integrate response * DEM over log-temperature (rectangle rule)
    emission[:, t, w, h] = np.sum(f, axis=1) * delta_t
#emission = emission / emission[2, :, :, :]
#print '----', emission[2, :, :, :].min(), emission[2, :, :, :].max()
# Load AIA response functions
resp = load_temp_responses()
# Load unnessecary map for its metadata
voidmap = Map(sunpy.AIA_171_IMAGE)
mapmeta = voidmap.meta
#rect = patches.Rectangle([25.0, 5.6], 1.0, 1.0, color='black', fill=True, clip_on=False)
# Run synthetic data through 1param tempmap method
for w, wid in enumerate(widths):#heights):
print '\nWidth:', wid
fig = plt.figure(figsize=(30, 12))
for wl, wlength in enumerate(['94', '131', '171', '193', '211', '335']):
#emiss = Map(emission[wl, :, :, w], mapmeta)
emiss = Map(emission[wl, :, w, :].copy(), mapmeta)
emiss.cmap = sunpy.cm.get_cmap('sdoaia{}'.format(wlength))
emiss.meta['naxis1'] = emiss.shape[1]
emiss.meta['naxis2'] = emiss.shape[0]
#emiss.meta['cdelt1'] = widths[1] - widths[0]
emiss.meta['cdelt1'] = heights[1] - heights[0] #np.log10(heights[1]) - np.log10(heights[0])
emiss.meta['cdelt2'] = temps[1] - temps[0]
#emiss.meta['crval1'] = widths[0]
emiss.meta['crval1'] = heights[0] #np.log10(heights[0])
emiss.meta['crval2'] = temps[0]
emiss.meta['crpix1'] = 0.5
emiss.meta['crpix2'] = 0.5
if wlength == '94': wlength = '094'
fits_dir = path.join(CThome, 'data', 'synthetic', wlength)
if not path.exists(fits_dir): makedirs(fits_dir)
emiss.save(path.join(fits_dir, 'model.fits'), clobber=True)
#print '----', emission[2, :, :, :].min(), emission[2, :, :, :].max()
#print '------', emission[2, :, w, :].min(), emission[2, :, w, :].max()
emiss.data /= emission[2, :, w, :]
#print '--------', emiss.min(), emiss.max()
ax = fig.add_subplot(1, 6, wl+1)
emiss.plot(aspect='auto', vmin=emiss.min(), vmax=emiss.max())
plt.title('{}'.format(wlength))
plt.xlabel('Input EM')
plt.ylabel('Input log(T)')
plt.colorbar()
#fig.gca().add_artist(rect)
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
#plt.savefig(path.join(outdir, 'model_emission_h={}'.format(np.log10(wid)).replace('.', '_')))
plt.savefig(path.join(outdir, 'model_emission_w={}'.format(wid).replace('.', '_')))
plt.close()
#images = [Map(emission[i, :, :, w], mapmeta) for i in range(6)]
images = [Map(emission[i, :, w, :], mapmeta) for i in range(6)]
if n_pars == 3:
cmdargs = "mpiexec -n 10 python {} model {} {} {} {} {} {}".format(
tmap_script, n_pars, path.join(CThome, 'data'),
None, None, True, True).split()
else:
cmdargs = "python {} model {} {} {} {} {} {}".format(
tmap_script, n_pars, path.join(CThome, 'data'),
None, None, True, True).split()
status = subp.call(cmdargs)
newmap = TemperatureMap(fname=path.join(CThome, 'temporary.fits'))
subp.call(["rm", path.join(CThome, 'temporary.fits')])
data, meta = newmap.data, newmap.meta
fitsmap = Map(np.log10(newmap.goodness_of_fit), newmap.meta.copy())
#print fitsmap.max()
newmap.data = data
print '-------------MINMAX:-------------'#, newmap.min(), newmap.max(), newmap.shape,
#print newmap.data[newmap.data == 0].shape, '----------\n'
print 'GoF', fitsmap.min(), fitsmap.mean(), fitsmap.max()
print 'T_out', newmap.min(), newmap.mean(), newmap.max()
#truetemp = np.array(list(temps)*n_widths).reshape((n_widths, n_temps)).T
truetemp = np.array(list(temps)*n_heights).reshape((n_heights, n_temps)).T
#print truetemp.shape, data.shape
diff = Map((abs(truetemp - data) / truetemp) * 100, newmap.meta.copy())
print 'T_diff', diff.min(), diff.mean(), diff.max()
if n_pars == 3:
wdata = Map(newmap.dem_width, newmap.meta.copy())
truew = np.ones(shape=(n_temps, n_heights)) * wid
#print 'truew', truew.min(), truew.mean(), truew.max()
diffw = Map((abs(truew - wdata.data) / truew) * 100, newmap.meta.copy())
print 'w_out', wdata.min(), wdata.mean(), wdata.max()
print 'w_diff', diffw.min(), diffw.mean(), diffw.max()
#print wid, newmap.xrange, newmap.yrange, newmap.scale
#print wid, diff.xrange, diff.yrange, diff.scale
fig = plt.figure(figsize=(24, 12))
fig.add_subplot(1, 3, 1)
newmap.plot(cmap='coolwarm', vmin=5.6, vmax=7.0, aspect='auto')
plt.colorbar()
plt.title('Solution log(T)', fontsize=28)
plt.ylabel('Input log(T)', fontsize=24)
plt.xlabel('Input EM', fontsize=24)#width', fontsize=24)
#fig.gca().add_artist(rect)
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
ax = fig.add_subplot(1, 3, 2)
#print 'diff', diff.min(), diff.max()
#print np.nanmin(diff.data), np.nanmax(diff.data)
diff.plot(cmap='RdYlGn_r', aspect='auto')#, vmin=diff.min(), vmax=diff.max())
plt.colorbar()
plt.title('Difference from input (%)', fontsize=28)
plt.xlabel('Input EM', fontsize=24)
#fig.gca().add_artist(rect)
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
ax = fig.add_subplot(1, 3, 3)
#print 'fits', fitsmap.min(), fitsmap.max()
#print np.nanmin(fitsmap.data), np.nanmax(fitsmap.data)
fitsmap.plot(cmap='cubehelix', aspect='auto')#,
# vmin=np.nanmin(fitsmap.data)-(2.0*(np.nanstd(fitsmap.data))),
# vmax=np.nanmax(fitsmap.data)+(2.0*(np.nanstd(fitsmap.data))))
plt.colorbar()
plt.title('log(Goodness-of-fit)', fontsize=28)
plt.xlabel('Input EM', fontsize=24)
#fig.gca().add_artist(rect)
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
#plt.savefig(path.join(outdir, 'tempsolutions_em={}'.format(np.log10(wid)).replace('.', '_')))
plt.savefig(path.join(outdir, 'tempsolutions_wid={:.3f}'.format(wid).replace('.', '_')))
plt.close()
if n_pars == 3:
emdata = Map(newmap.emission_measure, newmap.meta.copy())
else:
if emwlen == 'three':
total = np.zeros(newmap.shape)
for w in ['171', '193', '211']:
emdata = newmap.calculate_em(w, model=True)
total += emdata.data
emdata.data = total/3.0
elif emwlen == 'all':
total = np.zeros(newmap.shape)
for w in ['94', '131', '171', '193', '211', '335']:
emdata = newmap.calculate_em(w, model=True)
total += emdata.data
emdata.data = total/6.0
else:
emdata = newmap.calculate_em(emwlen, model=True)
trueem = np.array(list(heights)*n_temps).reshape(n_temps, n_heights)
diffem = Map((abs(trueem - emdata.data) / trueem) * 100, newmap.meta.copy())
#print wid, emdata.xrange, emdata.yrange, emdata.scale
#print wid, diffem.xrange, diffem.yrange, diffem.scale
#print wid, fitsmap.xrange, fitsmap.yrange, fitsmap.scale
fig = plt.figure(figsize=(24, 12))
ax = fig.add_subplot(1, 3, 1)
print 'em_out', emdata.min(), emdata.mean(), emdata.max()
print 'em_diff', diffem.min(), diffem.mean(), diffem.max()
#print np.nanmin(emdata.data), np.nanmax(emdata.data)
emdata.plot(cmap='coolwarm', aspect='auto',
vmin=emdata.min(), vmax=emdata.max())
# vmin=np.log10(heights[0]), vmax=np.log10(heights[-1]))
contours = measure.find_contours(emdata.data, heights[0])
for contour in contours:
contour[:, 0] *= emdata.scale['y']
contour[:, 1] *= emdata.scale['x']
contour[:, 0] += emdata.yrange[0]
contour[:, 1] += emdata.xrange[0]
plt.plot(contour[:, 1], contour[:, 0], color='blue')
plt.xlim(*emdata.xrange)
plt.ylim(*emdata.yrange)
contours = measure.find_contours(emdata.data, heights[-1])
for contour in contours:
contour[:, 0] *= emdata.scale['y']
contour[:, 1] *= emdata.scale['x']
contour[:, 0] += emdata.yrange[0]
contour[:, 1] += emdata.xrange[0]
plt.plot(contour[:, 1], contour[:, 0], color='black')
plt.xlim(*emdata.xrange)
plt.ylim(*emdata.yrange)
if n_pars == 3:
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
plt.colorbar()
plt.title('Solution EM', fontsize=28)
plt.ylabel('Input log(T)', fontsize=24)
plt.xlabel('Input EM', fontsize=24)#width', fontsize=24)
ax = fig.add_subplot(1, 3, 2)
#print 'diffem', diffem.min(), diffem.max()
#print np.nanmin(diffem.data), np.nanmax(diffem.data)
diffem.plot(cmap='RdYlGn_r', aspect='auto',
# vmin=0, vmax=50)
vmin=diffem.min(), vmax=diffem.max())
#fig.gca().add_artist(rect)
if n_pars == 3:
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
plt.colorbar()
plt.title('Difference from input (%)', fontsize=28)
plt.xlabel('Input EM', fontsize=24)
ax = fig.add_subplot(1, 3, 3)
fitsmap.plot(cmap='cubehelix', aspect='auto')
if n_pars == 3:
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
plt.colorbar()
plt.title('log(Goodness-of-fit)', fontsize=28)
plt.xlabel('Input EM', fontsize=24)
if n_pars == 3:
plt.savefig(path.join(outdir, 'emsolutions_wid={:.3f}'.format(wid).replace('.', '_')))
else:
plt.savefig(path.join(outdir, 'emsolutions_wid={:.3f}_wlen={}'.format(wid, emwlen).replace('.', '_')))
plt.close()
if n_pars == 3:
#print wid, wdata.xrange, wdata.yrange, wdata.scale
#print wid, diffw.xrange, diffw.yrange, diffw.scale
fig = plt.figure(figsize=(24, 12))
ax = fig.add_subplot(1, 3, 1)
wdata.plot(cmap='coolwarm', vmin=widths[0], vmax=widths[-1], aspect='auto')
#fig.gca().add_artist(rect)
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
plt.colorbar()
plt.title('Solution width', fontsize=28)
plt.ylabel('Input log(T)', fontsize=24)
plt.xlabel('Input EM', fontsize=24)#width', fontsize=24)
ax = fig.add_subplot(1, 3, 2)
#print 'diffw', diffw.min(), diffw.max()
#print np.nanmin(diffw.data), np.nanmax(diffw.data)
diffw.plot(cmap='RdYlGn_r', vmin=diffw.min(), vmax=diffw.max(), aspect='auto')
#fig.gca().add_artist(rect)
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
plt.colorbar()
plt.title('Difference from input (%)', fontsize=28)
plt.xlabel('Input EM', fontsize=24)
ax = fig.add_subplot(1, 3, 3)
fitsmap.plot(cmap='cubehelix', aspect='auto')
#fig.gca().add_artist(rect)
plt.axvline(20.0, color='white')
plt.axvline(35.0, color='white')
plt.axhline(5.6, color='white')
plt.axhline(7.0, color='white')
plt.colorbar()
plt.title('log(Goodness-of-fit)', fontsize=28)
plt.xlabel('Input EM', fontsize=24)
plt.savefig(path.join(outdir, 'widsolutions_wid={:.3f}'.format(wid).replace('.', '_')))
plt.close()
"""w = np.where((widths > 0.097)*(widths < 0.103))
dataslice = data[:, w].reshape(len(temps))
diffslice = diff[:, w].reshape(len(temps))
fitslice = fits[:, w].reshape(len(temps))
fig = plt.figure(figsize=(16, 12))
plt.plot(temps, dataslice)
plt.title('Solution log(T) at width=0.1', fontsize=28)
plt.xlabel('Input log(T)', fontsize=24)
plt.ylabel('Solution log(T)', fontsize=24)
plt.savefig('/home/drew/Dropbox/euroscipy/dataslice')
plt.close()
fig = plt.figure(figsize=(16, 12))
plt.plot(temps, diffslice)
plt.title('Difference from input at width=0.1', fontsize=28)
plt.xlabel('Input log(T)', fontsize=24)
plt.ylabel('Difference (%)', fontsize=24)
plt.savefig('/home/drew/Dropbox/euroscipy/diffslice')
plt.close()
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(1, 1, 1)
plt.plot(temps, np.log10(fitslice))
plt.title('Goodness-of-fit at width=0.1', fontsize=28)
plt.xlabel('Input log(T)', fontsize=24)
plt.ylabel('log(Goodness-of-fit)', fontsize=24)
plt.savefig('/home/drew/Dropbox/euroscipy/fitslice')
plt.close()"""
# Run synthetic data throguh 3param tempmap method
# Somehow display the results.
| 41.057065 | 110 | 0.613608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,912 | 0.39129 |
145acd330fc58583c49d623ffb6e709da478a671 | 315 | py | Python | tests/test_set_up_logger.py | dennlinger/hypergraph-document-store | 72b90119b163b92254c73442bee52cde55e58517 | [
"MIT"
] | null | null | null | tests/test_set_up_logger.py | dennlinger/hypergraph-document-store | 72b90119b163b92254c73442bee52cde55e58517 | [
"MIT"
] | 1 | 2019-12-12T09:20:00.000Z | 2019-12-12T09:20:00.000Z | tests/test_set_up_logger.py | dennlinger/hypergraph-document-store | 72b90119b163b92254c73442bee52cde55e58517 | [
"MIT"
] | 1 | 2021-07-22T14:16:47.000Z | 2021-07-22T14:16:47.000Z | from unittest import TestCase
import os
class TestSet_up_logger(TestCase):
    """Unit test for ``utils.set_up_logger``."""

    def test_set_up_logger(self):
        """set_up_logger must return a logging.Logger instance."""
        from utils import set_up_logger
        from logging import Logger
        logger = set_up_logger("test", "test.log")
        # Register cleanup immediately after the file is created so the
        # log file is removed even when the assertion below fails (the
        # original only removed it on the success path).
        self.addCleanup(os.remove, "test.log")
        self.assertIsInstance(logger, Logger)
| 22.5 | 50 | 0.688889 | 272 | 0.863492 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.08254 |
145d2ba73cbb15562b18fa340ca1117a5d2135a1 | 2,459 | py | Python | rubicon_reminders_cli.py | ZG34/StratNotes | ec8e0f50740a558d14b5f790aa67b26dc3a85d90 | [
"MIT"
] | 6 | 2022-01-21T07:16:12.000Z | 2022-01-23T22:01:16.000Z | rubicon_reminders_cli.py | ZG34/StratNotes | ec8e0f50740a558d14b5f790aa67b26dc3a85d90 | [
"MIT"
] | null | null | null | rubicon_reminders_cli.py | ZG34/StratNotes | ec8e0f50740a558d14b5f790aa67b26dc3a85d90 | [
"MIT"
] | 4 | 2022-01-21T07:17:03.000Z | 2022-01-23T03:37:12.000Z | # the comments in this file were made while learning, as reminders
# to RUN APP IN CMD PROMPT: cd to this directory, or place in default CMD directory:
# then run 'python rubicon_reminders_cli.py'
from os import listdir
from datetime import datetime
# Timestamp used to stamp review/session markers into the note files.
dt = (datetime.now())

# TODO numerate note items per entry
# Prompt whether to view an existing note file or create a new one.
p1 = input("(V)iew or (N)ew [V/N]: ").upper()
if p1 == "V":
    # Show the available note files in the current directory.
    for file in listdir():
        if file.endswith(".txt"):
            print(file)
    # "r+" places the cursor at the start; reading the whole file first
    # means subsequent writes append at the end instead of overwriting.
    old_file = (input("which file would you like to open: "))
    hdl = open(old_file + ".txt", "r+")
    for line in hdl:
        print(line.strip())
    of_note = input("Add Note: ")
    if of_note == "done":  # FIXME accept 'done' in any letter case
        # Entering "done" immediately just marks the notes as reviewed.
        hdl.write(" REVIEWED: ")
        hdl.write(str(dt))
    else:
        hdl.write('\n')
        hdl.write(of_note)
        hdl.write('\n')
        while of_note != "done":
            of_note = input("Add more notes: ")
            while of_note != "done":
                hdl.write(of_note)
                hdl.write('\n')
                # Bug fix: without this break (present in the "N" branch
                # below) the inner loop wrote the same note forever.
                break
        else:
            # Loop ended normally (user typed "done"): stamp the session.
            hdl.write("SESSION END: ")
            hdl.write(str(dt))
            hdl.write('\n')
    hdl.close()
# Create and write a brand-new note file.
elif p1 == "N":
    new_file = input("new file name: ")
    hdl = open(new_file, "a")
    nf_note = input("Add Note: ")
    if nf_note == "done":
        print("finished")
    else:
        hdl.write(nf_note)
        hdl.write('\n')
        while nf_note != "done":
            nf_note = input("Add more notes: ")
            while nf_note != "done":
                hdl.write(nf_note)
                hdl.write('\n')
                break
        else:
            hdl.write("SESSION END: ")
            hdl.write(str(dt))
            hdl.write('\n')
    hdl.close()
else:
    print("Error: please enter V or N")
| 34.633803 | 101 | 0.581537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,217 | 0.494917 |
145d3ea9060b7295f0b7e5b21153fbe39e93cf1f | 12,213 | py | Python | tradingdb/relationaldb/tests/utils.py | gnosis/gnosisdb | b3055406fba7061c3677bfd16e19f8bc5c97db2c | [
"MIT"
] | 11 | 2017-06-23T15:35:10.000Z | 2018-04-27T06:11:25.000Z | tradingdb/relationaldb/tests/utils.py | gnosis/gnosisdb | b3055406fba7061c3677bfd16e19f8bc5c97db2c | [
"MIT"
] | 42 | 2018-01-17T15:46:33.000Z | 2018-05-08T08:13:17.000Z | tradingdb/relationaldb/tests/utils.py | gnosis/gnosisdb | b3055406fba7061c3677bfd16e19f8bc5c97db2c | [
"MIT"
] | 12 | 2017-07-03T15:51:41.000Z | 2018-03-25T17:31:54.000Z | tournament_token_bytecode = "0x608060405233600360006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550611776806100546000396000f3006080604052600436106100fc576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806302d05d3f1461010157806306fdde0314610158578063095ea7b3146101e8578063136ef18a1461024d57806318160ddd146102b357806323b872dd146102de578063313ce567146103635780633d0950a81461039457806342958b54146103fa578063429b62e51461046a5780636105c94b146104c557806370a08231146104f457806395d89b411461054b5780639b19251a146105db578063a9059cbb14610636578063ae876f611461069b578063bbc8e3cb14610701578063dd62ed3e14610767575b600080fd5b34801561010d57600080fd5b506101166107de565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561016457600080fd5b5061016d610804565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101ad578082015181840152602081019050610192565b50505050905090810190601f1680156101da5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b3480156101f457600080fd5b50610233600480360381019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061083d565b604051808215151515815260200191505060405180910390f35b34801561025957600080fd5b506102b16004803603810190808035906020019082018035906020019080806020026020016040519081016040528093929190818152602001838360200280828437820191505050505050919291929050505061092f565b005b3480156102bf57600080fd5b506102c8610a76565b6040518082815260200191505060405180910390f35b3480156102ea57600080fd5b50610349600480360381019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610a80565b604051808215151515815260200191505060405180910390f35b34
801561036f57600080fd5b50610378610b42565b604051808260ff1660ff16815260200191505060405180910390f35b3480156103a057600080fd5b506103f860048036038101908080359060200190820180359060200190808060200260200160405190810160405280939291908181526020018383602002808284378201915050505050509192919290505050610b47565b005b34801561040657600080fd5b506104686004803603810190808035906020019082018035906020019080806020026020016040519081016040528093929190818152602001838360200280828437820191505050505050919291929080359060200190929190505050610c33565b005b34801561047657600080fd5b506104ab600480360381019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610ddc565b604051808215151515815260200191505060405180910390f35b3480156104d157600080fd5b506104da610dfc565b604051808215151515815260200191505060405180910390f35b34801561050057600080fd5b50610535600480360381019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610e01565b6040518082815260200191505060405180910390f35b34801561055757600080fd5b50610560610e49565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156105a0578082015181840152602081019050610585565b50505050905090810190601f1680156105cd5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b3480156105e757600080fd5b5061061c600480360381019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610e82565b604051808215151515815260200191505060405180910390f35b34801561064257600080fd5b50610681600480360381019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610ea2565b604051808215151515815260200191505060405180910390f35b3480156106a757600080fd5b506106ff60048036038101908080359060200190820180359060200190808060200260200160405190810160405280939291908181526020018383602002808284378201915050505050509192919290505050610f62565b005b34801561070d57600080fd5b506107656004803603810190808035906020019082018035906020019080806020026020016040519081016040528093929190818152602001838360200280
828437820191505050505050919291929050505061104e565b005b34801561077357600080fd5b506107c8600480360381019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050611195565b6040518082815260200191505060405180910390f35b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6040805190810160405280600d81526020017f4f6c796d70696120546f6b656e0000000000000000000000000000000000000081525081565b600081600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925846040518082815260200191505060405180910390a36001905092915050565b6000600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806109dd575060011515600560003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff161515145b15156109e857600080fd5b600090505b8151811015610a72576001600460008484815181101515610a0a57fe5b9060200190602002015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff02191690831515021790555080806001019150506109ed565b5050565b6000600254905090565b6000600460008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff1680610b235750600460008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff165b1515610b2e57600080fd5b610b39848484
61121c565b90509392505050565b601281565b6000600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515610ba557600080fd5b600090505b8151811015610c2f576001600560008484815181101515610bc757fe5b9060200190602002015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff0219169083151502179055508080600101915050610baa565b5050565b600080600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515610c9257600080fd5b600091505b8351821015610da8578382815181101515610cae57fe5b906020019060200201519050610d0b836000808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205461150190919063ffffffff16565b6000808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508073ffffffffffffffffffffffffffffffffffffffff167f9cb9c14f7bc76e3a89b796b091850526236115352a198b1e472f00e91376bbcb846040518082815260200191505060405180910390a28180600101925050610c97565b610dd0610dbf85518561152390919063ffffffff16565b60025461150190919063ffffffff16565b60028190555050505050565b60056020528060005260406000206000915054906101000a900460ff1681565b600181565b60008060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020549050919050565b6040805190810160405280600381526020017f4f4c59000000000000000000000000000000000000000000000000000000000081525081565b60046020528060005260406000206000915054906101000a900460ff1681565b6000600460003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff1680610f455750600460008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffff
ffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff165b1515610f5057600080fd5b610f5a8383611545565b905092915050565b6000600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515610fc057600080fd5b600090505b815181101561104a576000600560008484815181101515610fe257fe5b9060200190602002015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff0219169083151502179055508080600101915050610fc5565b5050565b6000600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806110fc575060011515600560003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff161515145b151561110757600080fd5b600090505b815181101561119157600060046000848481518110151561112957fe5b9060200190602002015173ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff021916908315150217905550808060010191505061110c565b5050565b6000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905092915050565b600061126f826000808773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205461170890919063ffffffff16565b1580611307575061130582600160008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205461170890919063ffffffff1656
5b155b80611360575061135e826000808673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205461171690919063ffffffff16565b155b1561136e57600090506114fa565b816000808673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282540392505081905550816000808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3600190505b9392505050565b600061150d8383611716565b151561151857600080fd5b818301905092915050565b600061152f8383611726565b151561153a57600080fd5b818302905092915050565b6000611598826000803373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205461170890919063ffffffff16565b15806115f257506115f0826000808673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205461171690919063ffffffff16565b155b156116005760009050611702565b816000803373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282540392505081905550816000808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8460405180
82815260200191505060405180910390a3600190505b92915050565b600081831015905092915050565b6000828284011015905092915050565b6000808214806117425750828283850281151561173f57fe5b04145b9050929150505600a165627a7a723058205e5821735c824c73df5e6600554ce48b350410c6c3602c9b74f92af5b46eefdf0029"
| 6,106.5 | 12,212 | 0.999509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,184 | 0.997625 |
145d4d73e5688c8c92783fdd2c96db07fb334f0c | 352 | py | Python | script/generate_user.py | MTDzi/data_nanodegree_project_5 | a436b34d4a1aa030357d1db82d1e1a7439061c1a | [
"Apache-2.0"
] | null | null | null | script/generate_user.py | MTDzi/data_nanodegree_project_5 | a436b34d4a1aa030357d1db82d1e1a7439061c1a | [
"Apache-2.0"
] | 6 | 2020-04-19T10:18:05.000Z | 2020-04-28T16:26:07.000Z | script/generate_user.py | MTDzi/data_nanodegree_project_5 | a436b34d4a1aa030357d1db82d1e1a7439061c1a | [
"Apache-2.0"
] | null | null | null | import os
from airflow import models, settings
from airflow.contrib.auth.backends.password_auth import PasswordUser
# Create an Airflow web-UI superuser from credentials supplied via the
# environment (AIRFLOW_UI_USER / AIRFLOW_UI_PASSWORD).
user = PasswordUser(models.User())
user.username = os.environ['AIRFLOW_UI_USER']
user.password = os.environ['AIRFLOW_UI_PASSWORD']
user.superuser = True
session = settings.Session()
try:
    session.add(user)
    session.commit()
finally:
    # Always release the DB session, even if the commit raises
    # (the original leaked the session on failure).
    session.close()
| 23.466667 | 68 | 0.795455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.107955 |
145e6aad4169f10659d6af4a8eac4c1ff2e1a29b | 909 | py | Python | generic_functions.py | MrHellYea/Art-Gallery-SQL | 6e56ee18eb5dc0d2f4efa2fbaf01d887ed808dbf | [
"MIT"
] | null | null | null | generic_functions.py | MrHellYea/Art-Gallery-SQL | 6e56ee18eb5dc0d2f4efa2fbaf01d887ed808dbf | [
"MIT"
] | null | null | null | generic_functions.py | MrHellYea/Art-Gallery-SQL | 6e56ee18eb5dc0d2f4efa2fbaf01d887ed808dbf | [
"MIT"
def get_remain(cpf: str, start: int, upto: int) -> int:
    """Compute a CPF-style check-digit remainder.

    Each of the first *upto* characters of *cpf* is weighted by a factor
    that starts at *start* and decreases by one per position; the result
    is ``(weighted_sum * 10) % 11``, with 10 mapped to 0.

    Returns None (despite the ``int`` annotation) when any character in
    the considered slice is not a digit.
    """
    digits = cpf[:upto]
    weights = range(start, start - len(digits), -1)
    try:
        weighted_sum = sum(int(digit) * weight for digit, weight in zip(digits, weights))
    except ValueError:
        # Non-numeric character: preserve the original None return.
        return None
    remainder = (weighted_sum * 10) % 11
    return 0 if remainder == 10 else remainder
def padronize_date(date: str) -> str:
    """Normalize a ``D/M/Y`` date string towards ``DD/MM/YYYY``.

    Leading zeros are stripped from each component, then the day and
    month are left-padded with a single zero when not exactly two
    characters long, and the year is zero-padded to four characters.
    """
    day, month, year = [part.lstrip("0") for part in date.split("/")]
    if len(day) != 2:
        day = "0" + day
    if len(month) != 2:
        month = "0" + month
    if len(year) != 4:
        year = year.rjust(4, "0")
    return "/".join((day, month, year))
def padronize_time(time: str) -> str:
    """Normalize an ``H:M`` time string towards ``HH:MM``.

    Leading zeros are stripped from both components, then each is
    left-padded with a single zero when not exactly two characters long.
    """
    hour, minute = [part.lstrip("0") for part in time.split(":")]
    if len(hour) != 2:
        hour = "0" + hour
    if len(minute) != 2:
        minute = "0" + minute
    return "{}:{}".format(hour, minute)
| 29.322581 | 69 | 0.547855 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.135314 |
145ef1cdc80727bde5f62588059dc90f98d8c984 | 535 | py | Python | src/generic.py | jfecroft/Hamilton | 35ade5fc07a330e6a05e1ea391a16b8874737eb5 | [
"MIT"
] | null | null | null | src/generic.py | jfecroft/Hamilton | 35ade5fc07a330e6a05e1ea391a16b8874737eb5 | [
"MIT"
] | null | null | null | src/generic.py | jfecroft/Hamilton | 35ade5fc07a330e6a05e1ea391a16b8874737eb5 | [
"MIT"
] | null | null | null | """
generic functions
"""
from yaml import load
def reduce_output(func, item, *args, **kwargs):
    """Wrap *func* so the wrapper returns only element *item* of its result.

    The returned callable forwards its own arguments to *func* and indexes
    the result with ``[item]``.

    Note: ``*args``/``**kwargs`` given to this outer call are kept only for
    backward compatibility — they were shadowed by the inner function in the
    original implementation and are ignored.
    """
    def inner_func(*inner_args, **inner_kwargs):
        # Renamed parameters so they no longer shadow the outer ones.
        return func(*inner_args, **inner_kwargs)[item]
    return inner_func
def load_yaml(filen):
    """Read ``<filen>.yml`` and return the parsed data structure."""
    # NOTE(review): `yaml.load` without an explicit Loader is unsafe on
    # untrusted input (and deprecated since PyYAML 5.1) — consider
    # `yaml.safe_load`; left unchanged here to preserve behavior.
    with open('{}.yml'.format(filen), 'r') as yaml_file:
        return load(yaml_file)
| 21.4 | 60 | 0.642991 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.409346 |
145f50f90a8086649d041c871071f93bec34f696 | 334 | py | Python | app/app.py | raevilman/aws_lambda_python_skeleton | aebd3a4f80bc1149662c8a0b1af5df40d28eff01 | [
"MIT"
] | 3 | 2020-10-19T11:31:10.000Z | 2021-02-19T12:14:26.000Z | app/app.py | raevilman/aws_lambda_python_skeleton | aebd3a4f80bc1149662c8a0b1af5df40d28eff01 | [
"MIT"
] | null | null | null | app/app.py | raevilman/aws_lambda_python_skeleton | aebd3a4f80bc1149662c8a0b1af5df40d28eff01 | [
"MIT"
] | 1 | 2021-01-21T10:29:10.000Z | 2021-01-21T10:29:10.000Z | import json
from app.event import Event
from app.responder import send_ok_response
from app.utils import get_logger
# Module-level logger for this handler.
logger = get_logger("app")


def run(event: Event):
    """Handle an incoming request event: log the path and method, then
    reply with a JSON "Hello" body via send_ok_response."""
    logger.info(event.http_path())
    logger.info(event.http_method())
    body = json.dumps({
        'message': 'Hello'
    })
    return send_ok_response(body)
| 19.647059 | 42 | 0.715569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.107784 |
146025215903f4a31f6eb67eb9638cd290edeaac | 592 | py | Python | blender/arm/logicnode/transform/LN_set_object_location.py | Lykdraft/armory | da1cf33930ce9a8b1865d35c128fe4842bef2933 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/transform/LN_set_object_location.py | Lykdraft/armory | da1cf33930ce9a8b1865d35c128fe4842bef2933 | [
"Zlib"
] | null | null | null | blender/arm/logicnode/transform/LN_set_object_location.py | Lykdraft/armory | da1cf33930ce9a8b1865d35c128fe4842bef2933 | [
"Zlib"
] | null | null | null | from arm.logicnode.arm_nodes import *
class SetLocationNode(ArmLogicTreeNode):
    """Use to set the location of an object."""
    # Blender node registration identifiers and Armory node version.
    bl_idname = 'LNSetLocationNode'
    bl_label = 'Set Object Location'
    arm_version = 1

    def init(self, context):
        # Declare the node sockets: an action trigger, the target object,
        # the new location vector, and a follow-up action output.
        super(SetLocationNode, self).init(context)
        self.add_input('ArmNodeSocketAction', 'In')
        self.add_input('ArmNodeSocketObject', 'Object')
        self.add_input('NodeSocketVector', 'Location')
        self.add_output('ArmNodeSocketAction', 'Out')


# Register this node class in the Armory logic-node catalogue.
add_node(SetLocationNode, category=PKG_AS_CATEGORY, section='location')
| 34.823529 | 71 | 0.706081 | 479 | 0.809122 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.339527 |
14602718b6991bccc17ff0fb8c058bc954f6e408 | 32,756 | py | Python | lichess/client.py | qe/lichess | 4d6aaeeeae57df863663565b652b7d3533a538ac | [
"MIT"
] | null | null | null | lichess/client.py | qe/lichess | 4d6aaeeeae57df863663565b652b7d3533a538ac | [
"MIT"
] | null | null | null | lichess/client.py | qe/lichess | 4d6aaeeeae57df863663565b652b7d3533a538ac | [
"MIT"
] | null | null | null | from .enums import *
from .utils import *
from .exceptions import *
import logging
import requests
import urllib
# Module-level logger used for request diagnostics and input warnings.
logger = logging.getLogger(__name__)
# Accepted speed/variant keys, derived from the PerfType enum values.
VALID_PERF_TYPES = [_.value for _ in PerfType]
class Client:
def __init__(self, token=None):
    """Create a Lichess API client.

    :param Optional[str] token: Personal API token. When falsy, the
        ``token`` attribute is deliberately left unset; request() turns
        the resulting AttributeError into an APIKeyError for OAuth calls.
    """
    self.url = "https://lichess.org/"
    # One shared session so connections are reused across requests.
    self.s = requests.Session()
    if token:
        self.token = token
# post_data
def request(self, path, payload=None, oauth=False, **kwargs):
    """Perform a GET against the Lichess API and decode the response.

    :param str path: Endpoint path, joined onto ``self.url``
    :param Optional[dict] payload: Query-string parameters
    :param Optional[bool] oauth: Send the Bearer token header
    :param kwargs: ``parse=True`` returns the raw body text;
        ``ndjson=True`` decodes newline-delimited JSON via ``ndjson()``;
        otherwise the body is decoded as JSON. Other keys are ignored.
    :raises APIKeyError: when no token is set (OAuth) or the server
        responds with 401
    :raises ResponseError: for any other non-200 status code
    """
    parsed_url = urllib.parse.urljoin(self.url, path)
    try:
        if oauth:
            try:
                response = self.s.get(parsed_url, headers={"Authorization": f"Bearer {self.token}"}, params=payload)
            except AttributeError:
                # self.token was never set in __init__ (no token given).
                raise APIKeyError("Missing API key. Generate one at: https://lichess.org/account/oauth/token")
        else:
            response = self.s.get(parsed_url, params=payload)
    except requests.exceptions.RequestException as err:
        # Network-level failure: log it, then let the caller see it.
        logger.error(err)
        raise
    if response.status_code == 200:
        if kwargs.get("parse"):
            return str(response.text)
        elif kwargs.get("ndjson"):
            return ndjson(response)
        return response.json()
    elif response.status_code == 401:
        raise APIKeyError(
            "Invalid or expired API key. Generate a new one at: https://lichess.org/account/oauth/token")
    else:
        raise ResponseError(response.status_code)
# -- Account --------------------------------------------------------------
def get_profile(self):
    """Fetch your public profile information (requires OAuth).

    :return: A dictionary with your public information
    :rtype: dict
    """
    return self.request(path="api/account", oauth=True)
def get_email(self):
    """Fetch your email address (requires OAuth).

    :return: A dictionary with your email address
    :rtype: dict
    """
    return self.request(path="api/account/email", oauth=True)
def get_preferences(self):
    """Fetch your account preferences (requires OAuth).

    :return: A dictionary with your preferences
    :rtype: dict
    """
    return self.request(path="api/account/preferences", oauth=True)
def get_kid_mode(self):
    """Fetch your kid-mode status (requires OAuth).

    :return: A dictionary with your kid mode status
    :rtype: dict
    """
    return self.request(path="api/account/kid", oauth=True)
# """
# POST
# """
# def set_kid_mode(self):
# """Set your kid mode status
#
# :return:
# :rtype:
# """
# endpoint = "api/account/kid"
# # POST
# pass
# -- Users ----------------------------------------------------------------
def get_status(self, users, with_game_ids=False):
    """Fetch the real-time status of one or more users.

    :param list[str] users: Usernames to query
    :param Optional[bool] with_game_ids: Also include the ID of the game
        each player is currently playing, if any
    :return: A list of dictionaries, one per queried user
    :rtype: list
    """
    # Only warn — the request is still attempted with the given names.
    if [usr for usr in users if not valid_input(usr)]:
        logger.warning("One or more usernames are invalid.")
    request_params = {
        "ids": ','.join(users),
        "withGameIds": with_game_ids,
    }
    return self.request(path="api/users/status", payload=request_params)
# """
# Create function in utils.py to manually parse these two responses (JSON problems)
# """
# def get_top_ten(self):
# """Get the top 10 players for each speed and variant
#
# :return:
# """
# endpoint = "player"
# return self.request(path=endpoint)
#
# def get_leaderboard(self, perf_type, num_users):
# """Get leaderboard of an individual speed or variant
#
# :return:
# """
# if perf_type not in VALID_PERF_TYPES:
# raise ArgumentValueError("Value of perf_type is invalid.")
# logger.warning("")
# if (num_users <= 0) or (200 < num_users):
# raise ArgumentValueError("Value of num_users is invalid. Valid range includes any integer from 1 to 200")
# logger.warning("")
#
# endpoint = "player/top/{nb}/{perfType}"
# path = endpoint.format(nb=num_users, perfType=perf_type)
# return self.request(path=path)
def get_data(self, user):
    """Fetch the public data of an individual user.

    :param str user: User to query
    :return: A dictionary with the public data of the user
    :rtype: dict
    """
    if not valid_input(user):
        logger.warning("Value of user is invalid.")
    return self.request(path="api/user/{username}".format(username=user))
def get_rating_history(self, user):
    """Fetch the rating history of an individual user.

    :param str user: User to query
    :return: A list of dictionaries, one per speed/variant
    :rtype: list
    """
    if not valid_input(user):
        logger.warning("Value of user is invalid.")
    return self.request(path="api/user/{username}/rating-history".format(username=user))
"""
Possibly add Enum for PerfType (also related to the get_leaderboard() method)
"""
def get_stats(self, user, perf_type):
"""Get performance statistics of an individual user
:param str user: User to query their performance statistics
:param str perf_type: Type of speed or variant to query
:return: A dictionary with the performance statistics of the user
:rtype: dict
"""
if not valid_input(user):
logger.warning("Value of user is invalid.")
if perf_type not in VALID_PERF_TYPES:
logger.warning("Value of perf_type is invalid.")
endpoint = "api/user/{username}/perf/{perf}"
path = endpoint.format(username=user, perf=perf_type)
return self.request(path=path)
def get_activity(self, user):
    """Fetch the activity feed of an individual user.

    :param str user: User to query
    :return: A list of dictionaries describing the user's activity feed
    :rtype: list
    """
    if not valid_input(user):
        logger.warning("Value of user is invalid.")
    return self.request(path="api/user/{username}/activity".format(username=user))
# """
# POST
# """
# def get_by_id(self):
# """Get users by ID
#
# :return:
# :rtype:
# """
# endpoint = "api/users"
# pass
def get_live_streamers(self):
    """Fetch the currently live streamers.

    :return: A list of dictionaries, one per live streamer
    :rtype: list
    """
    return self.request(path="streamer/live")
def get_crosstable(self, user1, user2, matchup=False):
    """Fetch the head-to-head record of two users.

    :param str user1: First user of the pair
    :param str user2: Second user of the pair
    :param Optional[bool] matchup: Also request current match data when
        the two users are playing right now
    :return: A dictionary with the total number of games and current score
    :rtype: dict
    """
    path = "api/crosstable/{user1}/{user2}".format(user1=user1, user2=user2)
    if not matchup:
        return self.request(path=path)
    return self.request(path=path, payload={"matchup": True})
# -- Relations ------------------------------------------------------------
def following(self):
    """Fetch the users you are following (requires OAuth).

    :return: A list of dictionaries, one per followed user
    :rtype: list
    """
    return self.request(path="api/rel/following", oauth=True, ndjson=True)
# """
# POST
# """
# def follow(self, player):
# """Follow a player
#
# :param str player:
# :return:
# :rtype:
# """
# pass
# """
# POST
# """
# def unfollow(self, player):
# """Unfollow a player
#
# :param str player:
# :return:
# :rtype:
# """
# pass
# -- Games ----------------------------------------------------------------
def export_by_id(self, game_id, moves=True, pgn_in_json=False, tags=True, clocks=True, evals=True, opening=True, literate=False, players=None):
    """Export an individual game as PGN text.

    :param str game_id: ID of the game to export
    :param Optional[bool] moves: Include the PGN moves
    :param Optional[bool] pgn_in_json: Include the full PGN within the JSON response
    :param Optional[bool] tags: Include the PGN tags
    :param Optional[bool] clocks: Include clock comments when available
    :param Optional[bool] evals: Include analysis evaluation comments when available
    :param Optional[bool] opening: Include the opening name
    :param Optional[bool] literate: Include textual annotations (opening, variations, mistakes, termination)
    :param Optional[str] players: URL of a text file with real names/ratings to substitute into the PGN
    :return: A string with the PGN data of the game
    :rtype: str
    """
    query = dict(
        moves=moves,
        pgnInJson=pgn_in_json,
        tags=tags,
        clocks=clocks,
        evals=evals,
        opening=opening,
        literate=literate,
        players=players,
    )
    return self.request(
        path="game/export/{gameId}".format(gameId=game_id),
        payload=query,
        parse=True,
        game_id=game_id,
    )
def export_ongoing_by_user(self, user, moves=True, pgn_in_json=False, tags=True, clocks=True, evals=True, opening=True, literate=False, players=None):
    """Export a user's ongoing game as PGN text.

    :param str user: User whose ongoing game should be exported
    :param Optional[bool] moves: Include the PGN moves
    :param Optional[bool] pgn_in_json: Include the full PGN within the JSON response
    :param Optional[bool] tags: Include the PGN tags
    :param Optional[bool] clocks: Include clock comments when available
    :param Optional[bool] evals: Include analysis evaluation comments when available
    :param Optional[bool] opening: Include the opening name
    :param Optional[bool] literate: Include textual annotations (opening, variations, mistakes, termination)
    :param Optional[str] players: URL of a text file with real names/ratings to substitute into the PGN
    :return: A string with the PGN data of the ongoing game
    :rtype: str
    """
    query = dict(
        moves=moves,
        pgnInJson=pgn_in_json,
        tags=tags,
        clocks=clocks,
        evals=evals,
        opening=opening,
        literate=literate,
        players=players,
    )
    return self.request(
        path="api/user/{username}/current-game".format(username=user),
        payload=query,
        parse=True,
    )
def export_by_user(self, user, since=None, until=None, max_games=None, vs=None, rated=None, perf_type=None, color=None, analyzed=None, moves=True, pgn_in_json=False, tags=True, clocks=True, evals=True, opening=True, ongoing=False, finished=True, players=None, sort="dateDesc"):
    """Export the games of a user.

    :param str user: user whose games to export
    :param Optional[int] since: only games played at or after this timestamp (defaults to account creation)
    :param Optional[int] until: only games played until this timestamp (defaults to now)
    :param Optional[int] max_games: cap on the number of games downloaded (None downloads everything)
    :param Optional[str] vs: only games against this specific opponent
    :param Optional[bool] rated: True for rated games only, False for casual only
    :param Optional[str] perf_type: restrict to a specific speed or variant
    :param Optional[str] color: restrict to games played as "white" or "black"
    :param Optional[bool] analyzed: restrict to games with a computer analysis available
    :param Optional[bool] moves: include the PGN moves
    :param Optional[bool] pgn_in_json: embed the full PGN inside the JSON response
    :param Optional[bool] tags: include the PGN tags
    :param Optional[bool] clocks: include clock comments when available
    :param Optional[bool] evals: include analysis evaluation comments when available
    :param Optional[bool] opening: include the opening name
    :param Optional[bool] ongoing: include ongoing games (their last 3 moves are omitted)
    :param Optional[bool] finished: include finished games (False yields only ongoing ones)
    :param Optional[str] players: URL of a text file with real names/ratings for the PGN
    :param Optional[str] sort: sort order of the games ("dateAsc" or "dateDesc")
    :return: PGN data of all the user's games
    :rtype: str
    """
    # NOTE: the API spells the query key "analysed" (British English).
    query = dict(
        since=since,
        until=until,
        max=max_games,
        vs=vs,
        rated=rated,
        perfType=perf_type,
        color=color,
        analysed=analyzed,
        moves=moves,
        pgnInJson=pgn_in_json,
        tags=tags,
        clocks=clocks,
        evals=evals,
        opening=opening,
        ongoing=ongoing,
        finished=finished,
        players=players,
        sort=sort,
    )
    return self.request(
        path="api/games/user/{username}".format(username=user),
        payload=query,
        parse=True,
    )
# """
# ndjson
# POST
# """
# def export_by_ids(self):
# """
# :return:
# :rtype:
# """
# endpoint = "api/games/export/_ids"
# pass
# """
# ndjson
# POST
# """
# def stream_among_users(self):
# """Stream the games played between users
# :return:
# :rtype:
# """
# endpoint = "api/stream/games-by-users"
# pass
def get_ongoing(self, max_games=9):
    """Get your ongoing games (realtime and correspondence).

    :param int max_games: Max number of games to fetch (the API default is 9)
    :return: A dictionary with your ongoing games
    :rtype: dict
    """
    endpoint = "api/account/playing"
    # Bug fix: max_games was accepted and documented but never sent, so the
    # API silently used its own default. It must be passed as the "nb"
    # query parameter.
    payload = {"nb": max_games, }
    return self.request(path=endpoint, payload=payload, oauth=True)
# """
# ndjson
# """
# def stream_moves(self, game_id):
# """Stream the moves/positions of any ongoing game
# :param str game_id: ID of game to stream
# :return:
# :rtype:
# """
# endpoint = "api/stream/game/{id}"
# path = endpoint.format(id=game_id)
# return self.request(path=path, ndjson=True)
# """
# POST
# """
# def import_by_pgn(self, pgn):
# """Upload a PGN game
# :param pgn:
# :return:
# :rtype:
# """
# endpoint = "api/import"
# pass
# -- TV -------------------------------------------------------------------
def get_games_channels(self):
    """Get the best games currently being played for each speed/variant,
    including computer games and bot games.

    :return: A dictionary with info on the current TV games
    :rtype: dict
    """
    return self.request(path="api/tv/channels")
# """
# ndjson
# """
# def stream_tv_game(self):
# """Stream positions and moves of the current TV game
# :return:
# :rtype:
# """
# endpoint = "api/tv/feed"
# return self.request(path=endpoint, ndjson=True)
def get_games_channel(self, channel, num_games=10, moves=True, pgn_in_json=False, tags=True, clocks=True, opening=True):
    """Get the best games currently being played for a specific
    speed/variant, including computer games and bot games.

    :param str channel: name of the channel in camelCase
    :param Optional[int] num_games: number of games to fetch
    :param Optional[bool] moves: include the PGN moves
    :param Optional[bool] pgn_in_json: embed the full PGN inside the JSON response
    :param Optional[bool] tags: include the PGN tags
    :param Optional[bool] clocks: include clock comments when available
    :param Optional[bool] opening: include the opening name
    :return: PGN data of the best games for the requested speed/variant
    :rtype: str
    """
    query = dict(
        nb=num_games,
        moves=moves,
        pgnInJson=pgn_in_json,
        tags=tags,
        clocks=clocks,
        opening=opening,
    )
    return self.request(
        path="api/tv/{channel}".format(channel=channel),
        payload=query,
        parse=True,
    )
# -- Puzzles --------------------------------------------------------------
def get_daily_puzzle(self):
    """Get the daily puzzle.

    :return: A dictionary with the daily puzzle
    :rtype: dict
    """
    return self.request(path="api/puzzle/daily")
def get_puzzle_activity(self, max_entries=None):
    """Get your puzzle activity.

    :param Optional[int] max_entries: number of entries to download (None downloads everything)
    :return: A list of dictionaries with all your puzzle activity
    :rtype: list
    """
    return self.request(
        path="api/puzzle/activity",
        payload=dict(max=max_entries),
        oauth=True,
        ndjson=True,
    )
def get_puzzle_dashboard(self, days):
    """Get your puzzle dashboard.

    :param int days: how many days back to aggregate puzzle results
    :return: A dictionary with your puzzle dashboard data
    :rtype: dict
    """
    return self.request(
        path="api/puzzle/dashboard/{days}".format(days=days),
        oauth=True,
    )
def get_storm_dashboard(self, user, days=30):
    """Get the Puzzle Storm dashboard of a player.

    :param str user: user whose storm dashboard data to query
    :param Optional[int] days: days of history to return (0 returns only highscores)
    :return: A dictionary with the user's storm dashboard data
    :rtype: dict
    """
    return self.request(
        path="api/storm/dashboard/{username}".format(username=user),
        payload=dict(days=days),
    )
# -- Teams ----------------------------------------------------------------
def get_team_swiss(self, team_id, max_tournaments=100):
    """Get all Swiss tournaments of a team.

    :param str team_id: ID of the team to query
    :param Optional[int] max_tournaments: maximum number of tournaments to fetch
    :return: A list of dictionaries, one per Swiss tournament of the team
    :rtype: list
    """
    return self.request(
        path="api/team/{teamId}/swiss".format(teamId=team_id),
        payload=dict(max=max_tournaments),
        ndjson=True,
    )
def get_team_info(self, team_id):
    """Get info about a team.

    :param str team_id: ID of the team to query
    :return: A dictionary with the team's info
    :rtype: dict
    """
    return self.request(path="api/team/{teamId}".format(teamId=team_id))
def get_popular_teams(self, page=1):
    """Get popular teams.

    :param Optional[int] page: page of the most-popular-teams listing to fetch
    :return: A dictionary with the popular teams
    :rtype: dict
    """
    return self.request(path="api/team/all", payload=dict(page=page))
def get_teams_player(self, user):
    """Get all the teams a player is a member of.

    :param str user: user whose team memberships to query
    :return: A list of dictionaries, one per team the player belongs to
    :rtype: list
    """
    return self.request(path="api/team/of/{username}".format(username=user))
def search_teams(self, text, page=1):
    """Search teams by keyword.

    :param str text: keyword to use in the team search
    :param Optional[int] page: results page to fetch
    :return: A dictionary with the team search results
    :rtype: dict
    """
    return self.request(
        path="api/team/search",
        payload=dict(text=text, page=page),
    )
def get_team_members(self, team_id):
    """Get the members of a team.

    :param str team_id: ID of the team whose members to query
    :return: A list of dictionaries, one per team member
    :rtype: list
    """
    return self.request(
        path="api/team/{teamId}/users".format(teamId=team_id),
        ndjson=True,
    )
# """
# 400 Bad Request
# """
# def get_join_requests(self, team_id):
# """Get pending join requests of your team
# :param str team_id:
# :return:
# :rtype:
# """
# endpoint = "api/team/{teamId}/requests"
# path = endpoint.format(teamId=team_id)
# return self.request(path=path, oauth=True)
# -- Board ----------------------------------------------------------------
# -- Bot ------------------------------------------------------------------
# -- Challenges -----------------------------------------------------------
# -- Bulk pairings --------------------------------------------------------
# -- Arena tournaments ----------------------------------------------------
def get_arena_all(self):
    """Get recently finished, ongoing, and upcoming Arena tournaments.

    :return: A dictionary with the tournaments
    :rtype: dict
    """
    return self.request(path="api/tournament")
def get_arena_info(self, tournament_id, page=1):
    """Get info about an Arena tournament.

    :param str tournament_id: ID of the Arena tournament to query
    :param Optional[int] page: standings page to include in the response
    :return: A dictionary with info about the queried Arena tournament
    :rtype: dict
    """
    return self.request(
        path="api/tournament/{id}".format(id=tournament_id),
        payload=dict(page=page),
    )
def export_arena_games(self, tournament_id):
    """Export the games of an Arena tournament.

    :param str tournament_id: ID of the Arena tournament to query
    :return: PGN data of the Arena tournament's games
    :rtype: str
    """
    return self.request(
        path="api/tournament/{id}/games".format(id=tournament_id),
        parse=True,
    )
def get_arena_results(self, tournament_id, max_players=None):
    """Get the results of an Arena tournament.

    :param str tournament_id: ID of the Arena tournament to query
    :param Optional[int] max_players: maximum number of players to fetch
    :return: A list of dictionaries of players with their score and
        performance, sorted by rank (best first)
    :rtype: list
    """
    path = "api/tournament/{id}/results".format(id=tournament_id)
    # Only send the "nb" parameter when a truthy limit was requested,
    # matching the server-side default otherwise.
    if not max_players:
        return self.request(path=path, ndjson=True)
    return self.request(path=path, payload={"nb": max_players, }, ndjson=True)
def get_teambattle_info(self, tournament_id):
    """Get the team standings of a team battle.

    :param str tournament_id: ID of the Arena tournament to query
    :return: A dictionary with info about the queried team battle
    :rtype: dict
    """
    return self.request(path="api/tournament/{id}/teams".format(id=tournament_id))
def get_arena_createdby(self, user):
    """Get the tournaments created by a user.

    :param str user: user whose created tournaments to query
    :return: A list of dictionaries, one per tournament created by the user
    :rtype: list
    """
    return self.request(
        path="api/user/{username}/tournament/created".format(username=user),
        ndjson=True,
    )
# -- Swiss Tournaments ----------------------------------------------------
def get_swiss_info(self, tournament_id):
    """Get info about a Swiss tournament.

    :param str tournament_id: ID of the Swiss tournament to query
    :return: A dictionary with info about the queried Swiss tournament
    :rtype: dict
    """
    return self.request(path="api/swiss/{id}".format(id=tournament_id))
def export_swiss_info(self, tournament_id):
    """Export the TRF (tournament report file) of a Swiss tournament.

    :param str tournament_id: ID of the Swiss tournament to query
    :return: TRF data of the Swiss tournament
    :rtype: str
    """
    return self.request(
        path="swiss/{id}.trf".format(id=tournament_id),
        parse=True,
    )
def export_swiss_games(self, tournament_id):
    """Export the games of a Swiss tournament.

    :param str tournament_id: ID of the Swiss tournament to query
    :return: PGN data of the Swiss tournament's games
    :rtype: str
    """
    return self.request(
        path="api/swiss/{id}/games".format(id=tournament_id),
        parse=True,
    )
def get_swiss_results(self, tournament_id, max_players=None):
    """Get the results of a Swiss tournament.

    :param str tournament_id: ID of the Swiss tournament to query
    :param Optional[int] max_players: maximum number of players to fetch
    :return: A list of dictionaries of players with their score and
        performance, sorted by rank (best first)
    :rtype: list
    """
    path = "api/swiss/{id}/results".format(id=tournament_id)
    # Only send the "nb" parameter when a truthy limit was requested,
    # matching the server-side default otherwise.
    if not max_players:
        return self.request(path=path, ndjson=True)
    return self.request(path=path, payload={"nb": max_players, }, ndjson=True)
# -- Simuls ---------------------------------------------------------------
def get_simuls(self):
    """Get recently finished, ongoing, and upcoming simuls.

    :return: A dictionary with the simuls
    :rtype: dict
    """
    return self.request(path="api/simul")
# -- Studies --------------------------------------------------------------
def export_chapter(self, study_id, chapter_id, clocks=True, comments=True, variations=True):
    """Export one study chapter.

    :param str study_id: the study ID (8 characters)
    :param str chapter_id: the chapter ID (8 characters)
    :param Optional[bool] clocks: include clock comments in the PGN moves when available
    :param Optional[bool] comments: include analysis and annotator comments when available
    :param Optional[bool] variations: include non-mainline moves when available
    :return: PGN data of one study chapter
    :rtype: str
    """
    opts = dict(clocks=clocks, comments=comments, variations=variations)
    return self.request(
        path="study/{studyId}/{chapterId}.pgn".format(studyId=study_id, chapterId=chapter_id),
        payload=opts,
        parse=True,
    )
def export_chapters(self, study_id, clocks=True, comments=True, variations=True):
    """Export all the chapters of a study.

    :param str study_id: the study ID (8 characters)
    :param Optional[bool] clocks: include clock comments in the PGN moves when available
    :param Optional[bool] comments: include analysis and annotator comments when available
    :param Optional[bool] variations: include non-mainline moves when available
    :return: PGN data of all the chapters of the study
    :rtype: str
    """
    opts = dict(clocks=clocks, comments=comments, variations=variations)
    return self.request(
        path="api/study/{studyId}.pgn".format(studyId=study_id),
        payload=opts,
        parse=True,
    )
def export_studies(self, user, clocks=True, comments=True, variations=True):
    """Export all the studies of a user.

    :param str user: user whose studies to export
    :param Optional[bool] clocks: include clock comments in the PGN moves when available
    :param Optional[bool] comments: include analysis and annotator comments when available
    :param Optional[bool] variations: include non-mainline moves when available
    :return: PGN data of all the user's studies
    :rtype: str
    """
    opts = dict(clocks=clocks, comments=comments, variations=variations)
    return self.request(
        path="study/by/{username}/export.pgn".format(username=user),
        payload=opts,
        oauth=True,
        parse=True,
    )
# -- Messaging ------------------------------------------------------------
# -- Broadcasts -----------------------------------------------------------
# -- Analysis -------------------------------------------------------------
# -- Opening Explorer -----------------------------------------------------
# -- Tablebase ------------------------------------------------------------
# -- OAuth ----------------------------------------------------------------
| 38.177156 | 281 | 0.590396 | 32,553 | 0.993803 | 0 | 0 | 0 | 0 | 0 | 0 | 20,749 | 0.633441 |
1461a4d69bcc5d469dfc60c076132f4feb9b241d | 2,273 | py | Python | src/mcedit2/widgets/infopanel.py | elcarrion06/mcedit2 | 4bb98da521447b6cf43d923cea9f00acf2f427e9 | [
"BSD-3-Clause"
] | 673 | 2015-01-02T02:08:13.000Z | 2022-03-24T19:38:14.000Z | src/mcedit2/widgets/infopanel.py | ozzhates/mcedit2 | 4bb98da521447b6cf43d923cea9f00acf2f427e9 | [
"BSD-3-Clause"
] | 526 | 2015-01-01T02:10:53.000Z | 2022-02-06T16:24:21.000Z | src/mcedit2/widgets/infopanel.py | ozzhates/mcedit2 | 4bb98da521447b6cf43d923cea9f00acf2f427e9 | [
"BSD-3-Clause"
] | 231 | 2015-01-01T16:47:30.000Z | 2022-03-31T21:51:55.000Z | """
${NAME}
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import time
import weakref
from PySide import QtGui
from mcedit2.widgets.layout import Column
log = logging.getLogger(__name__)
class InfoPanel(QtGui.QWidget):
    """Widget that shows "name: value" labels for selected attributes of a
    watched object, refreshing when any of the object's given signals fires.

    The watched object is held through a weak reference so the panel does
    not keep it alive.
    """

    def __init__(self, attrs, signals, **kwargs):
        """
        Create a widget that displays a list of an object's selected attributes, named in `attrs`.
        The widget updates itself whenever one of the object's signals named in `signals` is emitted.
        If an attribute named in `attrs` is not found on `object`, the InfoPanel instance is checked for
        an attribute of the same name and it is used instead if found.
        :type attrs: list of attribute names to display
        :type signals: list of signals to monitor
        :param kwargs: args for QWidget
        :type kwargs:
        """
        QtGui.QWidget.__init__(self, **kwargs)
        self.attrs = attrs
        self.signals = signals
        # Timestamp of the last refresh; used to throttle updateLabels().
        self.lastUpdate = time.time()
        # One QLabel per watched attribute, laid out vertically.
        self.labels = [QtGui.QLabel() for _ in attrs]
        self.setLayout(Column(*self.labels))

    def updateLabels(self):
        """Refresh every label from the watched object, at most 4x per second."""
        now = time.time()
        # Throttle: skip refreshes that arrive within 0.25 s of the last one.
        if now < self.lastUpdate + 0.25:
            return
        self.lastUpdate = now
        # self.object dereferences the weakref; falsy when the target is gone.
        if self.object:
            for attr, label in zip(self.attrs, self.labels):
                try:
                    value = getattr(self.object, attr)
                except AttributeError:  # catches unrelated AttributeErrors in property getters...
                    # Fall back to an attribute of the same name on the panel itself.
                    try:
                        value = getattr(self, attr)
                    except AttributeError:
                        log.exception("Error updating info panel.")
                        # Same lookup again, but with a default so the label
                        # still gets filled in with a placeholder.
                        value = getattr(self, attr, "Attribute not found")
                label.setText("%s: %s" % (attr, value))

    # Weak reference to the watched object (class-level default: None).
    # NOTE(review): reading `.object` before a target has been assigned calls
    # None() and raises TypeError — callers must set the object first.
    _object = None

    @property
    def object(self):
        # Dereference the weakref; returns None once the target is collected.
        return self._object()

    @object.setter
    def object(self, value):
        # Store only a weak reference so the panel never keeps `value` alive.
        self._object = weakref.ref(value)
        self.updateLabels()
        # Hook every named signal on the new object so future emissions
        # trigger a refresh; silently skip names the object lacks.
        for signal in self.signals:
            signal = getattr(self.object, signal, None)
            if signal:
                signal.connect(self.updateLabels)

    # Method-style alias for assigning the watched object.
    setObject = object.setter
| 32.014085 | 104 | 0.601848 | 2,021 | 0.889133 | 0 | 0 | 340 | 0.149582 | 0 | 0 | 696 | 0.306203 |
146252a9963181e60bb9b862f53e83071c1987b1 | 2,925 | py | Python | venv/lib/python3.7/site-packages/MDAnalysis/selections/__init__.py | dtklinh/GBRDE | c87fada492f24943d7d6b6ecda61c67f41d5bf83 | [
"MIT"
] | 2 | 2021-03-04T16:57:06.000Z | 2021-08-11T01:42:29.000Z | venv/lib/python3.7/site-packages/MDAnalysis/selections/__init__.py | dtklinh/GBRDE | c87fada492f24943d7d6b6ecda61c67f41d5bf83 | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/MDAnalysis/selections/__init__.py | dtklinh/GBRDE | c87fada492f24943d7d6b6ecda61c67f41d5bf83 | [
"MIT"
] | null | null | null | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Selection exporters
===================
Functions to write a :class:`MDAnalysis.core.groups.AtomGroup` selection
to a file so that it can be used in another programme.
:mod:`MDAnalysis.selections.vmd`
VMD_ selections
:mod:`MDAnalysis.selections.pymol`
PyMol_ selections
:mod:`MDAnalysis.selections.gromacs`
Gromacs_ selections
:mod:`MDAnalysis.selections.charmm`
CHARMM_ selections
The :class:`MDAnalysis.selections.base.SelectionWriterBase` base class and
helper functions are in :mod:`MDAnalysis.selections.base`, with the
exception of `:func:get_writer`:
.. autofunction:: get_writer
"""
from __future__ import absolute_import
from six import raise_from
import os.path
from .. import _SELECTION_WRITERS
from . import vmd
from . import pymol
from . import gromacs
from . import charmm
from . import jmol
def get_writer(filename, defaultformat):
    """Return a SelectionWriter for `filename` or a `defaultformat`.

    Parameters
    ----------
    filename : str
        name of the output file; the extension is used to guess the file format
    defaultformat : str
        if `filename` does not have an extension, use `defaultformat` instead

    Returns
    -------
    SelectionWriter : `type`
        the writer *class* for the detected format

    Raises
    ------
    :exc:`NotImplementedError`
        for any format that is not defined
    """
    if filename:
        # Extension without the leading dot, e.g. "sel.vmd" -> "vmd".
        format = os.path.splitext(filename)[1][1:]
    # Fall back to the default, then canonicalise for the registry lookup.
    format = (format or defaultformat).strip().upper()
    try:
        return _SELECTION_WRITERS[format]
    except KeyError:
        raise_from(NotImplementedError(
            "Writing as {0!r} is not implemented;"
            " only {1!r} will work.".format(format, _SELECTION_WRITERS.keys())),
            None)
| 31.451613 | 80 | 0.696752 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,286 | 0.781538 |
1466e6f9e9390c64c5ae93f8f73e9d6b213aba54 | 4,536 | py | Python | cs15211/PalindromePermutation.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2021-07-05T01:53:30.000Z | 2021-07-05T01:53:30.000Z | cs15211/PalindromePermutation.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | null | null | null | cs15211/PalindromePermutation.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2018-01-08T07:14:08.000Z | 2018-01-08T07:14:08.000Z | __source__ = 'https://leetcode.com/problems/palindrome-permutation/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/palindrome-permutation.py
# Time: O(n)
# Space: O(1)
#
# Description: Leetcode # 266. Palindrome Permutation
#
# Given a string, determine if a permutation of the string could form a palindrome.
#
# For example,
# "code" -> False, "aab" -> True, "carerac" -> True.
#
# Hint:
#
# Consider the palindromes of odd vs even length. What difference do you notice?
# Count the frequency of each character.
# If each character occurs even number of times,
# then it must be a palindrome. How about character which occurs odd number of times?
#
# #count of odd number char < 2
# Companies
# Google Uber Bloomberg
# Related Topics
# Hash Table
# Similar Questions
# Longest Palindromic Substring Valid Anagram Palindrome Permutation II Longest Palindrome
#
import unittest
import collections
# 20ms 99.07%
class Solution(object):
    def canPermutePalindrome(self, s):
        """Return True if some permutation of ``s`` is a palindrome.

        A permutation can form a palindrome iff at most one character
        occurs an odd number of times.

        :type s: str
        :rtype: bool
        """
        odd_chars = set()
        for ch in s:
            # Toggle membership: characters left in the set occurred an
            # odd number of times so far.
            if ch in odd_chars:
                odd_chars.remove(ch)
            else:
                odd_chars.add(ch)
        return len(odd_chars) < 2
# 20ms 99.07%
from collections import defaultdict
class Solution2(object):
    def canPermutePalindrome(self, s):
        """Return True if the characters of ``s`` can be rearranged into a
        palindrome, i.e. at most one character has an odd frequency.

        :type s: str
        :rtype: bool
        """
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        odd_total = sum(1 for freq in counts.values() if freq % 2)
        return odd_total <= 1
class TestMethods(unittest.TestCase):
    # Placeholder smoke test: asserts a tautology so running the module's
    # unittest suite always has at least one passing test.
    def test_Local(self):
        self.assertEqual(1, 1)
if __name__ == '__main__':
    # Run the unittest suite when this module is executed directly.
    unittest.main()
# Java solution
Java = '''
# Thought: https://leetcode.com/problems/palindrome-permutation/solution/
#
Time complexity : O(n). We traverse over the string ss of length nn once only.
Space complexity : O(n). The setset can grow upto a maximum size of nn in case of all distinct elements.
The idea is to iterate over string, adding current character to set if set doesn't contain that character,
or removing current character from set if set contains it.
When the iteration is finished, just return set.size()==0 || set.size()==1.
set.size()==0 corresponds to the situation when there are even number of any character in the string, and
set.size()==1 corresponsds to the fact that there are even number of any character except one.
# 1ms 65.75%
class Solution {
public boolean canPermutePalindrome(String s) {
Set<Character> set=new HashSet<Character>();
for(int i=0; i<s.length(); ++i){
if (!set.contains(s.charAt(i)))
set.add(s.charAt(i));
else
set.remove(s.charAt(i));
}
return set.size()==0 || set.size()==1;
}
}
# same as above
class Solution {
public boolean canPermutePalindrome(String s) {
Set<Character> set = new HashSet();
for (int i = 0; i < s.length(); i++) {
if (!set.add(s.charAt(i))) {
set.remove(s.charAt(i));
}
}
return set.size() <= 1;
}
}
# 1ms 65.75%
class Solution {
public boolean canPermutePalindrome(String s) {
BitSet bs = new BitSet();
for (byte b : s.getBytes())
bs.flip(b);
return bs.cardinality() < 2;
}
}
# count char with boolean[128]
# 0ms 100%
class Solution {
public boolean canPermutePalindrome(String s) {
boolean[] arr = new boolean[128];
for (int i = 0; i < s.length(); i++) {
char c = s.charAt(i);
arr[c] = !arr[c];
}
boolean odd = false;
for (int i = 0; i < 128; i++) {
if (arr[i]) {
if (odd) { //2 occurrence of odd char count
return false;
} else {
odd = true;
}
}
}
return true;
}
}
# 0ms 100%
class Solution {
public boolean canPermutePalindrome(String s) {
if (s == null || s.length() == 0) return true;
int[] map = new int[128]; //or use 256 depending on encoding
int count = 0;
for (int i = 0; i < s.length(); i++) {
map[s.charAt(i)]++;
if ( (map[s.charAt(i)] & 1) == 0) { //%2 ==0
count--;
} else {
count++;
}
}
return count <= 1;
}
}
'''
| 28.173913 | 106 | 0.573633 | 752 | 0.165785 | 0 | 0 | 0 | 0 | 0 | 0 | 3,778 | 0.832892 |
14686ee7aaaa9630d28bd94df5f95d8bfc28579e | 2,398 | py | Python | others/edge/face_identification/sphereface20/tflite/postprocess/eval.py | luluseptember/inference | acbc7b0bf288343ed81e62b69dea8afec03d679b | [
"Apache-2.0"
] | 4 | 2019-07-26T03:00:39.000Z | 2021-01-29T16:12:21.000Z | others/edge/face_identification/sphereface20/tflite/postprocess/eval.py | luluseptember/inference | acbc7b0bf288343ed81e62b69dea8afec03d679b | [
"Apache-2.0"
] | 5 | 2020-09-26T00:53:40.000Z | 2022-02-10T01:13:29.000Z | others/edge/face_identification/sphereface20/tflite/postprocess/eval.py | luluseptember/inference | acbc7b0bf288343ed81e62b69dea8afec03d679b | [
"Apache-2.0"
] | 7 | 2019-07-23T14:07:21.000Z | 2021-07-05T12:41:08.000Z |
""" To calculate the verification accuracy of LFW dataset """
# MIT License
#
# Copyright (c) 2018 Jimmy Chiang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import pdb
import numpy as np
THRESHOLD = 0.41
def _distance(embeddings1, embeddings2):
# Distance based on cosine similarity
dot = np.sum(np.multiply(embeddings1, embeddings2), axis=1)
norm = np.linalg.norm(embeddings1, axis=1) * np.linalg.norm(embeddings2, axis=1)
similarity = dot / norm
dist = np.arccos(similarity) / math.pi
return dist
def _calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
acc = float(tp + tn) / dist.size
return acc
def _lfw_evaluate(embeddings1, embeddings2, actual_issame):
    """Evaluate LFW verification accuracy for paired embeddings.

    Returns a (has_nan, accuracy) tuple: when either embedding batch
    contains NaNs the evaluation is aborted with (True, 0); otherwise
    the accuracy at the module-level THRESHOLD is returned as (False, acc).
    """
    has_nan = np.sum(np.isnan(embeddings1)) > 0 or np.sum(np.isnan(embeddings2)) > 0
    if has_nan:
        return True, 0
    accuracy = _calculate_accuracy(THRESHOLD,
                                   _distance(embeddings1, embeddings2),
                                   actual_issame)
    return False, accuracy
def lfw_metric(embeddings1, embeddings2, actual_issame):
    """Return the LFW verification accuracy, or NaN if the embeddings
    contain NaN values and cannot be evaluated."""
    isNan, accuracy = _lfw_evaluate(embeddings1, embeddings2, actual_issame)
    return np.nan if isNan else accuracy
| 38.677419 | 94 | 0.741451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,184 | 0.493745 |
146b5de715f79bfd487c02d589bad6ff62c03076 | 5,032 | py | Python | LDAR_Sim/src/ldar_sim_main.py | sinakiaei/LDAR_Sim | 299d81d51bd5dd734eb3bf0d8fe671d2b7eebe01 | [
"MIT"
] | 2 | 2021-11-08T13:55:45.000Z | 2021-11-16T16:46:07.000Z | LDAR_Sim/src/ldar_sim_main.py | sinakiaei/LDAR_Sim | 299d81d51bd5dd734eb3bf0d8fe671d2b7eebe01 | [
"MIT"
] | 4 | 2021-11-19T20:43:39.000Z | 2022-01-20T00:14:39.000Z | LDAR_Sim/src/ldar_sim_main.py | sinakiaei/LDAR_Sim | 299d81d51bd5dd734eb3bf0d8fe671d2b7eebe01 | [
"MIT"
] | 2 | 2021-11-18T17:10:10.000Z | 2022-01-14T21:39:06.000Z | # ------------------------------------------------------------------------------
# Program: The LDAR Simulator (LDAR-Sim)
# File: LDAR-Sim main
# Purpose: Interface for parameterizing and running LDAR-Sim.
#
# Copyright (C) 2018-2021 Intelligent Methane Monitoring and Management System (IM3S) Group
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the MIT License as published
# by the Free Software Foundation, version 3.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
# You should have received a copy of the MIT License
# along with this program. If not, see <https://opensource.org/licenses/MIT>.
#
# ------------------------------------------------------------------------------
import datetime
import json
import multiprocessing as mp
import os
import shutil
from pathlib import Path
from economics.cost_mitigation import cost_mitigation
from initialization.args import files_from_args, get_abs_path
from initialization.input_manager import InputManager
from initialization.sims import create_sims
from initialization.sites import init_generator_files
from ldar_sim_run import ldar_sim_run
from out_processing.batch_reporting import BatchReporting
from out_processing.prog_table import generate as gen_prog_table
from utils.generic_functions import check_ERA5_file
# Start-up banner printed by the __main__ block below: names the LDAR-Sim
# release, its license, and where to report issues.
opening_msg = """
You are running LDAR-Sim version 2.0 an open sourced software (MIT) license.
It is continually being developed by the University of Calgary's Intelligent
Methane Monitoring and Management System (IM3S) Group.
Provide any issues, comments, questions, or recommendations to the IM3S by
adding an issue to https://github.com/LDAR-Sim/LDAR_Sim.git.
"""
if __name__ == '__main__':
    print(opening_msg)
    # Set the current working directory to the repository root (the parent
    # folder of the directory containing this file) so relative paths resolve.
    root_dir = Path(os.path.dirname(os.path.realpath(__file__))).parent
    os.chdir(root_dir)
    # --- Retrieve input parameters and parse ---
    parameter_filenames = files_from_args(root_dir)
    input_manager = InputManager()
    sim_params = input_manager.read_and_validate_parameters(parameter_filenames)
    # --- Assign local variables ---
    ref_program = sim_params['reference_program']
    base_program = sim_params['baseline_program']
    in_dir = get_abs_path(sim_params['input_directory'])
    out_dir = get_abs_path(sim_params['output_directory'])
    programs = sim_params.pop('programs')
    # --- Run checks ----
    check_ERA5_file(in_dir, programs)
    has_ref = ref_program in programs
    has_base = base_program in programs
    # --- Set up output folder (wiped and recreated on every run) ---
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)
    input_manager.write_parameters(out_dir / 'parameters.yaml')
    # If the leak generator is used and generated files already exist, the
    # user is prompted to reuse them; if they decline, the files are removed.
    if sim_params['pregenerate_leaks']:
        generator_dir = in_dir / "generator"
        init_generator_files(
            generator_dir, input_manager.simulation_parameters, in_dir, programs[base_program])
    else:
        generator_dir = None
    # --- Create simulations ---
    simulations = create_sims(sim_params, programs, generator_dir, in_dir, out_dir, input_manager)
    # --- Run simulations (in parallel) --
    with mp.Pool(processes=sim_params['n_processes']) as p:
        sim_outputs = p.starmap(ldar_sim_run, simulations)
    # ---- Generate outputs ----
    print("....Generating output data")
    if sim_params['write_data']:
        # Batch reporting and economics need both a reference and a baseline
        # program. (Fixed: use logical `and` instead of bitwise `&`.)
        if has_ref and has_base:
            print("....Generating cost mitigation outputs")
            # Fixed: store the result under a distinct name so the imported
            # cost_mitigation() function is not shadowed by its own output.
            cost_mitigation_outputs = cost_mitigation(
                sim_outputs, ref_program, base_program, out_dir)
            reporting_data = BatchReporting(
                out_dir, sim_params['start_date'], ref_program, base_program)
            if sim_params['n_simulations'] > 1:
                reporting_data.program_report()
                if len(programs) > 1:
                    print("....Generating program comparison plots")
                    reporting_data.batch_report()
                    reporting_data.batch_plots()
        else:
            print('No reference or base program input...skipping batch reporting and economics.')
    # Generate summary output table
    print("....Exporting summary statistic tables")
    out_prog_table = gen_prog_table(sim_outputs, base_program, programs)
    with open(out_dir / 'prog_table.json', 'w') as fp:
        json.dump(out_prog_table, fp)
    # Write program metadata (program list + run timestamp); `with` ensures
    # the file is closed even if a write fails.
    with open(out_dir / '_metadata.txt', 'w') as metadata:
        metadata.write(str(programs) + '\n' +
                       str(datetime.datetime.now()))
| 39.3125 | 98 | 0.69217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,288 | 0.45469 |
146b6b043636c6d237f0ca4b07fe4ebbf0597b33 | 3,037 | py | Python | results/try_different_variance_maps.py | osagha/turktools | 389c1243e837e946080b28a5ca83faf62890f4b4 | [
"Unlicense",
"MIT"
] | null | null | null | results/try_different_variance_maps.py | osagha/turktools | 389c1243e837e946080b28a5ca83faf62890f4b4 | [
"Unlicense",
"MIT"
] | null | null | null | results/try_different_variance_maps.py | osagha/turktools | 389c1243e837e946080b28a5ca83faf62890f4b4 | [
"Unlicense",
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sympy import Symbol, solve
from sympy.abc import a, b, c
from fit_beta import fit_beta_mean_uncertainty, kl_dirichlet
# Input CSV with the per-item beta-experiment results.
RESULTS_FILE = "../results/results_beta_exp.csv"
# Substring switches: "range" and/or "upper_right" enable the filters below.
FILTER_BY = "range"

results = pd.read_csv(RESULTS_FILE, header=0)

# Keep only rows whose helpfulness/prior/posterior ranges are all below 0.75.
if "range" in FILTER_BY:
    range_mask = (
        (results["helpfulness_range"] < 0.75)
        & (results["prior_range"] < 0.75)
        & (results["posterior_range"] < 0.75)
    )
    results = results[range_mask]
# Keep only rows in the upper-right quadrant (prior and posterior above 0.5).
if "upper_right" in FILTER_BY:
    quadrant_mask = (
        (results["prior_mturk_july"] > 0.5)
        & (results["posterior_mturk_july"] > 0.5)
    )
    results = results[quadrant_mask]
def map_certainty_to_variance(certainty):
    """Linearly map a 1-7 certainty rating onto a Beta-variance target.

    The line passes through (7, 0.001) and (1, 0.07); 0.07 sits just below
    the maximum possible Beta variance (0.08133333 at alpha = beta = 1).
    """
    intercept = 0.0815
    slope = -0.0115
    return intercept + slope * certainty
def hyperbola_map(certainty, a, b, c):
    """Map a certainty rating to a variance along the hyperbola a / (certainty - c) + b."""
    shifted = certainty - c
    return a / shifted + b
# Sweep the mid-point variance target x (value of the hyperbola at certainty 6)
# and, for each x, solve for hyperbola parameters passing through
# (1, 0.07), (7, 0.001) and (6, x), then fit per-row Beta distributions.
for x in np.arange(0.002, 0.07, 0.002):
    # solve() can return several solutions; the first one is used.
    params = solve([a/(1-c)+b-0.07, a/(7-c)+b-0.001, a/(6-c)+b-x])[0]
    # NOTE(review): a_x/b_x/c_x are computed but never used — params[a/b/c]
    # are passed directly below.
    a_x, b_x, c_x = float(params[a]), float(params[b]), float(params[c])
    for field in ["prior", "posterior"]:
        # Fit a Beta for each row from its mean rating and the variance
        # implied by its certainty rating under the current hyperbola.
        results[field + f"_beta_{x}"] = results.apply(
            lambda z: fit_beta_mean_uncertainty(z[field + "_mturk_july"],
                                                hyperbola_map(z[field + "_certainty_july"], params[a], params[b], params[c])), axis=1)
    # KL divergence between the fitted posterior and prior Betas per row.
    results[f"kl_beta_{x}"] = results.apply(lambda z: kl_dirichlet(z[f"posterior_beta_{x}"], z[f"prior_beta_{x}"]), axis=1)
# NOTE(review): the plotting below is outside the loop, so it uses the columns
# produced by the FINAL value of x only — confirm this is intended.
fig, axs = plt.subplots(1, 3, figsize=(11, 4))
# Panel 1: prior vs. posterior, colored by the empirical KL.
g = sns.scatterplot(data=results, x="prior_mturk_july", y="posterior_mturk_july", hue="kl_july_and_mturk_probs_exp_10", ax=axs.flat[0], alpha=0.7, s=80)
g.legend_.set_title("KL")
g.title.set_text(f"Prior & Posterior vs. KL (exp)")
g.set_xlabel("Prior")
g.set_ylabel("Posterior")
# Panel 2: same scatter, colored by mean helpfulness.
g = sns.scatterplot(data=results, x="prior_mturk_july", y="posterior_mturk_july", hue="helpfulness_mean", ax=axs.flat[1], alpha=0.7, s=80)
g.legend_.set_title("Helpfulness")
g.title.set_text(f"Prior & Posterior vs. Helpfulness")
g.set_xlabel("Prior")
g.set_ylabel("")
g.set_yticklabels([])
# Panel 3: same scatter, colored by the Beta-fit KL for the last x.
g = sns.scatterplot(data=results, x="prior_mturk_july", y="posterior_mturk_july", hue=f"kl_beta_{x}", ax=axs.flat[2], alpha=0.7, s=80)
g.legend_.set_title("Beta KL")
g.title.set_text(f"Prior & Posterior vs. KL Beta (exp)")
g.set_xlabel("Prior")
g.set_ylabel("")
g.set_yticklabels([])
plt.subplots_adjust(left=0.05, right=0.95, wspace=0.1, )
plt.savefig(f"../figures/beta/scatter/prior_posterior_vs_helpfulness_kl_klbeta_{x}.png")
# fig.close()
# plt.savefig(f"../figures/beta/scatter/prior_posterior_vs_helpfulness_kl_klbeta_upper_right_unfiltered.png")
| 39.960526 | 156 | 0.641093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 987 | 0.324992 |
146b9782c6137a18b395c75b1a9f1454ebb38ab8 | 5,977 | py | Python | conf_testing/lib/HABAppTests/test_base.py | pailloM/HABApp | 3e0defd99ede9b91c164cb9d1ee011fd74e801c3 | [
"Apache-2.0"
] | null | null | null | conf_testing/lib/HABAppTests/test_base.py | pailloM/HABApp | 3e0defd99ede9b91c164cb9d1ee011fd74e801c3 | [
"Apache-2.0"
] | null | null | null | conf_testing/lib/HABAppTests/test_base.py | pailloM/HABApp | 3e0defd99ede9b91c164cb9d1ee011fd74e801c3 | [
"Apache-2.0"
] | null | null | null | import logging
import threading
import typing
import HABApp
from HABApp.core.events.habapp_events import HABAppException
from ._rest_patcher import RestPatcher
# Module-wide logger for test progress and results.
log = logging.getLogger('HABApp.Tests')
# Guards the global rule registry (RULE_CTR / TESTS_RULES defined below).
LOCK = threading.Lock()
class TestResult:
    """Mutable tally of executed tests: run / io (passed) / nio (failed) / skipped."""
    def __init__(self):
        self.run = 0        # total tests executed
        self.io = 0         # tests that passed
        self.nio = 0        # tests that failed
        self.skipped = 0    # tests that were skipped
    def __iadd__(self, other):
        # Merge another result into this one, field by field.
        assert isinstance(other, TestResult)
        for counter in ('run', 'io', 'nio', 'skipped'):
            setattr(self, counter, getattr(self, counter) + getattr(other, counter))
        return self
    def __repr__(self):
        return f'Processed {self.run:d} Tests: IO: {self.io} NIO: {self.nio} skipped: {self.skipped}'
class TestConfig:
    """Per-rule switches controlling how the test runner treats failures."""
    def __init__(self):
        # When True, warnings emitted during a test are counted as errors.
        self.warning_is_error = False
        # When True, all remaining tests are skipped after the first failure.
        self.skip_on_failure = False
# Monotonically increasing id handed out to each created TestBaseRule.
RULE_CTR = 0
# Registry of all live test rules keyed by their id (see get_next_id/pop_rule).
TESTS_RULES: typing.Dict[int, 'TestBaseRule'] = {}
def get_next_id(rule):
    """Register *rule* in the global registry and return its new unique id."""
    global RULE_CTR
    with LOCK:
        RULE_CTR += 1
        new_id = RULE_CTR
        TESTS_RULES[new_id] = rule
    return new_id
def pop_rule(rule_id: int):
    """Remove the rule with *rule_id* from the global registry."""
    with LOCK:
        del TESTS_RULES[rule_id]
class TestBaseRule(HABApp.Rule):
    """Base class for self-testing HABApp rules.

    Subclasses register test callables via :meth:`add_test`. Once the most
    recently created rule fires its scheduled run, every registered rule
    executes its tests and the aggregated :class:`TestResult` is logged
    and printed.
    """
    def __init__(self):
        super().__init__()
        self.__tests_funcs = {}  # name -> (func, args, kwargs)
        self.tests_started = False
        # Register this instance in the global registry and remove it again
        # when the rule gets unloaded.
        self.__id = get_next_id(self)
        self.register_on_unload(lambda: pop_rule(self.__id))
        self.config = TestConfig()
        # we have to chain the rules later, because we register the rules only
        # once we loaded successfully
        self.run.at(2, self.__execute_run)
        # Collect warnings and errors reported through the HABApp topics.
        self.listen_event(HABApp.core.const.topics.WARNINGS, self.__warning)
        self.listen_event(HABApp.core.const.topics.ERRORS, self.__error)
        self.__warnings = 0
        self.__errors = 0
    def __warning(self, event: str):
        # Count and log warnings emitted while a test is running.
        self.__warnings += 1
        for line in event.splitlines():
            log.warning(line)
    def __error(self, event):
        # Count and log errors emitted while a test is running.
        self.__errors += 1
        msg = event.to_str() if isinstance(event, HABAppException) else event
        for line in msg.splitlines():
            log.error(line)
    def __execute_run(self):
        # Only the most recently created rule drives the run so the summary
        # is produced exactly once.
        with LOCK:
            if self.__id != RULE_CTR:
                return None
        result = TestResult()
        for k, rule in sorted(TESTS_RULES.items()):
            assert isinstance(rule, TestBaseRule)
            if rule.tests_started:
                continue
            r = TestResult()
            rule.run_tests(r)
            result += r
        log.info('-' * 120)
        if not result.nio:
            log.info(str(result))
        else:
            log.error(str(result))
        print(str(result))
        return None
    def add_test(self, name, func, *args, **kwargs):
        """Register a test callable under a unique name."""
        assert name not in self.__tests_funcs, name
        self.__tests_funcs[name] = (func, args, kwargs)
    def set_up(self):
        """Hook executed once before this rule's tests (override as needed)."""
        pass
    def tear_down(self):
        """Hook executed once after this rule's tests (override as needed)."""
        pass
    def run_tests(self, result: TestResult):
        """Run all registered tests of this rule, updating *result* in place."""
        self.tests_started = True
        try:
            with RestPatcher(self.__class__.__name__ + '.' + 'set_up'):
                self.set_up()
        except Exception as e:
            log.error(f'"Set up of {self.__class__.__name__}" failed: {e}')
            for line in HABApp.core.wrapper.format_exception(e):
                log.error(line)
            result.nio += 1
            return None
        test_count = len(self.__tests_funcs)
        log.info(f'Running {test_count} tests for {self.rule_name}')
        for name, test_data in self.__tests_funcs.items():
            self.__run_test(name, test_data, result)
        # TEAR DOWN (failures are counted but do not abort the run)
        try:
            with RestPatcher(self.__class__.__name__ + '.' + 'tear_down'):
                self.tear_down()
        except Exception as e:
            # Fixed: this branch used to log "Set up of ..." for tear-down errors.
            log.error(f'"Tear down of {self.__class__.__name__}" failed: {e}')
            for line in HABApp.core.wrapper.format_exception(e):
                log.error(line)
            result.nio += 1
    def __run_test(self, name: str, data: tuple, result: TestResult):
        """Execute a single registered test and record its outcome in *result*."""
        test_count = len(self.__tests_funcs)
        width = test_count // 10 + 1
        result.run += 1
        self.__warnings = 0
        self.__errors = 0
        # Optionally skip all remaining tests after the first failure.
        if self.config.skip_on_failure:
            if result.nio:
                result.skipped += 1
                log.warning(f'Test {result.run:{width}}/{test_count} "{name}" skipped!')
                return None
        try:
            func, args, kwargs = data
            with RestPatcher(self.__class__.__name__ + '.' + name):
                msg = func(*args, **kwargs)
        except Exception as e:
            log.error(f'Test "{name}" failed: {e}')
            for line in HABApp.core.wrapper.format_exception(e):
                log.error(line)
            result.nio += 1
            return None
        # True and None both mean "passed without remarks".
        if msg is True or msg is None:
            msg = ''
        # Fixed: worker errors/warnings are appended to the test's own message
        # instead of overwriting it, and the warning message now reports the
        # warning counter (it previously reported the error counter).
        remarks = []
        if self.__errors:
            remarks.append(f'{self.__errors} error{"s" if self.__errors != 1 else ""} in worker')
        if self.config.warning_is_error and self.__warnings:
            remarks.append(f'{self.__warnings} warning{"s" if self.__warnings != 1 else ""} in worker')
        if remarks:
            prefix = f'{msg}, ' if isinstance(msg, str) and msg else ''
            msg = prefix + ', '.join(remarks)
        if msg == '':
            result.io += 1
            log.info(f'Test {result.run:{width}}/{test_count} "{name}" successful!')
        elif isinstance(msg, str) and msg.lower() == 'skip':
            # Fixed: the comparison used the uppercase literal 'SKIP', which
            # str.lower() can never produce, so tests could never be skipped.
            result.skipped += 1
            log.info(f'Test {result.run:{width}}/{test_count} "{name}" skipped!')
        else:
            result.nio += 1
            if isinstance(msg, bool):
                log.error(f'Test {result.run:{width}}/{test_count} "{name}" failed')
            else:
                log.error(f'Test {result.run:{width}}/{test_count} "{name}" failed: {msg} ({type(msg)})')
| 30.340102 | 110 | 0.574536 | 5,454 | 0.912498 | 0 | 0 | 0 | 0 | 0 | 0 | 1,099 | 0.183872 |
146cd6968f58dcd6a930000bd6f24a70be3c40a2 | 1,948 | py | Python | dags/connect_to_oracle_and_sql.py | whatwouldaristotledo/docker-airflow | 2c6f6179170d87e2046bd844480bfddc6309b1de | [
"Apache-2.0"
] | null | null | null | dags/connect_to_oracle_and_sql.py | whatwouldaristotledo/docker-airflow | 2c6f6179170d87e2046bd844480bfddc6309b1de | [
"Apache-2.0"
] | null | null | null | dags/connect_to_oracle_and_sql.py | whatwouldaristotledo/docker-airflow | 2c6f6179170d87e2046bd844480bfddc6309b1de | [
"Apache-2.0"
] | null | null | null | from airflow.models import DAG
from airflow.utils.dates import days_ago
from airflow.operators.python_operator import PythonOperator
from datetime import datetime
import pandas as pd
import urllib
import random
import cx_Oracle
import pyodbc
import sqlalchemy
# Default arguments applied to every task in this DAG.
args={
    'owner': 'BI',
    # start_date seeds the scheduler; the first actual run happens one
    # schedule interval after start_date (see the Airflow scheduling docs).
    'start_date': datetime(2021, 1 , 27),
    'retries':0
}
# schedule_interval is evaluated in UTC, which is 8 hours ahead of PST; to run
# at 8am PST you would put 16 in the hour field. '0 8 * * *' below fires at
# 08:00 UTC, i.e. midnight PST (and e.g. 10 UTC would be 2am PST).
dag = DAG(dag_id='connect_to_oracle_or_sql', default_args=args, schedule_interval='0 8 * * *', catchup=False)
def sql_connect_with_cursor(**context):
    """Run a stored procedure over a raw pyodbc connection/cursor.

    Demonstrates the plain-cursor way of talking to MSSQL. The connection is
    now closed in a ``finally`` block so it is released even if the query
    fails (the original leaked it on any exception), and the unused result
    variable has been dropped.
    """
    sql_cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER=YourServer;DATABASE=Yourdb;UID=YourUserNameHere;PWD=YourUserPWHere')
    try:
        sql_cursor = sql_cnxn.cursor()
        sql_cursor.execute('EXEC Somestoredprocedure')
    finally:
        sql_cnxn.close()
def connect_to_oracle(**connect):
    """Open (and immediately close) a connection to the Oracle instance."""
    dsn = cx_Oracle.makedsn('10.1.1.31', '1521', service_name='orcl')
    connection = cx_Oracle.connect(user=r'user', password='userpw', dsn=dsn)
    connection.close()
def sql_connect_with_alchemy_engine(**context):
    """Open (and immediately close) an MSSQL connection via a SQLAlchemy engine."""
    odbc_str = urllib.parse.quote_plus("Driver={ODBC Driver 17 for SQL Server};Server=YourServer;Database=Yourdb;UID=YourUserNameHere;PWD=YourUserPWHere;")
    engine = sqlalchemy.create_engine("mssql+pyodbc:///?odbc_connect={}".format(odbc_str))
    connection = engine.connect()
    connection.close()
# Single-task DAG: run connect_to_oracle once per schedule tick.
with dag:
    run_this_task = PythonOperator(
        task_id='run_this',
        python_callable=connect_to_oracle,
        provide_context=True
    )
    # The bare expression below is a no-op; the task is already attached to
    # the DAG by being created inside the `with dag:` block.
    run_this_task
| 36.754717 | 153 | 0.747433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 837 | 0.429671 |
146ce26ee142df10da663c661efd59cf5bef1b60 | 10,674 | py | Python | tests/test_packages/test_skills/test_tac_negotiation/test_helpers.py | bryanchriswhite/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 126 | 2019-09-07T09:32:44.000Z | 2022-03-29T14:28:41.000Z | tests/test_packages/test_skills/test_tac_negotiation/test_helpers.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 1,814 | 2019-08-24T10:08:07.000Z | 2022-03-31T14:28:36.000Z | tests/test_packages/test_skills/test_tac_negotiation/test_helpers.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 46 | 2019-09-03T22:13:58.000Z | 2022-03-22T01:25:16.000Z | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the helpers module of the tac negotiation."""
from pathlib import Path
from aea.helpers.search.models import (
Attribute,
Constraint,
ConstraintType,
DataModel,
Description,
)
from aea.test_tools.test_skill import BaseSkillTestCase
from packages.fetchai.skills.tac_negotiation.helpers import (
DEMAND_DATAMODEL_NAME,
SUPPLY_DATAMODEL_NAME,
_build_goods_datamodel,
build_goods_description,
build_goods_query,
)
from tests.conftest import ROOT_DIR
class TestHelpers(BaseSkillTestCase):
    """Test Helper module methods of tac negotiation."""
    path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "tac_negotiation")
    @classmethod
    def setup(cls):
        """Setup the test class."""
        super().setup()
    @staticmethod
    def _expected_attributes(good_ids):
        """Build the expected attribute list for a goods data model.

        One required int attribute per good id, followed by the fixed
        ledger/currency/price/fee/nonce attributes shared by the supply and
        demand data models. Centralised here to remove the near-identical
        fixture that was previously duplicated in every test method.
        """
        attributes = [
            Attribute(good_id, int, True, "A good on offer.")
            for good_id in good_ids
        ]
        attributes += [
            Attribute("ledger_id", str, True, "The ledger for transacting."),
            Attribute(
                "currency_id",
                str,
                True,
                "The currency for pricing and transacting the goods.",
            ),
            Attribute("price", int, False, "The price of the goods in the currency."),
            Attribute(
                "fee",
                int,
                False,
                "The transaction fee payable by the buyer in the currency.",
            ),
            Attribute(
                "nonce", str, False, "The nonce to distinguish identical descriptions."
            ),
        ]
        return attributes
    def test_build_goods_datamodel_supply(self):
        """Test the _build_goods_datamodel of Helpers module for a supply."""
        good_ids = ["1", "2"]
        is_supply = True
        expected_data_model = DataModel(
            SUPPLY_DATAMODEL_NAME, self._expected_attributes(good_ids)
        )
        actual_data_model = _build_goods_datamodel(good_ids, is_supply)
        assert actual_data_model == expected_data_model
    def test_build_goods_datamodel_demand(self):
        """Test the _build_goods_datamodel of Helpers module for a demand."""
        good_ids = ["1", "2"]
        is_supply = False
        expected_data_model = DataModel(
            DEMAND_DATAMODEL_NAME, self._expected_attributes(good_ids)
        )
        actual_data_model = _build_goods_datamodel(good_ids, is_supply)
        assert actual_data_model == expected_data_model
    def test_build_goods_description_supply(self):
        """Test the build_goods_description of Helpers module for supply."""
        quantities_by_good_id = {"2": 5, "3": 10}
        currency_id = "1"
        ledger_id = "some_ledger_id"
        is_supply = True
        expected_data_model = DataModel(
            SUPPLY_DATAMODEL_NAME,
            self._expected_attributes(quantities_by_good_id),
        )
        expected_values = {"currency_id": currency_id, "ledger_id": ledger_id}
        expected_values.update(quantities_by_good_id)
        expected_description = Description(expected_values, expected_data_model)
        actual_description = build_goods_description(
            quantities_by_good_id, currency_id, ledger_id, is_supply
        )
        assert actual_description == expected_description
    def test_build_goods_description_demand(self):
        """Test the build_goods_description of Helpers module for demand (same as above)."""
        quantities_by_good_id = {"2": 5, "3": 10}
        currency_id = "1"
        ledger_id = "some_ledger_id"
        is_supply = False
        expected_data_model = DataModel(
            DEMAND_DATAMODEL_NAME,
            self._expected_attributes(quantities_by_good_id),
        )
        expected_values = {"currency_id": currency_id, "ledger_id": ledger_id}
        expected_values.update(quantities_by_good_id)
        expected_description = Description(expected_values, expected_data_model)
        actual_description = build_goods_description(
            quantities_by_good_id, currency_id, ledger_id, is_supply
        )
        assert actual_description == expected_description
    def test_build_goods_query(self):
        """Test the build_goods_query of Helpers module."""
        good_ids = ["2", "3"]
        currency_id = "1"
        ledger_id = "some_ledger_id"
        is_searching_for_sellers = True
        expected_data_model = DataModel(
            SUPPLY_DATAMODEL_NAME, self._expected_attributes(good_ids)
        )
        expected_constraints = [
            Constraint("2", ConstraintType(">=", 1)),
            Constraint("3", ConstraintType(">=", 1)),
            Constraint("ledger_id", ConstraintType("==", ledger_id)),
            Constraint("currency_id", ConstraintType("==", currency_id)),
        ]
        actual_query = build_goods_query(
            good_ids, currency_id, ledger_id, is_searching_for_sellers
        )
        # Compare (type, value) pairs because the generated query nests its
        # constraints inside a composite constraint.
        constraints = [
            (c.constraint_type.type, c.constraint_type.value)
            for c in actual_query.constraints[0].constraints
        ]
        for constraint in expected_constraints:
            assert (
                constraint.constraint_type.type,
                constraint.constraint_type.value,
            ) in constraints
        assert actual_query.model == expected_data_model
    def test_build_goods_query_1_good(self):
        """Test the build_goods_query of Helpers module where there is 1 good."""
        good_ids = ["2"]
        currency_id = "1"
        ledger_id = "some_ledger_id"
        is_searching_for_sellers = True
        expected_data_model = DataModel(
            SUPPLY_DATAMODEL_NAME, self._expected_attributes(good_ids)
        )
        expected_constraints = [
            Constraint("2", ConstraintType(">=", 1)),
            Constraint("ledger_id", ConstraintType("==", ledger_id)),
            Constraint("currency_id", ConstraintType("==", currency_id)),
        ]
        actual_query = build_goods_query(
            good_ids, currency_id, ledger_id, is_searching_for_sellers
        )
        # With a single good the constraints are attached directly to the query.
        for constraint in expected_constraints:
            assert constraint in actual_query.constraints
        assert actual_query.model == expected_data_model
146de2700d3f564e16e6e3c269777835f59d7552 | 5,930 | py | Python | go/vumitools/metrics_worker.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | go/vumitools/metrics_worker.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | go/vumitools/metrics_worker.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | # -*- test-case-name: go.vumitools.tests.test_metrics_worker -*-
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.task import LoopingCall
from vumi import log
from vumi.worker import BaseWorker
from vumi.config import ConfigInt, ConfigError
from vumi.persist.model import Manager
from go.vumitools.api import VumiApi, VumiApiCommand, ApiCommandPublisher
from go.vumitools.app_worker import GoWorkerConfigMixin, GoWorkerMixin
class GoMetricsWorkerConfig(BaseWorker.CONFIG_CLASS, GoWorkerConfigMixin):
    """At the start of each `metrics_interval` the :class:`GoMetricsWorker`
    collects a list of all active conversations and distributes them
    into `metrics_interval / metrics_granularity` buckets.
    Immediately afterwards and then after each `metrics_granularity`
    interval, the metrics worker sends a `collect_metrics` command to each
    of the conversations in the current bucket until all buckets have been
    processed.
    Once all buckets have been processed, active conversations are
    collected again and the cycle repeats.
    """
    metrics_interval = ConfigInt(
        "How often (in seconds) the worker should send `collect_metrics` "
        "commands for each conversation. Must be an integer multiple of "
        "`metrics_granularity`.",
        default=300,
        static=True)
    metrics_granularity = ConfigInt(
        "How often (in seconds) the worker should process a bucket of "
        "conversations.",
        default=5,
        static=True)
    def post_validate(self):
        # Enforce the divisibility required by the bucket scheme: the worker
        # computes its bucket count as interval // granularity.
        if (self.metrics_interval % self.metrics_granularity != 0):
            raise ConfigError("Metrics interval must be an integer multiple"
                              " of metrics granularity.")
class GoMetricsWorker(BaseWorker, GoWorkerMixin):
    """A metrics collection worker for Go applications.
    This worker operates by finding all conversations that require metrics
    collection and sending commands to the relevant application workers to
    trigger the actual metrics.
    """
    CONFIG_CLASS = GoMetricsWorkerConfig
    worker_name = 'go_metrics'
    @inlineCallbacks
    def setup_worker(self):
        """Set up the APIs and command publisher, then start the bucket loop."""
        yield self._go_setup_worker()
        config = self.get_static_config()
        self.vumi_api = yield VumiApi.from_config_async({
            'riak_manager': config.riak_manager,
            'redis_manager': config.redis_manager,
        })
        self.redis = self.vumi_api.redis
        self.command_publisher = yield self.start_publisher(
            ApiCommandPublisher)
        # One bucket per granularity tick within an interval.
        self._current_bucket = 0
        self._num_buckets = (
            config.metrics_interval // config.metrics_granularity)
        self._buckets = dict((i, []) for i in range(self._num_buckets))
        # Cache of conv_key -> worker_name to avoid re-fetching conversations.
        self._conversation_workers = {}
        self._looper = LoopingCall(self.metrics_loop_func)
        self._looper.start(config.metrics_granularity)
    @inlineCallbacks
    def teardown_worker(self):
        """Stop the loop and release redis/API resources."""
        if self._looper.running:
            self._looper.stop()
        yield self.redis.close_manager()
        yield self._go_teardown_worker()
    def bucket_for_conversation(self, conv_key):
        """Deterministically map a conversation key onto a bucket index."""
        return hash(conv_key) % self._num_buckets
    @inlineCallbacks
    def populate_conversation_buckets(self):
        """Distribute all running conversations of all accounts into buckets."""
        account_keys = yield self.find_account_keys()
        num_conversations = 0
        # We deliberately serialise this. We don't want to hit the datastore
        # too hard for metrics.
        for account_key in account_keys:
            conv_keys = yield self.find_conversations_for_account(account_key)
            num_conversations += len(conv_keys)
            for conv_key in conv_keys:
                bucket = self.bucket_for_conversation(conv_key)
                if conv_key not in self._conversation_workers:
                    # TODO: Clear out archived conversations
                    user_api = self.vumi_api.get_user_api(account_key)
                    conv = yield user_api.get_wrapped_conversation(conv_key)
                    self._conversation_workers[conv_key] = conv.worker_name
                worker_name = self._conversation_workers[conv_key]
                self._buckets[bucket].append(
                    (account_key, conv_key, worker_name))
        log.info(
            "Scheduled metrics commands for %d conversations in %d accounts."
            % (num_conversations, len(account_keys)))
    @inlineCallbacks
    def process_bucket(self, bucket):
        """Send `collect_metrics` for each conversation in *bucket* and empty it."""
        convs, self._buckets[bucket] = self._buckets[bucket], []
        for account_key, conversation_key, worker_name in convs:
            yield self.send_metrics_command(
                account_key, conversation_key, worker_name)
    def increment_bucket(self):
        """Advance to the next bucket index, wrapping around at the end."""
        self._current_bucket += 1
        self._current_bucket %= self._num_buckets
    @inlineCallbacks
    def metrics_loop_func(self):
        """Loop body run every granularity tick: refill at cycle start, then process."""
        if self._current_bucket == 0:
            yield self.populate_conversation_buckets()
        yield self.process_bucket(self._current_bucket)
        self.increment_bucket()
    def setup_connectors(self):
        # This worker publishes commands only; no message connectors needed.
        pass
    @Manager.calls_manager
    def find_account_keys(self):
        """Return all account keys except those in 'disabled_metrics_accounts'."""
        keys = yield self.vumi_api.account_store.users.all_keys()
        disabled_keys = yield self.redis.smembers('disabled_metrics_accounts')
        returnValue(set(keys) - set(disabled_keys))
    def find_conversations_for_account(self, account_key):
        """Return the running conversation keys for *account_key*."""
        user_api = self.vumi_api.get_user_api(account_key)
        return user_api.conversation_store.list_running_conversations()
    def send_metrics_command(self, account_key, conversation_key, worker_name):
        """Publish a `collect_metrics` command addressed to *worker_name*."""
        cmd = VumiApiCommand.command(
            worker_name,
            'collect_metrics',
            conversation_key=conversation_key,
            user_account_key=account_key)
        return self.command_publisher.publish_message(cmd)
| 38.258065 | 79 | 0.685835 | 5,459 | 0.920573 | 2,816 | 0.474874 | 2,948 | 0.497133 | 0 | 0 | 1,474 | 0.248567 |
146f9bd7318af3cc0fd1815d1722bd7ff1cb6d1d | 6,434 | py | Python | rastervision/new_version/learner/classification_learner.py | carderne/raster-vision | 915fbcd3263d8f2193e65c2cd0eb53e050a47a01 | [
"Apache-2.0"
] | 1 | 2019-11-07T10:02:23.000Z | 2019-11-07T10:02:23.000Z | rastervision/new_version/learner/classification_learner.py | carderne/raster-vision | 915fbcd3263d8f2193e65c2cd0eb53e050a47a01 | [
"Apache-2.0"
] | null | null | null | rastervision/new_version/learner/classification_learner.py | carderne/raster-vision | 915fbcd3263d8f2193e65c2cd0eb53e050a47a01 | [
"Apache-2.0"
] | null | null | null | import warnings
warnings.filterwarnings('ignore') # noqa
from os.path import join, isfile, isdir
import zipfile
import torch
from torchvision import models
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Subset, ConcatDataset
from torchvision.transforms import (Compose, ToTensor, Resize, ColorJitter,
RandomVerticalFlip, RandomHorizontalFlip)
from rastervision.backend.torch_utils.chip_classification.folder import (
ImageFolder)
from rastervision.utils.files import (download_if_needed, list_paths,
get_local_path)
from rastervision.new_version.learner.learner import Learner
from rastervision.new_version.learner.metrics import (compute_conf_mat_metrics,
compute_conf_mat)
class ClassificationLearner(Learner):
def build_model(self):
model = getattr(models, self.cfg.model.backbone)(pretrained=True)
in_features = model.fc.in_features
num_labels = len(self.cfg.data.labels)
model.fc = nn.Linear(in_features, num_labels)
return model
def build_data(self):
cfg = self.cfg
batch_sz = cfg.solver.batch_sz
num_workers = cfg.data.num_workers
label_names = cfg.data.labels
# download and unzip data
if cfg.data.data_format == 'image_folder':
if cfg.data.uri.startswith('s3://') or cfg.data.uri.startswith(
'/'):
data_uri = cfg.data.uri
else:
data_uri = join(cfg.base_uri, cfg.data.uri)
data_dirs = []
zip_uris = [data_uri] if data_uri.endswith('.zip') else list_paths(
data_uri, 'zip')
for zip_ind, zip_uri in enumerate(zip_uris):
zip_path = get_local_path(zip_uri, self.data_cache_dir)
if not isfile(zip_path):
zip_path = download_if_needed(zip_uri, self.data_cache_dir)
with zipfile.ZipFile(zip_path, 'r') as zipf:
data_dir = join(self.tmp_dir, 'data', str(zip_ind))
data_dirs.append(data_dir)
zipf.extractall(data_dir)
train_ds, valid_ds, test_ds = [], [], []
for data_dir in data_dirs:
train_dir = join(data_dir, 'train')
valid_dir = join(data_dir, 'valid')
# build datasets
transform = Compose(
[Resize((cfg.data.img_sz, cfg.data.img_sz)),
ToTensor()])
aug_transform = Compose([
RandomHorizontalFlip(),
RandomVerticalFlip(),
ColorJitter(0.1, 0.1, 0.1, 0.1),
Resize((cfg.data.img_sz, cfg.data.img_sz)),
ToTensor()
])
if isdir(train_dir):
if cfg.overfit_mode:
train_ds.append(
ImageFolder(
train_dir,
transform=transform,
classes=label_names))
else:
train_ds.append(
ImageFolder(
train_dir,
transform=aug_transform,
classes=label_names))
if isdir(valid_dir):
valid_ds.append(
ImageFolder(
valid_dir, transform=transform, classes=label_names))
test_ds.append(
ImageFolder(
valid_dir, transform=transform, classes=label_names))
train_ds, valid_ds, test_ds = \
ConcatDataset(train_ds), ConcatDataset(valid_ds), ConcatDataset(test_ds)
if cfg.overfit_mode:
train_ds = Subset(train_ds, range(batch_sz))
valid_ds = train_ds
test_ds = train_ds
elif cfg.test_mode:
train_ds = Subset(train_ds, range(batch_sz))
valid_ds = Subset(valid_ds, range(batch_sz))
test_ds = Subset(test_ds, range(batch_sz))
train_dl = DataLoader(
train_ds,
shuffle=True,
batch_size=batch_sz,
num_workers=num_workers,
pin_memory=True)
valid_dl = DataLoader(
valid_ds,
shuffle=True,
batch_size=batch_sz,
num_workers=num_workers,
pin_memory=True)
test_dl = DataLoader(
test_ds,
shuffle=True,
batch_size=batch_sz,
num_workers=num_workers,
pin_memory=True)
self.train_ds, self.valid_ds, self.test_ds = (train_ds, valid_ds,
test_ds)
self.train_dl, self.valid_dl, self.test_dl = (train_dl, valid_dl,
test_dl)
    def train_step(self, batch, batch_nb):
        """One training step: forward pass and summed cross-entropy loss.

        `batch_nb` is unused here but kept for the training loop's
        expected callback signature.
        """
        x, y = batch
        out = self.model(x)
        # reduction='sum' so the caller can normalize by the sample count.
        return {'train_loss': F.cross_entropy(out, y, reduction='sum')}
    def validate_step(self, batch, batch_nb):
        """One validation step: summed loss plus this batch's confusion matrix."""
        x, y = batch
        out = self.model(x)
        # Summed (not averaged); validate_end divides by num_samples.
        val_loss = F.cross_entropy(out, y, reduction='sum')
        num_labels = len(self.cfg.data.labels)
        # post_forward turns raw outputs into hard class predictions.
        out = self.post_forward(out)
        conf_mat = compute_conf_mat(out, y, num_labels)
        return {'val_loss': val_loss, 'conf_mat': conf_mat}
def validate_end(self, outputs, num_samples):
conf_mat = sum([o['conf_mat'] for o in outputs])
val_loss = torch.stack([o['val_loss']
for o in outputs]).sum() / num_samples
conf_mat_metrics = compute_conf_mat_metrics(conf_mat,
self.cfg.data.labels)
metrics = {'val_loss': val_loss.item()}
metrics.update(conf_mat_metrics)
return metrics
def post_forward(self, x):
return x.argmax(-1)
    def plot_xyz(self, ax, x, y, z=None):
        """Draw one sample on a matplotlib axis, titled with its true
        (and optionally predicted) label.

        x: image tensor in CHW layout; y: true label index;
        z: optional predicted label index.
        """
        # CHW -> HWC for matplotlib.
        x = x.permute(1, 2, 0)
        if x.shape[2] == 1:
            # Replicate a single grayscale channel into three for imshow.
            x = torch.cat([x for _ in range(3)], dim=2)
        ax.imshow(x)
        title = 'true: {}'.format(self.cfg.data.labels[y])
        if z is not None:
            title += ' / pred: {}'.format(self.cfg.data.labels[z])
        ax.set_title(title, fontsize=8)
        ax.axis('off')
| 37.190751 | 84 | 0.546316 | 5,573 | 0.86618 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.033105 |
146fb23c6714dde0702a9be7da3d2cf6a252772c | 1,558 | py | Python | scraping/get-list-2/scrapes/scrapes/spiders/amazon_listspider.py | hvarS/AmazonPrivacy | d3c975e428d8ac80dbe4b4e7a2e33082eec89524 | [
"MIT"
] | null | null | null | scraping/get-list-2/scrapes/scrapes/spiders/amazon_listspider.py | hvarS/AmazonPrivacy | d3c975e428d8ac80dbe4b4e7a2e33082eec89524 | [
"MIT"
] | null | null | null | scraping/get-list-2/scrapes/scrapes/spiders/amazon_listspider.py | hvarS/AmazonPrivacy | d3c975e428d8ac80dbe4b4e7a2e33082eec89524 | [
"MIT"
] | null | null | null | import scrapy
import pandas as pd
import time
import random
import string
import os
class QuotesSpider(scrapy.Spider):
    """Spider that visits the URLs listed in ./names.csv and appends the
    ASINs found on each result page to ./links_11.csv."""
    name = "amazonspione"
    def start_requests(self):
        # NOTE(review): os.mkdir raises if ./products already exists, so a
        # re-run crashes; os.makedirs(..., exist_ok=True) would be safer.
        # The directory also appears unused below — confirm before removing.
        os.mkdir("./products")
        list_of_urls = []
        link_file = "./names.csv"
        df1 = pd.read_csv(link_file)
        length = df1.shape[0]
        # Third column (index 2) of names.csv holds the URLs to crawl.
        for i in range(length):
            list_of_urls.append(df1.iat[i, 2])
        urls = list_of_urls
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)
    def parse(self, response):
        # length = 10
        # letters = string.ascii_lowercase
        # fn = './products/' + ''.join(random.choice(letters) for k in range(length)) + ".csv"
        fn = "./links_11.csv"
        asin_nums = []
        # 'not-found' sentinel when the results container is missing.
        isExists = response.xpath('//*[@id="mainResults"]').extract_first(default='not-found')
        count = 0
        if isExists == 'not-found':
            pass
        else:
            for product in response.css('div#mainResults ul li'):
                try:
                    asin = product.attrib['data-asin']
                    asin_nums.append(asin)
                    count = count + 1
                except:
                    # NOTE(review): bare except hides real errors too;
                    # 'except KeyError' covers entries without data-asin.
                    pass
        dict1 = {'ASIN': asin_nums}
        df1 = pd.DataFrame(dict1)
        # Append-mode CSV without headers: rows accumulate across pages.
        df1.to_csv(fn, index=False, mode='a', header=False)
        # Crude rate limiting between pages.
        time.sleep(10)
        dict2 = {'Numbers': [fn]}
        df2 = pd.DataFrame(dict2)
df2.to_csv("numbers.csv", index=False, mode='a', header=False) | 30.54902 | 94 | 0.530167 | 1,472 | 0.944801 | 378 | 0.242619 | 0 | 0 | 0 | 0 | 302 | 0.193838 |
147035cef75b4248a1685bdc47735841a296c61d | 2,407 | py | Python | data_pipeline/db/file_query_results.py | iagcl/data_pipeline | b9b965d43a4261357e417f4eeee5d8b7d2dfd858 | [
"Apache-2.0"
] | 16 | 2017-10-31T21:43:26.000Z | 2019-08-11T08:49:06.000Z | data_pipeline/db/file_query_results.py | iagcl/data_pipeline | b9b965d43a4261357e417f4eeee5d8b7d2dfd858 | [
"Apache-2.0"
] | 1 | 2017-11-01T06:25:56.000Z | 2017-11-01T06:25:56.000Z | data_pipeline/db/file_query_results.py | iagcl/data_pipeline | b9b965d43a4261357e417f4eeee5d8b7d2dfd858 | [
"Apache-2.0"
] | 9 | 2017-10-30T05:23:15.000Z | 2022-02-17T03:53:09.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
# Module: file_query_results
# Purpose: Represents the query result object returned from a file query
#
# Notes:
###############################################################################
import itertools
from .query_results import QueryResults
from data_pipeline.stream.file_reader import FileReader
def default_post_process_func(line):
    """Identity post-processor: hand back the line unchanged."""
    return line
class FileQueryResults(QueryResults):
    """Query-result object backed by a text file, one result per line.

    Mimics a DB-API cursor's fetch interface (fetchone/fetchmany/fetchall)
    and is also directly iterable.
    """

    def __init__(self, filename, post_process_func):
        super(FileQueryResults, self).__init__()
        self._handle = FileReader(filename)
        # Fall back to the identity post-processor when none is supplied.
        if post_process_func is None:
            self._post_process_func = default_post_process_func
        else:
            self._post_process_func = post_process_func

    def __iter__(self):
        return self

    def next(self):
        """Return the next post-processed line; close the file and stop at EOF.

        NOTE(review): a blank line mid-file also terminates iteration —
        presumably records are never empty; confirm with producers.
        """
        line = self._handle.readline().strip('\n')
        if not line:
            self._handle.close()
            raise StopIteration
        return self._post_process_func(line)

    # Python 3 iterator protocol uses __next__; keep 'next' for Python 2.
    __next__ = next

    def fetchone(self):
        """Return one result, or None when the results are exhausted."""
        try:
            return self.next()
        except StopIteration:  # was 'except StopIteration, e' (Py2-only syntax)
            return None

    def fetchall(self):
        """Return every remaining line, post-processed, as a list."""
        return [self._post_process_func(l.strip('\n'))
                for l in self._handle]

    def fetchmany(self, arraysize=None):
        """Return up to `arraysize` results; everything when arraysize is falsy.

        Checks 'is not None' explicitly because 'None > 0' raises
        TypeError on Python 3.
        """
        if arraysize is not None and arraysize > 0:
            return [self._post_process_func(l.strip('\n'))
                    for l in itertools.islice(self._handle, arraysize)]
        return self.fetchall()

    def __del__(self):
        self._handle.close()
| 31.671053 | 79 | 0.636061 | 1,166 | 0.48442 | 0 | 0 | 0 | 0 | 0 | 0 | 1,057 | 0.439136 |
14707c163eafcd974725dfd54d470997350920ec | 1,008 | py | Python | scripts/use_dataframe_with_info.py | HK3-Lab-Team/pytrousse | 59744b3e3c19dd07b43fec3ddaf60bc889a140a9 | [
"Apache-2.0"
] | null | null | null | scripts/use_dataframe_with_info.py | HK3-Lab-Team/pytrousse | 59744b3e3c19dd07b43fec3ddaf60bc889a140a9 | [
"Apache-2.0"
] | 52 | 2020-09-11T10:33:39.000Z | 2021-02-03T12:09:50.000Z | scripts/use_dataframe_with_info.py | HK3-Lab-Team/pytrousse | 59744b3e3c19dd07b43fec3ddaf60bc889a140a9 | [
"Apache-2.0"
] | null | null | null | import os
import time
from trousse.dataset import Dataset
# Absolute path to the anonymized dataset (machine-specific).
df_sani_dir = os.path.join(
    "/home/lorenzo-hk3lab/WorkspaceHK3Lab/",
    "smvet",
    "data",
    "Sani_15300_anonym.csv",
)
# Columns treated as metadata (not features) by Dataset.
# NOTE(review): the separators inside this literal are presumably TAB
# characters (see the replace('\t', ',') below) — confirm before reformatting.
metadata_cols = (
    "GROUPS TAG DATA_SCHEDA NOME ID_SCHEDA COMUNE PROV MONTH YEAR BREED"
    " SEX AGE SEXUAL STATUS BODYWEIGHT PULSE RATE RESPIRATORY RATE TEMP "
    "BLOOD PRESS MAX BLOOD PRESS MIN BLOOD PRESS MEAN BODY CONDITION SCORE "
    "HT H DEATH TIME OF DEATH PROFILO_PAZIENTE ANAMNESI_AMBIENTALE"
    " ANAMNESI_ALIMENTARE VACCINAZIONI FILARIOSI GC_SEQ"
)
# Normalize tab separators to commas, then split into a tuple of names.
metadata_cols = tuple(metadata_cols.replace("\t", ",").split(","))
df_sani = Dataset(metadata_cols=metadata_cols, data_file=df_sani_dir)
# Time the column-by-type classification.
time0 = time.time()
print(df_sani.column_list_by_type)
print(time.time() - time0)
# Whole-cell values to substitute during cleaning (None -> missing value).
whole_word_replace_dict = {
    "---": None,
    ".": None,
    "ASSENTI": "0",
    "non disponibile": None,
    "NV": None,
    "-": None,
    "Error": None,
    # '0%': '0'
}
# Single-character substitutions (drop degree sign, decimal comma -> dot).
char_replace_dict = {"°": "", ",": "."}
| 25.846154 | 78 | 0.670635 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 493 | 0.488603 |
14717a6c7ca6f6055e2601fb501025c2b4eb4af3 | 3,303 | py | Python | tests/test_spatial_sort.py | Ruibin-Liu/Despace | f26b06398a1100696f335ca1c7f4852b31e1f2e9 | [
"Apache-2.0"
] | 2 | 2022-03-17T02:37:45.000Z | 2022-03-17T13:24:24.000Z | tests/test_spatial_sort.py | Ruibin-Liu/Despace | f26b06398a1100696f335ca1c7f4852b31e1f2e9 | [
"Apache-2.0"
] | 1 | 2022-03-17T01:22:27.000Z | 2022-03-17T03:28:10.000Z | tests/test_spatial_sort.py | Ruibin-Liu/Despace | f26b06398a1100696f335ca1c7f4852b31e1f2e9 | [
"Apache-2.0"
] | null | null | null | import sys
from os.path import exists
from unittest.mock import patch
import numpy as np # type: ignore
import pytest
from despace.spatial_sort import SortND
sys.path.append("..")
# Unsorted inputs and their expected sorted orderings, per dimensionality.
coords_1d = np.array([1.0, 0.1, 1.5, -0.3, 0.0])
sorted_coords_1d = np.array([-0.3, 0.0, 0.1, 1.0, 1.5])
# 2D: built as two coordinate rows, transposed to (n_points, 2).
coords_2d = np.array(
    [[1.0, 0.1, 1.5, -0.3, 0.0], [1.5, 0.2, 1.3, -0.1, 0.7]]
).transpose()
sorted_coords_2d = np.array(
    [[-0.3, -0.1], [0.0, 0.7], [0.1, 0.2], [1.0, 1.5], [1.5, 1.3]]
)
# 3D: three coordinate rows, transposed to (n_points, 3).
coords_3d = np.array(
    [[1.2, 0.0, 1.7, -0.4, 0.1], [1.4, 0.9, 1.0, -0.6, 0.3], [2.0, 0.0, 1.4, -0.2, 0.2]]
).transpose()
sorted_coords_3d = np.array(
    [
        [-0.4, -0.6, -0.2],
        [0.0, 0.9, 0.0],
        [0.1, 0.3, 0.2],
        [1.7, 1.0, 1.4],
        [1.2, 1.4, 2.0],
    ]
)
# All 16 cells of a 4x4 grid, and the same cells in Morton (Z-order) and
# Hilbert-curve traversal order.
grid_16 = np.array([[i, j] for i in range(4) for j in range(4)])
morton_grid_16 = np.array(
    [
        [0, 0],
        [0, 1],
        [1, 0],
        [1, 1],
        [0, 2],
        [0, 3],
        [1, 2],
        [1, 3],
        [2, 0],
        [2, 1],
        [3, 0],
        [3, 1],
        [2, 2],
        [2, 3],
        [3, 2],
        [3, 3],
    ]
)
hilbert_grid_16 = np.array(
    [
        [0, 0],
        [1, 0],
        [1, 1],
        [0, 1],
        [0, 2],
        [0, 3],
        [1, 3],
        [1, 2],
        [2, 2],
        [2, 3],
        [3, 3],
        [3, 2],
        [3, 1],
        [2, 1],
        [2, 0],
        [3, 0],
    ]
)
def test_sort():
    """SortND sorts 1D/2D/3D coordinates via .sort() and via __call__."""
    cases = [
        (coords_1d, sorted_coords_1d),
        (coords_2d, sorted_coords_2d),
        (coords_3d, sorted_coords_3d),
    ]
    # Construct-then-sort interface.
    for raw, expected in cases:
        assert np.array_equal(SortND(raw).sort(), expected)
    with pytest.raises(ValueError):
        SortND(np.random.rand(2, 2, 2))
    # Construct-then-call interface.
    sorter = SortND()
    for raw, expected in cases:
        assert np.array_equal(sorter(raw), expected)
    with pytest.raises(ValueError):
        sorter(np.random.rand(2, 2, 2))
    # Space-filling-curve orderings on the 4x4 grid.
    morton_sorter = SortND(sort_type="Morton")
    assert np.array_equal(morton_sorter(grid_16), morton_grid_16)
    hilbert_sorter = SortND(sort_type="Hilbert")
    assert np.array_equal(hilbert_sorter(grid_16), hilbert_grid_16)
    # Hilbert ordering is unimplemented beyond 2D.
    with pytest.raises(NotImplementedError):
        hilbert_sorter(np.random.rand(5, 3))
@patch("matplotlib.pyplot.show")
def test_plot(mock_show):
    """plot() succeeds for 1D/2D/3D data, both shown (patched) and saved."""
    sorter = SortND()
    # Showing: plt.show is patched so the test stays headless.
    for coords in (coords_1d, coords_2d, coords_3d):
        sorter(coords)
        assert sorter.plot(save_plot=False)
        assert sorter.plot(save_plot=False, show_plot=True)
    # Saving: default file name is "<dim>D_<npoints>.png".
    for coords, default_name, custom_name in (
        (coords_1d, "1D_5.png", "test_1d.png"),
        (coords_2d, "2D_5.png", "test_2d.png"),
        (coords_3d, "3D_5.png", "test_3d.png"),
    ):
        sorter(coords)
        sorter.plot(save_plot=True)
        assert exists(default_name)
        sorter.plot(save_plot=True, file_name=custom_name)
        assert exists(custom_name)
| 23.76259 | 88 | 0.546776 | 0 | 0 | 0 | 0 | 897 | 0.271571 | 0 | 0 | 273 | 0.082652 |
147277a6aa2b1959c8dba97e04e292544cd443d3 | 5,517 | py | Python | examples/pendulum/Train_Pendulum.py | JayLago/Hankel-DLDMD | c1b739a7488fbeb71a1f581c288843e9de66d2fc | [
"MIT"
] | null | null | null | examples/pendulum/Train_Pendulum.py | JayLago/Hankel-DLDMD | c1b739a7488fbeb71a1f581c288843e9de66d2fc | [
"MIT"
] | null | null | null | examples/pendulum/Train_Pendulum.py | JayLago/Hankel-DLDMD | c1b739a7488fbeb71a1f581c288843e9de66d2fc | [
"MIT"
] | null | null | null | """
Author:
Jay Lago, SDSU, 2021
"""
import tensorflow as tf
import pickle
import datetime as dt
import os
import sys
sys.path.insert(0, '../../')
import HDMD as dl
import LossDLDMD as lf
import Data as dat
import Training as tr
# ==============================================================================
# Setup
# ==============================================================================
NUM_SAVES = 2  # Number of times to save the model throughout training
NUM_PLOTS = 100  # Number of diagnostic plots to generate while training
DEVICE = '/GPU:1'
GPUS = tf.config.experimental.list_physical_devices('GPU')
if GPUS:
    try:
        # Grow GPU memory on demand instead of pre-allocating it all.
        for gpu in GPUS:
            tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        print(e)
else:
    # No GPUs detected: fall back to CPU.
    DEVICE = '/CPU:0'
tf.keras.backend.set_floatx('float64')  # !! Set precision for the entire model here
print("TensorFlow version: {}".format(tf.__version__))
print("Eager execution: {}".format(tf.executing_eagerly()))
print("Num GPUs available: {}".format(len(GPUS)))
print("Training at precision: {}".format(tf.keras.backend.floatx()))
print("Training on device: {}".format(DEVICE))
# ==============================================================================
# Initialize hyper-parameters and model
# ==============================================================================
# General parameters
hyp_params = dict()
hyp_params['sim_start'] = dt.datetime.now().strftime("%Y-%m-%d-%H%M")
hyp_params['experiment'] = 'pendulum'
hyp_params['plot_path'] = './training_results/' + hyp_params['experiment'] + '_' + hyp_params['sim_start']
hyp_params['model_path'] = './trained_models/' + hyp_params['experiment'] + '_' + hyp_params['sim_start']
hyp_params['device'] = DEVICE
hyp_params['precision'] = tf.keras.backend.floatx()
hyp_params['num_init_conds'] = 10000
hyp_params['num_train_init_conds'] = 8000
hyp_params['num_val_init_conds'] = 2000
hyp_params['time_final'] = 6
hyp_params['delta_t'] = 0.02
# +1 so both endpoints of the time interval are included.
hyp_params['num_time_steps'] = int(hyp_params['time_final']/hyp_params['delta_t'] + 1)
hyp_params['num_pred_steps'] = hyp_params['num_time_steps']
hyp_params['window'] = hyp_params['num_time_steps'] - 4
hyp_params['max_epochs'] = 100
hyp_params['save_every'] = hyp_params['max_epochs'] // NUM_SAVES
hyp_params['plot_every'] = hyp_params['max_epochs'] // NUM_PLOTS
# Universal network layer parameters (AE & Aux)
hyp_params['optimizer'] = 'adam'
hyp_params['batch_size'] = 256
hyp_params['phys_dim'] = 2
hyp_params['latent_dim'] = 2
hyp_params['hidden_activation'] = tf.keras.activations.relu
hyp_params['bias_initializer'] = tf.keras.initializers.zeros
# Encoding/Decoding Layer Parameters
hyp_params['num_en_layers'] = 3
hyp_params['num_en_neurons'] = 128
hyp_params['kernel_init_enc'] = tf.keras.initializers.TruncatedNormal(mean=0.0, stddev=0.1)
hyp_params['kernel_init_dec'] = tf.keras.initializers.TruncatedNormal(mean=0.0, stddev=0.1)
hyp_params['ae_output_activation'] = tf.keras.activations.linear
# Loss Function Parameters
hyp_params['a1'] = tf.constant(1, dtype=hyp_params['precision'])  # Reconstruction
hyp_params['a2'] = tf.constant(1, dtype=hyp_params['precision'])  # DMD
hyp_params['a3'] = tf.constant(1, dtype=hyp_params['precision'])  # Prediction
hyp_params['a4'] = tf.constant(1e-14, dtype=hyp_params['precision'])  # L-2 on weights
# Learning rate
hyp_params['lr'] = 1e-3
# Initialize the Koopman model and loss
myMachine = dl.HDMD(hyp_params)
myLoss = lf.LossDLDMD(hyp_params)
# ==============================================================================
# Generate / load data
# ==============================================================================
data_fname = 'pendulum_data.pkl'
if os.path.exists(data_fname):
    # Load data from file
    data = pickle.load(open(data_fname, 'rb'))
    data = tf.cast(data, dtype=hyp_params['precision'])
else:
    # Create new data
    data = dat.data_maker_pendulum(x_lower1=-3.1, x_upper1=3.1, x_lower2=-2, x_upper2=2,
                                   n_ic=hyp_params['num_init_conds'], dt=hyp_params['delta_t'],
                                   tf=hyp_params['time_final'])
    data = tf.cast(data, dtype=hyp_params['precision'])
    # Save data to file
    pickle.dump(data, open(data_fname, 'wb'))
# Create training and validation datasets from the initial conditions
# NOTE(review): 8000 train + 2000 val covers all 10000 initial conditions,
# so 'test_data' below is an empty slice — confirm whether a held-out test
# split was intended.
shuffled_data = tf.random.shuffle(data)
ntic = hyp_params['num_train_init_conds']
nvic = hyp_params['num_val_init_conds']
train_data = shuffled_data[:ntic, :, :]
val_data = shuffled_data[ntic:ntic+nvic, :, :]
test_data = shuffled_data[ntic+nvic:, :, :]
pickle.dump(train_data, open('data_train.pkl', 'wb'))
pickle.dump(val_data, open('data_val.pkl', 'wb'))
pickle.dump(test_data, open('data_test.pkl', 'wb'))
train_data = tf.data.Dataset.from_tensor_slices(train_data)
val_data = tf.data.Dataset.from_tensor_slices(val_data)
test_data = tf.data.Dataset.from_tensor_slices(test_data)
# Batch and prefetch the validation data to the GPUs
val_set = val_data.batch(hyp_params['batch_size'], drop_remainder=True)
val_set = val_set.prefetch(tf.data.AUTOTUNE)
# ==============================================================================
# Train the model
# ==============================================================================
results = tr.train_model(hyp_params=hyp_params, train_data=train_data,
                         val_set=val_set, model=myMachine, loss=myLoss)
print(results['model'].summary())
exit()
| 40.270073 | 106 | 0.634584 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,352 | 0.426319 |
14728ecbe8e51b8719ee9b289419e71b4c078ce0 | 1,542 | py | Python | FIST.py | IGN-Styly/FIST | 69f0b11b863f3440d716ad302e7fc2acddfb145f | [
"MIT"
] | 2 | 2022-01-20T18:57:53.000Z | 2022-03-16T23:23:47.000Z | FIST.py | IGN-Styly/FIST | 69f0b11b863f3440d716ad302e7fc2acddfb145f | [
"MIT"
] | 2 | 2022-01-20T18:41:46.000Z | 2022-01-24T18:15:53.000Z | FIST.py | IGN-Styly/FIST | 69f0b11b863f3440d716ad302e7fc2acddfb145f | [
"MIT"
] | 1 | 2022-01-20T01:29:03.000Z | 2022-01-20T01:29:03.000Z | import json
def is_valid(smc_type):
    """Return True when `smc_type` is a recognised smart-contract type.

    Replaces the original if/elif chain with a membership test over the
    three supported types.
    """
    return smc_type in ('vmt', 'flt', 'nfl')
def parse_vmt(contract):
    """Validate a 'vmt' contract: it must carry value, seller and product.

    Bug fixed: the original wrapped dict.get() calls in try/except, but
    get() returns None instead of raising for missing keys, so every dict
    passed validation. Require the keys explicitly instead.
    """
    try:
        return all(key in contract for key in ('value', 'seller', 'product'))
    except TypeError:
        # Not a mapping/container at all.
        return False
def parse_flt(contract):
    """Validate an 'flt' contract: vmt fields plus a 'land-id' mapping
    containing key '1'.

    Bug fixed: dict.get() never raises for missing top-level keys, so only
    the nested land-id lookup could fail; require every key explicitly.
    """
    try:
        required = ('value', 'seller', 'product', 'land-id')
        return (all(key in contract for key in required)
                and '1' in contract['land-id'])
    except TypeError:
        # Non-mapping contract or non-mapping land-id value.
        return False
def parse_nfl(contract):
    """Validate an 'nfl' contract: same required fields as 'flt'
    (value, seller, product, and a 'land-id' mapping with key '1').

    Bug fixed: as with parse_vmt/parse_flt, the original dict.get() calls
    could not detect missing top-level keys; check them explicitly.
    """
    try:
        required = ('value', 'seller', 'product', 'land-id')
        return (all(key in contract for key in required)
                and '1' in contract['land-id'])
    except TypeError:
        # Non-mapping contract or non-mapping land-id value.
        return False
def parse_type(smc_type, contract):
    """Dispatch contract validation to the parser for `smc_type`.

    Falls through to an implicit None for unknown types, matching the
    original if/elif chain that had no final else branch.
    """
    parsers = {'vmt': parse_vmt, 'flt': parse_flt, 'nfl': parse_nfl}
    parser = parsers.get(smc_type)
    if parser is not None:
        return parser(contract)
def parse_contract(json):
    """Parse and validate a smart contract from a JSON string.

    Returns the contract dict on success, False otherwise.

    Bug fixed: the parameter shadowed the `json` module, so the original
    `json.load(json)` called `.load()` on the argument itself and always
    failed. The module is re-imported under an alias (the public parameter
    name is kept for backward compatibility) and `loads` is used, since
    the payload is presumably a JSON string rather than a file object —
    confirm against callers.
    """
    import json as _json
    try:
        contract = _json.loads(json)
        smc_type = contract.get('type')
    except (ValueError, TypeError, AttributeError):
        # Malformed JSON, non-string input, or a payload without .get().
        return False
    if not is_valid(smc_type):
        return False
    if parse_type(smc_type=smc_type, contract=contract) is True:
        return contract
    return False
| 22.028571 | 78 | 0.592088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.160182 |
1472d75a6abf289e47bd71dbaf7c5950d9fe47ac | 1,332 | py | Python | john_zelle_python3/gpa.py | alirkaya/programming-textbook-solutions | 7362dce474b8a881d654f95604e09d1d0e76aec2 | [
"MIT"
] | null | null | null | john_zelle_python3/gpa.py | alirkaya/programming-textbook-solutions | 7362dce474b8a881d654f95604e09d1d0e76aec2 | [
"MIT"
] | null | null | null | john_zelle_python3/gpa.py | alirkaya/programming-textbook-solutions | 7362dce474b8a881d654f95604e09d1d0e76aec2 | [
"MIT"
] | null | null | null | class Student:
def __init__(self, name, hours, qpoints):
self.name = name
self.hours = float(hours)
self.qpoints = float(qpoints)
def get_name(self):
return self.name
def get_hours(self):
return self.hours
def get_qpoints(self):
return self.qpoints
def gpa(self):
return self.qpoints / self.hours
def make_student(info_str):
    """Build a Student from a tab-separated 'name<TAB>hours<TAB>qpoints' line."""
    fields = info_str.split('\t')
    name, hours, qpoints = fields
    return Student(name, hours, qpoints)
def main():
    """Read a grade file and report the student with the highest GPA.

    Each line of the file is 'name<TAB>hours<TAB>qpoints'. Ties keep the
    earlier student, matching the strict '>' comparison.
    """
    filename = input('Enter the name of the grade file: ')
    # 'with' guarantees the file is closed even if a line is malformed
    # (the original open/close pair leaked the handle on error).
    with open(filename, 'r') as infile:
        # Seed 'best' with the first record in the file.
        best = make_student(infile.readline())
        for line in infile:
            s = make_student(line)
            if s.gpa() > best.gpa():
                best = s
    # Print information about the best student.
    print('The best student is', best.get_name())
    print('hours:', best.get_hours())
    print('GPA:', best.gpa())
if __name__ == '__main__': main() | 25.615385 | 63 | 0.627628 | 377 | 0.283033 | 0 | 0 | 0 | 0 | 0 | 0 | 441 | 0.331081 |
147420a84e36353cde55058397ca18c7de1ab832 | 1,292 | py | Python | {{cookiecutter.project_name}}/Data/sample_treatment/simple_repeat.py | piperwelch/SymbulationProjectTemplate | 7bdfafb33bae821fc0f3406e363b8b9f0b7cc831 | [
"MIT"
] | null | null | null | {{cookiecutter.project_name}}/Data/sample_treatment/simple_repeat.py | piperwelch/SymbulationProjectTemplate | 7bdfafb33bae821fc0f3406e363b8b9f0b7cc831 | [
"MIT"
] | null | null | null | {{cookiecutter.project_name}}/Data/sample_treatment/simple_repeat.py | piperwelch/SymbulationProjectTemplate | 7bdfafb33bae821fc0f3406e363b8b9f0b7cc831 | [
"MIT"
] | null | null | null | #a script to run several replicates of several treatments locally
#You should create a directory for your result files and run this script from within that directory
seeds = range(21, 41)  # replicate seeds (20 replicates per treatment)
verts = [0.3]  # vertical-transmission rates to sweep
h_mut_rate = [0.1, 0.5, 1.0]  # horizontal mutation rates to sweep
import subprocess
def cmd(command):
    """Run `command` through the shell and block until it exits.

    Returns the command's exit status. The .wait() makes all executions
    run in series; for parallelisation, remove .wait() and instead delay
    the R script calls until all necessary data has been created.
    """
    # shell=True: commands here are constructed locally, not from user input.
    proc = subprocess.Popen(command, shell=True)
    return proc.wait()
def silent_cmd(command):
    """Run `command` through the shell with stdout captured (suppressed);
    block until it exits and return the exit status.

    The .wait() serialises executions, exactly as in cmd().
    """
    proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    return proc.wait()
# Run every (seed, vertical-transmission, horizontal-mutation-rate) cell of
# the sweep in series; output files are tagged _Seed<seed>_VT<rate>_MR<rate>.
for a in seeds:
    for b in verts:
        for c in h_mut_rate:
            command_str = './symbulation -UPDATES 10001 -SYNERGY 3 -MUTATION_RATE 0.1 -SEED '+str(a)+ ' -VERTICAL_TRANSMISSION ' +str(b)+ ' -EFFICIENCY_MUT_RATE -1'+' -HORIZ_MUTATION_RATE '+ str(c) +' -FILE_NAME _Seed'+str(a)+'_VT'+str(b)+'_MR'+str(c) +' -GRID_X 100 -GRID_Y 100'
            print(command_str)
            cmd(command_str)
| 46.142857 | 279 | 0.645511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 806 | 0.623839 |
147447bc0d6e65d1e4b7dc6b9ee8bffa5888dd3b | 499 | py | Python | First Unique Character in a String.py | frank0215/Leetcode_python | 9428ded4f9abd347b12bfef8aa1dd2d177f3afea | [
"MIT"
] | null | null | null | First Unique Character in a String.py | frank0215/Leetcode_python | 9428ded4f9abd347b12bfef8aa1dd2d177f3afea | [
"MIT"
] | null | null | null | First Unique Character in a String.py | frank0215/Leetcode_python | 9428ded4f9abd347b12bfef8aa1dd2d177f3afea | [
"MIT"
] | null | null | null | class Solution:
def firstUniqChar(self, s):
table = {}
for ele in s:
table[ele] = table.get(ele, 0) + 1
# for i in range(len(s)):
# if table[s[i]] == 1:
# return i
for ele in s:
if table[ele] == 1:
return s.index(ele)
return -1
if __name__ == '__main__':
    # Quick manual check; expected output noted for each sample input.
    s = 'leetcode' # output 0
    #s = 'loveleetcode' # output 2
    #s = 'llee' # output -1
print(Solution().firstUniqChar(s)) | 27.722222 | 46 | 0.472946 | 339 | 0.679359 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.304609 |
14770d630f638d640c9e37a29b68e980bdb4f14d | 2,897 | py | Python | tests/list_tests.py | kzawisto/mc | ce37f0bbfd73e0767b4edc905ce2a75777b33f42 | [
"MIT"
] | null | null | null | tests/list_tests.py | kzawisto/mc | ce37f0bbfd73e0767b4edc905ce2a75777b33f42 | [
"MIT"
] | null | null | null | tests/list_tests.py | kzawisto/mc | ce37f0bbfd73e0767b4edc905ce2a75777b33f42 | [
"MIT"
] | null | null | null | from hamcrest import *
from nose.tools import eq_
from mc import List, Some, Nothing,add
# Basic transformation methods on mc.List.
def test_list_map():
    # map applies the function to every element.
    eq_(List([1, 2, 3]).map(lambda x: x * 2), [2, 4, 6])
def test_list_flat_map():
    # flat_map flattens one level of the returned iterables.
    eq_(List([1, 3]).flat_map(lambda x: (x * 2, x * 4)), [2, 4, 6, 12])
def test_list_filter():
    eq_(List([1, 2, 3]).filter(lambda x: x < 2), [1])
def test_list_fold():
    # fold with seed 1 and multiplication computes the product.
    eq_(List([1, 2, 3]).fold(lambda x, y: x * y, 1), 6)
def test_list_group_by():
    eq_(
        List([1, 2, 3, 4, 5, 6]).group_by(lambda x: x % 2),
        {1: [1, 3, 5], 0: [2, 4, 6]}
    )
def test_list_mk_string():
    # mk_string(separator, prefix, suffix).
    eq_(List([5, 6, 7]).mk_string("_", "<", ">"), "<5_6_7>")
def test_list_to_dict():
    eq_(List([(5, 6), (7, 8)]).to_dict(), {5: 6, 7: 8})
def test_list_to_set():
    eq_(List([5, 6, 7]).to_set().to_list(), List([5, 6, 7]))
def test_list_multiproc_map():
    # Top-level-picklable function required for multiprocessing.
    def process_el(x):
        return x * 2
    eq_(List([1, 2, 3]).multiproc_map(process_el), [2, 4, 6])
def test_list_foreach():
    # foreach is for side effects only; collect them in a dict.
    dictionary = {}
    def add_to_dict(value):
        dictionary[value] = value
    List([9, 8, 7]).foreach(add_to_dict)
    actual = set(dictionary.keys())
    eq_(actual, {9, 8, 7})
def test_list_should_flat_map_iterables():
    # Sets returned from the mapper are flattened in arbitrary order.
    assert_that(
        List([1, 2]).flat_map(lambda x: {
            x, x * 2, x * 3}), contains_inanyorder(1, 2, 3, 2, 4, 6)
    )
def test_list_reduce_should_return_nothing_for_empty_list():
    # reduce on an empty list yields the Nothing option value.
    assert_that(
        List([]).reduce(lambda x, y: x), equal_to(Nothing())
    )
def test_list_reduce_should_aggregate_values():
    assert_that(
        List([1, 2, 3]).reduce(lambda x, y: x + y), equal_to(Some(6))
    )
def test_list_addition():
    # '+' concatenates and may mix element types.
    assert_that(
        List([1, 2]) + List(["3", 4]), equal_to(List([1, 2, "3", 4]))
    )
def test_zip_with_idx():
    assert_that(
        List(["A","C","D"]).zip_with_idx(), equal_to(List([(0,"A"),(1,"C"),(2,"D")]))
    )
def test_list_pick_one():
    # pick_one asserts the list has exactly one element.
    assert_that(
        calling(List(['1','2']).pick_one), raises(AssertionError)
    )
    assert_that(
        calling(List([]).pick_one), raises(AssertionError)
    )
    assert_that(
        List([1]).pick_one(), equal_to(1)
    )
# NOTE(review): test_accumulate is defined twice with an identical body; the
# second definition shadows the first at import time, so only one is
# collected — rename or delete one of them.
def test_accumulate():
    assert_that(
        List([1,2,3]).accumulate(add, 2), equal_to(8)
    )
def test_accumulate():
    assert_that(
        List([1,2,3]).accumulate(add, 2), equal_to(8)
    )
def test_count():
    assert_that(
        List([1,2,3]).count(), equal_to(3)
    )
def test_zip():
    assert_that(
        List([1,2]).zip([3,4]) , equal_to([(1,3),(2,4)])
    )
def test_zip_shift():
    # zip_shift(k) pairs each element with the one k positions ahead
    # (default shift is 1); too large a shift yields an empty list.
    assert_that(
        List([1,2]).zip_shift() , equal_to([(1,2)])
    )
    assert_that(
        List([1,2]).zip_shift(2) , equal_to([])
    )
    assert_that(
        List([1,2,3]).zip_shift(2) , equal_to([(1,3)])
    )
    assert_that(
        List([1,2,3]).zip_shift(1) , equal_to([(1,2),(2,3)])
    )
| 21.145985 | 85 | 0.556438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.016569 |
147880e13d2fb6983b451f768932d5c83c8eb10b | 1,087 | py | Python | src/groktoolkit/__init__.py | zopefoundation/groktoolkit | 6d295997510139b0588b2401d5f54fca8234a15e | [
"ZPL-2.1"
] | 2 | 2015-12-05T05:46:55.000Z | 2017-07-03T17:39:32.000Z | src/groktoolkit/__init__.py | zopefoundation/groktoolkit | 6d295997510139b0588b2401d5f54fca8234a15e | [
"ZPL-2.1"
] | 7 | 2017-07-06T15:07:31.000Z | 2021-04-20T14:38:21.000Z | src/groktoolkit/__init__.py | zopefoundation/groktoolkit | 6d295997510139b0588b2401d5f54fca8234a15e | [
"ZPL-2.1"
] | 2 | 2016-03-20T18:03:15.000Z | 2018-01-11T04:33:40.000Z | import sys
import re
import os
import commands
# Remote host and filesystem path where release information is published.
HOST = 'grok.zope.org'
RELEASEINFOPATH = '/var/www/html/grok/releaseinfo'
def _upload_gtk_versions(packageroot, version):
    """Create the releaseinfo directory for `version` on HOST over ssh and
    scp <packageroot>/grok.cfg there as versions.cfg.

    NOTE(review): uses the Python 2-only `commands` module and raw shell
    interpolation — `version` and `packageroot` are assumed shell-safe.
    """
    # Create the releaseinfo directory for this version.
    cmd = 'ssh %s "mkdir %s/%s"' % (HOST, RELEASEINFOPATH, version)
    print(cmd + '\n')
    print(commands.getoutput(cmd))
    # ``scp`` the file to the given destination.
    versions_filename = os.path.join(packageroot, 'grok.cfg')
    cmd = 'scp %s %s:%s/%s/versions.cfg' % (
        versions_filename, HOST, RELEASEINFOPATH, version)
    print(cmd + '\n')
    print(commands.getoutput(cmd))
def upload_entrypoint(data):
    """Upload release info after a groktoolkit release.

    `data` is a release-metadata mapping with 'name', 'workingdir' and
    'version' keys; releases of packages that merely depend on groktoolkit
    are skipped.
    """
    if data['name'] != 'groktoolkit':
        # Some other package is being released, not groktoolkit itself.
        return
    _upload_gtk_versions(data['workingdir'], data['version'])
def upload_gtk_versions():
    """Command-line entry point: upload grok.cfg for the version in argv[1]."""
    packageroot = os.getcwd()  # Ugh. Assumes the cwd is the package root.
    version = sys.argv[1]  # Ugh. No validation of the argument.
    _upload_gtk_versions(packageroot, version)
| 31.970588 | 68 | 0.679853 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 367 | 0.337626 |
147a0334d820a059fbd8f4598327b2b7c87e1f19 | 3,720 | py | Python | obywatele/migrations/0020_auto_20201225_1624.py | soma115/wikikracja | 7715ca1daa4ca09888e1c7389ed5f8a2df29898b | [
"MIT"
] | 7 | 2016-02-21T17:25:54.000Z | 2021-10-09T19:36:10.000Z | obywatele/migrations/0020_auto_20201225_1624.py | soma115/wikikracja | 7715ca1daa4ca09888e1c7389ed5f8a2df29898b | [
"MIT"
] | 19 | 2020-02-11T23:55:01.000Z | 2022-03-31T18:11:56.000Z | obywatele/migrations/0020_auto_20201225_1624.py | soma115/wikikracja | 7715ca1daa4ca09888e1c7389ed5f8a2df29898b | [
"MIT"
] | 3 | 2016-01-20T22:34:58.000Z | 2020-09-16T07:45:42.000Z | # Generated by Django 3.1 on 2020-12-25 15:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django 3.1 migration.

    Re-declares 14 optional free-text CharFields on the `uzytkownik` model
    with English help_text/verbose_name values. Generated code — do not
    hand-edit beyond comments.
    """
    dependencies = [
        ('obywatele', '0019_auto_20201225_1621'),
    ]
    operations = [
        migrations.AlterField(
            model_name='uzytkownik',
            name='business',
            field=models.CharField(blank=True, help_text='If you are running a business', max_length=200, null=True, verbose_name='Business'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='city',
            field=models.CharField(blank=True, help_text='Where you are spending most of your time', max_length=100, null=True, verbose_name='City'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='fb',
            field=models.CharField(blank=True, help_text='Link to Facebook profile', max_length=500, null=True, verbose_name='Facebook'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='for_sale',
            field=models.CharField(blank=True, help_text='Stuff you have for sale', max_length=500, null=True, verbose_name='For sale'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='hobby',
            field=models.CharField(blank=True, help_text='Hobbies you have', max_length=200, null=True, verbose_name='Hobby'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='i_need',
            field=models.CharField(blank=True, help_text='What do you need', max_length=500, null=True, verbose_name='I need'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='job',
            field=models.CharField(blank=True, help_text='Your profession', max_length=500, null=True, verbose_name='Job'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='knowledge',
            field=models.CharField(blank=True, help_text='Knowledge you have', max_length=500, null=True, verbose_name='Knowledge'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='other',
            field=models.CharField(blank=True, help_text='Other things about worth mentioning', max_length=500, null=True, verbose_name='Other'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='responsibilities',
            field=models.CharField(blank=True, help_text='Tasks performed in our group', max_length=2000, null=True, verbose_name='Responsibilities'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='skills',
            field=models.CharField(blank=True, help_text='What practical skills do you have', max_length=500, null=True, verbose_name='Skills'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='to_borrow',
            field=models.CharField(blank=True, help_text='Stuff you can borrow to others', max_length=500, null=True, verbose_name='To borrow'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='to_give_away',
            field=models.CharField(blank=True, help_text='Things you are willing to give away for free', max_length=2000, null=True, verbose_name='To give away'),
        ),
        migrations.AlterField(
            model_name='uzytkownik',
            name='want_to_learn',
            field=models.CharField(blank=True, help_text='Things you would like to learn', max_length=500, null=True, verbose_name='I want to learn'),
        ),
    ]
| 44.285714 | 162 | 0.618548 | 3,629 | 0.975538 | 0 | 0 | 0 | 0 | 0 | 0 | 934 | 0.251075 |
147a5096d0d2e1067ea8f8785a929ae310227d3e | 386 | py | Python | Darlington/phase1/python Basic 2/day 21 solution/qtn1.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | Darlington/phase1/python Basic 2/day 21 solution/qtn1.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | Darlington/phase1/python Basic 2/day 21 solution/qtn1.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | #program to compute and print sum of two given integers (more than or equal to zero).
# If given integers or the sum have more than 80 digits, print "overflow".
print("Input first integer:")
x = int(input())
print("Input second integer:")
y = int(input())
# 10**80 is the smallest 81-digit number, so >= 10**80 means "more than 80 digits".
if x >= 10 ** 80 or y >= 10 ** 80 or x + y >= 10 ** 80:
    print("Overflow!")
else:
print("Sum of the two integers: ",x + y) | 38.6 | 86 | 0.647668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.629534 |
147c28455f56f5983cd813f99265bdd8b89586bb | 270 | py | Python | app/cli/__init__.py | Hacker-1202/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | [
"MIT"
] | 14 | 2021-11-05T11:27:25.000Z | 2022-02-28T02:04:32.000Z | app/cli/__init__.py | CssHammer/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | [
"MIT"
] | 2 | 2021-05-17T23:55:34.000Z | 2021-07-09T17:24:44.000Z | app/cli/__init__.py | CssHammer/Selfium | 7e798c23c9f24aacab6f6a485d6355f1045bc65c | [
"MIT"
] | 5 | 2022-01-02T13:33:17.000Z | 2022-02-26T13:09:50.000Z |
"""
Selfium CLI Tools
~~~~~~~~~~~~~~~~~~~
All cli functions used in Selfium project;
:copyright: (c) 2021 - Caillou and ZeusHay;
:license: MIT, see LICENSE for more details.
"""
from .logo import *
from .clear import *
from .welcome import *
from .tokenError import *
| 19.285714 | 44 | 0.67037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.655556 |
147d0d54209eeeaab12be3279f9d110b874ff369 | 5,136 | py | Python | controllers/mainController.py | jersobh/zfs-resty | 2a71d89df6b90992723a87ee8bf5eeb2ac27cc8a | [
"MIT"
] | 11 | 2019-01-03T04:05:31.000Z | 2021-09-22T23:47:05.000Z | controllers/mainController.py | jersobh/zfs-resty | 2a71d89df6b90992723a87ee8bf5eeb2ac27cc8a | [
"MIT"
] | 11 | 2020-10-28T08:02:06.000Z | 2022-03-18T08:02:51.000Z | controllers/mainController.py | jersobh/zfs-resty | 2a71d89df6b90992723a87ee8bf5eeb2ac27cc8a | [
"MIT"
] | null | null | null | import uuid
from datetime import datetime, timedelta
from controllers import zfsController
import jwt
import pam
import render
JWT_SECRET = "7wXJ4kxCRWJpMQNqRVTVR3Qbc"
JWT_ALGORITHM = "HS256"
JWT_EXP_DELTA_SECONDS = 4300
async def index(request):
    """Fallback root handler: returns a placeholder JSON error body.

    Fix: ``render.json`` is a coroutine function — every other handler in
    this module awaits it; returning the bare coroutine here would hand
    the framework an un-awaited coroutine instead of a response.
    """
    return await render.json({'error': 'nothing to see here...'}, 200)
async def auth(request):
    """Authenticate ``{username, password}`` against local PAM and issue a JWT.

    On success returns ``{"token": ...}``; the token carries the user name,
    a random session id and an expiry of JWT_EXP_DELTA_SECONDS.
    On bad credentials returns a 401 JSON error (the original returned
    ``None``, which produces no HTTP response at all).
    Any other failure (missing keys, bad body) is reported as a JSON error
    with status 200, matching the other handlers in this module.
    """
    try:
        data = await request.json()
        user = data['username']
        password = data['password']
        if pam.authenticate(user, password):
            payload = {
                'user': user,
                'session_id': str(uuid.uuid4()),
                'exp': datetime.utcnow() + timedelta(seconds=JWT_EXP_DELTA_SECONDS)
            }
            jwt_token = jwt.encode(payload, JWT_SECRET, JWT_ALGORITHM)
            return await render.json({'token': jwt_token.decode('utf-8')}, 200)
        # Bad credentials: answer explicitly instead of returning None.
        return await render.json({'error': 'Invalid username or password'}, 401)
    except Exception as e:
        return await render.json({'error': str(e)}, 200)
async def check_token(request):
    """Validate the JWT in the ``Authorization`` header.

    Returns the token's ``session_id`` claim when the token decodes and is
    not expired, ``False`` otherwise.
    """
    raw_token = request.headers.get('Authorization', None)
    try:
        claims = jwt.decode(raw_token, JWT_SECRET, algorithms=[JWT_ALGORITHM])
    except (jwt.DecodeError, jwt.ExpiredSignatureError):
        return False
    return claims['session_id']
async def create_pool(request):
    """Create a ZFS pool from a JSON body ``{name, raid, devices}``.

    Requires a valid JWT (403 otherwise); errors from the controller are
    reported as a JSON error body with status 200.
    """
    if not await check_token(request):
        return await render.json({'error': 'Invalid or expired token'}, 403)
    try:
        body = await request.json()
        outcome = await zfsController.create_pool(body['name'], body['raid'], body['devices'])
        return await render.json({"success": outcome}, 200)
    except Exception as err:
        print(str(err))
        return await render.raw({'error': str(err)}, 200)
async def delete_pool(request):
    """Destroy the ZFS pool named in the JSON body ``{name}``.

    Requires a valid JWT (403 otherwise); controller errors come back as a
    JSON error body with status 200.
    """
    if not await check_token(request):
        return await render.json({'error': 'Invalid or expired token'}, 403)
    try:
        body = await request.json()
        outcome = await zfsController.delete_pool(body['name'])
        return await render.json({"success": outcome}, 200)
    except Exception as err:
        print(str(err))
        return await render.raw({'error': str(err)}, 200)
async def check_status(request):
    """Report zpool status as ``{"msg": ...}``.

    Fixes two gaps versus the sibling handlers: an invalid token now gets
    an explicit 403 response (previously the handler silently returned
    ``None``), and a controller failure now returns a 500 error body
    instead of only printing the exception.
    """
    check = await check_token(request)
    if check:
        try:
            res = await zfsController.get_status()
            return await render.json({'msg': res}, 200)
        except Exception as e:
            print(str(e))
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def get_storage_info(request):
    """Return disk information from the ZFS controller as JSON.

    Fix: an invalid/expired token now returns an explicit 403 (the
    original fell off the end of the function and returned ``None``),
    matching every other authenticated handler in this module.
    """
    check = await check_token(request)
    if check:
        try:
            res = await zfsController.get_disk_info()
            return await render.json(res, 200)
        except Exception as e:
            print(str(e))
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def get_io_status(request):
    """Return zpool I/O statistics as ``{"msg": ...}``.

    Fix: an invalid/expired token now returns an explicit 403 (the
    original returned ``None``), consistent with the other handlers.
    """
    check = await check_token(request)
    if check:
        try:
            res = await zfsController.get_IO_stats()
            return await render.json({'msg': res}, 200)
        except Exception as e:
            print(str(e))
            return await render.raw({'error': str(e)}, 500)
    else:
        return await render.json({'error': 'Invalid or expired token'}, 403)
async def add_disk(request):
    """Attach a new device to a pool; body is ``{pool, device}``.

    Requires a valid JWT (403 otherwise); controller errors are returned
    as a raw error body with status 500.
    """
    if not await check_token(request):
        return await render.json({'error': 'Invalid or expired token'}, 403)
    try:
        body = await request.json()
        outcome = await zfsController.add_new_disk(body['pool'], body['device'])
        return await render.json({"success": outcome}, 200)
    except Exception as err:
        print(str(err))
        return await render.raw({'error': str(err)}, 500)
async def add_spare_disk(request):
    """Attach a spare device to a pool; body is ``{pool, device}``.

    Requires a valid JWT (403 otherwise); controller errors are returned
    as a raw error body with status 200.
    """
    if not await check_token(request):
        return await render.json({'error': 'Invalid or expired token'}, 403)
    try:
        body = await request.json()
        outcome = await zfsController.add_spare_disk(body['pool'], body['device'])
        return await render.json({"success": outcome}, 200)
    except Exception as err:
        print(str(err))
        return await render.raw({'error': str(err)}, 200)
async def replace_disk(request):
    """Replace a pool device; body is ``{pool, old_device, new_device}``.

    Requires a valid JWT (403 otherwise); controller errors are returned
    as a raw error body with status 200.
    """
    if not await check_token(request):
        return await render.json({'error': 'Invalid or expired token'}, 403)
    try:
        body = await request.json()
        outcome = await zfsController.replace_disk(body['pool'], body['old_device'], body['new_device'])
        return await render.json({"success": outcome}, 200)
    except Exception as err:
        print(str(err))
        return await render.raw({'error': str(err)}, 200)
async def set_mountpoint(request):
    """Set a pool's mountpoint; body is ``{mountpoint, pool}``.

    Requires a valid JWT (403 otherwise); controller errors are returned
    as a raw error body with status 200.
    """
    if not await check_token(request):
        return await render.json({'error': 'Invalid or expired token'}, 403)
    try:
        body = await request.json()
        outcome = await zfsController.set_mountpoint(body['mountpoint'], body['pool'])
        return await render.json({"success": outcome}, 200)
    except Exception as err:
        print(str(err))
        return await render.raw({'error': str(err)}, 200)
| 31.900621 | 104 | 0.596379 | 0 | 0 | 0 | 0 | 0 | 0 | 4,876 | 0.949377 | 577 | 0.112344 |
147d6b7888d4e07de9fd294908f9ee518aa81f10 | 2,888 | py | Python | agent/stubs/retina.py | yoshi-ono/WM_Hackathon | 546996c80b53a155ca94417b2fd19412c8f9f9c4 | [
"Apache-2.0"
] | 9 | 2020-09-08T04:39:55.000Z | 2021-08-03T14:28:33.000Z | agent/stubs/retina.py | pulinagrawal/WM_Hackathon | 19522ba74c5cf1d78b19d58b1881e77b5ef26c27 | [
"Apache-2.0"
] | null | null | null | agent/stubs/retina.py | pulinagrawal/WM_Hackathon | 19522ba74c5cf1d78b19d58b1881e77b5ef26c27 | [
"Apache-2.0"
] | 4 | 2021-04-21T00:48:28.000Z | 2021-06-28T02:33:04.000Z | import torch.nn as nn
import torch
import torchvision
from utils.image_filter_utils import get_dog_image_filter, conv2d_output_shape
from utils.writer_singleton import WriterSingleton
class Retina(nn.Module):
STEP = 0
@staticmethod
def get_default_config():
config = {
'f_size': 7,
'f_sigma': 2.0,
'f_k': 1.6 # approximates Laplacian of Gaussian
}
return config
def __init__(self, name, channels, config=None, device=None):
super().__init__()
self._name = name
self.channels = channels
if config is None:
self._config = Retina.config
else:
self._config = config
self.summaries = self._config['summaries']
self._device = device
self._dog_filter_pos = None
self._dog_filter_neg = None
self._build()
def _build(self):
# DoG kernel - edge and corner detection plus smoothing
size = self._config['f_size']
sigma = self._config['f_sigma']
k = self._config['f_k']
self._dog_filter_pos = get_dog_image_filter(channels=self.channels, size=size, sigma=sigma,
device=self._device, k=k)
self._dog_filter_neg = get_dog_image_filter(channels=self.channels, size=size, sigma=sigma,
device=self._device, k=k, invert=True)
def forward(self, image_tensor):
interest_pos = self._dog_filter_pos(image_tensor)
interest_neg = self._dog_filter_neg(image_tensor)
channel_dim = 1 # B,C,H,W
interest = torch.cat([interest_pos, interest_neg], dim=channel_dim)
writer = WriterSingleton.get_writer()
if self.summaries and writer:
self.STEP += 1
# print("retina/input shape: ", image_tensor.shape)
# print("retina/dog_neg shape: ", interest_neg.shape)
writer.add_image(self._name + '/input', torchvision.utils.make_grid(image_tensor), global_step=self.STEP)
writer.add_image(self._name + '/dog-', torchvision.utils.make_grid(interest_neg), global_step=self.STEP)
writer.add_image(self._name + '/dog+', torchvision.utils.make_grid(interest_pos), global_step=self.STEP)
writer.add_histogram(self._name + '/hist-input', image_tensor, global_step=self.STEP)
writer.add_histogram(self._name + '/hist-dog-', interest_neg, global_step=self.STEP)
writer.add_histogram(self._name + '/hist-dog+', interest_pos, global_step=self.STEP)
writer.flush()
return interest, interest_pos, interest_neg
def get_output_size(self, h, w):
kernel_size = self._config['f_size']
output_shape = conv2d_output_shape([h,w], kernel_size=kernel_size, stride=1, pad=0, dilation=1)
return output_shape
def get_output_shape(self, h, w):
output_size = self.get_output_size(h, w)
output_shape = [-1, self.channels * 2, output_size[0], output_size[1]] # because 2x 3 channels (+/-)
return output_shape
| 36.1 | 111 | 0.684557 | 2,700 | 0.934903 | 0 | 0 | 176 | 0.060942 | 0 | 0 | 355 | 0.122922 |
147da05a9f652a5715c04bef141e04cc07ea681a | 790 | py | Python | aws_sso/utils/registry.py | rkhullar/many-sso | 14e019187e411bdeee77152310b09d250ec90555 | [
"MIT"
] | 2 | 2019-12-15T18:57:58.000Z | 2020-06-15T22:02:29.000Z | aws_sso/utils/registry.py | rkhullar/many-sso | 14e019187e411bdeee77152310b09d250ec90555 | [
"MIT"
] | 1 | 2020-02-06T19:02:36.000Z | 2020-02-06T19:02:36.000Z | aws_sso/utils/registry.py | rkhullar/many-sso | 14e019187e411bdeee77152310b09d250ec90555 | [
"MIT"
] | null | null | null | from typing import Callable, Dict, Type
def register_action(action: str, mapping_name: str = '__actions__'):
    """Decorator factory: register a method under *action* on its class.

    The returned descriptor uses ``__set_name__`` to (a) create the class
    attribute named *mapping_name* (a dict) if absent, (b) store the raw
    function there under *action*, and (c) rebind the attribute to the
    plain function so normal method calls still work.

    Fix: the local annotation was ``Dict[str: Callable]`` (a slice, not a
    type) — corrected to ``Dict[str, Callable]``.
    NOTE(review): ``hasattr`` also sees an *inherited* mapping, so
    subclasses register into their base class's dict — confirm intended.
    """
    # https://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3/25959545#25959545
    class RegistryHandler:
        def __init__(self, fn: Callable):
            self.fn = fn

        def __set_name__(self, owner: Type, name: str):
            if not hasattr(owner, mapping_name):
                setattr(owner, mapping_name, dict())
            mapping: Dict[str, Callable] = getattr(owner, mapping_name)
            mapping[action] = self.fn
            setattr(owner, name, self.fn)
    return RegistryHandler
return RegistryHandler
def register_service(cls):
"""discover methods within a class with prefix "handle_" and populate the actions"""
| 30.384615 | 123 | 0.663291 | 402 | 0.508861 | 0 | 0 | 0 | 0 | 0 | 0 | 216 | 0.273418 |
147e3d176f26326ab89c0aebc52786cc4463a120 | 5,154 | py | Python | tools/waf-tools/f_cppcheck.py | TerraWilly/foxbms-2 | 62288b333fe6da52deae91f74fb15e71060ac99c | [
"CC-BY-4.0"
] | 1 | 2021-04-11T02:02:28.000Z | 2021-04-11T02:02:28.000Z | tools/waf-tools/f_cppcheck.py | TerraWilly/foxbms-2 | 62288b333fe6da52deae91f74fb15e71060ac99c | [
"CC-BY-4.0"
] | null | null | null | tools/waf-tools/f_cppcheck.py | TerraWilly/foxbms-2 | 62288b333fe6da52deae91f74fb15e71060ac99c | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @copyright © 2010 - 2021, Fraunhofer-Gesellschaft zur Foerderung der
# angewandten Forschung e.V. All rights reserved.
#
# BSD 3-Clause License
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# We kindly request you to use one or more of the following phrases to refer to
# foxBMS in your hardware, software, documentation or advertising materials:
#
# ″This product uses parts of foxBMS®″
#
# ″This product includes parts of foxBMS®″
#
# ″This product is derived from foxBMS®″
"""Implements a waf tool to configure Cppcheck
For information on Cppcheck see http://cppcheck.sourceforge.net/.
"""
import os
from waflib import Utils, Task, TaskGen, Logs
class cppcheck(Task.Task): # pylint: disable=invalid-name
    """Waf task that runs the Cppcheck static analyzer on the project.

    All behavior is declared through waf Task class attributes; the
    ``${...}`` placeholders in run_str are interpolated from the waf
    environment configured in ``configure()``.
    """
    #: str: color in which the command line is displayed in the terminal
    color = "GREEN"
    #: str: Cppcheck handles the need for a re-run, so always run this task
    always_run = True
    # NOTE(review): the trailing comma makes run_str a one-element *tuple*
    # of the concatenated command string, not a plain string — confirm waf
    # accepts tuple-valued run_str here as intended.
    run_str = (
        "${CPPCHECK} --project=${CPPCHECK_MAIN_PROJECT_FILE} --cppcheck-build-dir=. "
        "--exitcode-suppressions=${CPPCHECK_RULE_SUPPRESSION_FILE} -f "
        "--std=c99 --enable=warning,style,performance,portability,information,unusedFunction "
        "--addon=${CPPCHECK_ADDON_CNF_MISRA} --error-exitcode=${CPPCHECK_EXITCODE_FAIL} "
        "--suppressions-list=${CPPCHECK_RULE_SUPPRESSION_FILE}",
    )
    """str: string to be interpolated to create the command line to run
    cppcheck."""
@TaskGen.feature("cppcheck")
def add_cppcheck_task(self):
    """Task creator: instantiate one ``cppcheck`` task for every task
    generator that requests the ``cppcheck`` feature."""
    self.create_task("cppcheck")
def options(opt):
    """Register the ``--misra-rules-file`` command-line option.

    Probes the platform's conventional documents directories for a
    MISRA-C ``rules-2012.txt`` and uses the first existing file as the
    option default; ``False`` signals that no default is available.
    """
    if Utils.is_win32:
        candidate_dirs = [
            os.path.join(os.path.expanduser("~"), "Documents", "MISRA-C"),
            os.path.join(os.environ["PUBLIC"], "Documents", "MISRA-C"),
        ]
    else:
        candidate_dirs = [os.path.join(os.path.expanduser("~"), "MISRA-C")]
    # Probe every candidate path in the waf node tree (same probing order
    # and side effects as before), then take the first hit.
    found = [
        opt.root.find_node(os.path.join(d, "rules-2012.txt")) for d in candidate_dirs
    ]
    default_rules = next((node.abspath() for node in found if node), False)
    opt.add_option(
        "--misra-rules-file",
        action="store",
        default=default_rules,
        dest="misra_rules_file",
        help="Sets the path to the MISRA rules file for cppcheck",
    )
def configure(conf):
    """Configuration step of the Cppcheck waf tool.

    - Locate the ``cppcheck`` executable (PATH first, then the default
      Windows installation directory); silently no-op if not found.
    - Validate the MISRA-C rules file passed via ``--misra-rules-file``
      and record it in ``conf.env.RULES_FILE``.
    """
    # check first for cppcheck in the PATH. If it is not present search in
    # the default installation directory
    conf.start_msg("Checking for program 'cppcheck'")
    conf.find_program("cppcheck", mandatory=False)
    if not conf.env.CPPCHECK:
        if Utils.is_win32:
            conf.find_program(
                "cppcheck",
                path_list=[os.path.join(os.environ["ProgramFiles"], "Cppcheck")],
                mandatory=False,
            )
    conf.end_msg(conf.env.get_flat("CPPCHECK"))
    if not conf.env.CPPCHECK:
        # Tool is optional: leave the environment untouched when missing.
        return
    conf.start_msg("Checking for MISRA-C rules file")
    rules_file = []
    if conf.options.misra_rules_file:
        rules_file = conf.root.find_node(os.path.abspath(conf.options.misra_rules_file))
        if not rules_file:
            # Bad path from the user: warn but keep going without MISRA.
            Logs.warn(
                f"{os.path.abspath(conf.options.misra_rules_file)} does not exist. Ignoring input."
            )
        else:
            conf.env.append_unique("RULES_FILE", rules_file.abspath())
    conf.end_msg(conf.env.get_flat("RULES_FILE"))
| 37.620438 | 99 | 0.689367 | 795 | 0.154249 | 0 | 0 | 126 | 0.024447 | 0 | 0 | 3,479 | 0.67501 |
147e734bd09bed751a8e4643798609eb58918fad | 539 | py | Python | updater/models.py | h4ck3rm1k3/srtracker | 2ef54b1854bbc082c871ce08ea22f6c9ce03eb37 | [
"Ruby"
] | 1 | 2019-04-27T20:15:11.000Z | 2019-04-27T20:15:11.000Z | updater/models.py | h4ck3rm1k3/srtracker | 2ef54b1854bbc082c871ce08ea22f6c9ce03eb37 | [
"Ruby"
] | null | null | null | updater/models.py | h4ck3rm1k3/srtracker | 2ef54b1854bbc082c871ce08ea22f6c9ce03eb37 | [
"Ruby"
] | null | null | null | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DateTime, Sequence
Base = declarative_base()
class Subscription(Base):
__tablename__ = 'subscriptions'
id = Column(Integer, Sequence('subscription_id_seq'), primary_key=True)
sr_id = Column(String, index=True)
method = Column(String)
contact = Column(String)
class UpdateInfoItem(Base):
__tablename__ = 'updateinfo'
key = Column(String, primary_key=True)
value = Column(String)
| 25.666667 | 80 | 0.714286 | 383 | 0.710575 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.089054 |
147f8e40d8bb9c99516ab75985ce538329215277 | 1,352 | py | Python | setup.py | writememe/motherstarter | e326ad8f73fc8741fa69faf20ea214486aca1c42 | [
"Apache-2.0"
] | 33 | 2020-11-15T09:51:11.000Z | 2022-01-23T09:51:49.000Z | setup.py | writememe/motherstarter | e326ad8f73fc8741fa69faf20ea214486aca1c42 | [
"Apache-2.0"
] | 98 | 2020-11-08T21:55:12.000Z | 2022-03-01T15:53:08.000Z | setup.py | writememe/motherstarter | e326ad8f73fc8741fa69faf20ea214486aca1c42 | [
"Apache-2.0"
] | 2 | 2022-01-10T07:15:15.000Z | 2022-02-25T04:22:46.000Z | #!/usr/bin/env python
# Motherstarter setup file
from setuptools import setup, find_packages
from motherstarter import __version__, __author__
# Open and read README file
with open("README.md", "r", encoding="utf-8") as f:
README = f.read()
# Setup requirements to be installed
requirements = []
with open("requirements.txt") as f:
requirements = f.read().splitlines()
setup(
author_email="danielfjteycheney@gmail.com",
description="Network automation inventory data translation tool.",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/writememe/motherstarter",
name="motherstarter",
license="Apache License 2.0",
version=__version__,
author=__author__,
packages=find_packages(),
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS",
],
python_requires=">=3.7",
include_package_data=True,
install_requires=requirements,
entry_points="""
[console_scripts]
motherstarter=motherstarter.motherstarter:cli
""",
)
| 29.391304 | 70 | 0.674556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 692 | 0.511834 |
147faa823472137bc7f168c087637c0fa7b9e1d9 | 8,006 | py | Python | rgw/v2/tests/s3_swift/test_dynamic_bucket_resharding.py | rpratap-bot/ceph-qe-scripts | 8a7090d6707a8e7b927eabfc9c9212f343a35bc4 | [
"MIT"
] | null | null | null | rgw/v2/tests/s3_swift/test_dynamic_bucket_resharding.py | rpratap-bot/ceph-qe-scripts | 8a7090d6707a8e7b927eabfc9c9212f343a35bc4 | [
"MIT"
] | null | null | null | rgw/v2/tests/s3_swift/test_dynamic_bucket_resharding.py | rpratap-bot/ceph-qe-scripts | 8a7090d6707a8e7b927eabfc9c9212f343a35bc4 | [
"MIT"
] | null | null | null | """
test_dynamic_bucket_resharding - Test resharding operations on bucket
Usage: test_dynamic_bucket_resharding.py -c <input_yaml>
<input_yaml>
Note: any one of these yamls can be used
test_manual_resharding.yaml
test_dynamic_resharding.yaml
Operation:
Create user
Perform IOs in specific bucket
Initiate dynamic or manual sharding on bucket
Restart RGW service
Verify created shard numbers of bucket
"""
# test RGW dynamic bucket resharding
import os, sys
sys.path.append(os.path.abspath(os.path.join(__file__, "../../../..")))
from v2.lib.resource_op import Config
import v2.lib.resource_op as s3lib
from v2.lib.s3.auth import Auth
import v2.utils.utils as utils
from v2.lib.rgw_config_opts import CephConfOp, ConfigOpts
from v2.utils.utils import HttpResponseParser, RGWService
import traceback
import argparse
import yaml
import v2.lib.manage_data as manage_data
from v2.utils.log import configure_logging
from v2.lib.exceptions import TestExecError, RGWBaseException
from v2.utils.test_desc import AddTestInfo
from v2.tests.s3_swift import reusable
from v2.lib.s3.write_io_info import IOInfoInitialize, BasicIOInfoStructure, BucketIoInfo
import random, time
import threading
import json
import logging
log = logging.getLogger()
TEST_DATA_PATH = None
def test_exec(config):
    """Exercise RGW bucket resharding (dynamic or manual) end to end.

    Creates a user and one bucket, uploads config.objects_count objects
    (optionally versioned), triggers resharding per config.sharding_type,
    then verifies the resulting shard count via radosgw-admin metadata.
    Raises TestExecError on any verification failure.
    """
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    write_bucket_io_info = BucketIoInfo()
    io_info_initialize.initialize(basic_io_structure.initial())
    ceph_conf = CephConfOp()
    rgw_service = RGWService()
    log.info('starting IO')
    config.user_count = 1
    user_info = s3lib.create_users(config.user_count)
    user_info = user_info[0]
    auth = Auth(user_info, ssl=config.ssl)
    rgw_conn = auth.do_auth()
    log.info('sharding configuration will be added now.')
    if config.sharding_type == 'dynamic':
        log.info('sharding type is dynamic')
        # for dynamic,
        # the number of shards should be greater than [ (no of objects)/(max objects per shard) ]
        # example: objects = 500 ; max object per shard = 10
        # then no of shards should be at least 50 or more
        time.sleep(15)
        log.info('making changes to ceph.conf')
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_max_objs_per_shard, str(config.max_objects_per_shard))
        ceph_conf.set_to_ceph_conf('global', ConfigOpts.rgw_dynamic_resharding,
                                   'True')
        num_shards_expected = config.objects_count / config.max_objects_per_shard
        log.info('num_shards_expected: %s' % num_shards_expected)
    # Restart RGW so the (possibly changed) sharding configuration is live.
    log.info('trying to restart services ')
    srv_restarted = rgw_service.restart()
    time.sleep(30)
    if srv_restarted is False:
        raise TestExecError("RGW service restart failed")
    else:
        log.info('RGW service restarted')
    config.bucket_count = 1
    objects_created_list = []
    log.info('no of buckets to create: %s' % config.bucket_count)
    bucket_name = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no=1)
    bucket = reusable.create_bucket(bucket_name, rgw_conn, user_info)
    if config.test_ops.get('enable_version', False):
        log.info('enable bucket version')
        reusable.enable_versioning(bucket, rgw_conn, user_info, write_bucket_io_info)
    log.info('s3 objects to create: %s' % config.objects_count)
    # Upload one object per entry in mapped_sizes; keep (name, path) pairs
    # so versioned objects can be deleted individually later.
    for oc, size in list(config.mapped_sizes.items()):
        config.obj_size = size
        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
        s3_object_path = os.path.join(TEST_DATA_PATH, s3_object_name)
        if config.test_ops.get('enable_version', False):
            reusable.upload_version_object(config, user_info, rgw_conn, s3_object_name, config.obj_size, bucket,
                                           TEST_DATA_PATH)
        else:
            reusable.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config, user_info)
        objects_created_list.append((s3_object_name, s3_object_path))
    if config.sharding_type == 'manual':
        log.info('sharding type is manual')
        # for manual.
        # the number of shards will be the value set in the command.
        time.sleep(15)
        log.info('in manual sharding')
        cmd_exec = utils.exec_shell_cmd('radosgw-admin bucket reshard --bucket=%s --num-shards=%s '
                                        '--yes-i-really-mean-it'
                                        % (bucket.name, config.shards))
        if cmd_exec is False:
            raise TestExecError("manual resharding command execution failed")
    # Give resharding time to settle before checking the metadata.
    sleep_time = 600
    log.info(f'verification starts after waiting for {sleep_time} seconds')
    time.sleep(sleep_time)
    op = utils.exec_shell_cmd("radosgw-admin metadata get bucket:%s" % bucket.name)
    json_doc = json.loads(op)
    bucket_id = json_doc['data']['bucket']['bucket_id']
    op2 = utils.exec_shell_cmd("radosgw-admin metadata get bucket.instance:%s:%s" % (bucket.name, bucket_id))
    json_doc2 = json.loads((op2))
    num_shards_created = json_doc2['data']['bucket_info']['num_shards']
    log.info('no_of_shards_created: %s' % num_shards_created)
    if config.sharding_type == 'manual':
        if config.shards != num_shards_created:
            raise TestExecError("expected number of shards not created")
        log.info('Expected number of shards created')
    if config.sharding_type == 'dynamic':
        log.info('Verify if resharding list is empty')
        reshard_list_op = json.loads(utils.exec_shell_cmd("radosgw-admin reshard list"))
        # NOTE(review): when the reshard list is NOT empty the shard-count
        # check below is skipped entirely and the test passes — confirm
        # that an in-progress reshard at this point is acceptable.
        if not reshard_list_op:
            log.info(
                'for dynamic number of shards created should be greater than or equal to number of expected shards')
            log.info('no_of_shards_expected: %s' % num_shards_expected)
            if int(num_shards_created) >= int(num_shards_expected):
                log.info('Expected number of shards created')
            else:
                raise TestExecError('Expected number of shards not created')
    if config.test_ops.get('delete_bucket_object', False):
        if config.test_ops.get('enable_version', False):
            for name, path in objects_created_list:
                reusable.delete_version_object(bucket, name, path, rgw_conn, user_info)
        else:
            reusable.delete_objects(bucket)
        reusable.delete_bucket(bucket)
if __name__ == '__main__':
    test_info = AddTestInfo('RGW Dynamic Resharding test')
    test_info.started_info()
    try:
        # Test data lives under <project root>/test_data; create it if absent.
        project_dir = os.path.abspath(os.path.join(__file__, "../../.."))
        test_data_dir = 'test_data'
        TEST_DATA_PATH = (os.path.join(project_dir, test_data_dir))
        log.info('TEST_DATA_PATH: %s' % TEST_DATA_PATH)
        if not os.path.exists(TEST_DATA_PATH):
            log.info('test data dir not exists, creating.. ')
            os.makedirs(TEST_DATA_PATH)
        parser = argparse.ArgumentParser(description='RGW S3 Automation')
        parser.add_argument('-c', dest="config",
                            help='RGW Test yaml configuration')
        parser.add_argument('-log_level', dest='log_level',
                            help='Set Log Level [DEBUG, INFO, WARNING, ERROR, CRITICAL]',
                            default='info')
        args = parser.parse_args()
        yaml_file = args.config
        # Name the log file after the yaml config file.
        log_f_name = os.path.basename(os.path.splitext(yaml_file)[0])
        configure_logging(f_name=log_f_name,
                          set_level=args.log_level.upper())
        config = Config(yaml_file)
        config.read()
        if config.mapped_sizes is None:
            config.mapped_sizes = utils.make_mapped_sizes(config)
        test_exec(config)
        test_info.success_status('test passed')
        # SystemExit is a BaseException, so it is not caught below.
        sys.exit(0)
    except (RGWBaseException, Exception) as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
| 41.697917 | 116 | 0.673995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,227 | 0.278166 |
14817026f39bd06023f2b84c932bb54cab1a147b | 690 | py | Python | example/schema.py | devind-team/devind-django-dictionaries | 6b2086a15590c968450a5c6fa2a81b4734ee1a81 | [
"MIT"
] | null | null | null | example/schema.py | devind-team/devind-django-dictionaries | 6b2086a15590c968450a5c6fa2a81b4734ee1a81 | [
"MIT"
] | 1 | 2022-03-30T02:44:05.000Z | 2022-03-30T02:44:05.000Z | example/schema.py | devind-team/devind-django-dictionaries | 6b2086a15590c968450a5c6fa2a81b4734ee1a81 | [
"MIT"
] | null | null | null |
import graphene
from typing import cast
from graphene_django import DjangoObjectType
from graphene_django.debug import DjangoDebug
from django.contrib.auth import get_user_model
import devind_dictionaries.schema
class UserType(DjangoObjectType):
    """GraphQL type exposing a restricted view of the Django user model."""
    class Meta:
        model = get_user_model()
        # Only these non-sensitive fields are exposed through the API.
        fields = ('id', 'username', 'last_name', 'email', 'is_active',)
class Query(
    devind_dictionaries.schema.Query,
    graphene.ObjectType
):
    """Root GraphQL query: dictionaries queries plus a debug field."""
    # graphene-django debug introspection, queried as `__debug`.
    debug = graphene.Field(DjangoDebug, name='__debug')
class Mutation(
    devind_dictionaries.schema.Mutation,
    graphene.ObjectType
):
    """Root GraphQL mutation: everything comes from devind_dictionaries."""
    pass
schema = graphene.Schema(query=cast(graphene.ObjectType, Query), mutation=Mutation)
| 20.294118 | 83 | 0.750725 | 379 | 0.549275 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.075362 |
14848a43d3f090cdc183de7af48de1c3e490059a | 311 | py | Python | fuzzi-gen/fuzzi/evaluation/pate.py | hengchu/fuzzi-impl | bcf49c42d2dd4e1e3c1fe8b85fa7f845ea8fd016 | [
"BSD-3-Clause"
] | 4 | 2019-04-02T15:54:21.000Z | 2022-02-14T20:34:00.000Z | fuzzi-gen/fuzzi/evaluation/pate.py | hengchu/fuzzi-impl | bcf49c42d2dd4e1e3c1fe8b85fa7f845ea8fd016 | [
"BSD-3-Clause"
] | 1 | 2019-08-19T13:53:50.000Z | 2019-08-19T13:53:50.000Z | fuzzi-gen/fuzzi/evaluation/pate.py | hengchu/fuzzi-impl | bcf49c42d2dd4e1e3c1fe8b85fa7f845ea8fd016 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
def main():
    """Print PATE predictions, ground truth, and accuracy."""
    # pate_train is imported for its import-time side effects only
    # (presumably it runs/loads the training — confirm against the module).
    from fuzzi.evaluation import pate_train
    from fuzzi.generated import pate_label
    predictions = pate_label.outputs
    # Ground truth is the last column of each test row.
    truth = [x[-1] for x in pate_label.db_test]
    print(predictions)
    print(truth)
    # NOTE(review): if `predictions` is a plain list, `predictions == truth`
    # is a single bool and np.mean gives 0.0/1.0, not elementwise accuracy;
    # this relies on `outputs` being a numpy array — confirm.
    print('PATE accuracy = %f' % (np.mean(predictions == truth)))
1486390989e81040ce5dfa57404584bdd0aad30e | 462 | py | Python | bedrock/app/utils/jinja.py | ronbeltran/webapp2-bedrock | 42909fd6eb99ffe19ff941f9c66c9c84548139c6 | [
"MIT"
] | 1 | 2019-01-09T10:14:38.000Z | 2019-01-09T10:14:38.000Z | bedrock/app/utils/jinja.py | ronbeltran/webapp2-bedrock | 42909fd6eb99ffe19ff941f9c66c9c84548139c6 | [
"MIT"
] | null | null | null | bedrock/app/utils/jinja.py | ronbeltran/webapp2-bedrock | 42909fd6eb99ffe19ff941f9c66c9c84548139c6 | [
"MIT"
] | null | null | null | import os
import webapp2
import jinja2
import config
from app.utils.compressor import WEBASSETS_ENV
# Module-level Jinja2 environment shared by the app's handlers.
JINJA_ENV = jinja2.Environment(
    # Escape everything unconditionally, regardless of template extension.
    autoescape=lambda x: True,
    extensions=['jinja2.ext.autoescape',
                'webassets.ext.jinja2.AssetsExtension'],
    loader=jinja2.FileSystemLoader(
        os.path.join(config.PROJECT_ROOT, 'templates')),
)
# Expose webapp2's URL builder to all templates as `uri_for`.
JINJA_ENV.globals.update({'uri_for': webapp2.uri_for})
# Wire the webassets environment used by the AssetsExtension above.
JINJA_ENV.assets_environment = WEBASSETS_ENV
14878c677e10667944d3fa541d7f4f2e7bfbbbc0 | 1,479 | py | Python | Mundo 3/ex094.py | adonaifariasdev/cursoemvideo-python3 | 1fd35e45b24c52013fa3bc98e723971db8e6b7d1 | [
"MIT"
] | null | null | null | Mundo 3/ex094.py | adonaifariasdev/cursoemvideo-python3 | 1fd35e45b24c52013fa3bc98e723971db8e6b7d1 | [
"MIT"
] | null | null | null | Mundo 3/ex094.py | adonaifariasdev/cursoemvideo-python3 | 1fd35e45b24c52013fa3bc98e723971db8e6b7d1 | [
"MIT"
] | null | null | null | # Crie um programa que leia nome, sexo e idade de várias pessoas, guardando os dados
# de cada pessoa em um dicionário e todos os dicionários em uma lista. No final, mostre:
# A) Quantas pessoas foram cadastradas B) A média de idade C) Uma lista com as mulheres
# D) Uma lista de pessoas com idade acima da média
dados = dict()
lista = list()
somaIdade = media = 0
while True:
dados['nome'] = str(input('Nome: '))
while True:
dados['sexo'] = str(input('Sexo [M/F]: ')).upper()
if dados['sexo'] in 'MF':
break
print('ERRO! Por favor, Digite apenas M ou F.')
if dados['sexo'] in 'N':
break
dados['idade'] = int(input('Idade: '))
somaIdade += dados['idade']
lista.append(dados.copy())
while True:
resp = str(input('Quer continuar? [S/N] ')).upper()
if resp in 'SN':
break
print('ERRO! Responda apenas S ou N.')
if resp in 'N':
break
media = somaIdade / len(lista)
print('-=' * 30)
print(f'A) A quantidade de pessoas cadastradas foi: {len(lista)}')
print(f'B) A média de idade é: {media:5.2f} anos.')
print('C) As mulheres cadastradas são: ', end='')
for p in lista:
if p['sexo'] == 'F':
print(f'{p["nome"]}; ', end='')
print()
print('D) As pessoas que estão acima da média de idade são: ')
for p in lista:
if p['idade'] >= media:
for k, v in p.items():
print(f'{k} = {v}; ', end='')
print()
print('<< ENCERRADO >>')
| 33.613636 | 88 | 0.584178 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 764 | 0.512752 |
1487bed70be2fb31efb307ee1f6bd18e9a12f492 | 3,794 | py | Python | lonet.py | johan12345/lonet.py | 903138df325ad7be91d17045d0fcf768ca09aa4e | [
"MIT"
] | 1 | 2016-09-28T06:53:45.000Z | 2016-09-28T06:53:45.000Z | lonet.py | johan12345/lonet.py | 903138df325ad7be91d17045d0fcf768ca09aa4e | [
"MIT"
] | null | null | null | lonet.py | johan12345/lonet.py | 903138df325ad7be91d17045d0fcf768ca09aa4e | [
"MIT"
] | null | null | null | import argparse
import os
import re
import urllib.parse
import requests
from bs4 import BeautifulSoup
from pushbullet import Pushbullet
pushbullet = None
def download_file(url, dir):
    """Download *url* into directory *dir* and return the local file path.

    The file name is taken from the last URL segment (percent-decoded as
    iso-8859-1, matching lo-net2's encoding).  If the file already exists
    it is not downloaded again.

    Fixes: the already-exists early return now also reports the path
    (previously it returned None, inconsistent with the success path), and
    the HTTP response is closed deterministically via a context manager.
    """
    local_filename = dir + '/' + urllib.parse.unquote_plus(url.split('/')[-1], encoding='iso-8859-1')
    if os.path.exists(local_filename):
        return local_filename
    print(local_filename)
    # Stream in 1 KiB chunks so large attachments are not held in memory.
    with requests.get(url, stream=True) as r:
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
    if pushbullet is not None:
        pushbullet.push_note('Neue Datei', local_filename)
    return local_filename
def download_folder(folder, base_dir):
    """Recursively mirror a folder dict ({'name', optional 'url',
    'subfolders'}) into *base_dir*."""
    target = base_dir + '/' + folder['name']
    if not os.path.exists(target):
        os.makedirs(target)
    # A folder without a 'url' key is structural only: no files of its own.
    if 'url' in folder:
        download_files(folder['url'], target)
    for subfolder in folder['subfolders'].values():
        download_folder(subfolder, target)
def download_files(url, dir):
    """Fetch the file-listing page at *url*, download every file linked
    with a ``download`` attribute into *dir*, and return the parsed page."""
    page_html = session.get(url=url).text
    page = BeautifulSoup(page_html, 'html.parser')
    for link in page.select('a[download]'):
        download_file(base_download_url + link['href'], dir)
    return page
# --- Command-line interface ---
parser = argparse.ArgumentParser(description='Download files from lo-net2.de file storage')
parser.add_argument('-u', '--username', type=str, required=True,
                    help='lo-net2 email address (.lo-net2.de at the end can be omitted)')
parser.add_argument('-p', '--password', type=str, required=True,
                    help='lo-net2 password')
parser.add_argument('-pb', '--pushbullet-token', type=str, help='Pushbullet API token')
args = parser.parse_args()
base_url = 'https://www.lo-net2.de/wws/'
base_download_url = 'https://www.lo-net2.de'
# One session so the login cookie is reused for all later requests.
session = requests.Session()
if args.pushbullet_token is not None:
    pushbullet = Pushbullet(args.pushbullet_token)
# --- Login: fetch the login page to obtain a session id, then post credentials ---
login_page = session.get('https://www.lo-net2.de/wws/100001.php').text
sid = re.compile('sid=(\d+)').search(login_page).group(1)
main_page = BeautifulSoup(session.post(url=base_url + '100001.php?sid=' + sid,
                                       files={
                                           'default_submit_button': ('', ''),
                                           'login_nojs': ('', ''),
                                           'login_login': ('', args.username),
                                           'login_password': ('', args.password),
                                           'language': ('', '2')
                                       }).text, 'html.parser')
# --- Walk every course the user is a member of ---
# NOTE(review): '#status_member_of_19' looks like a site-specific CSS id;
# confirm it is stable across lo-net2 layouts.
course_links = main_page.select('#status_member_of_19 li > a')
for course_link in course_links:
    course_name = course_link.text
    print(course_name)
    if not os.path.exists(course_name):
        os.makedirs(course_name)
    course_page = BeautifulSoup(session.get(url=base_url + course_link['href']).text, 'html.parser')
    # 'Dateiablage' is the German label of the file-storage tab.
    files_url = base_url + course_page('a', text='Dateiablage')[0]['href']
    files_page = download_files(files_url, course_name)
    # Rebuild the folder tree from the 'path' query parameter of each
    # folder link, then download the whole tree recursively.
    base_folder = dict(name=course_name, subfolders={})
    for folder_link in files_page.select('#table_folders a'):
        folder_url = base_download_url + folder_link['href']
        query = urllib.parse.urlparse(folder_url).query
        params = urllib.parse.parse_qs(query, keep_blank_values=True)
        path = params['path'][0]
        # Empty path is the root folder, already represented by base_folder.
        if path == '': continue
        parts = path.split('/')[1:]
        # Descend to the parent folder; relies on parents being listed first.
        folder = base_folder
        for i in range(0, len(parts) - 1):
            folder = folder['subfolders'][parts[i]]
        folder['subfolders'][parts[len(parts) - 1]] = dict(
            name=folder_link.text,
            url=folder_url,
            subfolders={}
        )
    download_folder(base_folder, '.')
| 35.12963 | 101 | 0.614128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 658 | 0.173432 |
148809bfd699775d4def161db38c6f759cec7977 | 1,404 | py | Python | multiagent/scenarios/simple_spread_random_one.py | enikon/MACP | 2de004d4eaf09f3b02dde3b7041ce6d693d0c25c | [
"MIT"
] | null | null | null | multiagent/scenarios/simple_spread_random_one.py | enikon/MACP | 2de004d4eaf09f3b02dde3b7041ce6d693d0c25c | [
"MIT"
] | null | null | null | multiagent/scenarios/simple_spread_random_one.py | enikon/MACP | 2de004d4eaf09f3b02dde3b7041ce6d693d0c25c | [
"MIT"
] | null | null | null | import random
from multiagent.scenarios.commons import *
from multiagent.scenarios.simple_spread import Scenario as S
class Scenario(S):
    """Spread scenario in which every agent observes the landmarks in its
    own per-episode random order (seed fixed at reset time)."""

    def make_world(self):
        """Build a 3-agent / 3-landmark collaborative world."""
        world = World()
        # set any world properties first
        world.dim_c = 2
        num_agents = 3
        num_landmarks = 3
        world.collaborative = True
        world_definition(world, num_agents, num_landmarks)
        self.perm_oh = permutation_utils(world)
        self.reset_world(world)
        return world

    def reset_world(self, world):
        super().reset_world(world)
        # Draw fresh per-agent seeds for this episode (agents first, then
        # landmarks, to keep the RNG call order of the original).
        n_agents = len(world.agents)
        self.wagents = [random.randint(*int_range) for _ in range(n_agents)]
        self.wland = [random.randint(*int_range) for _ in range(n_agents)]

    def reward(self, agent, world):
        # Distance-based permutation reward plus a collision penalty.
        return permutation_reward(world, self.perm_oh) + collision_reward(agent, world)

    def observation(self, agent, world):
        entity_pos, other_pos = obs_relative(agent, world)
        # Shuffle landmark order deterministically with this agent's seed.
        shuffled_landmarks = list(entity_pos)
        random.Random(self.wland[agent.index]).shuffle(shuffled_landmarks)
        return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + list(shuffled_landmarks) + other_pos)
14898fa376cacb6e7cf263f4a13f44d71dbd2631 | 1,961 | py | Python | tests/unit/seed/test_extra_install.py | hauntsaninja/virtualenv | 8c2985c2946e767bb6f74a7e22f51add17b38987 | [
"MIT"
] | 1 | 2022-02-21T02:01:37.000Z | 2022-02-21T02:01:37.000Z | tests/unit/seed/test_extra_install.py | hauntsaninja/virtualenv | 8c2985c2946e767bb6f74a7e22f51add17b38987 | [
"MIT"
] | null | null | null | tests/unit/seed/test_extra_install.py | hauntsaninja/virtualenv | 8c2985c2946e767bb6f74a7e22f51add17b38987 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, unicode_literals
import os
import subprocess
import pytest
from virtualenv.discovery.py_info import PythonInfo
from virtualenv.run import run_via_cli
from virtualenv.util.path import Path
from virtualenv.util.subprocess import Popen
CURRENT = PythonInfo.current_system()
CREATOR_CLASSES = CURRENT.creators().key_to_class
def builtin_shows_marker_missing():
builtin_classs = CREATOR_CLASSES.get("builtin")
if builtin_classs is None:
return False
host_include_marker = getattr(builtin_classs, "host_include_marker", None)
if host_include_marker is None:
return False
marker = host_include_marker(CURRENT)
return not marker.exists()
@pytest.mark.xfail(
condition=bool(os.environ.get(str("CI_RUN"))),
strict=False,
reason="did not manage to setup CI to run with VC 14.1 C++ compiler, but passes locally",
)
@pytest.mark.skipif(
not Path(CURRENT.system_include).exists() and not builtin_shows_marker_missing(),
reason="Building C-Extensions requires header files with host python",
)
@pytest.mark.parametrize("creator", list(i for i in CREATOR_CLASSES.keys() if i != "builtin"))
def test_can_build_c_extensions(creator, tmp_path, coverage_env):
session = run_via_cli(["--creator", creator, "--seed", "app-data", str(tmp_path), "-vvv"])
coverage_env()
cmd = [
str(session.creator.script("pip")),
"install",
"--no-index",
"--no-deps",
"--disable-pip-version-check",
"-vvv",
str(Path(__file__).parent.resolve() / "greet"),
]
process = Popen(cmd)
process.communicate()
assert process.returncode == 0
process = Popen(
[str(session.creator.exe), "-c", "import greet; greet.greet('World')"],
universal_newlines=True,
stdout=subprocess.PIPE,
)
out, _ = process.communicate()
assert process.returncode == 0
assert out == "Hello World!\n"
| 31.629032 | 94 | 0.694034 | 0 | 0 | 0 | 0 | 1,242 | 0.63335 | 0 | 0 | 369 | 0.188169 |
1489fd9b83819471be937ccd9caa3138125a9719 | 481 | py | Python | pyhanko_certvalidator/_types.py | MatthiasValvekens/certvalidator | 246c5075ecdb6d50b14c93fdc97a9d0470f84821 | [
"MIT"
] | 4 | 2020-11-11T13:59:05.000Z | 2022-03-13T14:06:10.000Z | pyhanko_certvalidator/_types.py | MatthiasValvekens/certvalidator | 246c5075ecdb6d50b14c93fdc97a9d0470f84821 | [
"MIT"
] | 1 | 2020-11-11T11:29:37.000Z | 2020-11-11T11:29:37.000Z | pyhanko_certvalidator/_types.py | MatthiasValvekens/certvalidator | 246c5075ecdb6d50b14c93fdc97a9d0470f84821 | [
"MIT"
] | 2 | 2020-11-11T10:33:32.000Z | 2022-03-13T14:06:11.000Z | # coding: utf-8
import inspect
def type_name(value):
    """
    Returns a user-readable name for the type of an object

    :param value:
        A value to get the type name of

    :return:
        A unicode string of the object's type name
    """
    # Accept either a class or an instance.
    cls = value if inspect.isclass(value) else value.__class__
    module = cls.__module__
    # Builtin types read better without their module prefix
    # ('__builtin__' keeps Python 2 compatibility).
    if module in ('builtins', '__builtin__'):
        return cls.__name__
    return '.'.join((module, cls.__name__))
148a037daf2a31f1b0747c7a72dcdff26d1106d3 | 959 | gyp | Python | allofw.node/binding.gyp | donghaoren/AllofwModule | 4367327cda0605aad53469294ed8751f8befbdc3 | [
"Unlicense"
] | 3 | 2016-05-04T23:23:48.000Z | 2021-08-03T21:48:07.000Z | allofw.node/binding.gyp | donghaoren/AllofwModule | 4367327cda0605aad53469294ed8751f8befbdc3 | [
"Unlicense"
] | null | null | null | allofw.node/binding.gyp | donghaoren/AllofwModule | 4367327cda0605aad53469294ed8751f8befbdc3 | [
"Unlicense"
] | 2 | 2016-01-31T04:06:51.000Z | 2016-09-30T16:38:36.000Z | {
"targets": [
{
"target_name": "allofw",
"include_dirs": [
"<!@(pkg-config liballofw --cflags-only-I | sed s/-I//g)",
"<!(node -e \"require('nan')\")"
],
"libraries": [
"<!@(pkg-config liballofw --libs)",
"<!@(pkg-config glew --libs)",
],
"cflags!": [ "-fno-exceptions", "-fno-rtti" ],
"cflags_cc!": [ "-fno-exceptions", "-fno-rtti" ],
"cflags_cc": [
"-std=c++11"
],
'conditions': [
[ 'OS=="mac"', {
'xcode_settings': {
'OTHER_CPLUSPLUSFLAGS' : ['-std=c++11'],
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'GCC_ENABLE_CPP_RTTI': 'YES'
},
} ],
],
"sources": [
"src/allofw.cpp",
"src/node_graphics.cpp",
"src/node_sharedmemory.cpp",
"src/node_opengl.cpp",
"src/node_omnistereo.cpp",
"src/gl3binding/glbind.cpp"
]
}
]
}
| 25.236842 | 66 | 0.448384 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 586 | 0.611053 |
148ab50bbb98c6b8649b8ab5529309acc57c2593 | 5,237 | py | Python | LeetCode-All-Solution/Python3/LC-0388-Longest-Absolute-File-Path.py | YuweiYin/Algorithm_YuweiYin | 28648fac59c5a4e3c907978cbd1b3e662ba18fd5 | [
"MIT"
] | null | null | null | LeetCode-All-Solution/Python3/LC-0388-Longest-Absolute-File-Path.py | YuweiYin/Algorithm_YuweiYin | 28648fac59c5a4e3c907978cbd1b3e662ba18fd5 | [
"MIT"
] | null | null | null | LeetCode-All-Solution/Python3/LC-0388-Longest-Absolute-File-Path.py | YuweiYin/Algorithm_YuweiYin | 28648fac59c5a4e3c907978cbd1b3e662ba18fd5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0388-Longest-Absolute-File-Path.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-04-20
=================================================================="""
import sys
import time
from typing import List
# import functools
"""
LeetCode - 0388 - (Medium) - Longest Absolute File Path
https://leetcode.com/problems/longest-absolute-file-path/
Description & Requirement:
Suppose we have a file system that stores both files and directories.
An example of one system in text form looks like this (with ⟶ representing the tab character):
dir
⟶ subdir1
⟶ ⟶ file1.ext
⟶ ⟶ subsubdir1
⟶ subdir2
⟶ ⟶ subsubdir2
⟶ ⟶ ⟶ file2.ext
Here, we have dir as the only directory in the root. dir contains two subdirectories,
subdir1 and subdir2. subdir1 contains a file file1.ext and subdirectory subsubdir1.
subdir2 contains a subdirectory subsubdir2, which contains a file file2.ext.
If we were to write this representation in code, it will look like this:
"dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext".
Note that the '\n' and '\t' are the new-line and tab characters.
Every file and directory has a unique absolute path in the file system,
which is the order of directories that must be opened to reach the file/directory itself,
all concatenated by '/'s. Using the above example, the absolute path to file2.ext is
"dir/subdir2/subsubdir2/file2.ext". Each directory name consists of letters, digits,
and/or spaces. Each file name is of the form name.extension,
where name and extension consist of letters, digits, and/or spaces.
Given a string input representing the file system in the explained format,
return the length of the longest absolute path to a file in the abstracted file system.
If there is no file in the system, return 0.
Example 1:
Input: input = "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext"
Output: 20
Explanation: We have only one file, and the absolute path is "dir/subdir2/file.ext" of length 20.
Example 2:
Input: input = "dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext"
Output: 32
Explanation: We have two files:
"dir/subdir1/file1.ext" of length 21
"dir/subdir2/subsubdir2/file2.ext" of length 32.
We return 32 since it is the longest absolute path to a file.
Example 3:
Input: input = "a"
Output: 0
Explanation: We do not have any files, just a single directory named "a".
Constraints:
1 <= input.length <= 10^4
input may contain lowercase or uppercase English letters, a new line character '\n',
a tab character '\t', a dot '.', a space ' ', and digits.
"""
class Solution:
    def lengthLongestPath(self, _input: str) -> int:
        """Return the length of the longest absolute path to a file inside
        the serialized file system *_input* ('\\n'-separated entries, depth
        encoded by leading '\\t'), or 0 if there is no file."""
        # exception case
        assert isinstance(_input, str) and len(_input) >= 1
        # main method: (deal with each char)
        return self._lengthLongestPath(_input)

    def _lengthLongestPath(self, _input: str) -> int:
        """Single pass over the entries, keeping a stack of cumulative
        directory path lengths (one slot per open depth level)."""
        longest = 0
        dir_len_stack: list = []  # dir_len_stack[d] = path length of open dir at depth d
        for entry in _input.split("\n"):
            name = entry.lstrip("\t")
            depth = len(entry) - len(name)  # number of leading tabs
            # Close directories at this depth or deeper.
            del dir_len_stack[depth:]
            # Parent path length plus one '/' separator, if a parent exists.
            total = (dir_len_stack[-1] + 1 if dir_len_stack else 0) + len(name)
            if "." in name:
                # A dot marks a file entry; files never become parents.
                longest = max(longest, total)
            else:
                dir_len_stack.append(total)
        return longest
def main():
    """Run one of the LC-0388 examples and print the answer plus timing."""
    # Example 1: Output: 20
    # _input = "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext"
    # Example 2: Output: 32
    _input = "dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext"
    # Example 3: Output: 0
    # _input = "a"
    solver = Solution()
    # Time only the solver call itself.
    t0 = time.process_time()
    answer = solver.lengthLongestPath(_input)
    t1 = time.process_time()
    print('\nAnswer:')
    print(answer)
    print('Running Time: %.5f ms' % ((t1 - t0) * 1000))


if __name__ == "__main__":
    sys.exit(main())
| 35.385135 | 110 | 0.618293 | 1,609 | 0.305835 | 0 | 0 | 0 | 0 | 0 | 0 | 3,651 | 0.693975 |
148cb7b0b27629b649b5fbda0b3320b27df1be32 | 44,876 | py | Python | main.py | DeadCodeProductions/dead | a8e65abd76bf4a10268317bbf7a168c03c84e5f8 | [
"Apache-2.0"
] | 20 | 2022-02-25T10:49:53.000Z | 2022-03-17T09:17:27.000Z | main.py | DeadCodeProductions/dead | a8e65abd76bf4a10268317bbf7a168c03c84e5f8 | [
"Apache-2.0"
] | 3 | 2022-02-28T20:00:32.000Z | 2022-03-28T12:52:35.000Z | main.py | DeadCodeProductions/dead | a8e65abd76bf4a10268317bbf7a168c03c84e5f8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import copy
import hashlib
import logging
import os
import random
import re
import subprocess
import sys
import tempfile
import time
from multiprocessing import Pool
from pathlib import Path
from typing import Any, Dict, Optional, cast
import requests
import bisector
import builder
import checker
import database
import generator
import init
import parsers
import patchdatabase
import preprocessing
import reducer
import repository
import utils
def get_llvm_github_commit_author(rev: str) -> Optional[str]:
    """Scrape the GitHub commit page of LLVM revision *rev* and return the
    GitHub user name of its author, or None if none can be found."""
    url = "https://github.com/llvm/llvm-project/commit/" + rev
    html = requests.get(url).content.decode()
    author_pattern = re.compile(r'.*\/llvm\/llvm-project\/commits\?author=(.*)".*')
    for line in html.split("\n"):
        match = author_pattern.match(line.strip())
        if match:
            return match.group(1)
    return None
def get_all_bisections(ddb: database.CaseDatabase) -> list[str]:
    """Return every distinct bisection commit recorded in the case database."""
    rows = ddb.con.execute("select distinct bisection from cases")
    return [row[0] for row in rows]
def _run() -> None:
    """Endless generate(-bisect)(-reduce) pipeline.

    Generates interesting cases for the configured scenario, optionally
    bisects and reduces them, and records them (with timings) either in
    the case database or as tar files in args.output_directory. Stops
    after args.amount cases when that is non-zero.
    """
    scenario = utils.get_scenario(config, args)
    counter = 0
    output_directory = (
        Path(args.output_directory).absolute() if args.output_directory else None
    )
    parallel_generator = (
        gnrtr.parallel_interesting_case(config, scenario, args.cores, start_stop=True)
        if args.parallel_generation
        else None
    )
    pipeline_components = (
        # BUGFIX: parenthesize the conditional — previously the ternary bound
        # tighter than '+', so the single-threaded label lost its
        # "Generator<" prefix and printed just "single>".
        ["Generator<" + ("parallel>" if args.parallel_generation else "single>")]
        + (["Bisector"] if args.bisector else [])
        + (
            ["Reducer<Only New>"]
            if args.reducer is None
            else (["Reducer<Always>"] if args.reducer == True else [])
        )
    )
    print("Pipeline:", " -> ".join(pipeline_components), file=sys.stderr)
    last_update_time = time.time()
    while True:
        if args.amount and args.amount != 0:
            if counter >= args.amount:
                break
        if args.update_trunk_after_X_hours is not None:
            if (
                time.time() - last_update_time
            ) / 3600 > args.update_trunk_after_X_hours:
                logging.info("Updating repositories...")
                last_update_time = time.time()
                # Group target-setting indices by compiler so each repo is
                # pulled only once.
                known: Dict[str, list[int]] = dict()
                for i, s in enumerate(scenario.target_settings):
                    cname = s.compiler_config.name
                    if cname not in known:
                        known[cname] = []
                    known[cname].append(i)
                for cname, l in known.items():
                    repo = repository.Repo.repo_from_setting(
                        scenario.target_settings[l[0]]
                    )
                    old_trunk_commit = repo.rev_to_commit("trunk")
                    repo.pull()
                    new_trunk_commit = repo.rev_to_commit("trunk")
                    # Move settings that tracked the old trunk tip forward.
                    for i in l:
                        if scenario.target_settings[i].rev == old_trunk_commit:
                            scenario.target_settings[i].rev = new_trunk_commit
        # Time db values
        generator_time: Optional[float] = None
        generator_try_count: Optional[int] = None
        bisector_time: Optional[float] = None
        bisector_steps: Optional[int] = None
        reducer_time: Optional[float] = None
        if parallel_generator:
            case = next(parallel_generator)
        else:
            time_start_gen = time.perf_counter()
            case = gnrtr.generate_interesting_case(scenario)
            time_end_gen = time.perf_counter()
            generator_time = time_end_gen - time_start_gen
            generator_try_count = gnrtr.try_counter
        if args.bisector:
            try:
                time_start_bisector = time.perf_counter()
                bisect_worked = bsctr.bisect_case(case)
                time_end_bisector = time.perf_counter()
                bisector_time = time_end_bisector - time_start_bisector
                bisector_steps = bsctr.steps
                if not bisect_worked:
                    continue
            except bisector.BisectionException as e:
                print(f"BisectionException: '{e}'", file=sys.stderr)
                continue
            except AssertionError as e:
                print(f"AssertionError: '{e}'", file=sys.stderr)
                continue
            except builder.BuildException as e:
                print(f"BuildException: '{e}'", file=sys.stderr)
                continue
        if args.reducer is not False:
            # Reduce always (--reducer) or only when the bisection commit is
            # already known from an earlier case ("Only New" mode).
            if (
                args.reducer
                or case.bisection
                and case.bisection in get_all_bisections(ddb)
            ):
                try:
                    time_start_reducer = time.perf_counter()
                    worked = rdcr.reduce_case(case)
                    time_end_reducer = time.perf_counter()
                    reducer_time = time_end_reducer - time_start_reducer
                except builder.BuildException as e:
                    print(f"BuildException: {e}")
                    continue
        if not output_directory:
            case_id = ddb.record_case(case)
            ddb.record_timing(
                case_id,
                generator_time,
                generator_try_count,
                bisector_time,
                bisector_steps,
                reducer_time,
            )
        else:
            h = abs(hash(str(case)))
            path = output_directory / Path(f"case_{counter:08}-{h:019}.tar")
            # BUGFIX: was a plain string missing the f-prefix, so {path}
            # was logged literally instead of being interpolated.
            logging.debug(f"Writing case to {path}...")
            case.to_file(path)
        counter += 1
def _absorb() -> None:
    """Absorb one case tar file, or every *.tar in a directory, into the
    case database, printing a live progress counter for directories."""

    def read_into_db(file: Path) -> None:
        # Why another db here?
        # https://docs.python.org/3/library/sqlite3.html#sqlite3.threadsafety
        # “Threads may share the module, but not connections.”
        # Of course we are using multiple processes here, but the processes
        # are a copy of eachother and who knows how things are implemented,
        # so better be safe than sorry and create a new connection,
        # especially when the next sentence is:
        # "However, this may not always be true."
        # (They may just refer to the option of having sqlite compiled with
        # SQLITE_THREADSAFE=0)
        db = database.CaseDatabase(config, config.casedb)
        case = utils.Case.from_file(config, file)
        db.record_case(case)

    # Single-file mode: absorb it and stop.
    if Path(args.absorb_object).is_file():
        read_into_db(Path(args.absorb_object))
        exit(0)
    absorb_directory = Path(args.absorb_object).absolute()
    paths = [p for p in absorb_directory.iterdir() if p.match("*.tar")]
    len_paths = len(paths)
    len_len_paths = len(str(len_paths))
    print("Absorbing... ", end="", flush=True)
    status_str = ""
    counter = 0
    start_time = time.perf_counter()
    # Context-manage the pool so worker processes are reliably terminated
    # and joined (the original leaked the Pool).
    # NOTE(review): read_into_db is a closure, which only pickles under the
    # 'fork' start method — confirm if Windows/macOS spawn support is needed.
    with Pool(10) as pool:
        for _ in pool.imap_unordered(read_into_db, paths):
            counter += 1
            # Erase the previous status string, then print the updated one.
            print("\b" * len(status_str), end="", flush=True)
            delta_t = time.perf_counter() - start_time
            status_str = f"{{: >{len_len_paths}}}/{len_paths} {delta_t:.2f}s".format(
                counter
            )
            print(status_str, end="", flush=True)
    print("")
def _tofile() -> None:
    """Export the case with ID args.case_id as ./case_<id>.tar, or exit
    with status 1 when no such case exists."""
    case = ddb.get_case_from_id(args.case_id)
    if case is None:
        print(f"Found no case for ID {args.case_id}")
        exit(1)
    print(f"Saving case to ./case_{args.case_id}.tar")
    case.to_file(Path(f"./case_{args.case_id}.tar"))
def _rereduce() -> None:
    """Re-run the reducer on the code in args.code_path against the
    settings of case args.case_id and print the reduced code."""
    with open(args.code_path, "r") as f:
        code_to_reduce = f.read()
    case = ddb.get_case_from_id_or_die(args.case_id)
    print(f"Re-reducing code with respect to Case {args.case_id}", file=sys.stderr)
    reduced = rdcr.reduce_code(
        code_to_reduce,
        case.marker,
        case.bad_setting,
        case.good_settings,
        preprocess=False,
    )
    print(reduced)
def _report() -> None:
    """Produce a bug report for case args.case_id.

    Ensures the case is bisected and reduced (running those steps if
    needed), sanity-checks interestingness (also against current main),
    verifies the bisection commit, then prints a GCC- or LLVM-flavoured
    report body to stdout and saves the reduced source to case.txt.
    """
    pre_check_case = ddb.get_case_from_id(args.case_id)
    if not pre_check_case:
        print("No case with this ID.", file=sys.stderr)
        exit(1)
    else:
        case = pre_check_case
    # Bisect on demand and persist the result plus the new timing.
    if not case.bisection:
        print("Case is not bisected. Starting bisection...", file=sys.stderr)
        start_time = time.perf_counter()
        worked = bsctr.bisect_case(case)
        bisector_time = time.perf_counter() - start_time
        if worked:
            ddb.update_case(args.case_id, case)
            g_time, gtc, b_time, b_steps, r_time = ddb.get_timing_from_id(args.case_id)
            b_time = bisector_time
            b_steps = bsctr.steps
            ddb.record_timing(args.case_id, g_time, gtc, b_time, b_steps, r_time)
        else:
            print("Could not bisect case. Aborting...", file=sys.stderr)
            exit(1)
    # check for reduced and massaged code
    if not case.reduced_code:
        print("Case is not reduced. Starting reduction...", file=sys.stderr)
        if rdcr.reduce_case(case):
            ddb.update_case(args.case_id, case)
        else:
            print("Could not reduce case. Aborting...", file=sys.stderr)
            exit(1)
    # Prefer hand-massaged code over the machine-reduced one when present.
    massaged_code, _, _ = ddb.get_report_info_from_id(args.case_id)
    if massaged_code:
        case.reduced_code = massaged_code
    bad_setting = case.bad_setting
    bad_repo = repository.Repo(
        bad_setting.compiler_config.repo, bad_setting.compiler_config.main_branch
    )
    # GCC reports use plain text; LLVM reports use GitHub markdown.
    is_gcc: bool = bad_setting.compiler_config.name == "gcc"
    # Last sanity check
    cpy = copy.deepcopy(case)
    cpy.code = cast(str, case.reduced_code)
    print("Normal interestingness test...", end="", file=sys.stderr, flush=True)
    if not chkr.is_interesting(cpy, preprocess=False):
        print("\nCase is not interesting! Aborting...", file=sys.stderr)
        exit(1)
    else:
        print("OK", file=sys.stderr)
    # Check against newest upstream
    if args.pull:
        print("Pulling Repo...", file=sys.stderr)
        bad_repo.pull()
    print("Interestingness test against main...", end="", file=sys.stderr)
    cpy.bad_setting.rev = bad_repo.rev_to_commit(f"{bad_repo.main_branch}")
    if not chkr.is_interesting(cpy, preprocess=False):
        print(
            "\nCase is not interesting on main! Might be fixed. Stopping...",
            file=sys.stderr,
        )
        exit(0)
    else:
        print("OK", file=sys.stderr)
        # Use newest main in report
        case.bad_setting.rev = cpy.bad_setting.rev
    # Check if bisection commit is what it should be
    print("Checking bisection commit...", file=sys.stderr)
    marker_prefix = utils.get_marker_prefix(case.marker)
    bisection_setting = copy.deepcopy(cpy.bad_setting)
    bisection_setting.rev = cast(str, cpy.bisection)
    prebisection_setting = copy.deepcopy(bisection_setting)
    repo = repository.Repo.repo_from_setting(bisection_setting)
    prebisection_setting.rev = repo.rev_to_commit(f"{case.bisection}~")
    bis_set = builder.find_alive_markers(
        cpy.code, bisection_setting, marker_prefix, bldr
    )
    rebis_set = builder.find_alive_markers(
        cpy.code, prebisection_setting, marker_prefix, bldr
    )
    # At the bisection commit the marker must be alive; one commit earlier
    # it must not be.
    if not cpy.marker in bis_set or cpy.marker in rebis_set:
        print("Bisection commit is not correct! Aborting...", file=sys.stderr)
        exit(1)
    # Choose same opt level and newest version
    possible_good_compiler = [
        gs for gs in case.good_settings if gs.opt_level == bad_setting.opt_level
    ]
    good_setting = utils.get_latest_compiler_setting_from_list(
        bad_repo, possible_good_compiler
    )
    # Replace markers
    source = cpy.code.replace(cpy.marker, "foo").replace(
        utils.get_marker_prefix(cpy.marker), "bar"
    )
    bad_setting_tag = bad_setting.rev + " (trunk)"
    bad_setting_str = f"{bad_setting.compiler_config.name}-{bad_setting_tag} -O{bad_setting.opt_level}"
    tmp = bad_repo.rev_to_tag(good_setting.rev)
    if not tmp:
        good_setting_tag = good_setting.rev
    else:
        good_setting_tag = tmp
    good_setting_str = f"{good_setting.compiler_config.name}-{good_setting_tag} -O{good_setting.opt_level}"

    # --- Formatting helpers: plain text for GCC, markdown for LLVM ---
    def to_collapsed(
        s: str, is_gcc: bool, summary: str = "Output", open: bool = False
    ) -> str:
        # GCC Bugzilla gets marked plain text; GitHub gets a <details> fold.
        if is_gcc:
            s = (
                "--------- OUTPUT ---------\n"
                + s
                + "\n---------- END OUTPUT ---------\n"
            )
        else:
            sopen = "open" if open else ""
            s = (
                f"<details {sopen}><summary>{summary}</summary><p>\n"
                + s
                + "\n</p></details>"
            )
        return s

    def to_code(code: str, is_gcc: bool, stype: str = "") -> str:
        # Markdown fenced code block for LLVM, raw text for GCC.
        if not is_gcc:
            return f"\n```{stype}\n" + code.rstrip() + "\n```"
        return code

    def print_cody_str(s: str, is_gcc: bool) -> None:
        # NOTE(review): unlike to_cody_str below, this always adds backticks
        # and ignores is_gcc — confirm whether that is intended.
        s = "`" + s + "`"
        print(s)

    def to_cody_str(s: str, is_gcc: bool) -> str:
        if not is_gcc:
            s = "`" + s + "`"
        return s

    def replace_rand(code: str) -> str:
        # Replace .file with case.c
        ex = re.compile(r"\t\.file\t(\".*\")")
        m = ex.search(code)
        if m:
            res = m.group(1)
            return code.replace(res, '"case.c"')
        return code

    def replace_file_name_IR(ir: str) -> str:
        # Normalize the first two IR lines (ModuleID / source_filename).
        head = "; ModuleID = 'case.c'\n" + 'source_filename = "case.c"\n'
        tail = ir.split("\n")[2:]
        ir = head + "\n".join(tail)
        return ir

    def keep_only_main(code: str) -> str:
        # Trim assembly down to the span from "main:" to its .cfi_endproc.
        lines = list(code.split("\n"))
        first = 0
        for i, line in enumerate(lines):
            if "main:" in line:
                first = i
                break
        last = first + 1
        ex = re.compile(".*.cfi_endproc")
        for i, line in enumerate(lines[last:], start=last):
            if ex.match(line):
                last = i
                break
        return "\n".join(lines[first:last])

    def prep_asm(asm: str, is_gcc: bool) -> str:
        asm = replace_rand(asm)
        asm = keep_only_main(asm)
        asm = to_code(asm, is_gcc, "asm")
        asm = to_collapsed(asm, is_gcc, summary="Reduced assembly")
        return asm

    def prep_IR(ir: str) -> str:
        ir = replace_file_name_IR(ir)
        ir = to_code(ir, False, "ll")
        ir = to_collapsed(ir, False, summary="Emitted IR")
        return ir

    # --- Report header ---
    print(
        f"Dead Code Elimination Regression at -O{bad_setting.opt_level} (trunk vs. {good_setting_tag.split('-')[-1]}) {args.case_id}"
    )
    print("---------------")
    print(to_cody_str(f"cat case.c #{args.case_id}", is_gcc))
    print(to_code(source, is_gcc, "c"))
    print(
        f"`{bad_setting_str}` can not eliminate `foo` but `{good_setting_str}` can.\n"
    )
    # Compile
    if is_gcc:
        # GCC branch: plain-text report with assembly and build info.
        case.bad_setting.add_flag("-emit-llvm")
        good_setting.add_flag("-emit-llvm")
        asm_bad = builder.get_asm_str(source, case.bad_setting, bldr)
        asm_good = builder.get_asm_str(source, good_setting, bldr)
        print_cody_str(f"{bad_setting_str} -S -o /dev/stdout case.c", is_gcc)
        print(prep_asm(asm_bad, is_gcc))
        print()
        print_cody_str(f"{good_setting_str} -S -o /dev/stdout case.c", is_gcc)
        print(prep_asm(asm_good, is_gcc))
        print()
        print(
            "Bisects to: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h="
            + str(case.bisection)
        )
        print()
        print("----- Build information -----")
        print(f"----- {bad_setting_tag}")
        print(
            builder.get_verbose_compiler_info(bad_setting, bldr).split("lto-wrapper\n")[
                -1
            ]
        )
        print(f"\n----- {good_setting_tag}")
        print(
            builder.get_verbose_compiler_info(good_setting, bldr).split(
                "lto-wrapper\n"
            )[-1]
        )
    else:
        # LLVM branch: markdown report with IR, assembly and bisection info.
        print("Target: `x86_64-unknown-linux-gnu`")
        ir_bad = builder.get_llvm_IR(source, case.bad_setting, bldr)
        ir_good = builder.get_llvm_IR(source, good_setting, bldr)
        asm_bad = builder.get_asm_str(source, case.bad_setting, bldr)
        asm_good = builder.get_asm_str(source, good_setting, bldr)
        print("\n------------------------------------------------\n")
        print_cody_str(
            f"{bad_setting_str} [-emit-llvm] -S -o /dev/stdout case.c", is_gcc
        )
        print(prep_IR(ir_bad))
        print()
        print(prep_asm(asm_bad, is_gcc))
        print()
        print("\n------------------------------------------------\n")
        print_cody_str(
            f"{good_setting_str} [-emit-llvm] -S -o /dev/stdout case.c", is_gcc
        )
        print()
        print(prep_IR(ir_good))
        print()
        print(prep_asm(asm_good, is_gcc))
        print("\n------------------------------------------------\n")
        print("### Bisection")
        bisection_setting = copy.deepcopy(case.bad_setting)
        bisection_setting.rev = cast(str, case.bisection)
        print(f"Bisected to: {case.bisection}")
        author = get_llvm_github_commit_author(cast(str, case.bisection))
        if author:
            print(f"Committed by: @{author}")
        print("\n------------------------------------------------\n")
        bisection_asm = builder.get_asm_str(source, bisection_setting, bldr)
        bisection_ir = builder.get_llvm_IR(source, bisection_setting, bldr)
        print(
            to_cody_str(
                f"{bisection_setting.report_string()} [-emit-llvm] -S -o /dev/stdout case.c",
                is_gcc,
            )
        )
        print(prep_IR(bisection_ir))
        print()
        print(prep_asm(bisection_asm, is_gcc))
        print("\n------------------------------------------------\n")
        # Also show the commit just before the bisection point for contrast.
        prebisection_setting = copy.deepcopy(bisection_setting)
        prebisection_setting.rev = bad_repo.rev_to_commit(f"{bisection_setting.rev}~")
        print(f"Previous commit: {prebisection_setting.rev}")
        print(
            "\n"
            + to_cody_str(
                f"{prebisection_setting.report_string()} [-emit-llvm] -S -o /dev/stdout case.c",
                is_gcc,
            )
        )
        prebisection_asm = builder.get_asm_str(source, prebisection_setting, bldr)
        prebisection_ir = builder.get_llvm_IR(source, prebisection_setting, bldr)
        print()
        print(prep_IR(prebisection_ir))
        print()
        print(prep_asm(prebisection_asm, is_gcc))
    # Save the (marker-replaced) source alongside the printed report.
    with open("case.txt", "w") as f:
        f.write(source)
    print("Saved case.txt...", file=sys.stderr)
def _diagnose() -> None:
    """Run a battery of diagnostic checks on one case and print a table.

    The case comes from the DB (args.case_id) or a tar file (args.file).
    Checks cover the original, preprocessed, reduced and massaged code,
    plus sanity checks and a verification of the bisection commit.
    """
    width = 50

    def ok_fail(b: bool) -> str:
        if b:
            return "OK"
        else:
            return "FAIL"

    def nice_print(name: str, value: str) -> None:
        # Left-align the label, dot-padded to `width` columns.
        print(("{:.<" f"{width}}}").format(name), value)

    if args.case_id:
        case = ddb.get_case_from_id_or_die(args.case_id)
    else:
        case = utils.Case.from_file(config, Path(args.file))
    repo = repository.Repo(
        case.bad_setting.compiler_config.repo,
        case.bad_setting.compiler_config.main_branch,
    )

    def sanitize_values(
        config: utils.NestedNamespace,
        case: utils.Case,
        prefix: str,
        chkr: checker.Checker,
    ) -> None:
        # Run the sanitizer-style checks on a variant of the case with
        # empty marker bodies, written to a temporary .c file.
        # NOTE: "_emtpy" is the upstream (misspelled) method name.
        empty_body_code = chkr._emtpy_marker_code_str(case)
        with tempfile.NamedTemporaryFile(suffix=".c") as tf:
            with open(tf.name, "w") as f:
                f.write(empty_body_code)
            res_comp_warnings = checker.check_compiler_warnings(
                config.gcc.sane_version,
                config.llvm.sane_version,
                Path(tf.name),
                case.bad_setting.get_flag_str(),
                10,
            )
            nice_print(
                prefix + "Sanity: compiler warnings",
                ok_fail(res_comp_warnings),
            )
            res_use_ub_san = checker.use_ub_sanitizers(
                config.llvm.sane_version,
                Path(tf.name),
                case.bad_setting.get_flag_str(),
                10,
                10,
            )
            nice_print(
                prefix + "Sanity: undefined behaviour", ok_fail(res_use_ub_san)
            )
            res_ccomp = checker.verify_with_ccomp(
                config.ccomp,
                Path(tf.name),
                case.bad_setting.get_flag_str(),
                10,
            )
            nice_print(
                prefix + "Sanity: ccomp",
                ok_fail(res_ccomp),
            )

    def checks(case: utils.Case, prefix: str) -> None:
        # Core interestingness checks; the sanitizer checks only run when
        # the empty-bodies check fails.
        nice_print(
            prefix + "Check marker", ok_fail(chkr.is_interesting_wrt_marker(case))
        )
        nice_print(prefix + "Check CCC", ok_fail(chkr.is_interesting_wrt_ccc(case)))
        nice_print(
            prefix + "Check static. annotated",
            ok_fail(chkr.is_interesting_with_static_globals(case)),
        )
        res_empty = chkr.is_interesting_with_empty_marker_bodies(case)
        nice_print(prefix + "Check empty bodies", ok_fail(res_empty))
        if not res_empty:
            sanitize_values(config, case, prefix, chkr)

    print(("{:=^" f"{width}}}").format(" Values "))
    nice_print("Marker", case.marker)
    # Output label typo ("lenght") kept: it is a runtime string.
    nice_print("Code lenght", str(len(case.code)))
    nice_print("Bad Setting", str(case.bad_setting))
    same_opt = [
        gs for gs in case.good_settings if gs.opt_level == case.bad_setting.opt_level
    ]
    nice_print(
        "Newest Good Setting",
        str(utils.get_latest_compiler_setting_from_list(repo, same_opt)),
    )
    # Checks on the original code...
    checks(case, "")
    # ...then on the preprocessed code, when preprocessing succeeds.
    cpy = copy.deepcopy(case)
    if not (
        code_pp := preprocessing.preprocess_csmith_code(
            case.code, utils.get_marker_prefix(case.marker), case.bad_setting, bldr
        )
    ):
        print("Code could not be preprocessed. Skipping perprocessed checks")
    else:
        cpy.code = code_pp
        checks(cpy, "PP: ")
    # Reduced and (DB-only) massaged variants.
    if case.reduced_code:
        cpy = copy.deepcopy(case)
        cpy.code = case.reduced_code
        checks(cpy, "Reduced: ")
    if args.case_id:
        massaged_code, _, _ = ddb.get_report_info_from_id(args.case_id)
        if massaged_code:
            cpy.code = massaged_code
            checks(cpy, "Massaged: ")
    if case.bisection:
        # Verify the bisection: interesting at the bisection commit,
        # not interesting one commit earlier — for original and reduced code.
        cpy = copy.deepcopy(case)
        nice_print("Bisection", case.bisection)
        cpy.bad_setting.rev = case.bisection
        prev_rev = repo.rev_to_commit(case.bisection + "~")
        nice_print("Bisection prev commit", prev_rev)
        bis_res_og = chkr.is_interesting(cpy, preprocess=False)
        cpy.bad_setting.rev = prev_rev
        bis_prev_res_og = chkr.is_interesting(cpy, preprocess=False)
        nice_print(
            "Bisection test original code", ok_fail(bis_res_og and not bis_prev_res_og)
        )
        cpy = copy.deepcopy(case)
        if cpy.reduced_code:
            cpy.code = cpy.reduced_code
            cpy.bad_setting.rev = case.bisection
            bis_res = chkr.is_interesting(cpy, preprocess=False)
            cpy.bad_setting.rev = prev_rev
            bis_prev_res = chkr.is_interesting(cpy, preprocess=False)
            nice_print(
                "Bisection test reduced code", ok_fail(bis_res and not bis_prev_res)
            )
    if case.reduced_code:
        print(case.reduced_code)
def _check_reduced() -> None:
    """Verify a hand-edited reduction from ``args.code_path`` against the
    stored case: the marker must survive the bad setting, be eliminated by
    every good setting, and the case must still be interesting overall.
    """

    def verdict(passed: bool) -> str:
        return "OK" if passed else "FAIL"

    def report(label: str, value: str) -> None:
        # Pad the label with dots to a fixed width so values line up.
        print(f"{label:.<100}", value)

    with open(args.code_path, "r") as f:
        new_code = f.read()

    case = ddb.get_case_from_id_or_die(args.case_id)
    prefix = utils.get_marker_prefix(case.marker)

    # The marker has to be among the alive markers for the bad setting...
    alive_in_bad = builder.find_alive_markers(new_code, case.bad_setting, prefix, bldr)
    report(f"Bad {case.bad_setting}", verdict(case.marker in alive_in_bad))
    # ...and must NOT be alive for any of the good settings.
    for good_setting in case.good_settings:
        alive_in_good = builder.find_alive_markers(new_code, good_setting, prefix, bldr)
        report(f"Good {good_setting}", verdict(case.marker not in alive_in_good))

    case.code = new_code
    case.reduced_code = new_code
    report("Check", verdict(chkr.is_interesting(case, preprocess=False)))

    # Useful when working with watch -n 0 to see that something happened
    print(random.randint(0, 1000))
def _cache() -> None:
    """Handle the `cache` subcommand.

    `clean` removes cache entries that were never finished (no DONE stamp);
    `stats` prints how many cached compilers exist, split by clang/gcc.
    """
    if args.what == "clean":
        print("Cleaning...")
        for c in Path(config.cachedir).iterdir():
            # A build directory is only complete once its DONE file exists.
            if not (c / "DONE").exists():
                try:
                    os.rmdir(c)
                except FileNotFoundError:
                    print(c, "spooky. It just disappeared...")
                except OSError:
                    print(c, "is not empty but also not done!")
        print("Done")
    elif args.what == "stats":
        count_gcc = 0
        count_clang = 0
        # Directory names start with the compiler name (e.g. "clang-<rev>").
        for c in Path(config.cachedir).iterdir():
            if c.name.startswith("clang"):
                count_clang += 1
            else:
                count_gcc += 1

        tot = count_gcc + count_clang
        print("Amount compilers:", tot)
        # Bug fix: an empty cache used to crash with ZeroDivisionError here.
        if tot == 0:
            return
        print("Amount clang: {} {:.2f}%".format(count_clang, count_clang / tot * 100))
        print("Amount GCC: {} {:.2f}%".format(count_gcc, count_gcc / tot * 100))
def _asm() -> None:
    """Dump the assembly of a case's code to `<name>.s` files in the CWD:
    bad/good settings, plus reduced and bisection variants when available.
    """

    def dump(stem: str, asm: str) -> None:
        utils.save_to_file(Path(stem + ".s"), asm)
        print(f"Saving {stem + '.s'}...")

    case = ddb.get_case_from_id_or_die(args.case_id)
    bad_repo = repository.Repo(
        case.bad_setting.compiler_config.repo,
        case.bad_setting.compiler_config.main_branch,
    )
    # Pick the newest good setting that uses the same optimization level.
    same_opt_settings = [
        gs for gs in case.good_settings if gs.opt_level == case.bad_setting.opt_level
    ]
    good_setting = utils.get_latest_compiler_setting_from_list(bad_repo, same_opt_settings)

    asm_bad = builder.get_asm_str(case.code, case.bad_setting, bldr)
    asm_good = builder.get_asm_str(case.code, good_setting, bldr)
    dump("asmbad", asm_bad)
    dump("asmgood", asm_good)

    if case.reduced_code:
        reduced_bad = builder.get_asm_str(case.reduced_code, case.bad_setting, bldr)
        reduced_good = builder.get_asm_str(case.reduced_code, good_setting, bldr)
        dump("reducedasmbad", reduced_bad)
        dump("reducedasmgood", reduced_good)

    if case.bisection:
        # Same bad setting, but at the bisection commit.
        bisection_setting = copy.deepcopy(case.bad_setting)
        bisection_setting.rev = case.bisection
        dump("asmbisect", builder.get_asm_str(case.code, bisection_setting, bldr))
        if case.reduced_code:
            dump(
                "reducedasmbisect",
                builder.get_asm_str(case.reduced_code, bisection_setting, bldr),
            )
    print(case.marker)
def _get() -> None:
    """Handle the `get` subcommand: print one stored attribute of a case.

    `args.what` selects the attribute: ocode/rcode/bisection come from the
    case itself, link/fixed/mcode from the associated bug report info.
    Code is printed with end="" — presumably because the stored code already
    ends with a newline (TODO confirm).
    """
    case_id: int = int(args.case_id)
    if args.what in ["ocode", "rcode", "bisection"]:
        # Consistency fix: use the int-converted id like the other branch
        # (previously passed the raw args.case_id string).
        case = ddb.get_case_from_id_or_die(case_id)
        if args.what == "ocode":
            print(case.code, end="")
            return
        elif args.what == "rcode":
            print(case.reduced_code, end="")
            return
        elif args.what == "bisection":
            print(case.bisection, end="")
            return
    else:
        mcode, link, fixed = ddb.get_report_info_from_id(case_id)
        if args.what == "link":
            print(link)
            return
        elif args.what == "fixed":
            print(fixed)
            return
        elif args.what == "mcode":
            print(mcode, end="")
            return

    logging.warning(
        "Whoops, this should not have"
        " happened because the parser forces "
        "`what` to only allow some strings."
    )
    return
def _set() -> None:
    """Handle the `set` subcommand: overwrite one stored attribute of a case.

    `args.what` selects the attribute (ocode/rcode/bisection/link/fixed/mcode);
    `args.var` carries the new value — a file path for code attributes, a rev
    or URL otherwise. The literal string "null" clears the stored value where
    supported. Exits with status 1 when a sanity check rejects the new value.
    """
    case_id: int = int(args.case_id)
    case = ddb.get_case_from_id_or_die(case_id)
    mcode, link, fixed = ddb.get_report_info_from_id(case_id)
    repo = repository.Repo(
        case.bad_setting.compiler_config.repo,
        case.bad_setting.compiler_config.main_branch,
    )
    if args.what == "ocode":
        # Replace the original code — only accepted if the case stays interesting.
        with open(args.var, "r") as f:
            new_code = f.read()
        case.code = new_code
        if chkr.is_interesting(case):
            ddb.update_case(case_id, case)
        else:
            logging.critical(
                "The provided code is not interesting wrt to the case. Will not save!"
            )
            exit(1)
        return
    elif args.what == "rcode":
        if args.var == "null":
            print("Old reduced_code:")
            print(case.reduced_code)
            case.reduced_code = None
            ddb.update_case(case_id, case)
            return
        with open(args.var, "r") as f:
            rcode = f.read()
        # Temporarily swap the reduced code in as the main code to test
        # interestingness, then restore the original before saving.
        old_code = case.code
        case.code = rcode
        if chkr.is_interesting(case):
            case.code = old_code
            case.reduced_code = rcode
            ddb.update_case(case_id, case)
        else:
            logging.critical(
                "The provided code is not interesting wrt to the case. Will not save!"
            )
            exit(1)
        return
    elif args.what == "bisection":
        if args.var == "null":
            print("Old bisection:", case.bisection)
            case.bisection = None
            ddb.update_case(case_id, case)
            return
        # Also acts as check that the given rev is ok
        rev = repo.rev_to_commit(args.var)
        # Just in case someone accidentally overrides things...
        logging.info(f"Previous bisection for case {case_id}: {case.bisection}")
        case.bisection = rev
        ddb.update_case(case_id, case)
        return
    elif args.what == "link":
        if args.var == "null":
            print("Old link:", link)
            ddb.record_reported_case(case_id, mcode, None, fixed)
            return
        tmp: str = args.var
        tmp = tmp.strip()
        ddb.record_reported_case(case_id, mcode, tmp, fixed)
        return
    elif args.what == "fixed":
        if args.var == "null":
            print("Old fixed:", fixed)
            ddb.record_reported_case(case_id, mcode, link, None)
            return
        rev = repo.rev_to_commit(args.var)
        case.bad_setting.rev = rev
        # A fixing revision must make the case UNinteresting.
        if not chkr.is_interesting(case):
            ddb.record_reported_case(case_id, mcode, link, rev)
            print("Fixed")
        else:
            logging.critical(f"Case {case_id} was not fixed by {args.var}! Not saving!")
            exit(1)
        return
    elif args.what == "mcode":
        if args.var == "null":
            print("Old massaged code:")
            print(mcode)
            ddb.record_reported_case(case_id, None, link, fixed)
            return
        if not case.bisection:
            logging.fatal(
                "Can not save massaged code to a case that is not bisected. Bad things could happen. Stopping..."
            )
            exit(1)
        with open(args.var, "r") as f:
            new_mcode = f.read()
        # Massaged code must be interesting AND bisect to the same commit as
        # the original code, otherwise it describes a different bug.
        old_bisection = case.bisection
        case.code = new_mcode
        if chkr.is_interesting(case):
            print("Checking bisection...")
            if not bsctr.bisect_case(case, force=True):
                logging.critical("Checking bisection failed...")
                exit(1)
            if case.bisection != old_bisection:
                logging.critical(
                    "Bisection of provided massaged code does not match the original bisection!"
                )
                exit(1)
            ddb.record_reported_case(case_id, new_mcode, link, fixed)
        else:
            logging.critical("The provided massaged code is not interesting!")
            exit(1)
        return

    logging.warning(
        "Whoops, this should not have"
        " happened because the parser forces "
        "`what` to only allow some strings."
    )
    return
def _build() -> None:
    """Build the requested revisions of the selected compiler project,
    optionally applying additional patch files, and print each result.
    """
    compiler_config = utils.get_compiler_config(config, args.project)
    # Patch paths are made absolute so the builder can apply them from
    # whatever directory it works in.
    patches: list[Path] = (
        [Path(p).absolute() for p in args.add_patches] if args.add_patches else []
    )
    for revision in args.rev:
        result = bldr.build(
            compiler_config,
            revision,
            additional_patches=patches,
            force=args.force,
        )
        print(result)
def _reduce() -> None:
    """Run the reducer over each given case id, persisting updated cases and
    (when real work happened) the reduction timing.
    """
    total = len(args.case_id)
    for done, case_id in enumerate(args.case_id):
        print(f"Reducing {case_id}. Done {done}/{total}", file=sys.stderr)
        maybe_case = ddb.get_case_from_id(case_id)
        if not maybe_case:
            if total == 1:
                print(f"Case ID {case_id} is not known. Aborting...", file=sys.stderr)
                exit(1)
            print(f"Case ID {case_id} is not known. Continuing...", file=sys.stderr)
            continue
        case = maybe_case

        start_time = time.perf_counter()
        if rdcr.reduce_case(case, force=args.force):
            ddb.update_case(case_id, case)
            elapsed = time.perf_counter() - start_time
            # If the reduction takes less than 5 seconds, we can assume the
            # reduction was already done; keep the stored timing in that case.
            if elapsed > 5.0:
                gtime, gtc, b_time, b_steps, _ = ddb.get_timing_from_id(case_id)
                ddb.record_timing(case_id, gtime, gtc, b_time, b_steps, elapsed)
        else:
            print(f"{case_id} failed...", file=sys.stderr)
    print("Done")
def _bisect() -> None:
    """Run the bisector over each given case id, persisting updated cases and
    (when real work happened) the bisection timing and step count.
    """
    total = len(args.case_id)
    for done, case_id in enumerate(args.case_id):
        print(f"Bisecting {case_id}. Done {done}/{total}", file=sys.stderr)
        maybe_case = ddb.get_case_from_id(case_id)
        if not maybe_case:
            if total == 1:
                print(f"Case ID {case_id} is not known. Aborting...", file=sys.stderr)
                exit(1)
            print(f"Case ID {case_id} is not known. Continuing...", file=sys.stderr)
            continue
        case = maybe_case

        start_time = time.perf_counter()
        if bsctr.bisect_case(case, force=args.force):
            ddb.update_case(case_id, case)
            elapsed = time.perf_counter() - start_time
            # If the bisection took less than 5 seconds we can assume it was
            # already bisected; keep the stored timing in that case.
            if elapsed > 5.0:
                gtime, gtc, _, _, rtime = ddb.get_timing_from_id(case_id)
                ddb.record_timing(
                    case_id, gtime, gtc, elapsed, bsctr.steps, rtime
                )
        else:
            print(f"{case_id} failed...", file=sys.stderr)
    print("Done", file=sys.stderr)
def _edit() -> None:
    """Open the configuration file in the user's $EDITOR (fall back to nano)."""
    editor = os.environ.get("EDITOR")
    if editor is None:
        print("Did not find EDITOR variable. Using nano...", file=sys.stderr)
        subprocess.run(["nano", config.config_path])
    else:
        # EDITOR may contain arguments (e.g. "code -w"), so split on spaces.
        subprocess.run(editor.split(" ") + [config.config_path])
def _unreported() -> None:
    """Handle the `unreported` subcommand: list bisection commits that still
    have interesting cases without a bug report, grouped by bisection.

    The SQL query is assembled incrementally from the filter flags
    (--clang-only/--gcc-only, --OX-only, --good-version, --reduced/--not-reduced).
    """
    # Exclude bisections whose cases already have a report link or a fix.
    query = """
    WITH exclude_bisections AS (
        select distinct bisection from reported_cases join cases on cases.case_id = reported_cases.case_id
        where fixed_by is not NULL
        or bug_report_link is not NULL
    )
    """
    if args.good_version or args.OX_only:
        # CTE of cases having a good setting that matches the filters.
        query += f"""
        ,concrete_good AS (
            select case_id from good_settings join compiler_setting on good_settings.compiler_setting_id = compiler_setting.compiler_setting_id
            where 1
        """
        if args.good_version:
            gcc_repo = repository.Repo(config.gcc.repo, config.gcc.main_branch)
            llvm_repo = repository.Repo(config.llvm.repo, config.llvm.main_branch)
            # The given version may belong to either repo; try gcc first.
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            try:
                rev = gcc_repo.rev_to_commit(args.good_version)
            except Exception:
                rev = llvm_repo.rev_to_commit(args.good_version)
            query += f" and rev = '{rev}'"
        query += ")"

    # Main query: one row per bisection with a representative (minimal) case id.
    # opt_level/compiler filters below apply to the BAD setting's compiler.
    query += """
    select MIN(cases.case_id), bisection, count(bisection) as cnt from cases
    join compiler_setting on cases.bad_setting_id = compiler_setting.compiler_setting_id
    """
    if args.good_version:
        query += "\njoin concrete_good on cases.case_id = concrete_good.case_id\n"
    if args.reduced or args.not_reduced:
        query += "\nleft join reported_cases on cases.case_id = reported_cases.case_id"
    query += """
    where bisection not in exclude_bisections
    """
    if args.clang_only:
        query += "\nand compiler = 'clang'"
    elif args.gcc_only:
        query += "\nand compiler = 'gcc'"
    if args.OX_only:
        query += f" and opt_level = '{args.OX_only}'"
    query += "\ngroup by bisection"
    if args.reduced:
        query += "\n having reduced_code_sha1 is not null "
    elif args.not_reduced:
        query += "\n having reduced_code_sha1 is null "
    query += "\norder by cnt desc"

    res = ddb.con.execute(query).fetchall()
    if not res:
        return
    # Drop the NULL-bisection group if present (observed as the last row).
    if res[-1][1] is None:
        res = res[:-1]
    if args.id_only:
        for case_id, _, _ in res:
            print(case_id)
    else:
        print("{: <8} {: <45} {}".format("ID", "Bisection", "Count"))
        print("{:-<64}".format(""))
        for case_id, bisection, count in res:
            print("{: <8} {: <45} {}".format(case_id, bisection, count))
        print("{:-<64}".format(""))
        print("{: <8} {: <45} {}".format("ID", "Bisection", "Count"))
def _reported() -> None:
    """Handle the `reported` subcommand: list all cases that already have a
    bug report link.

    --id-only prints just the ids; --good-settings additionally joins and
    prints every good compiler setting of each case (one row per setting).
    """
    # Base CTE: reported cases (bug_report_link set) with their bad compiler.
    query = """
    with rep as (
        select cases.case_id, bisection, bug_report_link, compiler from cases
        join compiler_setting on bad_setting_id = compiler_setting_id
        left join reported_cases on cases.case_id = reported_cases.case_id
        where bug_report_link is not null order by cases.case_id
    )
    select rep.case_id, bisection, bug_report_link
    """
    if args.good_settings:
        # One result row per good setting of each reported case.
        query += """, compiler_setting.compiler, compiler_setting.rev, compiler_setting.opt_level
        from rep
        left join good_settings on rep.case_id = good_settings.case_id
        left join compiler_setting on good_settings.compiler_setting_id = compiler_setting.compiler_setting_id
        """
    else:
        query += " from rep"
    query += " where 1 "
    if args.clang_only or args.llvm_only:
        query += " and compiler = 'clang'"
    elif args.gcc_only:
        query += " and compiler = 'gcc'"
    query += " order by rep.case_id"

    if not (res := ddb.con.execute(query).fetchall()):
        return

    if args.id_only:
        # NOTE(review): with --good-settings each row has 6 columns, so this
        # 3-way unpack would fail if both flags are combined — verify the
        # parser forbids that combination.
        for case_id, _, _ in res:
            print(case_id)
    elif args.good_settings:
        gcc_repo = repository.Repo(config.gcc.repo, config.gcc.main_branch)
        llvm_repo = repository.Repo(config.llvm.repo, config.llvm.main_branch)
        print(
            "{: <8} {: <45} {: <45} {}".format(
                "ID", "Bisection", "Good Settings", "Link"
            )
        )
        last_case_id = -1
        for case_id, bisection, link, name, rev, opt_level in res:
            # Prefer a release tag over the raw commit hash when one exists.
            if name == "gcc":
                maybe_tag = gcc_repo.rev_to_tag(rev)
            else:
                maybe_tag = llvm_repo.rev_to_tag(rev)
            nice_rev = maybe_tag if maybe_tag else rev
            comp_str = f"{name}-{nice_rev} -O{opt_level}"
            if last_case_id != case_id:
                # First row of a new case: full line preceded by a separator.
                last_case_id = case_id
                print("{:-<155}".format(""))
                print(
                    "{: <8} {: <45} {: <45} {}".format(
                        case_id, bisection, comp_str, link
                    )
                )
            else:
                # Additional good settings of the same case: settings column only.
                print("{: <8} {: <45} {: <45} {}".format("", "", comp_str, ""))
        print("{:-<155}".format(""))
        print(
            "{: <8} {: <45} {: <45} {}".format(
                "ID", "Bisection", "Good Settings", "Link"
            )
        )
    else:
        print("{: <8} {: <45} {}".format("ID", "Bisection", "Link"))
        print("{:-<110}".format(""))
        for case_id, bisection, link in res:
            print("{: <8} {: <45} {}".format(case_id, bisection, link))
        print("{:-<110}".format(""))
        print("{: <8} {: <45} {}".format("ID", "Bisection", "Link"))
def _findby() -> None:
    """Handle the `findby` subcommand: print the ids of stored cases matching
    `args.var`, interpreted according to `args.what`:

    - link:  exact bug report link
    - fixed: exact fixing commit
    - case:  a case file — matched via code hashes, marker and bisection
    - code:  a code file — matched via code hashes
    """
    if args.what == "link":
        link_query = "SELECT case_id FROM reported_cases WHERE bug_report_link = ?"
        res = ddb.con.execute(link_query, (args.var.strip(),)).fetchall()
        for r in res:
            print(r[0])
        return
    elif args.what == "fixed":
        query = "SELECT case_id FROM reported_cases WHERE fixed_by = ?"
        res = ddb.con.execute(query, (args.var.strip(),)).fetchall()
        for r in res:
            print(r[0])
        return
    elif args.what == "case":
        case = utils.Case.from_file(config, Path(args.var))
        code_sha1 = hashlib.sha1(case.code.encode("utf-8")).hexdigest()
        # Try if we have any luck with just using code
        code_query = "SELECT cases.case_id FROM cases LEFT OUTER JOIN reported_cases ON cases.case_id = reported_cases.case_id WHERE code_sha1 = ? OR reduced_code_sha1 = ? OR massaged_code_sha1 = ?"
        res_ocode = ddb.con.execute(
            code_query, (code_sha1, code_sha1, code_sha1)
        ).fetchall()
        possible = {row[0] for row in res_ocode}

        if case.reduced_code:
            rcode_sha1 = hashlib.sha1(case.reduced_code.encode("utf-8")).hexdigest()
            res_rcode = ddb.con.execute(
                code_query, (rcode_sha1, rcode_sha1, rcode_sha1)
            ).fetchall()
            possible.update(row[0] for row in res_rcode)

        # Narrow by marker (and bisection, when the case has one).
        if case.bisection:
            other = ddb.con.execute(
                "SELECT case_id FROM cases WHERE marker = ? AND bisection = ?",
                (case.marker, case.bisection),
            ).fetchall()
        else:
            # Bug fix: the parameters must be a sequence. `(case.marker)` is
            # just a parenthesized string, so sqlite3 bound each *character*
            # as a separate parameter and raised for any multi-char marker.
            other = ddb.con.execute(
                "SELECT case_id FROM cases WHERE marker = ?", (case.marker,)
            ).fetchall()

        if possible:
            possible = possible.intersection(row[0] for row in other)
        else:
            possible = {row[0] for row in other}
        for case_id in possible:
            print(case_id)
        return
    elif args.what == "code":
        with open(args.var, "r") as f:
            code = f.read()
        code_sha1 = hashlib.sha1(code.encode("utf-8")).hexdigest()
        res = ddb.con.execute(
            "SELECT cases.case_id FROM cases LEFT OUTER JOIN reported_cases ON cases.case_id = reported_cases.case_id WHERE code_sha1 = ? OR reduced_code_sha1 = ? OR massaged_code_sha1 = ?",
            (code_sha1, code_sha1, code_sha1),
        ).fetchall()
        for row in res:
            print(row[0])
        return
    return
if __name__ == "__main__":
    # Parse config + CLI args, then wire up the shared components that the
    # subcommand handlers above access as module-level globals.
    config, args = utils.get_config_and_parser(parsers.main_parser())

    patchdb = patchdatabase.PatchDB(config.patchdb)
    bldr = builder.Builder(config, patchdb, args.cores)
    chkr = checker.Checker(config, bldr)
    gnrtr = generator.CSmithCaseGenerator(config, patchdb, args.cores)
    rdcr = reducer.Reducer(config, bldr)
    bsctr = bisector.Bisector(config, bldr, chkr)

    ddb = database.CaseDatabase(config, config.casedb)

    # Dispatch to the handler of the chosen subcommand.
    if args.sub == "run":
        _run()
    elif args.sub == "get":
        _get()
    elif args.sub == "set":
        _set()
    elif args.sub == "absorb":
        _absorb()
    elif args.sub == "tofile":
        _tofile()
    elif args.sub == "rereduce":
        _rereduce()
    elif args.sub == "report":
        _report()
    elif args.sub == "diagnose":
        if not args.case_id and not args.file:
            print("Need a file or a case id to work with", file=sys.stderr)
        # NOTE(review): execution still falls through to _diagnose() even when
        # neither input was given — confirm whether an exit(1) is missing here.
        _diagnose()
    elif args.sub == "checkreduced":
        _check_reduced()
    elif args.sub == "cache":
        _cache()
    elif args.sub == "asm":
        _asm()
    elif args.sub == "build":
        _build()
    elif args.sub == "reduce":
        _reduce()
    elif args.sub == "bisect":
        _bisect()
    elif args.sub == "edit":
        _edit()
    elif args.sub == "unreported":
        _unreported()
    elif args.sub == "reported":
        _reported()
    elif args.sub == "findby":
        _findby()
    elif args.sub == "init":
        init.main()

    # Make sure no generator worker processes outlive the main program.
    gnrtr.terminate_processes()
| 34.022745 | 198 | 0.572867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,747 | 0.217179 |
148e62acadab2abb7a544e4448e8d3e81c73ab98 | 585 | py | Python | pathod/__init__.py | illera88/mitmproxy | 4f464001841e5119bf57ef620a3257892ded2ded | [
"MIT"
] | 6 | 2020-11-25T07:33:05.000Z | 2022-01-25T07:25:54.000Z | pathod/__init__.py | illera88/mitmproxy | 4f464001841e5119bf57ef620a3257892ded2ded | [
"MIT"
] | null | null | null | pathod/__init__.py | illera88/mitmproxy | 4f464001841e5119bf57ef620a3257892ded2ded | [
"MIT"
] | 4 | 2021-03-14T16:14:27.000Z | 2021-09-25T03:01:15.000Z | import os
import sys
import warnings
warnings.warn(
"pathod and pathoc modules are deprecated, see https://github.com/mitmproxy/mitmproxy/issues/4273",
DeprecationWarning,
stacklevel=2
)
def print_tool_deprecation_message():
    """Print a deprecation banner for the invoked command-line tool to stderr."""
    tool = os.path.basename(sys.argv[0])
    banner = [
        "####",
        f"### {tool} is deprecated and will not be part of future mitmproxy releases!",
        "### See https://github.com/mitmproxy/mitmproxy/issues/4273 for more information.",
        "####",
        "",
    ]
    for line in banner:
        print(line, file=sys.stderr)
| 30.789474 | 131 | 0.707692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 297 | 0.507692 |
148fa74d7a5813457d336a4cb138f1b0a1e99c02 | 1,321 | py | Python | urchin/fs/mp3file.py | kellen/urchinfs | ea47d8997b42b9b472ea16d1bc7921b988840e4b | [
"Unlicense"
] | 2 | 2021-11-08T03:11:32.000Z | 2021-11-08T08:21:24.000Z | urchin/fs/mp3file.py | kellen/urchinfs | ea47d8997b42b9b472ea16d1bc7921b988840e4b | [
"Unlicense"
] | null | null | null | urchin/fs/mp3file.py | kellen/urchinfs | ea47d8997b42b9b472ea16d1bc7921b988840e4b | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import logging
from fnmatch import fnmatch
import mutagen
from mutagen.easyid3 import EasyID3
import urchin.fs.default
import urchin.fs.json
import urchin.fs.plugin
import urchin.fs.mp3
MP3_GLOB = "*.mp3"
class Plugin(urchin.fs.plugin.Plugin):
    """urchinfs plugin wiring for mp3 files.

    Composes the mp3-specific indexer/formatter defined below with the
    default matcher/merger/munger and the mp3 metadata extractor, and hands
    them to the urchin plugin base class.
    """

    name = "mp3file"

    def __init__(self):
        super(Plugin, self).__init__(
            indexer=Mp3FileIndexer,
            matcher=urchin.fs.default.DefaultMetadataMatcher,
            extractor=urchin.fs.mp3.Mp3MetadataExtractor,
            merger=urchin.fs.default.DefaultMerger,
            munger=urchin.fs.default.DefaultMunger,
            formatter=Mp3FileFormatter,
        )
class Mp3FileIndexer(urchin.fs.abstract.AbstractFileIndexer):
    """File indexer restricted to files matching MP3_GLOB ("*.mp3")."""

    # NOTE(review): urchin.fs.abstract is not imported at the top of this
    # file; this presumably relies on another urchin import making the
    # submodule reachable — verify.
    name = "mp3file"

    def __init__(self, config):
        super(Mp3FileIndexer, self).__init__(config, MP3_GLOB)
class Mp3FileFormatter(urchin.fs.plugin.Formatter):
    """Formats a display name for an entry from its merged mp3 metadata."""

    name = "mp3file"

    def __init__(self, config):
        pass

    def format(self, original_name, metadata):
        """Return the set of display names built as "track - artist - title".

        Metadata values may be sets; singleton sets are collapsed to their
        sole element before formatting (presumably tracknumber/artist/title
        are such singletons — TODO confirm upstream merger behavior).
        """
        # isinstance (rather than `type(v) == set`) also accepts set subclasses.
        d = {
            k: next(iter(v)) if isinstance(v, set) and len(v) == 1 else v
            for k, v in metadata.items()
        }
        return {"%(tracknumber)s - %(artist)s - %(title)s" % d}
| 30.022727 | 96 | 0.659349 | 1,011 | 0.765329 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.115064 |
14902d3960eb5bce06bda721f56d69952ffbd4d8 | 2,449 | py | Python | tests/model/test_qubic.py | Reathe/Qubic | 7ee18eb6cb67ff3637f664dd225273c51ae19847 | [
"MIT"
] | null | null | null | tests/model/test_qubic.py | Reathe/Qubic | 7ee18eb6cb67ff3637f664dd225273c51ae19847 | [
"MIT"
] | null | null | null | tests/model/test_qubic.py | Reathe/Qubic | 7ee18eb6cb67ff3637f664dd225273c51ae19847 | [
"MIT"
] | null | null | null | import unittest
from model.curseur import Curseur
from model.pion import PionBlanc, PionNoir
from model.qubic import Qubic
class TestQubic(unittest.TestCase):
    """Unit tests for the Qubic (3D tic-tac-toe) game model."""

    def test_poser(self):
        """Pieces fall under gravity and players alternate colours."""
        q = Qubic()
        q.poser((0, 7, 0))
        # With gravity, the piece lands at y == 0 regardless of the asked y.
        self.assertTrue(q.get_pion((0, 0, 0)) == PionBlanc)
        self.assertFalse(q.get_pion((0, 1, 0)))
        q.poser((0, 7, 0))
        self.assertTrue(q.get_pion((0, 0, 0)) == PionBlanc)
        self.assertTrue(q.get_pion((0, 1, 0)) == PionNoir)
        self.assertTrue(q.get_pion((0, 2, 0)) is None)

    def test_tour(self):
        """White and black alternate turns until the game is finished."""
        q = Qubic()
        i = 0
        for x in range(len(q)):
            for y in range(len(q)):
                for z in range(len(q)):
                    if q.fini:
                        # Once the game is over, nobody has a turn anymore.
                        self.assertFalse(q.tour_blanc() or q.tour_noir())
                    elif i % 2 == 0:
                        self.assertTrue(q.tour_blanc() and not q.tour_noir(), "au tour {}".format(i))
                    else:
                        self.assertTrue(q.tour_noir() and not q.tour_blanc(), "au tour {}".format(i))
                    q.poser((x, y, z))
                    i += 1
        self.assertFalse(q.tour_blanc() or q.tour_noir())

    def test_annule_pose(self):
        """Undoing the winning move removes the piece and un-finishes the game."""
        q = Qubic(gravite=False)
        self.make_win(q)
        q.annule_coup()
        self.assertTrue(q.get_pion((len(q) - 1, 0, 0)) is None)
        self.assertFalse(q.fini)

    def make_win(self, q):
        """Helper: alternate moves so white completes a line on (len(q)-1, 0, 0)."""
        for x, z in zip(range(len(q)), range(len(q))[:0:-1]):
            q.poser((x, 0, z))
            q.poser((x, 1, z))
            self.assertFalse(q.fini)
        q.poser((len(q) - 1, 0, 0))
        self.assertTrue(q.fini)

    def test_valid_pos(self):
        """Every in-board coordinate is valid; out-of-range ones are not."""
        q = Qubic()
        for x in range(len(q)):
            for y in range(len(q)):
                for z in range(len(q)):
                    self.assertTrue(q.valid_pos((x, y, z)))
        self.assertFalse(q.valid_pos((-5, 1, 1)))
        self.assertFalse(q.valid_pos((1, 4, 1)))

    def test_reset(self):
        pass

    def test_victoire(self):
        """Win detection on the four space diagonals of the cube."""
        q = Qubic(gravite=False)
        for x in range(len(q)):
            q.poser((x, x, x))
            # poser() alternates colours, so the diagonal is mixed: no win.
            self.assertFalse(q.fini, "at pos {}".format(x))
        q.reset()
        self.make_win(q)
        q.reset()
        # Diagonal (x, x, x).
        for x in range(len(q)):
            q.plateau[x][x][x] = PionBlanc()
        self.assertTrue(q.win((len(q) - 1,) * 3))
        q.reset()
        # Diagonal (x, len-1-x, x).
        for x in range(len(q)):
            q.plateau[x][len(q) - 1 - x][x] = PionBlanc()
        self.assertTrue(q.win((len(q) - 1, 0, len(q) - 1)))
        q.reset()
        # Diagonal (x, x, len-1-x). Bug fix: the original asserted a non-empty
        # tuple, which is always true; assert a real win check instead.
        for x in list(range(len(q))):
            q.plateau[x][x][len(q) - 1 - x] = PionBlanc()
        self.assertTrue(q.win((len(q) - 1, len(q) - 1, 0)))
        q.reset()
        # Diagonal (x, len-1-x, len-1-x). Same always-true-tuple bug fixed.
        for x in list(range(len(q))):
            q.plateau[x][len(q) - 1 - x][len(q) - 1 - x] = PionBlanc()
        self.assertTrue(q.win((len(q) - 1, 0, 0)))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 25.778947 | 83 | 0.60392 | 2,276 | 0.929359 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.018375 |
1490c779943d925d00b0d9fdb094aaea36004a99 | 1,536 | py | Python | sherpa_client/models/project_status.py | kairntech/sherpa-client | cd259c87b7291eeec3f3ea025e368f2f069a06cd | [
"Apache-2.0"
] | null | null | null | sherpa_client/models/project_status.py | kairntech/sherpa-client | cd259c87b7291eeec3f3ea025e368f2f069a06cd | [
"Apache-2.0"
] | null | null | null | sherpa_client/models/project_status.py | kairntech/sherpa-client | cd259c87b7291eeec3f3ea025e368f2f069a06cd | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict, Type, TypeVar, Union
import attr
from ..models.sherpa_job_bean import SherpaJobBean
from ..types import UNSET, Unset
T = TypeVar("T", bound="ProjectStatus")
@attr.s(auto_attribs=True)
class ProjectStatus:
    """attrs-based data model for a project's status, including the pending
    job when one exists. Round-trips through the camelCase JSON keys
    projectName/status/pendingJob via to_dict/from_dict.
    """

    project_name: str
    status: str
    # UNSET (not None) marks "field absent from the payload".
    pending_job: Union[Unset, SherpaJobBean] = UNSET

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-ready dict; pendingJob is omitted when unset."""
        project_name = self.project_name
        status = self.status
        pending_job: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.pending_job, Unset):
            pending_job = self.pending_job.to_dict()

        field_dict: Dict[str, Any] = {}
        field_dict.update(
            {
                "projectName": project_name,
                "status": status,
            }
        )
        if pending_job is not UNSET:
            field_dict["pendingJob"] = pending_job

        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Deserialize from a dict; a missing pendingJob stays UNSET."""
        d = src_dict.copy()
        project_name = d.pop("projectName")

        status = d.pop("status")

        _pending_job = d.pop("pendingJob", UNSET)
        pending_job: Union[Unset, SherpaJobBean]
        if isinstance(_pending_job, Unset):
            pending_job = UNSET
        else:
            pending_job = SherpaJobBean.from_dict(_pending_job)

        project_status = cls(
            project_name=project_name,
            status=status,
            pending_job=pending_job,
        )

        return project_status
| 26.033898 | 63 | 0.598958 | 1,316 | 0.856771 | 0 | 0 | 1,343 | 0.874349 | 0 | 0 | 91 | 0.059245 |