hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
248
| max_stars_repo_name
stringlengths 5
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
248
| max_issues_repo_name
stringlengths 5
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
248
| max_forks_repo_name
stringlengths 5
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
2.06M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.03M
| alphanum_fraction
float64 0
1
| count_classes
int64 0
1.6M
| score_classes
float64 0
1
| count_generators
int64 0
651k
| score_generators
float64 0
1
| count_decorators
int64 0
990k
| score_decorators
float64 0
1
| count_async_functions
int64 0
235k
| score_async_functions
float64 0
1
| count_documentation
int64 0
1.04M
| score_documentation
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b88a756fba8d93702364d516718c809d4476d07c
| 14,247
|
py
|
Python
|
hm_gerber_ex/rs274x.py
|
halfmarble/halfmarble-panelizer
|
73489a0b5d0d46e6d363f6d14454d91fab62f8e3
|
[
"MIT"
] | null | null | null |
hm_gerber_ex/rs274x.py
|
halfmarble/halfmarble-panelizer
|
73489a0b5d0d46e6d363f6d14454d91fab62f8e3
|
[
"MIT"
] | 5
|
2022-01-15T13:32:54.000Z
|
2022-01-30T15:18:15.000Z
|
hm_gerber_ex/rs274x.py
|
halfmarble/halfmarble-panelizer
|
73489a0b5d0d46e6d363f6d14454d91fab62f8e3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2022 HalfMarble LLC
# Copyright 2019 Hiroshi Murayama <opiopan@gmail.com>
from hm_gerber_tool.cam import FileSettings
import hm_gerber_tool.rs274x
from hm_gerber_tool.gerber_statements import *
from hm_gerber_ex.gerber_statements import AMParamStmt, AMParamStmtEx, ADParamStmtEx
from hm_gerber_ex.utility import rotate
import re
def loads(data, filename=None):
    """Parse Gerber (RS-274X) source text into a parsed file object.

    NOTE(review): this mutates class-level attributes of the shared
    ``hm_gerber_tool.rs274x.GerberParser`` (adds the SF regex and rebuilds
    the PARAMS/PARAM_STMT tables), so the extension is global for every
    parser instance created afterwards, not just this call.

    :param data: Gerber source as a string
    :param filename: optional name recorded on the parsed file
    :return: result of ``GerberParser.parse_raw(data, filename)``
    """
    cls = hm_gerber_tool.rs274x.GerberParser
    # Scale-factor (SF) parameter: optional A and B decimal factors.
    cls.SF = r"(?P<param>SF)(A(?P<a>{decimal}))?(B(?P<b>{decimal}))?".format(decimal=cls.DECIMAL)
    # Extended parameter set including deprecated AS/IF/IN/IP/IR/MI/OF/SF/LN.
    cls.PARAMS = (cls.FS, cls.MO, cls.LP, cls.AD_CIRCLE,
                  cls.AD_RECT, cls.AD_OBROUND, cls.AD_POLY,
                  cls.AD_MACRO, cls.AM, cls.AS, cls.IF, cls.IN,
                  cls.IP, cls.IR, cls.MI, cls.OF, cls.SF, cls.LN)
    # Rebuild the compiled statement matchers from the extended PARAMS tuple.
    cls.PARAM_STMT = [re.compile(r"%?{0}\*%?".format(p)) for p in cls.PARAMS]
    return cls().parse_raw(data, filename)
def write_gerber_header(file, settings):
    """Write the standard Gerber preamble (MO, FS, %IPPOS*%) to *file*."""
    mo_line = MOParamStmt('MO', settings.units).to_gerber(settings)
    fs_line = FSParamStmt('FS', settings.zero_suppression,
                          settings.notation, settings.format).to_gerber(settings)
    file.write('{0}\n{1}\n%IPPOS*%\n'.format(mo_line, fs_line))
class GerberFile(hm_gerber_tool.rs274x.GerberFile):
    """Extended RS-274X file that normalizes statements on load.

    On construction the raw statement stream is split into aperture macros,
    aperture definitions and main statements (via GerberContext), deprecated
    IR rotation / IP negative-polarity parameters are applied, and the file
    is canonicalized to absolute notation with trailing-zero suppression.
    """

    @classmethod
    def from_gerber_file(cls, gerber_file):
        """Wrap an hm_gerber_tool GerberFile in this extended class."""
        if not isinstance(gerber_file, hm_gerber_tool.rs274x.GerberFile):
            raise Exception('only gerber.rs274x.GerberFile object is specified')
        return cls(gerber_file.statements, gerber_file.settings, gerber_file.primitives,
                   gerber_file.apertures, gerber_file.filename)

    def __init__(self, statements, settings, primitives, apertures, filename=None):
        super(GerberFile, self).__init__(statements, settings, primitives, apertures, filename)
        self.context = GerberContext.from_settings(self.settings)
        self.aperture_macros = {}   # macro name -> AM statement
        self.aperture_defs = []     # AD statements, in input order
        self.main_statements = []   # everything else that gets emitted
        for stmt in self.statements:
            # NOTE(review): 'type' shadows the builtin within this loop.
            type, stmts = self.context.normalize_statement(stmt)
            if type == self.context.TYPE_AM:
                for mdef in stmts:
                    self.aperture_macros[mdef.name] = mdef
            elif type == self.context.TYPE_AD:
                self.aperture_defs.extend(stmts)
            elif type == self.context.TYPE_MAIN:
                self.main_statements.extend(stmts)
        # Apply deprecated IR (image rotation) and IP (negative image)
        # parameters picked up by the context during normalization.
        if self.context.angle != 0:
            self.rotate(self.context.angle)
        if self.context.is_negative:
            self.negate_polarity()
        self.context.notation = 'absolute'
        self.context.zeros = 'trailing'

    def write(self, filename=None):
        """Write the normalized file: header, macros, apertures, body, M02."""
        self.context.notation = 'absolute'
        self.context.zeros = 'trailing'
        self.context.format = self.format
        # NOTE(review): this is a no-op self-assignment — possibly meant
        # self.context.units = self.units; confirm before changing.
        self.units = self.units
        filename = filename if filename is not None else self.filename
        with open(filename, 'w') as f:
            write_gerber_header(f, self.context)
            for macro in self.aperture_macros:
                f.write(self.aperture_macros[macro].to_gerber(self.context) + '\n')
            for aperture in self.aperture_defs:
                f.write(aperture.to_gerber(self.context) + '\n')
            for statement in self.main_statements:
                f.write(statement.to_gerber(self.context) + '\n')
            f.write('M02*\n')

    def to_inch(self):
        """Convert macros, apertures and statements to inch units (idempotent)."""
        if self.units == 'metric':
            for macro in self.aperture_macros:
                self.aperture_macros[macro].to_inch()
            for aperture in self.aperture_defs:
                aperture.to_inch()
            for statement in self.statements:
                statement.to_inch()
            self.units = 'inch'
            self.context.units = 'inch'

    def to_metric(self):
        """Convert macros, apertures and statements to metric units (idempotent)."""
        if self.units == 'inch':
            for macro in self.aperture_macros:
                self.aperture_macros[macro].to_metric()
            for aperture in self.aperture_defs:
                aperture.to_metric()
            for statement in self.statements:
                statement.to_metric()
            self.units = 'metric'
            self.context.units = 'metric'

    def offset(self, x_offset=0, y_offset=0):
        """Translate all coordinate statements and primitives by the offsets."""
        for statement in self.main_statements:
            if isinstance(statement, CoordStmt):
                if statement.x is not None:
                    statement.x += x_offset
                if statement.y is not None:
                    statement.y += y_offset
        for primitive in self.primitives:
            primitive.offset(x_offset, y_offset)

    def rotate(self, angle, center=(0, 0)):
        """Rotate all main-statement coordinates by *angle* degrees about *center*.

        Arc center offsets (I/J) are relative to the previous point, so each
        is converted to absolute, rotated, then re-relativized against the
        previous *rotated* point.
        """
        if angle % 360 == 0:
            return
        last_x = 0
        last_y = 0
        last_rx = 0   # previous point after rotation
        last_ry = 0
        # TODO major workaround verify!
        # PCB houses do not like rotated AM macros, so keep them same, but rotate arguments and points instead
        # self._generalize_aperture()
        # for name in self.aperture_macros:
        #     self.aperture_macros[name].rotate(angle, center)
        # NOTE(review): angle != 0 is always true here (the %360 guard above
        # already returned for multiples of 360).
        if angle != 0:
            for aperture in self.aperture_defs:
                aperture.flip()
        for statement in self.main_statements:
            if isinstance(statement, CoordStmt) and statement.x is not None and statement.y is not None:
                if statement.i is not None and statement.j is not None:
                    cx = last_x + statement.i
                    cy = last_y + statement.j
                    # 'rotate' here is the hm_gerber_ex.utility function,
                    # not this method (module-global name resolution).
                    cx, cy = rotate(cx, cy, angle, center)
                    statement.i = cx - last_rx
                    statement.j = cy - last_ry
                last_x = statement.x
                last_y = statement.y
                last_rx, last_ry = rotate(statement.x, statement.y, angle, center)
                statement.x = last_rx
                statement.y = last_ry

    def negate_polarity(self):
        """Swap dark <-> clear on every LP statement (negative image support)."""
        for statement in self.main_statements:
            if isinstance(statement, LPParamStmt):
                statement.lp = 'dark' if statement.lp == 'clear' else 'clear'

    def _generalize_aperture(self):
        """Replace R/O/P standard apertures with equivalent macro apertures.

        Generates uniquely-named macros (MACC/MACR/...) only when at least
        one rectangle, obround or polygon aperture is present.
        """
        CIRCLE = 0
        RECTANGLE = 1
        LANDSCAPE_OBROUND = 2
        PORTRATE_OBROUND = 3
        POLYGON = 4
        macro_defs = [
            ('MACC', AMParamStmtEx.circle),
            ('MACR', AMParamStmtEx.rectangle),
            ('MACLO', AMParamStmtEx.landscape_obround),
            ('MACPO', AMParamStmtEx.portrate_obround),
            ('MACP', AMParamStmtEx.polygon)
        ]
        need_to_change = False
        for statement in self.aperture_defs:
            if isinstance(statement, ADParamStmt) and statement.shape in ['R', 'O', 'P']:
                need_to_change = True
        if need_to_change:
            # Register one macro per shape, suffixing _<n> on name collisions.
            for idx in range(0, len(macro_defs)):
                macro_def = macro_defs[idx]
                name = macro_def[0]
                num = 1
                while name in self.aperture_macros:
                    name = '%s_%d' % (macro_def[0], num)
                    num += 1
                self.aperture_macros[name] = macro_def[1](name, self.units)
                macro_defs[idx] = (name, macro_def[1])
            for statement in self.aperture_defs:
                if isinstance(statement, ADParamStmt):
                    if statement.shape == 'R':
                        statement.shape = macro_defs[RECTANGLE][0]
                    elif statement.shape == 'O':
                        # Obround orientation depends on width vs height;
                        # equal sides degenerate to a circle macro.
                        x = statement.modifiers[0][0] if len(statement.modifiers[0]) > 0 else 0
                        y = statement.modifiers[0][1] if len(statement.modifiers[0]) > 1 else 0
                        if x < y:
                            statement.shape = macro_defs[PORTRATE_OBROUND][0]
                        elif x > y:
                            statement.shape = macro_defs[LANDSCAPE_OBROUND][0]
                        else:
                            statement.shape = macro_defs[CIRCLE][0]
                    elif statement.shape == 'P':
                        statement.shape = macro_defs[POLYGON][0]
class GerberContext(FileSettings):
    """Mutable parsing state used to normalize a Gerber statement stream.

    Tracks deprecated image-transform parameters (MI/OF/SF/AS/IR/IP) as an
    affine matrix applied to coordinates, plus interpolation mode, quadrant
    mode and the current position.
    """
    # Classification results of normalize_statement().
    TYPE_NONE = 'none'
    TYPE_AM = 'am'
    TYPE_AD = 'ad'
    TYPE_MAIN = 'main'
    IP_LINEAR = 'linear'
    IP_ARC = 'arc'
    DIR_CLOCKWISE = 'cw'
    DIR_COUNTERCLOCKWISE = 'ccw'
    # Statement class names folded into context state and dropped from output.
    ignored_stmt = ('FSParamStmt', 'MOParamStmt', 'ASParamStmt',
                    'INParamStmt', 'IPParamStmt', 'IRParamStmt',
                    'MIParamStmt', 'OFParamStmt', 'SFParamStmt',
                    'LNParamStmt', 'CommentStmt', 'EofStmt',)

    @classmethod
    def from_settings(cls, settings):
        """Build a context from an existing FileSettings instance."""
        return cls(settings.notation, settings.units, settings.zero_suppression,
                   settings.format, settings.zeros, settings.angle_units)

    def __init__(self, notation='absolute', units='inch',
                 zero_suppression=None, format=(2, 5), zeros=None,
                 angle_units='degrees',
                 name=None,
                 mirror=(False, False), offset=(0., 0.), scale=(1., 1.),
                 angle=0., axis='xy'):
        super(GerberContext, self).__init__(notation, units, zero_suppression, format, zeros, angle_units)
        self.name = name
        self.mirror = mirror
        self.offset = offset
        self.scale = scale
        self.angle = angle
        self.axis = axis
        # Affine terms: x' = m[0]*x + m[1], y' = m[2]*y + m[3];
        # m[4]/m[5] scale the I/J arc-center offsets.
        self.matrix = (1, 0,
                       1, 0,
                       1, 1)
        self.is_negative = False        # set by deprecated IP parameter
        self.is_first_coordinate = True
        self.no_polarity = True         # no LP statement seen yet
        self.in_single_quadrant_mode = False
        self.op = None                  # last D-code operation seen
        self.interpolation = self.IP_LINEAR
        self.direction = self.DIR_CLOCKWISE
        self.x = 0.
        self.y = 0.

    def normalize_statement(self, stmt):
        """Classify *stmt*, folding deprecated parameters into context state.

        Returns (type, statements): type is one of TYPE_NONE/TYPE_AM/TYPE_AD/
        TYPE_MAIN; statements is the list to emit, or None for TYPE_NONE.
        """
        additional_stmts = None
        if isinstance(stmt, INParamStmt):
            self.name = stmt.name
        elif isinstance(stmt, MIParamStmt):
            self.mirror = (stmt.a, stmt.b)
            self._update_matrix()
        elif isinstance(stmt, OFParamStmt):
            self.offset = (stmt.a, stmt.b)
            self._update_matrix()
        elif isinstance(stmt, SFParamStmt):
            self.scale = (stmt.a, stmt.b)
            self._update_matrix()
        elif isinstance(stmt, ASParamStmt):
            self.axis = 'yx' if stmt.mode == 'AYBX' else 'xy'
            self._update_matrix()
        elif isinstance(stmt, IRParamStmt):
            self.angle = stmt.angle
        elif isinstance(stmt, AMParamStmt) and not isinstance(stmt, AMParamStmtEx):
            # Upgrade plain AM statements to the extended class.
            stmt = AMParamStmtEx.from_stmt(stmt)
            return (self.TYPE_AM, [stmt])
        elif isinstance(stmt, ADParamStmt) and not isinstance(stmt, AMParamStmtEx):
            # NOTE(review): the exclusion checks AMParamStmtEx, not
            # ADParamStmtEx — looks like a copy/paste slip; confirm.
            stmt = ADParamStmtEx.from_stmt(stmt)
            return (self.TYPE_AD, [stmt])
        elif isinstance(stmt, QuadrantModeStmt):
            # Remember single-quadrant mode but always emit multi-quadrant.
            self.in_single_quadrant_mode = stmt.mode == 'single-quadrant'
            stmt.mode = 'multi-quadrant'
        elif isinstance(stmt, IPParamStmt):
            self.is_negative = stmt.ip == 'negative'
        elif isinstance(stmt, LPParamStmt):
            self.no_polarity = False
        elif isinstance(stmt, CoordStmt):
            self._normalize_coordinate(stmt)
            if self.is_first_coordinate:
                self.is_first_coordinate = False
                if self.no_polarity:
                    # No LP before the first coordinate: inject an explicit
                    # dark polarity so the output is self-contained.
                    additional_stmts = [LPParamStmt('LP', 'dark'), stmt]
        if type(stmt).__name__ in self.ignored_stmt:
            return (self.TYPE_NONE, None)
        elif additional_stmts is not None:
            return (self.TYPE_MAIN, additional_stmts)
        else:
            return (self.TYPE_MAIN, [stmt])

    def _update_matrix(self):
        """Recompute the affine matrix from mirror/offset/scale and AS axis swap."""
        if self.axis == 'xy':
            mx = -1 if self.mirror[0] else 1
            my = -1 if self.mirror[1] else 1
            self.matrix = (
                self.scale[0] * mx, self.offset[0],
                self.scale[1] * my, self.offset[1],
                self.scale[0] * mx, self.scale[1] * my)
        else:
            # AS AYBX: A maps to Y and B maps to X, so mirror/scale/offset
            # components are swapped.
            mx = -1 if self.mirror[1] else 1
            my = -1 if self.mirror[0] else 1
            self.matrix = (
                self.scale[1] * mx, self.offset[1],
                self.scale[0] * my, self.offset[0],
                self.scale[1] * mx, self.scale[0] * my)

    def _normalize_coordinate(self, stmt):
        """Rewrite *stmt* in-place: absolute coords, applied matrix, fixed arcs.

        Tracks interpolation mode; mirroring on exactly one axis reverses
        arc direction (G02 <-> G03).
        """
        if stmt.function == 'G01' or stmt.function == 'G1':
            self.interpolation = self.IP_LINEAR
        elif stmt.function == 'G02' or stmt.function == 'G2':
            self.interpolation = self.IP_ARC
            self.direction = self.DIR_CLOCKWISE
            if self.mirror[0] != self.mirror[1]:
                stmt.function = 'G03'
        elif stmt.function == 'G03' or stmt.function == 'G3':
            self.interpolation = self.IP_ARC
            self.direction = self.DIR_COUNTERCLOCKWISE
            if self.mirror[0] != self.mirror[1]:
                stmt.function = 'G02'
        if stmt.only_function:
            return
        last_x = self.x
        last_y = self.y
        if self.notation == 'absolute':
            x = stmt.x if stmt.x is not None else self.x
            y = stmt.y if stmt.y is not None else self.y
        else:
            # NOTE(review): ternary precedence makes this read as
            # (self.x + stmt.x) if stmt.x is not None else 0 — a missing
            # incremental coordinate resets to 0 instead of keeping self.x.
            # Looks suspicious; verify against incremental-notation files.
            x = self.x + stmt.x if stmt.x is not None else 0
            y = self.y + stmt.y if stmt.y is not None else 0
        self.x, self.y = x, y
        # Carry the last D-code forward for modal statements.
        self.op = stmt.op if stmt.op is not None else self.op
        stmt.op = self.op
        stmt.x = self.matrix[0] * x + self.matrix[1]
        stmt.y = self.matrix[2] * y + self.matrix[3]
        if stmt.op == 'D01' and self.interpolation == self.IP_ARC:
            # Single-quadrant arcs omit I/J signs; infer them from the
            # movement direction, then scale by the matrix.
            qx, qy = 1, 1
            if self.in_single_quadrant_mode:
                if self.direction == self.DIR_CLOCKWISE:
                    qx = 1 if y > last_y else -1
                    qy = 1 if x < last_x else -1
                else:
                    qx = 1 if y < last_y else -1
                    qy = 1 if x > last_x else -1
                if last_x == x and last_y == y:
                    qx, qy = 0, 0
            stmt.i = qx * self.matrix[4] * stmt.i if stmt.i is not None else 0
            stmt.j = qy * self.matrix[5] * stmt.j if stmt.j is not None else 0
| 41.057637
| 110
| 0.563206
| 13,028
| 0.914438
| 0
| 0
| 570
| 0.040008
| 0
| 0
| 1,056
| 0.074121
|
b88ad3cd16814edcf01716b7796117d85426c826
| 691
|
py
|
Python
|
salamander/mktcalendar.py
|
cclauss/statarb
|
a59366f70122c355fc93a2391362a3e8818a290e
|
[
"Apache-2.0"
] | 51
|
2019-02-01T19:43:37.000Z
|
2022-03-16T09:07:03.000Z
|
salamander/mktcalendar.py
|
cclauss/statarb
|
a59366f70122c355fc93a2391362a3e8818a290e
|
[
"Apache-2.0"
] | 2
|
2019-02-23T18:54:22.000Z
|
2019-11-09T01:30:32.000Z
|
salamander/mktcalendar.py
|
cclauss/statarb
|
a59366f70122c355fc93a2391362a3e8818a290e
|
[
"Apache-2.0"
] | 35
|
2019-02-08T02:00:31.000Z
|
2022-03-01T23:17:00.000Z
|
from pandas.tseries.holiday import AbstractHolidayCalendar, Holiday, nearest_workday, \
USMartinLutherKingJr, USPresidentsDay, GoodFriday, USMemorialDay, \
USLaborDay, USThanksgivingDay
from pandas.tseries.offsets import CustomBusinessDay
class USTradingCalendar(AbstractHolidayCalendar):
    """US equity-market holiday calendar.

    Bug fix: the fixed-date holidays (New Year's Day, Independence Day,
    Christmas) previously had no ``observance`` rule, so when they fell on
    a weekend no trading holiday was generated at all. Per US exchange
    convention they are observed on the nearest workday; ``nearest_workday``
    was already imported but unused.
    """
    rules = [
        Holiday('NewYearsDay', month=1, day=1, observance=nearest_workday),
        USMartinLutherKingJr,
        USPresidentsDay,
        GoodFriday,
        USMemorialDay,
        Holiday('USIndependenceDay', month=7, day=4, observance=nearest_workday),
        USLaborDay,
        USThanksgivingDay,
        Holiday('Christmas', month=12, day=25, observance=nearest_workday)
    ]


# One business-day step that skips weekends and US trading holidays.
TDay = CustomBusinessDay(calendar=USTradingCalendar())
| 31.409091
| 88
| 0.691751
| 374
| 0.541245
| 0
| 0
| 0
| 0
| 0
| 0
| 43
| 0.062229
|
b88df16653e927e74a8e50a7da42dd7a7bec9732
| 3,063
|
py
|
Python
|
wall/views.py
|
pydanny/pinax-wall
|
1e3df60dad394292be9024e2ad90a07bf1a0b395
|
[
"MIT"
] | 1
|
2019-08-16T20:05:40.000Z
|
2019-08-16T20:05:40.000Z
|
wall/views.py
|
pydanny/pinax-wall
|
1e3df60dad394292be9024e2ad90a07bf1a0b395
|
[
"MIT"
] | null | null | null |
wall/views.py
|
pydanny/pinax-wall
|
1e3df60dad394292be9024e2ad90a07bf1a0b395
|
[
"MIT"
] | null | null | null |
""" Sample view for group aware projects """
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from uni_form.helpers import FormHelper, Submit, Reset
from wall.models import Post
from wall.forms import WallForm
@login_required
def list(request, group_slug=None, bridge=None, form_class=WallForm):
    """Display the wall and handle new post submissions.

    Args:
        request: the HTTP request (user is authenticated via @login_required)
        group_slug: slug used to look the group up through the bridge
        bridge: optional group bridge; when None the wall is site-wide
        form_class: form used to create a new wall post
    """
    # If there is a bridge then get the group
    if bridge is not None:
        try:
            group = bridge.get_group(group_slug)
        except ObjectDoesNotExist:
            raise Http404
    else:
        group = None

    # If we have a group we fetch the wall from the group
    if group:
        posts = group.content_objects(Post)
    else:
        posts = Post.objects.all()

    # Membership check. Bug fix: the original called group.user_is_member()
    # even when group was None, raising AttributeError on site-wide walls.
    if not request.user.is_authenticated():
        is_member = False
    elif group is not None:
        is_member = group.user_is_member(request.user)
    else:
        # No group context: any authenticated user may post.
        is_member = True

    if is_member:
        if request.method == "POST":
            form = form_class(request.user, group, request.POST)
            if form.is_valid():
                post = form.save(commit=False)
                post.creator = request.user
                if group:
                    group.associate(post)
                post.save()
                if group:
                    redirect_to = bridge.reverse("wall_list", group)
                else:
                    # Bug fix: reverse() was used without being imported.
                    from django.core.urlresolvers import reverse
                    redirect_to = reverse("wall_list")
                return HttpResponseRedirect(redirect_to)
        else:
            form = form_class(request.user, group)
    else:
        form = None

    return render_to_response("wall/list.html", {
        "group": group,
        "posts": posts,
        "form": form,
        "is_member": is_member
    }, context_instance=RequestContext(request))
def detail(request, slug, group_slug=None, bridge=None):
    """Display a single wall post.

    Args:
        request: the HTTP request
        slug: slug of the Post to display
        group_slug: slug used to look the group up through the bridge
        bridge: optional group bridge; when None the wall is site-wide
    """
    # If there is a bridge then get the group
    if bridge is not None:
        try:
            group = bridge.get_group(group_slug)
        except ObjectDoesNotExist:
            raise Http404
    else:
        group = None

    post = get_object_or_404(Post, slug=slug)

    # Bug fix: the original called group.user_is_member() even when group
    # was None, raising AttributeError for authenticated users on
    # site-wide walls. Mirror the membership rule used by the list view.
    if not request.user.is_authenticated():
        is_member = False
    elif group is not None:
        is_member = group.user_is_member(request.user)
    else:
        is_member = True

    return render_to_response("wall/detail.html", {
        "group": group,
        "post": post,
        "is_member": is_member
    }, context_instance=RequestContext(request))
| 31.57732
| 91
| 0.601698
| 0
| 0
| 0
| 0
| 1,717
| 0.560562
| 0
| 0
| 529
| 0.172706
|
b88fca2ebe335e0075492e9a81b964d8fd3677ae
| 2,595
|
py
|
Python
|
cdc/src/NoteDeid.py
|
ZebinKang/cdc
|
a32fe41892021d29a1d9c534728a92b67f9b6cea
|
[
"MIT"
] | null | null | null |
cdc/src/NoteDeid.py
|
ZebinKang/cdc
|
a32fe41892021d29a1d9c534728a92b67f9b6cea
|
[
"MIT"
] | null | null | null |
cdc/src/NoteDeid.py
|
ZebinKang/cdc
|
a32fe41892021d29a1d9c534728a92b67f9b6cea
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
The MIT License (MIT)
Copyright (c) 2016 Wei-Hung Weng
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Title : Clinical Document Classification Pipeline: Deidentification module (deid)
Author : Wei-Hung Weng
Created : 10/21/2016
'''
import sys, os, time
import subprocess
import commands
import string
import pandas as pd
def RunDeid(folder, deidDir, rpdr=False, erisOne=False):
    """Run the MIT deid.pl de-identification tool over every .txt file in *folder*.

    Python 2 only (print statements; module imports `commands`). Copies the
    deid program into *folder*, wraps each note in the START_OF_RECORD /
    END_OF_RECORD markers deid.pl expects, runs the tool, strips the
    [** ... **] placeholder tags from the .res output, then removes the
    tool's working directories and every non-.res file.

    folder  -- directory containing the .txt notes (modified in place!)
    deidDir -- directory containing deid.pl and its config/dictionaries
    rpdr    -- if True, drop the first two lines of each note (RPDR headers)
               before wrapping
    erisOne -- NOTE(review): accepted but never used in this function
    """
    print "Copying deid program into folders"
    cwd = os.getcwd()
    os.system("cp -r " + deidDir + "/* " + folder)
    #subprocess.check_output(['bash', '-c', cmd])
    print "Executing deid"
    # Both variants: wrap each note in deid record markers, rename to .text,
    # run deid.pl, then strip the [** ... **] tags from the .res result.
    if rpdr:
        cmd = "for file in *.txt; do sed 1,2d \"${file}\" > temp && mv temp \"${file}\"; echo '\r\nSTART_OF_RECORD=1||||1||||' | cat - \"$file\" > temp && mv temp \"$file\"; echo '||||END_OF_RECORD\r\n' >> \"$file\"; mv -- \"${file}\" \"${file}.text\"; perl deid.pl \"${file%%.txt.text}\" deid-output.config; sed 's/\[\*\*.*\*\*\]//g' \"${file}.res\" > temp && mv temp \"${file}.res\"; done"
    else:
        cmd = "for file in *.txt; do echo '\r\nSTART_OF_RECORD=1||||1||||' | cat - \"$file\" > temp && mv temp \"$file\"; echo '||||END_OF_RECORD\r\n' >> \"$file\"; mv -- \"${file}\" \"${file}.text\"; perl deid.pl \"${file%%.txt.text}\" deid-output.config; sed 's/\[\*\*.*\*\*\]//g' \"${file}.res\" > temp && mv temp \"${file}.res\"; done"
    os.chdir(folder)
    subprocess.check_output(['bash', '-c', cmd])
    # Clean up the copied deid support directories and keep only .res files.
    os.system('rm -rf dict; rm -rf doc; rm -rf GSoutput; rm -rf GSstat; rm -rf lists')
    os.system('find . ! -name "*.res" -exec rm -r {} \;')
    os.chdir(cwd)
| 47.181818
| 391
| 0.665511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,191
| 0.844316
|
b8922ccaf7aecd6b398d579f4ddc8b100bfa96aa
| 2,540
|
py
|
Python
|
turbo_transformers/python/tests/qbert_layer_test.py
|
xcnick/TurboTransformers
|
48b6ba09af2219616c6b97cc5c09222408e080c2
|
[
"BSD-3-Clause"
] | 1
|
2021-11-04T07:12:46.000Z
|
2021-11-04T07:12:46.000Z
|
turbo_transformers/python/tests/qbert_layer_test.py
|
xcnick/TurboTransformers
|
48b6ba09af2219616c6b97cc5c09222408e080c2
|
[
"BSD-3-Clause"
] | null | null | null |
turbo_transformers/python/tests/qbert_layer_test.py
|
xcnick/TurboTransformers
|
48b6ba09af2219616c6b97cc5c09222408e080c2
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2020 THL A29 Limited, a Tencent company.
# All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
# https://opensource.org/licenses/BSD-3-Clause
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# See the AUTHORS file for names of contributors.
import torch
import transformers
import turbo_transformers
from turbo_transformers.layers.utils import convert2tt_tensor, try_convert, convert_returns_as_type, ReturnType
import time
# Benchmark a stock BERT encoder layer against turbo_transformers' quantized
# layer and torch dynamic quantization, then compare their numerical error.
model = transformers.BertModel.from_pretrained('bert-base-uncased')
model.eval()
torch.set_grad_enabled(False)

bertlayer = model.encoder.layer[0]
qbertlayer = turbo_transformers.QBertLayer.from_torch(bertlayer)
torchqbertlayer = torch.quantization.quantize_dynamic(bertlayer)

lens = [40, 60]
loops = 1


def _timed(label, call):
    """Run *call* `loops` times, print QPS under *label*, return last result."""
    begin = time.time()
    for _ in range(loops):
        result = call()
    print(label, loops / (time.time() - begin))
    return result


def _pair_err(lhs, rhs):
    """Largest absolute difference across the two returned tensors."""
    return max(torch.max(torch.abs(lhs[0] - rhs[0])),
               torch.max(torch.abs(lhs[1] - rhs[1])))


for seq_len in lens:
    input_tensor = torch.rand((1, seq_len, 768))
    # Additive mask: 0 for visible positions, -10000 for masked ones,
    # broadcast to (batch, heads, query, key).
    attention_mask = torch.ones((1, seq_len))
    attention_mask = attention_mask[:, None, None, :]
    attention_mask = (1.0 - attention_mask) * -10000.0
    print("seq length =", seq_len)

    res = _timed("torch fp32 layer QPS =",
                 lambda: bertlayer(input_tensor, attention_mask, output_attentions=True))
    res2 = _timed("turbo fp32+int8 layer QPS =",
                  lambda: qbertlayer(input_tensor, attention_mask, output_attentions=True))
    res3 = _timed("torch int8 layer QPS =",
                  lambda: torchqbertlayer(input_tensor, attention_mask, output_attentions=True))

    print("max error against torch fp32 =", _pair_err(res, res2))
    print("max error against torch int8 =", _pair_err(res3, res2))
    print("max error between torch int8 and torch fp32 =", _pair_err(res3, res))
| 36.285714
| 111
| 0.686614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 870
| 0.34252
|
b892a01e77462df62b5d9db18651eb65be2d4626
| 571
|
py
|
Python
|
datasets/__init__.py
|
cogito233/text-autoaugment
|
cae3cfddaba9da01cf291f975e5cf4f734634b51
|
[
"MIT"
] | 1
|
2021-09-08T12:00:11.000Z
|
2021-09-08T12:00:11.000Z
|
datasets/__init__.py
|
cogito233/text-autoaugment
|
cae3cfddaba9da01cf291f975e5cf4f734634b51
|
[
"MIT"
] | null | null | null |
datasets/__init__.py
|
cogito233/text-autoaugment
|
cae3cfddaba9da01cf291f975e5cf4f734634b51
|
[
"MIT"
] | null | null | null |
from .imdb import IMDB
from .sst5 import SST5
from .sst2 import SST2
from .trec import TREC
from .yelp2 import YELP2
from .yelp5 import YELP5
__all__ = ('IMDB', 'SST2', 'SST5', 'TREC', 'YELP2', 'YELP5')
def get_dataset(dataset_name, examples, tokenizer, text_transform=None):
    """Instantiate the dataset class matching *dataset_name* (case-insensitive).

    Raises KeyError if the name is not one of imdb/sst2/sst5/trec/yelp2/yelp5.
    """
    registry = {
        'imdb': IMDB,
        'sst2': SST2,
        'sst5': SST5,
        'trec': TREC,
        'yelp2': YELP2,
        'yelp5': YELP5,
    }
    dataset_cls = registry[dataset_name.lower()]
    return dataset_cls(examples, tokenizer, text_transform)
| 24.826087
| 73
| 0.640981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 76
| 0.1331
|
b892a5808d0820ec82e081a7e4b50b19b5c795cf
| 2,396
|
py
|
Python
|
src/pypleasant/artifacts.py
|
weibell/pypleasant
|
de635994113b6cee7e2c5cfb8a5078921a8805cf
|
[
"MIT"
] | 3
|
2021-01-09T13:45:23.000Z
|
2021-07-07T22:54:28.000Z
|
src/pypleasant/artifacts.py
|
weibell/pypleasant
|
de635994113b6cee7e2c5cfb8a5078921a8805cf
|
[
"MIT"
] | 2
|
2021-01-10T17:39:16.000Z
|
2021-01-19T11:43:12.000Z
|
src/pypleasant/artifacts.py
|
weibell/pypleasant
|
de635994113b6cee7e2c5cfb8a5078921a8805cf
|
[
"MIT"
] | 3
|
2021-01-10T11:59:51.000Z
|
2021-08-15T10:45:24.000Z
|
import base64
import pathlib
from collections import UserDict
from pypleasant.api import PleasantAPI
class Attachment:
    """A single file attached to a Pleasant Password Server entry.

    The file content is fetched from the API on demand, never cached.
    """

    def __init__(self, attachment_as_json: dict, api: PleasantAPI):
        self.api = api
        self._entry_id = attachment_as_json["CredentialObjectId"]
        self._attachment_id = attachment_as_json["AttachmentId"]
        self.name = attachment_as_json["FileName"]

    @property
    def data(self) -> bytes:
        """Raw attachment bytes, fetched from the API."""
        return self.api.get_attachment(self._entry_id, self._attachment_id)

    def __str__(self):
        """Base64-encoded attachment content."""
        encoded = base64.b64encode(self.data)
        return encoded.decode()

    def download(self, output_file_path: pathlib.Path = None):
        """Write the attachment to *output_file_path* (default: ./<name>)."""
        if output_file_path is None:
            output_file_path = pathlib.Path(f"./{self.name}")
        output_file_path.write_bytes(self.data)
class Attachments(UserDict):
    """Mapping of file name -> Attachment with bulk download support."""

    def download(self, output_dir: pathlib.Path = None):
        """Download every attachment into *output_dir*.

        Args:
            output_dir: target directory, created (including missing
                parents) if it does not exist. Defaults to
                ./pleasant_attachments.
        """
        output_dir = output_dir or pathlib.Path("./pleasant_attachments")
        # Bug fix: the original only handled a missing leaf directory —
        # mkdir() failed when a parent was missing, and raced if the
        # directory was created concurrently. parents/exist_ok cover both.
        output_dir.mkdir(parents=True, exist_ok=True)
        for file_name, attachment in self.data.items():
            attachment.download(output_dir / file_name)
class Entry:
    """A credential entry: metadata plus lazily-fetched password/attachments."""

    def __init__(self, entry_as_json, api: PleasantAPI):
        self.api = api
        self._entry_id = entry_as_json["Id"]
        self.name = entry_as_json["Name"]
        self.username = entry_as_json["Username"]
        self.url = entry_as_json["Url"]
        self.custom_fields = entry_as_json["CustomUserFields"]
        self.attachments = Attachments({
            item["FileName"]: Attachment(item, api)
            for item in entry_as_json["Attachments"]
        })

    @property
    def password(self) -> str:
        """The entry's password, fetched from the API on every access."""
        return self.api.get_credential(self._entry_id)
class Folder(UserDict):
    """A folder mapping child names to Entry and Folder objects (dict-like).

    Entries are inserted first, then subfolders; a subfolder sharing a name
    with an entry shadows it, matching the original merge order.
    """

    def __init__(self, folder_as_json: dict, api: PleasantAPI):
        self.name = folder_as_json["Name"]
        children = {}
        for entry_json in folder_as_json["Credentials"]:
            children[entry_json["Name"]] = Entry(entry_json, api)
        for subfolder_json in folder_as_json["Children"]:
            children[subfolder_json["Name"]] = Folder(subfolder_json, api)
        super().__init__(children)
class Database(Folder):
    """Root of the Pleasant Password Server tree.

    Fetches the entire folder/entry hierarchy once via ``api.get_db()`` and
    exposes it through the dict-like Folder interface.
    """
    def __init__(self, api: PleasantAPI):
        super().__init__(api.get_db(), api)
| 34.724638
| 101
| 0.680718
| 2,279
| 0.951169
| 0
| 0
| 209
| 0.087229
| 0
| 0
| 192
| 0.080134
|
b894aa5fcf5ee5c3c91a08a010d10cc426cae285
| 857
|
py
|
Python
|
npc/gui/util.py
|
Arent128/npc
|
c8a1e227a1d4d7c540c4f4427b611ffc290535ee
|
[
"MIT"
] | null | null | null |
npc/gui/util.py
|
Arent128/npc
|
c8a1e227a1d4d7c540c4f4427b611ffc290535ee
|
[
"MIT"
] | null | null | null |
npc/gui/util.py
|
Arent128/npc
|
c8a1e227a1d4d7c540c4f4427b611ffc290535ee
|
[
"MIT"
] | null | null | null |
# Helpers common to the gui
from contextlib import contextmanager
from PyQt5 import QtWidgets
@contextmanager
def safe_command(command):
    """
    Context manager that yields *command* and suppresses any AttributeError
    raised while the body runs.

    Args:
        command (callable): The command to run. Any AttributeError raised by
            the command (or the with-body) will be suppressed.

    Yields:
        callable: *command*, unchanged.
    """
    try:
        yield command
    except AttributeError:
        # Fix: the original bound the exception to an unused `err` variable;
        # suppression is intentional, so just swallow it.
        pass
def show_error(title, message, parent):
    """
    Helper to show a modal error window with a single OK button

    Args:
        title (str): Title for the error window
        message (str): Message text to display
        parent (object): Parent window for the modal. This window will be
            disabled while the modal is visible. (Required — despite the
            original docs, there is no default.)
    """
    QtWidgets.QMessageBox.warning(parent, title, message, QtWidgets.QMessageBox.Ok)
| 27.645161
| 83
| 0.679113
| 0
| 0
| 305
| 0.355893
| 321
| 0.374562
| 0
| 0
| 532
| 0.62077
|
b894e0eb0f3f3a5eab5eca43855c560fff5104ea
| 2,040
|
py
|
Python
|
meterbus/wtelegram_header.py
|
noda/pyMeterBus
|
a1bb6b6ef9b3db4583dfb2b154e4f65365dee9d9
|
[
"BSD-3-Clause"
] | 44
|
2016-12-11T14:43:14.000Z
|
2022-03-17T18:31:14.000Z
|
meterbus/wtelegram_header.py
|
noda/pyMeterBus
|
a1bb6b6ef9b3db4583dfb2b154e4f65365dee9d9
|
[
"BSD-3-Clause"
] | 13
|
2017-11-29T14:36:34.000Z
|
2020-12-20T18:33:35.000Z
|
meterbus/wtelegram_header.py
|
noda/pyMeterBus
|
a1bb6b6ef9b3db4583dfb2b154e4f65365dee9d9
|
[
"BSD-3-Clause"
] | 32
|
2015-09-15T12:23:19.000Z
|
2022-03-22T08:32:22.000Z
|
import simplejson as json
from .telegram_field import TelegramField
class WTelegramHeader(object):
    """Header of a wireless M-Bus telegram: the L (length) and C (control)
    fields, two bytes total.

    Start/CRC/stop field handling exists but is commented out throughout
    this class (presumably handled elsewhere or not needed for this frame
    format — TODO confirm).
    """

    def __init__(self):
        # self._startField = TelegramField()
        self._lField = TelegramField()   # L: length field
        self._cField = TelegramField()   # C: control field
        # self._crcField = TelegramField()
        # self._stopField = TelegramField()
        self._headerLength = 2           # bytes consumed by L + C
        # self._headerLengthCRCStop = 8

    @property
    def headerLength(self):
        """Number of header bytes (L + C)."""
        return self._headerLength

    # @property
    # def headerLengthCRCStop(self):
    #     return self._headerLengthCRCStop

    # NOTE(review): self._startField is never initialized (the line in
    # __init__ is commented out), so reading this property before the
    # setter has run raises AttributeError.
    @property
    def startField(self):
        return self._startField

    @startField.setter
    def startField(self, value):
        self._startField = TelegramField(value)

    @property
    def lField(self):
        return self._lField

    @lField.setter
    def lField(self, value):
        self._lField = TelegramField(value)

    @property
    def cField(self):
        return self._cField

    @cField.setter
    def cField(self, value):
        self._cField = TelegramField(value)

    @property
    def interpreted(self):
        """Human-readable view of the header fields as hex strings."""
        return {
            'length': hex(self.lField.parts[0]),
            'c': hex(self.cField.parts[0]),
        }

    # @property
    # def crcField(self):
    #     return self._crcField

    # @crcField.setter
    # def crcField(self, value):
    #     self._crcField = TelegramField(value)

    # @property
    # def stopField(self):
    #     return self._stopField

    # @stopField.setter
    # def stopField(self, value):
    #     self._stopField = TelegramField(value)

    def load(self, hat):
        """Populate the L and C fields from the first two header bytes.

        Accepts either a byte sequence or a str (converted char-by-char
        via ord()).
        """
        header = hat
        if isinstance(hat, str):
            header = list(map(ord, hat))
        # self.startField = header[0]
        self.lField = header[0]
        self.cField = header[1]
        # self.crcField = header[-2]
        # self.stopField = header[-1]

    def to_JSON(self):
        """Serialize the interpreted header as pretty-printed JSON."""
        return json.dumps(self.interpreted, sort_keys=False,
                          indent=4, use_decimal=True)
| 24
| 60
| 0.601471
| 1,969
| 0.965196
| 0
| 0
| 685
| 0.335784
| 0
| 0
| 615
| 0.301471
|
b8957a58c70fbb1e911970ddbd303c74a8951fba
| 2,966
|
py
|
Python
|
clitt/actions.py
|
Leviosar/tt
|
f6099ca77736d17f46121c76a0763d587536467e
|
[
"MIT"
] | null | null | null |
clitt/actions.py
|
Leviosar/tt
|
f6099ca77736d17f46121c76a0763d587536467e
|
[
"MIT"
] | null | null | null |
clitt/actions.py
|
Leviosar/tt
|
f6099ca77736d17f46121c76a0763d587536467e
|
[
"MIT"
] | null | null | null |
import tweepy
from .interface import show_message, show_tweet, show_user
def dm(api: tweepy.API, target: str, content: str):
    """
    Send a direct message to the target user.

    Keyword arguments:
    api -- API instance for handling the request
    target -- Target user's screename (e.g. @jake/jake)
    content -- String that will be sent as message
    """
    screen_name = target.replace("@", "")
    recipient = api.get_user(screen_name)
    api.send_direct_message(recipient.id, content)
def search(api: tweepy.API, query: str, count: int):
    """
    Search for tweets containing the input string and display each result.

    Keyword arguments:
    api -- API instance for handling the request
    query -- String passed as search query for the API
    count -- Maximum number of results the API will return
    """
    for tweet in api.search(query, count=count):
        show_tweet(tweet)
def user(api: tweepy.API, query: str, count: int):
    """
    Search for users related to the input string and display each match.

    Keyword arguments:
    api -- API instance for handling the request
    query -- String passed as search query for the API
    count -- Maximum number of results the API will return
    """
    matches = api.search_users(query, count=count)
    for match in matches:
        show_user(match)
def post(api: tweepy.API, content: str):
"""
Update the status for currently logged user (basically, this methods tweets)
Keyword arguments:
api -- API instance for handling the request
content -- String that will be posted
"""
api.update_status(content)
def chat(api: tweepy.API, user: str):
"""
Search and displays private chat with target user
Keyword arguments:
api -- API instance for handling the request
user -- Target user's screename (e.g. @jake/jake)
"""
try:
user = user.replace("@", "")
user = api.get_user(user)
me = api.me()
messages = api.list_direct_messages(count=100)
for message in sorted(
messages, key=lambda message: int(message.created_timestamp)
):
if int(message.message_create["sender_id"]) == user.id:
show_message(message, user)
if (
int(message.message_create["sender_id"]) == me.id
and int(message.message_create["target"]["recipient_id"]) == user.id
):
show_message(message, me, reverse=True)
except tweepy.TweepError:
print("Sorry, user not found")
def read(api: tweepy.API, count: int):
"""
Read currently logged user's timeline
Keyword arguments:
api -- API instance for handling the request
count -- Maximum number of results the API will return
"""
public_tweets = api.home_timeline(count=count)
for tweet in public_tweets:
show_tweet(tweet)
| 28.796117
| 84
| 0.619016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,501
| 0.506069
|
b89711b17746d4b4271b066247a24c7b87a987eb
| 5,711
|
py
|
Python
|
test/test_html.py
|
dominickpastore/pymd4c
|
7fac37348b1e2520532c83bcb84b9cfecbcdff0c
|
[
"MIT"
] | 7
|
2020-04-30T08:27:44.000Z
|
2022-02-09T12:23:07.000Z
|
test/test_html.py
|
dominickpastore/pymd4c
|
7fac37348b1e2520532c83bcb84b9cfecbcdff0c
|
[
"MIT"
] | 23
|
2020-05-29T14:58:46.000Z
|
2021-11-10T23:44:25.000Z
|
test/test_html.py
|
dominickpastore/pymd4c
|
7fac37348b1e2520532c83bcb84b9cfecbcdff0c
|
[
"MIT"
] | 2
|
2020-09-17T19:40:44.000Z
|
2021-07-13T16:43:18.000Z
|
# Based on spec_tests.py from
# https://github.com/commonmark/commonmark-spec/blob/master/test/spec_tests.py
# and
# https://github.com/github/cmark-gfm/blob/master/test/spec_tests.py
import sys
import os
import os.path
import re
import md4c
import md4c.domparser
import pytest
from normalize import normalize_html
extension_flags = {
'table': md4c.MD_FLAG_TABLES,
'urlautolink': md4c.MD_FLAG_PERMISSIVEURLAUTOLINKS,
'emailautolink': md4c.MD_FLAG_PERMISSIVEEMAILAUTOLINKS,
'wwwautolink': md4c.MD_FLAG_PERMISSIVEWWWAUTOLINKS,
'tasklist': md4c.MD_FLAG_TASKLISTS,
'strikethrough': md4c.MD_FLAG_STRIKETHROUGH,
'underline': md4c.MD_FLAG_UNDERLINE,
'wikilink': md4c.MD_FLAG_WIKILINKS,
'latexmath': md4c.MD_FLAG_LATEXMATHSPANS,
#TODO Add test cases for the rest of the flags
# (including combination flags)
}
def get_tests(specfile):
line_number = 0
start_line = 0
end_line = 0
example_number = 0
markdown_lines = []
html_lines = []
state = 0 # 0 regular text, 1 markdown example, 2 html output
extensions = []
headertext = ''
tests = []
header_re = re.compile('#+ ')
full_specfile = os.path.join(sys.path[0], 'spec', specfile)
with open(full_specfile, 'r', encoding='utf-8', newline='\n') as specf:
for line in specf:
line_number = line_number + 1
l = line.strip()
if l.startswith("`" * 32 + " example"):
state = 1
extensions = l[32 + len(" example"):].split()
elif l == "`" * 32:
state = 0
example_number = example_number + 1
end_line = line_number
md4c_version = None
for extension in extensions:
if extension.startswith('md4c-'):
md4c_version = extension
break
if md4c_version is not None:
extensions.remove(md4c_version)
md4c_version = md4c_version[5:]
if 'disabled' not in extensions:
tests.append({
"markdown":''.join(markdown_lines).replace('→',"\t"),
"html":''.join(html_lines).replace('→',"\t"),
"example": example_number,
"start_line": start_line,
"end_line": end_line,
"section": headertext,
"file": specfile,
"md4c_version": md4c_version,
"extensions": extensions})
start_line = 0
markdown_lines = []
html_lines = []
elif l == ".":
state = 2
elif state == 1:
if start_line == 0:
start_line = line_number - 1
markdown_lines.append(line)
elif state == 2:
html_lines.append(line)
elif state == 0 and re.match(header_re, line):
headertext = header_re.sub('', line).strip()
return tests
def collect_all_tests():
all_tests = []
specfiles = os.listdir(os.path.join(sys.path[0], 'spec'))
for specfile in specfiles:
all_tests.extend(get_tests(specfile))
return all_tests
def skip_if_older_version(running_version, test_version):
"""Skip the current test if the running version of MD4C is older than the
version required for the test
:param running_version: Running version of MD4C, e.g. "0.4.8"
:type running_version: str
:param test_version: Version of MD4C required for the test
:type test_version: str
"""
if running_version is None or test_version is None:
return
running_version = [int(x) for x in running_version.split('.')]
test_version = [int(x) for x in test_version.split('.')]
for r, t in zip(running_version, test_version):
if r < t:
pytest.skip()
for t in test_version[len(running_version):]:
if t > 0:
pytest.skip("Test requires newer MD4C")
@pytest.fixture
def md4c_version(pytestconfig):
return pytestconfig.getoption('--md4c-version')
@pytest.mark.parametrize(
'test_case', collect_all_tests(),
ids=lambda x: f'{x["file"]}:{x["start_line"]}-{x["section"]}')
def test_html_output(test_case, md4c_version):
"""Test HTMLRenderer with default render flags on the given example"""
skip_if_older_version(md4c_version, test_case['md4c_version'])
parser_flags = 0
for extension in test_case['extensions']:
parser_flags |= extension_flags[extension]
renderer = md4c.HTMLRenderer(parser_flags, 0)
output = renderer.parse(test_case['markdown'])
assert normalize_html(output) == normalize_html(test_case['html'], False)
@pytest.mark.parametrize(
'test_case', collect_all_tests(),
ids=lambda x: f'{x["file"]}:{x["start_line"]}-{x["section"]}')
def test_domparser_html(test_case, md4c_version):
"""Test that the output for DOMParser render() matches HTMLRenderer char
for char"""
skip_if_older_version(md4c_version, test_case['md4c_version'])
parser_flags = 0
for extension in test_case['extensions']:
parser_flags |= extension_flags[extension]
html_renderer = md4c.HTMLRenderer(parser_flags)
html_output = html_renderer.parse(test_case['markdown'])
dom_parser = md4c.domparser.DOMParser(parser_flags)
dom_output = dom_parser.parse(test_case['markdown']).render()
assert html_output == dom_output
#TODO Test keyword arguments for flags
#TODO Test HTML flags
#TODO Test mixing keyword arguments and traditional flags
| 33.994048
| 78
| 0.615304
| 0
| 0
| 0
| 0
| 1,454
| 0.254418
| 0
| 0
| 1,434
| 0.250919
|
b897120ea53c19ba1923fce20e96449f3e0b8393
| 1,737
|
py
|
Python
|
codes/fetch.py
|
Pregaine/debian
|
9f4838d0eb9f38c9b8d3bf035a74f7d713bf8a95
|
[
"Linux-OpenIB"
] | null | null | null |
codes/fetch.py
|
Pregaine/debian
|
9f4838d0eb9f38c9b8d3bf035a74f7d713bf8a95
|
[
"Linux-OpenIB"
] | null | null | null |
codes/fetch.py
|
Pregaine/debian
|
9f4838d0eb9f38c9b8d3bf035a74f7d713bf8a95
|
[
"Linux-OpenIB"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Usage: Download all stock code info from TWSE
#
# TWSE equities = 上市證券
# TPEx equities = 上櫃證券
#
import csv
from collections import namedtuple
import requests
from lxml import etree
TWSE_EQUITIES_URL = 'http://isin.twse.com.tw/isin/C_public.jsp?strMode=2'
TPEX_EQUITIES_URL = 'http://isin.twse.com.tw/isin/C_public.jsp?strMode=4'
ROW = namedtuple( 'Row', [ 'type', 'code', 'name', 'ISIN', 'start', 'market', 'group', 'CFI' ] )
def make_row_tuple(typ, row):
"""u'\u3000′是全角空格的unicode编码"""
code, name = row[ 1 ].split( '\u3000' )
code = code.replace( ' ', '' )
name = name.replace( ' ', '' )
return ROW( typ, code, name, *row[ 2:-1 ] )
def fetch_data(url):
r = requests.get(url)
print( r.url )
root = etree.HTML( r.text )
trs = root.xpath('//tr')[1:]
result = []
typ = ''
for tr in trs:
tr = list( map( lambda x: x.text, tr.iter( ) ) )
if len(tr) == 4:
# This is type
typ = tr[2].strip(' ')
else:
# This is the row data
result.append( make_row_tuple( typ, tr ) )
return result
def to_csv( url, path ):
data = fetch_data( url )
print( 'Save File Path {}'.format( path ) )
with open( path, 'w', newline='', encoding='utf_8' ) as csvfile:
writer = csv.writer( csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL )
writer.writerow( data[0]._fields )
for d in data:
writer.writerow( [ _ for _ in d ] )
def GetFile( path ):
to_csv( TWSE_EQUITIES_URL, path )
if __name__ == '__main__':
to_csv( TWSE_EQUITIES_URL, 'twse_equities.csv' )
to_csv( TPEX_EQUITIES_URL, 'tpex_equities.csv' )
| 27.140625
| 96
| 0.580887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 493
| 0.278374
|
b898caf8e8371912904209cfea669349d7d43e84
| 453
|
py
|
Python
|
SimplePyGA/FitnessCalc/__init__.py
|
UglySoftware/SimplePyGA
|
2cc0ef5709800059b323de2be6ea8bf77fb94384
|
[
"MIT"
] | 1
|
2019-09-03T17:52:12.000Z
|
2019-09-03T17:52:12.000Z
|
SimplePyGA/FitnessCalc/__init__.py
|
UglySoftware/SimplePyGA
|
2cc0ef5709800059b323de2be6ea8bf77fb94384
|
[
"MIT"
] | null | null | null |
SimplePyGA/FitnessCalc/__init__.py
|
UglySoftware/SimplePyGA
|
2cc0ef5709800059b323de2be6ea8bf77fb94384
|
[
"MIT"
] | 1
|
2019-09-03T17:52:13.000Z
|
2019-09-03T17:52:13.000Z
|
#-----------------------------------------------------------------------
#
# __init__.py (FitnessCalc)
#
# FitnessCalc package init module
#
# Copyright and Distribution
#
# Part of SimplePyGA: Simple Genetic Algorithms in Python
# Copyright (c) 2016 Terry McKiernan (terry@mckiernan.com)
# Released under The MIT License
# See LICENSE file in top-level package folder
#
#-----------------------------------------------------------------------
| 32.357143
| 72
| 0.503311
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 440
| 0.971302
|
b898de5e2e4348a76809bd0da7631a2cc93a7b25
| 3,757
|
py
|
Python
|
pyaz/billing/invoice/section/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/billing/invoice/section/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/billing/invoice/section/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
'''
billing invoice section
'''
from .... pyaz_utils import _call_az
def list(account_name, profile_name):
'''
List the invoice sections that a user has access to. The operation is supported only for billing accounts with agreement type Microsoft Customer Agreement.
Required Parameters:
- account_name -- The ID that uniquely identifies a billing account.
- profile_name -- The ID that uniquely identifies a billing profile.
'''
return _call_az("az billing invoice section list", locals())
def show(account_name, name, profile_name):
'''
Get an invoice section by its ID. The operation is supported only for billing accounts with agreement type Microsoft Customer Agreement.
Required Parameters:
- account_name -- The ID that uniquely identifies a billing account.
- name -- The ID that uniquely identifies an invoice section.
- profile_name -- The ID that uniquely identifies a billing profile.
'''
return _call_az("az billing invoice section show", locals())
def create(account_name, name, profile_name, display_name=None, labels=None, no_wait=None):
'''
Creates or updates an invoice section. The operation is supported only for billing accounts with agreement type Microsoft Customer Agreement.
Required Parameters:
- account_name -- The ID that uniquely identifies a billing account.
- name -- The ID that uniquely identifies an invoice section.
- profile_name -- The ID that uniquely identifies a billing profile.
Optional Parameters:
- display_name -- The name of the invoice section.
- labels -- Dictionary of metadata associated with the invoice section. Expect value: KEY1=VALUE1 KEY2=VALUE2 ...
- no_wait -- Do not wait for the long-running operation to finish.
'''
return _call_az("az billing invoice section create", locals())
def update(account_name, name, profile_name, display_name=None, labels=None, no_wait=None):
'''
Creates or updates an invoice section. The operation is supported only for billing accounts with agreement type Microsoft Customer Agreement.
Required Parameters:
- account_name -- The ID that uniquely identifies a billing account.
- name -- The ID that uniquely identifies an invoice section.
- profile_name -- The ID that uniquely identifies a billing profile.
Optional Parameters:
- display_name -- The name of the invoice section.
- labels -- Dictionary of metadata associated with the invoice section. Expect value: KEY1=VALUE1 KEY2=VALUE2 ...
- no_wait -- Do not wait for the long-running operation to finish.
'''
return _call_az("az billing invoice section update", locals())
def wait(account_name, name, profile_name, created=None, custom=None, deleted=None, exists=None, interval=None, timeout=None, updated=None):
'''
Place the CLI in a waiting state until a condition of the billing invoice section is met.
Required Parameters:
- account_name -- The ID that uniquely identifies a billing account.
- name -- The ID that uniquely identifies an invoice section.
- profile_name -- The ID that uniquely identifies a billing profile.
Optional Parameters:
- created -- wait until created with 'provisioningState' at 'Succeeded'
- custom -- Wait until the condition satisfies a custom JMESPath query. E.g. provisioningState!='InProgress', instanceView.statuses[?code=='PowerState/running']
- deleted -- wait until deleted
- exists -- wait until the resource exists
- interval -- polling interval in seconds
- timeout -- maximum wait in seconds
- updated -- wait until updated with provisioningState at 'Succeeded'
'''
return _call_az("az billing invoice section wait", locals())
| 45.26506
| 164
| 0.730104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,117
| 0.829651
|
b89c391348948a67ee076b201c2356ffbd5b2843
| 418
|
py
|
Python
|
fifth.py
|
leephoter/coding-exam
|
a95fdd6e8477651da811b5b5a93b7214914e9418
|
[
"MIT"
] | null | null | null |
fifth.py
|
leephoter/coding-exam
|
a95fdd6e8477651da811b5b5a93b7214914e9418
|
[
"MIT"
] | null | null | null |
fifth.py
|
leephoter/coding-exam
|
a95fdd6e8477651da811b5b5a93b7214914e9418
|
[
"MIT"
] | null | null | null |
# abba
# foo bar bar foo
text1 = list(input())
text2 = input().split()
# text1 = set(text1)
print(text1)
print(text2)
for i in range(len(text1)):
if text1[i] == "a":
text1[i] = 1
else:
text1[i] = 0
for i in range(len(text2)):
if text2[i] == "foo":
text2[i] = 1
else:
text2[i] = 0
print(text1)
print(text2)
if (text1 == text2):
print(True)
else:
print(False)
| 14.928571
| 27
| 0.543062
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 51
| 0.12201
|
b89c5decc2d125e57179ddb7e0fbbf5b7fa1d17a
| 864
|
py
|
Python
|
login_checks.py
|
mhhoban/basic-blog
|
107d6df7c8374ae088097780a15364bb96394664
|
[
"MIT"
] | null | null | null |
login_checks.py
|
mhhoban/basic-blog
|
107d6df7c8374ae088097780a15364bb96394664
|
[
"MIT"
] | null | null | null |
login_checks.py
|
mhhoban/basic-blog
|
107d6df7c8374ae088097780a15364bb96394664
|
[
"MIT"
] | null | null | null |
"""
Methods for user login
"""
from cgi import escape
from google.appengine.ext import ndb
def login_fields_complete(post_data):
"""
validates that both login fields were filled in
:param post_data:
:return:
"""
try:
user_id = escape(post_data['user_id'], quote=True)
except KeyError:
user_id = False
try:
password = escape(post_data['password'], quote=True)
except KeyError:
password = False
if user_id and password:
return {'complete': True, 'user_id': user_id, 'password': password}
else:
return {'complete': False}
def valid_user_id_check(user_id):
"""
checks that user exists
:param user_id:
:return:
"""
user_key = ndb.Key('User', user_id)
user = user_key.get()
if user:
return True
else:
return False
| 18
| 75
| 0.611111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 264
| 0.305556
|
b89c8ecff52061b4f4230988ae5b6f0af41cff09
| 1,988
|
py
|
Python
|
models/lstm_hands_enc_dec.py
|
amjltc295/hand_track_classification
|
71fdc980d3150646cd531e28878ff1eb63c7efea
|
[
"MIT"
] | 6
|
2019-07-08T12:01:17.000Z
|
2021-11-01T06:01:28.000Z
|
models/lstm_hands_enc_dec.py
|
georkap/hand_track_classification
|
962faa1697864e892475989a97fa6ed9c2f1d7b3
|
[
"MIT"
] | null | null | null |
models/lstm_hands_enc_dec.py
|
georkap/hand_track_classification
|
962faa1697864e892475989a97fa6ed9c2f1d7b3
|
[
"MIT"
] | 3
|
2019-07-08T12:25:45.000Z
|
2020-06-05T20:27:57.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 13:20:51 2018
lstm encoder decoder for hands
@author: Γιώργος
"""
import torch
import torch.nn as nn
from utils.file_utils import print_and_save
class LSTM_Hands_encdec(nn.Module):
# source: https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/02-intermediate/bidirectional_recurrent_neural_network/main.py
def __init__(self, input_size, row_hidden, time_hidden, num_layers, num_classes, dropout, log_file=None):
super(LSTM_Hands_encdec, self).__init__()
self.row_hidden = row_hidden
self.time_hidden = time_hidden
self.num_layers = num_layers
self.dropout = dropout
self.log_file=log_file
self.row_lstm = nn.LSTM(input_size, row_hidden, 1,
bias=True, batch_first=False, dropout=dropout, bidirectional=False)
self.time_lstm = nn.LSTM(row_hidden, time_hidden, num_layers,
bias=True, batch_first=False, dropout=dropout, bidirectional=False)
self.fc = nn.Linear(time_hidden, num_classes)
def forward(self, seq_height_width, seq_lengths):
# seq_batch_coords 256, x, 456
h0_row = torch.zeros(1, 1, self.row_hidden).cuda()
c0_row = torch.zeros(1, 1, self.row_hidden).cuda()
h0_time = torch.zeros(self.num_layers, 1, self.time_hidden).cuda()
c0_time = torch.zeros(self.num_layers, 1, self.time_hidden).cuda()
im_hiddens = []
for i in range(seq_height_width.size(0)):
row_out, _ = self.row_lstm(seq_height_width[i].unsqueeze(1), (h0_row, c0_row))
im_hiddens.append(row_out[-1]) # can also concatenate the hiddens for an image
time_input = torch.stack(im_hiddens)#.unsqueeze(1)
time_out, _ = self.time_lstm(time_input, (h0_time, c0_time))
out = self.fc(time_out[-1])
return out
| 37.509434
| 141
| 0.640845
| 1,788
| 0.896241
| 0
| 0
| 0
| 0
| 0
| 0
| 351
| 0.17594
|
b89dc31dd7495dce4a5df9abb3cb76e616316b5e
| 2,201
|
py
|
Python
|
My_Simple_Chatroom/MyChat_C1.py
|
WilliamWuLH/Network_Basic_Programming
|
284f8d3664340b0270271da5c50d5b8bb7ce8534
|
[
"MIT"
] | 1
|
2020-11-29T14:56:22.000Z
|
2020-11-29T14:56:22.000Z
|
My_Simple_Chatroom/MyChat_C1.py
|
WilliamWuLH/Network_Basic_Programming
|
284f8d3664340b0270271da5c50d5b8bb7ce8534
|
[
"MIT"
] | null | null | null |
My_Simple_Chatroom/MyChat_C1.py
|
WilliamWuLH/Network_Basic_Programming
|
284f8d3664340b0270271da5c50d5b8bb7ce8534
|
[
"MIT"
] | null | null | null |
import socket
import sys
import os
ip_port = ('127.0.0.1',6666)
sk = socket.socket()
sk.bind(ip_port)
sk.listen(5)
def FTP_send(conn):
path = input('Path:')
file_name = os.path.basename(path)
file_size=os.stat(path).st_size
Informf=(file_name+'|'+str(file_size))
conn.send(Informf.encode())
receive = conn.recv(1024).decode()
if receive == 'n':
print("OVER")
return
send_size = 0
with open(path,'rb') as f:
Flag = True
while Flag:
if send_size + 1024 >file_size:
data = f.read(file_size-send_size)
Flag = False
else:
data = f.read(1024)
send_size+=1024
conn.send(data)
print('send successed !')
f.close()
def FTP_receive(conn, pre_data):
base_path = 'C:\\Users\\William Wu\\Desktop'
file_name,file_size = pre_data.split('|')
print("Have a file --> name : ", file_name, " size : ", file_size)
ans = input("Do you receive ? (y/n) ")
conn.sendall(ans.encode())
if ans == 'n':
print("OVER")
return
recv_size = 0
file_dir = os.path.join(base_path,file_name)
with open(file_dir,'wb') as f:
Flag = True
while Flag:
if int(file_size)>recv_size:
data = conn.recv(1024)
recv_size+=len(data)
f.write(data)
else:
recv_size = 0
Flag = False
print('receive successed !')
f.close()
while True:
print('client_1 waiting...')
conn,addr = sk.accept()
client_data = conn.recv(1024).decode()
if '|' in client_data:
FTP_receive(conn, client_data)
else:
print(client_data)
model = input("MODEL: 1:chat 2:send file 3:exit\nYour choice : ")
if model == '1':
massage = input("Client_1 : ").strip()
conn.sendall(("Client_1 : "+massage).encode())
elif model == '2':
FTP_send(conn)
elif model == '3':
conn.close()
break
else:
print('error !')
conn.close()
break
#C:\\Users\\William Wu\\Desktop\\WLH.txt
| 26.518072
| 78
| 0.531122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 352
| 0.159927
|
b89ddcfaf0bb84573dcaa412350a05a29d779e4c
| 557
|
py
|
Python
|
2020/day14-1.py
|
alvaropp/AdventOfCode2017
|
2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad
|
[
"MIT"
] | null | null | null |
2020/day14-1.py
|
alvaropp/AdventOfCode2017
|
2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad
|
[
"MIT"
] | null | null | null |
2020/day14-1.py
|
alvaropp/AdventOfCode2017
|
2827dcc18ecb9ad59a1a5fe11e469f31bafb74ad
|
[
"MIT"
] | null | null | null |
import re
with open("day14.txt", "r") as f:
data = f.read().splitlines()
def apply_mask(mask, value):
binary_value = f"{value:>036b}"
masked_value = "".join(
value if mask_value == "X" else mask_value
for value, mask_value in zip(binary_value, mask)
)
return int(masked_value, 2)
memory = {}
for line in data:
if "mask" in line:
mask = line.split(" = ")[-1]
else:
address, value = re.findall("(\d+)", line)
memory[address] = apply_mask(mask, int(value))
print(sum(memory.values()))
| 21.423077
| 56
| 0.59246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 53
| 0.095153
|
b89e8a9c6f13124a3751f523a8aebbcf0178bdb6
| 2,429
|
py
|
Python
|
heppy/modules/host.py
|
bladeroot/heppy
|
b597916ff80890ca057b17cdd156e90bbbd9a87a
|
[
"BSD-3-Clause"
] | null | null | null |
heppy/modules/host.py
|
bladeroot/heppy
|
b597916ff80890ca057b17cdd156e90bbbd9a87a
|
[
"BSD-3-Clause"
] | null | null | null |
heppy/modules/host.py
|
bladeroot/heppy
|
b597916ff80890ca057b17cdd156e90bbbd9a87a
|
[
"BSD-3-Clause"
] | null | null | null |
from ..Module import Module
from ..TagData import TagData
class host(Module):
opmap = {
'infData': 'descend',
'chkData': 'descend',
'creData': 'descend',
'roid': 'set',
'name': 'set',
'clID': 'set',
'crID': 'set',
'upID': 'set',
'crDate': 'set',
'upDate': 'set',
'exDate': 'set',
'trDate': 'set',
}
### RESPONSE parsing
def parse_cd(self, response, tag):
return self.parse_cd_tag(response, tag)
def parse_addr(self, response, tag):
response.put_to_list('ips', tag.text)
### REQUEST rendering
def render_check(self, request, data):
self.render_check_command(request, data, 'name')
def render_info(self, request, data):
self.render_command_with_fields(request, 'info', [
TagData('name', data.get('name'))
])
def render_create(self, request, data):
command = self.render_command_with_fields(request, 'create', [
TagData('name', data.get('name'))
])
self.render_ips(request, data.get('ips', []), command)
def render_delete(self, request, data):
self.render_command_with_fields(request, 'delete', [
TagData('name', data.get('name'))
])
def render_update(self, request, data):
command = self.render_command_with_fields(request, 'update', [
TagData('name', data.get('name'))
])
if 'add' in data:
self.render_update_section(request, data.get('add'), command, 'add')
if 'rem' in data:
self.render_update_section(request, data.get('rem'), command, 'rem')
if 'chg' in data:
self.render_update_section(request, data.get('chg'), command, 'chg')
def render_update_section(self, request, data, command, operation):
element = request.add_subtag(command, 'host:' + operation)
if operation == 'chg':
request.add_subtag(element, 'host:name', text=data.get('name'))
else:
self.render_ips(request, data.get('ips', []), element)
self.render_statuses(request, element, data.get('statuses', {}))
def render_ips(self, request, ips, parent):
for ip in ips:
request.add_subtag(parent, 'host:addr', {'ip': 'v6' if ':' in ip else 'v4'}, ip)
| 33.273973
| 92
| 0.559078
| 2,368
| 0.974887
| 0
| 0
| 0
| 0
| 0
| 0
| 411
| 0.169205
|
b89ead298075031fa1c3f90802815475e6fa1de6
| 594
|
py
|
Python
|
setup.py
|
ANich/patois-stopwords
|
37e63a0d9df60c7273dd7664a024e02cfcfb04c7
|
[
"MIT"
] | null | null | null |
setup.py
|
ANich/patois-stopwords
|
37e63a0d9df60c7273dd7664a024e02cfcfb04c7
|
[
"MIT"
] | null | null | null |
setup.py
|
ANich/patois-stopwords
|
37e63a0d9df60c7273dd7664a024e02cfcfb04c7
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='patois-stop-words',
version='0.0.1',
description='A list of patois stop words.',
long_description=open('README.md').read(),
license='MIT',
author='Alexander Nicholson',
author_email='alexj.nich@hotmail.com',
url='https://github.com/ANich/patois-stop-words',
packages=find_packages(),
package_data={
'patois_stop_words': ['words.txt']
},
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
],
keywords='patois'
)
| 27
| 53
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 269
| 0.452862
|
b8a056a06ca2c0eb3d9388de7c316004827053f2
| 6,924
|
py
|
Python
|
ls/joyous/tests/test_manythings.py
|
tjwalch/ls.joyous
|
0ee50d3af71c066bddb2310948b02f74b52ee253
|
[
"BSD-3-Clause"
] | 72
|
2018-03-16T16:35:08.000Z
|
2022-03-23T08:09:33.000Z
|
polrev/ls/joyous/tests/test_manythings.py
|
polrev-github/polrev-django
|
99108ace1a5307b14c3eccb424a9f9616e8c02ae
|
[
"MIT"
] | 41
|
2018-03-25T20:36:52.000Z
|
2022-03-10T08:59:27.000Z
|
polrev/ls/joyous/tests/test_manythings.py
|
polrev-github/polrev-django
|
99108ace1a5307b14c3eccb424a9f9616e8c02ae
|
[
"MIT"
] | 28
|
2018-08-13T22:36:09.000Z
|
2022-03-17T12:24:15.000Z
|
# ------------------------------------------------------------------------------
# Test Many Things Utilities
# ------------------------------------------------------------------------------
import sys
import datetime as dt
import pytz
from django.test import TestCase
from django.utils import translation
from ls.joyous.utils.manythings import (toOrdinal, toTheOrdinal,
toDaysOffsetStr, hrJoin)
# ------------------------------------------------------------------------------
class Test(TestCase):
def testToOrdinal(self):
self.assertEqual(toOrdinal(-1), "last")
self.assertEqual(toOrdinal(-2), "penultimate")
self.assertEqual(toOrdinal(1), "first")
self.assertEqual(toOrdinal(2), "second")
self.assertEqual(toOrdinal(3), "third")
self.assertEqual(toOrdinal(4), "fourth")
self.assertEqual(toOrdinal(5), "fifth")
def testToOrdinalNum(self):
self.assertEqual(toOrdinal(6), "6th")
self.assertEqual(toOrdinal(11), "11th")
self.assertEqual(toOrdinal(12), "12th")
self.assertEqual(toOrdinal(13), "13th")
self.assertEqual(toOrdinal(21), "21st")
self.assertEqual(toOrdinal(102), "102nd")
self.assertEqual(toOrdinal(6543), "6543rd")
def testToTheOrdinal(self):
self.assertEqual(toTheOrdinal(-1), "The last")
self.assertEqual(toTheOrdinal(-2, False), "the penultimate")
self.assertEqual(toTheOrdinal(1), "The first")
self.assertEqual(toTheOrdinal(2), "The second")
self.assertEqual(toTheOrdinal(3), "The third")
self.assertEqual(toTheOrdinal(4), "The fourth")
self.assertEqual(toTheOrdinal(5), "The fifth")
def testToTheOrdinalNum(self):
self.assertEqual(toTheOrdinal(6), "The 6th")
self.assertEqual(toTheOrdinal(11), "The 11th")
self.assertEqual(toTheOrdinal(12), "The 12th")
self.assertEqual(toTheOrdinal(13), "The 13th")
self.assertEqual(toTheOrdinal(21), "The 21st")
self.assertEqual(toTheOrdinal(102), "The 102nd")
self.assertEqual(toTheOrdinal(6543), "The 6543rd")
def testToDaysOffsetStr(self):
self.assertEqual(toDaysOffsetStr(-3), "Three days before")
self.assertEqual(toDaysOffsetStr(-2), "Two days before")
self.assertEqual(toDaysOffsetStr(-1), "The day before")
self.assertEqual(toDaysOffsetStr(0), "")
self.assertEqual(toDaysOffsetStr(1), "The day after")
self.assertEqual(toDaysOffsetStr(2), "Two days after")
self.assertEqual(toDaysOffsetStr(3), "Three days after")
self.assertEqual(toDaysOffsetStr(25), "Twenty-five days after")
def testHumanReadableJoin(self):
self.assertEqual(hrJoin([""]), "")
self.assertEqual(hrJoin(["ice"]), "ice")
self.assertEqual(hrJoin(["ice", "fire"]), "ice and fire")
self.assertEqual(hrJoin(["wind", "ice", "fire"]),
"wind, ice and fire")
self.assertEqual(hrJoin(["dog", "cat", "hen", "yak", "ant"]),
"dog, cat, hen, yak and ant")
# ------------------------------------------------------------------------------
class TestFrançais(TestCase):
def setUp(self):
translation.activate('fr')
def tearDown(self):
translation.deactivate()
def testToOrdinal(self):
self.assertEqual(toOrdinal(-1), "dernier")
self.assertEqual(toOrdinal(-2), "avant-dernier")
self.assertEqual(toOrdinal (1), "premier")
self.assertEqual(toOrdinal (2), "deuxième")
self.assertEqual(toOrdinal (3), "troisième")
self.assertEqual(toOrdinal (4), "quatrième")
self.assertEqual(toOrdinal (5), "cinquième")
def testToOrdinalNum(self):
self.assertEqual(toOrdinal(6), "6me")
self.assertEqual(toOrdinal(11), "11me")
self.assertEqual(toOrdinal(12), "12me")
self.assertEqual(toOrdinal(13), "13me")
self.assertEqual(toOrdinal(21), "21me")
self.assertEqual(toOrdinal(102), "102me")
self.assertEqual(toOrdinal(6543), "6543me")
def testToTheOrdinal(self):
self.assertEqual(toTheOrdinal(-1), "Le dernier")
self.assertEqual(toTheOrdinal(-2, True), "L'avant-dernier")
self.assertEqual(toTheOrdinal(-2, False), "l'avant-dernier")
self.assertEqual(toTheOrdinal(1), "La premier")
self.assertEqual(toTheOrdinal(2, False), "la deuxième")
self.assertEqual(toTheOrdinal(3), "Le troisième")
self.assertEqual(toTheOrdinal(4), "Le quatrième")
self.assertEqual(toTheOrdinal(5), "Le cinquième")
def testToTheOrdinalNum(self):
self.assertEqual(toTheOrdinal(6), "La 6me")
self.assertEqual(toTheOrdinal(11), "La 11me")
self.assertEqual(toTheOrdinal(12), "La 12me")
self.assertEqual(toTheOrdinal(13), "La 13me")
self.assertEqual(toTheOrdinal(21), "La 21me")
self.assertEqual(toTheOrdinal(102), "La 102me")
self.assertEqual(toTheOrdinal(6543), "La 6543me")
def testToDaysOffsetStr(self):
self.assertEqual(toDaysOffsetStr(-3), "Trois jours avant")
self.assertEqual(toDaysOffsetStr(-2), "Deux jours avant")
self.assertEqual(toDaysOffsetStr(-1), "Le jour précédent")
self.assertEqual(toDaysOffsetStr(0), "")
self.assertEqual(toDaysOffsetStr(1), "Le jour après")
self.assertEqual(toDaysOffsetStr(2), "Deux jours après")
self.assertEqual(toDaysOffsetStr(3), "Trois jours après")
self.assertEqual(toDaysOffsetStr(25), "Vingt-cinq jours après")
def testHumanReadableJoin(self):
self.assertEqual(hrJoin([""]), "")
self.assertEqual(hrJoin (["glace"]), "glace")
self.assertEqual(hrJoin (["glace", "feu"]), "glace et feu")
self.assertEqual(hrJoin (["vent", "glace", "feu"]),
"vent, glace et feu")
self.assertEqual(hrJoin (["chien", "chat", "poule", "yak", "fourmi"]),
"chien, chat, poule, yak et fourmi")
# ------------------------------------------------------------------------------
class TestΕλληνικά(TestCase):
def setUp(self):
translation.activate('el')
def tearDown(self):
translation.deactivate()
def testToOrdinal(self):
self.assertEqual(toOrdinal(-1), "τελευταίος")
self.assertEqual(toOrdinal(-2), "προτελευταία")
self.assertEqual(toOrdinal (1), "τελευταίο")
self.assertEqual(toOrdinal (2), "προτελευταία")
self.assertEqual(toOrdinal (3), "πρώτη")
self.assertEqual(toOrdinal (4), "δεύτερη")
self.assertEqual(toOrdinal (5), "τρίτη")
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
| 45.254902
| 80
| 0.579434
| 6,078
| 0.867418
| 0
| 0
| 0
| 0
| 0
| 0
| 1,918
| 0.273726
|
b8a247344049a96e9c957980e4d29c8b38b429af
| 21
|
py
|
Python
|
sgqlc/__init__.py
|
pberthonneau/sgqlc
|
6fb29d381239ba9256589cf177d236eb79b3f8cc
|
[
"ISC"
] | null | null | null |
sgqlc/__init__.py
|
pberthonneau/sgqlc
|
6fb29d381239ba9256589cf177d236eb79b3f8cc
|
[
"ISC"
] | null | null | null |
sgqlc/__init__.py
|
pberthonneau/sgqlc
|
6fb29d381239ba9256589cf177d236eb79b3f8cc
|
[
"ISC"
] | null | null | null |
__version__ = '10.0'
| 10.5
| 20
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.285714
|
b8a2fb720f101a2bc0adde648afe4fb533a5d387
| 5,919
|
py
|
Python
|
brew/migrations/0007_auto_20180307_1842.py
|
williamlagos/brauerei
|
9ba1e22a45ea4f9cb4a58ee02a3149526318e523
|
[
"MIT"
] | null | null | null |
brew/migrations/0007_auto_20180307_1842.py
|
williamlagos/brauerei
|
9ba1e22a45ea4f9cb4a58ee02a3149526318e523
|
[
"MIT"
] | null | null | null |
brew/migrations/0007_auto_20180307_1842.py
|
williamlagos/brauerei
|
9ba1e22a45ea4f9cb4a58ee02a3149526318e523
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-03-07 18:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('brew', '0006_auto_20180109_0535'),
]
operations = [
migrations.AlterModelOptions(
name='message',
options={'verbose_name': 'Mensagem', 'verbose_name_plural': 'Mensagens'},
),
migrations.AlterModelOptions(
name='product',
options={'verbose_name': 'Produto', 'verbose_name_plural': 'Produtos'},
),
migrations.AlterModelOptions(
name='profile',
options={'verbose_name': 'Perfil', 'verbose_name_plural': 'Perfis'},
),
migrations.AlterModelOptions(
name='request',
options={'verbose_name': 'Pedido', 'verbose_name_plural': 'Pedidos'},
),
migrations.AlterModelOptions(
name='stock',
options={'verbose_name': 'Estoque', 'verbose_name_plural': 'Estoques'},
),
migrations.AlterField(
model_name='message',
name='email',
field=models.EmailField(max_length=254, verbose_name=b'E-mail'),
),
migrations.AlterField(
model_name='message',
name='name',
field=models.CharField(max_length=128, verbose_name=b'Nome'),
),
migrations.AlterField(
model_name='message',
name='text',
field=models.TextField(verbose_name=b'Texto'),
),
migrations.AlterField(
model_name='product',
name='description',
field=models.TextField(verbose_name=b'Descritivo'),
),
migrations.AlterField(
model_name='product',
name='name',
field=models.CharField(max_length=128, verbose_name=b'Nome'),
),
migrations.AlterField(
model_name='product',
name='photo',
field=models.ImageField(upload_to=b'', verbose_name=b'Foto'),
),
migrations.AlterField(
model_name='product',
name='sku',
field=models.CharField(max_length=64, verbose_name=b'SKU'),
),
migrations.AlterField(
model_name='profile',
name='address',
field=models.TextField(default=b'', verbose_name=b'Logradouro'),
),
migrations.AlterField(
model_name='profile',
name='description',
field=models.TextField(default=b'', verbose_name=b'Descritivo'),
),
migrations.AlterField(
model_name='profile',
name='lat',
field=models.FloatField(default=0.0, verbose_name=b'Latitude'),
),
migrations.AlterField(
model_name='profile',
name='lon',
field=models.FloatField(default=0.0, verbose_name=b'Longitude'),
),
migrations.AlterField(
model_name='profile',
name='name',
field=models.CharField(default=b'', max_length=128, verbose_name=b'Nome'),
),
migrations.AlterField(
model_name='profile',
name='phone',
field=models.CharField(max_length=64, verbose_name=b'Telefone'),
),
migrations.AlterField(
model_name='profile',
name='photo',
field=models.ImageField(upload_to=b'', verbose_name=b'Foto'),
),
migrations.AlterField(
model_name='profile',
name='rank',
field=models.IntegerField(verbose_name=b'Grau'),
),
migrations.AlterField(
model_name='profile',
name='side',
field=models.IntegerField(verbose_name=b'Tipo'),
),
migrations.AlterField(
model_name='profile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name=b'Cliente'),
),
migrations.AlterField(
model_name='request',
name='client',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='client', to='brew.Profile', verbose_name=b'Cliente'),
),
migrations.AlterField(
model_name='request',
name='estimated',
field=models.DateTimeField(verbose_name=b'Prazo'),
),
migrations.AlterField(
model_name='request',
name='products',
field=models.ManyToManyField(related_name='request_products', to='brew.Product', verbose_name=b'Produtos'),
),
migrations.AlterField(
model_name='request',
name='provider',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='provider', to='brew.Profile', verbose_name=b'Fornecedor'),
),
migrations.AlterField(
model_name='stock',
name='key',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='brew.Product', verbose_name=b'Produto'),
),
migrations.AlterField(
model_name='stock',
name='provider',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name=b'Fornecedor'),
),
migrations.AlterField(
model_name='stock',
name='quantity',
field=models.IntegerField(verbose_name=b'Quantidade'),
),
migrations.AlterField(
model_name='stock',
name='value',
field=models.FloatField(verbose_name=b'Valor'),
),
]
| 36.312883
| 164
| 0.573746
| 5,695
| 0.962156
| 0
| 0
| 0
| 0
| 0
| 0
| 1,168
| 0.197331
|
b8a36c9bf09c46b7b162a8edc1a64e4df507a08f
| 1,050
|
py
|
Python
|
src/box/importer.py
|
p-ranav/box
|
9060343cd4960894da220c4f244535623a54ff98
|
[
"MIT"
] | 91
|
2021-07-02T06:00:57.000Z
|
2022-03-04T02:51:05.000Z
|
src/box/importer.py
|
p-ranav/box
|
9060343cd4960894da220c4f244535623a54ff98
|
[
"MIT"
] | 1
|
2021-07-07T03:42:32.000Z
|
2021-07-07T13:45:00.000Z
|
src/box/importer.py
|
p-ranav/box
|
9060343cd4960894da220c4f244535623a54ff98
|
[
"MIT"
] | 2
|
2021-07-02T06:01:02.000Z
|
2021-11-16T21:10:38.000Z
|
from box.parser import Parser
from box.generator import Generator
import os
class Importer:
def __init__(self, path):
# Path to directory containing function graphs to import
self.path = os.path.abspath(path)
# { "FunctionName": <Generator>, ... }
self.function_declarations = {}
# List of (Parser, Generator) objects,
# one for each function graph .box file
self.parser_generators = self._parse_box_files()
print(self.function_declarations)
def _parse_box_files(self):
# Result is a list of tuples [(parser, generator), ...]
result = []
for file in os.listdir(self.path):
if file.endswith(".box"):
path = os.path.join(self.path, file)
parser = Parser(path)
generator = Generator(parser)
code = generator.to_python([])
result.append((parser, generator))
self.function_declarations[generator.function_name] = generator
return result
| 32.8125
| 79
| 0.60381
| 971
| 0.924762
| 0
| 0
| 0
| 0
| 0
| 0
| 232
| 0.220952
|
b8a3792b87c74a7c4d324caa87c2a3a3046ea018
| 319
|
py
|
Python
|
gargantua/utils/elasticsearch.py
|
Laisky/laisky-blog
|
ebe7dadf8fce283ebab0539926ad1be1246e5156
|
[
"Apache-2.0"
] | 18
|
2015-05-08T02:06:39.000Z
|
2022-03-05T21:36:48.000Z
|
gargantua/utils/elasticsearch.py
|
Laisky/laisky-blog
|
ebe7dadf8fce283ebab0539926ad1be1246e5156
|
[
"Apache-2.0"
] | 131
|
2015-01-22T14:54:59.000Z
|
2022-02-16T15:14:10.000Z
|
gargantua/utils/elasticsearch.py
|
Laisky/laisky-blog
|
ebe7dadf8fce283ebab0539926ad1be1246e5156
|
[
"Apache-2.0"
] | 3
|
2016-01-11T13:52:41.000Z
|
2019-06-12T08:54:15.000Z
|
import json
def parse_search_resp(resp):
return [i['_source'] for i in json.loads(resp)['hits']['hits']]
def generate_keyword_search(keyword, field='post_content'):
query = {
"query": {
"match": {
field: keyword
}
}
}
return json.dumps(query)
| 18.764706
| 67
| 0.539185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 0.153605
|
b8a39d8abd6a6f5c91947df5a4f7859aa7716d4d
| 957
|
py
|
Python
|
HackerRank/Python_Learn/03_Strings/13_The_Minion_Game.py
|
Zubieta/CPP
|
fb4a3cbf2e4edcc590df15663cd28fb9ecab679c
|
[
"MIT"
] | 8
|
2017-03-02T07:56:45.000Z
|
2021-08-07T20:20:19.000Z
|
HackerRank/Python_Learn/03_Strings/13_The_Minion_Game.py
|
zubie7a/Algorithms
|
fb4a3cbf2e4edcc590df15663cd28fb9ecab679c
|
[
"MIT"
] | null | null | null |
HackerRank/Python_Learn/03_Strings/13_The_Minion_Game.py
|
zubie7a/Algorithms
|
fb4a3cbf2e4edcc590df15663cd28fb9ecab679c
|
[
"MIT"
] | 1
|
2021-08-07T20:20:20.000Z
|
2021-08-07T20:20:20.000Z
|
# https://www.hackerrank.com/challenges/the-minion-game
from collections import Counter
def minion_game(string):
# your code goes here
string = string.lower()
consonants = set("bcdfghjklmnpqrstvwxyz")
vowels = set("aeiou")
# Stuart will get 1 point for every non-distinct substring that starts
# with a consonant, Kevin for every that starts with a vowel.
score_S, score_K = 0, 0
length = len(string)
for i in range(length):
# No need to compute the substrings, once we know the starting char,
# we can simply calculate the number of substrings that can be formed
# from here on to the end of the string.
if string[i] in consonants:
score_S += length - i
if string[i] in vowels:
score_K += length - i
if score_S > score_K:
print "Stuart %d" % score_S
elif score_S < score_K:
print "Kevin %d" % score_K
else:
print "Draw"
| 36.807692
| 77
| 0.640543
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 441
| 0.460815
|
b8a3fc4016e36479ead942be25f4a3a83458ff3e
| 1,785
|
py
|
Python
|
Data/girisbolum.py
|
kemalsanli/wordKontrol
|
e0de525f0434cb87ef641fd24e23a559a73bd389
|
[
"MIT"
] | 1
|
2022-01-25T14:47:32.000Z
|
2022-01-25T14:47:32.000Z
|
Data/girisbolum.py
|
FehmiDeniz/wordKontrol
|
e0de525f0434cb87ef641fd24e23a559a73bd389
|
[
"MIT"
] | null | null | null |
Data/girisbolum.py
|
FehmiDeniz/wordKontrol
|
e0de525f0434cb87ef641fd24e23a559a73bd389
|
[
"MIT"
] | 3
|
2020-12-19T01:39:09.000Z
|
2021-01-21T19:20:28.000Z
|
import os
from docx import Document
from docx.shared import Inches
from docx import Document
from docx.text.paragraph import Paragraph
def Iceriyomu(dosyayol):
document = Document('{}'.format(dosyayol))
headings = []
texts = []
para = []
giris = ""
for paragraph in document.paragraphs:
if paragraph.style.name.startswith("Heading"):
if headings:
texts.append(para)
headings.append(paragraph.text)
para = []
elif paragraph.style.name == "Normal" and not paragraph.text.find(' ',0,1) != -1 and paragraph.text !='':
para.append(paragraph.text)
if para or len(headings)>len(texts):
texts.append(texts.append(para))
for h, t in zip(headings, texts):
if h== "GİRİŞ" or h== "Giriş":
giris = t[-1]
#print(giris)
if (giris.find('apsam') != -1 or giris.find('rganizasyon') != -1):
sonuc="Giris bölümünün son bölümünde tezin organizasyonu ve kapsamına yer verilmis "
RaporaEkle(sonuc)
else:
sonuc="Giris bölümünün son bölümünde tezin organizasyonu ve kapsamına yer verilmemis"
RaporaEkle(sonuc)
def RaporaEkle(sonuc):
f = open('WordRapor.docx', 'rb')
document = Document(f)
document.add_paragraph(
sonuc, style='List Number'
)
document.add_heading('16541504-Fatih Uludag', level=1)
document.add_heading('175541058-Doğukan Kurnaz', level=1)
document.add_heading('14545520-Kemal Sanlı', level=1)
document.add_heading('175541059-Batuhan Harmanşah', level=1)
document.save('WordRapor.docx')
f.close()
print("Asama uc tamamlandi...")
print("Word Raporu Olusturuldu...")
| 30.775862
| 114
| 0.610644
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 457
| 0.252765
|
b8a55b880ef0e7fe5cf28dcff59d6249431111b4
| 7,008
|
py
|
Python
|
src/damn_at/analyzers/mesh/analyzer_assimp.py
|
sagar-kohli/peragro-at
|
057687d680d4b098c7642db7d406fc0d8df13019
|
[
"BSD-3-Clause"
] | 5
|
2016-03-01T01:56:00.000Z
|
2021-05-04T03:53:31.000Z
|
src/damn_at/analyzers/mesh/analyzer_assimp.py
|
sagar-kohli/peragro-at
|
057687d680d4b098c7642db7d406fc0d8df13019
|
[
"BSD-3-Clause"
] | 25
|
2016-03-05T07:13:45.000Z
|
2017-07-21T16:32:06.000Z
|
src/damn_at/analyzers/mesh/analyzer_assimp.py
|
sueastside/damn-at
|
057687d680d4b098c7642db7d406fc0d8df13019
|
[
"BSD-3-Clause"
] | 12
|
2016-03-05T18:51:09.000Z
|
2017-12-09T05:52:37.000Z
|
"""Assimp-based analyzer."""
from __future__ import absolute_import
import os
import logging
import subprocess
import pyassimp
from damn_at import (
mimetypes,
MetaDataType,
MetaDataValue,
FileId,
FileDescription,
AssetDescription,
AssetId
)
from damn_at.pluginmanager import IAnalyzer
from six.moves import map
from io import open
LOG = logging.getLogger(__name__)
def get_assimp_types():
"""Extract all possible formats and store their mime types"""
# TODO: not exactly reliable, a lot of unknown mimetypes
# for those extensions :/
try:
pro = subprocess.Popen(
['assimp', 'listext'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
out, err = pro.communicate()
if pro.returncode != 0:
LOG.debug("'assimp listext' failed with error code %d! "
% pro.returncode,
out,
err
)
return []
except OSError as oserror:
LOG.debug("'assimp listext' failed! %s", oserror)
return []
extensions = out.split(';')
mimes = []
for ext in extensions:
mime = mimetypes.guess_type('file.' + ext, False)[0]
LOG.info('Mimetype Info:\n\tExtension: %s\n\tMime: %s', ext, mime)
mimes.append(mime)
return mimes
class AssimpAnalyzer(IAnalyzer):
"""Assimp-based analyzer."""
handled_types = ['application/wavefront-obj',
'application/fbx']
def __init__(self):
IAnalyzer.__init__(self)
def activate(self):
pass
def analyze(self, an_uri):
fileid = FileId(filename=os.path.abspath(an_uri))
file_descr = FileDescription(file=fileid)
file_descr.assets = []
assimp_mimetype = 'application/assimp'
scene = None
try:
scene = pyassimp.load(an_uri)
textures = {}
materials = {}
from damn_at.analyzers.mesh.metadata import (
MetaDataAssimpTexture,
MetaDataAssimpMesh
)
for i, texture in enumerate(scene.textures):
name = texture.name if texture.name else 'texture-'+str(i)
asset_descr = AssetDescription(asset=AssetId(
subname=name,
mimetype=assimp_mimetype + ".texture",
file=fileid
))
asset_descr.metadata = MetaDataAssimpTexture.extract(texture)
file_descr.assets.append(asset_descr)
textures[i] = asset_descr
for i, material in enumerate(scene.materials):
properties = {}
for key, value in material.properties.items():
properties[key] = value
name = properties.get('name', 'material-'+str(i))
asset_descr = AssetDescription(asset=AssetId(
subname=name,
mimetype=assimp_mimetype + ".material",
file=fileid
))
asset_descr.metadata = {}
for key, value in properties.items():
if key == 'name' or key == 'file':
continue
asset_descr.metadata[key] = MetaDataValue(
type=MetaDataType.STRING,
string_value=str(value)
)
file_descr.assets.append(asset_descr)
materials[i] = asset_descr
for i, mesh in enumerate(scene.meshes):
name = mesh.name if mesh.name else 'mesh-' + str(i)
asset_descr = AssetDescription(asset=AssetId(
subname=name,
mimetype=assimp_mimetype + ".mesh",
file=fileid
))
asset_descr.metadata = MetaDataAssimpMesh.extract(mesh)
asset_descr.dependencies = []
# Dependencies
if mesh.materialindex is not None:
if mesh.materialindex in materials:
asset_descr.dependencies.append(
materials[mesh.materialindex].asset
)
file_descr.assets.append(asset_descr)
finally:
pyassimp.release(scene)
'''
obj = Loader(an_uri)
from damn_at.analyzers.mesh.metadata import (
MetaDataWaveFrontDefault,
MetaDataWaveFrontGroup
)
d_asset_descr = AssetDescription(asset=AssetId(
subname='default',
mimetype="application/wavefront-obj",
file=fileid
))
d_asset_descr.metadata = MetaDataWaveFrontDefault.extract(obj)
file_descr.assets.append(d_asset_descr)
for name, group in obj.groups.items():
if name != 'default':
asset_descr = AssetDescription(asset=AssetId(
subname=name,
mimetype="application/wavefront-obj.group",
file=fileid
))
asset_descr.metadata = MetaDataWaveFrontGroup.extract(group)
asset_descr.dependencies = [d_asset_descr.asset]
file_descr.assets.append(asset_descr)'''
return file_descr
class Loader(object):
def __init__(self, path):
vertices = []
normals = []
texcoords = []
default = {'faces': []}
current = default
self.groups = {'default': default}
for line in open(path, "r"):
if line.startswith('#'):
continue
values = line.split()
if not values:
continue
if values[0] == 'g':
current = {'faces': []}
group_name = values[1]
LOG.info("Group:\n%s\n%s", group_name, values)
self.groups[group_name] = current
elif values[0] == 'v':
vertices.append(tuple(map(float, values[1:4])))
elif values[0] == 'vn':
normals.append(tuple(map(float, values[1:4])))
elif values[0] == 'vt':
texcoords.append(tuple(map(float, values[1:3])))
elif values[0] == 's':
current['smooth'] = bool(values[2:3])
elif values[0] == 'f':
faces = current['faces']
face = []
for v in values[1:]:
w = [int(x) if x else None for x in v.split('/')]
w = [x-1 if x is not None and x > 0 else x for x in w]
face.append(tuple(w))
faces.append(tuple(face))
else:
LOG.info('Loader value not known: %s - %s' % (values[0], line))
# save result
self.vertices = vertices
self.normals = normals
self.texcoords = texcoords
| 33.5311
| 79
| 0.519549
| 5,626
| 0.802797
| 0
| 0
| 0
| 0
| 0
| 0
| 1,580
| 0.225457
|
b8a5a0dd0bcce0b6f79040d683a76c1d74e9013f
| 2,423
|
py
|
Python
|
src/stk/ea/selection/selectors/remove_batches.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | 21
|
2018-04-12T16:25:24.000Z
|
2022-02-14T23:05:43.000Z
|
src/stk/ea/selection/selectors/remove_batches.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | 8
|
2019-03-19T12:36:36.000Z
|
2020-11-11T12:46:00.000Z
|
src/stk/ea/selection/selectors/remove_batches.py
|
stevenbennett96/stk
|
6e5af87625b83e0bfc7243bc42d8c7a860cbeb76
|
[
"MIT"
] | 5
|
2018-08-07T13:00:16.000Z
|
2021-11-01T00:55:10.000Z
|
"""
Remove Batches
==============
"""
from .selector import Selector
class RemoveBatches(Selector):
"""
Prevents a :class:`.Selector` from selecting some batches.
Examples
--------
*Removing Batches From Selection*
You want to use :class:`.Roulette` selection on all but the
5 :class:`.Worst` batches
.. testcode:: removing-batches-from-selection
import stk
selector = stk.RemoveBatches(
remover=stk.Worst(5),
selector=stk.Roulette(20),
)
population = tuple(
stk.MoleculeRecord(
topology_graph=stk.polymer.Linear(
building_blocks=(
stk.BuildingBlock(
smiles='BrCCBr',
functional_groups=[stk.BromoFactory()],
),
),
repeating_unit='A',
num_repeating_units=2,
),
).with_fitness_value(i)
for i in range(100)
)
for batch in selector.select(population):
# Do stuff with batch. It was selected with roulette
# selection and is not one of the worst 5 batches.
pass
"""
def __init__(self, remover, selector):
"""
Initialize a :class:`.RemoveBatches` instance.
Parameters
----------
remover : :class:`.Selector`
Selects batches of molecules, which cannot be yielded by
`selector`.
selector : :class:`.Selector`
Selects batches of molecules, except those selected by
`remover`.
"""
self._remover = remover
self._selector = selector
def select(
self,
population,
included_batches=None,
excluded_batches=None,
):
removed_batches = {
batch.get_identity_key()
for batch in self._remover.select(
population=population,
included_batches=included_batches,
excluded_batches=excluded_batches,
)
}
if excluded_batches is not None:
removed_batches |= excluded_batches
yield from self._selector.select(
population=population,
included_batches=included_batches,
excluded_batches=removed_batches,
)
| 26.336957
| 68
| 0.529922
| 2,349
| 0.969459
| 661
| 0.272802
| 0
| 0
| 0
| 0
| 1,565
| 0.645894
|
b8a8d25a2989246934825ecb3bded3322cd894bb
| 446
|
py
|
Python
|
students/migrations/0010_institutionalemail_title_email.py
|
estudeplus/perfil
|
58b847aa226b885ca6a7a128035f09de2322519f
|
[
"MIT"
] | null | null | null |
students/migrations/0010_institutionalemail_title_email.py
|
estudeplus/perfil
|
58b847aa226b885ca6a7a128035f09de2322519f
|
[
"MIT"
] | 21
|
2019-05-11T18:01:10.000Z
|
2022-02-10T11:22:01.000Z
|
students/migrations/0010_institutionalemail_title_email.py
|
estudeplus/perfil
|
58b847aa226b885ca6a7a128035f09de2322519f
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-06-30 00:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('students', '0009_auto_20190629_0125'),
]
operations = [
migrations.AddField(
model_name='institutionalemail',
name='title_email',
field=models.CharField(default='Assunto do email', editable=False, max_length=20),
),
]
| 23.473684
| 94
| 0.630045
| 353
| 0.79148
| 0
| 0
| 0
| 0
| 0
| 0
| 133
| 0.298206
|
b8a9652011ddd210555829c017c928bd04cf38bf
| 920
|
py
|
Python
|
azure_utility_tool/config.py
|
alextricity25/azure_utility_tool
|
2975b5f415e5c64335618e83ed0216b7923c4166
|
[
"MIT"
] | 5
|
2020-01-02T03:12:14.000Z
|
2020-08-19T02:31:19.000Z
|
azure_utility_tool/config.py
|
alextricity25/azure_utility_tool
|
2975b5f415e5c64335618e83ed0216b7923c4166
|
[
"MIT"
] | null | null | null |
azure_utility_tool/config.py
|
alextricity25/azure_utility_tool
|
2975b5f415e5c64335618e83ed0216b7923c4166
|
[
"MIT"
] | 2
|
2020-03-16T00:19:06.000Z
|
2020-08-20T19:31:10.000Z
|
"""
Author: Miguel Alex Cantu
Email: miguel.can2@gmail.com
Date: 12/21/2019
Description:
Loads Azure Utility Tool configuration file. The configuration
file is a blend of what the Microsoft Authentication Library
requires and some extra directives that the Auzre Utility
Tool requires. It is a JSON file that is required to be
stored in ~/.aut/aut_config.json
"""
import json
import sys
import os
from azure_utility_tool.exceptions import ConfigFileNotFound
def get_config(config_file="~/.aut/aut_config.json"):
CONFIG_PATH = os.path.expanduser(config_file)
# Ensure the directory exists, if not, then throw an Exception.
if not os.path.exists(CONFIG_PATH):
raise ConfigFileNotFound("The configuration file for the Azure"
" Utility Tool was not found in " +
config_file)
return json.load(open(CONFIG_PATH))
| 34.074074
| 71
| 0.702174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 541
| 0.588043
|
b8a965e925e8c33d2b6141373da012de99c134f6
| 1,197
|
py
|
Python
|
ics/structures/secu_avb_settings.py
|
intrepidcs/python_ics
|
7bfa8c2f893763608f9255f9536a2019cfae0c23
|
[
"Unlicense"
] | 45
|
2017-10-17T08:42:08.000Z
|
2022-02-21T16:26:48.000Z
|
ics/structures/secu_avb_settings.py
|
intrepidcs/python_ics
|
7bfa8c2f893763608f9255f9536a2019cfae0c23
|
[
"Unlicense"
] | 106
|
2017-03-07T21:10:39.000Z
|
2022-03-29T15:32:46.000Z
|
ics/structures/secu_avb_settings.py
|
intrepidcs/python_ics
|
7bfa8c2f893763608f9255f9536a2019cfae0c23
|
[
"Unlicense"
] | 17
|
2017-04-04T12:30:22.000Z
|
2022-01-28T05:30:25.000Z
|
# This file was auto generated; Do not modify, if you value your sanity!
import ctypes
import enum
from ics.structures.can_settings import *
from ics.structures.canfd_settings import *
from ics.structures.s_text_api_settings import *
class flags(ctypes.Structure):
_pack_ = 2
_fields_ = [
('disableUsbCheckOnBoot', ctypes.c_uint32, 1),
('enableLatencyTest', ctypes.c_uint32, 1),
('reserved', ctypes.c_uint32, 30),
]
class secu_avb_settings(ctypes.Structure):
_pack_ = 2
_fields_ = [
('perf_en', ctypes.c_uint16),
('can1', CAN_SETTINGS),
('canfd1', CANFD_SETTINGS),
('can2', CAN_SETTINGS),
('canfd2', CANFD_SETTINGS),
('network_enables', ctypes.c_uint64),
('termination_enables', ctypes.c_uint64),
('pwr_man_timeout', ctypes.c_uint32),
('pwr_man_enable', ctypes.c_uint16),
('network_enabled_on_boot', ctypes.c_uint16),
('iso15765_separation_time_offset', ctypes.c_int16),
('text_api', STextAPISettings),
('flags', flags),
]
_neoECU_AVBSettings = secu_avb_settings
ECU_AVBSettings = secu_avb_settings
SECU_AVBSettings = secu_avb_settings
| 27.837209
| 72
| 0.670844
| 839
| 0.700919
| 0
| 0
| 0
| 0
| 0
| 0
| 307
| 0.256475
|
b8a9e103986b1f6f93cdc5df1a8eef20f43536e8
| 1,531
|
py
|
Python
|
unaccepted/Substring_with_Concatenation_of_All_Words.py
|
sheagk/leetcode_solutions
|
7571bd13f4274f6b4b622b43a414d56fc26d3be0
|
[
"MIT"
] | null | null | null |
unaccepted/Substring_with_Concatenation_of_All_Words.py
|
sheagk/leetcode_solutions
|
7571bd13f4274f6b4b622b43a414d56fc26d3be0
|
[
"MIT"
] | null | null | null |
unaccepted/Substring_with_Concatenation_of_All_Words.py
|
sheagk/leetcode_solutions
|
7571bd13f4274f6b4b622b43a414d56fc26d3be0
|
[
"MIT"
] | 1
|
2020-09-03T14:26:00.000Z
|
2020-09-03T14:26:00.000Z
|
## https://leetcode.com/problems/substring-with-concatenation-of-all-words/submissions/
## this method fails on test case 171 of 173 because it's too slow.
## i'm not sure I see a way to avoid checking every starting position
## in s, and I'm also not sure I see a way to avoid having a loop over
## the words too.
## unfortunately, that means my complexity is O(len(s)*len(words)), which
## is too slow for a case where we have a ton of short words and a very long
## string.
class Solution:
def findSubstring(self, s: str, words: List[str]) -> List[int]:
if not len(words):
return []
if not len(s):
return []
from copy import copy
output = []
single_word_length = len(words[0])
words_to_match = len(words)
word_count = {}
for w in words:
word_count[w] = word_count.get(w, 0) + 1
for ii in range(len(s)):
mywords = copy(word_count)
words_left = copy(words_to_match)
index = copy(ii)
while words_left > 0 and index < len(s):
w = s[index:index+single_word_length]
if mywords.get(w, 0) > 0:
mywords[w] = mywords[w] - 1
words_left = words_left - 1
index = index + single_word_length
else:
break
if words_left == 0:
output.append(ii)
return output
| 31.895833
| 87
| 0.53821
| 1,050
| 0.685826
| 0
| 0
| 0
| 0
| 0
| 0
| 469
| 0.306336
|
b8ab8fab99f75b9332a0131cf9ea65ac9a6bcb59
| 1,848
|
py
|
Python
|
python_app/supervised_learning/train_data/Data.py
|
0xsuu/Project-Mahjong
|
e82edc67651ff93c8ec158b590cd728f28504be9
|
[
"Apache-2.0"
] | 9
|
2018-06-08T00:09:08.000Z
|
2021-11-17T11:05:11.000Z
|
python_app/supervised_learning/train_data/Data.py
|
0xsuu/Project-Mahjong
|
e82edc67651ff93c8ec158b590cd728f28504be9
|
[
"Apache-2.0"
] | 1
|
2020-04-25T12:43:26.000Z
|
2020-04-25T12:43:26.000Z
|
python_app/supervised_learning/train_data/Data.py
|
0xsuu/Project-Mahjong
|
e82edc67651ff93c8ec158b590cd728f28504be9
|
[
"Apache-2.0"
] | 2
|
2019-05-30T07:18:45.000Z
|
2019-11-05T09:15:13.000Z
|
#!/usr/bin/env python3
'''
The MIT License (MIT)
Copyright (c) 2014 Mark Haines
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def asdata(obj, asdata):
if isinstance(obj, Data):
return obj.asdata(asdata)
elif isinstance(obj, str):
return obj
elif hasattr(obj, '_asdict'):
return asdata(obj._asdict(), asdata)
elif isinstance(obj, dict):
return dict((k, asdata(v, asdata)) for (k, v) in obj.items())
else:
try:
return list(asdata(child, asdata) for child in obj)
except:
return obj
class Data:
def asdata(self, asdata = asdata):
return dict((k, asdata(v, asdata)) for (k, v) in self.__dict__.items())
def __repr__(self):
return self.asdata().__repr__()
| 36.96
| 82
| 0.682359
| 199
| 0.107684
| 0
| 0
| 0
| 0
| 0
| 0
| 1,204
| 0.651515
|
b8ada7f96a8a91b1795a09283b5bb56adf3d888d
| 2,373
|
py
|
Python
|
tests/_geom/test_path_control_x_interface.py
|
ynsnf/apysc
|
b10ffaf76ec6beb187477d0a744fca00e3efc3fb
|
[
"MIT"
] | 16
|
2021-04-16T02:01:29.000Z
|
2022-01-01T08:53:49.000Z
|
tests/_geom/test_path_control_x_interface.py
|
ynsnf/apysc
|
b10ffaf76ec6beb187477d0a744fca00e3efc3fb
|
[
"MIT"
] | 613
|
2021-03-24T03:37:38.000Z
|
2022-03-26T10:58:37.000Z
|
tests/_geom/test_path_control_x_interface.py
|
simon-ritchie/apyscript
|
c319f8ab2f1f5f7fad8d2a8b4fc06e7195476279
|
[
"MIT"
] | 2
|
2021-06-20T07:32:58.000Z
|
2021-12-26T08:22:11.000Z
|
from random import randint
from retrying import retry
import apysc as ap
from apysc._geom.path_control_x_interface import PathControlXInterface
class TestPathControlXInterface:
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test_control_x(self) -> None:
interface: PathControlXInterface = PathControlXInterface()
interface._control_x = ap.Int(0)
interface.control_x = ap.Int(10)
assert interface.control_x == 10
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__make_snapshot(self) -> None:
interface: PathControlXInterface = PathControlXInterface()
interface._control_x = ap.Int(10)
snapshot_name: str = interface._get_next_snapshot_name()
interface._run_all_make_snapshot_methods(snapshot_name=snapshot_name)
assert interface._control_x_snapshots[snapshot_name] == 10
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__revert(self) -> None:
interface: PathControlXInterface = PathControlXInterface()
interface._control_x = ap.Int(10)
snapshot_name: str = interface._get_next_snapshot_name()
interface._run_all_revert_methods(snapshot_name=snapshot_name)
assert interface.control_x == 10
interface._run_all_make_snapshot_methods(snapshot_name=snapshot_name)
interface._control_x = ap.Int(20)
interface._run_all_revert_methods(snapshot_name=snapshot_name)
assert interface.control_x == 10
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__initialize_control_x_if_not_initialized(self) -> None:
interface: PathControlXInterface = PathControlXInterface()
interface._initialize_control_x_if_not_initialized()
assert interface.control_x == 0
interface.control_x = ap.Int(10)
interface._initialize_control_x_if_not_initialized()
assert interface.control_x == 10
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__append_control_x_linking_setting(self) -> None:
interface: PathControlXInterface = PathControlXInterface()
interface._initialize_control_x_if_not_initialized()
assert interface._attr_linking_stack['control_x'] == [ap.Int(0)]
| 43.944444
| 78
| 0.728614
| 2,215
| 0.933418
| 0
| 0
| 2,143
| 0.903076
| 0
| 0
| 11
| 0.004635
|
b8add48d3538b0aee1f01094470a9d13e1f3491d
| 1,060
|
py
|
Python
|
test/PySrc/tools/collect_tutorials.py
|
lifubang/live-py-plugin
|
38a3cf447fd7d9c4e6014b71134e178b0d8a01de
|
[
"MIT"
] | 224
|
2015-03-22T23:40:52.000Z
|
2022-03-01T21:45:51.000Z
|
test/PySrc/tools/collect_tutorials.py
|
lifubang/live-py-plugin
|
38a3cf447fd7d9c4e6014b71134e178b0d8a01de
|
[
"MIT"
] | 371
|
2015-04-28T05:14:00.000Z
|
2022-03-28T01:31:22.000Z
|
test/PySrc/tools/collect_tutorials.py
|
lifubang/live-py-plugin
|
38a3cf447fd7d9c4e6014b71134e178b0d8a01de
|
[
"MIT"
] | 53
|
2015-10-30T07:52:07.000Z
|
2022-02-28T12:56:35.000Z
|
import json
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, FileType
from pathlib import Path
def main():
parser = ArgumentParser(description='Collect markdown files, and write JSON.',
formatter_class=ArgumentDefaultsHelpFormatter)
project_path = Path(__file__).parent.parent.parent.parent
parser.add_argument('--source',
type=Path,
default=project_path / 'html' / 'tutorials')
parser.add_argument('--target',
type=FileType('w'),
default=str(project_path / 'html' / 'src' /
'tutorials.json'))
args = parser.parse_args()
tutorials = {}
# source_file: Path
for source_file in args.source.rglob('*.md'):
name = str(source_file.relative_to(args.source).with_suffix(''))
if name == 'README':
continue
source = source_file.read_text()
tutorials[name] = source
json.dump(tutorials, args.target)
main()
| 34.193548
| 82
| 0.592453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 143
| 0.134906
|
b8ae0034ffcbb27bca0f7745d8873b03677fa88a
| 1,569
|
py
|
Python
|
autobiography.py
|
wcmckee/wcmckee
|
19315a37b592b7bcebb5f2720c965aea58f928ce
|
[
"MIT"
] | null | null | null |
autobiography.py
|
wcmckee/wcmckee
|
19315a37b592b7bcebb5f2720c965aea58f928ce
|
[
"MIT"
] | null | null | null |
autobiography.py
|
wcmckee/wcmckee
|
19315a37b592b7bcebb5f2720c965aea58f928ce
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# My name is William Clifford Mckee and this is my autobiography. Written in November 2014.
#
# Structure:
#
# <markdowncell>
# Hello and goodNIĢ
# testing one two there.
# <markdowncell>
# Hello. Testing one two three.
# Screw you guys. I'm going to bed.
#
# History. Mum and Dad
#
#
# One of my early drawing memories was of my friend Wayne. Around age 10. His art was better than mine. I wanted to be better.
# I can't remember seriously drawing untill after high school.
#
# I had a friend at highschool whos artwork I admired.
# He got in trouble once for drawing nudes.
# I rembmer being ifn the art room at intermediate. I have better memories of cooking and wood work than art.
# Paint yourself said the reliever.
#
# We had art folder. Kids would cover these black folders in art. I was always embarrassed by the art on mine. I would hide it by carrying the folder so that art was facing the inside.
# Today I walk around with a visual diary and will let anyone look.
# I'm always very critical of my art though.
#
#
# I hated using artist models and copy their painting.
# My painting skills were low - needed to develop drawing and confidence.
# I am tired.
# More bad news tonight
#
# I had some excellent tutors that helped me develop my painting. Most notable was Gary Freemantle and Roger Key.
# Gary pushed my abstraction and color
#
# Key pushed obsovational painting and focusing on lights and darks.
#
# The classes I did at The Learning Connextion were
| 32.020408
| 185
| 0.733588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,522
| 0.969427
|
b8ae248b83fdee036686d9358abb1c53e99adc81
| 26,125
|
py
|
Python
|
tests/configured_tests.py
|
maxcountryman/flask-security
|
ccb41df095177b11e8526958c1001d2f887d9feb
|
[
"MIT"
] | null | null | null |
tests/configured_tests.py
|
maxcountryman/flask-security
|
ccb41df095177b11e8526958c1001d2f887d9feb
|
[
"MIT"
] | null | null | null |
tests/configured_tests.py
|
maxcountryman/flask-security
|
ccb41df095177b11e8526958c1001d2f887d9feb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import base64
import time
import simplejson as json
from flask.ext.security.utils import capture_registrations, \
capture_reset_password_requests, capture_passwordless_login_requests
from flask.ext.security.forms import LoginForm, ConfirmRegisterForm, RegisterForm, \
ForgotPasswordForm, ResetPasswordForm, SendConfirmationForm, \
PasswordlessLoginForm
from flask.ext.security.forms import TextField, SubmitField, valid_user_email
from tests import SecurityTest
class ConfiguredPasswordHashSecurityTests(SecurityTest):
    """Authentication works with a configured hash scheme (bcrypt) plus salt."""

    AUTH_CONFIG = {
        'SECURITY_PASSWORD_HASH': 'bcrypt',
        'SECURITY_PASSWORD_SALT': 'so-salty',
        'USER_COUNT': 1
    }

    def test_authenticate(self):
        # Valid credentials hashed with bcrypt still log in.
        r = self.authenticate(endpoint="/login")
        self.assertIn('Home Page', r.data)
class ConfiguredSecurityTests(SecurityTest):
    """Exercise custom endpoint URLs and post-action redirect views."""

    AUTH_CONFIG = {
        'SECURITY_REGISTERABLE': True,
        'SECURITY_LOGOUT_URL': '/custom_logout',
        'SECURITY_LOGIN_URL': '/custom_login',
        'SECURITY_POST_LOGIN_VIEW': '/post_login',
        'SECURITY_POST_LOGOUT_VIEW': '/post_logout',
        'SECURITY_POST_REGISTER_VIEW': '/post_register',
        'SECURITY_UNAUTHORIZED_VIEW': '/unauthorized',
        'SECURITY_DEFAULT_HTTP_AUTH_REALM': 'Custom Realm'
    }

    def test_login_view(self):
        # Login form is served from the overridden URL.
        r = self._get('/custom_login')
        self.assertIn("<h1>Login</h1>", r.data)

    def test_authenticate(self):
        # Successful login redirects to SECURITY_POST_LOGIN_VIEW.
        r = self.authenticate(endpoint="/custom_login")
        self.assertIn('Post Login', r.data)

    def test_logout(self):
        # Logout uses the custom URL and redirects to the post-logout view.
        self.authenticate(endpoint="/custom_login")
        r = self.logout(endpoint="/custom_logout")
        self.assertIn('Post Logout', r.data)

    def test_register_view(self):
        r = self._get('/register')
        self.assertIn('<h1>Register</h1>', r.data)

    def test_register(self):
        # Successful registration redirects to SECURITY_POST_REGISTER_VIEW.
        data = dict(email='dude@lp.com',
                    password='password',
                    password_confirm='password')
        r = self._post('/register', data=data, follow_redirects=True)
        self.assertIn('Post Register', r.data)

    def test_register_with_next_querystring_argument(self):
        # A ?next= argument overrides the configured post-register view.
        data = dict(email='dude@lp.com',
                    password='password',
                    password_confirm='password')
        r = self._post('/register?next=/page1', data=data, follow_redirects=True)
        self.assertIn('Page 1', r.data)

    def test_register_json(self):
        # JSON registration returns a JSON envelope with meta.code == 200.
        data = '{ "email": "dude@lp.com", "password": "password", "csrf_token":"%s" }' % self.csrf_token
        r = self._post('/register', data=data, content_type='application/json')
        data = json.loads(r.data)
        self.assertEquals(data['meta']['code'], 200)

    def test_register_existing_email(self):
        # Duplicate email is rejected with a validation message.
        data = dict(email='matt@lp.com',
                    password='password',
                    password_confirm='password')
        r = self._post('/register', data=data, follow_redirects=True)
        msg = 'matt@lp.com is already associated with an account'
        self.assertIn(msg, r.data)

    def test_unauthorized(self):
        # A logged-in user lacking the required role lands on
        # SECURITY_UNAUTHORIZED_VIEW ("resouce" typo is upstream's).
        self.authenticate("joe@lp.com", endpoint="/custom_auth")
        r = self._get("/admin", follow_redirects=True)
        msg = 'You are not allowed to access the requested resouce'
        self.assertIn(msg, r.data)

    def test_default_http_auth_realm(self):
        # Failed basic auth advertises the configured realm.
        r = self._get('/http', headers={
            'Authorization': 'Basic ' + base64.b64encode("joe@lp.com:bogus")
        })
        self.assertIn('<h1>Unauthorized</h1>', r.data)
        self.assertIn('WWW-Authenticate', r.headers)
        self.assertEquals('Basic realm="Custom Realm"',
                          r.headers['WWW-Authenticate'])
class BadConfiguredSecurityTests(SecurityTest):
    """A salted hash scheme without SECURITY_PASSWORD_SALT is a config error."""

    AUTH_CONFIG = {
        'SECURITY_PASSWORD_HASH': 'bcrypt',
        'USER_COUNT': 1
    }

    def test_bad_configuration_raises_runtimer_error(self):
        # Hashing with bcrypt but no salt raises RuntimeError on login.
        self.assertRaises(RuntimeError, self.authenticate)
class DefaultTemplatePathTests(SecurityTest):
    """A custom login template path overrides the packaged template."""

    AUTH_CONFIG = {
        'SECURITY_LOGIN_USER_TEMPLATE': 'custom_security/login_user.html',
    }

    def test_login_user_template(self):
        r = self._get('/login')
        self.assertIn('CUSTOM LOGIN USER', r.data)
class RegisterableTemplatePathTests(SecurityTest):
    """A custom register template path overrides the packaged template."""

    AUTH_CONFIG = {
        'SECURITY_REGISTERABLE': True,
        'SECURITY_REGISTER_USER_TEMPLATE': 'custom_security/register_user.html'
    }

    def test_register_user_template(self):
        r = self._get('/register')
        self.assertIn('CUSTOM REGISTER USER', r.data)
class RecoverableTemplatePathTests(SecurityTest):
    """Custom templates for the forgot-password and reset-password views."""

    AUTH_CONFIG = {
        'SECURITY_RECOVERABLE': True,
        'SECURITY_FORGOT_PASSWORD_TEMPLATE': 'custom_security/forgot_password.html',
        'SECURITY_RESET_PASSWORD_TEMPLATE': 'custom_security/reset_password.html',
    }

    def test_forgot_password_template(self):
        r = self._get('/reset')
        self.assertIn('CUSTOM FORGOT PASSWORD', r.data)

    def test_reset_password_template(self):
        # A real token is required to reach the reset form.
        with capture_reset_password_requests() as requests:
            r = self._post('/reset',
                           data=dict(email='joe@lp.com'),
                           follow_redirects=True)
            t = requests[0]['token']
        r = self._get('/reset/' + t)
        self.assertIn('CUSTOM RESET PASSWORD', r.data)
class ConfirmableTemplatePathTests(SecurityTest):
    """A custom send-confirmation template overrides the packaged one."""

    AUTH_CONFIG = {
        'SECURITY_CONFIRMABLE': True,
        'SECURITY_SEND_CONFIRMATION_TEMPLATE': 'custom_security/send_confirmation.html'
    }

    def test_send_confirmation_template(self):
        r = self._get('/confirm')
        self.assertIn('CUSTOM SEND CONFIRMATION', r.data)
class PasswordlessTemplatePathTests(SecurityTest):
    """A custom send-login template overrides the packaged one."""

    AUTH_CONFIG = {
        'SECURITY_PASSWORDLESS': True,
        'SECURITY_SEND_LOGIN_TEMPLATE': 'custom_security/send_login.html'
    }

    def test_send_login_template(self):
        r = self._get('/login')
        self.assertIn('CUSTOM SEND LOGIN', r.data)
class RegisterableTests(SecurityTest):
    """Registration creates an account that can immediately log in."""

    AUTH_CONFIG = {
        'SECURITY_REGISTERABLE': True,
        'USER_COUNT': 1
    }

    def test_register_valid_user(self):
        data = dict(email='dude@lp.com',
                    password='password',
                    password_confirm='password')
        self._post('/register', data=data, follow_redirects=True)
        r = self.authenticate('dude@lp.com')
        self.assertIn('Hello dude@lp.com', r.data)
class ConfirmableTests(SecurityTest):
    """Email-confirmation flow: token handling, resends and edge cases."""

    AUTH_CONFIG = {
        'SECURITY_CONFIRMABLE': True,
        'SECURITY_REGISTERABLE': True,
        'SECURITY_EMAIL_SUBJECT_REGISTER': 'Custom welcome subject',
        'USER_COUNT': 1
    }

    def test_login_before_confirmation(self):
        # Unconfirmed accounts cannot authenticate.
        e = 'dude@lp.com'
        self.register(e)
        r = self.authenticate(email=e)
        self.assertIn(self.get_message('CONFIRMATION_REQUIRED'), r.data)

    def test_send_confirmation_of_already_confirmed_account(self):
        e = 'dude@lp.com'
        # Register and confirm first, then request another confirmation.
        with capture_registrations() as registrations:
            self.register(e)
            token = registrations[0]['confirm_token']
        self.client.get('/confirm/' + token, follow_redirects=True)
        self.logout()
        r = self._post('/confirm', data=dict(email=e))
        self.assertIn(self.get_message('ALREADY_CONFIRMED'), r.data)

    def test_register_sends_confirmation_email(self):
        e = 'dude@lp.com'
        with self.app.extensions['mail'].record_messages() as outbox:
            self.register(e)
            self.assertEqual(len(outbox), 1)
            self.assertIn(e, outbox[0].html)
            self.assertEqual('Custom welcome subject', outbox[0].subject)

    def test_confirm_email(self):
        e = 'dude@lp.com'
        with capture_registrations() as registrations:
            self.register(e)
            token = registrations[0]['confirm_token']
        r = self.client.get('/confirm/' + token, follow_redirects=True)
        msg = self.app.config['SECURITY_MSG_EMAIL_CONFIRMED'][0]
        self.assertIn(msg, r.data)

    def test_invalid_token_when_confirming_email(self):
        r = self.client.get('/confirm/bogus', follow_redirects=True)
        msg = self.app.config['SECURITY_MSG_INVALID_CONFIRMATION_TOKEN'][0]
        self.assertIn(msg, r.data)

    def test_send_confirmation_json(self):
        r = self._post('/confirm', data='{"email": "matt@lp.com"}',
                       content_type='application/json')
        self.assertEquals(r.status_code, 200)

    def test_send_confirmation_with_invalid_email(self):
        r = self._post('/confirm', data=dict(email='bogus@bogus.com'))
        msg = self.app.config['SECURITY_MSG_USER_DOES_NOT_EXIST'][0]
        self.assertIn(msg, r.data)

    def test_resend_confirmation(self):
        e = 'dude@lp.com'
        self.register(e)
        r = self._post('/confirm', data={'email': e})
        msg = self.get_message('CONFIRMATION_REQUEST', email=e)
        self.assertIn(msg, r.data)

    def test_user_deleted_before_confirmation(self):
        e = 'dude@lp.com'
        with capture_registrations() as registrations:
            self.register(e)
            user = registrations[0]['user']
            token = registrations[0]['confirm_token']
        # Delete the account; its token must then be treated as invalid.
        with self.app.app_context():
            from flask_security.core import _security
            _security.datastore.delete(user)
            _security.datastore.commit()
        r = self.client.get('/confirm/' + token, follow_redirects=True)
        msg = self.app.config['SECURITY_MSG_INVALID_CONFIRMATION_TOKEN'][0]
        self.assertIn(msg, r.data)
class ExpiredConfirmationTest(SecurityTest):
    """An expired confirmation token triggers a fresh confirmation email."""

    AUTH_CONFIG = {
        'SECURITY_CONFIRMABLE': True,
        'SECURITY_REGISTERABLE': True,
        'SECURITY_CONFIRM_EMAIL_WITHIN': '1 milliseconds',  # expire immediately
        'USER_COUNT': 1
    }

    def test_expired_confirmation_token_sends_email(self):
        e = 'dude@lp.com'
        with capture_registrations() as registrations:
            self.register(e)
            token = registrations[0]['confirm_token']
        time.sleep(1.25)  # let the 1 ms token expire
        with self.app.extensions['mail'].record_messages() as outbox:
            r = self.client.get('/confirm/' + token, follow_redirects=True)
            self.assertEqual(len(outbox), 1)
            # The re-sent mail must contain a new token, not the stale one.
            self.assertNotIn(token, outbox[0].html)
            expire_text = self.AUTH_CONFIG['SECURITY_CONFIRM_EMAIL_WITHIN']
            msg = self.app.config['SECURITY_MSG_CONFIRMATION_EXPIRED'][0]
            msg = msg % dict(within=expire_text, email=e)
            self.assertIn(msg, r.data)
class LoginWithoutImmediateConfirmTests(SecurityTest):
    """SECURITY_LOGIN_WITHOUT_CONFIRMATION lets fresh registrants log in."""

    AUTH_CONFIG = {
        'SECURITY_CONFIRMABLE': True,
        'SECURITY_REGISTERABLE': True,
        'SECURITY_LOGIN_WITHOUT_CONFIRMATION': True,
        'USER_COUNT': 1
    }

    def test_register_valid_user_automatically_signs_in(self):
        # Registration logs the user in even though they are unconfirmed.
        e = 'dude@lp.com'
        p = 'password'
        data = dict(email=e, password=p, password_confirm=p)
        r = self._post('/register', data=data, follow_redirects=True)
        self.assertIn(e, r.data)
class RecoverableTests(SecurityTest):
    """Password-reset (recoverable) flow: request, token, reset."""

    AUTH_CONFIG = {
        'SECURITY_RECOVERABLE': True,
        'SECURITY_RESET_PASSWORD_ERROR_VIEW': '/',
        'SECURITY_POST_FORGOT_VIEW': '/'
    }

    def test_reset_view(self):
        # Requesting a reset issues a token; the token URL serves the form.
        with capture_reset_password_requests() as requests:
            r = self._post('/reset',
                           data=dict(email='joe@lp.com'),
                           follow_redirects=True)
            t = requests[0]['token']
        r = self._get('/reset/' + t)
        self.assertIn('<h1>Reset password</h1>', r.data)

    def test_forgot_post_sends_email(self):
        with capture_reset_password_requests():
            with self.app.extensions['mail'].record_messages() as outbox:
                self._post('/reset', data=dict(email='joe@lp.com'))
                self.assertEqual(len(outbox), 1)

    def test_forgot_password_json(self):
        r = self._post('/reset', data='{"email": "matt@lp.com"}',
                       content_type="application/json")
        self.assertEquals(r.status_code, 200)

    def test_forgot_password_invalid_email(self):
        r = self._post('/reset',
                       data=dict(email='larry@lp.com'),
                       follow_redirects=True)
        self.assertIn("Specified user does not exist", r.data)

    def test_reset_password_with_valid_token(self):
        # After a reset, the new password must authenticate.
        with capture_reset_password_requests() as requests:
            r = self._post('/reset',
                           data=dict(email='joe@lp.com'),
                           follow_redirects=True)
            t = requests[0]['token']
        r = self._post('/reset/' + t, data={
            'password': 'newpassword',
            'password_confirm': 'newpassword'
        }, follow_redirects=True)
        r = self.logout()
        r = self.authenticate('joe@lp.com', 'newpassword')
        self.assertIn('Hello joe@lp.com', r.data)

    def test_reset_password_with_invalid_token(self):
        r = self._post('/reset/bogus', data={
            'password': 'newpassword',
            'password_confirm': 'newpassword'
        }, follow_redirects=True)
        self.assertIn(self.get_message('INVALID_RESET_PASSWORD_TOKEN'), r.data)
class ExpiredResetPasswordTest(SecurityTest):
    """Reset tokens older than SECURITY_RESET_PASSWORD_WITHIN are rejected."""

    AUTH_CONFIG = {
        'SECURITY_RECOVERABLE': True,
        'SECURITY_RESET_PASSWORD_WITHIN': '1 milliseconds'  # expire immediately
    }

    def test_reset_password_with_expired_token(self):
        with capture_reset_password_requests() as requests:
            r = self._post('/reset', data=dict(email='joe@lp.com'),
                           follow_redirects=True)
            t = requests[0]['token']
        time.sleep(1)  # let the 1 ms token expire
        r = self._post('/reset/' + t, data={
            'password': 'newpassword',
            'password_confirm': 'newpassword'
        }, follow_redirects=True)
        self.assertIn('You did not reset your password within', r.data)
class ChangePasswordTest(SecurityTest):
    """Change-password form: validation failures and the success path."""

    AUTH_CONFIG = {
        'SECURITY_RECOVERABLE': True,
        'SECURITY_CHANGEABLE': True,
    }

    def test_change_password(self):
        self.authenticate()
        r = self.client.get('/change', follow_redirects=True)
        self.assertIn('Change password', r.data)

    def test_change_password_invalid(self):
        # Wrong current password is rejected.
        self.authenticate()
        r = self._post('/change', data={
            'password': 'notpassword',
            'new_password': 'newpassword',
            'new_password_confirm': 'newpassword'
        }, follow_redirects=True)
        self.assertNotIn('You successfully changed your password', r.data)
        self.assertIn('Invalid password', r.data)

    def test_change_password_mismatch(self):
        # New password and its confirmation must match.
        self.authenticate()
        r = self._post('/change', data={
            'password': 'password',
            'new_password': 'newpassword',
            'new_password_confirm': 'notnewpassword'
        }, follow_redirects=True)
        self.assertNotIn('You successfully changed your password', r.data)
        self.assertIn('Passwords do not match', r.data)

    def test_change_password_bad_password(self):
        # New password must satisfy the length validator.
        self.authenticate()
        r = self._post('/change', data={
            'password': 'password',
            'new_password': 'a',
            'new_password_confirm': 'a'
        }, follow_redirects=True)
        self.assertNotIn('You successfully changed your password', r.data)
        self.assertIn('Field must be between', r.data)

    def test_change_password_success(self):
        # Success flashes a message and sends a notification mail which
        # includes a reset link (SECURITY_RECOVERABLE is enabled).
        self.authenticate()
        with self.app.extensions['mail'].record_messages() as outbox:
            r = self._post('/change', data={
                'password': 'password',
                'new_password': 'newpassword',
                'new_password_confirm': 'newpassword'
            }, follow_redirects=True)
            self.assertIn('You successfully changed your password', r.data)
            self.assertIn('Home Page', r.data)
            self.assertEqual(len(outbox), 1)
            self.assertIn("Your password has been changed", outbox[0].html)
            self.assertIn("/reset", outbox[0].html)
class ChangePasswordPostViewTest(SecurityTest):
    """SECURITY_POST_CHANGE_VIEW controls the redirect after a change."""

    AUTH_CONFIG = {
        'SECURITY_CHANGEABLE': True,
        'SECURITY_POST_CHANGE_VIEW': '/profile',
    }

    def test_change_password_success(self):
        self.authenticate()
        r = self._post('/change', data={
            'password': 'password',
            'new_password': 'newpassword',
            'new_password_confirm': 'newpassword'
        }, follow_redirects=True)
        self.assertIn('Profile Page', r.data)
class ChangePasswordDisabledTest(SecurityTest):
    """With SECURITY_CHANGEABLE off, the /change endpoint does not exist."""

    AUTH_CONFIG = {
        'SECURITY_CHANGEABLE': False,
    }

    def test_change_password_endpoint_is_404(self):
        self.authenticate()
        r = self.client.get('/change', follow_redirects=True)
        self.assertEqual(404, r.status_code)
class TrackableTests(SecurityTest):
    """SECURITY_TRACKABLE records login timestamps, IPs and a login count."""

    AUTH_CONFIG = {
        'SECURITY_TRACKABLE': True,
        'USER_COUNT': 1
    }

    def test_did_track(self):
        e = 'matt@lp.com'
        # Log in twice so both last_* and current_* fields are populated.
        self.authenticate(email=e)
        self.logout()
        self.authenticate(email=e)
        with self.app.test_request_context('/profile'):
            user = self.app.security.datastore.find_user(email=e)
            self.assertIsNotNone(user.last_login_at)
            self.assertIsNotNone(user.current_login_at)
            # The test client has no remote address, hence 'untrackable'.
            self.assertEquals('untrackable', user.last_login_ip)
            self.assertEquals('untrackable', user.current_login_ip)
            self.assertEquals(2, user.login_count)
class PasswordlessTests(SecurityTest):
    """Passwordless (emailed login-link) flow."""

    AUTH_CONFIG = {
        'SECURITY_PASSWORDLESS': True
    }

    def test_login_request_for_inactive_user(self):
        msg = self.app.config['SECURITY_MSG_DISABLED_ACCOUNT'][0]
        r = self._post('/login', data=dict(email='tiya@lp.com'),
                       follow_redirects=True)
        self.assertIn(msg, r.data)

    def test_request_login_token_with_json_and_valid_email(self):
        data = '{"email": "matt@lp.com", "password": "password", "csrf_token":"%s"}' % self.csrf_token
        r = self._post('/login', data=data, content_type='application/json')
        self.assertEquals(r.status_code, 200)
        self.assertNotIn('error', r.data)

    def test_request_login_token_with_json_and_invalid_email(self):
        data = '{"email": "nobody@lp.com", "password": "password"}'
        r = self._post('/login', data=data, content_type='application/json')
        self.assertIn('errors', r.data)

    def test_request_login_token_sends_email_and_can_login(self):
        e = 'matt@lp.com'
        r, user, token = None, None, None
        # Requesting a login link mails exactly one token...
        with capture_passwordless_login_requests() as requests:
            with self.app.extensions['mail'].record_messages() as outbox:
                r = self._post('/login', data=dict(email=e),
                               follow_redirects=True)
                self.assertEqual(len(outbox), 1)
            self.assertEquals(1, len(requests))
            self.assertIn('user', requests[0])
            self.assertIn('login_token', requests[0])
            user = requests[0]['user']
            token = requests[0]['login_token']
        msg = self.app.config['SECURITY_MSG_LOGIN_EMAIL_SENT'][0]
        msg = msg % dict(email=user.email)
        self.assertIn(msg, r.data)
        # ...and visiting the link authenticates the user.
        r = self.client.get('/login/' + token, follow_redirects=True)
        msg = self.get_message('PASSWORDLESS_LOGIN_SUCCESSFUL')
        self.assertIn(msg, r.data)
        r = self.client.get('/profile')
        self.assertIn('Profile Page', r.data)

    def test_invalid_login_token(self):
        msg = self.app.config['SECURITY_MSG_INVALID_LOGIN_TOKEN'][0]
        r = self._get('/login/bogus', follow_redirects=True)
        self.assertIn(msg, r.data)

    def test_token_login_when_already_authenticated(self):
        with capture_passwordless_login_requests() as requests:
            self._post('/login', data=dict(email='matt@lp.com'),
                       follow_redirects=True)
            token = requests[0]['login_token']
        r = self.client.get('/login/' + token, follow_redirects=True)
        msg = self.get_message('PASSWORDLESS_LOGIN_SUCCESSFUL')
        self.assertIn(msg, r.data)
        # A second visit while logged in should not flash success again.
        r = self.client.get('/login/' + token, follow_redirects=True)
        msg = self.get_message('PASSWORDLESS_LOGIN_SUCCESSFUL')
        self.assertNotIn(msg, r.data)

    def test_send_login_with_invalid_email(self):
        r = self._post('/login', data=dict(email='bogus@bogus.com'))
        self.assertIn('Specified user does not exist', r.data)
class ExpiredLoginTokenTests(SecurityTest):
    """Expired passwordless login tokens trigger a fresh login email."""

    AUTH_CONFIG = {
        'SECURITY_PASSWORDLESS': True,
        'SECURITY_LOGIN_WITHIN': '1 milliseconds',  # expire immediately
        'USER_COUNT': 1
    }

    def test_expired_login_token_sends_email(self):
        e = 'matt@lp.com'
        with capture_passwordless_login_requests() as requests:
            self._post('/login', data=dict(email=e), follow_redirects=True)
            token = requests[0]['login_token']
        time.sleep(1.25)  # let the 1 ms token expire
        with self.app.extensions['mail'].record_messages() as outbox:
            r = self.client.get('/login/' + token, follow_redirects=True)
            expire_text = self.AUTH_CONFIG['SECURITY_LOGIN_WITHIN']
            msg = self.app.config['SECURITY_MSG_LOGIN_EXPIRED'][0]
            msg = msg % dict(within=expire_text, email=e)
            self.assertIn(msg, r.data)
            self.assertEqual(len(outbox), 1)
            self.assertIn(e, outbox[0].html)
            # The new mail carries a fresh token, not the expired one.
            self.assertNotIn(token, outbox[0].html)
class AsyncMailTaskTests(SecurityTest):
    """The send_mail_task decorator replaces default mail delivery."""

    AUTH_CONFIG = {
        'SECURITY_RECOVERABLE': True,
        'USER_COUNT': 1
    }

    def setUp(self):
        super(AsyncMailTaskTests, self).setUp()
        self.mail_sent = False  # flipped by the custom task below

    def test_send_email_task_is_called(self):
        @self.app.security.send_mail_task
        def send_email(msg):
            self.mail_sent = True

        self._post('/reset', data=dict(email='matt@lp.com'))
        self.assertTrue(self.mail_sent)
class NoBlueprintTests(SecurityTest):
    """With register_blueprint=False no views exist, but HTTP auth still works."""

    APP_KWARGS = {
        'register_blueprint': False,
    }
    AUTH_CONFIG = {
        'USER_COUNT': 1
    }

    def test_login_endpoint_is_404(self):
        r = self._get('/login')
        self.assertEqual(404, r.status_code)

    def test_http_auth_without_blueprint(self):
        # Basic auth is handled by the decorator, not a blueprint view.
        auth = 'Basic ' + base64.b64encode("matt@lp.com:password")
        r = self._get('/http', headers={'Authorization': auth})
        self.assertIn('HTTP Authentication', r.data)
class ExtendFormsTest(SecurityTest):
    """Custom login/register form classes replace the defaults."""

    class MyLoginForm(LoginForm):
        email = TextField('My Login Email Address Field')

    class MyRegisterForm(RegisterForm):
        email = TextField('My Register Email Address Field')

    APP_KWARGS = {
        'login_form': MyLoginForm,
        'register_form': MyRegisterForm,
    }
    AUTH_CONFIG = {
        'SECURITY_CONFIRMABLE': False,
        'SECURITY_REGISTERABLE': True,
    }

    def test_login_view(self):
        r = self._get('/login', follow_redirects=True)
        self.assertIn("My Login Email Address Field", r.data)

    def test_register(self):
        r = self._get('/register', follow_redirects=True)
        self.assertIn("My Register Email Address Field", r.data)
class RecoverableExtendFormsTest(SecurityTest):
    """Custom forgot/reset password form classes replace the defaults."""

    class MyForgotPasswordForm(ForgotPasswordForm):
        email = TextField('My Forgot Password Email Address Field',
                          validators=[valid_user_email])

    class MyResetPasswordForm(ResetPasswordForm):
        submit = SubmitField("My Reset Password Submit Field")

    APP_KWARGS = {
        'forgot_password_form': MyForgotPasswordForm,
        'reset_password_form': MyResetPasswordForm,
    }
    AUTH_CONFIG = {
        'SECURITY_RECOVERABLE': True,
    }

    def test_forgot_password(self):
        r = self._get('/reset', follow_redirects=True)
        self.assertIn("My Forgot Password Email Address Field", r.data)

    def test_reset_password(self):
        # A real token is needed to render the reset form.
        with capture_reset_password_requests() as requests:
            self._post('/reset', data=dict(email='joe@lp.com'),
                       follow_redirects=True)
            token = requests[0]['token']
        r = self._get('/reset/' + token)
        self.assertIn("My Reset Password Submit Field", r.data)
class PasswordlessExtendFormsTest(SecurityTest):
    """A custom passwordless login form replaces the default."""

    class MyPasswordlessLoginForm(PasswordlessLoginForm):
        email = TextField('My Passwordless Login Email Address Field')

    APP_KWARGS = {
        'passwordless_login_form': MyPasswordlessLoginForm,
    }
    AUTH_CONFIG = {
        'SECURITY_PASSWORDLESS': True,
    }

    def test_passwordless_login(self):
        r = self._get('/login', follow_redirects=True)
        self.assertIn("My Passwordless Login Email Address Field", r.data)
class ConfirmableExtendFormsTest(SecurityTest):
    """Custom confirm-register and send-confirmation forms replace the defaults."""

    class MyConfirmRegisterForm(ConfirmRegisterForm):
        email = TextField('My Confirm Register Email Address Field')

    class MySendConfirmationForm(SendConfirmationForm):
        email = TextField('My Send Confirmation Email Address Field')

    APP_KWARGS = {
        'confirm_register_form': MyConfirmRegisterForm,
        'send_confirmation_form': MySendConfirmationForm,
    }
    AUTH_CONFIG = {
        'SECURITY_CONFIRMABLE': True,
        'SECURITY_REGISTERABLE': True,
    }

    def test_register(self):
        r = self._get('/register', follow_redirects=True)
        self.assertIn("My Confirm Register Email Address Field", r.data)

    def test_send_confirmation(self):
        r = self._get('/confirm', follow_redirects=True)
        self.assertIn("My Send Confirmation Email Address Field", r.data)
| 33.407928
| 104
| 0.633952
| 25,505
| 0.976268
| 0
| 0
| 96
| 0.003675
| 0
| 0
| 6,536
| 0.250182
|
b8ae72a8774e3f5e5b83670734de99743ac5f598
| 94
|
py
|
Python
|
Server/programs/__init__.py
|
VHirtz/CC-mastermind
|
11dc4e043ed67c86e66230812cbd86f736e6a7d1
|
[
"MIT"
] | null | null | null |
Server/programs/__init__.py
|
VHirtz/CC-mastermind
|
11dc4e043ed67c86e66230812cbd86f736e6a7d1
|
[
"MIT"
] | null | null | null |
Server/programs/__init__.py
|
VHirtz/CC-mastermind
|
11dc4e043ed67c86e66230812cbd86f736e6a7d1
|
[
"MIT"
] | null | null | null |
from . import program
from . import turtle_test
from . import antoine_test
from . import dance
| 23.5
| 26
| 0.797872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b8af272edd34ec0b3bc42014f30caae48187c86a
| 8,283
|
py
|
Python
|
src/robotkernel/utils.py
|
robocorp/robocode-kernel
|
b9c7ed20ba0046d0b3bae4e461205f9fa19b77a8
|
[
"BSD-3-Clause"
] | 4
|
2020-04-01T16:24:01.000Z
|
2022-02-16T19:22:44.000Z
|
src/robotkernel/utils.py
|
robocorp/robocode-kernel
|
b9c7ed20ba0046d0b3bae4e461205f9fa19b77a8
|
[
"BSD-3-Clause"
] | 8
|
2020-04-21T13:35:02.000Z
|
2022-03-12T00:39:17.000Z
|
src/robotkernel/utils.py
|
robocorp/robocode-kernel
|
b9c7ed20ba0046d0b3bae4e461205f9fa19b77a8
|
[
"BSD-3-Clause"
] | 1
|
2020-04-03T10:48:31.000Z
|
2020-04-03T10:48:31.000Z
|
# -*- coding: utf-8 -*-
from copy import deepcopy
from difflib import SequenceMatcher
from IPython.core.display import Image
from IPython.core.display import JSON
from json import JSONDecodeError
from lunr.builder import Builder
from lunr.stemmer import stemmer
from lunr.stop_word_filter import stop_word_filter
from lunr.trimmer import trimmer
from operator import itemgetter
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from robot.libdocpkg.htmlwriter import DocToHtml
from robotkernel.constants import HAS_RF32_PARSER
import base64
import json
import os
import pygments
import re
# RF >= 3.2 removed robot.parsing.settings.Documentation; provide an
# empty stand-in so isinstance() checks elsewhere stay safe on both
# parser generations.
if HAS_RF32_PARSER:

    class Documentation:
        pass  # No longer with RF32

else:
    from robot.parsing.settings import Documentation
def javascript_uri(html, filename=""):
    """Return a ``javascript:`` bookmarklet URI that opens *html* in a new window.

    Used instead of a ``data:`` URI because text/html data-uris are not
    supported by IE.  The payload travels base64-encoded and is decoded
    in the browser with ``window.atob``; a fixed "Download" link is
    injected so the document can be saved as *filename*.
    """
    payload = html.encode("utf-8") if isinstance(html, str) else html
    encoded = base64.b64encode(payload).decode("utf-8")
    script = (
        "var w=window.open();var d='" + encoded + "';"
        "w.document.open();"
        "w.document.write(window.atob(d));"
        "w.document.close();"
        "var a=w.document.createElement('a');"
        "a.appendChild(w.document.createTextNode('Download'));"
        "a.href='data:text/html;base64,' + d;"
        "a.download='" + filename + "';"
        "a.style='position:fixed;top:0;right:0;"
        "color:white;background:black;text-decoration:none;"
        "font-weight:bold;padding:7px 14px;border-radius:0 0 0 5px;';"
        "w.document.body.append(a);"
    )
    return "javascript:(function(el){" + script + "})(this);"
def data_uri(mimetype, data):
    """Return *data* (bytes) as a base64-encoded ``data:`` URI of *mimetype*."""
    encoded = base64.b64encode(data).decode("utf-8")
    return f"data:{mimetype};base64,{encoded}"
def highlight(language, data):
    """Return *data* syntax-highlighted as inline HTML.

    *language* is a pygments lexer name; ``noclasses`` embeds styles
    inline and ``nowrap`` skips the surrounding ``<div>``/``<pre>``.
    """
    formatter = HtmlFormatter(noclasses=True, nowrap=True)
    return pygments.highlight(data, get_lexer_by_name(language), formatter)
def lunr_builder(ref, fields):
    """A convenience function to configure and construct a lunr.Builder.

    Returns:
        Builder: configured with the default pipeline, *ref* as the
        document reference and one searchable field per entry in *fields*.
    """
    b = Builder()
    b.pipeline.add(trimmer, stop_word_filter, stemmer)
    b.search_pipeline.add(stemmer)
    b.ref(ref)
    for field_name in fields:
        b.field(field_name)
    return b
def readable_keyword(s):
    """Return keyword with only the first letter in title case.

    Section markers (``*...``) and setting names (``[...]``) are
    returned unchanged, and any ``Library.`` prefix is preserved as-is.
    """
    if not s or s.startswith(("*", "[")):
        return s
    library, _, name = s.rpartition(".")
    prefix = library + "." if library else ""
    return prefix + name[0].title() + name[1:].lower()
def detect_robot_context(code, cursor_pos):
    """Return robot code context in cursor position.

    One of ``__root__``, ``__settings__``, ``__tasks__`` or
    ``__keywords__``, derived from the last ``***`` section header
    before the cursor and whether the current line is indented.
    """
    head = code[:cursor_pos]
    current_line = head.rsplit("\n")[-1]
    parts = head.rsplit("***", 2)
    if len(parts) != 3:
        # No complete section header before the cursor.
        return "__root__"
    section = parts[1].strip().lower()
    if section == "settings":
        return "__settings__"
    if current_line.lstrip() == current_line:
        # Unindented line: at column zero we are at a name, not a body.
        return "__root__"
    if section in ("tasks", "test cases"):
        return "__tasks__"
    if section == "keywords":
        return "__keywords__"
    return "__root__"
NAME_REGEXP = re.compile("`(.+?)`")


def get_keyword_doc(keyword):
    """Build a plain-text / HTML mimebundle documenting *keyword*.

    The title is the keyword name (section stars stripped) plus its
    argument list; the body is the keyword documentation rendered with
    Robot Framework's DocToHtml, with backtick-quoted names rewritten
    into ``<code>`` elements.
    """
    title = keyword.name.strip("*").strip()
    title_html = f"<strong>{title}</strong>"
    if keyword.args:
        joined_args = ", ".join(keyword.args)
        title += " " + joined_args
        title_html += " " + joined_args
    body = ""
    if keyword.doc:
        if isinstance(keyword.doc, Documentation):
            # Pre-RF 3.2 parser stores text on .value with literal "\n".
            body = "\n\n" + keyword.doc.value.replace("\\n", "\n")
        else:
            body = "\n\n" + keyword.doc
    rendered = DocToHtml(keyword.doc_format)(body)
    rendered = NAME_REGEXP.sub(lambda m: f"<code>{m.group(1)}</code>", rendered)
    return {
        "text/plain": title + "\n\n" + body,
        "text/html": f"<p>{title_html}</p>" + rendered,
    }
def scored_results(needle, results):
    """Return *results* sorted best-first by longest common substring with *needle*.

    Each returned entry is a deep copy of the input dict carrying an added
    ``"score"`` tuple: (match length, match length / len(ref)).  The input
    list is left unmodified.
    """
    scored = deepcopy(results)
    for entry in scored:
        ref = entry["ref"]
        match = SequenceMatcher(
            None, needle.lower(), ref.lower(), autojunk=False
        ).find_longest_match(0, len(needle), 0, len(ref))
        entry["score"] = (match.size, match.size / float(len(ref)))
    ordered = sorted(scored, key=itemgetter("score"))
    ordered.reverse()
    return ordered
def lunr_query(query):
    """Normalize *query* into a wildcard lunr query string.

    Escapes lunr's special ``:`` and ``*`` characters, strips square
    brackets, and wraps the lower-cased, stripped query in ``*...*``.
    """
    # BUG FIX: re.U was passed positionally, landing in re.sub's *count*
    # parameter (re.U == 32) and silently capping replacements at 32.
    # Pass it via the flags keyword instead (a no-op for str patterns,
    # kept for explicitness).
    query = re.sub(r"([:*])", r"\\\1", query, flags=re.U)
    query = re.sub(r"[\[\]]", r"", query, flags=re.U)
    return f"*{query.strip().lower()}*"
def get_lunr_completions(needle, index, keywords, context):
    """Return completion strings for *needle* from a lunr *index*.

    Context-scoped refs (``__tasks__``/``__keywords__``/``__settings__``)
    only complete inside the matching *context*; hits are ranked with
    scored_results() and rendered with readable_keyword().
    """
    completions = []
    if not needle.rstrip():
        return completions
    query = lunr_query(needle)
    hits = index.search(query) + index.search(query.strip("*"))
    for hit in scored_results(needle, hits):
        ref = hit["ref"]
        if ref.startswith("__") and not ref.startswith(context):
            continue
        if not ref.startswith(context) and context not in (
            "__tasks__",
            "__keywords__",
            "__settings__",
        ):
            continue
        if "." in needle:
            # Dotted needle: complete the fully qualified ref itself.
            completions.append(readable_keyword(ref))
        else:
            name = keywords[ref].name
            if name not in completions:
                completions.append(readable_keyword(name))
    return completions
def to_html(obj):
    """Return object as highlighted JSON."""
    serialized = json.dumps(obj, sort_keys=False, indent=4)
    return highlight("json", serialized)
# noinspection PyProtectedMember
def to_mime_and_metadata(obj) -> (dict, dict):  # noqa: C901
    """Best-effort conversion of *obj* into a Jupyter (mimebundle, metadata) pair.

    Tries, in order: raw bytes, image URLs, image file paths, the
    object's own IPython ``_repr_*_`` hooks, then JSON/HTML fallbacks.
    Returns ``({}, {})`` when nothing applies.
    """
    if isinstance(obj, bytes):
        # Bytes are not JSON-serializable; show them base64-encoded.
        obj = base64.b64encode(obj).decode("utf-8")
        return {"text/html": to_html(obj)}, {}
    elif isinstance(obj, str) and obj.startswith("http"):
        # NOTE(review): the empty alternative in (gif|jpg|svg|jpeg||png)
        # also matches any value ending in "." — looks like a typo for a
        # single "|"; confirm before tightening.
        if re.match(r".*\.(gif|jpg|svg|jpeg||png)$", obj, re.I):
            try:
                return Image(obj, embed=True)._repr_mimebundle_()
            except TypeError:
                pass
        return {"text/html": to_html(obj)}, {}
    elif isinstance(obj, str) and len(obj) < 1024 and os.path.exists(obj):
        # Short strings naming an existing file: embed images directly.
        if re.match(r".*\.(gif|jpg|svg|jpeg||png)$", obj, re.I):
            try:
                return Image(obj, embed=True)._repr_mimebundle_()
            except TypeError:
                pass
        return {"text/html": to_html(obj)}, {}
    elif hasattr(obj, "_repr_mimebundle_"):
        obj.embed = True
        return obj._repr_mimebundle_()
    elif hasattr(obj, "_repr_json_"):
        obj.embed = True
        return {"application/json": obj._repr_json_()}, {}
    elif hasattr(obj, "_repr_html_"):
        obj.embed = True
        return {"text/html": obj._repr_html_()}, {}
    elif hasattr(obj, "_repr_png_"):
        return {"image/png": obj._repr_png_()}, {}
    elif hasattr(obj, "_repr_jpeg_"):
        return {"image/jpeg": obj._repr_jpeg_()}, {}
    elif hasattr(obj, "_repr_svg_"):
        return {"image/svg": obj._repr_svg_()}, {}
    try:
        if isinstance(obj, str):
            return {"text/html": f"<pre>{to_html(obj)}</pre>".replace("\\n", "\n")}, {}
        else:
            # JSON display (with metadata) plus an HTML fallback rendering.
            data, metadata = JSON(data=obj, expanded=True)._repr_json_()
            return (
                {"application/json": data, "text/html": f"<pre>{to_html(obj)}</pre>"},
                metadata,
            )
    except (TypeError, JSONDecodeError):
        pass
    try:
        return {"text/html": to_html(obj)}, {}
    except TypeError:
        return {}, {}
def yield_current_connection(connections, types_):
    """Yield the first "current" connection instance whose type is in *types_*.

    Yields at most one instance; an empty generator means no current
    connection of the requested types exists.
    """
    matching = [
        conn["instance"]
        for conn in connections
        if conn["type"] in types_ and conn["current"]
    ]
    if matching:
        yield matching[0]
def close_current_connection(connections, connection_to_close):
    """Remove *connection_to_close* from *connections*, quitting it first.

    Matching is by identity on the stored ``"instance"``; if the instance
    exposes a ``quit()`` method it is called before removal.  Instances
    not present in *connections* are silently ignored.
    """
    for entry in connections:
        if entry["instance"] is connection_to_close:
            if hasattr(entry["instance"], "quit"):
                entry["instance"].quit()
            connections.remove(entry)
            return
| 32.482353
| 87
| 0.60268
| 56
| 0.006761
| 251
| 0.030303
| 0
| 0
| 0
| 0
| 1,835
| 0.221538
|
b8b06e91b0fbc55f204d0286612efe3154be4b90
| 5,022
|
py
|
Python
|
Pyduino/__init__.py
|
ItzTheDodo/Pyduino
|
a68d6a3214d5fb452e8b8e53cb013ee7205734bb
|
[
"Apache-2.0"
] | null | null | null |
Pyduino/__init__.py
|
ItzTheDodo/Pyduino
|
a68d6a3214d5fb452e8b8e53cb013ee7205734bb
|
[
"Apache-2.0"
] | null | null | null |
Pyduino/__init__.py
|
ItzTheDodo/Pyduino
|
a68d6a3214d5fb452e8b8e53cb013ee7205734bb
|
[
"Apache-2.0"
] | null | null | null |
# Function Credits: https://github.com/lekum/pyduino/blob/master/pyduino/pyduino.py (lekum (as of 2014))
# Written By: ItzTheDodo
from Pyduino.Boards.Uno import UnoInfo
from Pyduino.Boards.Mega import MegaInfo
from Pyduino.Boards.Diecimila import DiecimilaInfo
from Pyduino.Boards.Due import DueInfo
from Pyduino.Boards.Nano import NanoInfo
from Pyduino.Boards.Mini import MiniInfo
from Pyduino.Boards.Lilypad import LilypadInfo
from Pyduino.Boards.CustomBoardProfile import BoardProfileInfo
from Pyduino.Utils.Pins import *
from Pyduino.Utils.ReadWrite import *
from Pyduino.Utils.ReadOnly import *
import serial
import time
import sys
class Pyduino(object):
    """Serial bridge to an Arduino-style board.

    Commands are sent as ASCII over a pyserial connection:
    ``DW:<pin>:<0|1>`` for digital writes and ``M:<pin>:<I|O|P>`` for
    pin-mode changes; the sketch on the board parses them.
    """

    def __init__(self, board, serial_port='COM5', baud_rate='9600', read_timeout=5):
        """Open the serial connection and select a board profile.

        board: profile name ("uno", "mega", "diecimila", "due", "nano",
            "mini" or "lilypad"; case-insensitive).
        serial_port / baud_rate / read_timeout: pyserial settings.
        """
        self.serialPort = serial_port
        self.baudRate = baud_rate
        self.readTimeout = read_timeout
        self.board = board
        self.run = True        # mainloop() spins while this stays True
        self.pinnames = {}     # define()d alias -> pin number
        self.pins = {}         # str(pin) -> last option/mode recorded
        self.pinModes = {}     # NOTE(review): never populated; pinMode()
                               # records into self.pins — confirm intent
        self.conn = serial.Serial(self.serialPort, self.baudRate)
        self.conn.timeout = self.readTimeout
        name = self.board.lower()
        if name == "uno":
            self.boardinfo = UnoInfo()
        elif name == "mega":
            self.boardinfo = MegaInfo()
        elif name == "diecimila":
            self.boardinfo = DiecimilaInfo()
        elif name == "due":
            self.boardinfo = DueInfo()
        # Generalized: these profiles were imported but unreachable before.
        elif name == "nano":
            self.boardinfo = NanoInfo()
        elif name == "mini":
            self.boardinfo = MiniInfo()
        elif name == "lilypad":
            self.boardinfo = LilypadInfo()

    def getBoardType(self):
        """Return the board name passed to the constructor."""
        return self.board

    def getBoardInfo(self):
        """Return the selected board-info profile object."""
        return self.boardinfo

    def getPinsInUse(self):
        """Return the dict of pins written/configured so far."""
        return self.pins

    def mainloop(self, loop, *params):
        """Repeatedly call loop(*params) while self.run is True."""
        while self.run:
            loop(*params)

    def setup(self, definition, *params):
        """Run definition(*params) once (Arduino-style setup hook)."""
        definition(*params)

    def define(self, name, pin):
        """Register *name* as an alias for *pin*; return the name.

        Silently returns None when the pin number exceeds the board's
        pin count.
        """
        if pin > self.boardinfo.getMainInfo()[0]:
            return
        self.pinnames[name] = pin
        return name

    def setPin(self, pin, option):
        """Digitally write HIGH or LOW to *pin* (a number or a define()d name).

        BUG FIX: the guard used to read
        ``if option != HIGH or option != LOW: return`` which is true for
        every value, so the method never sent anything.
        """
        if option not in (HIGH, LOW):
            return
        dv = 1 if option == HIGH else 0
        if type(pin) is str:
            number = self.pinnames[pin]
        elif type(pin) is int:
            if pin > self.boardinfo.getMainInfo()[0]:
                return
            number = pin
        else:
            return
        self.pins[str(number)] = option
        self.conn.write(("DW:" + str(number) + ":" + str(dv)).encode())

    def newSerial(self, serial_port, baud_rate, read_timeout):
        """Replace the serial connection with a fresh one."""
        self.conn = serial.Serial(serial_port, baud_rate)
        self.conn.timeout = read_timeout

    def pinMode(self, pin, mode):
        """Configure *pin* as INPUT, OUTPUT or INPUT_PULLUP.

        BUG FIX: the guard used to read
        ``if mode != INPUT or mode != OUTPUT or mode != INPUT_PULLUP``
        which is always true, so no mode command was ever sent.
        """
        if mode == INPUT:
            m = "I"
        elif mode == OUTPUT:
            m = "O"
        elif mode == INPUT_PULLUP:
            m = "P"
        else:
            return
        if type(pin) is str:
            number = self.pinnames[pin]
        elif type(pin) is int:
            if pin > self.boardinfo.getMainInfo()[0]:
                return
            number = pin
        else:
            return
        self.pins[str(number)] = mode
        self.conn.write(("M:" + str(number) + ":" + m).encode())

    def newReadWrite(self, pin, digitalval=None, analogval=None):
        """Build a ReadWrite helper bound to this connection and *pin*.

        Only forwards the keyword arguments that were actually supplied,
        preserving ReadWrite's own defaults for the rest.
        """
        kwargs = {}
        if digitalval is not None:
            kwargs["digitalval"] = digitalval
        if analogval is not None:
            kwargs["analogval"] = analogval
        return ReadWrite(self.conn, pin, **kwargs)

    def createCustomBoardProfile(self, datapins, analogInPins, GND, pow, TX, RX):
        """Replace the board profile with a custom pin layout.

        NOTE: the ``pow`` parameter shadows the builtin; the name is kept
        for keyword-call compatibility.
        """
        self.boardinfo = BoardProfileInfo(datapins, analogInPins, GND, pow, TX, RX)

    def delay(self, t):
        """Sleep for *t* milliseconds."""
        time.sleep(int(t) / 1000)

    def stop(self):
        """Terminate the process (exit code 2)."""
        sys.exit(2)
if __name__ == "__main__":
    # Demo: drive every data pin HIGH for 1 s, then LOW for 0.5 s, forever.
    p = Pyduino("Uno")
    # BUG FIX: the class code indexes getMainInfo() with the int 0 (see
    # setPin/pinMode); indexing with the string "0" here was inconsistent
    # and would fail on a sequence result, so it is changed to match.
    pin_count = p.getBoardInfo().getMainInfo()[0]
    for i in range(pin_count):
        p.pinMode(i, OUTPUT)
    while True:
        for i in range(pin_count):
            p.setPin(i, HIGH)
        p.delay(1000)
        for i in range(pin_count):
            p.setPin(i, LOW)
        p.delay(500)
| 27.293478
| 105
| 0.554162
| 3,906
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 285
| 0.05675
|
b8b10e6e66f7f88c735881020e22e44e43687a75
| 2,218
|
py
|
Python
|
apps/api/v1/social_auth.py
|
asmuratbek/oobamarket
|
1053976a13ea84b9aabfcbbcbcffd79549ce9538
|
[
"MIT"
] | null | null | null |
apps/api/v1/social_auth.py
|
asmuratbek/oobamarket
|
1053976a13ea84b9aabfcbbcbcffd79549ce9538
|
[
"MIT"
] | 7
|
2020-06-05T23:36:01.000Z
|
2022-01-13T01:42:07.000Z
|
apps/api/v1/social_auth.py
|
asmuratbek/oobamarket
|
1053976a13ea84b9aabfcbbcbcffd79549ce9538
|
[
"MIT"
] | null | null | null |
from allauth.socialaccount.helpers import complete_social_login
from allauth.socialaccount.models import SocialApp, SocialToken, SocialLogin, SocialAccount
from allauth.socialaccount.providers.facebook.views import fb_complete_login
from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
from django.http import JsonResponse
from requests import HTTPError
from rest_framework.authtoken.models import Token
from apps.users.models import User
__author__ = 'kolyakoikelov'
class SocialAuth(object):
    """Exchanges a social-provider OAuth access token for a DRF auth token.

    Supports the "google" and "facebook" providers; the concrete allauth
    completion helper is chosen from ``provider``.
    """

    def __init__(self, provider, token_key):
        """
        :param provider: provider name as stored on the SocialApp
            (e.g. "google" or "facebook").
        :param token_key: name of the POST parameter carrying the
            provider-issued access token.
        """
        self.provider = provider
        self.token_key = token_key

    def login(self, request):
        """Authenticate ``request`` via the social token.

        Returns a JsonResponse containing the DRF token key, or a 400
        response when the provider exposes no email or the provider call
        fails.
        """
        try:
            original_request = request._request
            token = request.POST.get(self.token_key, '')
            google_auth_adapter = GoogleOAuth2Adapter(request=original_request)
            app = SocialApp.objects.get(provider=self.provider)
            social_auth_token = SocialToken(app=app, token=token)
            # BUG FIX: the original used ``self.provider is 'google'``,
            # an identity comparison against a string literal whose result
            # is implementation-defined; use equality instead.
            if self.provider == 'google':
                login = google_auth_adapter.complete_login(
                    request=original_request, app=app, token=social_auth_token)
            else:
                login = fb_complete_login(
                    request=request, app=app, token=social_auth_token)
            extra_data = login.account.extra_data
            # Refuse logins for accounts that expose no email address.
            if 'email' not in extra_data:
                return JsonResponse(dict(message='email is not provided'), status=400)
            user = User.objects.filter(email=extra_data['email']).first()
            if user is not None:
                # Known user: just (re)issue the API token.
                token, is_created = Token.objects.get_or_create(user=user)
                return JsonResponse(dict(key=token.key))
            # New user: finish the allauth social-login flow, then issue a token.
            login.token = social_auth_token
            login.state = SocialLogin.state_from_request(original_request)
            complete_social_login(original_request, login)
            token, is_created = Token.objects.get_or_create(user=original_request.user)
            return JsonResponse(dict(key=token.key))
        except HTTPError as e:
            return JsonResponse(dict(message=str(e)), status=400)
| 41.074074
| 118
| 0.701533
| 1,722
| 0.776375
| 0
| 0
| 0
| 0
| 0
| 0
| 62
| 0.027953
|
b8b1d87440313264fbf621f72a939400ed1ccedc
| 390
|
py
|
Python
|
Global and Local Inversions/Solution.py
|
chandrikadeb7/Awesome-LeetCode-Python
|
57902d3394761afd9a51b6405c085c87526e647e
|
[
"MIT"
] | 2
|
2021-07-16T06:46:58.000Z
|
2021-12-08T01:15:09.000Z
|
Global and Local Inversions/Solution.py
|
chandrikadeb7/Awesome-LeetCode-Python
|
57902d3394761afd9a51b6405c085c87526e647e
|
[
"MIT"
] | null | null | null |
Global and Local Inversions/Solution.py
|
chandrikadeb7/Awesome-LeetCode-Python
|
57902d3394761afd9a51b6405c085c87526e647e
|
[
"MIT"
] | null | null | null |
class Solution:
    def isIdealPermutation(self, A: List[int]) -> bool:
        """Return True iff the permutation's global inversion count equals
        its local inversion count (LeetCode 775)."""
        # Local inversions: adjacent pairs that are out of order.
        local_count = sum(1 for left, right in zip(A, A[1:]) if right < left)
        # Lower bound on global inversions contributed by values displaced
        # left of their index: a displacement of ``gap`` implies at least
        # gap*(gap+1)/2 inversions.
        global_count = 0
        for idx in range(1, len(A)):
            if A[idx] < idx:
                gap = idx - A[idx]
                global_count += gap * (gap + 1) // 2
        return global_count == local_count
| 32.5
| 55
| 0.315385
| 389
| 0.997436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b8b4581f931e18341efca7f99abcc93a3432695c
| 13,076
|
py
|
Python
|
queryable_properties/properties/common.py
|
W1ldPo1nter/django-queryable-properties
|
9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1
|
[
"BSD-3-Clause"
] | 36
|
2019-10-22T11:44:37.000Z
|
2022-03-15T21:27:03.000Z
|
queryable_properties/properties/common.py
|
W1ldPo1nter/django-queryable-properties
|
9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1
|
[
"BSD-3-Clause"
] | 6
|
2020-10-03T15:13:26.000Z
|
2021-09-25T14:05:50.000Z
|
queryable_properties/properties/common.py
|
W1ldPo1nter/django-queryable-properties
|
9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1
|
[
"BSD-3-Clause"
] | 3
|
2021-04-26T08:30:46.000Z
|
2021-08-18T09:04:49.000Z
|
# encoding: utf-8
import operator
import six
from django.db.models import BooleanField, Field, Q
from ..utils.internal import MISSING_OBJECT, ModelAttributeGetter, QueryPath
from .base import QueryableProperty
from .mixins import AnnotationGetterMixin, AnnotationMixin, boolean_filter, LookupFilterMixin
class BooleanMixin(LookupFilterMixin):
    """
    Internal mixin for queryable properties that produce boolean values;
    intended to be combined with one of the annotation mixins. Subclasses
    supply the actual condition via :meth:`_get_condition`.
    """

    filter_requires_annotation = False

    def _get_condition(self, cls):  # pragma: no cover
        """
        Build the query filter condition for this boolean property, shared
        by both the filter and the annotation implementation.

        :param type cls: The model class whose queryset is being filtered
                         or annotated.
        :return: The filter condition for this property.
        :rtype: django.db.models.Q
        """
        raise NotImplementedError()

    @boolean_filter
    def get_exact_filter(self, cls):
        # Filtering simply applies the subclass-provided condition.
        return self._get_condition(cls)

    def get_annotation(self, cls):
        from django.db.models import Case, When
        condition = self._get_condition(cls)
        true_branch = When(condition, then=True)
        return Case(true_branch, default=False, output_field=BooleanField())
class ValueCheckProperty(BooleanMixin, AnnotationMixin, QueryableProperty):
    """
    Boolean property reporting whether a (possibly related) model attribute
    equals one of a fixed set of values.

    Supports queryset filtering and ``CASE``/``WHEN``-based annotating.
    """

    def __init__(self, attribute_path, *values, **kwargs):
        """
        Initialize a new property that checks for certain field values.

        :param str attribute_path: Dotted path (attrgetter-style) to the
            attribute to compare against. A ``None`` or an
            ``ObjectDoesNotExist`` raised on an intermediate step counts as
            "no match" instead of raising.
        :param values: The accepted value(s).
        """
        self.attribute_getter = ModelAttributeGetter(attribute_path)
        self.values = values
        super(ValueCheckProperty, self).__init__(**kwargs)

    def get_value(self, obj):
        # Python-side evaluation mirrors the queryset ``in`` lookup below.
        current = self.attribute_getter.get_value(obj)
        return current in self.values

    def _get_condition(self, cls):
        return self.attribute_getter.build_filter('in', self.values)
class RangeCheckProperty(BooleanMixin, AnnotationMixin, QueryableProperty):
    """
    A property that checks if a static or dynamic value is contained in a range
    expressed by two field values and returns a corresponding boolean value.

    Supports queryset filtering and ``CASE``/``WHEN``-based annotating.
    """

    def __init__(self, min_attribute_path, max_attribute_path, value, include_boundaries=True, in_range=True,
                 include_missing=False, **kwargs):
        """
        Initialize a new property that checks if a value is contained in a
        range expressed by two field values.

        :param str min_attribute_path: Dotted path (attrgetter-style) to the
            attribute holding the lower boundary. A ``None`` or an
            ``ObjectDoesNotExist`` raised on an intermediate step is treated
            as a missing value instead of raising.
        :param str max_attribute_path: Path to the attribute holding the
            upper boundary; same missing-value behavior as the lower one.
        :param value: The value tested against the boundaries. May be a
            zero-argument callable whose return value is used instead.
        :param bool include_boundaries: Whether a value exactly equal to a
            boundary counts as inside the range.
        :param bool in_range: If True the property is True when the value is
            inside the range; if False, when it is outside. Also inverts the
            effect of ``include_boundaries`` and ``include_missing``.
        :param bool include_missing: Whether a missing boundary counts as an
            open end of the range (useful for nullable fields).
        """
        self.min_attribute_getter = ModelAttributeGetter(min_attribute_path)
        self.max_attribute_getter = ModelAttributeGetter(max_attribute_path)
        self.value = value
        self.include_boundaries = include_boundaries
        self.in_range = in_range
        self.include_missing = include_missing
        super(RangeCheckProperty, self).__init__(**kwargs)

    @property
    def final_value(self):
        # The configured value may be a zero-argument callable (e.g. a
        # "now" function); resolve it lazily on every access.
        value = self.value
        if callable(value):
            value = value()
        return value

    def get_value(self, obj):
        value = self.final_value
        min_value = self.min_attribute_getter.get_value(obj)
        max_value = self.max_attribute_getter.get_value(obj)
        # Boundary inclusion selects between <=/>= and </> comparisons.
        lower_operator = operator.le if self.include_boundaries else operator.lt
        greater_operator = operator.ge if self.include_boundaries else operator.gt
        # A missing boundary (None or a broken relation, represented by
        # MISSING_OBJECT) counts as satisfied only when include_missing is set.
        contained = self.include_missing if min_value in (None, MISSING_OBJECT) else greater_operator(value, min_value)
        contained &= self.include_missing if max_value in (None, MISSING_OBJECT) else lower_operator(value, max_value)
        # XOR flips the result when the property checks for "out of range".
        return not (contained ^ self.in_range)

    def _get_condition(self, cls):
        value = self.final_value
        lower_condition = self.min_attribute_getter.build_filter('lte' if self.include_boundaries else 'lt', value)
        upper_condition = self.max_attribute_getter.build_filter('gte' if self.include_boundaries else 'gt', value)
        if self.include_missing:
            # NULL boundaries are treated as open ends of the range.
            lower_condition |= self.min_attribute_getter.build_filter('isnull', True)
            upper_condition |= self.max_attribute_getter.build_filter('isnull', True)
        if not self.in_range:
            # De Morgan: NOT (lower AND upper) == NOT lower OR NOT upper.
            return ~lower_condition | ~upper_condition
        return lower_condition & upper_condition
class RelatedExistenceCheckProperty(BooleanMixin, AnnotationGetterMixin, QueryableProperty):
    """
    Boolean property that is True when at least one related object exists in
    the database for the instance using the property.

    Supports queryset filtering and ``CASE``/``WHEN``-based annotating.
    """

    def __init__(self, relation_path, **kwargs):
        """
        :param str relation_path: Path to the related object/field whose
            existence is checked. May contain the lookup separator (``__``)
            to reach more remote relations.
        """
        super(RelatedExistenceCheckProperty, self).__init__(**kwargs)
        query_path = QueryPath(relation_path) + 'isnull'
        self.filter = query_path.build_filter(False)

    def get_value(self, obj):
        queryset = self.get_queryset_for_object(obj)
        return queryset.filter(self.filter).exists()

    def _get_condition(self, cls):
        # Filter via a subquery to avoid side effects a JOIN might
        # introduce into the outer queryset.
        subquery = self.get_queryset(cls).filter(self.filter)
        return Q(pk__in=subquery)
class MappingProperty(AnnotationMixin, QueryableProperty):
    """
    A property that maps values of a (possibly related) attribute onto other
    values via a fixed translation table.
    """

    # Borrow Django's field implementation to forcibly resolve lazy values.
    _force_value = six.get_unbound_function(Field.get_prep_value)

    def __init__(self, attribute_path, output_field, mappings, default=None, **kwargs):
        """
        Initialize a property that maps attribute values to other values.

        :param str attribute_path: Dotted path (attrgetter-style) to the
            attribute whose values are translated. A ``None`` or an
            ``ObjectDoesNotExist`` on an intermediate step counts as
            "no match" instead of raising.
        :param django.db.models.Field output_field: Field representing the
            mapped values in querysets.
        :param mappings: Iterable of 2-tuples; the first element of each
            tuple maps to the second.
        :type mappings: collections.Iterable[(object, object)]
        :param default: Value returned/used when no mapping matches.
            Defaults to None.
        """
        super(MappingProperty, self).__init__(**kwargs)
        self.attribute_getter = ModelAttributeGetter(attribute_path)
        self.output_field = output_field
        self.mappings = mappings
        self.default = default

    def get_value(self, obj):
        attribute_value = self.attribute_getter.get_value(obj)
        for source, target in self.mappings:
            if attribute_value == source:
                return self._force_value(target)
        # No mapping matched - fall back to the configured default.
        return self._force_value(self.default)

    def get_annotation(self, cls):
        from django.db.models import Case, Value, When
        cases = []
        for source, target in self.mappings:
            condition = self.attribute_getter.build_filter('exact', source)
            cases.append(When(condition, then=Value(self._force_value(target))))
        fallback = Value(self._force_value(self.default))
        return Case(*cases, default=fallback, output_field=self.output_field)
class AnnotationProperty(AnnotationGetterMixin, QueryableProperty):
    """
    A property backed by a static annotation, which also supplies getter
    values by querying the database.
    """

    def __init__(self, annotation, **kwargs):
        """
        :param annotation: The static annotation determining the value of
            this property.
        """
        super(AnnotationProperty, self).__init__(**kwargs)
        self.annotation = annotation

    def get_annotation(self, cls):
        # The annotation is fixed and independent of the model class.
        return self.annotation
class AggregateProperty(AnnotationProperty):
    """
    A property backed by an aggregate, used both for queryset annotations
    and for computing getter values.
    """

    def __init__(self, aggregate, **kwargs):
        """
        :param django.db.models.Aggregate aggregate: The aggregate
            determining the value of this property.
        """
        super(AggregateProperty, self).__init__(aggregate, **kwargs)

    def get_value(self, obj):
        # Run the aggregate against the object's queryset; the property
        # name doubles as the aggregation alias.
        results = self.get_queryset_for_object(obj).aggregate(
            **{self.name: self.annotation}
        )
        return results[self.name]
| 45.245675
| 119
| 0.613643
| 12,748
| 0.974916
| 0
| 0
| 232
| 0.017742
| 0
| 0
| 7,478
| 0.571887
|
b8b45d47d2d2b0c8935936a0ff5a2cb55518f1d6
| 2,558
|
py
|
Python
|
experiments/examples/example_run_bench_s1_periodic_bench.py
|
cogsys-tuebingen/uninas
|
06729b9cf517ec416fb798ae387c5bd9c3a278ac
|
[
"MIT"
] | 18
|
2020-11-22T16:03:08.000Z
|
2022-03-15T12:11:46.000Z
|
experiments/examples/example_run_bench_s1_periodic_bench.py
|
cogsys-tuebingen/uninas
|
06729b9cf517ec416fb798ae387c5bd9c3a278ac
|
[
"MIT"
] | 2
|
2022-01-04T08:10:17.000Z
|
2022-01-05T08:13:14.000Z
|
experiments/examples/example_run_bench_s1_periodic_bench.py
|
cogsys-tuebingen/uninas
|
06729b9cf517ec416fb798ae387c5bd9c3a278ac
|
[
"MIT"
] | 6
|
2021-03-08T07:08:52.000Z
|
2022-02-24T12:00:43.000Z
|
"""
training a super-network and periodically evaluating its performance on bench architectures
a work in this direction exists: https://arxiv.org/abs/2001.01431
"""
from uninas.main import Main
# default configurations, for the search process and the network design
# config_files = "{path_conf_bench_tasks}/s1_fairnas_cifar.run_config, {path_conf_net_search}/bench201.run_config"
config_files = "{path_conf_bench_tasks}/s1_random_cifar.run_config, {path_conf_net_search}/bench201.run_config"
# these changes are applied to the default configuration in the config files
changes = {
"{cls_task}.is_test_run": True,
"{cls_task}.save_dir": "{path_tmp}/run_bench_s1_per/",
"{cls_task}.save_del_old": True,
"{cls_trainer}.max_epochs": 4,
"{cls_data}.dir": "{path_data}/cifar_data/",
"{cls_data}.fake": False,
"{cls_data}.download": False,
"{cls_data}.batch_size_train": 96,
# example how to mask options
"{cls_method}.mask_indices": "0, 1, 4", # mask Zero, Skip, Pool
"{cls_network_body}.cell_order": "n, n, r, n, n, r, n, n", # 2 normal cells, one reduction cell, ...
"{cls_network_stem}.features": 16, # start with 16 channels
# some augmentations
"cls_augmentations": "DartsCifarAug", # default augmentations for cifar
"{cls_schedulers#0}.warmup_epochs": 0,
# specifying how to add weights, note that SplitWeightsMixedOp requires a SplitWeightsMixedOpCallback
"{cls_network_cells_primitives#0}.mixed_cls": "MixedOp", # MixedOp, BiasD1MixedOp, ...
"{cls_network_cells_primitives#1}.mixed_cls": "MixedOp", # MixedOp, BiasD1MixedOp, ...
"cls_callbacks": "CheckpointCallback, CreateBenchCallback",
"{cls_callbacks#1}.each_epochs": 1,
"{cls_callbacks#1}.reset_bn": True,
"{cls_callbacks#1}.benchmark_path": "{path_data}/bench/nats/nats_bench_1.1_subset_m_test.pt",
# what and how to evaluate each specific network
"cls_cb_objectives": "NetValueEstimator",
"{cls_cb_objectives#0}.key": "acc1/valid",
"{cls_cb_objectives#0}.is_constraint": False,
"{cls_cb_objectives#0}.is_objective": True,
"{cls_cb_objectives#0}.maximize": True,
"{cls_cb_objectives#0}.load": True,
"{cls_cb_objectives#0}.batches_forward": 20,
"{cls_cb_objectives#0}.batches_train": 0,
"{cls_cb_objectives#0}.batches_eval": -1,
"{cls_cb_objectives#0}.value": "val/accuracy/1",
}
if __name__ == "__main__":
task = Main.new_task(config_files, args_changes=changes)
task.run()
| 41.258065
| 114
| 0.69742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,009
| 0.785379
|
b8b6326ff4e90a353f713e0c09d84e4633fbcdd7
| 9,650
|
py
|
Python
|
zuds/photometry.py
|
charlotteaward/zuds-pipeline
|
52423859498374203d13fdc15c88bdc1260db183
|
[
"BSD-3-Clause"
] | null | null | null |
zuds/photometry.py
|
charlotteaward/zuds-pipeline
|
52423859498374203d13fdc15c88bdc1260db183
|
[
"BSD-3-Clause"
] | null | null | null |
zuds/photometry.py
|
charlotteaward/zuds-pipeline
|
52423859498374203d13fdc15c88bdc1260db183
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as psql
from sqlalchemy.orm import relationship
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.schema import UniqueConstraint
from astropy import units as u
from .core import Base
from .constants import APER_KEY, APERTURE_RADIUS
__all__ = ['ForcedPhotometry', 'raw_aperture_photometry', 'aperture_photometry']
class ForcedPhotometry(Base):
    """Forced-photometry measurement of a source on a calibrated image."""

    id = sa.Column(sa.Integer, primary_key=True)
    __tablename__ = 'forcedphotometry'

    # Bitwise OR of the mask flags of pixels covered by the aperture.
    flags = sa.Column(sa.Integer)
    # Measurement position in degrees.
    ra = sa.Column(psql.DOUBLE_PRECISION)
    dec = sa.Column(psql.DOUBLE_PRECISION)

    @property
    def mag(self):
        """Calibrated magnitude: instrumental magnitude plus the image
        zeropoint and the aperture correction keyed by ``apcorkey``."""
        return -2.5 * np.log10(self.flux) + self.image.header['MAGZP'] + \
            self.image.header[self.apcorkey]

    @property
    def magerr(self):
        """Magnitude error from the flux error (1.0857... = 2.5 / ln 10)."""
        return 1.08573620476 * self.fluxerr / self.flux

    image_id = sa.Column(sa.Integer, sa.ForeignKey('calibratedimages.id',
                                                   ondelete='CASCADE'),
                         index=True)
    image = relationship('CalibratedImage', back_populates='forced_photometry',
                         cascade='all')
    # thumbnails = relationship('Thumbnail', cascade='all')
    source_id = sa.Column(sa.Text,
                          sa.ForeignKey('sources.id', ondelete='CASCADE'),
                          index=True)
    source = relationship('Source', cascade='all')

    # Header key of the aperture correction used by ``mag``.
    # NOTE(review): hard-coded to 'APCOR5' -- presumably matches the
    # aperture used when the flux was measured; confirm against the
    # photometry pipeline (see aperture_photometry's seeing-based choice).
    apcorkey = 'APCOR5'

    flux = sa.Column(sa.Float)
    fluxerr = sa.Column(sa.Float)
    zp = sa.Column(sa.Float)
    filtercode = sa.Column(sa.Text)
    obsjd = sa.Column(sa.Float)

    uniq = UniqueConstraint(image_id, source_id)
    reverse_idx = sa.Index('source_image', source_id, image_id)

    @hybrid_property
    def snr(self):
        """Signal-to-noise ratio of the flux measurement."""
        return self.flux / self.fluxerr
def raw_aperture_photometry(sci_path, rms_path, mask_path, ra, dec,
                            apply_calibration=False):
    """Fixed-radius aperture photometry performed directly on FITS files.

    :param sci_path: path to the (background-subtracted) science image.
    :param rms_path: path to the matching RMS (error) image.
    :param mask_path: path to the matching pixel-mask image.
    :param ra, dec: scalar or array-like sky coordinates in degrees.
    :param apply_calibration: unused; kept for interface compatibility.
    :return: an astropy Table with flux, fluxerr, flags and metadata columns.
    """
    import photutils
    from astropy.coordinates import SkyCoord
    from astropy.io import fits
    from astropy.table import vstack
    from astropy.wcs import WCS

    positions = SkyCoord(np.atleast_1d(ra), np.atleast_1d(dec), unit='deg')

    # Read the three image planes; the science header supplies WCS and
    # calibration keywords.
    with fits.open(sci_path, memmap=False) as sci_hdul:
        header = sci_hdul[0].header
        sci_wcs = WCS(header)
        sci_pixels = sci_hdul[0].data

    with fits.open(rms_path, memmap=False) as rms_hdul:
        rms_pixels = rms_hdul[0].data

    with fits.open(mask_path, memmap=False) as mask_hdul:
        mask_pixels = mask_hdul[0].data

    apertures = photutils.SkyCircularAperture(positions, r=APERTURE_RADIUS)
    phot_table = photutils.aperture_photometry(sci_pixels, apertures,
                                               error=rms_pixels,
                                               wcs=sci_wcs)

    # Cut the mask plane out under each aperture to detect bad pixels.
    pixel_apertures = apertures.to_pixel(sci_wcs)
    cutouts = [cutter.cutout(mask_pixels)
               for cutter in pixel_apertures.to_mask(method='center')]

    # Flag sources whose aperture covers any masked pixel.
    phot_table['flags'] = [int(np.bitwise_or.reduce(cut, axis=(0, 1)))
                           for cut in cutouts]
    phot_table['zp'] = header['MAGZP'] + header[APER_KEY]
    phot_table['obsjd'] = header['OBSJD']
    phot_table['filtercode'] = 'z' + header['FILTER'][-1]

    # Normalize column names for downstream consumers.
    phot_table.rename_column('aperture_sum', 'flux')
    phot_table.rename_column('aperture_sum_err', 'fluxerr')

    return phot_table
def aperture_photometry(calibratable, ra, dec, apply_calibration=False,
                        assume_background_subtracted=False, use_cutout=False,
                        direct_load=None, survey='ZTF', apfactor=1.0, seeing=1.0):
    """Seeing-matched aperture photometry at the given sky coordinates.

    :param calibratable: image-like object exposing ``wcs``, ``header``,
        ``mask_image``, ``rms_image`` and (optionally) a background
        subtracted companion image.
    :param ra, dec: scalar or array-like sky coordinates in degrees.
    :param apply_calibration: add calibrated ``mag``/``magerr`` columns.
    :param assume_background_subtracted: treat ``calibratable`` itself as
        already background subtracted instead of loading its companion.
    :param use_cutout: photometer small cutouts around each position rather
        than the full image.
    :param direct_load: optional dict with explicit ``sci``/``mask``/``rms``
        file paths overriding those derived from ``calibratable``.
    :param survey: 'ZTF' or 'PTF'; selects the zeropoint header keywords.
    :param apfactor, seeing: aperture scale factor and seeing (pixels);
        their product selects the aperture radius and matching APCOR key.
    :return: an astropy Table with flux, fluxerr, flags and metadata columns.
    """
    import photutils
    from astropy.coordinates import SkyCoord
    from astropy.io import fits
    from astropy.table import vstack
    from astropy.wcs import WCS

    ra = np.atleast_1d(ra)
    dec = np.atleast_1d(dec)
    coord = SkyCoord(ra, dec, unit='deg')

    # Pick the aperture radius (pixels) and the matching aperture-correction
    # header key from the seeing-scaled aperture diameter.
    #
    # BUG FIX: this selection used to live inside the ``not use_cutout``
    # branch only, so the cutout code path (and the calibration step below)
    # crashed with a NameError on ``aprad``/``apcorkey``. It is now
    # computed up front for both paths.
    scaled = 3.0 * apfactor * seeing
    if scaled < 2.5:
        apcorkey, aprad = 'APCOR1', 2.0
    elif scaled < 3.5:
        apcorkey, aprad = 'APCOR2', 3.0
    elif scaled < 5.0:
        apcorkey, aprad = 'APCOR3', 4.0
    elif scaled < 8.0:
        apcorkey, aprad = 'APCOR4', 6.0
    elif scaled < 12.0:
        apcorkey, aprad = 'APCOR5', 10.0
    else:
        apcorkey, aprad = 'APCOR6', 14.0

    if not use_cutout:
        wcs = calibratable.wcs
        apertures = photutils.SkyCircularAperture(coord, r=aprad * u.pixel)

        # Anything photometerable implements mask, background and wcs.
        if not assume_background_subtracted:
            pixels_bkgsub = calibratable.background_subtracted_image.data
        else:
            pixels_bkgsub = calibratable.data

        bkgrms = calibratable.rms_image.data
        mask = calibratable.mask_image.data

        phot_table = photutils.aperture_photometry(pixels_bkgsub, apertures,
                                                   error=bkgrms,
                                                   wcs=wcs)

        # PTF and ZTF store their zeropoints under different header keys;
        # only ZTF headers carry the per-aperture correction.
        if survey == 'PTF':
            phot_table['zp'] = calibratable.header['IMAGEZPT']
        else:
            phot_table['zp'] = calibratable.header['MAGZP'] + calibratable.header[apcorkey]
        phot_table['obsjd'] = calibratable.header['OBSJD']
        phot_table['filtercode'] = 'z' + calibratable.header['FILTER'][-1]

        # Cut the mask plane out under each aperture for flagging below.
        pixap = apertures.to_pixel(wcs)
        annulus_masks = pixap.to_mask(method='center')
        maskpix = [annulus_mask.cutout(mask.data) for annulus_mask in annulus_masks]
    else:
        phot_table = []
        maskpix = []
        for s in coord:
            # Resolve the science/mask/rms file paths, honoring any
            # explicit overrides supplied via ``direct_load``.
            if direct_load is not None and 'sci' in direct_load:
                sci_path = direct_load['sci']
            elif assume_background_subtracted:
                sci_path = calibratable.local_path
            else:
                sci_path = calibratable.background_subtracted_image.local_path

            if direct_load is not None and 'mask' in direct_load:
                mask_path = direct_load['mask']
            else:
                mask_path = calibratable.mask_image.local_path

            if direct_load is not None and 'rms' in direct_load:
                rms_path = direct_load['rms']
            else:
                rms_path = calibratable.rms_image.local_path

            with fits.open(sci_path, memmap=True) as f:
                wcs = WCS(f[0].header)

            pixcoord = wcs.all_world2pix([[s.ra.deg, s.dec.deg]], 0)[0]
            pixx, pixy = pixcoord

            nx = calibratable.header['NAXIS1']
            ny = calibratable.header['NAXIS2']

            # Cut a window 1.5 aperture radii around the source, clipped
            # to the image bounds.
            xmin = max(0, pixx - 1.5 * aprad)
            xmax = min(nx, pixx + 1.5 * aprad)
            ymin = max(0, pixy - 1.5 * aprad)
            ymax = min(ny, pixy + 1.5 * aprad)

            ixmin = int(np.floor(xmin))
            ixmax = int(np.ceil(xmax))
            iymin = int(np.floor(ymin))
            iymax = int(np.ceil(ymax))

            # Aperture position is relative to the cutout's origin.
            ap = photutils.CircularAperture([pixx - ixmin, pixy - iymin], aprad)

            with fits.open(sci_path, memmap=True) as f:
                pixels_bkgsub = f[0].data[iymin:iymax, ixmin:ixmax]

            with fits.open(rms_path, memmap=True) as f:
                bkgrms = f[0].data[iymin:iymax, ixmin:ixmax]

            with fits.open(mask_path, memmap=True) as f:
                mask = f[0].data[iymin:iymax, ixmin:ixmax]

            pt = photutils.aperture_photometry(pixels_bkgsub, ap, error=bkgrms)
            annulus_mask = ap.to_mask(method='center')
            mp = annulus_mask.cutout(mask.data)
            maskpix.append(mp)
            phot_table.append(pt)

        phot_table = vstack(phot_table)

    if apply_calibration:
        if survey == 'PTF':
            magzp = calibratable.header['IMAGEZPT']
            phot_table['mag'] = -2.5 * np.log10(phot_table['aperture_sum']) + magzp
            phot_table['magerr'] = 1.0826 * phot_table['aperture_sum_err'] / phot_table['aperture_sum']
        else:
            magzp = calibratable.header['MAGZP']
            apcor = calibratable.header[apcorkey]
            phot_table['mag'] = -2.5 * np.log10(phot_table['aperture_sum']) + magzp + apcor
            phot_table['magerr'] = 1.0826 * phot_table['aperture_sum_err'] / phot_table['aperture_sum']

    # Flag photometry whose aperture covers any masked pixel.
    phot_table['flags'] = [int(np.bitwise_or.reduce(m, axis=(0, 1))) for
                           m in maskpix]

    # Normalize column names for downstream consumers.
    phot_table.rename_column('aperture_sum', 'flux')
    phot_table.rename_column('aperture_sum_err', 'fluxerr')

    return phot_table
| 33.623693
| 108
| 0.594922
| 1,437
| 0.148912
| 0
| 0
| 313
| 0.032435
| 0
| 0
| 1,408
| 0.145907
|
b8b69dfcb1d5f6e006ee8b568536b7b0df129c02
| 5,521
|
py
|
Python
|
models/Libraries/UnitTest.py
|
yangshiquan/GraphDialog
|
5bb1239bf502c8d79c4c888f69c7aff0c02c2928
|
[
"MIT"
] | 26
|
2020-09-25T02:19:43.000Z
|
2022-03-27T09:03:34.000Z
|
models/Libraries/UnitTest.py
|
yangshiquan/GraphDialog
|
5bb1239bf502c8d79c4c888f69c7aff0c02c2928
|
[
"MIT"
] | 1
|
2020-10-28T11:28:35.000Z
|
2020-10-28T11:28:35.000Z
|
models/Libraries/UnitTest.py
|
yangshiquan/GraphDialog
|
5bb1239bf502c8d79c4c888f69c7aff0c02c2928
|
[
"MIT"
] | 2
|
2020-12-17T08:49:13.000Z
|
2021-04-18T13:08:48.000Z
|
import tensorflow as tf
from models.Libraries.BidirectionalGraphEncoder import BidirectionalGraphEncoder
from tensorflow.python.ops import array_ops
if __name__ == "__main__":
# units=2, input_dim=2, edge_types=10, recurrent_size=4
bi_graph_encoder = BidirectionalGraphEncoder(2, 2, 10, 4)
# inputs: batch_size=8, max_len=3, embedding_dim=2
# inputs: batch_size*max_len*embedding_dim
inputs = tf.convert_to_tensor([[[0.1, 0.2],[0.0, 0.0],[0.0, 0.0]],[[0.1, 0.2],[0.3, 0.4],[0.0, 0.0]],[[0.1, 0.2],[0.3, 0.4],[0.5, 0.6]],[[0.1, 0.2],[0.3, 0.4],[0.5, 0.6]],[[0.1, 0.2],[0.3, 0.4],[0.5, 0.6]],[[0.1, 0.2],[0.3, 0.4],[0.5, 0.6]],[[0.1, 0.2],[0.3, 0.4],[0.5, 0.6]],[[0.1, 0.2],[0.3, 0.4],[0.5, 0.6]]])
# deps: 2*batch_size*max_len*(recurrent_size-1)
deps = tf.convert_to_tensor([[[['$', '$', '$'],['$', '$', '$'],['$', '$', '$']],[['$', '$', '$'],['$', '$', '$'],['$', '$', '$']], [['$', '$', '$'],['$', '$', '$'],['$', '$', '$']],[['$', '$', '$'],['$', '$', '$'],['$', '$', '$']],[['$', '$', '$'],['0', '$', '$'],['1', '$', '$']],[['$', '$', '$'],['0', '$', '$'],['1', '$', '$']],[['$', '$', '$'],['0', '$', '$'],['1', '$', '$']],[['$', '$', '$'],['0', '$', '$'],['1', '$', '$']]],[[['$', '$', '$'],['$', '$', '$'],['$', '$', '$']],[['$', '$', '$'],['$', '$', '$'],['$', '$', '$']],[['$', '$', '$'],['0', '$', '$'],['1', '$', '$']],[['$', '$', '$'],['0', '$', '$'],['1', '$', '$']],[['$', '$', '$'],['0', '$', '$'],['1', '$', '$']],[['$', '$', '$'],['0', '$', '$'],['1', '$', '$']],[['$', '$', '$'],['0', '$', '$'],['1', '$', '$']],[['$', '$', '$'],['0', '$', '$'],['1', '$', '$']]]])
# edge_types = tf.convert_to_tensor([[[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']],[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']],[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']],[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']],[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']],[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']],[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']],[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']]], [[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']],[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']],[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']],[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']],[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']],[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']],[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']],[['0', '$', '$', '$'],['0', '2', '$', '$'],['0', '2', '$', '$']]]])
# edge_types = tf.convert_to_tensor([[[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]],[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]],[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]],[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]],[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]],[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]],[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]],[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]]],[[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]],[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]],[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]],[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]],[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]],[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]],[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]],[[0, -1, -1, -1],[0, 2, -1, -1],[0, 2, -1, -1]]]])
# edge_types: 2*batch_size*max_len*recurrent_size
edge_types = tf.convert_to_tensor([[[[0, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]],[[1, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]],[[2, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]],[[0, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]],[[0, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]],[[0, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]],[[0, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]],[[0, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]]], [[[0, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]],[[0, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]],[[0, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]],[[0, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]],[[0, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]],[[0, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]],[[0, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]],[[0, 9, 9, 9],[0, 2, 9, 9],[0, 2, 9, 9]]]])
# mask: batch_size*max_len
mask = tf.convert_to_tensor([[1,1,1],[1,1,1],[1,1,1],[1,1,0],[1,1,0],[1,1,0],[1,0,0],[1,0,0]])
# cell_mask: 2*batch_size*max_len*recurrent_size
cell_mask = tf.convert_to_tensor([[[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]],[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]],[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]],[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]],[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]],[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]],[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]],[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]]],[[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]],[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]],[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]],[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]],[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]],[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]],[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]],[[1, 0, 0, 0],[1, 1, 0, 0],[1, 1, 0, 0]]]])
# initial_state: 2*recurrent_size*batch_size*embedding_dim
initial_state = array_ops.zeros([2, 4, 8, 2])
input_lengths = tf.convert_to_tensor([1, 2, 3, 3, 3, 3, 3, 3])
outputs, hidden_f, hidden_b = bi_graph_encoder(inputs, input_lengths, deps, edge_types, mask, cell_mask, initial_state, True)
print(outputs)
print(hidden_f)
print(hidden_b)
| 197.178571
| 1,087
| 0.307915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,710
| 0.490853
|
b8b7d48ab2b3078dad4877e762a40e5343a5d8aa
| 96
|
py
|
Python
|
animazya/apps.py
|
KenFon/kenfontaine.fr
|
6b4055de791e3cc47b473c1890b2fcafab8a635d
|
[
"MIT"
] | null | null | null |
animazya/apps.py
|
KenFon/kenfontaine.fr
|
6b4055de791e3cc47b473c1890b2fcafab8a635d
|
[
"MIT"
] | null | null | null |
animazya/apps.py
|
KenFon/kenfontaine.fr
|
6b4055de791e3cc47b473c1890b2fcafab8a635d
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class AnimazyaConfig(AppConfig):
name = 'animazya'
| 16
| 34
| 0.71875
| 55
| 0.572917
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.104167
|
b8b7e91501f23e4c04cf067b13d9a9480a460c77
| 59
|
py
|
Python
|
python/testData/debug/test4.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2018-12-29T09:53:39.000Z
|
2018-12-29T09:53:42.000Z
|
python/testData/debug/test4.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/debug/test4.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 1
|
2020-11-27T10:36:50.000Z
|
2020-11-27T10:36:50.000Z
|
xval = 0
xvalue1 = 1
xvalue2 = 2
print(xvalue1 + xvalue2)
| 9.833333
| 24
| 0.677966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b8b9118b82c32808b4a088d2dcdc263280ad9a3a
| 3,000
|
py
|
Python
|
pres/ray-tracing/main.py
|
sosterwalder/mte7103-qde
|
e4ed8beda40c9cd15d3c815567d9a4e3396adee7
|
[
"MIT"
] | null | null | null |
pres/ray-tracing/main.py
|
sosterwalder/mte7103-qde
|
e4ed8beda40c9cd15d3c815567d9a4e3396adee7
|
[
"MIT"
] | null | null | null |
pres/ray-tracing/main.py
|
sosterwalder/mte7103-qde
|
e4ed8beda40c9cd15d3c815567d9a4e3396adee7
|
[
"MIT"
] | null | null | null |
from kivy.animation import Animation
from kivy.app import App
from kivy.core.window import Window
from kivy.graphics import Color
from kivy.graphics import Ellipse
from kivy.graphics import Line
from kivy.uix.widget import Widget
class ProjCenter(Widget):
def __init__(self, **kwargs):
super(ProjCenter, self).__init__(**kwargs)
self.pos = [10, 10]
self.size = [10, 10]
def redraw(self):
with self.canvas:
Color(0.83, 0.88, 1)
Ellipse(pos=self.pos, size=self.size)
class Ray(Widget):
def __init__(self, **kwargs):
super(Ray, self).__init__(**kwargs)
self._was_triggered = False
self.source = []
self.target = []
def trigger(self):
if not self._was_triggered:
self.canvas.clear()
line = Line(points=[
self.source[0] + 5,
self.source[1] + 5,
self.source[0] + 5,
self.source[1] + 5,
],
width=1,
dash_length=5,
dash_offset=10
)
anim = Animation(points=[
self.source[0] + 5,
self.source[1] + 5,
self.target[0],
self.target[1]
])
self.canvas.add(Color(0.83, 0.88, 1))
self.canvas.add(line)
anim.start(line)
self._was_triggered = True
print(self, " was triggered")
class RayTracingWidget(Widget):
def __init__(self, **kwargs):
super(RayTracingWidget, self).__init__(**kwargs)
self.center = Window.center
self.width = Window.width
self.height = Window.height
print(self.center, self.center_x)
self.proj_center = ProjCenter(**kwargs)
self.proj_center.pos = [
10, self.center_y / 2
]
self.add_widget(self.proj_center)
self.proj_center.redraw()
self.rays = []
for i in range(4):
ray = Ray(**kwargs)
ray.source = [
10, self.center_y / 2
]
ray.target = [self.center_x, self.center_y]
self.rays.append(ray)
def trigger(self):
for ray in self.rays:
ray.trigger()
class RayTracingApp(App):
def __init__(self, **kwargs):
super(RayTracingApp, self).__init__(**kwargs)
self._keyboard = Window.request_keyboard(self._keyboard_closed, self)
self._keyboard.bind(on_key_down = self._on_keyboard_down)
def _keyboard_closed(self):
self._keyboard.unbind(on_key_down = self._on_keyboard_down)
self._keyboard = None
def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
if keycode[1] == 'spacebar':
self.root.trigger()
def build(self):
Window.clearcolor = (1, 1, 1, 1)
return RayTracingWidget()
if __name__ == '__main__':
RayTracingApp().run()
| 28.037383
| 77
| 0.553
| 2,703
| 0.901
| 0
| 0
| 0
| 0
| 0
| 0
| 36
| 0.012
|
b8b92ae2e5cf67849b6f6b332521716f375c2982
| 3,960
|
py
|
Python
|
sookie.py
|
anygard/sookie
|
5732f7644d2d908911735e62c8574863825174a2
|
[
"MIT"
] | null | null | null |
sookie.py
|
anygard/sookie
|
5732f7644d2d908911735e62c8574863825174a2
|
[
"MIT"
] | null | null | null |
sookie.py
|
anygard/sookie
|
5732f7644d2d908911735e62c8574863825174a2
|
[
"MIT"
] | null | null | null |
""" Sookie, is a waiter, waits for a socket to be listening then it moves on
Usage:
sookie <socket> [--timeout=<to>] [--retry=<rt>] [--logsocket=<ls>] [--logfacility=<lf>] [--loglevel=<ll>]
sookie -h | --help
sookie --version
Options:
-h --help Show this screen
--version Show version
--timeout=<to> Timout in seconds [default: 1800]
--retry=<rt> Interval between retries in seconds [default: 20]
--logsocket=<ls> Socket to send syslog messages to, only logging to local syslog if omitted.
--logfacility=<lf> The syslog facility to use for logging [default: user]
--loglevel=<ll> The syslog severity level to use, i.e the verbosity level [default: info]
<socket> Socket to wait for, 'host:port'
Sookie is intended to be a simple way of providing som measure of management of
inter server dependencies in complex environments. All it does is wait for a
socket to start listening for connections then it exits. It is supposed to be
used as a "smart" sleep in a startup script.
Sookie logs to syslog, and optionally to a remote syslog server aswell. Level
and facility values can be taken from syslog(1)
Sookie Stackhouse is a waitress.
exitcodes
0: ok, the server answered
1: waited until timout
2: invalid syntax
"""
import docopt
import logging
import logging.handlers
import os
import socket
import sys
import time
def main(args):
if args['--logsocket']:
logserver = tuple(args['--logsocket'].split(':'))
else:
logserver = None
logfacility = args['--logfacility']
loglevel = args['--loglevel']
logger = logging.getLogger(os.path.basename(__file__))
localsyslog = logging.handlers.SysLogHandler()
if logserver:
remotesyslog = logging.handlers.SysLogHandler(
address=logserver,
facility=logging.handlers.SysLogHandler.facility_names[logfacility]
)
try:
localsyslog.setLevel(logging.handlers.SysLogHandler.priority_names[loglevel])
if logserver:
remotesyslog.setLevel(logging.handlers.SysLogHandler.priority_names[loglevel])
except KeyError:
print "Invalid argument to %s (%s)" % ('--loglevel', args['--loglevel'])
sys.exit(2)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
localsyslog.setFormatter(formatter)
if logserver:
remotesyslog.setFormatter(formatter)
logger.addHandler(localsyslog)
if logserver:
logger.addHandler(remotesyslog)
logger.info('%s Starting' % __file__)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
option = '--timeout'
timeout = int(args[option])
option = '--retry'
interval = int(args[option])
except ValueError:
print "Invalid argument to %s (%s)" % (option, args[option])
sys.exit(2)
server = tuple(args['<socket>'].split(':'))
timeout_time = time.time() + timeout
is_timeout = False
logger.debug('now: %d, timeout: %d, timeout_time: %d)' % (time.time(), timeout, timeout_time))
while True:
t = time.time()
if t >= timeout_time:
is_timeout = True
break
try:
sock.connect(server)
logger.info('Connect')
print server
logger.debug('%ds to spare' % int(timeout_time-t))
break
except socket.error:
logger.debug('Waiting %d more seconds' % step)
time.sleep(step)
except TypeError, E:
print E
print "Invalid socket: %s" % args['<socket>']
sys.exit(2)
logger.info('%s Ending' % __file__)
exitcode = 1 if is_timeout else 0
logger.debug('exitcode: %d' % exitcode)
sys.exit(exitcode)
if __name__ == '__main__':
args = docopt.docopt(__doc__, version='0.1')
main(args)
| 30.697674
| 109
| 0.630303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,740
| 0.439394
|
b8bd55689822e6f7e5a2823014bfe14020f8b719
| 912
|
py
|
Python
|
tests/parser/syntax/test_ann_assign.py
|
williamremor/vyper
|
4d33dc4140f7d0c339876afb6af7b417bd0ed8e0
|
[
"MIT"
] | 1
|
2018-08-31T02:32:57.000Z
|
2018-08-31T02:32:57.000Z
|
tests/parser/syntax/test_ann_assign.py
|
williamremor/vyper
|
4d33dc4140f7d0c339876afb6af7b417bd0ed8e0
|
[
"MIT"
] | null | null | null |
tests/parser/syntax/test_ann_assign.py
|
williamremor/vyper
|
4d33dc4140f7d0c339876afb6af7b417bd0ed8e0
|
[
"MIT"
] | null | null | null |
import pytest
from pytest import raises
from vyper import compiler
from vyper.exceptions import VariableDeclarationException, TypeMismatchException
fail_list = [
"""
@public
def test():
a = 1
""",
"""
@public
def test():
a = 33.33
""",
"""
@public
def test():
a = "test string"
""",
("""
@public
def test():
a: num = 33.33
""", TypeMismatchException)
]
@pytest.mark.parametrize('bad_code', fail_list)
def test_as_wei_fail(bad_code):
if isinstance(bad_code, tuple):
with raises(bad_code[1]):
compiler.compile(bad_code[0])
else:
with raises(VariableDeclarationException):
compiler.compile(bad_code)
valid_list = [
"""
@public
def test():
a: num = 1
""",
]
@pytest.mark.parametrize('good_code', valid_list)
def test_ann_assign_success(good_code):
assert compiler.compile(good_code) is not None
| 16.888889
| 80
| 0.634868
| 0
| 0
| 0
| 0
| 431
| 0.472588
| 0
| 0
| 256
| 0.280702
|
b8bd59b6d2fd731f6b088f01ce1a174d704adcae
| 7,568
|
py
|
Python
|
tests/test_mongoengine_dsl.py
|
StoneMoe/mongoengine_dsl
|
310d77c30e77ba1f695b3d644737fcfc3c2ab304
|
[
"MIT"
] | 3
|
2021-08-25T02:08:34.000Z
|
2022-03-23T08:32:09.000Z
|
tests/test_mongoengine_dsl.py
|
StoneMoe/mongoengine_dsl
|
310d77c30e77ba1f695b3d644737fcfc3c2ab304
|
[
"MIT"
] | 1
|
2021-08-24T09:41:11.000Z
|
2021-08-24T10:02:43.000Z
|
tests/test_mongoengine_dsl.py
|
StoneMoe/mongoengine_dsl
|
310d77c30e77ba1f695b3d644737fcfc3c2ab304
|
[
"MIT"
] | 1
|
2021-08-24T14:25:28.000Z
|
2021-08-24T14:25:28.000Z
|
#!/usr/bin/env python
import unittest
from mongoengine import Document, Q, StringField, connect
from mongoengine_dsl import Query
from mongoengine_dsl.errors import InvalidSyntaxError, TransformHookError
from tests.utils import ts2dt
class DSLTest(unittest.TestCase):
def test_whitespace(self):
self.assertEqual(
Q(key1='val1') & Q(key2='val2') & Q(key3='val3'),
Query('key1:val1 and key2=val2 and key3==val3'),
)
self.assertEqual(
Q(key1='val1') & Q(key2='val2') & Q(key3='val3'),
Query('key1 : val1 and key2 = val2 and key3 == val3'),
)
def test_token(self):
self.assertEqual(Q(key1='hi_there'), Query('key1: hi_there'))
self.assertEqual(Q(key1='8a'), Query('key1: 8a'))
self.assertEqual(Q(key1='8.8.'), Query('key1: 8.8.'))
self.assertEqual(Q(key1='8.8.8'), Query('key1: 8.8.8'))
self.assertEqual(Q(key1='8.8.8.8'), Query('key1: 8.8.8.8'))
def test_quote_string(self):
self.assertEqual(Q(key1='hi_there'), Query('key1: "hi_there"'))
self.assertEqual(Q(key1='hi_there'), Query("key1: 'hi_there'"))
self.assertEqual(Q(key1='hello world'), Query('key1: "hello world"'))
self.assertEqual(Q(key1='hello world'), Query("key1: 'hello world'"))
self.assertEqual(
Q(key1='escape"this"world'),
Query('key1: "escape\\"this\\"world"'),
)
self.assertEqual(
Q(key1="escape'this'world"),
Query("key1: 'escape\\'this\\'world'"),
)
def test_int(self):
self.assertEqual(Q(key1=1), Query('key1:1'))
self.assertEqual(Q(key1=-1), Query('key1:-1'))
def test_float(self):
self.assertEqual(Q(key1=1.213), Query('key1:1.213'))
self.assertEqual(Q(key1=-1.213), Query('key1:-1.213'))
def test_bool(self):
self.assertEqual(
Q(key1=True) & Q(key2=True) & Q(key3=True),
Query('key1:true and key2:TRUE and key3:True'),
)
self.assertEqual(
Q(key1=False) & Q(key2=False) & Q(key3=False),
Query('key1:false and key2:FALSE and key3:False'),
)
def test_array(self):
self.assertEqual(Q(key1=['hi']), Query('key1:[hi]'))
self.assertEqual(
Q(key1=[False, True, 1, 1.2, 'quote', 'no_quote']),
Query('key1:[false, true, 1, 1.2, "quote", no_quote]'),
)
self.assertEqual( # Full-width comma
Q(key1=[False, True, 1, 1.2, 'quote', 'no_quote']),
Query('key1:[false, true, 1, 1.2, "quote", no_quote]'),
)
self.assertEqual( # no comma
Q(key1=[False, True, 1, 1.2, 'quote', 'no_quote']),
Query('key1:[false true 1 1.2 "quote" no_quote]'),
)
self.assertEqual(Q(key1=[1, [2, 3]]), Query('key1:[1, [2, 3]]')) # nested array
self.assertEqual( # nested more array
Q(key1=[1, 2, [3, [4, 5, 6]]]),
Query('key1:[1, 2, [3, [4, 5, 6]]]'),
)
self.assertRaisesRegex(
InvalidSyntaxError,
'Exclude operator cannot be used in arrays',
Query,
'key1 @ [!,2,3] and key2:"value2"',
)
self.assertRaisesRegex(
InvalidSyntaxError,
'Wildcard operator cannot be used in arrays',
Query,
'key1 !@ [*,2,3] and key2:"value2"',
)
def test_logical_priority(self):
self.assertEqual(
Q(key1='键1') & Q(key2='value2') & Q(键3='value3'),
Query('key1:键1 and key2:"value2" and 键3:value3'),
)
self.assertEqual(
(Q(key1='键1') | Q(key2='value2')) & Q(键3='value3'),
Query('(key1:键1 or key2:"value2") and 键3:value3'),
)
self.assertEqual(
Q(key1='键1') & (Q(key2='value2') | Q(键3='value3')),
Query('key1:键1 and (key2:"value2" or 键3:value3)'),
)
self.assertEqual(
Q(key1='键1') & (Q(key2='value2') | Q(键3='value3') | Q(key4='value4')),
Query('key1:键1 and (key2:"value2" or 键3:value3 or key4: value4)'),
)
def test_equal(self):
self.assertEqual(
Q(key1='val1') & Q(key2='val2') & Q(key3='val3'),
Query('key1:val1 and key2=val2 and key3==val3'),
)
self.assertEqual(
Q(key1='val1') & Q(key2='val2') & Q(key3='val3'),
Query('key1:val1 and key2=val2 and key3==val3'),
)
def test_not_equal(self):
self.assertEqual(Q(key1__ne=1), Query('key1!=1'))
def test_greater_than(self):
self.assertEqual(Q(key1__gt=1), Query('key1>1'))
self.assertEqual(Q(key1__gte=1), Query('key1>=1'))
def test_less_than(self):
self.assertEqual(Q(key1__lt=1), Query('key1<1'))
self.assertEqual(Q(key1__lte=1), Query('key1<=1'))
def test_exists_and_not_exists(self):
self.assertEqual(
Q(key1__exists=True) & Q(key2='value2'),
Query('key1:* and key2:"value2"'),
)
self.assertEqual(
Q(key1__exists=False) & Q(key2='value2'),
Query('key1:! and key2:"value2"'),
)
self.assertRaisesRegex(
InvalidSyntaxError,
'Wildcard operator can only be used for equals',
Query,
'key1 != *',
)
self.assertRaisesRegex(
InvalidSyntaxError,
'Exclude operator can only be used for equals',
Query,
'key1 != !',
)
def test_contain_and_not_contain(self):
self.assertEqual(
Q(key1__in=[1, 2, 3]) & Q(key2='value2'),
Query('key1 @ [1,2,3] and key2:"value2"'),
)
self.assertEqual(
Q(key1__nin=[1, 2, 3]) & Q(key2='value2'),
Query('key1 !@ [1,2,3] and key2:"value2"'),
)
def test_transform_hook(self):
self.assertEqual(
Q(key1=ts2dt(0)) & Q(key2=0),
Query('key1: 0 and key2: 0', transform={'key1': ts2dt}),
)
self.assertEqual( # bypass :*
Q(key1__exists=True) & Q(key2=0),
Query('key1: * and key2: 0', transform={'key1': ts2dt}),
)
self.assertEqual( # bypass :!
Q(key1__exists=False) & Q(key2=0),
Query('key1: ! and key2: 0', transform={'key1': ts2dt}),
)
self.assertEqual( # nested field
Q(nested__key1=ts2dt(0)) & Q(key2=0),
Query('nested.key1: 0 and key2: 0', transform={'nested.key1': ts2dt}),
)
self.assertRaisesRegex( # hook exception handle
TransformHookError,
'Field key1 transform hook error',
Query,
'key1 != abc',
transform={'key1': ts2dt},
)
def test_nested_field(self):
self.assertEqual(Q(key__inner=0), Query('key.inner: 0'))
class OtherTest(unittest.TestCase):
def test_readme_example(self):
connect('mongoengine_test', host='mongomock://localhost')
class User(Document):
fullname = StringField()
User(fullname='Tom').save()
User(fullname='Dick').save()
User(fullname='Harry').save()
self.assertEqual(User.objects(Query('fullname: Dick')).first().fullname, 'Dick')
self.assertEqual(
User.objects(
Query('fullname: dick', transform={'fullname': lambda x: x.title()})
)
.first()
.fullname,
'Dick',
)
| 35.698113
| 88
| 0.532505
| 7,365
| 0.968314
| 0
| 0
| 0
| 0
| 0
| 0
| 2,146
| 0.282146
|
b8be08575104b59466524c927af95ffef96623e1
| 2,891
|
py
|
Python
|
dmidecode/__init__.py
|
hamgom95/dmidecode
|
d8d82fecdbfe578ad5e9c561753dcbc6fdfdc02c
|
[
"MIT"
] | null | null | null |
dmidecode/__init__.py
|
hamgom95/dmidecode
|
d8d82fecdbfe578ad5e9c561753dcbc6fdfdc02c
|
[
"MIT"
] | null | null | null |
dmidecode/__init__.py
|
hamgom95/dmidecode
|
d8d82fecdbfe578ad5e9c561753dcbc6fdfdc02c
|
[
"MIT"
] | null | null | null |
import subprocess
from collections import UserDict
from functools import lru_cache
def _parse_handle_section(lines):
"""
Parse a section of dmidecode output
* 1st line contains address, type and size
* 2nd line is title
* line started with one tab is one option and its value
* line started with two tabs is a member of list
"""
data = {"_title": next(lines).rstrip()}
for line in lines:
line = line.rstrip()
if line.startswith("\t\t"):
try:
data[k].append(line.lstrip())
except AttributeError:
# ignore stray <OUT OF SPEC> lines
pass
elif line.startswith("\t"):
k, v = [i.strip() for i in line.lstrip().split(":", 1)]
if v is "":
data[k] = []
else:
data[k] = v
else:
break
return data
class Dmidecode(UserDict):
"""Dmidecode parser storing parsed data as dict like object."""
TYPE = {
0: "bios",
1: "system",
2: "base board",
3: "chassis",
4: "processor",
7: "cache",
8: "port connector",
9: "system slot",
10: "on board device",
11: "OEM strings",
# 13: 'bios language',
15: "system event log",
16: "physical memory array",
17: "memory device",
19: "memory array mapped address",
24: "hardware security",
25: "system power controls",
27: "cooling device",
32: "system boot",
41: "onboard device",
}
@classmethod
def from_command(cls, args=None):
args = [] if args is None else args
output = subprocess.run_command(["dmidecode", *args], root=True).stdout
return cls(output)
def __init__(self, output):
self.output = output
def i_entries(self):
lines = self.output.strip().splitlines()
for line in lines:
if line.startswith("Handle 0x"):
handle_str, type_str, byte_str = line.split(",", 2)
handle = handle_str.split(" ", 1)[1]
typ = int(type_str.strip()[len("DMI type") :])
if typ in cls.TYPE:
# parse section
section = _parse_handle_section(lines)
# add handle information
entry = {**section, "Handle": handle}
yield (cls.TYPE[typ], entry)
@property
@lru_cache
def entries(self):
return list(self.i_entries())
@property
@lru_cache
def categories(self):
"""Parse dmidecode output to dict of categories with subitems.
"""
d = {}
for category, entry in self.entries:
# gather entries in categories
d.setdefault(category, []).append(entry)
return d
| 28.623762
| 79
| 0.528191
| 1,975
| 0.683155
| 610
| 0.211
| 592
| 0.204773
| 0
| 0
| 850
| 0.294016
|
b8beac8ab26c148a31cb1d0f421ff54922a1ebcd
| 1,580
|
py
|
Python
|
{{cookiecutter.repo_name}}/webapp/config/settings/cache.py
|
bopo/django-template
|
465f48563bc9625e37bb278a32800e7a55d9e256
|
[
"BSD-3-Clause"
] | null | null | null |
{{cookiecutter.repo_name}}/webapp/config/settings/cache.py
|
bopo/django-template
|
465f48563bc9625e37bb278a32800e7a55d9e256
|
[
"BSD-3-Clause"
] | null | null | null |
{{cookiecutter.repo_name}}/webapp/config/settings/cache.py
|
bopo/django-template
|
465f48563bc9625e37bb278a32800e7a55d9e256
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from .base import MIDDLEWARE_CLASSES
except ImportError as e:
raise e
# MIDDLEWARE_CLASSES += (
# 'django.middleware.cache.CacheMiddleware',
# 'django.middleware.cache.UpdateCacheMiddleware',
# 'django.middleware.cache.FetchFromCacheMiddleware',
# )
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/django_cache',
},
# 'locmem': {
# 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
# 'LOCATION': 'unique-snowflake',
# },
# 'dummy': {
# 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
# },
# 'redis': {
# 'BACKEND': 'redis_cache.RedisCache',
# 'LOCATION': '127.0.0.1:6379',
# 'OPTIONS': {
# 'DB': 0,
# 'PASSWORD': '',
# 'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
# 'CONNECTION_POOL_CLASS_KWARGS': {
# 'max_connections': 50,
# 'timeout': 20,
# }
# },
# },
# 'memcache': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': '127.0.0.1:11211',
# 'LOCATION': 'unix:/tmp/memcached.sock',
# },
# 'database': {
# 'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
# 'LOCATION': 'my_cache_table',
# }
}
REDIS_TIMEOUT = 7 * 24 * 60 * 60
CUBES_REDIS_TIMEOUT = 60 * 60
NEVER_REDIS_TIMEOUT = 365 * 24 * 60 * 60
| 28.727273
| 75
| 0.563924
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,145
| 0.724684
|
b8c05b3185ce376bc8351fd54c6fd146defe890b
| 5,144
|
py
|
Python
|
scripts/evaluate_hatexplain.py
|
GKingA/POTATO
|
585eb002d95375a94b496b0f38637fdf69cd8a9e
|
[
"MIT"
] | 26
|
2021-10-05T14:57:33.000Z
|
2022-03-27T04:26:21.000Z
|
scripts/evaluate_hatexplain.py
|
GKingA/POTATO
|
585eb002d95375a94b496b0f38637fdf69cd8a9e
|
[
"MIT"
] | 20
|
2021-12-01T09:03:41.000Z
|
2022-03-09T10:45:58.000Z
|
scripts/evaluate_hatexplain.py
|
GKingA/POTATO
|
585eb002d95375a94b496b0f38637fdf69cd8a9e
|
[
"MIT"
] | 3
|
2021-11-18T07:14:56.000Z
|
2022-02-17T09:14:46.000Z
|
from typing import List, Dict
import json
import numpy as np
from pandas import DataFrame
import logging
from argparse import ArgumentParser, ArgumentError
from sklearn.metrics import classification_report
from xpotato.graph_extractor.extract import FeatureEvaluator
from xpotato.dataset.explainable_dataset import ExplainableDataset
def print_classification_report(df: DataFrame, stats: Dict[str, List]):
print(
classification_report(
df.label_id,
[(n > 0) * 1 for n in np.sum([p for p in stats["Predicted"]], axis=0)],
digits=3,
)
)
def find_good_features(
feature_file: str,
train_files: List[str],
valid_files: List[str],
save_features: str,
target: str,
threshold: float,
) -> None:
with open(feature_file) as feature_json:
features = json.load(feature_json)
if target is None:
target = list(features.keys())[0]
if save_features is None:
logging.warning(
"No path given for the good features. "
'They will be saved to this working directory with the name "good_features.json"'
)
save_features = "good_features.json"
if len(train_files) > 1:
logging.warning(
"Only the first training file will be used to determine the good features, "
"but the features will be evaluated on every file given."
)
train = ExplainableDataset(path=train_files[0], label_vocab={"None": 0, target: 1})
train_df = train.to_dataframe()
evaluator = FeatureEvaluator()
train_stats = evaluator.evaluate_feature(target, features[target], train_df)[0]
good_features = []
bad_features = []
for (index, stats), feature in zip(train_stats.iterrows(), features[target]):
if stats["Precision"] >= threshold:
good_features.append(feature)
if len(stats["False_positive_indices"]) > len(stats["True_positive_indices"]):
bad_features.append(feature)
print(f"Bad features: {len(bad_features)}\n\t{bad_features}")
print(f"Good features: {len(good_features)}\n\t{good_features}")
print(f"Train file {train_files[0]} with every feature:")
print_classification_report(train_df, train_stats)
with open(save_features, "w") as js:
json.dump({target: good_features}, js)
if valid_files is None:
valid_files = []
evaluate(feature_file=save_features, files=train_files + valid_files, target=target)
def evaluate(feature_file: str, files: List[str], target: str):
with open(feature_file) as feature_json:
features = json.load(feature_json)
evaluator = FeatureEvaluator()
if target is None:
target = list(features.keys())[0]
for file in files:
print(f"File: {file}")
potato = ExplainableDataset(path=file, label_vocab={"None": 0, target: 1})
df = potato.to_dataframe()
stats = evaluator.evaluate_feature(target, features[target], df)[0]
print_classification_report(df, stats)
print("------------------------")
if __name__ == "__main__":
argparser = ArgumentParser()
argparser.add_argument(
"--mode",
"-m",
choices=["find_good_features", "evaluate"],
help="The mode of operation",
default="evaluate",
)
argparser.add_argument(
"--features",
"-f",
help="Path to the feature to evaluate.Used in both modes",
required=True,
)
argparser.add_argument(
"--target",
"-tg",
help="The target category of your features. If not given, than the code will choose one from the feature file.",
)
argparser.add_argument(
"--threshold",
"-th",
help="The minimum precision with which we consider a feature good.",
default=0.8,
type=float,
)
argparser.add_argument(
"--train", "-t", help="The train file in potato format", nargs="+"
)
argparser.add_argument(
"--valid", "-v", help="The validation file in potato format", nargs="+"
)
argparser.add_argument(
"--save_features",
"-sf",
help="Path to the feature file where the good features will be saved in find_good features mode",
)
args = argparser.parse_args()
if args.mode == "find_good_features":
if args.train is None:
raise ArgumentError(
argument=args.train,
message="Training file is needed in find_good_features mode",
)
find_good_features(
args.features,
args.train,
args.valid,
args.save_features,
args.target,
args.threshold,
)
else:
if args.train is None and args.valid is None:
raise ArgumentError(
argument=args.train,
message="At least one training file or validation is needed in evaluate mode",
)
train = [] if args.train is None else args.train
valid = [] if args.valid is None else args.valid
evaluate(args.features, train + valid, args.target)
| 34.756757
| 120
| 0.626361
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,270
| 0.24689
|
b8c18579e2101b06416f377ffa427b6e165dcba7
| 53
|
py
|
Python
|
agency/memory/__init__.py
|
jackharmer/agency
|
5a78dd23e14c44c4076e49ea44b83ab1697e51c8
|
[
"MIT"
] | 2
|
2022-03-30T19:51:42.000Z
|
2022-03-30T20:05:39.000Z
|
agency/memory/__init__.py
|
jackharmer/agency
|
5a78dd23e14c44c4076e49ea44b83ab1697e51c8
|
[
"MIT"
] | null | null | null |
agency/memory/__init__.py
|
jackharmer/agency
|
5a78dd23e14c44c4076e49ea44b83ab1697e51c8
|
[
"MIT"
] | null | null | null |
from .episodic import EpisodicMemory, EpisodicBuffer
| 26.5
| 52
| 0.867925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b8c19e5ee50c09165615a57248929fdadd0a46be
| 1,876
|
py
|
Python
|
examples/car_on_hill_fqi.py
|
doroK/mushroom
|
47e5b1d09b65da585c1b19a6cc7f0366849d7863
|
[
"MIT"
] | null | null | null |
examples/car_on_hill_fqi.py
|
doroK/mushroom
|
47e5b1d09b65da585c1b19a6cc7f0366849d7863
|
[
"MIT"
] | null | null | null |
examples/car_on_hill_fqi.py
|
doroK/mushroom
|
47e5b1d09b65da585c1b19a6cc7f0366849d7863
|
[
"MIT"
] | null | null | null |
import numpy as np
from joblib import Parallel, delayed
from sklearn.ensemble import ExtraTreesRegressor
from mushroom.algorithms.value import FQI
from mushroom.core import Core
from mushroom.environments import *
from mushroom.policy import EpsGreedy
from mushroom.utils.dataset import compute_J
from mushroom.utils.parameters import Parameter
"""
This script aims to replicate the experiments on the Car on Hill MDP as
presented in:
"Tree-Based Batch Mode Reinforcement Learning", Ernst D. et al.. 2005.
"""
def experiment():
np.random.seed()
# MDP
mdp = CarOnHill()
# Policy
epsilon = Parameter(value=1.)
pi = EpsGreedy(epsilon=epsilon)
# Approximator
approximator_params = dict(input_shape=mdp.info.observation_space.shape,
n_actions=mdp.info.action_space.n,
n_estimators=50,
min_samples_split=5,
min_samples_leaf=2)
approximator = ExtraTreesRegressor
# Agent
algorithm_params = dict(n_iterations=20)
agent = FQI(approximator, pi, mdp.info,
approximator_params=approximator_params, **algorithm_params)
# Algorithm
core = Core(agent, mdp)
# Train
core.learn(n_episodes=1000, n_episodes_per_fit=1000)
# Test
test_epsilon = Parameter(0.)
agent.policy.set_epsilon(test_epsilon)
initial_states = np.zeros((289, 2))
cont = 0
for i in range(-8, 9):
for j in range(-8, 9):
initial_states[cont, :] = [0.125 * i, 0.375 * j]
cont += 1
dataset = core.evaluate(initial_states=initial_states)
return np.mean(compute_J(dataset, mdp.info.gamma))
if __name__ == '__main__':
n_experiment = 1
Js = Parallel(n_jobs=-1)(delayed(experiment)() for _ in range(n_experiment))
print((np.mean(Js)))
| 26.422535
| 80
| 0.655117
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 234
| 0.124733
|
b8c1beea49870d673c41dbabd215c2bea4001620
| 1,854
|
py
|
Python
|
db.py
|
dashimaki360/mahjong-line-bot
|
e119e83308bed1bfe6d66d53e41a4b7908dceb5e
|
[
"MIT"
] | null | null | null |
db.py
|
dashimaki360/mahjong-line-bot
|
e119e83308bed1bfe6d66d53e41a4b7908dceb5e
|
[
"MIT"
] | 5
|
2018-04-19T06:59:47.000Z
|
2018-04-20T00:07:34.000Z
|
db.py
|
dashimaki360/mahjong-line-bot
|
e119e83308bed1bfe6d66d53e41a4b7908dceb5e
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
# heroku postgresql setting
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL', None)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
class usermessage(db.Model):
'''
user message and reply db
'''
__tablename__ = 'usermessage'
id = db.Column(db.String(50), primary_key=True)
user_id = db.Column(db.String(50))
message = db.Column(db.Text)
reply_message = db.Column(db.Text)
timestamp = db.Column(db.TIMESTAMP)
def __init__(self,
id,
user_id,
message,
reply_message,
timestamp,):
self.id = id
self.user_id = user_id
self.message = message
self.reply_message = reply_message
self.timestamp = timestamp
def to_dict(self):
return dict(
id=self.id,
user_id=self.user_id,
message=self.message,
reply_message=self.reply_message,
timestamp=self.timestamp,
)
def addToSql(event, reply, sticker=False, image=False):
'''
add message data to sql
'''
if sticker:
msg = "stamp {} {}".format(event.message.package_id, event.message.sticker_id)
elif image:
msg = "IMAGE_MESSAGE"
else:
msg = event.message.text,
add_data = usermessage(
id=event.message.id,
user_id=event.source.user_id,
message=msg,
reply_message=reply,
timestamp=datetime.fromtimestamp(int(event.timestamp)/1000)
)
try:
db.session.add(add_data)
db.session.commit()
except (SQLAlchemy.exc.SQLAlchemyError, SQLAlchemy.exc.DBAPIError) as e:
print("sql error happen")
print(e)
| 28.090909
| 86
| 0.60356
| 862
| 0.464941
| 0
| 0
| 0
| 0
| 0
| 0
| 237
| 0.127832
|
b8c255c102573468b31394960a4e3c18d4bdfc95
| 728
|
py
|
Python
|
loss/voxel_match_loss.py
|
sennnnn/Refer-it-in-RGBD
|
ac8dcaed80e28d2708f14cba5142fec5301eb3cc
|
[
"MIT"
] | 28
|
2021-03-26T09:24:23.000Z
|
2022-02-17T20:14:43.000Z
|
loss/voxel_match_loss.py
|
sennnnn/Refer-it-in-RGBD
|
ac8dcaed80e28d2708f14cba5142fec5301eb3cc
|
[
"MIT"
] | 1
|
2021-07-12T02:38:51.000Z
|
2021-07-12T11:43:31.000Z
|
loss/voxel_match_loss.py
|
sennnnn/Refer-it-in-RGBD
|
ac8dcaed80e28d2708f14cba5142fec5301eb3cc
|
[
"MIT"
] | 4
|
2021-08-05T01:57:05.000Z
|
2022-02-17T20:26:35.000Z
|
import torch
import torch.nn as nn
class voxel_match_loss(nn.Module):
def __init__(self):
super().__init__()
self.criterion=nn.MSELoss()
def forward(self,output,label):
positive_mask=torch.zeros(label.shape).cuda()
positive_mask=torch.where(label>0.2,torch.ones_like(positive_mask), positive_mask)
positive_loss=self.criterion(output*positive_mask,label*positive_mask)
negative_mask=torch.zeros(label.shape).cuda()
negative_mask = torch.where(label <= 0.2, torch.ones_like(negative_mask), negative_mask)
negative_loss=self.criterion(output*negative_mask,label*negative_mask)
loss=positive_loss+negative_loss
loss=loss/2
return loss
| 42.823529
| 96
| 0.715659
| 692
| 0.950549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b8c32fb0b4535e967806c491e7dce8ba89fb1433
| 1,134
|
py
|
Python
|
app/cachedmodel/migrations/0001_initial.py
|
Uniquode/uniquode2
|
385f3e0b26383c042d8da64b52350e82414580ea
|
[
"MIT"
] | null | null | null |
app/cachedmodel/migrations/0001_initial.py
|
Uniquode/uniquode2
|
385f3e0b26383c042d8da64b52350e82414580ea
|
[
"MIT"
] | null | null | null |
app/cachedmodel/migrations/0001_initial.py
|
Uniquode/uniquode2
|
385f3e0b26383c042d8da64b52350e82414580ea
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.7 on 2021-09-19 03:41
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='CachedModelTypes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app_label', models.CharField(max_length=100)),
('model', models.CharField(max_length=100)),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
],
options={
'ordering': ('app_label', 'model'),
'unique_together': {('app_label', 'model')},
},
managers=[
('objects', django.db.models.manager.Manager()),
('_related', django.db.models.manager.Manager()),
],
),
]
| 32.4
| 128
| 0.574074
| 976
| 0.86067
| 0
| 0
| 0
| 0
| 0
| 0
| 258
| 0.227513
|
b8c4664f2ad6a4052e0d5d282f88dba0b1d97427
| 8,305
|
py
|
Python
|
conduit/utils/awsbatch_operator.py
|
elenimath/saber
|
71acab9798cf3aee1c4d64b09453e5234f8fdf1e
|
[
"Apache-2.0"
] | 12
|
2018-05-14T17:43:18.000Z
|
2021-11-16T04:03:33.000Z
|
conduit/utils/awsbatch_operator.py
|
elenimath/saber
|
71acab9798cf3aee1c4d64b09453e5234f8fdf1e
|
[
"Apache-2.0"
] | 34
|
2019-05-06T19:13:36.000Z
|
2021-05-06T19:12:35.000Z
|
conduit/utils/awsbatch_operator.py
|
elenimath/saber
|
71acab9798cf3aee1c4d64b09453e5234f8fdf1e
|
[
"Apache-2.0"
] | 3
|
2019-10-08T17:42:17.000Z
|
2021-07-28T05:52:02.000Z
|
# Copyright 2019 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import parse
from math import log1p
from time import sleep, time
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
from conduit.utils.datajoint_hook import DatajointHook, JobMetadata
from airflow.contrib.hooks.aws_hook import AwsHook
from datajoint.errors import DuplicateError
class AWSBatchOperator(BaseOperator):
"""
Execute a job on AWS Batch Service
:param job_name: the name for the job that will run on AWS Batch
:type job_name: str
:param job_definition: the job definition name on AWS Batch
:type job_definition: str
:param queue: the queue name on AWS Batch
:type queue: str
:param: overrides: the same parameter that boto3 will receive on containerOverrides:
http://boto3.readthedocs.io/en/latest/reference/services/batch.html#submit_job
:type: overrides: dict
:param max_retries: exponential backoff retries while waiter is not merged
:type max_retries: int
:param aws_conn_id: connection id of AWS credentials / region name. If None,
credential boto3 strategy will be used (http://boto3.readthedocs.io/en/latest/guide/configuration.html).
:type aws_conn_id: str
:param region_name: region name to use in AWS Hook. Override the region_name in connection (if provided)
"""
ui_color = "#c3dae0"
client = None
arn = None
template_fields = ("overrides",)
@apply_defaults
def __init__(
self,
job_name,
job_definition,
queue,
overrides,
workflow_id,
max_retries=288,
aws_conn_id=None,
region_name=None,
job_parameters={},
score_format="",
**kwargs
):
super(AWSBatchOperator, self).__init__(**kwargs)
self.job_name = job_name
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.job_definition = job_definition
self.queue = queue
self.overrides = overrides
self.max_retries = max_retries
self.jobParameters = job_parameters
self.jobId = None
self.jobName = None
self.dj_hook = DatajointHook()
self.workflow_id = workflow_id
self.jobmetadata_db = JobMetadata()
self.hook = self.get_hook()
self.score_format = score_format
def execute(self, context):
self.log.info(
"Running AWS Batch Job - Job definition: %s - on queue %s",
self.job_definition,
self.queue,
)
self.log.info("AWSBatchOperator overrides: %s", self.overrides)
self.client = self.hook.get_client_type("batch", region_name=self.region_name)
try:
response = self.client.submit_job(
jobName=self.job_name,
jobQueue=self.queue,
jobDefinition=self.job_definition,
containerOverrides=self.overrides,
parameters=self.jobParameters,
)
self.log.info("AWS Batch Job started: %s", response)
self.jobId = response["jobId"]
self.jobName = response["jobName"]
self._wait_for_task_ended()
self._check_success_task()
task_time, score = self._get_score()
iteration = self.task_id.split(".")[1]
real_task_id = self.task_id.split(".")[0]
self.log.info(
"Inserting {} {} {} {} {} into job metadata database".format(
self.workflow_id, iteration, real_task_id, task_time, score
)
)
self.dj_hook.insert1(
{
"iteration": iteration,
"workflow_id": self.workflow_id,
"job_id": real_task_id,
"cost": task_time,
"score": score,
},
JobMetadata,
)
self.log.info("AWS Batch Job has been successfully executed: %s", response)
except Exception as e:
self.log.info("AWS Batch Job has failed executed")
raise AirflowException(e)
def _wait_for_task_ended(self):
"""
Try to use a waiter from the below pull request
* https://github.com/boto/botocore/pull/1307
If the waiter is not available apply a exponential backoff
* docs.aws.amazon.com/general/latest/gr/api-retries.html
"""
# TODO improve this? Checking every 5s doesn't seem like too often...
try:
waiter = self.client.get_waiter("job_execution_complete")
waiter.config.max_attempts = sys.maxsize # timeout is managed by airflow
waiter.wait(jobs=[self.jobId])
except ValueError:
# If waiter not available use expo
retry = True
retries = 0
while (retries < self.max_retries or self.max_retries <= 0) and retry:
response = self.client.describe_jobs(jobs=[self.jobId])
if response["jobs"][-1]["status"] in ["SUCCEEDED", "FAILED"]:
retry = False
sleep(log1p(retries) * 30)
retries += 1
def _check_success_task(self):
response = self.client.describe_jobs(jobs=[self.jobId],)
self.log.info("AWS Batch stopped, check status: %s", response)
if len(response.get("jobs")) < 1:
raise AirflowException("No job found for {}".format(response))
for job in response["jobs"]:
if "attempts" in job:
containers = job["attempts"]
for container in containers:
if (
job["status"] == "FAILED"
or container["container"]["exitCode"] != 0
):
print("@@@@")
raise AirflowException(
"This containers encounter an error during execution {}".format(
job
)
)
elif job["status"] is not "SUCCEEDED":
raise AirflowException(
"This task is still pending {}".format(job["status"])
)
def get_hook(self):
return AwsHook(aws_conn_id=self.aws_conn_id)
def on_kill(self):
response = self.client.terminate_job(
jobId=self.jobId, reason="Task killed by the user"
)
self.log.info(response)
def _get_score(self):
response = self.client.describe_jobs(jobs=[self.jobId])
runTime = response["jobs"][-1]["stoppedAt"] - response["jobs"][-1]["startedAt"]
if self.score_format:
logStream = response["jobs"][-1]["container"]["logStreamName"]
self.logClient = self.hook.get_client_type(
"logs", region_name=self.region_name
)
response = self.logClient.get_log_events(
logGroupName="/aws/batch/job", logStreamName=logStream,
)
logEvents = response["events"]
# Reads events from most recent to least recent (earliest), so the
# first match is the most recent score. Perhaps change this?
for logEvent in logEvents:
parsed_event = parse.parse(self.score_format, logEvent["message"])
if parsed_event and "score" in parsed_event.named:
return (runTime, float(parsed_event["score"]))
self.log.info("Score format present but no score found in logs...")
return (runTime, None)
| 38.627907
| 116
| 0.596388
| 7,314
| 0.880674
| 0
| 0
| 910
| 0.109573
| 0
| 0
| 2,935
| 0.353402
|
b8c4711e42028105dddd073eaee5ccd39e86f063
| 17,579
|
py
|
Python
|
csapi.py
|
ria-ee/X-Road-cs-api
|
37d28886e47eea21cb4e46ad20b84bbfcafe79ad
|
[
"MIT"
] | 1
|
2020-04-16T06:31:54.000Z
|
2020-04-16T06:31:54.000Z
|
csapi.py
|
ria-ee/X-Road-cs-api
|
37d28886e47eea21cb4e46ad20b84bbfcafe79ad
|
[
"MIT"
] | null | null | null |
csapi.py
|
ria-ee/X-Road-cs-api
|
37d28886e47eea21cb4e46ad20b84bbfcafe79ad
|
[
"MIT"
] | 1
|
2019-09-09T08:07:15.000Z
|
2019-09-09T08:07:15.000Z
|
#!/usr/bin/env python3
"""This is a module for X-Road Central Server API.
This module allows:
* adding new member to the X-Road Central Server.
* adding new subsystem to the X-Road Central Server.
"""
import json
import logging
import re
import psycopg2
from flask import request, jsonify
from flask_restful import Resource
DB_CONF_FILE = '/etc/xroad/db.properties'
LOGGER = logging.getLogger('csapi')
def get_db_conf():
"""Get Central Server database configuration parameters"""
conf = {
'database': '',
'username': '',
'password': ''
}
# Getting database credentials from X-Road configuration
try:
with open(DB_CONF_FILE, 'r') as db_conf:
for line in db_conf:
match_res = re.match('^database\\s*=\\s*(.+)$', line)
if match_res:
conf['database'] = match_res.group(1)
match_res = re.match('^username\\s*=\\s*(.+)$', line)
if match_res:
conf['username'] = match_res.group(1)
match_res = re.match('^password\\s*=\\s*(.+)$', line)
if match_res:
conf['password'] = match_res.group(1)
except IOError:
pass
return conf
def get_db_connection(conf):
"""Get connection object for Central Server database"""
return psycopg2.connect(
'host={} port={} dbname={} user={} password={}'.format(
'localhost', '5432', conf['database'], conf['username'], conf['password']))
def get_member_class_id(cur, member_class):
"""Get ID of member class from Central Server"""
cur.execute("""select id from member_classes where code=%(str)s""", {'str': member_class})
rec = cur.fetchone()
if rec:
return rec[0]
return None
def subsystem_exists(cur, member_id, subsystem_code):
"""Check if subsystem exists in Central Server"""
cur.execute(
"""
select exists(
select * from security_server_clients
where type='Subsystem' and xroad_member_id=%(member_id)s
and subsystem_code=%(subsystem_code)s
)
""", {'member_id': member_id, 'subsystem_code': subsystem_code})
return cur.fetchone()[0]
def get_member_data(cur, class_id, member_code):
"""Get member data from Central Server"""
cur.execute(
"""
select id, name
from security_server_clients
where type='XRoadMember' and member_class_id=%(class_id)s
and member_code=%(member_code)s
""", {'class_id': class_id, 'member_code': member_code})
rec = cur.fetchone()
if rec:
return {'id': rec[0], 'name': rec[1]}
return None
def get_utc_time(cur):
"""Get current time in UTC timezone from Central Server database"""
cur.execute("""select current_timestamp at time zone 'UTC'""")
return cur.fetchone()[0]
def add_member_identifier(cur, **kwargs):
"""Add new X-Road member identifier to Central Server
Required keyword arguments:
member_class, member_code, utc_time
"""
cur.execute(
"""
insert into identifiers (
object_type, xroad_instance, member_class, member_code, type, created_at,
updated_at
) values (
'MEMBER', (select value from system_parameters where key='instanceIdentifier'),
%(class)s, %(code)s, 'ClientId', %(time)s, %(time)s
) returning id
""", {
'class': kwargs['member_class'], 'code': kwargs['member_code'],
'time': kwargs['utc_time']}
)
return cur.fetchone()[0]
def add_subsystem_identifier(cur, **kwargs):
"""Add new X-Road subsystem identifier to Central Server
Required keyword arguments:
member_class, member_code, subsystem_code, utc_time
"""
cur.execute(
"""
insert into identifiers (
object_type, xroad_instance, member_class, member_code, subsystem_code, type,
created_at, updated_at
) values (
'SUBSYSTEM', (select value from system_parameters where key='instanceIdentifier'),
%(class)s, %(member_code)s, %(subsystem_code)s, 'ClientId', %(time)s, %(time)s
) returning id
""", {
'class': kwargs['member_class'], 'member_code': kwargs['member_code'],
'subsystem_code': kwargs['subsystem_code'], 'time': kwargs['utc_time']}
)
return cur.fetchone()[0]
def add_member_client(cur, **kwargs):
"""Add new X-Road member client to Central Server
Required keyword arguments:
member_code, member_name, class_id, identifier_id, utc_time
"""
cur.execute(
"""
insert into security_server_clients (
member_code, name, member_class_id, server_client_id, type, created_at, updated_at
) values (
%(code)s, %(name)s, %(class_id)s, %(identifier_id)s, 'XRoadMember', %(time)s,
%(time)s
)
""", {
'code': kwargs['member_code'], 'name': kwargs['member_name'],
'class_id': kwargs['class_id'], 'identifier_id': kwargs['identifier_id'],
'time': kwargs['utc_time']
}
)
def add_subsystem_client(cur, **kwargs):
"""Add new X-Road subsystem as a client to Central Server
Required keyword arguments:
subsystem_code, member_id, identifier_id, utc_time
"""
cur.execute(
"""
insert into security_server_clients (
subsystem_code, xroad_member_id, server_client_id, type, created_at, updated_at
) values (
%(subsystem_code)s, %(member_id)s, %(identifier_id)s, 'Subsystem', %(time)s,
%(time)s
)
""", {
'subsystem_code': kwargs['subsystem_code'], 'member_id': kwargs['member_id'],
'identifier_id': kwargs['identifier_id'], 'time': kwargs['utc_time']
}
)
def add_client_name(cur, **kwargs):
"""Add new X-Road client name to Central Server
Required keyword arguments:
member_name, identifier_id, utc_time
"""
cur.execute(
"""
insert into security_server_client_names (
name, client_identifier_id, created_at, updated_at
) values (
%(name)s, %(identifier_id)s, %(time)s, %(time)s
)
""", {
'name': kwargs['member_name'], 'identifier_id': kwargs['identifier_id'],
'time': kwargs['utc_time']}
)
def add_member(member_class, member_code, member_name, json_data):
"""Add new X-Road member to Central Server"""
conf = get_db_conf()
if not conf['username'] or not conf['password'] or not conf['database']:
LOGGER.error('DB_CONF_ERROR: Cannot access database configuration')
return {
'http_status': 500, 'code': 'DB_CONF_ERROR',
'msg': 'Cannot access database configuration'}
with get_db_connection(conf) as conn:
with conn.cursor() as cur:
class_id = get_member_class_id(cur, member_class)
if class_id is None:
LOGGER.warning(
'INVALID_MEMBER_CLASS: Provided Member Class does not exist '
'(Request: %s)', json_data)
return {
'http_status': 400, 'code': 'INVALID_MEMBER_CLASS',
'msg': 'Provided Member Class does not exist'}
if get_member_data(cur, class_id, member_code) is not None:
LOGGER.warning(
'MEMBER_EXISTS: Provided Member already exists '
'(Request: %s)', json_data)
return {
'http_status': 409, 'code': 'MEMBER_EXISTS',
'msg': 'Provided Member already exists'}
# Timestamps must be in UTC timezone
utc_time = get_utc_time(cur)
identifier_id = add_member_identifier(
cur, member_class=member_class, member_code=member_code, utc_time=utc_time)
add_member_client(
cur, member_code=member_code, member_name=member_name, class_id=class_id,
identifier_id=identifier_id, utc_time=utc_time)
add_client_name(
cur, member_name=member_name, identifier_id=identifier_id, utc_time=utc_time)
conn.commit()
LOGGER.info(
'Added new Member: member_code=%s, member_name=%s, member_class=%s',
member_code, member_name, member_class)
return {'http_status': 201, 'code': 'CREATED', 'msg': 'New Member added'}
def add_subsystem(member_class, member_code, subsystem_code, json_data):
"""Add new X-Road subsystem to Central Server"""
conf = get_db_conf()
if not conf['username'] or not conf['password'] or not conf['database']:
LOGGER.error('DB_CONF_ERROR: Cannot access database configuration')
return {
'http_status': 500, 'code': 'DB_CONF_ERROR',
'msg': 'Cannot access database configuration'}
with get_db_connection(conf) as conn:
with conn.cursor() as cur:
class_id = get_member_class_id(cur, member_class)
if class_id is None:
LOGGER.warning(
'INVALID_MEMBER_CLASS: Provided Member Class does not exist '
'(Request: %s)', json_data)
return {
'http_status': 400, 'code': 'INVALID_MEMBER_CLASS',
'msg': 'Provided Member Class does not exist'}
member_data = get_member_data(cur, class_id, member_code)
if member_data is None:
LOGGER.warning(
'INVALID_MEMBER: Provided Member does not exist '
'(Request: %s)', json_data)
return {
'http_status': 400, 'code': 'INVALID_MEMBER',
'msg': 'Provided Member does not exist'}
if subsystem_exists(cur, member_data['id'], subsystem_code):
LOGGER.warning(
'SUBSYSTEM_EXISTS: Provided Subsystem already exists '
'(Request: %s)', json_data)
return {
'http_status': 409, 'code': 'SUBSYSTEM_EXISTS',
'msg': 'Provided Subsystem already exists'}
# Timestamps must be in UTC timezone
utc_time = get_utc_time(cur)
identifier_id = add_subsystem_identifier(
cur, member_class=member_class, member_code=member_code,
subsystem_code=subsystem_code, utc_time=utc_time)
add_subsystem_client(
cur, subsystem_code=subsystem_code, member_id=member_data['id'],
identifier_id=identifier_id, utc_time=utc_time)
add_client_name(
cur, member_name=member_data['name'], identifier_id=identifier_id,
utc_time=utc_time)
conn.commit()
LOGGER.info(
'Added new Subsystem: member_class=%s, member_code=%s, subsystem_code=%s',
member_class, member_code, subsystem_code)
return {'http_status': 201, 'code': 'CREATED', 'msg': 'New Subsystem added'}
def make_response(data):
"""Create JSON response object"""
response = jsonify({'code': data['code'], 'msg': data['msg']})
response.status_code = data['http_status']
LOGGER.info('Response: %s', data)
return response
def get_input(json_data, param_name):
"""Get parameter from request parameters
Returns two items:
* parameter value
* error response (if parameter not found).
If one parameter is set then other is always None.
"""
try:
param = json_data[param_name]
except KeyError:
LOGGER.warning(
'MISSING_PARAMETER: Request parameter %s is missing '
'(Request: %s)', param_name, json_data)
return None, {
'http_status': 400, 'code': 'MISSING_PARAMETER',
'msg': 'Request parameter {} is missing'.format(param_name)}
return param, None
def load_config(config_file):
"""Load configuration from JSON file"""
try:
with open(config_file, 'r') as conf:
LOGGER.info('Configuration loaded from file "%s"', config_file)
return json.load(conf)
except IOError as err:
LOGGER.error('Cannot load configuration file "%s": %s', config_file, str(err))
return None
except json.JSONDecodeError as err:
LOGGER.error('Invalid JSON configuration file "%s": %s', config_file, str(err))
return None
def check_client(config, client_dn):
"""Check if client dn is in whitelist"""
# If config is None then all clients are not allowed
if config is None:
return False
if config.get('allow_all', False) is True:
return True
allowed = config.get('allowed')
if client_dn is None or not isinstance(allowed, list):
return False
if client_dn in allowed:
return True
return False
def incorrect_client(client_dn):
"""Return error response when client is not allowed"""
LOGGER.error('FORBIDDEN: Client certificate is not allowed: %s', client_dn)
return make_response({
'http_status': 403, 'code': 'FORBIDDEN',
'msg': 'Client certificate is not allowed: {}'.format(client_dn)})
def test_db():
"""Add new X-Road subsystem to Central Server"""
conf = get_db_conf()
if not conf['username'] or not conf['password'] or not conf['database']:
LOGGER.error('DB_CONF_ERROR: Cannot access database configuration')
return {
'http_status': 500, 'code': 'DB_CONF_ERROR',
'msg': 'Cannot access database configuration'}
with get_db_connection(conf) as conn:
with conn.cursor() as cur:
cur.execute("""select 1 from system_parameters where key='instanceIdentifier'""")
rec = cur.fetchone()
if rec:
return {
'http_status': 200, 'code': 'OK',
'msg': 'API is ready'}
return {'http_status': 500, 'code': 'DB_ERROR', 'msg': 'Unexpected DB state'}
class MemberApi(Resource):
"""Member API class for Flask"""
def __init__(self, config):
self.config = config
def post(self):
"""POST method"""
json_data = request.get_json(force=True)
client_dn = request.headers.get('X-Ssl-Client-S-Dn')
LOGGER.info('Incoming request: %s', json_data)
LOGGER.info('Client DN: %s', client_dn)
if not check_client(self.config, client_dn):
return incorrect_client(client_dn)
(member_class, fault_response) = get_input(json_data, 'member_class')
if member_class is None:
return make_response(fault_response)
(member_code, fault_response) = get_input(json_data, 'member_code')
if member_code is None:
return make_response(fault_response)
(member_name, fault_response) = get_input(json_data, 'member_name')
if member_name is None:
return make_response(fault_response)
try:
response = add_member(member_class, member_code, member_name, json_data)
except psycopg2.Error as err:
LOGGER.error('DB_ERROR: Unclassified database error: %s', err)
response = {
'http_status': 500, 'code': 'DB_ERROR',
'msg': 'Unclassified database error'}
return make_response(response)
class SubsystemApi(Resource):
"""Subsystem API class for Flask"""
def __init__(self, config):
self.config = config
def post(self):
"""POST method"""
json_data = request.get_json(force=True)
client_dn = request.headers.get('X-Ssl-Client-S-Dn')
LOGGER.info('Incoming request: %s', json_data)
LOGGER.info('Client DN: %s', client_dn)
if not check_client(self.config, client_dn):
return incorrect_client(client_dn)
(member_class, fault_response) = get_input(json_data, 'member_class')
if member_class is None:
return make_response(fault_response)
(member_code, fault_response) = get_input(json_data, 'member_code')
if member_code is None:
return make_response(fault_response)
(subsystem_code, fault_response) = get_input(json_data, 'subsystem_code')
if subsystem_code is None:
return make_response(fault_response)
try:
response = add_subsystem(member_class, member_code, subsystem_code, json_data)
except psycopg2.Error as err:
LOGGER.error('DB_ERROR: Unclassified database error: %s', err)
response = {
'http_status': 500, 'code': 'DB_ERROR',
'msg': 'Unclassified database error'}
return make_response(response)
class StatusApi(Resource):
"""Status API class for Flask"""
def __init__(self, config):
self.config = config
@staticmethod
def get():
"""GET method"""
LOGGER.info('Incoming status request')
try:
response = test_db()
except psycopg2.Error as err:
LOGGER.error('DB_ERROR: Unclassified database error: %s', err)
response = {
'http_status': 500, 'code': 'DB_ERROR',
'msg': 'Unclassified database error'}
return make_response(response)
| 34.878968
| 98
| 0.598669
| 3,286
| 0.186928
| 0
| 0
| 435
| 0.024745
| 0
| 0
| 7,709
| 0.438535
|
b210a7b86cf5f45e110a190e8d8eb560c075e998
| 397
|
py
|
Python
|
dotacni_matice/migrations/0002_history.py
|
CzechInvest/ciis
|
c6102598f564a717472e5e31e7eb894bba2c8104
|
[
"MIT"
] | 1
|
2019-05-26T22:24:01.000Z
|
2019-05-26T22:24:01.000Z
|
dotacni_matice/migrations/0002_history.py
|
CzechInvest/ciis
|
c6102598f564a717472e5e31e7eb894bba2c8104
|
[
"MIT"
] | 6
|
2019-01-22T14:53:43.000Z
|
2020-09-22T16:20:28.000Z
|
dotacni_matice/migrations/0002_history.py
|
CzechInvest/ciis
|
c6102598f564a717472e5e31e7eb894bba2c8104
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.3 on 2019-12-27 18:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dotacni_matice', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='dotacnititul',
name='history',
field=models.TextField(blank=True, null=True),
),
]
| 20.894737
| 58
| 0.602015
| 304
| 0.765743
| 0
| 0
| 0
| 0
| 0
| 0
| 100
| 0.251889
|
b2112b5aca6a5b5632c5810795648be898bf0703
| 898
|
py
|
Python
|
telegrambotapiwrapper/printpretty.py
|
pynista/telegrambotapiwrapper
|
4310882a1a7db94f5256b010ff8a3103b405dc0d
|
[
"MIT"
] | 1
|
2021-05-10T06:49:52.000Z
|
2021-05-10T06:49:52.000Z
|
telegrambotapiwrapper/printpretty.py
|
pynista/telegrambotapiwrapper
|
4310882a1a7db94f5256b010ff8a3103b405dc0d
|
[
"MIT"
] | null | null | null |
telegrambotapiwrapper/printpretty.py
|
pynista/telegrambotapiwrapper
|
4310882a1a7db94f5256b010ff8a3103b405dc0d
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
from dataclasses import (
fields,
)
from prettyprinter.prettyprinter import pretty_call, register_pretty
def is_instance_of_dataclass(value):
try:
fields(value)
except TypeError:
return False
else:
return True
def pretty_dataclass_instance(value, ctx):
cls = type(value)
field_defs = fields(value)
kwargs = []
for field_def in field_defs:
# repr is True by default,
# therefore if this if False, the user
# has explicitly indicated they don't want
# to display the field value.
if not field_def.repr:
continue
kwargs.append((field_def.name, getattr(value, field_def.name)))
return pretty_call(ctx, cls, **OrderedDict(kwargs))
def install():
register_pretty(predicate=is_instance_of_dataclass)(
pretty_dataclass_instance)
| 23.631579
| 71
| 0.678174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 135
| 0.150334
|
b2113a9f179d1a1302e99c7904123f0326d3e145
| 1,055
|
py
|
Python
|
bot.py
|
ctrezevant/GEFS-bot
|
9fdfbb87e33399051ef2287e629baae234800dcf
|
[
"MIT"
] | null | null | null |
bot.py
|
ctrezevant/GEFS-bot
|
9fdfbb87e33399051ef2287e629baae234800dcf
|
[
"MIT"
] | null | null | null |
bot.py
|
ctrezevant/GEFS-bot
|
9fdfbb87e33399051ef2287e629baae234800dcf
|
[
"MIT"
] | null | null | null |
"""
GEFS Chart Bot
Polls https://www.tropicaltidbits.com/storminfo/11L_gefs_latest.png, but it can
really be used to monitor/notify about changes to any file on the web.
(c) Charlton Trezevant 2017
MIT License
Enjoy!
"""
import time, sys
sys.dont_write_bytecode = True
sys.path.insert(0, 'lib')
from EtagMonitor import EtagMonitor
from slackclient import SlackClient
CHART_URL = 'https://www.tropicaltidbits.com/storminfo/11L_gefs_latest.png'
DB_PATH = 'etag.db'
SLACK_TOKEN = ' '
SLACK_CHANNEL = ' '
monitor = EtagMonitor(dbpath=DB_PATH, url=CHART_URL)
slack = SlackClient(SLACK_TOKEN)
if monitor.has_updated() is True:
curtime = time.strftime('%b %d, %Y at %H:%M')
nocache = "?nocache=" + time.strftime('%d%H%M')
msg_text = 'Updated GEFS Chart: ' + curtime + '\n(NOAA, Irma-GEFS)'
msg_attachments = [{"title": "GEFS Chart - Updated " + curtime, "image_url": CHART_URL + nocache}]
slack.api_call("chat.postMessage", channel=SLACK_CHANNEL,
text=msg_text, attachments=msg_attachments)
| 30.142857
| 102
| 0.700474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 470
| 0.445498
|
b2116bed98a7e670916911f64ee1ba8f859af9bb
| 6,442
|
py
|
Python
|
tests/test_periodbase.py
|
pierfra-ro/astrobase
|
b9f62c59a3ab9cdc1388d409fa281c26f1e6db6c
|
[
"MIT"
] | 45
|
2017-03-09T19:08:44.000Z
|
2022-03-24T00:36:28.000Z
|
tests/test_periodbase.py
|
pierfra-ro/astrobase
|
b9f62c59a3ab9cdc1388d409fa281c26f1e6db6c
|
[
"MIT"
] | 92
|
2016-12-21T19:01:20.000Z
|
2022-01-03T15:28:45.000Z
|
tests/test_periodbase.py
|
pierfra-ro/astrobase
|
b9f62c59a3ab9cdc1388d409fa281c26f1e6db6c
|
[
"MIT"
] | 20
|
2016-12-20T23:01:29.000Z
|
2021-03-07T16:24:15.000Z
|
'''test_periodbase.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Feb 2018
License: MIT - see the LICENSE file for details.
This tests the following:
- downloads a light curve from the github repository notebooks/nb-data dir
- reads the light curve using astrobase.hatlc
- runs the GLS, WIN, PDM, AoV, BLS, AoVMH, and ACF period finders on the LC
'''
from __future__ import print_function
import os
import os.path
try:
from urllib import urlretrieve
except Exception:
from urllib.request import urlretrieve
from numpy.testing import assert_allclose
from astrobase.hatsurveys import hatlc
from astrobase import periodbase
# separate testing for kbls and abls from now on
from astrobase.periodbase import kbls
from astrobase.periodbase import abls
try:
import transitleastsquares
from astrobase.periodbase import htls
htls_ok = True
except Exception:
htls_ok = False
############
## CONFIG ##
############
# this is the light curve used for tests
LCURL = ("https://github.com/waqasbhatti/astrobase-notebooks/raw/master/"
"nb-data/HAT-772-0554686-V0-DR0-hatlc.sqlite.gz")
# this function is used to check progress of the download
def on_download_chunk(transferred,blocksize,totalsize):
progress = transferred*blocksize/float(totalsize)*100.0
print('downloading test LC: {progress:.1f}%'.format(progress=progress),
end='\r')
# get the light curve if it's not there
modpath = os.path.abspath(__file__)
LCPATH = os.path.abspath(os.path.join(os.getcwd(),
'HAT-772-0554686-V0-DR0-hatlc.sqlite.gz'))
if not os.path.exists(LCPATH):
localf, headerr = urlretrieve(
LCURL,LCPATH,reporthook=on_download_chunk
)
###########
## TESTS ##
###########
def test_gls():
'''
Tests periodbase.pgen_lsp.
'''
lcd, msg = hatlc.read_and_filter_sqlitecurve(LCPATH)
gls = periodbase.pgen_lsp(lcd['rjd'], lcd['aep_000'], lcd['aie_000'])
assert isinstance(gls, dict)
assert_allclose(gls['bestperiod'], 1.54289477)
def test_win():
'''
Tests periodbase.specwindow_lsp
'''
lcd, msg = hatlc.read_and_filter_sqlitecurve(LCPATH)
win = periodbase.specwindow_lsp(lcd['rjd'], lcd['aep_000'], lcd['aie_000'])
assert isinstance(win, dict)
assert_allclose(win['bestperiod'], 592.0307682142864)
def test_pdm():
'''
Tests periodbase.stellingwerf_pdm.
'''
lcd, msg = hatlc.read_and_filter_sqlitecurve(LCPATH)
pdm = periodbase.stellingwerf_pdm(lcd['rjd'],
lcd['aep_000'],
lcd['aie_000'])
assert isinstance(pdm, dict)
assert_allclose(pdm['bestperiod'], 3.08578956)
def test_aov():
'''
Tests periodbase.aov_periodfind.
'''
lcd, msg = hatlc.read_and_filter_sqlitecurve(LCPATH)
aov = periodbase.aov_periodfind(lcd['rjd'],
lcd['aep_000'],
lcd['aie_000'])
assert isinstance(aov, dict)
assert_allclose(aov['bestperiod'], 3.08578956)
def test_aovhm():
'''
Tests periodbase.aov_periodfind.
'''
lcd, msg = hatlc.read_and_filter_sqlitecurve(LCPATH)
mav = periodbase.aovhm_periodfind(lcd['rjd'],
lcd['aep_000'],
lcd['aie_000'])
assert isinstance(mav, dict)
assert_allclose(mav['bestperiod'], 3.08578956)
def test_acf():
'''
Tests periodbase.macf_period_find.
'''
lcd, msg = hatlc.read_and_filter_sqlitecurve(LCPATH)
acf = periodbase.macf_period_find(lcd['rjd'],
lcd['aep_000'],
lcd['aie_000'],
smoothacf=721)
assert isinstance(acf, dict)
assert_allclose(acf['bestperiod'], 3.0750854011348565)
def test_kbls_serial():
'''
Tests periodbase.kbls.bls_serial_pfind.
'''
lcd, msg = hatlc.read_and_filter_sqlitecurve(LCPATH)
bls = kbls.bls_serial_pfind(lcd['rjd'],
lcd['aep_000'],
lcd['aie_000'],
startp=1.0)
assert isinstance(bls, dict)
assert_allclose(bls['bestperiod'], 3.08560655)
def test_kbls_parallel():
'''
Tests periodbase.kbls.bls_parallel_pfind.
'''
lcd, msg = hatlc.read_and_filter_sqlitecurve(LCPATH)
bls = kbls.bls_parallel_pfind(lcd['rjd'],
lcd['aep_000'],
lcd['aie_000'],
startp=1.0)
assert isinstance(bls, dict)
assert_allclose(bls['bestperiod'], 3.08560655)
def test_abls_serial():
'''
This tests periodbase.abls.bls_serial_pfind.
'''
EXPECTED_PERIOD = 3.0873018
lcd, msg = hatlc.read_and_filter_sqlitecurve(LCPATH)
bls = abls.bls_serial_pfind(lcd['rjd'],
lcd['aep_000'],
lcd['aie_000'],
startp=1.0,
ndurations=50)
assert isinstance(bls, dict)
assert_allclose(bls['bestperiod'], EXPECTED_PERIOD)
def test_abls_parallel():
'''
This tests periodbase.abls.bls_parallel_pfind.
'''
EXPECTED_PERIOD = 3.0848887
lcd, msg = hatlc.read_and_filter_sqlitecurve(LCPATH)
bls = abls.bls_parallel_pfind(lcd['rjd'],
lcd['aep_000'],
lcd['aie_000'],
startp=1.0,
ndurations=50)
assert isinstance(bls, dict)
assert_allclose(bls['bestperiod'], EXPECTED_PERIOD, atol=1.0e-4)
if htls_ok:
def test_tls_parallel():
'''
This tests periodbase.htls.tls_parallel_pfind.
'''
EXPECTED_PERIOD = 3.0848887
lcd, msg = hatlc.read_and_filter_sqlitecurve(LCPATH)
tlsdict = htls.tls_parallel_pfind(
lcd['rjd'],
lcd['aep_000'],
lcd['aie_000'],
startp=2.0,
endp=5.0
)
tlsresult = tlsdict['tlsresult']
assert isinstance(tlsresult, dict)
# ensure period is within 2 sigma of what's expected.
assert_allclose(tlsdict['bestperiod'], EXPECTED_PERIOD,
atol=2.0*tlsresult['period_uncertainty'])
| 26.510288
| 80
| 0.591276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,871
| 0.290438
|
b211aa66d913dc22688021d4550c75de1f8811d6
| 2,877
|
py
|
Python
|
tests/sdict/test_sdict_substitutor.py
|
nikitanovosibirsk/district42-exp-types
|
e36e43da62f32d58d4b14c65afa16856dc8849e1
|
[
"Apache-2.0"
] | null | null | null |
tests/sdict/test_sdict_substitutor.py
|
nikitanovosibirsk/district42-exp-types
|
e36e43da62f32d58d4b14c65afa16856dc8849e1
|
[
"Apache-2.0"
] | 2
|
2021-08-01T05:02:21.000Z
|
2021-08-01T10:06:28.000Z
|
tests/sdict/test_sdict_substitutor.py
|
nikitanovosibirsk/district42-exp-types
|
e36e43da62f32d58d4b14c65afa16856dc8849e1
|
[
"Apache-2.0"
] | null | null | null |
from _pytest.python_api import raises
from baby_steps import given, then, when
from district42 import schema
from revolt import substitute
from revolt.errors import SubstitutionError
from district42_exp_types.sdict import schema_sdict
def test_sdict_substitution():
with given:
sch = schema_sdict
with when:
res = substitute(sch, {})
with then:
assert res == schema_sdict({})
assert res != sch
def test_sdict_nested_substitution():
with given:
sch = schema_sdict({
"result": schema_sdict({
"id": schema.int,
"name": schema.str,
"friend": schema_sdict({
"id": schema.int,
"name": schema.str
})
})
})
with when:
res = substitute(sch, {
"result.id": 1,
"result.name": "Bob",
"result.friend.id": 2,
"result.friend.name": "Alice",
})
with then:
assert res == schema_sdict({
"result": schema_sdict({
"id": schema.int(1),
"name": schema.str("Bob"),
"friend": schema_sdict({
"id": schema.int(2),
"name": schema.str("Alice")
})
})
})
assert res != sch
def test_sdict_relaxed_substitution():
with given:
sch = schema_sdict({
"result": schema_sdict({
"id": schema.int,
"name": schema.str,
...: ...
})
})
with when:
res = substitute(sch, {
"result.id": 1,
})
with then:
assert res == schema_sdict({
"result": schema_sdict({
"id": schema.int(1),
"name": schema.str,
...: ...
})
})
assert res != sch
def test_sdict_relaxed_extra_key_substitution_error():
with given:
sch = schema_sdict({
"result": schema_sdict({
"id": schema.int,
"name": schema.str,
...: ...
})
})
with when, raises(Exception) as exception:
substitute(sch, {
"result.id": 1,
"result.deleted_at": None
})
with then:
assert exception.type is SubstitutionError
def test_sdict_relaxed_ellipsis_substitution_error():
with given:
sch = schema_sdict({
"result": schema_sdict({
"id": schema.int,
"name": schema.str,
...: ...
})
})
with when, raises(Exception) as exception:
substitute(sch, {
"result.id": 1,
...: ...
})
with then:
assert exception.type is SubstitutionError
| 23.77686
| 54
| 0.468891
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 282
| 0.098019
|
b211f2e30f6de646dd73a75da8055e28f37f148d
| 1,735
|
py
|
Python
|
algo/problems/pascal_triangle.py
|
avi3tal/knowledgebase
|
fd30805aa94332a6c14c9d8631c7044673fb3e2c
|
[
"MIT"
] | null | null | null |
algo/problems/pascal_triangle.py
|
avi3tal/knowledgebase
|
fd30805aa94332a6c14c9d8631c7044673fb3e2c
|
[
"MIT"
] | null | null | null |
algo/problems/pascal_triangle.py
|
avi3tal/knowledgebase
|
fd30805aa94332a6c14c9d8631c7044673fb3e2c
|
[
"MIT"
] | 1
|
2021-11-19T13:45:59.000Z
|
2021-11-19T13:45:59.000Z
|
"""
Pascal's Triangle
1
11
121
1331
14641
Question:
Find the value in given row and column
First solution: brute force
Second solution: Dynamic programming
alternative
def pascal(r, c):
print(f"row: {r}, col: {c}")
if r == 0 or r == 1 or c == 0:
return 1
return pascal(r-1, c-1) + pascal(r-1, c)
res = pascal(4, 2)
print(res)
"""
from algo import dynamic_programming
def print_triangle(t):
for i in t:
print(", ".join(map(str, i)))
def build_triangle(row, triangle):
if len(triangle) < row:
last_row = len(triangle) - 1
new_row = []
for i in range(0, len(triangle[last_row]) + 1):
if i == 0:
new_row.append(1)
elif i >= len(triangle[last_row]):
new_row.append(1)
else:
new_value = triangle[last_row][i] + triangle[last_row][i-1]
new_row.append(new_value)
triangle.append(new_row)
build_triangle(row, triangle)
def find_value_brute_force(row, column):
if row <= 2:
return 1
triangle = [[1], [1, 1]]
build_triangle(row, triangle)
print_triangle(triangle)
return triangle[row - 1][column - 1]
@dynamic_programming
def find_value_dynamically(row, column):
if row <= 2:
return 1
row = row - 3
previous_row = [1, 1]
while row:
new_row = [1]
[new_row.append(previous_row[i-1] + previous_row[i]) for i in range(1, len(previous_row))]
new_row.append(1)
previous_row = new_row
row -= 1
return previous_row[column-2] + previous_row[column-1]
if __name__ == "__main__":
print(find_value_brute_force(50, 28))
print(find_value_dynamically(50, 28))
| 20.903614
| 98
| 0.595389
| 0
| 0
| 0
| 0
| 409
| 0.235735
| 0
| 0
| 372
| 0.214409
|
b212f83168a6342d8bcbdaa233860a911b7cdadb
| 1,117
|
py
|
Python
|
drf_ujson/parsers.py
|
radzhome/drf-ujson-renderer
|
b65c01edc5311404178a9d245d40ccc10733c5d7
|
[
"MIT"
] | null | null | null |
drf_ujson/parsers.py
|
radzhome/drf-ujson-renderer
|
b65c01edc5311404178a9d245d40ccc10733c5d7
|
[
"MIT"
] | null | null | null |
drf_ujson/parsers.py
|
radzhome/drf-ujson-renderer
|
b65c01edc5311404178a9d245d40ccc10733c5d7
|
[
"MIT"
] | 1
|
2019-04-04T13:25:22.000Z
|
2019-04-04T13:25:22.000Z
|
from __future__ import unicode_literals
import codecs
from django.conf import settings
from rest_framework.compat import six
from rest_framework.parsers import BaseParser, ParseError
from rest_framework import renderers
from rest_framework.settings import api_settings
import ujson
class UJSONParser(BaseParser):
"""
Parses JSON-serialized data.
"""
media_type = 'application/json'
renderer_class = renderers.JSONRenderer
strict = api_settings.STRICT_JSON
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as JSON and returns the resulting data.
"""
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
try:
decoded_stream = codecs.getreader(encoding)(stream)
parse_constant = ujson.strict_constant if self.strict else None
return ujson.load(decoded_stream, parse_constant=parse_constant)
except ValueError as exc:
raise ParseError('JSON parse error - %s' % six.text_type(exc))
| 32.852941
| 78
| 0.72068
| 830
| 0.743062
| 0
| 0
| 0
| 0
| 0
| 0
| 189
| 0.169203
|
b213326f9b1abfe3dfc2e0c0ee4f51afa2c00f6e
| 778
|
py
|
Python
|
Software_University/python_basics/exam_preparation/4_exam_prep/renovation.py
|
Ivanazzz/SoftUni-W3resource-Python
|
892321a290e22a91ff2ac2fef5316179a93f2f17
|
[
"MIT"
] | 1
|
2022-01-26T07:38:11.000Z
|
2022-01-26T07:38:11.000Z
|
Software_University/python_basics/exam_preparation/4_exam_prep/renovation.py
|
Ivanazzz/SoftUni-W3resource-Python
|
892321a290e22a91ff2ac2fef5316179a93f2f17
|
[
"MIT"
] | null | null | null |
Software_University/python_basics/exam_preparation/4_exam_prep/renovation.py
|
Ivanazzz/SoftUni-W3resource-Python
|
892321a290e22a91ff2ac2fef5316179a93f2f17
|
[
"MIT"
] | null | null | null |
from math import ceil
walls_hight = int(input())
walls_witdh = int(input())
percentage_walls_tottal_area_not_painted = int(input())
total_walls_area = walls_hight * walls_witdh * 4
quadratic_meters_left = total_walls_area - ceil(total_walls_area * percentage_walls_tottal_area_not_painted / 100)
while True:
paint_liters = input()
if paint_liters == "Tired!":
print(f"{quadratic_meters_left} quadratic m left.")
break
paint_liters = int(paint_liters)
quadratic_meters_left -= paint_liters
if quadratic_meters_left < 0:
print(f"All walls are painted and you have {abs(quadratic_meters_left)} l paint left!")
break
elif quadratic_meters_left == 0:
print("All walls are painted! Great job, Pesho!")
break
| 32.416667
| 114
| 0.717224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 174
| 0.22365
|
b2142b5c3740f3c4fe33aa1ffaba044ed1c5379b
| 1,078
|
py
|
Python
|
Order_Managment/GEMINI_OM/Order.py
|
anubhavj880/Jack
|
932fc60923ccc17b5fd77bd13ee381b1c8ba0f15
|
[
"Apache-2.0"
] | null | null | null |
Order_Managment/GEMINI_OM/Order.py
|
anubhavj880/Jack
|
932fc60923ccc17b5fd77bd13ee381b1c8ba0f15
|
[
"Apache-2.0"
] | null | null | null |
Order_Managment/GEMINI_OM/Order.py
|
anubhavj880/Jack
|
932fc60923ccc17b5fd77bd13ee381b1c8ba0f15
|
[
"Apache-2.0"
] | null | null | null |
class Order():
def __init__(self, exchCode, sym_, _sym, orderType, price, side, qty, stopPrice=''):
self.odid = None
self.status = None
self.tempOdid = None
self.sym_ = sym_
self._sym = _sym
self.symbol = sym_ + _sym
self.exchCode = exchCode.upper()
self.orderType = orderType
self.price = price
self.fair = -1.0
self.side = side.upper()
self.sign = '+' if self.side == 'BUY' else '-' # for logging only
# self.order_type_id = None # Only for Coinigy
# self.price_type_id = None # Only for Coinigy
self.qty = qty
self.stop_price = stopPrice
self.orderExposure = -1.0
# timestamp
self.createTs = -1.0
self.activeTs = -1.0
self.cxlTs = -1.0
self.cxledTs = -1.0
self.filledTs = -1.0
# for pricing
self.eq = -1.0
# for order handling
self.nbFillQ = 0
self.nbMissingAck = 0
self.nbExtRej = 0
self.nbNone = 0
self.nbFalseActive = 0
| 31.705882
| 88
| 0.541744
| 1,077
| 0.999072
| 0
| 0
| 0
| 0
| 0
| 0
| 167
| 0.154917
|
b21485714fab66b89d8a0a3cada0bde14841a26b
| 19,069
|
py
|
Python
|
Commands.py
|
ehasting/psybot
|
8699f1ad8010bac5d2622486cb549898fc979036
|
[
"BSD-2-Clause"
] | null | null | null |
Commands.py
|
ehasting/psybot
|
8699f1ad8010bac5d2622486cb549898fc979036
|
[
"BSD-2-Clause"
] | null | null | null |
Commands.py
|
ehasting/psybot
|
8699f1ad8010bac5d2622486cb549898fc979036
|
[
"BSD-2-Clause"
] | null | null | null |
import datetime
import re
import os
import requests
import json
import uuid
import random
import calendar
import time
import libs.SerializableDict as SerializableDict
import libs.StorageObjects as StorageObjects
import libs.Models as Models
import libs.Loggiz as Loggiz
from pytz import timezone
import pytz
import telegram
import logging
'''
Copyright (c) 2016, Egil Hasting
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
'''
__author__ = "Egil Hasting"
__copyright__ = "Copyright 2016"
__credits__ = ["Egil Hasting"]
__license__ = "BSD"
__version__ = "1.0.0"
__maintainer__ = "Egil Hasting"
__email__ = "egil.hasting@higen.org"
__status__ = "Production"
class emoji(object):
def __init__(self):
self.used = list()
random.seed(calendar.timegm(time.gmtime()))
def get_randomanimal(self):
animals = [ telegram.Emoji.RAT,
telegram.Emoji.MOUSE,
telegram.Emoji.OX,
telegram.Emoji.WATER_BUFFALO,
telegram.Emoji.COW,
telegram.Emoji.TIGER,
telegram.Emoji.LEOPARD,
telegram.Emoji.RABBIT,
telegram.Emoji.CAT,
telegram.Emoji.DRAGON,
telegram.Emoji.CROCODILE,
telegram.Emoji.WHALE,
telegram.Emoji.RAM,
telegram.Emoji.GOAT,
telegram.Emoji.ROOSTER,
telegram.Emoji.DOG,
telegram.Emoji.PIG]
while True:
foundindex = random.randrange(1, len(animals)) - 1
if foundindex not in self.used:
self.used.append(foundindex)
break
if len(self.used) == len(animals):
self.used = list()
return animals[foundindex]
# self.db = dbobject
# self.uindex = dbobject.Get("userindex")
class GeneralMessageEvent(object):
def __init__(self):
self.dbobject = Models.StaticModels()
self.config = self.dbobject.Get("config")
def run(self, bot, update, args):
raise NotImplementedError()
@classmethod
def stripusername(self, username):
if username.startswith("@"):
# remove @
return username[1:]
else:
return username
class Null(GeneralMessageEvent):
def __init__(self):
GeneralMessageEvent.__init__(self)
def run(self, bot, update, args):
pass
class WebSearchDuckDuckGo(GeneralMessageEvent):
def __init__(self):
GeneralMessageEvent.__init__(self)
def _generate_url(self, searchstring):
searchstring = searchstring.replace(" ", "+")
print(searchstring)
return "http://api.duckduckgo.com/?q={}&format=json".format(searchstring)
def run(self, bot, update, args):
r = requests.get(self._generate_url(" ".join(args)))
if r.status_code == 200:
searchresult = r.json()
resultcount = len(searchresult["RelatedTopics"])
outputstring = "{} (found: {})\n".format(searchresult["Heading"], resultcount)
limitcounter = 0
for article in searchresult["RelatedTopics"]:
outputstring += article.get("Result", "") + "\n"
d = article.get("Result", "")
if d == "":
print(article)
limitcounter += 1
if limitcounter == 3:
break
bot.sendMessage(update.message.chat_id, text="{}".format(outputstring), parse_mode="HTML")
class Time(GeneralMessageEvent):
def __init__(self):
GeneralMessageEvent.__init__(self)
def run(self, bot, update, args):
localtime = datetime.datetime.now()
home = pytz.timezone("Europe/Oslo")
localtime = home.normalize(home.localize(localtime))
timezones = self.config.timezones.Get()
out = "<b>Current Time</b>\n"
out += "Norway: " + str(localtime.strftime('%X %x %Z')) + "\n"
for tz in timezones:
desc = tz[0]
zonename = tz[1]
currentzone = pytz.timezone(zonename)
currentlocaltime = localtime.astimezone(currentzone)
out += "{}: {}\n".format(desc, str(currentlocaltime.strftime('%X %x %Z')))
Loggiz.log.write.info(out)
bot.sendMessage(update.message.chat_id, text="{}".format(out), parse_mode="HTML")
class Configure(GeneralMessageEvent):
def __init__(self):
GeneralMessageEvent.__init__(self)
def addignoreword(self, word):
d = self.config.ignorewords.Get()
if word not in d:
d.append(word)
self.config.ignorewords.Set(d)
return True
return False
def delignoreword(self, word):
d = self.config.ignorewords.Get()
if word in d:
d.remove(word)
self.config.ignorewords.Set(d)
return True
return False
def addtimezone(self, desc, tzstring):
d = self.config.timezones.Get()
for tz in d:
if tz[0] == desc:
return False
d.append([desc, tzstring])
self.config.timezones.Set(d)
return True
def deltimezone(self, desc):
pass
def run(self, bot, update, args):
out = None
if len(args) == 0:
return
if update.message.from_user.username not in self.config.admins.Get() and update.message.from_user.username != "ehasting":
Loggiz.log.write.error("Non admin ({}) tried to configure the bot".format(update.message.from_user.username))
bot.sendMessage(update.message.chat_id, text="{}".format("you need backdoor access... no grid for you!!!!"), parse_mode="HTML")
return
if args[0] == "help":
out = "Available configuration: addignoreword, delignoreword, addtimezone"
elif args[0] == "addignoreword":
for word in args[1:]:
out = self.addignoreword(word)
Loggiz.log.write.info("{} = {}".format(word, out))
elif args[0] == "delignoreword":
for word in args[1:]:
out = self.delignoreword(word)
Loggiz.log.write.info("{} = {}".format(word, out))
elif args[0] == "addtimezone":
out = self.addtimezone(args[1], args[2])
if out is not None:
Loggiz.log.write.info(out)
bot.sendMessage(update.message.chat_id, text="{}".format(out), parse_mode="HTML")
class Stats(GeneralMessageEvent):
def __init__(self):
GeneralMessageEvent.__init__(self)
self.seen = self.dbobject.Get("seenlog")
self.uindex = self.dbobject.Get("userindex")
self.wordcounter = self.dbobject.Get("wordcounter")
def run(self, bot, update, args):
self.ignorewords = self.config.ignorewords.Get()
users = self.seen.usercounter.Get()
data = users.rawdict()
output_string = "<b>Most Active User Stats (by words):</b>\n"
place = 1
placeemoji = emoji()
for key, user in sorted(data, key=self.sort_by_word, reverse=True):
username = key
if username == "":
continue
Loggiz.log.write.info(user)
usercountobject = SerializableDict.UserObject(user)
useremoji = placeemoji.get_randomanimal()
output_string += "{} [{}] {}: {} (Lines: {})\n".format(useremoji, place, username, usercountobject.wordcounter, usercountobject.counter)
if telegram.Emoji.DRAGON == useremoji:
output_string += " - Entering the dragon......\n"
place += 1
output_string += "\n<b>Most used words:</b>\n"
words = self.wordcounter.words.Get()
cnt = 0
for key, value in sorted(words.rawdict(), key=self.sort_by_wordusage, reverse=True):
Loggiz.log.write.info(value)
currentword = SerializableDict.WordStats(value)
Loggiz.log.write.info(currentword.word)
if currentword.word in self.ignorewords:
continue
output_string += "{}: {} times\n".format(currentword.word, currentword.counter)
cnt += 1
if cnt > 4:
break
Loggiz.log.write.info(output_string)
bot.sendMessage(update.message.chat_id, text="{}".format(output_string), parse_mode="HTML")
def sort_by_wordusage(self, worddict):
d = SerializableDict.WordStats(worddict[1])
if not isinstance(d.counter, int):
return 0
return d.counter
def sort_by_word(self, userdict):
usercountobject = SerializableDict.UserObject(userdict[1])
if not isinstance(usercountobject.wordcounter, int):
return 1
Loggiz.log.write.info(usercountobject.wordcounter)
return usercountobject.wordcounter
class Help(GeneralMessageEvent):
def __init__(self):
GeneralMessageEvent.__init__(self)
def run(self, bot, update, args):
output_string = "<b>Available commands</b>\n"
output_string += commands
bot.sendMessage(update.message.chat_id, text="{}".format(output_string), parse_mode="HTML")
@classmethod
def sort_by_word(cls, userdict):
usercountobject = SerializableDict.UserObject(userdict)
if usercountobject.wordcounter == "":
return 0
return usercountobject.wordcounter
class AudioTips(GeneralMessageEvent):
def __init__(self):
GeneralMessageEvent.__init__(self)
self.tipdb = self.dbobject.Get("tipdb")
class Counter(GeneralMessageEvent):
def __init__(self):
GeneralMessageEvent.__init__(self)
self.seen = self.dbobject.Get("seenlog")
self.wordcounter = self.dbobject.Get("wordcounter")
def run(self, bot, update):
user = self.seen.usercounter.Get()
usercount = user.get(update.message.from_user.username)
usercountobject = SerializableDict.UserObject(usercount)
words = self.wordcounter.words.Get()
# Line counter
if usercountobject.counter == "":
usercountobject.counter = 1
else:
usercountobject.counter = usercountobject.counter + 1
# Word counter
currentwordcount = re.findall('\w+', update.message.text.lower())
ignorecharacterlist = [".", "!", "?", ",", ":", ";", "-", "_", "/"]
for word in currentwordcount:
#word = word.translate(None, ''.join(ignorecharacterlist))
current = words.get(word)
current = SerializableDict.WordStats(current)
if current.counter == "":
current.counter = 0
current.word = word
current.counter = int(current.counter) + 1
Loggiz.log.write.info("{}: {}".format(current.word, current.counter))
words.set(word, current.SaveObject())
self.wordcounter.words.Set(words)
print("Words: {}".format(len(currentwordcount)))
if usercountobject.wordcounter == "":
usercountobject.wordcounter = len(currentwordcount)
else:
usercountobject.wordcounter = usercountobject.wordcounter + len(currentwordcount)
# Last seen
usercountobject.timestamp = str(datetime.datetime.now().replace(microsecond=0))
# Metadata
usercountobject.firstname = update.message.from_user.first_name
usercountobject.lastname = update.message.from_user.last_name
usercountobject.username = update.message.from_user.username
# Store object to dictionary and back to DB
user.set(update.message.from_user.username, usercountobject.SaveObject())
self.seen.usercounter.Set(user)
class Seen(GeneralMessageEvent):
def __init__(self):
GeneralMessageEvent.__init__(self)
self.seendb = self.dbobject.Get("seenlog")
def run(self, bot, update, args):
Loggiz.log.write.info("Gettings Stats")
user = self.seendb.usercounter.Get()
if len(args) > 0:
Loggiz.log.write.info("finding user {}".format(args[0]))
username = self.stripusername(args[0])
fetchseenuser = user.get(username)
userseenobject = SerializableDict.UserObject(fetchseenuser)
Loggiz.log.write.info(userseenobject.timestamp)
if userseenobject.timestamp != "":
bot.sendMessage(update.message.chat_id, text="hey! {} was last seen {} (lines/words: {}/{})".format(username, userseenobject.timestamp, userseenobject.counter, userseenobject.wordcounter))
else:
Loggiz.log.write.warn("Did not find any user info!")
else:
bot.sendMessage(update.message.chat_id, text="{} U ale wlong!! do like this!! command @<username>".format(telegram.Emoji.PILE_OF_POO))
class QuoteBase(GeneralMessageEvent):
def __init__(self):
GeneralMessageEvent.__init__(self)
self.uindex = self.dbobject.Get("userindex")
class AddQuote(QuoteBase):
def __init__(self):
QuoteBase.__init__(self)
def run(self, bot, update, args):
new_quote_index = str(uuid.uuid4())
if len(args) < 2:
Loggiz.log.write.info("Argument length was {}".format(len(args)))
bot.sendMessage(update.message.chat_id, text='[USAGE] <username> <quote>')
else:
username = self.stripusername(args[0])
if username not in self.uindex.index.Get():
tmplist = self.uindex.index.Get()
tmplist.append(username)
self.uindex.index.Set(tmplist)
Loggiz.log.write.info("user/nick added to index")
thequote = " ".join(args[1:])
if isinstance(thequote, unicode):
quotetext = StorageObjects.ComnodeObject("quotestext.{}".format(new_quote_index), "unicode", desc="", hidden=False)
else:
quotetext = StorageObjects.ComnodeObject("quotestext.{}".format(new_quote_index), "str", desc="", hidden=False)
quotetext.Set(thequote)
quotemetausername = StorageObjects.ComnodeObject("quotemap.{}".format(username), "list", desc="", hidden=False)
qmun = quotemetausername.Get()
qmun.append(new_quote_index)
quotemetausername.Set(qmun)
bot.sendMessage(update.message.chat_id, text="Quote from {} added with id {}\n#quote\n/addquote {} {}".format(username, new_quote_index, username, thequote))
class Quote(QuoteBase):
def __init__(self):
QuoteBase.__init__(self)
self.taken = list()
random.seed(calendar.timegm(time.gmtime()))
def get_quote(self, username):
username = username.replace("<", "")
username = username.replace(">", "")
quotemetausername = StorageObjects.ComnodeObject("quotemap.{}".format(username), "list", desc="", hidden=False)
qmun = quotemetausername.Get()
if len(qmun) > 0:
foundindex = random.randrange(0, len(qmun))
Loggiz.log.write.info("found: {}, total: {}".format(foundindex, len(qmun)))
if len(qmun) == foundindex:
foundindex = foundindex - 1
if qmun[foundindex] in self.taken:
Loggiz.log.write.info("{} is taken".format(qmun[foundindex]))
return "TAKEN"
else:
quotetext = StorageObjects.ComnodeObject("quotestext.{}".format(qmun[foundindex]), "str", desc="", hidden=False)
self.taken.append(qmun[foundindex])
if quotetext.Get() == "":
return "TAKEN"
quoteoutput = quotetext.Get()
quoteoutput = quoteoutput.replace("<", "")
quoteoutput = quoteoutput.replace(">", "")
return "<i>{}</i>: {}".format(username, quoteoutput)
else:
return None
def findrandomuser(self):
userindexlength = len(self.uindex.index.Get())
if userindexlength == 0:
return
luckyuser = random.randrange(0, userindexlength)
if len(self.uindex.index.Get()) == luckyuser:
luckyuser = luckyuser - 1
return self.uindex.index.Get()[luckyuser]
def run(self, bot, update, args):
emojiz = emoji()
iterationcount = 0
if len(args) == 1:
nums = int(args[0])
if nums > 10:
nums = 10
quoteoutput = "<b>(almost) {} random Quotes</b>\n".format(nums)
Loggiz.log.write.info("Args {} converted to {}".format(str(args), nums))
while True:
if iterationcount > (nums * 20):
Loggiz.log.write.warn("Retry exhausted")
break
randomuser = self.findrandomuser()
currentquote = self.get_quote(randomuser)
if currentquote == "TAKEN":
Loggiz.log.write.info("Quote Taken or blank")
iterationcount += 1
continue
elif currentquote is None:
Loggiz.log.write.info("Quote on {} not found".format(randomuser))
iterationcount += 1
continue
quoteoutput += "{} {}\n".format(emojiz.get_randomanimal(), currentquote)
if len(self.taken) >= nums:
break
else:
quoteoutput = self.get_quote(self.findrandomuser())
if quoteoutput is not None:
Loggiz.log.write.info(str(self.taken))
Loggiz.log.write.info(quoteoutput)
bot.sendMessage(update.message.chat_id, text=quoteoutput, parse_mode="HTML")
self.taken = list()
if __name__ == '__main__':
pass
| 39.317526
| 204
| 0.608579
| 17,043
| 0.893754
| 0
| 0
| 408
| 0.021396
| 0
| 0
| 3,147
| 0.165032
|
b218434a962715f2504f0272b199565a159dcf7b
| 115
|
py
|
Python
|
aim/pytorch.py
|
avkudr/aim
|
5961f31d358929287986ace09c73310886a94704
|
[
"Apache-2.0"
] | 2,195
|
2020-01-23T03:08:11.000Z
|
2022-03-31T14:32:19.000Z
|
aim/pytorch.py
|
deepanprabhu/aim
|
c00d8ec7bb2d9fd230a9430b516ca90cdb8072cb
|
[
"Apache-2.0"
] | 696
|
2020-02-08T21:55:45.000Z
|
2022-03-31T16:52:22.000Z
|
aim/pytorch.py
|
deepanprabhu/aim
|
c00d8ec7bb2d9fd230a9430b516ca90cdb8072cb
|
[
"Apache-2.0"
] | 150
|
2020-03-27T10:44:25.000Z
|
2022-03-21T21:29:41.000Z
|
# Alias to SDK PyTorch utils
from aim.sdk.adapters.pytorch import track_params_dists, track_gradients_dists # noqa
| 38.333333
| 85
| 0.834783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 34
| 0.295652
|
b21881d06efcd08194a38d1b8b2a7efa72fa56b5
| 890
|
py
|
Python
|
src/tools/checkDeckByUrl.py
|
kentokura/xenoparts
|
d861ca474accdf1ec7bcf6afcac6be9246cf4c85
|
[
"MIT"
] | null | null | null |
src/tools/checkDeckByUrl.py
|
kentokura/xenoparts
|
d861ca474accdf1ec7bcf6afcac6be9246cf4c85
|
[
"MIT"
] | null | null | null |
src/tools/checkDeckByUrl.py
|
kentokura/xenoparts
|
d861ca474accdf1ec7bcf6afcac6be9246cf4c85
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# Your code here!
import csv
def encode_cardd_by_url(url: str) -> dict:
"""
入力:デッキURL
出力:{ card_id, num }
処理:
URLから、カードidごとの枚数の辞書を作成する
"""
site_url, card_url = url.split("c=")
card_url, key_card_url = card_url.split("&")
arr_card_id = card_url.split(".")
deck = { card_id: arr_card_id.count(card_id) for card_id in arr_card_id }
return deck
# 処理はここから
deck = encode_cardd_by_url(input())
card_details = []
# csvを開く, card_dbはwithを抜けると自動で閉じる
with open('../db/dmps_card_db.csv') as card_db:
reader = csv.reader(f)
for row in reader:
for card_id, num in deck.items():
# keyが存在する行をとってくる
card_details.append(row.split(","))
# card_details.append(csv(exist key line).split(","))
# 結果出力
for card_detail in card_details:
print(card_detail)
| 22.25
| 77
| 0.61573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 409
| 0.400196
|
b21bfec88e0dfd45846324420361a10ba1865cb9
| 193
|
py
|
Python
|
kleeneup/__init__.py
|
caiopo/kleeneup
|
0050054853ac7a3a2e40d492cc5fe741ef737191
|
[
"MIT"
] | null | null | null |
kleeneup/__init__.py
|
caiopo/kleeneup
|
0050054853ac7a3a2e40d492cc5fe741ef737191
|
[
"MIT"
] | null | null | null |
kleeneup/__init__.py
|
caiopo/kleeneup
|
0050054853ac7a3a2e40d492cc5fe741ef737191
|
[
"MIT"
] | 1
|
2018-10-10T00:59:54.000Z
|
2018-10-10T00:59:54.000Z
|
from .regular_grammar import RegularGrammar
from .finite_automaton import FiniteAutomaton, State, Symbol, Sentence
from .regular_expression import RegularExpression, StitchedBinaryTree, Lambda
| 48.25
| 77
| 0.870466
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b21c7a1509c7bfd68dcac48270c795470f743c73
| 89
|
py
|
Python
|
recipe_backend/recipes/apps.py
|
jbernal0019/Recipe_site
|
30090b521cac84156cf5f05429a12dd5889f8703
|
[
"MIT"
] | null | null | null |
recipe_backend/recipes/apps.py
|
jbernal0019/Recipe_site
|
30090b521cac84156cf5f05429a12dd5889f8703
|
[
"MIT"
] | 3
|
2020-02-12T01:22:24.000Z
|
2021-06-10T21:49:21.000Z
|
recipe_backend/recipes/apps.py
|
jbernal0019/Recipe_site
|
30090b521cac84156cf5f05429a12dd5889f8703
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig


class PluginsConfig(AppConfig):
    """Django application configuration for the recipes app."""

    # Label under which Django registers this application.
    name = 'recipes'
| 14.833333
| 33
| 0.752809
| 52
| 0.58427
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.101124
|
b21e2321bf77cc16cff7c91db7f72ea88ee39b5b
| 1,337
|
py
|
Python
|
mass_circular_weighing/constants.py
|
MSLNZ/Mass-Circular-Weighing
|
f144158b9e2337d7e9446326d6927e1dd606ed38
|
[
"MIT"
] | 1
|
2020-02-19T09:10:43.000Z
|
2020-02-19T09:10:43.000Z
|
mass_circular_weighing/constants.py
|
MSLNZ/Mass-Circular-Weighing
|
f144158b9e2337d7e9446326d6927e1dd606ed38
|
[
"MIT"
] | null | null | null |
mass_circular_weighing/constants.py
|
MSLNZ/Mass-Circular-Weighing
|
f144158b9e2337d7e9446326d6927e1dd606ed38
|
[
"MIT"
] | null | null | null |
"""
A repository for constants and symbols used in the mass weighing program
Modify default folder paths as necessary
"""
import os
MU_STR = 'µ' # ALT+0181 or 'µ'. use 'u' if running into issues
SIGMA_STR = 'σ' # \u03C3 for sigma sign
DELTA_STR = 'Δ' # \u0394 for capital delta sign
SQUARED_STR = '²'
SUFFIX = {'ng': 1e-9, 'µg': 1e-6, 'ug': 1e-6, 'mg': 1e-3, 'g': 1, 'kg': 1e3}
DEGREE_SIGN = '°' # \xb0
IN_DEGREES_C = ' ('+DEGREE_SIGN+'C)'
NBC = True #
REL_UNC = 0.03 # relative uncertainty in ppm for no buoyancy correction: typically 0.03 or 0.1
local_backup = r'C:\CircularWeighingData'
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
admin_default = os.path.join(ROOT_DIR, r'utils\default_admin.xlsx')
config_default = os.path.join(ROOT_DIR, r'utils\default_config.xml')
save_folder_default = r'G:\My Drive'
commercial21_folder = r'I:\MSL\Private\Mass\Commercial Calibrations\2021'
mass_folder = r'I:\MSL\Private\Mass'
mydrive = r'G:\My Drive'
job_default = "J00000"
client_default = "Client"
client_wt_IDs_default = '1 2 5 10 20 50 100 200 500 1000 2000 5000 10000'.split()
MAX_BAD_RUNS = 6 # limit for aborting circular weighing due to multiple bad runs
FONTSIZE = 32 # size of text in large pop-ups
| 36.135135
| 107
| 0.658938
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 703
| 0.523065
|
b21e3496e5fd13e41a572208964a13c7cf7ed7c2
| 3,032
|
py
|
Python
|
UsbVibrationDevice.py
|
Suitceyes-Project-Code/Vibration-Pattern-Player
|
44d8bac61eed0ee7712eb0299d0d7029f688fe24
|
[
"MIT"
] | null | null | null |
UsbVibrationDevice.py
|
Suitceyes-Project-Code/Vibration-Pattern-Player
|
44d8bac61eed0ee7712eb0299d0d7029f688fe24
|
[
"MIT"
] | null | null | null |
UsbVibrationDevice.py
|
Suitceyes-Project-Code/Vibration-Pattern-Player
|
44d8bac61eed0ee7712eb0299d0d7029f688fe24
|
[
"MIT"
] | 1
|
2021-10-04T14:26:49.000Z
|
2021-10-04T14:26:49.000Z
|
import PyCmdMessenger
from VestDeviceBase import VestDevice


class UsbVestDevice(VestDevice):
    """
    Basic interface for sending commands to the vest using a
    serial port connection.
    """

    # Command table shared with the device firmware: (name, argument format).
    commands = [["PinSet", "gg"],
                ["PinMute", "g"],
                ["GloveSet", "gg*"],
                ["GloveMute", ""],
                ["FreqSet", "g"],
                ["PinGet", "g"],
                ["FreqGet", ""],
                ["PinState", "gg"],
                ["FreqState", "g"],
                ["StringMsg", "s"],
                ["DebugSet", "g"],
                ["SetMotor", "gg"],
                ["SetMotorSpeed", "g"]]

    def __init__(self, device):
        """
        Creates a new instance of Vest.

        Inputs:
            device:
                The path to the device, e.g.: "/dev/ttyACM0" or "COM3"
        """
        self._board = PyCmdMessenger.ArduinoBoard(device, baud_rate=115200)
        self._connection = PyCmdMessenger.CmdMessenger(self._board, UsbVestDevice.commands, warnings=False)

    def __enter__(self):
        # Start from a known state when used as a context manager.
        self.set_frequency(0)
        return self

    def __exit__(self, type, value, traceback):
        # Make sure the vest is muted and that the connection is closed.
        self.mute()
        self._board.close()

    def set_pin(self, pin, value):
        """
        Sets a pin to a given value (the vibration intensity of that pin).

        Inputs:
            pin: pin index (byte).
            value: intensity byte 0-255; 0 is off, 255 is max.
        """
        self._connection.send("PinSet", pin, value)

    def mute_pin(self, pin):
        """Sets the vibration intensity for a given pin to 0."""
        self._connection.send("PinMute", pin)

    def mute(self):
        """Mutes all pins on the vest."""
        self._connection.send("GloveMute")

    def set_frequency(self, frequency):
        """Sets the frequency (in milliseconds) of the entire vest."""
        self._connection.send("FreqSet", frequency)

    def set_vest(self, pin_value_dict, frequency):
        """Set several pins plus the frequency with one GloveSet command.

        The arguments are flattened to pin, value, pin, value, ..., frequency.
        """
        values = []
        for pin, value in pin_value_dict.items():
            values.append(pin)
            values.append(value)
        values.append(frequency)
        self._connection.send("GloveSet", *values)

    def get_pin(self, pin):
        """
        Gets the vibration intensity for a given pin.

        Inputs:
            pin: The pin index whose intensity should be fetched.
        """
        self._connection.send("PinGet", pin)
        return self._connection.receive()

    def set_pins_batched(self, values=None):
        """Set many pins at once from a {pin: intensity} mapping.

        Bug fix: the previous default was the *dict type itself*
        (``values = dict``), which raised TypeError when the method was
        called with no argument.  ``None`` (or an empty dict) is a no-op.
        """
        if not values:
            return
        for pin in values:
            self.set_pin(pin, values[pin])
| 30.938776
| 115
| 0.539248
| 2,971
| 0.979881
| 0
| 0
| 0
| 0
| 0
| 0
| 1,409
| 0.46471
|
b2207477c30bc92b4836e3b6d2c7c4f40fd9d5d3
| 923
|
py
|
Python
|
webapi/tests/test_models.py
|
c2masamichi/webapp-example-python-django
|
f0771526623bf5d1021ad1c5c8baf480fb285190
|
[
"MIT"
] | null | null | null |
webapi/tests/test_models.py
|
c2masamichi/webapp-example-python-django
|
f0771526623bf5d1021ad1c5c8baf480fb285190
|
[
"MIT"
] | 4
|
2021-03-21T10:43:05.000Z
|
2022-02-10T12:46:20.000Z
|
webapi/tests/test_models.py
|
c2masamichi/webapp-example-python-django
|
f0771526623bf5d1021ad1c5c8baf480fb285190
|
[
"MIT"
] | null | null | null |
from django.core.exceptions import ValidationError
import pytest

from api.models import Product


# Each (name, price) pair is presumably an invalid combination rejected by
# the model validators (name length, price bounds, allowed characters) —
# verify against Product's field definitions.
@pytest.mark.django_db
@pytest.mark.parametrize(
    ('name', 'price'),
    (
        ('aa', 1000),
        ('a' * 21, 1000),
        ('house', 1000000001),
        ('minus', -1),
        ('A 01 %', 100),
    ),
)
def test_create_validate(name, price):
    """full_clean() on a newly constructed Product must raise for each case."""
    with pytest.raises(ValidationError):
        product = Product(name=name, price=price)
        product.full_clean()


@pytest.mark.django_db
@pytest.mark.parametrize(
    ('name', 'price'),
    (
        ('aa', 1000),
        ('a' * 21, 1000),
        ('house', 1000000001),
        ('minus', -1),
        ('A 01 %', 100),
    ),
)
def test_update_validate(name, price):
    """Updating an existing Product (fixture pk=2) must also fail validation."""
    product_id = 2
    product = Product.objects.get(pk=product_id)
    with pytest.raises(ValidationError):
        product.name = name
        product.price = price
        product.full_clean()
| 21.97619
| 50
| 0.576381
| 0
| 0
| 0
| 0
| 820
| 0.888407
| 0
| 0
| 84
| 0.091008
|
b222b2832f0113a6843a7ce7ec02f0e981a7b9ca
| 12,099
|
py
|
Python
|
tests/app/main/views/test_users.py
|
AusDTO/dto-digitalmarketplace-admin-frontend
|
1858a653623999d81bb4fa3e51f7cb4df4b83079
|
[
"MIT"
] | 1
|
2018-01-04T18:10:28.000Z
|
2018-01-04T18:10:28.000Z
|
tests/app/main/views/test_users.py
|
AusDTO/dto-digitalmarketplace-admin-frontend
|
1858a653623999d81bb4fa3e51f7cb4df4b83079
|
[
"MIT"
] | 5
|
2016-12-12T04:58:12.000Z
|
2019-02-05T21:19:38.000Z
|
tests/app/main/views/test_users.py
|
AusDTO/dto-digitalmarketplace-admin-frontend
|
1858a653623999d81bb4fa3e51f7cb4df4b83079
|
[
"MIT"
] | 3
|
2017-06-19T07:51:38.000Z
|
2021-01-12T12:30:22.000Z
|
import mock
import pytest
import copy
import six
from lxml import html
from ...helpers import LoggedInApplicationTest
from dmapiclient import HTTPError
@mock.patch('app.main.views.users._user_info')
@mock.patch('app.main.views.users.data_api_client')
class TestUsersView(LoggedInApplicationTest):
    """View tests for the admin "find a user by email" page.

    The class-level mock.patch decorators inject mocks into every test
    method; the innermost patch (data_api_client) arrives first.
    """

    def test_should_be_a_404_if_user_not_found(self, data_api_client, _user_info):
        data_api_client.get_user.return_value = None
        response = self.client.get('/admin/users?email_address=some@email.com')
        self.assertEqual(response.status_code, 404)

        document = html.fromstring(response.get_data(as_text=True))
        page_title = document.xpath(
            '//p[@class="banner-message"]//text()')[0].strip()
        self.assertEqual("Sorry, we couldn't find an account with that email address", page_title)

    def test_should_be_a_404_if_no_email_provided(self, data_api_client, _user_info):
        # Empty email_address query value — same 404 banner expected.
        data_api_client.get_user.return_value = None
        response = self.client.get('/admin/users?email_address=')
        self.assertEqual(response.status_code, 404)

        document = html.fromstring(response.get_data(as_text=True))
        page_title = document.xpath(
            '//p[@class="banner-message"]//text()')[0].strip()
        self.assertEqual("Sorry, we couldn't find an account with that email address", page_title)

    def test_should_be_a_404_if_no_email_param_provided(self, data_api_client, _user_info):
        # Missing email_address parameter entirely.
        data_api_client.get_user.return_value = None
        response = self.client.get('/admin/users')
        self.assertEqual(response.status_code, 404)

        document = html.fromstring(response.get_data(as_text=True))
        page_title = document.xpath(
            '//p[@class="banner-message"]//text()')[0].strip()
        self.assertEqual("Sorry, we couldn't find an account with that email address", page_title)

    def test_should_show_buyer_user(self, data_api_client, _user_info):
        # A buyer has no supplier: strip the supplier key from the fixture.
        buyer = self.load_example_listing("user_response")
        buyer.pop('supplier', None)
        buyer['users']['role'] = 'buyer'
        data_api_client.get_user.return_value = buyer
        _user_info.return_value = (None, None)
        response = self.client.get('/admin/users?email_address=test.user@sme.com')
        self.assertEqual(response.status_code, 200)

        document = html.fromstring(response.get_data(as_text=True))

        email_address = document.xpath(
            '//header[@class="page-heading page-heading-without-breadcrumb"]//h1/text()')[0].strip()
        self.assertEqual("test.user@sme.com", email_address)

        # Summary rows are checked positionally: name, role, supplier,
        # last login, last password change, locked flag.
        name = document.xpath(
            '//tr[@class="summary-item-row"]//td/span/text()')[0].strip()
        self.assertEqual("Test User", name)

        role = document.xpath(
            '//tr[@class="summary-item-row"]//td/span/text()')[1].strip()
        self.assertEqual("buyer", role)

        supplier = document.xpath(
            '//tr[@class="summary-item-row"]//td/span/text()')[2].strip()
        self.assertEqual('', supplier)

        last_login = document.xpath(
            '//tr[@class="summary-item-row"]//td/span/text()')[3].strip()
        self.assertEqual('19:33 23-07-2015', last_login)

        last_password_changed = document.xpath(
            '//tr[@class="summary-item-row"]//td/span/text()')[4].strip()
        self.assertEqual('22:46 29-06-2015', last_password_changed)

        locked = document.xpath(
            '//tr[@class="summary-item-row"]//td/span/text()')[5].strip()
        self.assertEqual('No', locked)

        button = document.xpath(
            '//input[@class="button-destructive"]')[1].value
        self.assertEqual('Deactivate', button)

    def test_should_show_supplier_user(self, data_api_client, _user_info):
        buyer = self.load_example_listing("user_response")
        _user_info.return_value = (None, None)
        data_api_client.get_user.return_value = buyer
        response = self.client.get('/admin/users?email_address=test.user@sme.com')
        self.assertEqual(response.status_code, 200)

        document = html.fromstring(response.get_data(as_text=True))

        email_address = document.xpath(
            '//header[@class="page-heading page-heading-without-breadcrumb"]//h1/text()')[0].strip()
        self.assertEqual("test.user@sme.com", email_address)

        role = document.xpath(
            '//tr[@class="summary-item-row"]//td/span/text()')[1].strip()
        self.assertEqual("supplier", role)

        # Supplier name links through to the supplier admin page.
        supplier = document.xpath(
            '//tr[@class="summary-item-row"]//td/span/a/text()')[0].strip()
        self.assertEqual('SME Corp UK Limited', supplier)

        supplier_link = document.xpath(
            '//tr[@class="summary-item-row"]//td/span/a')[0]
        self.assertEqual('/admin/suppliers?supplier_code=1000', supplier_link.attrib['href'])

    def test_should_show_unlock_button(self, data_api_client, _user_info):
        buyer = self.load_example_listing("user_response")
        buyer['users']['locked'] = True  # locked accounts get an Unlock form
        data_api_client.get_user.return_value = buyer
        _user_info.return_value = (None, None)
        response = self.client.get('/admin/users?email_address=test.user@sme.com')
        self.assertEqual(response.status_code, 200)

        document = html.fromstring(response.get_data(as_text=True))
        unlock_button = document.xpath(
            '//input[@class="button-secondary"]')[0].attrib['value']
        unlock_link = document.xpath(
            '//tr[@class="summary-item-row"]//td/span/form')[0]
        return_link = document.xpath(
            '//tr[@class="summary-item-row"]//td/span/form/input')[1]
        self.assertEqual('/admin/suppliers/users/999/unlock', unlock_link.attrib['action'])
        self.assertEqual('Unlock', unlock_button)
        self.assertEqual('/admin/users?email_address=test.user%40sme.com', return_link.attrib['value'])

    @pytest.mark.skip
    def test_should_show_password_reset(self, data_api_client, _user_info):
        # Skipped — presumably the reset-password link is not rendered in
        # the current template; confirm before re-enabling.
        buyer = self.load_example_listing("user_response")
        data_api_client.get_user.return_value = buyer
        _user_info.return_value = (None, None)
        response = self.client.get('/admin/users?email_address=test.user@sme.com')
        self.assertEqual(response.status_code, 200)

        document = html.fromstring(response.get_data(as_text=True))
        reset_link = document.xpath(
            '//tr[@class="summary-item-row"]//a[text()="Reset Password"]')[0]
        self.assertEqual('/admin/suppliers/users/999/reset_password', reset_link.attrib['href'])

    def test_should_show_deactivate_button(self, data_api_client, _user_info):
        buyer = self.load_example_listing("user_response")
        data_api_client.get_user.return_value = buyer
        _user_info.return_value = (None, None)
        response = self.client.get('/admin/users?email_address=test.user@sme.com')
        self.assertEqual(response.status_code, 200)

        document = html.fromstring(response.get_data(as_text=True))
        deactivate_button = document.xpath(
            '//input[@class="button-destructive"]')[1].attrib['value']
        deactivate_link = document.xpath(
            '//tr[@class="summary-item-row"]//td/span/form')[1]
        return_link = document.xpath(
            '//tr[@class="summary-item-row"]//td/span/form/input')[3]
        self.assertEqual('/admin/suppliers/users/999/deactivate', deactivate_link.attrib['action'])
        self.assertEqual('Deactivate', deactivate_button)
        self.assertEqual('/admin/users?email_address=test.user%40sme.com', return_link.attrib['value'])
@mock.patch('app.main.views.users.data_api_client')
class TestUsersExport(LoggedInApplicationTest):
    """Tests for the admin "export users as CSV" page and download."""

    # Framework statuses that must NOT be offered for export.
    _bad_statuses = ['coming', 'expired']

    _valid_framework = {
        'name': 'G-Cloud 7',
        'slug': 'g-cloud-7',
        'status': 'live'
    }

    _invalid_framework = {
        'name': 'G-Cloud 8',
        'slug': 'g-cloud-8',
        'status': 'coming'
    }

    def _return_get_user_export_response(self, data_api_client, frameworks):
        # Helper: stub the framework list and fetch the export form page.
        data_api_client.find_frameworks.return_value = {"frameworks": frameworks}
        return self.client.get('/admin/users/download')

    def _assert_things_about_frameworks(self, response, frameworks):
        """Check the form only offers frameworks with an exportable status."""

        def _assert_things_about_valid_frameworks(options, frameworks):
            valid_frameworks = [
                framework for framework in frameworks if framework['status'] not in self._bad_statuses]
            assert len(frameworks) == len(valid_frameworks)

        def _assert_things_about_invalid_frameworks(options, frameworks):
            invalid_frameworks = [
                framework for framework in frameworks if framework['status'] in self._bad_statuses]
            for framework in invalid_frameworks:
                assert framework['slug'] not in [option.xpath('input')[0].attrib['value'] for option in options]
                assert framework['name'] not in ["".join(option.xpath('text()')).strip() for option in options]

        document = html.fromstring(response.get_data(as_text=True))
        options = document.xpath(
            '//fieldset[@id="framework_slug"]/label')

        assert response.status_code == 200
        _assert_things_about_valid_frameworks(options, frameworks)
        _assert_things_about_invalid_frameworks(options, frameworks)

    def _return_user_export_response(self, data_api_client, framework, users, framework_slug=None):
        # Helper: stub the export API and request the CSV download.
        if framework_slug is None:
            framework_slug = framework['slug']

        # collection of users is modified in the route
        data_api_client.export_users.return_value = {"users": copy.copy(users)}
        data_api_client.find_frameworks.return_value = {"frameworks": [framework]}

        if framework_slug == framework['slug']:
            data_api_client.get_framework.return_value = {"frameworks": framework}
        else:
            data_api_client.get_framework.side_effect = HTTPError(mock.Mock(status_code=404))

        # NOTE(review): this URL literal ('/admin/users/download/<_valid_framework')
        # looks malformed — confirm against the route definition.
        return self.client.get(
            '/admin/users/download/<_valid_framework',
            data={'framework_slug': framework_slug}
        )

    def _assert_things_about_user_export(self, response, users):
        """Check the CSV has a header row plus one row per user, with all values."""
        rows = [line.split(",") for line in response.get_data(as_text=True).splitlines()]

        assert len(rows) == len(users) + 1
        if users:
            assert sorted(list(users[0].keys())) == sorted(rows[0])
            for index, user in enumerate(users):
                assert sorted([six.text_type(val) for val in list(user.values())]) == sorted(rows[index+1])

    def test_get_form_with_valid_framework(self, data_api_client):
        frameworks = [self._valid_framework]
        response = self._return_get_user_export_response(data_api_client, frameworks)
        assert response.status_code == 200
        self._assert_things_about_frameworks(response, frameworks)

    def test_user_export_with_one_user(self, data_api_client):
        framework = self._valid_framework
        users = [{
            "application_result": "fail",
            "application_status": "no_application",
            "declaration_status": "unstarted",
            "framework_agreement": False,
            "supplier_id": 1,
            "user_email": "test.user@sme.com",
            "user_name": "Tess User"
        }]
        response = self._return_user_export_response(data_api_client, framework, users)
        assert response.status_code == 200

    def test_download_csv(self, data_api_client):
        framework = self._valid_framework
        users = [{
            "application_result": "fail",
            "application_status": "no_application",
            "declaration_status": "unstarted",
            "framework_agreement": False,
            "supplier_id": 1,
            "user_email": "test.user@sme.com",
            "user_name": "Tess User"
        }]
        response = self._return_user_export_response(data_api_client, framework, users)
        assert response.status_code == 200
        self._assert_things_about_user_export(response, users)
| 42.452632
| 112
| 0.659889
| 11,788
| 0.974295
| 0
| 0
| 11,939
| 0.986776
| 0
| 0
| 3,020
| 0.249607
|
b223d904c6830f2000cc2bff850aed8bde569ecc
| 3,460
|
py
|
Python
|
code/makestellar.py
|
gitter-badger/DHOD
|
f2f084fea6c299f95d15cbea5ec94d404bc946b5
|
[
"MIT"
] | null | null | null |
code/makestellar.py
|
gitter-badger/DHOD
|
f2f084fea6c299f95d15cbea5ec94d404bc946b5
|
[
"MIT"
] | null | null | null |
code/makestellar.py
|
gitter-badger/DHOD
|
f2f084fea6c299f95d15cbea5ec94d404bc946b5
|
[
"MIT"
] | null | null | null |
import numpy as np
import sys, os
from scipy.optimize import minimize
import json
import matplotlib.pyplot as plt
#
sys.path.append('./utils')
import tools
#
# Simulation configuration: box size, mesh size, number of time steps.
bs, ncf, stepf = 400, 512, 40
path = '../data/z00/'
ftype = 'L%04d_N%04d_S%04d_%02dstep/'
ftypefpm = 'L%04d_N%04d_S%04d_%02dstep_fpm/'
# Illustris group masses; rows 1 and 2 are presumably halo and stellar mass
# in units of 1e10 Msun — confirm against the producer of the .npy file.
mm = np.load('../data/Illustris_halo_groupmass.npy').T
mh = mm[1]*1e10
ms = mm[2]*1e10
def getstellar(mbins):
    """Bin the Illustris halos (module global ``mm``) by halo mass.

    Returns, per bin: halo count, mean stellar mass, std of the log
    stellar mass, and mean halo mass.  The returned arrays have the same
    length as ``mbins``; the final entry stays zero because the loop runs
    over ``mbins.size - 1`` bins.
    """
    scount, smass, lsstd = np.zeros_like(mbins), np.zeros_like(mbins), np.zeros_like(mbins)
    hmass = np.zeros_like(mbins)
    for i in range(mbins.size-1):
        # NOTE(review): this first branch is unreachable — i only reaches
        # mbins.size - 2, so the open-ended last bin is never selected.
        if i == mbins.size-1: mask = (mm[1]*1e10 > mbins[i])
        else: mask = (mm[1]*1e10 > mbins[i]) & (mm[1]*1e10<mbins[i+1])
        scount[i] = mask.sum()
        smass[i] = mm[2][mask].mean()*1e10
        #sstd[i] = mm[2][mask].std()*1e10
        # Scatter is measured in log space (std of ln M*).
        lsstd[i] = np.log(mm[2][mask]*1e10).std()
        hmass[i] = mm[1][mask].mean()*1e10
    return scount, smass, lsstd, hmass
def fitstellar(p, smass, hmass, rety=False):
    """Power-law model of stellar mass vs halo mass in log space.

    With ``rety`` True, return the prediction exp(p1*ln(hmass) + p0);
    otherwise return the sum of squared log-residuals over all but the
    last bin (the objective used by the minimizer).
    """
    intercept, slope = p
    log_model = slope * np.log(hmass) + intercept
    if rety:
        return np.exp(log_model)
    residuals = np.log(smass[:-1]) - log_model[:-1]
    return sum(residuals ** 2)
def fitscatter(p, hmass, rstd, rety=False):
    """Quadratic model (in ln halo mass) of the log-stellar-mass scatter.

    With ``rety`` True, return the model values; otherwise return the sum
    of squared residuals against ``rstd`` over all but the last bin.
    """
    c0, c1, c2 = p
    log_mass = np.log(hmass)
    model = c0 + c1 * log_mass + c2 * log_mass ** 2
    if rety:
        return model
    return sum((model[:-1] - rstd[:-1]) ** 2)
def dofit():
    """Fit the mean stellar-mass relation and its scatter over 1e12-1e14,
    then save both parameter sets (plus the bins used) to
    ../data/stellar.json."""
    mbins = 10**np.arange(12, 14, 0.1)
    scount, smass, lsstd, hmass = getstellar(mbins)
    # Least-squares fit of ln(M*) = p1*ln(Mh) + p0 via fitstellar's objective.
    pp = minimize(lambda p: fitstellar(p, smass, hmass), [1, 1])
    #pps = minimize(lambda p: fitscatter(p, hmass, sstd/smass), [0.3, 0.0, .0])
    # Quadratic fit of the log-space scatter.
    pps = minimize(lambda p: fitscatter(p, hmass, lsstd), [0.3, 0.0, .0])
    fname = '../data/stellar.json'
    data = {'stellarfit':list(pp.x), 'scatterfit':list(pps.x)}
    data['mbins'] = list(mbins)
    data['NOTE'] = 'Fit b/w range 1e12, 1e14'
    with open(fname, "w") as write_file:
        json.dump(data, write_file, indent=4)
def scattercatalog(seed, mmin=1e12):
    """Assign stellar masses to the FastPM halos of one seed and save them.

    Loads the saved mean/scatter fits (stellar.json), draws log-normal
    stellar masses around the mean relation, writes the result next to
    the catalog, and saves a comparison plot against Illustris.  Halos
    below ``mmin`` get the sentinel value -999.
    """
    # FoF halo masses (first entry dropped), converted from 1e10 Msun units.
    hmass = tools.readbigfile(path + ftype%(bs, ncf, seed, stepf) + 'FOF/Mass/')[1:].reshape(-1)*1e10
    print(hmass.max()/1e12, hmass.min()/1e12)

    with open('../data/stellar.json', "r") as read_file:
        p = json.load(read_file)
    mbins = p['mbins']
    pm = p['stellarfit']
    ps = p['scatterfit']
    print(pm, ps)

    smassmean = fitstellar(pm, None, hmass, True)
    smasssig = fitscatter(ps, hmass, None, True)
    print(fitstellar(pm, None, 1e12,True))
    print(fitscatter(ps, 1e12, None, True))
    # Floor the scatter so the log-normal draw never degenerates.
    smasssig[smasssig < 0.1] = 0.1
    # Seed numpy with the catalog seed so the draw is reproducible per run.
    np.random.seed(seed)
    scatter = np.random.normal(scale=smasssig)
    smass = np.exp(np.log(smassmean) + scatter)
    mask = hmass >= mmin
    smass[~mask] = -999  # sentinel for halos below the mass cut
    np.save(path + ftype%(bs, ncf, seed, stepf) + '/stellarmass', smass)

    # Side-by-side plot: FastPM draw vs the Illustris relation.
    fig, ax = plt.subplots(1, 2, figsize=(9, 4), sharex=True, sharey=True)
    axis = ax[0]
    axis.plot(hmass[mask], smass[mask], '.')
    axis.plot(hmass[mask], smassmean[mask], '.')
    axis.loglog()
    axis.grid()
    axis.set_title('FastPM')
    axis = ax[1]
    axis.plot(mh[mh>mmin], ms[mh>mmin], '.')
    axis.plot(hmass[mask], smassmean[mask], '.')
    axis.loglog()
    axis.grid()
    axis.set_title('Illustris')
    plt.savefig(path + ftype%(bs, ncf, seed, stepf) + '/stellarmass.png')
    plt.close()
if __name__ == '__main__':
    # Run the stellar-mass fit only when it has not been saved already.
    # Bug fix: the original additionally called dofit() unconditionally
    # right after this check, redoing the fit every run and defeating
    # the existence test; that duplicate call is removed.  The message
    # typo "exits" is also corrected to "exists".
    if os.path.isfile('../data/stellar.json'):
        print('Stellar fit exists')
    else:
        dofit()
    # Generate scattered stellar-mass catalogs for each simulation seed.
    for seed in range(100, 1000, 100):
        scattercatalog(seed)
| 29.827586
| 101
| 0.601734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 502
| 0.145087
|
b22419f7d9aaad90e17b3010a06a273060fa238e
| 1,729
|
py
|
Python
|
mem_py/login/forms.py
|
Ciuel/Proyecto-Django
|
a466659fa7e84e77d0692f4f3c3f8c5f541079d4
|
[
"MIT"
] | null | null | null |
mem_py/login/forms.py
|
Ciuel/Proyecto-Django
|
a466659fa7e84e77d0692f4f3c3f8c5f541079d4
|
[
"MIT"
] | null | null | null |
mem_py/login/forms.py
|
Ciuel/Proyecto-Django
|
a466659fa7e84e77d0692f4f3c3f8c5f541079d4
|
[
"MIT"
] | 1
|
2021-07-17T19:41:40.000Z
|
2021-07-17T19:41:40.000Z
|
from django import forms
from django.contrib.auth.forms import UserCreationForm,AuthenticationForm
from .models import UserProfile
# Create your forms here
class LoginForm(AuthenticationForm):
    """Login form with Spanish placeholders and stripped labels/help text."""

    def __init__(self, request, *args, **kwargs):
        # Bug fix: the original called super().__init__(self, request, ...),
        # passing the instance twice — with super().__init__ the instance is
        # already bound, so every argument was shifted by one (request
        # received self, data received request).
        super().__init__(request, *args, **kwargs)
        self.fields['username'].widget.attrs.update({'placeholder':'Nombre de Usuario'})
        self.fields['password'].widget.attrs.update({'placeholder':'Contraseña'})
        self.error_messages['invalid_login']="Contraseña o Usuario incorrecto"
        # Placeholders carry the field info, so hide labels and help texts.
        for fieldname in ['username','password']:
            self.fields[fieldname].help_text = None
            self.fields[fieldname].label=""

    class Meta:
        model= UserProfile
        fields=[
            "username",
            "password",
        ]
class RegisterForm(UserCreationForm):
    """Registration form with Spanish placeholders and stripped labels."""

    def __init__(self, *args, **kwargs):
        # Bug fix: the original used super(UserCreationForm, self), which
        # starts MRO lookup *after* UserCreationForm and therefore skips
        # UserCreationForm.__init__ entirely.  Use the zero-argument form
        # so the parent initialiser actually runs.
        super().__init__(*args, **kwargs)
        self.fields['username'].widget.attrs.update({'placeholder':'Nombre de Usuario'})
        self.fields['email'].widget.attrs.update({'placeholder':'Email'})
        self.fields['password1'].widget.attrs.update({'placeholder':'Contraseña'})
        self.fields['password2'].widget.attrs.update({'placeholder':'Repetir Contraseña'})
        self.error_messages["password_mismatch"]="Las contraseñas no coinciden"
        # Placeholders carry the field info, so hide labels and help texts.
        for fieldname in ['username','email','password1', 'password2']:
            self.fields[fieldname].help_text = None
            self.fields[fieldname].label=""

    class Meta:
        model= UserProfile
        fields=[
            "username",
            "email",
            "password1",
            "password2",
        ]
| 39.295455
| 90
| 0.632736
| 1,572
| 0.906574
| 0
| 0
| 0
| 0
| 0
| 0
| 475
| 0.273933
|
b224af29a62a1d5910e33f4af9c4dfcede1d3b53
| 556
|
py
|
Python
|
diagrams/alibabacloud/analytics.py
|
bry-c/diagrams
|
4c377a073e0aa8fe41934195da7a0869f31c58eb
|
[
"MIT"
] | 17,037
|
2020-02-03T01:30:30.000Z
|
2022-03-31T18:09:15.000Z
|
diagrams/alibabacloud/analytics.py
|
bry-c/diagrams
|
4c377a073e0aa8fe41934195da7a0869f31c58eb
|
[
"MIT"
] | 529
|
2020-02-03T10:43:41.000Z
|
2022-03-31T17:33:08.000Z
|
diagrams/alibabacloud/analytics.py
|
bry-c/diagrams
|
4c377a073e0aa8fe41934195da7a0869f31c58eb
|
[
"MIT"
] | 1,068
|
2020-02-05T11:54:29.000Z
|
2022-03-30T23:28:55.000Z
|
# This module is automatically generated by autogen.sh. DO NOT EDIT.

from . import _AlibabaCloud


class _Analytics(_AlibabaCloud):
    """Base for all Alibaba Cloud analytics diagram nodes."""
    # Category name and icon directory shared by every subclass below.
    _type = "analytics"
    _icon_dir = "resources/alibabacloud/analytics"


class AnalyticDb(_Analytics):
    _icon = "analytic-db.png"


class ClickHouse(_Analytics):
    _icon = "click-house.png"


class DataLakeAnalytics(_Analytics):
    _icon = "data-lake-analytics.png"


class ElaticMapReduce(_Analytics):
    _icon = "elatic-map-reduce.png"


class OpenSearch(_Analytics):
    _icon = "open-search.png"


# Aliases
| 17.375
| 68
| 0.726619
| 428
| 0.769784
| 0
| 0
| 0
| 0
| 0
| 0
| 221
| 0.397482
|
b224f08977080d30a8248e3383147fd3fad725df
| 1,487
|
py
|
Python
|
numpy_examples/basic_5_structured_arrays.py
|
stealthness/sklearn-examples
|
e755fd3804cc15dd28ff2a38e299e80c83565d0a
|
[
"BSD-3-Clause"
] | null | null | null |
numpy_examples/basic_5_structured_arrays.py
|
stealthness/sklearn-examples
|
e755fd3804cc15dd28ff2a38e299e80c83565d0a
|
[
"BSD-3-Clause"
] | null | null | null |
numpy_examples/basic_5_structured_arrays.py
|
stealthness/sklearn-examples
|
e755fd3804cc15dd28ff2a38e299e80c83565d0a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Purpose of this file is to give examples of structured arrays
This script is partially dirived from the LinkedIn learning course
https://www.linkedin.com/learning/numpy-data-science-essential-training/create-arrays-from-python-structures
"""
import numpy as np
person_data_def = [('name', 'S6'), ('height', 'f8'), ('weight', 'f8'), ('age', 'i8')]
# create a structured array
people_array = np.zeros(4, dtype=person_data_def)
print(f'The structured array is of type {type(people_array)}\n{people_array}')
# let us change some the data values
# note that any int for height or weight will processed as default
people_array[2] = ('Cat', 130, 56, 22)
people_array[0] = ('Amy', 126, 60, 25)
people_array[1] = ('Bell', 146, 60, 20)
people_array[3] = ('Amy', 140, 80, 55)
print(people_array)
# we can print the information for name, height, weight and age
ages = people_array['age']
print(f'the ages of the people are {ages}')
print(f'The names of the people are {people_array["name"]}')
print(f'The heights of the people are {people_array["height"]}')
print(f'The weights of the people are {people_array["weight"]}')
youthful = ages/2
print(f'The young ages are {youthful}')
# Note that youthful does not change the original data
print(f'The original ages are {ages}')
print(people_array[['name', 'age']])
# Record array is a thin wrapper around structured array
person_record_array = np.rec.array([('a', 100, 80, 50), ('b', 190, 189, 20)])
print(type(person_record_array[0]))
| 33.795455
| 108
| 0.718897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 972
| 0.653665
|
b225b39eea6ed7af22b6d9216dba4156c3fa8839
| 5,716
|
py
|
Python
|
scripts/ebook_meta_rename.py
|
mcxiaoke/python-labs
|
61c0a1f91008ba82fc2f5a5deb19e60aec9df960
|
[
"Apache-2.0"
] | 7
|
2016-07-08T10:53:13.000Z
|
2021-07-20T00:20:10.000Z
|
scripts/ebook_meta_rename.py
|
mcxiaoke/python-labs
|
61c0a1f91008ba82fc2f5a5deb19e60aec9df960
|
[
"Apache-2.0"
] | 1
|
2021-05-11T05:20:18.000Z
|
2021-05-11T05:20:18.000Z
|
scripts/ebook_meta_rename.py
|
mcxiaoke/python-labs
|
61c0a1f91008ba82fc2f5a5deb19e60aec9df960
|
[
"Apache-2.0"
] | 7
|
2016-10-31T06:31:54.000Z
|
2020-08-31T20:55:00.000Z
|
'''
File: ebook_fix.py
Created: 2021-03-06 15:46:09
Modified: 2021-03-06 15:46:14
Author: mcxiaoke (github@mcxiaoke.com)
License: Apache License 2.0
'''
import sys
import os
from pprint import pprint
from types import new_class
from mobi import Mobi
from ebooklib import epub
import argparse
from multiprocessing.dummy import Pool
from functools import partial

# Status codes returned by BookParser.check()/rename().
RET_OK = 0
RET_IGNORE = -1  # file already has the correct name
RET_SKIP = -2  # a file with the target name already exists
RET_PARSE_ERROR = -101  # metadata could not be parsed
RET_OS_ERROR = -102  # os.rename failed

# Extensions recognised as ebooks when scanning a folder.
BOOK_FORMATS = ('.mobi', '.azw', '.azw3', '.epub')
class BookParser:
    """Base class: derive an ebook's canonical file name from its metadata.

    Subclasses implement parse() to read the book title and set
    self.dst / self.dst_name; check() and rename() then compare the
    current file name against the parsed one and act accordingly.
    """

    def __init__(self, src):
        self.src = src
        self.src_dir = os.path.dirname(src)
        self.src_name = os.path.basename(src)
        self.dst = None        # full target path, filled in by parse()
        self.dst_name = None   # target file name, filled in by parse()
        self.parse()

    def parse(self):
        # Bug fix: the original did raise('subclass must override this'),
        # which raises a TypeError ("exceptions must derive from
        # BaseException") instead of signalling the real problem.
        raise NotImplementedError('subclass must override this')

    def check(self):
        """Dry run: report what rename() would do without touching the file.

        Returns a RET_* code on parse failure / no-op / existing target.
        """
        if not self.dst_name or not self.dst:
            return RET_PARSE_ERROR
        elif self.dst_name == self.src_name:
            return RET_IGNORE
        elif os.path.exists(self.dst):
            return RET_SKIP
        else:
            print('Name Before:\t{}'.format(self.src_name))
            print('Name After:\t{}'.format(self.dst_name))

    def rename(self):
        """Rename the file to the parsed title; return a RET_* status code."""
        if not self.dst_name or not self.dst:
            return RET_PARSE_ERROR
        elif self.dst_name == self.src_name:
            return RET_IGNORE
        elif os.path.exists(self.dst):
            return RET_SKIP
        else:
            try:
                print('Rename To:\t{}'.format(self.dst_name))
                os.rename(self.src, self.dst)
                return RET_OK
            except Exception as e:
                print("Rename Error:\t{}".format(e))
                return RET_OS_ERROR
class MobiParser(BookParser):
    """Derive the target file name from a Mobi/AZW file's metadata."""
    # using lib mobi-python

    def __init__(self, src):
        super().__init__(src)

    def parse(self):
        # Keep the original extension (lower-cased) on the new name.
        base, ext = os.path.splitext(self.src_name)
        ext = ext and ext.lower()
        try:
            book = Mobi(self.src)
            book.parse()
            # 'Full Name' is stored as bytes in the mobi header.
            title = book.config['mobi']['Full Name'].decode('utf8')
            self.dst_name = '{}{}'.format(title, ext)
            self.dst = os.path.join(self.src_dir, self.dst_name)
            # print('Mobi Title:\t{}'.format(self.dst_name))
        except Exception as e:
            # Leave dst/dst_name as None so check()/rename() report RET_PARSE_ERROR.
            print("Parse Error:\t{}".format(e))
class EpubParser(BookParser):
    """Derive the target file name from an epub's title (via ebooklib)."""
    # using lib

    def __init__(self, src):
        super().__init__(src)

    def parse(self):
        # Keep the original extension (lower-cased) on the new name.
        base, ext = os.path.splitext(self.src_name)
        ext = ext and ext.lower()
        try:
            book = epub.read_epub(self.src)
            title = book.title
            self.dst_name = '{}{}'.format(title, ext)
            self.dst = os.path.join(self.src_dir, self.dst_name)
            # print('EPub Title:\t{}'.format(self.dst_name))
        except Exception as e:
            # Leave dst/dst_name as None so check()/rename() report RET_PARSE_ERROR.
            print("Parse Error:", e)
def list_files(source, recrusily=False, ext_filter=None):
    """Collect entry paths under *source*.

    With ``recrusily`` False only the direct children of *source* are
    listed; otherwise the whole tree is walked with os.walk.  When
    ``ext_filter`` is given, only names whose lower-cased extension is in
    it are kept (names without an extension never match).
    """
    def keep(name):
        # No filter means every entry matches.
        if not ext_filter:
            return True
        ext = os.path.splitext(name)[1]
        return bool(ext) and ext.lower() in ext_filter

    if not recrusily:
        return [os.path.join(source, name)
                for name in os.listdir(source) if keep(name)]

    collected = []
    for root, _dirs, names in os.walk(source):
        collected.extend(os.path.join(root, name)
                         for name in names if keep(name))
    return collected
def rename_one_book(fname, idx, total, execute=False):
    """Parse one ebook and rename it (execute=True) or dry-run (False).

    Dispatches on extension: Mobi/AZW formats go through MobiParser,
    .epub through EpubParser; anything else is reported and skipped.
    idx/total are only used for progress output.
    """
    print('Task({}/{}):\t{}'.format(idx, total, fname))
    name = os.path.basename(fname)
    _, ext = os.path.splitext(name)
    if ext in ('.mobi', '.azw', '.azw3'):
        book = MobiParser(fname)
    elif ext == '.epub':
        book = EpubParser(fname)
    else:
        print('Unknown Format: {}'.format(name))
        book = None
    if book:
        if execute:
            book.rename()
        else:
            book.check()
def rename_books(source, execute=False, recrusily=False):
    """Rename (or dry-run with execute=False) every ebook under *source*.

    Work is fanned out over 8 worker threads (multiprocessing.dummy.Pool
    is a thread pool); Ctrl-C terminates the pool early.
    """
    print('=== Source: {} ==='.format(source))
    files = list_files(source, recrusily, BOOK_FORMATS)
    total = len(files)
    p = Pool(8)
    try:
        for idx, fname in enumerate(files):
            # print('Processing({}/{}):\t{}'.format(idx, total, fname))
            # partial_rename_one = partial(rename_one_book, execute=execute)
            # rename_one_book(fname, execute)
            p.apply_async(rename_one_book, (fname, idx, total, execute))
        p.close()
        p.join()
    except KeyboardInterrupt:
        print('Warning: User Ctrl-C inerrupt, abort.')
        p.terminate()
        # sys.exit(1)
if __name__ == '__main__':
    # CLI: positional source folder; -e to actually rename (default is a
    # dry run); -r to recurse into subfolders.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'source', help='Source folder contains ebooks')
    parser.add_argument('-e', '--execute', action='store_true',
                        help='Rename all ebooks [default:False]')
    parser.add_argument('-r', '--recrusily', action='store_true',
                        help='Process books in source folder recursively [default:False]')
    args = parser.parse_args()
    print(args)
    rename_books(args.source, args.execute, args.recrusily)
| 31.755556
| 90
| 0.574003
| 2,636
| 0.461162
| 0
| 0
| 0
| 0
| 0
| 0
| 1,159
| 0.202764
|
b2268ed3f38975678da47248462c6f15c287a3c3
| 387
|
py
|
Python
|
sources/boltun/util/collections.py
|
meiblorn/boltun
|
d141f555b4f0ed604d8d71883c0bc8811e74370e
|
[
"MIT"
] | 1
|
2019-12-06T04:19:37.000Z
|
2019-12-06T04:19:37.000Z
|
sources/boltun/util/collections.py
|
meiblorn/boltun
|
d141f555b4f0ed604d8d71883c0bc8811e74370e
|
[
"MIT"
] | null | null | null |
sources/boltun/util/collections.py
|
meiblorn/boltun
|
d141f555b4f0ed604d8d71883c0bc8811e74370e
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import attr
@attr.s
class Stack(object):
    """Minimal LIFO stack backed by a Python list (attrs-managed)."""

    # Backing list; double underscore name-mangles it to _Stack__items.
    __items = attr.ib(type=list, factory=list)

    def push(self, item):
        """Place `item` on top of the stack."""
        self.__items.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.__items.pop()

    def peek(self):
        """Return the top item without removing it (IndexError when empty)."""
        return self.__items[-1]

    def is_empty(self):
        """Return True when the stack holds no items."""
        return len(self.__items) == 0
| 18.428571
| 64
| 0.648579
| 298
| 0.770026
| 0
| 0
| 306
| 0.790698
| 0
| 0
| 0
| 0
|
b22a4ac4d8d41f1f54853d90f7a7aa435b4d6a78
| 41
|
py
|
Python
|
test/python/echo_hi_then_error.py
|
WrkMetric/Python--NodeJS
|
502bb3d81152ef9a16fb618f71f9e9fc43777349
|
[
"MIT",
"Unlicense"
] | 1,869
|
2015-01-07T18:06:52.000Z
|
2022-03-30T08:35:39.000Z
|
test/python/echo_hi_then_error.py
|
PavanAnanthSharma/python-shell
|
502bb3d81152ef9a16fb618f71f9e9fc43777349
|
[
"MIT",
"Unlicense"
] | 252
|
2015-01-08T17:33:58.000Z
|
2022-03-31T09:04:38.000Z
|
test/python/echo_hi_then_error.py
|
PavanAnanthSharma/python-shell
|
502bb3d81152ef9a16fb618f71f9e9fc43777349
|
[
"MIT",
"Unlicense"
] | 238
|
2015-03-22T11:22:30.000Z
|
2022-03-15T22:01:44.000Z
|
# Test fixture: print one line on stdout, then terminate with a known
# exception so the calling test can assert on the error output.
print('hi')
raise Exception('fibble-fah')
| 20.5
| 29
| 0.731707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 0.390244
|
b22a61d2c3956ab8bd21246cdd7e1d90a774793b
| 105
|
py
|
Python
|
lamdata_baisal89/df_util.py
|
Baisal89/ds_8_lamdata
|
67911b6f15ae6230a65c439a978303ac4b492075
|
[
"MIT"
] | null | null | null |
lamdata_baisal89/df_util.py
|
Baisal89/ds_8_lamdata
|
67911b6f15ae6230a65c439a978303ac4b492075
|
[
"MIT"
] | 1
|
2020-03-31T11:12:26.000Z
|
2020-03-31T11:12:26.000Z
|
lamdata_baisal89/df_util.py
|
Baisal89/ds_8_lamdata
|
67911b6f15ae6230a65c439a978303ac4b492075
|
[
"MIT"
] | null | null | null |
"""
Utility functions for working with DataFrame
"""
import pandas
TEST_DF = pandas.DataFrame([1,2,3])
| 13.125
| 44
| 0.72381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 52
| 0.495238
|
b22aabd883c6bf5301a8cac5ec9620f3e682a650
| 2,160
|
py
|
Python
|
lab05/parsePhoneNrs.py
|
peter201943/pjm349-CS265-winter2019
|
704ffa8fe0a51795670b6c2b40b153291846fe0b
|
[
"MIT"
] | null | null | null |
lab05/parsePhoneNrs.py
|
peter201943/pjm349-CS265-winter2019
|
704ffa8fe0a51795670b6c2b40b153291846fe0b
|
[
"MIT"
] | null | null | null |
lab05/parsePhoneNrs.py
|
peter201943/pjm349-CS265-winter2019
|
704ffa8fe0a51795670b6c2b40b153291846fe0b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
#Peter J. Mangelsdorf
#
# Kurt Schmidt
# 7/06
#
#
# parsePhoneNrs.py - an example of 'grouping' - extracting parts of a match
#
# Python 3.5.2
# on Linux 4.4.0-36-generic x86_64
#
# Demonstrates: regexp, re, search, groups
#
# Usage: By default, reads telNrs.txt . You may supply a different filename
#
# Notes:
# The pattern:
# Note that it is not perfect, but allows a bit of leeway in how we
# write a phone #. No extensions.
# Of course, only handles US-style numbers
#
# EDITOR: cols=120, tabstop=2
#
import sys
import re
stderr = sys.stderr  # shorthand alias for the error stream
DEF_A_CODE = "None"  # printed when a phone number carries no area code
def usage() :
    """Print a short usage message for this script."""
    print( "Usage:" )
    print( "\t%s [<file>]" % sys.argv[0] )
def searchFile( fileName, pattern ) :
    """Print the parsed phone number (or NO MATCH) for every line of a file.

    fileName -- path to a text file, one candidate number per line
    pattern  -- compiled regexp whose groups are (area code, exchange, trunk);
                the area-code group may be None
    """
    # 'with' guarantees the handle is closed even if an exception escapes;
    # the original open()/close() pair leaked the file on error.
    with open( fileName, "r" ) as fh :
        for l in fh :
            l = l.strip()
            # Here's the actual search
            match = pattern.search( l )
            if not match :
                print( "NO MATCH: " + l )
                continue
            nr = match.groups()
            # From the pattern, group 0 may be null but 1 and 2 must exist.
            aCode = nr[0] if nr[0] is not None else DEF_A_CODE
            print( "area code: " + aCode +
                   ", exchange: " + nr[1] + ", trunk: " + nr[2] )
def main() : # stick filename
    """Entry point: parse phone numbers from telNrs.txt (or argv[1])."""
    if len( sys.argv ) < 2 : # no file name
        # assume telNrs.txt
        fileName = "telNrs.txt"
    else :
        fileName = sys.argv[1]

    # for legibility, Python supplies a 'verbose' pattern
    # requires a special flag (re.VERBOSE)
    #patString = '(\d{3})*[- .)]*(\d{3})[- .]*(\d{4})'
    patString = r'''
        # don't match beginning of string (takes care of 1-)
        (\d{3})? # area code (3 digits) (optional)
        [- .)]* # optional separator (any # of space, dash, or dot,
        # or closing ')' )
        (\d{3}) # exchange, 3 digits
        [- .]* # optional separator (any # of space, dash, or dot)
        (\d{4}) # number, 4 digits
        '''
    # Here is what the pattern would look like as a regular pattern:
    #patString = r'(\d{3})\D*(\d{3})\D*(\d{4})'

    # Instead of creating a temporary object each time, we will compile this
    # regexp once, and store this object
    pattern = re.compile( patString, re.VERBOSE )
    searchFile( fileName, pattern )

main()
| 24.545455
| 77
| 0.602315
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,447
| 0.669907
|
b22ba815e08036847020bc1f981a8232bfaa3cd2
| 567
|
py
|
Python
|
board/gpio.py
|
JonathanItakpe/realtime-office-light-dashboard
|
a783152bfee3e099d039c574ca1ea5635f79900d
|
[
"MIT"
] | 1
|
2017-09-04T14:05:59.000Z
|
2017-09-04T14:05:59.000Z
|
board/gpio.py
|
JonathanItakpe/realtime-office-light-dashboard
|
a783152bfee3e099d039c574ca1ea5635f79900d
|
[
"MIT"
] | null | null | null |
board/gpio.py
|
JonathanItakpe/realtime-office-light-dashboard
|
a783152bfee3e099d039c574ca1ea5635f79900d
|
[
"MIT"
] | null | null | null |
import RPi.GPIO as gpio
from pusher import Pusher
import time

pusher = Pusher(app_id=u'394325', key=u'cc900daae41222ea463e', secret=u'02ae96830fe03a094573', cluster=u'eu')

gpio.setmode(gpio.BCM)
gpio.setup(2, gpio.OUT)

# TODO: Map each gpio pin to a room eg 2: HNG Main
while True:
    # NOTE(review): gpio.OUT is a pin-direction constant, not an output
    # level; writing it as a value looks wrong -- gpio.LOW was probably
    # intended here.  Left as-is pending confirmation.
    gpio.output(2, gpio.OUT)
    # Fix: raw_input is Python 2 only; input() is the Python 3 equivalent.
    passcode = input('What is pi? ')
    if passcode == 'Awesome':
        gpio.output(2, gpio.HIGH)
        pusher.trigger(u'statuses', u'new_status', {u'room': u'HNG Main', u'status': u'Off'})
        time.sleep(4)
    else:
        # Fix: was `gpio.output(2. gpio.LOW)` -- a SyntaxError (dot typed
        # where a comma belongs).
        gpio.output(2, gpio.LOW)
        # Fix: Python 2 print statement converted to a function call.
        print('Wrong Password')
| 27
| 109
| 0.708995
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 206
| 0.363316
|
b22bc88a2d140b8c45a0fbac6ce8fea46af69f26
| 1,036
|
py
|
Python
|
courses/python/mflac/vuln_app/patched_admin.py
|
tank1st99/securitygym
|
2e4fbdf8002afbe51648706906f0db2c294362a6
|
[
"MIT"
] | 49
|
2021-05-20T12:49:28.000Z
|
2022-03-13T11:35:03.000Z
|
courses/python/mflac/vuln_app/patched_admin.py
|
tank1st99/securitygym
|
2e4fbdf8002afbe51648706906f0db2c294362a6
|
[
"MIT"
] | null | null | null |
courses/python/mflac/vuln_app/patched_admin.py
|
tank1st99/securitygym
|
2e4fbdf8002afbe51648706906f0db2c294362a6
|
[
"MIT"
] | 5
|
2021-05-20T12:58:34.000Z
|
2021-12-05T19:08:13.000Z
|
import functools
from flask import Blueprint
from flask import render_template
from flask import g
from flask import redirect
from flask import url_for
from flask import flash
from mflac.vuln_app.db import get_db
bp = Blueprint("admin", __name__, url_prefix="/admin")
def admin_required(view):
    """View decorator: only logged-in admins may proceed; everyone else is
    flashed a message and redirected to the index page."""
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        user = g.user
        if user is not None and user['is_admin']:
            return view(**kwargs)
        flash("Forbidden. You haven't enough permissions")
        return redirect(url_for("index.index"))
    return wrapped_view
def login_required(view):
    """View decorator: redirect anonymous users to the login page."""
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        if g.user is not None:
            return view(**kwargs)
        return redirect(url_for("auth.login"))
    return wrapped_view
@bp.route("/users_list")
@login_required
@admin_required
def users_list():
db = get_db()
users = db.execute("SELECT id, username, is_admin FROM user").fetchall()
return render_template('admin/users_list.html', users=users)
| 25.268293
| 76
| 0.697876
| 0
| 0
| 0
| 0
| 648
| 0.625483
| 0
| 0
| 170
| 0.164093
|
b22c071ff2cdd5ff5f1c6258280e4c7e042b6c35
| 3,708
|
py
|
Python
|
inpainting/common/eval_test.py
|
yuyay/ASNG-NAS
|
6b908dd25e49471e454d3c2b1e93638af2bd8ecc
|
[
"MIT"
] | 96
|
2019-05-22T19:04:39.000Z
|
2021-12-21T07:50:51.000Z
|
inpainting/common/eval_test.py
|
pawopawo/ASNG-NAS
|
a13c4828cfa9acc1eebd598dc1f88ee18e152159
|
[
"MIT"
] | 3
|
2019-11-11T02:13:24.000Z
|
2019-11-28T13:25:40.000Z
|
inpainting/common/eval_test.py
|
pawopawo/ASNG-NAS
|
a13c4828cfa9acc1eebd598dc1f88ee18e152159
|
[
"MIT"
] | 14
|
2019-05-24T07:50:15.000Z
|
2021-07-25T14:16:18.000Z
|
import os
import pandas as pd
import torch
from torch import nn
import common.utils as util
import scipy.misc as spmi
def save_img(img_np, file_name, out_dir='./'):
    """Save a channel-first float image array in [0, 1] as an image file.

    img_np    -- numpy array, assumed (C, H, W) from the transpose below
    file_name -- output file name, appended to out_dir
    out_dir   -- output directory, created if missing
    """
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # Convert channel-first (C, H, W) to channel-last (H, W, C) for saving.
    image = img_np.copy().transpose(1, 2, 0)
    # For a gray image drop the trailing channel axis.
    # Fix: the original tested img_np.shape[2] (the image width) where the
    # channel count img_np.shape[0] was clearly intended.
    if img_np.shape[0] == 1:
        image = image[:, :, 0]
    # Clip values into [0, 1] so the encoder receives a valid range.
    image[image > 1.] = 1.
    image[image < 0.] = 0.
    spmi.toimage(image, cmin=0, cmax=1).save(out_dir + file_name)
def evaluate(model, test_data, corrupt_func, gpu_id=0, batchsize=64, img_out_dir=None):
    """Evaluate the model's MLE inpainting quality on a test set.

    Each test image is corrupted by `corrupt_func`, reconstructed with
    model.forward_mle, and scored with MSE / PSNR / SSIM.  Returns a dict
    of dataset means keyed 'MLE_MSE', 'MLE_PSNR', 'MLE_SSIM'.  When
    `img_out_dir` is given, also writes a per-image CSV and saves the 100
    best- and worst-PSNR (input, output, ground-truth) triples as PNGs.
    gpu_id < 0 keeps tensors on CPU.
    """
    model.eval()
    test_loader = torch.utils.data.DataLoader(test_data, batchsize, shuffle=False)

    if img_out_dir is not None:
        if not os.path.exists(img_out_dir):
            os.makedirs(img_out_dir)
        df = pd.DataFrame([], columns=['index', 'MLE_MSE', 'MLE_PSNR', 'MLE_SSIM'])
        best_img = []
        worst_img = []
        rank_num = 100

    with torch.no_grad():
        vals = {'MLE_MSE': 0., 'MLE_PSNR': 0., 'MLE_SSIM': 0.}
        loss_func = nn.MSELoss()
        i = 0
        for X, _ in test_loader:
            if gpu_id >= 0:
                X = X.cuda(gpu_id)
            in_img = corrupt_func(X)
            # MLE prediction
            out_mle = model.forward_mle(in_img)
            for j, (org, in_x) in enumerate(zip(X, in_img)):
                # Compute the evaluation measures
                mse = loss_func(out_mle[j], org).item()
                psnr = util.compute_PSNR(out_mle[j].cpu().numpy(), org.cpu().numpy())
                ssim = util.compute_SSIM(out_mle[j].cpu().numpy(), org.cpu().numpy())
                # Running dataset means: each sample weighted 1/len(test_data).
                vals['MLE_MSE'] += mse / len(test_data)
                vals['MLE_PSNR'] += psnr / len(test_data)
                vals['MLE_SSIM'] += ssim / len(test_data)

                if img_out_dir is not None:
                    df.loc[i] = [i, mse, psnr, ssim]
                    # Maintain top-`rank_num` lists ordered by PSNR; each entry
                    # is [index, psnr, input, mle output, ground truth].
                    if len(best_img) < rank_num:
                        best_img.append([i, psnr, in_x.cpu().numpy(), out_mle[j].cpu().numpy(), org.cpu().numpy()])
                    elif psnr > best_img[-1][1]:
                        best_img[-1] = [i, psnr, in_x.cpu().numpy(), out_mle[j].cpu().numpy(), org.cpu().numpy()]
                    best_img.sort(key=lambda x: x[1], reverse=True)

                    if len(worst_img) < rank_num:
                        worst_img.append([i, psnr, in_x.cpu().numpy(), out_mle[j].cpu().numpy(), org.cpu().numpy()])
                    elif psnr < worst_img[-1][1]:
                        worst_img[-1] = [i, psnr, in_x.cpu().numpy(), out_mle[j].cpu().numpy(), org.cpu().numpy()]
                    worst_img.sort(key=lambda x: x[1])
                i += 1

    if img_out_dir is not None:
        df.to_csv(img_out_dir + 'evaluation.csv', sep=',', header=True, index=False)
        # Save images (best and worst 100 images)
        for i in range(rank_num):
            save_img(best_img[i][2], 'best_rank{:03d}_input_{:05d}.png'.format(i+1, best_img[i][0]), out_dir=img_out_dir)
            save_img(best_img[i][3], 'best_rank{:03d}_mle_out_{:05d}.png'.format(i+1, best_img[i][0]), out_dir=img_out_dir)
            save_img(best_img[i][4], 'best_rank{:03d}_gt_{:05d}.png'.format(i + 1, best_img[i][0]), out_dir=img_out_dir)
            save_img(worst_img[i][2], 'worst_rank{:03d}_input_{:05d}.png'.format(i + 1, worst_img[i][0]), out_dir=img_out_dir)
            save_img(worst_img[i][3], 'worst_rank{:03d}_mle_out_{:05d}.png'.format(i + 1, worst_img[i][0]), out_dir=img_out_dir)
            save_img(worst_img[i][4], 'worst_rank{:03d}_gt_{:05d}.png'.format(i + 1, worst_img[i][0]), out_dir=img_out_dir)
    return vals
| 43.623529
| 128
| 0.562567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 475
| 0.128101
|
b22daaab8d4ecd141b8b7df40454e33e53d6bbdf
| 10,218
|
py
|
Python
|
connectivity/connectivity.py
|
vagechirkov/NI-project
|
fa0687d81ffad9b2e3737fe9115a151335bda358
|
[
"MIT"
] | 1
|
2021-06-01T08:06:15.000Z
|
2021-06-01T08:06:15.000Z
|
connectivity/connectivity.py
|
vagechirkov/NI-project
|
fa0687d81ffad9b2e3737fe9115a151335bda358
|
[
"MIT"
] | null | null | null |
connectivity/connectivity.py
|
vagechirkov/NI-project
|
fa0687d81ffad9b2e3737fe9115a151335bda358
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
from nxviz import CircosPlot
from neurolib.utils import atlases
# https://doi.org/10.1016/j.neuroimage.2015.07.075 Table 2
# number corresponds to AAL2 labels indices
# Mapping from cortical lobe to its 1-based AAL2 label indices.  Values are
# either a flat list, or a dict keyed by anatomical surface whose values are
# flattened by aal2_atlas_add_cortical_regions() below.
CORTICAL_REGIONS = {
    'central_region': [1, 2, 61, 62, 13, 14],
    'frontal_lobe': {
        'Lateral surface': [3, 4, 5, 6, 7, 8, 9, 10],
        'Medial surface': [19, 20, 15, 16, 73, 74],
        'Orbital surface': [11, 12, 17, 18, 21, 22, 23, 24, 25, 26, 27, 28,
                            29, 30, 31, 32]
    },
    'temporal_lobe': {
        'Lateral surface': [83, 84, 85, 86, 89, 90, 93, 94]
    },
    'parietal_lobe': {
        'Lateral surface': [63, 64, 65, 66, 67, 68, 69, 70],
        'Medial surface': [71, 72],
    },
    'occipital_lobe': {
        'Lateral surface': [53, 54, 55, 56, 57, 58],
        'Medial and inferior surfaces': [47, 48, 49, 50, 51, 52, 59, 60],
    },
    'limbic_lobe': [87, 88, 91, 92, 35, 36, 37, 38, 39, 40, 33, 34]
}
def aal2_atlas_add_cortical_regions(aal2_atlas):
    """Attach each cortical-region group as a 0-based index list attribute.

    Parameters
    ----------
    aal2_atlas : neurolib.utils.atlases.AutomatedAnatomicalParcellation2()
        AAL2 atlas
    """
    for region_name, region_spec in CORTICAL_REGIONS.items():
        if isinstance(region_spec, list):
            label_indices = region_spec
        else:
            # Flatten the per-surface sub-dictionaries into a single list.
            label_indices = [idx for surface in region_spec.values()
                             for idx in surface]
        # AAL2 labels are 1-based; store 0-based indices on the atlas.
        setattr(aal2_atlas, region_name, [idx - 1 for idx in label_indices])
    return aal2_atlas
def plot_graph_circos(graph, sc_threshold=0.07):
    """Draw a Circos plot of a cortical connectivity graph.

    Edges weaker than `sc_threshold` are dropped, nodes are relabeled with
    AAL2 cortex names, grouped/ordered by cortical region (L/R hemispheres
    paired), and rendered with nxviz.  Returns the CircosPlot object.
    The input graph is copied, not modified.
    """
    # Some parts of the code from:
    # https://github.com/multinetlab-amsterdam/network_TDA_tutorial
    G = graph.copy()

    # remove weak connections
    for edge in nx.get_edge_attributes(G, 'weight').items():
        if edge[1] < sc_threshold:
            G.remove_edge(edge[0][0], edge[0][1])

    atlas = atlases.AutomatedAnatomicalParcellation2()
    atlas = aal2_atlas_add_cortical_regions(atlas)
    # Node ids are assumed to index atlas.names('cortex') -- TODO confirm.
    G = nx.relabel_nodes(G, lambda x: atlas.names('cortex')[x])

    sublist = {}
    order = {}
    n = 0
    for group in list(CORTICAL_REGIONS.keys()):
        for i in atlas.names(group=group):
            sublist[i] = group
            # Names ending in 'L'/'R' are placed at adjacent positions so
            # hemisphere pairs sit next to each other on the circle.
            if i[-1] == 'L':
                order[i] = n
            else:
                order[i] = n + 1
                n += 2
    nx.set_node_attributes(G, sublist, 'cortical_region')
    nx.set_node_attributes(G, order, 'node_order')

    # https://nxviz.readthedocs.io/en/latest/modules.html
    circ = CircosPlot(
        G, figsize=(15, 15), node_labels=True, node_label_layout='rotation',
        edge_color='weight', edge_width='weight', fontsize=10,
        node_order='node_order', nodeprops={"radius": 1},
        group_label_offset=5, node_color='cortical_region', group_legend=True
    )
    circ.draw()
    circ.sm.colorbar.remove()
    labels_networks = sorted(list(set(
        [list(circ.graph.nodes.values())[n][
            circ.node_color] for n in np.arange(len(circ.nodes))])))
    plt.legend(handles=circ.legend_handles,
               title="Subnetwork",
               ncol=2,
               borderpad=1,
               shadow=True,
               fancybox=True,
               bbox_to_anchor=(0.8, 1.05),
               loc='upper left',
               fontsize=10,
               labels=labels_networks)
    plt.tight_layout()
    return circ
def make_graph(Cmat):
    """Build an undirected networkx graph from a connectivity matrix.

    Cmat is symmetrized, sparsified (entries below 0.01 zeroed, in place on
    the symmetrized copy), and self-loops are removed.  The matrix is stored
    on the graph as G.graph['matrix'] for later reuse.
    """
    G = nx.from_numpy_matrix(sparsify(make_symmetric(Cmat), threshold=0.01))
    G.remove_edges_from(list(nx.selfloop_edges(G)))
    G.graph['matrix'] = Cmat
    return G
def graph_measures(G, Dmat=None):
    """Compute node- and graph-level measures of a weighted graph.

    Mutates G in place (adds node/edge attributes) and returns
    (G, dict-of-measures).  When Dmat is a numpy array it is used as the
    edge-distance matrix; otherwise distance is 1/|weight|.
    """
    graph_measures = {}
    # -------------- Degree -------------- #
    strength = G.degree(weight='weight')
    nx.set_node_attributes(G, dict(strength), 'strength')
    # Normalized node strength values 1/N-1
    normstrenghts = {node: val * 1/(len(G.nodes)-1)
                     for (node, val) in strength}
    nx.set_node_attributes(G, normstrenghts, 'strengthnorm')
    # Computing the mean degree of the network
    normstrengthlist = np.array([val * 1/(len(G.nodes)-1)
                                 for (node, val) in strength])
    mean_degree = np.sum(normstrengthlist)/len(G.nodes)
    graph_measures['mean_degree'] = mean_degree
    graph_measures['degree'] = normstrengthlist
    # -------------- Centrality -------------- #
    # Closeness Centrality
    # Distance is an inverse of correlation
    # IDEA: use Dmat instead of 1 / abs(weight) ???
    if isinstance(Dmat, np.ndarray):
        G_distance_dict = {(e1, e2): Dmat[e1, e2]
                           for e1, e2 in G.edges()}
    else:
        G_distance_dict = {(e1, e2): 1 / abs(weight)
                           for e1, e2, weight in G.edges(data='weight')}
    nx.set_edge_attributes(G, G_distance_dict, 'distance')
    closeness = nx.closeness_centrality(G, distance='distance')
    nx.set_node_attributes(G, closeness, 'closecent')
    graph_measures['closeness'] = list(closeness.values())
    # Betweenness Centrality
    betweenness = nx.betweenness_centrality(G, weight='distance',
                                            normalized=True)
    nx.set_node_attributes(G, betweenness, 'betweenness_centrality')
    graph_measures['betweenness'] = list(betweenness.values())
    # Eigenvector Centrality
    # eigen = nx.eigenvector_centrality(G, weight='weight')
    # nx.set_node_attributes(G, eigen, 'eigen')
    # graph_measures['eigenvector_centrality'] = list(eigen.values())
    # -------------- Path Length -------------- #
    # Average shortest path length
    avg_shorterst_path = nx.average_shortest_path_length(G, weight='distance')
    graph_measures['mean_shortest_path'] = avg_shorterst_path
    # TODO: maybe add more measures
    # -------------- Assortativity -------------- #
    # Average degree of the neighborhood
    average_neighbor_degree = nx.average_neighbor_degree(G, weight='weight')
    nx.set_node_attributes(G, average_neighbor_degree, 'neighbor_degree')
    graph_measures['neighbor_degree'] = list(average_neighbor_degree.values())
    # ------- Avg Neighbor Degree (2nd try) ----- #
    # Alternative computation working directly on the stored matrix.
    avg_nb_deg = compute_avg_neighborhood_degree(G.graph['matrix'])
    nx.set_node_attributes(G, avg_nb_deg, 'neighbor_degree_new')
    graph_measures['neighbor_degree_new'] = list(avg_nb_deg)
    # -------------- Clustering Coefficient -------------- #
    clustering = nx.clustering(G, weight='weight')
    nx.set_node_attributes(G, clustering, 'clustering_coefficient')
    graph_measures['clustering_coefficient'] = list(clustering.values())
    graph_measures['mean_clustering_coefficient'] =\
        nx.average_clustering(G, weight='weight')
    # -------------- Minimum Spanning Tree -------------- #
    # backbone of a network
    GMST = nx.minimum_spanning_tree(G, weight='distance')
    backbone = nx.to_numpy_array(GMST)
    graph_measures['backbone'] = backbone
    # -------------- Small-world -------------- #
    # FIXME: too slow...
    # graph_measures['omega'] = nx.omega(G, seed=0, niter=1)
    # graph_measures['sigma'] = nx.sigma(G, seed=0, niter=1)
    return G, graph_measures
def neighborhoods(adj, epsilon=0.001):
    """Binary adjacency indicator: 1 where the weight exceeds epsilon."""
    return (adj > epsilon).astype(int)
def normalize_neighborhoods(neighborhoods):
    """Scale each column so it sums to 1 (column-stochastic matrix)."""
    column_totals = neighborhoods.sum(axis=0)
    return neighborhoods / column_totals
def is_symmetric(A, epsilon=0.1):
    """Asymmetry diagnostics for matrix A.

    Returns a 4-tuple: (total asymmetry < epsilon, total asymmetry,
    max |A - A.T| entry, mean |A - A.T| entry).
    """
    abs_diff = np.abs(A - A.T)
    total_asymmetry = abs_diff.sum()
    return (total_asymmetry < epsilon, total_asymmetry,
            abs_diff.max(), abs_diff.mean())
def make_symmetric(A):
    """Return the symmetric part (A + A.T) / 2 of matrix A.

    Fix: the original `assert(is_symmetric(S))` asserted the whole 4-tuple
    returned by is_symmetric, which is always truthy, so the check could
    never fail.  Assert the boolean first element instead.
    """
    S = (A + A.T)/2
    assert is_symmetric(S)[0], "symmetrization failed"
    return S
def sparsify(A, threshold=0.01):
    """Zero out entries of A below `threshold` (in place!) and return A."""
    np.copyto(A, 0.0, where=A < threshold)
    return A
def compute_avg_neighborhood_degree(A):
    """Average weighted degree over each node's (binary) neighborhood.

    Equivalent to normalize_neighborhoods(neighborhoods(A)).dot(degrees),
    with both helpers inlined here.
    """
    degrees = A.sum(axis=0)
    indicator = (A > 0.001).astype(int)              # neighborhoods(A)
    stochastic = indicator / indicator.sum(axis=0)   # column-normalized
    # not always true - should it?
    # assert(np.mean(degrees) == np.mean(avg_nb_deg))
    return stochastic.dot(degrees)
def z_scores(df):
    """Z-score summary measures across subjects.

    Expects one row per subject.  Distribution-valued columns (arrays per
    cell) are first averaged per subject, then z-scored across subjects;
    scalar columns are z-scored directly.  NOTE: mutates the caller's df
    (adds 'subject' and '*_z' columns) and returns a view of the z-columns.
    """
    # 'degree', 'clustering_coefficient'
    m_dist = ['closeness', 'betweenness',
              'neighbor_degree', 'neighbor_degree_new']
    m_point = ['mean_degree', 'mean_shortest_path',
               'mean_clustering_coefficient']  # , 'omega', 'sigma']
    n_subjects = df.shape[0]
    df.loc[:, 'subject'] = df.index
    for m in m_dist:
        value = np.array([df.loc[i, m] for i in range(n_subjects)])
        mean = value.mean(axis=1)
        # Pooled std: per-subject stds averaged across subjects.
        std = value.std(axis=1).mean()
        df.loc[:, f'mean_{m}_z'] = (mean - mean.mean()) / std
    for m in m_point:
        mean = df.loc[:, m].mean()
        std = df.loc[:, m].std()
        df.loc[:, f'{m}_z'] = (df.loc[:, m] - mean) / std
    all_values = (['subject'] + [f'{m}_z' for m in m_point]
                  + [f'mean_{m}_z' for m in m_dist])
    return df.loc[:, all_values]
def similarity_between_subjects(df):
    """Correlate every subject's measures against subject 0 (the reference).

    For each distribution-valued measure and each matrix column, the Pearson
    correlation with row 0 is stored in a new '*_corr' column.  NOTE:
    mutates the caller's df; returns only the 'subject' and '*_corr'
    columns.
    """
    m_dist = ['degree', 'closeness', 'betweenness',
              'neighbor_degree', 'neighbor_degree_new',
              'clustering_coefficient']
    mats = ['Cmat', 'Dmat', 'backbone']
    # graph_properties = ['omega', 'sigma']
    n_subjects = df.shape[0]
    df.loc[:, 'subject'] = df.index
    for m in m_dist:
        value = np.array([df.loc[i, m] for i in range(n_subjects)])
        for s in range(n_subjects):
            corr = np.corrcoef(value[0, :], value[s, :])[0, 1]
            df.loc[s, f'{m}_corr'] = corr
    for m in mats:
        for s in range(n_subjects):
            # Matrices are flattened so corrcoef sees 1-D vectors.
            corr = np.corrcoef(df.loc[0, m].flatten(),
                               df.loc[s, m].flatten())[0, 1]
            df.loc[s, f'{m}_corr'] = corr
    # for m in graph_properties:
    #     value = np.array([df.loc[i, m] for i in range(n_subjects)])
    #     for s in range(n_subjects):
    #         corr = np.corrcoef(value[0], value[s])[0, 1]
    #         df.loc[s, f'{m}_corr'] = corr
    # + graph_properties]
    all_values = ['subject'] + [f'{m}_corr' for m in mats + m_dist]
    return df.loc[:, all_values]
if __name__ == '__main__':
    # Quick smoke test: build a graph from the second subject of the
    # neurolib "gw" example dataset.
    from neurolib.utils.loadData import Dataset
    ds = Dataset("gw")
    G = make_graph(ds.Cmats[1])
| 33.501639
| 78
| 0.592582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,029
| 0.296438
|
b22e2d62e71b8fbbd1c5658b31e4eb4e56a96389
| 140
|
py
|
Python
|
secao5/exercicio6.py
|
robinson-1985/exercicios_python_geek_university
|
6dfc740472de9ff7c029e26a2ba8f51080e3860b
|
[
"MIT"
] | null | null | null |
secao5/exercicio6.py
|
robinson-1985/exercicios_python_geek_university
|
6dfc740472de9ff7c029e26a2ba8f51080e3860b
|
[
"MIT"
] | null | null | null |
secao5/exercicio6.py
|
robinson-1985/exercicios_python_geek_university
|
6dfc740472de9ff7c029e26a2ba8f51080e3860b
|
[
"MIT"
] | null | null | null |
'''6. Escreva um programa que, dados dois números inteiros, mostre na tela o maior deles,
assim como a diferença existente entre ambos.'''
# Translation: Write a program that, given two integers, prints the larger
# one as well as the difference between them.  (No implementation yet.)
| 35
| 89
| 0.757143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 140
| 0.985915
|
b22e6d6bc215d8e3aa72605534263f2c5a57156d
| 1,694
|
py
|
Python
|
src/conf.py
|
RJTK/dwglasso_cweeds
|
eaaa9cd3b3b4f0120f6d9061b585ec46f0678740
|
[
"MIT"
] | null | null | null |
src/conf.py
|
RJTK/dwglasso_cweeds
|
eaaa9cd3b3b4f0120f6d9061b585ec46f0678740
|
[
"MIT"
] | null | null | null |
src/conf.py
|
RJTK/dwglasso_cweeds
|
eaaa9cd3b3b4f0120f6d9061b585ec46f0678740
|
[
"MIT"
] | null | null | null |
'''
This is the config file for the code in src/. Essentially it
holds things like file and variable names.
'''
# The folder locations of the below files are specified by the
# cookie cutter data science format and are hardcoded into the code.
# I'm not entirely sure that that was the best way to go about it,
# but that's how it is for now.
import os
cwd = os.getcwd()  # Current working directory
# NOTE: all paths below are built from cwd at import time, so they are
# relative to wherever the program is launched from.

# Directories containing data
RAW_DATA_DIR = cwd + '/data/raw/'
INTERIM_DATA_DIR = cwd + '/data/interim/'
PROCESSED_DATA_DIR = cwd + '/data/processed/'

# Path of initial locations text file
LOC_DATA_FILE = RAW_DATA_DIR + 'locations.txt'

# Path to pickle location data
LOC_PKL_FILE = INTERIM_DATA_DIR + 'locations.pkl'

# Path to HDFStores
HDF_INTERIM_FILE = INTERIM_DATA_DIR + 'interim_data.hdf'
HDF_FINAL_FILE = PROCESSED_DATA_DIR + 'final_data.hdf'

# Path to a place to store figures
FIGURE_ROOT = cwd + '/reports/figures/'

# The key for the locations DataFrame in the HDFStore
LOCATIONS_KEY = '/locations/D'

# File prefixes for pickle files
ZZT_FILE_PREFIX = cwd + '/data/processed/ZZT'
YZT_FILE_PREFIX = cwd + '/data/processed/YZT'
X_VALIDATE_FILE_PREFIX = cwd + '/data/processed/X_validate'

# The maximum value of p we are likely to use
MAX_P = 3
# The actual value of p that is used
P_LAG = 2

# The location of the canada shape file for geopandas
CANADA_SHAPE = cwd + '/reports/shapefiles/Canada/Canada.shp'

# Name of the temperature key in hdf
TEMPERATURE_TS_ROOT = 'temperature_ts'

# Used time intervals
INIT_YEAR = 1980  # The initial year for final dataset
FINAL_YEAR = 1990  # The final year for final dataset
FINAL_YEAR_VALIDATE = 1995  # last year for validation set.
| 30.25
| 68
| 0.756198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,159
| 0.684179
|
b22e87213200baf4d5c3c89eb335262571cc546e
| 1,486
|
py
|
Python
|
HackerEarth/Python/BasicProgramming/InputOutput/BasicsOfInputOutput/MinimizeCost.py
|
cychitivav/programming_exercises
|
e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78
|
[
"MIT"
] | null | null | null |
HackerEarth/Python/BasicProgramming/InputOutput/BasicsOfInputOutput/MinimizeCost.py
|
cychitivav/programming_exercises
|
e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78
|
[
"MIT"
] | null | null | null |
HackerEarth/Python/BasicProgramming/InputOutput/BasicsOfInputOutput/MinimizeCost.py
|
cychitivav/programming_exercises
|
e8e7ddb4ec4eea52ee0d3826a144c7dc97195e78
|
[
"MIT"
] | null | null | null |
#!/Usr/bin/env python
"""
You are given an array of numbers Ai which contains positive as well as negative numbers . The cost of the array can be defined as C(X)
C(x) = |A1 + T1| + |A2 + T2| + ... + |An + Tn|, where T is the transfer array which contains N zeros initially.
You need to minimize this cost. You can transfer value from one array element to another if and only if the distance between them is at most K.
Also, transfer value can't be transferred further.
Say array contains 3, -1, -2 and K = 1
if we transfer 3 from 1st element to 2nd, the array becomes
Original Value 3, -1, -2
Transferred value -3, 3, 0
C(x) = |3 - 3| + |-1 + 3| + ... + |-2 + 0| = 4 which is minimum in this case
Note :
Only positive value can be transferred
It is not necessary to transfer whole value i.e partial transfer is also acceptable. This means that if you have A[i] = 5 then you can distribute the value 5 across many other array elements provided that they finally sum to a number less than equal to 5. For example 5 can be transferred in chunks of smaller values say 2 , 3 but their sum should not exceed 5.
INPUT:
First line contains N and K separated by space
Second line denotes an array of size N
OUTPU:
Minimum value of C(X)
CONSTRAINTS:
1 ≤ N,K ≤ 10^5
-10^9 ≤ Ai ≤ 10^9
"""
import io
import os

__author__ = "Cristian Chitiva"
__date__ = "March 18, 2019"
__email__ = "cychitivav@unal.edu.co"

# Read up to 2 bytes from stdin (fd 0) and decode them to str.
# Fix: the original called os.read() but only imported io, so it raised
# NameError at runtime.
N = os.read(0, 2).decode()
print(type(N))
| 31.617021
| 362
| 0.691117
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,389
| 0.929719
|
b23021f02a20519683a1417ea552146f63c73f1a
| 2,002
|
py
|
Python
|
pava/implementation/natives/java/net/TwoStacksPlainSocketImpl.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | 4
|
2017-03-30T16:51:16.000Z
|
2020-10-05T12:25:47.000Z
|
pava/implementation/natives/java/net/TwoStacksPlainSocketImpl.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | null | null | null |
pava/implementation/natives/java/net/TwoStacksPlainSocketImpl.py
|
laffra/pava
|
54d10cf7f8def2f96e254c0356623d08f221536f
|
[
"MIT"
] | null | null | null |
def add_native_methods(clazz):
    """Attach stub implementations of the native socket methods to `clazz`.

    Each name encodes the native (Java-style) signature, with parameter
    types separated by double underscores; every stub simply raises
    NotImplementedError.  Only initProto is a static method.
    """
    def initProto____():
        raise NotImplementedError()

    def socketCreate__boolean__(a0, a1):
        raise NotImplementedError()

    def socketConnect__java_net_InetAddress__int__int__(a0, a1, a2, a3):
        raise NotImplementedError()

    def socketBind__java_net_InetAddress__int__boolean__(a0, a1, a2, a3):
        raise NotImplementedError()

    def socketListen__int__(a0, a1):
        raise NotImplementedError()

    def socketAccept__java_net_SocketImpl__(a0, a1):
        raise NotImplementedError()

    def socketAvailable____(a0):
        raise NotImplementedError()

    def socketClose0__boolean__(a0, a1):
        raise NotImplementedError()

    def socketShutdown__int__(a0, a1):
        raise NotImplementedError()

    def socketNativeSetOption__int__boolean__java_lang_Object__(a0, a1, a2, a3):
        raise NotImplementedError()

    def socketGetOption__int__java_lang_Object__(a0, a1, a2):
        raise NotImplementedError()

    def socketSendUrgentData__int__(a0, a1):
        raise NotImplementedError()

    # Bind the stubs onto the class; all are instance methods except
    # initProto, which is static.
    clazz.initProto____ = staticmethod(initProto____)
    clazz.socketCreate__boolean__ = socketCreate__boolean__
    clazz.socketConnect__java_net_InetAddress__int__int__ = socketConnect__java_net_InetAddress__int__int__
    clazz.socketBind__java_net_InetAddress__int__boolean__ = socketBind__java_net_InetAddress__int__boolean__
    clazz.socketListen__int__ = socketListen__int__
    clazz.socketAccept__java_net_SocketImpl__ = socketAccept__java_net_SocketImpl__
    clazz.socketAvailable____ = socketAvailable____
    clazz.socketClose0__boolean__ = socketClose0__boolean__
    clazz.socketShutdown__int__ = socketShutdown__int__
    clazz.socketNativeSetOption__int__boolean__java_lang_Object__ = socketNativeSetOption__int__boolean__java_lang_Object__
    clazz.socketGetOption__int__java_lang_Object__ = socketGetOption__int__java_lang_Object__
    clazz.socketSendUrgentData__int__ = socketSendUrgentData__int__
| 39.254902
| 123
| 0.793207
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b231777aaaf136ffab975467c0c084dcecffc14f
| 973
|
py
|
Python
|
ansible/utils/check_droplet.py
|
louis-pre/NewsBlur
|
b4e9a56041ff187ef77b38dfd0778daf41b53f4f
|
[
"MIT"
] | 3,073
|
2015-01-01T07:20:18.000Z
|
2022-03-31T20:33:41.000Z
|
ansible/utils/check_droplet.py
|
louis-pre/NewsBlur
|
b4e9a56041ff187ef77b38dfd0778daf41b53f4f
|
[
"MIT"
] | 1,054
|
2015-01-02T13:32:35.000Z
|
2022-03-30T04:21:21.000Z
|
ansible/utils/check_droplet.py
|
louis-pre/NewsBlur
|
b4e9a56041ff187ef77b38dfd0778daf41b53f4f
|
[
"MIT"
] | 676
|
2015-01-03T16:40:29.000Z
|
2022-03-30T14:00:40.000Z
|
import sys
import time
import digitalocean
import subprocess
def test_ssh(drop):
    """Return True iff `ssh root@<droplet ip> ls` exits with status 0."""
    host = drop.ip_address
    command = f"ssh -o StrictHostKeyChecking=no root@{host} ls"
    return subprocess.call(command, shell=True) == 0
# DigitalOcean API token lives outside the repo.
TOKEN_FILE = "/srv/secrets-newsblur/keys/digital_ocean.token"

# Droplet name to wait for is the first CLI argument.
droplet_name = sys.argv[1]

with open(TOKEN_FILE) as f:
    token = f.read().strip()

manager = digitalocean.Manager(token=token)

# Poll the API every 3s for up to `timeout` seconds until the droplet
# exists and answers ssh; raise if it never appears.
timeout = 180
timer = 0
ssh_works = False
while not ssh_works:
    if timer > timeout:
        raise Exception(f"The {droplet_name} droplet was not created.")
    droplets = [drop for drop in manager.get_all_droplets() if drop.name == droplet_name]
    if droplets:
        droplet = droplets[0]
        print(f"Found the {droplet_name} droplet. IP address is {droplet.ip_address}. Testing ssh...")
        ssh_works = test_ssh(droplet)
    time.sleep(3)
    timer += 3
print("Success!")
| 27.027778
| 105
| 0.697842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 254
| 0.261048
|