hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1bca72364354424e756eb0857899935691ba79bb | 422 | py | Python | HIS_void/patient/migrations/0007_auto_20210429_2316.py | YuanchenZhu2020/HIS_void | 7289bf537e9fc4b09750bbca76a4cc8354dc770f | [
"MIT"
] | null | null | null | HIS_void/patient/migrations/0007_auto_20210429_2316.py | YuanchenZhu2020/HIS_void | 7289bf537e9fc4b09750bbca76a4cc8354dc770f | [
"MIT"
] | null | null | null | HIS_void/patient/migrations/0007_auto_20210429_2316.py | YuanchenZhu2020/HIS_void | 7289bf537e9fc4b09750bbca76a4cc8354dc770f | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-04-29 15:16
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the PatientURLPermissions model to the singular PatientURLPermission."""
    # Must apply after the latest rbac and patient migrations.
    dependencies = [
        ('rbac', '0002_auto_20210426_2345'),
        ('patient', '0006_auto_20210429_1431'),
    ]
    operations = [
        # Pure model rename; no columns or data are touched beyond the table name.
        migrations.RenameModel(
            old_name='PatientURLPermissions',
            new_name='PatientURLPermission',
        ),
    ]
| 22.210526 | 48 | 0.597156 | 331 | 0.78436 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.374408 |
1bcacdc24776751d11e226a6fb8a723ec45c8c51 | 2,780 | py | Python | server_py/flatgov/flatgov/celery.py | aih/BillMap | d130b1396cb25b415cd7d9ea7389ad558a34eec1 | [
"CC0-1.0"
] | 2 | 2022-01-18T14:55:52.000Z | 2022-01-31T03:38:39.000Z | server_py/flatgov/flatgov/celery.py | aih/FlatGov | 8201ef1813bbc062841421017f492e877f75a5f8 | [
"CC0-1.0"
] | 321 | 2020-09-01T16:20:35.000Z | 2021-07-03T06:42:34.000Z | server_py/flatgov/flatgov/celery.py | aih/FlatGov | 8201ef1813bbc062841421017f492e877f75a5f8 | [
"CC0-1.0"
] | 1 | 2022-03-31T15:02:49.000Z | 2022-03-31T15:02:49.000Z | import os
from celery import Celery
from celery.schedules import crontab

# Fall back to the dev settings module unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'flatgov.dev')

app = Celery('flatgov')
# Pull all CELERY_*-prefixed settings from the Django settings module.
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()

# RedBeat keeps the beat schedule in Redis so it survives process restarts.
app.conf.redbeat_redis_url = os.getenv('REDIS_URL', 'redis://localhost:6379/0')
app.conf.broker_pool_limit = 1
app.conf.broker_heartbeat = None
app.conf.broker_connection_timeout = 30
app.conf.worker_prefetch_multiplier = 1

# All times below are UTC (see app.conf.timezone at the bottom of this module).
app.conf.beat_schedule = {
    'download_sources': {
        'task': 'events.tasks.download_sources',
        'schedule': crontab(minute=0, hour=19)
    },
    'process_sources': {
        'task': 'events.tasks.process_sources',
        'schedule': crontab(minute=5, hour=19)
    },
    'update_bills_daily': {
        # Triggers bill download
        # When this completes and SUCCESS= True,
        # The rest of the bill similarity tasks are triggered in uscongress/models.py
        'task': 'uscongress.tasks.update_bill_task',
        'schedule': crontab(minute=1, hour=1),
        # 'options': {'queue': 'bill'}
    },
    'sap_biden_scraper_daily': {
        # this task is independent of other tasks
        # It takes less than 1 minute
        'task': 'bills.tasks.sap_biden_task',
        'schedule': crontab(minute=0, hour=3),
        # 'options': {'queue': 'bill'}
    },
    'committee_report_scraper_daily': {
        # this task depends on updates from the update_bills task
        # It takes less than 5 minutes
        'task': 'bills.tasks.committee_report_scrapy_task',
        'schedule': crontab(minute=10, hour=3),
        # 'options': {'queue': 'bill'}
    },
    'update_cbo_scores_daily': {
        # this task depends on updates from the update_bills task
        # it runs on only the directory of the current congress
        # and should take less than 20 minutes
        'task': 'bills.tasks.cbo_task',
        'schedule': crontab(minute=30, hour=3),
        # 'options': {'queue': 'bill'}
    },
    'update_cosponsor_daily': {
        # the update_cosponsor task deletes the cosponsor table and recreates it
        # it takes about 1 hour to run
        # this is independent of other tasks, since it gets data directly
        # from the YAML file in the unitedstates Github repo
        'task': 'bills.tasks.update_cosponsor_comm_task',
        'schedule': crontab(minute=20, hour=4),
        # 'options': {'queue': 'bill'}
    },
    'crs_scraper_daily': {
        # this task depends on updates from the update_bills task
        # to link reports to bills
        # Fix: the original entry repeated the 'schedule' key twice; the
        # duplicate literal key has been removed (the second silently won).
        'task': 'bills.tasks.crs_task',
        'schedule': crontab(minute=0, hour=5),
        # 'options': {'queue': 'bill'}
    },
}
app.conf.timezone = 'UTC' | 37.567568 | 85 | 0.641727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,619 | 0.582374 |
1bce65ef851c4299c8fd10eb4180346ac856e593 | 1,978 | py | Python | Pygame/InvisibleMaze/invisiblemaze.py | kasztp/python-lessons | 2a159ad5e1186c749b96c5d0ede45b7142c6bbb5 | [
"MIT"
] | 35 | 2015-05-18T08:08:41.000Z | 2022-03-07T09:38:02.000Z | Pygame/InvisibleMaze/invisiblemaze.py | kasztp/python-lessons | 2a159ad5e1186c749b96c5d0ede45b7142c6bbb5 | [
"MIT"
] | 1 | 2021-09-29T02:08:26.000Z | 2021-09-29T02:08:26.000Z | Pygame/InvisibleMaze/invisiblemaze.py | kasztp/python-lessons | 2a159ad5e1186c749b96c5d0ede45b7142c6bbb5 | [
"MIT"
] | 40 | 2015-04-28T00:38:54.000Z | 2022-02-13T14:18:34.000Z | import math
import pygame
from pygame.locals import *
from imageutil import loadImage
def main():
    """Run the Invisible Maze game loop until the window is closed or ESC is hit.

    The player becomes invisible while moving; obstacle squares are tinted
    red by proximity to the player (see colorFromDistance). Touching an
    obstacle freezes the player and makes it visible again.
    """
    pygame.init()
    title = 'Invisible Maze'
    screen = pygame.display.set_mode((500, 500), 0)
    screenRect = screen.get_rect()
    pygame.display.flip()
    player = loadImage('player.png')
    playerRect = player.get_rect()
    # Start the player at the bottom-left of the window.
    playerRect.y = screenRect.height - playerRect.height
    obstacles = (ob(100, 400), ob(200, 220), ob(350, 230), ob(450, 100),
                 ob(460, 350))
    # Goal strip on the right edge of the window, drawn in green.
    exitRect = Rect(490, 200, 10, 100)
    pygame.display.set_caption(title)
    pygame.mouse.set_visible(0)
    clock = pygame.time.Clock()
    # game loop
    loop = True
    hit = False
    playerVisible = True
    while loop:
        # get input
        for event in pygame.event.get():
            if event.type == QUIT \
                    or (event.type == KEYDOWN and event.key == K_ESCAPE):
                loop = False
        keystate = pygame.key.get_pressed()
        # -1, 0 or +1 per axis from the arrow keys.
        xdir = keystate[K_RIGHT] - keystate[K_LEFT]
        ydir = keystate[K_DOWN] - keystate[K_UP]
        if xdir or ydir:
            # Moving hides the player -- the "invisible" part of the maze.
            playerVisible = False
        if not hit:
            # 2 px per frame, clamped inside the window.
            playerRect = playerRect.move((xdir * 2, ydir * 2)).clamp(screenRect)
        screen.fill((0,0,0))
        screen.fill((0,255,0), exitRect)
        for obstacle in obstacles:
            # Obstacle brightness encodes distance to the player.
            screen.fill(colorFromDistance(playerRect.center, obstacle.center),
                        obstacle)
            if playerRect.colliderect(obstacle):
                # Collision ends movement and reveals the player.
                hit = True
                playerVisible = True
        if playerVisible:
            screen.blit(player, playerRect)
        pygame.display.flip()
        # maintain frame rate
        clock.tick(40)
    pygame.quit()
def ob(x, y):
    """Build a square 30x30 obstacle rect with its top-left corner at (x, y)."""
    side = 30
    return Rect(x, y, side, side)
def colorFromDistance(loc1, loc2):
    """Map the distance between two points to a shade of red.

    The distance is capped at 127, so the red channel stays within
    [1, 255]; nearer points produce a brighter red.
    """
    separation = math.hypot(loc2[0] - loc1[0], loc2[1] - loc1[1])
    capped = min(127, separation)
    return (255 - capped * 2, 0, 0)
# Start the game only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| 27.472222 | 80 | 0.581901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.04095 |
1bcec325b98d968512d016ef80af5e36f6c9e424 | 3,422 | py | Python | backend/db/entities/mixin/pengaturan.py | R-N/sistem_gaji_vue_thrift | 9ba800b4d8e7849e2c6c4016cb32633caab087be | [
"MIT"
] | null | null | null | backend/db/entities/mixin/pengaturan.py | R-N/sistem_gaji_vue_thrift | 9ba800b4d8e7849e2c6c4016cb32633caab087be | [
"MIT"
] | null | null | null | backend/db/entities/mixin/pengaturan.py | R-N/sistem_gaji_vue_thrift | 9ba800b4d8e7849e2c6c4016cb32633caab087be | [
"MIT"
] | null | null | null | from sqlalchemy import Column, Integer, Numeric
from sqlalchemy.ext.declarative import declared_attr
from .pengaturan_base import MxPengaturanBase
class MxPengaturan(MxPengaturanBase):
    """Payroll-settings mixin declaring the SQLAlchemy columns for insurance
    rates, fixed deductions and allowances.

    Column names are Indonesian payroll terms (perusahaan = company side,
    karyawan = employee side; upah = wage, iuran = dues, uang = allowance).
    """
    # TODO: Set precision & scale for Numerics
    @declared_attr
    def bpjs_ketenagakerjaan_perusahaan(cls):
        # Fractional rate, hence Numeric rather than Integer.
        return Column(Numeric, nullable=False)
    @declared_attr
    def bpjs_ketenagakerjaan_karyawan(cls):
        return Column(Numeric, nullable=False)
    @declared_attr
    def bpjs_kesehatan_perusahaan(cls):
        return Column(Numeric, nullable=False)
    @declared_attr
    def bpjs_kesehatan_karyawan(cls):
        return Column(Numeric, nullable=False)
    @declared_attr
    def upah_minimum(cls):
        # Whole-currency amounts below are Integer columns.
        return Column(Integer, nullable=False)
    @declared_attr
    def iuran_rumah(cls):
        return Column(Integer, nullable=False)
    @declared_attr
    def iuran_koperasi(cls):
        return Column(Integer, nullable=False)
    @declared_attr
    def pendaftaran_koperasi(cls):
        return Column(Integer, nullable=False)
    @declared_attr
    def uang_makan(cls):
        return Column(Integer, nullable=False)
    @declared_attr
    def uang_transport(cls):
        return Column(Integer, nullable=False)
    @declared_attr
    def koef_absen(cls):
        # Absence coefficient is fractional, hence Numeric.
        return Column(Numeric, nullable=False)
    # NOTE(review): the disabled mx_init below assigns
    # bpjs_ketenagakerjaan_karyawan twice and never sets
    # bpjs_kesehatan_karyawan -- fix that before re-enabling it.
    '''
    def mx_init(
        self,
        *args,
        bpjs_ketenagakerjaan_perusahaan,
        bpjs_ketenagakerjaan_karyawan,
        bpjs_kesehatan_perusahaan,
        bpjs_kesehatan_karyawan,
        upah_minimum,
        iuran_rumah,
        iuran_koperasi,
        pendaftaran_koperasi,
        uang_makan,
        uang_transport,
        koef_absen,
        **kwargs
    ):
        MxPengaturanBase.mx_init(*args, **kwargs)
        self.bpjs_ketenagakerjaan_perusahaan = bpjs_ketenagakerjaan_perusahaan
        self.bpjs_ketenagakerjaan_karyawan = bpjs_ketenagakerjaan_karyawan
        self.bpjs_kesehatan_perusahaan = bpjs_kesehatan_perusahaan
        self.bpjs_ketenagakerjaan_karyawan = bpjs_ketenagakerjaan_karyawan
        self.upah_minimum = upah_minimum
        self.iuran_rumah = iuran_rumah
        self.iuran_koperasi = iuran_koperasi
        self.pendaftaran_koperasi = pendaftaran_koperasi
        self.uang_makan = uang_makan
        self.uang_transport = uang_transport
        self.koef_absen = koef_absen
    '''
    def mx_reconstruct(self):
        # Delegates straight to the base; kept for symmetry with siblings.
        MxPengaturanBase.mx_reconstruct(self)
    def mx_repr(self):
        return '%s' % (MxPengaturanBase.mx_repr(self),)
    '''
    def mx_repr(self):
        return "TODO" % (
            self.id, self.nama,
        )
    '''
    def mx_init_repr(self):
        """Return the base repr dict extended with this mixin's columns."""
        ret = MxPengaturanBase.mx_init_repr(self)
        ret.update({
            'bpjs_ketenagakerjaan_perusahaan': self.bpjs_ketenagakerjaan_perusahaan,
            'bpjs_ketenagakerjaan_karyawan': self.bpjs_ketenagakerjaan_karyawan,
            'bpjs_kesehatan_perusahaan': self.bpjs_kesehatan_perusahaan,
            'bpjs_kesehatan_karyawan': self.bpjs_kesehatan_karyawan,
            'upah_minimum': self.upah_minimum,
            'iuran_rumah': self.iuran_rumah,
            'iuran_koperasi': self.iuran_koperasi,
            'pendaftaran_koperasi': self.pendaftaran_koperasi,
            'uang_makan': self.uang_makan,
            'uang_transport': self.uang_transport,
            'koef_absen': self.koef_absen
        })
        return ret
| 30.553571 | 84 | 0.677382 | 3,272 | 0.956166 | 0 | 0 | 1,035 | 0.302455 | 0 | 0 | 1,406 | 0.410871 |
1bcf3d5c805b7e301f99bf048c1c6a934739d6e9 | 11,234 | py | Python | omnibus/replserver.py | wrmsr/omnibus | 3c4ef5eb17b0fff8593fa6a2284337bf193c18d3 | [
"BSD-3-Clause"
] | 2 | 2020-06-17T19:54:09.000Z | 2020-06-18T20:10:26.000Z | omnibus/replserver.py | wrmsr/omnibus | 3c4ef5eb17b0fff8593fa6a2284337bf193c18d3 | [
"BSD-3-Clause"
] | null | null | null | omnibus/replserver.py | wrmsr/omnibus | 3c4ef5eb17b0fff8593fa6a2284337bf193c18d3 | [
"BSD-3-Clause"
] | null | null | null | """
socat - UNIX-CONNECT:repl.sock
import sys, threading, pdb, functools
def _attach(repl):
frame = sys._current_frames()[threading.enumerate()[0].ident]
debugger = pdb.Pdb(
stdin=repl.conn.makefile('r'),
stdout=repl.conn.makefile('w'),
)
debugger.reset()
while frame:
frame.f_trace = debugger.trace_dispatch
debugger.botframe = frame
frame = frame.f_back
debugger.set_step()
frame.f_trace = debugger.trace_dispatch
"""
import ast
import codeop
import contextlib
import errno
import functools
import logging
import os
import socket as socket_
import sys
import threading
import traceback
import types
import typing as ta
import weakref
from . import check
log = logging.getLogger(__name__)
class DisconnectException(Exception):
    """Raised when the remote REPL client hangs up (recv returns no data)."""
class InteractiveSocketConsole:
    """code.InteractiveConsole but just different enough to not be worth subclassing.

    Reads lines from a connected socket, compiles them incrementally with
    codeop, executes them in a shared locals dict, and writes results and
    tracebacks back over the same socket.
    """

    # Wire encoding used for both reading input and writing output.
    ENCODING = 'utf-8'

    def __init__(
            self,
            conn: socket_.socket,
            locals: ta.MutableMapping = None,
            filename: str = '<console>'
    ) -> None:
        super().__init__()
        if locals is None:
            # Fresh namespace; expose the console itself as __console__.
            locals = {
                '__name__': '__console__',
                '__doc__': None,
                '__console__': self,
            }
        self._conn = conn
        self._locals = locals
        self._filename = filename
        self._compiler = codeop.CommandCompiler()
        # Accumulates lines of a multi-line statement until it compiles.
        self._buffer: ta.List[str] = []
        # _count numbers executed inputs; _write_count marks the input whose
        # `_N` result variable should be echoed back (see run_source/run_code).
        self._count = 0
        self._write_count = -1

    def reset_buffer(self) -> None:
        self._buffer = []

    @property
    def conn(self) -> socket_.socket:
        return self._conn

    CPRT = 'Type "help", "copyright", "credits" or "license" for more information.'

    def interact(self, banner: str = None, exitmsg: str = None) -> None:
        """Run the read-eval-print loop until EOF, disconnect or a bad fd."""
        log.info(f'Console {id(self)} on thread {threading.current_thread().ident} interacting')
        try:
            ps1 = getattr(sys, 'ps1', '>>> ')
            ps2 = getattr(sys, 'ps2', '... ')
            if banner is None:
                self.write(
                    'Python %s on %s\n%s\n(%s)\n' %
                    (sys.version, sys.platform, self.CPRT, self.__class__.__name__))
            elif banner:
                self.write('%s\n' % (str(banner),))
            more = False
            while True:
                try:
                    try:
                        # ps2 while a multi-line statement is pending.
                        line = self.raw_input(ps2 if more else ps1)
                    except EOFError:
                        self.write('\n')
                        break
                    else:
                        more = self.push_line(line)
                except KeyboardInterrupt:
                    self.write('\nKeyboardInterrupt\n')
                    self.reset_buffer()
                    more = False
            if exitmsg is None:
                self.write('now exiting %s...\n' % self.__class__.__name__)
            elif exitmsg != '':
                self.write('%s\n' % exitmsg)
        except DisconnectException:
            # Peer hung up mid-session; nothing more to do.
            pass
        except OSError as oe:
            if oe.errno == errno.EBADF:
                # Socket already closed underneath us (e.g. server shutdown).
                pass
        finally:
            log.info(f'Console {id(self)} on thread {threading.current_thread().ident} finished')

    def push_line(self, line: str) -> bool:
        """Append a line to the buffer; return True if more input is needed."""
        self._buffer.append(line)
        source = '\n'.join(self._buffer)
        more = self.run_source(source, self._filename)
        if not more:
            self.reset_buffer()
        return more

    def raw_input(self, prompt: str = '') -> str:
        """Write the prompt, then read one newline-terminated line.

        Reads one byte at a time; an empty recv means the peer disconnected.
        """
        self.write(prompt)
        buf = b''
        while True:
            b = self._conn.recv(1)
            if not b:
                raise DisconnectException
            if b == b'\n':
                break
            buf += b
        return buf.decode(self.ENCODING)

    def write(self, data: str) -> None:
        self._conn.send(data.encode(self.ENCODING))

    def compile(
            self,
            source: ta.Union[str, ast.AST],
            filename: str = '<input>',
            symbol: str = 'single'
    ) -> ta.Optional[types.CodeType]:
        """Compile source (text or AST); None means the input is incomplete."""
        if isinstance(source, ast.AST):
            # CommandCompiler.compiler is the underlying codeop.Compile,
            # which accepts an AST directly.
            return self._compiler.compiler(source, filename, symbol)
        else:
            return self._compiler(source, filename, symbol)

    def run_source(
            self,
            source: ta.Union[str, ast.AST],
            filename: str = '<input>',
            symbol: str = 'single',
    ) -> bool:
        """Compile and run source; return True if more input is required."""
        try:
            code = self.compile(source, filename, symbol)
        except (OverflowError, SyntaxError, ValueError):
            # Case 1 (incorrect)
            self.show_syntax_error(filename)
            return False
        if code is None:
            # Case 2 (incomplete)
            return True
        # Case 3 (complete)
        try:
            node = ast.parse(source)
        except (OverflowError, SyntaxError, ValueError):
            return True
        # If the final statement is a bare expression, rewrite it into an
        # assignment `_N = <expr>` so run_code can echo the value and keep
        # a history of results in the locals dict.
        if isinstance(node, ast.Module) and node.body and isinstance(node.body[-1], ast.Expr):
            expr = node.body[-1]
            source = ast.Interactive(
                [
                    *node.body[:-1],
                    ast.Assign(
                        [ast.Name(
                            f'_{self._count}',
                            ast.Store(),
                            lineno=expr.lineno,
                            col_offset=expr.col_offset,
                        )],
                        expr.value,
                        lineno=expr.lineno,
                        col_offset=expr.col_offset,
                    )
                ],
            )
            ast.fix_missing_locations(source)
            # Remember that this iteration's result should be written back.
            self._write_count = self._count
            code = self.compile(source, filename, symbol)
        self.run_code(code)
        return False

    def run_code(self, code: types.CodeType) -> None:
        """Execute compiled code in the console namespace; report errors."""
        try:
            exec(code, self._locals)
        except SystemExit:
            raise
        except Exception:
            self.show_traceback()
        else:
            if self._count == self._write_count:
                # The input ended in an expression; echo its `_N` value.
                self.write(repr(self._locals[f'_{self._count}']))
                self.write('\n')
            self._count += 1

    def show_traceback(self) -> None:
        """Send the current exception's traceback (minus this frame) to the client."""
        sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
        sys.last_traceback = last_tb
        try:
            # tb_next skips the run_code frame itself.
            lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next)
            self.write(''.join(lines))
        finally:
            # Drop local traceback refs (mirrors stdlib code.py) to avoid cycles.
            last_tb = ei = None

    def show_syntax_error(self, filename: str = None) -> None:
        """Send a formatted SyntaxError, patching in the console filename."""
        type, value, tb = sys.exc_info()
        sys.last_type = type
        sys.last_value = value
        sys.last_traceback = tb
        if filename and type is SyntaxError:
            # Work hard to stuff the correct filename in the exception
            try:
                msg, (dummy_filename, lineno, offset, line) = value.args
            except ValueError:
                # Not the format we expect; leave it alone
                pass
            else:
                # Stuff in the right filename
                value = SyntaxError(msg, (filename, lineno, offset, line))
                sys.last_value = value
        lines = traceback.format_exception_only(type, value)
        self.write(''.join(lines))
class ReplServer:
    """Serves an interactive Python REPL over a Unix-domain socket.

    Each accepted connection gets its own daemon thread running an
    InteractiveSocketConsole. Use as a context manager; exiting the
    context triggers shutdown().
    """

    CONNECTION_THREAD_NAME = 'ReplServerConnection'

    def __init__(
        self,
        path: str,
        *,
        file_mode: int = None,
        poll_interval: float = 0.5,
        exit_timeout: float = 10.0,
    ) -> None:
        super().__init__()
        self._path = path
        # NOTE(review): file_mode is stored but never applied to the socket
        # file anywhere in this class -- confirm whether it should chmod _path.
        self._file_mode = file_mode
        self._poll_interval = poll_interval
        self._exit_timeout = exit_timeout
        self._socket: socket_.socket = None
        self._is_running = False
        # Weak keys: finished connection threads drop out automatically.
        self._consoles_by_threads: ta.MutableMapping[threading.Thread, InteractiveSocketConsole] = weakref.WeakKeyDictionary()  # noqa
        self._is_shut_down = threading.Event()
        self._should_shutdown = False

    def __enter__(self):
        check.state(not self._is_running)
        check.state(not self._is_shut_down.is_set())
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self._is_shut_down.is_set():
            self.shutdown(True, self._exit_timeout)

    def run(self) -> None:
        """Bind the socket and accept connections until shutdown() is called.

        Single-use: the shutdown event is never cleared, so a ReplServer
        instance cannot be run twice.
        """
        check.state(not self._is_running)
        check.state(not self._is_shut_down.is_set())
        # Remove a stale socket file from a previous run.
        if os.path.exists(self._path):
            os.unlink(self._path)
        self._socket = socket_.socket(socket_.AF_UNIX, socket_.SOCK_STREAM)
        # Timeout makes accept() poll so the shutdown flag is re-checked.
        self._socket.settimeout(self._poll_interval)
        self._socket.bind(self._path)
        with contextlib.closing(self._socket):
            self._socket.listen(1)
            log.info(f'Repl server listening on file {self._path}')
            self._is_running = True
            try:
                while not self._should_shutdown:
                    try:
                        conn, _ = self._socket.accept()
                    except socket_.timeout:
                        continue
                    log.info(f'Got repl server connection on file {self._path}')
                    def run(conn):
                        # Per-connection thread body: own console, own copy
                        # of this module's globals as the namespace.
                        with contextlib.closing(conn):
                            variables = globals().copy()
                            console = InteractiveSocketConsole(conn, variables)
                            variables['__console__'] = console
                            log.info(
                                f'Starting console {id(console)} repl server connection '
                                f'on file {self._path} '
                                f'on thread {threading.current_thread().ident}'
                            )
                            self._consoles_by_threads[threading.current_thread()] = console
                            console.interact()
                    thread = threading.Thread(
                        target=functools.partial(run, conn),
                        daemon=True,
                        name=self.CONNECTION_THREAD_NAME)
                    thread.start()
                # Shutdown path: close every live console's socket, then give
                # the connection threads a bounded time to finish.
                for thread, console in self._consoles_by_threads.items():
                    try:
                        console.conn.close()
                    except Exception:
                        log.exception('Error shutting down')
                for thread in self._consoles_by_threads.keys():
                    try:
                        thread.join(self._exit_timeout)
                    except Exception:
                        log.exception('Error shutting down')
                os.unlink(self._path)
            finally:
                self._is_shut_down.set()
                self._is_running = False

    def shutdown(self, block: bool = False, timeout: float = None) -> None:
        """Request the accept loop to stop; optionally wait for completion."""
        self._should_shutdown = True
        if block:
            self._is_shut_down.wait(timeout=timeout)
def _main():
    """Serve a REPL on ./repl.sock until the server is shut down."""
    server = ReplServer('repl.sock')
    with server:
        server.run()
# Run the demo server when executed directly (not on import).
if __name__ == '__main__':
    _main()
| 30.778082 | 134 | 0.521186 | 10,331 | 0.919619 | 0 | 0 | 73 | 0.006498 | 0 | 0 | 1,580 | 0.140644 |
1bcf777e137c1ad0fceae7a7461a0f96f18dbe05 | 127 | py | Python | test1/views/models/axfundaddress.py | biz2013/xwjy | 8f4b5e3e3fc964796134052ff34d58d31ed41904 | [
"Apache-2.0"
] | 1 | 2019-12-15T16:56:44.000Z | 2019-12-15T16:56:44.000Z | coinExchange/trading/views/models/axfundaddress.py | biz2013/xwjy | 8f4b5e3e3fc964796134052ff34d58d31ed41904 | [
"Apache-2.0"
] | 87 | 2018-01-06T10:18:31.000Z | 2022-03-11T23:32:30.000Z | test1/views/models/axfundaddress.py | biz2013/xwjy | 8f4b5e3e3fc964796134052ff34d58d31ed41904 | [
"Apache-2.0"
] | null | null | null | class AXFundAddress(object):
def __init__(self, address, alias):
self.address = address
self.alias = alias
| 25.4 | 39 | 0.653543 | 126 | 0.992126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
1bcf7bc1a1dc49cb3796cf30ba83ab69627ad026 | 6,158 | py | Python | datasets/shapes3d.py | zhuxinqimac/CommutativeLieGroupVAE-Pytorch | 06020834b1ea4abff305d8fb300c3d8fba5b0f27 | [
"Apache-2.0"
] | 13 | 2021-06-08T03:06:36.000Z | 2021-11-01T04:53:44.000Z | datasets/shapes3d.py | zhuxinqimac/CommutativeLieGroupVAE-Pytorch | 06020834b1ea4abff305d8fb300c3d8fba5b0f27 | [
"Apache-2.0"
] | null | null | null | datasets/shapes3d.py | zhuxinqimac/CommutativeLieGroupVAE-Pytorch | 06020834b1ea4abff305d8fb300c3d8fba5b0f27 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#-*- coding: utf-8 -*-
# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.
# Licensed under the Apache License, Version 2.0 (the "License")
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# --- File Name: shapes3d.py
# --- Creation Date: 16-01-2021
# --- Last Modified: Tue 13 Apr 2021 16:55:42 AEST
# --- Author: Xinqi Zhu
# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<
"""
Dataset for 3D Shapes
"""
import numpy as np
from torch.utils.data import Dataset
import os
import shutil
import h5py
import zipfile
from PIL import Image
import torch
import random
from datasets.transforms import PairTransform
class shapes3d(Dataset):
    """3D Shapes dataset backed by the ``3dshapes.h5`` archive.

    Args:
        root (str): Directory containing ``3dshapes.h5``.
        transform (``Transform``, optional): Callable applied to each PIL
            image before it is returned, e.g. ``transforms.RandomCrop``.
        fixed_shape: Stored on the instance; not used internally here.
    """
    def __init__(self, root, transform=None, fixed_shape=None):
        super().__init__()
        self.file = root
        self.transform = transform
        self.fixed_shape = fixed_shape
        self.dataset_zip = self.load_data()
        # Materialized images: uint8 array of shape [480000, 64, 64, 3].
        self.data = self.dataset_zip['images'][:]
        # Number of values each generative factor can take.
        self.latents_sizes = np.array([10, 10, 10, 8, 4, 15])
        # Mixed-radix place values so a factor tuple maps to a flat index.
        self.latents_bases = np.concatenate(
            (self.latents_sizes[::-1].cumprod()[::-1][1:], np.array([1, ])))
        # Per-example factor labels: float64 array of shape [480000, 6].
        self.latents_classes = self.dataset_zip['labels'][:]

    def generative_factors(self, index):
        """Return the generative-factor labels for ``index``."""
        return self.latents_classes[index]

    def latent_to_index(self, latents):
        """Convert a factor tuple into its flat dataset index."""
        return np.dot(latents, self.latents_bases).astype(int)

    def index_to_latent(self, index):
        """Look up the stored factor labels for a flat index."""
        return self.latents_classes[index]

    def get_img_by_latent(self, latent_code):
        """
        Returns the image defined by the latent code
        Args:
            latent_code (:obj:`list` of :obj:`int`): Latent code of length 6 defining each generative factor
        Returns:
            Image defined by given code
        """
        return self[self.latent_to_index(latent_code)]

    def sample_latent(self):
        """Draw one uniformly random value per generative factor."""
        return np.array([np.random.randint(0, size) for size in self.latents_sizes])

    def load_data(self):
        """Open the HDF5 archive read-only and return the file handle."""
        return h5py.File(os.path.join(self.file, "3dshapes.h5"), 'r')

    def __getitem__(self, index):
        img = Image.fromarray(self.data[index])
        factors = self.latents_classes[index]
        if self.transform is not None:
            img = self.transform(img)
        # The first factor column is dropped from the returned labels.
        return img, factors[1:]

    def __len__(self):
        return self.data.shape[0]
class PairShapes3D(shapes3d):
    def __init__(self, root, download=False, transform=None, offset=2, max_varied=1, wrapping=False, noise_name=None, output_targets=True, fixed_shape=None):
        """ dSprites dataset with symmetry sampling included if output_targets is True.
        Args:
            root (str): Root directory of dataset containing '3dshapes.h5' or to download it to
            transform (``Transform``, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.RandomCrop``
            offset (int, list[int]): Offset of generative factor indices when sampling symmetries
            max_varied (int): Max number of symmetries acting per observation
            wrapping (bool): Wrap at boundaries or invert action
            noise_name (str): Name of noise to add, default None
            output_targets (bool): If True output image pair corresponding to symmetry action. If False, standard dSprites.
        """
        super().__init__(root, transform)
        # Factor indices eligible for symmetry sampling (index 4 excluded).
        # NOTE(review): shapes3d.__getitem__ returns labels[1:] (5 entries),
        # yet index 5 and the 6-entry latents_sizes/bases are used below --
        # confirm the intended label length before relying on this class.
        self.factor = [0, 1, 2, 3, 5]
        self.offset = offset
        self.max_varied = max_varied
        self.wrapping = wrapping
        self.noise_transform = PairTransform(noise_name) if noise_name is not None else None
        self.output_targets = output_targets
    def get_next_img_by_offset(self, label1, img1, factor):
        """Apply a random-signed offset to the chosen factors; return (index, offset)."""
        # Per-factor caps on how far an offset may reach.
        max_offsets = [10, 10, 10, 8, 1, 15]
        new_latents = np.array(list(label1))
        # Offset vector lives on the same device as the image tensor.
        offset = torch.zeros(label1.shape).to(img1.device)
        for f in factor:
            cur_offset = self.offset if self.offset < max_offsets[f] else max_offsets[f]
            # Flip the direction of the action with probability 0.5.
            if torch.rand(1) < 0.5:
                cur_offset = cur_offset * -1
            if self.wrapping:
                new_latents[f] = (label1[f] + cur_offset) % (self.latents_sizes[f])
            else:
                # Clamp at the factor's valid range instead of wrapping.
                new_latents[f] = (label1[f] + cur_offset).clip(min=0, max=self.latents_sizes[f]-1)
            offset[f] = cur_offset
        idx = self.latent_to_index(new_latents)
        return idx, offset
    def get_next_img_by_rand(self, latent1):
        """Pick a uniformly random second sample; offset is the label difference."""
        idx = torch.randint(len(self), (1,)).int()
        offset = self.index_to_latent(idx)[1:] - latent1
        return idx, offset
    def __getitem__(self, index):
        """Return ((img1, offset), img2) where img2 is img1 acted on by the offset."""
        factor = self.factor
        img1, label1 = super().__getitem__(index)
        if not self.output_targets:
            # Plain dataset behaviour: no pair sampling.
            return img1, label1
        if not isinstance(factor, list):
            factor = [factor]
        else:
            # Sample which factors act this time (with replacement).
            factor = random.choices(factor, k=self.max_varied)
        # TODO: Always set offset to 1 for val set? So we can eval metrics. Images wouldn't show multi steps though...
        if self.offset != -1:
            idx, offset = self.get_next_img_by_offset(label1, img1, factor)
        else:
            # offset == -1 selects the fully random pairing mode.
            idx, offset = self.get_next_img_by_rand(label1)
        img2, label2 = super().__getitem__(idx)
        if self.noise_transform is not None:
            img1, img2 = self.noise_transform(img1, img2)
        return (img1, offset), img2
| 36.223529 | 162 | 0.628126 | 5,518 | 0.89607 | 0 | 0 | 0 | 0 | 0 | 0 | 2,150 | 0.349139 |
1bd06e44fc58003b96dd0ef2c14102eab2685ebb | 1,920 | py | Python | public_data/serializers.py | danamlewis/open-humans | 9b08310cf151f49032b66ddd005bbd47d466cc4e | [
"MIT"
] | 57 | 2016-09-01T21:55:52.000Z | 2022-03-27T22:15:32.000Z | public_data/serializers.py | danamlewis/open-humans | 9b08310cf151f49032b66ddd005bbd47d466cc4e | [
"MIT"
] | 464 | 2015-03-23T18:08:28.000Z | 2016-08-25T04:57:36.000Z | public_data/serializers.py | danamlewis/open-humans | 9b08310cf151f49032b66ddd005bbd47d466cc4e | [
"MIT"
] | 25 | 2017-01-24T16:23:27.000Z | 2021-11-07T01:51:42.000Z | from collections import OrderedDict
from rest_framework import serializers
from data_import.models import DataFile
from open_humans.models import User
from private_sharing.models import project_membership_visible
class PublicDataFileSerializer(serializers.ModelSerializer):
    """
    Serialize a public data file.

    Member identity fields are only included when the member's project
    membership is publicly visible; otherwise they are nulled out.
    """
    metadata = serializers.JSONField()
    def to_representation(self, data):
        """Build the output dict for one DataFile, respecting visibility rules."""
        ret = OrderedDict()
        fields = self.get_fields()
        query_params = dict(self.context.get("request").query_params)
        source = getattr(data, "source")
        user_t = getattr(data, "user")
        usernames = []
        if "username" in query_params:
            # NOTE(review): query_params values are presumably lists of strings
            # (one per repeated query arg); the membership test below relies on it.
            usernames = query_params["username"]
        visible = project_membership_visible(user_t.member, source)
        # Client filtered by this username but the membership is private:
        # return an empty object instead of the file's data.
        if (user_t.username in usernames) and not visible:
            return ret
        request = self.context.get("request", None)
        for field in fields:
            item = getattr(data, str(field))
            if isinstance(item, User):
                if visible:
                    member = getattr(user_t, "member")
                    user = {
                        "id": getattr(member, "member_id"),
                        "name": getattr(member, "name"),
                        "username": getattr(item, "username"),
                    }
                else:
                    # Membership private: keep the file, anonymize the user.
                    user = {"id": None, "name": None, "username": None}
                ret["user"] = user
            elif field == "download_url":
                # download_url is a callable that takes the current request.
                ret["download_url"] = item(request)
            else:
                ret[str(field)] = getattr(data, field)
        return ret
    class Meta: # noqa: D101
        model = DataFile
        fields = (
            "id",
            "basename",
            "created",
            "download_url",
            "metadata",
            "source",
            "user",
        )
| 32 | 71 | 0.536458 | 1,702 | 0.886458 | 0 | 0 | 0 | 0 | 0 | 0 | 279 | 0.145313 |
1bd08961b282ce3b26745bfd817a53d3ce607b1f | 164 | py | Python | swingtrader/stockmarketapi/__init__.py | kabylkas/swingtrader | 8682e33464883f54b80f9764cfaf3cc9248774a0 | [
"Apache-2.0"
] | null | null | null | swingtrader/stockmarketapi/__init__.py | kabylkas/swingtrader | 8682e33464883f54b80f9764cfaf3cc9248774a0 | [
"Apache-2.0"
] | null | null | null | swingtrader/stockmarketapi/__init__.py | kabylkas/swingtrader | 8682e33464883f54b80f9764cfaf3cc9248774a0 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021-2022 Kabylkas Labs.
# Licensed under the Apache License, Version 2.0.
from .stockmarketapi import Stock
from .stockmarketapi import StockBucket | 41 | 49 | 0.804878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.542683 |
1bd09b12f21d9acb607142944ec3b8ac94113d29 | 3,236 | py | Python | xero_python/payrolluk/models/timesheet_line_object.py | sromero84/xero-python | 89558c0baa8080c3f522701eb1b94f909248dbd7 | [
"MIT"
] | null | null | null | xero_python/payrolluk/models/timesheet_line_object.py | sromero84/xero-python | 89558c0baa8080c3f522701eb1b94f909248dbd7 | [
"MIT"
] | null | null | null | xero_python/payrolluk/models/timesheet_line_object.py | sromero84/xero-python | 89558c0baa8080c3f522701eb1b94f909248dbd7 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Xero Payroll UK
This is the Xero Payroll API for orgs in the UK region. # noqa: E501
OpenAPI spec version: 2.3.4
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class TimesheetLineObject(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Generated model: prefer regenerating from the OpenAPI spec over hand edits.
    # Python attribute name -> declared OpenAPI model type.
    openapi_types = {
        "pagination": "Pagination",
        "problem": "Problem",
        "timesheet_line": "TimesheetLine",
    }
    # Python attribute name -> JSON field name on the wire.
    attribute_map = {
        "pagination": "pagination",
        "problem": "problem",
        "timesheet_line": "timesheetLine",
    }
    def __init__(
        self, pagination=None, problem=None, timesheet_line=None
    ): # noqa: E501
        """TimesheetLineObject - a model defined in OpenAPI""" # noqa: E501
        self._pagination = None
        self._problem = None
        self._timesheet_line = None
        self.discriminator = None
        # Only run the property setters when a value was actually supplied.
        if pagination is not None:
            self.pagination = pagination
        if problem is not None:
            self.problem = problem
        if timesheet_line is not None:
            self.timesheet_line = timesheet_line
    @property
    def pagination(self):
        """Gets the pagination of this TimesheetLineObject. # noqa: E501
        :return: The pagination of this TimesheetLineObject. # noqa: E501
        :rtype: Pagination
        """
        return self._pagination
    @pagination.setter
    def pagination(self, pagination):
        """Sets the pagination of this TimesheetLineObject.
        :param pagination: The pagination of this TimesheetLineObject. # noqa: E501
        :type: Pagination
        """
        self._pagination = pagination
    @property
    def problem(self):
        """Gets the problem of this TimesheetLineObject. # noqa: E501
        :return: The problem of this TimesheetLineObject. # noqa: E501
        :rtype: Problem
        """
        return self._problem
    @problem.setter
    def problem(self, problem):
        """Sets the problem of this TimesheetLineObject.
        :param problem: The problem of this TimesheetLineObject. # noqa: E501
        :type: Problem
        """
        self._problem = problem
    @property
    def timesheet_line(self):
        """Gets the timesheet_line of this TimesheetLineObject. # noqa: E501
        :return: The timesheet_line of this TimesheetLineObject. # noqa: E501
        :rtype: TimesheetLine
        """
        return self._timesheet_line
    @timesheet_line.setter
    def timesheet_line(self, timesheet_line):
        """Sets the timesheet_line of this TimesheetLineObject.
        :param timesheet_line: The timesheet_line of this TimesheetLineObject. # noqa: E501
        :type: TimesheetLine
        """
        self._timesheet_line = timesheet_line
| 26.096774 | 92 | 0.621755 | 2,937 | 0.907602 | 0 | 0 | 1,623 | 0.501545 | 0 | 0 | 1,946 | 0.60136 |
1bd0cba8c6f81882ac13039efa512c0dfba5b8e9 | 4,492 | py | Python | monocle/resources.py | bigjust/django-monocle | f546a9061834b41a1a701ef89f8371bfaf4f1691 | [
"MIT"
] | 1 | 2019-04-21T17:25:16.000Z | 2019-04-21T17:25:16.000Z | monocle/resources.py | bigjust/django-monocle | f546a9061834b41a1a701ef89f8371bfaf4f1691 | [
"MIT"
] | null | null | null | monocle/resources.py | bigjust/django-monocle | f546a9061834b41a1a701ef89f8371bfaf4f1691 | [
"MIT"
] | null | null | null | import json
import os
import time
from django.template import Context
from django.template.loader import get_template
from django.utils.safestring import mark_safe
from monocle.settings import settings
class Resource(object):
    """A JSON-compatible response obtained from an OEmbed provider."""

    def __init__(self, url, data=None):
        self.url = url
        self.created = time.time()  # creation timestamp, used for staleness
        self._data = data or {}

    def __getitem__(self, key):
        # 'cache_age' is backed by the ttl property instead of raw data
        if key == 'cache_age':
            return self.ttl
        return self._data.get(key, '')

    def __setitem__(self, key, value):
        if key == 'cache_age':
            self.ttl = value
        else:
            self._data[key] = value

    def __contains__(self, key):
        return key in self._data

    def render(self):
        """
        Renders this resource to the template corresponding to this resource type.

        The template receives ``url`` (the originally requested URL) and
        ``resource`` (this object). If the resource fails :func:`is_valid`,
        the raw URL is returned, or a hyperlinked version of it when
        ``RESOURCE_URLIZE_INVALID`` is enabled in :mod:`monocle.settings`.

        :returns: Rendered oembed content
        """
        if self.is_valid:
            template_name = os.path.join('monocle', '%s.html' % self._data['type'])
        elif settings.RESOURCE_URLIZE_INVALID:
            template_name = 'monocle/link.html'
        else:
            return self.url

        template = get_template(template_name)
        context = Context({'url': self.url, 'resource': self})
        return mark_safe(template.render(context))

    @property
    def is_valid(self):
        """
        Validate this resource. It is considered valid when it has oembed
        response data, a recognized oembed resource type, and every attribute
        required for that type.
        """
        data = self._data
        if not data:
            # Resources may be created without any data at all
            return False

        resource_type = data.get('type')
        if resource_type not in settings.RESOURCE_TYPES:
            return False

        required = settings.RESOURCE_REQUIRED_ATTRS[resource_type]
        return all(field in data for field in required)

    @property
    def is_stale(self):
        """True when this resource has outlived its TTL since creation."""
        age = time.time() - self.created
        return age > self.ttl

    def refresh(self):
        """Mark this resource fresh again by resetting its creation time."""
        self.created = time.time()
        return self

    @property
    def json(self):
        """A JSON string with all empty/null keys stripped out."""
        cleaned = {k: v for k, v in self._data.items() if v}
        return json.dumps(cleaned)

    def get_ttl(self):
        """
        Return the TTL in seconds, clamped to at least ``RESOURCE_MIN_TTL``.
        Falls back to ``RESOURCE_DEFAULT_TTL`` when the provider-supplied
        ``cache_age`` is missing or not an integer.
        """
        raw = self._data.get('cache_age', settings.RESOURCE_DEFAULT_TTL)
        try:
            return max(settings.RESOURCE_MIN_TTL, int(raw))
        except (ValueError, TypeError):
            return settings.RESOURCE_DEFAULT_TTL

    def set_ttl(self, value):
        """
        Store a TTL, clamped to at least ``RESOURCE_MIN_TTL``; non-integer
        input falls back to ``RESOURCE_DEFAULT_TTL``.
        """
        try:
            clamped = max(settings.RESOURCE_MIN_TTL, int(value))
        except (ValueError, TypeError):
            clamped = settings.RESOURCE_DEFAULT_TTL
        self._data['cache_age'] = clamped

    ttl = property(get_ttl, set_ttl)
| 32.085714 | 88 | 0.616652 | 4,285 | 0.953918 | 0 | 0 | 1,278 | 0.284506 | 0 | 0 | 2,112 | 0.470169 |
1bd1d5cfc2e0f45a350b9178b490c86e26fca79d | 8,346 | py | Python | splunk-cluster/splunk_setup.py | outcoldman/docker-splunk-cluster | 5b3feb8131197b1a0a574dfea9ec4f20703c189b | [
"MIT"
] | 34 | 2016-07-22T16:37:49.000Z | 2021-11-19T22:32:30.000Z | splunk-cluster/splunk_setup.py | mhassan2/docker-splunk-cluster | 5b3feb8131197b1a0a574dfea9ec4f20703c189b | [
"MIT"
] | 5 | 2016-07-25T16:02:29.000Z | 2017-02-17T19:17:45.000Z | splunk-cluster/splunk_setup.py | outcoldman/docker-splunk-cluster | 5b3feb8131197b1a0a574dfea9ec4f20703c189b | [
"MIT"
] | 20 | 2016-07-26T01:02:47.000Z | 2019-09-20T03:01:52.000Z | import os
import sys
import json
import time
import socket
import re
import glob
import subprocess
import requests
import splunk.clilib.cli_common
import splunk.util
var_expandvars_re = re.compile(r'\AENV\((.*)\)$')
var_shell_re = re.compile(r'\ASHELL\((.*)\)$')
def main():
    """
    Initialize node. Can run before splunk started and after splunk started
    """
    # Dispatch on the first CLI flag; remaining argv entries are forwarded
    # as sub-arguments to the selected action.
    if sys.argv[1] == "--configure":
        configure()
    elif sys.argv[1] == "--wait-splunk":
        wait_splunk(sys.argv[2], sys.argv[3:])
    elif sys.argv[1] == "--add-licenses":
        add_licenses(sys.argv[2])
    elif sys.argv[1] == "--shc-autobootstrap":
        shc_autobootstrap(int(sys.argv[2]), sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8])
    else:
        # Unknown flag: fail with a non-zero exit status
        exit(1)
def configure():
    """
    using CONF__ notation you can define any configuration, examples
    CONF__[{location_under_splunk_home}__]{conf_file}__{stanza}__{key}=value
    If location_under_splunk_home is not specified - system is used.
    """
    # Allow to set any configurations with this. Python 2 code (iteritems).
    # First pass: collect all CONF__ env vars into
    # {conf_file_path: {stanza: {key: value}}}.
    conf_updates = {}
    for env, val in os.environ.iteritems():
        if env.startswith("CONF__"):
            parts = env.split("__")[1:]
            conf_file_name = None  # NOTE(review): assigned but never used
            parent = None  # NOTE(review): always None; see __get_conf_folder_full
            conf_folder = "system"
            if len(parts) == 4:
                # Leading token is a location under SPLUNK_HOME
                conf_folder = parts[0]
                parts = parts[1:]
            conf_folder_full = __get_conf_folder_full(conf_folder, parent)
            file_name = parts[0]
            if file_name == "meta":
                # "meta" is special-cased to metadata/local.meta
                file_name = "local.meta"
                subfolder = "metadata"
            else:
                file_name = file_name + ".conf"
                subfolder = "local"
            conf_file = os.path.join(conf_folder_full, subfolder, file_name)
            conf_updates.setdefault(conf_file, {}).setdefault(parts[1], {})[parts[2]] = __get_value(val)
    # Second pass: merge collected updates into each conf file on disk.
    for conf_file, conf_update in conf_updates.iteritems():
        conf = splunk.clilib.cli_common.readConfFile(conf_file) if os.path.exists(conf_file) else {}
        for stanza, values in conf_update.iteritems():
            dest_stanza = conf.setdefault(stanza, {})
            dest_stanza.update(values)
        # Drop an empty [default] stanza rather than writing it out
        if "default" in conf and not conf["default"]:
            del conf["default"]
        folder = os.path.dirname(conf_file)
        if not os.path.isdir(folder):
            os.makedirs(folder)
        splunk.clilib.cli_common.writeConfFile(conf_file, conf)
def __get_value(val):
    """Resolve a CONF__ environment-variable value.

    Two wrapper syntaxes are supported (matched by the module-level regexes):
      * ``ENV(text)``   -- expand environment variables inside *text*
      * ``SHELL(cmd)``  -- run *cmd* through the shell and return its output
    Any other value is returned unchanged.
    """
    var_expand_match = var_expandvars_re.match(val)
    if var_expand_match:
        return os.path.expandvars(var_expand_match.groups()[0])
    var_shell_match = var_shell_re.match(val)
    if var_shell_match:
        # BUG FIX: previously read ``var_expand_match.groups()`` here, but
        # var_expand_match is None on this branch, so every SHELL(...) value
        # raised AttributeError instead of executing the command.
        return subprocess.check_output(var_shell_match.groups()[0], shell=True)
    return val
def __get_conf_folder_full(conf_folder, parent):
    """Resolve a CONF__ location token to an absolute path under SPLUNK_HOME."""
    # NOTE(review): *parent* is accepted but never used -- confirm intent.
    root = os.environ["SPLUNK_HOME"]
    if conf_folder == "system":
        return os.path.join(root, "etc", conf_folder)
    return os.path.join(root, conf_folder)
def wait_splunk(uri, roles):
    """
    Wait 5 minutes for dependency
    """
    # Poll up to 300 times, one second apart (Python 2: xrange/print).
    for x in xrange(1, 300):
        try:
            # This url does not require authentication, ignore certificate
            response = requests.get(uri + "/services/server/info?output_mode=json", verify=False)
            if response.status_code == 200:
                server_roles = response.json()["entry"][0]["content"]["server_roles"]
                # Every requested role (regex) must match at least one
                # advertised server role; an empty roles list always passes.
                if not roles or all(any(re.match(role, server_role) for server_role in server_roles) for role in roles):
                    return
                else:
                    print "Waiting for " + ", ".join(roles) + " in " + uri + " got " + ", ".join(server_roles) + "."
            else:
                print "Waiting for "+ ", ".join(roles) + " in " + uri + "."
        except requests.exceptions.RequestException as exception:
            print "Waiting for " + ", ".join(roles) + " in " + uri + ". Exception: " + str(exception)
        time.sleep(1)
    print "Failed to connect to " + uri + " and check server roles " + ", ".join(roles)
    exit(1)
def add_licenses(folder):
    # Block until *.lic files appear under *folder*, then install each one
    # via the Splunk CLI. Polls forever (no timeout).
    while True:
        if os.path.isdir(folder):
            licenses = glob.glob(os.path.join(folder, "*.lic"))
            if licenses:
                # Adding all licenses one by one and break
                for license in licenses:
                    args = [
                        "add",
                        "licenses",
                        "-auth", "admin:changeme",
                        license
                    ]
                    __splunk_execute(args)
                break
        print "Waiting for license files under " + folder
        time.sleep(1)
def shc_autobootstrap(autobootstrap, mgmt_uri, local_user, local_password, service_discovery_uri, service_discovery_user, service_discovery_password):
    """
    Write current uri to the service discovery URL, if current member has index equal
    to INIT_SHCLUSTER_AUTOBOOTSTRAP - bootstrap SHC, if more - add itself to existing SHC
    """
    # Register this member's management URI with the discovery service,
    # then fetch the full, _key-sorted member list.
    __service_discovery_post(service_discovery_uri, service_discovery_user, service_discovery_password, data=json.dumps({"host": mgmt_uri}), headers={"Content-type": "application/json"})
    all_members = __service_discovery_get(service_discovery_uri, service_discovery_user, service_discovery_password, params={"sort": "_key"}).json()
    for index, member in enumerate(all_members):
        if member["host"] == mgmt_uri:
            if (index + 1) == autobootstrap:
                # This member completes the initial set: become SHC captain
                # with the first *autobootstrap* members as the server list.
                __splunk_execute([
                    "bootstrap",
                    "shcluster-captain",
                    "-auth", "%s:%s" % (local_user, local_password),
                    "-servers_list", ",".join(m["host"] for m in all_members[:autobootstrap])
                ])
            elif (index + 1) > autobootstrap:
                # We do not check if current list of members already bootstrapped, assuming that autobootstrap is always equal to
                # how many instances user creating at beginning
                __splunk_execute([
                    "add",
                    "shcluster-member",
                    "-auth", "%s:%s" % (local_user, local_password),
                    "-current_member_uri", next(m["host"] for m in all_members[:autobootstrap])
                ])
def __service_discovery_get(service_discovery_uri, service_discovery_user, service_discovery_password, **kwargs):
    # GET from the service-discovery endpoint with basic auth, retrying for
    # up to ~5 minutes before giving up and exiting the process.
    for x in xrange(1, 300):
        try:
            response = requests.get(service_discovery_uri,
                                    verify=False,
                                    auth=(service_discovery_user, service_discovery_password),
                                    **kwargs)
            response.raise_for_status()
            return response
        except requests.exceptions.RequestException as ex:
            print "Failed to make GET request to service discovery url. " + str(ex)
        sys.stdout.flush()
        sys.stderr.flush()
        time.sleep(1)
    print "FAILED. Could not make GET request to service discovery url."
    exit(1)
def __service_discovery_post(service_discovery_uri, service_discovery_user, service_discovery_password, **kwargs):
    # POST to the service-discovery endpoint with basic auth, retrying for
    # up to ~5 minutes (mirrors __service_discovery_get).
    for x in xrange(1, 300):
        try:
            response = requests.post(service_discovery_uri,
                                     verify=False,
                                     auth=(service_discovery_user, service_discovery_password),
                                     **kwargs)
            response.raise_for_status()
            return response
        except requests.exceptions.RequestException as ex:
            print "Failed to make POST request to service discovery url. " + str(ex)
        sys.stdout.flush()
        sys.stderr.flush()
        time.sleep(1)
    print "FAILED. Could not make POST request to service discovery url."
    exit(1)
def __splunk_execute(args):
    """
    Run the local Splunk CLI binary with *args*, flushing stdio around the
    call so subprocess output interleaves correctly with our own.
    """
    for stream in (sys.stdout, sys.stderr):
        stream.flush()
    cmd = [os.path.join(os.environ['SPLUNK_HOME'], "bin", "splunk")] + list(args)
    subprocess.check_call(cmd)
    for stream in (sys.stdout, sys.stderr):
        stream.flush()
if __name__ == "__main__":
main() | 38.818605 | 186 | 0.594896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,892 | 0.226695 |
1bd2497956029c869243241e0d24f1edfcdc8997 | 520 | py | Python | darvag/tags/forms.py | Erfi/dorathewordexplorer | 85bcbfd0baf3ba7a2bf511ab9a4fd1087c219c8b | [
"MIT"
] | null | null | null | darvag/tags/forms.py | Erfi/dorathewordexplorer | 85bcbfd0baf3ba7a2bf511ab9a4fd1087c219c8b | [
"MIT"
] | 17 | 2019-10-03T12:23:41.000Z | 2019-11-05T21:28:46.000Z | darvag/tags/forms.py | Erfi/german-api | 85bcbfd0baf3ba7a2bf511ab9a4fd1087c219c8b | [
"MIT"
] | null | null | null | from django.forms import Form, MultipleChoiceField, SelectMultiple, ModelMultipleChoiceField
# --- customizations ---
# --- customizations ---
class TagModelMultipleChoiceField(ModelMultipleChoiceField):
    # Display each tag by its ``name`` attribute instead of the default
    # string representation of the model instance.
    def label_from_instance(self, tag):
        return tag.name
class TagFilterForm(Form):
def __init__(self, *args, tags_queryset, **kwargs):
super().__init__(*args, **kwargs)
self.fields['tags'] = TagModelMultipleChoiceField(queryset=tags_queryset, widget=SelectMultiple, required=False)
| 30.588235 | 120 | 0.715385 | 370 | 0.711538 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.103846 |
1bd33e1813ca1443a89a2ae8c80e1f64e10a3043 | 316 | py | Python | tods/sk_interface/detection_algorithm/SOD_skinterface.py | ZhuangweiKang/tods | fe3f55f8ccb306dd292c668e0f1154f1afdfa556 | [
"Apache-2.0"
] | 544 | 2020-09-21T06:02:33.000Z | 2022-03-27T07:16:32.000Z | tods/sk_interface/detection_algorithm/SOD_skinterface.py | ZhuangweiKang/tods | fe3f55f8ccb306dd292c668e0f1154f1afdfa556 | [
"Apache-2.0"
] | 35 | 2020-09-21T06:33:13.000Z | 2022-03-11T14:20:21.000Z | tods/sk_interface/detection_algorithm/SOD_skinterface.py | ZhuangweiKang/tods | fe3f55f8ccb306dd292c668e0f1154f1afdfa556 | [
"Apache-2.0"
] | 86 | 2020-09-21T16:44:33.000Z | 2022-03-11T18:20:22.000Z | import numpy as np
from ..base import BaseSKI
from tods.detection_algorithm.PyodSOD import SODPrimitive
class SODSKI(BaseSKI):
    # Sklearn-style wrapper around the TODS SOD (Subspace Outlier Detection)
    # primitive; all hyperparameters are forwarded unchanged.
    def __init__(self, **hyperparams):
        super().__init__(primitive=SODPrimitive, **hyperparams)
        # Capability flags: fit/predict are supported, produce is not
        # (presumably consumed by BaseSKI -- confirm against its interface).
        self.fit_available = True
        self.predict_available = True
        self.produce_available = False
1bd36a5a050517e6c0b779b2d7a6a6b544da049f | 294 | py | Python | app/db_fill.py | justincredble/Circulation | 9c7537261a89acf737268943ab69fcb6f7d2af7f | [
"MIT"
] | null | null | null | app/db_fill.py | justincredble/Circulation | 9c7537261a89acf737268943ab69fcb6f7d2af7f | [
"MIT"
] | null | null | null | app/db_fill.py | justincredble/Circulation | 9c7537261a89acf737268943ab69fcb6f7d2af7f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from app import app, db
from app.models import User, Role
# Bootstrap script: create all tables, seed roles, and add a root user,
# all inside an explicit application context.
app_ctx = app.app_context()
app_ctx.push()
db.create_all()
Role.insert_roles()
# NOTE(review): credentials are hard-coded -- suitable for local dev seeding only.
admin = User(name=u'root', email='root@gmail.com', password='password')
db.session.add(admin)
db.session.commit()
app_ctx.pop()
1bd49f7322af6da9c72aaa3079fafb0bea1db117 | 9,405 | py | Python | FuelOxTrades.py | AA-284-Team-AA/Prop_Analysis | 47382d628388c3d6a2bb8992e04d483ad5992aed | [
"MIT"
] | 1 | 2020-12-15T11:38:54.000Z | 2020-12-15T11:38:54.000Z | FuelOxTrades.py | AA-284-Team-AA/Prop_Analysis | 47382d628388c3d6a2bb8992e04d483ad5992aed | [
"MIT"
] | null | null | null | FuelOxTrades.py | AA-284-Team-AA/Prop_Analysis | 47382d628388c3d6a2bb8992e04d483ad5992aed | [
"MIT"
] | 2 | 2020-01-24T23:57:09.000Z | 2020-12-15T11:38:59.000Z | import numpy as np
import matplotlib.pyplot as plt
from rocketcea.cea_obj_w_units import CEA_Obj
from rocketcea.cea_obj import add_new_fuel
def maintrade(fuel, oxi, testlims, N, P, testvar):
    """Run a CEA trade study for one fuel/oxidizer pair.

    fuel, oxi : propellant names as registered with rocketcea
    testlims  : [low, high] sweep limits for the chosen variable
    N         : number of sweep points
    P         : chamber pressure in psi (single int, or list of ints)
    testvar   : "OF" to sweep O/F ratio, "ER" to sweep nozzle expansion ratio

    Returns (ISP, Cstar, PCPE, cpcv, xval), where xval is the swept axis.
    """
    pltname = oxi + fuel + "_" + testvar + "_"  # prefix for plot image files
    if testvar == "ER":
        # BUG FIX: this previously read ``testvar[1]`` (i.e. the letter "R"),
        # not an O/F ratio; use the documented default of 1.5 instead.
        OFratio = 1.5
        supAR = np.linspace(testlims[0], testlims[1], num=N, endpoint=True)  # vary nozzle expansion ratio
        ISP, Cstar, PCPE, cpcv = ARtrade(fuel, oxi, P, N, OFratio, supAR, pltname)  # runs expansion ratio trade
        xval = supAR
    elif testvar == "OF":
        OFratio = np.linspace(testlims[0], testlims[1], num=N, endpoint=True)  # vary O/F ratio (by mass)
        ISP, Cstar, PCPE, cpcv = OFtrade(fuel, oxi, P, N, OFratio, pltname)  # runs O/F ratio trade
        xval = OFratio
    else:
        # Previously fell through to an UnboundLocalError; fail loudly instead.
        raise ValueError("testvar must be 'OF' or 'ER', got %r" % (testvar,))
    return ISP, Cstar, PCPE, cpcv, xval
# def findER(fu,ox,pcpe): # finds the ideal nozzle expansion ratio
# C = CEA_Obj(oxName=ox, fuelName=fu,
# isp_units='sec',
# cstar_units='m/s') # define CEA object to operate on for rocketCEA
# PCPE_fe = C.get_eps_at_PcOvPe(Pc=P,PcOvPe=pcpe)
# return PCPE_fe
# the XXtrade functions work as templates for running any trade you might want. Just add more get_"" from rocketcea to work with more variables along with corresponding input
def ARtrade(fu, ox, P, N, OFratio, supAR, pltname):
    """Expansion-ratio trade: sweep nozzle area ratio at a fixed O/F ratio.

    fu      : fuel name (as defined in rocketcea or newly added fuels)
    ox      : oxidizer name
    P       : chamber pressure in psi (single int, or a list/range of them)
    N       : number of area-ratio points (must match len(supAR))
    OFratio : fixed O/F ratio for this trade
    supAR   : supersonic area ratios to evaluate
    pltname : plot filename prefix (plot calls currently disabled)

    Returns (ISP, Cstar, PCPE, cpcv) arrays shaped (n_pressures, N).
    """
    cea = CEA_Obj(oxName=ox, fuelName=fu,
                  isp_units='sec',
                  cstar_units='m/s')  # CEA object used for all lookups

    # Normalize P to a list so a single int and a list are handled alike
    pressures = [P] if isinstance(P, int) else P

    shape = (len(pressures), supAR.shape[0])
    ISP = np.zeros(shape)    # vacuum Isp
    Cstar = np.zeros(shape)  # characteristic velocity
    PCPE = np.zeros(shape)   # chamber/exit pressure ratio
    cpcv = np.zeros(shape)   # chamber cp (per get_Chamber_Cp)

    for row, Pc in enumerate(pressures):
        for col in range(N):
            eps = supAR[col]
            ISP[row, col] = cea.get_Isp(Pc=Pc, MR=OFratio, eps=eps)
            Cstar[row, col] = cea.get_Cstar(Pc=Pc, MR=OFratio)
            PCPE[row, col] = cea.get_PcOvPe(Pc=Pc, MR=OFratio, eps=eps)
            cpcv[row, col] = cea.get_Chamber_Cp(Pc=Pc, MR=OFratio, eps=eps)

    # Plot generation (ISP / Cstar / Pc/Pe vs Ae/At) intentionally disabled,
    # matching the original implementation.
    return ISP, Cstar, PCPE, cpcv
def OFtrade(fu,ox,P,N,OFratio,pltname): # O/F ratio trade (OFratio needs to be a vector array)
    # fu: name of fuel (string, as defined in rocketcea documentation or newly added fuels)
    # ox: name of ox (string, as defined in rocketcea documentation of newly added fuels)
    # P: chamber pressure (either a single number, list of numbers, or range())
    # N: number of desired O/F ratios to sweep over
    # OFratio: values of O/F ratios (length of this list must match value of N)
    # Returns (ISP, Cstar, fe_pcpe, cpcv); the nozzle is sized for fully
    # expanded flow to 14.7 psi at each point.
    C = CEA_Obj(oxName=ox, fuelName=fu,
                isp_units='sec',
                cstar_units='m/s') # define CEA object to operate on for rocketCEA
    if isinstance(P,int)==True: # if P is only one value
        y = 1
    else:
        y = len(P)
    # preallocate vars
    ISP = np.zeros([y,OFratio.shape[0]]) # isp
    Cstar = np.zeros([y,OFratio.shape[0]]) # cstar eff
    PCPE = np.zeros([y,OFratio.shape[0]]) # pc/pe (NOTE(review): allocated but unused; fe_pcpe is returned instead)
    cpcv = np.zeros([y,OFratio.shape[0]]) # ratio of specific heats in thrust chamber
    fe_pcpe = np.zeros([y,OFratio.shape[0]]) # nozzle area ratio for fully expanded flow
    for x in range(y):
        if y==1:
            Pc = P # integers can't be called :(
            legends = str(Pc)
        else:
            Pc = P[x] # chamber pressure
            legends = P
        pr = Pc/14.7 # pc/pe for fully expanded flow (sea-level exit)
        for i in range(N):
            # NOTE(review): the whole-row assignment below overwrites results
            # stored for earlier i on every iteration -- likely intended to be
            # fe_pcpe[x,i]; confirm before relying on returned values.
            fe_pcpe[x,:]= C.get_eps_at_PcOvPe(Pc=Pc, MR=OFratio[i], PcOvPe=pr)
            ISP[x,i] = C.get_Isp(Pc=Pc, MR=OFratio[i], eps=fe_pcpe[x,i]) # ISP vacuum
            Cstar[x,i] = C.get_Cstar(Pc=Pc, MR=OFratio[i]) # Cstar efficiency
            # fe_pcpe is re-purposed here: the area ratio computed above is
            # replaced by the corresponding Pc/Pe value at that same index.
            fe_pcpe[x,i] = C.get_PcOvPe(Pc=Pc, MR=OFratio[i], eps=fe_pcpe[x,i]) # Pc/Pe
            cpcv[x,i] = C.get_Chamber_Cp(Pc=Pc, MR=OFratio[i], eps=fe_pcpe[x,i]) # cp/cv
    # generate plots for ISP, Cstar, and Pchamb/Pexit (currently disabled)
    return ISP, Cstar, fe_pcpe, cpcv
def plots(xvals, yvals, xname, yname, pltname, labels, plttit):
    """Plot one or more curves and save the figure to *pltname*.

    yvals may be 1-D (a single curve; *labels* is one legend string) or 2-D
    (one curve per row, e.g. one per chamber pressure; *labels* is a sequence
    of legend strings of matching length).
    """
    plt.figure()
    if yvals.ndim == 1:
        plt.plot(xvals, yvals[:], ms=10, label=str(labels))
        plt.xlim(min(xvals), max(xvals))
    else:
        for i in range(yvals.shape[0]):  # one line per row of yvals
            plt.plot(xvals, yvals[i, :], ms=10, label=str(labels[i]))
    plt.xlabel(xname)
    plt.ylabel(yname)
    plt.title(plttit)
    plt.legend(loc="lower right")
    plt.savefig(pltname)
    # BUG FIX: was ``plt.close`` (a bare attribute reference that never ran),
    # so repeated calls leaked every figure ever created.
    plt.close()
### ANALYSIS SET UP ###
# defining fuel will add it's information to the master list of fuels to run rocketCEA with.
# define PMMA
card_str = '''
fuel PMMA C 5 H 8 O 2
h,kj=-430.5 t(k)=299.82
''' # Greg Zilliac's recommendation for modeling PMMA
add_new_fuel('PMMA', card_str) # rocketCEA function to add PMMA to possible inputs
# define HTPB
card_str2 = '''
fuel HTPB C 7.3165 H 10.3360 O 0.1063
h,kj/mol= 456 t(k)=298.15 rho=0.9220
'''
add_new_fuel('HTPB', card_str2) # rocketCEA function to add HTPB to possible inputs
# define ABS (monomer of ABS)
card_str3 = '''
fuel ABS C 3 H 3 N 1
h,kj/mol=172 t(k)=299.82
'''
add_new_fuel('ABS (Monomer)', card_str3) # rocketCEA function to add HTPB to possible inputs
# define Paraffin
card_str4 = '''
fuel Paraffin C 32 H 66 wt%=100.00
h,kj/mol=-938 t(k)=298
'''
add_new_fuel('Paraffin', card_str4)
### BEGIN CODE TO RUN TRADES
# Compare all four fuels against gaseous oxygen over an O/F sweep at a
# single chamber pressure; only one chamber pressure is supported here.
testfs = ["PMMA","HTPB","ABS (Monomer)","Paraffin"]
testox = "GOX"
testvar = "OF" # pick OF or ER
N = 100 # number of points in trade study
P = 150 # chamber pressure, psi
pr = P/14.7 # pressure ratio Pc/Pe for fully expanded flow
ISPf = np.zeros([len(testfs),N]) # ISP comparing all fuels
Cstarf = np.zeros([len(testfs),N]) # Cstar comparing all fuels
PCPEf = np.zeros([len(testfs),N]) # Pchamb/Pexit
cpcvf = np.zeros([len(testfs),N]) # ratio of specific heats
fe_pcpe = np.zeros([len(testfs),N]) # nozzle area ratio for fully expanded flow
pltlbls = [] # labels for plot legend
for i in range(len(testfs)): # labels for each line in plot
    pltlbls.append(testfs[i] + "/" + testox)
# currently setup for runs with only ONE chamber pressure selection
for f in range(len(testfs)):
    ISPf[f,:],Cstarf[f,:],PCPEf[f,:],cpcvf[f,:],xvar = maintrade(testfs[f],testox,[0.1,10],N,P,testvar) # currently overwriting xvar every time
# save plots of results
pltname = testox + "_Fuel Comparison_PC=" + str(P) + "_"
plttit = "P_c = " + str(P) + ",Fully Expanded"
if testvar == "OF":
    plots(xvar,ISPf,"O/F Ratio","ISP (s)", pltname + "isp",pltlbls, "O/F Ratio vs. ISP, " + plttit)
    plots(xvar,Cstarf,"O/F Ratio","Cstar (m/s)",pltname + "cstar",pltlbls, "O/F Ratio vs. Cstar, " + plttit)
    plots(xvar,cpcvf,"O/F Ratio","Cp/Cv", pltname + "cpcv",pltlbls, "O/F Ratio vs. Cp/Cv, " + plttit)
    plots(xvar,PCPEf,"O/F Ratio","Ae/At", pltname + "aeat",pltlbls, "O/F Ratio vs. Ae/At, " + plttit)
elif testvar == "ER":
    # NOTE(review): this call passes 6 arguments but plots() takes 7
    # (missing plttit) -- it would raise TypeError if testvar were "ER".
    plots(xvar,PCPEf,"Ae/At","Pc/Pe","Fuel Expansion Ratio Comparison.png",pltlbls)
# Note: for default case, Pc/Pe corresponding to fully expanded nozzle is 25.0
1bd5e7384dac3f4e1c314c62e84166a6ae616194 | 137 | py | Python | lib/nginxlib.py | charmed-kubernetes/juju-layer-nginx | 672d27695b512e50f51777b1eb63c5ff157b3d9e | [
"MIT"
] | 1 | 2015-11-04T03:40:24.000Z | 2015-11-04T03:40:24.000Z | lib/nginxlib.py | charmed-kubernetes/juju-layer-nginx | 672d27695b512e50f51777b1eb63c5ff157b3d9e | [
"MIT"
] | null | null | null | lib/nginxlib.py | charmed-kubernetes/juju-layer-nginx | 672d27695b512e50f51777b1eb63c5ff157b3d9e | [
"MIT"
] | null | null | null | from warnings import warn
from charms.layer.nginx import * # noqa
warn('nginxlib is being deprecated, use charms.layer.nginx instead')
| 27.4 | 68 | 0.781022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.49635 |
1bd8e96793a790da9280643e9022127c10243cec | 3,614 | py | Python | assignment2.py | andremsouza/dip-t02-enhancement-filtering | 2506cd9c704af5adf7136de190762da66dacf27c | [
"MIT"
] | null | null | null | assignment2.py | andremsouza/dip-t02-enhancement-filtering | 2506cd9c704af5adf7136de190762da66dacf27c | [
"MIT"
] | null | null | null | assignment2.py | andremsouza/dip-t02-enhancement-filtering | 2506cd9c704af5adf7136de190762da66dacf27c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Name: André Moreira Souza
# NUSP: 9778985
# Course Code: SCC0251
# Semester: 2019/1
# Assignment: 2 - Image enhancement and filtering
# -
import numpy as np
import imageio
# ## Defining functions
# +
# method 1 - limiarization
# method 1 - limiarization
def limiarization(img, t0):
    """Iterative mean-of-means thresholding.

    Starting from seed threshold *t0*, repeatedly split pixels into the
    groups above/below the threshold and move the threshold to the average
    of the two group means, until it shifts by at most 0.5. Returns a
    binary image: 1 where img exceeds the final threshold, else 0.
    """
    def _group_mean_avg(thr):
        above = np.nanmean(np.where(img > thr, img, np.nan))
        below = np.nanmean(np.where(img <= thr, img, np.nan))
        return 0.5 * (above + below)

    t = _group_mean_avg(t0)
    while abs(t - t0) > 0.5:
        t0 = t
        t = _group_mean_avg(t0)
    return np.where(img > t, 1, 0)
# method 2 - 1d filtering
# method 2 - 1d filtering
def filter1d(img, w):
    """Circular 1-D filtering of a flattened image.

    Each output element i is sum_j img_flat[(i + j) % n] * w[j], i.e. a
    wrap-around cross-correlation with weights *w*; the result is reshaped
    back to the input's 2-D shape.
    """
    flat = img.flatten()
    out = np.zeros(flat.shape[0], dtype=np.double)
    # Accumulate one shifted copy per filter coefficient:
    # np.roll(flat, -j)[i] == flat[(i + j) % n]
    for shift, coeff in enumerate(w):
        out += coeff * np.roll(flat, -shift)
    return out.reshape(img.shape)
# method 3 - 2d filtering
# method 3 - 2d filtering
def filter2d(img, w, t0):
    """Apply square filter *w* with symmetric border padding, then threshold.

    The filtered image is passed through limiarization() with seed *t0*,
    so the return value is a binary image.
    """
    pad = w.shape[0] // 2
    padded = np.pad(img, pad, 'symmetric')
    filtered = np.zeros(img.shape, dtype=np.double)
    fh, fw = w.shape
    for r in range(img.shape[0]):
        for c in range(img.shape[1]):
            # elementwise product of the local window with the filter weights
            filtered[r, c] = np.sum(padded[r:r + fh, c:c + fw] * w)
    return limiarization(filtered, t0)
# method 4 - 2d median filter
# method 4 - 2d median filter
def medianFilter2d(img, n):
    """n-by-n median filter with zero padding at the borders."""
    padded = np.pad(img, n // 2, 'constant', constant_values=0)
    out = np.empty(img.shape, dtype=np.double)  # every cell written below
    for r, c in np.ndindex(*img.shape):
        out[r, c] = np.median(padded[r:r + n, c:c + n])
    return out
# Normalize value of an numpy array between 0 and a given max value
# Linearly rescale an array so its minimum maps to 0 and its maximum to maxvalue
def normalize(arr, maxvalue):
    lo, hi = arr.min(), arr.max()
    return (arr - lo) * (maxvalue / (hi - lo))
# root mean squared error (RMSE) function
# root mean squared error (RMSE) between two equally-shaped images
def rmse(img_g, img_r):
    diff = img_g.astype(np.double) - img_r.astype(np.double)
    n_pixels = img_g.shape[0] * img_g.shape[1]
    return np.sqrt(np.sum(np.power(diff, 2)) / n_pixels)
# -
# ## Main function
if __name__ == '__main__':
    # get user input: image filename, then method selector (1-4)
    filename = str(input()).strip()
    sourceImg = imageio.imread(filename)
    method = int(input())
    # executing processing based on value of "method" variable;
    # each result is normalized to [0, 255] and cast to uint8
    if method == 1:
        t0 = np.double(input())  # seed threshold
        outputImg = normalize(limiarization(sourceImg, t0), 255).astype(np.uint8)
    elif method == 2:
        n = int(input())  # filter length
        w = np.array(input().split(), dtype=np.double)
        if w.shape[0] != n:
            raise ValueError("unexpected number of values for filter.")
        outputImg = normalize(filter1d(sourceImg, w), 255).astype(np.uint8)
    elif method == 3:
        n = int(input())  # filter side length (n x n)
        w = np.array([input().split() for i in range(n)], dtype=np.double)
        if w.shape != (n, n):
            raise ValueError("unexpected number of values for filter.")
        t0 = np.double(input())  # seed threshold applied after filtering
        outputImg = normalize(filter2d(sourceImg, w, t0), 255).astype(np.uint8)
    elif method == 4:
        n = int(input())  # median window side length
        outputImg = normalize(medianFilter2d(sourceImg, n), 255).astype(np.uint8)
    else:
        raise ValueError("method value not in supported range (minimum = 1, maximum = 4).")
    # printing output: RMSE between source and processed image
    print('%.4f' % rmse(sourceImg, outputImg))
| 38.042105 | 130 | 0.627006 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 998 | 0.276072 |
1bd98169236fadcdd2f4be3bc8f9290368ceb686 | 15,192 | py | Python | test/test_api.py | archoversight/u2fval | ec8b6b9e65f880fd609c7e9f82638696341c781f | [
"BSD-2-Clause"
] | 75 | 2015-02-10T08:33:28.000Z | 2021-11-16T21:10:56.000Z | test/test_api.py | archoversight/u2fval | ec8b6b9e65f880fd609c7e9f82638696341c781f | [
"BSD-2-Clause"
] | 39 | 2015-04-09T04:34:26.000Z | 2021-09-04T23:18:58.000Z | test/test_api.py | archoversight/u2fval | ec8b6b9e65f880fd609c7e9f82638696341c781f | [
"BSD-2-Clause"
] | 31 | 2015-02-26T08:38:29.000Z | 2022-02-17T20:03:25.000Z | from u2fval import app, exc
from u2fval.model import db, Client
from .soft_u2f_v2 import SoftU2FDevice, CERT
from six.moves.urllib.parse import quote
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import Encoding
import unittest
import json
class RestApiTest(unittest.TestCase):
def setUp(self):
app.config['TESTING'] = True
app.config['ALLOW_UNTRUSTED'] = True
db.session.close()
db.drop_all()
db.create_all()
db.session.add(Client('fooclient', 'https://example.com',
['https://example.com']))
db.session.commit()
self.app = app.test_client()
def test_call_without_client(self):
resp = self.app.get('/')
self.assertEqual(resp.status_code, 400)
err = json.loads(resp.data.decode('utf8'))
self.assertEqual(err['errorCode'], exc.BadInputException.code)
def test_call_with_invalid_client(self):
resp = self.app.get('/', environ_base={'REMOTE_USER': 'invalid'})
self.assertEqual(resp.status_code, 404)
err = json.loads(resp.data.decode('utf8'))
self.assertEqual(err['errorCode'], exc.BadInputException.code)
def test_get_trusted_facets(self):
resp = json.loads(
self.app.get('/', environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertIn('https://example.com', resp['trustedFacets'][0]['ids'])
def test_list_empty_devices(self):
resp = json.loads(
self.app.get('/foouser', environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(resp, [])
def test_begin_auth_without_devices(self):
resp = self.app.get('/foouser/sign',
environ_base={'REMOTE_USER': 'fooclient'})
self.assertEqual(resp.status_code, 400)
err = json.loads(resp.data.decode('utf8'))
self.assertEqual(err['errorCode'], exc.NoEligibleDevicesException.code)
def test_register(self):
device = SoftU2FDevice()
self.do_register(device, {'foo': 'bar'})
def test_sign(self):
device = SoftU2FDevice()
self.do_register(device, {'foo': 'bar', 'baz': 'one'})
descriptor = self.do_sign(device, {'baz': 'two'})
self.assertEqual(descriptor['properties'],
{'foo': 'bar', 'baz': 'two'})
def test_get_properties(self):
device = SoftU2FDevice()
descriptor = self.do_register(device, {'foo': 'bar', 'baz': 'foo'})
descriptor2 = json.loads(
self.app.get('/foouser/' + descriptor['handle'],
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(descriptor2['properties'],
{'foo': 'bar', 'baz': 'foo'})
def test_update_properties(self):
device = SoftU2FDevice()
desc = self.do_register(device,
{'foo': 'one', 'bar': 'one', 'baz': 'one'})
self.assertEqual({
'foo': 'one',
'bar': 'one',
'baz': 'one'
}, desc['properties'])
desc2 = json.loads(self.app.post(
'/foouser/' + desc['handle'],
environ_base={'REMOTE_USER': 'fooclient'},
data=json.dumps({'bar': 'two', 'baz': None})
).data.decode('utf8'))
self.assertEqual({
'foo': 'one',
'bar': 'two'
}, desc2['properties'])
desc3 = json.loads(self.app.get(
'/foouser/' + desc['handle'],
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(desc2['properties'], desc3['properties'])
def test_get_devices(self):
self.do_register(SoftU2FDevice())
self.do_register(SoftU2FDevice())
self.do_register(SoftU2FDevice())
resp = json.loads(
self.app.get('/foouser', environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(len(resp), 3)
def test_get_device_descriptor_and_cert(self):
desc = self.do_register(SoftU2FDevice())
desc2 = json.loads(
self.app.get('/foouser/' + desc['handle'],
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(desc, desc2)
cert = x509.load_pem_x509_certificate(self.app.get(
'/foouser/' + desc['handle'] + '/certificate',
environ_base={'REMOTE_USER': 'fooclient'}
).data, default_backend())
self.assertEqual(CERT, cert.public_bytes(Encoding.DER))
def test_get_invalid_device(self):
resp = self.app.get('/foouser/' + ('ab' * 16),
environ_base={'REMOTE_USER': 'fooclient'}
)
self.assertEqual(resp.status_code, 404)
self.do_register(SoftU2FDevice())
resp = self.app.get('/foouser/' + ('ab' * 16),
environ_base={'REMOTE_USER': 'fooclient'}
)
self.assertEqual(resp.status_code, 404)
resp = self.app.get('/foouser/InvalidHandle',
environ_base={'REMOTE_USER': 'fooclient'}
)
self.assertEqual(resp.status_code, 400)
def test_delete_user(self):
self.do_register(SoftU2FDevice())
self.do_register(SoftU2FDevice())
self.do_register(SoftU2FDevice())
self.app.delete('/foouser',
environ_base={'REMOTE_USER': 'fooclient'})
resp = json.loads(
self.app.get('/foouser', environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(resp, [])
def test_delete_devices(self):
d1 = self.do_register(SoftU2FDevice())
d2 = self.do_register(SoftU2FDevice())
d3 = self.do_register(SoftU2FDevice())
self.app.delete('/foouser/' + d2['handle'],
environ_base={'REMOTE_USER': 'fooclient'})
resp = json.loads(
self.app.get('/foouser',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(len(resp), 2)
self.app.delete('/foouser/' + d1['handle'],
environ_base={'REMOTE_USER': 'fooclient'})
resp = json.loads(
self.app.get('/foouser',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(len(resp), 1)
self.assertEqual(d3, resp[0])
self.app.delete('/foouser/' + d3['handle'],
environ_base={'REMOTE_USER': 'fooclient'})
resp = json.loads(
self.app.get('/foouser',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(resp, [])
def test_set_properties_during_register(self):
device = SoftU2FDevice()
reg_req = json.loads(self.app.get(
'/foouser/register?properties=' + quote(json.dumps(
{'foo': 'one', 'bar': 'one'})),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
reg_resp = device.register('https://example.com', reg_req['appId'],
reg_req['registerRequests'][0]).json
desc = json.loads(self.app.post(
'/foouser/register',
data=json.dumps({
'registerResponse': reg_resp,
'properties': {'baz': 'two', 'bar': 'two'}
}),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual({'foo': 'one', 'bar': 'two', 'baz': 'two'},
desc['properties'])
def test_set_properties_during_sign(self):
device = SoftU2FDevice()
self.do_register(device, {'foo': 'one', 'bar': 'one', 'baz': 'one'})
aut_req = json.loads(self.app.get(
'/foouser/sign?properties=' + quote(json.dumps(
{'bar': 'two', 'boo': 'two'})),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
aut_resp = device.getAssertion('https://example.com', aut_req['appId'],
aut_req['challenge'],
aut_req['registeredKeys'][0]).json
desc = json.loads(self.app.post(
'/foouser/sign',
data=json.dumps({
'signResponse': aut_resp,
'properties': {'baz': 'three', 'boo': None}
}),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual({
'foo': 'one',
'bar': 'two',
'baz': 'three',
}, desc['properties'])
def test_register_and_sign_with_custom_challenge(self):
device = SoftU2FDevice()
reg_req = json.loads(self.app.get(
'/foouser/register?challenge=ThisIsAChallenge',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(reg_req['registerRequests'][0]['challenge'],
'ThisIsAChallenge')
reg_resp = device.register('https://example.com', reg_req['appId'],
reg_req['registerRequests'][0]).json
desc1 = json.loads(self.app.post(
'/foouser/register',
data=json.dumps({
'registerResponse': reg_resp
}),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
aut_req = json.loads(self.app.get(
'/foouser/sign?challenge=ThisIsAChallenge',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(aut_req['challenge'], 'ThisIsAChallenge')
aut_resp = device.getAssertion('https://example.com', aut_req['appId'],
aut_req['challenge'],
aut_req['registeredKeys'][0]).json
desc2 = json.loads(self.app.post(
'/foouser/sign',
data=json.dumps({
'signResponse': aut_resp
}),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(desc1['handle'], desc2['handle'])
def test_sign_with_handle_filtering(self):
dev = SoftU2FDevice()
h1 = self.do_register(dev)['handle']
h2 = self.do_register(dev)['handle']
self.do_register(dev)['handle']
aut_req = json.loads(
self.app.get('/foouser/sign',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(len(aut_req['registeredKeys']), 3)
self.assertEqual(len(aut_req['descriptors']), 3)
aut_req = json.loads(
self.app.get('/foouser/sign?handle=' + h1,
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(len(aut_req['registeredKeys']), 1)
self.assertEqual(aut_req['descriptors'][0]['handle'], h1)
aut_req = json.loads(
self.app.get(
'/foouser/sign?handle=' + h1 + '&handle=' + h2,
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(len(aut_req['registeredKeys']), 2)
self.assertIn(aut_req['descriptors'][0]['handle'], [h1, h2])
self.assertIn(aut_req['descriptors'][1]['handle'], [h1, h2])
def test_sign_with_invalid_handle(self):
dev = SoftU2FDevice()
self.do_register(dev)
resp = self.app.get('/foouser/sign?handle=foobar',
environ_base={'REMOTE_USER': 'fooclient'})
self.assertEqual(resp.status_code, 400)
def test_device_compromised_on_counter_error(self):
dev = SoftU2FDevice()
self.do_register(dev)
self.do_sign(dev)
self.do_sign(dev)
self.do_sign(dev)
dev.counter = 1
aut_req = json.loads(
self.app.get('/foouser/sign',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
aut_resp = dev.getAssertion('https://example.com', aut_req['appId'],
aut_req['challenge'],
aut_req['registeredKeys'][0]).json
resp = self.app.post(
'/foouser/sign',
data=json.dumps({
'signResponse': aut_resp
}),
environ_base={'REMOTE_USER': 'fooclient'}
)
self.assertEqual(400, resp.status_code)
self.assertEqual(12, json.loads(resp.data.decode('utf8'))['errorCode'])
resp = self.app.get('/foouser/sign',
environ_base={'REMOTE_USER': 'fooclient'})
self.assertEqual(400, resp.status_code)
self.assertEqual(11, json.loads(resp.data.decode('utf8'))['errorCode'])
def do_register(self, device, properties=None):
reg_req = json.loads(
self.app.get('/foouser/register',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(len(reg_req['registeredKeys']),
len(reg_req['descriptors']))
reg_resp = device.register('https://example.com', reg_req['appId'],
reg_req['registerRequests'][0]).json
if properties is None:
properties = {}
descriptor = json.loads(self.app.post(
'/foouser/register',
data=json.dumps({
'registerResponse': reg_resp,
'properties': properties
}),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
self.assertEqual(descriptor['properties'], properties)
return descriptor
def do_sign(self, device, properties=None):
aut_req = json.loads(
self.app.get('/foouser/sign',
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
aut_resp = device.getAssertion('https://example.com', aut_req['appId'],
aut_req['challenge'],
aut_req['registeredKeys'][0]).json
if properties is None:
properties = {}
return json.loads(self.app.post(
'/foouser/sign',
data=json.dumps({
'signResponse': aut_resp,
'properties': properties
}),
environ_base={'REMOTE_USER': 'fooclient'}
).data.decode('utf8'))
| 39.769634 | 79 | 0.535874 | 14,858 | 0.978015 | 0 | 0 | 0 | 0 | 0 | 0 | 3,323 | 0.218734 |
1bdb708dd35b54eb9305795bb23aa7f10f6fd2ff | 656 | py | Python | setup.py | suchanlee/typekit-python | 45b1d09b1934ea87b752405c6b2290ce477afa0d | [
"MIT"
] | 6 | 2016-03-17T10:56:22.000Z | 2020-09-10T02:55:19.000Z | setup.py | suchanlee/typekit-python | 45b1d09b1934ea87b752405c6b2290ce477afa0d | [
"MIT"
] | null | null | null | setup.py | suchanlee/typekit-python | 45b1d09b1934ea87b752405c6b2290ce477afa0d | [
"MIT"
] | 6 | 2015-03-13T12:11:59.000Z | 2020-05-22T20:42:36.000Z | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from typekit._version import __version__ as version
# Package metadata for the typekit PyPI distribution.
setup(
    name = 'typekit',
    packages = ['typekit'],
    # Version string imported from typekit._version above.
    version = version,
    license='MIT',
    description = 'Python wrapper for Typekit Developer API',
    author = 'Suchan Lee',
    author_email = 'lee.suchan@gmail.com',
    url = 'https://github.com/suchanlee/typekit-python',
    # Tarball URL for this exact version tag.
    download_url = 'https://github.com/suchanlee/typekit-python/tarball/{}'.format(version),
    keywords = ['typekit', 'typekit-python', 'typekit python'],
    install_requires= [
        'requests >= 2.2.1',
    ],
)
59edeae5675e92a4f2deb7a85fba31c6c92a2875 | 1,315 | py | Python | organization/migrations/0002_auto_20180325_1529.py | H0neyBadger/pmapi | d34dad32170e53f49e14611f5bfbfcb4eb7b8d4d | [
"MIT"
] | null | null | null | organization/migrations/0002_auto_20180325_1529.py | H0neyBadger/pmapi | d34dad32170e53f49e14611f5bfbfcb4eb7b8d4d | [
"MIT"
] | 1 | 2017-09-07T09:15:07.000Z | 2017-09-07T09:15:07.000Z | organization/migrations/0002_auto_20180325_1529.py | H0neyBadger/cmdb | d34dad32170e53f49e14611f5bfbfcb4eb7b8d4d | [
"MIT"
] | null | null | null | # Generated by Django 2.0.3 on 2018-03-25 15:29
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 2.0.3).  Attaches the default
    # GroupManager to the Team model and rewrites its three user relations
    # (ciso / manager / technical_contact) as nullable foreign keys that
    # PROTECT the referenced user from deletion.
    dependencies = [
        ('organization', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelManagers(
            name='team',
            managers=[
                ('objects', django.contrib.auth.models.GroupManager()),
            ],
        ),
        migrations.AlterField(
            model_name='team',
            name='ciso',
            field=models.ForeignKey(help_text='chief information security officer', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='ciso', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='team',
            name='manager',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='manager', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='team',
            name='technical_contact',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='technical_contact', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 34.605263 | 190 | 0.638783 | 1,122 | 0.853232 | 0 | 0 | 0 | 0 | 0 | 0 | 212 | 0.161217 |
59f118b8a100acdad7416211305340cb179b7ad0 | 8,673 | py | Python | translation/train.py | AkshatSh/BinarizedNMT | 7fa15149fdfcad6b1fd0956157c3730f3dcd781f | [
"MIT"
] | 10 | 2019-01-19T08:15:05.000Z | 2021-12-02T08:54:50.000Z | translation/train.py | AkshatSh/BinarizedNMT | 7fa15149fdfcad6b1fd0956157c3730f3dcd781f | [
"MIT"
] | null | null | null | translation/train.py | AkshatSh/BinarizedNMT | 7fa15149fdfcad6b1fd0956157c3730f3dcd781f | [
"MIT"
] | 2 | 2019-01-25T21:19:49.000Z | 2019-03-21T11:38:13.000Z | import torch
import pickle
import argparse
import os
from tqdm import trange, tqdm
import torch
import torchtext
from torchtext import data
from torchtext import datasets
from torch import nn
import torch.nn.functional as F
import math
from models import SimpleLSTMModel, AttentionRNN
from train_args import get_arg_parser
import constants
from vocab import Vocabulary, load_vocab
import dataset as d
def build_model(
    parser: argparse.ArgumentParser,
    en_vocab: Vocabulary,
    fr_vocab: Vocabulary,
) -> nn.Module:
    '''
    Builds the translation model selected by --model_type.

    The chosen model class first registers its own CLI arguments on
    *parser*; the arguments are then re-parsed and used to construct
    the model.

    Args:
        parser: argument parser (model-specific args are added to it)
        en_vocab: source (English) vocabulary
        fr_vocab: target (French) vocabulary

    Returns:
        the constructed nn.Module

    Raises:
        Exception: if args.model_type names an unknown model
    '''
    args = parser.parse_args()
    # Dispatch table instead of the previous duplicated if/elif branches.
    model_types = {
        'SimpleLSTM': SimpleLSTMModel,
        'AttentionRNN': AttentionRNN,
    }
    model_cls = model_types.get(args.model_type)
    if model_cls is None:
        raise Exception(
            "Unknown Model Type: {}".format(args.model_type)
        )
    # Let the model register its own flags, then re-parse so they exist.
    model_cls.add_args(parser)
    args = parser.parse_args()
    # Keyword arguments shared by every encoder/decoder model.
    kwargs = dict(
        src_vocab=en_vocab,
        trg_vocab=fr_vocab,
        encoder_embed_dim=args.encoder_embed_dim,
        encoder_hidden_dim=args.encoder_hidden_dim,
        encoder_dropout=args.encoder_dropout,
        encoder_num_layers=args.encoder_layers,
        decoder_embed_dim=args.decoder_embed_dim,
        decoder_hidden_dim=args.decoder_hidden_dim,
        decoder_dropout=args.decoder_dropout,
        decoder_num_layers=args.decoder_layers,
    )
    # AttentionRNN additionally supports scheduled sampling.
    if args.model_type == 'AttentionRNN':
        kwargs['teacher_student_ratio'] = args.teacher_student_ratio
    return model_cls.build_model(**kwargs)
def train(
    train_loader: d.BatchedIterator,
    valid_loader: d.BatchedIterator,
    model: nn.Module,
    epochs: int,
    learning_rate: float,
    weight_decay: float,
    log_dir: str,
    save_dir: str,
    en_vocab: Vocabulary,
    fr_vocab: Vocabulary,
    device: str,
    multi_gpu: bool,
    save_step: int,
    model_name: str,
    optimizer: str,
) -> None:
    '''
    Runs the training loop over train_loader for the given number of
    epochs, checkpointing the model under save_dir/model_name every
    save_step iterations and at the end of each epoch.

    Args:
        train_loader: batched iterator over the training set
        valid_loader: batched iterator over the validation set (unused here)
        model: the translation model to train
        epochs: number of passes over the training data
        learning_rate: optimizer learning rate
        weight_decay: optimizer L2 regularization factor
        log_dir: directory for logs (not written to in this function)
        save_dir: root directory for checkpoints
        en_vocab: source vocabulary (unused here)
        fr_vocab: target vocabulary; supplies the padding index
        device: 'cuda' or 'cpu'
        multi_gpu: wrap the model in DataParallel when on cuda
        save_step: checkpoint every save_step iterations
        model_name: subdirectory name for this model's checkpoints
        optimizer: 'sgd' or 'adam'

    Raises:
        Exception: if optimizer is neither 'sgd' nor 'adam'
    '''
    model = model.to(device)
    if multi_gpu and device == 'cuda':
        print('Using multi gpu training')
        model = torch.nn.DataParallel(model, device_ids=[0, 1]).cuda()
    if optimizer == "sgd":
        print("using stochastic gradient descent optimizer")
        optim = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    elif optimizer == "adam":
        print("using adam optimizer")
        optim = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    else:
        raise Exception("Illegal Optimizer {}".format(optimizer))

    # [DEBUG]: count number of nans
    nan_count = 0
    for e in range(epochs):
        total_loss = 0.0
        count = 0
        with tqdm(train_loader, total=len(train_loader)) as pbar:
            for i, data in enumerate(pbar):
                src, trg, src_lengths, trg_lengths, prev_tokens, prev_lengths = data
                src = src.to(device)
                trg = trg.to(device)
                src_lengths = src_lengths.to(device)
                trg_lengths = trg_lengths.to(device)
                prev_tokens = prev_tokens.to(device)
                prev_lengths = prev_lengths.to(device)

                optim.zero_grad()
                predicted, _ = model.forward(src, src_lengths, prev_tokens)
                if not multi_gpu:
                    loss = model.loss(predicted.view(-1, predicted.size(-1)), trg.view(-1))
                else:
                    # DataParallel hides the wrapped model's loss() method,
                    # so compute cross entropy directly here.
                    # BUG FIX: this branch previously referenced the
                    # undefined name `trg_tensor` (its definition was
                    # commented out), which raised NameError; use `trg`.
                    loss = F.cross_entropy(
                        predicted.view(-1, predicted.size(-1)),
                        trg.view(-1),
                        ignore_index=fr_vocab.word2idx(constants.PAD_TOKEN),
                    )

                if math.isnan(loss.item()):
                    # Skip backprop on a nan loss: snapshot the model for
                    # debugging and abort training entirely.
                    nan_count += 1
                    print('found nan at {}'.format(i))
                    torch.save(
                        model.state_dict(),
                        os.path.join(save_dir, model_name, 'unk_problem.pt')
                    )
                    return
                loss.backward()
                optim.step()

                total_loss += loss.item()
                count += 1
                pbar.set_postfix(
                    loss_avg=total_loss/(count),
                    epoch="{}/{}".format(e + 1, epochs),
                    curr_loss=loss.item(),
                    nan_count=nan_count,
                )
                pbar.refresh()

                # Periodic checkpoint within the epoch.
                if (i + 1) % save_step == 0:
                    print('Saving model at iteration {} for epoch {}'.format(i, e))
                    model_file_name = "model_epoch_{}_itr_{}".format(e, i)
                    torch.save(
                        model.state_dict(),
                        os.path.join(save_dir, model_name, model_file_name)
                    )

        print("Summary: Total Loss {} | Count {} | Average {}".format(total_loss, count, total_loss / count))
        # End-of-epoch checkpoint.
        model_file_name = "model_epoch_{}_final".format(e)
        print('saving to {}'.format(os.path.join(save_dir, model_name, model_file_name)))
        torch.save(
            model.state_dict(),
            os.path.join(save_dir, model_name, model_file_name)
        )
        train_loader.reset()
        # valid_loader.reset()
def main() -> None:
    '''
    Entry point: parse CLI args, load vocabularies and the (optionally
    small) WMT14 en-fr training shard, build the selected model, create
    the output directories, and run training.
    '''
    parser = get_arg_parser()
    args = parser.parse_args()
    # Fall back to CPU unless CUDA is both available and requested.
    device = "cuda" if torch.cuda.is_available() and args.cuda else "cpu"
    print('using device {}'.format(device))
    print('loading vocabulary...')
    if args.small:
        print('using small training set')
        en_vocab = load_vocab(constants.SMALL_TRAIN_EN_VOCAB_FILE)
        fr_vocab = load_vocab(constants.SMALL_TRAIN_FR_VOCAB_FILE)
    else:
        en_vocab = load_vocab(constants.TRAIN_EN_VOCAB_FILE)
        fr_vocab = load_vocab(constants.TRAIN_FR_VOCAB_FILE)
    print('loaded vocabulary')
    print('loading datasets...')
    if args.small:
        train_dataset = d.ShardedCSVDataset(constants.WMT14_EN_FR_SMALL_TRAIN_SHARD)
    else:
        train_dataset = d.ShardedCSVDataset(constants.WMT14_EN_FR_TRAIN_SHARD)
    # valid_dataset = d.DualFileDataset(
    #     constants.WMT14_EN_FR_VALID + ".en",
    #     constants.WMT14_EN_FR_VALID + ".fr",
    # )
    train_loader = d.BatchedIterator(
        args.batch_size,
        train_dataset,
        en_vocab,
        fr_vocab,
        args.max_sequence_length,
    )
    # valid_loader = d.BatchedIterator(
    #     1,
    #     valid_dataset,
    #     en_vocab,
    #     fr_vocab,
    #     args.max_sequence_length,
    # )
    model = build_model(parser, en_vocab, fr_vocab)
    print('using model...')
    print(model)
    # Ensure the log and checkpoint directories exist before training.
    if not os.path.exists(args.log_dir):
        os.makedirs(args.log_dir)
    if not os.path.exists(os.path.join(args.save_dir, args.model_name)):
        os.makedirs(os.path.join(args.save_dir, args.model_name))
    # model.load_state_dict(torch.load('delete/model_1543183590.2138884/unk_problem.pt'))
    train(
        train_loader=train_loader,
        valid_loader=None, # valid_loader,
        model=model,
        epochs=args.num_epochs,
        learning_rate=args.learning_rate,
        weight_decay=args.weight_decay,
        log_dir=args.log_dir,
        save_dir=args.save_dir,
        en_vocab=en_vocab,
        fr_vocab=fr_vocab,
        device=device,
        multi_gpu=args.multi_gpu,
        save_step=args.save_step,
        model_name=args.model_name,
        optimizer=args.optimizer,
    )
if __name__ == "__main__":
    main()
59f1c15a95f6ece1eb1189e10c53a789c1ff17ed | 1,404 | py | Python | rawquery/rawquery.py | joncombe/django-raw-query | b10c8f5731668bd16fd37cfc86b37dcb0ca65f4f | [
"BSD-3-Clause"
] | 3 | 2020-07-16T20:01:57.000Z | 2022-03-26T06:39:32.000Z | rawquery/rawquery.py | joncombe/django-raw-query | b10c8f5731668bd16fd37cfc86b37dcb0ca65f4f | [
"BSD-3-Clause"
] | null | null | null | rawquery/rawquery.py | joncombe/django-raw-query | b10c8f5731668bd16fd37cfc86b37dcb0ca65f4f | [
"BSD-3-Clause"
] | null | null | null | from django.db import connection
class RawQuery:
    """Convenience wrapper around Django's raw-SQL cursor.

    Every method takes a SQL string plus an optional parameter sequence
    and handles cursor creation and result shaping.

    NOTE: the parameter defaults were previously the mutable ``[]``;
    an immutable ``()`` avoids the shared-mutable-default pitfall and
    behaves identically when passed to ``cursor.execute``.
    """

    # return a list of dicts
    # e.g. SELECT * FROM my_table
    # [
    #    {'a': 1, 'b': 2, 'c': 3},
    #    {'a': 1, 'b': 2, 'c': 3},
    # ]
    def multiple_rows(self, sql, params=()):
        """Run *sql* and return every row as a dict keyed by column name."""
        cursor = self._do_query(sql, params)
        columns = [col[0] for col in cursor.description]
        return [
            dict(zip(columns, row))
            for row in cursor.fetchall()
        ]

    # return a single dict
    # e.g. SELECT COUNT(*) AS count, AVG(price) AS avg_price FROM my_table
    # { 'count': 12, 'avg_price': 95.2 }
    def single_row(self, sql, params=()):
        """Run *sql* and return the first row as a dict.

        Raises IndexError if the query returns no rows.
        """
        return self.multiple_rows(sql, params)[0]

    # return a single value
    # e.g. SELECT COUNT(*) FROM my_table
    # 134
    def single_value(self, sql, params=()):
        """Run *sql* and return the first column of the first row."""
        cursor = self._do_query(sql, params)
        return cursor.fetchone()[0]

    # return a list of single values
    # e.g. SELECT id FROM my_table
    # [1, 2, 3, 4, 5]
    def multiple_values(self, sql, params=()):
        """Run *sql* and return the first column of every row as a list."""
        cursor = self._do_query(sql, params)
        return [row[0] for row in cursor.fetchall()]

    # UPDATE, INSERT, etc.
    def run(self, sql, params=()):
        """Execute a statement and return the number of affected rows."""
        cursor = self._do_query(sql, params)
        return cursor.rowcount

    def _do_query(self, sql, params):
        # Open a cursor on the default connection and execute the query.
        cursor = connection.cursor()
        cursor.execute(sql, params)
        return cursor
59f26542f2fe8d0ce4367beee404e46a3371d4b2 | 37,779 | py | Python | src/PythonUnitTests/UFUNCTests/UFUNC_UINT64.py | thild/numpy.net | 1a607cfb42263f92314a1e8dbec6f5436a7feb73 | [
"BSD-3-Clause"
] | 59 | 2019-01-20T19:43:05.000Z | 2022-03-26T06:08:51.000Z | src/PythonUnitTests/UFUNCTests/UFUNC_UINT64.py | thild/numpy.net | 1a607cfb42263f92314a1e8dbec6f5436a7feb73 | [
"BSD-3-Clause"
] | 21 | 2019-06-06T17:45:01.000Z | 2022-03-30T10:37:24.000Z | src/PythonUnitTests/UFUNCTests/UFUNC_UINT64.py | thild/numpy.net | 1a607cfb42263f92314a1e8dbec6f5436a7feb73 | [
"BSD-3-Clause"
] | 7 | 2019-05-12T21:06:18.000Z | 2022-02-13T12:23:23.000Z | import unittest
import numpy as np
class Test_UFUNC_UINT64(unittest.TestCase):
#region UFUNC UINT64 Tests
#region OUTER Tests
def test_AddOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.add.outer(a1,a2)
print(b)
def test_SubtractOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.subtract.outer(a1,a2)
print(b)
def test_SubtractOuter_UINT32(self):
a1 = np.arange(0, 5, dtype=np.uint32);
a2 = np.arange(3, 8, dtype=np.uint32);
b = np.subtract.outer(a1,a2)
print(b)
def test_MultiplyOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.multiply.outer(a1,a2)
print(b)
def test_DivideOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.divide.outer(a1,a2)
print(b)
def test_RemainderOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.remainder.outer(a1,a2)
print(b)
def test_FModOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.fmod.outer(a1,a2)
print(b)
def test_SquareOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.square.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_ReciprocalOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.reciprocal.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_OnesLikeOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.ones_like.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_SqrtOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.sqrt.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_NegativeOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.negative.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_AbsoluteOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.absolute.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_InvertOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.invert.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_LeftShiftOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.left_shift.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_RightShiftOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.right_shift.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_BitwiseAndOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.bitwise_and.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_BitwiseOrOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.bitwise_or.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_BitwiseXorOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.bitwise_xor.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_LessOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.less.outer(a1,a2)
print(b)
def test_LessEqualOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.less_equal.outer(a1,a2)
print(b)
def test_EqualOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.equal.outer(a1,a2)
print(b)
def test_NotEqualOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.not_equal.outer(a1,a2)
print(b)
def test_GreaterOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.greater.outer(a1,a2)
print(b)
def test_GreaterEqualOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.greater_equal.outer(a1,a2)
print(b)
def test_FloorDivideOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.floor_divide.outer(a1,a2)
print(b)
def test_TrueDivideOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.true_divide.outer(a1,a2)
print(b)
def test_LogicalAndOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.logical_and.outer(a1,a2)
print(b)
def test_LogicalOrOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.logical_or.outer(a1,a2)
print(b)
def test_FloorOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.floor.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_CeilOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.ceil.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_MaximumOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.maximum.outer(a1,a2)
print(b)
def test_MinimumOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.minimum.outer(a1,a2)
print(b)
def test_RintOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.rint.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_ConjugateOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.conjugate.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_IsNANOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
try :
b = np.isnan.outer(a1,a2)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_FMaxOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.fmax.outer(a1,a2)
print(b)
def test_FMinOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.fmin.outer(a1,a2)
print(b)
def test_HeavisideOuter_UINT64(self):
a1 = np.arange(0, 5, dtype=np.uint64);
a2 = np.arange(3, 8, dtype=np.uint64);
b = np.heaviside.outer(a1,a2)
print(b)
#endregion
#region REDUCE Tests
def test_AddReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.add.reduce(a1)
print(b)
def test_SubtractReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.subtract.reduce(a1)
print(b)
def test_SubtractReduce_UINT32(self):
a1 = np.arange(0, 100, dtype=np.uint32).reshape((10,10));
b = np.subtract.reduce(a1)
print(b)
def test_MultiplyReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.multiply.reduce(a1)
print(b)
def test_MultiplyReduce_UINT32(self):
a1 = np.arange(0, 100, dtype=np.uint32).reshape((10,10));
b = np.multiply.reduce(a1)
print(b)
def test_DivideReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.divide.reduce(a1)
print(b)
def test_RemainderReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.remainder.reduce(a1)
print(b)
def test_FModReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.fmod.reduce(a1)
print(b)
def test_SquareReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.square.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_ReciprocalReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.reciprocal.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_OnesLikeReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.ones_like.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_SqrtReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.sqrt.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_NegativeReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.negative.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_AbsoluteReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.absolute.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_InvertReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.invert.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_LeftShiftReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.left_shift.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_RightShiftReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.right_shift.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_BitwiseAndReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.bitwise_and.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_BitwiseOrReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.bitwise_or.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_BitwiseXorReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.bitwise_xor.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_LessReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.less.reduce(a1)
print(b)
def test_LessEqualReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.less_equal.reduce(a1)
print(b)
def test_EqualReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.equal.reduce(a1)
print(b)
def test_NotEqualReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.not_equal.reduce(a1)
print(b)
def test_GreaterReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.greater.reduce(a1)
print(b)
def test_GreaterEqualReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.greater_equal.reduce(a1)
print(b)
def test_FloorDivideReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.floor_divide.reduce(a1)
print(b)
def test_TrueDivideReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try:
b = np.true_divide.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_LogicalAndReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.logical_and.reduce(a1)
print(b)
def test_LogicalOrReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.logical_or.reduce(a1)
print(b)
def test_FloorReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.floor.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_CeilReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.ceil.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_MaximumReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.maximum.reduce(a1)
print(b)
def test_MinimumReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.minimum.reduce(a1)
print(b)
def test_RintReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.rint.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_ConjugateReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.conjugate.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_IsNANReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try :
b = np.isnan.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_FMaxReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.fmax.reduce(a1)
print(b)
def test_FMinReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
b = np.fmin.reduce(a1)
print(b)
def test_HeavisideReduce_UINT64(self):
a1 = np.arange(0, 100, dtype=np.uint64).reshape((10,10));
try:
b = np.heaviside.reduce(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
#endregion
#region ACCUMULATE Tests
def test_AddAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.add.accumulate(a1)
print(b)
def test_SubtractAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.subtract.accumulate(a1)
print(b)
def test_SubtractAccumulate_UINT32(self):
a1 = np.arange(0, 9, dtype=np.uint32).reshape((3,3));
b = np.subtract.accumulate(a1)
print(b)
def test_MultiplyAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.multiply.accumulate(a1)
print(b)
def test_DivideAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.divide.accumulate(a1)
print(b)
def test_RemainderAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.remainder.accumulate(a1)
print(b)
def test_FModAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.fmod.accumulate(a1)
print(b)
def test_SquareAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.square.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_ReciprocalAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.reciprocal.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_OnesLikeAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.ones_like.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_SqrtAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.sqrt.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_NegativeAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.negative.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_AbsoluteAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.absolute.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_InvertAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.invert.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_LeftShiftAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.left_shift.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_RightShiftAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.right_shift.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_BitwiseAndAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.bitwise_and.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_BitwiseOrAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.bitwise_or.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_BitwiseXorAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.bitwise_xor.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_LessAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.less.accumulate(a1)
print(b)
def test_LessEqualAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.less_equal.accumulate(a1)
print(b)
def test_EqualAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.equal.accumulate(a1)
print(b)
def test_NotEqualAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.not_equal.accumulate(a1)
print(b)
def test_GreaterAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.greater.accumulate(a1)
print(b)
def test_GreaterEqualAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.greater_equal.accumulate(a1)
print(b)
def test_FloorDivideAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.floor_divide.accumulate(a1)
print(b)
def test_TrueDivideAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.true_divide.accumulate(a1)
print(b)
def test_LogicalAndAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.logical_and.accumulate(a1)
print(b)
def test_LogicalOrAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.logical_or.accumulate(a1)
print(b)
def test_FloorAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.floor.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_CeilAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.ceil.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_MaximumAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.maximum.accumulate(a1)
print(b)
def test_MinimumAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.minimum.accumulate(a1)
print(b)
def test_RintAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.rint.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_ConjugateAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.conjugate.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_IsNANAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.isnan.accumulate(a1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_FMaxAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.fmax.accumulate(a1)
print(b)
def test_FMinAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.fmin.accumulate(a1)
print(b)
def test_HeavisideAccumulate_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.heaviside.accumulate(a1)
print(b)
#endregion
#region REDUCEAT UINT64 Tests
def test_AddReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.add.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_SubtractReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.subtract.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_SubtractReduceAt_UINT32(self):
a1 = np.arange(0, 9, dtype=np.uint32).reshape((3,3));
b = np.subtract.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_MultiplyReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.multiply.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_DivideReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.divide.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_RemainderReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.remainder.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_FModReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.fmod.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_SquareReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.square.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_ReciprocalReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.reciprocal.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_OnesLikeReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.ones_like.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_SqrtReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.sqrt.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_NegativeReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.negative.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_AbsoluteReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.absolute.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_InvertReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.invert.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_LeftShiftReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.left_shift.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_RightShiftReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.right_shift.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_BitwiseAndReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.bitwise_and.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_BitwiseOrReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.bitwise_or.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_BitwiseXorReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.bitwise_xor.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_LessReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.less.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_LessEqualReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.less_equal.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_EqualReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.equal.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_NotEqualReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.not_equal.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_GreaterReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.greater.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_GreaterEqualReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.greater_equal.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_FloorDivideReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.floor_divide.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_TrueDivideReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.true_divide.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_LogicalAndReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.logical_and.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_LogicalOrReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.logical_or.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_FloorReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.floor.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_CeilReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.ceil.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_MaximumReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.maximum.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_MinimumReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.minimum.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_RintReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.rint.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_ConjugateReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.conjugate.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_IsNANReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
try :
b = np.isnan.reduceat(a1, [0, 2], axis = 1)
print(b)
self.fail("should have thrown exception")
except:
print("Exception occured")
def test_FMaxReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.fmax.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_FMinReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.fmin.reduceat(a1, [0, 2], axis = 1)
print(b)
def test_HeavisideReduceAt_UINT64(self):
a1 = np.arange(0, 9, dtype=np.uint64).reshape((3,3));
b = np.heaviside.reduceat(a1, [0, 2], axis = 1)
print(b)
#endregion
#endregion
if __name__ == '__main__':
unittest.main()
| 25.682529 | 73 | 0.541941 | 37,693 | 0.997724 | 0 | 0 | 0 | 0 | 0 | 0 | 3,609 | 0.095529 |
59f29643b40421242d0bea0de29a9058ef247935 | 49 | py | Python | pis_client/__init__.py | ignertic/pis_client | 12dd56b6801b53388cb46199a27a0a7d3d214523 | [
"MIT"
] | null | null | null | pis_client/__init__.py | ignertic/pis_client | 12dd56b6801b53388cb46199a27a0a7d3d214523 | [
"MIT"
] | null | null | null | pis_client/__init__.py | ignertic/pis_client | 12dd56b6801b53388cb46199a27a0a7d3d214523 | [
"MIT"
] | null | null | null | from .model import Client
__version__= "0.0.1"
| 9.8 | 25 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.142857 |
59f56393703c13188954083843d72db2967ca6cd | 607 | py | Python | pychain/node_discovery.py | tylermzeller/pychain | 3cbf49a762cacb9d4698a83a9f5851c601e57bd0 | [
"MIT"
] | null | null | null | pychain/node_discovery.py | tylermzeller/pychain | 3cbf49a762cacb9d4698a83a9f5851c601e57bd0 | [
"MIT"
] | 10 | 2018-06-21T01:15:14.000Z | 2018-07-30T01:02:35.000Z | pychain/node_discovery.py | tylermzeller/pychain | 3cbf49a762cacb9d4698a83a9f5851c601e57bd0 | [
"MIT"
] | null | null | null | '''
"Node discovery". *Hack*
This implementation of p2p node discovery takes
advantage of docker and information sharing through
environment variables. REAL discovery should probably happen
through DNS.
'''
# returns a list of node addresses
def discoverNodes():
    """Return the addresses of peer nodes derived from environment variables.

    Builds docker-compose style service names
    ``<SERVICENAME>_1 .. <SERVICENAME>_<NUMNODES>`` from the ``NUMNODES``
    and ``SERVICENAME`` environment variables.

    Returns:
        list[str]: node addresses, or ``[]`` when either variable is unset.
    """
    import os
    # Bug fix: os.environ['...'] raised KeyError before the None check below
    # could ever run; .get() makes the "missing variable" branch reachable.
    num_nodes = os.environ.get('NUMNODES')
    service_name = os.environ.get('SERVICENAME')
    if num_nodes is None or service_name is None:
        print("Node discovery could not be performed.")
        return []
    return ['{}_{}'.format(service_name, i) for i in range(1, int(num_nodes) + 1)]
59f5b6b59a824290eeb563ee136c31e98362ac02 | 5,483 | py | Python | igen/mock_cmd.py | tuan188/MGiGen | 8aa505255b95173adbebe7d1e90f83269bd3343f | [
"MIT"
] | 15 | 2019-04-04T00:57:58.000Z | 2022-02-09T02:35:48.000Z | igen/mock_cmd.py | tuan188/MGiGen | 8aa505255b95173adbebe7d1e90f83269bd3343f | [
"MIT"
] | null | null | null | igen/mock_cmd.py | tuan188/MGiGen | 8aa505255b95173adbebe7d1e90f83269bd3343f | [
"MIT"
] | 8 | 2019-05-17T02:39:13.000Z | 2021-12-01T07:17:24.000Z | # coding=utf-8
import re
from jinja2 import Environment, PackageLoader
from .pb import pasteboard_write
from .command import Command
from .constants import SWIFT_TYPES_DEFAULT_VALUES, SWIFT_TYPES
from .str_helpers import upper_first_letter
class MockCommand(Command):
    """CLI command that generates a Swift mock from protocol text."""

    def __init__(self, protocol_text):
        super(MockCommand, self).__init__()
        self.protocol_text = protocol_text

    def create_mock(self, print_result):
        """Generate the mock, optionally echo it, and copy it to the pasteboard."""
        generated = Mock(self.protocol_text).create_mock()
        if print_result:
            # Same output bytes as print()/print(output)/print()
            print('\n{}\n'.format(generated))
        pasteboard_write(generated)
        print('The result has been copied to the pasteboard.')
class Mock(object):
    """Parses Swift protocol text and renders a mock implementation.

    The text is scanned with regexes for the protocol name and its ``func``
    declarations, then a Jinja2 template produces the mock class source.
    """
    class Function(object):
        """One parsed ``func`` declaration of the protocol."""
        def __init__(self, origin, name, params, return_type):
            # origin: full matched declaration text; name: function name;
            # params: raw parameter-list text; return_type: declared return
            # type, or None for Void.
            super(Mock.Function, self).__init__()
            self.origin = origin
            self.name = name
            # NOTE(review): naive split on ',' — likely breaks on parameter
            # types that themselves contain commas (e.g. generics); confirm
            # expected inputs.
            self.params = list(filter(None, params.split(',')))
            self.return_type = return_type
            self.is_overloaded = False
        def __str__(self):
            return self.origin
        @property
        def return_value(self):
            """Default Swift expression the mock returns for this func."""
            if self.return_type is None:
                return_value = '()'
            elif self.return_type.endswith('?'):
                # Optionals default to nil.
                return_value = 'nil'
            elif self.return_type.startswith('Driver'):
                regex = re.compile('Driver<(.+)>')
                mo = regex.search(self.return_type)
                observable_type = mo.group(1)
                if observable_type in SWIFT_TYPES:
                    # Primitive element type: emit Driver.just(<default>).
                    return_value = 'Driver.just({})'.format(
                        SWIFT_TYPES_DEFAULT_VALUES[observable_type]
                    )
                else:
                    return_value = 'Driver<{}>.empty()'.format(observable_type)
            elif self.return_type.startswith('Observable'):
                regex = re.compile('Observable<(.+)>')
                mo = regex.search(self.return_type)
                observable_type = mo.group(1)
                if observable_type in SWIFT_TYPES:
                    return_value = 'Observable.just({})'.format(
                        SWIFT_TYPES_DEFAULT_VALUES[observable_type]
                    )
                else:
                    return_value = 'Observable<{}>.empty()'.format(
                        observable_type
                    )
            elif self.return_type in SWIFT_TYPES:
                return_value = SWIFT_TYPES_DEFAULT_VALUES[self.return_type]
            else:
                # User-defined type: assume a parameterless initializer.
                return_value = '{}()'.format(self.return_type)
            return return_value
        @property
        def return_void(self):
            # True when the declaration has no return type.
            return self.return_type is None
        @property
        def return_nil(self):
            # True when the mock's default return expression is nil.
            return self.return_value == 'nil'
        @property
        def first_param(self):
            """First parameter's label, lower-camel-cased, or None."""
            if self.params:
                param = self.params[0]
                param_name = param.split(':')[0].split(' ')
                return param_name[0] \
                    + "".join(x.title() for x in param_name[1:])
            return None
        @property
        def first_param_title(self):
            """First parameter's label, upper-camel-cased, or None."""
            if self.first_param is not None:
                return upper_first_letter(self.first_param)
            return None
        @property
        def overloaded_name(self):
            """Name disambiguated with the first param label when overloaded."""
            if self.is_overloaded and (self.first_param_title is not None):
                return self.name + self.first_param_title
            return self.name
    def __init__(self, protocol_text):
        super(Mock, self).__init__()
        self.protocol_text = protocol_text
    def _get_protocol_name(self, str):
        """Return ``(protocol_name, class_name)`` parsed from the text.

        The class name drops a trailing ``Type``/``Protocol`` suffix.
        Raises AttributeError when no ``protocol X`` declaration exists.
        """
        # NOTE(review): parameter shadows the builtin ``str``.
        regex = re.compile(r'protocol (\w+)')
        mo = regex.search(str)
        protocol_name = mo.group(1)
        if protocol_name.endswith('Type'):
            class_name = protocol_name[:-4]
        elif protocol_name.endswith('Protocol'):
            class_name = protocol_name[:-8]
        else:
            class_name = protocol_name
        return (protocol_name, class_name)
    def create_mock(self):
        """Render and return the Swift mock source for the protocol text."""
        str = self.protocol_text
        is_protocol = False
        protocol_name = ''
        class_name = ''
        # get protocol name; bare function lists are allowed too, so a
        # missing declaration is deliberately ignored (best-effort parse)
        try:
            (protocol_name, class_name) = self._get_protocol_name(str)
            is_protocol = True
        except Exception:
            pass
        # get functions
        func_regex = re.compile(r'func (\w+)\((.*)\)( -> (.*))?')
        funcs = [Mock.Function(f.group(), f.group(1), f.group(2), f.group(4))
                 for f in func_regex.finditer(str)]
        if not funcs:
            print('The protocol or functions in the pasteboard is invalid.')
            exit(1)
        # check if overloaded: count declarations per name so overloads can
        # later be given distinct bookkeeping names in the template
        func_dict = {}
        for f in funcs:
            if f.name in func_dict:
                func_dict[f.name] = func_dict[f.name] + 1
            else:
                func_dict[f.name] = 1
        for f in funcs:
            f.is_overloaded = func_dict[f.name] > 1
        env = Environment(
            loader=PackageLoader('igen_templates', 'commands'),
            trim_blocks=True,
            lstrip_blocks=True
        )
        template = env.get_template("Mock.swift")
        content = template.render(
            class_name=class_name,
            protocol_name=protocol_name,
            functions=funcs,
            is_protocol=is_protocol
        )
        return content
| 33.845679 | 79 | 0.552435 | 5,236 | 0.954952 | 0 | 0 | 2,369 | 0.432063 | 0 | 0 | 448 | 0.081707 |
59f8c3c0d266d9fae2fbc9c6180b27f2870c2fce | 2,063 | py | Python | mmstructlib/tools/clashes.py | academicRobot/mmstructlib | 76949620c9e9ca26faf10ff1a21c6fda1a564f5c | [
"MIT"
] | null | null | null | mmstructlib/tools/clashes.py | academicRobot/mmstructlib | 76949620c9e9ca26faf10ff1a21c6fda1a564f5c | [
"MIT"
] | null | null | null | mmstructlib/tools/clashes.py | academicRobot/mmstructlib | 76949620c9e9ca26faf10ff1a21c6fda1a564f5c | [
"MIT"
] | null | null | null | from mmstructlib.tools.gridhash import GridHash
import numpy as np
from scipy.spatial import distance
def clash_val(atom1, atom2, radii_attr='radius'):
    """Overlap of the two atoms' radius spheres; positive means a clash."""
    separation = distance.euclidean(atom1.coordinates, atom2.coordinates)
    return getattr(atom1, radii_attr) + getattr(atom2, radii_attr) - separation
def clash_iter(atoms, min_clash=0.4, radii_attr='radius'):
    """
    Iterate over ``(clash_value, (atom_a, atom_b))`` pairs for every
    neighbouring atom pair whose overlap is at least ``min_clash``.
    """
    largest_radius = max(getattr(a, radii_attr) for a in atoms)
    # Cell size chosen so any pair clashing by >= min_clash shares a neighborhood.
    grid = GridHash([(a.coordinates, a) for a in atoms],
                    largest_radius * 2 - min_clash)
    scored_pairs = (
        (clash_val(a, b, radii_attr), (a, b))
        for (_, a), (_, b) in grid.all_neighbors_iter()
    )
    return (pair for pair in scored_pairs if pair[0] >= min_clash)
def clash_report(atoms, min_clash, radii_attr='radius'):
    """Summary statistics of steric clashes among ``atoms``.

    Returns a dict with the clash count per atom and the median/90th/95th/99th
    percentiles of each atom's worst clash value.
    """
    worst_clash = dict.fromkeys(atoms, 0)
    clash_count = 0
    for value, (left, right) in clash_iter(atoms, min_clash, radii_attr):
        if value > worst_clash[left]:
            worst_clash[left] = value
        if value > worst_clash[right]:
            worst_clash[right] = value
        clash_count += 1
    per_atom_max = list(worst_clash.values())
    return {
        "clashes_per_atom": clash_count / len(atoms),
        "median_max_clash_value": np.median(per_atom_max),
        "p90_max_clash_value": np.percentile(per_atom_max, 90),
        "p95_max_clash_value": np.percentile(per_atom_max, 95),
        "p99_max_clash_value": np.percentile(per_atom_max, 99),
    }
if __name__ == "__main__":
    # Small CLI: load a structure by id, strip waters and hydrogens, assign
    # radii, then print the clash report for the remaining heavy atoms.
    import sys
    from mmstructlib.IO import load_cif_from_mirror
    from mmstructlib.radii import add_radii
    from mmstructlib.tools.preprocess import strip_hydrogen, strip_water
    if len(sys.argv) != 2:
        exit("usage: {} <struct id>".format(sys.argv[0]))
    struct = load_cif_from_mirror(sys.argv[1])
    strip_water(struct)
    strip_hydrogen(struct)
    add_radii(struct[0])
    # struct[0] is the first model; 2.5 is the min_clash threshold passed on
    res = clash_report(struct[0].atoms, 2.5)
    for field, value in res.items():
        print("{}: {}".format(field, value))
59fa6071a6de1608fbc8ebf03f1204cc8f58b9fb | 2,656 | py | Python | tube/settings.py | ohsu-comp-bio/tube | da21ac21448dc50924a01db3b9c48b7b37a60449 | [
"Apache-2.0"
] | null | null | null | tube/settings.py | ohsu-comp-bio/tube | da21ac21448dc50924a01db3b9c48b7b37a60449 | [
"Apache-2.0"
] | null | null | null | tube/settings.py | ohsu-comp-bio/tube | da21ac21448dc50924a01db3b9c48b7b37a60449 | [
"Apache-2.0"
] | null | null | null | import os
from cdislogging import get_logger
from tube.config_helper import *
from utils.general import get_resource_paths_from_yaml
logger = get_logger(__name__)
LIST_TABLES_FILES = 'tables.txt'
#
# Load db credentials from a creds.json file.
# See config_helper.py for paths searched for creds.json
# ex: export XDG_DATA_HOME="$HOME/.local/share"
# and setup $XDG_DATA_HOME/.local/share/gen3/tube/creds.json
#
conf_data = load_json('creds.json', 'tube')
DB_HOST = conf_data.get('db_host', 'localhost')
DB_PORT = conf_data.get('db_port', '5432')
DB_DATABASE = conf_data.get('db_database', 'gdcdb')
DB_USERNAME = conf_data.get('db_username', 'peregrine')
DB_PASSWORD = conf_data.get('db_password', 'unknown')
# Two connection strings for the same database: JDBC form and
# postgresql:// form (the latter embeds the credentials).
JDBC = 'jdbc:postgresql://{}:{}/{}'.format(DB_HOST, DB_PORT, DB_DATABASE)
PYDBC = 'postgresql://{}:{}@{}:{}/{}'.format(DB_USERNAME, DB_PASSWORD, DB_HOST, DB_PORT, DB_DATABASE)
DICTIONARY_URL = os.getenv('DICTIONARY_URL', 'https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json')
ES_URL = os.getenv("ES_URL", "esproxy-service")
HDFS_DIR = '/result'
# Three modes: Test, Dev, Prod
RUNNING_MODE = os.getenv('RUNNING_MODE', 'Dev') # 'Prod' or 'Dev'
PARALLEL_JOBS = 1
# Settings passed through to the elasticsearch-hadoop connector.
ES = {
    "es.nodes": ES_URL,
    "es.port": '9200',
    "es.input.json": 'yes',
    "es.nodes.client.only": 'false',
    "es.nodes.discovery": 'false',
    "es.nodes.data.only": 'false',
    "es.nodes.wan.only": 'true'
}
HADOOP_HOME = os.getenv('HADOOP_HOME', '/usr/local/Cellar/hadoop/3.1.0/libexec/')
JAVA_HOME = os.getenv('JAVA_HOME', '/Library/Java/JavaVirtualMachines/jdk1.8.0_131.jdk/Contents/Home')
HADOOP_URL = os.getenv('HADOOP_URL', 'http://spark-service:9000')
ES_HADOOP_VERSION = os.getenv("ES_HADOOP_VERSION", "")
ES_HADOOP_HOME_BIN = '{}/elasticsearch-hadoop-{}'.format(os.getenv("ES_HADOOP_HOME", ""), os.getenv("ES_HADOOP_VERSION", ""))
HADOOP_HOST = os.getenv("HADOOP_HOST", "spark-service")
# Searches same folders as load_json above
MAPPING_FILE = find_paths("etlMapping.yaml", 'tube')[0]
# user.yaml is optional: when absent, no project-to-resource mapping is built.
try:
    USERYAML_FILE = find_paths("user.yaml", 'tube')[0]
except IndexError:
    USERYAML_FILE = None
PROJECT_TO_RESOURCE_PATH = get_resource_paths_from_yaml(USERYAML_FILE)
SPARK_MASTER = os.getenv('SPARK_MASTER', 'local[1]') # 'spark-service'
SPARK_EXECUTOR_MEMORY = os.getenv("SPARK_EXECUTOR_MEMORY", "2g")
SPARK_DRIVER_MEMORY = os.getenv("SPARK_DRIVER_MEMORY", "512m")
APP_NAME = 'Gen3 ETL'
# Make pyspark load the elasticsearch-hadoop connector jar at startup.
os.environ['PYSPARK_SUBMIT_ARGS'] = \
    '--jars {}/dist/elasticsearch-spark-20_2.11-{}.jar pyspark-shell' \
    .format(ES_HADOOP_HOME_BIN, ES_HADOOP_VERSION)
os.environ['HADOOP_CLIENT_OPTS'] = os.getenv('HADOOP_CLIENT_OPTS', '')
| 39.058824 | 128 | 0.726657 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,353 | 0.509413 |
59fdf5e051bfb98a155a9392dc0c9ca866ade75a | 5,234 | py | Python | t2t_bert/chid_nlpcc2019/model_batch_infer.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 34 | 2018-12-19T01:00:57.000Z | 2021-03-26T09:36:37.000Z | t2t_bert/chid_nlpcc2019/model_batch_infer.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 11 | 2018-12-25T03:37:59.000Z | 2021-08-25T14:43:58.000Z | t2t_bert/chid_nlpcc2019/model_batch_infer.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 9 | 2018-12-27T08:00:44.000Z | 2020-06-08T03:05:14.000Z | from functools import reduce
import numpy as np
import json
import tensorflow as tf
from scipy.optimize import linear_sum_assignment
import os
import time
def deleteDuplicate_v1(input_dict_lst):
    """Remove duplicates while preserving first-occurrence order.

    Uses ``in``/`==` membership (not hashing) so it works for a list of
    dicts, which are unhashable.
    """
    unique = []
    for item in input_dict_lst:
        if item not in unique:
            unique.append(item)
    return unique
def get_context_pair(resp, l):
    """Pair each valid blank prediction with its candidate token.

    resp: model outputs keyed by name (uses ``pred_label`` and ``max_prob``),
        one entry per blank position.
    l: example dict with ``label_weights``, ``label_positions``, ``tokens``
        and ``answer_tokens``.
    Returns a list of ``{score, label, position_token, answer}`` dicts
    covering only the positions whose label weight is 1.
    """
    weights = l['label_weights']
    # Keep only entries at positions whose weight is 1, for every output key.
    kept = {
        key: [v for v, w in zip(values, weights) if w == 1]
        for key, values in resp.items()
    }
    # Positions are stored 1-based in the example; tokens are keyed 0-based.
    kept_positions = [
        pos - 1 for pos, w in zip(l['label_positions'], weights) if w == 1
    ]
    pairs = []
    for label, prob, pos in zip(kept['pred_label'], kept['max_prob'],
                                kept_positions):
        token = l['tokens'][str(pos)][1]
        # Label 1 flips the probability so "score" is comparable across labels.
        score = 1 - prob if label == 1 else prob
        pairs.append({
            "score": score,
            "label": label,
            "position_token": token,
            "answer": l['answer_tokens'],
        })
    return pairs
def format_socre_matrix(result_lst, score_merge='mean'):
    """Build an answer-by-candidate score matrix from scored pairs.

    Rows are answers and columns are candidate position tokens, both indexed
    in first-appearance order. Cells start at -1 ("unseen"); repeated
    (answer, token) pairs are merged with a running mean or a max depending
    on ``score_merge``.

    Returns ``(matrix, answer_index_dict, candidate_index_dict)``.
    """
    answer_index = {}
    candidate_index = {}
    for entry in result_lst:
        answer_index.setdefault(entry['answer'], len(answer_index))
        candidate_index.setdefault(entry['position_token'], len(candidate_index))
    matrix = -np.ones((len(answer_index), len(candidate_index)))
    for entry in result_lst:
        row = answer_index[entry['answer']]
        col = candidate_index[entry['position_token']]
        current = matrix[row, col]
        if current == -1:
            # First score seen for this cell.
            matrix[row, col] = entry['score']
        elif score_merge == 'mean':
            # Running pairwise mean, matching the original += then /= 2.
            matrix[row, col] = (current + entry['score']) / 2
        elif score_merge == 'max':
            matrix[row, col] = max(current, entry['score'])
    return matrix, answer_index, candidate_index
# ---- command-line driver: score ChID idiom candidates with a saved model ----
import tensorflow as tf

flags = tf.flags

FLAGS = flags.FLAGS

# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.INFO)

# NOTE(review): the help strings below were copy-pasted from input_file and do
# not describe output_file / model_file / score_merge -- confirm before reuse.
flags.DEFINE_string("buckets", "", "oss buckets")

flags.DEFINE_string(
    "input_file", None,
    "Input TF example files (can be a glob or comma separated).")

flags.DEFINE_string(
    "output_file", None,
    "Input TF example files (can be a glob or comma separated).")

flags.DEFINE_string(
    "model_file", None,
    "Input TF example files (can be a glob or comma separated).")

flags.DEFINE_string(
    "score_merge", "max",
    "Input TF example files (can be a glob or comma separated).")

# all paths are resolved relative to the OSS bucket root
input_file = os.path.join(FLAGS.buckets, FLAGS.input_file)
output_file = os.path.join(FLAGS.buckets, FLAGS.output_file)
model_file = os.path.join(FLAGS.buckets, FLAGS.model_file)

from tensorflow.contrib import predictor

# model_dict = {
#     "model":'/data/xuht/albert.xht/nlpcc2019/open_data/model/1566283032'
# }
model_dict = {
    "model":model_file
}

# load the exported SavedModel as a callable predictor
chid_model = predictor.from_saved_model(model_dict['model'])

fwobj = tf.gfile.Open(output_file, "w")

cnt = 0

# feature keys expected by the exported predictor's signature
valid_keys = ['input_ids', 'label_weights',
              'label_positions', 'label_ids',
              'segment_ids']
with tf.gfile.Open(input_file, "r") as f:
    # each input line is one document: a list of candidate groups, each group
    # being a list of per-candidate feature dicts
    for index, line in enumerate(f):
        content = json.loads(line.strip())
        total_resp = []
        start = time.time()
        for t in content:
            # batch the group's feature dicts into per-key lists
            tmp = {}
            for l in t:
                for key in valid_keys:
                    if key in tmp:
                        tmp[key].append(l[key])
                    else:
                        tmp[key] = [l[key]]
            # tmp = {
            #     "input_ids":np.array([l['input_ids']]),
            #     'label_weights':np.array([l['label_weights']]),
            #     'label_positions':np.array([l['label_positions']]),
            #     'label_ids':np.array([l['label_ids']]),
            #     'segment_ids':np.array([l['segment_ids']]),
            # }
            resp = chid_model(tmp)
            resp_lst = []
            # predictions come back flattened; regroup into rows of 5 candidates
            batch_size = int(resp['pred_label'].shape[0]/5)
            for key in resp:
                resp[key] = np.reshape(resp[key], [-1, 5]).tolist()
            for i_index in range(batch_size):
                tmp = {
                    "pred_label":resp['pred_label'][i_index],
                    "max_prob":resp['max_prob'][i_index],
                }
                resp_lst.append(tmp)
            # pair each prediction row back with its source example
            for i_index in range(len(t)):
                resp_ = resp_lst[i_index]
                l_ = t[i_index]
                result = get_context_pair(resp_, l_)
                total_resp.extend(result)
        total_resp = deleteDuplicate_v1(total_resp)
        # build the answer x candidate score matrix and solve the optimal
        # one-to-one assignment.  NOTE(review): linear_sum_assignment
        # minimizes total cost; scores for label==1 appear to be flipped
        # upstream (1 - score), so lower should mean better here -- confirm.
        resp = format_socre_matrix(total_resp, score_merge=FLAGS.score_merge)
        row_ind, col_ind = linear_sum_assignment(resp[0])
        mapping_dict = dict(zip(col_ind, row_ind))
        dura = time.time()-start
        # invert candidate-token -> column-index into column -> token
        candidte_dict = resp[-1]
        candidate_inverse_dict = {}
        for key in candidte_dict:
            candidate_inverse_dict[candidte_dict[key]] = key
        candidate_name_dict = {}
        for col in mapping_dict:
            col_name = candidate_inverse_dict[col]
            candidate_name_dict[col_name] = int(mapping_dict[col])
        cnt += len(candidate_name_dict)
        # progress trace every 100 documents
        if np.mod(index, 100) == 0:
            print(candidate_name_dict, index, dura)
        fwobj.write(json.dumps(candidate_name_dict, ensure_ascii=False)+"\n")
fwobj.close()
print('==total cnt==', cnt)
| 29.908571 | 104 | 0.700038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,106 | 0.211311 |
59fe80b466cc0482b71f0268fc89c32034df5ce8 | 10,255 | py | Python | laba/user.py | Snake-Whisper/laba | 78db605d594c2a2c2c3245c00269f08303c6b7ea | [
"MIT"
] | null | null | null | laba/user.py | Snake-Whisper/laba | 78db605d594c2a2c2c3245c00269f08303c6b7ea | [
"MIT"
] | null | null | null | laba/user.py | Snake-Whisper/laba | 78db605d594c2a2c2c3245c00269f08303c6b7ea | [
"MIT"
] | null | null | null | from flask import g, session
import pymysql
import redis
import random
import string
from json import loads, dumps
from exceptions.userException import *
from hashlib import sha256
class User():
    """A logged-in user backed by MySQL (persistent row) and redis (session
    cache).

    Never constructed directly: LoginUser authenticates against the database
    and RedisUser restores a cached session; both then call _init().
    Attribute writes are collected in __changed and flushed to MySQL when the
    object is destroyed; the whole row is re-cached in redis for as long as
    the user is still logged in.
    """

    __changed = {}          # pending column -> value updates for commit2db()
    _values = {}            # the user's row from the `users` table
    __loggedIn = True       # cleared by logOut(); gates the redis re-cache
    __initialized = False   # set by _init(); gates cleanup in __del__()
    __health = False        # set by startSession(); gates cleanup in __del__()

    def __init__(self):
        # Direct instantiation is forbidden -- use LoginUser / RedisUser.
        raise NotInitializeable("User")

    def _init(self, app):
        """Bind the flask app and open (or reuse) the MySQL/redis handles."""
        self.app = app
        # fixed: give every instance its own change-tracking dict -- the
        # class-level default would be shared by every user in the process
        # and leak pending updates across requests
        self.__changed = {}
        if not hasattr(g, 'db'):
            g.db = pymysql.connect(user=app.config["DB_USER"], db=app.config["DB_DB"], password=app.config["DB_PWD"], host=app.config["DB_HOST"], cursorclass=pymysql.cursors.DictCursor)
        self.cursor = g.db.cursor()
        if not hasattr(g, 'redis'):
            g.redis = redis.Redis(host=app.config["REDIS_HOST"], port=app.config["REDIS_PORT"], db=app.config["REDIS_DB"])
        self.__initialized = True

    def query(self, query, param = ()):
        """Execute *query* with *param* and return all rows."""
        self.cursor.execute(query, param)
        return self.cursor.fetchall()

    def queryOne(self, query, param = ()):
        """Execute *query* with *param* and return the first row (or None)."""
        self.cursor.execute(query, param)
        return self.cursor.fetchone()

    def recover(self):
        """Call to prevent pymysql Interface error after recovering from session cache"""
        if not hasattr(g, 'db'):
            g.db = pymysql.connect(user=self.app.config["DB_USER"], db=self.app.config["DB_DB"], password=self.app.config["DB_PWD"], host=self.app.config["DB_HOST"], cursorclass=pymysql.cursors.DictCursor)
        self.cursor = g.db.cursor()
        if not hasattr(g, 'redis'):
            g.redis = redis.Redis(host=self.app.config["REDIS_HOST"], port=self.app.config["REDIS_PORT"], db=self.app.config["REDIS_DB"])

    @property
    def wsuuid(self):
        # websocket uuid, cached in redis under the username
        return g.redis.get(self._values["username"])

    @wsuuid.setter
    def wsuuid(self, wsuuid):
        g.redis.set(self._values["username"], wsuuid, self.app.config["AUTO_LOGOUT"])

    @wsuuid.deleter
    def wsuuid(self):
        g.redis.delete(self._values["username"])

    @property
    def id(self):
        return self._values["id"]

    @property
    def uuid(self):
        # fixed: previously read self.__uuid, a name-mangled attribute that
        # is never assigned anywhere; subclasses store the token in _uuid
        return self._uuid

    @property
    def health(self):
        return self.__health

    @property
    def username(self):
        return self._values["username"]

    @username.setter
    def username(self, value):
        if self._values["username"] != value:
            self._values["username"] = value
            self.__changed['username'] = value

    @property
    def firstName(self):
        return self._values["firstName"]

    @firstName.setter
    def firstName(self, value):
        if self._values["firstName"] != value:
            self._values["firstName"] = value
            self.__changed['firstName'] = value

    @property
    def lastName(self):
        return self._values["lastName"]

    @lastName.setter
    def lastName(self, value):
        if self._values["lastName"] != value:
            self._values["lastName"] = value
            self.__changed['lastName'] = value

    @property
    def email(self):
        return self._values["email"]

    @email.setter
    def email(self, value):
        if self._values["email"] != value:
            self._values["email"] = value
            self.__changed['email'] = value

    @property
    def ctime(self):
        return self._values["ctime"]

    @ctime.setter
    def ctime(self, value):
        if self._values["ctime"] != value:
            self._values["ctime"] = value
            self.__changed['ctime'] = value

    @property
    def atime(self):
        return self._values["atime"]

    @atime.setter
    def atime(self, value):
        if self._values["atime"] != value:
            self._values["atime"] = value
            self.__changed['atime'] = value

    @property
    def status(self):
        return self._values["status"]

    @status.setter
    def status(self, value):
        if self._values["status"] != value:
            self._values["status"] = value
            self.__changed['status'] = value

    @property
    def icon(self):
        return self._values["icon"]

    @icon.setter
    def icon(self, value):
        if self._values["icon"] != value:
            self._values["icon"] = value
            self.__changed['icon'] = value

    def changePwd(self, old, new):
        """Change the password; raises BadUserCredentials if *old* is wrong."""
        # fixed: previously used self.__id / self.__username, name-mangled
        # attributes that were never assigned (guaranteed AttributeError)
        r = self.cursor.execute("UPDATE users SET password=SHA2(%s, 256) WHERE id=%s AND password=SHA2(%s, 256);", (new, self._values["id"], old))
        if not r:
            raise BadUserCredentials(self._values["username"])

    def commit2db(self):
        """Flush the pending attribute changes back to MySQL."""
        if self.__changed:
            sql="UPDATE users SET {0} WHERE users.id = {1}".format(", ".join(i+"=%s" for i in self.__changed.keys()), self._values["id"])
            self.query(sql, tuple(self.__changed.values()))

    def __serialize(self):
        # Keep private! Mutates self._values: datetimes become strings so the
        # row can be JSON-encoded for the redis cache.
        self._values['atime'] = str(self._values['atime'])
        self._values['ctime'] = str(self._values['ctime'])
        return dumps(self._values)

    def commit2redis(self):
        """Re-cache the serialized row in redis under the session uuid."""
        g.redis.set(self._uuid, self.__serialize(), self.app.config["AUTO_LOGOUT"])

    def logOut(self):
        """End the session: drop the redis entry and the flask session key."""
        self.__loggedIn = False
        g.redis.delete(session["uuid"])
        session.pop("uuid")

    def startSession(self):
        self.__health = True

    def __del__(self):
        # Best-effort teardown: flush changes to MySQL and, if still logged
        # in, refresh the redis cache.  Requires a live flask app context.
        if self.__initialized and self.__health:
            self.commit2db()
            self.cursor.close()
            g.db.commit()
            if self.__loggedIn:
                self.commit2redis()
class LoginUser(User):
    """Authenticate a user by username/e-mail and password, then start a
    redis-backed session."""

    def __init__(self, app, username, password):
        """Checks User cred and logs in + moves to redis if ready"""
        User._init(self, app)
        # *username* may match either the username or the e-mail column
        self._values = self.queryOne("""SELECT
                        id, username, firstName, lastName, email, ctime, atime, status, icon, enabled
                    FROM users
                    WHERE
                        (username = %s or email = %s)
                    AND
                        password = SHA2(%s, 256)""", (username, username, password))
        if not self._values:
            # no matching row: unknown user or wrong password
            raise BadUserCredentials(username)
        if not self._values["enabled"]:
            raise UserDisabled(username)
        self.startSession()
        # random 32-char session token, handed to the client via the session
        self._uuid = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(32)])
        session['uuid'] = self._uuid
class RedisUser(User):
    """Restore a previously logged-in user from the redis session cache."""

    def __init__(self, app):
        """Rebuild the user whose uuid is stored in the flask session."""
        if 'uuid' not in session:
            raise UserNotInitialized()
        User._init(self, app)
        uuid = session["uuid"]
        self._uuid = uuid
        cached = g.redis.get(uuid)
        if not cached:
            # stale session entry: drop it and force a fresh login
            session.pop("uuid")
            raise UserNotInitialized()
        self.startSession()
        self._values = loads(cached)
class RegisterUser():
    """Two-phase user registration.

    Phase 1: collect and validate the registration fields, then cache them
    in redis under a one-time token (commit2redis).  Phase 2: on token
    confirmation (e.g. from an e-mail link) insert the cached row into MySQL
    (confirmToken).
    """

    _values = {}    # collected registration fields

    def __init__(self, app):
        self.app = app
        assert not 'uuid' in session
        # fixed: give every instance its own field dict -- the class-level
        # default would be shared between concurrent registrations
        self._values = {}
        if not hasattr(g, 'db'):
            g.db = pymysql.connect(user=app.config["DB_USER"], db=app.config["DB_DB"], password=app.config["DB_PWD"], host=app.config["DB_HOST"], cursorclass=pymysql.cursors.DictCursor)
        self.cursor = g.db.cursor()
        if not hasattr(g, 'redis'):
            g.redis = redis.Redis(host=app.config["REDIS_HOST"], port=app.config["REDIS_PORT"], db=app.config["REDIS_DB"])

    def query(self, query, param = ()):
        """Execute *query* with *param* and return all rows."""
        self.cursor.execute(query, param)
        return self.cursor.fetchall()

    def queryOne(self, query, param = ()):
        """Execute *query* with *param* and return the first row (or None)."""
        self.cursor.execute(query, param)
        return self.cursor.fetchone()

    @property
    def username(self):
        return self._values["username"]

    @username.setter
    def username(self, value):
        # reject names already taken in the users table
        if self.queryOne("SELECT id FROM users WHERE username=%s", value):
            raise RegistrationErrorDupplicate("username")
        self._values["username"] = value

    @property
    def email(self):
        return self._values["email"]

    @email.setter
    def email(self, value):
        # reject addresses already registered
        if self.queryOne("SELECT id FROM users WHERE email=%s", value):
            raise RegistrationErrorDupplicate("email")
        self._values["email"] = value

    @property
    def firstName(self):
        return self._values["firstName"]

    @firstName.setter
    def firstName(self, value):
        self._values["firstName"] = value

    @property
    def lastName(self):
        return self._values["lastName"]

    @lastName.setter
    def lastName(self, value):
        self._values["lastName"] = value

    @property
    def password(self):
        # note: returns the stored SHA-256 hex digest, never the clear text
        return self._values["password"]

    @password.setter
    def password(self, val):
        self._values["password"] = sha256(val.encode()).hexdigest()

    def commit2redis(self):
        """Cache the collected fields under a fresh one-time token.

        Raises RegistrationErrorInfoMissing naming the first field that was
        never set.  Returns the token.
        """
        for i in ["email", "password", "username", "firstName", "lastName"]:
            if i not in self._values:
                raise RegistrationErrorInfoMissing(i)
        token = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(32)])
        g.redis.set(token, dumps(self._values), self.app.config["TOKEN_TIMEOUT"])
        return token

    def confirmToken(self, token):
        """Finish registration: validate *token* and insert the cached row.

        Raises InvalidToken for an unknown/expired token and
        RegistrationErrorDupplicate on a unique-key collision.
        """
        raw = g.redis.get(token)
        if raw is None:
            # fixed: loads(None) used to raise TypeError before the
            # InvalidToken check could ever fire for expired tokens
            raise InvalidToken(token)
        vals = loads(raw)
        if not vals:
            raise InvalidToken(token)
        g.redis.delete(token)
        #WARNING: No check for dupl entry -> time from registerRequest to confirmation: unprotected ~ Problem?
        #Without Exception Handling in Prod. env.: YES -> apk BBQ
        try:
            self.query("INSERT INTO users (email, password, username, firstname, lastname) VALUES (%s, %s, %s, %s, %s)", (
                vals["email"],
                vals["password"],
                vals["username"],
                vals["firstName"],
                vals["lastName"]))
        except pymysql.IntegrityError:
            raise RegistrationErrorDupplicate("email / username")
| 33.622951 | 205 | 0.583715 | 10,055 | 0.980497 | 0 | 0 | 3,531 | 0.34432 | 0 | 0 | 2,107 | 0.205461 |
59fe8cddb1924c234b109f3ff5d5d0ddb22f9abc | 1,697 | py | Python | iotbot/config.py | li7yue/python--iotbot | ca721b795114202114a4eb355d20f9ecfd9b8901 | [
"MIT"
] | 1 | 2020-10-05T01:09:15.000Z | 2020-10-05T01:09:15.000Z | iotbot/config.py | li7yue/python--iotbot | ca721b795114202114a4eb355d20f9ecfd9b8901 | [
"MIT"
] | null | null | null | iotbot/config.py | li7yue/python--iotbot | ca721b795114202114a4eb355d20f9ecfd9b8901 | [
"MIT"
] | null | null | null | from typing import List
from .exceptions import InvalidConfigError
from .utils import check_schema
try:
import ujson as json
except Exception:
import json
class _config:
    """Runtime configuration parsed from `.iotbot.json`.

    Mirrors the iotbot client settings; any value missing from the file is
    left as None (or a safe default) instead of raising.
    """

    def __init__(self, c: dict) -> None:
        # settings mirrored from the iotbot config; missing entries become None
        # host address of the iotbot server
        host = c.get('host')
        if host:
            self.host = check_schema(str(host))
        else:
            self.host = None
        # port number
        try:
            self.port = int(c.get('port'))
        except Exception:
            self.port = None
        # group blacklist
        self.group_blacklist: List[int] = c.get('group_blacklist')
        # friend blacklist
        self.friend_blacklist: List[int] = c.get('friend_blacklist')
        # webhook-related settings
        # on/off switch
        self.webhook = bool(c.get('webhook'))
        # push target URL
        webhook_post_url = c.get('webhook_post_url')
        if webhook_post_url:
            self.webhook_post_url = check_schema(str(webhook_post_url))
        else:
            self.webhook_post_url = None
        # push timeout in seconds (defaults to 10 on a bad/missing value)
        try:
            self.webhook_timeout = int(c.get('webhook_timeout'))
        except Exception:
            self.webhook_timeout = 10
# Load the user configuration from the working directory: a missing file is
# tolerated (all defaults), malformed JSON fails loudly.
_config_dict = {}
try:
    with open('./.iotbot.json', encoding='utf-8') as f:
        _config_dict = json.load(f)
except FileNotFoundError:
    pass  # no config file: fall back to defaults everywhere
except json.JSONDecodeError as e:
    # NOTE(review): when ujson is imported as json it may not expose
    # JSONDecodeError under this name -- confirm the fallback import path.
    raise InvalidConfigError('配置文件不规范') from e

# the module-level singleton used by the rest of the package
config = _config(_config_dict)
# print('=====config=====')
# print('port: ', config.port)
# print('host: ', config.host)
# print('webhook: ', config.webhook)
# print('webhook_post_url: ', config.webhook_post_url)
# print('webhook_timeout: ', config.webhook_timeout)
# print('================')
| 26.107692 | 71 | 0.597525 | 1,074 | 0.601681 | 0 | 0 | 0 | 0 | 0 | 0 | 549 | 0.307563 |
59ffd86260272037b69140ff81d306c93dca9bd8 | 3,487 | py | Python | munjong/remove_sejong_period_error.py | cjdans5545/khaiii | 328d5a8af456a5941130383354c07d1cd0e47cf5 | [
"Apache-2.0"
] | 1,235 | 2018-11-30T01:35:13.000Z | 2022-03-31T03:47:48.000Z | munjong/remove_sejong_period_error.py | cjdans5545/khaiii | 328d5a8af456a5941130383354c07d1cd0e47cf5 | [
"Apache-2.0"
] | 91 | 2018-11-30T05:19:28.000Z | 2022-03-14T12:38:44.000Z | munjong/remove_sejong_period_error.py | cjdans5545/khaiii | 328d5a8af456a5941130383354c07d1cd0e47cf5 | [
"Apache-2.0"
] | 332 | 2018-11-30T00:49:04.000Z | 2022-03-30T01:57:54.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
remove wrong sentence breaking marks after period error eojeol
__author__ = 'Jamie (jamie.lim@kakaocorp.com)'
__copyright__ = 'Copyright (C) 2017-, Kakao Corp. All rights reserved.'
"""
###########
# imports #
###########
from argparse import ArgumentParser
import logging
import os
import re
import sys
from typing import TextIO, Tuple
from khaiii.munjong.sejong_corpus import Morph, WORD_ID_PTN
#############
# functions #
#############
def _get_three_lines(fin: TextIO) -> Tuple[str, str, str]:
"""
get three lines tuple from file (generator)
Args:
fin: input file
Yields:
prev. prev. line
prev. line
curr. line
"""
prev_prev_line = fin.readline().rstrip('\r\n')
prev_line = fin.readline().rstrip('\r\n')
# print first two lines
print(prev_prev_line)
print(prev_line)
for curr_line in fin:
curr_line = curr_line.rstrip('\r\n')
yield prev_prev_line, prev_line, curr_line
prev_prev_line = prev_line
prev_line = curr_line
def _is_known_period_error_eojeol(line: str) -> bool:
    """
    Whether the eojeol (word line) contains a known period-related
    sentence-splitting error.
    Args:
        line:  line (eojeol)
    Returns:
        whether it has such an error or not
    """
    cols = line.split('\t')
    if len(cols) != 3 or not WORD_ID_PTN.match(cols[0]):
        # not a "word-id <TAB> surface <TAB> morphemes" line
        return False
    if '/SF + ' not in cols[2] or re.match(r'.+/EF \+ ./SF$', cols[2]):
        # no sentence-final punctuation inside the eojeol, or a normal
        # "final ending + period" that legitimately ends a sentence
        return False
    if re.match(r'.+/SF \+ [\'"’”]/SS$', cols[2]):
        # period followed by a closing quotation mark: legitimate sentence end
        return False
    morphs = [Morph.parse(_) for _ in cols[2].split(' + ')]
    tags_str = '+'.join([_.tag for _ in morphs])
    if 'SN+SF+SN' in tags_str and not tags_str.endswith('+SF'):
        # digits around a period, e.g. '4.6판' (edition 4.6):
        #   4/SN + ./SF + 6/SN + 판/NNB
        if 'XSN+SF+SN' not in tags_str:
            return True
    elif 'SL+SF+SL' in tags_str and not tags_str.endswith('+SF'):
        # latin letters around periods, e.g. 'S.M.오너' (S.M. owner):
        #   S/SL + ./SF + M/SL + ./SF + 오너/NNG
        return True
    return False
def run():
    """
    Filter stdin: drop wrong sentence-break marks that follow known
    period-error eojeols, echo every other line to stdout.
    """
    file_name = os.path.basename(sys.stdin.name)
    triples = enumerate(_get_three_lines(sys.stdin), start=1)
    for line_num, (two_back, one_back, current) in triples:
        if current == '</p>' and _is_known_period_error_eojeol(one_back):
            # spurious paragraph close directly after a period-error eojeol
            continue
        if (one_back == '</p>' and current == '<p>'
                and _is_known_period_error_eojeol(two_back)):
            # spurious break (close + reopen) after a period-error eojeol
            logging.info('%s:%d\t%s', file_name, line_num, two_back)
            continue
        print(current)
########
# main #
########
def main():
    """
    main function processes only argument parsing
    """
    parser = ArgumentParser(description='remove wrong sentence breaking marks after'
                                        ' period error eojeol')
    parser.add_argument('--input', help='input file <default: stdin>', metavar='FILE')
    parser.add_argument('--output', help='output file <default: stdout>', metavar='FILE')
    parser.add_argument('--debug', help='enable debug', action='store_true')
    args = parser.parse_args()

    # redirect the standard streams when files were given
    if args.input:
        sys.stdin = open(args.input, 'rt')
    if args.output:
        sys.stdout = open(args.output, 'wt')
    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=log_level)

    run()
if __name__ == '__main__':
    # run only when executed as a script, not on import
    main()
| 28.818182 | 98 | 0.587898 | 0 | 0 | 591 | 0.16662 | 0 | 0 | 0 | 0 | 1,272 | 0.358613 |
9400374362a198841f41c9a2597aa394d299e8f8 | 42 | py | Python | fluent_contents/__init__.py | pombredanne/django-fluent-contents | b8a999a8c1e2fe03a591023688e74c9e4b472d5c | [
"Apache-2.0"
] | null | null | null | fluent_contents/__init__.py | pombredanne/django-fluent-contents | b8a999a8c1e2fe03a591023688e74c9e4b472d5c | [
"Apache-2.0"
] | null | null | null | fluent_contents/__init__.py | pombredanne/django-fluent-contents | b8a999a8c1e2fe03a591023688e74c9e4b472d5c | [
"Apache-2.0"
] | null | null | null | # following PEP 386
__version__ = "1.0a1"
| 14 | 21 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.619048 |
940087754d4f3fba00dce2ece646519c4b46fb0b | 5,417 | py | Python | asp/codegen/scala_ast.py | shoaibkamil/asp | 2bc5fd5595c475d43b9ee4451db1b51eb9165fdb | [
"BSD-3-Clause"
] | 12 | 2015-03-20T17:39:23.000Z | 2021-03-17T17:14:25.000Z | asp/codegen/scala_ast.py | shoaibkamil/asp | 2bc5fd5595c475d43b9ee4451db1b51eb9165fdb | [
"BSD-3-Clause"
] | 1 | 2015-12-28T11:22:48.000Z | 2015-12-28T11:22:48.000Z | asp/codegen/scala_ast.py | shoaibkamil/asp | 2bc5fd5595c475d43b9ee4451db1b51eb9165fdb | [
"BSD-3-Clause"
] | 9 | 2015-01-06T00:36:53.000Z | 2020-09-19T14:31:26.000Z | import ast
"""
I don't use the Generable class inheritance
"""
class Generable():
    """Marker base class for Scala AST nodes; carries no shared behavior."""
    pass
class func_types(Generable):
    """Container node for a function's type list."""

    def __init__(self, types):
        self.types, self._fields = types, []
class Number(Generable):
    """Numeric literal node; iterating it yields the node itself once."""

    def __init__(self, num):
        self.num = num
        self._fields = []
        self.done = False  # one-shot iteration guard

    def __iter__(self):
        return self

    def __next__(self):
        # yield self once, then stop; defining __next__ (not just the
        # Python 2 `next` method) keeps the node iterable under Python 3
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
class String(Generable):
    """String literal node; iterating it yields the node itself once."""

    def __init__(self, text):
        self.text = text
        self._fields = ['text']
        self.done = False  # one-shot iteration guard

    def __iter__(self):
        return self

    def __next__(self):
        # __next__ added so Python 3 iteration works; yields self once
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
class Name(Generable):
    """Identifier node; iterating it yields the node itself once."""

    def __init__(self, name):
        self.name = name
        self._fields = []
        self.done = False  # one-shot iteration guard

    def __iter__(self):
        return self

    def __next__(self):
        # __next__ added so Python 3 iteration works; yields self once
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
class Function(Generable):
    """Function definition node: declaration plus body."""

    def __init__(self, declaration, body):
        self.declaration = declaration
        self.body = body
        self._fields = []
        self.done = False  # one-shot iteration guard

    def __iter__(self):
        return self

    def __next__(self):
        # __next__ added so Python 3 iteration works; yields self once
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
class Arguments(Generable):
    """Formal argument list of a function."""

    def __init__(self, args):
        self.args, self._fields = args, []
class FunctionDeclaration(Generable):
    """Function signature node: name plus argument list."""

    def __init__(self, name, args):
        self.name, self.args = name, args
class Expression(Generable):
    """Base class for expression nodes."""

    def __init__(self):
        # NOTE(review): bare super(Expression, self) without .__init__() is
        # a no-op; kept as-is because subclasses do not rely on it
        super(Expression, self)
        self._fields = []
        self.done = False  # one-shot iteration guard

    def __iter__(self):
        return self

    def __next__(self):
        # __next__ added so Python 3 iteration works; yields self once
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
class Call(Expression):
    """Call node: callee expression plus arguments."""

    def __init__(self, func, args):
        self.func = func
        self.args = args
        self._fields = []
        self.done = False  # one-shot iteration guard

    def __iter__(self):
        return self

    def __next__(self):
        # __next__ added so Python 3 iteration works; yields self once
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
class Attribute(Expression):
    """Attribute access node (value.attr)."""

    def __init__(self, value, attr):
        self.attr, self.value = attr, value
class List(Expression):
    """List literal node."""

    def __init__(self, elements):
        self.elements, self._fields = elements, []
class BinOp(Expression):
    """Binary operation node (left op right)."""

    def __init__(self, left, op, right):
        self.left, self.op, self.right = left, op, right
        self._fields = ['left', 'right']
        self.done = False
class BoolOp(Expression):
    """Boolean operation node (and/or over a list of values)."""

    def __init__(self, op, values):
        self.op = op
        self.values = values
        self._fields = ['op', 'values']
        self.done = False  # one-shot iteration guard

    def __iter__(self):
        return self

    def __next__(self):
        # __next__ added so Python 3 iteration works; yields self once
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
class UnaryOp(Expression):
    """Unary operation node (op operand)."""

    def __init__(self, op, operand):
        self.op, self.operand = op, operand
        self._fields = ['operand']
class Subscript(Expression):
    """Subscript node (value[index]) with its load/store context."""

    def __init__(self, value, index, context):
        self.value, self.index, self.context = value, index, context
        self._fields = ['value', 'index', 'context']
class Print(Generable):
    """Print statement node: text, trailing-newline flag and destination."""

    def __init__(self, text, newline, dest):
        self.text = text
        self.newline = newline
        self.dest = dest
        self.done = False  # one-shot iteration guard

    def __iter__(self):
        return self

    def __next__(self):
        # __next__ added so Python 3 iteration works; yields self once
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
class ReturnStatement(Generable):
    """Return statement node carrying the returned expression."""

    def __init__(self, retval):
        self.retval = retval
        self._fields = ['retval']
        self.done = False  # one-shot iteration guard

    def __iter__(self):
        return self

    def __next__(self):
        # __next__ added so Python 3 iteration works; yields self once
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
class AugAssign(Generable):
    """Augmented assignment node (target op= value)."""

    def __init__(self, target, op, value):
        self.target = target
        self.op = op
        self.value = value
        self.done = False  # one-shot iteration guard

    def __iter__(self):
        return self

    def __next__(self):
        # __next__ added so Python 3 iteration works; yields self once
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
class Assign(Generable):
    """Assignment node (lvalue = rvalue).

    NOTE(review): the original asked whether this should inherit from
    Expression instead of Generable -- left unchanged.
    """

    def __init__(self, lvalue, rvalue):
        self.lvalue = lvalue
        self.rvalue = rvalue
        self._fields = ['lvalue', 'rvalue']
        self.done = False  # one-shot iteration guard

    def __iter__(self):
        return self

    def __next__(self):
        # __next__ added so Python 3 iteration works; yields self once
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
class Compare(Generable):
    """Comparison node (left op right)."""

    def __init__(self, left, op, right):
        self.left = left
        self.op = op
        self.right = right
        self.done = False  # one-shot iteration guard
        self._fields = ('left', 'op', 'right')

    def __iter__(self):
        return self

    def __next__(self):
        # __next__ added so Python 3 iteration works; yields self once
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
class IfConv(Generable):
    """If/else node; inner_if marks a nested (elif-style) branch."""

    def __init__(self, test, body, orelse, inner_if=False):
        self.test = test
        self.body = body
        self.orelse = orelse
        self.inner_if = inner_if
        self.done = False  # one-shot iteration guard

    def __iter__(self):
        return self

    def __next__(self):
        # __next__ added so Python 3 iteration works; yields self once
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
class For(Generable):
    """For-loop node: target, iterable and body."""

    def __init__(self, target, iter, body):
        # `iter` shadows the builtin but is part of the public signature
        self.target = target
        self.iter = iter
        self.body = body
        self.done = False  # one-shot iteration guard

    def __iter__(self):
        return self

    def __next__(self):
        # __next__ added so Python 3 iteration works; yields self once
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
class While(Generable):
    """While-loop node: test condition and body."""

    def __init__(self, test, body):
        self.test = test
        self.body = body
        self._fields = []
        self.done = False  # one-shot iteration guard

    def __iter__(self):
        return self

    def __next__(self):
        # __next__ added so Python 3 iteration works; yields self once
        if self.done:
            raise StopIteration
        self.done = True
        return self

    next = __next__  # Python 2 spelling kept for existing callers
if __name__ == '__main__':
    # module is import-only; nothing to run as a script
    pass
| 17.142405 | 67 | 0.670113 | 5,239 | 0.96714 | 0 | 0 | 0 | 0 | 0 | 0 | 216 | 0.039874 |
9400dcaa88a2929b28b3ed85154b671a3195ca83 | 265 | py | Python | upload_test.py | binocular-vision/container | 35e86b51c430ccbf5f6a9776e33c21e586eda448 | [
"MIT"
] | null | null | null | upload_test.py | binocular-vision/container | 35e86b51c430ccbf5f6a9776e33c21e586eda448 | [
"MIT"
] | null | null | null | upload_test.py | binocular-vision/container | 35e86b51c430ccbf5f6a9776e33c21e586eda448 | [
"MIT"
] | null | null | null | from google.cloud import storage
import json
client = storage.Client()
bucket = client.get_bucket('ibvdata')
blob = bucket.get_blob("experiments/2018-04-14-03-06-01/outputs/json/a0.05_r3.00_p0.05_t1.00")
check = json.loads(blob.download_as_string())
print(check)
| 26.5 | 94 | 0.773585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.298113 |
940152d4021e4a7153b7871ac9b9df4b89170155 | 1,578 | py | Python | cdgo/mathops.py | s-gordon/CDGo | 7bd1b3a6780f70f1237a7f0cac5e112c6b804100 | [
"MIT"
] | 1 | 2019-01-24T20:52:19.000Z | 2019-01-24T20:52:19.000Z | cdgo/mathops.py | s-gordon/CDGo | 7bd1b3a6780f70f1237a7f0cac5e112c6b804100 | [
"MIT"
] | 3 | 2015-06-18T06:09:37.000Z | 2017-09-07T02:48:44.000Z | cdgo/mathops.py | s-gordon/CDGo | 7bd1b3a6780f70f1237a7f0cac5e112c6b804100 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
def residuals(fit, obs):
    """Calculate residuals for fit compared to observed data

    :fit: list of discrete fit data points
    :obs: list of observed data points
    :returns: fit minus observed data points
    """
    return fit - obs
def fit_stats(obs, fit):
    """Goodness-of-fit statistics for *fit* against *obs*.

    Returns (r_squared, ss_tot, ss_res, resid).
    https://stackoverflow.com/questions/19189362/getting-the-r-squared-
    value-using-curve-fit
    """
    resid = fit - obs
    ss_res = np.sum(np.square(resid))
    deviations = obs - np.mean(obs)
    ss_tot = np.sum(np.square(deviations))
    return 1 - (ss_res / ss_tot), ss_tot, ss_res, resid
def sum_squares_total(calc, obs):
    """Total sum of squares of *obs* about its mean.

    *calc* is unused; kept for signature symmetry with
    sum_squares_residuals.
    https://stackoverflow.com/questions/19189362/getting-the-r-squared-
    value-using-curve-fit
    """
    deviations = obs - np.mean(obs)
    return np.sum(deviations ** 2)
def sum_squares_residuals(calc, obs):
    """Sum of squared residuals between calculated and observed values.

    https://stackoverflow.com/questions/19189362/getting-the-r-squared-
    value-using-curve-fit
    """
    diff = calc - obs
    return np.sum(diff * diff)
def rms_error(calc, obs):
    """Calculate root mean squared deviation

    :calc: calculated data from fit
    :obs: experimentally observed data
    :returns: rmsd
    """
    diff = calc - obs
    return np.sqrt(np.mean(diff * diff))
def r_squared(calc, obs):
    """Coefficient of determination (R^2) of *calc* against *obs*.

    https://stackoverflow.com/questions/19189362/getting-the-r-squared-
    value-using-curve-fit
    """
    ss_res = np.sum((calc - obs) ** 2)
    ss_tot = np.sum((obs - np.mean(obs)) ** 2)
    return 1 - (ss_res / ss_tot)
| 23.552239 | 71 | 0.653992 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 816 | 0.51711 |
94021a64302eb30d02fce61d892fb9559a6b654c | 2,932 | py | Python | S4/S4 Library/simulation/gsi_handlers/lot_handlers.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Library/simulation/gsi_handlers/lot_handlers.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Library/simulation/gsi_handlers/lot_handlers.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | from gsi_handlers.commodity_tracker_gsi_util import generate_data_from_commodity_tracker, create_schema_for_commodity_tracker
from sims4.gsi.dispatcher import GsiHandler
from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers
import services
import sims4
import build_buy
# Grid schema backing the "Lot Info" GSI view: one row per lot in the save,
# with a nested sub-grid of statistics populated only for the active lot.
lot_info_schema = GsiGridSchema(label='Lot Info', auto_refresh=False)
lot_info_schema.add_field('neighborhood', label='Neighborhood', unique_field=True)
lot_info_schema.add_field('cur_lot', label='Current Lot', width=0.4)
lot_info_schema.add_field('region_id', label='Region ID', type=GsiFieldVisualizers.INT, width=0.5)
lot_info_schema.add_field('lot_desc_id', label='Description ID', type=GsiFieldVisualizers.INT, width=0.5)
lot_info_schema.add_field('zone_id', label='Zone ID')
lot_info_schema.add_field('venue', label='Venue')
lot_info_schema.add_field('lot_name', label='Lot Name')
with lot_info_schema.add_has_many('statistics', GsiGridSchema, label='Statistics (Current Lot Only)') as sub_schema:
    sub_schema.add_field('statistic', label='Statistic')
    sub_schema.add_field('value', label='Statistic Value', type=GsiFieldVisualizers.FLOAT, width=0.5)
@GsiHandler('lot_info', lot_info_schema)
def generate_lot_info_data(*args, zone_id:int=None, **kwargs):
    """Build one 'Lot Info' row per lot across every neighborhood.

    NOTE(review): the zone_id keyword argument is immediately shadowed by
    the loop variable below and never used as a filter -- confirm whether
    filtering by zone was intended.
    """
    lot_infos = []
    current_zone = services.current_zone()
    lot = current_zone.lot
    venue_manager = services.get_instance_manager(sims4.resources.Types.VENUE)
    for neighborhood_proto in services.get_persistence_service().get_neighborhoods_proto_buf_gen():
        for lot_owner_info in neighborhood_proto.lots:
            zone_id = lot_owner_info.zone_instance_id
            if zone_id is not None:
                venue_tuning_id = build_buy.get_current_venue(zone_id)
                venue_tuning = venue_manager.get(venue_tuning_id)
                if venue_tuning is not None:
                    # flag the row that belongs to the currently loaded lot
                    is_current_lot = lot_owner_info.zone_instance_id == lot.zone_id
                    cur_info = {'neighborhood': neighborhood_proto.name, 'region_id': neighborhood_proto.region_id, 'lot_desc_id': lot_owner_info.lot_description_id, 'zone_id': str(hex(zone_id)), 'venue': venue_tuning.__name__, 'lot_name': lot_owner_info.lot_name, 'cur_lot': 'X' if is_current_lot else ''}
                    if is_current_lot:
                        # the statistics sub-grid is only filled for the active lot
                        stat_entries = []
                        for stat in lot.get_all_stats_gen():
                            stat_entries.append({'statistic': stat.stat_type.__name__, 'value': stat.get_value()})
                        cur_info['statistics'] = stat_entries
                    lot_infos.append(cur_info)
    return lot_infos
# grid schema describing per-statistic rows for the lot commodity data view
commodity_data_schema = create_schema_for_commodity_tracker('Lot Statistics/Continuous Statistic Data')
@GsiHandler('lot_commodity_data_view', commodity_data_schema)
def generate_lot_commodity_data_view():
    """Return commodity-tracker rows for the active lot."""
    active_lot = services.active_lot()
    return generate_data_from_commodity_tracker(active_lot.commodity_tracker)
| 61.083333 | 306 | 0.74045 | 0 | 0 | 0 | 0 | 1,680 | 0.572988 | 0 | 0 | 437 | 0.149045 |
94036080d0fec3020c25a2d6b0cd82b107d85c23 | 731 | py | Python | cubbie/tests/test_production_model.py | rjw57/cubbie | 9421c0196d12f8d142ecafde5c347e09f99ec127 | [
"MIT"
] | null | null | null | cubbie/tests/test_production_model.py | rjw57/cubbie | 9421c0196d12f8d142ecafde5c347e09f99ec127 | [
"MIT"
] | null | null | null | cubbie/tests/test_production_model.py | rjw57/cubbie | 9421c0196d12f8d142ecafde5c347e09f99ec127 | [
"MIT"
] | null | null | null | """
Test Production model
"""
import pytest
from mixer.backend.flask import mixer
from cubbie.model import Production, Capability
from cubbie.fixture import create_production_fixtures
def test_fixtures_created(productions):
    """The production fixture should have > 3 productions."""
    # `productions` is requested only for its side effect of seeding the DB
    production_count = Production.query.count()
    assert production_count > 3
def test_delete_production_cascades_capabilities(session, productions, users):
    """Deleting a production must cascade-delete its capabilities."""
    capability = mixer.blend(Capability, user=mixer.SELECT, production=mixer.SELECT)
    production = capability.production
    session.add(capability)
    session.commit()
    capability_id = capability.id
    assert Capability.query.get(capability_id) is not None

    # removing the production should take the capability with it
    session.delete(production)
    session.commit()
    assert Capability.query.get(capability_id) is None
| 27.074074 | 78 | 0.756498 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.119015 |
940405d6c9775503ef1056bd4a4b8fdb5743beea | 3,589 | py | Python | scripts/SentenceClusterizer.py | JoseMan90/sentence-clustering | ddbbe50754058e8f3277894644b013ec505d2870 | [
"Apache-2.0"
] | null | null | null | scripts/SentenceClusterizer.py | JoseMan90/sentence-clustering | ddbbe50754058e8f3277894644b013ec505d2870 | [
"Apache-2.0"
] | null | null | null | scripts/SentenceClusterizer.py | JoseMan90/sentence-clustering | ddbbe50754058e8f3277894644b013ec505d2870 | [
"Apache-2.0"
] | 1 | 2021-08-10T20:49:41.000Z | 2021-08-10T20:49:41.000Z | import nltk
from sklearn.feature_extraction.text import TfidfVectorizer
import string
from sklearn.decomposition import TruncatedSVD
from hdbscan.hdbscan_ import HDBSCAN
import pandas as pd
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import RandomizedSearchCV
from scripts.DisableCV import DisabledCV
def normalized_tokenizer(text):
    """
    Returns the normalized (proprocessed and stemmed) tokens
    :param text: sentence
    :return: list of tokens
    """
    strip_punct = str.maketrans('', '', string.punctuation)
    stemmer = nltk.stem.snowball.SnowballStemmer("english")
    words = nltk.word_tokenize(text.lower().translate(strip_punct))
    return [stemmer.stem(word) for word in words]
def get_word_vector_matrix(texts, dimensions=10):
    """
    Calculates and returns the reduced words vector matrix
    :param texts: list of sentences
    :param dimensions: dimensions to which the word matrix will be reduced into
    :return: Word vector matrix
    """
    print("Vectorizing sentences into TF-IDF vectors...")
    tfidf = TfidfVectorizer(tokenizer=normalized_tokenizer)
    word_matrix = tfidf.fit_transform(texts)
    print("Word Vector Matrix : " + str(word_matrix.shape))
    # reduce dimensionality with truncated SVD (fixed seed for repeatability)
    svd = TruncatedSVD(n_components=dimensions, n_iter=50, random_state=20)
    reduced = svd.fit_transform(word_matrix)
    print(svd.explained_variance_ratio_)
    return reduced
def hdb_segment(vector_matrix, min_cluster_size=5, min_samples=2, metric="euclidean", cluster_selection_method="eom"):
"""
Segments the given data using the HDB clustering algorithm
:param vector_matrix:
:param min_cluster_size:
:param min_samples:
:param metric:
:param cluster_seletion_method:
:return: cluster labels
"""
print("Running HDB clustering...")
hdb_algo = HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric,
cluster_selection_method=cluster_selection_method)
hdb_algo.fit(vector_matrix)
scores = pd.DataFrame({"label":hdb_algo.labels_, "probability":hdb_algo.probabilities_})
scores["confident"] = 0
scores.loc[scores["probability"]<0.05, "confident"] = 1
print(scores)
print(scores["confident"].mean())
return hdb_algo.labels_
def hdb_scorer(hdb_algo, X):
"""
Segments the given data using the HDB clustering algorithm
"""
hdb_algo.fit(X)
scores = pd.DataFrame({"label":hdb_algo.labels_, "probability":hdb_algo.probabilities_})
scores["confident"] = 0
scores.loc[scores["probability"]>=0.05, "confident"] = 1
scores.loc[scores["label"] == -1, "confident"] = 0
score = scores["confident"].sum()/scores["label"].count()
print("Returning Score : " + str(score))
return score
def hdb_segment_generalized(matrix, iterations=50):
parameter_grid = {
"min_cluster_size": range(5, 100),
"min_samples": range(2, 10),
"metric": ["euclidean"],
"cluster_selection_method": ["eom", "leaf"],
"allow_single_cluster": [True, False]
}
random_search = RandomizedSearchCV(estimator=HDBSCAN(), param_distributions=parameter_grid,
scoring=hdb_scorer, cv=DisabledCV(), n_jobs=-2, random_state=45,
n_iter=iterations, refit=True)
random_search.fit(matrix)
print(random_search.best_score_)
hdb = random_search.best_estimator_
print(pd.Series(hdb.labels_).value_counts(normalize=True))
return hdb.labels_
| 37 | 118 | 0.710783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,061 | 0.295626 |
940450935e7335999ff4b26c4f843122b393e632 | 219 | py | Python | share/graphql/fields.py | felliott/SHARE | 8fd60ff4749349c9b867f6188650d71f4f0a1a56 | [
"Apache-2.0"
] | null | null | null | share/graphql/fields.py | felliott/SHARE | 8fd60ff4749349c9b867f6188650d71f4f0a1a56 | [
"Apache-2.0"
] | null | null | null | share/graphql/fields.py | felliott/SHARE | 8fd60ff4749349c9b867f6188650d71f4f0a1a56 | [
"Apache-2.0"
] | null | null | null | from graphene.types.scalars import Scalar
# Note should define a couple parse methods but this class is only used for serializing
class JSONField(Scalar):
@staticmethod
def serialize(val):
return val
| 21.9 | 87 | 0.744292 | 86 | 0.392694 | 0 | 0 | 56 | 0.255708 | 0 | 0 | 87 | 0.39726 |
9404aefdf46be89ea0882f84aad9a69dde8da596 | 1,753 | py | Python | gene.py | Chappie733/Neat | 2414842197c9a146293246e984557b36c8b9fb89 | [
"MIT"
] | null | null | null | gene.py | Chappie733/Neat | 2414842197c9a146293246e984557b36c8b9fb89 | [
"MIT"
] | null | null | null | gene.py | Chappie733/Neat | 2414842197c9a146293246e984557b36c8b9fb89 | [
"MIT"
] | null | null | null | from enum import Enum
class GeneType(Enum):
CONNECTION_GENE = 0,
NODE_GENE = 1
class NodeType(Enum):
INPUT = 0,
HIDDEN = 1,
OUTPUT = 2
class Gene:
def __init__(self, _type, innov=1) -> None:
self._type = _type
self.innov = innov
def __str__(self):
return f"Gene:\n\tType: {self._type}\n\tInnovation number: {self.innov}"
class ConnectionGene(Gene):
def __init__(self, start, end, innov, enabled=True, weight=0):
super().__init__(GeneType.CONNECTION_GENE, innov)
self.start, self.end = start, end # indexes of ending and starting node
self.enabled = enabled
self.weight = weight # not usually set when the node is created
def __str__(self):
res = f"Connection Gene: "
res += f"\n\tIn: {self.start}\n\tOut: {self.end}"
res += f"\n\tEnabled: {self.enabled}"
res += f"\n\tInnovation number: {self.innov}"
return res
def equals(self, other) -> bool:
'''
Checks if the gene is equal to another, this doesn't take into account the innovation number, but only the
actual connection this gene represents
'''
return self.end == other.end and self.start == other.start
@staticmethod
def are_equal(f, s) -> bool:
return f.end == s.end and f.start == s.start
class NodeGene(Gene):
def __init__(self, index, _type):
super().__init__(GeneType.NODE_GENE, index)
self.index = index
self._type = _type
self.bias = 0
def __str__(self):
return f"Node Gene:\n\tIndex: {self.index}\n\tInnovation number: {self.innov}\n\tBias: {self.bias}\n\tType: {self._type}" | 30.754386 | 129 | 0.596691 | 1,710 | 0.975471 | 0 | 0 | 101 | 0.057616 | 0 | 0 | 570 | 0.325157 |
9406b5e9df7638de76766cd50b89ec3226c0cfe2 | 634 | py | Python | test/test_qxh.py | flowerysong/quickxorhash | 9cfb16542e2c5020d63f38f0f8e870786593ec64 | [
"MIT"
] | 1 | 2021-02-07T07:24:00.000Z | 2021-02-07T07:24:00.000Z | test/test_qxh.py | flowerysong/quickxorhash | 9cfb16542e2c5020d63f38f0f8e870786593ec64 | [
"MIT"
] | 2 | 2020-11-01T13:55:45.000Z | 2020-11-03T21:47:15.000Z | test/test_qxh.py | flowerysong/quickxorhash | 9cfb16542e2c5020d63f38f0f8e870786593ec64 | [
"MIT"
] | 2 | 2019-09-03T18:16:47.000Z | 2020-11-03T19:27:53.000Z | #!/usr/bin/env python3
import os
import subprocess
import pytest
@pytest.mark.parametrize(
'testfile',
[
('foo.txt', 'ZnjDGxQAAAAAAAAABAAAAAAAAAA=\n'),
('loremipsum.txt', 'ZNqmJyqS9l79QjW7eNx0qjaDpMY=\n'),
('binary.bin', 'GgQWCSPUD9bQ/3xxO0VcOoxc4ZM=\n'),
]
)
def test_qxh(testfile):
basepath = os.path.dirname(os.path.realpath(__file__))
qxh_bin = os.path.realpath(os.path.join(basepath, '..', 'quickxorhash'))
res = subprocess.run(
[qxh_bin, os.path.join(basepath, testfile[0])],
capture_output=True,
text=True,
)
assert res.stdout == testfile[1]
| 23.481481 | 76 | 0.638801 | 0 | 0 | 0 | 0 | 564 | 0.88959 | 0 | 0 | 183 | 0.288644 |
94074a7f69d0726f20e9c615de67c7046452f8cb | 778 | py | Python | Day 1/Puzzle 1.py | rookuu/AdventOfCode-2015 | b94e98f5711d0901412b22342395457a8185bc10 | [
"MIT"
] | null | null | null | Day 1/Puzzle 1.py | rookuu/AdventOfCode-2015 | b94e98f5711d0901412b22342395457a8185bc10 | [
"MIT"
] | null | null | null | Day 1/Puzzle 1.py | rookuu/AdventOfCode-2015 | b94e98f5711d0901412b22342395457a8185bc10 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Solution to Day 1 - Puzzle 1 of the Advent Of Code 2015 series of challenges.
--- Day 1: Not Quite Lisp ---
An opening parenthesis represents an increase in floor and a closing parenthesis represents a decrease in floor.
After taking a 7000 character long input string of assorted parenthesis, determine the resulting floor.
-----------------------------
Author: Luke "rookuu" Roberts
"""
inputData = raw_input("Puzzle Input: ") # Copy and Paste in the input string, hint: use CTRL-A
floor = 0 # Holds the variable to decrement/increment depending on the flavor of parenthesis.
for char in inputData:
if char == "(":
floor += 1
elif char == ")":
floor -= 1
print "The floor Santa arrives at is... Floor " + str(floor)
| 29.923077 | 112 | 0.676093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 617 | 0.793059 |
94095a079d2202cdff5d27e376cc7dec1b8ab428 | 22 | py | Python | startup.py | felixludos/adversary | bda1d7a07da736056b69903cb51b29ccdf1eb95e | [
"MIT"
] | null | null | null | startup.py | felixludos/adversary | bda1d7a07da736056b69903cb51b29ccdf1eb95e | [
"MIT"
] | null | null | null | startup.py | felixludos/adversary | bda1d7a07da736056b69903cb51b29ccdf1eb95e | [
"MIT"
] | null | null | null |
import adversary
| 5.5 | 17 | 0.681818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
940a6362456b8a9d850cd76157668ee72585c4d1 | 5,226 | py | Python | vezda/svd_utils.py | aaronprunty/vezda | 79a940f30981bf636e68594678d689706b894133 | [
"Apache-2.0"
] | 3 | 2019-02-19T17:53:47.000Z | 2019-07-21T07:17:19.000Z | vezda/svd_utils.py | aaronprunty/vezda | 79a940f30981bf636e68594678d689706b894133 | [
"Apache-2.0"
] | null | null | null | vezda/svd_utils.py | aaronprunty/vezda | 79a940f30981bf636e68594678d689706b894133 | [
"Apache-2.0"
] | 2 | 2018-12-12T15:04:56.000Z | 2019-07-20T16:30:16.000Z | import sys
import time
import numpy as np
import scipy.sparse as sp
from vezda.math_utils import humanReadable
from vezda.LinearOperators import asConvolutionalOperator
def compute_svd(kernel, k, operatorName):
A = asConvolutionalOperator(kernel)
if k_is_valid(k, min(A.shape)):
if operatorName == 'nfo':
name = 'near-field operator'
elif operatorName == 'lso':
name = 'Lippmann-Schwinger operator'
if k == 1:
print('Computing SVD of the %s for 1 singular value/vector...' %(name))
else:
print('Computing SVD of the %s for %s singular values/vectors...' %(name, k))
startTime = time.time()
U, s, Vh = sp.linalg.svds(A, k, which='LM')
endTime = time.time()
print('Elapsed time:', humanReadable(endTime - startTime))
# sort the singular values and corresponding vectors in descending order
# (i.e., largest to smallest)
index = s.argsort()[::-1]
s = s[index]
U = U[:, index]
Vh = Vh[index, :]
if np.issubdtype(U.dtype, np.complexfloating):
# Exploit sparseness of SVD in frequency domain for efficient storage
Nr, Ns = kernel.shape[0], kernel.shape[2]
U = make_sparse(U, Nr, 'csc')
Vh = make_sparse(Vh, Ns, 'csr')
save_svd(U, s, Vh, operatorName)
return U, s, Vh
else:
sys.exit()
def save_svd(U, s, Vh, operatorName):
if operatorName == 'nfo':
filename = 'NFO_SVD.npz'
elif operatorName == 'lso':
filename = 'LSO_SVD.npz'
if np.issubdtype(U.dtype, np.complexfloating):
# singular vectors are complex
# store as sparse matrices
domain = 'freq'
np.savez(filename,
U_data=U.data, U_indices=U.indices, U_indptr=U.indptr,
U_shape=U.shape,
Vh_data=Vh.data, Vh_indices=Vh.indices, Vh_indptr=Vh.indptr,
Vh_shape=Vh.shape,
s=s, domain=domain)
else:
# singular vectors are real
domain = 'time'
np.savez(filename, U=U, s=s, Vh=Vh, domain=domain)
def load_svd(filename):
print('Attempting to load SVD...', end='')
try:
loader = np.load(filename)
print('Success')
except IOError as e:
print('Failure')
raise e
s = loader['s']
domain = loader['domain']
if domain == 'freq':
U = sp.csc_matrix((loader['U_data'], loader['U_indices'], loader['U_indptr']),
shape=loader['U_shape'])
Vh = sp.csr_matrix((loader['Vh_data'], loader['Vh_indices'], loader['Vh_indptr']),
shape=loader['Vh_shape'])
elif domain == 'time':
U = loader['U']
Vh = loader['Vh']
return U, s, Vh
def make_sparse(A, r, compressedFormat):
'''
Return a sparse representation of a matrix A based on the r largest nonzero
row/column elements.
A: a dense matrix (2D array) to make sparse
r: a positive integer giving the number of nonzero row/column elements to extract from A
compressedFormat: compressed storage format of resulting sparse matrix.
'csc' format results in a sparse column matrix. Extracts the r largest nonzero
elements along each column of A. Best for 'tall' matrices.
'csr' format results in a sparse row matrix. Extracts the r largest nonzero
elements along each row of A. Best for 'wide' matrices.
'''
M, N = A.shape
if compressedFormat == 'csc':
indx = np.argpartition(-np.abs(A), r, axis=0)[:r, :]
data = A[indx, np.arange(N)].reshape(r * N, order='F')
rows = indx.reshape(-1, order='F')
cols = (np.ones((N, r), np.int_) * np.arange(N)[:, None]).reshape(-1)
return sp.csc_matrix((data, (rows, cols)), shape=(M, N))
elif compressedFormat == 'csr':
indx = np.argpartition(-np.abs(A), r, axis=1)[:, :r]
data = A[np.arange(M)[:, None], indx].reshape(r * M)
rows = (np.ones((M, r), np.int_) * np.arange(M)[:, None]).reshape(-1)
cols = indx.reshape(-1)
return sp.csr_matrix((data, (rows, cols)), shape=(M, N))
def k_is_valid(k, maxVals):
if type(k) == int:
if k >= 1 and k < maxVals:
return True
else:
print('''Number of singular values must be a positive integer
between {n1} and {n2}.'''.format(n1=1, n2=maxVals))
return False
else:
print('''Number of singular values must be a positive integer
between {n1} and {n2}.'''.format(n1=1, n2=maxVals))
return False
def svd_needs_recomputing(kernel, k, U, s, Vh):
if k is None:
return False
Nr, Nm, Ns = kernel.shape
M, N = Nr * Nm, Ns * Nm
if k_is_valid(k, min(M, N)):
if ((M, k), (k, N)) == (U.shape, Vh.shape) and k == len(s):
return False
else:
print('Inconsistent dimensions: SVD needs recomputing...')
return True
| 32.6625 | 92 | 0.556831 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,557 | 0.297933 |
940a8598f3040a3a7046ea34ba748451869cc818 | 4,448 | py | Python | experiments/cub_hypernym/load_cub_hypernym.py | Glaciohound/VCML | 5a0f01a0baba238cef2f63131fccd412e3d7822b | [
"MIT"
] | 52 | 2019-12-04T22:26:56.000Z | 2022-03-31T17:04:15.000Z | experiments/cub_hypernym/load_cub_hypernym.py | guxiwuruo/VCML | 5a0f01a0baba238cef2f63131fccd412e3d7822b | [
"MIT"
] | 6 | 2020-08-25T07:35:14.000Z | 2021-09-09T04:57:09.000Z | experiments/cub_hypernym/load_cub_hypernym.py | guxiwuruo/VCML | 5a0f01a0baba238cef2f63131fccd412e3d7822b | [
"MIT"
] | 5 | 2020-02-10T07:39:24.000Z | 2021-06-23T02:53:42.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : load_cub_hypernym.py
# Author : Chi Han, Jiayuan Mao
# Email : haanchi@gmail.com, maojiayuan@gmail.com
# Date : 07.08.2019
# Last Modified Date: 20.11.2019
# Last Modified By : Chi Han
#
# This file is part of the VCML codebase
# Distributed under MIT license
from ..utils import load_questions
from utility.common import contains
def hypernym_questions(dataset, args, logger):
hyp_questions = load_questions.load_question_file(
dataset, 'hypernym_cub', args, logger
)
return hyp_questions
def raw_hypernym(dataset, args, logger):
logger('Identical hypernym suite')
with logger.levelup():
hyp_questions = hypernym_questions(dataset, args, logger)
hyp_full = load_questions.identical_suite(hyp_questions, logger)
return hyp_full
def hypernym_balanced_full(dataset, args, logger):
logger('Loading a full hypernym suite')
with logger.levelup():
hyp_suite = raw_hypernym(dataset, args, logger)
balanced = load_questions.balance_KwAns_suite(hyp_suite, logger)
return balanced
def hypernym_balanced_split(test_concepts, dataset, args, logger):
logger('Loading balaced hypernym suite splitted by test_concepts')
with logger.levelup():
hyp_suite = raw_hypernym(dataset, args, logger)
hyp_suite = load_questions.split_testConcepts(
hyp_suite, test_concepts, logger)
hyp_suite = load_questions.balance_KwAns_suite(hyp_suite, logger)
return hyp_suite
# Visual part
def raw_classify_hypernym(dataset, args, logger):
logger('Loading classify-hypernym suite')
with logger.levelup():
raw_suite = {
'train': load_questions.load_question_file(
dataset, 'train_cub_classification_hypernym', args, logger
),
'val': load_questions.load_question_file(
dataset, 'val_cub_classification_hypernym', args, logger
),
'test': load_questions.load_question_file(
dataset, 'test_cub_classification_hypernym', args, logger
),
}
return raw_suite
def raw_exist_hypernym(dataset, args, logger):
logger('Loading exist-hypernym suite')
with logger.levelup():
raw_suite = {
'train': load_questions.load_question_file(
dataset, 'train_cub_exist_hypernym', args, logger
),
'val': load_questions.load_question_file(
dataset, 'val_cub_exist_hypernym', args, logger
),
'test': load_questions.load_question_file(
dataset, 'test_cub_exist_hypernym', args, logger
),
}
return raw_suite
def balanced_exist(dataset, args, logger):
logger('Loading a balanced exist suite')
raw_suite = raw_exist_hypernym(dataset, args, logger)
balanced_suite = load_questions.balance_KwAns_suite(raw_suite, logger)
return balanced_suite
def biased_exist(dataset, test_concepts, args, logger):
logger('Biasing dataset ratio according to test concepts')
with logger.levelup():
raw_suite = raw_exist_hypernym(dataset, args, logger)
biased = {
'train': load_questions.fewer_bias(
raw_suite['train'], test_concepts, args.fewshot_ratio, logger
),
'val': raw_suite['val'].filter(
lambda q: not contains(q['keywords'], test_concepts)
),
'test': raw_suite['test'].filter(
lambda q: contains(q['keywords'], test_concepts)
)
}
balanced = load_questions.balance_KwAns_suite(biased, logger)
return balanced
def biased_classify(dataset, test_concepts, args, logger):
logger('Biasing classify dataset according to test concepts')
with logger.levelup():
raw_suite = raw_classify_hypernym(dataset, args, logger)
biased = {
'train': load_questions.fewer_bias_clsf(
raw_suite['train'], test_concepts, args.fewshot_ratio, logger
),
'val': load_questions.fewer_bias_clsf(
raw_suite['val'], test_concepts, 0, logger
),
'test': load_questions.fewer_bias_clsf(
raw_suite['test'], test_concepts, 0, logger,
reverse=True
)
}
return biased
| 33.954198 | 77 | 0.642536 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,000 | 0.22482 |
940b5182827ad683c298cbd7154d0d4cc405715c | 1,143 | py | Python | Ex_Files_Python_EssT/Exercise Files/Chap01/rockPaperScissors.py | cinthiatengan/Basic-Python | 9dbc280af2e9af9b79421018be814d0f178d8097 | [
"MIT"
] | null | null | null | Ex_Files_Python_EssT/Exercise Files/Chap01/rockPaperScissors.py | cinthiatengan/Basic-Python | 9dbc280af2e9af9b79421018be814d0f178d8097 | [
"MIT"
] | null | null | null | Ex_Files_Python_EssT/Exercise Files/Chap01/rockPaperScissors.py | cinthiatengan/Basic-Python | 9dbc280af2e9af9b79421018be814d0f178d8097 | [
"MIT"
] | null | null | null | """ Rock Paper Scissors
--------------------------------------------------------------
"""
import random
import os
import re
os.system('cls' if os.name=='nt' else clear)
while (1< 2):
print ("\n")
print ("Rock, Paper, Scissors - Shoot!")
userChoice = input("Choose your weapon [R]ock, [P]aper, or [S]cissors: ")
if not re.match("[SsRrPp]", userChoice):
print ("Please choose a letter: ")
print ("[R]ock, [P]aper, or [S]cissors.")
continue
# Echo the user's choice
print ("You choose: " + userChoice)
choices = ['R', 'P', 'S']
opponentChoice = random.choice(choices)
print ("I choose: " + opponentChoice)
if opponentChoice == str.upper(userChoice):
print ("Tie!")
elif opponentChoice == 'R' and userChoice.upper() == 'S':
print ("Rock beats Scissors, I lose")
continue
elif opponentChoice == 'S' and userChoice.upper() == 'P':
print ("Scissors beats paper, I lose")
continue
elif opponentChoice == 'P' and userChoice.upper() == 'R':
print ("Paper beats rock, I lose")
continue
else:
print ("I win!") | 33.617647 | 77 | 0.552931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 433 | 0.378828 |
940bf097a889a2b00a5f8c6a8a5c2eb0138687b3 | 1,350 | py | Python | classes.py | emersonsoaresdasilva/systeam-impacta | b19e5e2e4b9d573fa53ec5d356c004c4ad1c0f54 | [
"Apache-2.0"
] | null | null | null | classes.py | emersonsoaresdasilva/systeam-impacta | b19e5e2e4b9d573fa53ec5d356c004c4ad1c0f54 | [
"Apache-2.0"
] | null | null | null | classes.py | emersonsoaresdasilva/systeam-impacta | b19e5e2e4b9d573fa53ec5d356c004c4ad1c0f54 | [
"Apache-2.0"
] | 1 | 2020-11-19T17:06:18.000Z | 2020-11-19T17:06:18.000Z | class Usuario(object):
def __init__(self, email='', senha=''):
self.email = email
self.senha = senha
def __str__(self):
return f'{self.email}'
class Equipe(object):
def __init__(self, nome='', sigla='', local=''):
self.nome = nome
self.sigla = sigla
self.local = local
def __str__(self):
return f'{self.nome} ({self.sigla})'
class Partida(object):
def __init__(self, equipe_casa, equipe_visita, pontos_casa, pontos_visita):
self.equipe_casa = equipe_casa
self.equipe_visita = equipe_visita
self.pontos_casa = pontos_casa
self.pontos_visita = pontos_visita
def __str__(self):
return f'{self.equipe_casa} ({self.pontos_casa}) - {self.equipe_visita} ({self.pontos_visita})'
def vencedor(self):
if self.pontos_casa > self.pontos_visita:
return self.equipe_casa
elif self.pontos_visita > self.pontos_casa:
return self.equipe_visita
return False
def id(self):
return (self.equipe_casa.sigla+self.equipe_visita.sigla)
def trocar_equipe(self, sigla_anterior, equipe):
if self.equipe_casa.sigla == sigla_anterior:
self.equipe_casa = equipe
elif self.equipe_visita.sigla == sigla_anterior:
self.equipe_visita = equipe
| 28.125 | 103 | 0.637037 | 1,343 | 0.994815 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.105185 |
940ffb0bbc2dbf7544db176a515e248e22ab71ef | 16,927 | py | Python | skorch/helper.py | sakuranew/skorch | 54f5645425db48f49e8adc617fff049c383e0367 | [
"BSD-3-Clause"
] | 1 | 2021-04-12T14:24:56.000Z | 2021-04-12T14:24:56.000Z | skorch/helper.py | sakuranew/skorch | 54f5645425db48f49e8adc617fff049c383e0367 | [
"BSD-3-Clause"
] | null | null | null | skorch/helper.py | sakuranew/skorch | 54f5645425db48f49e8adc617fff049c383e0367 | [
"BSD-3-Clause"
] | 1 | 2020-12-17T14:20:32.000Z | 2020-12-17T14:20:32.000Z | """Helper functions and classes for users.
They should not be used in skorch directly.
"""
from collections import Sequence
from collections import namedtuple
from functools import partial
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
import torch
from skorch.cli import parse_args
from skorch.utils import _make_split
from skorch.utils import is_torch_data_type
from skorch.utils import to_tensor
class SliceDict(dict):
    """Wrapper for Python dict that makes it sliceable across values.

    Use this if your input data is a dictionary and you have problems
    with sklearn not being able to slice it. Wrap your dict with
    SliceDict and it should usually work.

    Note:

    * SliceDict cannot be indexed by integers, if you want one row,
      say row 3, use `[3:4]`.
    * SliceDict accepts numpy arrays and torch tensors as values.

    Examples
    --------
    >>> X = {'key0': val0, 'key1': val1}
    >>> search = GridSearchCV(net, params, ...)
    >>> search.fit(X, y) # raises error
    >>> Xs = SliceDict(key0=val0, key1=val1) # or Xs = SliceDict(**X)
    >>> search.fit(Xs, y) # works

    """
    def __init__(self, **kwargs):
        lengths = [value.shape[0] for value in kwargs.values()]
        lengths_set = set(lengths)
        if lengths_set and (len(lengths_set) != 1):
            raise ValueError(
                "Initialized with items of different lengths: {}"
                "".format(', '.join(map(str, sorted(lengths_set)))))

        # All values share the same first-dimension length; an empty
        # SliceDict has length 0.
        self._len = lengths[0] if lengths else 0
        super().__init__(**kwargs)

    def __len__(self):
        return self._len

    def __getitem__(self, sl):
        if isinstance(sl, int):
            # Indexing with integers is not well-defined because that
            # reduces the dimension of arrays by one, messing up
            # lengths and shapes.
            raise ValueError("SliceDict cannot be indexed by integers.")
        if isinstance(sl, str):
            return super().__getitem__(sl)
        # Any other indexer (slice, array, mask, ...) is applied to
        # every value, yielding a new SliceDict.
        return SliceDict(**{k: v[sl] for k, v in self.items()})

    def __setitem__(self, key, value):
        if not isinstance(key, str):
            raise TypeError("Key must be str, not {}.".format(type(key)))

        length = value.shape[0]
        if not self.keys():
            # The first value determines the common length.
            self._len = length

        if self._len != length:
            raise ValueError(
                "Cannot set array with shape[0] != {}"
                "".format(self._len))

        super().__setitem__(key, value)

    def update(self, *args, **kwargs):
        """Update the dict with new key/value pairs.

        Accepts the same arguments as ``dict.update`` (a mapping, an
        iterable of key/value pairs, and/or keyword arguments). Every
        pair is routed through ``__setitem__`` so the common-length
        invariant is enforced.

        """
        for key, value in dict(*args, **kwargs).items():
            self.__setitem__(key, value)

    def __repr__(self):
        out = super().__repr__()
        return "SliceDict(**{})".format(out)

    @property
    def shape(self):
        # Mimic the numpy/sklearn convention of a 1-d container.
        return (self._len,)

    def copy(self):
        # Shallow copy that preserves the SliceDict type.
        return type(self)(**self)

    def fromkeys(self, *args, **kwargs):
        """fromkeys method makes no sense with SliceDict and is thus not
        supported."""
        raise TypeError("SliceDict does not support fromkeys.")

    def __eq__(self, other):
        if self.keys() != other.keys():
            return False

        for key, val in self.items():
            val_other = other[key]

            # torch tensors: elementwise comparison, then reduce
            if is_torch_data_type(val):
                if not is_torch_data_type(val_other):
                    return False
                if not (val == val_other).all():
                    return False
                continue

            # numpy arrays: elementwise comparison, then reduce
            if isinstance(val, np.ndarray):
                if not isinstance(val_other, np.ndarray):
                    return False
                if not (val == val_other).all():
                    return False
                continue

            # everything else compares directly
            if val != val_other:
                return False

        return True

    def __ne__(self, other):
        return not self.__eq__(other)
# This class must be an instance of Sequence and have an ndim
# attribute because sklearn will test this.
class SliceDataset(Sequence):
    # pylint: disable=anomalous-backslash-in-string
    """Helper class that wraps a torch dataset to make it work with
    sklearn.

    Sometimes, sklearn will touch the input data, e.g. when splitting
    the data for a grid search. This will fail when the input data is
    a torch dataset. To prevent this, use this wrapper class for your
    dataset.

    Note: This class will only return the X value by default (i.e. the
    first value returned by indexing the original dataset). Sklearn,
    and hence skorch, always require 2 values, X and y. Therefore, you
    still need to provide the y data separately.

    Note: This class behaves similarly to a PyTorch
    :class:`~torch.utils.data.Subset` when it is indexed by a slice or
    numpy array: It will return another ``SliceDataset`` that
    references the subset instead of the actual values. Only when it
    is indexed by an int does it return the actual values. The reason
    for this is to avoid loading all data into memory when sklearn,
    for instance, creates a train/validation split on the
    dataset. Data will only be loaded in batches during the fit loop.

    Examples
    --------
    >>> X = MyCustomDataset()
    >>> search = GridSearchCV(net, params, ...)
    >>> search.fit(X, y) # raises error
    >>> ds = SliceDataset(X)
    >>> search.fit(ds, y) # works

    Parameters
    ----------
    dataset : torch.utils.data.Dataset
      A valid torch dataset.

    idx : int (default=0)
      Indicates which element of the dataset should be
      returned. Typically, the dataset returns both X and y
      values. SliceDataset can only return 1 value. If you want to
      get X, choose idx=0 (default), if you want y, choose idx=1.

    indices : list, np.ndarray, or None (default=None)
      If you only want to return a subset of the dataset, indicate
      which subset that is by passing this argument. Typically, this
      can be left to be None, which returns all the data. See also
      :class:`~torch.utils.data.Subset`.

    """
    def __init__(self, dataset, idx=0, indices=None):
        self.dataset = dataset
        self.idx = idx
        self.indices = indices

        # Materialized index array: either the user-supplied subset or
        # the full range of the dataset.
        self.indices_ = (self.indices if self.indices is not None
                         else np.arange(len(self.dataset)))
        # sklearn inspects the ndim attribute of its inputs.
        self.ndim = 1

    def __len__(self):
        return len(self.indices_)

    @property
    def shape(self):
        return (len(self),)

    def transform(self, data):
        """Additional transformations on ``data``.

        Note: If you use this in conjuction with PyTorch
        :class:`~torch.utils.data.DataLoader`, the latter will call
        the dataset for each row separately, which means that the
        incoming ``data`` is a single rows.

        """
        return data

    def _select_item(self, Xn):
        # Raise a custom error message when accessing out of
        # bounds. However, this will only trigger as soon as this is
        # indexed by an integer.
        try:
            return Xn[self.idx]
        except IndexError:
            name = self.__class__.__name__
            msg = ("{} is trying to access element {} but there are only "
                   "{} elements.".format(name, self.idx, len(Xn)))
            raise IndexError(msg)

    def __getitem__(self, i):
        if isinstance(i, (int, np.integer)):
            # Only integer indexing loads actual data from the dataset.
            Xn = self.dataset[self.indices_[i]]
            Xi = self._select_item(Xn)
            return self.transform(Xi)

        if isinstance(i, slice):
            return SliceDataset(self.dataset, idx=self.idx, indices=self.indices_[i])

        if isinstance(i, np.ndarray):
            if i.ndim != 1:
                raise IndexError("SliceDataset only supports slicing with 1 "
                                 "dimensional arrays, got {} dimensions instead."
                                 "".format(i.ndim))
            # FIX: ``np.bool`` was deprecated in NumPy 1.20 and removed in
            # 1.24; ``np.bool_`` is the correct dtype to test against.
            if i.dtype == np.bool_:
                # Convert a boolean mask into the equivalent index array.
                i = np.flatnonzero(i)
            return SliceDataset(self.dataset, idx=self.idx, indices=self.indices_[i])

        # FIX: previously an unsupported indexer type silently returned
        # None; raise an explicit error instead.
        raise TypeError(
            "SliceDataset only supports indexing with int, slice, or "
            "1-dimensional numpy arrays, got {} instead."
            "".format(type(i).__name__))
def predefined_split(dataset):
    """Use ``dataset`` as the validation set in :class:`.NeuralNet`.

    Examples
    --------
    >>> valid_ds = skorch.Dataset(X, y)
    >>> net = NeuralNet(..., train_split=predefined_split(valid_ds))

    Parameters
    ----------
    dataset: torch Dataset
      Validation dataset

    """
    # Bind the validation dataset now; the net will call the returned
    # function later with the training data to perform the split.
    split_fn = partial(_make_split, valid_ds=dataset)
    return split_fn
class DataFrameTransformer(BaseEstimator, TransformerMixin):
"""Transform a DataFrame into a dict useful for working with skorch.
Transforms cardinal data to floats and categorical data to vectors
of ints so that they can be embedded.
Although skorch can deal with pandas DataFrames, the default
behavior is often not very useful. Use this transformer to
transform the DataFrame into a dict with all float columns
concatenated using the key "X" and all categorical values encoded
as integers, using their respective column names as keys.
Your module must have a matching signature for this to work. It
must accept an argument ``X`` for all cardinal
values. Additionally, for all categorical values, it must accept
an argument with the same name as the corresponding column (see
example below). If you need help with the required signature, use
the ``describe_signature`` method of this class and pass it your
data.
You can choose whether you want to treat int columns the same as
float columns (default) or as categorical values.
To one-hot encode categorical features, initialize their
corresponding embedding layers using the identity matrix.
Examples
--------
>>> df = pd.DataFrame({
... 'col_floats': np.linspace(0, 1, 12),
... 'col_ints': [11, 11, 10] * 4,
... 'col_cats': ['a', 'b', 'a'] * 4,
... })
>>> # cast to category dtype to later learn embeddings
>>> df['col_cats'] = df['col_cats'].astype('category')
>>> y = np.asarray([0, 1, 0] * 4)
>>> class MyModule(nn.Module):
... def __init__(self):
... super().__init__()
... self.reset_params()
>>> def reset_params(self):
... self.embedding = nn.Embedding(2, 10)
... self.linear = nn.Linear(2, 10)
... self.out = nn.Linear(20, 2)
... self.nonlin = nn.Softmax(dim=-1)
>>> def forward(self, X, col_cats):
... # "X" contains the values from col_floats and col_ints
... # "col_cats" contains the values from "col_cats"
... X_lin = self.linear(X)
... X_cat = self.embedding(col_cats)
... X_concat = torch.cat((X_lin, X_cat), dim=1)
... return self.nonlin(self.out(X_concat))
>>> net = NeuralNetClassifier(MyModule)
>>> pipe = Pipeline([
... ('transform', DataFrameTransformer()),
... ('net', net),
... ])
>>> pipe.fit(df, y)
Parameters
----------
treat_int_as_categorical : bool (default=False)
Whether to treat integers as categorical values or as cardinal
values, i.e. the same as floats.
float_dtype : numpy dtype or None (default=np.float32)
The dtype to cast the cardinal values to. If None, don't change
them.
int_dtype : numpy dtype or None (default=np.int64)
The dtype to cast the categorical values to. If None, don't
change them. If you do this, it can happen that the categorical
values will have different dtypes, reflecting the number of
unique categories.
Notes
-----
The value of X will always be 2-dimensional, even if it only
contains 1 column.
"""
import pandas as pd
    def __init__(
            self,
            treat_int_as_categorical=False,
            float_dtype=np.float32,
            int_dtype=np.int64,
    ):
        # sklearn convention: store constructor parameters verbatim so
        # that get_params/set_params (and thus clone/grid search) work.
        self.treat_int_as_categorical = treat_int_as_categorical
        self.float_dtype = float_dtype
        self.int_dtype = int_dtype
def _check_dtypes(self, df):
"""Perform a check on the DataFrame to detect wrong dtypes or keys.
Makes sure that there are no conflicts in key names.
If dtypes are found that cannot be dealt with, raises a
TypeError with a message indicating which ones caused trouble.
Raises
------
ValueError
If there already is a column named 'X'.
TypeError
If a wrong dtype is found.
"""
if 'X' in df:
raise ValueError(
"DataFrame contains a column named 'X', which clashes "
"with the name chosen for cardinal features; consider "
"renaming that column.")
wrong_dtypes = []
for col, dtype in zip(df, df.dtypes):
if isinstance(dtype, self.pd.api.types.CategoricalDtype):
continue
if np.issubdtype(dtype, np.integer):
continue
if np.issubdtype(dtype, np.floating):
continue
wrong_dtypes.append((col, dtype))
if not wrong_dtypes:
return
wrong_dtypes = sorted(wrong_dtypes, key=lambda tup: tup[0])
msg_dtypes = ", ".join(
"{} ({})".format(col, dtype) for col, dtype in wrong_dtypes)
msg = ("The following columns have dtypes that cannot be "
"interpreted as numerical dtypes: {}".format(msg_dtypes))
raise TypeError(msg)
    # pylint: disable=unused-argument
    def fit(self, df, y=None, **fit_params):
        """Validate the DataFrame's dtypes and return self.

        This transformer is stateless: nothing is learned from the
        data, only the input is checked for supported dtypes.

        Parameters
        ----------
        df : pd.DataFrame
          Incoming DataFrame.

        y : ignored
          Present for sklearn API compatibility.

        Returns
        -------
        self

        """
        self._check_dtypes(df)
        return self
def transform(self, df):
"""Transform DataFrame to become a dict that works well with skorch.
Parameters
----------
df : pd.DataFrame
Incoming DataFrame.
Returns
-------
X_dict: dict
Dictionary with all floats concatenated using the key "X"
and all categorical values encoded as integers, using their
respective column names as keys.
"""
self._check_dtypes(df)
X_dict = {}
Xf = [] # floats
for col, dtype in zip(df, df.dtypes):
X_col = df[col]
if isinstance(dtype, self.pd.api.types.CategoricalDtype):
x = X_col.cat.codes.values
if self.int_dtype is not None:
x = x.astype(self.int_dtype)
X_dict[col] = x
continue
if (
np.issubdtype(dtype, np.integer)
and self.treat_int_as_categorical
):
x = X_col.astype('category').cat.codes.values
if self.int_dtype is not None:
x = x.astype(self.int_dtype)
X_dict[col] = x
continue
Xf.append(X_col.values)
if not Xf:
return X_dict
X = np.stack(Xf, axis=1)
if self.float_dtype is not None:
X = X.astype(self.float_dtype)
X_dict['X'] = X
return X_dict
    def describe_signature(self, df):
        """Describe the signature required for the given data.
        Pass the DataFrame to receive a description of the signature
        required for the module's forward method. The description
        consists of three parts:
        1. The names of the arguments that the forward method
        needs.
        2. The dtypes of the torch tensors passed to forward.
        3. The number of input units that are required for the
        corresponding argument. For the float parameter, this is just
        the number of dimensions of the tensor. For categorical
        parameters, it is the number of unique elements.
        Returns
        -------
        signature : dict
            Returns a dict with each key corresponding to one key
            required for the forward method. The values are dictionaries
            of two elements. The key "dtype" describes the torch dtype
            of the resulting tensor, the key "input_units" describes the
            required number of input units.
        """
        # fit_transform is presumably supplied by a sklearn TransformerMixin
        # on the enclosing class (fit followed by transform) -- TODO confirm.
        X_dict = self.fit_transform(df)
        signature = {}
        X = X_dict.get('X')
        if X is not None:
            # Cardinal block: one input unit per stacked float column.
            signature['X'] = dict(
                dtype=to_tensor(X, device='cpu').dtype,
                input_units=X.shape[1],
            )
        for key, val in X_dict.items():
            if key == 'X':
                continue
            # Categorical column: input units = number of distinct codes
            # actually present in this data.
            tensor = to_tensor(val, device='cpu')
            nunique = len(torch.unique(tensor))
            signature[key] = dict(
                dtype=tensor.dtype,
                input_units=nunique,
            )
        return signature
| 33.060547 | 85 | 0.595971 | 15,979 | 0.943995 | 0 | 0 | 116 | 0.006853 | 0 | 0 | 9,605 | 0.567437 |
94109df769819af9aa38b685bf904aecad7d9115 | 2,530 | py | Python | expenses/models.py | mateuszwwwrobel/Expense_Tracker_Django | e84bda82433427608e026faa00a634c46a433179 | [
"MIT"
] | null | null | null | expenses/models.py | mateuszwwwrobel/Expense_Tracker_Django | e84bda82433427608e026faa00a634c46a433179 | [
"MIT"
] | null | null | null | expenses/models.py | mateuszwwwrobel/Expense_Tracker_Django | e84bda82433427608e026faa00a634c46a433179 | [
"MIT"
] | null | null | null | from datetime import datetime
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Sum
# Choice tuples for the model CharFields: (stored value, human-readable label).
categories = (
    ('Food', 'Food'),
    ('Takeaway', 'Takeaway'),
    ('Entertainment', 'Entertainment'),
    ('Bills', 'Bills'),
    ('Household Items', 'Household Items'),
    ('Other', 'Other'),
    ('Travel', 'Travel'),
)
# Supported budget currencies (ISO 4217 codes).
currencies = (
    ('PLN', 'PLN'),
    ('GBP', 'GBP'),
    ('EUR', 'EUR'),
)
class Budget(models.Model):
    """A budget that groups expenses and can be shared between users."""
    name = models.CharField(max_length=100)
    currency = models.CharField(max_length=3, choices=currencies)
    # The owner; PROTECT prevents deleting a user who still owns budgets.
    created_by = models.ForeignKey(User, on_delete=models.PROTECT, related_name='created_by')
    # Additional users who share access to this budget.
    users = models.ManyToManyField(User, related_name='users', blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.name
class Expense(models.Model):
    """A single expense entry belonging to a budget."""
    user = models.ForeignKey(User, on_delete=models.PROTECT)
    name = models.CharField(max_length=100)
    price = models.DecimalField(max_digits=10, decimal_places=2)
    category = models.CharField(max_length=15, choices=categories)
    budget = models.ForeignKey(Budget, on_delete=models.CASCADE)
    created_at = models.DateTimeField(default=datetime.now)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return f"{self.price} - {self.category}"
    class Meta:
        ordering = ('created_at', )
    @classmethod
    def get_by_id(cls, id):
        """Return the expense with the given primary key, or None."""
        try:
            expense = cls.objects.get(id=id)
        except ObjectDoesNotExist:
            return None
        else:
            return expense
    @staticmethod
    def _sum_prices(expenses):
        """Sum the 'price' of a queryset, rounded to 2 places (0 if empty)."""
        total = expenses.aggregate(total=Sum('price'))['total']
        return 0 if total is None else round(total, 2)
    @staticmethod
    def get_total_expenses_for_current_year(budget_id):
        """Return the total of the budget's expenses in the current year."""
        today = datetime.today()
        expenses = Expense.objects.filter(
            budget=budget_id, created_at__year=today.year)
        return Expense._sum_prices(expenses)
    @staticmethod
    def get_total_expenses_for_current_month(budget_id):
        """Return the total of the budget's expenses in the current month.

        Bug fix: previously this filtered only on the month number, so
        expenses from the same month of *other* years were included in
        the total. It now also filters on the current year.
        """
        today = datetime.today()
        expenses = Expense.objects.filter(
            budget=budget_id,
            created_at__year=today.year,
            created_at__month=today.month,
        )
        return Expense._sum_prices(expenses)
| 30.853659 | 97 | 0.66087 | 2,027 | 0.801186 | 0 | 0 | 975 | 0.385375 | 0 | 0 | 262 | 0.103557 |
941112baf421a06451189dbe5c8a03eed694f448 | 968 | py | Python | Quiz/m1_quant_basics/l2_stock_prices/quiz_tests.py | jcrangel/AI-for-Trading | c3b865e992f8eb8deda91e7641428eef1d343636 | [
"Apache-2.0"
] | 98 | 2020-05-22T00:41:23.000Z | 2022-03-24T12:57:15.000Z | Quiz/m1_quant_basics/l2_stock_prices/quiz_tests.py | jcrangel/AI-for-Trading | c3b865e992f8eb8deda91e7641428eef1d343636 | [
"Apache-2.0"
] | 1 | 2020-01-04T05:32:35.000Z | 2020-01-04T18:22:21.000Z | Quiz/m1_quant_basics/l2_stock_prices/quiz_tests.py | jcrangel/AI-for-Trading | c3b865e992f8eb8deda91e7641428eef1d343636 | [
"Apache-2.0"
] | 74 | 2020-05-05T16:44:42.000Z | 2022-03-23T06:59:09.000Z | from collections import OrderedDict
import pandas as pd
from tests import project_test, assert_output
@project_test
def test_csv_to_close(fn):
    """Check that *fn* pivots the prices CSV into a close-price DataFrame.

    The expected output is a single DataFrame indexed by date with one
    column per ticker, containing the raw (unadjusted) close prices.
    """
    tickers = ['A', 'B', 'C']
    dates = ['2017-09-22', '2017-09-25', '2017-09-26', '2017-09-27', '2017-09-28']
    fn_inputs = {
        'csv_filepath': 'prices_2017_09_22_2017-09-28.csv',
        'field_names': ['ticker', 'date', 'open', 'high', 'low', 'close', 'volume', 'adj_close', 'adj_volume']}
    # Expected values: rows are dates, columns are tickers.
    fn_correct_outputs = OrderedDict([
        (
            'close',
            pd.DataFrame(
                [
                    [152.48000000, 149.19000000, 59.35000000],
                    [151.11000000, 145.06000000, 60.29000000],
                    [152.42000000, 145.21000000, 57.74000000],
                    [154.34000000, 147.02000000, 58.41000000],
                    [153.68000000, 147.19000000, 56.76000000]],
                dates, tickers))])
    assert_output(fn, fn_inputs, fn_correct_outputs)
| 35.851852 | 111 | 0.566116 | 0 | 0 | 0 | 0 | 863 | 0.891529 | 0 | 0 | 206 | 0.21281 |
9412ae8d794817c6aae2c6e712ed2c24d9187000 | 1,306 | py | Python | Problem58.py | Cleancode404/ProjectEuler | 2f93b256b107bfb6a395b8aa197cfeacc599b00b | [
"MIT"
] | null | null | null | Problem58.py | Cleancode404/ProjectEuler | 2f93b256b107bfb6a395b8aa197cfeacc599b00b | [
"MIT"
] | null | null | null | Problem58.py | Cleancode404/ProjectEuler | 2f93b256b107bfb6a395b8aa197cfeacc599b00b | [
"MIT"
] | null | null | null | """
Starting with 1 and spiralling anticlockwise in the following way,
a square spiral with side length 7 is formed.
37 36 35 34 33 32 31
38 17 16 15 14 13 30
39 18 5 4 3 12 29
40 19 6 1 2 11 28
41 20 7 8 9 10 27
42 21 22 23 24 25 26
43 44 45 46 47 48 49
It is interesting to note that the odd squares lie along
the bottom right diagonal,
but what is more interesting is that 8 out of the 13 numbers
lying along both diagonals are prime; that is, a ratio of 8/13 ≈ 62%.
If one complete new layer is wrapped around the spiral above,
a square spiral with side length 9 will be formed.
If this process is continued,
what is the side length of the square spiral
for which the ratio of primes along both diagonals first falls below 10%?
"""""
import time
def isprime(n):
    """Primality test: small-prime screening, then odd trial division."""
    if n < 2:
        return False
    for p in (2, 3, 5):
        if n % p == 0:
            return n == p
    # n has no factor of 2, 3 or 5; try the remaining odd candidates
    # up to the square root.
    limit = int(n ** 0.5) + 1
    return all(n % divisor != 0 for divisor in range(7, limit, 2))
if __name__ == '__main__':
    start = time.process_time()
    # Side length 3 has diagonal values 3, 5, 7, 9: three primes out of
    # 2*3 - 1 = 5 diagonal numbers (the centre 1 is counted once).
    count = 3
    i = 3
    while count / (2*i - 1) >= 0.1:
        i += 2
        # The corners of the layer with side length i are i**2 (an odd
        # square, never prime, so skipped) and i**2 - (i-1), i**2 - 2(i-1),
        # i**2 - 3(i-1) -- the three candidates tested below.
        count += [isprime(x) for x in [i**2 - i + 1,
            i**2 - 2*i + 2, i**2 - 3 * i + 3]].count(True)
    print(i)
    print('Runtime is', time.process_time() - start)
94169a27bf1cb9a9f17f568c26d91e83bff1fa33 | 825 | py | Python | src/pretalx/event/migrations/0023_update_featured_visibility.py | lili668668/pretalx | 5ba2185ffd7c5f95254aafe25ad3de340a86eadb | [
"Apache-2.0"
] | 418 | 2017-10-05T05:52:49.000Z | 2022-03-24T09:50:06.000Z | src/pretalx/event/migrations/0023_update_featured_visibility.py | lili668668/pretalx | 5ba2185ffd7c5f95254aafe25ad3de340a86eadb | [
"Apache-2.0"
] | 1,049 | 2017-09-16T09:34:55.000Z | 2022-03-23T16:13:04.000Z | src/pretalx/event/migrations/0023_update_featured_visibility.py | lili668668/pretalx | 5ba2185ffd7c5f95254aafe25ad3de340a86eadb | [
"Apache-2.0"
] | 155 | 2017-10-16T18:32:01.000Z | 2022-03-15T12:48:33.000Z | # Generated by Django 3.0.5 on 2020-07-26 15:45
from django.db import migrations
def update_show_featured(apps, schema_editor):
    """Migrate the legacy 'show_sneak_peek' setting to 'show_featured'.

    Events that explicitly disabled the sneak peek receive the
    equivalent new setting show_featured='never'; all other events keep
    their defaults.
    """
    Event = apps.get_model("event", "Event")
    EventSettings = apps.get_model("event", "Event_SettingsStore")
    for event in Event.objects.all():
        old_value = EventSettings.objects.filter(
            object=event, key="show_sneak_peek"
        ).first()
        # Settings values are stored as strings, hence the "False" compare.
        if old_value and old_value.value == "False":
            EventSettings.objects.create(
                object=event,
                key="show_featured",
                value="never",
            )
class Migration(migrations.Migration):
    dependencies = [
        ("event", "0022_auto_20200124_1213"),
    ]
    # Data-only migration; the reverse operation is a no-op.
    operations = [
        migrations.RunPython(update_show_featured, migrations.RunPython.noop),
    ]
| 26.612903 | 78 | 0.624242 | 217 | 0.26303 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.202424 |
94187ed0c8700f9512490144c63712ffb0927710 | 2,575 | py | Python | meiduo_mall24/meiduo_mall/meiduo_mall/apps/meiduo_admin/views/spus.py | MarioKarting/Django_DRF_meiduo_mall | b9dc85d6d538e4655dd02ef1027524bdcbe497d7 | [
"MIT"
] | 1 | 2020-04-15T03:22:18.000Z | 2020-04-15T03:22:18.000Z | meiduo_mall24/meiduo_mall/meiduo_mall/apps/meiduo_admin/views/spus.py | MarioKarting/Django_DRF_meiduo_mall | b9dc85d6d538e4655dd02ef1027524bdcbe497d7 | [
"MIT"
] | 5 | 2020-05-11T20:29:00.000Z | 2021-11-02T15:46:12.000Z | meiduo_mall24/meiduo_mall/meiduo_mall/apps/meiduo_admin/views/spus.py | MarioKarting/Django_DRF_meiduo_mall | b9dc85d6d538e4655dd02ef1027524bdcbe497d7 | [
"MIT"
] | null | null | null | from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from goods.models import SPU, Brand, GoodsCategory, SKU
from meiduo_admin.serializers.specs import SPUSerializer
from meiduo_admin.serializers.spus import SPUGoodsSerialzier, SPUBrandsSerizliser, CategorysSerizliser
from meiduo_admin.utils import PageNum
from django.conf import settings
from fdfs_client.client import Fdfs_client
# SPU table CRUD views, plus level 1/2/3 category lookups.
class SPUGoodsView(ModelViewSet):
    """
    CRUD operations for the SPU table.
    """
    # permissions: admin users only
    permission_classes = [IsAdminUser]
    # serializer
    serializer_class = SPUGoodsSerialzier
    # base queryset
    queryset = SPU.objects.all()
    # pagination
    pagination_class = PageNum
    def get_queryset(self):
        # optional 'keyword' query parameter filters SPUs by exact name
        keyword = self.request.query_params.get('keyword')
        if keyword == '' or keyword is None:
            return SPU.objects.all()
        else:
            return SPU.objects.filter(name=keyword)
    # extra action on the viewset that returns brand data
    def brand(self, request):
        # 1. query all brands
        data = Brand.objects.all()
        # 2. serialize and return the brand data
        ser = SPUBrandsSerizliser(data, many=True)
        return Response(ser.data)
    def channel(self, request):
        # 1. fetch the top-level (level 1) categories
        data = GoodsCategory.objects.filter(parent=None)
        # 2. serialize and return the category data
        ser = CategorysSerizliser(data, many=True)
        return Response(ser.data)
    def channels(self, request, pk):
        # 1. fetch the level 2 / level 3 categories under the given parent
        data = GoodsCategory.objects.filter(parent_id=pk)
        # 2. serialize and return the category data
        ser = CategorysSerizliser(data, many=True)
        return Response(ser.data)
class SPUSView(ModelViewSet):
    """
    CRUD operations for the SPU table.
    """
    serializer_class = SPUSerializer
    queryset = SPU.objects.all()
    pagination_class = PageNum
    def image(self,request):
        """
        Save an uploaded image to FastDFS storage.
        :param request: request carrying the image in request.FILES['image']
        :return: Response with the full image URL (201) or an error status
        """
        # 1. get the uploaded image data
        data = request.FILES.get('image')
        # validate that an image was actually provided
        if data is None:
            return Response(status=500)
        # 2. create the FastDFS connection object
        client = Fdfs_client(settings.FASTDFS_CONF)
        # 3. upload the image
        res = client.upload_by_buffer(data.read())
        # 4. check the upload status; the string (including its typo) is
        #    compared verbatim against the fdfs client's return value
        if res['Status'] != 'Upload successed.':
            # user-facing error message, intentionally left unchanged
            return Response({'error': '上传失败'}, status=501)
        # 5. get the remote path of the uploaded image
        image_url = res['Remote file_id']
        # 6. return the absolute URL of the stored image
        return Response(
            {
                'img_url': settings.FDFS_URL+image_url
            },
            status=201
        )
9418c470da8515d5212fe661a684222a75d24302 | 4,559 | py | Python | firmware_kmk/lib/kmk/handlers/sequences.py | telzo2000/NumAtreus_pico | 3c8558e80869eca3ee753de21a02afc8108e5fcf | [
"MIT"
] | 1 | 2022-01-21T06:09:18.000Z | 2022-01-21T06:09:18.000Z | firmware_kmk/lib/kmk/handlers/sequences.py | telzo2000/NumAtreus_pico | 3c8558e80869eca3ee753de21a02afc8108e5fcf | [
"MIT"
] | null | null | null | firmware_kmk/lib/kmk/handlers/sequences.py | telzo2000/NumAtreus_pico | 3c8558e80869eca3ee753de21a02afc8108e5fcf | [
"MIT"
] | null | null | null | import gc
from kmk.consts import UnicodeMode
from kmk.handlers.stock import passthrough
from kmk.keys import KC, make_key
from kmk.types import AttrDict, KeySequenceMeta
def get_wide_ordinal(char):
    """Return the code point of *char*, combining UTF-16 surrogate pairs.

    *char* is either a single character or a two-character surrogate
    pair (high surrogate followed by low surrogate), in which case the
    pair is decoded into the astral-plane code point.
    """
    if len(char) == 2:
        high, low = ord(char[0]), ord(char[1])
        return 0x10000 + (high - 0xD800) * 0x400 + (low - 0xDC00)
    return ord(char)
def sequence_press_handler(key, keyboard, KC, *args, **kwargs):
    """Press handler that plays back the key sequence stored in key.meta.seq.

    The currently pressed keys are stashed and restored afterwards so the
    sequence is sent on a clean report. Keys flagged with ``no_press`` /
    ``no_release`` skip the corresponding half of the tap, which lets a
    sequence hold a modifier across several subsequent keys.
    """
    oldkeys_pressed = keyboard.keys_pressed
    keyboard.keys_pressed = set()
    for ikey in key.meta.seq:
        if not getattr(ikey, 'no_press', None):
            keyboard.process_key(ikey, True)
            # flush a HID report after every press/release so the host
            # sees the keys in order rather than as one chord
            keyboard._send_hid()
        if not getattr(ikey, 'no_release', None):
            keyboard.process_key(ikey, False)
            keyboard._send_hid()
    keyboard.keys_pressed = oldkeys_pressed
    return keyboard
def simple_key_sequence(seq):
    """Wrap an iterable of keys into one key that sends them in order."""
    return make_key(
        meta=KeySequenceMeta(seq),
        on_press=sequence_press_handler,
        on_release=passthrough,
    )
def send_string(message):
    """Build a key sequence that types out *message*.

    Each character is mapped to its key code attribute on KC; uppercase
    characters are additionally wrapped with left shift.
    """
    keys = []
    for character in message:
        keycode = getattr(KC, character.upper())
        if character.isupper():
            keycode = KC.LSHIFT(keycode)
        keys.append(keycode)
    return simple_key_sequence(keys)
# Pre-built helper sequences used by the OS-specific unicode entry modes.
IBUS_KEY_COMBO = simple_key_sequence((KC.LCTRL(KC.LSHIFT(KC.U)),))
RALT_KEY = simple_key_sequence((KC.RALT,))
U_KEY = simple_key_sequence((KC.U,))
ENTER_KEY = simple_key_sequence((KC.ENTER,))
# Hold/release RALT around an entire codepoint (used by the RALT mode).
RALT_DOWN_NO_RELEASE = simple_key_sequence((KC.RALT(no_release=True),))
RALT_UP_NO_PRESS = simple_key_sequence((KC.RALT(no_press=True),))
def compile_unicode_string_sequences(string_table):
    '''
    Destructively convert ("compile") unicode strings into key sequences. This
    will, for RAM saving reasons, empty the input dictionary and trigger
    garbage collection.
    '''
    target = AttrDict()
    for k, v in string_table.items():
        target[k] = unicode_string_sequence(v)
    # now loop through and kill the input dictionary to save RAM
    # (iterating target's keys avoids mutating string_table while
    # iterating over it)
    for k in target.keys():
        del string_table[k]
    gc.collect()
    return target
def unicode_string_sequence(unistring):
    '''
    Allows sending things like (╯°□°)╯︵ ┻━┻ directly, without
    manual conversion to Unicode codepoints.
    '''
    # hex(...)[2:] strips the '0x' prefix, leaving the bare hex codepoint
    return unicode_codepoint_sequence([hex(get_wide_ordinal(s))[2:] for s in unistring])
def generate_codepoint_keysym_seq(codepoint, expected_length=4):
    """Turn a hex codepoint string into a left-zero-padded list of keys.

    The result has max(len(codepoint), expected_length) entries, padded
    with KC.N0 on the left; each hex digit becomes the matching key.
    """
    # To make MacOS and Windows happy, always try to send
    # sequences that are of length 4 at a minimum
    # On Linux systems, we can happily send longer strings.
    # They will almost certainly break on MacOS and Windows,
    # but this is a documentation problem more than anything.
    # Not sure how to send emojis on Mac/Windows like that,
    # though, since (for example) the Canadian flag is assembled
    # from two five-character codepoints, 1f1e8 and 1f1e6
    seq = [KC.N0 for _ in range(max(len(codepoint), expected_length))]
    # fill from the right so the padding zeros stay on the left
    for idx, codepoint_fragment in enumerate(reversed(codepoint)):
        seq[-(idx + 1)] = KC.get(codepoint_fragment)
    return seq
def unicode_codepoint_sequence(codepoints):
    """Build a key that types the given hex codepoints.

    The actual byte sequence to send is chosen at *press* time based on
    ``keyboard.unicode_mode`` (IBUS, RALT or WinCompose).
    """
    kc_seqs = (generate_codepoint_keysym_seq(codepoint) for codepoint in codepoints)
    kc_macros = [simple_key_sequence(kc_seq) for kc_seq in kc_seqs]
    def _unicode_sequence(key, keyboard, *args, **kwargs):
        # dispatch on the keyboard's configured unicode entry mode;
        # unrecognized modes are silently ignored
        if keyboard.unicode_mode == UnicodeMode.IBUS:
            keyboard.process_key(
                simple_key_sequence(_ibus_unicode_sequence(kc_macros, keyboard)), True
            )
        elif keyboard.unicode_mode == UnicodeMode.RALT:
            keyboard.process_key(
                simple_key_sequence(_ralt_unicode_sequence(kc_macros, keyboard)), True
            )
        elif keyboard.unicode_mode == UnicodeMode.WINC:
            keyboard.process_key(
                simple_key_sequence(_winc_unicode_sequence(kc_macros, keyboard)), True
            )
    return make_key(on_press=_unicode_sequence)
def _ralt_unicode_sequence(kc_macros, keyboard):
    '''Yield each codepoint macro wrapped in a held RALT (Linux RALT mode).'''
    for kc_macro in kc_macros:
        yield RALT_DOWN_NO_RELEASE
        yield kc_macro
        yield RALT_UP_NO_PRESS
def _ibus_unicode_sequence(kc_macros, keyboard):
    '''Yield CTRL+SHIFT+U, the codepoint macro, then ENTER (IBus mode).'''
    for kc_macro in kc_macros:
        yield IBUS_KEY_COMBO
        yield kc_macro
        yield ENTER_KEY
def _winc_unicode_sequence(kc_macros, keyboard):
    '''
    Send unicode sequence using WinCompose:
    http://wincompose.info/
    https://github.com/SamHocevar/wincompose
    '''
    # RALT then U is the WinCompose trigger for hex codepoint entry
    for kc_macro in kc_macros:
        yield RALT_KEY
        yield U_KEY
        yield kc_macro
| 29.412903 | 88 | 0.686993 | 0 | 0 | 602 | 0.131527 | 0 | 0 | 0 | 0 | 968 | 0.211492 |
9418ef3b1dbc12d433809a3899f7ad32944ce472 | 19,494 | py | Python | tpDcc/tools/datalibrary/widgets/save.py | tpDcc/tpDcc-tools-datalibrary | fe867ac35a59d13300af20a998dccdabc2e145ba | [
"MIT"
] | null | null | null | tpDcc/tools/datalibrary/widgets/save.py | tpDcc/tpDcc-tools-datalibrary | fe867ac35a59d13300af20a998dccdabc2e145ba | [
"MIT"
] | null | null | null | tpDcc/tools/datalibrary/widgets/save.py | tpDcc/tpDcc-tools-datalibrary | fe867ac35a59d13300af20a998dccdabc2e145ba | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains base save widget for data items
"""
from __future__ import print_function, division, absolute_import
import os
import logging
import traceback
from Qt.QtCore import Signal, QSize
from Qt.QtWidgets import QSizePolicy, QFrame, QDialogButtonBox, QFileDialog
from tpDcc import dcc
from tpDcc.managers import resources
from tpDcc.libs.resources.core import theme
from tpDcc.libs.python import decorators
from tpDcc.libs.qt.core import base, qtutils
from tpDcc.libs.qt.widgets import layouts, label, buttons, formwidget, messagebox, snapshot
from tpDcc.tools.datalibrary.core import utils
from tpDcc.tools.datalibrary.widgets import sequence
LOGGER = logging.getLogger('tpDcc-libs-datalibrary')
class _MetaSaveWidget(type):
    """Metaclass that selects the DCC-specific save widget implementation.

    Calling the decorated class returns the Maya implementation when
    running inside Maya and BaseSaveWidget otherwise. Pass
    ``as_class=True`` to receive the class itself instead of an instance.
    """
    def __call__(self, *args, **kwargs):
        as_class = kwargs.get('as_class', False)
        if dcc.client().is_maya():
            # imported lazily so the Maya modules load only inside Maya
            from tpDcc.tools.datalibrary.dccs.maya.widgets import save
            if as_class:
                return save.MayaSaveWidget
            else:
                # NOTE(review): 'as_class' is not popped from kwargs and is
                # forwarded to the constructor -- confirm downstream
                # __init__ signatures tolerate it.
                return type.__call__(save.MayaSaveWidget, *args, **kwargs)
        else:
            if as_class:
                return BaseSaveWidget
            else:
                return type.__call__(BaseSaveWidget, *args, **kwargs)
@theme.mixin
class BaseSaveWidget(base.BaseWidget, object):
cancelled = Signal()
saved = Signal()
ENABLE_THUMBNAIL_CAPTURE = True
def __init__(self, item_view, client=None, *args, **kwargs):
self._item_view = item_view
self._client = client
self._form_widget = None
self._sequence_widget = None
super(BaseSaveWidget, self).__init__(*args, **kwargs)
self.setObjectName('LibrarySaveWidget')
self._create_sequence_widget()
self.update_thumbnail_size()
self.set_item_view(item_view)
# ============================================================================================================
# OVERRIDES
# ============================================================================================================
def get_main_layout(self):
return layouts.VerticalLayout(spacing=4, margins=(0, 0, 0, 0))
def ui(self):
super(BaseSaveWidget, self).ui()
self.setWindowTitle('Save Item')
title_frame = QFrame(self)
title_frame_layout = layouts.VerticalLayout(spacing=0, margins=(0, 0, 0, 0))
title_frame.setLayout(title_frame_layout)
title_widget = QFrame(self)
title_layout = layouts.VerticalLayout(spacing=0, margins=(0, 0, 0, 0))
title_widget.setLayout(title_layout)
title_buttons_layout = layouts.HorizontalLayout(spacing=0, margins=(0, 0, 0, 0))
title_layout.addLayout(title_buttons_layout)
title_icon = label.BaseLabel(parent=self)
title_button = label.BaseLabel(self.item().menu_name(), parent=self)
title_button.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
self._menu_button = buttons.BaseButton(parent=self)
self._menu_button.setIcon(resources.icon('menu_dots'))
self._menu_button.setVisible(False) # Hide by default
title_buttons_layout.addWidget(title_icon)
title_buttons_layout.addSpacing(5)
title_buttons_layout.addWidget(title_button)
title_buttons_layout.addWidget(self._menu_button)
title_frame_layout.addWidget(title_widget)
item_icon_name = self.item().icon() or 'tpDcc'
item_icon = resources.icon(item_icon_name)
if not item_icon:
item_icon = resources.icon('tpDcc')
title_icon.setPixmap(item_icon.pixmap(QSize(20, 20)))
thumbnail_layout = layouts.HorizontalLayout(spacing=0, margins=(0, 0, 0, 0))
self._thumbnail_frame = QFrame(self)
thumbnail_frame_layout = layouts.VerticalLayout(spacing=0, margins=(0, 2, 0, 2))
self._thumbnail_frame.setLayout(thumbnail_frame_layout)
thumbnail_layout.addWidget(self._thumbnail_frame)
self._options_frame = QFrame(self)
options_frame_layout = layouts.VerticalLayout(spacing=0, margins=(4, 2, 4, 2))
self._options_frame.setLayout(options_frame_layout)
preview_buttons_frame = QFrame(self)
self._preview_buttons_layout = layouts.HorizontalLayout(spacing=0, margins=(4, 2, 4, 2))
preview_buttons_frame.setLayout(self._preview_buttons_layout)
self._save_button = buttons.BaseButton('Save', parent=self)
self._save_button.setIcon(resources.icon('save'))
self._cancel_button = buttons.BaseButton('Cancel', parent=self)
self._cancel_button.setIcon(resources.icon('cancel'))
self._preview_buttons_layout.addStretch()
self._preview_buttons_layout.addWidget(self._save_button)
self._preview_buttons_layout.addStretch()
self._preview_buttons_layout.addWidget(self._cancel_button)
self._preview_buttons_layout.addStretch()
self.main_layout.addWidget(title_frame)
self.main_layout.addLayout(thumbnail_layout)
self.main_layout.addWidget(self._options_frame)
self.main_layout.addWidget(preview_buttons_frame)
def setup_signals(self):
self._menu_button.clicked.connect(self._on_show_menu)
self._save_button.clicked.connect(self._on_save)
self._cancel_button.clicked.connect(self._on_cancel)
def resizeEvent(self, event):
"""
Overrides base QWidget resizeEvent function
:param event: QResizeEvent
"""
self.update_thumbnail_size()
def close(self):
"""
Overrides base QWidget close function to disable script job when its is done
"""
if self._form_widget:
self._form_widget.save_persistent_values()
super(BaseSaveWidget, self).close()
# ============================================================================================================
# BASE
# ============================================================================================================
def folder_path(self):
"""
Returns the folder path
:return: str
"""
return self.form_widget().value('folder')
def set_folder_path(self, path):
"""
Sets the destination folder path
:param path: str
"""
self.form_widget().set_value('folder', path)
def set_thumbnail_path(self, path):
"""
Sets the path to the thumbnail image or the image sequence directory
:param path: str
"""
file_name, extension = os.path.splitext(path)
target = utils.temp_path('thumbnail{}'.format(extension))
utils.copy_path(path, target, force=True)
self._sequence_widget.set_path(target)
def library_window(self):
"""
Returns library widget window for the item
:return: LibraryWindow
"""
return self.item_view().library_window()
def set_library_window(self, library_window):
"""
Sets the library widget for the item
:param library_window: LibraryWindow
"""
self.item_view().set_library_window(library_window)
def form_widget(self):
"""
Returns the form widget instance
:return: FormWidget
"""
return self._form_widget
def item(self):
"""
Returns current item
:return:
"""
return self.item_view().item
def item_view(self):
"""
Returns the current item view
:return: LibraryItem
"""
return self._item_view
def set_item_view(self, item_view):
"""
Sets the base item to be created
:param item_view: LibraryItem
"""
self._item_view = item_view
if os.path.exists(item_view.image_sequence_path()):
self.set_thumbnail_path(item_view.image_sequence_path())
elif not item_view.is_default_thumbnail_path():
self.set_thumbnail_path(item_view.thumbnail_path())
schema = self.item().save_schema()
if schema:
form_widget = formwidget.FormWidget(self)
form_widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
form_widget.set_schema(schema)
form_widget.set_validator(self.item().save_validator)
# item_name = os.path.basename(item.path())
# form_widget.set_values({'name': item_name})
self._options_frame.layout().addWidget(form_widget)
form_widget.validate()
self._form_widget = form_widget
else:
self._options_frame.setVisible(False)
def update_thumbnail_size(self):
"""
Updates the thumbnail button to teh size of the widget
"""
width = self.width() - 10
if width > 250:
width = 250
size = QSize(width, width)
if self._sequence_widget:
self._sequence_widget.setIconSize(size)
self._sequence_widget.setMaximumSize(size)
self._thumbnail_frame.setMaximumSize(size)
def show_thumbnail_capture_dialog(self):
"""
Asks the user if they would like to capture a thumbnail
:return: int
"""
buttons = QDialogButtonBox.Yes | QDialogButtonBox.Ignore | QDialogButtonBox.Cancel
parent = self.item_view().library_window()
btn = messagebox.MessageBox.question(
parent, 'Create a thumbnail', 'Would you like to capture a thumbnail?', buttons=buttons)
if btn == QDialogButtonBox.Yes:
self.thumbnail_capture()
return btn
def show_by_frame_dialog(self):
"""
Show the by frame dialog
"""
help_text = """
To help speed up the playblast you can set the "by frame" to another greater than 1.
For example if the "by frame" is set to 2 it will playblast every second frame
"""
result = None
options = self.form_widget().values()
by_frame = options.get('byFrame', 1)
start_frame, end_frame = options.get('frameRange', [None, None])
duration = end_frame - start_frame if start_frame is not None and end_frame is not None else 1
if duration > 100 and by_frame == 1:
buttons = QDialogButtonBox.Ok | QDialogButtonBox.Cancel
result = messagebox.MessageBox.question(
self.library_window(), title='Tip', text=help_text, buttons=buttons, enable_dont_show_checkbox=True
)
return result
def thumbnail_capture(self, show=False):
"""
Captures a playblast and saves it to the temporal thumbnail path
:param show: bool
"""
options = self.form_widget().values()
start_frame, end_frame = options.get('frameRange', [None, None])
step = options.get('byFrame', 1)
if not qtutils.is_control_modifier():
result = self.show_by_frame_dialog()
if result == QDialogButtonBox.Cancel:
return
path = utils.temp_path('sequence', 'thumbnail.jpg')
try:
snapshot.SnapshotWindow(path=path, on_save=self._on_thumbnail_captured)
# thumbnail.ThumbnailCaptureDialog.thumbnail_capture(
# path=self._temp_path,
# show=show,
# start_frame=start_frame,
# end_frame=end_frame,
# step=step,
# clear_cache=False,
# captured=self._on_thumbnail_captured
# )
except Exception as e:
messagebox.MessageBox.critical(self.library_window(), 'Error while capturing thumbnail', str(e))
LOGGER.error(traceback.format_exc())
    def save(self, path, thumbnail):
        """
        Saves the item with the given objects to the given disk location path
        :param path: str
        :param thumbnail: str
        """
        kwargs = self.form_widget().values()
        sequence_path = self._sequence_widget.dirname()
        item_view = self.item_view()
        # NOTE(review): item_view.item_view.path -- the view appears to wrap
        # an inner view/model whose path is set here; confirm this is not
        # meant to be item_view.item.path.
        item_view.item_view.path = path
        library_window = self.library_window()
        valid_save = item_view.safe_save(thumbnail=thumbnail, sequence_path=sequence_path, **kwargs)
        if valid_save:
            # refresh the library and highlight the newly saved item
            if library_window:
                library_window.refresh()
                library_window.select_folder_path(path)
            self.saved.emit()
            self.close()
# ============================================================================================================
# INTERNAL
# ============================================================================================================
def _create_sequence_widget(self):
"""
Internal function that creates a sequence widget to replace the static thumbnail widget
"""
self._sequence_widget = sequence.ImageSequenceWidget(self)
self._sequence_widget.setObjectName('thumbnailButton')
self._thumbnail_frame.layout().insertWidget(0, self._sequence_widget)
self._sequence_widget.clicked.connect(self._on_thumbnail_capture)
self._sequence_widget.setToolTip(
'Click to capture a thumbnail from the current model panel.\n'
'CTRL + Click to show the capture window for better framing.')
camera_icon = resources.get('icons', self.theme().style(), 'camera.png')
expand_icon = resources.get('icons', self.theme().style(), 'full_screen.png')
folder_icon = resources.get('icons', self.theme().style(), 'folder.png')
self._sequence_widget.addAction(
camera_icon, 'Capture new image', 'Capture new image', self._on_thumbnail_capture)
self._sequence_widget.addAction(
expand_icon, 'Show Capture window', 'Show Capture window', self._on_show_capture_window)
self._sequence_widget.addAction(
folder_icon, 'Load image from disk', 'Load image from disk', self._on_show_browse_image_dialog)
self._sequence_widget.setIcon(resources.icon('tpdcc'))
# ============================================================================================================
# CALLBACKS
# ============================================================================================================
def _on_show_menu(self):
"""
Internal callback function that is called when menu button is clicked byu the user
:return: QAction
"""
pass
def _on_save(self):
if not self.library_window():
return False
library = self.library_window().library()
if not library:
return False
try:
self.form_widget().validate()
if self.form_widget().has_errors():
raise Exception('\n'.join(self.form_widget().errors()))
has_frames = self._sequence_widget.has_frames()
if not has_frames and self.ENABLE_THUMBNAIL_CAPTURE:
button = self.show_thumbnail_capture_dialog()
if button == QDialogButtonBox.Cancel:
return False
name = self.form_widget().value('name')
folder = self.form_widget().value('folder')
comment = self.form_widget().value('comment') or ''
extension = self.item().extension()
if extension and not name.endswith(extension):
name = '{}{}'.format(name, extension)
path = folder + '/' + name
thumbnail = self._sequence_widget.first_frame()
save_item = library.get(path, only_extension=True)
save_function = save_item.functionality().get('save')
if not save_function:
LOGGER.warning('Item "{}" does not supports save operation'.format(save_item))
return False
library_path = self.item().library.identifier
if not library_path or not os.path.isfile(library_path):
LOGGER.warning('Impossible to save data "{}" because its library does not exists: "{}"'.format(
self.item(), library_path))
return
values = self.form_widget().values()
try:
if self._client:
success, message, dependencies = self._client().save_data(
library_path=library_path, data_path=path, values=values)
if not success:
messagebox.MessageBox.critical(self.library_window(), 'Error while saving', str(message))
LOGGER.error(str(message))
return False
else:
dependencies = save_function(**values)
except Exception as exc:
messagebox.MessageBox.critical(self.library_window(), 'Error while saving', str(exc))
LOGGER.error(traceback.format_exc())
return False
except Exception as exc:
messagebox.MessageBox.critical(self.library_window(), 'Error while saving', str(exc))
LOGGER.error(traceback.format_exc())
raise
new_item_path = save_item.format_identifier()
if not new_item_path or not os.path.isfile(new_item_path):
LOGGER.warning('Although saving process for item "{}" was completed, '
'it seems no new data has been generated!'.format(save_item))
self.saved.emit()
return False
save_item.library.add(new_item_path)
# # TODO: Instead of creating a local version, we will use a git system to upload our data to our project repo
# # TODO: Should we save new versions of dependencies too?
# valid = save_item.create_version(comment=comment)
# if not valid:
# LOGGER.warning('Impossible to store new version for data "{}"'.format(save_item))
if thumbnail and os.path.isfile(thumbnail):
save_item.store_thumbnail(thumbnail)
self.library_window().sync()
save_item.update_dependencies(dependencies=dependencies)
self.saved.emit()
return True
def _on_cancel(self):
self.cancelled.emit()
self.close()
def _on_thumbnail_capture(self):
"""
Internal callback function that is called when a thumbnail capture must be done
"""
self.thumbnail_capture(show=False)
def _on_thumbnail_captured(self, captured_path):
"""
Internal callback function that is called when thumbnail is captured
:param captured_path: str
"""
thumb_path = os.path.dirname(captured_path)
self.set_thumbnail_path(thumb_path)
def _on_show_capture_window(self):
"""
Internal callback function that shows the capture window for framing
"""
self.thumbnail_capture(show=True)
def _on_show_browse_image_dialog(self):
"""
Internal callback function that shows a file dialog for choosing an image from disk
"""
file_dialog = QFileDialog(self, caption='Open Image', filter='Image Files (*.png *.jpg)')
file_dialog.fileSelected.connect(self.set_thumbnail_path)
file_dialog.exec_()
@decorators.add_metaclass(_MetaSaveWidget)
class SaveWidget(object):
pass
| 36.642857 | 118 | 0.606238 | 18,661 | 0.957269 | 0 | 0 | 18,166 | 0.931876 | 0 | 0 | 5,030 | 0.258028 |
9419100ffaf554866684191a2a75d8965ac468d8 | 154 | py | Python | app.py | ankan-fullstack/StocksDataConditionalOperation | a8a1d7122f053a9660787e3aa033e2a4bdfc2e61 | [
"MIT"
] | null | null | null | app.py | ankan-fullstack/StocksDataConditionalOperation | a8a1d7122f053a9660787e3aa033e2a4bdfc2e61 | [
"MIT"
] | null | null | null | app.py | ankan-fullstack/StocksDataConditionalOperation | a8a1d7122f053a9660787e3aa033e2a4bdfc2e61 | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__)
if __name__ == "__main__":
from api import *
app.run(host ='0.0.0.0', port = 5000, debug = False) | 22 | 57 | 0.642857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.123377 |
941a1a67e6695f7c1740b6f37424502f9989d6c5 | 1,061 | py | Python | vframe_cli/commands/synthetic/utils/relabel.py | ngi-nix/vframe | 60469e25203136f9d6a5ecaabe2423695ee9a0f2 | [
"MIT"
] | null | null | null | vframe_cli/commands/synthetic/utils/relabel.py | ngi-nix/vframe | 60469e25203136f9d6a5ecaabe2423695ee9a0f2 | [
"MIT"
] | null | null | null | vframe_cli/commands/synthetic/utils/relabel.py | ngi-nix/vframe | 60469e25203136f9d6a5ecaabe2423695ee9a0f2 | [
"MIT"
] | null | null | null | #############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2019 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import click
from vframe.settings import app_cfg
ext_choices = ['jpg', 'png']
@click.command()
@click.option('-i', '--input', 'opt_input', required=True,
help='Input file CSV')
@click.option('-o', '--output', 'opt_output',
help='Input file CSV')
@click.option('--label', 'opt_labels_from_to', required=True, type=(str,str),
multiple=True, help='Label from, to')
@click.pass_context
def cli(ctx, opt_input, opt_output, opt_labels_from_to):
"""Relabel label enum in annotation CSV"""
import pandas as pd
log = app_cfg.LOG
opt_output = opt_output if opt_output else opt_input
df_meta = pd.read_csv(opt_input)
for label_from, label_to in opt_labels_from_to:
df_meta.loc[(df_meta.label_enum == label_from), 'label_enum'] = label_to
# write csv
df_meta.to_csv(opt_output, index=False)
| 27.205128 | 77 | 0.607917 | 0 | 0 | 0 | 0 | 731 | 0.688973 | 0 | 0 | 441 | 0.415646 |
941a1f27b0d66e18cd35a64743c98d71580ec9e8 | 747 | py | Python | migrations/versions/7e15c6b3d73b_.py | Rdbaker/betly | 92c7ae41bd221bbd21997fcd13e0f38b48f66d7d | [
"BSD-3-Clause"
] | null | null | null | migrations/versions/7e15c6b3d73b_.py | Rdbaker/betly | 92c7ae41bd221bbd21997fcd13e0f38b48f66d7d | [
"BSD-3-Clause"
] | null | null | null | migrations/versions/7e15c6b3d73b_.py | Rdbaker/betly | 92c7ae41bd221bbd21997fcd13e0f38b48f66d7d | [
"BSD-3-Clause"
] | null | null | null | """Adds a unique constraint to name and url_name on bets.
Revision ID: 7e15c6b3d73b
Revises: 8b9f5b081137
Create Date: 2016-08-27 18:15:32.180825
"""
# revision identifiers, used by Alembic.
revision = '7e15c6b3d73b'
down_revision = '8b9f5b081137'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(None, 'bet', ['url_name'])
op.create_unique_constraint(None, 'bet', ['name'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'bet', type_='unique')
op.drop_constraint(None, 'bet', type_='unique')
### end Alembic commands ###
| 25.758621 | 63 | 0.697456 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 445 | 0.595716 |
941ba60592ad05d3734afb4acc2a69ff6dc6d844 | 613 | py | Python | notebooks/__code/array.py | neutronimaging/BraggEdgeFitting | 233407fc000425ee79897e514964ef196ca27a08 | [
"BSD-3-Clause"
] | null | null | null | notebooks/__code/array.py | neutronimaging/BraggEdgeFitting | 233407fc000425ee79897e514964ef196ca27a08 | [
"BSD-3-Clause"
] | 2 | 2020-10-06T13:48:24.000Z | 2020-10-07T16:21:46.000Z | notebooks/__code/array.py | neutronimaging/BraggEdgeFitting | 233407fc000425ee79897e514964ef196ca27a08 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
def exclude_y_value_when_error_is_nan(axis, error_axis):
axis_cleaned = []
error_axis_cleaned = []
for _x, _error in zip(axis, error_axis):
if (_x == "None") or (_error == "None") or (_x is None) or (_error is None):
axis_cleaned.append(np.NaN)
error_axis_cleaned.append(np.NaN)
else:
axis_cleaned.append(np.float(_x))
error_axis_cleaned.append(np.float(_error))
return axis_cleaned, error_axis_cleaned
def check_size(x_axis=None, y_axis=None):
size_x = len(x_axis)
size_y = len(y_axis)
min_len = np.min([size_x, size_y])
return x_axis[:min_len], y_axis[:min_len]
| 25.541667 | 78 | 0.724307 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.019576 |
941cb3eef0c25d485bb5f643d2bec4dc1acc33ed | 6,546 | py | Python | prepares/prefix_extra_features.py | nengwenzhao/eWarn | 966785001441d62ba27392d8099161eb07125b0f | [
"MIT"
] | null | null | null | prepares/prefix_extra_features.py | nengwenzhao/eWarn | 966785001441d62ba27392d8099161eb07125b0f | [
"MIT"
] | null | null | null | prepares/prefix_extra_features.py | nengwenzhao/eWarn | 966785001441d62ba27392d8099161eb07125b0f | [
"MIT"
] | null | null | null | # coding: utf-8
import numpy as np
from datetime import datetime
from utils.write_logs import write_log
from prepares.prefix import Prefix
from utils.split_data import split_data
from functools import reduce
class Prefix_extra_features(Prefix):
def __init__(self, app_name='', data_name='data.csv', target='',alert_level = 1):
super(Prefix_extra_features, self).__init__(app_name, data_name, target,alert_level)
self.levels = self.df['N_CUSTOMERSEVERITY'].max()
def get_extra_feature(self, temp, time_index, window_size):
levels = temp['N_CUSTOMERSEVERITY'].values
level_features = [] #告警等级个数
duration_features = [] #有效告警持续时间
hour_features = [] # 窗口起始点的小时
weekday_features = [] #窗口起始点处于一个星期的哪一天
is_weekend_features = [] #是否是周末
def func(x,length):
p = np.zeros(length)
p[x] = 1
return p
map_temp = list(map(lambda x :func(x,self.levels+1),levels))
if len(map_temp)==0:
zero = np.zeros(self.levels+1)
zero[0] = 1
level_features.append(zero)
else:
level_features.append(reduce(lambda x,y: x+y,map_temp))
if not temp.shape[0]:
duration_features.append([0])
else:
duration = abs(max(temp['firsttimestamp'].tolist()) - min(temp['firsttimestamp'].tolist()))/window_size
duration_features.append([duration])
date = datetime.fromtimestamp(time_index)
hour = date.hour
day = date.day
#month = date.month
week = date.weekday()
hour_feature = np.zeros(24)
hour_feature[hour] = 1
day_feature = np.zeros(31)
day_feature[day - 1] = 1
#month_feature = np.zeros(12)
#month_feature[month-1] = 1
week_feature = np.zeros(7)
week_feature[week] = 1
weekend_feature = np.zeros(2)
if week>=5:
weekend_feature[1] = 1
else:
weekend_feature[0] = 1
hour_features.append(hour_feature)
#monthday_features.append(day_feature)
weekday_features.append(week_feature)
#month_features.append(month_feature)
is_weekend_features.append(weekend_feature)
extra_features = list(map(lambda x1,x2,x3,x4,x5:np.concatenate((x1,x2,x3,x4,x5)),level_features,duration_features,hour_features,weekday_features,is_weekend_features))
return extra_features[0]
def sample(self, step=10, window_size=120, react_size=10, positive_range=360, min_log=5):
self.step = step * 60
self.window_size = window_size * 60
self.react_size = react_size * 60
self.positive_range = positive_range * 60
self.min_log = min_log
self.data_time = []
datas = []
labels = []
extra_features = []
start_stamp = self.df['firsttimestamp'].min()
end_stamp = self.df['firsttimestamp'].max()
for i in range(start_stamp, (end_stamp - self.window_size - self.react_size - self.positive_range), self.step):
temp = self.df[(self.df['firsttimestamp'] >= i) & (self.df['firsttimestamp'] < (i + self.window_size))]
if temp.shape[0] < self.min_log:
continue
else:
if temp[(temp.apply(self.keyword, keyword=self.target, axis=1))].shape[0]:
temp = temp[(temp.apply(self.keyword, keyword=self.target, if_true=False, axis=1))]
#temp = temp[(temp['N_CUSTOMERSEVERITY'] != 1)]
extra_features.append(self.get_extra_feature(temp, i, window_size = self.window_size))
tmp = temp['N_SUMMARYCN'].values
tmp = list(np.unique(tmp))
datas.append(list(tmp))
future = self.df[(self.df['firsttimestamp'] >= (i + self.window_size + self.react_size)) & (
self.df['firsttimestamp'] <= (
i + self.window_size + self.react_size + self.positive_range))]
self.data_time.append(i + self.window_size)
if future.shape[0]==0:
labels.append(0)
else:
if future[future.apply(self.keyword, keyword=self.target, axis=1)].shape[0]:
labels.append(1)
else:
labels.append(0)
self.datas = datas
self.labels = labels
self.extra_features = extra_features
print("---sample done---")
def split_data(self, split_percent=0.7):
split_timestamp = self.data_time[int(len(self.data_time) * split_percent)]
train_df = self.df[self.df['firsttimestamp'] < split_timestamp]
test_df = self.df[self.df['firsttimestamp'] >= split_timestamp]
self.train_alert_num = train_df[train_df.apply(self.keyword, keyword=self.target, axis=1)].shape[0]
self.test_alert_num = test_df[test_df.apply(self.keyword, keyword=self.target, axis=1)].shape[0]
train_data, train_label, test_data, test_label = split_data(self.datas, self.labels, split_percent)
train_extra_features, train_label, test_extra_features, test_label = split_data(self.extra_features, self.labels, split_percent)
train_label_num_1 = np.sum(np.array(train_label) == 1)
train_label_num_0 = np.sum(np.array(train_label) == 0)
test_label_num_1 = np.sum(np.array(test_label) == 1)
test_label_num_0 = np.sum(np.array(test_label) == 0)
logs = "\nAPPNAME:{}".format(self.app_name) + \
"\nalert to predict:{}".format(self.target) + \
"\ntraining={}".format(self.train_alert_num) + \
"\ntesting={}".format(self.test_alert_num) + \
"\nstep_size={}min".format(self.step//60) + \
"\nwindow_size={}h".format(self.window_size//3600) + \
"\nreact_size={}min".format(self.react_size//60) + \
"\npositive_range={}h".format(self.positive_range//3600) + \
"\nmin_log={}".format(self.min_log) + \
"\ntrain(+):{}".format(train_label_num_1) + \
"\ntrain(-):{}".format(train_label_num_0) + \
"\ntest(+):{}".format(test_label_num_1) + \
"\ntest(-):{}".format(test_label_num_0)
write_log(logs)
return train_data, train_label, test_data, test_label, train_extra_features, test_extra_features
| 43.350993 | 174 | 0.594409 | 6,409 | 0.966667 | 0 | 0 | 0 | 0 | 0 | 0 | 808 | 0.12187 |
941cea816007d01476d7254fd327defb9c84327e | 1,030 | py | Python | Problem-No-23/Approach-No-1.py | sidhant-sriv/comp-project-grade-11 | a95a39e4624ec8f19e800296f3ab025de49b0d64 | [
"MIT"
] | 3 | 2020-10-23T05:21:23.000Z | 2022-01-10T10:58:42.000Z | Problem-No-23/Approach-No-1.py | sidhant-sriv/comp-project-grade-11 | a95a39e4624ec8f19e800296f3ab025de49b0d64 | [
"MIT"
] | 3 | 2020-10-25T10:44:32.000Z | 2020-10-25T16:44:15.000Z | Problem-No-23/Approach-No-1.py | sidhant-sriv/comp-project-grade-11 | a95a39e4624ec8f19e800296f3ab025de49b0d64 | [
"MIT"
] | 6 | 2020-10-23T05:18:56.000Z | 2021-01-14T09:32:35.000Z | '''
| Write a program that should prompt the user to type some sentences. It should then print number of words, number of characters, number of digits and number of special characters in it. |
|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| We use the input() function to receive input from the user and print() function to print it |
'''
string = input("Enter a sentence...\n")
numbers = '1234567890'
chars = '!@#$%^&*()<>?:-\"\'}+=_{|\][;//.,`~'
num_words = len(string.split())
res = [0,0,0]
for i in string:
if i.isalpha():
res[0] += 1
elif i in numbers:
res[1] += 1
elif i in chars:
res[2] += 1
else:
pass
print(f'There are {num_words} word(s), {res[0]} alphabets, {res[1]} digits and {res[2]} special characters in the given string.')
| 44.782609 | 188 | 0.446602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 768 | 0.745631 |
941d352a3b96792669ab524440dad9a606275e2e | 442 | py | Python | primacy/__init__.py | FofanovLab/Primacy | 5505b839e33659a50fef725bb5c1c3584827e4f1 | [
"MIT"
] | 4 | 2018-10-19T06:39:46.000Z | 2019-04-18T04:46:19.000Z | primacy/__init__.py | FofanovLab/Primacy | 5505b839e33659a50fef725bb5c1c3584827e4f1 | [
"MIT"
] | null | null | null | primacy/__init__.py | FofanovLab/Primacy | 5505b839e33659a50fef725bb5c1c3584827e4f1 | [
"MIT"
] | 1 | 2018-10-19T06:39:56.000Z | 2018-10-19T06:39:56.000Z | import click
def error(msg, logger=False):
"""Prints an error message to stderr and logs."""
click.secho(msg, fg='red', err=True)
if logger:
logger.error(msg)
def warn(msg, logger=False):
'''Prints a warning message to stderr.'''
click.secho(msg, fg='yellow')
if logger:
logger.warning(msg)
def info(msg, logger=False):
click.secho(msg, fg='green')
if logger:
logger.info(msg) | 21.047619 | 53 | 0.615385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.248869 |
941da2ff708ac753a5e6740d5eb4d1133bc1a2b2 | 120 | py | Python | python/coursera_python/WESLEYAN/week1/1.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 16 | 2018-11-26T08:39:42.000Z | 2019-05-08T10:09:52.000Z | python/coursera_python/WESLEYAN/week1/1.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 8 | 2020-05-04T06:29:26.000Z | 2022-02-12T05:33:16.000Z | python/coursera_python/WESLEYAN/week1/1.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 5 | 2020-02-11T16:02:21.000Z | 2021-02-05T07:48:30.000Z | def problem1_1():
print("Problem Set 1")
pass # replace this pass (a do-nothing) statement with your code
| 20 | 68 | 0.658333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.616667 |
941fed1bb03847ac97ba42232f45ea371b035a81 | 18,584 | py | Python | mixture_tensorflow/src/gmmvae.py | yixinwang/lidvae-public | 8a0a6a5873888ec68c67dd1802bc42294abe2330 | [
"MIT"
] | 1 | 2022-02-02T12:50:05.000Z | 2022-02-02T12:50:05.000Z | mixture_tensorflow/src/gmmvae.py | yixinwang/lidvae-public | 8a0a6a5873888ec68c67dd1802bc42294abe2330 | [
"MIT"
] | null | null | null | mixture_tensorflow/src/gmmvae.py | yixinwang/lidvae-public | 8a0a6a5873888ec68c67dd1802bc42294abe2330 | [
"MIT"
] | null | null | null | import os
import time
from datetime import datetime
import numpy as np
import numpy.random as npr
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
print(tf.__version__)
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
from tensorflow.contrib.eager.python import tfe
from scipy.stats import mode
from tensorflow.python.keras.constraints import nonneg
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
import argparse
# always reload dependency code
import importlib
import utils
importlib.reload(utils)
from utils import *
class gmmvae(tf.keras.Model):
def __init__(self, discretez_dim, contiz_dim, h_dim, out_dim, regularizer, num_bijectors, nf_dim, qy_num_hidden_layers, qz_num_hidden_layers, px_num_hidden_layers, kl_weight=1.0, fb=0, target_kl=7., USE_BATCHNORM=False):
super(gmmvae, self).__init__(discretez_dim, contiz_dim, h_dim, out_dim, regularizer, num_bijectors, nf_dim, qy_num_hidden_layers, qz_num_hidden_layers, px_num_hidden_layers, USE_BATCHNORM)
self.discretez_dim = discretez_dim
self.contiz_dim = contiz_dim
self.h_dim = h_dim
self.qy_num_hidden_layers = qy_num_hidden_layers
self.kl_weight = kl_weight
self.target_kl = target_kl
self.fb = fb
self.out_dim = out_dim
self.qy_layers = []
for i in range(self.qy_num_hidden_layers):
self.qy_layers.append(tf.keras.layers.Dense(h_dim, kernel_regularizer=regularizer))
self.qz_num_hidden_layers = qz_num_hidden_layers
self.qz_layers = []
for i in range(self.qz_num_hidden_layers):
self.qz_layers.append(tf.keras.layers.Dense(h_dim, kernel_regularizer=regularizer))
self.px_num_hidden_layers = px_num_hidden_layers
self.px_layers = []
for i in range(self.px_num_hidden_layers):
self.px_layers.append(tf.keras.layers.Dense(h_dim, kernel_regularizer=regularizer))
'''
input convex neural network
input: y
parameters: theta = {W^y_{0:k-1}, W^z_{1:k-1}, b_{0:k-1}}
initial condition: z_0 = 0, W_0^z = 0
intermediate layer: z_{i+1} = g_i(W_i^z z_i + W_i^y y + b_i),
i=0, ..., k-1
final layer: f(y,theta) = z_k
constraints: W^z_{1:k-1} is nonnegative
g_i convex and non-decreasing
'''
# Wy_i involves W_i^y, b_i
# self.px_num_hidden_layers = px_num_hidden_layers
# # first layer is treated separately
# self.Wy0 = tf.keras.layers.Dense(h_dim, kernel_regularizer=regularizer)
# self.icnn_Wy_layers = []
# self.icnn_Wz_layers = []
# for i in range(self.px_num_hidden_layers-1):
# self.icnn_Wy_layers.append(tf.keras.layers.Dense(h_dim, kernel_regularizer=regularizer))
# self.icnn_Wz_layers.append(tf.keras.layers.Dense(h_dim,
# use_bias=False, kernel_constraint=tf.keras.constraints.NonNeg(), kernel_regularizer=regularizer))
# # add final layer with output dimension = 1
# self.icnn_Wy_layers.append(tf.keras.layers.Dense(1, kernel_regularizer=regularizer))
# self.icnn_Wz_layers.append(tf.keras.layers.Dense(1,
# use_bias=False, kernel_constraint=tf.keras.constraints.NonNeg(), kernel_regularizer=regularizer))
self.fc2 = tf.keras.layers.Dense(discretez_dim, kernel_regularizer=regularizer)
self.fc5 = tf.keras.layers.Dense(contiz_dim, kernel_regularizer=regularizer)
self.fc6 = tf.keras.layers.Dense(contiz_dim, kernel_regularizer=regularizer)
self.fc7 = tf.keras.layers.Dense(contiz_dim, kernel_regularizer=regularizer)
self.fc8 = tf.keras.layers.Dense(contiz_dim, kernel_regularizer=regularizer)
self.fc13 = tf.keras.layers.Dense(out_dim, kernel_regularizer=regularizer)
self.shift_and_log_scale_fn = []
for i in range(num_bijectors):
self.shift_and_log_scale_fn.append(tfb.real_nvp_default_template(
hidden_layers=[nf_dim, nf_dim], shift_only=True))
bijectors = []
for i in range(num_bijectors):
bijectors.append(tfb.RealNVP(shift_and_log_scale_fn=self.shift_and_log_scale_fn[i], num_masked=2))
if USE_BATCHNORM and i % 2 == 0:
# BatchNorm helps to stabilize deep normalizing flows, esp. Real-NVP
bijectors.append(tfb.BatchNormalization())
self.bijector = tfb.Chain(list(reversed(bijectors)))
# def icnn_grad(self, x_train_tensor):
# with tf.GradientTape() as icnn_tape:
# icnn_tape.watch(x_train_tensor)
# h = [[None] for i in range(self.px_num_hidden_layers + 1)]
# h[0] = tf.square(tf.nn.leaky_relu(self.Wy0(x_train_tensor)))
# for i in range(self.px_num_hidden_layers):
# h[i+1] = tf.nn.leaky_relu(self.icnn_Wz_layers[i](h[i]) + self.icnn_Wy_layers[i](x_train_tensor))
# dout_dx = icnn_tape.gradient(h[-1], x_train_tensor)
# return dout_dx
def qy_graph(self, x, k):
h = [[None] for i in range(self.qy_num_hidden_layers + 1)]
h[0] = x
for i in range(self.qy_num_hidden_layers):
h[i+1] = tf.nn.relu(self.qy_layers[i](h[i]))
qy_logit = self.fc2(h[-1])
qy = tf.nn.softmax(qy_logit)
return qy_logit, qy
def nfdist(self, zm, zv):
# normalizing flow variational distribution for conti_z
nfdist = tfd.TransformedDistribution(
distribution=tfd.MultivariateNormalDiag(loc=zm, scale_diag=zv),
bijector=self.bijector)
return nfdist
def qz_graph(self, x, y):
xy = tf.concat([x, y], 1)
h = [[None] for i in range(self.qz_num_hidden_layers + 1)]
h[0] = xy
for i in range(self.qz_num_hidden_layers):
h[i+1] = tf.nn.relu(self.qz_layers[i](h[i]))
zm = self.fc5(h[-1])
zv = tf.nn.softplus(self.fc6(h[-1]))
nfdist = self.nfdist(zm, zv)
z = nfdist.sample()
return z, zm, zv
def px_graph(self, z, y):
zm = self.fc7(y)
zv = tf.nn.softplus(self.fc8(y))
h = [[None] for i in range(self.px_num_hidden_layers + 1)]
h[0] = z
for i in range(self.px_num_hidden_layers):
h[i+1] = tf.nn.relu(self.px_layers[i](h[i]))
px_logit = self.fc13(z)
return zm, zv, px_logit
def labeled_loss(self, x, px_logit, z, zm, zv, zm_prior, zv_prior):
xy_loss = -log_bernoulli_with_logits(x, px_logit)
xy_loss += -self.kl_weight * tfd.MultivariateNormalDiag(
loc=zm_prior, scale_diag=zv_prior).log_prob(z)
nfdist = self.nfdist(zm, zv)
xy_loss += self.kl_weight * nfdist.log_prob(z)
xy_loss += self.kl_weight * (-np.log(1./self.discretez_dim))
return xy_loss
def call(self, x):
xb = tf.cast(tf.greater(x, tf.random_uniform(tf.shape(x), 0, 1)), tf.float32)
qy_logit, qy = self.qy_graph(xb, k=self.discretez_dim)
z, zm, zv, zm_prior, zv_prior, px_logit = [[None] * self.discretez_dim for i in range(6)]
y_ = tf.fill(tf.stack([tf.shape(x)[0], self.discretez_dim]), 0.0)
for i in range(self.discretez_dim):
y = tf.add(y_, tf.constant(np.eye(self.discretez_dim)[i], dtype='float32'))
z[i], zm[i], zv[i] = self.qz_graph(xb, y)
zm_prior[i], zv_prior[i], px_logit[i] = self.px_graph(z[i], y)
return xb, qy_logit, qy, z, zm, zv, zm_prior, zv_prior, px_logit
def iw_nll(model, images, iw_nsamples=100):
loglikeratios_list = []
for i in range(iw_nsamples):
xb, qy_logit, qy, z, zm, zv, zm_prior, zv_prior, px_logit = model(images)
losses = [None] * model.discretez_dim
for i in range(model.discretez_dim):
losses[i] = model.labeled_loss(xb, px_logit[i], z[i], zm[i], zv[i], zm_prior[i], zv_prior[i])
loss = tf.reshape(-tf.reduce_mean(tf.add_n([qy[:, i] * losses[i] for i in range(model.discretez_dim)])), [1])
# print("loss", loss)
loglikeratios_list.append(loss)
# print("list", loglikeratios_list)
loglikeratios = tf.concat(loglikeratios_list, 0)
iwae_nll = tf.math.reduce_logsumexp(loglikeratios,0) - tf.math.log(tf.constant([iw_nsamples],dtype=tf.float32))
return iwae_nll
def nent_and_loss(model, images):
xb, qy_logit, qy, z, zm, zv, zm_prior, zv_prior, px_logit = model(images)
nent = -model.kl_weight * cross_entropy_with_logits(qy_logit, qy)
losses = [None] * model.discretez_dim
for i in range(model.discretez_dim):
losses[i] = model.labeled_loss(xb, px_logit[i], z[i], zm[i], zv[i], zm_prior[i], zv_prior[i])
loss = tf.add_n([nent] + [qy[:, i] * losses[i] for i in range(model.discretez_dim)])
# print("loss", loss.numpy())
losses_rc = [None] * model.discretez_dim
losses_kl = [None] * model.discretez_dim
for i in range(model.discretez_dim):
losses_rc[i] = tf.reduce_mean(-log_bernoulli_with_logits(xb, px_logit[i]))
losses_kl[i] = tf.reduce_mean(-tfd.MultivariateNormalDiag(
loc=zm_prior[i], scale_diag=zv_prior[i]).log_prob(z[i]) + model.nfdist(zm[i], zv[i]).log_prob(z[i]) - np.log(1./model.discretez_dim))
# print("losses_kl", losses_kl)
loss_rc = tf.add_n([qy[:, i] * losses_rc[i] for i in range(model.discretez_dim)])
loss_kl = tf.add_n([qy[:, i] * losses_kl[i] for i in range(model.discretez_dim)])
loss_kl -= tf.reduce_mean(cross_entropy_with_logits(qy_logit, qy))
# print("loss_kl", loss_kl.numpy(), "loss_rc", loss_rc.numpy())
# print(loss.numpy(), model.kl_weight * loss_kl + loss_rc)
# print(model.fb)
if model.fb == 1:
kl_mask = (loss_kl > model.target_kl)
loss = loss_rc + kl_mask * model.kl_weight * loss_kl
elif model.fb == 2:
# print("i'm here")
dim_target_kl = model.target_kl / model.discretez_dim
fake_losses_kl = [losses_kl[i] * tf.cast(losses_kl[i] > dim_target_kl, dtype=tf.float32) for i in range(model.discretez_dim) ]
fake_loss_kl = tf.add_n([qy[:, i] * fake_losses_kl[i] for i in range(model.discretez_dim)])
# print(dim_target_kl, fake_losses_kl)
fake_loss_kl -= tf.reduce_mean(cross_entropy_with_logits(qy_logit, qy))
loss = loss_rc + model.kl_weight * fake_loss_kl
kl_discrete = nent - np.log(1./model.discretez_dim)
kl_contis = [[None] for i in range(model.discretez_dim)]
for i in range(model.discretez_dim):
kl_contis[i] = model.nfdist(zm[i], zv[i]).log_prob(z) - \
tfd.MultivariateNormalDiag(
loc=zm_prior[i], scale_diag=zv_prior[i]).log_prob(z)
kl_conti = tf.add_n([qy[:, i] * kl_contis[i] for i in range(model.discretez_dim)])
au_discrete = tf.math.reduce_std(qy, 0)
au_conti = tf.math.reduce_std(tf.add_n(
[tf.multiply(tf.expand_dims(qy[:, i], 1), zm[i]) for i in range(model.discretez_dim)]), 0)
return nent, loss, kl_discrete, au_discrete, kl_conti, au_conti, qy_logit
def eval_model(model, train_data, train_labels, test_data, test_labels, itr, outfilename):
with tf.device('/cpu:0'):
train_evalset = np.random.choice(train_data.shape[0], 1000)
test_evalset = np.random.choice(test_data.shape[0], 1000)
train_images = train_data[train_evalset]
train_labels = train_labels.argmax(1)[train_evalset]
test_images = test_data[test_evalset]
test_labels = test_labels.argmax(1)[test_evalset]
train_nent, train_loss, train_kl_discrete, train_au_discrete, train_kl_conti, train_au_conti, train_qy_logit = nent_and_loss(model, train_images)
test_nent, test_loss, test_kl_discrete, test_au_discrete, test_kl_conti, test_au_conti, test_qy_logit = nent_and_loss(model, test_images)
train_iwnll = iw_nll(model, train_images).numpy()[0]
test_iwnll = iw_nll(model, test_images).numpy()[0]
train_ent, train_loss, train_kl_discrete, train_au_discrete, train_kl_conti, train_au_conti = -train_nent.numpy().mean(), train_loss.numpy().mean(), train_kl_discrete.numpy().mean(), train_au_discrete.numpy(), train_kl_conti.numpy().mean(), train_au_conti.numpy()
test_ent, test_loss, test_kl_discrete, test_au_discrete, test_kl_conti, test_au_conti = -test_nent.numpy().mean(), test_loss.numpy().mean(), test_kl_discrete.numpy().mean(), test_au_discrete.numpy(), test_kl_conti.numpy().mean(), test_au_conti.numpy()
zacc_train, zacc_test = -1, -1
if test_labels is not None:
if train_labels is not None:
zacc_test = z_testacc(test_qy_logit, test_labels)
zacc_train = z_testacc(train_qy_logit, train_labels)
# print(itr)
with open(outfilename+'.log', 'a') as f:
f.write("\n\nItr" + str([itr]) +
"\ntrain ent loss zacc kl_discrete kl_conti iw_nll" + str([train_ent]+ [train_loss] + [zacc_train] +
[train_kl_discrete] + [train_kl_conti] + [train_iwnll]) +
"\ntest ent loss zacc kl_discrete kl_conti iw_nll" + str([test_ent]+ [test_loss] + [zacc_test] +
[test_kl_discrete] + [test_kl_conti] + [test_iwnll]))
with open(outfilename+'_au_discrete.log', 'a') as f:
f.write("\n\nItr" + str([itr]) +
"\ntrain au_discrete" + str([train_au_discrete]) +
"\ntest au_discrete" + str([test_au_discrete]))
with open(outfilename+'_au_conti.log', 'a') as f:
f.write("\n\nItr" + str([itr]) +
"\ntrain au_conti" + str([train_au_conti]) +
"\ntest au_conti" + str([test_au_conti]))
print("\n\nItr", itr,
"\ntrain ent loss zacc kl_discrete kl_conti", train_ent, train_loss, zacc_train, train_kl_discrete, train_kl_conti,
"\ntest ent loss zacc kl_discrete kl_conti", test_ent, test_loss, zacc_test, test_kl_discrete, test_kl_conti)
return train_loss
def train_gmmvae(model, train_data, train_labels, test_data, test_labels, optimizer, batch_size, num_epochs, outfilename, device, grad_clip, checkpoint_path, lagging=0):
dataset = tf.data.Dataset.from_tensor_slices((train_data,))
dataset = dataset.shuffle(batch_size * 5)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(10)
num_batches = train_data.shape[0] // batch_size
for outfile in [outfilename+'.log', outfilename+'_au_discrete.log', outfilename+'_au_conti.log']:
with open(outfile, 'a') as f:
f.write("hi i'm starting")
f.write("gmm_znfot_model")
f.write("\noptimizer")
f.write("discretez_dim"+str(model.discretez_dim)+\
"h_dim"+str(model.h_dim)+\
"contiz_dim"+str(model.contiz_dim))
for epoch in range(num_epochs):
print("epoch", epoch)
for batch, (images,) in enumerate(dataset):
itr = epoch * num_batches + batch
print("itr", itr)
with tf.device(device):
with tf.GradientTape(persistent=True) as loss_tape:
loss_tape.watch(model.variables)
nent, loss, _, _, _, _, _ = nent_and_loss(model, images)
if lagging == 0:
gradients = loss_tape.gradient(loss, model.variables)
capped_gradients = tf.clip_by_global_norm(gradients, grad_clip)[0]
# gradient clipping is essential for normalizing flow
grad_vars = zip(capped_gradients, model.variables)
optimizer.apply_gradients(grad_vars, tf.train.get_or_create_global_step())
elif lagging == 1:
px_vars = model.px_layers.variables + model.fc7.variables + model.fc8.variables + model.fc13.variables
qz_vars = model.qz_layers.variables + model.qy_layers.variables + model.shift_and_log_scale_fn.variables + model.fc2.variables + model.fc5.variables + model.fc6.variables
# update inference parameters much more than generative model parameters
for sub_itr in range(10):
# print(sub_itr)
qz_gradients = loss_tape.gradient(loss, qz_vars)
qz_capped_gradients = tf.clip_by_global_norm(qz_gradients, grad_clip)[0]
# gradient clipping is essential for normalizing flow
qz_grad_vars = zip(qz_capped_gradients, qz_vars)
optimizer.apply_gradients(qz_grad_vars, tf.train.get_or_create_global_step())
px_gradients = loss_tape.gradient(loss, px_vars)
px_capped_gradients = tf.clip_by_global_norm(px_gradients, grad_clip)[0]
# gradient clipping is essential for normalizing flow
px_grad_vars = zip(px_capped_gradients, px_vars)
optimizer.apply_gradients(px_grad_vars, tf.train.get_or_create_global_step())
if itr % num_batches == 0:
# if itr % 10 == 0:
train_loss = eval_model(model, train_data, train_labels, test_data, test_labels, itr, outfilename)
model.save_weights(checkpoint_path.format(itr=itr))
if math.isnan(train_loss):
break
# if model.out_dim == 784:
# cmap=plt.cm.jet
# y_ = tf.fill(tf.stack([batch_size, model.discretez_dim]), 0.0)
# for i in range(model.discretez_dim):
# y = tf.add(y_, tf.constant(np.eye(model.discretez_dim)[i], dtype='float32'))
# zm_prior = model.fc7(y)
# zv_prior = tf.nn.softplus(model.fc8(y))
# z = tfd.MultivariateNormalDiag(loc=zm_prior, scale_diag=zv_prior).sample()
# _, _, px_logit = model.px_graph(z, y)
# out = tf.math.reduce_mean(tf.nn.sigmoid(px_logit), 0)
# out = tf.reshape(out, [28, 28]).numpy() * 255
# out = out.astype(np.uint8)
# plt.imsave(os.path.dirname(checkpoint_path)+'/itr'+str(itr)+'_discretez'+str(i)+'.png', out, cmap=cmap)
return model
| 46.811083 | 271 | 0.632211 | 7,063 | 0.380058 | 0 | 0 | 0 | 0 | 0 | 0 | 3,731 | 0.200764 |
9420f4b16f63eff7579a1baa1337f88c073a8797 | 65 | py | Python | run.py | TheBotCrator/Cools3 | c3711887c6ca61d5bce99573367705c707f17945 | [
"MIT"
] | null | null | null | run.py | TheBotCrator/Cools3 | c3711887c6ca61d5bce99573367705c707f17945 | [
"MIT"
] | null | null | null | run.py | TheBotCrator/Cools3 | c3711887c6ca61d5bce99573367705c707f17945 | [
"MIT"
] | null | null | null | import BonezBot.bot
__author__ = "SpBonez"
__version__ = "0.0.1" | 16.25 | 22 | 0.738462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.246154 |
942279c3334fa764daedae602da97b0a679adcb5 | 1,168 | py | Python | deployment/src/webapp_main.py | nesfit/pyspark-plaso | 4e0680a1a79a5aebfbc7ae983da30841bf984d95 | [
"Apache-2.0"
] | 2 | 2020-02-09T01:11:08.000Z | 2021-09-17T04:16:31.000Z | deployment/src/webapp_main.py | nesfit/pyspark-plaso | 4e0680a1a79a5aebfbc7ae983da30841bf984d95 | [
"Apache-2.0"
] | null | null | null | deployment/src/webapp_main.py | nesfit/pyspark-plaso | 4e0680a1a79a5aebfbc7ae983da30841bf984d95 | [
"Apache-2.0"
] | 1 | 2021-03-17T09:47:01.000Z | 2021-03-17T09:47:01.000Z | # -*- coding: utf-8 -*-
if __name__ == "__main__":
    # Create the Spark Session and grab its Spark Context
    from pyspark.sql import SparkSession
    spark = SparkSession.builder \
        .appName("PySpark Plaso WebAPI Application") \
        .getOrCreate()
    sc = spark.sparkContext
    # Build the WSGI app; the HDFS data URI is configurable via PP_HDFS_URI
    from os import getenv
    from plaso.tarzan.app.pyspark_plaso_webapp import configure_app
    app = configure_app(sc, getenv("PP_HDFS_URI", "hdfs://hadoop@namenode:8020/test_data"))
    # Enable WSGI access logging via Paste
    from paste.translogger import TransLogger
    app_logged = TransLogger(app)
    # Mount the WSGI callable object (app) on the root directory
    import cherrypy
    cherrypy.tree.graft(app_logged, '/')
    # Set the configuration of the web server (host/port come from PP_HOST/PP_PORT)
    cherrypy.config.update({
        'engine.autoreload.on': True,
        'log.screen': True,
        'server.socket_port': int(getenv("PP_PORT", 54380)),
        'server.socket_host': getenv("PP_HOST", '0.0.0.0'),
        # remove size-limit for file uploads
        'server.max_request_body_size': 0,
    })
    # Start the CherryPy WSGI web server and block until shutdown
    cherrypy.engine.start()
    cherrypy.engine.block()
| 30.736842 | 91 | 0.669521 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 497 | 0.425514 |
94228f07bd58a62807a2239f88f4d780e3989c40 | 5,892 | py | Python | research/cv/StackedHourglass/src/dataset/DatasetGenerator.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 1 | 2021-11-18T08:17:44.000Z | 2021-11-18T08:17:44.000Z | research/cv/StackedHourglass/src/dataset/DatasetGenerator.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | null | null | null | research/cv/StackedHourglass/src/dataset/DatasetGenerator.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 2 | 2019-09-01T06:17:04.000Z | 2019-10-04T08:39:45.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
dataset classes
"""
import cv2
import numpy as np
import src.utils.img
from src.dataset.MPIIDataLoader import flipped_parts
class GenerateHeatmap:
    """
    Callable that renders keypoint coordinates into per-joint Gaussian
    heatmaps of shape (num_parts, output_res, output_res).
    """

    def __init__(self, output_res, num_parts):
        self.output_res = output_res
        self.num_parts = num_parts
        self.sigma = self.output_res / 64
        # Pre-render a single Gaussian patch once; it is stamped onto the
        # heatmaps (clipped at the borders) for every visible keypoint.
        size = 6 * self.sigma + 3
        xs = np.arange(0, size, 1, float)
        ys = xs[:, np.newaxis]
        peak = 3 * self.sigma + 1
        self.g = np.exp(-((xs - peak) ** 2 + (ys - peak) ** 2) / (2 * self.sigma ** 2))

    def __call__(self, keypoints):
        res = self.output_res
        sigma = self.sigma
        hms = np.zeros(shape=(self.num_parts, res, res), dtype=np.float32)
        for person in keypoints:
            for joint, point in enumerate(person):
                if point[0] <= 0:
                    continue  # non-positive x marks an invisible joint
                x, y = int(point[0]), int(point[1])
                if not (0 <= x < res and 0 <= y < res):
                    continue  # keypoint falls outside the heatmap
                # Corners of the Gaussian patch in heatmap coordinates.
                left, top = int(x - 3 * sigma - 1), int(y - 3 * sigma - 1)
                right, bottom = int(x + 3 * sigma + 2), int(y + 3 * sigma + 2)
                # Clip the patch to the map and combine with element-wise maxima.
                gx0, gx1 = max(0, -left), min(right, res) - left
                gy0, gy1 = max(0, -top), min(bottom, res) - top
                mx0, mx1 = max(0, left), min(right, res)
                my0, my1 = max(0, top), min(bottom, res)
                hms[joint, my0:my1, mx0:mx1] = np.maximum(
                    hms[joint, my0:my1, mx0:mx1], self.g[gy0:gy1, gx0:gx1])
        return hms
class DatasetGenerator:
    """
    mindspore general dataset generator

    Wraps a data source and yields (image, heatmaps) training pairs with
    random rotation/scale/flip/color augmentation.
    """
    def __init__(self, input_res, output_res, ds, index):
        # input_res: side length of the square network input crop
        # output_res: side length of the square target heatmaps
        # ds: data source exposing get_img/get_kps/get_center/get_scale
        # index: sequence of sample ids this generator iterates over
        self.input_res = input_res
        self.output_res = output_res
        # 16 joints — presumably the MPII keypoint layout; verify against loader
        self.generateHeatmap = GenerateHeatmap(self.output_res, 16)
        self.ds = ds
        self.index = index
    def __len__(self):
        return len(self.index)
    def __getitem__(self, idx):
        # print(f"loading...{idx}")
        return self.loadImage(self.index[idx])
    def loadImage(self, idx):
        """
        load and preprocess image

        Returns (input image as float32, target heatmaps as float32).
        """
        ds = self.ds
        # Load + Crop around the annotated person center/scale
        orig_img = ds.get_img(idx)
        orig_keypoints = ds.get_kps(idx)  # assumes shape (1, num_joints, >=2) — TODO confirm
        kptmp = orig_keypoints.copy()  # untouched copy, used below for the visibility check
        c = ds.get_center(idx)
        s = ds.get_scale(idx)
        cropped = src.utils.img.crop(orig_img, c, s, (self.input_res, self.input_res))
        # Map visible keypoints (x > 0) into crop coordinates
        for i in range(np.shape(orig_keypoints)[1]):
            if orig_keypoints[0, i, 0] > 0:
                orig_keypoints[0, i, :2] = src.utils.img.transform(
                    orig_keypoints[0, i, :2], c, s, (self.input_res, self.input_res)
                )
        keypoints = np.copy(orig_keypoints)
        # Random rotation/scale augmentation
        height, width = cropped.shape[0:2]
        center = np.array((width / 2, height / 2))
        scale = max(height, width) / 200
        aug_rot = 0
        aug_rot = (np.random.random() * 2 - 1) * 30.0  # rotation in [-30, 30] degrees
        aug_scale = np.random.random() * (1.25 - 0.75) + 0.75  # scale factor in [0.75, 1.25)
        scale *= aug_scale
        # Separate affine transforms: heatmap-resolution (for keypoints) and
        # input-resolution (for the image)
        mat_mask = src.utils.img.get_transform(center, scale, (self.output_res, self.output_res), aug_rot)[:2]
        mat = src.utils.img.get_transform(center, scale, (self.input_res, self.input_res), aug_rot)[:2]
        inp = cv2.warpAffine(cropped, mat, (self.input_res, self.input_res)).astype(np.float32) / 255
        keypoints[:, :, 0:2] = src.utils.img.kpt_affine(keypoints[:, :, 0:2], mat_mask)
        # With probability 1/2: color-jitter and flip horizontally (swapping
        # left/right joints via the flipped_parts lookup)
        if np.random.randint(2) == 0:
            inp = self.preprocess(inp)
            inp = inp[:, ::-1]
            keypoints = keypoints[:, flipped_parts["mpii"]]
            keypoints[:, :, 0] = self.output_res - keypoints[:, :, 0]
            orig_keypoints = orig_keypoints[:, flipped_parts["mpii"]]
            orig_keypoints[:, :, 0] = self.input_res - orig_keypoints[:, :, 0]
        # If keypoint is invisible (both raw coords 0 in the annotation), set to 0
        for i in range(np.shape(orig_keypoints)[1]):
            if kptmp[0, i, 0] == 0 and kptmp[0, i, 1] == 0:
                keypoints[0, i, 0] = 0
                keypoints[0, i, 1] = 0
                orig_keypoints[0, i, 0] = 0
                orig_keypoints[0, i, 1] = 0
        # Generate target heatmap from the augmented keypoints
        heatmaps = self.generateHeatmap(keypoints)
        return inp.astype(np.float32), heatmaps.astype(np.float32)
    def preprocess(self, data):
        """
        preprocess images

        Random hue/saturation/brightness/contrast jitter; expects an RGB
        float image and returns values clamped to [0, 1].
        """
        # Random hue and saturation
        data = cv2.cvtColor(data, cv2.COLOR_RGB2HSV)
        delta = (np.random.random() * 2 - 1) * 0.2
        data[:, :, 0] = np.mod(data[:, :, 0] + (delta * 360 + 360.0), 360.0)
        delta_sature = np.random.random() + 0.5
        data[:, :, 1] *= delta_sature
        data[:, :, 1] = np.maximum(np.minimum(data[:, :, 1], 1), 0)
        data = cv2.cvtColor(data, cv2.COLOR_HSV2RGB)
        # Random brightness
        delta = (np.random.random() * 2 - 1) * 0.3
        data += delta
        # Random contrast
        mean = data.mean(axis=2, keepdims=True)
        data = (data - mean) * (np.random.random() + 0.5) + mean
        data = np.minimum(np.maximum(data, 0), 1)
        return data
| 36.37037 | 110 | 0.54888 | 5,090 | 0.863883 | 0 | 0 | 0 | 0 | 0 | 0 | 1,046 | 0.177529 |
94231734cc463afef8a707d32dbd418d60688b77 | 9,782 | py | Python | tools/filereader.py | SaibboRiginal/Python_remote_PLC | e0034e363891ebac4f8704f632900530174d22da | [
"MIT"
] | null | null | null | tools/filereader.py | SaibboRiginal/Python_remote_PLC | e0034e363891ebac4f8704f632900530174d22da | [
"MIT"
] | null | null | null | tools/filereader.py | SaibboRiginal/Python_remote_PLC | e0034e363891ebac4f8704f632900530174d22da | [
"MIT"
] | null | null | null | # Essential modules import
import csv, os
# Variables modules import
from tools import *
# Importing custom Utility modules
from utility.logger import MyLogger
log = MyLogger("csv-reader") # Logger
class CSVR():
    """
    CSV reader: loads a CSV file into memory (row 0 holds the headers) and
    offers row/column/cell access plus a naive search facility.
    """
    def __init__(self, filename=None, delimiter=','):
        '''
        Object CSV : Opens a file CSV to read it and elaborate it, can open the file directly when creating
        object giving the filename and the delimiter used.
        filename = <STRING> File name.
        delimiter = <STRING> Delimiter used in file.
        '''
        self._name_ = os.path.basename(os.path.realpath(__file__)) # Gets name of script
        self._path_ = os.path.realpath(__file__).replace(self._name_ , '') # Gets absolute path of script
        self.file = [] # Content of the CSV file when opened
        if filename: # Can open file when creating object
            self.openfile(filename, delimiter)

    def openfile(self, filename, delimiter=','):
        '''
        Opens File and reads it (rows are appended to any previously loaded content).
        filename : <STRING> File name.
        delimiter = <STRING> Delimiter used in file.
        '''
        if not filename.endswith('.csv'): # Check if has already the file extension in the name
            filename += '.csv'
        try:
            # Use a context manager so the file handle is always closed
            # (previously the handle returned by open() was leaked).
            with open(filename) as handle:
                self.file.extend(csv.reader(handle, delimiter=delimiter))
        except Exception as e:
            log.exception(e)
            log.error("Not able to open csv file")

    def headers(self):
        '''
        Returns the csv file's headers.
        return <LIST> of <STRING>
        '''
        try:
            return self.file[0]
        except Exception as e:
            log.exception(e)
            log.error("Not able to return CSV headers")
            return None

    def rows(self, line=None):
        '''
        Returns the csv file's row or rows line or can get all rows without specifing lines.
        line = <INT> Row line/<LIST> of <INT> Rows line
               <None> For all rows.
        NOTE: row 0 is the header row; rows(0) falls through and returns all data rows.
        return <LIST> of <STRING>
        '''
        try:
            if line and type(line) == int: # Single row by index
                return self.file[line]
            if line and type(line) == list: # Multiple rows by index
                return [self.file[x] for x in line]
            return self.file[1:] # All data rows (header excluded)
        except Exception as e:
            log.exception(e)
            log.error("Not able to return CSV content")

    def _column_index(self, col):
        '''
        Resolves a column given by numeric index or (case-unsensitive) name.
        col : <INT/STRING> Column.
        return <INT> index, or <None> when it cannot be resolved.
        '''
        if type(col) == int:
            # Compare against the header length (the old code compared the
            # index against the number of rows, which was a bug).
            return col if col < len(self.file[0]) else None
        if type(col) == str:
            caseunsensitive = [header.lower() for header in self.file[0]]
            if col.lower() in caseunsensitive:
                return caseunsensitive.index(col.lower())
        return None

    def columns(self, col):
        '''
        Returns the csv file's column or columns by index or name.
        col : <INT/STRING> Column /<LIST> of <INT/STRING> Columns.
        Unresolvable columns are mapped to None under the requested key.
        return <DICT>
        '''
        try:
            requested = col if type(col) == list else [col]
            result = {}
            for item in requested:
                index = self._column_index(item)
                if index is None: # Unknown name or out-of-range index
                    result[item] = None
                    continue
                key = self.file[0][index] # Column name taken from the header row
                result[key] = [row[index] for row in self.file[1:]]
            return result
        except Exception as e:
            log.exception(e)
            log.error("Not able to get column values")

    def cell(self, row, col):
        '''
        Returns the value in a cell given the row and column number.
        row : <INT> Line.
        col : <INT> Column.
        return <STRING>
        '''
        try:
            return self.file[row][col]
        except Exception as e:
            log.exception(e)
            log.error("Not able to get cell value")

    def searching(self, search, quick=True):
        '''
        Returns (Row number, Col name, Value) of searched value in a csv file,
        can make a quick search of just of the rows or a slow accurate search in every cell.
        search : <STRING> Value to search.
        quick = <BOOL> True(default) for exact per-row match, False for slow substring search per cell.
        return <LIST> of <TUPLE>
        '''
        try:
            result = []
            for num_row, row in enumerate(self.file): # Go through all the rows
                s = set(row)
                if not quick: # Substring match in every single CELL (SLOW)
                    for word in s:
                        if search.lower() in word.lower():
                            result.append((num_row, self.headers()[row.index(word)], word))
                elif search in s: # Exact match against the ROW (FAST)
                    result.append((num_row, self.headers()[row.index(search)], search))
            log.info("Found %d results" % len(result))
            return result
        except Exception as e:
            log.exception(e)
            log.error("Not able to launch search")

    # Backwards-compatible alias: some callers invoke .search() instead of
    # .searching().
    search = searching

    def toString(self, result, stamp=False, separetor=";"):
        '''
        Transform a list or a dictionary into a string.
        result : <DICT/LIST> Values to join together.
        stamp = <BOOL> True to log each joined string, False(default) not to.
        separetor = <STRING> What to put between values.
        return <STRING>/<LIST> of <STRING>
        '''
        try:
            if type(result) == list: # ARRAY input
                if type(result[0]) != list: # Flat list of strings -> one joined string
                    r = separetor.join(result)
                    if stamp:
                        log.info(r)
                    return r
                # List of lists -> one joined string per inner list
                rs = []
                for l in result:
                    r = separetor.join(l)
                    rs.append(r)
                    if stamp:
                        log.info(r)
                return rs
            elif type(result) == dict: # DICTIONARY input: "key : v1;v2" per entry
                rs = []
                for key, values in result.items():
                    if values: # Entries with falsy values are skipped
                        r = str(key) + " : " + separetor.join(values)
                        rs.append(r)
                        if stamp:
                            log.info(r)
                return rs
        except Exception as e:
            log.exception(e)
            log.error("Not able to convert results into a string")
if __name__ == "__main__":
    # Demo: load a sample CSV, dump its rows and run a quick search.
    asd = CSVR(r"C:\Java\Visual Basic Code\source\mirror 4.0\ABSOLUTE3\PYTHON\files\asd.csv")
    asd.toString(asd.rows(), separetor="LOOL", stamp=True)
    # Fixed: the method is named `searching`; `search` did not exist here.
    r = asd.searching("Doe")
print(r) | 40.92887 | 149 | 0.508792 | 9,362 | 0.957064 | 0 | 0 | 0 | 0 | 0 | 0 | 4,138 | 0.423022 |
94239941557b0ef32f8bdc3e929a8b7a0ca9bab9 | 1,087 | py | Python | calc/calculations/calculation.py | dhruvshah1996/Project3 | d87ad37f6cf2de0d3402c71d21b25258946aad69 | [
"MIT"
] | null | null | null | calc/calculations/calculation.py | dhruvshah1996/Project3 | d87ad37f6cf2de0d3402c71d21b25258946aad69 | [
"MIT"
] | null | null | null | calc/calculations/calculation.py | dhruvshah1996/Project3 | d87ad37f6cf2de0d3402c71d21b25258946aad69 | [
"MIT"
] | null | null | null | """Calculation Class"""
class Calculation:
    """Abstract base class for calculations over a tuple of numeric values."""
    # pylint: disable=too-few-public-methods
    def __init__(self, values: tuple):
        """Store the operands, normalised to an immutable tuple of floats."""
        self.values = Calculation.convert_args_to_tuple_of_float(values)

    @classmethod
    def create(cls, values: tuple):
        """Factory method: build an instance of the concrete calculation class."""
        return cls(values)

    @staticmethod
    def convert_args_to_tuple_of_float(values: tuple):
        """Standardise arbitrary numeric-like inputs to a tuple of floats.

        Floats give one consistent numeric type for the raw input data; a
        tuple is used because the stored operands never need to be edited
        (and tuples are cheaper than lists).
        """
        return tuple(float(item) for item in values)
94241de093c4473da5fbd63e55e70c05f056f359 | 2,390 | py | Python | tests/test_models/test_user.py | adrian-blip/AirBnB_clone_v2 | c27a9d923631c78ec437e4608b5c98f3f9fd1cad | [
"MIT"
] | null | null | null | tests/test_models/test_user.py | adrian-blip/AirBnB_clone_v2 | c27a9d923631c78ec437e4608b5c98f3f9fd1cad | [
"MIT"
] | null | null | null | tests/test_models/test_user.py | adrian-blip/AirBnB_clone_v2 | c27a9d923631c78ec437e4608b5c98f3f9fd1cad | [
"MIT"
] | 1 | 2021-07-07T21:37:54.000Z | 2021-07-07T21:37:54.000Z | #!/usr/bin/python3
"""
===============================================================================
████████╗███████╗███████╗████████╗ ██████╗ █████╗ ███████╗███████╗███████╗
╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ ██╔════╝██╔══██╗██╔════╝██╔════╝██╔════╝
██║ █████╗ ███████╗ ██║ ██║ ███████║███████╗█████╗ ███████╗
██║ ██╔══╝ ╚════██║ ██║ ██║ ██╔══██║╚════██║██╔══╝ ╚════██║
██║ ███████╗███████║ ██║ ╚██████╗██║ ██║███████║███████╗███████║
╚═╝ ╚══════╝╚══════╝ ╚═╝ ╚═════╝╚═╝ ╚═╝╚══════╝╚══════╝╚══════╝
===============================================================================
"""
from models.base_model import BaseModel
from models.user import User
import unittest
import json
import pep8
import datetime
class TestUser(unittest.TestCase):
    """Unit tests for the User model: docs, PEP8 compliance and attribute types."""
    def test_doc_module(self):
        """The User class must carry a non-trivial docstring."""
        self.assertGreater(len(User.__doc__), 1)

    def test_pep8_conformance_base_model(self):
        """models/user.py must be PEP8 clean."""
        style_checker = pep8.StyleGuide(quiet=True)
        report = style_checker.check_files(['models/user.py'])
        self.assertEqual(report.total_errors, 0,
                         "Found code style errors (and warnings).")

    def test_pep8_conformance_test_base_model(self):
        """tests/test_models/test_user.py must be PEP8 clean."""
        style_checker = pep8.StyleGuide(quiet=True)
        report = style_checker.check_files(['tests/test_models/test_user.py'])
        self.assertEqual(report.total_errors, 0,
                         "Found code style errors (and warnings).")

    def test_doc_constructor(self):
        """The constructor must carry a non-trivial docstring."""
        self.assertGreater(len(User.__init__.__doc__), 1)

    def test_class(self):
        """User subclasses BaseModel and declares string class attributes."""
        with self.subTest(msg='Inheritance'):
            self.assertTrue(issubclass(User, BaseModel))
        with self.subTest(msg='Attributes'):
            self.assertIsInstance(User.email, str)
            self.assertIsInstance(User.password, str)
            self.assertIsInstance(User.first_name, str)
            self.assertIsInstance(User.last_name, str)
94248c4b47d73f1af610ca1e30110952cd6738d6 | 1,236 | py | Python | filter_local_tool/test_filter.py | g-freire/web-parser-tools | edbec7b57b33eea8a203e1b32a8c911ef1a22956 | [
"MIT"
] | 1 | 2019-09-25T21:22:14.000Z | 2019-09-25T21:22:14.000Z | filter_local_tool/test_filter.py | g-freire/web-parser-tools | edbec7b57b33eea8a203e1b32a8c911ef1a22956 | [
"MIT"
] | null | null | null | filter_local_tool/test_filter.py | g-freire/web-parser-tools | edbec7b57b33eea8a203e1b32a8c911ef1a22956 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
from filter_tool import *
# Fixture URLs covering the critical cases that could break the filter:
# valid product URLs, duplicates, near-miss paths and malformed URLs.
product_pattern = ["https://www.epocacosmeticos.com.br/some_product/p","https://www.epocacosmeticos.com.br/sabonete-eme-barra-dermage-secatriz/p"]
duplicates = ["https://www.epocacosmeticos.com.br/sabonete-eme-barra-dermage-secatriz/p","https://www.epocacosmeticos.com.br/sabonete-eme-barra-dermage-secatriz/p","https://www.epocacosmeticos.com.br/sabonete-eme-barra-dermage-secatriz/p"]
notproduct = ["https://www.epocacosmeticos.com.br/sabonete-eme-barra-dermage-secatriz/pr","https://www.epocacosmeticos.com.br/p"]
noturl = ['epoca/a/p', 'www.epocacosmeticos.com.br/a/p', '/www.epocacosmeticos.com.br/p', ]
def test_find_pattern_product():
    """find_pattern_product must return only deduplicated product URLs."""
    miner = Mine('', '')
    expected_pair = (
        'https://www.epocacosmeticos.com.br/sabonete-eme-barra-dermage-secatriz/p',
        'https://www.epocacosmeticos.com.br/some_product/p',
    )
    assert miner.find_pattern_product(product_pattern) == expected_pair
    assert miner.find_pattern_product(duplicates) == (
        'https://www.epocacosmeticos.com.br/sabonete-eme-barra-dermage-secatriz/p',
    )
    assert miner.find_pattern_product(notproduct) == ()
    assert miner.find_pattern_product(noturl) == ()
| 56.181818 | 239 | 0.749191 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 822 | 0.665049 |
94255f71fe2c2edb4e8cc5ae13b412e91881739d | 832 | py | Python | src/Reset Ease Automatically/utils.py | RisingOrange/Reset-Ease-Automatically | 7c2fd16b7cac32ba499d87f681c75cfcfb617405 | [
"MIT"
] | 5 | 2020-09-06T10:51:39.000Z | 2021-11-11T01:46:06.000Z | src/Reset Ease Automatically/utils.py | RisingOrange/Reset-Ease-Automatically | 7c2fd16b7cac32ba499d87f681c75cfcfb617405 | [
"MIT"
] | 6 | 2020-09-06T11:28:47.000Z | 2021-06-13T00:22:03.000Z | src/Reset Ease Automatically/utils.py | RisingOrange/Reset-Ease-Automatically | 7c2fd16b7cac32ba499d87f681c75cfcfb617405 | [
"MIT"
] | 1 | 2020-09-06T10:52:43.000Z | 2020-09-06T10:52:43.000Z | from aqt import mw
from .config import get, set
def prepare_deck_to_ease_range():
    """Normalise the add-on config: fold the legacy 'deck_to_ease' entry into
    'deck_to_ease_range' and drop entries for decks that no longer exist.
    """
    deck_to_ease_range = get('deck_to_ease_range') or {}
    # For backwards compatibility: migrate the old single-ease mapping.
    deck_to_ease = get('deck_to_ease') or {}
    # Pass the dict positionally (not as **kwargs): update(**d) raises
    # TypeError for non-string keys.
    deck_to_ease_range.update(_to_deck_to_ease_range(deck_to_ease))
    set('deck_to_ease', None)
    # Remove entries of decks that do not exist in Anki and ensure the deck
    # ids are of type int; the deck-id lookup is hoisted out of the loop.
    existing_ids = set(mw.col.decks.allIds())
    cleaned = {
        int(deck_id): ease_range
        for deck_id, ease_range in deck_to_ease_range.items()
        if str(deck_id) in existing_ids
    }
    set('deck_to_ease_range', cleaned)
def _to_deck_to_ease_range(deck_to_ease):
converted = {
deck_id : (ease, ease)
for deck_id, ease in deck_to_ease.items()
}
return converted
| 27.733333 | 70 | 0.671875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.225962 |
942599ab210e3bb5b077aab6360a6c4298208c75 | 507 | py | Python | zhihuer/celery.py | guojy1314/stw1209 | 043889688c2ed55884b8ddde9cdf6949ee52f905 | [
"MIT"
] | 85 | 2018-07-15T06:45:59.000Z | 2021-06-26T06:51:38.000Z | zhihuer/celery.py | guojy1314/stw1209 | 043889688c2ed55884b8ddde9cdf6949ee52f905 | [
"MIT"
] | 8 | 2020-02-12T02:26:58.000Z | 2022-03-12T00:08:05.000Z | zhihuer/celery.py | guojy1314/stw1209 | 043889688c2ed55884b8ddde9cdf6949ee52f905 | [
"MIT"
] | 26 | 2019-01-26T18:05:11.000Z | 2021-06-26T06:51:39.000Z | # 在zhihuer项目目录下,
# cmd运行: celery -A zhihuer worker -l info (-A 默认寻找目录下的celery模块)
# 启动celery服务
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
from django.conf import settings
# 设置环境变量
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'zhihuer.settings')
# 实例化Celery
app = Celery('zhihuer')
# 使用django的settings文件配置celery
app.config_from_object('django.conf:settings', 'CELERY')
# Celery加载所有注册应用中的tasks.py
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| 25.35 | 67 | 0.808679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 342 | 0.567164 |
9425fd387b206e24b54e8691b3da5475d83b5d32 | 1,122 | py | Python | cogs/youtube.py | RuiL1904/ruibot-discord.py | 588406ef2dbebd7d976237f1d876054d641933f6 | [
"MIT"
] | null | null | null | cogs/youtube.py | RuiL1904/ruibot-discord.py | 588406ef2dbebd7d976237f1d876054d641933f6 | [
"MIT"
] | null | null | null | cogs/youtube.py | RuiL1904/ruibot-discord.py | 588406ef2dbebd7d976237f1d876054d641933f6 | [
"MIT"
] | null | null | null | import os
import nextcord as discord
from nextcord.ext import commands
import pytube
class Youtube(commands.Cog):
    """Cog that downloads a YouTube video's audio track and replies with it as an MP3."""

    def __init__(self, client):
        self.client = client

    @commands.command(name = 'youtube', aliases = ['yt'])
    async def youtube(self, context, url):
        """Download the audio of ``url`` into data/music and reply with the MP3 file."""
        music_dir = 'data/music'
        # Keep at most 20 cached downloads; once exceeded, wipe the cache.
        if len(os.listdir(music_dir)) > 20:
            for name in os.listdir(music_dir):
                # Fixed: os.listdir yields bare file names, so join them with
                # the directory (the old code removed `name` relative to the
                # current working directory).
                os.remove(os.path.join(music_dir, name))
        # Fetch the first audio-only stream via pytube.
        downloader = pytube.YouTube(url)
        music = downloader.streams.filter(only_audio = True).first()
        out_file = music.download(output_path = music_dir)
        # Rename the downloaded file to have an .mp3 extension.
        base, _ext = os.path.splitext(out_file)
        new_file = base + '.mp3'
        os.rename(out_file, new_file)
        # Send the file back to the Discord channel.
        music_file = discord.File(new_file, filename = 'music.mp3')
        await context.reply(file = music_file)
def setup(client):
    """Standard nextcord extension entry point: register the Youtube cog."""
    client.add_cog(Youtube(client))
94267a216f2fcaedda83675c1ece9c6b384d5d17 | 7,273 | py | Python | pysnmp/CTRON-SSR-CONFIG-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/CTRON-SSR-CONFIG-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/CTRON-SSR-CONFIG-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CTRON-SSR-CONFIG-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CTRON-SSR-CONFIG-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:15:46 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint")
ssrMibs, = mibBuilder.importSymbols("CTRON-SSR-SMI-MIB", "ssrMibs")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Counter32, MibIdentifier, IpAddress, Gauge32, Counter64, ModuleIdentity, iso, Bits, NotificationType, TimeTicks, Unsigned32, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Counter32", "MibIdentifier", "IpAddress", "Gauge32", "Counter64", "ModuleIdentity", "iso", "Bits", "NotificationType", "TimeTicks", "Unsigned32", "Integer32")
TextualConvention, TruthValue, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "TruthValue", "DisplayString")
# MIB module identity and revision history (pysmi-generated; do not edit by hand).
ssrConfigMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 52, 2501, 1, 230))
ssrConfigMIB.setRevisions(('2000-07-15 00:00', '2000-02-20 00:00', '1998-08-17 00:00',))
if mibBuilder.loadTexts: ssrConfigMIB.setLastUpdated('200007150000Z')
if mibBuilder.loadTexts: ssrConfigMIB.setOrganization('Cabletron Systems, Inc')
class SSRErrorCode(TextualConvention, Integer32):
    # Enumerated result/error codes reported by SSR configuration-transfer
    # operations (exposed via the cfgLastError scalar below in this module).
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))
    namedValues = NamedValues(("noStatus", 1), ("timeout", 2), ("networkError", 3), ("noSpace", 4), ("invalidConfig", 5), ("commandCompleted", 6), ("internalError", 7), ("tftpServerError", 8))
# --- Configuration-management objects (subtree ...52.2501.1.231), pysmi-generated ---
cfgGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 2501, 1, 231))
cfgTransferOp = MibScalar((1, 3, 6, 1, 4, 1, 52, 2501, 1, 231, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("noop", 1), ("sendConfigToAgent", 2), ("receiveConfigFromAgent", 3), ("receiveBootlogFromAgent", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cfgTransferOp.setStatus('current')
cfgManagerAddress = MibScalar((1, 3, 6, 1, 4, 1, 52, 2501, 1, 231, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cfgManagerAddress.setStatus('current')
cfgFileName = MibScalar((1, 3, 6, 1, 4, 1, 52, 2501, 1, 231, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cfgFileName.setStatus('current')
cfgActivateTransfer = MibScalar((1, 3, 6, 1, 4, 1, 52, 2501, 1, 231, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cfgActivateTransfer.setStatus('current')
cfgTransferStatus = MibScalar((1, 3, 6, 1, 4, 1, 52, 2501, 1, 231, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("idle", 1), ("sending", 2), ("receiving", 3), ("transferComplete", 4), ("error", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfgTransferStatus.setStatus('current')
cfgActivateFile = MibScalar((1, 3, 6, 1, 4, 1, 52, 2501, 1, 231, 6), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cfgActivateFile.setStatus('current')
cfgLastError = MibScalar((1, 3, 6, 1, 4, 1, 52, 2501, 1, 231, 7), SSRErrorCode()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfgLastError.setStatus('current')
cfgLastErrorReason = MibScalar((1, 3, 6, 1, 4, 1, 52, 2501, 1, 231, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfgLastErrorReason.setStatus('current')
cfgActiveImageVersion = MibScalar((1, 3, 6, 1, 4, 1, 52, 2501, 1, 231, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfgActiveImageVersion.setStatus('current')
cfgActiveImageBootLocation = MibScalar((1, 3, 6, 1, 4, 1, 52, 2501, 1, 231, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cfgActiveImageBootLocation.setStatus('current')
# --- Conformance statements (subtree ...52.2501.1.230.3) ---
configConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 2501, 1, 230, 3))
configCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 2501, 1, 230, 3, 1))
configGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 2501, 1, 230, 3, 2))
configCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 52, 2501, 1, 230, 3, 1, 1)).setObjects(("CTRON-SSR-CONFIG-MIB", "configGroup10"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    configCompliance = configCompliance.setStatus('obsolete')
configCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 52, 2501, 1, 230, 3, 1, 2)).setObjects(("CTRON-SSR-CONFIG-MIB", "configGroup20"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    configCompliance2 = configCompliance2.setStatus('current')
configGroup10 = ObjectGroup((1, 3, 6, 1, 4, 1, 52, 2501, 1, 230, 3, 2, 1)).setObjects(("CTRON-SSR-CONFIG-MIB", "cfgTransferOp"), ("CTRON-SSR-CONFIG-MIB", "cfgManagerAddress"), ("CTRON-SSR-CONFIG-MIB", "cfgFileName"), ("CTRON-SSR-CONFIG-MIB", "cfgActivateTransfer"), ("CTRON-SSR-CONFIG-MIB", "cfgTransferStatus"), ("CTRON-SSR-CONFIG-MIB", "cfgActivateFile"), ("CTRON-SSR-CONFIG-MIB", "cfgLastError"), ("CTRON-SSR-CONFIG-MIB", "cfgLastErrorReason"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    configGroup10 = configGroup10.setStatus('deprecated')
configGroup20 = ObjectGroup((1, 3, 6, 1, 4, 1, 52, 2501, 1, 230, 3, 2, 2)).setObjects(("CTRON-SSR-CONFIG-MIB", "cfgTransferOp"), ("CTRON-SSR-CONFIG-MIB", "cfgManagerAddress"), ("CTRON-SSR-CONFIG-MIB", "cfgFileName"), ("CTRON-SSR-CONFIG-MIB", "cfgActivateTransfer"), ("CTRON-SSR-CONFIG-MIB", "cfgTransferStatus"), ("CTRON-SSR-CONFIG-MIB", "cfgActivateFile"), ("CTRON-SSR-CONFIG-MIB", "cfgLastError"), ("CTRON-SSR-CONFIG-MIB", "cfgLastErrorReason"), ("CTRON-SSR-CONFIG-MIB", "cfgActiveImageVersion"), ("CTRON-SSR-CONFIG-MIB", "cfgActiveImageBootLocation"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    configGroup20 = configGroup20.setStatus('current')
# --- Export all symbols so other generated MIB modules can import them ---
mibBuilder.exportSymbols("CTRON-SSR-CONFIG-MIB", cfgManagerAddress=cfgManagerAddress, cfgActiveImageVersion=cfgActiveImageVersion, cfgActivateFile=cfgActivateFile, configGroups=configGroups, cfgLastErrorReason=cfgLastErrorReason, ssrConfigMIB=ssrConfigMIB, SSRErrorCode=SSRErrorCode, configCompliance=configCompliance, configGroup20=configGroup20, cfgGroup=cfgGroup, cfgActiveImageBootLocation=cfgActiveImageBootLocation, configConformance=configConformance, configCompliance2=configCompliance2, cfgFileName=cfgFileName, cfgLastError=cfgLastError, configGroup10=configGroup10, cfgTransferOp=cfgTransferOp, PYSNMP_MODULE_ID=ssrConfigMIB, cfgActivateTransfer=cfgActivateTransfer, cfgTransferStatus=cfgTransferStatus, configCompliances=configCompliances)
| 115.444444 | 751 | 0.74371 | 371 | 0.051011 | 0 | 0 | 0 | 0 | 0 | 0 | 2,332 | 0.320638 |
942894d912ee4a7ba9f0aefb2767a546ced6f224 | 19,551 | py | Python | demisto_client/demisto_api/models/widget.py | guytest/demisto-py | 8ca4f56a6177668151b5656cbe675a377003c0e9 | [
"Apache-2.0"
] | 59 | 2017-05-04T05:48:00.000Z | 2022-02-27T21:06:01.000Z | demisto_client/demisto_api/models/widget.py | guytest/demisto-py | 8ca4f56a6177668151b5656cbe675a377003c0e9 | [
"Apache-2.0"
] | 44 | 2017-05-09T17:42:43.000Z | 2022-03-30T05:55:44.000Z | demisto_client/demisto_api/models/widget.py | guytest/demisto-py | 8ca4f56a6177668151b5656cbe675a377003c0e9 | [
"Apache-2.0"
] | 37 | 2017-05-06T04:30:32.000Z | 2022-02-15T04:59:00.000Z | # coding: utf-8
"""
Demisto API
This is the public REST API to integrate with the demisto server. HTTP request can be sent using any HTTP-client. For an example dedicated client take a look at: https://github.com/demisto/demisto-py. Requests must include API-key that can be generated in the Demisto web client under 'Settings' -> 'Integrations' -> 'API keys' Optimistic Locking and Versioning\\: When using Demisto REST API, you will need to make sure to work on the latest version of the item (incident, entry, etc.), otherwise, you will get a DB version error (which not allow you to override a newer item). In addition, you can pass 'version\\: -1' to force data override (make sure that other users data might be lost). Assume that Alice and Bob both read the same data from Demisto server, then they both changed the data, and then both tried to write the new versions back to the server. Whose changes should be saved? Alice’s? Bob’s? To solve this, each data item in Demisto has a numeric incremental version. If Alice saved an item with version 4 and Bob trying to save the same item with version 3, Demisto will rollback Bob request and returns a DB version conflict error. Bob will need to get the latest item and work on it so Alice work will not get lost. Example request using 'curl'\\: ``` curl 'https://hostname:443/incidents/search' -H 'content-type: application/json' -H 'accept: application/json' -H 'Authorization: <API Key goes here>' --data-binary '{\"filter\":{\"query\":\"-status:closed -category:job\",\"period\":{\"by\":\"day\",\"fromValue\":7}}}' --compressed ``` # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from demisto_client.demisto_api.models.date_range import DateRange # noqa: F401,E501
from demisto_client.demisto_api.models.order import Order # noqa: F401,E501
class Widget(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'category': 'str',
'commit_message': 'str',
'data_type': 'str',
'date_range': 'DateRange',
'description': 'str',
'id': 'str',
'is_predefined': 'bool',
'locked': 'bool',
'modified': 'datetime',
'name': 'str',
'params': 'dict(str, object)',
'prev_name': 'str',
'query': 'str',
'should_commit': 'bool',
'size': 'int',
'sort': 'list[Order]',
'sort_values': 'list[str]',
'vc_should_ignore': 'bool',
'version': 'int',
'widget_type': 'str'
}
attribute_map = {
'category': 'category',
'commit_message': 'commitMessage',
'data_type': 'dataType',
'date_range': 'dateRange',
'description': 'description',
'id': 'id',
'is_predefined': 'isPredefined',
'locked': 'locked',
'modified': 'modified',
'name': 'name',
'params': 'params',
'prev_name': 'prevName',
'query': 'query',
'should_commit': 'shouldCommit',
'size': 'size',
'sort': 'sort',
'sort_values': 'sortValues',
'vc_should_ignore': 'vcShouldIgnore',
'version': 'version',
'widget_type': 'widgetType'
}
def __init__(self, category=None, commit_message=None, data_type=None, date_range=None, description=None, id=None, is_predefined=None, locked=None, modified=None, name=None, params=None, prev_name=None, query=None, should_commit=None, size=None, sort=None, sort_values=None, vc_should_ignore=None, version=None, widget_type=None): # noqa: E501
"""Widget - a model defined in Swagger""" # noqa: E501
self._category = None
self._commit_message = None
self._data_type = None
self._date_range = None
self._description = None
self._id = None
self._is_predefined = None
self._locked = None
self._modified = None
self._name = None
self._params = None
self._prev_name = None
self._query = None
self._should_commit = None
self._size = None
self._sort = None
self._sort_values = None
self._vc_should_ignore = None
self._version = None
self._widget_type = None
self.discriminator = None
if category is not None:
self.category = category
if commit_message is not None:
self.commit_message = commit_message
if data_type is not None:
self.data_type = data_type
if date_range is not None:
self.date_range = date_range
if description is not None:
self.description = description
if id is not None:
self.id = id
if is_predefined is not None:
self.is_predefined = is_predefined
if locked is not None:
self.locked = locked
if modified is not None:
self.modified = modified
self.name = name
if params is not None:
self.params = params
if prev_name is not None:
self.prev_name = prev_name
if query is not None:
self.query = query
if should_commit is not None:
self.should_commit = should_commit
if size is not None:
self.size = size
if sort is not None:
self.sort = sort
if sort_values is not None:
self.sort_values = sort_values
if vc_should_ignore is not None:
self.vc_should_ignore = vc_should_ignore
if version is not None:
self.version = version
self.widget_type = widget_type
@property
def category(self):
"""Gets the category of this Widget. # noqa: E501
Category the widget is related to. Used to display in widget library under category or dataType if empty. # noqa: E501
:return: The category of this Widget. # noqa: E501
:rtype: str
"""
return self._category
@category.setter
def category(self, category):
"""Sets the category of this Widget.
Category the widget is related to. Used to display in widget library under category or dataType if empty. # noqa: E501
:param category: The category of this Widget. # noqa: E501
:type: str
"""
self._category = category
@property
def commit_message(self):
"""Gets the commit_message of this Widget. # noqa: E501
:return: The commit_message of this Widget. # noqa: E501
:rtype: str
"""
return self._commit_message
@commit_message.setter
def commit_message(self, commit_message):
"""Sets the commit_message of this Widget.
:param commit_message: The commit_message of this Widget. # noqa: E501
:type: str
"""
self._commit_message = commit_message
@property
def data_type(self):
"""Gets the data_type of this Widget. # noqa: E501
Data type of the widget. Describes what data does the widget query. supporting data types \"incidents\",\"messages\",\"system\",\"entries\",\"tasks\", \"audit\". # noqa: E501
:return: The data_type of this Widget. # noqa: E501
:rtype: str
"""
return self._data_type
@data_type.setter
def data_type(self, data_type):
"""Sets the data_type of this Widget.
Data type of the widget. Describes what data does the widget query. supporting data types \"incidents\",\"messages\",\"system\",\"entries\",\"tasks\", \"audit\". # noqa: E501
:param data_type: The data_type of this Widget. # noqa: E501
:type: str
"""
self._data_type = data_type
@property
def date_range(self):
"""Gets the date_range of this Widget. # noqa: E501
:return: The date_range of this Widget. # noqa: E501
:rtype: DateRange
"""
return self._date_range
@date_range.setter
def date_range(self, date_range):
"""Sets the date_range of this Widget.
:param date_range: The date_range of this Widget. # noqa: E501
:type: DateRange
"""
self._date_range = date_range
@property
def description(self):
"""Gets the description of this Widget. # noqa: E501
The description of the widget's usage and data representation. # noqa: E501
:return: The description of this Widget. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Widget.
The description of the widget's usage and data representation. # noqa: E501
:param description: The description of this Widget. # noqa: E501
:type: str
"""
self._description = description
@property
def id(self):
"""Gets the id of this Widget. # noqa: E501
:return: The id of this Widget. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Widget.
:param id: The id of this Widget. # noqa: E501
:type: str
"""
self._id = id
@property
def is_predefined(self):
"""Gets the is_predefined of this Widget. # noqa: E501
Is the widget a system widget. # noqa: E501
:return: The is_predefined of this Widget. # noqa: E501
:rtype: bool
"""
return self._is_predefined
@is_predefined.setter
def is_predefined(self, is_predefined):
"""Sets the is_predefined of this Widget.
Is the widget a system widget. # noqa: E501
:param is_predefined: The is_predefined of this Widget. # noqa: E501
:type: bool
"""
self._is_predefined = is_predefined
@property
def locked(self):
"""Gets the locked of this Widget. # noqa: E501
Is the widget locked for editing. # noqa: E501
:return: The locked of this Widget. # noqa: E501
:rtype: bool
"""
return self._locked
@locked.setter
def locked(self, locked):
"""Sets the locked of this Widget.
Is the widget locked for editing. # noqa: E501
:param locked: The locked of this Widget. # noqa: E501
:type: bool
"""
self._locked = locked
@property
def modified(self):
"""Gets the modified of this Widget. # noqa: E501
:return: The modified of this Widget. # noqa: E501
:rtype: datetime
"""
return self._modified
@modified.setter
def modified(self, modified):
"""Sets the modified of this Widget.
:param modified: The modified of this Widget. # noqa: E501
:type: datetime
"""
self._modified = modified
@property
def name(self):
"""Gets the name of this Widget. # noqa: E501
Default name of the widget. # noqa: E501
:return: The name of this Widget. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Widget.
Default name of the widget. # noqa: E501
:param name: The name of this Widget. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def params(self):
"""Gets the params of this Widget. # noqa: E501
Additional parameters for this widget, depends on widget type and data. # noqa: E501
:return: The params of this Widget. # noqa: E501
:rtype: dict(str, object)
"""
return self._params
@params.setter
def params(self, params):
"""Sets the params of this Widget.
Additional parameters for this widget, depends on widget type and data. # noqa: E501
:param params: The params of this Widget. # noqa: E501
:type: dict(str, object)
"""
self._params = params
@property
def prev_name(self):
"""Gets the prev_name of this Widget. # noqa: E501
The previous name of the widget. # noqa: E501
:return: The prev_name of this Widget. # noqa: E501
:rtype: str
"""
return self._prev_name
@prev_name.setter
def prev_name(self, prev_name):
"""Sets the prev_name of this Widget.
The previous name of the widget. # noqa: E501
:param prev_name: The prev_name of this Widget. # noqa: E501
:type: str
"""
self._prev_name = prev_name
@property
def query(self):
"""Gets the query of this Widget. # noqa: E501
Query to search on the dataType. # noqa: E501
:return: The query of this Widget. # noqa: E501
:rtype: str
"""
return self._query
@query.setter
def query(self, query):
"""Sets the query of this Widget.
Query to search on the dataType. # noqa: E501
:param query: The query of this Widget. # noqa: E501
:type: str
"""
self._query = query
@property
def should_commit(self):
"""Gets the should_commit of this Widget. # noqa: E501
:return: The should_commit of this Widget. # noqa: E501
:rtype: bool
"""
return self._should_commit
@should_commit.setter
def should_commit(self, should_commit):
"""Sets the should_commit of this Widget.
:param should_commit: The should_commit of this Widget. # noqa: E501
:type: bool
"""
self._should_commit = should_commit
@property
def size(self):
"""Gets the size of this Widget. # noqa: E501
Maximum size for this widget data returned. # noqa: E501
:return: The size of this Widget. # noqa: E501
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this Widget.
Maximum size for this widget data returned. # noqa: E501
:param size: The size of this Widget. # noqa: E501
:type: int
"""
self._size = size
@property
def sort(self):
"""Gets the sort of this Widget. # noqa: E501
Sorting array to sort the data received by the given Order parameters. # noqa: E501
:return: The sort of this Widget. # noqa: E501
:rtype: list[Order]
"""
return self._sort
@sort.setter
def sort(self, sort):
"""Sets the sort of this Widget.
Sorting array to sort the data received by the given Order parameters. # noqa: E501
:param sort: The sort of this Widget. # noqa: E501
:type: list[Order]
"""
self._sort = sort
@property
def sort_values(self):
"""Gets the sort_values of this Widget. # noqa: E501
:return: The sort_values of this Widget. # noqa: E501
:rtype: list[str]
"""
return self._sort_values
@sort_values.setter
def sort_values(self, sort_values):
"""Sets the sort_values of this Widget.
:param sort_values: The sort_values of this Widget. # noqa: E501
:type: list[str]
"""
self._sort_values = sort_values
@property
def vc_should_ignore(self):
"""Gets the vc_should_ignore of this Widget. # noqa: E501
:return: The vc_should_ignore of this Widget. # noqa: E501
:rtype: bool
"""
return self._vc_should_ignore
@vc_should_ignore.setter
def vc_should_ignore(self, vc_should_ignore):
"""Sets the vc_should_ignore of this Widget.
:param vc_should_ignore: The vc_should_ignore of this Widget. # noqa: E501
:type: bool
"""
self._vc_should_ignore = vc_should_ignore
@property
def version(self):
"""Gets the version of this Widget. # noqa: E501
:return: The version of this Widget. # noqa: E501
:rtype: int
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this Widget.
:param version: The version of this Widget. # noqa: E501
:type: int
"""
self._version = version
@property
def widget_type(self):
"""Gets the widget_type of this Widget. # noqa: E501
Widget type describes how does the widget should recieve the data, and display it. Supporting types: \"bar\", \"column\", \"pie\", \"list\", \"number\", \"trend\", \"text\", \"duration\", \"image\", \"line\", and \"table\". # noqa: E501
:return: The widget_type of this Widget. # noqa: E501
:rtype: str
"""
return self._widget_type
@widget_type.setter
def widget_type(self, widget_type):
"""Sets the widget_type of this Widget.
Widget type describes how does the widget should recieve the data, and display it. Supporting types: \"bar\", \"column\", \"pie\", \"list\", \"number\", \"trend\", \"text\", \"duration\", \"image\", \"line\", and \"table\". # noqa: E501
:param widget_type: The widget_type of this Widget. # noqa: E501
:type: str
"""
if widget_type is None:
raise ValueError("Invalid value for `widget_type`, must not be `None`") # noqa: E501
self._widget_type = widget_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Widget, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Widget):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 30.596244 | 1,584 | 0.591223 | 17,598 | 0.899923 | 0 | 0 | 11,734 | 0.600051 | 0 | 0 | 11,457 | 0.585886 |
9428c808c828141ff45952921a511681e35a77df | 410 | py | Python | saliency_web_mapper/config/environment.py | HeosSacer/saliency_web_mapper | a2fd744b821086dc1a0af0498361207f7bcddee6 | [
"MIT"
] | null | null | null | saliency_web_mapper/config/environment.py | HeosSacer/saliency_web_mapper | a2fd744b821086dc1a0af0498361207f7bcddee6 | [
"MIT"
] | null | null | null | saliency_web_mapper/config/environment.py | HeosSacer/saliency_web_mapper | a2fd744b821086dc1a0af0498361207f7bcddee6 | [
"MIT"
] | null | null | null | from saliency_web_mapper.config.typesafe_dataclass import TypesafeDataclass
from typing import List, Dict, Tuple, Sequence
class SaliencyWebMapperEnvironment(TypesafeDataclass):
# Defaults with type
url: str = 'http://localhost:3001/'
window_name: str = 'Cart 4.0'
# Debug
debug: bool = False
debug_address: str = "192.168.178.33"
def __init__(self):
super().__init__()
| 25.625 | 75 | 0.707317 | 284 | 0.692683 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.187805 |
942ac8198c8de838154ed09c3da64a2f9cbb9cfe | 1,306 | py | Python | setup.py | web-eid/mobile-id-rest-python-client | 31586f505121c016f6d0be5690205f0ca9310dae | [
"MIT"
] | null | null | null | setup.py | web-eid/mobile-id-rest-python-client | 31586f505121c016f6d0be5690205f0ca9310dae | [
"MIT"
] | null | null | null | setup.py | web-eid/mobile-id-rest-python-client | 31586f505121c016f6d0be5690205f0ca9310dae | [
"MIT"
] | null | null | null | import os
import os  # re-imported locally so this block is self-contained
from setuptools import find_packages, setup
VERSION = "0.0.1"
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
description = "Estonian Mobile-ID Python client is an Python library that can be used for easy integration with Mobile-ID REST service"
long_description = description
if os.path.exists("README.txt"):
    # Use a context manager so the file handle is closed deterministically
    # (the previous bare open().read() leaked the handle).
    with open("README.txt") as readme_file:
        long_description = readme_file.read()
setup(
    name="mobile-id-rest-python-client",
    version=VERSION,
    packages=find_packages(),
    include_package_data=True,
    license="MIT License",
    description=description,
    long_description=long_description,
    url="https://github.com/web-eid/mobile-id-rest-python-client",
    download_url=f"https://github.com/web-eid/mobile-id-rest-python-client/archive/{VERSION}.zip",
    author="Mart Sõmermaa",
    author_email="mrts.pydev@gmail.com",
    keywords=[
        "Mobile-ID",
        "REST",
    ],
    install_requires=["cryptography", "requests"],
    classifiers=[
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Topic :: Internet :: WWW/HTTP",
    ],
)
| 32.65 | 135 | 0.680704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 655 | 0.501148 |
942b66c8fc0310ad42fc87a38834d2efbab930c1 | 185 | py | Python | tests/core/test_setproctitle.py | STATION-I/STAI-blockchain | a8ca05cbd2602eee7c2e4ce49c74c447a091ef0f | [
"Apache-2.0"
] | 10 | 2021-10-02T18:33:56.000Z | 2021-11-14T17:10:48.000Z | tests/core/test_setproctitle.py | STATION-I/STAI-blockchain | a8ca05cbd2602eee7c2e4ce49c74c447a091ef0f | [
"Apache-2.0"
] | 14 | 2021-10-07T22:10:15.000Z | 2021-12-21T09:13:49.000Z | tests/core/test_setproctitle.py | STATION-I/STAI-blockchain | a8ca05cbd2602eee7c2e4ce49c74c447a091ef0f | [
"Apache-2.0"
] | 6 | 2021-10-29T19:36:59.000Z | 2021-12-19T19:52:57.000Z | import unittest
from stai.util.setproctitle import setproctitle
class TestSetProcTitle(unittest.TestCase):
    """Smoke test for stai.util.setproctitle."""
    def test_does_not_crash(self):
        # Only asserts that calling it raises no exception; the resulting
        # process title is not verified here.
        setproctitle("stai test title")
| 20.555556 | 47 | 0.778378 | 117 | 0.632432 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.091892 |
942c0a693ba17d410d8999d5102376d8d5967101 | 8,825 | py | Python | eurlex2lexparency/transformation/utils/liap.py | Lexparency/eurlex2lexparency | b4958f6fea5c2207eb06d2c3b91be798720c94bd | [
"MIT"
] | null | null | null | eurlex2lexparency/transformation/utils/liap.py | Lexparency/eurlex2lexparency | b4958f6fea5c2207eb06d2c3b91be798720c94bd | [
"MIT"
] | null | null | null | eurlex2lexparency/transformation/utils/liap.py | Lexparency/eurlex2lexparency | b4958f6fea5c2207eb06d2c3b91be798720c94bd | [
"MIT"
] | null | null | null | """
"""
import re
from collections import namedtuple
from functools import lru_cache
from lexref.model import Value
__all__ = ['ListItemsAndPatterns']
romans_pattern = Value.tag_2_pattern('EN')['ROM_L'].pattern.strip('b\\()')
_eur_lex_item_patterns_en = { # key: (itemization-character-pattern, ordered [bool], first two items, decorations)
# TODO: Amendments could cause Itemizations of the type "5a. ". Keep that in mind and see if / how the code
# TODO: can cope with that.
'nump': (re.compile(r'^[1-9][0-9]{,3}\.' + chr(160) + '{,3}', flags=re.UNICODE), True,
('1', '2'), '(). ' + chr(160)),
'numpt': (re.compile(r'^[0-9]{1,3}\.?(?!([0-9/();]| of))'), True, # TODO: This pattern does not belong here!
('1', '2'), '.'), # TODO: => get rid of it!
'numbr': (re.compile(r'^\([0-9]{1,3}\)'), True,
('1', '2'), '()'), # 2
'alpha': (re.compile(r'^\([a-z]\)'), True,
('a', 'b'), '()'), # 3
'roman': (re.compile(r'^\((' + romans_pattern + r')\)'), True,
('i', 'ii'), '()'),
'dash': (re.compile(u'^(—|' + chr(8212) + ')', flags=re.UNICODE), False, None, None)
}
_eur_lex_item_patterns_es = { # key: (itemization-character-pattern, ordered [bool], first two items, decorations)
'nump': (re.compile(r'^[1-9][0-9]{,3}\.' + chr(160) + '{,3}', flags=re.UNICODE), True,
('1', '2'), '(). ' + chr(160)),
'numpt': (re.compile(r'^[0-9]{1,3}\.?(?!([0-9/();]| de))'), True,
# TODO: This pattern does not belong here!
('1', '2'), '.'), # TODO: => get rid of it!
'numbr': (re.compile(r'^\([0-9]{1,3}\)'), True,
('1', '2'), '()'), # 2
'alpha': (re.compile(r'^\([a-z]\)'), True,
('a', 'b'), '()'), # 3
'roman': (re.compile(r'^\((' + romans_pattern + r')\)'), True,
('i', 'ii'), '()'),
'dash': (re.compile(u'^(—|' + chr(8212) + ')', flags=re.UNICODE),
False, None, None)
}
_eur_lex_item_patterns_de = { # key: (itemization-character-pattern, ordered [bool], first two items, decorations)
'nump': (re.compile(r'^\([0-9]{1,3}\)'), True, ('1', '2'), '()'), # 2
'alpha': (re.compile(r'^\([a-z]\)'), True, ('a', 'b'), '()'), # 3
'roman': (re.compile(r'^\((' + romans_pattern + r')\)'), True, ('i', 'ii'), '()'),
'dash': (re.compile(u'^(—|' + chr(8212) + ')', flags=re.UNICODE),
False, None, None)
}
_eur_lex_item_patterns_hierarchy = ['nump', 'numpt', 'numbr', 'alpha', 'roman', 'dash']
class ListItemPattern:
FirstSecond = namedtuple('FirstSecond', ['first', 'second'])
def __init__(self, tag, # Tag is used as CSS class on the surface
item_pattern, ordered, first_two_items, decoration):
self.item_pattern = item_pattern
self.tag = tag
self.ordered = ordered
self.first_two_items = (None if first_two_items is None
else self.FirstSecond(*first_two_items))
self.decoration = decoration
@classmethod
@lru_cache()
def create(cls, tag, # Tag is used as CSS class on the surface
item_pattern, ordered, first_two_items, decoration):
return cls(tag, item_pattern, ordered, first_two_items, decoration)
@lru_cache()
class ListItemsAndPatterns:
    """Language/domain specific recogniser for list-item labels.

    Given a label string such as ``"(a)"``, ``"(1)"``, ``"(i)"`` or a dash,
    :meth:`get_list_item_tag` proposes the itemization tag(s) that could
    have produced it and resolves ambiguities (e.g. ``"(i)"`` may be
    alphabetic or roman) using neighbouring items and the tag hierarchy.

    NOTE: the class object itself is wrapped in ``lru_cache``, so calling
    ``ListItemsAndPatterns(lang, domain)`` repeatedly with the same
    arguments returns the same (already initialised) instance.
    """
    # (tags, inner): candidate tag set and the bare label (e.g. 'a' for '(a)').
    TagProposal = namedtuple('TagProposal', ['tags', 'inner'])
    # Explicit dispatch table; replaces the previous eval()-based lookup of
    # the module-level ``_eur_lex_item_patterns_<lang>`` names.
    _PATTERNS_BY_LANGUAGE = {
        'en': _eur_lex_item_patterns_en,
        'es': _eur_lex_item_patterns_es,
        'de': _eur_lex_item_patterns_de,
    }

    def __init__(self, language, document_domain, known_firsts=False):
        """
        :arg language: two-letter language code (en/es/de), case-insensitive.
        :arg document_domain: only 'eu' is supported.
        :arg known_firsts: stored flag; not used inside this class.
        :raises NotImplementedError: for unsupported language or domain.
        """
        if document_domain.lower() != 'eu':
            raise NotImplementedError(f'It seems that the time has come to '
                                      f'implement domain {document_domain}')
        try:
            raw_patterns = self._PATTERNS_BY_LANGUAGE[language.lower()]
        except KeyError:
            raise NotImplementedError(
                f'It seems that the time has come to implement '
                f'language {language} for domain eu.'
            ) from None
        self.list_item_patterns = {
            key: ListItemPattern.create(key, *value)
            for key, value in raw_patterns.items()
        }
        self.known_firsts = known_firsts
        # Union of all item patterns: matches any known item label.
        self.list_label_generic = re.compile('^(' + '|'.join(
            ['(' + x.item_pattern.pattern.strip('^') + ')'
             for x in self.list_item_patterns.values()]) + ')')
        self.tag_hierarchy = _eur_lex_item_patterns_hierarchy

    def get_list_item_tag(self, arg, force_ambivalence_resolution=True):
        """Propose itemization tag(s) for a label or a list of labels.

        :arg arg:
            A single label string, or a list of label strings (one per item
            of the same itemization).
        :arg force_ambivalence_resolution:
            Only relevant for a single string: when True, resolve the
            ambiguity (via the single-element list path) and return one
            TagProposal; when False, return a TagProposal whose ``tags``
            set may contain several candidates.
        """
        if type(arg) is str:
            if force_ambivalence_resolution:
                return self.get_list_item_tag([arg])[0]
            tag_candidates = set()
            inner = None
            for list_item_pattern in self.list_item_patterns.values():
                m = list_item_pattern.item_pattern.match(arg)
                if m is None:
                    continue
                # Bare label with the decoration (parentheses, dots, NBSP)
                # stripped; decoration None strips plain whitespace.
                stripped = m.group(0).strip(list_item_pattern.decoration)
                if inner is None:
                    inner = stripped
                elif inner != stripped:
                    raise RuntimeError("Unexpected ambivalence (type 0) "
                                       "within ListItemsHandler")
                tag_candidates.add(list_item_pattern.tag)
            return self.TagProposal(tag_candidates, inner)
        elif type(arg) is list:
            tags_list = [
                self.get_list_item_tag(it, force_ambivalence_resolution=False)
                for it in arg
            ]
            self._resolve_ambivalences(tags_list)
            return tags_list

    def __getitem__(self, item):
        """Return the ListItemPattern registered under tag ``item``."""
        return self.list_item_patterns[item]

    def _resolve_ambivalences(self, tag_candidates_list):
        """Resolve, in place, ambiguous entries of ``tag_candidates_list``.

        Strategy per ambiguous entry, in order:
          1. adopt the tags of an unambiguous *successor*, unless that
             successor starts a new sub-list (its label equals its pattern's
             first item);
          2. adopt (or exclude) the tag of an unambiguous *predecessor*;
          3. fall back to the first matching tag in ``self.tag_hierarchy``.
        Recurses until no entry holds more than one candidate tag.

        NOTE(review): inputs without a unique solution are decided by the
        hierarchy fall-back, and the full nested structure of the
        itemization is not taken into account — known limitation.
        """
        def ambivalence_resolvable(tag_list):
            # True when at least two candidate tags denote different
            # patterns. ListItemPattern instances are interned by
            # ListItemPattern.create, so the identity-based ``!=`` below is
            # meaningful.
            for tag_l in tag_list:
                for tag_r in tag_list:
                    if tag_r > tag_l and self[tag_l] != self[tag_r]:
                        return True
            return False

        ambivalent_cases = [k for k, (tags, inner) in enumerate(tag_candidates_list)
                            if ambivalence_resolvable(tags)]
        for k in ambivalent_cases:
            case = tag_candidates_list[k]
            if k < len(tag_candidates_list) - 1:
                subsequent = tag_candidates_list[k + 1]
                if k + 1 not in ambivalent_cases:
                    # Unambiguous successor determines the tag, unless it is
                    # the first item of a fresh (sub-)list.
                    if subsequent.tags.issubset(case.tags) \
                            and self[subsequent.tags.copy().pop()].first_two_items.first != subsequent.inner:
                        tag_candidates_list[k] = self.TagProposal(subsequent.tags, case.inner)
                        continue
            if k > 0:  # No usable successor, but a predecessor exists.
                preceding = tag_candidates_list[k - 1]
                if k - 1 not in ambivalent_cases:
                    if preceding.tags.issubset(case.tags):
                        if self[preceding.tags.copy().pop()].first_two_items.first != case.inner:
                            tag_candidates_list[k] = self.TagProposal(preceding.tags, case.inner)
                            continue
                        else:
                            # ``case`` restarts a list, so it cannot continue
                            # the predecessor's style: drop that candidate.
                            case.tags.remove(preceding.tags.copy().pop())
                            continue
            for tag in self.tag_hierarchy:  # map to hierarchy and take the first one.
                if tag in case.tags:
                    tag_candidates_list[k] = self.TagProposal({tag}, case.inner)
                    # BUGFIX: was ``continue``, which kept iterating and made
                    # the LAST matching hierarchy tag win instead of the first.
                    break
        if any(len(proposal.tags) > 1 for proposal in tag_candidates_list):
            self._resolve_ambivalences(tag_candidates_list)
| 47.446237 | 115 | 0.546062 | 6,239 | 0.706969 | 0 | 0 | 5,747 | 0.651218 | 0 | 0 | 2,602 | 0.294844 |
942c240cacbca2c4496639534336a931e698439a | 409 | py | Python | setup.py | JulianGindi/auto-semver | 8e98a341155ac44a698310b08177c3e8c27aa201 | [
"MIT"
] | 5 | 2019-12-18T17:35:09.000Z | 2021-07-06T01:20:58.000Z | setup.py | JulianGindi/auto-increment-semver | 8e98a341155ac44a698310b08177c3e8c27aa201 | [
"MIT"
] | null | null | null | setup.py | JulianGindi/auto-increment-semver | 8e98a341155ac44a698310b08177c3e8c27aa201 | [
"MIT"
] | 1 | 2020-11-13T05:51:35.000Z | 2020-11-13T05:51:35.000Z | from setuptools import setup
setup(
name="auto-semver",
version="0.8.0",
description="Semver swiss-army knife",
url="http://github.com/juliangindi/auto-semver",
author="Julian Gindi",
author_email="julian@gindi.io",
license="MIT",
packages=["auto_semver"],
zip_safe=False,
entry_points={
"console_scripts": ["auto-semver=auto_semver.__main__:main",]
},
)
| 22.722222 | 69 | 0.650367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.471883 |
942e67e6084cbe0943f0556a6207b5e4ed9dc683 | 42,733 | py | Python | libyang/schema.py | pepa-cz/libyang-python | 634fc348c775890467d1b488bc666af4a2f3ebd1 | [
"MIT"
] | null | null | null | libyang/schema.py | pepa-cz/libyang-python | 634fc348c775890467d1b488bc666af4a2f3ebd1 | [
"MIT"
] | null | null | null | libyang/schema.py | pepa-cz/libyang-python | 634fc348c775890467d1b488bc666af4a2f3ebd1 | [
"MIT"
] | null | null | null | # Copyright (c) 2018-2019 Robin Jarry
# Copyright (c) 2021 RACOM s.r.o.
# SPDX-License-Identifier: MIT
from contextlib import suppress
from typing import IO, Any, Dict, Iterator, Optional, Tuple, Union
from _libyang import ffi, lib
from .util import IOType, c2str, init_output, ly_array_iter, str2c
# -------------------------------------------------------------------------------------
def schema_in_format(fmt_string: str) -> int:
    """Translate a schema input format name ("yang"/"yin") to a LYS_IN_* flag."""
    known = {
        "yang": lib.LYS_IN_YANG,
        "yin": lib.LYS_IN_YIN,
    }
    try:
        return known[fmt_string]
    except KeyError:
        raise ValueError("unknown schema input format: %r" % fmt_string) from None
# -------------------------------------------------------------------------------------
def schema_out_format(fmt_string: str) -> int:
    """Translate a schema output format name ("yang"/"yin"/"tree") to a LYS_OUT_* flag."""
    known = {
        "yang": lib.LYS_OUT_YANG,
        "yin": lib.LYS_OUT_YIN,
        "tree": lib.LYS_OUT_TREE,
    }
    try:
        return known[fmt_string]
    except KeyError:
        raise ValueError("unknown schema output format: %r" % fmt_string) from None
# -------------------------------------------------------------------------------------
def printer_flags(
    no_substmt: bool = False,
    shrink: bool = False,
) -> int:
    """Combine the LYS_PRINT_* option bits for schema printing."""
    result = 0
    for enabled, bit in (
        (no_substmt, lib.LYS_PRINT_NO_SUBSTMT),
        (shrink, lib.LYS_PRINT_SHRINK),
    ):
        if enabled:
            result |= bit
    return result
# -------------------------------------------------------------------------------------
class Module:
    """Wrapper around a libyang ``struct lys_module *``."""
    __slots__ = ("context", "cdata")
    def __init__(self, context: "libyang.Context", cdata):
        self.context = context
        self.cdata = cdata  # C type: "struct lys_module *"
    def name(self) -> str:
        """Return the module name."""
        return c2str(self.cdata.name)
    def prefix(self) -> str:
        """Return the module prefix."""
        return c2str(self.cdata.prefix)
    def description(self) -> Optional[str]:
        """Return the module description, if any."""
        return c2str(self.cdata.dsc)
    def filepath(self) -> Optional[str]:
        """Return the path of the file the module was loaded from, if known."""
        return c2str(self.cdata.filepath)
    def implemented(self) -> bool:
        """Return True if the module is implemented (not only imported)."""
        return bool(self.cdata.implemented)
    def feature_enable(self, name: str) -> None:
        """Enable the given feature (supports '*' for all).

        :raises LibyangError: if the feature does not exist.
        """
        p = str2c(name)
        # NULL-terminated char* array as expected by lys_set_implemented().
        q = ffi.new("char *[2]", [p, ffi.NULL])
        ret = lib.lys_set_implemented(self.cdata, q)
        if ret != lib.LY_SUCCESS:
            raise self.context.error("no such feature: %r" % name)
    def feature_enable_all(self) -> None:
        """Enable all features of the module."""
        self.feature_enable("*")
    def feature_disable_all(self) -> None:
        """Disable all features of the module."""
        # An empty feature list disables everything.
        val = ffi.new("char **", ffi.NULL)
        ret = lib.lys_set_implemented(self.cdata, val)
        if ret != lib.LY_SUCCESS:
            raise self.context.error("cannot disable all features")
    def feature_state(self, name: str) -> bool:
        """Return True if the named feature is enabled, False if disabled.

        :raises LibyangError: if the feature does not exist.
        """
        ret = lib.lys_feature_value(self.cdata, str2c(name))
        if ret == lib.LY_SUCCESS:
            return True
        if ret == lib.LY_ENOT:
            return False
        raise self.context.error("no such feature: %r" % name)
    def features(self) -> Iterator["Feature"]:
        """Iterate over all features of the module."""
        # Collect the raw pointers first, then yield wrappers, so that the
        # libyang iteration state (idx) is not interleaved with user code.
        features_list = []
        f = ffi.NULL
        idx = ffi.new("uint32_t *")
        while True:
            f = lib.lysp_feature_next(f, self.cdata.parsed, idx)
            if f == ffi.NULL:
                break
            features_list.append(f)
        for i in features_list:
            yield Feature(self.context, i)
    def get_feature(self, name: str) -> "Feature":
        """Return the named feature.

        :raises LibyangError: if the feature does not exist.
        """
        for f in self.features():
            if f.name() == name:
                return f
        raise self.context.error("no such feature: %r" % name)
    def revisions(self) -> Iterator["Revision"]:
        """Iterate over the module's revision statements."""
        for revision in ly_array_iter(self.cdata.parsed.revs):
            yield Revision(self.context, revision, self)
    def __iter__(self) -> Iterator["SNode"]:
        return self.children()
    def children(self, types: Optional[Tuple[int, ...]] = None) -> Iterator["SNode"]:
        """Iterate over the top-level schema nodes, optionally filtered by node type."""
        return iter_children(self.context, self.cdata, types=types)
    def __str__(self) -> str:
        return self.name()
    def print(
        self,
        fmt: str,
        out_type: IOType,
        out_target: Union[IO, str, None] = None,
        printer_no_substmt: bool = False,
        printer_shrink: bool = False,
    ) -> Union[str, bytes, None]:
        """Print the module schema.

        :arg fmt: output format name ("yang", "yin" or "tree").
        :arg out_type: where to print (memory, fd, file path, ...).
        :arg out_target: target for non-memory output types.
        :returns: the printed text for memory output, None otherwise.
        :raises LibyangError: on output initialization or write failure.
        """
        fmt = schema_out_format(fmt)
        flags = printer_flags(no_substmt=printer_no_substmt, shrink=printer_shrink)
        out_data = ffi.new("struct ly_out **")
        ret, output = init_output(out_type, out_target, out_data)
        if ret != lib.LY_SUCCESS:
            raise self.context.error("failed to initialize output target")
        ret = lib.lys_print_module(out_data[0], self.cdata, fmt, 0, flags)
        if output is not None:
            # Memory output: copy the C buffer into a python string and free it.
            tmp = output[0]
            output = c2str(tmp)
            lib.free(tmp)
        lib.ly_out_free(out_data[0], ffi.NULL, False)
        if ret != lib.LY_SUCCESS:
            raise self.context.error("failed to write data")
        return output
    def print_mem(
        self,
        fmt: str = "tree",
        printer_no_substmt: bool = False,
        printer_shrink: bool = False,
    ) -> Union[str, bytes]:
        """Print the module schema to memory and return it."""
        return self.print(
            fmt,
            IOType.MEMORY,
            None,
            printer_no_substmt=printer_no_substmt,
            printer_shrink=printer_shrink,
        )
    def print_file(
        self,
        fileobj: IO,
        fmt: str = "tree",
        printer_no_substmt: bool = False,
        printer_shrink: bool = False,
    ) -> None:
        """Print the module schema into an open file object."""
        return self.print(
            fmt,
            IOType.FD,
            fileobj,
            printer_no_substmt=printer_no_substmt,
            printer_shrink=printer_shrink,
        )
    def parse_data_dict(
        self,
        dic: Dict[str, Any],
        no_state: bool = False,
        validate_present: bool = False,
        validate: bool = True,
        strict: bool = False,
        rpc: bool = False,
        rpcreply: bool = False,
        notification: bool = False,
    ) -> "libyang.data.DNode":
        """
        Convert a python dictionary to a DNode object following the schema of this
        module. The returned value is always a top-level data node (i.e.: without
        parent).
        :arg dic:
            The python dictionary to convert.
        :arg no_state:
            Consider state data not allowed and raise an error during validation if they are found.
        :arg validate_present:
            Validate result of the operation against schema.
        :arg validate:
            Run validation on result of the operation.
        :arg strict:
            Instead of ignoring data without schema definition, raise an error.
        :arg rpc:
            Data represents RPC or action input parameters.
        :arg rpcreply:
            Data represents RPC or action output parameters.
        :arg notification:
            Data represents a NETCONF notification.
        """
        from .data import dict_to_dnode  # circular import
        return dict_to_dnode(
            dic,
            self,
            no_state=no_state,
            validate_present=validate_present,
            validate=validate,
            strict=strict,
            rpc=rpc,
            rpcreply=rpcreply,
            notification=notification,
        )
# -------------------------------------------------------------------------------------
class Revision:
    """One ``revision`` statement of a YANG module (parsed representation)."""

    __slots__ = ("context", "cdata", "module")

    def __init__(self, context: "libyang.Context", cdata, module):
        self.context = context
        # C type: "struct lysp_revision *"
        self.cdata = cdata
        self.module = module

    def date(self) -> str:
        return c2str(self.cdata.date)

    def description(self) -> Optional[str]:
        return c2str(self.cdata.dsc)

    def reference(self) -> Optional[str]:
        return c2str(self.cdata.ref)

    def extensions(self) -> Iterator["ExtensionParsed"]:
        for cdata_ext in ly_array_iter(self.cdata.exts):
            yield ExtensionParsed(self.context, cdata_ext, self.module)

    def get_extension(
        self, name: str, prefix: Optional[str] = None, arg_value: Optional[str] = None
    ) -> Optional["ExtensionParsed"]:
        """Return the first extension matching every provided criterion."""

        def _matches(ext):
            if ext.name() != name:
                return False
            if prefix is not None and ext.module().name() != prefix:
                return False
            if arg_value is not None and ext.argument() != arg_value:
                return False
            return True

        return next((ext for ext in self.extensions() if _matches(ext)), None)

    def __repr__(self):
        cls = self.__class__
        return "<%s.%s: %s>" % (cls.__module__, cls.__name__, str(self))

    def __str__(self):
        return self.date()
# -------------------------------------------------------------------------------------
class Extension:
    """Base wrapper around a libyang extension instance.

    Subclasses specialize on whether the underlying C data comes from the
    parsed (:class:`ExtensionParsed`) or compiled
    (:class:`ExtensionCompiled`) schema tree.
    """

    __slots__ = ("context", "cdata")

    def __init__(self, context: "libyang.Context", cdata, module_parent: Optional[Module] = None):
        # NOTE(review): ``module_parent`` is accepted but never stored by the
        # base class; only ExtensionParsed makes use of it. Kept for
        # signature compatibility with existing callers — confirm before
        # removing.
        self.context = context
        self.cdata = cdata

    def argument(self) -> Optional[str]:
        """Return the extension instance argument string, if any."""
        return c2str(self.cdata.argument)

    def name(self) -> str:
        # Fallback only: subclasses override this with a real name lookup;
        # the base class can merely stringify the raw cdata object.
        return str(self.cdata)

    def __repr__(self):
        cls = self.__class__
        return "<%s.%s: %s>" % (cls.__module__, cls.__name__, str(self))

    def __str__(self):
        return self.name()
# -------------------------------------------------------------------------------------
class ExtensionParsed(Extension):
    """Extension instance taken from the parsed (lysp) schema tree."""

    __slots__ = ("module_parent",)

    def __init__(self, context: "libyang.Context", cdata, module_parent: Module = None):
        super().__init__(context, cdata)
        self.module_parent = module_parent

    def _module_from_parsed(self) -> Module:
        # Parsed extension names look like "<prefix>:<name>"; resolve the
        # prefix against the imports of the parent module.
        prefix = c2str(self.cdata.name).split(":")[0]
        imports = self.module_parent.cdata.parsed.imports
        for imported in ly_array_iter(imports):
            if ffi.string(imported.prefix).decode() == prefix:
                return Module(self.context, imported.module)
        raise self.context.error("cannot get module")

    def name(self) -> str:
        parts = c2str(self.cdata.name).split(":")
        return parts[1]

    def module(self) -> Module:
        return self._module_from_parsed()
# -------------------------------------------------------------------------------------
class ExtensionCompiled(Extension):
    """Extension instance taken from the compiled (lysc) schema tree."""

    __slots__ = ("cdata_def",)

    def __init__(self, context: "libyang.Context", cdata):
        super().__init__(context, cdata)
        # "def" is a python keyword, hence the getattr() indirection to read
        # the C struct member of that name.
        self.cdata_def = getattr(cdata, "def", None)

    def name(self) -> str:
        return c2str(self.cdata_def.name)

    def module(self) -> Module:
        mod = self.cdata_def.module
        if not mod:
            raise self.context.error("cannot get module")
        return Module(self.context, mod)
# -------------------------------------------------------------------------------------
class _EnumBit:
    """Shared implementation for enumeration values and bit definitions."""

    __slots__ = ("context", "cdata")

    def __init__(self, context: "libyang.Context", cdata):
        self.context = context
        # C type "struct lys_type_bit" or "struct lys_type_enum"
        self.cdata = cdata

    def position(self) -> int:
        return self.cdata.position

    def value(self) -> int:
        return self.cdata.value

    def name(self) -> str:
        return c2str(self.cdata.name)

    def description(self) -> str:
        return c2str(self.cdata.dsc)

    def deprecated(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_STATUS_DEPRC)

    def obsolete(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_STATUS_OBSLT)

    def status(self) -> str:
        """Return "obsolete", "deprecated" or "current"."""
        flags = self.cdata.flags
        if flags & lib.LYS_STATUS_OBSLT:
            return "obsolete"
        if flags & lib.LYS_STATUS_DEPRC:
            return "deprecated"
        return "current"

    def __repr__(self):
        cls = self.__class__
        return "<%s.%s: %s>" % (cls.__module__, cls.__name__, self)

    def __str__(self):
        return self.name()
# -------------------------------------------------------------------------------------
class Enum(_EnumBit):
    """A single value of a YANG "enumeration" type."""
# -------------------------------------------------------------------------------------
class Bit(_EnumBit):
    """A single bit definition of a YANG "bits" type."""
# -------------------------------------------------------------------------------------
class Type:
    """Wrapper around a compiled YANG type ("struct lysc_type *").

    A parsed counterpart ("struct lysp_type *") is attached when available;
    Type objects derived via :meth:`leafref_type` or :meth:`union_types`
    carry ``cdata_parsed = None``, so every accessor that reads the parsed
    struct must guard against it.
    """

    __slots__ = ("context", "cdata", "cdata_parsed")

    UNKNOWN = lib.LY_TYPE_UNKNOWN
    BINARY = lib.LY_TYPE_BINARY
    UINT8 = lib.LY_TYPE_UINT8
    UINT16 = lib.LY_TYPE_UINT16
    UINT32 = lib.LY_TYPE_UINT32
    UINT64 = lib.LY_TYPE_UINT64
    STRING = lib.LY_TYPE_STRING
    BITS = lib.LY_TYPE_BITS
    BOOL = lib.LY_TYPE_BOOL
    DEC64 = lib.LY_TYPE_DEC64
    EMPTY = lib.LY_TYPE_EMPTY
    ENUM = lib.LY_TYPE_ENUM
    IDENT = lib.LY_TYPE_IDENT
    INST = lib.LY_TYPE_INST
    LEAFREF = lib.LY_TYPE_LEAFREF
    UNION = lib.LY_TYPE_UNION
    INT8 = lib.LY_TYPE_INT8
    INT16 = lib.LY_TYPE_INT16
    INT32 = lib.LY_TYPE_INT32
    INT64 = lib.LY_TYPE_INT64
    BASENAMES = {
        UNKNOWN: "unknown",
        BINARY: "binary",
        UINT8: "uint8",
        UINT16: "uint16",
        UINT32: "uint32",
        UINT64: "uint64",
        STRING: "string",
        BITS: "bits",
        BOOL: "boolean",
        DEC64: "decimal64",
        EMPTY: "empty",
        ENUM: "enumeration",
        IDENT: "identityref",
        INST: "instance-id",
        LEAFREF: "leafref",
        UNION: "union",
        INT8: "int8",
        INT16: "int16",
        INT32: "int32",
        INT64: "int64",
    }

    def __init__(self, context: "libyang.Context", cdata, cdata_parsed):
        self.context = context
        self.cdata = cdata  # C type: "struct lysc_type*"
        self.cdata_parsed = cdata_parsed  # C type: "struct lysp_type*" or None

    def get_bases(self) -> Iterator["Type"]:
        """Recursively resolve leafrefs and unions down to builtin types."""
        if self.cdata.basetype == lib.LY_TYPE_LEAFREF:
            yield from self.leafref_type().get_bases()
        elif self.cdata.basetype == lib.LY_TYPE_UNION:
            for t in self.union_types():
                yield from t.get_bases()
        else:  # builtin type
            yield self

    def name(self) -> str:
        """Return the (possibly typedef'ed) type name, or the base name."""
        if self.cdata_parsed is not None and self.cdata_parsed.name:
            return c2str(self.cdata_parsed.name)
        return self.basename()

    def description(self) -> Optional[str]:
        # Not available on compiled types; kept for API symmetry.
        return None

    def base(self) -> int:
        return self.cdata.basetype

    def bases(self) -> Iterator[int]:
        for b in self.get_bases():
            yield b.base()

    def basename(self) -> str:
        return self.BASENAMES.get(self.cdata.basetype, "unknown")

    def basenames(self) -> Iterator[str]:
        for b in self.get_bases():
            yield b.basename()

    def leafref_type(self) -> Optional["Type"]:
        """Return the target type of a leafref, or None for other types."""
        if self.cdata.basetype != self.LEAFREF:
            return None
        lr = ffi.cast("struct lysc_type_leafref *", self.cdata)
        return Type(self.context, lr.realtype, None)

    def leafref_path(self) -> Optional["str"]:
        if self.cdata.basetype != self.LEAFREF:
            return None
        lr = ffi.cast("struct lysc_type_leafref *", self.cdata)
        return c2str(lib.lyxp_get_expr(lr.path))

    def union_types(self) -> Iterator["Type"]:
        if self.cdata.basetype != self.UNION:
            return
        t = ffi.cast("struct lysc_type_union *", self.cdata)
        for union_type in ly_array_iter(t.types):
            yield Type(self.context, union_type, None)

    def enums(self) -> Iterator[Enum]:
        if self.cdata.basetype != self.ENUM:
            return
        t = ffi.cast("struct lysc_type_enum *", self.cdata)
        for enum in ly_array_iter(t.enums):
            yield Enum(self.context, enum)

    def all_enums(self) -> Iterator[Enum]:
        for b in self.get_bases():
            yield from b.enums()

    def bits(self) -> Iterator[Bit]:
        if self.cdata.basetype != self.BITS:
            return
        t = ffi.cast("struct lysc_type_bits *", self.cdata)
        for bit in ly_array_iter(t.bits):
            # fixed: previously wrapped in Enum, contradicting the
            # Iterator[Bit] annotation
            yield Bit(self.context, bit)

    def all_bits(self) -> Iterator[Bit]:
        for b in self.get_bases():
            yield from b.bits()

    NUM_TYPES = frozenset((INT8, INT16, INT32, INT64, UINT8, UINT16, UINT32, UINT64))

    def range(self) -> Optional[str]:
        """Return the "range" restriction string of a numeric type, if any."""
        if not self.cdata_parsed:
            # fixed: Type objects produced by union_types()/leafref_type()
            # have no parsed counterpart; dereferencing None here used to
            # raise AttributeError (e.g. via all_ranges() on a union of
            # numeric types).
            return None
        if (
            self.cdata.basetype in self.NUM_TYPES or self.cdata.basetype == self.DEC64
        ) and self.cdata_parsed.range != ffi.NULL:
            return c2str(self.cdata_parsed.range.arg.str)
        return None

    def all_ranges(self) -> Iterator[str]:
        if self.cdata.basetype == lib.LY_TYPE_UNION:
            for t in self.union_types():
                yield from t.all_ranges()
        else:
            rng = self.range()
            if rng is not None:
                yield rng

    STR_TYPES = frozenset((STRING, BINARY, ENUM, IDENT, BITS))

    def length(self) -> Optional[str]:
        """Return the "length" restriction string, if any."""
        if not self.cdata_parsed:
            return None
        if (
            self.cdata.basetype in (self.STRING, self.BINARY)
        ) and self.cdata_parsed.length != ffi.NULL:
            return c2str(self.cdata_parsed.length.arg.str)
        return None

    def all_lengths(self) -> Iterator[str]:
        if self.cdata.basetype == lib.LY_TYPE_UNION:
            for t in self.union_types():
                yield from t.all_lengths()
        else:
            length = self.length()
            if length is not None:
                yield length

    def patterns(self) -> Iterator[str]:
        """Yield the "pattern" restriction expressions of a string type.

        NOTE: only the pattern string is yielded; the invert-match flag is
        not exposed (the previous Iterator[Tuple[str, bool]] annotation did
        not match the implementation).
        """
        if not self.cdata_parsed or self.cdata.basetype != self.STRING:
            return
        if self.cdata_parsed.patterns == ffi.NULL:
            return
        # libyang sized arrays store their length in the 64-bit word
        # immediately before the first element.
        arr_length = ffi.cast("uint64_t *", self.cdata_parsed.patterns)[-1]
        for i in range(arr_length):
            yield c2str(self.cdata_parsed.patterns[i].arg.str)

    def all_patterns(self) -> Iterator[str]:
        if self.cdata.basetype == lib.LY_TYPE_UNION:
            for t in self.union_types():
                yield from t.all_patterns()
        else:
            yield from self.patterns()

    def module(self) -> Module:
        # TODO: a pointer to the parsed module where the type is defined is
        # in self.cdata_parsed.pmod, however there is no way to get the name
        # of the module from lysp_module
        if not self.cdata.der.module:
            return None
        return Module(self.context, self.cdata.der.module)

    def extensions(self) -> Iterator[ExtensionCompiled]:
        # NOTE(review): ext_size/ext/parent look like libyang1-era struct
        # members — confirm they exist on the current "struct lysc_type"
        # cffi definition.
        for i in range(self.cdata.ext_size):
            yield ExtensionCompiled(self.context, self.cdata.ext[i])
        if self.cdata.parent:
            for i in range(self.cdata.parent.ext_size):
                yield ExtensionCompiled(self.context, self.cdata.parent.ext[i])

    def get_extension(
        self, name: str, prefix: Optional[str] = None, arg_value: Optional[str] = None
    ) -> Optional[ExtensionCompiled]:
        """Return the first extension matching all provided criteria."""
        for ext in self.extensions():
            if ext.name() != name:
                continue
            if prefix is not None and ext.module().name() != prefix:
                continue
            if arg_value is not None and ext.argument() != arg_value:
                continue
            return ext
        return None

    def __repr__(self):
        cls = self.__class__
        return "<%s.%s: %s>" % (cls.__module__, cls.__name__, str(self))

    def __str__(self):
        return self.name()
# -------------------------------------------------------------------------------------
class Feature:
__slots__ = ("context", "cdata")
def __init__(self, context: "libyang.Context", cdata):
self.context = context
self.cdata = cdata # C type: "struct lysp_feature *"
def name(self) -> str:
return c2str(self.cdata.name)
def description(self) -> Optional[str]:
return c2str(self.cdata.dsc)
def reference(self) -> Optional[str]:
return c2str(self.cdata.ref)
def state(self) -> bool:
return bool(self.cdata.flags & lib.LYS_FENABLED)
def deprecated(self) -> bool:
return bool(self.cdata.flags & lib.LYS_STATUS_DEPRC)
def obsolete(self) -> bool:
return bool(self.cdata.flags & lib.LYS_STATUS_OBSLT)
def if_features(self) -> Iterator["IfFeatureExpr"]:
arr_length = ffi.cast("uint64_t *", self.cdata.iffeatures)[-1]
for i in range(arr_length):
yield IfFeatureExpr(self.context, self.cdata.iffeatures[i])
def test_all_if_features(self) -> Iterator["IfFeatureExpr"]:
for cdata_lysc_iffeature in ly_array_iter(self.cdata.iffeatures_c):
for cdata_feature in ly_array_iter(cdata_lysc_iffeature.features):
yield Feature(self.context, cdata_feature)
def module(self) -> Module:
return Module(self.context, self.cdata.module)
def __str__(self):
return self.name()
# -------------------------------------------------------------------------------------
class IfFeatureExpr:
    """An "if-feature" expression attached to a feature or schema node.

    The underlying C data comes in two flavors:

    * compiled ("struct lysc_iffeature *") when taken from a module
      feature — operators are decoded from a packed 2-bit array (see
      :meth:`_get_operator`);
    * parsed ("struct lysp_qname *") when taken from a lysp_node — the
      expression string is re-tokenized in python (see
      :meth:`_get_operands_parsed`) into the same prefix-order stream.
    """

    __slots__ = ("context", "cdata", "module_features", "compiled")

    def __init__(self, context: "libyang.Context", cdata, module_features=None):
        """
        if module_features is not None, it means we are using a parsed IfFeatureExpr
        """
        self.context = context
        # Can be "struct lysc_iffeature *" if comes from module feature
        # Can be "struct lysp_qname *" if comes from lysp_node
        self.cdata = cdata
        self.module_features = module_features
        self.compiled = not module_features

    def _get_operator(self, position: int) -> int:
        # the ->exp field is a 2bit array of operator values stored under a uint8_t C
        # array.
        mask = 0x3  # 2bits mask
        shift = 2 * (position % 4)  # four 2-bit operators per byte
        item = self.cdata.expr[position // 4]
        result = item & (mask << shift)
        return result >> shift

    def _get_operands_parsed(self):
        # Tokenize the parsed expression string into parallel (operators,
        # features) lists, in the same prefix order that the compiled
        # representation uses, so _operands() can consume either uniformly.
        qname = ffi.string(self.cdata.str).decode()
        tokens = qname.split()
        operators = []
        features = []
        operators_map = {
            "or": lib.LYS_IFF_OR,
            "and": lib.LYS_IFF_AND,
            "not": lib.LYS_IFF_NOT,
            "f": lib.LYS_IFF_F,
        }

        def get_feature(name):
            # Resolve a feature name against the features of the owning
            # module (supplied to __init__ for parsed expressions).
            for feature in self.module_features:
                if feature.name() == name:
                    return feature.cdata
            raise Exception("No feature %s in module" % name)

        def parse_iffeature(tokens):
            # Recursive-descent split: a binary operator cuts the token list
            # at its first occurrence; unary "not" consumes the following
            # feature name.
            # NOTE(review): parentheses and nested "not" sub-expressions are
            # not handled here — confirm inputs are simple expressions.
            def oper2(op):
                op_index = tokens.index(op)
                operators.append(operators_map[op])
                left, right = tokens[:op_index], tokens[op_index + 1 :]
                parse_iffeature(left)
                parse_iffeature(right)

            def oper1(op):
                op_index = tokens.index(op)
                feature_name = tokens[op_index + 1]
                operators.append(operators_map[op])
                operators.append(operators_map["f"])
                features.append(get_feature(feature_name))

            # Try operators loosest-binding first ("or", then "and", then
            # "not"); tokens.index() raises ValueError when the operator is
            # absent, in which case the next candidate is tried.
            oper_map = {"or": oper2, "and": oper2, "not": oper1}
            for op, fun in oper_map.items():
                with suppress(ValueError):
                    fun(op)
                    return
            # Token is a feature
            operators.append(operators_map["f"])
            features.append(get_feature(tokens[0]))

        parse_iffeature(tokens)
        return operators, features

    def _operands(self) -> Iterator[Union["IfFeature", type]]:
        # Yield the expression in prefix order: the operator *classes* for
        # NOT/AND/OR and IfFeature *instances* for leaves. ``expected``
        # counts how many operands are still owed before the expression is
        # complete (binary operators owe one more, leaves settle one).
        if self.compiled:

            def get_operator(op_index):
                return self._get_operator(op_index)

            def get_feature(ft_index):
                return self.cdata.features[ft_index]

        else:
            operators, features = self._get_operands_parsed()

            def get_operator(op_index):
                return operators[op_index]

            def get_feature(ft_index):
                return features[ft_index]

        op_index = 0
        ft_index = 0
        expected = 1
        while expected > 0:
            operator = get_operator(op_index)
            op_index += 1
            if operator == lib.LYS_IFF_F:
                yield IfFeature(self.context, get_feature(ft_index))
                ft_index += 1
                expected -= 1
            elif operator == lib.LYS_IFF_NOT:
                # unary: operand count unchanged
                yield IfNotFeature
            elif operator == lib.LYS_IFF_AND:
                yield IfAndFeatures
                expected += 1
            elif operator == lib.LYS_IFF_OR:
                yield IfOrFeatures
                expected += 1

    def tree(self) -> "IfFeatureExprTree":
        # Rebuild the expression tree from the prefix operand stream.
        def _tree(operands):
            op = next(operands)
            if op is IfNotFeature:
                return op(self.context, _tree(operands))
            if op in (IfAndFeatures, IfOrFeatures):
                return op(self.context, _tree(operands), _tree(operands))
            return op

        return _tree(self._operands())

    def dump(self) -> str:
        return self.tree().dump()

    def __str__(self):
        # NOTE(review): str.strip("()") removes *all* leading/trailing
        # parenthesis characters, not just the outermost pair added by
        # IfOrFeatures.__str__ — confirm nested outputs are acceptable.
        return str(self.tree()).strip("()")
# -------------------------------------------------------------------------------------
class IfFeatureExprTree:
    """Abstract node of an if-feature expression tree.

    Concrete nodes (IfFeature, IfNotFeature, IfAndFeatures, IfOrFeatures)
    implement dump() and __str__.
    """

    def dump(self, indent: int = 0) -> str:
        raise NotImplementedError()

    def __str__(self):
        raise NotImplementedError()
# -------------------------------------------------------------------------------------
class IfFeature(IfFeatureExprTree):
    """Leaf node of an if-feature expression: a single feature reference."""

    __slots__ = ("context", "cdata")

    def __init__(self, context: "libyang.Context", cdata):
        self.context = context
        # C type: "struct lys_feature *"
        self.cdata = cdata

    def feature(self) -> Feature:
        return Feature(self.context, self.cdata)

    def state(self) -> bool:
        return self.feature().state()

    def dump(self, indent: int = 0) -> str:
        feat = self.feature()
        prefix = " " * indent
        return "%s%s [%s]\n" % (prefix, feat.name(), feat.description())

    def __str__(self):
        return self.feature().name()
# -------------------------------------------------------------------------------------
class IfNotFeature(IfFeatureExprTree):
    """Negation node: true when its child expression is false."""

    __slots__ = ("context", "child")

    def __init__(self, context: "libyang.Context", child: IfFeatureExprTree):
        self.context = context
        self.child = child

    def state(self) -> bool:
        return not self.child.state()

    def dump(self, indent: int = 0) -> str:
        header = " " * indent + "NOT\n"
        return header + self.child.dump(indent + 1)

    def __str__(self):
        return "NOT %s" % self.child
# -------------------------------------------------------------------------------------
class IfAndFeatures(IfFeatureExprTree):
    """Conjunction node: true when both operands are true."""

    __slots__ = ("context", "a", "b")

    def __init__(
        self, context: "libyang.Context", a: IfFeatureExprTree, b: IfFeatureExprTree
    ):
        self.context = context
        self.a = a
        self.b = b

    def state(self) -> bool:
        return self.a.state() and self.b.state()

    def dump(self, indent: int = 0) -> str:
        parts = [
            " " * indent + "AND\n",
            self.a.dump(indent + 1),
            self.b.dump(indent + 1),
        ]
        return "".join(parts)

    def __str__(self):
        return "%s AND %s" % (self.a, self.b)
# -------------------------------------------------------------------------------------
class IfOrFeatures(IfFeatureExprTree):
    """Disjunction node: true when at least one operand is true."""

    __slots__ = ("context", "a", "b")

    def __init__(
        self, context: "libyang.Context", a: IfFeatureExprTree, b: IfFeatureExprTree
    ):
        self.context = context
        self.a = a
        self.b = b

    def state(self) -> bool:
        return self.a.state() or self.b.state()

    def dump(self, indent: int = 0) -> str:
        parts = [
            " " * indent + "OR\n",
            self.a.dump(indent + 1),
            self.b.dump(indent + 1),
        ]
        return "".join(parts)

    def __str__(self):
        return "(%s OR %s)" % (self.a, self.b)
# -------------------------------------------------------------------------------------
class SNode:
    """Base wrapper for a compiled schema node ("struct lysc_node *")."""

    __slots__ = ("context", "cdata", "cdata_parsed")

    CONTAINER = lib.LYS_CONTAINER
    LEAF = lib.LYS_LEAF
    LEAFLIST = lib.LYS_LEAFLIST
    LIST = lib.LYS_LIST
    RPC = lib.LYS_RPC
    ACTION = lib.LYS_ACTION
    INPUT = lib.LYS_INPUT
    OUTPUT = lib.LYS_OUTPUT
    NOTIF = lib.LYS_NOTIF
    ANYXML = lib.LYS_ANYXML
    ANYDATA = lib.LYS_ANYDATA
    KEYWORDS = {
        CONTAINER: "container",
        LEAF: "leaf",
        LEAFLIST: "leaf-list",
        LIST: "list",
        RPC: "rpc",
        ACTION: "action",
        INPUT: "input",
        OUTPUT: "output",
        NOTIF: "notification",
        ANYXML: "anyxml",
        ANYDATA: "anydata",
    }

    def __init__(self, context: "libyang.Context", cdata):
        self.context = context
        self.cdata = cdata  # C type: "struct lysc_node *"
        # NOTE(review): assumes the parsed counterpart was stashed in the
        # compiled node's priv pointer — confirm against the context setup.
        self.cdata_parsed = ffi.cast("struct lysp_node *", self.cdata.priv)

    def nodetype(self) -> int:
        return self.cdata.nodetype

    def keyword(self) -> str:
        return self.KEYWORDS.get(self.cdata.nodetype, "???")

    def name(self) -> str:
        return c2str(self.cdata.name)

    def fullname(self) -> str:
        return "%s:%s" % (self.module().name(), self.name())

    def description(self) -> Optional[str]:
        return c2str(self.cdata.dsc)

    def config_set(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_SET_CONFIG)

    def config_false(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_CONFIG_R)

    def mandatory(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_MAND_TRUE)

    def deprecated(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_STATUS_DEPRC)

    def obsolete(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_STATUS_OBSLT)

    def status(self) -> str:
        """Return "obsolete", "deprecated" or "current"."""
        if self.cdata.flags & lib.LYS_STATUS_OBSLT:
            return "obsolete"
        if self.cdata.flags & lib.LYS_STATUS_DEPRC:
            return "deprecated"
        return "current"

    def module(self) -> Module:
        return Module(self.context, self.cdata.module)

    def schema_path(self) -> str:
        """Return the schema path of this node (log format)."""
        # fixed: the lysc_path() call is made before the try block so a
        # failure cannot raise UnboundLocalError on "s" in the finally
        # clause (which would mask the real exception).
        s = lib.lysc_path(self.cdata, lib.LYSC_PATH_LOG, ffi.NULL, 0)
        try:
            return c2str(s)
        finally:
            lib.free(s)

    def data_path(self, key_placeholder: str = "'%s'") -> str:
        """Return the data path pattern of this node.

        :arg key_placeholder:
            Replacement for the default "'%s'" list-key placeholder.
        """
        s = lib.lysc_path(self.cdata, lib.LYSC_PATH_DATA_PATTERN, ffi.NULL, 0)
        try:
            val = c2str(s)
            if key_placeholder != "'%s'":
                val = val.replace("'%s'", key_placeholder)
            return val
        finally:
            lib.free(s)

    def extensions(self) -> Iterator[ExtensionCompiled]:
        ext = ffi.cast("struct lysc_ext_instance *", self.cdata.exts)
        if ext == ffi.NULL:
            return
        for extension in ly_array_iter(ext):
            yield ExtensionCompiled(self.context, extension)

    def must_conditions(self) -> Iterator[str]:
        # The generic node has no "must" statements; subclasses that do
        # (leaf, leaf-list, container, list) override this.
        return iter(())

    def get_extension(
        self, name: str, prefix: Optional[str] = None, arg_value: Optional[str] = None
    ) -> Optional[ExtensionCompiled]:
        """Return the first extension matching all provided criteria."""
        for ext in self.extensions():
            if ext.name() != name:
                continue
            if prefix is not None and ext.module().name() != prefix:
                continue
            if arg_value is not None and ext.argument() != arg_value:
                continue
            return ext
        return None

    def if_features(self) -> Iterator[IfFeatureExpr]:
        iff = ffi.cast("struct lysp_qname *", self.cdata_parsed.iffeatures)
        # fixed: materialize the features once. If features() returns a
        # generator, calling list() on it inside the loop would produce an
        # empty list for every expression after the first.
        module_features = list(self.module().features())
        for if_feature in ly_array_iter(iff):
            yield IfFeatureExpr(self.context, if_feature, module_features)

    def parent(self) -> Optional["SNode"]:
        parent_p = self.cdata.parent
        # Skip intermediate schema nodes that have no registered python
        # wrapper class.
        while parent_p and parent_p.nodetype not in SNode.NODETYPE_CLASS:
            parent_p = parent_p.parent
        if parent_p:
            return SNode.new(self.context, parent_p)
        return None

    def when_conditions(self) -> Iterator[str]:
        """Yield the XPath expressions of the node's "when" statements."""
        # fixed: removed a dead ffi.new("struct lysc_when **") allocation
        # that was immediately overwritten.
        wh = lib.lysc_node_when(self.cdata)
        if wh == ffi.NULL:
            return
        for cond in ly_array_iter(wh):
            yield c2str(lib.lyxp_get_expr(cond.cond))

    def __repr__(self):
        cls = self.__class__
        return "<%s.%s: %s>" % (cls.__module__, cls.__name__, str(self))

    def __str__(self):
        return self.name()

    NODETYPE_CLASS = {}

    @staticmethod
    def register(nodetype):
        """Class decorator mapping a LYS_* nodetype to its wrapper class."""

        def _decorator(nodeclass):
            SNode.NODETYPE_CLASS[nodetype] = nodeclass
            return nodeclass

        return _decorator

    @staticmethod
    def new(context: "libyang.Context", cdata) -> "SNode":
        """Instantiate the registered SNode subclass for a raw schema node."""
        cdata = ffi.cast("struct lysc_node *", cdata)
        nodecls = SNode.NODETYPE_CLASS.get(cdata.nodetype, None)
        if nodecls is None:
            raise TypeError("node type %s not implemented" % cdata.nodetype)
        return nodecls(context, cdata)
# -------------------------------------------------------------------------------------
@SNode.register(SNode.LEAF)
class SLeaf(SNode):
    """Schema "leaf" node."""

    __slots__ = ("cdata_leaf", "cdata_leaf_parsed")

    def __init__(self, context: "libyang.Context", cdata):
        super().__init__(context, cdata)
        self.cdata_leaf = ffi.cast("struct lysc_node_leaf *", cdata)
        self.cdata_leaf_parsed = ffi.cast("struct lysp_node_leaf *", self.cdata_parsed)

    def default(self) -> Union[str, bool, int, None]:
        """Return the leaf's default value converted to a python type.

        Boolean and numeric base types are converted to bool/int; anything
        else is returned as its canonical string. (The previous
        Optional[str] annotation did not match these conversions.)
        """
        if not self.cdata_leaf.dflt:
            return None
        val = lib.lyd_value_get_canonical(self.context.cdata, self.cdata_leaf.dflt)
        if not val:
            return None
        val = c2str(val)
        # NOTE(review): realtype is compared directly against the LY_TYPE_*
        # constants — confirm this should not be realtype.basetype.
        val_type = self.cdata_leaf.dflt.realtype
        if val_type == Type.BOOL:
            return val == "true"
        if val_type in Type.NUM_TYPES:
            return int(val)
        return val

    def units(self) -> Optional[str]:
        return c2str(self.cdata_leaf.units)

    def type(self) -> Type:
        return Type(self.context, self.cdata_leaf.type, self.cdata_leaf_parsed.type)

    def is_key(self) -> bool:
        """Return True if this leaf is a list key."""
        return bool(self.cdata_leaf.flags & lib.LYS_KEY)

    def must_conditions(self) -> Iterator[str]:
        pdata = self.cdata_leaf_parsed
        if pdata.musts == ffi.NULL:
            return
        for must in ly_array_iter(pdata.musts):
            yield c2str(must.arg.str)

    def __str__(self):
        return "%s %s" % (self.name(), self.type().name())
# -------------------------------------------------------------------------------------
@SNode.register(SNode.LEAFLIST)
class SLeafList(SNode):
    """Schema "leaf-list" node."""

    __slots__ = ("cdata_leaflist", "cdata_leaflist_parsed")

    def __init__(self, context: "libyang.Context", cdata):
        super().__init__(context, cdata)
        self.cdata_leaflist = ffi.cast("struct lysc_node_leaflist *", cdata)
        self.cdata_leaflist_parsed = ffi.cast(
            "struct lysp_node_leaflist *", self.cdata_parsed
        )

    def ordered(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_ORDBY_USER)

    def units(self) -> Optional[str]:
        return c2str(self.cdata_leaflist.units)

    def type(self) -> Type:
        return Type(
            self.context, self.cdata_leaflist.type, self.cdata_leaflist_parsed.type
        )

    def defaults(self) -> Iterator[Union[str, bool, int, None]]:
        """Yield each default value converted to a python type.

        Mirrors SLeaf.default(): booleans/numerics are converted, other
        types are yielded as their canonical string, missing canonical
        values as None.
        """
        if self.cdata_leaflist.dflts == ffi.NULL:
            return
        # libyang sized arrays store their length in the 64-bit word
        # immediately before the first element.
        arr_length = ffi.cast("uint64_t *", self.cdata_leaflist.dflts)[-1]
        for i in range(arr_length):
            val = lib.lyd_value_get_canonical(
                self.context.cdata, self.cdata_leaflist.dflts[i]
            )
            if not val:
                yield None
                # fixed: without this continue the loop fell through and
                # yielded a second (bogus) value for the same entry
                continue
            ret = c2str(val)
            val_type = self.cdata_leaflist.dflts[i].realtype
            if val_type == Type.BOOL:
                # fixed: compare the decoded string, not the raw cffi
                # pointer (which never equals "true")
                ret = ret == "true"
            elif val_type in Type.NUM_TYPES:
                # fixed: convert the decoded string, not the cffi pointer
                ret = int(ret)
            yield ret

    def must_conditions(self) -> Iterator[str]:
        pdata = self.cdata_leaflist_parsed
        if pdata.musts == ffi.NULL:
            return
        for must in ly_array_iter(pdata.musts):
            yield c2str(must.arg.str)

    def __str__(self):
        return "%s %s" % (self.name(), self.type().name())
# -------------------------------------------------------------------------------------
@SNode.register(SNode.CONTAINER)
class SContainer(SNode):
    """Schema "container" node."""

    __slots__ = ("cdata_container", "cdata_container_parsed")

    def __init__(self, context: "libyang.Context", cdata):
        super().__init__(context, cdata)
        self.cdata_container = ffi.cast("struct lysc_node_container *", cdata)
        self.cdata_container_parsed = ffi.cast(
            "struct lysp_node_container *", self.cdata_parsed
        )

    def presence(self) -> Optional[str]:
        """Return the "presence" statement argument, or None."""
        if self.cdata_container.flags & lib.LYS_PRESENCE:
            return c2str(self.cdata_container_parsed.presence)
        return None

    def must_conditions(self) -> Iterator[str]:
        musts = self.cdata_container_parsed.musts
        if musts == ffi.NULL:
            return
        for must in ly_array_iter(musts):
            yield c2str(must.arg.str)

    def __iter__(self) -> Iterator[SNode]:
        return self.children()

    def children(self, types: Optional[Tuple[int, ...]] = None) -> Iterator[SNode]:
        return iter_children(self.context, self.cdata, types=types)
# -------------------------------------------------------------------------------------
@SNode.register(SNode.LIST)
class SList(SNode):
    """Schema "list" node."""

    __slots__ = ("cdata_list", "cdata_list_parsed")

    def __init__(self, context: "libyang.Context", cdata):
        super().__init__(context, cdata)
        self.cdata_list = ffi.cast("struct lysc_node_list *", cdata)
        self.cdata_list_parsed = ffi.cast("struct lysp_node_list *", self.cdata_parsed)

    def ordered(self) -> bool:
        return bool(self.cdata.flags & lib.LYS_ORDBY_USER)

    def __iter__(self) -> Iterator[SNode]:
        return self.children()

    def children(
        self, skip_keys: bool = False, types: Optional[Tuple[int, ...]] = None
    ) -> Iterator[SNode]:
        return iter_children(self.context, self.cdata, skip_keys=skip_keys, types=types)

    def keys(self) -> Iterator[SNode]:
        """Yield the key leafs of this list."""
        child = lib.lysc_node_child(self.cdata)
        while child:
            if child.flags & lib.LYS_KEY:
                yield SLeaf(self.context, child)
            child = child.next

    def must_conditions(self) -> Iterator[str]:
        musts = self.cdata_list_parsed.musts
        if musts == ffi.NULL:
            return
        for must in ly_array_iter(musts):
            yield c2str(must.arg.str)

    def __str__(self):
        key_names = ", ".join(k.name() for k in self.keys())
        return "%s [%s]" % (self.name(), key_names)
# -------------------------------------------------------------------------------------
@SNode.register(SNode.INPUT)
@SNode.register(SNode.OUTPUT)
class SRpcInOut(SNode):
    """Schema "input" or "output" node of an rpc/action."""

    def __iter__(self) -> Iterator[SNode]:
        return self.children()

    def children(self, types: Optional[Tuple[int, ...]] = None) -> Iterator[SNode]:
        return iter_children(self.context, self.cdata, types=types)
# -------------------------------------------------------------------------------------
@SNode.register(SNode.RPC)
@SNode.register(SNode.ACTION)
class SRpc(SNode):
    """Schema "rpc" or "action" node."""

    def input(self) -> Optional[SRpcInOut]:
        """Return the input node, or None if there is none."""
        child = lib.lysc_node_child(self.cdata)
        while child:
            if child.nodetype == self.INPUT:
                return SNode.new(self.context, child)
            child = child.next
        return None

    def output(self) -> Optional[SRpcInOut]:
        """Return the output node, or None if there is none."""
        child = lib.lysc_node_child(self.cdata)
        while child:
            if child.nodetype == self.OUTPUT:
                return SNode.new(self.context, child)
            child = child.next
        return None

    def __iter__(self) -> Iterator[SNode]:
        return self.children()

    def children(self, types: Optional[Tuple[int, ...]] = None) -> Iterator[SNode]:
        # libyang2 exposes either the input or the output children per
        # lys_getnext() pass; iterate twice (default, then with the
        # LYS_GETNEXT_OUTPUT option) to keep the old behavior of yielding
        # both sides.
        yield from iter_children(self.context, self.cdata, types=types)
        yield from iter_children(
            self.context, self.cdata, types=types, options=lib.LYS_GETNEXT_OUTPUT
        )
# -------------------------------------------------------------------------------------
@SNode.register(SNode.NOTIF)
class SNotif(SNode):
    """Schema "notification" node."""

    def __iter__(self) -> Iterator[SNode]:
        return self.children()

    def children(self, types: Optional[Tuple[int, ...]] = None) -> Iterator[SNode]:
        return iter_children(self.context, self.cdata, types=types)
# -------------------------------------------------------------------------------------
@SNode.register(SNode.ANYXML)
class SAnyxml(SNode):
    """Schema "anyxml" node."""
# -------------------------------------------------------------------------------------
@SNode.register(SNode.ANYDATA)
class SAnydata(SNode):
    """Schema "anydata" node."""
# -------------------------------------------------------------------------------------
def iter_children(
    context: "libyang.Context",
    parent,  # C type: Union["struct lys_module *", "struct lys_node *"]
    skip_keys: bool = False,
    types: Optional[Tuple[int, ...]] = None,
    options: int = 0,
) -> Iterator[SNode]:
    """Iterate over the direct schema children of a module or node.

    :arg skip_keys: when True, list-key leafs are not yielded.
    :arg types: nodetypes to include (defaults to the common data/rpc set).
    :arg options: extra flags forwarded to lys_getnext().
    """
    if types is None:
        types = (
            lib.LYS_ACTION,
            lib.LYS_CONTAINER,
            lib.LYS_LIST,
            lib.LYS_RPC,
            lib.LYS_LEAF,
            lib.LYS_LEAFLIST,
            lib.LYS_NOTIF,
        )

    def _wanted(node) -> bool:
        if node.nodetype not in types:
            return False
        if skip_keys and node.nodetype == lib.LYS_LEAF:
            leaf = ffi.cast("struct lysc_node_leaf *", node)
            if leaf.flags & lib.LYS_KEY:
                return False
        return True

    if ffi.typeof(parent) == ffi.typeof("struct lys_module *"):
        module = parent.compiled
        parent = ffi.NULL
    else:
        module = ffi.NULL

    child = lib.lys_getnext(ffi.NULL, parent, module, options)
    while child:
        if _wanted(child):
            yield SNode.new(context, child)
        child = lib.lys_getnext(child, parent, module, options)
# -------------------------------------------------------------------------------------
# compat
# Backwards-compatible aliases: earlier releases exposed these wrapper
# classes without the "S" (schema) prefix.
Container = SContainer
Leaf = SLeaf
LeafList = SLeafList
List = SList
Node = SNode
Rpc = SRpc
RpcInOut = SRpcInOut
Anyxml = SAnyxml
| 32.009738 | 100 | 0.556034 | 37,174 | 0.869913 | 9,613 | 0.224955 | 7,922 | 0.185384 | 0 | 0 | 7,165 | 0.167669 |
942e6e33a83242bb266836b4cf6d35a4a245ea62 | 568 | py | Python | 2020/day-05/day-05b.py | BenjaminEHowe/advent-of-code | 7d869bbadc60b1c9b03a54edf698b89cc04eab9b | [
"MIT"
] | null | null | null | 2020/day-05/day-05b.py | BenjaminEHowe/advent-of-code | 7d869bbadc60b1c9b03a54edf698b89cc04eab9b | [
"MIT"
] | null | null | null | 2020/day-05/day-05b.py | BenjaminEHowe/advent-of-code | 7d869bbadc60b1c9b03a54edf698b89cc04eab9b | [
"MIT"
] | null | null | null | seats = []
with open("input.txt") as f:
for line in f:
line = line.replace("\n", "")
seat = {}
seat["raw"] = line
seat["row"] = int(seat["raw"][:7].replace("F", "0").replace("B", "1"), 2)
seat["column"] = int(seat["raw"][-3:].replace("L", "0").replace("R", "1"), 2)
seat["id"] = seat["row"] * 8 + seat["column"]
seats.append(seat)
seats = sorted(seats, key=lambda k: k["id"], reverse=True)
for i, seat in enumerate(seats):
if seat["id"]-1 != seats[i+1]["id"]:
print(seat["id"]-1)
break
| 31.555556 | 85 | 0.492958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.179577 |
942f5cd48c58389e331452b5be336e85b4cdd20b | 415 | py | Python | crmsystem/__init__.py | iomegak12/pythondockertry | dd91dc57a09141f94cb0a73e18a8ad9da4d5aa85 | [
"MIT"
] | null | null | null | crmsystem/__init__.py | iomegak12/pythondockertry | dd91dc57a09141f94cb0a73e18a8ad9da4d5aa85 | [
"MIT"
] | null | null | null | crmsystem/__init__.py | iomegak12/pythondockertry | dd91dc57a09141f94cb0a73e18a8ad9da4d5aa85 | [
"MIT"
] | null | null | null | from .config import GlobalConfiguration
from .controllers import DataController
from .utilities import ErrorProvider, CustomerEncoder, OrderEncoder, PrettyTableGenerator
from .services import CustomerService, OrderService
from .models import Customer, Order, CRMSystemError
from .decorators import Logger
from .routing import CustomerRouteHandler
from .hosting import app as CRMSystemHost
from .app import flaskApp
| 41.5 | 89 | 0.857831 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
942f69fda255bb1418c7d2e2276adda04e3b0630 | 25,683 | py | Python | aliquotmaf/subcommands/vcf_to_aliquot/runners/gdc_1_0_0_aliquot.py | NCI-GDC/aliquot-maf-tools | 6aec9490ab7194ec605bf02c4c8e7c1cfca53973 | [
"Apache-2.0"
] | 1 | 2020-09-18T17:52:37.000Z | 2020-09-18T17:52:37.000Z | aliquotmaf/subcommands/vcf_to_aliquot/runners/gdc_1_0_0_aliquot.py | NCI-GDC/aliquot-maf-tools | 6aec9490ab7194ec605bf02c4c8e7c1cfca53973 | [
"Apache-2.0"
] | null | null | null | aliquotmaf/subcommands/vcf_to_aliquot/runners/gdc_1_0_0_aliquot.py | NCI-GDC/aliquot-maf-tools | 6aec9490ab7194ec605bf02c4c8e7c1cfca53973 | [
"Apache-2.0"
] | 1 | 2020-08-14T08:49:39.000Z | 2020-08-14T08:49:39.000Z | """Main vcf2maf logic for spec gdc-1.0.0-aliquot"""
import urllib.parse
from operator import itemgetter
import pysam
from maflib.header import MafHeader, MafHeaderRecord
from maflib.sort_order import BarcodesAndCoordinate
from maflib.sorter import MafSorter
from maflib.validation import ValidationStringency
from maflib.writer import MafWriter
import aliquotmaf.annotators as Annotators
import aliquotmaf.filters as Filters
import aliquotmaf.subcommands.vcf_to_aliquot.extractors as Extractors
from aliquotmaf.converters.builder import get_builder
from aliquotmaf.converters.collection import InputCollection
from aliquotmaf.converters.formatters import (
format_all_effects,
format_alleles,
format_depths,
format_vcf_columns,
)
from aliquotmaf.converters.utils import get_columns_from_header, init_empty_maf_record
from aliquotmaf.subcommands.utils import (
assert_sample_in_header,
extract_annotation_from_header,
load_enst,
load_json,
)
from aliquotmaf.subcommands.vcf_to_aliquot.runners import BaseRunner
class GDC_1_0_0_Aliquot(BaseRunner):
    """
    Runner that converts a VEP-annotated VCF into an aliquot-level MAF
    following the ``gdc-1.0.0-aliquot`` specification.

    Workflow:

    1. ``setup_maf_header`` builds the MAF header from the CLI options.
    2. For each VCF record, ``extract`` pulls out genotypes, depths,
       location data and the prioritized VEP effect.
    3. ``transform`` maps the extracted data onto MAF columns, applies
       the configured annotators and filters, and the sorted records are
       written out by ``do_work``.
    """

    def __init__(self, options=None):
        # NOTE: the previous signature used a mutable default argument
        # (options=dict()); a None sentinel avoids sharing one dict
        # across instances.
        if options is None:
            options = dict()
        super(GDC_1_0_0_Aliquot, self).__init__(options)

        # Load the resource files
        self.logger.info("Loading priority files")
        self.biotype_priority = load_json(self.options["biotype_priority_file"])
        self.effect_priority = load_json(self.options["effect_priority_file"])
        self.custom_enst = (
            load_enst(self.options["custom_enst"])
            if self.options["custom_enst"]
            else None
        )

        # Schema
        self.options["version"] = "gdc-1.0.0"
        self.options["annotation"] = "gdc-1.0.0-aliquot"

        # Annotators; instances are created in setup_annotators()
        self.annotators = {
            "dbsnp_priority_db": None,
            "reference_context": None,
            "cosmic_id": None,
            "mutation_status": None,
            "non_tcga_exac": None,
            "hotspots": None,
        }

        # Filters; instances are created in setup_filters()
        self.filters = {
            "common_in_exac": None,
            "gdc_blacklist": None,
            "normal_depth": None,
            "gdc_pon": None,
            "multiallelic": None,
            "nonexonic": None,
            # key fixed from "offtarget" so it matches the key that
            # setup_filters() actually assigns
            "off_target": None,
        }

    @classmethod
    def __validate_options__(cls, options):
        """Validates the tumor only stuff"""
        if options.tumor_only:
            options.normal_vcf_id = None
        else:
            # Paired tumor/normal runs require all three normal-sample IDs.
            if options.normal_aliquot_uuid is None:
                raise ValueError("--normal_aliquot_uuid is required")
            if options.normal_submitter_id is None:
                raise ValueError("--normal_submitter_id is required")
            if options.normal_bam_uuid is None:
                raise ValueError("--normal_bam_uuid is required")

    @classmethod
    def __add_arguments__(cls, parser):
        """Add the arguments to the parser"""
        vcf = parser.add_argument_group(title="VCF options")
        vcf.add_argument(
            "--tumor_only", action="store_true", help="Is this a tumor-only VCF?"
        )
        vcf.add_argument(
            "-t",
            "--tumor_vcf_id",
            default="TUMOR",
            help="Name of the tumor sample in the VCF",
        )
        vcf.add_argument(
            "-n",
            "--normal_vcf_id",
            default="NORMAL",
            help="Name of the normal sample in the VCF",
        )
        vcf.add_argument(
            "--caller_id",
            required=True,
            help="Name of the caller used to detect mutations",
        )
        vcf.add_argument(
            "--src_vcf_uuid", required=True, help="The UUID of the src VCF file"
        )

        sample = parser.add_argument_group(title="Sample Metadata")
        sample.add_argument("--case_uuid", required=True, help="Sample case UUID")
        sample.add_argument(
            "--tumor_submitter_id",
            required=True,
            help="Tumor sample aliquot submitter ID",
        )
        sample.add_argument(
            "--tumor_aliquot_uuid", required=True, help="Tumor sample aliquot UUID"
        )
        sample.add_argument(
            "--tumor_bam_uuid", required=True, help="Tumor sample bam UUID"
        )
        sample.add_argument(
            "--normal_submitter_id", help="Normal sample aliquot submitter ID"
        )
        sample.add_argument("--normal_aliquot_uuid", help="Normal sample aliquot UUID")
        sample.add_argument("--normal_bam_uuid", help="Normal sample bam UUID")
        sample.add_argument("--sequencer", action="append", help="The sequencer used")
        sample.add_argument(
            "--maf_center", action="append", required=True, help="The sequencing center"
        )

        anno = parser.add_argument_group(title="Annotation Resources")
        anno.add_argument(
            "--biotype_priority_file", required=True, help="Biotype priority JSON"
        )
        anno.add_argument(
            "--effect_priority_file", required=True, help="Effect priority JSON"
        )
        anno.add_argument(
            "--custom_enst", default=None, help="Optional custom ENST overrides"
        )
        anno.add_argument(
            "--dbsnp_priority_db", default=None, help="DBSNP priority sqlite database"
        )
        anno.add_argument(
            "--reference_fasta", required=True, help="Reference fasta file"
        )
        anno.add_argument(
            "--reference_fasta_index", required=True, help="Reference fasta fai file"
        )
        anno.add_argument(
            "--reference_context_size",
            type=int,
            default=5,
            help="Number of BP to add both upstream and "
            + "downstream from variant for reference context",
        )
        anno.add_argument(
            "--cosmic_vcf", default=None, help="Optional COSMIC VCF for annotating"
        )
        anno.add_argument(
            "--non_tcga_exac_vcf",
            default=None,
            help="Optional non-TCGA ExAC VCF for annotating and filtering",
        )
        anno.add_argument("--hotspot_tsv", default=None, help="Optional hotspot TSV")

        filt = parser.add_argument_group(title="Filtering Options")
        filt.add_argument(
            "--exac_freq_cutoff",
            default=0.001,
            type=float,
            help="Flag variants where the allele frequency in any ExAC population "
            + "is great than this value as common_in_exac [0.001]",
        )
        filt.add_argument(
            "--gdc_blacklist",
            type=str,
            default=None,
            help="The file containing the blacklist tags and tumor aliquot uuids to "
            + "apply them to.",
        )
        filt.add_argument(
            "--min_n_depth",
            default=7,
            type=int,
            help="Flag variants where normal depth is <= INT as ndp [7].",
        )
        filt.add_argument(
            "--gdc_pon_vcf",
            type=str,
            default=None,
            help="The tabix-indexed panel of normals VCF for applying the gdc "
            + "pon filter",
        )
        filt.add_argument(
            "--nonexonic_intervals",
            type=str,
            default=None,
            help="Flag variants outside of this tabix-indexed bed file "
            + "as NonExonic",
        )
        filt.add_argument(
            "--target_intervals",
            action="append",
            help="Flag variants outside of these tabix-indexed bed files "
            + "as off_target. Use one or more times.",
        )

    def setup_maf_header(self):
        """
        Sets up the maf header.
        """
        self.maf_header = MafHeader.from_defaults(
            version=self.options["version"],
            annotation=self.options["annotation"],
            sort_order=BarcodesAndCoordinate(),
            fasta_index=self.options["reference_fasta_index"],
        )
        header_date = BaseRunner.get_header_date()
        self.maf_header[header_date.key] = header_date

        # The normal.aliquot record is only written for paired runs.
        if not self.options["tumor_only"]:
            normal_aliquot = MafHeaderRecord(
                key="normal.aliquot",
                value=self.options["normal_aliquot_uuid"]
                if not self.options["tumor_only"]
                else "",
            )
            self.maf_header[normal_aliquot.key] = normal_aliquot
        tumor_aliquot = MafHeaderRecord(
            key="tumor.aliquot", value=self.options["tumor_aliquot_uuid"]
        )
        self.maf_header[tumor_aliquot.key] = tumor_aliquot

    def do_work(self):
        """Main wrapper function for running vcf2maf"""
        self.logger.info(
            "Processing input vcf {0}...".format(self.options["input_vcf"])
        )

        # Initialize the maf file
        self.setup_maf_header()

        sorter = MafSorter(
            max_objects_in_ram=100000,
            sort_order_name=BarcodesAndCoordinate.name(),
            scheme=self.maf_header.scheme(),
            fasta_index=self.options["reference_fasta_index"],
        )

        self._scheme = self.maf_header.scheme()
        self._columns = get_columns_from_header(self.maf_header)
        self._colset = set(self._columns)

        # Initialize vcf reader
        vcf_object = pysam.VariantFile(self.options["input_vcf"])
        tumor_sample_id = self.options["tumor_vcf_id"]
        normal_sample_id = self.options["normal_vcf_id"]
        is_tumor_only = self.options["tumor_only"]

        try:
            # Validate samples
            tumor_idx = assert_sample_in_header(
                vcf_object, self.options["tumor_vcf_id"]
            )
            normal_idx = assert_sample_in_header(
                vcf_object, self.options["normal_vcf_id"], can_fail=is_tumor_only
            )

            # extract annotation from header
            ann_cols_format, vep_key = extract_annotation_from_header(
                vcf_object, vep_key="CSQ"
            )

            # Initialize annotators
            self.setup_annotators()

            # Initialize filters
            self.setup_filters()

            # Convert
            line = 0
            for vcf_record in vcf_object.fetch():
                line += 1
                if line % 1000 == 0:
                    self.logger.info("Processed {0} records...".format(line))

                # Extract data
                data = self.extract(
                    tumor_sample_id,
                    normal_sample_id,
                    tumor_idx,
                    normal_idx,
                    ann_cols_format,
                    vep_key,
                    vcf_record,
                    is_tumor_only,
                )

                # Skip rare occasions where VEP doesn't provide IMPACT or the consequence is ?
                if (
                    not data["selected_effect"]["IMPACT"]
                    or data["selected_effect"]["One_Consequence"] == "?"
                ):
                    # logger.warn is deprecated; use logger.warning
                    self.logger.warning(
                        "Skipping record with unknown impact or consequence: {0} - {1}".format(
                            data["selected_effect"]["IMPACT"],
                            data["selected_effect"]["One_Consequence"],
                        )
                    )
                    continue

                # Transform
                maf_record = self.transform(
                    vcf_record, data, is_tumor_only, line_number=line
                )

                # Add to sorter
                sorter += maf_record

            # Write
            self.logger.info("Writing {0} sorted records...".format(line))

            self.maf_writer = MafWriter.from_path(
                path=self.options["output_maf"],
                header=self.maf_header,
                validation_stringency=ValidationStringency.Strict,
            )

            counter = 0
            for record in sorter:
                counter += 1
                if counter % 1000 == 0:
                    self.logger.info("Wrote {0} records...".format(counter))
                self.maf_writer += record

            self.logger.info("Finished writing {0} records".format(counter))

        finally:
            # Always release file handles and shut the annotators down,
            # even when conversion raised part-way through.
            vcf_object.close()
            sorter.close()
            if self.maf_writer:
                self.maf_writer.close()
            for anno in self.annotators:
                if self.annotators[anno]:
                    self.annotators[anno].shutdown()

        self.logger.info("Finished")

    def extract(
        self,
        tumor_sample_id,
        normal_sample_id,
        tumor_idx,
        normal_idx,
        ann_cols,
        vep_key,
        record,
        is_tumor_only,
    ):
        """
        Extract the VCF information needed to transform into MAF.

        Returns a dict with genotypes, depths, location data, the full
        effect list, the single prioritized effect, the variant class,
        and pre-formatted VCF columns.
        """
        dic = {
            "var_allele_idx": None,
            "tumor_gt": None,
            "tumor_depths": None,
            "normal_gt": None,
            "normal_depths": None,
            "location_data": None,
            "effects": None,
            "selected_effect": None,
            "variant_class": None,
        }

        # Genotypes
        var_allele_idx = Extractors.VariantAlleleIndexExtractor.extract(
            tumor_genotype=record.samples[tumor_sample_id]
        )
        tumor_gt, tumor_depths = Extractors.GenotypeAndDepthsExtractor.extract(
            var_allele_idx=var_allele_idx,
            genotype=record.samples[tumor_sample_id],
            alleles=record.alleles,
        )

        if not is_tumor_only:
            normal_gt, normal_depths = Extractors.GenotypeAndDepthsExtractor.extract(
                var_allele_idx=var_allele_idx,
                genotype=record.samples[normal_sample_id],
                alleles=record.alleles,
            )
        else:
            normal_gt, normal_depths = None, None

        # Locations
        location_data = Extractors.LocationDataExtractor.extract(
            ref_allele=record.ref,
            var_allele=record.alleles[var_allele_idx],
            position=record.pos,
            alleles=record.alleles,
        )

        # Handle effects; VEP effect strings are URL-encoded in the INFO field
        effects = Extractors.EffectsExtractor.extract(
            effect_priority=self.effect_priority,
            biotype_priority=self.biotype_priority,
            effect_keys=ann_cols,
            effect_list=[
                urllib.parse.unquote(i).split("|") for i in record.info[vep_key]
            ],
            var_idx=var_allele_idx,
        )

        effects, selected_effect = Extractors.SelectOneEffectExtractor.extract(
            all_effects=effects,
            effect_priority=self.effect_priority,
            biotype_priority=self.biotype_priority,
            custom_enst=self.custom_enst,
        )

        selected_effect = Extractors.PopulationFrequencyExtractor.extract(
            effect=selected_effect, var_allele=location_data["var_allele"]
        )

        # Handle variant class
        variant_class = Extractors.VariantClassExtractor.extract(
            cons=selected_effect["One_Consequence"],
            var_type=location_data["var_type"],
            inframe=location_data["inframe"],
        )

        # Make return dictionary
        dic["var_allele_idx"] = var_allele_idx
        dic["tumor_gt"] = tumor_gt
        dic["tumor_depths"] = tumor_depths
        dic["normal_gt"] = normal_gt
        dic["normal_depths"] = normal_depths
        dic["location_data"] = location_data
        dic["effects"] = format_all_effects(effects)
        dic["selected_effect"] = selected_effect
        dic["variant_class"] = variant_class
        dic["vcf_columns"] = format_vcf_columns(
            vcf_record=record,
            vep_key=vep_key,
            tumor_idx=tumor_idx,
            normal_idx=normal_idx,
        )
        return dic

    def transform(self, vcf_record, data, is_tumor_only, line_number=None):
        """
        Transform into maf record.
        """
        # Generic data
        collection = InputCollection()
        # (removed a dead, broken statement that built a nested
        # operator.itemgetter and never used it)
        collection.add(
            column="Hugo_Symbol",
            value=data["selected_effect"].get("Hugo_Symbol"),
            default="Unknown",
        )
        collection.add(
            column="Entrez_Gene_Id", value=data["selected_effect"]["Entrez_Gene_Id"]
        )
        collection.add(column="Center", value=self.options["maf_center"])
        collection.add(column="NCBI_Build", value="GRCh38")
        collection.add(column="Chromosome", value=vcf_record.chrom)
        collection.add(column="Start_Position", value=data["location_data"]["start"])
        collection.add(column="End_Position", value=data["location_data"]["stop"])
        collection.add(column="Strand", value="+")
        collection.add(column="Variant_Classification", value=data["variant_class"])
        collection.add(column="Variant_Type", value=data["location_data"]["var_type"])
        collection.add(
            column="Reference_Allele", value=data["location_data"]["ref_allele"]
        )

        for k, v in zip(
            ["Tumor_Seq_Allele1", "Tumor_Seq_Allele2"],
            format_alleles(
                genotype=data["tumor_gt"],
                alleles=data["location_data"]["alleles"],
                defaults=[
                    data["location_data"]["ref_allele"],
                    data["location_data"]["var_allele"],
                ],
            ),
        ):
            collection.add(column=k, value=v)

        if not is_tumor_only:
            for k, v in zip(
                ["Match_Norm_Seq_Allele1", "Match_Norm_Seq_Allele2"],
                format_alleles(
                    genotype=data["normal_gt"],
                    alleles=data["location_data"]["alleles"],
                    defaults=[
                        data["location_data"]["ref_allele"],
                        data["location_data"]["ref_allele"],
                    ],
                ),
            ):
                collection.add(column=k, value=v)
        else:
            for k in ["Match_Norm_Seq_Allele1", "Match_Norm_Seq_Allele2"]:
                collection.add(column=k, value="")
        collection.add(column="dbSNP_RS", value=data["selected_effect"]["dbSNP_RS"])

        collection.add(
            column="Tumor_Sample_Barcode", value=self.options["tumor_submitter_id"]
        )
        collection.add(
            column="Matched_Norm_Sample_Barcode",
            value=self.options["normal_submitter_id"],
            default="",
        )

        collection.add(column="Sequencer", value=self.options["sequencer"], default="")
        collection.add(
            column="Tumor_Sample_UUID", value=self.options["tumor_aliquot_uuid"]
        )
        collection.add(
            column="Matched_Norm_Sample_UUID",
            value=self.options["normal_aliquot_uuid"],
            default="",
        )

        collection.add(column="all_effects", value=";".join(data["effects"]))

        for k, v in zip(
            ["t_depth", "t_ref_count", "t_alt_count"],
            format_depths(
                genotype=data["tumor_gt"],
                depths=data["tumor_depths"],
                var_allele_idx=data["var_allele_idx"],
                default_total_dp=0,
            ),
        ):
            collection.add(column=k, value=v)

        if not is_tumor_only:
            for k, v in zip(
                ["n_depth", "n_ref_count", "n_alt_count"],
                format_depths(
                    genotype=data["normal_gt"],
                    depths=data["normal_depths"],
                    var_allele_idx=data["var_allele_idx"],
                ),
            ):
                collection.add(column=k, value=v)
        else:
            for k in ["n_depth", "n_ref_count", "n_alt_count"]:
                collection.add(column=k, value=None)

        # Pull any remaining scheme columns straight from the selected effect.
        for k in data["selected_effect"]:
            if k in self._colset and k not in collection._colset:
                collection.add(column=k, value=data["selected_effect"][k])

        # Set other uuids
        collection.add(column="src_vcf_id", value=self.options["src_vcf_uuid"])
        collection.add(column="tumor_bam_uuid", value=self.options["tumor_bam_uuid"])
        collection.add(column="normal_bam_uuid", value=self.options["normal_bam_uuid"])
        collection.add(column="case_id", value=self.options["case_uuid"])

        # VCF columns
        collection.add(column="FILTER", value=";".join(sorted(list(vcf_record.filter))))
        collection.add(column="vcf_region", value=data["vcf_columns"]["vcf_region"])
        collection.add(column="vcf_info", value=data["vcf_columns"]["vcf_info"])
        collection.add(column="vcf_format", value=data["vcf_columns"]["vcf_format"])
        collection.add(column="vcf_tumor_gt", value=data["vcf_columns"]["vcf_tumor_gt"])
        collection.add(
            column="vcf_normal_gt", value=data["vcf_columns"].get("vcf_normal_gt")
        )

        # Set the other columns to none
        collection.add(column="Score", value="")
        collection.add(column="BAM_File", value="")
        collection.add(column="Sequencing_Phase", value="")

        # These columns are produced by the annotators below, not here.
        anno_set = ("dbSNP_Val_Status", "COSMIC", "CONTEXT", "Mutation_Status")
        for i in self._colset - set(collection.columns()):
            if i not in anno_set:
                collection.add(column=i, value=None)
        collection.transform(self._scheme)

        # Generate maf record
        maf_record = init_empty_maf_record(line_number=line_number)
        for i in collection:
            maf_record += i.transformed

        # Annotations
        if self.annotators["dbsnp_priority_db"]:
            maf_record = self.annotators["dbsnp_priority_db"].annotate(maf_record)
        else:
            maf_record["dbSNP_Val_Status"] = get_builder(
                "dbSNP_Val_Status", self._scheme, value=None
            )

        if self.annotators["cosmic_id"]:
            maf_record = self.annotators["cosmic_id"].annotate(maf_record, vcf_record)
        else:
            maf_record["COSMIC"] = get_builder("COSMIC", self._scheme, value=None)

        if self.annotators["non_tcga_exac"]:
            maf_record = self.annotators["non_tcga_exac"].annotate(
                maf_record, vcf_record, var_allele_idx=data["var_allele_idx"]
            )

        if self.annotators["hotspots"]:
            maf_record = self.annotators["hotspots"].annotate(maf_record)
        else:
            maf_record["hotspot"] = get_builder("hotspot", self._scheme, value=None)

        maf_record = self.annotators["reference_context"].annotate(
            maf_record, vcf_record
        )
        maf_record = self.annotators["mutation_status"].annotate(
            maf_record, vcf_record, self.options["tumor_vcf_id"]
        )

        # Filters: collect the tags of every configured filter that fires.
        gdc_filters = []
        for filt_key in self.filters:
            filt_obj = self.filters[filt_key]
            if filt_obj and filt_obj.filter(maf_record):
                gdc_filters.extend(filt_obj.tags)
        maf_record["GDC_FILTER"] = get_builder(
            "GDC_FILTER", self._scheme, value=";".join(sorted(gdc_filters))
        )

        return maf_record

    def setup_annotators(self):
        """
        Sets up all annotator classes.
        """
        # Mutation status and reference context are always required.
        self.annotators["mutation_status"] = Annotators.MutationStatus.setup(
            self._scheme, self.options["caller_id"]
        )
        self.annotators["reference_context"] = Annotators.ReferenceContext.setup(
            self._scheme,
            self.options["reference_fasta"],
            self.options["reference_context_size"],
        )

        # The remaining annotators are optional and only configured when
        # their corresponding resource file was provided.
        if self.options["dbsnp_priority_db"]:
            self.annotators["dbsnp_priority_db"] = Annotators.DbSnpValidation.setup(
                self._scheme, self.options["dbsnp_priority_db"]
            )

        if self.options["cosmic_vcf"]:
            self.annotators["cosmic_id"] = Annotators.CosmicID.setup(
                self._scheme, self.options["cosmic_vcf"]
            )

        if self.options["non_tcga_exac_vcf"]:
            self.annotators["non_tcga_exac"] = Annotators.NonTcgaExac.setup(
                self._scheme, self.options["non_tcga_exac_vcf"]
            )

        if self.options["hotspot_tsv"]:
            self.annotators["hotspots"] = Annotators.Hotspot.setup(
                self._scheme, self.options["hotspot_tsv"]
            )

    def setup_filters(self):
        """
        Sets up all filter classes.
        """
        # ExAC and multiallelic filters are always active.
        self.filters["common_in_exac"] = Filters.ExAC.setup(
            self.options["exac_freq_cutoff"]
        )
        self.filters["multiallelic"] = Filters.Multiallelic.setup()

        if self.options["gdc_blacklist"]:
            self.filters["gdc_blacklist"] = Filters.GdcBlacklist.setup(
                self.options["gdc_blacklist"]
            )

        # Normal depth filtering only makes sense for paired runs.
        if not self.options["tumor_only"]:
            self.filters["normal_depth"] = Filters.NormalDepth.setup(
                self.options["min_n_depth"]
            )

        if self.options["gdc_pon_vcf"]:
            self.filters["gdc_pon"] = Filters.GdcPon.setup(self.options["gdc_pon_vcf"])

        if self.options["nonexonic_intervals"]:
            self.filters["nonexonic"] = Filters.NonExonic.setup(
                self.options["nonexonic_intervals"]
            )

        if self.options["target_intervals"]:
            self.filters["off_target"] = Filters.OffTarget.setup(
                self.options["target_intervals"]
            )

    @classmethod
    def __tool_name__(cls):
        return "gdc-1.0.0-aliquot"
9430c7d26f1a2d5ac3fef69dacab439ea6ec2d2d | 477 | py | Python | shortly/settings.py | fengsp/shortly | 29532523c2db297c995a7e94c84df6d884ce240e | [
"BSD-3-Clause"
] | 11 | 2015-01-01T03:22:09.000Z | 2021-02-12T14:08:06.000Z | shortly/settings.py | fengsp/shortly | 29532523c2db297c995a7e94c84df6d884ce240e | [
"BSD-3-Clause"
] | null | null | null | shortly/settings.py | fengsp/shortly | 29532523c2db297c995a7e94c84df6d884ce240e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
shortly.settings
~~~~~~~~~~~~~~~~
Shortly config.
:copyright: (c) 2014 by fsp.
:license: BSD.
"""
import os
DEBUG = False
# Detect environment by whether debug named file exists or not
if os.path.exists(os.path.join(os.path.dirname(__file__), 'debug')):
DEBUG = True
if DEBUG:
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
else:
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
| 17.035714 | 68 | 0.607966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.496855 |
9430d0d4a88447aeee51cd6bf4204e6a50abf192 | 20,916 | py | Python | examples/fiber_tractography/TractographyHelper.py | MIC-DKFZ/cmdint | b4a222430a82d38f339e2072ef2bad26e94d146b | [
"Apache-2.0"
] | 8 | 2019-02-13T15:55:01.000Z | 2020-07-07T14:38:23.000Z | examples/fiber_tractography/TractographyHelper.py | MIC-DKFZ/cmdint | b4a222430a82d38f339e2072ef2bad26e94d146b | [
"Apache-2.0"
] | null | null | null | examples/fiber_tractography/TractographyHelper.py | MIC-DKFZ/cmdint | b4a222430a82d38f339e2072ef2bad26e94d146b | [
"Apache-2.0"
] | 1 | 2021-02-21T12:47:33.000Z | 2021-02-21T12:47:33.000Z | from cmdint import CmdInterface
import numpy as np
from shutil import copyfile
from dipy.io import read_bvals_bvecs
import os
""" This exapmple contains two classes that help with fiber tractography using MITK Diffusion and MRtrix. It is only
intended as a larger example of multiple usages of CmdInterface and NOT (yet) as a polished class that wraps
command line tools for fiber tractography and diffusion signal modelling.
"""
def flip_bvecs(input_dwi: str, output_dwi: str):
    """Copy a DWI dataset while negating the x component of its b-vectors.

    Expects ``input_dwi`` to end in ``.nii.gz`` with sibling ``.bvals`` and
    ``.bvecs`` files next to it. Writes the flipped ``.bvecs`` (FSL layout,
    i.e. transposed) plus unmodified copies of the image and ``.bvals`` to
    the paths derived from ``output_dwi``.
    """
    in_base = input_dwi.replace('.nii.gz', '')
    out_base = output_dwi.replace('.nii.gz', '')

    bvals, bvecs = read_bvals_bvecs(in_base + '.bvals', in_base + '.bvecs')
    bvecs[:, 0] = -bvecs[:, 0]  # flip x-axis only
    np.savetxt(out_base + '.bvecs', bvecs.T, fmt='%10.6f')

    # Image and b-values are passed through unchanged.
    copyfile(input_dwi, output_dwi)
    copyfile(in_base + '.bvals', out_base + '.bvals')
class MitkTrackingHelper:
    """Thin wrappers around MITK Diffusion command-line tools, executed and
    logged via CmdInterface. Each method builds one command line, runs it,
    and returns the path of the produced file."""

    def __init__(self):
        # Stateless helper; all functionality lives in static methods.
        pass

    @staticmethod
    def recon_qball(input_dwi: str, out_folder: str, sh_order: int, do_flip_bvecs: bool):
        """ Perform analytical q-ball reconstruction with solid angle consideration and output the resulting
        spherical harmonics ODFs. Returns the path of the ODF image written to out_folder.
        """
        os.makedirs(out_folder, exist_ok=True)

        if do_flip_bvecs:
            # Flip the x component of the gradients and retarget the input
            # paths to the flipped copy inside out_folder.
            flipper = CmdInterface(flip_bvecs)
            flipper.add_arg('input_dwi', input_dwi, check_input=True)
            input_dwi = input_dwi.replace(os.path.dirname(input_dwi), out_folder)
            input_dwi = input_dwi.replace('.nii.gz', '_flipped.nii.gz')
            flipper.add_arg('output_dwi', input_dwi, check_output=True)
            flipper.run()

        qball_recon = CmdInterface('MitkQballReconstruction')
        qball_recon.add_arg(key='-i', arg=input_dwi, check_input=True)
        qball_recon.add_arg(key='-o', arg=out_folder + 'odf_qball_mitk.nii.gz', check_output=True)
        qball_recon.add_arg(key='--sh_order', arg=sh_order)
        qball_recon.run()
        return out_folder + 'odf_qball_mitk.nii.gz'

    @staticmethod
    def recon_tensor(input_dwi: str, out_folder: str, do_flip_bvecs: bool = False):
        """ Perform diffusion tensor modelling of the signal.
        Returns the path of the tensor image written to out_folder.
        """
        os.makedirs(out_folder, exist_ok=True)

        if do_flip_bvecs:
            # Same bvec-flipping preprocessing as in recon_qball.
            flipper = CmdInterface(flip_bvecs)
            flipper.add_arg('input_dwi', input_dwi, check_input=True)
            input_dwi = input_dwi.replace(os.path.dirname(input_dwi), out_folder)
            input_dwi = input_dwi.replace('.nii.gz', '_flipped.nii.gz')
            flipper.add_arg('output_dwi', input_dwi, check_output=True)
            flipper.run()

        tensor_recon = CmdInterface('MitkTensorReconstruction')
        tensor_recon.add_arg(key='-i', arg=input_dwi, check_input=True)
        tensor_recon.add_arg(key='-o', arg=out_folder + 'tensors_mitk.dti', check_output=True)
        tensor_recon.run()
        return out_folder + 'tensors_mitk.dti'

    @staticmethod
    def train_rf(i: str,
                 t: str,
                 out_folder: str,
                 masks: str = None,
                 wm_masks: str = None,
                 volume_modification_images: str = None,
                 additional_feature_images: str = None,
                 num_trees: int = 30,
                 max_tree_depth: int = 25,
                 sample_fraction: float = 0.7,
                 use_sh_features: bool = 0,
                 sampling_distance: float = None,
                 max_wm_samples: int = None,
                 num_gm_samples: int = None):
        """
        Train a random forest for machine learning based tractography.
        Returns the path of the forest file written to out_folder.

        i: input diffusion-weighted images
        t: input training tractograms
        out_folder: output folder for the random forest (HDF5)
        masks: restrict training using a binary mask image (optional)
        wm_masks: binary white matter mask; if none is specified, the training
            tractogram envelope is presumably used instead -- TODO confirm
        volume_modification_images: specify a list of float images that modify the fiber density (optional)
        additional_feature_images: specify a list of float images that hold additional features (float) (optional)
        num_trees: number of trees (optional)
        max_tree_depth: maximum tree depth (optional)
        sample_fraction: fraction of samples used per tree (optional)
        use_sh_features: use SH features (optional)
        sampling_distance: resampling parameter for the input tractogram in mm (determines number of white-matter samples) (optional)
        max_wm_samples: upper limit for the number of WM samples (optional)
        num_gm_samples: Number of gray matter samples per voxel (optional)
        """
        runner = CmdInterface('MitkRfTraining')
        runner.add_arg(key='-i', arg=i, check_input=True)
        runner.add_arg(key='-t', arg=t, check_input=True)
        runner.add_arg(key='-o', arg=out_folder + 'forest_mitk.rf', check_output=True)
        # All remaining options are forwarded only when explicitly set.
        if masks is not None:
            runner.add_arg(key='--masks', arg=masks)
        if wm_masks is not None:
            runner.add_arg(key='--wm_masks', arg=wm_masks)
        if volume_modification_images is not None:
            runner.add_arg(key='--volume_modification_images', arg=volume_modification_images)
        if additional_feature_images is not None:
            runner.add_arg(key='--additional_feature_images', arg=additional_feature_images)
        if num_trees is not None:
            runner.add_arg(key='--num_trees', arg=num_trees)
        if max_tree_depth is not None:
            runner.add_arg(key='--max_tree_depth', arg=max_tree_depth)
        if sample_fraction is not None:
            runner.add_arg(key='--sample_fraction', arg=sample_fraction)
        if use_sh_features is not None:
            runner.add_arg(key='--use_sh_features', arg=use_sh_features)
        if sampling_distance is not None:
            runner.add_arg(key='--sampling_distance', arg=sampling_distance)
        if max_wm_samples is not None:
            runner.add_arg(key='--max_wm_samples', arg=max_wm_samples)
        if num_gm_samples is not None:
            runner.add_arg(key='--num_gm_samples', arg=num_gm_samples)
        runner.run()
        return out_folder + 'forest_mitk.rf'

    @staticmethod
    def track_streamline(i: str,
                         out_folder: str,
                         algorithm: str,
                         flip_x: bool = False,
                         flip_y: bool = False,
                         flip_z: bool = False,
                         no_data_interpolation: bool = False,
                         no_mask_interpolation: bool = False,
                         compress: float = None,
                         seeds: int = 1,
                         seed_image: str = None,
                         trials_per_seed: int = 10,
                         max_tracts: int = -1,
                         tracking_mask: str = None,
                         stop_image: str = None,
                         exclusion_image: str = None,
                         ep_constraint: str = None,
                         target_image: str = None,
                         sharpen_odfs: bool = False,
                         cutoff: float = 0.1,
                         odf_cutoff: float = 0,
                         step_size: float = 0.5,
                         min_tract_length: float = 20,
                         angular_threshold: float = None,
                         loop_check: float = None,
                         prior_image: str = None,
                         prior_weight: float = 0.5,
                         restrict_to_prior: bool = False,
                         new_directions_from_prior: bool = False,
                         num_samples: int = 0,
                         sampling_distance: float = 0.25,
                         use_stop_votes: bool = False,
                         use_only_forward_samples: bool = False,
                         tend_f: float = 1,
                         tend_g: float = 0,
                         forest: str = None,
                         use_sh_features: bool = False,
                         additional_images: str = None):
        """
        Perform MITK streamline tractography.
        Returns the path of the tractogram (.trk) written to out_folder.

        i: input image (multiple possible for 'DetTensor' algorithm)
        out_folder: output folder
        algorithm: which algorithm to use (e.g. 'Peaks', 'DetTensor', ...;
            see MitkStreamlineTractography --help for the full list)
        flip_x: multiply x-coordinate of direction proposal by -1 (optional)
        flip_y: multiply y-coordinate of direction proposal by -1 (optional)
        flip_z: multiply z-coordinate of direction proposal by -1 (optional)
        no_data_interpolation: don't interpolate input image values (optional)
        no_mask_interpolation: don't interpolate mask image values (optional)
        compress: compress output fibers using the given error threshold (in mm) (optional)
        seeds: number of seed points per voxel (optional)
        seed_image: mask image defining seed voxels (optional)
        trials_per_seed: try each seed N times until a valid streamline is obtained (only for probabilistic tractography) (optional)
        max_tracts: tractography is stopped if the reconstructed number of tracts is exceeded (optional)
        tracking_mask: streamlines leaving the mask will stop immediately (optional)
        stop_image: streamlines entering the mask will stop immediately (optional)
        exclusion_image: streamlines entering the mask will be discarded (optional)
        ep_constraint: determines which fibers are accepted based on their endpoint location - options are NONE
            and others; see MitkStreamlineTractography --help for the choices (optional)
        target_image: effect depends on the chosen endpoint constraint (option ep_constraint) (optional)
        sharpen_odfs: if you are using dODF images as input, sharpening the
            ODFs can improve the directionality of the proposals (optional)
        cutoff: scalar stopping threshold (e.g. on the FA/GFA image) (optional)
        odf_cutoff: threshold on the ODF magnitude. this is useful in case of CSD fODF tractography. (optional)
        step_size: step size (in voxels) (optional)
        min_tract_length: minimum fiber length (in mm) (optional)
        angular_threshold: angular threshold between two successive steps (optional)
        loop_check: threshold on angular stdev over the last 4 voxel lengths (optional)
        prior_image: tractography prior in thr for of a peak image (optional)
        prior_weight: weighting factor between prior and data. (optional)
        restrict_to_prior: restrict tractography to regions where the prior is valid. (optional)
        new_directions_from_prior: the prior can create directions where there are none in the data. (optional)
        num_samples: number of neighborhood samples that are use to determine the next progression direction (optional)
        sampling_distance: distance of neighborhood sampling points (in voxels) (optional)
        use_stop_votes: use stop votes (optional)
        use_only_forward_samples: use only forward samples (optional)
        tend_f: weighting factor between first eigenvector (f=1 equals FACT tracking) and input vector dependent direction (f=0). (optional)
        tend_g: weighting factor between input vector (g=0) and tensor deflection (g=1 equals TEND tracking) (optional)
        forest: input random forest (HDF5 file) (optional)
        use_sh_features: use SH features (optional)
        additional_images: specify a list of float images that hold additional
            information (e.g. FA) for the machine-learning algorithms (optional)
        """
        os.makedirs(out_folder, exist_ok=True)
        # Output name encodes input file stem and the chosen algorithm.
        tracts = out_folder + os.path.basename(i).split('.')[0] + '_' + algorithm + '_mitk.trk'
        runner = CmdInterface('MitkStreamlineTractography')
        runner.add_arg(key='-i', arg=i, check_input=True)
        runner.add_arg(key='-o', arg=tracts, check_output=True)
        runner.add_arg(key='--algorithm', arg=algorithm)
        # Flags and optional values are only forwarded when set.
        if flip_x:
            runner.add_arg(key='--flip_x')
        if flip_y:
            runner.add_arg(key='--flip_y')
        if flip_z:
            runner.add_arg(key='--flip_z')
        if no_data_interpolation:
            runner.add_arg(key='--no_data_interpolation')
        if no_mask_interpolation:
            runner.add_arg(key='--no_mask_interpolation')
        if compress is not None:
            runner.add_arg(key='--compress', arg=compress)
        if seeds is not None:
            runner.add_arg(key='--seeds', arg=seeds)
        if seed_image is not None:
            runner.add_arg(key='--seed_image', arg=seed_image)
        if trials_per_seed is not None:
            runner.add_arg(key='--trials_per_seed', arg=trials_per_seed)
        if max_tracts is not None:
            runner.add_arg(key='--max_tracts', arg=max_tracts)
        if tracking_mask is not None:
            runner.add_arg(key='--tracking_mask', arg=tracking_mask)
        if stop_image is not None:
            runner.add_arg(key='--stop_image', arg=stop_image)
        if exclusion_image is not None:
            runner.add_arg(key='--exclusion_image', arg=exclusion_image)
        if ep_constraint is not None:
            runner.add_arg(key='--ep_constraint', arg=ep_constraint)
        if target_image is not None:
            runner.add_arg(key='--target_image', arg=target_image)
        if sharpen_odfs:
            runner.add_arg(key='--sharpen_odfs')
        if cutoff is not None:
            runner.add_arg(key='--cutoff', arg=cutoff)
        if odf_cutoff is not None:
            runner.add_arg(key='--odf_cutoff', arg=odf_cutoff)
        if step_size is not None:
            runner.add_arg(key='--step_size', arg=step_size)
        if min_tract_length is not None:
            runner.add_arg(key='--min_tract_length', arg=min_tract_length)
        if angular_threshold is not None:
            runner.add_arg(key='--angular_threshold', arg=angular_threshold)
        if loop_check is not None:
            runner.add_arg(key='--loop_check', arg=loop_check)
        if prior_image is not None:
            runner.add_arg(key='--prior_image', arg=prior_image)
        if prior_weight is not None:
            runner.add_arg(key='--prior_weight', arg=prior_weight)
        if restrict_to_prior:
            runner.add_arg(key='--restrict_to_prior')
        if new_directions_from_prior:
            runner.add_arg(key='--new_directions_from_prior')
        if num_samples is not None:
            runner.add_arg(key='--num_samples', arg=num_samples)
        if sampling_distance is not None:
            runner.add_arg(key='--sampling_distance', arg=sampling_distance)
        if use_stop_votes:
            runner.add_arg(key='--use_stop_votes')
        if use_only_forward_samples:
            runner.add_arg(key='--use_only_forward_samples')
        if tend_f is not None:
            runner.add_arg(key='--tend_f', arg=tend_f)
        if tend_g is not None:
            runner.add_arg(key='--tend_g', arg=tend_g)
        if forest is not None:
            runner.add_arg(key='--forest', arg=forest)
        if use_sh_features:
            runner.add_arg(key='--use_sh_features')
        if additional_images is not None:
            runner.add_arg(key='--additional_images', arg=additional_images)
        runner.run()
        return tracts

    @staticmethod
    def mitkglobaltractography(i: str,
                               out_folder: str,
                               parameters: str,
                               mask: str = None):
        """
        Perform MITK global tractography. Save a paramter file for usage using the MITK Diffusion GUI application.
        http://mitk.org/wiki/MitkDiffusion
        Returns the path of the tractogram (.trk) written to out_folder.

        i: input image (tensor, ODF or SH-coefficient image -- TODO confirm accepted types)
        out_folder: output folder
        parameters: parameter file (.gtp)
        mask: binary mask image (optional)
        """
        os.makedirs(out_folder, exist_ok=True)
        tracts = out_folder + os.path.basename(i).split('.')[0] + '_Global_mitk.trk'
        runner = CmdInterface('MitkGlobalTractography')
        runner.add_arg(key='-i', arg=i, check_input=True)
        runner.add_arg(key='-o', arg=tracts, check_output=True)
        runner.add_arg(key='--parameters', arg=parameters)
        if mask is not None:
            runner.add_arg(key='--mask', arg=mask, check_input=True)
        runner.run()
        return tracts
class MRtrixTrackingHelper:
    """Static helpers wrapping MRtrix3 command-line tools (dwi2response,
    dwi2fod, tckgen) and MITK post-processing for fibre tractography."""
    @staticmethod
    def recon_csd(input_dwi: str, do_flip_bvecs: bool, out_folder: str, algo: str = 'tournier'):
        """ Perform constrained spherical deconvolution modelling and output the resulting spherical harmonics fODFs.

        input_dwi: input DWI image (.nii.gz); gradient files with the same
            basename (.bvecs/.bvals) are expected next to it
        do_flip_bvecs: if True, flip the b-vectors first and continue with the
            flipped copy written into out_folder
        out_folder: output folder (created if missing)
        algo: dwi2response algorithm; 'tournier' yields a single response
            function, anything else is run as multi-tissue CSD (3 responses)
        Returns the path of the first fODF image.
        """
        os.makedirs(out_folder, exist_ok=True)
        if do_flip_bvecs:
            flipper = CmdInterface(flip_bvecs)
            flipper.add_arg('input_dwi', input_dwi, check_input=True)
            # From here on, work with the flipped copy placed in out_folder.
            input_dwi = input_dwi.replace(os.path.dirname(input_dwi), out_folder)
            input_dwi = input_dwi.replace('.nii.gz', '_flipped.nii.gz')
            flipper.add_arg('output_dwi', input_dwi, check_output=True)
            flipper.run()
        # 'tournier' is single-response CSD; any other algorithm is run as
        # multi-shell multi-tissue CSD with 3 response functions.
        csd_algo = 'csd'
        num_responses = 1
        if algo != 'tournier':
            num_responses = 3
            csd_algo = 'msmt_csd'
        # FSL-style gradient files share the input's basename.
        fslgrad = [input_dwi.replace('.nii.gz', '.bvecs'),
                   input_dwi.replace('.nii.gz', '.bvals')]
        # Estimate the response function(s).
        dwi2response = CmdInterface('dwi2response')
        dwi2response.add_arg(arg=algo)
        dwi2response.add_arg(arg=input_dwi, check_input=True)
        for i in range(num_responses):
            dwi2response.add_arg(arg=out_folder + 'response_' + algo + '_' + str(i) + '_mrtrix.txt', check_output=True)
        dwi2response.add_arg(key='-force')
        dwi2response.add_arg('-nthreads', 12)
        dwi2response.add_arg(key='-fslgrad', arg=fslgrad, check_input=True)
        dwi2response.run()
        # Fit the fODFs from the estimated response function(s).
        dwi2fod = CmdInterface('dwi2fod')
        dwi2fod.add_arg(arg=csd_algo)
        dwi2fod.add_arg(arg=input_dwi, check_input=True)
        for i in range(num_responses):
            dwi2fod.add_arg(arg=out_folder + 'response_' + algo + '_' + str(i) + '_mrtrix.txt', check_input=True)
            dwi2fod.add_arg(arg=out_folder + 'odf_' + csd_algo + '_' + str(i) + '_mrtrix.nii.gz', check_output=True)
        dwi2fod.add_arg(key='-force')
        dwi2fod.add_arg('-nthreads', 12)
        dwi2fod.add_arg(key='-fslgrad', arg=fslgrad, check_input=True)
        dwi2fod.run(version_arg='--version')
        # NOTE(review): an sh2peaks + MitkFlipPeaks stage used to follow the
        # return below but was unreachable (dead code after the return) and has
        # been removed; reinstate it *before* this return if peak images are
        # needed again.
        return out_folder + 'odf_' + csd_algo + '_' + str(0) + '_mrtrix.nii.gz'
    @staticmethod
    def track_streamline(input_image: str,
                         out_folder: str,
                         algo: str,
                         num_streamlines: int,
                         cutoff: float = 0.1,
                         minlength: int = 30,
                         maxlength: int = 200,
                         step: float = None,
                         angle: float = None):
        """ Perform MRtrix streamline tractography.

        input_image: input image (.nii.gz) with .bvecs/.bvals alongside
        out_folder: output folder (created if missing)
        algo: tckgen algorithm name ('Tensor_Det'/'Tensor_Prob' are rejected)
        num_streamlines: number of streamlines to select
        cutoff, minlength, maxlength, step, angle: tckgen tracking parameters
        Returns the path of the compressed .trk tractogram.
        """
        os.makedirs(out_folder, exist_ok=True)
        tracts = out_folder + os.path.basename(input_image).split('.')[0] + '_' + algo + '_mrtrix'
        tckgen = CmdInterface('tckgen')
        tckgen.add_arg(arg=input_image, check_input=True)
        if algo == 'Tensor_Det' or algo == 'Tensor_Prob':
            # Tensor-based algorithms are not supported here; exit() keeps the
            # original SystemExit behaviour for backward compatibility.
            print(algo + ' NOT IMPLEMENTED')
            exit()
        tckgen.add_arg(key='-fslgrad',
                       arg=[input_image.replace('.nii.gz', '.bvecs'), input_image.replace('.nii.gz', '.bvals')])
        tckgen.add_arg(arg=tracts + '.tck', check_output=True)
        tckgen.add_arg('-algorithm', algo)
        tckgen.add_arg('-seed_dynamic', input_image)
        tckgen.add_arg('-nthreads', 12)
        tckgen.add_arg('-select', num_streamlines)
        tckgen.add_arg('-minlength', minlength)
        tckgen.add_arg('-maxlength', maxlength)
        tckgen.add_arg('-cutoff', cutoff)
        if step is not None:
            tckgen.add_arg('-step', step)
        if angle is not None:
            tckgen.add_arg('-angle', angle)
        tckgen.add_arg('-force')
        tckgen.run(version_arg='--version')
        # Compress and convert .tck -> .trk with MITK.
        postproc = CmdInterface('MitkFiberProcessing')
        postproc.add_arg('-i', tracts + '.tck', check_input=True)
        postproc.add_arg('--compress', 0.1)
        postproc.add_arg('-o', tracts + '.trk', check_output=True)
        postproc.run()
        return tracts + '.trk'
| 48.755245 | 140 | 0.616275 | 20,068 | 0.959457 | 0 | 0 | 19,936 | 0.953146 | 0 | 0 | 7,382 | 0.352936 |
943433e7904bd6cd00af3cb006c316f8db03e34e | 3,063 | py | Python | tests/misc/normalize_volume.py | ysatapathy23/TomoEncoders | 6f3f8c6dd088e4df968337e33a034a42d1f6c799 | [
"BSD-3-Clause"
] | 1 | 2021-06-23T18:09:57.000Z | 2021-06-23T18:09:57.000Z | tests/misc/normalize_volume.py | ysatapathy23/TomoEncoders | 6f3f8c6dd088e4df968337e33a034a42d1f6c799 | [
"BSD-3-Clause"
] | 3 | 2021-08-24T17:53:48.000Z | 2021-11-26T07:50:43.000Z | tests/misc/normalize_volume.py | ysatapathy23/TomoEncoders | 6f3f8c6dd088e4df968337e33a034a42d1f6c799 | [
"BSD-3-Clause"
] | 5 | 2021-07-01T20:56:24.000Z | 2022-03-22T18:25:47.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import sys
import matplotlib.pyplot as plt
import numpy as np
# from tomo_encoders.misc_utils.feature_maps_vis import view_midplanes
import cupy as cp
import time
import h5py
#from recon_subvol import fbp_filter, recon_patch
# from tomo_encoders import DataFile
import os
fpath = '/data02/MyArchive/AM_part_Xuan/data/mli_L206_HT_650_L3_rec_1x1_uint16.hdf5'
binning = 1
def _rescale_data(data, min_val, max_val):
'''
Recales data to values into range [min_val, max_val]. Data can be any numpy or cupy array of any shape.
'''
xp = cp.get_array_module(data) # 'xp' is a standard usage in the community
eps = 1e-12
data = (data - min_val) / (max_val - min_val + eps)
return data
def _find_min_max(vol, sampling_factor):
    '''
    Return (max, min) of `vol` estimated on a strided subsample.
    vol: 3D numpy or cupy array
    sampling_factor: stride used along every axis when subsampling
    '''
    ss = slice(None, None, sampling_factor)
    # Slice once instead of three times (the original recomputed vol[ss,ss,ss]
    # for the array-module lookup, the max, and the min).
    sub = vol[ss, ss, ss]
    xp = cp.get_array_module(sub)  # numpy or cupy, matching the input
    return xp.max(sub), xp.min(sub)
def normalize_volume_gpu(vol, chunk_size = 64, normalize_sampling_factor = 1):
    '''
    Normalizes volume to values into range [0,1], in place, by streaming
    chunks of `chunk_size` slices through the GPU (copy-in, rescale,
    copy-out). Returns the same `vol` array, modified in place.
    vol: 3D host (numpy) array, chunked along axis 0
    chunk_size: number of axis-0 slices transferred to the GPU per iteration
    normalize_sampling_factor: stride used when estimating the global min/max
    '''
    tot_len = vol.shape[0]
    nchunks = int(np.ceil(tot_len/chunk_size))
    # Global min/max estimated once on a subsampled view of the whole volume.
    max_val, min_val = _find_min_max(vol, normalize_sampling_factor)
    proc_times = []
    copy_to_times = []
    copy_from_times = []
    stream1 = cp.cuda.Stream()
    t0 = time.time()
    # Reusable device buffer of one chunk.
    # NOTE(review): the buffer is sized for a full chunk; if tot_len is not a
    # multiple of chunk_size, the final shorter slice is assigned into it via
    # .set() -- confirm this handles the shape mismatch (callers here use
    # evenly dividing sizes).
    vol_gpu = cp.zeros((chunk_size, vol.shape[1], vol.shape[2]), dtype = cp.float32)
    for jj in range(nchunks):
        t01 = time.time()
        sz = slice(jj*chunk_size, min((jj+1)*chunk_size, tot_len))
        ## copy to gpu from cpu
        with stream1:
            vol_gpu.set(vol[sz,...])
        stream1.synchronize()
        t02 = time.time()
        copy_to_times.append(t02-t01)
        ## process
        with stream1:
            vol_gpu = _rescale_data(vol_gpu, min_val, max_val)
        stream1.synchronize()
        t03 = time.time()
        proc_times.append(t03-t02)
        ## copy from gpu to cpu (writes back into the input volume)
        with stream1:
            vol[sz,...] = vol_gpu.get()
        stream1.synchronize()
        t04 = time.time()
        copy_from_times.append(t04 - t03)
    # Report mean per-chunk timings for each stage of the pipeline.
    print("copy to gpu time per %i size chunk: %.2f ms"%(chunk_size,np.mean(copy_to_times)*1000.0))
    print("processing time per %i size chunk: %.2f ms"%(chunk_size,np.mean(proc_times)*1000.0))
    print("copy from gpu time per %i size chunk: %.2f ms"%(chunk_size,np.mean(copy_from_times)*1000.0))
    print("total time: ", time.time() - t0)
    return vol
# Optional first CLI argument overrides the default GPU chunk size.
chunk_size = int(sys.argv[1]) if len(sys.argv) > 1 else 64
if __name__ == "__main__":
    # Smoke test: stream a random float32 volume through the normaliser.
    vol = np.random.normal(0.0, 1.0, (512, 1224, 1224)).astype(np.float32)
    print("input volume: ", vol.shape)
    vol = normalize_volume_gpu(vol, chunk_size=chunk_size, normalize_sampling_factor=4)
| 28.100917 | 109 | 0.625531 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 791 | 0.258244 |
9436ee02169dab30dcc57c297b85baa82e5a02a7 | 246 | py | Python | deeptrade/__init__.py | deeptrade-tech/deeptrade_api | 4612f1057ff4532b3f4b96f3fe07c2b7150f80b7 | [
"MIT"
] | 1 | 2019-09-30T05:33:24.000Z | 2019-09-30T05:33:24.000Z | deeptrade/__init__.py | deeptrade-tech/deeptrade_api | 4612f1057ff4532b3f4b96f3fe07c2b7150f80b7 | [
"MIT"
] | null | null | null | deeptrade/__init__.py | deeptrade-tech/deeptrade_api | 4612f1057ff4532b3f4b96f3fe07c2b7150f80b7 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
# Configuration variables: set `deeptrade.api_key` before using the API.
# `api_base` is the service endpoint root (presumably read by the
# sub-modules imported below -- TODO(review): confirm).
api_key = None
api_base = "https://www.deeptrade.ch/"
# Re-export the sentiment API at package level.
from deeptrade.sentiment import *
# Re-export the stocks API at package level.
from deeptrade.stocks import *
| 15.375 | 64 | 0.776423 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.321138 |
9437b4664dddc71767596c5c57b1d9b243110f39 | 1,415 | py | Python | Movie_IMDB/process_3.py | likelyzhao/Homeland | b639add8f302662bd64c5cd36cdf1cd267b1f55e | [
"Apache-2.0"
] | null | null | null | Movie_IMDB/process_3.py | likelyzhao/Homeland | b639add8f302662bd64c5cd36cdf1cd267b1f55e | [
"Apache-2.0"
] | null | null | null | Movie_IMDB/process_3.py | likelyzhao/Homeland | b639add8f302662bd64c5cd36cdf1cd267b1f55e | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
import os
import json
access_key = 'Access_Key'
secret_key = 'Secret_Key'
bucket_name = 'Bucket_Name'
bucket_based_url = "Based_url"
localfile = 'bbb.png'
json_file = 'splits.json'
threshold = 0.5
#movie_header = 'http://ozqw10x19.bkt.clouddn.com/IMDB评选TOP250/'
def _mkdir(path):
if not os.path.exists(path):
os.mkdir(path)
# NOTE(review): not referenced anywhere in this file -- looks like a leftover
# tuning constant; confirm before removing.
expand_range =3
def convertframe2time(frame_idx):
    """Convert a frame index (treated as seconds here) to an 'HH:MM:SS' string.

    Uses floor division so the result is correct on both Python 2 and 3:
    the original `/` produced floats under Python 3, which broke the
    zero-padded '{:02}' formatting.
    """
    hour, remainder = divmod(frame_idx, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '{0:02}:{1:02}:{2:02}'.format(hour, minutes, seconds)
def make_clips(movie_name,annotation_video):
    """Download a movie from cloud storage and cut one .avi clip per
    annotated [start, end] time window using ffmpeg.

    movie_name: file name of the movie on the remote bucket
    annotation_video: list of dicts, each with a 'time' pair
        [start_frame, end_frame]; nothing is done when the list is empty
    """
    import subprocess
    if len(annotation_video) == 0:
        return
    # Fetch the movie via curl into the local scratch file 'movie_temp',
    # overriding the Host header for the CDN.
    # NOTE(review): shell=True with movie_name interpolated into the command
    # string is shell-injection-prone if movie names are untrusted -- confirm
    # the input source before reuse.
    cmd = u'curl http://xsio.qiniu.io/IMDB评选TOP250/' + movie_name + ' -H \'Host:ozqw10x19.bkt.clouddn.com\' -o movie_temp'
    print(cmd)
    subprocess.call([cmd],shell=True)
    # Cut each annotated window out of the downloaded file with ffmpeg:
    # -ss = start time, -t = duration, output named <movie>_<index>.avi.
    for idx,anno in enumerate(annotation_video):
        ffmpeg_cmd = 'ffmpeg'
        start = anno['time'][0]
        end = anno['time'][1]
        ffmpeg_cmd += ' -ss ' + convertframe2time(start)
        ffmpeg_cmd += ' -t ' + convertframe2time(end-start)
        ffmpeg_cmd += ' -i movie_temp '
        ffmpeg_cmd += movie_name + '_' + str(idx) + '.avi'
        print(ffmpeg_cmd)
        subprocess.call([ffmpeg_cmd],shell=True)
# Drive clip extraction from the split annotations: one JSON object per line,
# each carrying the movie URL and its annotated video segments.
with open(json_file) as f:
    for line in f:
        # Renamed from `dict` -- do not shadow the builtin.
        record = json.loads(line)
        url = record['url']
        # Movie name = last path component up to the first underscore.
        movie_name = url.split('/')[-1].split('_')[0]
        make_clips(movie_name, record['video'])
| 23.583333 | 119 | 0.688339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 359 | 0.252284 |
94388603ebcb4df77f1d83d5d6abd9980c49bf05 | 261 | py | Python | maad/cluster/__init__.py | jflatorreg/scikit-maad | f7c4ac1370dcf416b7014f94784d71549623593f | [
"BSD-3-Clause"
] | 3 | 2021-04-17T21:13:57.000Z | 2021-04-25T00:55:18.000Z | maad/cluster/__init__.py | jflatorreg/scikit-maad | f7c4ac1370dcf416b7014f94784d71549623593f | [
"BSD-3-Clause"
] | null | null | null | maad/cluster/__init__.py | jflatorreg/scikit-maad | f7c4ac1370dcf416b7014f94784d71549623593f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
""" cluster functions for scikit-maad
Cluster regions of interest using High Dimensional Data Clsutering (HDDC).
"""
from .hdda import (HDDC)
from .cluster_func import (do_PCA)
__all__ = ['HDDC',
'do_PCA']
| 18.642857 | 76 | 0.628352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 159 | 0.609195 |
943af4c9b4f0d02449e03edde4cb90f8a5ee5e58 | 929 | py | Python | pipng/imagescale/Globals.py | nwiizo/joke | 808c4c998cc7f5b7f6f3fb5a3ce421588a70c087 | [
"MIT"
] | 1 | 2017-01-11T06:12:24.000Z | 2017-01-11T06:12:24.000Z | pipng/imagescale/Globals.py | ShuyaMotouchi/joke | 808c4c998cc7f5b7f6f3fb5a3ce421588a70c087 | [
"MIT"
] | null | null | null | pipng/imagescale/Globals.py | ShuyaMotouchi/joke | 808c4c998cc7f5b7f6f3fb5a3ce421588a70c087 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# UI labels and settings keys shared across the application.
ABOUT = "About"
APPNAME = "ImageScale"
GENERAL = "General"
PAD = "0.75m"
POSITION = "position"
RESTORE = "Restore"
# Markers distinguishing the source and target image sides.
SOURCE, TARGET = ("SOURCE", "TARGET")
VERSION = "1.0.0"
# Worker states for the scaling process.
WORKING, CANCELED, TERMINATING, IDLE = ("WORKING", "CANCELED",
                                        "TERMINATING", "IDLE")
# Raised to abort an in-progress operation.
class Canceled(Exception): pass
| 38.708333 | 73 | 0.722282 | 31 | 0.033333 | 0 | 0 | 0 | 0 | 0 | 0 | 723 | 0.777419 |
943af91b6e1ff66300bbebded33c0dacc1346cc2 | 964 | py | Python | explore_dataset.py | andreybicalho/attention-ocr-1 | f963eebd85b6d0f239935c2fe5aa1dfb1f95d387 | [
"MIT"
] | null | null | null | explore_dataset.py | andreybicalho/attention-ocr-1 | f963eebd85b6d0f239935c2fe5aa1dfb1f95d387 | [
"MIT"
] | null | null | null | explore_dataset.py | andreybicalho/attention-ocr-1 | f963eebd85b6d0f239935c2fe5aa1dfb1f95d387 | [
"MIT"
] | null | null | null | import random
from PIL import Image
from captcha.image import ImageCaptcha
from utils.dataset import CaptchaDataset
from utils.img_util import display_images
from torchvision import transforms
import numpy as np
# Captcha geometry and alphabet for the generated samples.
img_width = 160
img_height = 60
n_chars = 7
chars = list('1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
gen = ImageCaptcha(img_width, img_height)
# Earlier single-channel pipeline, kept for reference:
#img_trans = transforms.Compose([
#    transforms.Grayscale(num_output_channels=1)
#    ,transforms.ToTensor()
#    ,transforms.Normalize(mean=[0.5], std=[0.5])
##])
# Greyscale replicated to 3 channels, then scaled to [-1, 1].
img_trans = transforms.Compose([
    transforms.Grayscale(num_output_channels=3)
    ,transforms.ToTensor()
    ,transforms.Normalize(mean=[0.5, 0.5, 0.5], std=(0.5, 0.5, 0.5))
])
# Draw n_chars random alphabet indices and render the resulting string.
content = [random.randrange(0, len(chars)) for _ in range(n_chars)]
s = ''.join([chars[i] for i in content])
d = gen.generate(s)
d = Image.open(d)
t = img_trans(d)
# NOTE(review): missing separator in the f-string -- prints "shapetorch.Size(...)".
print(f'\ntensor shape{t.shape}')
display_images(t.numpy(), 1, 3)
943b0458f99c1e70af51f3bcd5f158a987eb40cf | 4,197 | py | Python | features/extract_features_buckeye.py | kamperh/bucktsong_eskmeans | fe1e19aa77bb47e0c71f22f75edff87a25edca94 | [
"MIT"
] | 1 | 2021-02-18T14:44:17.000Z | 2021-02-18T14:44:17.000Z | features/extract_features_buckeye.py | kamperh/bucktsong_eskmeans | fe1e19aa77bb47e0c71f22f75edff87a25edca94 | [
"MIT"
] | null | null | null | features/extract_features_buckeye.py | kamperh/bucktsong_eskmeans | fe1e19aa77bb47e0c71f22f75edff87a25edca94 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Extract MFCC and filterbank features for the Buckeye dataset.
Author: Herman Kamper
Contact: kamperh@gmail.com
Date: 2019, 2021
"""
from datetime import datetime
from os import path
from tqdm import tqdm
import argparse
import numpy as np
import os
import sys
sys.path.append("..")
from paths import buckeye_datadir
import features
import utils
def extract_features_for_subset(subset, feat_type, output_fn):
    """Extract "mfcc" or "fbank" features for one Buckeye subset and save
    them (VAD-trimmed and per-speaker mean/variance normalised) to a
    compressed NumPy archive at `output_fn`."""
    # Determine which speakers belong to this subset.
    speaker_fn = path.join("..", "data", "buckeye_" + subset + "_speakers.list")
    print("Reading:", speaker_fn)
    with open(speaker_fn) as f:
        speakers = {line.strip() for line in f}
    print("Speakers:", ", ".join(sorted(speakers)))
    # Extract raw features speaker by speaker and re-key each utterance as
    # "<speaker>_<utterance-id>" (dropping the first three key characters).
    feat_dict = {}
    print("Extracting features per speaker:")
    for speaker in sorted(speakers):
        speaker_dir = path.join(buckeye_datadir, speaker)
        if feat_type == "mfcc":
            speaker_feats = features.extract_mfcc_dir(speaker_dir)
        elif feat_type == "fbank":
            speaker_feats = features.extract_fbank_dir(speaker_dir)
        else:
            assert False, "invalid feature type"
        for wav_key in speaker_feats:
            feat_dict[speaker + "_" + wav_key[3:]] = speaker_feats[wav_key]
    # Voice activity regions come from the forced alignments.
    fa_fn = path.join("..", "data", "buckeye_english.wrd")
    print("Reading:", fa_fn)
    vad_dict = utils.read_vad_from_fa(fa_fn)
    # Keep only voice-active regions.
    print("Extracting VAD regions:")
    feat_dict = features.extract_vad(feat_dict, vad_dict)
    # Normalise per speaker.
    print("Per speaker mean and variance normalisation:")
    feat_dict = features.speaker_mvn(feat_dict)
    # Save everything in one compressed archive.
    print("Writing:", output_fn)
    np.savez_compressed(output_fn, **feat_dict)
def main():
    """Extract Buckeye MFCCs per subset, build the same-different word list,
    and cut out the per-word MFCC segments. Every stage is skipped when its
    output file already exists, so the script is safely re-runnable."""
    print(datetime.now())
    # RAW FEATURES
    # Extract MFCCs for the different sets
    mfcc_dir = path.join("mfcc", "buckeye")
    for subset in ["devpart1", "devpart2", "zs"]:
        if not path.isdir(mfcc_dir):
            os.makedirs(mfcc_dir)
        output_fn = path.join(mfcc_dir, subset + ".dd.npz")
        if not path.isfile(output_fn):
            print("Extracting MFCCs:", subset)
            extract_features_for_subset(subset, "mfcc", output_fn)
        else:
            print("Using existing file:", output_fn)
    # # Extract filterbanks for the different sets
    # fbank_dir = path.join("fbank", "buckeye")
    # for subset in ["devpart1", "devpart2", "zs"]:
    #     if not path.isdir(fbank_dir):
    #         os.makedirs(fbank_dir)
    #     output_fn = path.join(fbank_dir, subset + ".npz")
    #     if not path.isfile(output_fn):
    #         print("Extracting filterbanks:", subset)
    #         extract_features_for_subset(subset, "fbank", output_fn)
    #     else:
    #         print("Using existing file:", output_fn)
    # GROUND TRUTH WORD SEGMENTS
    # Create a ground truth word list of at least 50 frames and 5 characters
    fa_fn = path.join("..", "data", "buckeye_english.wrd")
    list_dir = "lists"
    if not path.isdir(list_dir):
        os.makedirs(list_dir)
    list_fn = path.join(list_dir, "buckeye.samediff.list")
    if not path.isfile(list_fn):
        utils.write_samediff_words(fa_fn, list_fn)
    else:
        print("Using existing file:", list_fn)
    # Extract word segments from the MFCC NumPy archives
    for subset in ["devpart1", "devpart2", "zs"]:
        input_npz_fn = path.join(mfcc_dir, subset + ".dd.npz")
        output_npz_fn = path.join(mfcc_dir, subset + ".samediff.dd.npz")
        if not path.isfile(output_npz_fn):
            print("Extracting MFCCs for same-different word tokens:", subset)
            utils.segments_from_npz(input_npz_fn, list_fn, output_npz_fn)
        else:
            print("Using existing file:", output_npz_fn)
    print(datetime.now())
if __name__ == "__main__":
    main()
| 30.413043 | 79 | 0.637122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,677 | 0.399571 |
943b21bad087472d917929d954b13b2a355101b8 | 190 | py | Python | by-session/ta-921/j8/atal_matal.py | amiraliakbari/sharif-mabani-python | 5d14a08d165267fe71c28389ddbafe29af7078c5 | [
"MIT"
] | 2 | 2015-04-29T20:59:35.000Z | 2018-09-26T13:33:43.000Z | by-session/ta-921/j8/atal_matal.py | amiraliakbari/sharif-mabani-python | 5d14a08d165267fe71c28389ddbafe29af7078c5 | [
"MIT"
] | null | null | null | by-session/ta-921/j8/atal_matal.py | amiraliakbari/sharif-mabani-python | 5d14a08d165267fe71c28389ddbafe29af7078c5 | [
"MIT"
] | null | null | null | def f(a, start):
if len(a) == 1:
return a[0]
d = (start + 15 - 1) % len(a)
del a[d]
return f(a, d % len(a))
n = int(input())
# Parenthesised print, explicit list(), and floor division keep this line
# valid on BOTH Python 2 and 3: the original `print 1 + f(range(n*2), 0) / 2`
# was a Python-2-only print statement, `range()` is not deletable under
# Python 3, and `/` would yield a float there.
print(1 + f(list(range(n * 2)), 0) // 2)
| 19 | 34 | 0.426316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
943b27a6307e3dd5baf6b7f00cd5eae08fb5833d | 861 | py | Python | examples/old-examples/pygame/01_hello_world.py | Vallentin/ModernGL | 18ce7d3f28c68b2d305da332ae0afd221dc043d1 | [
"MIT"
] | null | null | null | examples/old-examples/pygame/01_hello_world.py | Vallentin/ModernGL | 18ce7d3f28c68b2d305da332ae0afd221dc043d1 | [
"MIT"
] | null | null | null | examples/old-examples/pygame/01_hello_world.py | Vallentin/ModernGL | 18ce7d3f28c68b2d305da332ae0afd221dc043d1 | [
"MIT"
] | 1 | 2020-07-10T23:26:36.000Z | 2020-07-10T23:26:36.000Z | import struct
import struct

import ModernGL
import pygame
from pygame.locals import DOUBLEBUF, OPENGL

# Minimal ModernGL + pygame example: draw one blue triangle.
pygame.init()
pygame.display.set_mode((800, 600), DOUBLEBUF | OPENGL)
ctx = ModernGL.create_context()
vert = ctx.vertex_shader('''
    #version 330
    in vec2 vert;
    void main() {
        gl_Position = vec4(vert, 0.0, 1.0);
    }
''')
frag = ctx.fragment_shader('''
    #version 330
    out vec4 color;
    void main() {
        color = vec4(0.30, 0.50, 1.00, 1.0);
    }
''')
# BUG FIX: the original read `ctx.program(vert, frag])` -- a syntax error.
# The old (pre-5.x) ModernGL API takes the vertex shader plus a list of the
# remaining shaders.
prog = ctx.program(vert, [frag])
# Three 2D vertices packed as 6 floats.
vbo = ctx.buffer(struct.pack('6f', 0.0, 0.8, -0.6, -0.8, 0.6, -0.8))
vao = ctx.simple_vertex_array(prog, vbo, ['vert'])
# Render loop: clear, draw, flip, until the window is closed.
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    ctx.clear(0.9, 0.9, 0.9)
    vao.render()
    pygame.display.flip()
    pygame.time.wait(10)
| 19.568182 | 68 | 0.609756 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.270616 |
943b73919f38e3439cb685654e78f62bab98a195 | 1,375 | py | Python | phpme/binx/console.py | eghojansu/phpme | 441129e0eaa91c571639dcf87a8e7d0c325aed39 | [
"MIT"
] | null | null | null | phpme/binx/console.py | eghojansu/phpme | 441129e0eaa91c571639dcf87a8e7d0c325aed39 | [
"MIT"
] | null | null | null | phpme/binx/console.py | eghojansu/phpme | 441129e0eaa91c571639dcf87a8e7d0c325aed39 | [
"MIT"
] | null | null | null | import os, json, subprocess
class Console():
    """Run PHP jobs via the bundled console.php helper, plus a git lookup.

    All methods are stateless (now marked @staticmethod, which keeps the
    existing Console.method(...) call sites working and also allows instance
    calls) and best-effort: failures are logged or mapped to empty results.
    """

    @staticmethod
    def get_interface_methods(namespace):
        """Return the PHP interface's methods as a dict, or {} on any failure."""
        try:
            output = Console.run_command('interface-methods', [namespace])
            return json.loads(output)
        except Exception:
            return {}

    @staticmethod
    def get_class_methods(namespace):
        """Return the PHP class's methods as a dict, or {} on any failure."""
        try:
            output = Console.run_command('class-methods', [namespace])
            return json.loads(output)
        except Exception:
            return {}

    @staticmethod
    def get_classes(symbol):
        """Return candidate classes for `symbol` as a list, or [] on any failure."""
        try:
            output = Console.run_command('classes', [symbol])
            return json.loads(output)
        except Exception:
            return []

    @staticmethod
    def git_config(config):
        """Return the value of a git config key, or None if the lookup fails."""
        try:
            return subprocess.check_output(['git', 'config', '--get', config]).decode('utf-8')
        except Exception as e:
            print('[Phpme]', 'error: ' + str(e))

    @staticmethod
    def run_command(command, args):
        """Run console.php (located next to this module) with command + args.

        Returns the decoded stdout on success; returns None when the helper
        reports an error (output starting with 'error') or the subprocess
        itself fails.
        """
        try:
            console = os.path.dirname(os.path.abspath(__file__)) + os.sep + 'console.php'
            output = subprocess.check_output(['php', '-f', console, command] + args).decode('utf-8')
            if output.startswith('error'):
                print('[Phpme]', output)
            else:
                return output
        except Exception as e:
            print('[Phpme]', 'error: ' + str(e))
| 28.645833 | 100 | 0.546182 | 1,344 | 0.977455 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.122182 |
943bf0558ebf6b4b809909be2b8a669c3b8ef8b4 | 1,189 | py | Python | app/core/tests/test_models.py | prafullkumar41/recipe-app-api | f66d076aafa6c8bb727d7f9bd03955c986e811c9 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | prafullkumar41/recipe-app-api | f66d076aafa6c8bb727d7f9bd03955c986e811c9 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | prafullkumar41/recipe-app-api | f66d076aafa6c8bb727d7f9bd03955c986e811c9 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Unit tests for the project's custom user model/manager."""
    def test_create_user_with_email(self):
        '''Test creating a new user with an email is successful'''
        email = 'test@gmail.com'
        password = 'test123'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))
    def test_user_email_is_normalize(self):
        '''Test that the email for a new user is normalized'''
        # NOTE(review): expects the WHOLE address lowercased; Django's default
        # normalize_email only lowercases the domain part, so this presumably
        # relies on a custom manager -- confirm against the manager code.
        email = 'test@DEV.COM'
        user = get_user_model().objects.create_user(email, 'test123')
        self.assertEqual(user.email, email.lower())
    def test_email_field_not_empty(self):
        '''Raises Error if email is not provided'''
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test123')
    def test_create_super_user(self):
        '''Test creating a new superuser'''
        # NOTE(review): `create_super_user` (with underscores) is not Django's
        # built-in `create_superuser` -- presumably defined on the custom
        # manager; confirm the name matches.
        user = get_user_model().objects.create_super_user(
            'vj"dev.com',
            'tst123'
        )
        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)
| 29 | 69 | 0.644239 | 1,107 | 0.931034 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.179142 |