hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ff47ce89d7e6d1cfd4bc00b15dbb096b0f355329 | 3,896 | py | Python | scripts/gpipe/gene2gene.py | CGATOxford/Optic | 2df92e953b5139ff4e5c383cb4383e6367cd47f1 | [
"MIT"
] | null | null | null | scripts/gpipe/gene2gene.py | CGATOxford/Optic | 2df92e953b5139ff4e5c383cb4383e6367cd47f1 | [
"MIT"
] | null | null | null | scripts/gpipe/gene2gene.py | CGATOxford/Optic | 2df92e953b5139ff4e5c383cb4383e6367cd47f1 | [
"MIT"
] | 1 | 2020-03-31T22:55:50.000Z | 2020-03-31T22:55:50.000Z | ##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
gpipe/gene2gene.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python gpipe/gene2gene.py --help
Type::
python gpipe/gene2gene.py --help
for command line help.
Documentation
-------------
Code
----
'''
import sys
import CGAT.Experiment as E
USAGE = """python %s [OPTIONS] < gene_list > graph
print list of all transcripts within a gene.
""" % sys.argv[0]
# add links between genes


def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.

    Reads ``schema|prediction_id|gene_id|quality`` records from stdin,
    groups transcripts by (schema, gene_id), and writes every unordered
    pair of transcripts within a gene to stdout as graph edges.
    """

    if argv is None:
        argv = sys.argv

    parser = E.OptionParser(
        version="%prog version: $Id: gpipe/gene2gene.py 2781 2009-09-10 11:33:14Z andreas $", usage=globals()["__doc__"])

    parser.add_option("-q", "--restrict-quality", dest="restrict_quality", type="string",
                      help="restrict genes to given quality codes.")

    parser.set_defaults(separator="|",
                        restrict_quality=None)

    options, args = E.Start(parser)

    if options.restrict_quality:
        options.restrict_quality = set(options.restrict_quality.split(","))

    ninput, noutput, nskipped, nerrors = 0, 0, 0, 0

    def print_lines(lines):
        # Emit all unordered transcript pairs for one gene; return the count.
        # Bug fix: the original declared ``global noutput``, but ``noutput``
        # is a local of main() and no module-level binding exists, so the
        # first increment raised NameError. Returning the count instead
        # lets the caller accumulate it.
        n = 0
        if not lines:
            return n
        for x in range(len(lines) - 1):
            for y in range(x + 1, len(lines)):
                options.stdout.write(options.separator.join(
                    lines[x]) + "\t" + options.separator.join(lines[y]) + "\t0\n")
                n += 1
        return n

    transcripts = []
    for line in sys.stdin:
        try:
            schema, prediction_id, gene_id, quality = line[
                :-1].split(options.separator)
        except ValueError:
            nerrors += 1
            if options.loglevel >= 1:
                options.stdlog.write("# PARSING ERROR in line %s" % line)
            continue
        transcripts.append((schema, prediction_id, gene_id, quality))

    # Sort by (schema, gene_id). Key-based sort replaces the original
    # Python-2-only ``cmp`` comparator and works on Python 2 and 3.
    transcripts.sort(key=lambda t: (t[0], t[2]))

    last_gene_id = None
    last_schema = None
    lines = []
    ninput = len(transcripts)
    for schema, prediction_id, gene_id, quality in transcripts:
        if last_gene_id != gene_id or last_schema != schema:
            noutput += print_lines(lines)
            lines = []
            last_gene_id = gene_id
            last_schema = schema
        if options.restrict_quality and quality not in options.restrict_quality:
            nskipped += 1
            continue
        lines.append((schema, prediction_id, gene_id, quality))

    noutput += print_lines(lines)

    E.info("ninput=%i, noutput=%i, nskipped=%i, nerrors=%i" %
           (ninput, noutput, nskipped, nerrors))
    E.Stop()


if __name__ == "__main__":
    sys.exit(main(sys.argv))
| 26.324324 | 121 | 0.587782 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,825 | 0.468429 |
ff48fb4c7680811b739fa3fcf4eebb4da830d50b | 9,053 | py | Python | pyneval/cli/pyneval.py | CSDLLab/PyMets | 24e02dba5615c781ea0215467a4b89b9625ca519 | [
"BSD-3-Clause"
] | 12 | 2020-07-18T16:55:23.000Z | 2022-03-14T12:26:08.000Z | pyneval/cli/pyneval.py | CSDLLab/PyMets | 24e02dba5615c781ea0215467a4b89b9625ca519 | [
"BSD-3-Clause"
] | 5 | 2021-05-31T22:08:51.000Z | 2021-08-31T15:42:44.000Z | pyneval/cli/pyneval.py | CSDLLab/PyMets | 24e02dba5615c781ea0215467a4b89b9625ca519 | [
"BSD-3-Clause"
] | 2 | 2021-09-24T03:02:27.000Z | 2021-11-09T06:21:00.000Z | import argparse
import importlib
import os
import sys
import jsonschema
import pkg_resources
from multiprocessing import Pool, cpu_count
from pyneval.errors.exceptions import InvalidMetricError, PyNevalError
from pyneval.pyneval_io import json_io
from pyneval.pyneval_io import swc_io
from pyneval.metric.utils import anno_utils, config_utils
from pyneval.metric.utils import cli_utils
from pyneval.metric.utils.metric_manager import get_metric_manager
from pyneval.tools.optimize import optimize
# load method in metrics
def import_metrics():
    """Discover and import every ``*_metric.py`` module under pyneval/metric."""
    project_root = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    metric_dir = os.path.join(project_root, "pyneval/metric")
    for filename in os.listdir(metric_dir):
        stem, dot, ext = filename.partition(".")
        # Accept only names of the form ``<something>_metric.py`` with a
        # single dot (same filter as the original split-based check).
        if dot and ext == "py" and stem.endswith("_metric"):
            importlib.import_module("pyneval.metric.{}".format(stem))
def read_parameters():
    """Build and parse the pyneval command-line arguments.

    Returns the argparse Namespace; all arguments are optional so that
    validation (and error reporting) happens later in ``set_configs``.
    """
    metric_manager = get_metric_manager()

    parser = argparse.ArgumentParser(description="Current version: pyneval {}".format(
        pkg_resources.require("pyneval")[0].version)
    )
    parser.add_argument(
        "--gold",
        "-G",
        help="path of the gold standard SWC file",
        required=False
    )
    parser.add_argument(
        "--test",
        "-T",
        help="a list of reconstructed SWC files or folders for evaluation",
        required=False,
        nargs="*"
    )
    parser.add_argument(
        "--metric",
        "-M",
        help="metric choice: " + metric_manager.get_metric_summary(False) + ".",
        required=False
    )
    parser.add_argument(
        "--output",
        "-O",
        help="output path of metric results, output file is in json format with different scores of the metric",
        required=False,
    )
    parser.add_argument(
        "--detail",
        "-D",
        help="output path of detail metric result, swc format presented.\n"
             "identify different type according to metric result for each node",
        required=False,
    )
    parser.add_argument(
        "--config",
        "-C",
        help="path of custom configuration file for the specified metric",
        required=False,
    )
    parser.add_argument(
        "--parallel",
        "-P",
        help="Enable the parallel processing",
        required=False,
        action="store_true"
    )
    parser.add_argument(
        "--optimize",
        # NOTE(review): despite the help text this takes a *path* to an
        # optimizer config (read with json_io in set_configs) — confirm.
        help="Enable optimizer mode",
        required=False,
    )
    parser.add_argument(
        "--path_validation",
        help="Enable detailed path validation check",
        required=False,
        action="store_true"
    )
    # store_true flag: args.debug is a bool, not a string.
    parser.add_argument("--debug", help="print debug info or not", required=False, action="store_true")

    return parser.parse_args()
def init(abs_dir):
    """Make the project root (plus src/ and test/) importable and raise the
    recursion limit for deep SWC-tree traversals."""
    sys.path.append(abs_dir)
    for subdir in ("src", "test"):
        sys.path.append(os.path.join(abs_dir, subdir))
    sys.setrecursionlimit(1000000)
def set_configs(abs_dir, args):
    """Validate CLI arguments and load all inputs needed for evaluation.

    Resolves paths relative to *abs_dir*, reads the gold and test SWC
    trees, resolves the metric, and loads/validates the metric config.

    Returns the tuple consumed by ``run()``:
    (gold_swc_tree, test_swc_trees, test_swc_paths, metric, output_path,
     detail_dir, config, is_debug, is_parallel, optimize_config,
     is_path_validation)
    """
    # argument: debug
    # Bug fix: --debug is a store_true flag, so args.debug is a bool; the
    # original called ``args.debug.lower()`` and crashed with
    # AttributeError whenever the flag was actually passed.
    is_debug = bool(args.debug)

    # argument: gold
    gold_swc_path = os.path.join(abs_dir, args.gold)
    gold_swc_tree = swc_io.read_swc_tree(gold_swc_path)  # SwcTree

    # argument: metric
    metric_manager = get_metric_manager()
    metric = metric_manager.get_root_metric(args.metric)
    if not metric:
        raise InvalidMetricError(args.metric, metric_manager.get_metric_summary(True))

    # argument: test
    test_swc_paths = [os.path.join(abs_dir, path) for path in args.test]
    test_swc_trees = []
    # read test trees (renamed loop variable: ``file`` shadowed a builtin)
    for swc_path in test_swc_paths:
        if swc_path[-4:].lower() == ".tif":
            continue
        test_swc_trees.extend(swc_io.read_swc_trees(swc_path))
    # NOTE(review): this checks the path list, not the parsed trees — a
    # list containing only .tif files passes silently; confirm intended.
    if len(test_swc_paths) == 0:
        raise PyNevalError("test models can't be null")
    # info: how many trees read
    print("Evaluating {} test model(s) \n".format(len(test_swc_trees)))

    # argument: config (fall back to the metric's default configuration)
    config_path = args.config
    if config_path is None:
        config = config_utils.get_default_configs(metric)
    else:
        config = json_io.read_json(config_path)
    config_schema = config_utils.get_config_schema(metric)
    jsonschema.validate(config, config_schema)

    # argument: output
    output_path = None
    if args.output:
        output_path = os.path.join(abs_dir, args.output)

    # argument: detail
    detail_dir = None
    if args.detail:
        detail_dir = os.path.join(abs_dir, args.detail)

    # argument: parallel
    is_parallel = False
    if args.parallel:
        is_parallel = args.parallel

    is_path_validation = False
    if args.path_validation:
        is_path_validation = args.path_validation

    # argument: optimize (path to an optimizer configuration file)
    optimize_config = None
    if args.optimize:
        optimize_config = json_io.read_json(args.optimize)

    return gold_swc_tree, test_swc_trees, test_swc_paths, metric, output_path, detail_dir, config, is_debug, is_parallel, optimize_config, is_path_validation
def excute_metric(metric, gold_swc_tree, test_swc_tree, config, detail_dir, output_path, metric_method, is_path_validation):
    """Run one metric on one test tree; print, and optionally persist, results.

    Prints the scores listed in the metric's screen-output config, then (if
    requested) saves annotated recall/precision SWC trees under *detail_dir*
    and the full result dict as JSON at *output_path*.
    """
    test_swc_name = test_swc_tree.name()
    result, res_gold_swc_tree, res_test_swc_tree = metric_method(
        gold_swc_tree=gold_swc_tree, test_swc_tree=test_swc_tree, config=config
    )

    # Only the keys whitelisted for this metric are echoed to the screen.
    screen_output = config_utils.get_screen_output()
    result_info = ""
    for key in result:
        if key in screen_output[metric]:
            result_info += "{} = {}\n".format(key.ljust(15, " "), result[key])

    print("---------------Result---------------\n" +
          "swc_file_name = {}\n".format(test_swc_name) +
          result_info +
          "----------------End-----------------\n"
          )

    # e.g. "neuron.swc" -> "neuron_<metric>_" prefix for detail files
    # (assumes the file name ends in a 4-char extension like ".swc").
    base_file_name = test_swc_name[:-4] + "_" + metric + "_"

    def save_detail(swc_tree, file_name):
        # Resolve the output path (validated or collision-checked), then
        # write the tree annotated with metric-specific node types.
        detail_path = os.path.normpath(os.path.join(detail_dir, file_name))
        if is_path_validation:
            detail_path = cli_utils.path_validation(detail_path, ".swc")
        else:
            detail_path = cli_utils.make_sure_path_not_exist(detail_path)

        ok = False
        if detail_path is not None:
            ok = swc_io.swc_save(
                swc_tree=swc_tree,
                out_path=detail_path,
                extra=anno_utils.get_detail_type(metric),
            )
        if detail_path is None or not ok:
            print("[Warning:] Failed to save details: {}".format(file_name))

    if detail_dir:
        if res_gold_swc_tree is not None:
            save_detail(res_gold_swc_tree, base_file_name+"recall.swc")

        if res_test_swc_tree is not None:
            save_detail(res_test_swc_tree, base_file_name+"precision.swc")

    if output_path:
        if is_path_validation:
            output_path = cli_utils.path_validation(output_path, ".json")
        else:
            output_path = cli_utils.make_sure_path_not_exist(output_path)
        ok = False
        if output_path is not None:
            ok = json_io.save_json(data=result, json_file_path=output_path)
            if ok:
                print("[Info:] Output saved")
        if output_path is None or not ok:
            print("[Warning:] Failed to save output")
# command program
def run():
    """Entry point of the ``pyneval`` CLI.

    Imports the metric plug-ins, parses arguments, loads the gold/test SWC
    models, then dispatches to the optimizer, a multiprocessing pool, or a
    plain sequential loop over the test trees.
    """
    abs_dir = os.path.abspath("")

    import_metrics()
    init(abs_dir)

    args = read_parameters()
    gold_swc_tree, test_swc_trees, test_swc_paths, metric, output_path, detail_dir, \
        config, is_debug, is_parallel, optimize_config, is_path_validation = set_configs(abs_dir, args)

    metric_manager = get_metric_manager()
    metric_method = metric_manager.get_metric_method(metric)

    if optimize_config is not None:
        # Optimizer mode: search metric parameters instead of evaluating.
        optimize.optimize(gold_swc_tree=gold_swc_tree, test_swc_paths=test_swc_paths,
                          optimize_config=optimize_config, metric_config=config, metric_method=metric_method)
    elif is_parallel:
        # use multi process; never spawn more workers than trees
        max_procs = cpu_count()
        if len(test_swc_trees) < max_procs:
            max_procs = len(test_swc_trees)
        p_pool = Pool(max_procs)

        for test_swc_tree in test_swc_trees:
            p_pool.apply_async(
                excute_metric,
                args=(metric, gold_swc_tree, test_swc_tree, config, detail_dir, output_path, metric_method, is_path_validation),
            )

        p_pool.close()
        p_pool.join()
    else:
        for test_swc_tree in test_swc_trees:
            excute_metric(
                metric=metric,
                gold_swc_tree=gold_swc_tree,
                test_swc_tree=test_swc_tree,
                config=config,
                detail_dir=detail_dir,
                output_path=output_path,
                metric_method=metric_method,
                is_path_validation=is_path_validation,
            )

    print("Done!")
if __name__ == "__main__":
sys.exit(run())
| 32.564748 | 157 | 0.642218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,430 | 0.157959 |
ff4a89c35219dc2f2732b8374cd3ee1ff05ce0d0 | 572 | py | Python | src/tratamientos/migrations/0006_auto_20170201_1554.py | mava-ar/sgk | cb8b3abf243b4614e6a45e4e2db5bb7cce94dee4 | [
"Apache-2.0"
] | null | null | null | src/tratamientos/migrations/0006_auto_20170201_1554.py | mava-ar/sgk | cb8b3abf243b4614e6a45e4e2db5bb7cce94dee4 | [
"Apache-2.0"
] | 32 | 2016-05-09T19:37:08.000Z | 2022-01-13T01:00:52.000Z | src/tratamientos/migrations/0006_auto_20170201_1554.py | mava-ar/sgk | cb8b3abf243b4614e6a45e4e2db5bb7cce94dee4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2017-02-01 18:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make Sesion.profesional a CASCADE-deleting
    FK to core.Profesional with related_name 'sesiones'."""

    dependencies = [
        ('tratamientos', '0005_sesion_profesional'),
    ]

    operations = [
        migrations.AlterField(
            model_name='sesion',
            name='profesional',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sesiones', to='core.Profesional'),
        ),
    ]
ff4a945a55cd1faab9edb57d2954c239971d2727 | 3,749 | py | Python | src/alembic/versions/89944f8b35b3_db_games_model_migrations.py | green2cerberuz/db_vgames | d6d71f6b40c7e0c28c307bc314f46675ac0bf5e9 | [
"MIT"
] | null | null | null | src/alembic/versions/89944f8b35b3_db_games_model_migrations.py | green2cerberuz/db_vgames | d6d71f6b40c7e0c28c307bc314f46675ac0bf5e9 | [
"MIT"
] | null | null | null | src/alembic/versions/89944f8b35b3_db_games_model_migrations.py | green2cerberuz/db_vgames | d6d71f6b40c7e0c28c307bc314f46675ac0bf5e9 | [
"MIT"
] | null | null | null | """DB Games model migrations
Revision ID: 89944f8b35b3
Revises:
Create Date: 2020-11-14 03:49:03.255055
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "89944f8b35b3"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"company",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=250), nullable=False),
sa.Column("creation_year", sa.DateTime(), nullable=True),
sa.Column("description", sa.String(length=500), nullable=True),
sa.Column("logo", sa.String(length=500), nullable=True),
sa.Column("is_publisher", sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"franchise",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("title", sa.String(length=250), nullable=False),
sa.Column("first_release", sa.DateTime(), nullable=True),
sa.Column("description", sa.String(length=250), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"console",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=250), nullable=False),
sa.Column("release_year", sa.DateTime(), nullable=False),
sa.Column("description", sa.String(length=500), nullable=True),
sa.Column("cover", sa.String(length=500), nullable=True),
sa.Column("motto", sa.String(length=100), nullable=False),
sa.Column("company_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(["company_id"], ["company.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"game",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=250), nullable=False),
sa.Column("publication_year", sa.DateTime(), nullable=True),
sa.Column("score", sa.Integer(), nullable=True),
sa.Column("description", sa.String(length=500), nullable=True),
sa.Column("cover", sa.String(length=500), nullable=True),
sa.Column("franchise_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(["franchise_id"], ["franchise.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"franchiseassociation",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("franchise_id", sa.Integer(), nullable=True),
sa.Column("game_id", sa.Integer(), nullable=True),
sa.Column("console_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(["console_id"], ["console.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["franchise_id"], ["franchise.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["game_id"], ["game.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"game_console_table",
sa.Column("game_id", sa.Integer(), nullable=True),
sa.Column("console_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["console_id"],
["console.id"],
),
sa.ForeignKeyConstraint(
["game_id"],
["game.id"],
),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("game_console_table")
op.drop_table("franchiseassociation")
op.drop_table("game")
op.drop_table("console")
op.drop_table("franchise")
op.drop_table("company")
# ### end Alembic commands ###
| 37.868687 | 88 | 0.623099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,018 | 0.271539 |
ff4bc07be12dc2426e91dab41e8aaf03e6096812 | 468 | py | Python | way/python/first_steps/basics/testgui.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | way/python/first_steps/basics/testgui.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | way/python/first_steps/basics/testgui.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | [
"MIT"
] | null | null | null | from tkinter import *
clicks = 0  # running count of button presses


def click_button():
    # Button callback: bump the counter and refresh the label text.
    global clicks
    clicks += 1
    buttonText.set("Clicks {}".format(clicks))


# Build the window: one button whose label tracks the click count.
root = Tk()
root.title("GUI на Python")
root.geometry("300x250")

buttonText = StringVar()
buttonText.set("Clicks {}".format(clicks))
btn = Button(textvariable=buttonText, background="#555", foreground="#ccc",
             padx="20", pady="8", font="16", command=click_button)
btn.pack()

root.mainloop()
ff4c909fa06f90aff02e38a609573a49afbaf2e0 | 4,679 | py | Python | genomic_data_service/region_indexer_elastic_search.py | ENCODE-DCC/genomic-data-service | 954017a5bcc5f448fbe2867768186df5e066c67c | [
"MIT"
] | 3 | 2020-10-26T02:15:55.000Z | 2022-01-26T18:39:09.000Z | genomic_data_service/region_indexer_elastic_search.py | ENCODE-DCC/genomic-data-service | 954017a5bcc5f448fbe2867768186df5e066c67c | [
"MIT"
] | 3 | 2021-08-17T02:01:54.000Z | 2022-03-30T17:14:02.000Z | genomic_data_service/region_indexer_elastic_search.py | ENCODE-DCC/genomic-data-service | 954017a5bcc5f448fbe2867768186df5e066c67c | [
"MIT"
] | 1 | 2022-03-24T21:15:34.000Z | 2022-03-24T21:15:34.000Z | from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError
from elasticsearch.helpers import bulk
RESIDENTS_INDEX = 'resident_regionsets'
REGION_INDEXER_SHARDS = 2
SEARCH_MAX = 99999
FOR_REGULOME_DB = 'regulomedb'
INDEX_SETTINGS = {
'index': {
'number_of_shards': REGION_INDEXER_SHARDS,
'max_result_window': SEARCH_MAX
}
}
class RegionIndexerElasticSearch():
    """Create and manage the Elasticsearch indices used by the region indexer.

    Three index families are maintained:
      * ``resident_regionsets`` — metadata about indexed region sets,
      * ``snp_<assembly>`` — one SNP index per assembly, one doc type per
        chromosome,
      * ``<chrom>`` — one region index per chromosome, one doc type per
        assembly.
    """

    def __init__(self, es_uri, es_port, supported_chroms, supported_assemblies, use_type=FOR_REGULOME_DB, force_delete=False):
        self.es = Elasticsearch(port=es_port, hosts=es_uri)
        self.use_type = use_type
        # Lower-cased because these names double as index/doc-type names.
        self.chroms = [chrom.lower() for chrom in supported_chroms]
        self.assemblies = [assembly.lower() for assembly in supported_assemblies]
        self.force_delete = force_delete

    def setup_indices(self, force_delete=False):
        """Create all indices and mappings, optionally wiping existing ones.

        Bug fix: the *force_delete* argument was previously ignored (only
        ``self.force_delete`` was consulted); it is now honored as well.
        """
        if self.force_delete or force_delete:
            self.destroy_indices()

        self.setup_residents_index()
        self.setup_snps_index()
        self.setup_regions_index()

    def destroy_indices(self):
        """Delete the residents index plus every SNP and chromosome index."""
        if self.es.indices.exists(RESIDENTS_INDEX):
            self.es.indices.delete(index=RESIDENTS_INDEX)

        for assembly in self.assemblies:
            snp_index = 'snp_' + assembly
            if self.es.indices.exists(snp_index):
                self.es.indices.delete(index=snp_index)

        for chrom in self.chroms:
            if self.es.indices.exists(chrom):
                self.es.indices.delete(index=chrom)

    def setup_residents_index(self):
        """Create the residents index and its doc-type mapping if missing."""
        if not self.es.indices.exists(RESIDENTS_INDEX):
            self.es.indices.create(index=RESIDENTS_INDEX, body=INDEX_SETTINGS)

        if not self.es.indices.exists_type(index=RESIDENTS_INDEX, doc_type=self.use_type):
            mapping = self.get_resident_mapping()
            self.es.indices.put_mapping(index=RESIDENTS_INDEX, doc_type=self.use_type, body=mapping)

    def setup_snps_index(self):
        """Create one ``snp_<assembly>`` index with a mapping per chromosome."""
        for assembly in self.assemblies:
            snp_index = 'snp_' + assembly

            if not self.es.indices.exists(snp_index):
                self.es.indices.create(index=snp_index, body=INDEX_SETTINGS)

            for chrom in self.chroms:
                if not self.es.indices.exists_type(index=snp_index, doc_type=chrom):
                    mapping = self.get_snp_index_mapping(chrom)
                    self.es.indices.put_mapping(index=snp_index, doc_type=chrom, body=mapping)

    def setup_regions_index(self):
        """Create one index per chromosome with a mapping per assembly."""
        for chrom in self.chroms:
            if not self.es.indices.exists(chrom):
                self.es.indices.create(index=chrom, body=INDEX_SETTINGS)

            for assembly in self.assemblies:
                if not self.es.indices.exists_type(index=chrom, doc_type=assembly):
                    mapping = self.get_chrom_index_mapping(assembly)
                    self.es.indices.put_mapping(index=chrom, doc_type=assembly, body=mapping)

    def get_resident_mapping(self):
        """Mapping for the residents doc type: stored but not indexed."""
        return {
            self.use_type: {"enabled": False}
        }

    def get_chrom_index_mapping(self, assembly='hg19'):
        """Mapping for region documents of one assembly within a chrom index."""
        return {
            assembly: {
                '_source': {
                    'enabled': True
                },
                'properties': {
                    'uuid': {
                        'type': 'keyword'
                    },
                    'coordinates': {
                        'type': 'integer_range'
                    },
                    'strand': {
                        'type': 'string'  # + - .
                    },
                    'value': {
                        'type': 'string'
                    },
                }
            }
        }

    def get_snp_index_mapping(self, chrom='chr1'):
        """Mapping for SNP documents of one chromosome within a SNP index."""
        return {
            chrom: {
                '_all': {
                    'enabled': False
                },
                '_source': {
                    'enabled': True
                },
                'properties': {
                    'rsid': {
                        'type': 'keyword'
                    },
                    'chrom': {
                        'type': 'keyword'
                    },
                    'coordinates': {
                        'type': 'integer_range'
                    },
                    'maf': {
                        'type': 'float',
                    },
                    'ref_allele_freq': {
                        'enabled': False,
                    },
                    'alt_allele_freq': {
                        'enabled': False,
                    },
                }
            }
        }
ff4e9e63d1e23b29bf168baa8208ef736d094895 | 4,160 | py | Python | genda/formats/dosages.py | jeffhsu3/genda | 5adbb5b5620c592849fa4a61126b934e1857cd77 | [
"BSD-3-Clause"
] | 5 | 2016-01-12T15:12:18.000Z | 2022-02-10T21:57:39.000Z | genda/formats/dosages.py | jeffhsu3/genda | 5adbb5b5620c592849fa4a61126b934e1857cd77 | [
"BSD-3-Clause"
] | 5 | 2015-01-20T04:22:50.000Z | 2018-10-02T19:39:12.000Z | genda/formats/dosages.py | jeffhsu3/genda | 5adbb5b5620c592849fa4a61126b934e1857cd77 | [
"BSD-3-Clause"
] | 1 | 2022-03-04T06:49:39.000Z | 2022-03-04T06:49:39.000Z | """ Functions for working with tabix dosages in pandas dataframes
"""
import gzip
import numpy as np
import pandas as pd
import pysam
import statsmodels.api as sm
class Dosage(object):
    """Pair a dosage matrix with its SNP annotations for one gene.

    Restricts both frames to the rsIDs they share and orders both by
    genomic position. Fix: the removed pandas ``.ix`` indexer (dropped in
    pandas 1.0) is replaced with ``.loc``.
    """

    def __init__(self, dosages, annotations, gene_name):
        # Match up the annotation dataframe with the dosage dataframe
        mindex = np.intersect1d(np.asarray(dosages.index, dtype=str),
                                np.asarray(annotations.index, dtype=str))
        self.annot = annotations.loc[mindex, :]
        # Order both frames by ascending genomic position.
        ordering = self.annot.loc[:, 'pos'].argsort()
        self.annot = self.annot.iloc[ordering, :]
        self.dosages = dosages.loc[mindex, :]
        self.dosages = self.dosages.iloc[ordering, :]
        self.gene_name = gene_name

    def run_eQTL(self, count_matrix, covariates, extra_snps=None):
        """Run eQTL_func for every SNP row against this gene's expression."""
        pvalues = self.dosages.apply(eQTL_func, axis=1, args=(covariates,
                                     count_matrix.loc[self.gene_name, :]))
        self.pvalues = pvalues
def get_dosages_by_range(chrm, start, end, gene_name, annotation_file,
                         dosage_df, mapping=None):
    """
    Fuzzy mapping between annotation and genotypes.

    Fetches annotation records on *chrm* in [start, end] from a
    tabix-indexed file, intersects their rsIDs with each dataframe in
    *dosage_df*, and returns a Dosage instance for *gene_name*.

    Fix: replaced the removed pandas ``.ix`` indexer with ``.loc`` and
    dropped dead code (a no-op else branch and a commented-out read_csv
    block).
    """
    ann_file = pysam.Tabixfile(annotation_file)
    ann_v = ann_file.fetch(chrm, start, end)
    rsIDs = []
    pos = []
    ref = []
    alt = []
    # NOTE(review): field layout assumed — rsID at col 3, pos at col 1,
    # ref/alt at cols 6/7; confirm against the annotation file format.
    for i in ann_v:
        i = i.split("\t")
        rsIDs.append(i[3])
        pos.append(int(i[1]))
        ref.append(i[6])
        alt.append(i[7])
    annot = pd.DataFrame({'pos': pos, 'ref': ref, 'alt': alt}, index=pd.Index(rsIDs))
    comb_iter = []
    for dos in dosage_df:
        mindex = np.intersect1d(np.asarray(dos.index, dtype=str),
                                np.asarray(annot.index, dtype=str))
        if len(mindex) > 0:
            comb_iter.append(dos.loc[mindex, :])
    out_dos = pd.concat(comb_iter)
    print(annot.shape, out_dos.shape, gene_name)
    return Dosage(out_dos, annot, gene_name)
def generate_dosage_mapping(dosage_file, mapping_file=None, interval=50):
    """
    Return a dict mapping every *interval*-th rsID in *dosage_file* to its
    (header-adjusted) line offset.

    The first line of the file is treated as a header and skipped.

    Fixes: the *interval* parameter was previously ignored (hard-coded to
    50); the header was skipped with the Python-2-only ``fh.next()``;
    unused locals removed. If *mapping_file* is given, nothing is done
    (returns None), matching the original behavior.
    """
    if mapping_file:
        return None
    f_i = {}
    with open(dosage_file) as fh:
        next(fh)  # skip the header line
        for i, line in enumerate(fh):
            if i % interval == 0:
                # i - 1 keeps the historical offset convention.
                f_i[line.split(" ")[0]] = i - 1
    return f_i
def eQTL_func(snps, cov, expression):
    """Return the OLS p-value of the SNP dosage term.

    Regresses *expression* on the covariates plus the per-sample SNP
    dosages and returns the p-value of the ``snps`` coefficient.

    NOTE(review): *cov* is transposed here, so on input it is presumably
    covariates x samples — confirm against callers (Dosage.run_eQTL).
    """
    # Transpose to samples-as-rows; the new column is added to the
    # transposed frame, leaving the caller's orientation intact.
    cov = cov.T
    cov['snps'] = snps
    cov = sm.add_constant(cov)
    model = sm.OLS(expression, cov)
    return(model.fit().pvalues['snps'])
class eQTL(object):
    """ Python class for completing eQTLs. Does lazy loading of all large
    files.

    Fixes: the constructor assigned the undefined name ``vannotations``
    (the parameter is ``vannotation``), raising NameError on every
    instantiation; ``generate_mapping`` was declared without ``self`` and
    could not be called on an instance.
    """

    def __init__(self, dosages_path, expression, vannotation):
        self.dosage = dosages_path
        self.expression = expression
        self.vannotations = vannotation

    def generate_mapping(self):
        # Stub kept from the original; not yet implemented.
        pass
"""
if mapping:
for i in ann_v:
rsID = i.split("\t")[3]
try:
roughly_first = mapping[rsID]
rsIDs.append(rsID)
pos.append(int(i.split("\t")[1]))
break
except KeyError:
pass
for i in ann_v:
i = i.split("\t")
try:
roughly_end = mapping[i[3]]
except KeyError:
pass
pos.append(int(i[1]))
rsIDs.append(i[3])
"""
def get_annotation(annotation, chrm):
    """Load SNP annotations for one chromosome from a tabix-indexed file.

    Parses every record on *chrm* and returns a DataFrame indexed by rsID
    with 'pos', 'ref' and 'alt' columns.

    NOTE(review): field layout (rsID at col 3, pos at col 1, ref/alt at
    cols 6/7) duplicates get_dosages_by_range — confirm against the
    annotation file format.
    """
    ann_file = pysam.Tabixfile(annotation)
    ann_v = ann_file.fetch(chrm)
    rsIDs = []
    pos = []
    ref = []
    alt = []
    for i in ann_v:
        i = i.split("\t")
        rsIDs.append(i[3])
        pos.append(int(i[1]))
        ref.append(i[6])
        alt.append(i[7])
    annot = pd.DataFrame({'pos': pos, 'ref': ref, 'alt': alt}, index=pd.Index(rsIDs))
    return(annot)
| 28.888889 | 85 | 0.566827 | 1,163 | 0.279567 | 0 | 0 | 0 | 0 | 0 | 0 | 1,227 | 0.294952 |
ff4eece822bfb55c7cca08af69a260d962fcf85b | 406 | py | Python | string/Python/0067-add-binary-1.py | ljyljy/LeetCode-Solution-in-Good-Style | 0998211d21796868061eb22e2cbb9bcd112cedce | [
"Apache-2.0"
] | 1 | 2021-01-10T17:03:21.000Z | 2021-01-10T17:03:21.000Z | string/Python/0067-add-binary-1.py | lemonnader/LeetCode-Solution-Well-Formed | baabdb1990fd49ab82a712e121f49c4f68b29459 | [
"Apache-2.0"
] | null | null | null | string/Python/0067-add-binary-1.py | lemonnader/LeetCode-Solution-Well-Formed | baabdb1990fd49ab82a712e121f49c4f68b29459 | [
"Apache-2.0"
] | 1 | 2021-07-25T07:53:14.000Z | 2021-07-25T07:53:14.000Z | class Solution:
def addBinary(self, a: str, b: str) -> str:
return bin(int(a, 2) + int(b, 2))[2:]
if __name__ == '__main__':
    # Manual smoke test: add two binary strings and show the intermediate
    # int conversions alongside the final bin() result.
    a = "11"
    b = "1"
    solution = Solution()
    result = solution.addBinary(a, b)
    print(result)

    result1 = int(a, 2)
    result2 = int(b, 2)
    print(result1)
    print(result2)
    print(bin(result1 + result2))
    # Strip the "0b" prefix — same transformation addBinary performs.
    print(bin(result1 + result2)[2:])
ff51372a814f4b42e447cbc94944724fd9e86cc6 | 3,017 | py | Python | envergo/evaluations/migrations/0007_auto_20210816_1212.py | MTES-MCT/envergo | 8bb6e4ffa15a39edda51b39401db6cc12e73ad0a | [
"MIT"
] | null | null | null | envergo/evaluations/migrations/0007_auto_20210816_1212.py | MTES-MCT/envergo | 8bb6e4ffa15a39edda51b39401db6cc12e73ad0a | [
"MIT"
] | 6 | 2021-07-12T14:33:18.000Z | 2022-02-14T10:36:09.000Z | envergo/evaluations/migrations/0007_auto_20210816_1212.py | MTES-MCT/envergo | 8bb6e4ffa15a39edda51b39401db6cc12e73ad0a | [
"MIT"
] | null | null | null | # Generated by Django 3.1.12 on 2021-08-16 12:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('evaluations', '0006_auto_20210709_1235'),
]
operations = [
migrations.AddField(
model_name='evaluation',
name='commune',
field=models.CharField(default='', help_text='The name and postcode of the project commune', max_length=256, verbose_name='Commune'),
preserve_default=False,
),
migrations.AddField(
model_name='evaluation',
name='created_surface',
field=models.IntegerField(default=0, help_text='In square meters', verbose_name='Created surface'),
preserve_default=False,
),
migrations.AddField(
model_name='evaluation',
name='existing_surface',
field=models.IntegerField(blank=True, help_text='In square meters', null=True, verbose_name='Existing surface'),
),
migrations.AddField(
model_name='evaluation',
name='flood_zone_impact',
field=models.TextField(default='', verbose_name='Flood zone impact'),
preserve_default=False,
),
migrations.AddField(
model_name='evaluation',
name='flood_zone_probability',
field=models.IntegerField(choices=[(1, 'Unlikely'), (2, 'Possible'), (3, 'Likely'), (4, 'Very likely')], default=2, verbose_name='Flood zone probability'),
preserve_default=False,
),
migrations.AddField(
model_name='evaluation',
name='global_probability',
field=models.IntegerField(choices=[(1, 'Unlikely'), (2, 'Possible'), (3, 'Likely'), (4, 'Very likely')], default=2, verbose_name='Probability'),
preserve_default=False,
),
migrations.AddField(
model_name='evaluation',
name='rainwater_runoff_impact',
field=models.TextField(default='', verbose_name='Rainwater runoff impact'),
preserve_default=False,
),
migrations.AddField(
model_name='evaluation',
name='rainwater_runoff_probability',
field=models.IntegerField(choices=[(1, 'Unlikely'), (2, 'Possible'), (3, 'Likely'), (4, 'Very likely')], default=2, verbose_name='Rainwater runoff probability'),
preserve_default=False,
),
migrations.AddField(
model_name='evaluation',
name='wetland_impact',
field=models.TextField(default='', verbose_name='Wetland impact'),
preserve_default=False,
),
migrations.AddField(
model_name='evaluation',
name='wetland_probability',
field=models.IntegerField(choices=[(1, 'Unlikely'), (2, 'Possible'), (3, 'Likely'), (4, 'Very likely')], default=2, verbose_name='Wetland probability'),
preserve_default=False,
),
]
| 41.328767 | 173 | 0.598608 | 2,923 | 0.968843 | 0 | 0 | 0 | 0 | 0 | 0 | 851 | 0.282068 |
ff51b99a9a8c88b5a89d588144d9adb2147e11eb | 4,943 | py | Python | app/knowledgecard.py | thaithimyduyen/German-Study-TelegramBot | 8dbcfe1f28dad7a4b012d0d94a19cb17ae7baebd | [
"MIT"
] | 2 | 2020-05-15T17:55:01.000Z | 2020-05-22T15:31:40.000Z | app/knowledgecard.py | thaithimyduyen/German-Study-TelegramBot | 8dbcfe1f28dad7a4b012d0d94a19cb17ae7baebd | [
"MIT"
] | 1 | 2022-01-20T18:33:10.000Z | 2022-01-20T18:38:44.000Z | app/knowledgecard.py | thaithimyduyen/German-Study-TelegramBot | 8dbcfe1f28dad7a4b012d0d94a19cb17ae7baebd | [
"MIT"
] | null | null | null | import logging
import enum
import copy
import telegram.error
from telegram import (
InlineKeyboardButton,
InlineKeyboardMarkup,
ParseMode
)
from app.entities import KnowledgeStatus
from app.card import Card
# Configure root logging for the bot (timestamped INFO-level output).
# NOTE(review): calling basicConfig() at import time configures logging for
# the whole process; libraries usually leave this to the application entry
# point — confirm this module is only imported by the bot itself.
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.INFO
)
class KnowledgeCard(Card):
    """A "do you know this word?" flashcard (MVC facade).

    Wires together the card's view (Telegram messages), model (word and
    card state) and controller (inline-button handling) and exposes the
    small facade the bot interacts with.
    """

    def __init__(self, bot, word, listener):
        self._view = KnowledgeCardView(bot)
        self._model = KnowledgeCardModel(
            view=self._view,
            word=word,
            listener=listener,
        )
        self._controller = KnowledgeCardController(self._model)
        self._is_deleted = False

    def set_old(self):
        """Mark the card's word as one the user has already seen."""
        self._model.is_old = True

    @property
    def is_old(self):
        return self._model.is_old

    def set_as_deleted(self, update, context):
        """Strip the card's keyboard and remember that it was deleted."""
        self._model.set_as_deleted(update, context)
        # Bug fix: previously only the model was flagged, so this object's
        # own _is_deleted stayed False and is_deleted() always lied.
        self._is_deleted = True

    def is_deleted(self) -> bool:
        return self._is_deleted

    def get_word(self):
        """Return a shallow copy of the card's word object."""
        return copy.copy(self._model._word)

    def start(self, update, context) -> str:
        """Send the card to the chat and return the Telegram message id."""
        return self._model.start(update, context)

    def button_clicked(self, update, context):
        """Forward an inline-button press to the controller."""
        self._controller.button_clicked(update, context)
class Knowledge(enum.Enum):
    """Glyph appended to the translation to mark the user's answer."""
    true = "✅"   # the user knew the word
    false = "❌"  # the user had forgotten the word
class KnowledgeCardModel:
    """State holder for one knowledge card.

    Keeps the word, the Telegram message id of the posted card and the
    old/deleted flags, and drives the view when the state changes.
    """

    def __init__(self, view, word, listener):
        self._view = view
        self._word = word
        self._listener = listener
        self._message_id = None
        self.is_old = False
        # Bug fix: this attribute used to be created only inside
        # set_as_deleted(), so reading it before that raised AttributeError.
        self._is_deleted = False

    def start(self, update, context) -> str:
        """Send the card without a translation; return its message id."""
        self._message_id = self._view.send_card(
            update=update,
            word=self._word,
            translation=None,
        )
        return self._message_id

    def show_translation(self, update, context, knowledge):
        """Reveal the translation, suffixed with the answer glyph.

        Also notifies the listener so the study statistics get updated.
        """
        knowledge_status = KnowledgeStatus.new_word_know
        if knowledge == Knowledge.false:
            knowledge_status = KnowledgeStatus.new_word_forgot
        self._view.update_card(
            update=update,
            translation=self._word.get_translation() + " " + knowledge.value,
        )
        self._listener.on_correct_answer_clicked(
            update=update,
            context=context,
            knowledge_status=knowledge_status,
        )

    def set_as_deleted(self, update, context):
        """Remove the card's keyboard and flag the card as deleted."""
        self._view.update_card_as_deleted(
            update=update,
            context=context,
            message_id=self._message_id,
        )
        self._is_deleted = True
class KnowledgeCardController:
    """Routes inline-button presses from Telegram to the card's model."""

    def __init__(self, model):
        self._model = model

    def button_clicked(self, update, context):
        """Handle a "know"/"forgot" button press; ignore any other data."""
        pressed = update.callback_query.data
        if pressed not in ("know", "forgot"):
            return
        outcome = Knowledge.true if pressed == "know" else Knowledge.false
        self._model.show_translation(
            update=update,
            context=context,
            knowledge=outcome,
        )
class KnowledgeCardView:
    """Renders a knowledge card as Telegram messages with inline buttons."""

    def __init__(self, bot):
        self._bot = bot

    @staticmethod
    def _get_card_markup(translation=None):
        """Build the card keyboard: answer buttons, or the translation row."""
        if translation is None:
            rows = [[
                InlineKeyboardButton(
                    text="Know " + Knowledge.true.value,
                    callback_data="know"
                ),
                InlineKeyboardButton(
                    text="Forgot " + Knowledge.false.value,
                    callback_data="forgot"
                ),
            ]]
        else:
            rows = [[
                InlineKeyboardButton(
                    text=translation,
                    callback_data="translation"
                )
            ]]
        return InlineKeyboardMarkup(rows)

    def send_card(self, update, word, translation):
        """Post the card to the chat; return the new message's id."""
        message = self._bot.send_message(
            chat_id=update.effective_message.chat_id,
            text="*" + word.get_word() + "*",
            reply_markup=self._get_card_markup(translation=translation),
            parse_mode=ParseMode.MARKDOWN
        )
        return message.message_id

    def update_card(self, update, translation):
        """Replace the card's keyboard with the revealed translation.

        Returns the edited message, or None when Telegram rejects the
        edit with BadRequest (e.g. the markup did not actually change).
        """
        markup = self._get_card_markup(translation=translation)
        try:
            return self._bot.edit_message_reply_markup(
                chat_id=update.effective_message.chat_id,
                message_id=update.effective_message.message_id,
                reply_markup=markup
            )
        except telegram.error.BadRequest:
            return None

    def update_card_as_deleted(self, update, context, message_id):
        """Strip the inline keyboard from the given message entirely."""
        return self._bot.edit_message_reply_markup(
            chat_id=update.effective_message.chat_id,
            message_id=message_id,
            reply_markup=None
        )
| 27.926554 | 77 | 0.600647 | 4,596 | 0.929048 | 0 | 0 | 720 | 0.145543 | 0 | 0 | 130 | 0.026279 |
ff5309da667685d4448f0288ca00089d68c14df3 | 316 | py | Python | learn_pyblish/myplugins/myplugin2.py | kingmax/py | 4cbc20d21c249e22e8f6b68d3c761a66a981e38f | [
"MIT"
] | null | null | null | learn_pyblish/myplugins/myplugin2.py | kingmax/py | 4cbc20d21c249e22e8f6b68d3c761a66a981e38f | [
"MIT"
] | null | null | null | learn_pyblish/myplugins/myplugin2.py | kingmax/py | 4cbc20d21c249e22e8f6b68d3c761a66a981e38f | [
"MIT"
] | null | null | null | import pyblish.api
########################################################################
class MyPlugin2(pyblish.api.ContextPlugin):
    """Minimal example Pyblish context plug-in.

    Its process() hook only prints a greeting, showing that the plug-in
    was discovered and executed by pyblish.
    """
    #----------------------------------------------------------------------
    def process(self, context):
        # Called once per publish with the current pyblish context.
        print('hello from plugin2')
| 24.307692 | 75 | 0.300633 | 199 | 0.629747 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.53481 |
ff54be97770be18ceff808d68e4d32c92c411113 | 2,411 | py | Python | tests/saliency_tests/visualizer_tests/test_mol_visualizer.py | k-ishiguro/chainer-chemistry | aec33496def16e76bdfbefa508ba01ab9f79a592 | [
"MIT"
] | 1 | 2019-06-19T00:05:59.000Z | 2019-06-19T00:05:59.000Z | tests/saliency_tests/visualizer_tests/test_mol_visualizer.py | k-ishiguro/chainer-chemistry | aec33496def16e76bdfbefa508ba01ab9f79a592 | [
"MIT"
] | null | null | null | tests/saliency_tests/visualizer_tests/test_mol_visualizer.py | k-ishiguro/chainer-chemistry | aec33496def16e76bdfbefa508ba01ab9f79a592 | [
"MIT"
] | 1 | 2020-10-12T07:23:44.000Z | 2020-10-12T07:23:44.000Z | import os
import numpy
import pytest
from rdkit import Chem
from chainer_chemistry.saliency.visualizer.mol_visualizer import MolVisualier # NOQA
from chainer_chemistry.saliency.visualizer.mol_visualizer import SmilesVisualizer # NOQA
def test_mol_visualizer(tmpdir):
    """Smoke test: MolVisualier saves an SVG and tolerates no save path."""
    mol = Chem.MolFromSmiles('OCO')
    saliency = numpy.array([0.5, 0.3, 0.2])
    visualizer = MolVisualier()

    # 1. Saving to an explicit path must create the file.
    out_svg = os.path.join(str(tmpdir), 'tmp.svg')
    visualizer.visualize(saliency, mol, save_filepath=out_svg)
    assert os.path.exists(out_svg)

    # 2. With `save_filepath=None` it should simply run without error.
    visualizer.visualize(
        saliency, mol, save_filepath=None, visualize_ratio=0.5,)
def test_smiles_visualizer(tmpdir):
    """Smoke test: SmilesVisualizer saves SVG/PNG and tolerates no path."""
    saliency = numpy.array([0.5, 0.3, 0.2])
    visualizer = SmilesVisualizer()

    # 1. Saving to an SVG path must create the file.
    svg_path = os.path.join(str(tmpdir), 'tmp.svg')
    visualizer.visualize(saliency, 'OCO', save_filepath=svg_path,
                         add_Hs=False)
    assert os.path.exists(svg_path)

    # PNG output only needs to run without raising.
    png_path = os.path.join(str(tmpdir), 'tmp.png')
    visualizer.visualize(saliency, 'OCO', save_filepath=png_path,
                         add_Hs=False)
    # TODO(nakago): support png save test.
    # Do not test for now (cairosvg is necessary)
    # assert os.path.exists(png_path)

    # 2. With `save_filepath=None` it should simply run without error.
    visualizer.visualize(
        saliency, 'OCO', save_filepath=None, visualize_ratio=0.5,
        add_Hs=False, use_canonical_smiles=True)
def test_mol_visualizer_assert_raises(tmpdir):
    """MolVisualier.visualize rejects bad saliency shapes and extensions."""
    visualizer = MolVisualier()
    mol = Chem.MolFromSmiles('OCO')

    # A 2-D saliency array has the wrong shape.
    with pytest.raises(ValueError):
        visualizer.visualize(numpy.array([[0.5, 0.3, 0.2],
                                          [0.5, 0.3, 0.2]]), mol)

    # An unknown save-file extension is rejected as well.
    with pytest.raises(ValueError):
        bad_path = os.path.join(str(tmpdir), 'tmp.hoge')
        visualizer.visualize(numpy.array([0.5, 0.3, 0.2]), mol,
                             save_filepath=bad_path)
# Allow running this test module directly instead of through the pytest CLI.
if __name__ == '__main__':
    pytest.main([__file__, '-v', '-s'])
| 33.957746 | 89 | 0.686022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 519 | 0.215263 |
ff55f7f2f426e01eb001fb5f6005e1a8245f5527 | 716 | py | Python | src/driver.py | 01mu/gamefaqs-scraper | 1c3ddd7b234937c1f2a5ab0ee2a9cedc33ad3d74 | [
"MIT"
] | 1 | 2022-01-08T18:13:24.000Z | 2022-01-08T18:13:24.000Z | src/driver.py | 01mu/gamefaqs-scraper | 1c3ddd7b234937c1f2a5ab0ee2a9cedc33ad3d74 | [
"MIT"
] | null | null | null | src/driver.py | 01mu/gamefaqs-scraper | 1c3ddd7b234937c1f2a5ab0ee2a9cedc33ad3d74 | [
"MIT"
] | null | null | null | #
# gamefaqs-scraper
# github.com/01mu
#
from gamefaqs_scraper import GFSBoard
from gamefaqs_scraper import GFSThread
# Scrape the first page of the GameFAQs board for Super Smash Bros.
# Ultimate and print a summary of each thread.
board = GFSBoard()
board.get_site('234547-super-smash-bros-ultimate', 0)
threads = board.find()
print("Pages: " + str(board.max_page) + "\n")

# Idiom fix: iterate the threads directly instead of indexing with
# range(len(...)); output is unchanged.
for thread in threads:
    print(thread.title + "\n" + thread.author + "\n" + thread.last
          + "\n" + thread.replies + "\n" + thread.link + "\n")

'''
thread = GFSThread()
thread.get_site('234547-super-smash-bros-ultimate/77126753', 0)
posts = thread.find()
print("Pages: " + str(thread.max_page) + "\n")
for i in range(len(posts)):
    print(posts[i].author + "\n" + posts[i].date + "\n" + posts[i].body + "\n")
'''
| 23.866667 | 79 | 0.642458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.52514 |
ff56d4da695b936f27e4f59c482e9fc6e6d38fb1 | 1,202 | py | Python | ServerPackage/Game.py | PaulieC/rls | fbff3a303a981a65069c9dc679c15d61056b88f1 | [
"Apache-2.0"
] | null | null | null | ServerPackage/Game.py | PaulieC/rls | fbff3a303a981a65069c9dc679c15d61056b88f1 | [
"Apache-2.0"
] | null | null | null | ServerPackage/Game.py | PaulieC/rls | fbff3a303a981a65069c9dc679c15d61056b88f1 | [
"Apache-2.0"
] | null | null | null | __author__ = "Paul Council, Joseph Gonzoph, Anand Patel"
__version__ = "sprint1"
__credits__ = ["Matt Martorana, Justin Read"]
# imports
from ServerPackage import Observer
class Game(Observer.Observer):
    """Abstract base for games run by the server.

    Sub-classes implement the rules: how a result is computed from the
    players' moves and which moves are legal.
    """
    # Display name of the concrete game; sub-classes are expected to set it.
    name = None
    @staticmethod
    def num_players_per_game():
        """
        Number of players in a game. Default setting is two-player games
        :return: the number of players this game will support
        :rtype: int
        """
        return 2
    def get_result(self, moves):
        """
        Computes the result for the given moves; sub-classes must override.
        A child of this class decides how win/loss/tie is determined from
        the moves, and should eliminate a player whose move is illegal.
        :param moves: A list containing the moves made by the players
        :type moves: list
        :return: a list containing the result for the players
        """
        pass
    def is_legal(self, move):
        """
        Checks if a given move is legal; sub-classes must override.
        :param move: given move
        :type move: int
        :return: True if the move is legal, False otherwise
        :rtype: bool
        """
        pass
    def get_name(self):
        """Return this game's display name (``None`` until set)."""
        return self.name
| 26.711111 | 72 | 0.616473 | 1,026 | 0.853577 | 0 | 0 | 241 | 0.200499 | 0 | 0 | 834 | 0.693844 |
ff586db231494de039c35a8707572150023a7b03 | 971 | py | Python | crawlers/news/items.py | Ceruleanacg/rena | eb651b7e4d8d8db0ca87e1415a5c9190abb346d2 | [
"MIT"
] | 7 | 2018-04-01T16:07:31.000Z | 2018-07-01T10:33:26.000Z | crawlers/news/items.py | Ceruleanacg/rena | eb651b7e4d8d8db0ca87e1415a5c9190abb346d2 | [
"MIT"
] | null | null | null | crawlers/news/items.py | Ceruleanacg/rena | eb651b7e4d8d8db0ca87e1415a5c9190abb346d2 | [
"MIT"
] | 1 | 2016-02-04T20:33:06.000Z | 2016-02-04T20:33:06.000Z | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
from scrapy.loader.processors import TakeFirst, Join
class NewsItem(Item):
    """One scraped news article."""

    # Single-valued fields: keep only the first value the loader extracts.
    url = Field(output_processor=TakeFirst())
    type = Field(output_processor=TakeFirst())
    source = Field(output_processor=TakeFirst())
    title = Field(output_processor=TakeFirst())
    article = Field(output_processor=TakeFirst())
    create_date = Field(output_processor=TakeFirst())
    # List-valued; consumed by Scrapy's images pipeline.
    image_urls = Field()
class SinaCaptchaItem(Item):
    # Field pair used by Scrapy's images pipeline:
    # candidate URLs in, download results out.
    image_urls = Field()
    images = Field()
class SinaStarItem(Item):
    """A Sina Weibo celebrity profile."""

    # All fields are single-valued: keep only the first extracted value.
    name = Field(output_processor=TakeFirst())
    avatar_url = Field(output_processor=TakeFirst())
    weibo_url = Field(output_processor=TakeFirst())
ff58f5c94b3ba90cd9f6fdc7fcbabd019709a353 | 611 | py | Python | intermediate/5/5-intermediate.py | KindaExists/daily-programmer | e52519708cac3dbd2cabfbf63024646108b45372 | [
"MIT"
] | null | null | null | intermediate/5/5-intermediate.py | KindaExists/daily-programmer | e52519708cac3dbd2cabfbf63024646108b45372 | [
"MIT"
] | null | null | null | intermediate/5/5-intermediate.py | KindaExists/daily-programmer | e52519708cac3dbd2cabfbf63024646108b45372 | [
"MIT"
] | null | null | null |
"""
[intermediate] challenge #5
Source / Reddit Post - https://www.reddit.com/r/dailyprogrammer/comments/pnhtj/2132012_challenge_5_intermediate/
"""
# Find anagram groups: words that share the same multiset of letters.
with open('intermediate/5/words.txt', 'r') as fp:
    words = fp.read().split()
# Each word's "anagram key" is its letters sorted alphabetically
# (fixes: fp.close() inside the with-block was redundant).
sorted_words = [''.join(sorted(word)) for word in words]

# Group words by key, preserving first-seen order.
word_dict = {}
for word, key in zip(words, sorted_words):
    word_dict.setdefault(key, []).append(word)

# Print only groups that actually contain anagrams (two or more words).
print('\n'.join([', '.join(word_set) for word_set in word_dict.values() if len(word_set) > 1]))
| 25.458333 | 112 | 0.662848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.307692 |
ff5986eb7d80707f56a8beab2a20415bc7fe42b3 | 23,051 | py | Python | Kevin/4.py | kevin-ci/advent-of-code-2020 | 697dd3d2639e30e59b228fa417ca5a76cd94f9e5 | [
"MIT"
] | null | null | null | Kevin/4.py | kevin-ci/advent-of-code-2020 | 697dd3d2639e30e59b228fa417ca5a76cd94f9e5 | [
"MIT"
] | null | null | null | Kevin/4.py | kevin-ci/advent-of-code-2020 | 697dd3d2639e30e59b228fa417ca5a76cd94f9e5 | [
"MIT"
] | null | null | null | import re
input = """iyr:2010 ecl:gry hgt:181cm
pid:591597745 byr:1920 hcl:#6b5442 eyr:2029 cid:123
cid:223 byr:1927
hgt:177cm hcl:#602927 iyr:2016 pid:404183620
ecl:amb
eyr:2020
byr:1998
ecl:hzl
cid:178 hcl:#a97842 iyr:2014 hgt:166cm pid:594143498 eyr:2030
ecl:hzl
pid:795349208 iyr:2018
eyr:2024 hcl:#de745c hgt:157cm
hgt:159cm pid:364060467 eyr:2025 byr:1978 iyr:2018 cid:117
ecl:hzl
hcl:#18171d
hcl:#cfa07d
ecl:amb iyr:2012
hgt:182cm cid:338
eyr:2020
pid:374679609 byr:1925
eyr:2021 byr:1981
hcl:#623a2f cid:195 iyr:2010
pid:579769934 ecl:grn hgt:192cm
byr:1970
ecl:oth
eyr:2025
pid:409994798 iyr:2018 hgt:189cm
hgt:153cm pid:817651329 iyr:2019
eyr:2029
hcl:#623a2f byr:1920
ecl:gry
iyr:2011 ecl:amb hcl:#a97842 byr:1965 pid:648375525 eyr:2028 hgt:177cm cid:287
iyr:2012 pid:369979235 hcl:#c0946f
ecl:amb hgt:178cm
byr:1927 ecl:brn hgt:178cm eyr:2026 hcl:#efcc98
iyr:2011 pid:770851101
eyr:2028
ecl:oth cid:298
byr:1943
hgt:168cm iyr:2018 hcl:#ceb3a1 pid:116783406
eyr:2027 hgt:175cm hcl:#733820
ecl:gry cid:349 iyr:2017 byr:1960
pid:257797292
cid:66 ecl:amb
eyr:2030
iyr:2026 byr:2024
hcl:a22966 hgt:179cm pid:155cm
eyr:2023 hcl:#c0946f pid:081232570 ecl:hzl
iyr:2010 hgt:158cm byr:1969
byr:1958
ecl:grn hcl:#ceb3a1
hgt:173cm
pid:600039004
cid:107 iyr:2012 eyr:2027
ecl:amb pid:021066381
hcl:#ceb3a1 byr:1982 iyr:2017
hgt:167cm eyr:2025 cid:61
hcl:#341e13
cid:268
pid:358390884 hgt:188cm byr:1961 iyr:2014 eyr:2027 ecl:blu
ecl:brn eyr:2020
pid:607203641
hcl:#fffffd iyr:2011
byr:1962
hgt:156cm
iyr:2018
hcl:#b6652a
byr:1942 ecl:blu eyr:2029 hgt:154cm pid:649263319
ecl:oth hgt:73in iyr:2012 hcl:#888785 eyr:2020
pid:147939289
byr:1961
ecl:oth iyr:2015
hgt:189cm hcl:#341e13 pid:686943691 eyr:2023 byr:1987
pid:568844323
eyr:2023 byr:1921 hgt:167cm cid:154 hcl:#b6652a
ecl:gry iyr:2020
eyr:2023 byr:1994
iyr:1937 hgt:177cm hcl:#c0946f pid:686240814 cid:231 ecl:#a8ba32
hcl:#b6652a
byr:1946 pid:543383899 iyr:2013 hgt:153cm ecl:hzl cid:238 eyr:2023
eyr:2028 ecl:blu
hgt:154cm cid:252
pid:196374590
byr:1987 iyr:2011
hcl:#7d3b0c
iyr:2013
ecl:amb cid:187
hgt:187cm pid:593027548 byr:1963
eyr:2024 hcl:#fffffd
pid:588211492 hgt:156cm
iyr:2021 eyr:2021 ecl:gry hcl:z byr:1928
ecl:amb hcl:#888785 hgt:180cm eyr:2022 byr:1923 pid:490291639 cid:173 iyr:2015
iyr:2014 cid:211 pid:404157420 hcl:#602927
ecl:oth byr:1946 eyr:2030 hgt:175cm
hcl:z byr:2026
pid:61805448
hgt:125 iyr:2025
eyr:2028
hgt:156cm
hcl:#341e13 cid:103 ecl:amb iyr:2017 byr:1937 pid:320691739
hgt:185cm
pid:440489464 byr:1929 ecl:amb iyr:2011 eyr:2021 cid:327 hcl:#341e13
byr:1988 ecl:grn
pid:062728732 iyr:2013
hgt:181cm
hcl:#18171d
eyr:2026
pid:000647617
eyr:2029 byr:1937
ecl:gry hcl:#e8eff3 hgt:164cm cid:151
iyr:2016
hgt:179cm
byr:1949
eyr:2029 pid:459190453
ecl:grn iyr:2020 hcl:#c0946f
hgt:160cm pid:476613532 cid:190 iyr:2016 hcl:#4657e5
byr:1929
eyr:2028
ecl:grn
eyr:2027 byr:1982
hcl:#18171d
pid:630408328
cid:65 iyr:2020
hgt:161cm
pid:752776254
hcl:#888785
hgt:189cm
eyr:2027 iyr:2020 ecl:hzl
cid:194 byr:1934
iyr:2015 hgt:167cm byr:1977
eyr:2021 hcl:#14564f pid:504471386 ecl:oth
hgt:84 pid:168cm
hcl:8532fb eyr:2023
iyr:2012 ecl:xry byr:2008 cid:288
cid:323 eyr:2024
iyr:2019
pid:495737304 byr:1966 hcl:#7d3b0c ecl:hzl
hgt:73in
iyr:2020 byr:1953 ecl:hzl hcl:#efcc98 hgt:174cm eyr:2026 pid:546906638
pid:839249028
hcl:z byr:2024
hgt:145 eyr:2034 iyr:2021 ecl:#891c47
eyr:2036
ecl:#89d2ae
cid:183 byr:2014
hcl:b3af0f
pid:12086913 iyr:1981
hgt:61cm
ecl:brn eyr:2030 pid:083487445 byr:1929 hcl:z iyr:2021
hgt:182 cid:318
eyr:2020
pid:188609216 hcl:#341e13
iyr:2012 hgt:179cm
eyr:2029
hcl:#888785 pid:704026565 hgt:173cm iyr:2020 ecl:blu byr:1950 cid:237
ecl:grn
eyr:2030
byr:1961 pid:695808266
iyr:2012 cid:56
hgt:155cm
iyr:2011 ecl:amb
byr:1986 pid:243061330 hgt:163cm eyr:2021
eyr:2030 hcl:#623a2f hgt:170cm ecl:hzl
pid:694575319 iyr:2011
byr:1939
iyr:2014 pid:184152121
hcl:#c0946f hgt:163cm
eyr:2028 byr:1992 cid:114
ecl:hzl
hgt:75in cid:233
hcl:#866857 pid:269157261 iyr:2020
byr:1973 eyr:2029
hgt:174cm
hcl:#f86751 iyr:2016
pid:904779190
ecl:brn eyr:2024 byr:1950
cid:123 iyr:2019
eyr:2030 pid:402585706
ecl:brn byr:1995 hcl:#4ff7fa
hgt:65in
ecl:grn eyr:2029
pid:083364259 iyr:2013 cid:50 byr:1938 hgt:187cm
hcl:#a97842
hcl:#6b5442 cid:101 iyr:2011
ecl:amb eyr:2029 byr:1963 pid:664573740
eyr:2025 hcl:#602927
hgt:188cm
iyr:2019
pid:521514539 byr:1940 ecl:gry
hcl:dc0449 eyr:1981 pid:188cm
cid:151 iyr:1979 hgt:61cm ecl:dne
byr:2028
iyr:2017 byr:1924
hgt:163cm eyr:2024 hcl:#ceb3a1 pid:424127124
ecl:amb
eyr:2039 pid:7837217107 hcl:z byr:2005
iyr:1989 ecl:#d95f4d hgt:190in
ecl:#329eb1 cid:178 hgt:192
eyr:2020 iyr:2012
hcl:#602927
byr:2028 pid:7175349420
ecl:gry byr:1931
hgt:162cm iyr:2014
eyr:2030 cid:50
hcl:#cfa07d pid:653585396
eyr:2025 hgt:177cm
ecl:gry hcl:#efcc98
iyr:2015
byr:1942
pid:388475446
hcl:#efcc98 ecl:grn
hgt:185cm
byr:1921 pid:253592171
eyr:2031 cid:220 iyr:2024
byr:1950
hgt:158cm ecl:gry iyr:2015 hcl:#18171d
eyr:2023
pid:151cm
byr:1957
hcl:z
eyr:2026
ecl:grn
iyr:1971 hgt:192in pid:5479810865
hgt:161cm pid:473851111 iyr:2018
ecl:brn byr:1982
eyr:2029
pid:136216608 byr:1958
cid:226 eyr:2023 hcl:#866857 iyr:2017 ecl:hzl hgt:159cm
byr:1993 hcl:#866857 hgt:169cm pid:488392920
cid:109 iyr:2017 ecl:oth eyr:2029
cid:248 ecl:amb eyr:2025 iyr:2017 byr:1951 hcl:#ceb3a1 pid:731763175 hgt:162cm
hcl:#835e79
eyr:2021
ecl:oth pid:617055193 byr:1997 iyr:2010
hgt:173cm
eyr:2024 pid:257895944
hcl:#ceb3a1
hgt:165cm
ecl:oth iyr:2020
byr:1958
pid:438580092
ecl:grt byr:2025
hcl:z iyr:2000 eyr:1952
cid:271 hgt:170in
iyr:2010 hcl:#6b5442 hgt:156cm
eyr:2026 ecl:grn pid:409793041 byr:1941
pid:076486440
hgt:177cm hcl:#888785 ecl:blu iyr:2017 eyr:2029
eyr:2028 ecl:amb hgt:186cm hcl:#1d5836 pid:563307670 iyr:2019 byr:1950
byr:1939 ecl:hzl hgt:193cm pid:329759796
hcl:#cfa07d eyr:2025 iyr:2011 cid:73
byr:1995
hgt:188cm eyr:2028
ecl:blu
iyr:2016 hcl:#888785 pid:459613739 cid:115
hcl:#623a2f
eyr:2021 cid:197 hgt:187cm ecl:oth
byr:1969
iyr:2010 pid:385660251
hgt:192cm cid:143 byr:1995 hcl:#fffffd
iyr:2017 ecl:oth
eyr:2020 pid:087661720
ecl:oth
byr:1994 hgt:183cm
eyr:2020 iyr:2020 pid:448389966 cid:92 hcl:#866857
pid:088166852 hgt:155cm cid:307 byr:1940
hcl:#7d3b0c
ecl:#af542f eyr:2023 iyr:2014
byr:2026 eyr:2039 hcl:5449b3
ecl:hzl hgt:176in
iyr:1962 pid:177cm
iyr:2020 ecl:amb hgt:164cm hcl:#c0946f
pid:931543453 eyr:2024 byr:2001
iyr:2010 eyr:2023 hgt:188cm
hcl:#866857 ecl:hzl pid:866631112 byr:1997
byr:1958 hgt:184cm
cid:117 hcl:#7d3b0c iyr:2019 pid:615734013 eyr:2028 ecl:gry
hgt:86 iyr:1935 ecl:grt pid:#af8e67 eyr:2031
byr:2018 hcl:6a2940
hgt:73in eyr:2022 pid:580461358 byr:1962
cid:129 iyr:2015 hcl:#7d3b0c
iyr:2019 hcl:#b6652a hgt:172cm ecl:blu pid:077121198 eyr:2021
byr:1995
hcl:#ceb3a1 cid:253
iyr:2015 hgt:177cm byr:1973
ecl:hzl pid:311289324 eyr:2025
iyr:2017 hcl:#efcc98
cid:57 byr:1940 ecl:blu
eyr:2025 hgt:157cm pid:827480048
eyr:2028 hgt:189cm
iyr:2016 byr:1978 ecl:hzl pid:127497651 cid:87
hcl:#623a2f
hcl:#341e13 byr:2015
ecl:brn hgt:187in
pid:82075551
eyr:1936
cid:200
iyr:1939
ecl:grn byr:1962
iyr:2011 hgt:169cm
pid:661559147
hcl:#623a2f eyr:2023
ecl:gry
hcl:#efcc98 eyr:2009 byr:2028
hgt:170in
cid:129 pid:161cm iyr:2018
pid:098090405 hcl:#623a2f byr:1943 ecl:hzl
hgt:152cm iyr:2013 eyr:2029
pid:495271053 iyr:2011 ecl:gry hcl:#623a2f cid:285
byr:1925 eyr:2024 hgt:187cm
cid:306
hgt:73in
iyr:2010 hcl:#448fd7
byr:1946
ecl:grn pid:137146932 eyr:2021
eyr:2020 hgt:159cm cid:90 iyr:2010 ecl:brn hcl:#341e13 byr:1955
hcl:#18171d iyr:2017 ecl:amb
pid:168517472
eyr:2021 hgt:181cm byr:1942
cid:325 eyr:2022 pid:947158470 byr:1994 iyr:2019 ecl:grn hgt:172cm hcl:#ec63ce
iyr:2011
pid:243339529
ecl:amb
hgt:169cm
byr:1967
eyr:2025 hcl:#b6652a
pid:664966826 eyr:2036 iyr:2015 byr:1972 hgt:68in
hcl:z
ecl:#038105
eyr:2021 pid:236054221
hgt:179cm
hcl:#b6652a iyr:2020 ecl:blu
ecl:grn
iyr:2010
pid:870519416 byr:1945 hcl:#a97842
hgt:176cm eyr:2030
hcl:#3318db eyr:2022
byr:1966
ecl:grn iyr:2013
cid:349
hgt:168cm pid:827688488
pid:124116963
hcl:#866857 eyr:2026
iyr:2013 ecl:grn byr:1983 hgt:183cm
iyr:2017 byr:1993
hcl:#18171d ecl:utc hgt:68in cid:168 eyr:2030 pid:#2fd9f2
ecl:blu cid:134 eyr:2025 pid:588957573
iyr:2017
hgt:151cm byr:1942 hcl:#4280c1
hcl:#51b593
iyr:2013
ecl:amb pid:668244584
cid:282
byr:1936
eyr:1985 hgt:161cm
pid:494051052
hgt:185cm byr:1996 eyr:2028 iyr:2018
ecl:amb
hcl:#efcc98
ecl:brn
eyr:2025
iyr:2011
hgt:163cm hcl:#a97842
byr:1989 pid:557549000
pid:828235468 cid:55
iyr:2010 byr:1926 eyr:2029 hgt:153cm hcl:#cfa07d
ecl:blu
hgt:158cm iyr:2015 pid:957913612 ecl:grn eyr:2020 byr:1984 cid:76 hcl:#6b5442
ecl:amb eyr:2020 pid:596116320
byr:1936
hcl:#cfa07d
hgt:165cm cid:86 iyr:2014
iyr:2012
cid:278 hcl:#602927
eyr:2020 ecl:hzl
hgt:176cm byr:1987 pid:594817909
iyr:2011 byr:1929 pid:073211525 eyr:2022
hgt:188cm
ecl:blu
hcl:#733820
hcl:#602927 hgt:187cm
pid:706155322 cid:203
ecl:brn byr:1952 iyr:2017 eyr:2020
hcl:bcb5f7
byr:2002 eyr:2029 pid:850069752 iyr:2019 ecl:hzl
hgt:167cm
hcl:#b6652a hgt:72in iyr:2013
ecl:grn eyr:2024 byr:1920 cid:114
pid:983486664
byr:1931 iyr:2020 pid:182737852 hgt:162cm
ecl:grn hcl:#888785 eyr:2028
eyr:2035
byr:1962 iyr:2012 cid:120
ecl:xry
hgt:61cm hcl:ce89a8 pid:335540582
pid:#05153d iyr:1990
eyr:1927 hgt:71cm
byr:2019 cid:346 ecl:#e38688
hcl:c6abd9
ecl:#cd58d8 pid:166cm iyr:2012
hcl:0d1b02 hgt:68
eyr:1958
pid:976419172 byr:1922 cid:345 hcl:#6b5442 iyr:2010 eyr:2026
ecl:grn hgt:155cm
ecl:gry hcl:#1bbadc hgt:168cm
eyr:2028
byr:1984 cid:179 iyr:2013 pid:706186218
ecl:blu hgt:188cm
pid:764775319 byr:1936 hcl:#7d3b0c iyr:2020
hcl:#623a2f
iyr:2012
pid:382832140 ecl:gry
eyr:2026
cid:350
hgt:165cm byr:1968
hcl:0b87a1 byr:2020 pid:4365879329
cid:110 ecl:grn
eyr:2032 hgt:155cm
iyr:2018
hgt:193cm eyr:2029 hcl:#733820 pid:081071142 byr:1929 ecl:oth
ecl:brn
eyr:2023 pid:876924536 cid:165
hcl:#efcc98 hgt:151cm byr:1972
iyr:2020
hgt:186cm eyr:2022
ecl:grn
byr:1972 pid:997639611 hcl:#ceb3a1 iyr:2013
byr:1926
pid:808460262
iyr:2012 eyr:2031 hcl:#a97842 ecl:amb
hgt:190cm
hgt:163cm
hcl:#ceb3a1 eyr:2028
ecl:grn
byr:1944 pid:381144425 iyr:2012
hcl:#95a232 pid:015229624 byr:1947 iyr:2013 hgt:66cm ecl:gry eyr:2027
hcl:z byr:1965 iyr:2013 hgt:157cm ecl:#8b12fb cid:246 pid:283039791 eyr:2023
ecl:gry byr:1950
hcl:#623a2f cid:276 iyr:2013 eyr:2030 pid:798610943 hgt:189in
eyr:2030 cid:52 hcl:#fffffd pid:041625574 ecl:amb iyr:2016 byr:1944
hgt:191cm
byr:1995
iyr:2015 cid:221 pid:279080024
eyr:2022
hgt:181cm ecl:brn hcl:#888785
hcl:z
ecl:blu
iyr:1970
eyr:2022
hgt:193cm pid:#540e31 cid:95 byr:1952
hcl:z eyr:2024 ecl:hzl
byr:2028
cid:323 pid:1949331457
hgt:69
eyr:2030 hcl:#866857
cid:173 iyr:2017
hgt:190cm byr:1941
ecl:blu
pid:269015932
hcl:#b6652a
iyr:2018
eyr:2022 ecl:brn hgt:185cm pid:456195468
hcl:#6b5442 hgt:188cm
iyr:2019 byr:1966 cid:298
pid:050653473
ecl:gry eyr:2028
cid:208
ecl:amb eyr:2023 hgt:176cm byr:1971 hcl:#7d3b0c pid:650190272 iyr:2018
hgt:68in pid:615309584
iyr:2011 byr:1950
hcl:#efcc98 ecl:oth
eyr:2024
eyr:2022 iyr:2011 hcl:#623a2f ecl:amb byr:1955
hgt:190cm
pid:244918527
iyr:2013 hcl:#ceb3a1 eyr:2029 hgt:164cm
ecl:oth
byr:1928 pid:337615663
hcl:#ceb3a1 pid:#ae7eea byr:2027
cid:254
hgt:125
iyr:1940
ecl:zzz
pid:033663619 iyr:2012 byr:1989 eyr:2030 ecl:hzl
hcl:#b6652a hgt:154cm
hgt:175cm byr:1929 pid:100788192
ecl:#92b14c
iyr:1940 hcl:#ceb3a1 eyr:2033
eyr:2029
pid:357835141 ecl:oth iyr:2019 hcl:#866857 hgt:154cm byr:1954
pid:895992818 byr:1965 iyr:2017 hcl:#efcc98 ecl:amb hgt:153cm eyr:2025
byr:1928 ecl:amb hgt:168cm pid:346938111 eyr:2025 iyr:2014
hcl:#cfa07d
hcl:#b6652a pid:825661608 eyr:2020 iyr:2019 byr:1974
hgt:180cm ecl:amb
byr:1970 hgt:159cm hcl:#733820 pid:101838832 iyr:2015 eyr:2027 ecl:blu
byr:1941 ecl:amb
eyr:2024 pid:015890498
hgt:175cm
iyr:2018 hcl:#cfa07d
hgt:67in
pid:404983369 eyr:2023 iyr:2018 byr:1974 hcl:#602927
ecl:blu
byr:1957
hcl:#fcc940 pid:615831236
iyr:2018 eyr:2020 ecl:brn hgt:181cm cid:218
hcl:#fffffd ecl:grn pid:271614109
eyr:2028 hgt:184cm byr:1974 iyr:2015
ecl:#e45ee0 pid:151cm cid:127 iyr:2014 byr:2022 hcl:973bc1 eyr:2033 hgt:181in
hcl:#6b5442 pid:502739402 eyr:2020 byr:1926 ecl:brn
iyr:2010
ecl:xry hgt:169cm byr:2023
iyr:1973 pid:4137668
eyr:2037 hcl:z
ecl:#3a8c46 hcl:43730a pid:57210146 eyr:2031 cid:117 iyr:2013 byr:2010
hcl:#341e13 cid:237 hgt:150cm iyr:2016 byr:1967 ecl:blu
pid:674080319 eyr:2024
iyr:2011 hcl:#866857 pid:111247018
byr:1920 hgt:192in ecl:#8bf268 eyr:2021
iyr:2022 hcl:z ecl:gry
hgt:159cm
pid:#88e8df
byr:2026 eyr:2032 cid:221
hgt:156cm eyr:2026
ecl:blu
hcl:#192dea cid:280 pid:788808021 byr:1980
iyr:2013
hgt:156in
byr:2024 hcl:4e4dd6
eyr:2030
iyr:2028 pid:35683378
ecl:#3a9fba
pid:081236370 cid:150 hcl:d15b43 byr:2029 hgt:118 iyr:2026 eyr:2038
ecl:grt
eyr:2034 pid:186cm
ecl:utc cid:300 iyr:2009 byr:2018 hcl:163913 hgt:74cm
ecl:hzl
pid:249858519 byr:1936 hgt:182cm
cid:343 iyr:2013 eyr:2030 hcl:#7d3b0c
cid:168
ecl:hzl
hgt:174cm iyr:2020
pid:446135799 hcl:#888785
eyr:2024 byr:1998
pid:545342162
hcl:#5cd3bd cid:126
eyr:2024
iyr:2012 ecl:grn
pid:104835585
byr:1989 hcl:#733820 ecl:oth eyr:2024 iyr:2017
hgt:180cm
hgt:184cm byr:2001 pid:199216567 ecl:gry
eyr:2022
cid:185 hcl:#7d3b0c
iyr:2019
byr:1996 eyr:2022 pid:503963080 ecl:grn iyr:2010 hcl:#fffffd
eyr:2030 iyr:2017
pid:472300557 hcl:#a97842
ecl:grn hgt:190cm
byr:1994
ecl:#2a8a59
eyr:2027
iyr:2015 byr:2021 hgt:158cm pid:365979521 hcl:z cid:242
ecl:gry
iyr:2020 hcl:#866857
pid:363851353 cid:319 hgt:154cm eyr:2027
byr:1953
ecl:grn hgt:165cm eyr:2026
pid:443722683 hcl:#341e13
iyr:2018 byr:1923
byr:1920 ecl:blu
cid:193 hgt:153cm hcl:#341e13 iyr:2010 pid:934896568
eyr:2021
eyr:2025
pid:524699651 cid:92
hcl:#602927 byr:1999
iyr:2011 ecl:brn hgt:164cm
eyr:2030 pid:739947771 iyr:2018
byr:1990
hgt:185cm hcl:#602927 ecl:gry
byr:1967 ecl:amb iyr:2020 hcl:#341e13
hgt:165cm
pid:681478012 eyr:2028
pid:807715479 ecl:blu byr:1955 eyr:1972 iyr:2018 hcl:#a97842 hgt:151
pid:635008585 cid:97
hgt:186cm hcl:#b6652a iyr:2015 eyr:2020 ecl:gry byr:1959
iyr:2017
cid:155 byr:1999 pid:550276277
hcl:#18171d
eyr:2020 hgt:164cm ecl:amb
byr:1977 hcl:#6b5442 ecl:grn iyr:2012 hgt:156cm
eyr:2028 pid:125635376
hgt:65in pid:042700658 byr:1962 iyr:2020
hcl:#888785 eyr:2021 ecl:gry
ecl:blu iyr:2017 hcl:#efcc98 pid:447451869 hgt:176cm
byr:1958
eyr:2024
ecl:amb hgt:155cm eyr:2022 hcl:#efcc98
pid:614496034 byr:1957
iyr:2016
cid:99
eyr:2020
ecl:amb iyr:2017
hgt:163cm pid:128207503 byr:1977
hcl:#866857
ecl:amb cid:342 eyr:2026 hgt:172cm pid:317675262
byr:1942 hcl:#a97842 iyr:2010
ecl:grn pid:077163993
hgt:187cm hcl:#341e13 iyr:2012 byr:1934 eyr:2024
pid:423538706 hgt:156cm
ecl:oth hcl:#341e13 iyr:2016 eyr:2028
iyr:2030 ecl:#faff64
byr:2012
pid:734434105 hgt:164in hcl:z eyr:2023
hgt:150in iyr:2016 pid:173cm hcl:db675a cid:219 eyr:2032 byr:1958
ecl:xry
pid:087437383
eyr:2025 hgt:178cm ecl:gry byr:1954
cid:227 hcl:#fffffd
iyr:2018
pid:152cm
iyr:2030 eyr:2030
byr:2010 hcl:z
hgt:155cm
ecl:amb
byr:1934
hcl:#341e13 hgt:167cm
pid:#7356dd ecl:amb
iyr:2011
eyr:2030
cid:123
eyr:2027
byr:2005
hgt:173cm cid:174 hcl:#ceb3a1 iyr:2018 ecl:amb pid:179cm
iyr:2019 ecl:grn eyr:2023
hgt:162cm
pid:649681621 hcl:#4ee6d2 byr:1955
hgt:165cm byr:1929 ecl:blu pid:839016251 iyr:2017 hcl:#c0946f
eyr:2020
eyr:2020
iyr:2017 hcl:#c7ed42 ecl:blu byr:1928
hgt:74in pid:112604496
eyr:2026 hgt:184 cid:113
byr:1933
pid:952646285
iyr:2019 hcl:#fffffd ecl:gry
pid:455008820 byr:1982 eyr:2030 ecl:gry iyr:2020 cid:103 hcl:#733820 hgt:184cm
hcl:#733820 iyr:2020 hgt:182cm ecl:grn
cid:226 pid:081011361 eyr:2022 byr:1995
iyr:1999
hcl:#18171d pid:9252198900
ecl:amb byr:1999 hgt:175cm eyr:2021
iyr:2020 hgt:165cm
ecl:blu
eyr:2023 pid:760213482
byr:1968
hcl:#c0946f
pid:242381670 ecl:amb
hgt:172cm byr:1980 eyr:2020 iyr:2014 hcl:#866857
byr:2021 pid:#a94a22 hcl:#cfa07d iyr:1969 eyr:2030 ecl:zzz
hgt:76cm
ecl:oth cid:168
byr:1954 pid:079481919 eyr:2025 hcl:#c0946f hgt:172cm
hgt:171cm
eyr:2030
byr:1969 cid:170
pid:164128658 ecl:amb
hcl:#c2265e iyr:2019
byr:1983
cid:163
eyr:2020 pid:232659795 iyr:2013 hcl:#888785 hgt:162cm
ecl:blu
ecl:gry hcl:#7d3b0c
pid:001171231 eyr:2020
byr:1935 hgt:160cm
iyr:2011
iyr:2012 hcl:#a97842
eyr:2029 pid:809880438 hgt:164cm cid:83 byr:1961 ecl:hzl
cid:288 eyr:2027
hgt:181cm byr:1955
iyr:2020
ecl:oth pid:754135833 hcl:#c0946f
iyr:2012 pid:053980893
cid:54 byr:1961 ecl:gry hcl:#602927 eyr:2020 hgt:167cm
iyr:2013
eyr:2025
hgt:176cm pid:169006156 cid:270 ecl:oth byr:2001
cid:244 pid:914067457
iyr:2017 byr:1926 hcl:#733820 ecl:brn hgt:187cm
eyr:2030
ecl:oth byr:1942
hgt:176cm iyr:2020 eyr:2027
hcl:#efcc98
pid:688816242
hgt:177cm hcl:#efcc98 eyr:2030 pid:888703414
iyr:2010 byr:1973 ecl:gry
cid:257 eyr:2030
ecl:brn
pid:359774824
byr:1988 hcl:#6b5442 iyr:2013 hgt:187cm
iyr:2011 hgt:173cm cid:290 byr:2000 ecl:gry
hcl:#7d3b0c
pid:743371399 eyr:2029
cid:162
eyr:1920 byr:2010 pid:#69d6ba hgt:74 hcl:z ecl:#d256f3 iyr:1933
pid:435518624 byr:1938 eyr:2027 iyr:2016 hcl:#18171d
hgt:161cm
ecl:gry
ecl:gry eyr:2027 hcl:#7d3b0c hgt:170cm
pid:928345976 iyr:2020
hcl:#5f4023 ecl:blu
pid:024527693
eyr:1932 iyr:2023 hgt:154cm byr:1948
cid:284 iyr:2011 byr:1920 eyr:2024 ecl:blu hgt:153cm
hcl:#602927 pid:005741906
iyr:2029 hgt:108 byr:2029 hcl:c8b25d
pid:522512400 eyr:2038 ecl:zzz cid:163
pid:371295649
eyr:2022 ecl:hzl
iyr:2019 hgt:153cm byr:1961
hcl:z
eyr:2027 iyr:2020 pid:619653661 byr:1968 hcl:#b6652a cid:62 ecl:hzl
hgt:186cm
iyr:1931
pid:565552342 ecl:#af97bb hcl:c92cd6 eyr:1931 byr:2025 hgt:184in
hgt:187cm
ecl:grn
byr:1954 cid:145
iyr:2016
hcl:#efcc98 eyr:2030 pid:202254357
cid:177
iyr:2013 byr:1926 hcl:#efcc98
pid:298693475 hgt:181cm eyr:2023 ecl:dne
byr:2014
cid:255
iyr:1951 hgt:72in
hcl:#efcc98 eyr:2039 pid:135688013
ecl:grn
byr:2019 eyr:1971 pid:#a95cb4
hcl:#ceb3a1 ecl:#6f919c
hgt:193cm iyr:2012
pid:497726268
ecl:grn
eyr:2025 hcl:#efcc98 iyr:2019 hgt:170cm byr:1970
byr:1939 hcl:#18171d cid:250
iyr:2011 ecl:blu pid:216607711
hgt:158cm eyr:2029
byr:1937
eyr:1931
hcl:#5ee898
pid:#876b1a hgt:190cm
cid:277 ecl:#5f0f80 iyr:2013
ecl:oth hgt:191cm eyr:2025 byr:1978 pid:271136754 hcl:#888785
iyr:2012
hcl:#6b5442
iyr:2015 byr:1958 pid:510020331 hgt:158cm eyr:2024 ecl:blu
byr:1998 cid:142 eyr:2026 iyr:2015 hcl:#733820
pid:671943334 hgt:186cm ecl:oth
eyr:2025 ecl:brn hcl:#7d3b0c pid:000803215
byr:1947
iyr:2017 hgt:168cm cid:230
pid:612432109 hgt:186cm byr:1963 ecl:hzl iyr:2019 eyr:2027
hcl:#efcc98
cid:148
hcl:#c0946f pid:846986027 eyr:2025 byr:1941
cid:154 hgt:158cm iyr:2012
ecl:brn
ecl:gry hgt:186cm
iyr:2015 hcl:#602927 byr:1923 eyr:2023
pid:48544569
pid:857428120 hgt:158cm hcl:#e4a267 iyr:2014 eyr:2020 byr:1975 ecl:blu
ecl:blu pid:559783197 byr:1935 cid:119 iyr:2017 hgt:157cm hcl:#6b5442 eyr:2020
ecl:oth pid:724332293 hcl:#602927
cid:77 iyr:2019
byr:2001 hgt:192cm eyr:2024
ecl:hzl eyr:2031
hcl:#efcc98 byr:2011 cid:280 iyr:2017
pid:377875085
hgt:172cm
byr:1947 hgt:174cm ecl:amb iyr:2018 cid:94 hcl:#a97842 eyr:2026 pid:286225332
hgt:85 ecl:xry eyr:2033 iyr:1952 pid:92902290
hcl:a6f86d
byr:2013
byr:1935 hcl:#c0946f pid:368741489 ecl:blu
eyr:2020 hgt:164cm
iyr:2018
cid:196
pid:718568707
ecl:oth byr:2003 hcl:#a97842 iyr:2010 hgt:168cm eyr:2025 cid:261
hcl:#6b5442
pid:675429853
hgt:62in ecl:grn iyr:2016
eyr:2027 byr:1932
byr:1978
pid:080846464 hcl:#ceb3a1 ecl:gry iyr:2015 hgt:190cm eyr:2029
pid:1756319674
iyr:2010 byr:1998 hcl:#866857 cid:259
eyr:2025 hgt:73in ecl:hzl
eyr:2035
hcl:z hgt:61cm
pid:3267812127
cid:230
byr:2029 iyr:2028 ecl:lzr
hgt:161cm ecl:hzl byr:1934 iyr:2011 eyr:2025 hcl:#cfa07d pid:354474868
pid:727482965
hcl:#623a2f iyr:2010 hgt:156cm eyr:2020 cid:68 ecl:grn byr:1950
pid:040800697 hgt:186cm
hcl:#341e13 iyr:2030 ecl:hzl
byr:1937 eyr:2020
iyr:2013 byr:1928 pid:752644096 eyr:2030 hgt:191cm ecl:hzl
cid:93 hcl:#a97842
pid:022267155 hcl:#cfa07d eyr:2026
ecl:hzl
hgt:187cm iyr:2014 cid:347
hgt:73in
eyr:2021 pid:054367702 ecl:amb hcl:#18171d byr:1965
iyr:2020 cid:267
eyr:2022
cid:140 pid:189859171 byr:1984 iyr:2020 ecl:brn hgt:166cm hcl:#623a2f
byr:1971 iyr:2015
hgt:168cm
eyr:2020 pid:650970816 hcl:#341e13
ecl:grn
cid:168
hcl:#c0946f byr:1948 hgt:189cm
pid:868785851
cid:194 ecl:amb eyr:2024 iyr:2011
eyr:2040
byr:2030 hcl:afde59
hgt:172cm pid:72468598 iyr:1990 cid:165 ecl:#896a8e
iyr:2009 hcl:#6b5442
eyr:2028
cid:53 ecl:hzl
hgt:165cm byr:1999 pid:844037301
cid:281 eyr:2022
iyr:2020 byr:1976 hgt:176cm hcl:#6b5442 ecl:amb pid:755280305
hgt:154cm iyr:2013
pid:059284139 byr:1992
cid:215 ecl:blu eyr:2025 hcl:#b6652a
ecl:grn
cid:308
hgt:187cm pid:009080324 eyr:2027
iyr:2012 byr:1955
pid:083241291 hcl:#7c1810 eyr:2030 iyr:2019 byr:1950 ecl:brn hgt:72in
cid:148 byr:1953 hcl:#623a2f
pid:076848285 hgt:175cm iyr:2017
eyr:2022
ecl:oth
iyr:2020
hgt:160cm
eyr:2028 cid:312 ecl:brn hcl:#888785 pid:681067688 byr:1986
iyr:1972 cid:170 eyr:2023
pid:21811501 ecl:#17c6e8
hgt:158in byr:2015 hcl:5b7956
pid:720571739 cid:304 byr:1951 hgt:191cm
eyr:2025 hcl:#341e13
iyr:2011
eyr:2020 ecl:blu hcl:#cfa07d pid:097863725
hgt:150cm
byr:1951
cid:143 iyr:2013
eyr:2027 iyr:2019 ecl:#a0eeca hcl:#c0946f pid:724783488 byr:1943 cid:282 hgt:124
byr:2012
iyr:2013 eyr:2036 hcl:z hgt:97
pid:#677847 ecl:dne
pid:341708492 hgt:190cm
byr:1988 hcl:#888785
ecl:hzl
iyr:2015 eyr:2029
iyr:2020 byr:1968
ecl:gry
eyr:2030 hcl:#1976b0
cid:127 pid:701862616
hgt:161cm"""
inputs = input.split("\n\n")

"""one"""
requirements = ['ecl', 'pid', 'eyr', 'hcl', 'byr', 'iyr', 'hgt']

# A passport counts when every required field name appears somewhere in
# its raw text ('cid' is deliberately optional). The complete records are
# kept in `valid` for part two.
valid = [passport for passport in inputs
         if all(field in passport for field in requirements)]
total = len(valid)
print(total)
"""two"""
eye_colours = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']
valid_count = 0
def is_valid(entry):
    """Validate one passport against the AoC 2020 day 4 part-two rules.

    entry: list of 'key:value' strings for a single passport.
    Returns True only when all seven required fields are present and every
    value passes its range/format rule ('cid' is ignored).

    Improvements over the original: constants are local (no hidden reliance
    on module globals), and malformed numeric values (e.g. 'byr:abcd') are
    rejected instead of raising ValueError.
    """
    required_fields = ('ecl', 'pid', 'eyr', 'hcl', 'byr', 'iyr', 'hgt')
    valid_eye_colours = ('amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth')
    fields = {}
    for item in entry:
        key, _, value = item.partition(':')
        fields[key] = value
    if not all(key in fields for key in required_fields):
        return False
    try:
        if not 1920 <= int(fields['byr']) <= 2002:
            return False
        if not 2010 <= int(fields['iyr']) <= 2020:
            return False
        if not 2020 <= int(fields['eyr']) <= 2030:
            return False
        hgt = fields['hgt']
        if hgt.endswith('cm'):
            if not 150 <= int(hgt[:-2]) <= 193:
                return False
        elif hgt.endswith('in'):
            if not 59 <= int(hgt[:-2]) <= 76:
                return False
        else:
            # Height must carry an explicit cm/in unit.
            return False
    except ValueError:
        # Non-numeric year or height: invalid rather than crash.
        return False
    # Hair colour: '#' followed by exactly six lowercase hex digits.
    if not re.match('#[0-9a-f]{6}$', fields['hcl']):
        return False
    if fields['ecl'] not in valid_eye_colours:
        return False
    # Passport id: exactly nine digits (leading zeros allowed).
    if not re.match('[0-9]{9}$', fields['pid']):
        return False
    return True
# --- Part two ---
# Re-check each structurally complete passport against the per-field rules.
for passport in valid:
    # Normalize both separators to newline, then split into 'key:value' items.
    fields = passport.replace(' ', '\n').split('\n')
    if is_valid(fields):
        valid_count += 1
print(valid_count)
| 18.817143 | 80 | 0.753633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21,624 | 0.938094 |
ff59e3ae0336b603d2a05d8f243ce3a51a93e303 | 802 | py | Python | brdashsite/batchrecords/migrations/0003_auto_20190226_2017.py | JNDib/brdashproject | f9a7cb543024bfb6bb3556b9affdcddb46add2e1 | [
"MIT"
] | null | null | null | brdashsite/batchrecords/migrations/0003_auto_20190226_2017.py | JNDib/brdashproject | f9a7cb543024bfb6bb3556b9affdcddb46add2e1 | [
"MIT"
] | null | null | null | brdashsite/batchrecords/migrations/0003_auto_20190226_2017.py | JNDib/brdashproject | f9a7cb543024bfb6bb3556b9affdcddb46add2e1 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2019-02-27 02:17
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the HistoricalBatchRecord model and its relation fields."""

    dependencies = [
        ('batchrecords', '0002_auto_20190226_1939'),
    ]

    operations = [
        # Remove the relation fields first so the DeleteModel below has no
        # dangling references.
        migrations.RemoveField(
            model_name='historicalbatchrecord',
            name='created_by',
        ),
        migrations.RemoveField(
            model_name='historicalbatchrecord',
            name='history_user',
        ),
        migrations.RemoveField(
            model_name='historicalbatchrecord',
            name='product',
        ),
        migrations.RemoveField(
            model_name='historicalbatchrecord',
            name='updated_by',
        ),
        migrations.DeleteModel(
            name='HistoricalBatchRecord',
        ),
    ]
| 24.30303 | 52 | 0.574813 | 717 | 0.894015 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.309227 |
ff5afc2fece69197315667ad84fe46cb018812aa | 3,325 | py | Python | precipy/identifiers.py | ananelson/precipy | dfa87de23229903e5d26aeab82de5fa56e2a1ac5 | [
"Apache-2.0"
] | null | null | null | precipy/identifiers.py | ananelson/precipy | dfa87de23229903e5d26aeab82de5fa56e2a1ac5 | [
"Apache-2.0"
] | 2 | 2020-06-26T13:06:31.000Z | 2022-03-31T04:27:58.000Z | precipy/identifiers.py | ananelson/precipy | dfa87de23229903e5d26aeab82de5fa56e2a1ac5 | [
"Apache-2.0"
] | 3 | 2020-05-27T07:14:16.000Z | 2020-06-03T04:15:07.000Z | from enum import Enum
import hashlib
import inspect
import logging
import os
# Module-level logger used by the hash_for_* helpers for debug tracing.
logger = logging.getLogger(name="precipy.identifiers")
# Canonical filename for pickled metadata stored alongside cached artifacts.
metadata_filename = "metadata.pkl"
class FileType(Enum):
    """Categories of files tracked by precipy."""
    ANALYTICS = "analytics"
    METADATA = "metadata"
    TEMPLATE = "template"
    DOCUMENT = "document"
class GeneratedFile(object):
    """Record of a generated file: its canonical name, identity hash,
    category, and (optionally) where it lives in the cache."""

    def __init__(self, canonical_filename, h, file_type=FileType.ANALYTICS, cache_filepath=None):
        self.canonical_filename = canonical_filename
        # h: identity hash produced by the hash_for_* helpers in this module.
        self.h = h
        self.file_type = file_type
        self.cache_filepath = cache_filepath
        # File extension including the leading dot (os.path.splitext semantics).
        self.ext = os.path.splitext(canonical_filename)[1]
        # URLs where the file has been published; empty until populated elsewhere.
        self.public_urls = []

    def __repr__(self):
        # NOTE(review): the trailing space inside the repr string looks
        # unintentional -- confirm before changing, callers may compare reprs.
        return "<GeneratedFile %s> " % self.canonical_filename
def hash_for_dict(info_dict):
    """Return a stable sha256 hex digest for a dict of metadata.

    Keys are processed in sorted order so equal dicts produce equal digests.
    Debug-logs the contents (one nesting level deep) before hashing.
    """
    logger.debug("")
    logger.debug("computing hash for dict from:")
    for k in sorted(info_dict):
        v = info_dict[k]
        if isinstance(v, dict):
            logger.debug(" %s:" % k)
            for kk, vv in v.items():
                logger.debug(" %s: %s" % (kk, str(vv)))
        else:
            logger.debug(" %s: %s" % (k, v))
    # BUG FIX: the original wrote "%s: %s" % (k, info_dict), embedding the
    # repr of the *entire* dict in every joined element, so the digest hinged
    # on dict repr ordering instead of the canonical sorted key/value pairs.
    # Hash each key with its own value.  NOTE: this changes digests, so any
    # previously cached entries will be recomputed once.
    description = u";".join("%s: %s" % (k, info_dict[k])
                            for k in sorted(info_dict))
    hashvalue = hashlib.sha256(description.encode('utf-8')).hexdigest()
    logger.debug(hashvalue)
    logger.debug("")
    return hashvalue
def hash_for_fn(fn, kwargs, depends=None):
    """Digest identifying a function invocation: its name, source, arguments,
    declared dependencies, and the framework source it runs under."""
    import precipy.batch as batch
    import precipy.analytics_function as analytics_function
    fingerprint = {}
    fingerprint['canonical_function_name'] = fn.__name__
    fingerprint['fn_source'] = hash_for_src(inspect.getsource(fn))
    fingerprint['depends'] = depends
    fingerprint['arg_values'] = kwargs
    fingerprint['batch_source'] = hash_for_src(inspect.getsource(batch))
    fingerprint['analytics_function_source'] = hash_for_src(inspect.getsource(analytics_function))
    return hash_for_dict(fingerprint)
def hash_for_supplemental_file(canonical_filename, fn_h):
    """Digest for a supplemental file, tied to its parent function hash."""
    info = {"fn_hash": fn_h, "filename": canonical_filename}
    return hash_for_dict(info)
def hash_for_src(text):
    """Return the md5 hex digest of a source-code string (utf-8 encoded)."""
    return hashlib.md5(text.encode('utf-8')).hexdigest()
def hash_for_template_text(text):
    """Return the md5 hex digest of template text (utf-8 encoded)."""
    digest = hashlib.md5()
    digest.update(text.encode('utf-8'))
    return digest.hexdigest()
def hash_for_template_file(filepath):
    """Return the md5 hex digest of a template file's raw bytes."""
    with open(filepath, 'rb') as fh:
        digest = hashlib.md5(fh.read())
    return digest.hexdigest()
def hash_for_document(template_hash, filter_name, filter_ext, filter_args):
    """Digest for a rendered document: template hash plus filter settings.

    Dict-valued filter_args are merged into the hashed mapping; anything
    else is stringified under the 'filter_args' key.
    """
    info = {
        "template_hash": template_hash,
        "filter_name": filter_name,
        "filter_ext": filter_ext,
    }
    if isinstance(filter_args, dict):
        info.update(filter_args)
    else:
        info['filter_args'] = str(filter_args)
    return hash_for_dict(info)
def hash_for_doc(canonical_filename, hash_args=None):
    """Digest identifying a document, keyed to the *caller's caller*.

    Hashes the document filename, the batch framework source, and the source
    plus argument names of the frame two levels up the call stack.  Optional
    hash_args are merged into the hashed mapping.
    """
    import precipy.batch as batch
    # Stack index 2 = the caller of our caller.  This is sensitive to call
    # depth: hash_for_doc must be invoked through exactly one intermediate
    # frame for 'frame_source'/'values' to describe the intended function.
    analytics_frameinfo = inspect.stack()[2]
    frame = analytics_frameinfo.frame
    d = {
        'canonical_filename' : canonical_filename,
        'batch_source' : hash_for_src(inspect.getsource(batch)),
        'frame_source' : hash_for_src(inspect.getsource(frame)),
        # Only the argument *names* of that frame, not their values.
        'values' : inspect.getargvalues(frame).args
    }
    if hash_args is not None:
        d.update(hash_args)
    return hash_for_dict(d)
| 29.954955 | 97 | 0.642406 | 582 | 0.175038 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.127519 |
ff5c4a6294ba492144ae065014c970ada8bfb064 | 1,802 | py | Python | src/python/marbles/ie/test/passive_test.py | marbles-ai/ie | b3fef462d3418580c827c94bc206bd2991500c1f | [
"MIT"
] | null | null | null | src/python/marbles/ie/test/passive_test.py | marbles-ai/ie | b3fef462d3418580c827c94bc206bd2991500c1f | [
"MIT"
] | null | null | null | src/python/marbles/ie/test/passive_test.py | marbles-ai/ie | b3fef462d3418580c827c94bc206bd2991500c1f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import unittest
from marbles.ie import grpc
from marbles.ie.ccg import parse_ccg_derivation2 as parse_ccg_derivation
from marbles.ie.drt.drs import Rel
from marbles.ie.semantics.ccg import process_ccg_pt, pt_to_ccg_derivation
from marbles.ie.core.constants import *
from marbles.ie.utils.text import preprocess_sentence
from marbles.test import dprint
class PossessiveTest(unittest.TestCase):
    """End-to-end check that a passive sentence yields the expected
    ARG0/ARG1 relations in the derived DRS."""

    def setUp(self):
        # Spin up an EasySRL parser service and open a client stub to it.
        self.svc = grpc.CcgParserService('easysrl')
        self.stub = self.svc.open_client()

    def tearDown(self):
        self.svc.shutdown()

    def test10_Brutus(self):
        text = "Ceasar was stabbed by Brutus"
        derivation = grpc.ccg_parse(self.stub, text, grpc.DEFAULT_SESSION)
        pt = parse_ccg_derivation(derivation)
        sentence = process_ccg_pt(pt, CO_NO_VERBNET|CO_NO_WIKI_SEARCH)
        d = sentence.get_drs()
        dprint(pt_to_ccg_derivation(pt))
        dprint(d)
        # Noun-phrase spans: both participants must be recognized as NPs.
        fnps = sentence.get_np_nominals()
        np_texts = [span.text for _, span in fnps]
        #self.assertTrue('Average maturity' in np_texts)
        self.assertTrue('Brutus' in np_texts)
        self.assertTrue('Ceasar' in np_texts)
        # Verb-phrase spans: the passive construction forms one VP.
        fvps = sentence.get_vp_nominals()
        vp_texts = [span.text for _, span in fvps]
        self.assertTrue('was stabbed' in vp_texts)
        # Referents for the event and the two participants.
        E = [ref for ref, span in fvps if span.text == "was stabbed"][0]
        A1 = [ref for ref, span in fnps if span.text == "Brutus"][0]
        A0 = [ref for ref, span in fnps if span.text == "Ceasar"][0]
        # Passive voice: surface subject (Ceasar) is ARG0 slot here, the
        # by-phrase (Brutus) fills ARG1.
        self.assertTrue(d.find_condition(Rel('_ARG0', [E, A0])) is not None)
        self.assertTrue(d.find_condition(Rel('_ARG1', [E, A1])) is not None)
# Run the suite when executed directly.  The original file repeated this
# guard twice; the second copy was unreachable (unittest.main() calls
# sys.exit on completion) and has been removed.
if __name__ == '__main__':
    unittest.main()
ff5d7939ba961dd0adaed7bb04d2952b658dec7e | 33,088 | py | Python | lib/devops/supervisor.py | jpotter/angel | 66e98f73621a735c7e5a7f3a70926ad99dd39f3e | [
"Apache-2.0"
] | null | null | null | lib/devops/supervisor.py | jpotter/angel | 66e98f73621a735c7e5a7f3a70926ad99dd39f3e | [
"Apache-2.0"
] | null | null | null | lib/devops/supervisor.py | jpotter/angel | 66e98f73621a735c7e5a7f3a70926ad99dd39f3e | [
"Apache-2.0"
] | null | null | null | import fcntl
import os
import random
import select
import signal
import sys
import time
import traceback
from devops.file_and_dir_helpers import *
from angel.util.pidfile import *
from devops.unix_helpers import set_proc_title
from angel.stats.disk_stats import disk_stats_get_usage_for_path
from devops.process_helpers import *
import angel.settings
# This function is similar to Python's subprocess module, with some tweaks and customizations.
# Like subprocess, it forks a child process, waits for it to exit, and re-starts it on exit. It never returns.
# Our supervisor handles shutdown conditions, calling a stop_func when the supervisor process receives SIGTERM.
# We also handle log rotation, rolling over stdout/stderr when the supervisor process receives SIGWINCH.
# Most other signals are propogated to the child process -- that is, sending the supervisor process SIGHUP will
# be passed through to the child process.
def supervisor_manage_process(config, name, pid_filename_for_daemon, run_as_user, run_as_group, log_basepath,
restart_daemon_on_exit, process_oom_adjustment, init_func, exec_func, stop_func):
''' Creates and manages a child process, running given functions.
- If init_func is defined, it is called in the child process first. If it returns a non-zero status, then supervisor will exit.
- exec_func is then called. If restart_daemon_on_exit is True, exec_func is restarted whenever it exits.
- If stop_func is defined, it is called when this managing process receives a SIGTERM.
- pid_filename_for_daemon is used by this manager process to update status info and track that the manager should be running.
- process_oom_adjustment is a value, typically between -15 and 0, that indicates to the Linux kernel how "important" the process is.
This function never returns.
'''
# Create supervisor logger:
supervisor_logfile_path = launcher_get_logpath(config, log_basepath, 'supervisor')
if 0 != create_dirs_if_needed(os.path.dirname(supervisor_logfile_path), owner_user=run_as_user, owner_group=run_as_group):
print >>sys.stderr, "Supervisor error: unable to create log dirs."
os._exit(0) # Never return
try:
supervisor_logger = SupervisorLogger(open(supervisor_logfile_path, 'a', buffering=0))
except Exception as e:
print >>sys.stderr, "Supervisor error: unable to create supervisor log (%s: %s)." % (supervisor_logfile_path, e)
os._exit(0) # Never return
# Send SIGTERM to the supervisor daemon to tell it to quit the child process and exit.
# Send SIGWINCH to the supervisor daemon to tell it to rotate logs.
# Any other trappable_signal is sent to the child process to do any service-defined logic as necessary.
trappable_signals = (signal.SIGINT, signal.SIGWINCH, signal.SIGHUP, signal.SIGTERM, signal.SIGUSR1, signal.SIGUSR2, signal.SIGQUIT)
global supervisor_daemon_exit_requested
supervisor_daemon_exit_requested = False
global run_init_instead_of_exec
run_init_instead_of_exec = False
set_proc_title('supervisor[%s]: starting' % name)
# Always run supervisor with kernel out-of-memory flags set to hold off on killing us.
# This is reset back up to 0 in the child process (or whatever process_oom_adjustment is set to).
set_process_oom_factor(-15)
supervisor_pid = os.getpid()
child_pid = None
daemon_start_time = int(time.time())
last_start_time = None
start_count = 0
continous_restarts = 0
min_delay_between_continous_restarts = 5
max_delay_between_continous_restarts = 30
restart_delay_jitter = 60 # If we hit max_delay, we'll re-try at some interval between (max_delay - jitter) and (max_delay)
# Define a function that waits for a child pid to exit OR for us to receive a signal:
def _supervisor_daemon_waitpid(pid):
if pid is None or pid < 2:
supervisor_logger.warn("Supervisor[%s]: can't wait on invalid pid %s." % (name, pid))
return -1
try:
# To-do: periodically wake up and check that pid_filename_for_daemon contains our pid, or exit
(wait_pid, wait_exitcode) = os.waitpid(pid, 0)
return (wait_exitcode >> 8) % 256
except OSError:
return -2 # waitpid will throw an OSError when our supervisor recieves a kill signal (i.e. SIGTERM to tell us to exit); our code below will loop and re-call this.
return -3
# Define a function that receives a signal and passes it through to our child process:
def _supervisor_daemon_signal_passthru(signum, frame):
if child_pid is None or child_pid < 2:
# This can happen if the supervised child was *just* killed, or isn't running yet (during a re-spawn).
supervisor_logger.warn("Supervisor: invalid pid %s found during kill -%s of process %s" % (child_pid, signum, name))
return
try:
supervisor_logger.info("_supervisor_daemon_signal_passthru: kill -%s %s" % (signum, child_pid))
os.kill(child_pid, signum)
except Exception as e:
supervisor_logger.error("Supervisor %s[%s/%s managing %s]: unable to send signal %s to pid %s: %s" % (name, supervisor_pid, os.getpid(), child_pid, signum, child_pid, e))
# Define a function that receives a signal and rotates logs:
def _supervisor_daemon_rotate_logs(signum, frame):
supervisor_logger.info("Supervisor %s[%s/%s managing %s]: rotate logs not implemented yet; log_basepath=%s" % (name, supervisor_pid, os.getpid(), child_pid, log_basepath))
# Define a function that receives a signal and cleanly shuts down the server:
def _supervisor_daemon_quit(signum, frame):
# Flag that quit has been requested:
global supervisor_daemon_exit_requested
supervisor_daemon_exit_requested = True
if child_pid is None or child_pid < 2:
# This can happen if the supervised child was *just* killed, or isn't running yet (during a re-spawn).
supervisor_logger.warn("Supervisor: invalid pid %s found during kill -%s of process %s" % (child_pid, signum, name))
return
# Check if we're still in an init phase (can't call stop_func on something that hasn't actually started):
global run_init_instead_of_exec
if run_init_instead_of_exec:
# if we're currently invoking a custom init function, then we need to send the supervisor process the kill signal directly so it exits
return _supervisor_daemon_signal_passthru(signum, frame)
# Run stop function if given, otherwise pass along given kill signal to child process:
if stop_func is not None:
try:
import threading
supervisor_logger.info("Supervisor %s[%s/%s managing %s]: quit request received (sig %s in thread %s); calling stop function" % (name, supervisor_pid, os.getpid(), child_pid, signum, threading.currentThread().name))
ret_val = stop_func(child_pid)
supervisor_logger.info("Supervisor %s[%s/%s managing %s]: quit request received (sig %s in thread %s); stop function done (%s)" % (name, supervisor_pid, os.getpid(), child_pid, signum, threading.currentThread().name, ret_val))
return
except Exception:
supervisor_logger.error("Supervisor %s[%s/%s managing %s]: error in stop function: %s" % (name, supervisor_pid, os.getpid(), child_pid, traceback.format_exc(sys.exc_info()[2])))
else:
supervisor_logger.warn("Supervisor %s[%s/%s managing %s]: no stop function given" % (name, supervisor_pid, os.getpid(), child_pid))
return _supervisor_daemon_signal_passthru(signum, frame)
def _install_signal_functions():
signal.signal(signal.SIGWINCH, _supervisor_daemon_rotate_logs)
signal.signal(signal.SIGTERM, _supervisor_daemon_quit)
for sig in trappable_signals:
if sig not in (signal.SIGWINCH, signal.SIGTERM):
signal.signal(sig, _supervisor_daemon_signal_passthru)
def _remove_signal_functions():
for sig in trappable_signals:
signal.signal(sig, signal.SIG_DFL)
def _sleep_without_signal_functions(duration):
# Because there are cases where *we* need to be interrupted:
_remove_signal_functions()
time.sleep(duration)
_install_signal_functions()
# Install signal functions:
_install_signal_functions()
# chdir() to /, to avoid potentially holding a mountpoint open:
os.chdir('/')
# Reset umask:
os.umask(022)
# Redirect STDOUT/STDERR:
# (Redirects run as separate threads in our supervisor process -- don't move these to the child process; os.exec will wipe them out.)
os.setsid()
stdout_redirector = SupervisorStreamRedirector(supervisor_logger, launcher_get_logpath(config, log_basepath, ''), run_as_user=run_as_user, run_as_group=run_as_group)
stderr_redirector = SupervisorStreamRedirector(supervisor_logger, launcher_get_logpath(config, log_basepath, 'error'), run_as_user=run_as_user, run_as_group=run_as_group)
supervisor_redirector = SupervisorStreamRedirector(supervisor_logger, launcher_get_logpath(config, log_basepath, 'supervisor'), run_as_user=run_as_user, run_as_group=run_as_group)
stdout_redirector.startRedirectThread(sys.stdout)
stderr_redirector.startRedirectThread(sys.stderr)
supervisor_redirector.startRedirectThread(supervisor_logger.logger_fd)
# Close STDIN:
sys.stdin.close()
os.close(0)
new_stdin = open(os.devnull, 'r', 0) # So FD 0 isn't available
#new_stdin = open(os.devnull, 'r', 0)
#try:
# os.dup2(new_stdin.fileno(), sys.stdin.fileno())
#except ValueError:
# print >>sys.stderr, "Can't set up STDIN, was it closed on us?"
# Loop until shutdown requested, handling signals and logs and making sure that our server remains running:
while not supervisor_daemon_exit_requested:
if not is_pid_in_pidfile_our_pid(pid_filename_for_daemon):
supervisor_logger.warn("Supervisor[%s/%s]: Warning: invalid pid %s in lock file %s. Re-checking..." % (supervisor_pid, os.getpid(), get_pid_from_pidfile(pid_filename_for_daemon), pid_filename_for_daemon))
try:
time.sleep(0.5)
except:
pass
if not is_pid_in_pidfile_our_pid(pid_filename_for_daemon):
supervisor_logger.error("Supervisor[%s/%s]: FATAL: invalid pid %s in lock file %s. Exiting now." % (supervisor_pid, os.getpid(), get_pid_from_pidfile(pid_filename_for_daemon), pid_filename_for_daemon))
sys.stdout.flush()
sys.stderr.flush()
time.sleep(0.5) # Need to sleep so that logger threads can write out above stderr message. Gross, but it works.
os._exit(1)
lockfile_pid = get_pid_from_pidfile(pid_filename_for_daemon)
if lockfile_pid is None or supervisor_pid != lockfile_pid:
supervisor_logger.error("Supervisor[%s/%s]: FATAL: lock file %s not owned by current process! (pid is %s) Exiting now." % (supervisor_pid, os.getpid(), pid_filename_for_daemon, lockfile_pid))
os._exit(1)
one_time_run = False
run_init_instead_of_exec = False
if start_count == 0 and init_func is not None:
run_init_instead_of_exec = True
if not restart_daemon_on_exit:
# This is a clever trick: we might want to run a command in the background one-time (i.e. priming a service).
# By passing restart_daemon_on_exit as false from way up above us in the callstack,
# we can use our run logic inside the supervisor process and let it exit cleanly.
# This works by reading one_time_run after we've started and flipping supervisor_daemon_exit_requested to True.
one_time_run = True
try:
log_disk_stats = disk_stats_get_usage_for_path(config['LOG_DIR'])
data_disk_stats = disk_stats_get_usage_for_path(config['DATA_DIR'])
run_disk_stats = disk_stats_get_usage_for_path(config['RUN_DIR'])
if log_disk_stats is not None and data_disk_stats is not None and run_disk_stats is not None:
# Only do this check when we can get stats -- otherwise it's possible to rm -rf log_dir and then have the service die.
if log_disk_stats['free_mb'] < 100 or data_disk_stats['free_mb'] < 100 or run_disk_stats['free_mb'] < 100:
supervisor_logger.error("Supervisor[%s/%s]: insufficent disk space to run %s." % (supervisor_pid, os.getpid(), name))
try:
_sleep_without_signal_functions(10)
except:
supervisor_daemon_exit_requested = True
continue
except Exception as e:
supervisor_logger.error("Supervisor[%s/%s]: disk check failed: %s" % (supervisor_pid, os.getpid(), e))
if child_pid is None and not supervisor_daemon_exit_requested:
if one_time_run:
supervisor_daemon_exit_requested = True
# Then we need to fork and start child process:
try:
sys.stdout.flush() # If we have a ' print "Foo", ' statement (e.g. with trailing comma), the forked process ends up with a copy of it, too.
sys.stderr.flush()
child_pid = os.fork()
if child_pid:
# Parent process:
supervisor_logger.info("Supervisor[%s/%s]: managing process %s running as pid %s" % (supervisor_pid, os.getpid(), name, child_pid))
set_proc_title('supervisor: managing %s[%s]' % (name, child_pid))
prior_child_start_time = last_start_time
last_start_time = time.time()
start_count += 1
if 0 != update_pidfile_data(pid_filename_for_daemon, { \
angel.constants.LOCKFILE_DATA_DAEMON_START_TIME: daemon_start_time, \
angel.constants.LOCKFILE_DATA_PRIOR_CHILD_START_TIME: prior_child_start_time, \
angel.constants.LOCKFILE_DATA_CHILD_START_TIME: int(time.time()), \
angel.constants.LOCKFILE_DATA_CHILD_PID: child_pid, \
angel.constants.LOCKFILE_DATA_START_COUNT: start_count, \
} ):
supervisor_logger.error("Supervisor[%s/%s]: error updating pidfile data in pidfile %s" % (supervisor_pid, os.getpid(), pid_filename_for_daemon))
else:
# Child process:
supervisor_logger.info("Supervisor[%s/%s]: running %s" % (supervisor_pid, os.getpid(), name))
set_proc_title('supervisor: starting %s' % name)
# Set our process_oom_adjustment, as the parent process ALWAYS has it set to a very low value to avoid the supervisor from being killed:
set_process_oom_factor(process_oom_adjustment)
# Drop root privileges (has to be done after oom adjustment):
if 0 != process_drop_root_permissions(run_as_user, run_as_group):
supervisor_logger.error("Supervisor[%s/%s]: error setting user/group to %s/%s in child process." % (supervisor_pid, os.getpid(), run_as_user, run_as_group))
os._exit(1)
# We need to reset the signal handlers so as to NOT trap any signals because exec_func and init_func will have python code that runs within our current process.
# We have to unset this in the child process; if we set it in the "parent" branch of the if statement, then we'd be missing them on the next loop.
_remove_signal_functions()
# If there's an init function, run it instead:
if run_init_instead_of_exec:
set_proc_title('%s worker init' % name)
supervisor_logger.info("Supervisor[%s/%s]: starting init for %s" % (supervisor_pid, os.getpid(), name))
init_okay = True
ret_val = None
try:
ret_val = init_func()
except Exception as e:
supervisor_logger.error("Error in init function: %s; bailing." % e)
init_okay = False
if type(ret_val) is not int:
supervisor_logger.warn("Warning: init_func for %s returned non-int; please return 0 on success; non-zero otherwise; or throw an exception." % (name, ret_val))
else:
if ret_val != 0:
init_okay = False
if not init_okay:
supervisor_logger.error("Supervisor[%s/%s]: FATAL: init failed for %s" % (supervisor_pid, os.getpid(), name))
os.kill(supervisor_pid, signal.SIGTERM)
else:
supervisor_logger.info("Supervisor[%s/%s]: init finished for %s" % (supervisor_pid, os.getpid(), name))
os._exit(ret_val) # Exit child process; supervisor will pick
# Run the exec function:
set_proc_title('%s worker' % name)
try:
exec_func() # This should be a function that calls os.exec and replaces our current process
except Exception as e:
supervisor_logger.error("Error in exec function: %s" % e)
supervisor_logger.error("MAJOR ERROR: Supervisor[%s/%s]: function for %s unexepectedly returned." % (supervisor_pid, os.getpid(), name))
os._exit(2)
except Exception as e:
supervisor_logger.error("Supervisor[%s/%s]: child process failed (%s)." % (supervisor_pid, os.getpid(), e))
try:
_sleep_without_signal_functions(10) # Sleep in child to prevent parent from rapidly re-spawning
except:
pass
continue
if child_pid is None:
supervisor_logger.error("Supervisor[%s/%s]: child process setup failed (supervisor_daemon_exit_requested: %s)." % (supervisor_pid, os.getpid(), supervisor_daemon_exit_requested))
try:
_sleep_without_signal_functions(10) # Sleep in child to prevent parent from rapidly re-spawning
except:
supervisor_daemon_exit_requested = True
continue
# The parent process needs to wait for the child process to exit:
wait_exitcode = _supervisor_daemon_waitpid(child_pid)
set_proc_title('supervisor: managing %s[%s exited %s]' % (name, child_pid, wait_exitcode))
if run_init_instead_of_exec:
supervisor_logger.info("Supervisor[%s/%s]: init function finished." % (supervisor_pid, os.getpid()))
child_pid = None
continue
if supervisor_daemon_exit_requested:
set_proc_title('supervisor: managing %s[%s exited %s for exit]' % (name, child_pid, wait_exitcode))
if one_time_run:
supervisor_logger.info('Supervisor[%s/%s]: %s[%s] exited (exit code %s) for one-time run.' % (supervisor_pid, os.getpid(), name, child_pid, wait_exitcode))
else:
supervisor_logger.info('Supervisor[%s/%s]: %s[%s] exited (exit code %s) for shutdown.' % (supervisor_pid, os.getpid(), name, child_pid, wait_exitcode))
break
# The wait-for-child logic above may have returned early due to a signal that we received and passed off to child or otherwise handled.
# Only reset stuff for a restart if the child process actually exited (i.e. waitpid() returned because the child exited, not because the parent received a signal):
if not is_pid_running(child_pid):
set_proc_title('supervisor: restarting %s' % (name))
this_run_duration = time.time() - last_start_time
# Re-try service starts no faster than some minimum interval, backing off to some maximum interval so lengthy outages don't trigger a sudden spike
delay_until_next_restart = 0
if continous_restarts > 0:
delay_until_next_restart = min_delay_between_continous_restarts + (continous_restarts - 1) * 10 - this_run_duration + 2*random.random()
if delay_until_next_restart < min_delay_between_continous_restarts:
delay_until_next_restart = min_delay_between_continous_restarts + 2*random.random()
if delay_until_next_restart > max_delay_between_continous_restarts:
delay_until_next_restart = max_delay_between_continous_restarts - random.random() * restart_delay_jitter
supervisor_logger.error('Supervisor[%s/%s]: %s[%s] unexpected exit (exit code %s) after %s seconds on run number %s, waiting %s seconds before restarting.' %
(supervisor_pid, os.getpid(), name, child_pid, wait_exitcode, this_run_duration, start_count, delay_until_next_restart))
supervisor_logger.error('Supervisor[%s/%s]: more info: run_init_instead_of_exec: %s; restart_daemon_on_exit: %s' %
(supervisor_pid, os.getpid(), run_init_instead_of_exec, restart_daemon_on_exit))
child_pid = None
if this_run_duration < max_delay_between_continous_restarts:
continous_restarts += 1
try:
time_left = delay_until_next_restart
while time_left > 0:
# spit out a log every few seconds so we can see what's going on in the logs -- otherwise it looks wedged:
supervisor_logger.error('Supervisor[%s/%s]: %s[%s] waiting %s seconds.' % (supervisor_pid, os.getpid(), name, child_pid, int(time_left)))
sleep_time = 5
if sleep_time > time_left:
sleep_time = time_left
_sleep_without_signal_functions(sleep_time)
time_left -= sleep_time
except Exception as e:
supervisor_logger.error('Supervisor[%s/%s]: %s had exception while waiting; bailing (%s).' % (supervisor_pid, os.getpid(), name, e))
supervisor_daemon_exit_requested = True
else:
continous_restarts = 0
# We'll only exit above loop when supervisor_daemon_exit_requested is true.
# We keep running until the child process exits, otherwise there's no way
# for the outside world to send further signals to the process.
while is_pid_running(child_pid):
try:
# While we can still send signals to the supervisor process, wait on it
set_proc_title('supervisor: waiting for exit %s[%s]' % (name, child_pid))
supervisor_logger.info("Supervisor[%s/%s]: waiting for exit %s[%s]" % (supervisor_pid, os.getpid(), name, child_pid))
_supervisor_daemon_waitpid(child_pid)
except OSError:
pass
set_proc_title('supervisor: finished monitoring %s[%s]; closing logfiles' % (name, child_pid))
supervisor_logger.info("Supervisor[%s/%s] finished monitoring %s[%s]; exiting" % (supervisor_pid, os.getpid(), name, child_pid))
if os.path.isfile(pid_filename_for_daemon):
# The pid file really should exist, but if it doesn't, there's not a lot we can do anyway, and logging it wi
os.remove(pid_filename_for_daemon)
else:
supervisor_logger.warn("Supervisor[%s/%s]: no lockfile at %s to remove, oh well." % (supervisor_pid, os.getpid(), pid_filename_for_daemon))
# Stop logging threads:
stdout_redirector.stopRedirectThread()
stderr_redirector.stopRedirectThread()
supervisor_redirector.stopRedirectThread()
# Do not return from this function -- and use os._exit instead of sys.exit to nuke any stray threads:
os._exit(0)
# For use by supervisor only -- please consider this 'private' to supervisor.
class SupervisorLogger():
    """Tiny timestamped logger used by the supervisor itself.

    Writes "<epoch>, <level>, <message>" lines to the given file object and
    flushes after every write.  (Deliberately avoids pulling in external
    logging machinery -- see the original author's note about keeping
    dependencies to a minimum.)
    """
    logger_fd = None

    def __init__(self, logger_fd):
        self.logger_fd = logger_fd

    def info(self, message):
        self.log('info', message)

    def warn(self, message):
        self.log('warn', message)

    def error(self, message):
        self.log('error', message)

    def log(self, level, message):
        line = "%s, %s, %s\n" % (time.time(), level, message)
        self.logger_fd.write(line)
        self.logger_fd.flush()
# For use by supervisor only -- please consider this 'private' to supervisor.
from threading import Thread
class SupervisorStreamRedirector(Thread):
    """Background thread that drains a pipe into a logfile.

    The supervisor dup2()'s a stream (e.g. sys.stdout) onto a pipe; this
    thread copies the pipe's data into the logfile, re-creating the log
    directory if it disappears.
    """
    supervisor_logger = None   # SupervisorLogger for this thread's own diagnostics
    log_data_source = None     # read end of the redirection pipe
    stop_event = None          # threading.Event; set to request thread shutdown
    run_as_user = None         # owner for created log dirs (may be None)
    run_as_group = None        # group for created log dirs (may be None)
    logfile_inode = None       # inode of the logfile when it was last opened
    logfile_dir = None         # directory portion of logfile_path
    logfile_path = None        # destination logfile
    logfile_fd = None          # open file handle for logfile_path

    def __init__(self, supervisor_logger, logfile_path, run_as_user=None, run_as_group=None):
        Thread.__init__(self)
        self.supervisor_logger = supervisor_logger
        self.logfile_path = logfile_path
        self.logfile_dir = os.path.dirname(self.logfile_path)
        self.run_as_user = run_as_user
        self.run_as_group = run_as_group
        # Ensure the log directory exists up front (ownership per run_as_*).
        self._create_logdir()
def startRedirectThread(self, data_stream):
    """Redirect data_stream (e.g. sys.stdout) into our logfile.

    Creates a pipe, points data_stream's file descriptor at the pipe's
    write end via dup2, and starts this thread to drain the read end.
    Returns 0 on success, a negative code on failure.
    """
    if self.stop_event:
        if self.supervisor_logger is not None:
            self.supervisor_logger.warn("SupervisorStreamRedirector: redirect already started?")
        return -4
    # NOTE(review): `threading` is used here but only `from threading import
    # Thread` appears in the visible imports -- confirm `import threading`
    # exists at module scope.
    self.stop_event = threading.Event()
    try:
        reader, writer = os.pipe()
        self.log_data_source = os.fdopen(reader, 'rb', 0)
        original_output_dest = os.fdopen(writer, 'wb', 0)
        # Flip on non-blocking, otherwise calls to select.select() will block:
        flags = fcntl.fcntl(original_output_dest, fcntl.F_GETFL)
        fcntl.fcntl(original_output_dest, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        flags = fcntl.fcntl(self.log_data_source, fcntl.F_GETFL)
        fcntl.fcntl(self.log_data_source, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        data_stream.flush()
        # Point the caller's stream at the pipe's write end:
        os.dup2(original_output_dest.fileno(), data_stream.fileno())
    except Exception as e:
        if self.supervisor_logger is not None:
            self.supervisor_logger.warn("SupervisorStreamRedirector: error setting up file streams for redirect: %s" % e)
        return -5
    try:
        # Thread.start() -> run() begins draining the pipe.
        self.start()
    except Exception as e:
        if self.supervisor_logger is not None:
            self.supervisor_logger.warn("SupervisorStreamRedirector: error starting redirect thread: %s" % e)
        return -6
    return 0
def stopRedirectThread(self):
    """Signal the redirect thread to drain remaining data and exit."""
    if self.stop_event:
        self.stop_event.set()
    else:
        if self.supervisor_logger is not None:
            self.supervisor_logger.warn("SupervisorStreamRedirector: stop_logger not running? (%s)" % self.stop_event)
def _filter_lines(self, lines):
    ''' Given an array of lines, return a filtered / altered string as desired. '''
    # The intent here is to someday pass an object in that implements the filter, so that
    # sensative strings can be filtered out of the log files before getting written to disk
    # and then sent across the wire via logstash or what have you.
    # For now, we do a no-op: rejoin the lines with a trailing newline.
    if len(lines) == 0:
        return ''
    return '\n'.join(lines) + '\n'
    # Here's an example that would timestamp every line:
    #if len(lines) == 0:
    #    return ''
    #line_beginning = '%11.1f ' % (time.time())
    #line_ending = '\n'
    #return line_beginning + (line_ending + line_beginning).join(lines) + line_ending
def _create_logdir(self):
    """Create the logfile's directory (with run_as_* ownership) if missing.

    Returns 0 on success, negative on failure.
    """
    if 0 != create_dirs_if_needed(self.logfile_dir, owner_user=self.run_as_user, owner_group=self.run_as_group):
        self.supervisor_logger.error("SupervisorStreamRedirector[%s]: unable to create logdir %s" % (os.getpid(), self.logfile_path))
        return -7
    return 0
def _reset_logfile(self):
if 0 != self._create_logdir():
return -8
try:
if os.path.exists(self.logfile_path):
if os.path.islink(self.logfile_path) or not os.path.isfile(self.logfile_path):
self.supervisor_logger.error("SupervisorStreamRedirector: invalid file at logfile path %s" % self.logfile_path)
return -9
new_fh = open(self.logfile_path, 'a')
if self.logfile_fd is not None:
self.logfile_fd.close()
self.logfile_fd = new_fh
self.logfile_inode = os.stat(self.logfile_path).st_ino
self.supervisor_logger.info("SupervisorStreamRedirector[%s]: writing to logfile %s" % (os.getpid(), self.logfile_path))
except Exception as e:
self.supervisor_logger.error("SupervisorStreamRedirector[%s]: unable to open logfile %s: %s" % (os.getpid(), self.logfile_path, e))
return -10
return 0
def run(self):
okay_to_run = True
last_read_size = 0
last_remainder = ''
while okay_to_run or last_read_size > 0:
if self.stop_event.is_set():
self.supervisor_logger.info("SupervisorStreamRedirector[%s]: stopping logger at %s" % (os.getpid(), self.logfile_path))
okay_to_run = False
self.stop_event.clear()
try:
# Don't use readline() -- it blocks, and there's no way for the main thread
# to tell the logger thread to exit while the i/o call is blocked. Sigh.
[rlist, wlist, xlist] = select.select([self.log_data_source], [], [], 0.25)
if not os.path.exists(self.logfile_dir):
# Re-create the logdir if it goes missing -- do this check every pass through,
# so that if logdir gets completely reset we instantly recreate the path for other
# processes which might also depend on it.
self._create_logdir()
if not rlist:
last_read_size = 0
else:
data = self.log_data_source.read(1024)
last_read_size = len(data)
# We split the data into lines so that we can filter sensative stings out, and potentially do some line-based formatting.
# Because we're not using readline (due to blocking reasons), we have to split the data into lines, and carry over the remainder
# of the last line (if it's mid-line) to the next pass through the loop.
lines = data.split('\n')
if data.endswith('\n'):
lines = lines[:-1]
if len(last_remainder):
lines[0] = last_remainder + lines[0]
last_remainder = ''
if not data.endswith('\n'):
last_remainder = lines[-1]
lines = lines[:-1]
try:
current_inode = os.stat(self.logfile_path).st_ino
if self.logfile_inode != current_inode:
self._reset_logfile()
except:
self._reset_logfile()
if self.logfile_fd is not None:
self.logfile_fd.write(self._filter_lines(lines))
if not okay_to_run and len(last_remainder):
# Then it's our last loop through -- purge out the remainder:
self.logfile_fd.write(self._filter_lines(last_remainder,))
last_remainder = ''
self.logfile_fd.flush()
except Exception as e:
self.supervisor_logger.error("SupervisorStreamRedirector: error in log thread: %s" % e)
self.supervisor_logger.info("SupervisorStreamRedirector stopping; closing %s." % self.logfile_path)
self.logfile_fd.flush()
self.logfile_fd.close()
self.stop_event = None
| 53.977162 | 242 | 0.634913 | 8,251 | 0.249365 | 0 | 0 | 0 | 0 | 0 | 0 | 11,715 | 0.354056 |
ff615d487c1dcefa0094156fbe9268c588f51e48 | 10,752 | py | Python | muspinsim/experiment.py | muon-spectroscopy-computational-project/muspinsim | d9e971edd840ab0c33b143f9b5694bc1b09011d2 | [
"MIT"
] | null | null | null | muspinsim/experiment.py | muon-spectroscopy-computational-project/muspinsim | d9e971edd840ab0c33b143f9b5694bc1b09011d2 | [
"MIT"
] | null | null | null | muspinsim/experiment.py | muon-spectroscopy-computational-project/muspinsim | d9e971edd840ab0c33b143f9b5694bc1b09011d2 | [
"MIT"
] | null | null | null | """experiment.py
Classes and functions to perform actual experiments"""
import logging
import numpy as np
import scipy.constants as cnst
from muspinsim.constants import MU_TAU
from muspinsim.utils import get_xy
from muspinsim.mpi import mpi_controller as mpi
from muspinsim.simconfig import MuSpinConfig, ConfigSnapshot
from muspinsim.input import MuSpinInput
from muspinsim.spinop import DensityOperator, SpinOperator
from muspinsim.hamiltonian import Hamiltonian
from muspinsim.lindbladian import Lindbladian
class ExperimentRunner(object):
"""A class meant to run experiments. Its main purpose as an object is to
provide caching for any quantities that might not need to be recalculated
between successive snapshots."""
def __init__(self, infile: MuSpinInput, variables: dict = {}):
"""Set up an experiment as defined by a MuSpinInput object
Prepare a set of calculations (for multiple files and averages) as
defined by a MuSpinInput object and a set of variable values. Takes
care of parallelism, splitting calculations across nodes etc.
Arguments:
infile {MuSpinInput} -- The input file object defining the
calculations we need to perform.
variables {dict} -- The values of any variables appearing in the input
file
"""
if mpi.is_root:
# On root, we run the evaluation that gives us the actual possible
# values for simulation configurations. These are then broadcast
# across all nodes, each of which runs its own slice of them, and
# finally gathered back together
config = MuSpinConfig(infile.evaluate(**variables))
else:
config = MuSpinConfig()
mpi.broadcast_object(config)
self._config = config
self._system = config.system
# Store single spin operators
self._single_spinops = np.array(
[
[self._system.operator({i: a}).matrix for a in "xyz"]
for i in range(len(self._system))
]
)
# Parameters
self._B = np.zeros(3)
self._p = np.array([1.0, 0, 0])
self._T = np.inf
# Basic Hamiltonian
self._Hsys = self._system.hamiltonian
# Derived quantities
self._rho0 = None
self._Hz = None
self._dops = None
@property
def config(self):
return self._config
@property
def system(self):
return self._system
@property
def B(self):
return self._B
@B.setter
def B(self, x):
x = np.array(x)
if (x != self._B).any():
self._B = x
self._rho0 = None
self._Hz = None
self._dops = None
@property
def p(self):
return self._p
@p.setter
def p(self, x):
x = np.array(x)
if (x != self._p).any():
self._p = x
self._rho0 = None
@property
def T(self):
return self._T
@T.setter
def T(self, x):
if x != self._T:
self._T = x
self._rho0 = None
self._dops = None
@property
def rho0(self):
"""Calculate a thermal density matrix in which the system is prepared
Calculate an approximate thermal density matrix to prepare the system in,
with the muon polarised along a given direction and every other spin in a
thermal equilibrium decohered state.
Returns:
rho0 {DensityOperator} -- Density matrix at t=0
"""
if self._rho0 is None:
T = self._T
muon_axis = self._p
B = self._B
if np.isclose(np.linalg.norm(B), 0.0) and T < np.inf:
logging.warning(
"WARNING: initial density matrix is computed"
" with an approximation that can fail"
" at low fields and finite temperature"
)
mu_i = self._system.muon_index
rhos = []
for i, s in enumerate(self._system.spins):
I = self._system.I(i)
if i == mu_i:
r = DensityOperator.from_vectors(I, muon_axis, 0)
else:
# Get the Zeeman Hamiltonian for this field
Hz = np.sum(
[
B[j] * SpinOperator.from_axes(I, e).matrix
for j, e in enumerate("xyz")
],
axis=0,
)
evals, evecs = np.linalg.eigh(Hz)
E = evals * 1e6 * self._system.gamma(i)
if T > 0:
Z = np.exp(-cnst.h * E / (cnst.k * T))
else:
Z = np.where(E == np.amin(E), 1.0, 0.0)
if np.sum(Z) > 0:
Z /= np.sum(Z)
else:
Z = np.ones(len(E)) / len(E)
rhoI = np.sum(
evecs[:, None, :] * evecs[None, :, :].conj() * Z[None, None, :],
axis=-1,
)
r = DensityOperator(rhoI)
rhos.append(r)
self._rho0 = rhos[0]
for r in rhos[1:]:
self._rho0 = self._rho0.kron(r)
return self._rho0
@property
def Hz(self):
if self._Hz is None:
B = self._B
g = self._system.gammas
Hz = np.sum(
B[None, :, None, None] * g[:, None, None, None] * self._single_spinops,
axis=(0, 1),
)
self._Hz = Hamiltonian(Hz, dim=self._system.dimension)
return self._Hz
@property
def dissipation_operators(self):
if self._dops is None:
# Create a copy of the system
sys = self._system.clone()
# Clean it up of all terms
sys.clear_terms()
sys.clear_dissipative_terms()
T = self._T
# We only go by the intensity of the field
B = np.linalg.norm(self._B)
g = sys.gammas
if T > 0:
Zu = np.exp(-cnst.h * g * B * 1e6 / (cnst.k * T))
if np.isclose(B, 0.0) and T < np.inf:
logging.warning(
"WARNING: dissipation effects are computed"
" with an approximation that can fail"
" at low fields and finite temperature"
)
else:
Zu = g * 0.0
if B == 0:
x, y = np.array([1.0, 0, 0]), np.array([0, 1.0, 0])
else:
z = self._B / B
x, y = get_xy(z)
self._dops = []
for i, a in self._config.dissipation_terms.items():
op_x = np.sum(self._single_spinops[i, :] * x[:, None, None], axis=0)
op_y = np.sum(self._single_spinops[i, :] * y[:, None, None], axis=0)
op_p = SpinOperator(op_x + 1.0j * op_y, dim=self.system.dimension)
op_m = SpinOperator(op_x - 1.0j * op_y, dim=self.system.dimension)
# The 1/pi factor here makes sure that the convention is that when
# a user inputs a certain value of dissipation for a single spin,
# that is exactly the exponential decay factor that is observed.
self._dops.append((op_p, 1 / np.pi * a * Zu[i] / (1 + Zu[i])))
self._dops.append((op_m, 1 / np.pi * a / (1 + Zu[i])))
return self._dops
@property
def Hsys(self):
return self._Hsys
@property
def Htot(self):
# Build the total Hamiltonian
H = self.Hsys + self.Hz
# Do we have dissipation?
if len(self._config.dissipation_terms) > 0:
# Actually use a Lindbladian
H = Lindbladian.from_hamiltonian(H, self.dissipation_operators)
return H
@property
def p_operator(self):
return self._system.muon_operator(self.p)
def run(self):
"""Run the experiment
Run all calculations in the configuration set, gather the results and
return them.
Returns:
results (np.ndarray) -- An array of results, gathered on the root
node.
"""
for cfg in self._config[mpi.rank :: mpi.size]:
dataslice = self.run_single(cfg)
self._config.store_time_slice(cfg.id, dataslice)
self._config.results = mpi.sum_data(self._config.results)
return self._config.results
def load_config(self, cfg_snap: ConfigSnapshot):
"""Load a configuration snapshot in this ExperimentRunner
Load a configuration snapshot in the ExperimentRunner, assigning field,
polarisation and temperature.
Arguments:
cfg_snap {ConfigSnapshot} -- A named tuple defining the values of all
parameters to be used in this calculation.
Returns:
weight {float} -- The weight to assign to this specific simulation
"""
# Let's gather the important stuff
B = cfg_snap.B # Magnetic field
p = cfg_snap.mupol # Muon polarization
T = cfg_snap.T # Temperature
q, w = cfg_snap.orient # Quaternion and Weight for orientation
# Let's start by rotating things
self.B = q.rotate(B)
self.p = q.rotate(p)
self.T = T
return w
def run_single(self, cfg_snap: ConfigSnapshot):
"""Run a muon experiment from a configuration snapshot
Run a muon experiment using the specific parameters and time axis given in
a configuration snapshot.
Arguments:
cfg_snap {ConfigSnapshot} -- A named tuple defining the values of all
parameters to be used in this calculation.
Returns:
result {np.ndarray} -- A 1D array containing the time series of
required results, or a single value.
"""
w = self.load_config(cfg_snap)
# Measurement operator?
S = self.p_operator
H = self.Htot
if cfg_snap.y == "asymmetry":
data = H.evolve(self.rho0, cfg_snap.t, operators=[S])[:, 0]
elif cfg_snap.y == "integral":
data = H.integrate_decaying(self.rho0, MU_TAU, operators=[S])[0] / MU_TAU
return np.real(data) * w
| 31.623529 | 88 | 0.530599 | 10,235 | 0.951916 | 0 | 0 | 5,748 | 0.534598 | 0 | 0 | 3,651 | 0.339565 |
ff617cfb1a089c6b6e48e0e320406fd3e6b05484 | 5,456 | py | Python | obtain_rms_frequency_domain.py | antonyvm1102/Redbox | 8aa7feeaa6513275e7e78ed6cc049bed50bf1ec2 | [
"MIT"
] | null | null | null | obtain_rms_frequency_domain.py | antonyvm1102/Redbox | 8aa7feeaa6513275e7e78ed6cc049bed50bf1ec2 | [
"MIT"
] | null | null | null | obtain_rms_frequency_domain.py | antonyvm1102/Redbox | 8aa7feeaa6513275e7e78ed6cc049bed50bf1ec2 | [
"MIT"
] | null | null | null | import signal_processing as sp
import numpy as np
import sys
import time
"""
Get RMS in frequency domain per 1/3 octave band for all signals and translate to min/mean/max per band.
TODO: add option to exclude zeros from the minimum as in 'capture_min_max_mean_stdev.py'.
"""
def progress(count, total, status=''):
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * float(count) / float(total), 2)
bar = '+' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('\r[%s] %s%s ...%s' % (bar, percents, '%', status))
sys.stdout.flush()
def largest_power_of_base(n, base = 2):
"""
Returns the largest power of base in a number of samples
:param n: (integer) number of samples
:param base: (integer) base of power to be considered, default is 2
:return: (integer) largest power of base
"""
count = 1
while n // base > 1:
count += 1
n = n // base
return base ** count
stime = time.time()
# dir_path = "C:\\Users\\mel\\Documents\\Python\\Betacampus_pos1\\"
# dir_path = "C:\\Users\\mel\\Documents\\Python\\Betacampus_pos2\\"
dir_path = "C:\\Users\\mel\\Documents\\Python\\Huygensgebouw\\"
file_list = sp.obtain_files(dir_path)
f_band = sp.OneThird_octave(0.625 / 2**(1/3), 80 * 2**(1/3))
rms_x_array = np.zeros((len(f_band), len(file_list)+1))
rms_y_array = np.zeros((len(f_band), len(file_list)+1))
rms_z_array = np.zeros((len(f_band), len(file_list)+1))
rms_x_array[... , 0] = f_band
rms_y_array[... , 0] = f_band
rms_z_array[... , 0] = f_band
for i in range(len(file_list)):
filename = dir_path + file_list[i]
t,x,y,z = np.loadtxt(filename, dtype="float", comments="#", unpack=True)
n = largest_power_of_base(len(t))
xf, f = sp.FFT(x[:n], dT=t[1] - t[0])
yf = sp.FFT_amplitude(y[:n])
zf = sp.FFT_amplitude(z[:n])
rms_x_array[..., i + 1] = sp.FFT_to_OneThird_Octave2(xf, f[1] - f[0], f_band)
rms_y_array[..., i + 1] = sp.FFT_to_OneThird_Octave2(yf, f[1] - f[0], f_band)
rms_z_array[..., i + 1] = sp.FFT_to_OneThird_Octave2(zf, f[1] - f[0], f_band)
progress(i, len(file_list), "processing %s of %s" % (i, len(file_list)))
if i%100 == 0:
# np.savetxt("14208_betacampus_pos1_rms_x_%s.txt" % i, rms_x_array)
# np.savetxt("14208_betacampus_pos1_rms_y_%s.txt" % i, rms_y_array)
# np.savetxt("14208_betacampus_pos1_rms_z_%s.txt" % i, rms_z_array)
# np.savetxt("14208_betacampus_pos2_rms_x_%s.txt" % i, rms_x_array)
# np.savetxt("14208_betacampus_pos2_rms_y_%s.txt" % i, rms_y_array)
# np.savetxt("14208_betacampus_pos2_rms_z_%s.txt" % i, rms_z_array)
np.savetxt("14208_Huygensgebouw_rms_x_%s.txt" % i, rms_x_array)
np.savetxt("14208_Huygensgebouw_rms_y_%s.txt" % i, rms_y_array)
np.savetxt("14208_Huygensgebouw_rms_z_%s.txt" % i, rms_z_array)
# np.savetxt("14208_betacampus_pos1_rms_x.txt", rms_x_array)
# np.savetxt("14208_betacampus_pos1_rms_y.txt", rms_y_array)
# np.savetxt("14208_betacampus_pos1_rms_z.txt", rms_z_array)
# np.savetxt("14208_betacampus_pos2_rms_x.txt", rms_x_array)
# np.savetxt("14208_betacampus_pos2_rms_y.txt", rms_y_array)
# np.savetxt("14208_betacampus_pos2_rms_z.txt", rms_z_array)
np.savetxt("14208_Huygensgebouw_rms_x.txt", rms_x_array)
np.savetxt("14208_Huygensgebouw_rms_y.txt", rms_y_array)
np.savetxt("14208_Huygensgebouw_rms_z.txt", rms_z_array)
rms_x_all = np.zeros((len(f_band), 6))
rms_y_all = np.zeros((len(f_band), 6))
rms_z_all = np.zeros((len(f_band), 6))
rms_x_all[..., 0] = f_band
rms_x_all[..., 1] = rms_x_array[..., 1:].min(axis = 1)
rms_x_all[..., 2] = rms_x_array[..., 1:].mean(axis = 1) - rms_x_array[..., 1:].std(axis = 1)
rms_x_all[..., 3] = rms_x_array[..., 1:].mean(axis = 1)
rms_x_all[..., 4] = rms_x_array[..., 1:].mean(axis = 1) + rms_x_array[..., 1:].std(axis = 1)
rms_x_all[..., 5] = rms_x_array[..., 1:].max(axis = 1)
rms_y_all[..., 0] = f_band
rms_y_all[..., 1] = rms_y_array[..., 1:].min(axis = 1)
rms_y_all[..., 2] = rms_y_array[..., 1:].mean(axis = 1) - rms_y_array[..., 1:].std(axis = 1)
rms_y_all[..., 3] = rms_y_array[..., 1:].mean(axis = 1)
rms_y_all[..., 4] = rms_y_array[..., 1:].mean(axis = 1) + rms_y_array[..., 1:].std(axis = 1)
rms_y_all[..., 5] = rms_y_array[..., 1:].max(axis = 1)
rms_z_all[..., 0] = f_band
rms_z_all[..., 1] = rms_z_array[..., 1:].min(axis = 1)
rms_z_all[..., 2] = rms_z_array[..., 1:].mean(axis = 1) - rms_z_array[..., 1:].std(axis = 1)
rms_z_all[..., 3] = rms_z_array[..., 1:].mean(axis = 1)
rms_z_all[..., 4] = rms_z_array[..., 1:].mean(axis = 1) + rms_z_array[..., 1:].std(axis = 1)
rms_z_all[..., 5] = rms_z_array[..., 1:].max(axis = 1)
# np.savetxt("14208_betacampus_pos1_rms_x_all.txt", rms_x_all)
# np.savetxt("14208_betacampus_pos1_rms_y_all.txt", rms_y_all)
# np.savetxt("14208_betacampus_pos1_rms_z_all.txt", rms_z_all)
# np.savetxt("14208_betacampus_pos2_rms_x_all.txt", rms_x_all)
# np.savetxt("14208_betacampus_pos2_rms_y_all.txt", rms_y_all)
# np.savetxt("14208_betacampus_pos2_rms_z_all.txt", rms_z_all)
np.savetxt("14208_Huygensgebouw_rms_x_all.txt", rms_x_all)
np.savetxt("14208_Huygensgebouw_rms_y_all.txt", rms_y_all)
np.savetxt("14208_Huygensgebouw_rms_z_all.txt", rms_z_all)
etime = time.time()
dtime = etime - stime
print("elapsed time = %s" %dtime)
| 43.648 | 104 | 0.65011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,176 | 0.398827 |
ff618e7f05dc5906031406a71845d508dc1f772b | 6,515 | py | Python | addons/calendar/tests/test_event_notifications.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/calendar/tests/test_event_notifications.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/calendar/tests/test_event_notifications.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from unittest.mock import patch
from datetime import datetime, date
from dateutil.relativedelta import relativedelta
from odoo import fields
from odoo.tests.common import SavepointCase, new_test_user
from odoo.addons.mail.tests.common import MailCase
class TestEventNotifications(SavepointCase, MailCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.event = cls.env['calendar.event'].create({
'name': "Doom's day",
'start': datetime(2019, 10, 25, 8, 0),
'stop': datetime(2019, 10, 27, 18, 0),
}).with_context(mail_notrack=True)
cls.user = new_test_user(cls.env, 'xav', email='em@il.com', notification_type='inbox')
cls.partner = cls.user.partner_id
def test_message_invite(self):
with self.assertSinglePostNotifications([{'partner': self.partner, 'type': 'inbox'}], {
'message_type': 'user_notification',
'subtype': 'mail.mt_note',
}):
self.event.partner_ids = self.partner
def test_message_invite_self(self):
with self.assertNoNotifications():
self.event.with_user(self.user).partner_ids = self.partner
def test_message_inactive_invite(self):
self.event.active = False
with self.assertNoNotifications():
self.event.partner_ids = self.partner
def test_message_set_inactive_invite(self):
self.event.active = False
with self.assertNoNotifications():
self.event.write({
'partner_ids': [(4, self.partner.id)],
'active': False,
})
def test_message_datetime_changed(self):
self.event.partner_ids = self.partner
"Invitation to Presentation of the new Calendar"
with self.assertSinglePostNotifications([{'partner': self.partner, 'type': 'inbox'}], {
'message_type': 'user_notification',
'subtype': 'mail.mt_note',
}):
self.event.start = fields.Datetime.now() + relativedelta(days=1)
def test_message_date_changed(self):
self.event.write({
'allday': True,
'start_date': fields.Date.today() + relativedelta(days=7),
'stop_date': fields.Date.today() + relativedelta(days=8),
})
self.event.partner_ids = self.partner
with self.assertSinglePostNotifications([{'partner': self.partner, 'type': 'inbox'}], {
'message_type': 'user_notification',
'subtype': 'mail.mt_note',
}):
self.event.start_date += relativedelta(days=-1)
def test_message_date_changed_past(self):
self.event.write({
'allday': True,
'start_date': fields.Date.today(),
'stop_date': fields.Date.today() + relativedelta(days=1),
})
self.event.partner_ids = self.partner
with self.assertNoNotifications():
self.event.write({'start': date(2019, 1, 1)})
def test_message_set_inactive_date_changed(self):
self.event.write({
'allday': True,
'start_date': date(2019, 10, 15),
'stop_date': date(2019, 10, 15),
})
self.event.partner_ids = self.partner
with self.assertNoNotifications():
self.event.write({
'start_date': self.event.start_date - relativedelta(days=1),
'active': False,
})
def test_message_inactive_date_changed(self):
self.event.write({
'allday': True,
'start_date': date(2019, 10, 15),
'stop_date': date(2019, 10, 15),
'active': False,
})
self.event.partner_ids = self.partner
with self.assertNoNotifications():
self.event.start_date += relativedelta(days=-1)
def test_message_add_and_date_changed(self):
self.event.partner_ids -= self.partner
with self.assertSinglePostNotifications([{'partner': self.partner, 'type': 'inbox'}], {
'message_type': 'user_notification',
'subtype': 'mail.mt_note',
}):
self.event.write({
'start': self.event.start - relativedelta(days=1),
'partner_ids': [(4, self.partner.id)],
})
def test_bus_notif(self):
alarm = self.env['calendar.alarm'].create({
'name': 'Alarm',
'alarm_type': 'notification',
'interval': 'minutes',
'duration': 30,
})
now = fields.Datetime.now()
with patch.object(fields.Datetime, 'now', lambda: now):
with self.assertBus([(self.env.cr.dbname, 'calendar.alarm', self.partner.id)]):
self.event.with_context(no_mail_to_attendees=True).write({
'start': now + relativedelta(minutes=50),
'stop': now + relativedelta(minutes=55),
'partner_ids': [(4, self.partner.id)],
'alarm_ids': [(4, alarm.id)]
})
bus_message = [{
"alarm_id": alarm.id,
"event_id": self.event.id,
"title": "Doom's day",
"message": self.event.display_time,
"timer": 20*60,
"notify_at": fields.Datetime.to_string(now + relativedelta(minutes=20)),
}]
notif = self.env['calendar.alarm_manager'].with_user(self.user).get_next_notif()
self.assertEqual(notif, bus_message)
def test_email_alarm(self):
alarm = self.env['calendar.alarm'].create({
'name': 'Alarm',
'alarm_type': 'email',
'interval': 'minutes',
'duration': 20,
})
now = fields.Datetime.now()
self.event.write({
'start': now + relativedelta(minutes=15),
'stop': now + relativedelta(minutes=18),
'partner_ids': [(4, self.partner.id)],
'alarm_ids': [(4, alarm.id)],
})
with patch.object(fields.Datetime, 'now', lambda: now):
with self.assertSinglePostNotifications([{'partner': self.partner, 'type': 'inbox'}], {
'message_type': 'user_notification',
'subtype': 'mail.mt_note',
}):
self.env['calendar.alarm_manager'].with_context(lastcall=now - relativedelta(minutes=15))._get_partner_next_mail(self.partner)
| 39.72561 | 142 | 0.576362 | 6,160 | 0.94551 | 0 | 0 | 437 | 0.067076 | 0 | 0 | 1,180 | 0.18112 |
ff61a35da70ce96da4dbed1626f69b49464f0702 | 1,393 | py | Python | transformers/configuration_classifier.py | Rohit--Sharma/transformers | 837b33e31546b0064208770ba37b88a5d93ec826 | [
"Apache-2.0"
] | null | null | null | transformers/configuration_classifier.py | Rohit--Sharma/transformers | 837b33e31546b0064208770ba37b88a5d93ec826 | [
"Apache-2.0"
] | null | null | null | transformers/configuration_classifier.py | Rohit--Sharma/transformers | 837b33e31546b0064208770ba37b88a5d93ec826 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" CNN model configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import sys
from .configuration_utils import PretrainedConfig
import numpy as np
logger = logging.getLogger(__name__)
class ClassifierConfig(PretrainedConfig):
def __init__(self,
batch_size=50,
class_size=2,
dropout_prob=0.1,
cnn_train=True,
**kwargs):
super(ClassifierConfig, self).__init__(**kwargs)
self.batch_size = batch_size
self.class_size = class_size
self.dropout_prob = dropout_prob
self.cnn_train = cnn_train
| 33.97561 | 83 | 0.712132 | 430 | 0.308686 | 0 | 0 | 0 | 0 | 0 | 0 | 725 | 0.520459 |
ff645fb0b531eb78c5209f67de47acd24606ce15 | 407 | py | Python | teeport/__init__.py | SPEAR3-ML/teeport-client-python | cb0a81e9a8e111bf4116484bbe496252aa8d9edc | [
"MIT"
] | null | null | null | teeport/__init__.py | SPEAR3-ML/teeport-client-python | cb0a81e9a8e111bf4116484bbe496252aa8d9edc | [
"MIT"
] | null | null | null | teeport/__init__.py | SPEAR3-ML/teeport-client-python | cb0a81e9a8e111bf4116484bbe496252aa8d9edc | [
"MIT"
] | null | null | null | # Copyright (c) 2019-2020, SPEAR3 authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
# The reason we need nest_asyncio is that we run multiple event loops in a
# single thread, that's not allowed. The future work should be create a new
# thread for each event loop so we can get rid of nest_asyncio
import nest_asyncio
nest_asyncio.apply()
from .teeport import Teeport
| 37 | 75 | 0.773956 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 330 | 0.810811 |
ff65c0e6971ce7212c2e0454417283a2f36e752e | 1,886 | py | Python | Eval/model.py | gabbar-08/Coupon-Redemption-Prediction | 98a65f1ca2c14bf9d6430aeebcb1594b20211bcf | [
"MIT"
] | null | null | null | Eval/model.py | gabbar-08/Coupon-Redemption-Prediction | 98a65f1ca2c14bf9d6430aeebcb1594b20211bcf | [
"MIT"
] | null | null | null | Eval/model.py | gabbar-08/Coupon-Redemption-Prediction | 98a65f1ca2c14bf9d6430aeebcb1594b20211bcf | [
"MIT"
] | null | null | null |
#Step 1 :- Importing dependancies and train test data generated
from config import *
train_data = pd.read_csv("data/train_data/train_feature.csv")
test_data = pd.read_csv("data/test_data/test_feature.csv")
#Step 2 :- Getting train data insights and drop unnecessary columns, Splitting data into input and target variable sets.
print(list(train_data['redemption_status']).count(0) * 100 / len(train_data['redemption_status']), "% coupons not redeemed in training data ")
X = train_data
X.dropna(inplace=True)
X.drop(["id","campaign_id","c_freq_category","c_rare_category","start_date","end_date","duration","age_range","overall_freq_category","overall_rare_category"], axis=1,inplace=True)
y = train_data['redemption_status']
X.drop('redemption_status',axis = 1, inplace = True)
#Step 3 :- Train-test Split for the model
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
#Step 4 :- Initiate model and fit transform
model = GaussianNB()
model.fit(X_train, y_train)
#Step 5 :- Predict on the test part of the split
y_pred = model.predict(X_test)
#Step 6 :- Save the model for the inference engine
filename = 'model/finalized_model_2.sav'
pickle.dump(model, open(filename, 'wb'))
#Step 7 :- Calculate Training data accuracy of the model
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
#Step 8 :- Use the model on test data to predict the target in test data
Y = test_data
Y.drop(["id","campaign_id","c_freq_category","c_rare_category","start_date","end_date","duration","age_range","overall_freq_category","overall_rare_category"], axis=1,inplace=True)
Y.dropna(inplace = True)
Predictions = model.predict(Y)
# Print results
print(list(Predictions).count(0) * 100 / len(Predictions) , "% Coupans not redeemed in Test Data" )
| 41.911111 | 181 | 0.742842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,067 | 0.565748 |
ff66410bdbe1205af5c60fa1d3952254cdea2bbe | 1,078 | py | Python | docs/_ext/django_models.py | bhrutledge/jahhills.com | 74fe94a214f1ed5681bd45159315f0b68daf5a33 | [
"MIT"
] | 1 | 2016-04-12T17:38:26.000Z | 2016-04-12T17:38:26.000Z | docs/_ext/django_models.py | bhrutledge/jahhills.com | 74fe94a214f1ed5681bd45159315f0b68daf5a33 | [
"MIT"
] | 92 | 2015-04-03T10:04:55.000Z | 2021-07-17T11:13:52.000Z | docs/_ext/django_models.py | bhrutledge/jahhills.com | 74fe94a214f1ed5681bd45159315f0b68daf5a33 | [
"MIT"
] | 1 | 2021-01-26T18:02:49.000Z | 2021-01-26T18:02:49.000Z | # Auto-document Django models
# Copied and adapted from https://djangosnippets.org/snippets/2533/
import inspect
from django.utils.html import strip_tags
from django.utils.encoding import force_text
from django.db import models
def process_docstring(app, what, name, obj, options, lines):
# Only look at objects that inherit from Django's base model class
if inspect.isclass(obj) and issubclass(obj, models.Model):
fields = obj._meta.fields
for field in fields:
help_text = strip_tags(force_text(field.help_text))
verbose_name = force_text(field.verbose_name).capitalize()
field_type = type(field).__name__
if help_text:
lines.append(':param %s: %s' % (field.attname, help_text))
else:
lines.append(':param %s: %s' % (field.attname, verbose_name))
lines.append(':type %s: %s' % (field.attname, field_type))
# Return the extended docstring
return lines
def setup(app):
app.connect('autodoc-process-docstring', process_docstring)
| 31.705882 | 77 | 0.669759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.244898 |
ff67664ebb9e9ff9a5a17938e52f47a51c7667d5 | 75 | py | Python | Hacker_Rank/Python: Division.py | Jai-kishan/Practice-Questions | cf3a3eb5c2e930fcfcb762d822430060bb5deb2d | [
"Apache-2.0"
] | 1 | 2019-05-04T09:21:00.000Z | 2019-05-04T09:21:00.000Z | Hacker_Rank/Python: Division.py | Jai-kishan/Practice-Questions | cf3a3eb5c2e930fcfcb762d822430060bb5deb2d | [
"Apache-2.0"
] | null | null | null | Hacker_Rank/Python: Division.py | Jai-kishan/Practice-Questions | cf3a3eb5c2e930fcfcb762d822430060bb5deb2d | [
"Apache-2.0"
] | null | null | null | a=int(input())
b=int(input())
print (a/b)
a=float(a)
b=float(b)
print (a/b) | 12.5 | 14 | 0.613333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ff687b565ac41fe08f64ade140dd3095099140f5 | 30,770 | py | Python | math/tests/testcases.py | fuz-woo/gpython | 06dadad26f41e35b57c144ffdac01fb3f23977e9 | [
"BSD-3-Clause"
] | 520 | 2018-08-21T19:28:23.000Z | 2022-03-30T21:26:10.000Z | math/tests/testcases.py | fuz-woo/gpython | 06dadad26f41e35b57c144ffdac01fb3f23977e9 | [
"BSD-3-Clause"
] | 165 | 2018-08-22T07:54:29.000Z | 2022-03-31T20:34:21.000Z | math/tests/testcases.py | fuz-woo/gpython | 06dadad26f41e35b57c144ffdac01fb3f23977e9 | [
"BSD-3-Clause"
] | 84 | 2018-08-27T03:29:09.000Z | 2022-03-15T23:19:13.000Z | # Copyright 2018 The go-python Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Testcases for functions in math.
#
# Each line takes the form:
#
# <testid> <function> <input_value> -> <output_value> <flags>
#
# where:
#
# <testid> is a short name identifying the test,
#
# <function> is the function to be tested (exp, cos, asinh, ...),
#
# <input_value> is a string representing a floating-point value
#
# <output_value> is the expected (ideal) output value, again
# represented as a string.
#
# <flags> is a list of the floating-point flags required by C99
#
# The possible flags are:
#
# divide-by-zero : raised when a finite input gives a
# mathematically infinite result.
#
# overflow : raised when a finite input gives a finite result that
# is too large to fit in the usual range of an IEEE 754 double.
#
# invalid : raised for invalid inputs (e.g., sqrt(-1))
#
# ignore-sign : indicates that the sign of the result is
# unspecified; e.g., if the result is given as inf,
# then both -inf and inf should be accepted as correct.
#
# Flags may appear in any order.
#
# Lines beginning with '#' (like this one) start a comment, and are
# ignored. Blank lines, or lines containing only whitespace, are also
# ignored.
# Many of the values below were computed with the help of
# version 2.4 of the MPFR library for multiple-precision
# floating-point computations with correct rounding. All output
# values in this file are (modulo yet-to-be-discovered bugs)
# correctly rounded, provided that each input and output decimal
# floating-point value below is interpreted as a representation of
# the corresponding nearest IEEE 754 double-precision value. See the
# MPFR homepage at http://www.mpfr.org for more information about the
# MPFR project.
import math
from libtest import *
from libulp import *
# Test-runner state: `doc` holds the id of the test case currently being
# executed (updated by t() below); inf/nan are convenience float constants.
doc="testcases"
inf = float("inf")
nan = float("nan")
def tolerance(a, b, e):
    """Return True when |a - b| is within tolerance e.

    When a is non-zero the tolerance is interpreted relative to a
    (the allowed difference is |e * a|); otherwise e is absolute.
    """
    diff = abs(a - b)
    allowed = e if a == 0 else e * a
    return diff <= abs(allowed)
def acc_check(what, want, got, rel_err=2e-15, abs_err=5e-323):
    """Assert that two non-NaN floats agree to within a small rounding error.

    The default rel_err/abs_err suit platforms where a float is an IEEE 754
    double; they permit an error of roughly 9 to 19 ulps. Raises
    AssertionError when *got* falls outside the permitted band around *want*.
    """
    # Infinities need a special case, since inf - inf gives nan.
    if math.isinf(want) and got == want:
        return
    error = got - want
    permitted_error = max(rel_err * abs(want), abs_err)
    if abs(error) < permitted_error:
        return
    raise AssertionError("%s: want %g, got %g: error = %g; permitted error = %g" % (what, want, got, error, permitted_error))
def t(name, fn, x, want, exc=None):
    """Run a single math-function test case.

    name -- test id, recorded in the module-global `doc` for diagnostics
    fn   -- the math function under test
    x    -- input value passed to fn
    want -- expected result (ignored when `exc` is given)
    exc  -- expected exception type; when set, fn(x) must raise it
    """
    global doc
    doc = name
    if exc is None:
        got = fn(x)
        # NaN never compares equal to anything, so match NaNs explicitly.
        if math.isnan(want) and math.isnan(got):
            return
        if want == inf and got == inf:
            return
        if want == -inf and got == -inf:
            return
        if fn == math.lgamma:
            # we use a weaker accuracy test for lgamma;
            # lgamma only achieves an absolute error of
            # a few multiples of the machine accuracy, in
            # general.
            acc_check(doc, want, got, rel_err=5e-15, abs_err=5e-15)
        elif fn == math.erfc:
            # erfc has less-than-ideal accuracy for large
            # arguments (x ~ 25 or so), mainly due to the
            # error involved in computing exp(-x*x).
            #
            # XXX Would be better to weaken this test only
            # for large x, instead of for all x.
            ulps_check(doc, want, got, 2000)
        else:
            ulps_check(doc, want, got, 20)
    else:
        try:
            fn(x)  # result intentionally discarded; only the exception matters
        except exc:
            pass
        else:
            # Raise explicitly: a bare `assert False` is stripped under -O.
            raise AssertionError("%s not raised" % exc)
#
# erf: error function --
#
t("erf0000", math.erf, 0.0, 0.0)
t("erf0001", math.erf, -0.0, -0.0)
t("erf0002", math.erf, inf, 1.0)
t("erf0003", math.erf, -inf, -1.0)
t("erf0004", math.erf, nan, nan)
# tiny values
t("erf0010", math.erf, 1e-308, 1.1283791670955125e-308)
t("erf0011", math.erf, 5e-324, 4.9406564584124654e-324)
t("erf0012", math.erf, 1e-10, 1.1283791670955126e-10)
# small integers
t("erf0020", math.erf, 1, 0.84270079294971489)
t("erf0021", math.erf, 2, 0.99532226501895271)
t("erf0022", math.erf, 3, 0.99997790950300136)
t("erf0023", math.erf, 4, 0.99999998458274209)
t("erf0024", math.erf, 5, 0.99999999999846256)
t("erf0025", math.erf, 6, 1.0)
t("erf0030", math.erf, -1, -0.84270079294971489)
t("erf0031", math.erf, -2, -0.99532226501895271)
t("erf0032", math.erf, -3, -0.99997790950300136)
t("erf0033", math.erf, -4, -0.99999998458274209)
t("erf0034", math.erf, -5, -0.99999999999846256)
t("erf0035", math.erf, -6, -1.0)
# huge values should all go to +/-1, depending on sign
t("erf0040", math.erf, -40, -1.0)
t("erf0041", math.erf, 1e16, 1.0)
t("erf0042", math.erf, -1e150, -1.0)
t("erf0043", math.erf, 1.7e308, 1.0)
# Issue 8986: inputs x with exp(-x*x) near the underflow threshold
# incorrectly signalled overflow on some platforms.
t("erf0100", math.erf, 26.2, 1.0)
t("erf0101", math.erf, 26.4, 1.0)
t("erf0102", math.erf, 26.6, 1.0)
t("erf0103", math.erf, 26.8, 1.0)
t("erf0104", math.erf, 27.0, 1.0)
t("erf0105", math.erf, 27.2, 1.0)
t("erf0106", math.erf, 27.4, 1.0)
t("erf0107", math.erf, 27.6, 1.0)
t("erf0110", math.erf, -26.2, -1.0)
t("erf0111", math.erf, -26.4, -1.0)
t("erf0112", math.erf, -26.6, -1.0)
t("erf0113", math.erf, -26.8, -1.0)
t("erf0114", math.erf, -27.0, -1.0)
t("erf0115", math.erf, -27.2, -1.0)
t("erf0116", math.erf, -27.4, -1.0)
t("erf0117", math.erf, -27.6, -1.0)
#
# erfc: complementary error function --
#
t("erfc0000", math.erfc, 0.0, 1.0)
t("erfc0001", math.erfc, -0.0, 1.0)
t("erfc0002", math.erfc, inf, 0.0)
t("erfc0003", math.erfc, -inf, 2.0)
t("erfc0004", math.erfc, nan, nan)
# tiny values
t("erfc0010", math.erfc, 1e-308, 1.0)
t("erfc0011", math.erfc, 5e-324, 1.0)
t("erfc0012", math.erfc, 1e-10, 0.99999999988716204)
# small integers
t("erfc0020", math.erfc, 1, 0.15729920705028513)
t("erfc0021", math.erfc, 2, 0.0046777349810472662)
t("erfc0022", math.erfc, 3, 2.2090496998585441e-05)
t("erfc0023", math.erfc, 4, 1.541725790028002e-08)
t("erfc0024", math.erfc, 5, 1.5374597944280349e-12)
t("erfc0025", math.erfc, 6, 2.1519736712498913e-17)
t("erfc0030", math.erfc, -1, 1.8427007929497148)
t("erfc0031", math.erfc, -2, 1.9953222650189528)
t("erfc0032", math.erfc, -3, 1.9999779095030015)
t("erfc0033", math.erfc, -4, 1.9999999845827421)
t("erfc0034", math.erfc, -5, 1.9999999999984626)
t("erfc0035", math.erfc, -6, 2.0)
# as x -> infinity, erfc(x) behaves like exp(-x*x)/x/sqrt(pi)
t("erfc0040", math.erfc, 20, 5.3958656116079012e-176)
t("erfc0041", math.erfc, 25, 8.3001725711965228e-274)
# FIXME(underflows to 0) t("erfc0042", math.erfc, 27, 5.2370464393526292e-319)
t("erfc0043", math.erfc, 28, 0.0)
# huge values
t("erfc0050", math.erfc, -40, 2.0)
t("erfc0051", math.erfc, 1e16, 0.0)
t("erfc0052", math.erfc, -1e150, 2.0)
t("erfc0053", math.erfc, 1.7e308, 0.0)
# Issue 8986: inputs x with exp(-x*x) near the underflow threshold
# incorrectly signalled overflow on some platforms.
t("erfc0100", math.erfc, 26.2, 1.6432507924389461e-300)
t("erfc0101", math.erfc, 26.4, 4.4017768588035426e-305)
t("erfc0102", math.erfc, 26.6, 1.0885125885442269e-309)
# FIXME(underflows to 0) t("erfc0103", math.erfc, 26.8, 2.4849621571966629e-314)
# FIXME(underflows to 0) t("erfc0104", math.erfc, 27.0, 5.2370464393526292e-319)
# FIXME(underflows to 0) t("erfc0105", math.erfc, 27.2, 9.8813129168249309e-324)
t("erfc0106", math.erfc, 27.4, 0.0)
t("erfc0107", math.erfc, 27.6, 0.0)
t("erfc0110", math.erfc, -26.2, 2.0)
t("erfc0111", math.erfc, -26.4, 2.0)
t("erfc0112", math.erfc, -26.6, 2.0)
t("erfc0113", math.erfc, -26.8, 2.0)
t("erfc0114", math.erfc, -27.0, 2.0)
t("erfc0115", math.erfc, -27.2, 2.0)
t("erfc0116", math.erfc, -27.4, 2.0)
t("erfc0117", math.erfc, -27.6, 2.0)
#
# lgamma: log of absolute value of the gamma function --
#
# special values
t("lgam0000", math.lgamma, 0.0, inf, ValueError)
t("lgam0001", math.lgamma, -0.0, inf, ValueError)
t("lgam0002", math.lgamma, inf, inf)
# FIXME(ValueError) t("lgam0003", math.lgamma, -inf, inf)
t("lgam0004", math.lgamma, nan, nan)
# negative integers
t("lgam0010", math.lgamma, -1, inf, ValueError)
t("lgam0011", math.lgamma, -2, inf, ValueError)
t("lgam0012", math.lgamma, -1e16, inf, ValueError)
t("lgam0013", math.lgamma, -1e300, inf, ValueError)
t("lgam0014", math.lgamma, -1.79e308, inf, ValueError)
# small positive integers give factorials
t("lgam0020", math.lgamma, 1, 0.0)
t("lgam0021", math.lgamma, 2, 0.0)
t("lgam0022", math.lgamma, 3, 0.69314718055994529)
t("lgam0023", math.lgamma, 4, 1.791759469228055)
t("lgam0024", math.lgamma, 5, 3.1780538303479458)
t("lgam0025", math.lgamma, 6, 4.7874917427820458)
# half integers
t("lgam0030", math.lgamma, 0.5, 0.57236494292470008)
t("lgam0031", math.lgamma, 1.5, -0.12078223763524522)
t("lgam0032", math.lgamma, 2.5, 0.28468287047291918)
t("lgam0033", math.lgamma, 3.5, 1.2009736023470743)
t("lgam0034", math.lgamma, -0.5, 1.2655121234846454)
t("lgam0035", math.lgamma, -1.5, 0.86004701537648098)
t("lgam0036", math.lgamma, -2.5, -0.056243716497674054)
t("lgam0037", math.lgamma, -3.5, -1.309006684993042)
# values near 0
t("lgam0040", math.lgamma, 0.1, 2.252712651734206)
t("lgam0041", math.lgamma, 0.01, 4.5994798780420219)
t("lgam0042", math.lgamma, 1e-8, 18.420680738180209)
t("lgam0043", math.lgamma, 1e-16, 36.841361487904734)
t("lgam0044", math.lgamma, 1e-30, 69.077552789821368)
t("lgam0045", math.lgamma, 1e-160, 368.41361487904732)
# FIXME(inaccurate) t("lgam0046", math.lgamma, 1e-308, 709.19620864216608)
# FIXME(inaccurate) t("lgam0047", math.lgamma, 5.6e-309, 709.77602713741896)
# FIXME(inaccurate) t("lgam0048", math.lgamma, 5.5e-309, 709.79404564292167)
# FIXME(inaccurate) t("lgam0049", math.lgamma, 1e-309, 711.49879373516012)
# FIXME(inaccurate) t("lgam0050", math.lgamma, 1e-323, 743.74692474082133)
# FIXME(inaccurate) t("lgam0051", math.lgamma, 5e-324, 744.44007192138122)
t("lgam0060", math.lgamma, -0.1, 2.3689613327287886)
t("lgam0061", math.lgamma, -0.01, 4.6110249927528013)
t("lgam0062", math.lgamma, -1e-8, 18.420680749724522)
t("lgam0063", math.lgamma, -1e-16, 36.841361487904734)
t("lgam0064", math.lgamma, -1e-30, 69.077552789821368)
t("lgam0065", math.lgamma, -1e-160, 368.41361487904732)
# FIXME(inaccurate) t("lgam0066", math.lgamma, -1e-308, 709.19620864216608)
# FIXME(inaccurate) t("lgam0067", math.lgamma, -5.6e-309, 709.77602713741896)
# FIXME(inaccurate) t("lgam0068", math.lgamma, -5.5e-309, 709.79404564292167)
# FIXME(inaccurate) t("lgam0069", math.lgamma, -1e-309, 711.49879373516012)
# FIXME(inaccurate) t("lgam0070", math.lgamma, -1e-323, 743.74692474082133)
# FIXME(inaccurate) t("lgam0071", math.lgamma, -5e-324, 744.44007192138122)
# values near negative integers
t("lgam0080", math.lgamma, -0.99999999999999989, 36.736800569677101)
t("lgam0081", math.lgamma, -1.0000000000000002, 36.043653389117154)
t("lgam0082", math.lgamma, -1.9999999999999998, 35.350506208557213)
t("lgam0083", math.lgamma, -2.0000000000000004, 34.657359027997266)
t("lgam0084", math.lgamma, -100.00000000000001, -331.85460524980607)
t("lgam0085", math.lgamma, -99.999999999999986, -331.85460524980596)
# large inputs
t("lgam0100", math.lgamma, 170, 701.43726380873704)
t("lgam0101", math.lgamma, 171, 706.57306224578736)
t("lgam0102", math.lgamma, 171.624, 709.78077443669895)
t("lgam0103", math.lgamma, 171.625, 709.78591682948365)
t("lgam0104", math.lgamma, 172, 711.71472580228999)
t("lgam0105", math.lgamma, 2000, 13198.923448054265)
t("lgam0106", math.lgamma, 2.55998332785163e305, 1.7976931348623099e+308)
t("lgam0107", math.lgamma, 2.55998332785164e305, inf, OverflowError)
t("lgam0108", math.lgamma, 1.7e308, inf, OverflowError)
# inputs for which gamma(x) is tiny
t("lgam0120", math.lgamma, -100.5, -364.90096830942736)
t("lgam0121", math.lgamma, -160.5, -656.88005261126432)
t("lgam0122", math.lgamma, -170.5, -707.99843314507882)
t("lgam0123", math.lgamma, -171.5, -713.14301641168481)
t("lgam0124", math.lgamma, -176.5, -738.95247590846486)
t("lgam0125", math.lgamma, -177.5, -744.13144651738037)
t("lgam0126", math.lgamma, -178.5, -749.3160351186001)
t("lgam0130", math.lgamma, -1000.5, -5914.4377011168517)
t("lgam0131", math.lgamma, -30000.5, -279278.6629959144)
# FIXME t("lgam0132", math.lgamma, -4503599627370495.5, -1.5782258434492883e+17)
# results close to 0: positive argument ...
t("lgam0150", math.lgamma, 0.99999999999999989, 6.4083812134800075e-17)
t("lgam0151", math.lgamma, 1.0000000000000002, -1.2816762426960008e-16)
t("lgam0152", math.lgamma, 1.9999999999999998, -9.3876980655431170e-17)
t("lgam0153", math.lgamma, 2.0000000000000004, 1.8775396131086244e-16)
# ... and negative argument
# these are very inaccurate in python3
t("lgam0160", math.lgamma, -2.7476826467, -5.2477408147689136e-11)
t("lgam0161", math.lgamma, -2.457024738, 3.3464637541912932e-10)
#
# gamma: Gamma function --
#
# special values
t("gam0000", math.gamma, 0.0, inf, ValueError)
t("gam0001", math.gamma, -0.0, -inf, ValueError)
t("gam0002", math.gamma, inf, inf)
t("gam0003", math.gamma, -inf, nan, ValueError)
t("gam0004", math.gamma, nan, nan)
# negative integers inputs are invalid
t("gam0010", math.gamma, -1, nan, ValueError)
t("gam0011", math.gamma, -2, nan, ValueError)
t("gam0012", math.gamma, -1e16, nan, ValueError)
t("gam0013", math.gamma, -1e300, nan, ValueError)
# small positive integers give factorials
t("gam0020", math.gamma, 1, 1)
t("gam0021", math.gamma, 2, 1)
t("gam0022", math.gamma, 3, 2)
t("gam0023", math.gamma, 4, 6)
t("gam0024", math.gamma, 5, 24)
t("gam0025", math.gamma, 6, 120)
# half integers
t("gam0030", math.gamma, 0.5, 1.7724538509055161)
t("gam0031", math.gamma, 1.5, 0.88622692545275805)
t("gam0032", math.gamma, 2.5, 1.3293403881791370)
t("gam0033", math.gamma, 3.5, 3.3233509704478426)
t("gam0034", math.gamma, -0.5, -3.5449077018110322)
t("gam0035", math.gamma, -1.5, 2.3632718012073548)
t("gam0036", math.gamma, -2.5, -0.94530872048294190)
t("gam0037", math.gamma, -3.5, 0.27008820585226911)
# values near 0
t("gam0040", math.gamma, 0.1, 9.5135076986687306)
t("gam0041", math.gamma, 0.01, 99.432585119150602)
t("gam0042", math.gamma, 1e-8, 99999999.422784343)
t("gam0043", math.gamma, 1e-16, 10000000000000000)
t("gam0044", math.gamma, 1e-30, 9.9999999999999988e+29)
t("gam0045", math.gamma, 1e-160, 1.0000000000000000e+160)
t("gam0046", math.gamma, 1e-308, 1.0000000000000000e+308)
t("gam0047", math.gamma, 5.6e-309, 1.7857142857142848e+308)
t("gam0048", math.gamma, 5.5e-309, inf, OverflowError)
t("gam0049", math.gamma, 1e-309, inf, OverflowError)
t("gam0050", math.gamma, 1e-323, inf, OverflowError)
t("gam0051", math.gamma, 5e-324, inf, OverflowError)
t("gam0060", math.gamma, -0.1, -10.686287021193193)
t("gam0061", math.gamma, -0.01, -100.58719796441078)
t("gam0062", math.gamma, -1e-8, -100000000.57721567)
t("gam0063", math.gamma, -1e-16, -10000000000000000)
t("gam0064", math.gamma, -1e-30, -9.9999999999999988e+29)
t("gam0065", math.gamma, -1e-160, -1.0000000000000000e+160)
t("gam0066", math.gamma, -1e-308, -1.0000000000000000e+308)
t("gam0067", math.gamma, -5.6e-309, -1.7857142857142848e+308)
t("gam0068", math.gamma, -5.5e-309, -inf, OverflowError)
t("gam0069", math.gamma, -1e-309, -inf, OverflowError)
t("gam0070", math.gamma, -1e-323, -inf, OverflowError)
t("gam0071", math.gamma, -5e-324, -inf, OverflowError)
# values near negative integers
t("gam0080", math.gamma, -0.99999999999999989, -9007199254740992.0)
t("gam0081", math.gamma, -1.0000000000000002, 4503599627370495.5)
t("gam0082", math.gamma, -1.9999999999999998, 2251799813685248.5)
t("gam0083", math.gamma, -2.0000000000000004, -1125899906842623.5)
t("gam0084", math.gamma, -100.00000000000001, -7.5400833348831090e-145)
t("gam0085", math.gamma, -99.999999999999986, 7.5400833348840962e-145)
# large inputs
t("gam0100", math.gamma, 170, 4.2690680090047051e+304)
t("gam0101", math.gamma, 171, 7.2574156153079990e+306)
# FIXME(overflows) t("gam0102", math.gamma, 171.624, 1.7942117599248104e+308)
t("gam0103", math.gamma, 171.625, inf, OverflowError)
t("gam0104", math.gamma, 172, inf, OverflowError)
t("gam0105", math.gamma, 2000, inf, OverflowError)
t("gam0106", math.gamma, 1.7e308, inf, OverflowError)
# inputs for which gamma(x) is tiny
t("gam0120", math.gamma, -100.5, -3.3536908198076787e-159)
t("gam0121", math.gamma, -160.5, -5.2555464470078293e-286)
t("gam0122", math.gamma, -170.5, -3.3127395215386074e-308)
# Reported as https://github.com/golang/go/issues/11441
# FIXME(overflows) t("gam0123", math.gamma, -171.5, 1.9316265431711902e-310)
# FIXME(overflows) t("gam0124", math.gamma, -176.5, -1.1956388629358166e-321)
# FIXME(overflows) t("gam0125", math.gamma, -177.5, 4.9406564584124654e-324)
# FIXME(overflows) t("gam0126", math.gamma, -178.5, -0.0)
# FIXME(overflows) t("gam0127", math.gamma, -179.5, 0.0)
# FIXME(overflows) t("gam0128", math.gamma, -201.0001, 0.0)
# FIXME(overflows) t("gam0129", math.gamma, -202.9999, -0.0)
# FIXME(overflows) t("gam0130", math.gamma, -1000.5, -0.0)
# FIXME(overflows) t("gam0131", math.gamma, -1000000000.3, -0.0)
# FIXME(overflows) t("gam0132", math.gamma, -4503599627370495.5, 0.0)
# inputs that cause problems for the standard reflection formula,
# thanks to loss of accuracy in 1-x
t("gam0140", math.gamma, -63.349078729022985, 4.1777971677761880e-88)
t("gam0141", math.gamma, -127.45117632943295, 1.1831110896236810e-214)
#
# log1p: log(1 + x), without precision loss for small x --
#
# special values
t("log1p0000", math.log1p, 0.0, 0.0)
t("log1p0001", math.log1p, -0.0, -0.0)
t("log1p0002", math.log1p, inf, inf)
t("log1p0003", math.log1p, -inf, nan, ValueError)
t("log1p0004", math.log1p, nan, nan)
# singularity at -1.0
t("log1p0010", math.log1p, -1.0, -inf, ValueError)
t("log1p0011", math.log1p, -0.9999999999999999, -36.736800569677101)
# finite values < 1.0 are invalid
t("log1p0020", math.log1p, -1.0000000000000002, nan, ValueError)
t("log1p0021", math.log1p, -1.1, nan, ValueError)
t("log1p0022", math.log1p, -2.0, nan, ValueError)
t("log1p0023", math.log1p, -1e300, nan, ValueError)
# tiny x: log1p(x) ~ x
t("log1p0110", math.log1p, 5e-324, 5e-324)
t("log1p0111", math.log1p, 1e-320, 1e-320)
t("log1p0112", math.log1p, 1e-300, 1e-300)
t("log1p0113", math.log1p, 1e-150, 1e-150)
t("log1p0114", math.log1p, 1e-20, 1e-20)
t("log1p0120", math.log1p, -5e-324, -5e-324)
t("log1p0121", math.log1p, -1e-320, -1e-320)
t("log1p0122", math.log1p, -1e-300, -1e-300)
t("log1p0123", math.log1p, -1e-150, -1e-150)
t("log1p0124", math.log1p, -1e-20, -1e-20)
# some (mostly) random small and moderate-sized values
t("log1p0200", math.log1p, -0.89156889782277482, -2.2216403106762863)
t("log1p0201", math.log1p, -0.23858496047770464, -0.27257668276980057)
t("log1p0202", math.log1p, -0.011641726191307515, -0.011710021654495657)
t("log1p0203", math.log1p, -0.0090126398571693817, -0.0090534993825007650)
t("log1p0204", math.log1p, -0.00023442805985712781, -0.00023445554240995693)
t("log1p0205", math.log1p, -1.5672870980936349e-5, -1.5672993801662046e-5)
t("log1p0206", math.log1p, -7.9650013274825295e-6, -7.9650330482740401e-6)
t("log1p0207", math.log1p, -2.5202948343227410e-7, -2.5202951519170971e-7)
t("log1p0208", math.log1p, -8.2446372820745855e-11, -8.2446372824144559e-11)
t("log1p0209", math.log1p, -8.1663670046490789e-12, -8.1663670046824230e-12)
t("log1p0210", math.log1p, 7.0351735084656292e-18, 7.0351735084656292e-18)
t("log1p0211", math.log1p, 5.2732161907375226e-12, 5.2732161907236188e-12)
t("log1p0212", math.log1p, 1.0000000000000000e-10, 9.9999999995000007e-11)
t("log1p0213", math.log1p, 2.1401273266000197e-9, 2.1401273243099470e-9)
t("log1p0214", math.log1p, 1.2668914653979560e-8, 1.2668914573728861e-8)
t("log1p0215", math.log1p, 1.6250007816299069e-6, 1.6249994613175672e-6)
t("log1p0216", math.log1p, 8.3740495645839399e-6, 8.3740145024266269e-6)
t("log1p0217", math.log1p, 3.0000000000000001e-5, 2.9999550008999799e-5)
t("log1p0218", math.log1p, 0.0070000000000000001, 0.0069756137364252423)
t("log1p0219", math.log1p, 0.013026235315053002, 0.012942123564008787)
t("log1p0220", math.log1p, 0.013497160797236184, 0.013406885521915038)
t("log1p0221", math.log1p, 0.027625599078135284, 0.027250897463483054)
t("log1p0222", math.log1p, 0.14179687245544870, 0.13260322540908789)
# large values
t("log1p0300", math.log1p, 1.7976931348623157e+308, 709.78271289338397)
t("log1p0301", math.log1p, 1.0000000000000001e+300, 690.77552789821368)
t("log1p0302", math.log1p, 1.0000000000000001e+70, 161.18095650958321)
t("log1p0303", math.log1p, 10000000000.000000, 23.025850930040455)
# other values transferred from testLog1p in test_math
t("log1p0400", math.log1p, -0.63212055882855767, -1.0000000000000000)
t("log1p0401", math.log1p, 1.7182818284590451, 1.0000000000000000)
t("log1p0402", math.log1p, 1.0000000000000000, 0.69314718055994529)
t("log1p0403", math.log1p, 1.2379400392853803e+27, 62.383246250395075)
#
# expm1: exp(x) - 1, without precision loss for small x --
#
# special values
t("expm10000", math.expm1, 0.0, 0.0)
t("expm10001", math.expm1, -0.0, -0.0)
t("expm10002", math.expm1, inf, inf)
t("expm10003", math.expm1, -inf, -1.0)
t("expm10004", math.expm1, nan, nan)
# expm1(x) ~ x for tiny x
t("expm10010", math.expm1, 5e-324, 5e-324)
t("expm10011", math.expm1, 1e-320, 1e-320)
t("expm10012", math.expm1, 1e-300, 1e-300)
t("expm10013", math.expm1, 1e-150, 1e-150)
t("expm10014", math.expm1, 1e-20, 1e-20)
t("expm10020", math.expm1, -5e-324, -5e-324)
t("expm10021", math.expm1, -1e-320, -1e-320)
t("expm10022", math.expm1, -1e-300, -1e-300)
t("expm10023", math.expm1, -1e-150, -1e-150)
t("expm10024", math.expm1, -1e-20, -1e-20)
# moderate sized values, where direct evaluation runs into trouble
t("expm10100", math.expm1, 1e-10, 1.0000000000500000e-10)
t("expm10101", math.expm1, -9.9999999999999995e-08, -9.9999995000000163e-8)
t("expm10102", math.expm1, 3.0000000000000001e-05, 3.0000450004500034e-5)
t("expm10103", math.expm1, -0.0070000000000000001, -0.0069755570667648951)
t("expm10104", math.expm1, -0.071499208740094633, -0.069002985744820250)
t("expm10105", math.expm1, -0.063296004180116799, -0.061334416373633009)
t("expm10106", math.expm1, 0.02390954035597756, 0.024197665143819942)
t("expm10107", math.expm1, 0.085637352649044901, 0.089411184580357767)
t("expm10108", math.expm1, 0.5966174947411006, 0.81596588596501485)
t("expm10109", math.expm1, 0.30247206212075139, 0.35319987035848677)
t("expm10110", math.expm1, 0.74574727375889516, 1.1080161116737459)
t("expm10111", math.expm1, 0.97767512926555711, 1.6582689207372185)
t("expm10112", math.expm1, 0.8450154566787712, 1.3280137976535897)
t("expm10113", math.expm1, -0.13979260323125264, -0.13046144381396060)
t("expm10114", math.expm1, -0.52899322039643271, -0.41080213643695923)
t("expm10115", math.expm1, -0.74083261478900631, -0.52328317124797097)
t("expm10116", math.expm1, -0.93847766984546055, -0.60877704724085946)
t("expm10117", math.expm1, 10.0, 22025.465794806718)
t("expm10118", math.expm1, 27.0, 532048240600.79865)
t("expm10119", math.expm1, 123, 2.6195173187490626e+53)
t("expm10120", math.expm1, -12.0, -0.99999385578764666)
t("expm10121", math.expm1, -35.100000000000001, -0.99999999999999944)
# extreme negative values
t("expm10201", math.expm1, -37.0, -0.99999999999999989)
t("expm10200", math.expm1, -38.0, -1.0)
# FIXME(overflows) t("expm10210", math.expm1, -710.0, -1.0)
# the formula expm1(x) = 2 * sinh(x/2) * exp(x/2) doesn't work so
# well when exp(x/2) is subnormal or underflows to zero; check we're
# not using it!
# Reported as https://github.com/golang/go/issues/11442
# FIXME(overflows) t("expm10211", math.expm1, -1420.0, -1.0)
# FIXME(overflows) t("expm10212", math.expm1, -1450.0, -1.0)
# FIXME(overflows) t("expm10213", math.expm1, -1500.0, -1.0)
# FIXME(overflows) t("expm10214", math.expm1, -1e50, -1.0)
# FIXME(overflows) t("expm10215", math.expm1, -1.79e308, -1.0)
# extreme positive values
# FIXME(fails on 32 bit) t("expm10300", math.expm1, 300, 1.9424263952412558e+130)
# FIXME(fails on 32 bit) t("expm10301", math.expm1, 700, 1.0142320547350045e+304)
# the next test (expm10302) is disabled because it causes failure on
# OS X 10.4/Intel: apparently all values over 709.78 produce an
# overflow on that platform. See issue #7575.
# expm10302 expm1 709.78271289328393 -> 1.7976931346824240e+308
t("expm10303", math.expm1, 709.78271289348402, inf, OverflowError)
t("expm10304", math.expm1, 1000, inf, OverflowError)
t("expm10305", math.expm1, 1e50, inf, OverflowError)
t("expm10306", math.expm1, 1.79e308, inf, OverflowError)
# weaker version of expm10302
# FIXME(fails on 32 bit) t("expm10307", math.expm1, 709.5, 1.3549863193146328e+308)
#
# log2: log to base 2 --
#
# special values
t("log20000", math.log2, 0.0, -inf, ValueError)
t("log20001", math.log2, -0.0, -inf, ValueError)
t("log20002", math.log2, inf, inf)
t("log20003", math.log2, -inf, nan, ValueError)
t("log20004", math.log2, nan, nan)
# exact value at 1.0
t("log20010", math.log2, 1.0, 0.0)
# negatives
t("log20020", math.log2, -5e-324, nan, ValueError)
t("log20021", math.log2, -1.0, nan, ValueError)
t("log20022", math.log2, -1.7e-308, nan, ValueError)
# exact values at powers of 2
t("log20100", math.log2, 2.0, 1.0)
t("log20101", math.log2, 4.0, 2.0)
t("log20102", math.log2, 8.0, 3.0)
t("log20103", math.log2, 16.0, 4.0)
t("log20104", math.log2, 32.0, 5.0)
t("log20105", math.log2, 64.0, 6.0)
t("log20106", math.log2, 128.0, 7.0)
t("log20107", math.log2, 256.0, 8.0)
t("log20108", math.log2, 512.0, 9.0)
t("log20109", math.log2, 1024.0, 10.0)
t("log20110", math.log2, 2048.0, 11.0)
t("log20200", math.log2, 0.5, -1.0)
t("log20201", math.log2, 0.25, -2.0)
t("log20202", math.log2, 0.125, -3.0)
t("log20203", math.log2, 0.0625, -4.0)
# values close to 1.0
# FIXME(inaccurate) t("log20300", math.log2, 1.0000000000000002, 3.2034265038149171e-16)
# FIXME(inaccurate) t("log20301", math.log2, 1.0000000001, 1.4426951601859516e-10)
# FIXME(inaccurate) t("log20302", math.log2, 1.00001, 1.4426878274712997e-5)
t("log20310", math.log2, 0.9999999999999999, -1.6017132519074588e-16)
t("log20311", math.log2, 0.9999999999, -1.4426951603302210e-10)
t("log20312", math.log2, 0.99999, -1.4427022544056922e-5)
# tiny values
t("log20400", math.log2, 5e-324, -1074.0)
t("log20401", math.log2, 1e-323, -1073.0)
t("log20402", math.log2, 1.5e-323, -1072.4150374992789)
t("log20403", math.log2, 2e-323, -1072.0)
t("log20410", math.log2, 1e-308, -1023.1538532253076)
t("log20411", math.log2, 2.2250738585072014e-308, -1022.0)
t("log20412", math.log2, 4.4501477170144028e-308, -1021.0)
t("log20413", math.log2, 1e-307, -1019.8319251304202)
# huge values
t("log20500", math.log2, 1.7976931348623157e+308, 1024.0)
t("log20501", math.log2, 1.7e+308, 1023.9193879716706)
t("log20502", math.log2, 8.9884656743115795e+307, 1023.0)
# selection of random values
t("log20600", math.log2, -7.2174324841039838e+289, nan, ValueError)
t("log20601", math.log2, -2.861319734089617e+265, nan, ValueError)
t("log20602", math.log2, -4.3507646894008962e+257, nan, ValueError)
t("log20603", math.log2, -6.6717265307520224e+234, nan, ValueError)
t("log20604", math.log2, -3.9118023786619294e+229, nan, ValueError)
t("log20605", math.log2, -1.5478221302505161e+206, nan, ValueError)
t("log20606", math.log2, -1.4380485131364602e+200, nan, ValueError)
t("log20607", math.log2, -3.7235198730382645e+185, nan, ValueError)
t("log20608", math.log2, -1.0472242235095724e+184, nan, ValueError)
t("log20609", math.log2, -5.0141781956163884e+160, nan, ValueError)
t("log20610", math.log2, -2.1157958031160324e+124, nan, ValueError)
t("log20611", math.log2, -7.9677558612567718e+90, nan, ValueError)
t("log20612", math.log2, -5.5553906194063732e+45, nan, ValueError)
t("log20613", math.log2, -16573900952607.953, nan, ValueError)
t("log20614", math.log2, -37198371019.888618, nan, ValueError)
t("log20615", math.log2, -6.0727115121422674e-32, nan, ValueError)
t("log20616", math.log2, -2.5406841656526057e-38, nan, ValueError)
t("log20617", math.log2, -4.9056766703267657e-43, nan, ValueError)
t("log20618", math.log2, -2.1646786075228305e-71, nan, ValueError)
t("log20619", math.log2, -2.470826790488573e-78, nan, ValueError)
t("log20620", math.log2, -3.8661709303489064e-165, nan, ValueError)
t("log20621", math.log2, -1.0516496976649986e-182, nan, ValueError)
t("log20622", math.log2, -1.5935458614317996e-255, nan, ValueError)
t("log20623", math.log2, -2.8750977267336654e-293, nan, ValueError)
t("log20624", math.log2, -7.6079466794732585e-296, nan, ValueError)
t("log20625", math.log2, 3.2073253539988545e-307, -1018.1505544209213)
t("log20626", math.log2, 1.674937885472249e-244, -809.80634755783126)
t("log20627", math.log2, 1.0911259044931283e-214, -710.76679472274213)
t("log20628", math.log2, 2.0275372624809709e-154, -510.55719818383272)
t("log20629", math.log2, 7.3926087369631841e-115, -379.13564735312292)
t("log20630", math.log2, 1.3480198206342423e-86, -285.25497445094436)
t("log20631", math.log2, 8.9927384655719947e-83, -272.55127136401637)
t("log20632", math.log2, 3.1452398713597487e-60, -197.66251564496875)
t("log20633", math.log2, 7.0706573215457351e-55, -179.88420087782217)
t("log20634", math.log2, 3.1258285390731669e-49, -161.13023800505653)
t("log20635", math.log2, 8.2253046627829942e-41, -133.15898277355879)
t("log20636", math.log2, 7.8691367397519897e+49, 165.75068202732419)
t("log20637", math.log2, 2.9920561983925013e+64, 214.18453534573757)
t("log20638", math.log2, 4.7827254553946841e+77, 258.04629628445673)
t("log20639", math.log2, 3.1903566496481868e+105, 350.47616767491166)
t("log20640", math.log2, 5.6195082449502419e+113, 377.86831861008250)
t("log20641", math.log2, 9.9625658250651047e+125, 418.55752921228753)
t("log20642", math.log2, 2.7358945220961532e+145, 483.13158636923413)
t("log20643", math.log2, 2.785842387926931e+174, 579.49360214860280)
t("log20644", math.log2, 2.4169172507252751e+193, 642.40529039289652)
t("log20645", math.log2, 3.1689091206395632e+205, 682.65924573798395)
t("log20646", math.log2, 2.535995592365391e+208, 692.30359597460460)
t("log20647", math.log2, 6.2011236566089916e+233, 776.64177576730913)
t("log20648", math.log2, 2.1843274820677632e+253, 841.57499717289647)
t("log20649", math.log2, 8.7493931063474791e+297, 989.74182713073981)
doc="finished"
| 43.034965 | 125 | 0.70819 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11,887 | 0.386318 |
ff6a1f66860f729064e1d9b6a13de4fb71f5b15a | 3,535 | py | Python | db_query/middleware/complex_bundles_parse.py | hbolzan/django-sql-to-rest | 008f2d726fc7b758e45d24eb2e32ae605f194947 | [
"BSD-2-Clause"
] | 1 | 2019-03-06T19:46:18.000Z | 2019-03-06T19:46:18.000Z | db_query/middleware/complex_bundles_parse.py | hbolzan/django-sql-to-rest | 008f2d726fc7b758e45d24eb2e32ae605f194947 | [
"BSD-2-Clause"
] | 2 | 2020-06-05T19:41:06.000Z | 2021-06-10T21:05:53.000Z | db_query/middleware/complex_bundles_parse.py | hbolzan/django-sql-to-rest | 008f2d726fc7b758e45d24eb2e32ae605f194947 | [
"BSD-2-Clause"
] | null | null | null | import requests
import functools
from django.shortcuts import get_object_or_404
from db_query.models import PersistentQuery
def apply_middleware(raw_data, exec_sql_fn, *args, **kw_args):
    """Middleware entry point: adapt the first row of *raw_data* into a bundle.

    Extra positional/keyword arguments are accepted for dispatcher
    compatibility but are not used.
    """
    first_row = raw_data[0]
    return adapt_bundle(first_row, exec_sql_fn)
def adapt_bundle(raw_data, exec_sql_fn):
    """Translate one raw bundle row into the response dictionary."""
    bundled = adapt_bundled_tables(raw_data.get("bundled-tables"), exec_sql_fn)
    return {
        "id": raw_data.get("id"),
        "form-title": raw_data.get("titulo_do_form"),
        "bundled-tables": bundled,
    }
def adapt_bundled_tables(bundled_tables, exec_sql_fn):
    """Adapt every raw bundled-table entry into its response form."""
    return [
        adapt_bundled_table(entry, exec_sql_fn)
        for entry in bundled_tables
    ]
def adapt_bundled_table(table, exec_sql_fn):
    """Build the response dict for a single bundled table.

    The table's "parametros" field is a newline-separated KEY=value blob;
    it is parsed and mapped onto the documented response keys.
    (`exec_sql_fn` is accepted for interface symmetry but not used here.)
    """
    params = params_to_dict(table.get("parametros").split("\n"))

    def split_fields(key):
        # Comma-separated column list -> list of stripped column names.
        return [col.strip() for col in params.get(key, "").split(",")]

    return {
        "tab-title": params.get("TITULO_ABA"),
        "master": params.get("PAINEL_MASTER") == "S",
        "complex-id": params.get("COMPLEXA_ID"),
        "detail": params.get("DETAIL") == "S",
        "related-fields": split_fields("COLUNAS_DETAIL"),
        "master-fields": split_fields("COLUNAS_MASTER"),
        "bundle-actions": parse_bundle_actions(params.get("BUNDLE_ACTIONS", "")),
        "definition": fix_none_results(get_child_definition(params.get("COMPLEXA_ID"))),
    }
def parse_bundle_actions(raw_actions):
    """Parse a `~`-separated list of action specs into a list of dicts.

    Returns [] when raw_actions is missing/unsplittable (AttributeError)
    or when any individual spec is malformed (IndexError).
    """
    try:
        return [parse_bundle_action(spec) for spec in raw_actions.split("~")]
    except (IndexError, AttributeError):
        return []
def parse_bundle_action(raw_action):
    """Parse one action spec into a dict of attributes.

    A spec is a `;`-separated list of `key:value` pairs, e.g.

        caption:Recalcular;type:primary;action:mod.recalc;enabled-states:@view,pending

    A value prefixed with `@` denotes a comma-separated list and is parsed
    into a Python list. A part with no `:` raises IndexError, which callers
    (parse_bundle_actions) catch to discard the whole action list.
    """
    def parse_array_attr(attr):
        # "@a,b,c" -> ["a", "b", "c"]; any other value passes through as-is.
        if attr[0] != "@":
            return attr
        return attr[1:].split(",")

    # Plain dict build replaces the previous functools.reduce that copied the
    # whole accumulator dict per attribute; it also drops a stray debug print.
    attrs = {}
    for part in raw_action.split(";"):
        pieces = part.split(":")
        # pieces[2:] (anything after a second ':') is deliberately ignored,
        # preserving the historical behavior of this parser.
        attrs[pieces[0]] = parse_array_attr(pieces[1])
    return attrs
def get_complex_definition(raw_params):
    """Return None unless DETAIL == "S"; otherwise falls through (see note).

    NOTE(review): this function looks unfinished/dead -- it computes
    complex_id, related_fields and master_fields but never uses or returns
    them, so every call path yields None. Confirm intent (or remove) before
    relying on it.
    """
    if raw_params.get("DETAIL") != "S":
        return None
    complex_id = raw_params.get("COMPLEXA_ID")
    related_fields = [c.strip() for c in raw_params.get("COLUNAS_DETAIL", "").split(",")]
    master_fields = [c.strip() for c in raw_params.get("COLUNAS_MASTER", "").split(",")]
# TODO: do this in a better way
HOST = "http://localhost:8000"  # hard-coded local service host (see TODO above)
PATH = "/api/query/persistent/complex-tables/?id={id}&middleware=complex_forms&depth=1"
BASE_URL = HOST + PATH  # endpoint template consumed by get_child_definition()
def get_child_definition(complex_id):
    """Fetch the child form definition for *complex_id* from the local API.

    Returns the "data" payload of the JSON response on HTTP 200, otherwise
    an empty dict. NOTE(review): the request has no timeout, so a hung
    server will block the caller indefinitely -- worth confirming/fixing.
    """
    response = requests.get(BASE_URL.format(id=complex_id))
    if response.status_code != 200:
        return {}
    return response.json().get("data")
def fix_none_results(data):
    """Recursively replace the literal string "None" with None.

    Dicts and lists are rebuilt with their values fixed; any other value
    is returned unchanged unless it equals the string "None".
    """
    if isinstance(data, dict):
        return {key: fix_none_results(value) for key, value in data.items()}
    if isinstance(data, list):
        return [fix_none_results(item) for item in data]
    return None if data == "None" else data
def params_to_dict(params):
    """Turn "KEY=value" lines into a dict.

    A line without "=" maps the whole line to None; otherwise the value is
    the (stripped) text between the first and second "=" signs, matching
    the original parser's behavior.
    """
    result = {}
    for line in params:
        pieces = line.split("=")
        if len(pieces) < 2:
            result[line] = None
        else:
            result[pieces[0]] = pieces[1].strip()
    return result
# "COMPLEXA_ID": None,
# "TITULO_ABA": "tab-title",
# "PAINEL_MASTER": "master",
# "DETAIL": None,
# "COLUNAS_DETAIL": "related-columns",
# "COLUNAS_MASTER": "master-columns",
# "DETAIL_PAGINA_MASTER": ,
| 30.73913 | 99 | 0.657709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 883 | 0.249788 |
ff6d9a9eb43a919d390d165fa0fb6535151149e1 | 5,160 | py | Python | template/template/scripts/launcher.py | chrismear/renios | 8eed53054fb4361eecb09e3c0eb3e26fd76d0cf0 | [
"CC-BY-3.0"
] | 15 | 2015-02-01T12:31:41.000Z | 2021-08-07T01:17:10.000Z | template/template/scripts/launcher.py | chrismear/renios | 8eed53054fb4361eecb09e3c0eb3e26fd76d0cf0 | [
"CC-BY-3.0"
] | null | null | null | template/template/scripts/launcher.py | chrismear/renios | 8eed53054fb4361eecb09e3c0eb3e26fd76d0cf0 | [
"CC-BY-3.0"
] | 4 | 2015-01-23T00:00:14.000Z | 2019-12-02T15:15:28.000Z | #!/usr/bin/env python
#@PydevCodeAnalysisIgnore
# Copyright 2004-2012 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import warnings
# Functions to be customized by distributors. ################################
# Given the Ren'Py base directory (usually the directory containing
# this file), this is expected to return the path to the common directory.
def path_to_common(renpy_base):
return renpy_base + "/common"
# Given a directory holding a Ren'Py game, this is expected to return
# the path to a directory that will hold save files.
def path_to_saves(gamedir):
import renpy #@UnresolvedImport
if not renpy.config.save_directory:
return gamedir + "/saves"
# Search the path above Ren'Py for a directory named "Ren'Py Data".
# If it exists, then use that for our save directory.
path = renpy.config.renpy_base
while True:
if os.path.isdir(path + "/Ren'Py Data"):
return path + "/Ren'Py Data/" + renpy.config.save_directory
newpath = os.path.dirname(path)
if path == newpath:
break
path = newpath
# Otherwise, put the saves in a platform-specific location.
if renpy.android:
return gamedir + "/saves"
elif renpy.macintosh:
rv = "~/Library/RenPy/" + renpy.config.save_directory
return os.path.expanduser(rv)
elif renpy.windows:
if 'APPDATA' in os.environ:
return os.environ['APPDATA'] + "/RenPy/" + renpy.config.save_directory
else:
rv = "~/RenPy/" + renpy.config.save_directory
return os.path.expanduser(rv)
else:
rv = "~/.renpy/" + renpy.config.save_directory
return os.path.expanduser(rv)
# Returns the path to the Ren'Py base directory (containing common and
# the launcher, usually.)
def path_to_renpy_base():
renpy_base = os.path.dirname(sys.argv[0])
renpy_base = os.environ.get('RENPY_BASE', renpy_base)
renpy_base = os.path.abspath(renpy_base)
return renpy_base
##############################################################################
# The version of the Mac Launcher and py4renpy that we require.
macos_version = (6, 14, 0)
linux_version = (6, 14, 0)
# Doing the version check this way also doubles as an import of ast,
# which helps py2exe et al.
try:
import ast; ast
except:
raise
print "Ren'Py requires at least python 2.6."
sys.exit(0)
android = ("ANDROID_PRIVATE" in os.environ)
# Android requires us to add code to the main module, and to command some
# renderers.
if android:
__main__ = sys.modules["__main__"]
__main__.path_to_renpy_base = path_to_renpy_base
__main__.path_to_common = path_to_common
__main__.path_to_saves = path_to_saves
os.environ["RENPY_RENDERER"] = "gl"
os.environ["RENPY_GL_ENVIRON"] = "limited"
#print "Ren'iOS: forcing renderer settings"
#os.environ["RENPY_RENDERER"] = "gl"
#os.environ["RENPY_GL_ENVIRON"] = "shader_es"
def main():
renpy_base = path_to_renpy_base()
# Add paths.
if os.path.exists(renpy_base + "/module"):
sys.path.append(renpy_base + "/module")
sys.path.append(renpy_base)
# This is looked for by the mac launcher.
if os.path.exists(renpy_base + "/renpy.zip"):
sys.path.append(renpy_base + "/renpy.zip")
# Ignore warnings that happen.
warnings.simplefilter("ignore", DeprecationWarning)
# Start Ren'Py proper.
try:
import renpy.bootstrap
except ImportError:
print >>sys.stderr, "Could not import renpy.bootstrap. Please ensure you decompressed Ren'Py"
print >>sys.stderr, "correctly, preserving the directory structure."
raise
if android:
renpy.linux = False
renpy.android = True
renpy.bootstrap.bootstrap(renpy_base)
#import profile
#profile.run('main()')
#print "Test STDOUT"
#
#import trace
#tracer = trace.Trace(
# ignoredirs=[sys.prefix, sys.exec_prefix],
# trace=1)
#tracer.run('main()')
if __name__ == "__main__":
main()
| 31.463415 | 101 | 0.673256 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,954 | 0.572481 |
ff6fcc0a58ed321ca99604158b2cf643d6bc1d63 | 1,462 | py | Python | do_aramco_sz.py | Meso272/PyTorch-VAE | b1f80082a92c706969a63162ae083b9f7d15d9aa | [
"Apache-2.0"
] | null | null | null | do_aramco_sz.py | Meso272/PyTorch-VAE | b1f80082a92c706969a63162ae083b9f7d15d9aa | [
"Apache-2.0"
] | null | null | null | do_aramco_sz.py | Meso272/PyTorch-VAE | b1f80082a92c706969a63162ae083b9f7d15d9aa | [
"Apache-2.0"
] | 1 | 2022-02-11T23:22:41.000Z | 2022-02-11T23:22:41.000Z | import os
import numpy as np
datafolder="/home/jliu447/lossycompression/aramco"
ebs=[i*1e-4 for i in range(1,10)]+[i*1e-3 for i in range(1,10)]+[i*1e-2 for i in range(1,11)]
cr=np.zeros((29,51),dtype=np.float32)
psnr=np.zeros((29,51),dtype=np.float32)
filelist=os.listdir(datafolder)
filelist=[x for x in filelist if x.split('.')[-1]=='f32']
filelist=sorted(filelist)
for i,eb in enumerate(ebs):
cr[i+1][0]=eb
psnr[i+1][0]=eb
for j in range(1510,1601,10):
y_index=(j-1510)//10+1
if j==1600:
j=1599
cr[0][y_index]=j
psnr[0][y_index]=j
filename="aramco-snapshot-%s.f32" % str(j)
filepath=os.path.join(datafolder,filename)
comm="sz -z -f -i %s -M REL -R %f -3 235 449 449" % (filepath,eb)
os.system(comm)
szpath=filepath+".sz"
comm="sz -x -f -i %s -s %s -3 235 449 449 -a>temp.txt" % (filepath,szpath)
os.system(comm)
with open("temp.txt","r") as f:
lines=f.read().splitlines()
p=eval(lines[4].split(',')[0].split('=')[1])
r=eval(lines[7].split('=')[1])
cr[i+1][y_index]=r
psnr[i+1][y_index]=p
comm="rm -f %s" % szpath
os.system(comm)
comm="rm -f %s" % szpath+".out"
os.system(comm)
os.system("rm -f temp.txt")
np.savetxt("sz_aramco_cr.txt",cr,delimiter='\t')
np.savetxt("sz_aramco_psnr.txt",psnr,delimiter='\t') | 31.782609 | 93 | 0.55472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 279 | 0.190834 |
ff6fd947f9d7fe716c3704fa2463950dd4e84bb7 | 402 | py | Python | setup.py | leandrocorreasantos/rapidapi_criptobot_br | 4102fed414708dae101c1d809c2723f7e27f272a | [
"MIT"
] | null | null | null | setup.py | leandrocorreasantos/rapidapi_criptobot_br | 4102fed414708dae101c1d809c2723f7e27f272a | [
"MIT"
] | null | null | null | setup.py | leandrocorreasantos/rapidapi_criptobot_br | 4102fed414708dae101c1d809c2723f7e27f272a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup
setup(
name='rapidapi-criptobot-br',
version='1.1.0',
packages=['api'],
description='class to access criptobot-br on rapidapi',
author='Leandro Correa dos Santos',
author_email='leandro.admo@gmail.com',
install_requires=['requests', 'json'],
keywords='cripto currency robot rapidapi api exchange technical analysis'
)
| 28.714286 | 77 | 0.706468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.569652 |
ff6fe730533e41ad87f3efe9f8ca1a45fdd6aee6 | 4,521 | py | Python | ch10_rnn/7_3_BiRNN.py | pythonProjectLearn/TensorflowLearning | 7a72ebea060ce0a0db9a00994e4725ec5d84c10a | [
"MIT"
] | null | null | null | ch10_rnn/7_3_BiRNN.py | pythonProjectLearn/TensorflowLearning | 7a72ebea060ce0a0db9a00994e4725ec5d84c10a | [
"MIT"
] | null | null | null | ch10_rnn/7_3_BiRNN.py | pythonProjectLearn/TensorflowLearning | 7a72ebea060ce0a0db9a00994e4725ec5d84c10a | [
"MIT"
] | null | null | null | #%%
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
# Import MINST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.01
max_samples = 400000
batch_size = 128
display_step = 10
# Network Parameters
n_input = 28 # MNIST data input (img shape: 28*28)
n_steps = 28 # timesteps
n_hidden = 256 # hidden layer num of features
n_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])
# Define weights
weights = {
# Hidden layer weights => 2*n_hidden because of foward + backward cells
'out': tf.Variable(tf.random_normal([2*n_hidden, n_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([n_classes]))
}
def BiRNN(x, weights, biases):
# Prepare data shape to match `bidirectional_rnn` function requirements
# Current data input shape: (batch_size, n_steps, n_input)
# Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
# Permuting batch_size and n_steps
x = tf.transpose(x, [1, 0, 2])
# Reshape to (n_steps*batch_size, n_input)
x = tf.reshape(x, [-1, n_input])
# Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
x = tf.split(x, n_steps)
# Define lstm cells with tensorflow
# Forward direction cell
lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
# Backward direction cell
lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
# Get lstm cell output
# try:
outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
dtype=tf.float32)
# except Exception: # Old TensorFlow version only returns outputs not states
# outputs = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
# dtype=tf.float32)
# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out']
pred = BiRNN(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate Model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
step = 1
# Keep training until reach max iterations
while step * batch_size < max_samples:
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Reshape data to get 28 seq of 28 elements
batch_x = batch_x.reshape((batch_size, n_steps, n_input))
# Run optimization op (backprop)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
if step % display_step == 0:
# Calculate batch accuracy
acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
# Calculate batch loss
loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " + \
"{:.5f}".format(acc))
step += 1
print("Optimization Finished!")
# Calculate accuracy for 128 mnist test images
test_len = 10000
test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
test_label = mnist.test.labels[:test_len]
print("Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
| 35.598425 | 90 | 0.668436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,124 | 0.469808 |
ff7195eb6d1c0726820fcae2c2519958295ff036 | 551 | py | Python | server.py | YuliyaSinkevich/idealtrust | ba7a8dc5bfba9ac189318defce0d75335ccb2ba8 | [
"BSD-3-Clause"
] | null | null | null | server.py | YuliyaSinkevich/idealtrust | ba7a8dc5bfba9ac189318defce0d75335ccb2ba8 | [
"BSD-3-Clause"
] | null | null | null | server.py | YuliyaSinkevich/idealtrust | ba7a8dc5bfba9ac189318defce0d75335ccb2ba8 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
from app import app
import argparse
HOST = '127.0.0.1'
PORT = 8080
PROJECT_NAME = 'idealtrust'
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog=PROJECT_NAME, usage='%(prog)s [options]')
parser.add_argument('--port', help='port (default: {0})'.format(PORT), default=PORT)
parser.add_argument('--host', help='host (default: {0})'.format(HOST), default=HOST)
argv = parser.parse_args()
app.run(host=argv.host, port=argv.port, debug=True, use_reloader=False) # debug=True, use_reloader=False
| 34.4375 | 109 | 0.69873 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.299456 |
ff759b9f072d1f9296066ec627c9c855987dabdc | 29 | py | Python | security/open_alchemy/__init__.py | open-alchemy/OpenAlchemyPackage | 8bf0ed62ed7f6c5015f1bf1c4658dc353395fe9b | [
"Apache-2.0"
] | null | null | null | security/open_alchemy/__init__.py | open-alchemy/OpenAlchemyPackage | 8bf0ed62ed7f6c5015f1bf1c4658dc353395fe9b | [
"Apache-2.0"
] | 79 | 2020-11-28T04:02:25.000Z | 2021-01-06T08:52:30.000Z | security/open_alchemy/__init__.py | open-alchemy/Package | 8bf0ed62ed7f6c5015f1bf1c4658dc353395fe9b | [
"Apache-2.0"
] | null | null | null | """Namespace placeholder."""
| 14.5 | 28 | 0.689655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.965517 |
ff768287172a736a44947e427a9380a3153897d7 | 3,133 | py | Python | source/StartWindow.py | FelixTheoret/Ergocycle | 0656e6cb43b535dd2f845420ad71742b1b85e412 | [
"MIT"
] | null | null | null | source/StartWindow.py | FelixTheoret/Ergocycle | 0656e6cb43b535dd2f845420ad71742b1b85e412 | [
"MIT"
] | null | null | null | source/StartWindow.py | FelixTheoret/Ergocycle | 0656e6cb43b535dd2f845420ad71742b1b85e412 | [
"MIT"
] | 2 | 2022-03-17T23:17:56.000Z | 2022-03-23T01:08:58.000Z | """
Created on Wed March 24 11:15:00 2022
@author: Frédérique Leclerc
"""
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QFont, QPixmap
import numpy
import datetime
#from Ergocycle.source.TestingWindow import TestingWindow
from TestingWindow import TestingWindow
from MainWindowStim import MainWindowStim
from PIL import Image
from numpy import *
SCREEN_WIDTH = 1920
SCREEN_HEIGTH = 1080
class StartWindow(QWidget):
def __init__(self):
super(StartWindow, self).__init__()
self.setGeometry(0, 30, SCREEN_WIDTH, SCREEN_HEIGTH)
### 1.1. Initialisation de la fenêtre ###
self.setWindowTitle("Interface usager des stimulations électriques fonctionnelles")
self.setStyleSheet("background-color: white;")
self.button_dictionary = {}
self.initUI()
def initUI(self):
### 1.2. Mettre le logo du laboratoire dans le coin gauche de la fenêtre ###
self.imageS2M = Image.open("img_S2M_JPG.jpg")
self.petite_imageS2M = self.imageS2M.resize((200, 150))
self.petite_imageS2M.save('image_400.jpg')
self.logo_label = QtWidgets.QLabel(self)
self.logo_jpg = QPixmap('image_400.jpg') # Modifier la taille de l'image au besoin
self.logo_label.setPixmap(self.logo_jpg)
self.logo_label.resize(self.logo_jpg.width(), self.logo_jpg.height())
### 1.3. Titre menu des instructions ###
self.title_label = QtWidgets.QLabel(self)
self.title_label.setText("Bienvenue dans l'interface usager des stimulations électriques fonctionnelles")
self.title_label.move(400,75)
self.title_label.setFont(QFont('Arial', 20, weight = QFont.Bold))
self.title_label.adjustSize()
self.question_label = QtWidgets.QLabel(self)
self.question_label.setText("Désirez-vous débuter un entraînement en stimulation ou effectuer des tests?")
self.question_label.move(500,300)
self.question_label.setFont(QFont('Arial', 16))
self.question_label.adjustSize()
### 1.4. Bouton pour débuter l'entraînement ###
self.training_button = QtWidgets.QPushButton(self)
self.training_button.setText(" Débuter un entraînement ")
self.training_button.setStyleSheet("background-color: palegreen; border: 1 solid;")
self.training_button.move(150, 600)
self.training_button.setFont(QFont('Arial', 30, weight = QFont.Bold))
self.training_button.adjustSize()
### 1.5. Bouton pour faire des tests ###
self.test_button = QtWidgets.QPushButton(self)
self.test_button.setText(" Effectuer des tests ")
self.test_button.setStyleSheet("background-color: palegreen; border: 1 solid;")
self.test_button.move(1150, 600)
self.test_button.setFont(QFont('Arial', 30, weight = QFont.Bold))
self.test_button.adjustSize()
def get_test_parameters(self, stim_parameters):
stim_parameters = numpy.array([0,30,200])
#print(self.test_parameters)
# return(self.test_parameters) | 43.513889 | 114 | 0.685605 | 2,717 | 0.863636 | 0 | 0 | 0 | 0 | 0 | 0 | 956 | 0.303878 |
ff76c4694c2928b9db2d2f5242ae179057fe0c52 | 7,318 | py | Python | LinkShopSite/models/fileService.py | mikiec84/linkshop | 72959ceca0003be226edeca6496f915502831596 | [
"Apache-2.0"
] | 6 | 2017-07-18T15:28:33.000Z | 2020-03-03T14:45:45.000Z | LinkShopSite/models/fileService.py | mikiec84/linkshop | 72959ceca0003be226edeca6496f915502831596 | [
"Apache-2.0"
] | null | null | null | LinkShopSite/models/fileService.py | mikiec84/linkshop | 72959ceca0003be226edeca6496f915502831596 | [
"Apache-2.0"
] | 3 | 2017-09-09T00:36:48.000Z | 2020-03-03T14:45:49.000Z | import json
import os
import pymongo
'''
fileService.py
Author: Jeffrey Bigg
'''
mongo_client = pymongo.MongoClient()
#db = {}
'''
initialize
Takes a 'unique_id'entifier and sets up a database in MongoDB
and ensures that that database has collections associated with
the various file types that are stored.
'''
def initialize(unique_id='default'):
#global db
#if unique_id is None:
# unique_id = 'default';
db = mongo_client[unique_id]
if(not 'ontology' in db.collection_names()):
db.create_collection('ontology')
if(not 'abstraction' in db.collection_names()):
db.create_collection('abstraction')
if(not 'commands' in db.collection_names()):
db.create_collection('commands')
if(not 'linkograph' in db.collection_names()):
db.create_collection('linkograph')
return db
'''
FileNotFound
Custom exception class for reporting a file not found exception.
Value should be the name of the file as a string.
'''
class FileNotFound(Exception):
def __init__(self, value):
self.value=value
def __str__(self):
return "File "+self.value+" not found!"
'''
FileTypeNotFound
Custom exception class for reporting a file type not found.
Value should be the name of the file type as a string.
'''
class FileTypeNotFound(Exception):
def __init__(self,value):
self.value=value
def __str__(self):
return "File type "+self.value+" not found!"
'''
FileTypeMismatch
Custom exception class for reporting a conflict in a type given
by the user and a type found by the type detection system.
Given and found should both be the file types as strings.
'''
class FileTypeMismatch(Exception):
def __init__(self,given,found):
self.given = given
self.found = found
def __str__(self):
return "Given "+self.given+", but found "+self.found
'''
loadFile
Looks for a fileName of fileType. Both arguments are strings.
Upon success returns the file, throws exceptions when either
the type or name is not found.
'''
def loadFile(fileName,fileType,unique_id='default'):
db=initialize(unique_id)
if (not fileType in db.collection_names()):
raise FileTypeNotFound(fileType)
if (None==db[fileType].find_one({'name':fileName})):
raise FileNotFound(fileName)
result = db[fileType].find_one({'name':fileName})
result.pop('_id',None) #Removes the MongoDB object id
return result #What will usually be returned is a dictionary
#of the type {'name':[...],'content':[....]}.
#The caller will be responsible for handling
#this format.
'''
fileList
Returns a list of all of the file names for files of
type fileType. Argument is a string. Throws error when
fileType is not found.
'''
def fileList(fileType,unique_id='default'):
db=initialize(unique_id)
if (not fileType in db.collection_names()):
raise FileTypeNotFound(fileType)
results = []
for record in db[fileType].find():
if 'name' in record:
results.append(record['name'])
return results
'''
saveLinko
Helper function for saving a linkograph.
All arguments are strings. Throws an error if the commandsName
file cannot be found.
'''
def saveLinko(fileName,fileContent,commandsName,unique_id):
try:
db=initialize(unique_id)
loadFile(commandsName,'commands',unique_id)
toSave = {}
toSave['content'] = fileContent
toSave['commands'] = commandsName
toSave['name']=fileName
db['linkograph'].insert_one(toSave)
return "File " +fileName + " is saved as type linkograph"
except:
raise FileNotFound(fileName)
'''
saveFile
Takes a file and the content stored in it and saves it in the file store.
If the fileType is unknown or there is a mismatch, and exception is thrown.
If fileType isn't given, the system will try and detect the file type.
Stores it in the mongo database in the format of {'name':fileName,'content':
fileContent}, except in the case of a linkograph, at which point it the commandsName is stored along with it with a key of 'commands'.
'''
def saveFile(fileName,fileContent,fileType=None,commandsName=None,unique_id='default'):
db=initialize(unique_id)
if fileType==None:
fileType=detectFiletype(fileContent)
else:
if not fileType == detectFiletype(fileContent):
raise FileTypeMismatch(fileType,detectFiletype(fileContent))
if fileType == "Unknown file":
raise FileTypeNotFound(fileType)
if fileType == "linkograph":
if commandsName==None:
raise FileNotFound("commands file")
return saveLinko(fileName,fileContent,commandsName,unique_id)
if fileType in db.collection_names():
if not None==db[fileType].find_one({'name':fileName}):
if fileContent==db[fileType].find_one({'name':fileName})['content']:
return "We already have "+fileName
else:
fileName=fileName+"new"
return saveFile(fileName,fileContent,fileType,unique_id=unique_id)
else:
toSave = {}
toSave['name'] = fileName
toSave['content'] = fileContent
db[fileType].insert_one(toSave)
return "File "+fileName+" saved as type "+fileType
raise FileTypeNotFound(fileType)
'''
detectFiletype
Function which takes the contents of a file and tries to detect what sort
of file it is. Currently has support for detecting commands, abstraction
and ontology files.
'''
def detectFiletype(fileContent):
try:
file_parsed = json.loads(fileContent)
if (type(file_parsed) is list):
if(type(file_parsed[0]) is dict):
if("ts" in file_parsed[0] and "cmd" in file_parsed[0]):
return "commands"
else:
return "Unknown file"
if(type(file_parsed[0]) is list):
if(len(file_parsed[0])==0):
return "Unknown file"
for label in file_parsed[0]:
if not type(label) is str:
return "Unknown file"
for tupl in file_parsed[1:]:
if not type(tupl) is list:
return "Unknown file"
if not len(tupl)==3:
return "Unknown file"
return "linkograph"
return "Unknown file"
elif (type(file_parsed) is dict):
if(len(file_parsed.keys())==0):
return "Unknown file"
longest_entry = []
for key in file_parsed:
if not type(file_parsed[key]) is list:
return "Unknown file"
if len(file_parsed[key])>len(longest_entry):
longest_entry=file_parsed[key]
if len(longest_entry)==0:
return "Unknown file"
if type(longest_entry[0]) is str:
return "ontology"
if type(longest_entry[0]) is dict:
if "command" in longest_entry[0]:
return "abstraction"
return "Unknown file"
return "Unknown file"
except:
return "Unknown file"
#initialize()
| 33.568807 | 134 | 0.633506 | 530 | 0.072424 | 0 | 0 | 0 | 0 | 0 | 0 | 2,733 | 0.373463 |
ff79ab339815dd3e4e7d425b1a4605c72add3e26 | 3,577 | py | Python | dlib/tools/python/test/test_vector.py | asm-jaime/facerec | e5e101b9f478168ead179e6b08b5605ea7607da2 | [
"CC0-1.0"
] | 11,719 | 2015-01-03T22:38:57.000Z | 2022-03-30T21:45:04.000Z | tools/python/test/test_vector.py | KiLJ4EdeN/dlib | eb1f08ce6ab3ca6f9d10425d899103de3c0df56c | [
"BSL-1.0"
] | 2,518 | 2015-01-04T04:38:06.000Z | 2022-03-31T11:55:43.000Z | tools/python/test/test_vector.py | KiLJ4EdeN/dlib | eb1f08ce6ab3ca6f9d10425d899103de3c0df56c | [
"BSL-1.0"
] | 3,308 | 2015-01-01T14:34:16.000Z | 2022-03-31T07:20:07.000Z | from dlib import vector, vectors, vectorss, dot
try:
import cPickle as pickle # Use cPickle on Python 2.7
except ImportError:
import pickle
from pytest import raises
def test_vector_empty_init():
v = vector()
assert len(v) == 0
assert v.shape == (0, 1)
assert str(v) == ""
assert repr(v) == "dlib.vector([])"
def test_vector_init_with_number():
v = vector(3)
assert len(v) == 3
assert v.shape == (3, 1)
assert str(v) == "0\n0\n0"
assert repr(v) == "dlib.vector([0, 0, 0])"
def test_vector_set_size():
v = vector(3)
v.set_size(0)
assert len(v) == 0
assert v.shape == (0, 1)
v.resize(10)
assert len(v) == 10
assert v.shape == (10, 1)
for i in range(10):
assert v[i] == 0
def test_vector_init_with_list():
v = vector([1, 2, 3])
assert len(v) == 3
assert v.shape == (3, 1)
assert str(v) == "1\n2\n3"
assert repr(v) == "dlib.vector([1, 2, 3])"
def test_vector_getitem():
v = vector([1, 2, 3])
assert v[0] == 1
assert v[-1] == 3
assert v[1] == v[-2]
def test_vector_slice():
v = vector([1, 2, 3, 4, 5])
v_slice = v[1:4]
assert len(v_slice) == 3
for idx, val in enumerate([2, 3, 4]):
assert v_slice[idx] == val
v_slice = v[-3:-1]
assert len(v_slice) == 2
for idx, val in enumerate([3, 4]):
assert v_slice[idx] == val
v_slice = v[1:-2]
assert len(v_slice) == 2
for idx, val in enumerate([2, 3]):
assert v_slice[idx] == val
def test_vector_invalid_getitem():
v = vector([1, 2, 3])
with raises(IndexError):
v[-4]
with raises(IndexError):
v[3]
def test_vector_init_with_negative_number():
with raises(Exception):
vector(-3)
def test_dot():
v1 = vector([1, 0])
v2 = vector([0, 1])
v3 = vector([-1, 0])
assert dot(v1, v1) == 1
assert dot(v1, v2) == 0
assert dot(v1, v3) == -1
def test_vector_serialization():
v = vector([1, 2, 3])
ser = pickle.dumps(v, 2)
deser = pickle.loads(ser)
assert str(v) == str(deser)
def generate_test_vectors():
vs = vectors()
vs.append(vector([0, 1, 2]))
vs.append(vector([3, 4, 5]))
vs.append(vector([6, 7, 8]))
assert len(vs) == 3
return vs
def generate_test_vectorss():
vss = vectorss()
vss.append(generate_test_vectors())
vss.append(generate_test_vectors())
vss.append(generate_test_vectors())
assert len(vss) == 3
return vss
def test_vectors_serialization():
vs = generate_test_vectors()
ser = pickle.dumps(vs, 2)
deser = pickle.loads(ser)
assert vs == deser
def test_vectors_clear():
vs = generate_test_vectors()
vs.clear()
assert len(vs) == 0
def test_vectors_resize():
vs = vectors()
vs.resize(100)
assert len(vs) == 100
for i in range(100):
assert len(vs[i]) == 0
def test_vectors_extend():
vs = vectors()
vs.extend([vector([1, 2, 3]), vector([4, 5, 6])])
assert len(vs) == 2
def test_vectorss_serialization():
vss = generate_test_vectorss()
ser = pickle.dumps(vss, 2)
deser = pickle.loads(ser)
assert vss == deser
def test_vectorss_clear():
vss = generate_test_vectorss()
vss.clear()
assert len(vss) == 0
def test_vectorss_resize():
vss = vectorss()
vss.resize(100)
assert len(vss) == 100
for i in range(100):
assert len(vss[i]) == 0
def test_vectorss_extend():
vss = vectorss()
vss.extend([generate_test_vectors(), generate_test_vectors()])
assert len(vss) == 2
| 20.918129 | 66 | 0.587364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.031311 |
ff7a35b774cb2375cae41358c19078d6b9f8e8d1 | 49,053 | py | Python | superresolution_stage/models/archs/sftmd.py | xian1234/SRBuildSeg | db16ae2aba6aaa336a0b612446c80b4546b96a1f | [
"MIT"
] | 9 | 2021-04-06T12:46:47.000Z | 2022-03-26T09:10:11.000Z | superresolution_stage/models/archs/sftmd.py | xian1234/SRBuildSeg | db16ae2aba6aaa336a0b612446c80b4546b96a1f | [
"MIT"
] | null | null | null | superresolution_stage/models/archs/sftmd.py | xian1234/SRBuildSeg | db16ae2aba6aaa336a0b612446c80b4546b96a1f | [
"MIT"
] | null | null | null | """ Architecture for SFTMD """
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.archs.arch_util as arch_util
import torch.nn.utils.spectral_norm as spectral_norm
class SFTLayer(nn.Module):
    """Spatial Feature Transform (SFT) layer.

    Predicts an element-wise affine modulation from the concatenation of the
    feature maps and a condition map of matching spatial size, then applies
    ``features * scale + shift``.

    Args:
        nf (int): number of feature channels to modulate.
        n_condition (int): number of channels in the condition map.
    """

    def __init__(self, nf=64, n_condition=10):
        super(SFTLayer, self).__init__()
        in_channels = nf + n_condition
        # TODO: can use shared convolution layers to save computation
        self.mul_conv1 = nn.Conv2d(in_channels, 32, kernel_size=3, stride=1, padding=1)
        self.mul_conv2 = nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1)
        self.add_conv1 = nn.Conv2d(in_channels, 32, kernel_size=3, stride=1, padding=1)
        self.add_conv2 = nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, features, conditions):
        """Modulate ``features`` with scale/shift predicted from ``conditions``."""
        fused = torch.cat((features, conditions), dim=1)
        # Scale branch: sigmoid keeps the multiplicative gate in (0, 1).
        scale = torch.sigmoid(self.mul_conv2(self.lrelu(self.mul_conv1(fused))))
        # Shift branch: unconstrained additive offset.
        shift = self.add_conv2(self.lrelu(self.add_conv1(fused)))
        return features * scale + shift
class SFTLayer_SN(nn.Module):
    """SFT layer whose convolution weights carry spectral normalization.

    Same scale/shift modulation as ``SFTLayer``; every conv weight is
    spectrally normalized and, optionally, the conv biases as well.

    Args:
        nf (int): number of feature channels to modulate.
        n_condition (int): number of channels in the condition map.
        n_power_iterations (int): power iterations used by spectral norm.
        bias_sn (bool): if True, also spectrally normalize the conv biases.
    """

    def __init__(self, nf=64, n_condition=10, n_power_iterations=1, bias_sn=False):
        super(SFTLayer_SN, self).__init__()
        in_channels = nf + n_condition

        def _sn(conv):
            # Wrap a conv with weight spectral normalization.
            return spectral_norm(conv, name='weight',
                                 n_power_iterations=n_power_iterations)

        # TODO: can use shared convolution layers to save computation
        self.mul_conv1 = _sn(nn.Conv2d(in_channels, 32, kernel_size=3, stride=1, padding=1))
        self.mul_conv2 = _sn(nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1))
        self.add_conv1 = _sn(nn.Conv2d(in_channels, 32, kernel_size=3, stride=1, padding=1))
        self.add_conv2 = _sn(nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1))

        if bias_sn:
            # Additionally normalize the biases, in the same order the convs
            # were created above.
            for attr in ('mul_conv1', 'mul_conv2', 'add_conv1', 'add_conv2'):
                setattr(self, attr,
                        spectral_norm(getattr(self, attr), name='bias',
                                      n_power_iterations=n_power_iterations))

        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, features, conditions):
        """Modulate ``features`` with scale/shift predicted from ``conditions``."""
        fused = torch.cat((features, conditions), dim=1)
        scale = torch.sigmoid(self.mul_conv2(self.lrelu(self.mul_conv1(fused))))
        shift = self.add_conv2(self.lrelu(self.add_conv1(fused)))
        return features * scale + shift
class SFTLayer_SN_Norm(nn.Module):
    """Spectral-norm SFT layer with a normalization layer after each conv.

    Same scale/shift modulation as ``SFTLayer``, but every convolution is
    spectrally normalized and followed by batch or instance normalization.

    Args:
        nf (int): number of feature channels to modulate.
        n_condition (int): number of channels in the condition map.
        n_power_iterations (int): power iterations used by spectral norm.
        norm (str): normalization type, ``'batch'`` or ``'instance'``.

    Raises:
        ValueError: if ``norm`` is neither ``'batch'`` nor ``'instance'``.
    """

    def __init__(self, nf=64, n_condition=10, n_power_iterations=1, norm='batch'):
        super(SFTLayer_SN_Norm, self).__init__()
        # TODO: can use shared convolution layers to save computation
        if norm == 'batch':
            norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
        elif norm == 'instance':
            norm_layer = functools.partial(nn.InstanceNorm2d, affine=True,
                                           track_running_stats=True)
        else:
            # Fail fast with a clear message instead of an unbound-name
            # NameError when norm_layer is first used below.
            raise ValueError("norm must be 'batch' or 'instance', got %r" % (norm,))
        self.mul_conv1 = spectral_norm(
            nn.Conv2d(nf + n_condition, 32, kernel_size=3, stride=1, padding=1), name='weight',
            n_power_iterations=n_power_iterations)
        self.mul_norm1 = norm_layer(num_features=32)
        self.mul_conv2 = spectral_norm(nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1),
                                       name='weight', n_power_iterations=n_power_iterations)
        self.mul_norm2 = norm_layer(num_features=nf)
        self.add_conv1 = spectral_norm(
            nn.Conv2d(nf + n_condition, 32, kernel_size=3, stride=1, padding=1), name='weight',
            n_power_iterations=n_power_iterations)
        self.add_norm1 = norm_layer(num_features=32)
        self.add_conv2 = spectral_norm(nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1),
                                       name='weight', n_power_iterations=n_power_iterations)
        self.add_norm2 = norm_layer(num_features=nf)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, features, conditions):
        """Modulate ``features`` with scale/shift predicted from ``conditions``."""
        cat_input = torch.cat((features, conditions), dim=1)
        # Scale branch: conv -> norm -> LReLU -> conv -> norm -> sigmoid.
        mul = torch.sigmoid(
            self.mul_norm2(self.mul_conv2(self.lrelu(self.mul_norm1(self.mul_conv1(cat_input))))))
        # Shift branch mirrors the scale branch without the sigmoid gate.
        add = self.add_norm2(self.add_conv2(self.lrelu(self.add_norm1(self.add_conv1(cat_input)))))
        return features * mul + add
class SFTLayer_SN_ReLU(nn.Module):
    """Spectral-normalized SFT layer using ReLU (instead of LeakyReLU)
    as the hidden activation.

    Predicts per-pixel multiplicative (sigmoid-gated) and additive
    modulations of ``features`` from the concatenation of ``features``
    and ``conditions``.
    """

    def __init__(self, nf=64, n_condition=10, n_power_iterations=1):
        super(SFTLayer_SN_ReLU, self).__init__()

        # TODO: can use shared convolution layers to save computation
        def _sn(conv):
            # Weight-only spectral normalization.
            return spectral_norm(conv, name='weight',
                                 n_power_iterations=n_power_iterations)

        self.mul_conv1 = _sn(nn.Conv2d(nf + n_condition, 32, kernel_size=3, stride=1, padding=1))
        self.mul_conv2 = _sn(nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1))
        self.add_conv1 = _sn(nn.Conv2d(nf + n_condition, 32, kernel_size=3, stride=1, padding=1))
        self.add_conv2 = _sn(nn.Conv2d(32, nf, kernel_size=3, stride=1, padding=1))
        self.relu = nn.ReLU(inplace=True)

    def forward(self, features, conditions):
        """Return ``features * scale + shift`` (scale sigmoid-gated)."""
        x = torch.cat((features, conditions), dim=1)
        scale = torch.sigmoid(self.mul_conv2(self.relu(self.mul_conv1(x))))
        shift = self.add_conv2(self.relu(self.add_conv1(x)))
        return features * scale + shift
class SFTResidualBlock(nn.Module):
    """Residual block built from two SFT layers and two 3x3 convolutions.

    Fix: the inner convolutions previously hard-coded 64 channels, which
    broke any ``nf != 64`` configuration (the SFT layers already honored
    ``nf``); they now use ``nf`` consistently.

    Args:
        nf: feature width of the block.
        n_condition: number of condition channels fed to the SFT layers.
    """

    def __init__(self, nf=64, n_condition=10):
        super(SFTResidualBlock, self).__init__()
        self.sft1 = SFTLayer(nf=nf, n_condition=n_condition)
        self.sft2 = SFTLayer(nf=nf, n_condition=n_condition)
        # nf channels (not a hard-coded 64) so the block works at any width.
        self.conv1 = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        self.conv2 = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        # Small-scale init keeps the residual branch near-identity at start.
        arch_util.initialize_weights([self.conv1, self.conv2], 0.1)

    def forward(self, features, conditions):
        """Return ``features + F(features, conditions)`` (residual SFT)."""
        fea = self.lrelu(self.sft1(features, conditions))
        fea = self.lrelu(self.sft2(self.conv1(fea), conditions))
        fea = self.conv2(fea)
        return features + fea
class SFTResidualBlock_SN(nn.Module):
    """Residual block of two spectral-normalized SFT layers and two
    spectral-normalized 3x3 convolutions.

    Fixes: ``n_power_iterations`` and ``bias_sn`` are now forwarded to the
    inner ``SFTLayer_SN`` modules (previously they were silently dropped,
    so the SFT layers always used the defaults), and the convolutions use
    ``nf`` channels instead of a hard-coded 64.
    """

    def __init__(self, nf=64, n_condition=10, n_power_iterations=1, bias_sn=False):
        super(SFTResidualBlock_SN, self).__init__()
        self.sft1 = SFTLayer_SN(nf=nf, n_condition=n_condition,
                                n_power_iterations=n_power_iterations, bias_sn=bias_sn)
        self.sft2 = SFTLayer_SN(nf=nf, n_condition=n_condition,
                                n_power_iterations=n_power_iterations, bias_sn=bias_sn)
        self.conv1 = spectral_norm(nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True),
                                   name='weight', n_power_iterations=n_power_iterations)
        self.conv2 = spectral_norm(nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True),
                                   name='weight', n_power_iterations=n_power_iterations)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        if bias_sn:
            # Optionally spectral-normalize the conv biases as well.
            self.conv1 = spectral_norm(self.conv1, name='bias',
                                       n_power_iterations=n_power_iterations)
            self.conv2 = spectral_norm(self.conv2, name='bias',
                                       n_power_iterations=n_power_iterations)
        # Small-scale init keeps the residual branch near-identity at start.
        arch_util.initialize_weights([self.conv1, self.conv2], 0.1)

    def forward(self, features, conditions):
        """Return ``features + F(features, conditions)`` (residual SFT)."""
        fea = self.lrelu(self.sft1(features, conditions))
        fea = self.lrelu(self.sft2(self.conv1(fea), conditions))
        fea = self.conv2(fea)
        return features + fea
class SFTResidualBlock_SN_Norm(nn.Module):
    """Residual block of two spectral-normalized SFT layers (with norm)
    and two spectral-normalized, normalized 3x3 convolutions.

    Fixes: convolution channels and ``num_features`` now use ``nf``
    instead of a hard-coded 64, and an unknown ``norm`` value raises a
    clear ValueError instead of a later NameError on ``norm_layer``.
    """

    def __init__(self, nf=64, n_condition=10, n_power_iterations=1, norm='batch'):
        super(SFTResidualBlock_SN_Norm, self).__init__()
        if norm == 'batch':
            norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
        elif norm == 'instance':
            norm_layer = functools.partial(nn.InstanceNorm2d, affine=True,
                                           track_running_stats=True)
        else:
            raise ValueError('norm must be "batch" or "instance", got {!r}'.format(norm))
        self.sft1 = SFTLayer_SN_Norm(nf=nf, n_condition=n_condition,
                                     n_power_iterations=n_power_iterations, norm=norm)
        self.sft2 = SFTLayer_SN_Norm(nf=nf, n_condition=n_condition,
                                     n_power_iterations=n_power_iterations, norm=norm)
        # nf channels (not a hard-coded 64) so the block works at any width.
        self.conv1 = spectral_norm(nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True),
                                   name='weight', n_power_iterations=n_power_iterations)
        self.norm1 = norm_layer(num_features=nf)
        self.conv2 = spectral_norm(nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True),
                                   name='weight', n_power_iterations=n_power_iterations)
        self.norm2 = norm_layer(num_features=nf)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        # Small-scale init keeps the residual branch near-identity at start.
        arch_util.initialize_weights([self.conv1, self.conv2], 0.1)

    def forward(self, features, conditions):
        """Return ``features + F(features, conditions)`` (residual SFT)."""
        fea = self.lrelu(self.sft1(features, conditions))
        fea = self.lrelu(self.sft2(self.norm1(self.conv1(fea)), conditions))
        fea = self.norm2(self.conv2(fea))
        return features + fea
class SFTResidualBlock_SN_ReLU(nn.Module):
    """ReLU variant of the spectral-normalized SFT residual block.

    Fixes: ``n_power_iterations`` is now forwarded to the inner
    ``SFTLayer_SN_ReLU`` modules (it was silently dropped before), and the
    convolutions use ``nf`` channels instead of a hard-coded 64.
    """

    def __init__(self, nf=64, n_condition=10, n_power_iterations=1):
        super(SFTResidualBlock_SN_ReLU, self).__init__()
        self.sft1 = SFTLayer_SN_ReLU(nf=nf, n_condition=n_condition,
                                     n_power_iterations=n_power_iterations)
        self.sft2 = SFTLayer_SN_ReLU(nf=nf, n_condition=n_condition,
                                     n_power_iterations=n_power_iterations)
        self.conv1 = spectral_norm(nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True),
                                   name='weight', n_power_iterations=n_power_iterations)
        self.conv2 = spectral_norm(nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True),
                                   name='weight', n_power_iterations=n_power_iterations)
        self.relu = nn.ReLU(inplace=True)
        # Small-scale init keeps the residual branch near-identity at start.
        arch_util.initialize_weights([self.conv1, self.conv2], 0.1)

    def forward(self, features, conditions):
        """Return ``features + F(features, conditions)`` (residual SFT)."""
        fea = self.relu(self.sft1(features, conditions))
        fea = self.relu(self.sft2(self.conv1(fea), conditions))
        fea = self.conv2(fea)
        return features + fea
class SFTMD(nn.Module):
    """SFTMD super-resolution network: a trunk of SFT residual blocks
    conditioned on a degradation (kernel) code, followed by PixelShuffle
    upsampling.

    Args:
        inc: number of image channels (input and output).
        nf: feature width of the trunk.
        n_condition: length of the per-image kernel code.
        scale: upscaling factor; ``scale == 4`` uses two x2 PixelShuffle
            stages, any other value a single stage.
        n_RB: number of SFT residual blocks in the trunk.
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16):
        super(SFTMD, self).__init__()
        self.n_RB = n_RB
        self.conv_first = nn.Conv2d(inc, nf, 3, stride=1, padding=1)
        # Residual blocks registered as SFTRB0 .. SFTRB{n_RB-1}.
        for i in range(n_RB):
            self.add_module('SFTRB' + str(i), SFTResidualBlock(nf=nf, n_condition=n_condition))
        self.sft_extra = SFTLayer(nf=nf, n_condition=n_condition)
        self.conv_extra = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        if scale == 4:
            # Two x2 stages: nf*4 channels -> nf after each PixelShuffle(2).
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
            )
        else:
            # Single PixelShuffle stage for other scales.
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale),
                nn.LeakyReLU(0.1, inplace=True),
            )
        self.conv_final = nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, input, kernel_code, spatial=False, extra=False):
        """Super-resolve ``input`` conditioned on ``kernel_code``.

        When ``spatial`` is False, ``kernel_code`` is a (B, C) vector that
        is broadcast to every pixel; when True it is used as-is (assumed
        to already be a per-pixel condition map — TODO confirm with
        callers).  ``extra=True`` additionally returns the pre-upscale
        features.
        """
        _, _, H, W = input.size()
        if not spatial:
            Bk, Ck = kernel_code.size()
            kernel_code = kernel_code.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        fea = self.lrelu(self.conv_first(input))
        fea_sft = fea.clone()
        for i in range(self.n_RB):
            fea_sft = self.__getattr__('SFTRB' + str(i))(fea_sft, kernel_code)
        # Long skip connection around the whole residual trunk.
        fea = fea + fea_sft
        fea = self.conv_extra(self.lrelu(self.sft_extra(fea, kernel_code)))
        out = self.conv_final(self.upscale(fea))
        if extra:
            return out, fea
        else:
            return out
class SFTMD_Ushape(nn.Module):
    """U-shaped SFTMD variant: the SFT residual trunk is split into a
    max-pool downsampling half, a middle block, and a bilinear-upsampling
    half, with the kernel code re-broadcast to each resolution.

    NOTE(review): the down/up round trip assumes the input H and W are
    divisible by ``2 ** (n_RB // 2)``; otherwise the bilinear upsampling
    would not restore the original size — confirm with callers.
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16):
        super(SFTMD_Ushape, self).__init__()
        self.n_RB = n_RB
        self.conv_first = nn.Conv2d(inc, nf, 3, stride=1, padding=1)
        # downsample operation
        for i in range(n_RB // 2):
            self.add_module('SFTRB_down' + str(i), SFTResidualBlock(nf=nf, n_condition=n_condition))
        self.mid_layer = SFTResidualBlock(nf=nf, n_condition=n_condition)
        # upsample operation
        for i in range(n_RB // 2):
            self.add_module('SFTRB_up' + str(i), SFTResidualBlock(nf=nf, n_condition=n_condition))
        self.sft_extra = SFTLayer(nf=nf, n_condition=n_condition)
        self.conv_extra = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        if scale == 4:
            # Two x2 stages: nf*4 channels -> nf after each PixelShuffle(2).
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
            )
        else:
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale),
                nn.LeakyReLU(0.1, inplace=True),
            )
        self.conv_final = nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.max_pool = nn.MaxPool2d(2, 2)

    def forward(self, input, kernel_code, spatial=False, extra=False):
        """Super-resolve ``input`` with a U-shaped SFT trunk.

        ``kernel_code`` is a (B, C) vector broadcast anew at every
        resolution level.  NOTE(review): the ``spatial`` flag is currently
        unused (see the commented-out branch below) — confirm intent.
        """
        _, _, H_in, W_in = input.size()
        kernel_code_ori = kernel_code.clone()
        # if not spatial:
        #     Bk, Ck = kernel_code_ori.size()
        #     kernel_code = kernel_code_ori.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        Bk, Ck = kernel_code_ori.size()
        fea = self.lrelu(self.conv_first(input))
        fea_sft = fea.clone()
        # down_scale
        kernel_code_list = []
        for i in range(self.n_RB // 2):
            # Broadcast the code to the resolution of this level.
            H = int(H_in * 2 ** (-1 * i))
            W = int(W_in * 2 ** (-1 * i))
            kernel_code = kernel_code_ori.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
            fea_sft_x2 = self.__getattr__('SFTRB_down' + str(i))(fea_sft, kernel_code)
            fea_sft = self.max_pool(fea_sft_x2)
            # insert(0, ...) stores codes coarse-to-fine, so index i in the
            # up-path below matches the resolution after each x2 upsample.
            kernel_code_list.insert(0, kernel_code)
        H = int(H_in * 2 ** (-1 * (self.n_RB // 2)))
        W = int(W_in * 2 ** (-1 * (self.n_RB // 2)))
        kernel_code = kernel_code_ori.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        fea_sft = self.mid_layer(fea_sft, kernel_code)
        #up_scale
        for i in range(self.n_RB // 2):
            fea_sft = F.interpolate(fea_sft, scale_factor=2, mode='bilinear', align_corners=False)
            fea_sft = self.__getattr__('SFTRB_up' + str(i))(fea_sft, kernel_code_list[i])
        # Full-resolution code for the final SFT layer.
        kernel_code = kernel_code_list[self.n_RB // 2 - 1]
        # Long skip connection around the whole U-shaped trunk.
        fea = fea + fea_sft
        fea = self.conv_extra(self.lrelu(self.sft_extra(fea, kernel_code)))
        out = self.conv_final(self.upscale(fea))
        if extra:
            return out, fea
        else:
            return out
class SFTMD_Noise_JPEG(nn.Module):
    """SFTMD variant conditioned on kernel code, noise level and JPEG
    quality, concatenated into a single condition tensor.

    Fix: the concatenation of the three condition inputs used to happen
    only inside the ``not spatial`` branch, so calling ``forward`` with
    ``spatial=True`` raised ``NameError: codes``.  The concatenation is
    now unconditional and only the (B, C) -> (B, C, H, W) broadcast is
    gated on ``spatial``.
    """

    def __init__(self, inc=3, nf=64, n_condition=12, scale=4, n_RB=16):
        super(SFTMD_Noise_JPEG, self).__init__()
        self.n_RB = n_RB
        self.conv_first = nn.Conv2d(inc, nf, 3, stride=1, padding=1)
        # Residual blocks registered as SFTRB0 .. SFTRB{n_RB-1}.
        for i in range(n_RB):
            self.add_module('SFTRB' + str(i), SFTResidualBlock(nf=nf, n_condition=n_condition))
        self.sft_extra = SFTLayer(nf=nf, n_condition=n_condition)
        self.conv_extra = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        if scale == 4:
            # Two x2 stages: nf*4 channels -> nf after each PixelShuffle(2).
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
            )
        else:
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale),
                nn.LeakyReLU(0.1, inplace=True),
            )
        self.conv_final = nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, input, kernel_code, noise, jpeg, spatial=False, extra=False):
        """Super-resolve ``input`` conditioned on all degradation codes.

        ``kernel_code``, ``noise`` and ``jpeg`` are concatenated along the
        channel dimension; with ``spatial=False`` the concatenated (B, C)
        vector is broadcast to every pixel, with ``spatial=True`` the
        inputs are used as-is.  ``extra=True`` additionally returns the
        pre-upscale features.
        """
        _, _, H, W = input.size()
        # Merge all degradation descriptors into one condition tensor.
        codes = torch.cat((kernel_code, noise, jpeg), dim=1)
        if not spatial:
            Bk, Ck = codes.size()
            codes = codes.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        fea = self.lrelu(self.conv_first(input))
        fea_sft = fea.clone()
        for i in range(self.n_RB):
            fea_sft = self.__getattr__('SFTRB' + str(i))(fea_sft, codes)
        # Long skip connection around the whole residual trunk.
        fea = fea + fea_sft
        fea = self.conv_extra(self.lrelu(self.sft_extra(fea, codes)))
        out = self.conv_final(self.upscale(fea))
        if extra:
            return out, fea
        else:
            return out
class SFTMD_SN_Noise_JPEG(nn.Module):
    """Spectral-normalized SFTMD conditioned on kernel code, noise level
    and JPEG quality (concatenated into one condition tensor).

    ``norm`` selects plain SN blocks (``None``) or SN+norm blocks
    (``'batch'``/``'instance'``); ``bias_sn`` additionally
    spectral-normalizes conv biases in the head/tail convolutions.

    NOTE(review): in the ``norm is None`` branch, ``bias_sn`` is passed as
    a hard-coded False and ``n_power_iterations`` is not forwarded to the
    SFT residual blocks / extra SFT layer — possibly intentional (bias SN
    only on the plain convolutions), but confirm.
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16, n_power_iterations=1,
                 norm=None, bias_sn=False):
        super(SFTMD_SN_Noise_JPEG, self).__init__()
        self.n_RB = n_RB
        if bias_sn:
            print('Bias SN')
        self.conv_first = spectral_norm(nn.Conv2d(inc, nf, 3, stride=1, padding=1), name='weight',
                                        n_power_iterations=n_power_iterations)
        if bias_sn:
            self.conv_first = spectral_norm(self.conv_first, name='bias',
                                            n_power_iterations=n_power_iterations)
        # Residual blocks registered as SFTRB0 .. SFTRB{n_RB-1}.
        for i in range(n_RB):
            if norm is None:
                self.add_module('SFTRB' + str(i),
                                SFTResidualBlock_SN(nf=nf, n_condition=n_condition, bias_sn=False))
            else:
                self.add_module(
                    'SFTRB' + str(i),
                    SFTResidualBlock_SN_Norm(nf=nf, n_condition=n_condition,
                                             n_power_iterations=n_power_iterations, norm=norm))
        if norm is None:
            self.sft_extra = SFTLayer_SN(nf=nf, n_condition=n_condition, bias_sn=False)
        else:
            self.sft_extra = SFTLayer_SN_Norm(nf=nf, n_condition=n_condition,
                                              n_power_iterations=n_power_iterations, norm=norm)
        self.conv_extra = spectral_norm(
            nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
            n_power_iterations=n_power_iterations)
        if bias_sn:
            self.conv_extra = spectral_norm(self.conv_extra, name='bias',
                                            n_power_iterations=n_power_iterations)
        if scale == 4:
            # Two x2 PixelShuffle stages; the bias_sn variant wraps each
            # conv with weight SN and then bias SN.
            if bias_sn:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        spectral_norm(
                            nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1,
                                      bias=True), name='weight',
                            n_power_iterations=n_power_iterations), name='bias',
                        n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                    spectral_norm(
                        spectral_norm(
                            nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1,
                                      bias=True), name='weight',
                            n_power_iterations=n_power_iterations), name='bias',
                        n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                )
            else:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                        name='weight', n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                    spectral_norm(
                        nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                        name='weight', n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                )
        else:
            # Single PixelShuffle stage for other scales.
            if bias_sn:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        spectral_norm(
                            nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1,
                                      bias=True), name='weight',
                            n_power_iterations=n_power_iterations), name='bias',
                        n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale),
                    nn.LeakyReLU(0.1, inplace=True),
                )
            else:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                        name='weight', n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale),
                    nn.LeakyReLU(0.1, inplace=True),
                )
        self.conv_final = spectral_norm(
            nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
            n_power_iterations=n_power_iterations)
        if bias_sn:
            self.conv_final = spectral_norm(self.conv_final, name='bias',
                                            n_power_iterations=n_power_iterations)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, input, kernel_code, noise, jpeg, spatial=False, extra=False):
        """Super-resolve ``input`` conditioned on all degradation codes.

        NOTE(review): ``codes`` is only assigned in the ``not spatial``
        branch, so ``spatial=True`` would raise NameError below — confirm
        whether the spatial path is ever used.
        """
        _, _, H, W = input.size()
        if not spatial:
            # Merge degradation descriptors, then broadcast per pixel.
            codes = torch.cat((kernel_code, noise, jpeg), dim=1)
            Bk, Ck = codes.size()
            codes = codes.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        fea = self.lrelu(self.conv_first(input))
        fea_sft = fea.clone()
        for i in range(self.n_RB):
            fea_sft = self.__getattr__('SFTRB' + str(i))(fea_sft, codes)
        # Long skip connection around the whole residual trunk.
        fea = fea + fea_sft
        fea = self.conv_extra(self.lrelu(self.sft_extra(fea, codes)))
        out = self.conv_final(self.upscale(fea))
        if extra:
            return out, fea
        else:
            return out
class SFTMD_SN(nn.Module):
    """Spectral-normalized SFTMD conditioned on a kernel code.

    ``norm`` selects plain SN blocks (``None``) or SN+norm blocks
    (``'batch'``/``'instance'``); ``bias_sn`` additionally
    spectral-normalizes conv biases in the head/tail convolutions.

    NOTE(review): in the ``norm is None`` branch, ``bias_sn`` is passed as
    a hard-coded False and ``n_power_iterations`` is not forwarded to the
    SFT residual blocks / extra SFT layer — possibly intentional (bias SN
    only on the plain convolutions), but confirm.
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16, n_power_iterations=1,
                 norm=None, bias_sn=False):
        super(SFTMD_SN, self).__init__()
        self.n_RB = n_RB
        if bias_sn:
            print('Bias SN')
        self.conv_first = spectral_norm(nn.Conv2d(inc, nf, 3, stride=1, padding=1), name='weight',
                                        n_power_iterations=n_power_iterations)
        if bias_sn:
            self.conv_first = spectral_norm(self.conv_first, name='bias',
                                            n_power_iterations=n_power_iterations)
        # Residual blocks registered as SFTRB0 .. SFTRB{n_RB-1}.
        for i in range(n_RB):
            if norm is None:
                self.add_module('SFTRB' + str(i), SFTResidualBlock_SN(nf=nf,
                                                                      n_condition=n_condition, bias_sn=False))
            else:
                self.add_module(
                    'SFTRB' + str(i),
                    SFTResidualBlock_SN_Norm(nf=nf, n_condition=n_condition,
                                             n_power_iterations=n_power_iterations, norm=norm))
        if norm is None:
            self.sft_extra = SFTLayer_SN(nf=nf, n_condition=n_condition, bias_sn=False)
        else:
            self.sft_extra = SFTLayer_SN_Norm(nf=nf, n_condition=n_condition,
                                              n_power_iterations=n_power_iterations, norm=norm)
        self.conv_extra = spectral_norm(
            nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
            n_power_iterations=n_power_iterations)
        if bias_sn:
            self.conv_extra = spectral_norm(self.conv_extra, name='bias',
                                            n_power_iterations=n_power_iterations)
        if scale == 4:
            # Two x2 PixelShuffle stages; the bias_sn variant wraps each
            # conv with weight SN and then bias SN.
            if bias_sn:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        spectral_norm(
                            nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1,
                                      bias=True), name='weight',
                            n_power_iterations=n_power_iterations), name='bias',
                        n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                    spectral_norm(
                        spectral_norm(
                            nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1,
                                      bias=True), name='weight',
                            n_power_iterations=n_power_iterations), name='bias',
                        n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                )
            else:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                        name='weight', n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                    spectral_norm(
                        nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                        name='weight', n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale // 2),
                    nn.LeakyReLU(0.1, inplace=True),
                )
        else:
            # Single PixelShuffle stage for other scales.
            if bias_sn:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        spectral_norm(
                            nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1,
                                      bias=True), name='weight',
                            n_power_iterations=n_power_iterations), name='bias',
                        n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale),
                    nn.LeakyReLU(0.1, inplace=True),
                )
            else:
                self.upscale = nn.Sequential(
                    spectral_norm(
                        nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                        name='weight', n_power_iterations=n_power_iterations),
                    nn.PixelShuffle(scale),
                    nn.LeakyReLU(0.1, inplace=True),
                )
        self.conv_final = spectral_norm(
            nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
            n_power_iterations=n_power_iterations)
        if bias_sn:
            self.conv_final = spectral_norm(self.conv_final, name='bias',
                                            n_power_iterations=n_power_iterations)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, input, kernel_code, spatial=False, extra=False):
        """Super-resolve ``input`` conditioned on ``kernel_code``.

        With ``spatial=False`` the (B, C) code is broadcast per pixel;
        with ``spatial=True`` it is used as-is.  ``extra=True``
        additionally returns the pre-upscale features.
        """
        _, _, H, W = input.size()
        if not spatial:
            Bk, Ck = kernel_code.size()
            kernel_code = kernel_code.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        fea = self.lrelu(self.conv_first(input))
        fea_sft = fea.clone()
        for i in range(self.n_RB):
            fea_sft = self.__getattr__('SFTRB' + str(i))(fea_sft, kernel_code)
        # Long skip connection around the whole residual trunk.
        fea = fea + fea_sft
        fea = self.conv_extra(self.lrelu(self.sft_extra(fea, kernel_code)))
        out = self.conv_final(self.upscale(fea))
        if extra:
            return out, fea
        else:
            return out
class SFTMD_SN_Dropout(nn.Module):
    """Spectral-normalized SFTMD with a channel-dropout bottleneck between
    two extra convolutions (nf -> 2*nf -> dropout -> nf) before upscaling.

    ``dropSN`` controls whether those two extra convolutions are also
    spectral-normalized.

    NOTE(review): ``n_power_iterations`` is not forwarded to
    ``SFTResidualBlock_SN`` / ``SFTLayer_SN`` in the ``norm is None``
    branch (defaults apply) — confirm intent.
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16, n_power_iterations=1,
                 norm=None, dropSN=True):
        super(SFTMD_SN_Dropout, self).__init__()
        self.n_RB = n_RB
        self.conv_first = spectral_norm(nn.Conv2d(inc, nf, 3, stride=1, padding=1), name='weight',
                                        n_power_iterations=n_power_iterations)
        # Residual blocks registered as SFTRB0 .. SFTRB{n_RB-1}.
        for i in range(n_RB):
            if norm is None:
                self.add_module('SFTRB' + str(i), SFTResidualBlock_SN(nf=nf,
                                                                      n_condition=n_condition))
            else:
                self.add_module(
                    'SFTRB' + str(i),
                    SFTResidualBlock_SN_Norm(nf=nf, n_condition=n_condition,
                                             n_power_iterations=n_power_iterations, norm=norm))
        if norm is None:
            self.sft_extra = SFTLayer_SN(nf=nf, n_condition=n_condition)
        else:
            self.sft_extra = SFTLayer_SN_Norm(nf=nf, n_condition=n_condition,
                                              n_power_iterations=n_power_iterations, norm=norm)
        if dropSN:
            # Dropout bottleneck convs with spectral normalization.
            self.conv_extra = spectral_norm(
                nn.Conv2d(nf, nf * 2, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
                n_power_iterations=n_power_iterations)
            self.conv_extra2 = spectral_norm(
                nn.Conv2d(nf * 2, nf, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
                n_power_iterations=n_power_iterations)
        else:
            self.conv_extra = nn.Conv2d(nf, nf * 2, kernel_size=3, stride=1, padding=1, bias=True)
            self.conv_extra2 = nn.Conv2d(nf * 2, nf, kernel_size=3, stride=1, padding=1, bias=True)
        # Drops whole channels between the two extra convolutions.
        self.dropout = nn.Dropout2d(p=0.5, inplace=False)
        if scale == 4:
            # Two x2 stages: nf*4 channels -> nf after each PixelShuffle(2).
            self.upscale = nn.Sequential(
                spectral_norm(
                    nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                    name='weight', n_power_iterations=n_power_iterations),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
                spectral_norm(
                    nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                    name='weight', n_power_iterations=n_power_iterations),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
            )
        else:
            self.upscale = nn.Sequential(
                spectral_norm(
                    nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                    name='weight', n_power_iterations=n_power_iterations),
                nn.PixelShuffle(scale),
                nn.LeakyReLU(0.1, inplace=True),
            )
        self.conv_final = spectral_norm(
            nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
            n_power_iterations=n_power_iterations)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, input, kernel_code):
        """Super-resolve ``input``; the (B, C) ``kernel_code`` is broadcast
        to every pixel (no spatial/extra variants in this class)."""
        _, _, H, W = input.size()
        Bk, Ck = kernel_code.size()
        kernel_code = kernel_code.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        fea = self.lrelu(self.conv_first(input))
        fea_sft = fea.clone()
        for i in range(self.n_RB):
            fea_sft = self.__getattr__('SFTRB' + str(i))(fea_sft, kernel_code)
        # Long skip connection around the whole residual trunk.
        fea = fea + fea_sft
        fea = self.conv_extra(self.lrelu(self.sft_extra(fea, kernel_code)))
        fea = self.dropout(fea)
        fea = self.conv_extra2(fea)
        out = self.conv_final(self.upscale(fea))
        return out
class SFTMD_SN_ReLU(nn.Module):
    """Spectral-normalized SFTMD using ReLU activations throughout
    (trunk blocks, SFT layers and upscale stages).

    ``n_power_iterations`` is fixed to 1 here.  NOTE(review): it is not
    forwarded to ``SFTResidualBlock_SN_ReLU`` / ``SFTLayer_SN_ReLU``
    (their defaults apply) — harmless while both defaults are 1, but
    confirm.
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16):
        super(SFTMD_SN_ReLU, self).__init__()
        self.n_RB = n_RB
        n_power_iterations = 1
        self.conv_first = spectral_norm(nn.Conv2d(inc, nf, 3, stride=1, padding=1), name='weight',
                                        n_power_iterations=n_power_iterations)
        # Residual blocks registered as SFTRB0 .. SFTRB{n_RB-1}.
        for i in range(n_RB):
            self.add_module('SFTRB' + str(i),
                            SFTResidualBlock_SN_ReLU(nf=nf, n_condition=n_condition))
        self.sft_extra = SFTLayer_SN_ReLU(nf=nf, n_condition=n_condition)
        self.conv_extra = spectral_norm(
            nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
            n_power_iterations=n_power_iterations)
        if scale == 4:
            # Two x2 stages: nf*4 channels -> nf after each PixelShuffle(2).
            self.upscale = nn.Sequential(
                spectral_norm(
                    nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                    name='weight', n_power_iterations=n_power_iterations),
                nn.PixelShuffle(scale // 2),
                nn.ReLU(inplace=True),
                spectral_norm(
                    nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                    name='weight', n_power_iterations=n_power_iterations),
                nn.PixelShuffle(scale // 2),
                nn.ReLU(inplace=True),
            )
        else:
            self.upscale = nn.Sequential(
                spectral_norm(
                    nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                    name='weight', n_power_iterations=n_power_iterations),
                nn.PixelShuffle(scale),
                nn.ReLU(inplace=True),
            )
        self.conv_final = spectral_norm(
            nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True), name='weight',
            n_power_iterations=n_power_iterations)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input, kernel_code):
        """Super-resolve ``input``; the (B, C) ``kernel_code`` is broadcast
        to every pixel."""
        _, _, H, W = input.size()
        Bk, Ck = kernel_code.size()
        kernel_code = kernel_code.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        fea = self.relu(self.conv_first(input))
        fea_sft = fea.clone()
        for i in range(self.n_RB):
            fea_sft = self.__getattr__('SFTRB' + str(i))(fea_sft, kernel_code)
        # Long skip connection around the whole residual trunk.
        fea = fea + fea_sft
        fea = self.conv_extra(self.relu(self.sft_extra(fea, kernel_code)))
        out = self.conv_final(self.upscale(fea))
        return out
class SFTMD_concat(nn.Module):
    """Baseline SFTMD variant that conditions on the kernel code by
    channel-wise concatenation with the input image (no SFT modulation;
    the trunk uses plain residual blocks).
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16):
        super(SFTMD_concat, self).__init__()
        self.n_RB = n_RB
        # The kernel code map is concatenated to the RGB input, hence +3.
        self.conv_first = nn.Conv2d(n_condition + 3, nf, 3, stride=1, padding=1)
        for i in range(n_RB):
            self.add_module('SFTRB' + str(i), arch_util.ResidualBlock_noBN(nf=nf))
        self.conv_extra = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        if scale == 4:
            # x4: two successive x2 PixelShuffle stages.
            stages = []
            for _ in range(2):
                stages.append(nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1,
                                        bias=True))
                stages.append(nn.PixelShuffle(scale // 2))
                stages.append(nn.LeakyReLU(0.1, inplace=True))
            self.upscale = nn.Sequential(*stages)
        else:
            # Single PixelShuffle stage for other scales.
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale),
                nn.LeakyReLU(0.1, inplace=True),
            )
        self.conv_final = nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, input, kernel_code):
        """Super-resolve ``input`` given a (B, C) ``kernel_code``."""
        _, _, H, W = input.size()
        Bk, Ck = kernel_code.size()
        # Broadcast the (B, C) code to a (B, C, H, W) map.
        code_map = kernel_code.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        x = self.lrelu(self.conv_first(torch.cat((input, code_map), 1)))
        res = x.clone()
        for i in range(self.n_RB):
            res = getattr(self, 'SFTRB' + str(i))(res)
        # Long skip connection around the residual trunk.
        x = x + res
        x = self.conv_extra(self.lrelu(x))
        return self.conv_final(self.upscale(x))
class SFTMD_kernel(nn.Module):
    """SFTMD variant that turns the kernel code into dynamic, separable
    convolution weights (a k x 1 then 1 x k conv) applied to the input
    before a plain residual trunk.

    NOTE(review): ``conv1_weight.view((10, 3, self.k, 1))`` collapses the
    batch dimension, so the forward pass appears to assume batch size 1
    (the view would fail for B > 1) — confirm with callers.
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16, k=11):
        super(SFTMD_kernel, self).__init__()
        self.n_RB = n_RB
        # MLP that maps the (flattened) kernel code to a shared embedding...
        self.fc_share_1 = nn.Linear(32, 100)
        self.fc_share_2 = nn.Linear(100, 200)
        self.fc_share_3 = nn.Linear(200, 400)
        self.fc_share_4 = nn.Linear(400, 200)
        # ...then to the two dynamic conv weights (k x 1 and 1 x k).
        self.fc_share_conv1_1 = nn.Linear(200, 200)
        self.fc_share_conv1_2 = nn.Linear(200, 10 * 3 * k * 1)
        self.fc_share_conv2_1 = nn.Linear(200, 200)
        self.fc_share_conv2_2 = nn.Linear(200, 10 * 10 * k * 1)
        # Trunk input is the 10-channel output of the dynamic convs.
        self.conv_first = nn.Conv2d(10, nf, 3, stride=1, padding=1)
        for i in range(n_RB):
            self.add_module('SFTRB' + str(i), arch_util.ResidualBlock_noBN(nf=nf))
        self.conv_extra = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        if scale == 4:
            # Two x2 stages: nf*4 channels -> nf after each PixelShuffle(2).
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
            )
        else:
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale),
                nn.LeakyReLU(0.1, inplace=True),
            )
        self.conv_final = nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        # Padding that keeps the dynamic k-tap convolutions size-preserving.
        self.pad = (k - 1) // 2
        self.k = k

    def forward(self, input, kernel_code):
        """Apply code-generated separable convs, then the SR trunk."""
        B, _, H, W = input.size()
        # generate conv code
        kernel_code = kernel_code.view((B, -1))
        kernel_code = self.lrelu(self.fc_share_1(kernel_code))
        kernel_code = self.lrelu(self.fc_share_2(kernel_code))
        kernel_code = self.lrelu(self.fc_share_3(kernel_code))
        kernel_code = self.lrelu(self.fc_share_4(kernel_code))
        conv1_weight = self.fc_share_conv1_2(self.lrelu(self.fc_share_conv1_1(kernel_code)))
        conv2_weight = self.fc_share_conv2_2(self.lrelu(self.fc_share_conv2_1(kernel_code)))
        # Separable dynamic convolution: vertical (k x 1) then horizontal
        # (1 x k); see the batch-size note in the class docstring.
        conv1_weight = conv1_weight.view((10, 3, self.k, 1))
        conv2_weight = conv2_weight.view((10, 10, 1, self.k))
        fea = self.lrelu(F.conv2d(input, conv1_weight, padding=(self.pad, 0)))
        fea = self.lrelu(F.conv2d(fea, conv2_weight, padding=(0, self.pad)))
        fea = self.lrelu(self.conv_first(fea))
        fea_sft = fea.clone()
        for i in range(self.n_RB):
            fea_sft = self.__getattr__('SFTRB' + str(i))(fea_sft)
        # Long skip connection around the residual trunk.
        fea = fea + fea_sft
        fea = self.conv_extra(self.lrelu(fea))
        out = self.conv_final(self.upscale(fea))
        return out
class SFTMD_coderefine(nn.Module):
    """SFTMD variant that first refines the kernel code through a small
    MLP (fc1-fc4) before broadcasting it to the SFT trunk.
    """

    def __init__(self, inc=3, nf=64, n_condition=10, scale=4, n_RB=16):
        super(SFTMD_coderefine, self).__init__()
        self.n_RB = n_RB
        self.conv_first = nn.Conv2d(inc, nf, 3, stride=1, padding=1)
        # Residual blocks registered as SFTRB0 .. SFTRB{n_RB-1}.
        for i in range(n_RB):
            self.add_module('SFTRB' + str(i), SFTResidualBlock(nf=nf, n_condition=n_condition))
        self.sft_extra = SFTLayer(nf=nf, n_condition=n_condition)
        self.conv_extra = nn.Conv2d(nf, nf, kernel_size=3, stride=1, padding=1, bias=True)
        if scale == 4:
            # Two x2 stages: nf*4 channels -> nf after each PixelShuffle(2).
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
                nn.Conv2d(nf, nf * scale, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale // 2),
                nn.LeakyReLU(0.1, inplace=True),
            )
        else:
            self.upscale = nn.Sequential(
                nn.Conv2d(nf, nf * scale**2, kernel_size=3, stride=1, padding=1, bias=True),
                nn.PixelShuffle(scale),
                nn.LeakyReLU(0.1, inplace=True),
            )
        self.conv_final = nn.Conv2d(nf, inc, kernel_size=3, stride=1, padding=1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        # Code-refinement MLP: n_condition -> 400 -> 400 -> 200 -> n_condition.
        self.fc1 = nn.Linear(n_condition, 400)
        self.fc2 = nn.Linear(400, 400)
        self.fc3 = nn.Linear(400, 200)
        self.fc4 = nn.Linear(200, n_condition)

    def forward(self, input, kernel_code):
        """Refine the (B, C) ``kernel_code`` with the MLP, broadcast it
        per pixel, then super-resolve ``input`` through the SFT trunk."""
        _, _, H, W = input.size()
        kernel_code = self.lrelu(self.fc1(kernel_code))
        kernel_code = self.lrelu(self.fc2(kernel_code))
        kernel_code = self.lrelu(self.fc3(kernel_code))
        kernel_code = self.fc4(kernel_code)
        Bk, Ck = kernel_code.size()
        kernel_code = kernel_code.view((Bk, Ck, 1, 1)).expand((Bk, Ck, H, W))
        fea = self.lrelu(self.conv_first(input))
        fea_sft = fea.clone()
        for i in range(self.n_RB):
            fea_sft = self.__getattr__('SFTRB' + str(i))(fea_sft, kernel_code)
        # Long skip connection around the whole residual trunk.
        fea = fea + fea_sft
        fea = self.conv_extra(self.lrelu(self.sft_extra(fea, kernel_code)))
        out = self.conv_final(self.upscale(fea))
        return out
class Corrector(nn.Module):
    """Estimate a corrected degradation code from an image and the current code.

    The image is encoded by a small CNN; when ``conv_merge`` is True the
    densely-encoded code is broadcast over the feature map and fused via 1x1
    convolutions.  The pooled features are projected back to ``n_condition``
    dimensions and added as a residual to the *input* code.
    """
    def __init__(self, inc=3, n_condition=10, nf=64, conv_merge=True, use_bias=True):
        # inc: input image channels; n_condition: degradation-code length;
        # nf: base feature width; conv_merge: fuse code into conv features.
        super(Corrector, self).__init__()
        # Image encoder: two stride-2 stages downsample by 4x overall.
        self.ConvNet = nn.Sequential(*[
            nn.Conv2d(inc, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(nf, nf, kernel_size=5, stride=2, padding=2, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(nf, nf, kernel_size=5, stride=2, padding=2, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
            nn.LeakyReLU(0.1, True),
        ])
        # MLP that lifts the code to nf dimensions for fusion.
        self.code_dense = nn.Sequential(*[
            nn.Linear(n_condition, nf, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Linear(nf, nf, bias=use_bias),
        ])
        if conv_merge:
            # 1x1 convs that fuse concatenated [features, code] back to nf channels.
            self.global_dense = nn.Sequential(*[
                nn.Conv2d(nf * 2, nf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
                nn.LeakyReLU(0.1, True),
                nn.Conv2d(nf * 2, nf, kernel_size=1, stride=1, padding=0, bias=use_bias),
                nn.LeakyReLU(0.1, True),
                nn.Conv2d(nf, nf, kernel_size=1, stride=1, padding=0, bias=use_bias),
                nn.LeakyReLU(0.1, True),
            ])
        self.nf = nf
        self.conv_merge = conv_merge
        self.fc1 = nn.Linear(nf, nf, bias=True)
        self.fc2 = nn.Linear(nf, nf, bias=True)
        self.fc3 = nn.Linear(nf, n_condition, bias=True)
        self.globalpooling = nn.AdaptiveAvgPool2d((1, 1))
        self.lrelu = nn.LeakyReLU(0.1, True)
    def forward(self, input, code):
        # input: image tensor (B, inc, H, W); code: (B, n_condition).
        conv_input = self.ConvNet(input)
        B, C_f, H_f, W_f = conv_input.size()  # LR_size
        code_ori = self.code_dense(code)
        if self.conv_merge:
            # Broadcast the encoded code over the spatial map and fuse.
            conv_code = code_ori.view((B, self.nf, 1, 1)).expand((B, self.nf, H_f, W_f))
            conv_mid = torch.cat((conv_input, conv_code), dim=1)
            conv_input = self.global_dense(conv_mid)
        fea = self.globalpooling(conv_input).view(conv_input.size(0), -1)
        fea = self.lrelu(self.fc1(fea))
        fea = self.lrelu(self.fc2(fea))
        out = self.fc3(fea)
        # Residual correction of the raw input code (contrast CorrectorV2,
        # which adds the densely-encoded code instead).
        return out + code
class CorrectorV2(nn.Module):
    """Variant of :class:`Corrector`.

    Differences from ``Corrector``: ``conv_merge`` defaults to False, and the
    output residual is added to the *densely encoded* code (``code_ori``)
    rather than the raw input code.
    """
    def __init__(self, inc=3, n_condition=10, nf=64, conv_merge=False, use_bias=True):
        super(CorrectorV2, self).__init__()
        # Image encoder: two stride-2 stages downsample by 4x overall.
        self.ConvNet = nn.Sequential(*[
            nn.Conv2d(inc, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(nf, nf, kernel_size=5, stride=2, padding=2, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(nf, nf, kernel_size=5, stride=2, padding=2, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Conv2d(nf, nf, kernel_size=5, stride=1, padding=2, bias=use_bias),
            nn.LeakyReLU(0.1, True),
        ])
        # MLP that lifts the code to nf dimensions.
        self.code_dense = nn.Sequential(*[
            nn.Linear(n_condition, nf, bias=use_bias),
            nn.LeakyReLU(0.1, True),
            nn.Linear(nf, nf, bias=use_bias),
        ])
        if conv_merge:
            # 1x1 convs that fuse concatenated [features, code] back to nf channels.
            self.global_dense = nn.Sequential(*[
                nn.Conv2d(nf * 2, nf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
                nn.LeakyReLU(0.1, True),
                nn.Conv2d(nf * 2, nf, kernel_size=1, stride=1, padding=0, bias=use_bias),
                nn.LeakyReLU(0.1, True),
                nn.Conv2d(nf, nf, kernel_size=1, stride=1, padding=0, bias=use_bias),
                nn.LeakyReLU(0.1, True),
            ])
        self.nf = nf
        self.conv_merge = conv_merge
        self.fc1 = nn.Linear(nf, nf, bias=True)
        self.fc2 = nn.Linear(nf, nf, bias=True)
        self.fc3 = nn.Linear(nf, n_condition, bias=True)
        self.globalpooling = nn.AdaptiveAvgPool2d((1, 1))
        self.lrelu = nn.LeakyReLU(0.1, True)
    def forward(self, input, code):
        conv_input = self.ConvNet(input)
        B, C_f, H_f, W_f = conv_input.size()  # LR_size
        code_ori = self.code_dense(code)
        if self.conv_merge:
            conv_code = code_ori.view((B, self.nf, 1, 1)).expand((B, self.nf, H_f, W_f))
            conv_mid = torch.cat((conv_input, conv_code), dim=1)
            conv_input = self.global_dense(conv_mid)
        fea = self.globalpooling(conv_input).view(conv_input.size(0), -1)
        fea = self.lrelu(self.fc1(fea))
        fea = self.lrelu(self.fc2(fea))
        out = self.fc3(fea)
        # NOTE: residual is added to the encoded code (nf-dim projection path
        # output), unlike Corrector which adds the raw input code.
        return out + code_ori
| 45.251845 | 110 | 0.572687 | 48,784 | 0.994516 | 0 | 0 | 0 | 0 | 0 | 0 | 1,275 | 0.025992 |
ff7b38d79dec939d19abc7521dcac28b5a65f30d | 3,351 | py | Python | modals/data_util.py | jamestszhim/modals | 58f9966213634a6226fdc451990e9e8ce4831154 | [
"MIT"
] | 29 | 2021-03-02T20:37:08.000Z | 2022-03-28T07:53:49.000Z | modals/data_util.py | jamestszhim/modals | 58f9966213634a6226fdc451990e9e8ce4831154 | [
"MIT"
] | 1 | 2021-07-12T15:24:29.000Z | 2021-07-12T16:04:59.000Z | modals/data_util.py | jamestszhim/modals | 58f9966213634a6226fdc451990e9e8ce4831154 | [
"MIT"
] | 3 | 2021-06-22T08:55:45.000Z | 2022-03-06T07:41:29.000Z | import copy
import os
import random
from pathlib import Path
import dill
import torch
import torchtext.data as data
import torchtext.datasets as datasets
from torch.utils.data import Sampler
from torchtext.vocab import GloVe
from modals.setup import EMB_DIR
def save_txt_dataset(dataset, path):
    """Persist a torchtext dataset's examples and fields under *path* using dill."""
    target = path if isinstance(path, Path) else Path(path)
    target.mkdir(parents=True, exist_ok=True)
    for filename, payload in (("examples.pkl", dataset.examples), ("fields.pkl", dataset.fields)):
        torch.save(payload, target / filename, pickle_module=dill)
def load_txt_dataset(path, fields):
    """Rebuild a torchtext Dataset from examples pickled by save_txt_dataset.

    The caller supplies *fields*; the pickled fields file on disk is not read.
    """
    base = path if isinstance(path, Path) else Path(path)
    examples = torch.load(base / "examples.pkl", pickle_module=dill)
    return data.Dataset(examples, fields)
class SubsetSampler(Sampler):
    """Samples the given indices deterministically, in order, without replacement.

    Arguments:
        indices (sequence): a sequence of dataset indices to yield
    """

    def __init__(self, indices):
        # Stored as-is; iteration order is exactly the order given.
        self.indices = indices

    def __iter__(self):
        return iter(self.indices)

    def __len__(self):
        return len(self.indices)
def binarize(dataset):
    """Drop 'neutral' examples in place, turning SST into a binary task.

    Mutates ``dataset.examples`` and returns the same dataset object.
    """
    dataset.examples = [ex for ex in dataset.examples if ex.label != 'neutral']
    return dataset
def get_text_dataloaders(dataset_name, valid_size, batch_size, subtrain_ratio=1.0, dataroot='.data'):
    """Build train/valid/test BucketIterators for a text-classification dataset.

    Args:
        dataset_name: 'sst2' (SST binarized to positive/negative) or 'trec'.
        valid_size: for 'trec', a validation split is created only if > 0.
        batch_size: iterator batch size.
        subtrain_ratio: keep this stratified fraction of the training split.
        dataroot: download/cache directory for torchtext datasets.

    Returns:
        (train_loader, valid_loader, test_loader, class_names, vocab);
        valid_loader may be built from None for 'trec' with valid_size <= 0.

    Raises:
        ValueError: if dataset_name is not supported.
    """
    TEXT = data.Field(lower=True, include_lengths=True, batch_first=False)
    LABEL = data.Field(sequential=False)
    fields = {'text': TEXT, 'label': LABEL}
    if dataset_name == 'sst2':
        train, valid, test = datasets.SST.splits(TEXT, LABEL, root=dataroot)
        # SST is three-way; drop 'neutral' to obtain the binary SST-2 task.
        train, valid, test = binarize(train), binarize(valid), binarize(test)
        if subtrain_ratio < 1.0:
            train, hold_train = train.split(
                split_ratio=subtrain_ratio, stratified=True)
        classes = ['negative', 'positive']
    elif dataset_name == 'trec':
        random.seed(0)
        train, test = datasets.TREC.splits(
            TEXT, LABEL, fine_grained=False, root=dataroot)
        if valid_size > 0:
            train, valid = train.split(
                stratified=True, random_state=random.getstate())  # default 0.7
        else:
            valid = None
        if subtrain_ratio < 1.0:
            train, hold_train = train.split(
                split_ratio=subtrain_ratio, stratified=True, random_state=random.getstate())
        classes = ['DESC', 'ENTY', 'ABBR', 'HUM', 'NUM', 'LOC']
    else:
        # BUG FIX: the exception was previously constructed but never raised,
        # so execution fell through and crashed later with NameError on the
        # undefined 'train'/'test' variables.
        raise ValueError(f'Invalid dataset name={dataset_name}')
    TEXT.build_vocab(train, vectors=GloVe(name='6B', dim=300, cache=EMB_DIR))
    LABEL.build_vocab(train)
    train_loader, valid_loader, test_loader = data.BucketIterator.splits(
        (train, valid, test), batch_size=batch_size, sort=True, sort_key=lambda x: len(x.text),
        sort_within_batch=True)
    print('### Dataset ###')
    print(f'=>{dataset_name}')
    print(f' |Train size:\t{len(train)}')
    if valid is not None:
        print(f' |Valid size:\t{len(valid)}')
    print(f' |Test size:\t{len(test)}')
    print(f' |Vocab size:\t{len(TEXT.vocab)}')
    return train_loader, valid_loader, test_loader, classes, TEXT.vocab
| 33.178218 | 101 | 0.661295 | 366 | 0.109221 | 0 | 0 | 0 | 0 | 0 | 0 | 556 | 0.165921 |
ff7e68a90fd42c1fc91158dc7301bfaa77695ec1 | 1,089 | py | Python | setup.py | octopart/beaker_extensions | d4a0306d1c6c428dd919b64fc951d4304050c37b | [
"MIT"
] | 1 | 2016-01-01T02:03:42.000Z | 2016-01-01T02:03:42.000Z | setup.py | octopart/beaker_extensions | d4a0306d1c6c428dd919b64fc951d4304050c37b | [
"MIT"
] | null | null | null | setup.py | octopart/beaker_extensions | d4a0306d1c6c428dd919b64fc951d4304050c37b | [
"MIT"
] | 1 | 2015-11-10T06:50:36.000Z | 2015-11-10T06:50:36.000Z | from setuptools import setup, find_packages
import sys, os
version = '11octo'
setup(name='beaker_extensions',
version=version,
description="Beaker extensions for additional back-end stores.",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='Didip Kerabat',
author_email='didipk@gmail.com',
url='',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
[beaker.backends]
redis = beaker_extensions.redis_:RedisManager
tyrant = beaker_extensions.tyrant_:TokyoTyrantManager
riak = beaker_extensions.riak_:RiakManager
dynomite = beaker_extensions.dynomite_:DynomiteManager
ringo = beaker_extensions.ringo:RingoManager
cassandra = beaker_extensions.cassandra:CassandraManager
""",
)
| 32.029412 | 95 | 0.663912 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 657 | 0.603306 |
ff83e388de90ffa2ddc5c91abc3eff56e6220ae3 | 1,170 | py | Python | pyvims/planets/__type__.py | seignovert/pyvims | a70b5b9b8bc5c37fa43b7db4d15407f312a31849 | [
"BSD-3-Clause"
] | 4 | 2019-09-16T15:50:22.000Z | 2021-04-08T15:32:48.000Z | pyvims/planets/__type__.py | seignovert/pyvims | a70b5b9b8bc5c37fa43b7db4d15407f312a31849 | [
"BSD-3-Clause"
] | 3 | 2018-05-04T09:28:24.000Z | 2018-12-03T09:00:31.000Z | pyvims/planets/__type__.py | seignovert/pyvims | a70b5b9b8bc5c37fa43b7db4d15407f312a31849 | [
"BSD-3-Clause"
] | 1 | 2020-10-12T15:14:17.000Z | 2020-10-12T15:14:17.000Z | """Abstract planet type."""
class Planet(type):
    """Abstract Planet object.

    Used as a metaclass: a concrete planet is a class whose metaclass is
    ``Planet``, overriding ``MEAN_RADIUS`` and ``RADII``.  Equality is
    case-insensitive on the planet name and works against strings as well
    as other Planet classes (both provide ``lower()``).
    """

    MEAN_RADIUS = (None, None)  # [km] ± err [km]
    RADII = ((None, None), (None, None), (None, None))  # [km] ± err [km]

    def __str__(cls):
        return cls.__name__

    def __repr__(cls):
        return f'<{cls.__class__.__name__}> {cls}'

    def __eq__(cls, other):
        return str(cls).lower() == other.lower()

    def __hash__(cls):
        # BUG FIX: defining __eq__ alone implicitly set __hash__ to None,
        # which made planet classes unhashable (unusable in sets / as dict
        # keys).  Hash the lowercase name so hash() stays consistent with
        # the case-insensitive __eq__, including string comparisons.
        return hash(str(cls).lower())

    @property
    def radius(cls):
        """Mean radius [km]."""
        return cls.MEAN_RADIUS[0]

    @property
    def r(cls):
        """Mean radius (shortcut) [km]."""
        return cls.radius

    @property
    def radii(cls):
        """Planet RADII (a, b, c) [km]."""
        return tuple(abc[0] for abc in cls.RADII)

    @property
    def a(cls):
        """Planet a-axis radius [km]."""
        return cls.RADII[0][0]

    @property
    def b(cls):
        """Planet b-axis radius [km]."""
        return cls.RADII[1][0]

    @property
    def c(cls):
        """Planet c-axis radius [km]."""
        return cls.RADII[2][0]

    def lower(cls):
        """Planet name in lowercase."""
        return str(cls).lower()
ff856a8948ab1f46267e26568f1e8292a52d0cff | 15,718 | py | Python | core/models/sparse_bp_resnet.py | JeremieMelo/L2ight | 67f93b66ddf8bb5a365834b84ed6acdbc4f48eaf | [
"MIT"
] | 7 | 2021-11-02T16:21:47.000Z | 2022-03-09T06:01:25.000Z | core/models/sparse_bp_resnet.py | JeremieMelo/L2ight | 67f93b66ddf8bb5a365834b84ed6acdbc4f48eaf | [
"MIT"
] | null | null | null | core/models/sparse_bp_resnet.py | JeremieMelo/L2ight | 67f93b66ddf8bb5a365834b84ed6acdbc4f48eaf | [
"MIT"
] | null | null | null | """
Description:
Author: Jiaqi Gu (jqgu@utexas.edu)
Date: 2021-10-24 16:24:50
LastEditors: Jiaqi Gu (jqgu@utexas.edu)
LastEditTime: 2021-10-24 16:24:50
"""
from typing import Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from pyutils.general import logger
from torch import Tensor, nn
from torch.nn.modules.activation import ReLU
from torch.types import Device, _size
from .layers.activation import ReLUN
from .layers.custom_conv2d import MZIBlockConv2d
from .layers.custom_linear import MZIBlockLinear
from .sparse_bp_base import SparseBP_Base
__all__ = [
"SparseBP_MZI_ResNet18",
"SparseBP_MZI_ResNet34",
"SparseBP_MZI_ResNet50",
"SparseBP_MZI_ResNet101",
"SparseBP_MZI_ResNet152",
]
def conv3x3(
    in_planes,
    out_planes,
    miniblock: int = 8,
    bias: bool = False,
    stride: Union[int, _size] = 1,
    padding: Union[int, _size] = 0,
    mode: str = "weight",
    v_max: float = 4.36,  # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
    v_pi: float = 4.36,
    w_bit: int = 16,
    in_bit: int = 16,
    photodetect: bool = False,
    device: Device = torch.device("cuda"),
):
    """Build a 3x3 MZI-block convolution with the given hyper-parameters."""
    return MZIBlockConv2d(
        in_planes, out_planes, 3, miniblock, bias, stride, padding,
        mode=mode, v_max=v_max, v_pi=v_pi, w_bit=w_bit, in_bit=in_bit,
        photodetect=photodetect, device=device,
    )
def conv1x1(
    in_planes,
    out_planes,
    miniblock: int = 8,
    bias: bool = False,
    stride: Union[int, _size] = 1,
    padding: Union[int, _size] = 0,
    mode: str = "weight",
    v_max: float = 4.36,  # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
    v_pi: float = 4.36,
    w_bit: int = 16,
    in_bit: int = 16,
    photodetect: bool = False,
    device: Device = torch.device("cuda"),
):
    """Build a 1x1 MZI-block convolution with the given hyper-parameters."""
    return MZIBlockConv2d(
        in_planes, out_planes, 1, miniblock, bias, stride, padding,
        mode=mode, v_max=v_max, v_pi=v_pi, w_bit=w_bit, in_bit=in_bit,
        photodetect=photodetect, device=device,
    )
def Linear(
    in_channel,
    out_channel,
    miniblock: int = 8,
    bias: bool = False,
    mode: str = "weight",
    v_max: float = 4.36,  # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
    v_pi: float = 4.36,
    w_bit: int = 16,
    in_bit: int = 16,
    photodetect: bool = False,
    device: Device = torch.device("cuda"),
):
    """Build an MZI-block fully-connected layer with the given hyper-parameters."""
    return MZIBlockLinear(
        in_channel, out_channel, miniblock, bias, mode,
        v_max, v_pi, w_bit, in_bit, photodetect, device=device,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style) built from MZI-block convs.

    Structure: conv3x3 -> BN -> act -> conv3x3 -> BN, plus an identity (or
    1x1-projection) shortcut, followed by the final activation.
    """
    expansion = 1
    def __init__(
        self,
        in_planes,
        planes,
        stride=1,
        # unique parameters
        miniblock: int = 8,
        mode: str = "weight",
        v_max: float = 4.36,  # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
        v_pi: float = 4.36,
        w_bit: int = 16,
        in_bit: int = 16,
        photodetect: bool = False,
        act_thres: int = 6,
        device: Device = torch.device("cuda"),
    ) -> None:
        super(BasicBlock, self).__init__()
        # self.conv1 = nn.Conv2d(
        #     in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.conv1 = conv3x3(
            in_planes,
            planes,
            miniblock=miniblock,
            bias=False,
            stride=stride,
            padding=1,
            mode=mode,
            v_max=v_max,
            v_pi=v_pi,
            in_bit=in_bit,
            w_bit=w_bit,
            photodetect=photodetect,
            device=device,
        )
        self.bn1 = nn.BatchNorm2d(planes)
        # Clipped ReLU (ReLUN) when the threshold is small enough; plain ReLU otherwise.
        self.act1 = ReLUN(act_thres, inplace=True) if act_thres <= 6 else ReLU(inplace=True)
        # self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
        #                        stride=1, padding=1, bias=False)
        self.conv2 = conv3x3(
            planes,
            planes,
            miniblock=miniblock,
            bias=False,
            stride=1,
            padding=1,
            mode=mode,
            v_max=v_max,
            v_pi=v_pi,
            in_bit=in_bit,
            w_bit=w_bit,
            photodetect=photodetect,
            device=device,
        )
        self.bn2 = nn.BatchNorm2d(planes)
        self.act2 = ReLUN(act_thres, inplace=True) if act_thres <= 6 else ReLU(inplace=True)
        self.shortcut = nn.Identity()
        # self.shortcut.conv1_spatial_sparsity = self.conv1.bp_input_sampler.spatial_sparsity
        # Projection shortcut when the spatial size or channel count changes.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                conv1x1(
                    in_planes,
                    self.expansion * planes,
                    miniblock=miniblock,
                    bias=False,
                    stride=stride,
                    padding=0,
                    mode=mode,
                    v_max=v_max,
                    v_pi=v_pi,
                    in_bit=in_bit,
                    w_bit=w_bit,
                    photodetect=photodetect,
                    device=device,
                ),
                nn.BatchNorm2d(self.expansion * planes),
            )
    def forward(self, x):
        """Residual forward pass: act(bn2(conv2(act(bn1(conv1(x))))) + shortcut(x))."""
        out = self.act1(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = self.act2(out)
        return out
class Bottleneck(nn.Module):
    """Three-conv bottleneck residual block (ResNet-50+ style) from MZI-block convs.

    Structure: 1x1 reduce -> 3x3 -> 1x1 expand (x4), each followed by BN and a
    clipped/plain ReLU, plus an identity or 1x1-projection shortcut.
    """
    expansion = 4
    def __init__(
        self,
        in_planes: int,
        planes: int,
        stride: int = 1,
        # unique parameters
        miniblock: int = 8,
        mode: str = "weight",
        v_max: float = 4.36,  # 0-pi for clements, # 6.166 is v_2pi, 0-2pi for reck
        v_pi: float = 4.36,
        w_bit: int = 16,
        in_bit: int = 16,
        photodetect: bool = False,
        act_thres: int = 6,
        device: Device = torch.device("cuda"),
    ) -> None:
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(
            in_planes,
            planes,
            miniblock=miniblock,
            bias=False,
            stride=1,
            padding=0,
            mode=mode,
            v_max=v_max,
            v_pi=v_pi,
            in_bit=in_bit,
            w_bit=w_bit,
            photodetect=photodetect,
            device=device,
        )
        self.bn1 = nn.BatchNorm2d(planes)
        self.act1 = ReLUN(act_thres, inplace=True) if act_thres <= 6 else ReLU(inplace=True)
        self.conv2 = conv3x3(
            planes,
            planes,
            miniblock=miniblock,
            bias=False,
            stride=stride,
            padding=1,
            mode=mode,
            v_max=v_max,
            v_pi=v_pi,
            in_bit=in_bit,
            w_bit=w_bit,
            photodetect=photodetect,
            device=device,
        )
        self.bn2 = nn.BatchNorm2d(planes)
        self.act2 = ReLUN(act_thres, inplace=True) if act_thres <= 6 else ReLU(inplace=True)
        # BUG FIX: a plain nn.Conv2d used to be assigned to self.conv3 here and
        # was immediately overwritten by the MZI conv1x1 below; the dead
        # assignment allocated an unused module and has been removed.
        self.conv3 = conv1x1(
            planes,
            self.expansion * planes,
            miniblock=miniblock,
            bias=False,
            stride=1,
            padding=0,
            mode=mode,
            v_max=v_max,
            v_pi=v_pi,
            in_bit=in_bit,
            w_bit=w_bit,
            photodetect=photodetect,
            device=device,
        )
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        self.act3 = ReLUN(act_thres, inplace=True) if act_thres <= 6 else ReLU(inplace=True)
        self.shortcut = nn.Sequential()
        # Projection shortcut when the spatial size or channel count changes.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                conv1x1(
                    in_planes,
                    self.expansion * planes,
                    miniblock=miniblock,
                    bias=False,
                    stride=stride,
                    padding=0,
                    mode=mode,
                    v_max=v_max,
                    v_pi=v_pi,
                    in_bit=in_bit,
                    w_bit=w_bit,
                    photodetect=photodetect,
                    device=device,
                ),
                nn.BatchNorm2d(self.expansion * planes),
            )
    def forward(self, x):
        """Residual forward pass through the three conv stages plus shortcut."""
        out = self.act1(self.bn1(self.conv1(x)))
        out = self.act2(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = self.act3(out)
        return out
class ResNet(SparseBP_Base):
    """MZI ResNet (Shen+, Nature Photonics 2017). Support sparse backpropagation. Blocking matrix multiplication."""

    def __init__(
        self,
        block,
        num_blocks,
        img_height: int,
        img_width: int,
        in_channel: int,
        n_class: int,
        block_list: Optional[List[int]] = None,
        in_bit: int = 32,
        w_bit: int = 32,
        mode: str = "usv",
        v_max: float = 10.8,
        v_pi: float = 4.36,
        act_thres: float = 6.0,
        photodetect: bool = True,
        bias: bool = False,
        device: Device = torch.device("cuda"),
    ) -> None:
        """Build the ResNet from a residual `block` type and per-stage counts.

        Args:
            block: residual block class (BasicBlock or Bottleneck).
            num_blocks: list of four per-stage block counts.
            img_height/img_width: input image size (stem downsamples if > 64).
            in_channel/n_class: input channels and number of output classes.
            block_list: MZI miniblock sizes; only block_list[0] is used here.
            in_bit/w_bit: input/weight quantization bitwidths.
            mode, v_max, v_pi, photodetect: MZI layer configuration.
            act_thres: activation clipping threshold passed to the blocks.
            bias: unused here (kept for interface compatibility).
        """
        super().__init__()
        # BUG FIX: the default used to be the mutable literal [8]; a shared
        # list default is aliased across instances. Use None as sentinel.
        if block_list is None:
            block_list = [8]
        # resnet params
        self.block = block
        self.num_blocks = num_blocks
        self.in_planes = 64
        self.img_height = img_height
        self.img_width = img_width
        self.in_channel = in_channel
        self.n_class = n_class
        # list of block size
        self.block_list = block_list
        self.in_bit = in_bit
        self.w_bit = w_bit
        self.mode = mode
        self.v_max = v_max
        self.v_pi = v_pi
        self.act_thres = act_thres
        self.photodetect = photodetect
        self.device = device
        # build layers
        blkIdx = 0
        self.conv1 = conv3x3(
            in_channel,
            64,
            miniblock=self.block_list[0],
            bias=False,
            stride=1 if img_height <= 64 else 2,  # downsample for imagenet, dogs, cars
            padding=1,
            mode=mode,
            v_max=self.v_max,
            v_pi=self.v_pi,
            in_bit=self.in_bit,
            w_bit=self.w_bit,
            photodetect=self.photodetect,
            device=self.device,
        )
        self.bn1 = nn.BatchNorm2d(64)
        blkIdx += 1
        # BUG FIX: act_thres was previously not forwarded to _make_layer, so
        # residual blocks silently used the default threshold (6) regardless
        # of the configured value; it is now passed explicitly below.
        self.layer1 = self._make_layer(
            block,
            64,
            num_blocks[0],
            stride=1,
            miniblock=self.block_list[0],
            mode=self.mode,
            v_max=self.v_max,
            v_pi=self.v_pi,
            in_bit=self.in_bit,
            w_bit=self.w_bit,
            act_thres=self.act_thres,
            photodetect=self.photodetect,
            device=device,
        )
        blkIdx += 1
        self.layer2 = self._make_layer(
            block,
            128,
            num_blocks[1],
            stride=2,
            miniblock=self.block_list[0],
            mode=self.mode,
            v_max=self.v_max,
            v_pi=self.v_pi,
            in_bit=self.in_bit,
            w_bit=self.w_bit,
            act_thres=self.act_thres,
            photodetect=self.photodetect,
            device=device,
        )
        blkIdx += 1
        self.layer3 = self._make_layer(
            block,
            256,
            num_blocks[2],
            stride=2,
            miniblock=self.block_list[0],
            mode=self.mode,
            v_max=self.v_max,
            v_pi=self.v_pi,
            in_bit=self.in_bit,
            w_bit=self.w_bit,
            act_thres=self.act_thres,
            photodetect=self.photodetect,
            device=device,
        )
        blkIdx += 1
        self.layer4 = self._make_layer(
            block,
            512,
            num_blocks[3],
            stride=2,
            miniblock=self.block_list[0],
            mode=self.mode,
            v_max=self.v_max,
            v_pi=self.v_pi,
            in_bit=self.in_bit,
            w_bit=self.w_bit,
            act_thres=self.act_thres,
            photodetect=self.photodetect,
            device=device,
        )
        blkIdx += 1
        self.linear = Linear(
            512 * block.expansion,
            self.n_class,
            miniblock=self.block_list[0],
            bias=False,
            mode=self.mode,
            v_max=self.v_max,
            v_pi=self.v_pi,
            in_bit=self.in_bit,
            w_bit=self.w_bit,
            photodetect=self.photodetect,
            device=device,
        )
        self.drop_masks = None
        self.reset_parameters()
        self.gamma_noise_std = 0
        self.crosstalk_factor = 0

    def _make_layer(
        self,
        block,
        planes,
        num_blocks,
        stride,
        # unique parameters
        miniblock: int = 8,
        mode: str = "usv",
        v_max: float = 10.8,
        v_pi: float = 4.36,
        in_bit: int = 32,
        w_bit: int = 32,
        act_thres: float = 6.0,
        photodetect: bool = True,
        device: Device = torch.device("cuda"),
    ):
        """Stack `num_blocks` residual blocks; only the first uses `stride`."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(
                block(
                    self.in_planes,
                    planes,
                    stride,
                    miniblock=miniblock,
                    mode=mode,
                    v_max=v_max,
                    v_pi=v_pi,
                    in_bit=in_bit,
                    w_bit=w_bit,
                    act_thres=act_thres,
                    photodetect=photodetect,
                    device=device,
                )
            )
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x: Tensor) -> Tensor:
        """Standard ResNet forward: stem, four stages, global pool, classifier."""
        out = F.relu(self.bn1(self.conv1(x)), inplace=True)
        if x.size(-1) > 64:  # 224 x 224, e.g., cars, dogs, imagenet
            out = F.max_pool2d(out, kernel_size=3, stride=2, padding=1)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.adaptive_avg_pool2d(out, 1)
        out = torch.flatten(out, 1)
        out = self.linear(out)
        return out
def SparseBP_MZI_ResNet18(*args, **kwargs):
    """ResNet-18: BasicBlock x [2, 2, 2, 2]."""
    return ResNet(BasicBlock, [2, 2, 2, 2], *args, **kwargs)
def SparseBP_MZI_ResNet34(*args, **kwargs):
    """ResNet-34: BasicBlock x [3, 4, 6, 3]."""
    return ResNet(BasicBlock, [3, 4, 6, 3], *args, **kwargs)
def SparseBP_MZI_ResNet50(*args, **kwargs):
    """ResNet-50: Bottleneck x [3, 4, 6, 3]."""
    return ResNet(Bottleneck, [3, 4, 6, 3], *args, **kwargs)
def SparseBP_MZI_ResNet101(*args, **kwargs):
    """ResNet-101: Bottleneck x [3, 4, 23, 3]."""
    return ResNet(Bottleneck, [3, 4, 23, 3], *args, **kwargs)
def SparseBP_MZI_ResNet152(*args, **kwargs):
    """ResNet-152: Bottleneck x [3, 8, 36, 3]."""
    return ResNet(Bottleneck, [3, 8, 36, 3], *args, **kwargs)
def test():
    """Smoke test: push one random CIFAR-sized batch through ResNet-18 on CUDA."""
    device = torch.device("cuda")
    model = SparseBP_MZI_ResNet18(
        in_channel=3,
        n_class=10,
        block_list=[8, 8, 8, 8, 8, 8],
        in_bit=32,
        w_bit=32,
        mode="usv",
        v_max=10.8,
        v_pi=4.36,
        act_thres=6,
        photodetect=True,
        device=device,
    ).to(device)
    batch = torch.randn(2, 3, 32, 32).to(device)
    print(model)
    output = model(batch)
    print(output.shape)
if __name__ == "__main__":
    test()
| 27.623902 | 116 | 0.517814 | 11,877 | 0.75563 | 0 | 0 | 0 | 0 | 0 | 0 | 1,539 | 0.097913 |
ff876a86d9b2e921d22ed00df25042c1865eb9da | 2,387 | py | Python | src/discord_utils.py | pocc/bga_discord | 51fdf1b9c7d7d39abd6628f2d79b5b217c3e85b2 | [
"Apache-2.0"
] | 8 | 2020-09-25T22:57:50.000Z | 2021-11-03T14:49:10.000Z | src/discord_utils.py | pocc/bga_discord | 51fdf1b9c7d7d39abd6628f2d79b5b217c3e85b2 | [
"Apache-2.0"
] | 40 | 2020-10-20T01:12:46.000Z | 2021-05-24T23:22:22.000Z | src/discord_utils.py | pocc/bga_discord | 51fdf1b9c7d7d39abd6628f2d79b5b217c3e85b2 | [
"Apache-2.0"
] | 2 | 2020-10-18T18:56:05.000Z | 2021-10-19T05:23:47.000Z | """Utils that require discord."""
import logging
from logging.handlers import RotatingFileHandler
import discord
# Module-level logger writing to a rotating file; backupCount=0 keeps a
# single ~10 MB file (it is truncated rather than rotated into backups).
LOG_FILENAME = "errs"
logger = logging.getLogger(__name__)
handler = RotatingFileHandler(LOG_FILENAME, maxBytes=10000000, backupCount=0)
formatter = logging.Formatter("%(asctime)s | %(name)s | %(levelname)s | %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
async def send_table_embed(message, game, desc, author, players, second_title, second_content):
    """Send a table-creation embed to the channel the message came from.

    Fields are added only when their content is non-empty; the embed is
    attributed to the message author (name + avatar).
    """
    logger.debug(
        f"Sending embed with message: {message}, game {game}, url {desc}, author {author}, players {players}, 2nd title {second_title}, 2nd content {second_content}",
    )
    retmsg = discord.Embed(
        title=game,
        description=desc,
        color=3447003,
    )
    retmsg.set_author(name=message.author.display_name, icon_url=message.author.avatar_url)
    if len(author) > 0:
        retmsg.add_field(name="Creator", value=author, inline=False)
    if players:
        retmsg.add_field(name="Invited", value=players, inline=False)
    if second_content:
        # The second field is fully caller-defined (title and body).
        retmsg.add_field(name=second_title, value=second_content, inline=False)
    await message.channel.send(embed=retmsg)
async def send_options_embed(message, opt_type, options, description="", cancellable=True):
    """DM the author an embed listing numbered options (1-based).

    The user's numeric choice is read elsewhere by the caller; this
    coroutine itself only sends the prompt and returns None.
    """
    options_text = ""
    for i in range(len(options)):
        option = options[i]
        options_text += f"\n**{i+1}** {option}"  # options start at 1
    retmsg = discord.Embed(
        title=f"Choose a {opt_type} number",
        color=3447003,
    )
    retmsg.add_field(name="Options", value=options_text, inline=False)
    if description:
        retmsg.description = description
    if cancellable:
        retmsg.set_footer(text="Type cancel to quit")
    await message.author.send(embed=retmsg)
async def send_simple_embed(message, title, description="", fields=None):
    """DM the author a simple embed with an optional description and fields.

    Args:
        message: The triggering discord message (author receives the DM).
        title: Embed title.
        description: Optional embed description.
        fields: Optional mapping of field name -> value, rendered in order.
    """
    # BUG FIX: the default used to be the mutable literal {}, which is
    # shared across calls; use None as the sentinel instead.
    if fields is None:
        fields = {}
    retmsg = discord.Embed(
        title=title,
        color=3447003,
    )
    if description:
        retmsg.description = description
    for field in fields:
        retmsg.add_field(name=field, value=fields[field], inline=False)
    retmsg.set_footer(text="Type cancel to quit")
    await message.author.send(embed=retmsg)
| 36.723077 | 166 | 0.697528 | 0 | 0 | 0 | 0 | 0 | 0 | 1,950 | 0.816925 | 531 | 0.222455 |
ff882c001970e1862001ff127a71976cb7552386 | 1,765 | py | Python | crackers/online_hash_cracker.py | MomboteQ/HASH | 1dece78fdf8fe21ab20906dc61f6e213d09c592b | [
"MIT"
] | null | null | null | crackers/online_hash_cracker.py | MomboteQ/HASH | 1dece78fdf8fe21ab20906dc61f6e213d09c592b | [
"MIT"
] | 1 | 2022-03-28T16:57:27.000Z | 2022-03-28T16:57:27.000Z | crackers/online_hash_cracker.py | MomboteQ/HASH | 1dece78fdf8fe21ab20906dc61f6e213d09c592b | [
"MIT"
] | null | null | null | ###################################################
# #
# Name : Online Hash Cracker (HASH++) #
# Created by : MomboteQ #
# Version : 1.0 #
# #
###################################################
from colorama import Fore, Style, init
import urllib3
import urllib
import re
def online_crack(hash):
    """Detect the hash type by length and look it up on hashtoolkit.com.

    Prints the plaintext when the database knows the hash; otherwise prints
    an error. Only MD5/SHA-1/SHA-256/SHA-384/SHA-512 lengths are recognised.
    """
    init()
    type_by_length = {32: 'MD5', 40: 'SHA-1', 64: 'SHA-256', 96: 'SHA-384', 128: 'SHA-512'}
    hash_type = type_by_length.get(len(hash))
    if hash_type is None:
        print(f'\n[{Fore.LIGHTRED_EX}✗{Style.RESET_ALL}] This hash type is not supported.\n')
        return
    print(f'\n[{Fore.LIGHTGREEN_EX}✓{Style.RESET_ALL}] Detected hash type : {Fore.LIGHTBLUE_EX + hash_type + Style.RESET_ALL}')
    http = urllib3.PoolManager()
    try:
        response = http.request('GET', f'https://hashtoolkit.com/decrypt-hash/?hash={hash}')
    except:
        print(f'[{Fore.LIGHTRED_EX}✗{Style.RESET_ALL}] Check your internet connection!\n')
        return
    try:
        decrypted = urllib.parse.unquote(re.search(r'/generate-hash/\?text=(.*?)"', response.data.decode()).group(1))
        print(f'[{Fore.LIGHTGREEN_EX}✓{Style.RESET_ALL}] {hash} : {Fore.LIGHTGREEN_EX + decrypted + Style.RESET_ALL}\n')
    except:
        print(f'[{Fore.LIGHTRED_EX}✗{Style.RESET_ALL}] {hash} : {Fore.LIGHTRED_EX}This hash was not found in the database!{Style.RESET_ALL}\n')
ff88e0f5ccecc3c701f512768bb897a2dfbe68cb | 1,035 | py | Python | sis-api/test/bdd/steps/membership.serialization.py | maxbilbow/7054CEM-sis | 1c5067c9afc38e340fcce046048f8ae21d267365 | [
"MIT"
] | null | null | null | sis-api/test/bdd/steps/membership.serialization.py | maxbilbow/7054CEM-sis | 1c5067c9afc38e340fcce046048f8ae21d267365 | [
"MIT"
] | null | null | null | sis-api/test/bdd/steps/membership.serialization.py | maxbilbow/7054CEM-sis | 1c5067c9afc38e340fcce046048f8ae21d267365 | [
"MIT"
] | null | null | null | from datetime import date
from behave import *
from core.model import to_dict
from core.model.membership import Membership
from core.model.membership_type import MembershipType
from core.utils.serialization import serialize
use_step_matcher("re")
@when("a serialized membership is requested")
def step_impl(context):
    """Serialize the membership stored on the context into an API dict.

    :type context: behave.runner.Context
    """
    context.dict = serialize(context.membership).for_api()
@given("a membership is created with date (?P<date_string>.+)")
def step_impl(context, date_string: str):
    """Create a Membership whose start and end date both equal date_string.

    :type context: behave.runner.Context
    :type date_string: str
    """
    date_obj = date.fromisoformat(date_string)
    context.membership = Membership(id=1, user_id=1,start_date=date_obj, end_date=date_obj, type=MembershipType.Smart)
@then("the dates are converted to ISO date format (?P<date_string>.+)")
def step_impl(context, date_string: str):
    """Assert the serialized start_date round-trips as the ISO string.

    :type context: behave.runner.Context
    :type date_string: str
    """
    assert context.dict["start_date"] == date_string
| 27.236842 | 118 | 0.72657 | 0 | 0 | 0 | 0 | 776 | 0.749758 | 0 | 0 | 369 | 0.356522 |
ff8936a87c1d6aa51ae7c9ad6e4ebecebdf9df3e | 317 | py | Python | DPGAnalysis/SiStripTools/python/poolSource_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | DPGAnalysis/SiStripTools/python/poolSource_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | DPGAnalysis/SiStripTools/python/poolSource_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
# CMSSW input source reading EDM ROOT files; fileNames is filled by the
# driving configuration. The drop command excludes MEtoEDMConverter products.
source = cms.Source("PoolSource",
                    fileNames = cms.untracked.vstring(),
#                    skipBadFiles = cms.untracked.bool(True),
                    inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")
                    )
| 35.222222 | 98 | 0.564669 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.347003 |
ff8987904a9a228b1a63aba1997b83073f7d2bbf | 10,744 | py | Python | orange3/Orange/preprocess/impute.py | rgschmitz1/BioDepot-workflow-builder | f74d904eeaf91ec52ec9b703d9fb38e9064e5a66 | [
"MIT"
] | 54 | 2017-01-08T17:21:49.000Z | 2021-11-02T08:46:07.000Z | orange3/Orange/preprocess/impute.py | Synthia-3/BioDepot-workflow-builder | 4ee93abe2d79465755e82a145af3b6a6e1e79fd4 | [
"MIT"
] | 22 | 2017-03-28T06:03:14.000Z | 2021-07-28T05:43:55.000Z | orange3/Orange/preprocess/impute.py | Synthia-3/BioDepot-workflow-builder | 4ee93abe2d79465755e82a145af3b6a6e1e79fd4 | [
"MIT"
] | 21 | 2017-01-26T21:12:09.000Z | 2022-01-31T21:34:59.000Z | import numpy as np
import scipy.sparse as sp
import Orange.data
from Orange.statistics import distribution, basic_stats
from Orange.util import Reprable
from .transformation import Transformation, Lookup
__all__ = [
"ReplaceUnknowns",
"Average",
"DoNotImpute",
"DropInstances",
"Model",
"AsValue",
"Random",
"Default",
]
class ReplaceUnknowns(Transformation):
    """
    A column transformation which replaces unknown values with a fixed `value`.
    Parameters
    ----------
    variable : Orange.data.Variable
        The target variable for imputation.
    value : int or float
        The value with which to replace the unknown values
    """
    def __init__(self, variable, value=0):
        super().__init__(variable)
        self.value = value
    def transform(self, c):
        # Sparse columns are patched in place (only stored entries can hold
        # NaN); dense columns yield a fresh array.
        if sp.issparse(c):
            stored = c.data
            c.data = np.where(np.isnan(stored), self.value, stored)
            return c
        return np.where(np.isnan(c), self.value, c)
class BaseImputeMethod(Reprable):
    # Display name, short identifier and description used by GUI widgets.
    name = ""
    short_name = ""
    description = ""
    # Template used by format_variable for labelling imputed variables.
    format = "{var.name} -> {self.short_name}"
    # True when the method may only be applied to individual columns.
    columns_only = False
    def __call__(self, data, variable):
        """ Imputes table along variable column.
        Args:
            data (Table): A table to impute.
            variable (Variable): Variable for completing missing values.
        Returns:
            A new Variable instance with completed missing values or
            an array mask of rows to drop out.
        """
        raise NotImplementedError
    def format_variable(self, var):
        # Human-readable label combining the variable and this method.
        return self.format.format(var=var, self=self)
    def __str__(self):
        return self.name
    def copy(self):
        # Stateless by default; subclasses with parameters override this.
        return self
    @classmethod
    def supports_variable(cls, variable):
        # Subclasses may restrict applicability to certain variable types.
        return True
class DoNotImpute(BaseImputeMethod):
    """Imputation strategy that leaves the variable completely untouched."""

    name = "Don't impute"
    short_name = "leave"
    description = ""

    def __call__(self, data, variable):
        # Nothing to impute: hand the variable back unchanged.
        return variable
class DropInstances(BaseImputeMethod):
    name = "Remove instances with unknown values"
    short_name = "drop"
    description = ""
    def __call__(self, data, variable):
        # Returns a boolean row mask (True where the value is unknown),
        # not a variable -- callers drop the masked rows.
        col, _ = data.get_column_view(variable)
        return np.isnan(col)
class Average(BaseImputeMethod):
    name = "Average/Most frequent"
    short_name = "average"
    description = "Replace with average/mode of the column"
    def __call__(self, data, variable, value=None):
        """Return a copy of `variable` imputing unknowns with `value`.

        When `value` is None it is derived from the data: the mean for
        continuous variables, the mode for discrete ones.
        """
        variable = data.domain[variable]
        if value is None:
            if variable.is_continuous:
                stats = basic_stats.BasicStats(data, variable)
                value = stats.mean
            elif variable.is_discrete:
                dist = distribution.get_distribution(data, variable)
                value = dist.modus()
            else:
                raise TypeError("Variable must be continuous or discrete")
        a = variable.copy(compute_value=ReplaceUnknowns(variable, value))
        # Equip the variable with a SQL rendering of the same imputation.
        a.to_sql = ImputeSql(variable, value)
        return a
class ImputeSql(Reprable):
    """Callable rendering a SQL COALESCE expression that imputes NULLs."""
    def __init__(self, var, default):
        # var: variable providing to_sql(); default: replacement for NULLs.
        self.var = var
        self.default = default
    def __call__(self):
        return "coalesce(%s, %s)" % (self.var.to_sql(), str(self.default))
class Default(BaseImputeMethod):
    name = "Value"
    short_name = "value"
    description = ""
    columns_only = True
    format = "{var} -> {self.default}"
    def __init__(self, default=0):
        # Fixed replacement used when no per-call default is supplied.
        self.default = default
    def __call__(self, data, variable, *, default=None):
        """Return a copy of `variable` imputing unknowns with a fixed value."""
        variable = data.domain[variable]
        # Per-call override wins over the instance-level default.
        default = default if default is not None else self.default
        return variable.copy(compute_value=ReplaceUnknowns(variable, default))
    def copy(self):
        return Default(self.default)
class ReplaceUnknownsModel(Reprable):
"""
Replace unknown values with predicted values using a `Orange.base.Model`
Parameters
----------
variable : Orange.data.Variable
The target variable for the imputation.
model : Orange.base.Model
A fitted model predicting `variable`.
"""
def __init__(self, variable, model):
assert model.domain.class_var == variable
self.variable = variable
self.model = model
def __call__(self, data):
if isinstance(data, Orange.data.Instance):
column = np.array([float(data[self.variable])])
else:
column = np.array(data.get_column_view(self.variable)[0], copy=True)
mask = np.isnan(column)
if not np.any(mask):
return column
if isinstance(data, Orange.data.Instance):
predicted = self.model(data)
else:
predicted = self.model(data[mask])
column[mask] = predicted
return column
class Model(BaseImputeMethod):
_name = "Model-based imputer"
short_name = "model"
description = ""
format = BaseImputeMethod.format + " ({self.learner.name})"
@property
def name(self):
return "{} ({})".format(self._name, getattr(self.learner, "name", ""))
def __init__(self, learner):
self.learner = learner
def __call__(self, data, variable):
variable = data.domain[variable]
domain = domain_with_class_var(data.domain, variable)
if self.learner.check_learner_adequacy(domain):
data = data.transform(domain)
model = self.learner(data)
assert model.domain.class_var == variable
return variable.copy(compute_value=ReplaceUnknownsModel(variable, model))
else:
raise ValueError(
"`{}` doesn't support domain type".format(self.learner.name)
)
def copy(self):
return Model(self.learner)
def supports_variable(self, variable):
domain = Orange.data.Domain([], class_vars=variable)
return self.learner.check_learner_adequacy(domain)
def domain_with_class_var(domain, class_var):
"""
Return a domain with class_var as output domain.class_var.
If class_var is in the input domain's attributes it is removed from the
output's domain.attributes.
"""
if domain.class_var is class_var:
return domain
elif class_var in domain.attributes:
attrs = [var for var in domain.attributes if var is not class_var]
else:
attrs = domain.attributes
return Orange.data.Domain(attrs, class_var)
class IsDefined(Transformation):
def transform(self, c):
if sp.issparse(c):
c = c.toarray()
return ~np.isnan(c)
class AsValue(BaseImputeMethod):
name = "As a distinct value"
short_name = "new value"
description = ""
def __call__(self, data, variable):
variable = data.domain[variable]
if variable.is_discrete:
fmt = "{var.name}"
value = "N/A"
var = Orange.data.DiscreteVariable(
fmt.format(var=variable),
values=variable.values + [value],
base_value=variable.base_value,
compute_value=Lookup(
variable,
np.arange(len(variable.values), dtype=int),
unknown=len(variable.values),
),
sparse=variable.sparse,
)
return var
elif variable.is_continuous:
fmt = "{var.name}_def"
indicator_var = Orange.data.DiscreteVariable(
fmt.format(var=variable),
values=("undef", "def"),
compute_value=IsDefined(variable),
sparse=variable.sparse,
)
stats = basic_stats.BasicStats(data, variable)
return (
variable.copy(compute_value=ReplaceUnknowns(variable, stats.mean)),
indicator_var,
)
else:
raise TypeError(type(variable))
class ReplaceUnknownsRandom(Transformation):
"""
A column transformation replacing unknowns with values drawn randomly from
an empirical distribution.
Parameters
----------
variable : Orange.data.Variable
The target variable for imputation.
distribution : Orange.statistics.distribution.Distribution
The corresponding sampling distribution
"""
def __init__(self, variable, distribution):
assert distribution.size > 0
assert distribution.variable == variable
super().__init__(variable)
self.distribution = distribution
if variable.is_discrete:
counts = np.array(distribution)
elif variable.is_continuous:
counts = np.array(distribution)[1, :]
else:
raise TypeError("Only discrete and continuous " "variables are supported")
csum = np.sum(counts)
if csum > 0:
self.sample_prob = counts / csum
else:
self.sample_prob = np.ones_like(counts) / len(counts)
def transform(self, c):
if not sp.issparse(c):
c = np.array(c, copy=True)
else:
c = c.toarray().ravel()
nanindices = np.flatnonzero(np.isnan(c))
if self.variable.is_discrete:
sample = np.random.choice(
len(self.variable.values),
size=len(nanindices),
replace=True,
p=self.sample_prob,
)
else:
sample = np.random.choice(
np.asarray(self.distribution)[0, :],
size=len(nanindices),
replace=True,
p=self.sample_prob,
)
c[nanindices] = sample
return c
class Random(BaseImputeMethod):
name = "Random values"
short_name = "random"
description = "Replace with a random value"
def __call__(self, data, variable):
variable = data.domain[variable]
dist = distribution.get_distribution(data, variable)
# A distribution is invalid if a continuous variable's column does not
# contain any known values or if a discrete variable's .values == []
isinvalid = dist.size == 0
if isinvalid and variable.is_discrete:
assert len(variable.values) == 0
raise ValueError("'{}' has no values".format(variable))
elif isinvalid and variable.is_continuous:
raise ValueError("'{}' has an unknown distribution".format(variable))
if variable.is_discrete and np.sum(dist) == 0:
dist += 1 / len(dist)
elif variable.is_continuous and np.sum(dist[1, :]) == 0:
dist[1, :] += 1 / dist.shape[1]
return variable.copy(compute_value=ReplaceUnknownsRandom(variable, dist))
| 29.844444 | 86 | 0.605454 | 9,846 | 0.916418 | 0 | 0 | 182 | 0.01694 | 0 | 0 | 2,261 | 0.210443 |
ff8a930c6b87ec6cefccbb51fe911fbb227348dc | 5,488 | py | Python | aiida_vasp/workchains/verify.py | muhrin/aiida-vasp | 641fdc2ccd40bdd041e59af1fa3e1dcf9b037415 | [
"MIT"
] | null | null | null | aiida_vasp/workchains/verify.py | muhrin/aiida-vasp | 641fdc2ccd40bdd041e59af1fa3e1dcf9b037415 | [
"MIT"
] | null | null | null | aiida_vasp/workchains/verify.py | muhrin/aiida-vasp | 641fdc2ccd40bdd041e59af1fa3e1dcf9b037415 | [
"MIT"
] | null | null | null | """
Verify workchain.
-----------------
Indented to be used to verify a calculation, perform corrections in inputs files and
restart depending on physical principles etc. E.g. issues that are outside the Calculators awereness,
or not currently checked in it. This workchain does currently nothing.
"""
# pylint: disable=attribute-defined-outside-init
from aiida.common.extendeddicts import AttributeDict
from aiida.engine import WorkChain, while_, append_
from aiida.plugins import WorkflowFactory
from aiida_vasp.utils.aiida_utils import get_data_class, get_data_node
from aiida_vasp.utils.workchains import prepare_process_inputs, compose_exit_code
class VerifyWorkChain(WorkChain):
"""Verify the calculations based on basic principles from physics, chemistry and material science."""
_verbose = False
_next_workchain_string = 'vasp.vasp'
_next_workchain = WorkflowFactory(_next_workchain_string)
@classmethod
def define(cls, spec):
super(VerifyWorkChain, cls).define(spec)
spec.expose_inputs(cls._next_workchain)
spec.input('verify.max_iterations',
valid_type=get_data_class('int'),
required=False,
default=get_data_node('int', 1),
help="""
The maximum number of iterations to perform.
""")
spec.exit_code(0, 'NO_ERROR', message='the sun is shining')
spec.exit_code(420, 'ERROR_NO_CALLED_WORKCHAIN', message='no called workchain detected')
spec.exit_code(500, 'ERROR_UNKNOWN', message='unknown error detected in the verify workchain')
spec.outline(
cls.initialize,
while_(cls.run_next_workchains)(
cls.init_next_workchain,
cls.run_next_workchain,
cls.verify_next_workchain
),
cls.finalize
) # yapf: disable
spec.expose_outputs(cls._next_workchain)
def initialize(self):
"""Initialize."""
self._init_context()
self._init_inputs()
def _init_context(self):
"""Initialize context variables that are used during the logical flow."""
self.ctx.exit_code = self.exit_codes.ERROR_UNKNOWN # pylint: disable=no-member
self.ctx.is_finished = False
self.ctx.iteration = 0
self.ctx.inputs = AttributeDict()
def _init_inputs(self):
"""Initialize inputs."""
try:
self._verbose = self.inputs.verbose.value
self.ctx.inputs.verbose = self.inputs.verbose
except AttributeError:
pass
def run_next_workchains(self):
"""
Return whether a new calculation should be run.
This is the case as long as the last calculation has not finished successfully and the maximum number of restarts
has not yet been exceeded.
"""
return not self.ctx.is_finished and self.ctx.iteration <= self.inputs.verify.max_iterations.value
def init_next_workchain(self):
"""Initialize the next workchain."""
self.ctx.iteration += 1
try:
self.ctx.inputs
except AttributeError:
raise ValueError('No input dictionary was defined in self.ctx.inputs')
# Add exposed inputs
self.ctx.inputs.update(self.exposed_inputs(self._next_workchain))
# Make sure we do not have any floating dict (convert to Dict)
self.ctx.inputs = prepare_process_inputs(self.ctx.inputs)
def run_next_workchain(self):
"""Run the next workchain."""
inputs = self.ctx.inputs
running = self.submit(self._next_workchain, **inputs)
self.report('launching {}<{}> iteration #{}'.format(self._next_workchain.__name__, running.pk, self.ctx.iteration))
return self.to_context(workchains=append_(running))
def verify_next_workchain(self):
"""
Correct for unexpected physics/chemistry/material science behavior.
Here we should correct all things that voids what we expect from
physics/chemistry/material science. I.e. things that cannot be corrected for at the
calculation level (simple restarts etc.).
"""
# Currently only set to finished on first go.
self.ctx.is_finished = True
try:
workchain = self.ctx.workchains[-1]
except IndexError:
self.report('There is no {} in the called workchain list.'.format(self._next_workchain.__name__))
return self.exit_codes.ERROR_NO_CALLED_WORKCHAIN # pylint: disable=no-member
# Inherit exit status from last workchain (supposed to be
# successfull)
next_workchain_exit_status = workchain.exit_status
next_workchain_exit_message = workchain.exit_message
if not next_workchain_exit_status:
self.ctx.exit_code = self.exit_codes.NO_ERROR # pylint: disable=no-member
else:
self.ctx.exit_code = compose_exit_code(next_workchain_exit_status, next_workchain_exit_message)
self.report('The called {}<{}> returned a non-zero exit status. '
'The exit status {} is inherited'.format(workchain.__class__.__name__, workchain.pk, self.ctx.exit_code))
return self.ctx.exit_code
def finalize(self):
"""Finalize the workchain."""
workchain = self.ctx.workchains[-1]
self.out_many(self.exposed_outputs(workchain, self._next_workchain))
| 39.482014 | 129 | 0.664723 | 4,832 | 0.880466 | 0 | 0 | 1,039 | 0.189322 | 0 | 0 | 1,989 | 0.362427 |
ff8af24bea3735f79c333dd490eca9b96b47bda2 | 113 | py | Python | cytominer_eval/operations/__init__.py | hillsbury/cytominer-eval | 56bd9e545d4ce5dea8c2d3897024a4eb241d06db | [
"BSD-3-Clause"
] | null | null | null | cytominer_eval/operations/__init__.py | hillsbury/cytominer-eval | 56bd9e545d4ce5dea8c2d3897024a4eb241d06db | [
"BSD-3-Clause"
] | null | null | null | cytominer_eval/operations/__init__.py | hillsbury/cytominer-eval | 56bd9e545d4ce5dea8c2d3897024a4eb241d06db | [
"BSD-3-Clause"
] | null | null | null | from .percent_strong import percent_strong
from .precision_recall import precision_recall
from .grit import grit
| 28.25 | 46 | 0.867257 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ff8c823fffa1bf5ff551023b0ae5196140f382b1 | 3,086 | py | Python | storm_analysis/spliner/find_peaks_std.py | bintulab/storm-analysis | 71ae493cbd17ddb97938d0ae2032d97a0eaa76b2 | [
"CNRI-Python"
] | null | null | null | storm_analysis/spliner/find_peaks_std.py | bintulab/storm-analysis | 71ae493cbd17ddb97938d0ae2032d97a0eaa76b2 | [
"CNRI-Python"
] | null | null | null | storm_analysis/spliner/find_peaks_std.py | bintulab/storm-analysis | 71ae493cbd17ddb97938d0ae2032d97a0eaa76b2 | [
"CNRI-Python"
] | null | null | null | #!/usr/bin/env python
"""
Cubic spline peak finder.
Hazen 03/16
"""
import pickle
import numpy
import tifffile
import storm_analysis.sa_library.analysis_io as analysisIO
import storm_analysis.sa_library.fitting as fitting
import storm_analysis.sa_library.ia_utilities_c as utilC
import storm_analysis.sa_library.matched_filter_c as matchedFilterC
import storm_analysis.spliner.cubic_fit_c as cubicFitC
import storm_analysis.spliner.spline_to_psf as splineToPSF
def initFitter(finder, parameters, spline_fn):
"""
Initialize and return a cubicFitC.CSplineFit object.
"""
# Load variance, scale by gain.
#
# Offset is in units of ADU.
# Variance is in units of ADU*ADU.
# Gain is ADU/photo-electron.
# RQE is dimensionless, it should be around 1.0.
#
rqe = None
variance = None
if parameters.hasAttr("camera_calibration"):
[offset, variance, gain, rqe] = analysisIO.loadCMOSCalibration(parameters.getAttr("camera_calibration"))
variance = variance/(gain*gain)
# Set variance in the peak finder, this method also pads the
# variance to the correct size.
variance = finder.setVariance(variance)
# Pad relative quantum efficiency array to the correct size.
rqe = finder.padArray(rqe)
# Create C fitter object.
mfitter = None
kwds = {'rqe' : rqe,
'scmos_cal' : variance,
'spline_fn' : spline_fn}
emodel = parameters.getAttr("fit_error_model")
if (spline_fn.getType() == "2D"):
if (emodel == "MLE"):
mfitter = cubicFitC.CSpline2DFit(**kwds)
else:
if (emodel == "MLE"):
return cubicFitC.CSpline3DFit(**kwds)
elif (emodel == "ALS"):
return cubicFitC.CSpline3DFitALS(**kwds)
elif (emodel == "LS"):
return cubicFitC.CSpline3DFitLS(**kwds)
elif (emodel == "FWLS"):
return cubicFitC.CSpline3DFitFWLS(**kwds)
if mfitter is None:
raise Exception("Request error model is not available. " + emodel)
return mfitter
def initFindAndFit(parameters):
"""
Initialize and return a SplinerFinderFitter object.
"""
# Create spline object.
spline_fn = splineToPSF.loadSpline(parameters.getAttr("spline"))
# Create peak finder.
finder = fitting.PeakFinderArbitraryPSF(parameters = parameters,
psf_object = spline_fn)
# Create cubicFitC.CSplineFit object.
mfitter = initFitter(finder, parameters, spline_fn)
# Create peak fitter.
fitter = fitting.PeakFitterArbitraryPSF(mfitter = mfitter,
parameters = parameters)
# Specify which properties we want from the analysis.
properties = ["background", "error", "height", "iterations", "significance", "sum", "x", "y", "z"]
return fitting.PeakFinderFitter(peak_finder = finder,
peak_fitter = fitter,
properties = properties)
| 32.145833 | 112 | 0.63545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 933 | 0.302333 |
ff8d466cb78def77047b6716960d07d37e514d93 | 3,805 | py | Python | server02.py | timgates42/csdesign | dd63c304e1b16ecd65bea072f9360986993df845 | [
"MIT"
] | 116 | 2015-01-07T08:39:32.000Z | 2021-12-21T13:07:53.000Z | server02.py | afcarl/csdesign | dd63c304e1b16ecd65bea072f9360986993df845 | [
"MIT"
] | null | null | null | server02.py | afcarl/csdesign | dd63c304e1b16ecd65bea072f9360986993df845 | [
"MIT"
] | 28 | 2015-01-07T07:27:16.000Z | 2021-07-17T22:26:37.000Z | ###############################################################################
#
# Copyright (c) 2012 Ruslan Spivak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
"""
TCP Concurrent Server, I/O Multiplexing (select).
Single server process to handle any number of clients.
"""
__author__ = 'Ruslan Spivak <ruslan.spivak@gmail.com>'
import os
import sys
import errno
import select
import socket
import optparse
BACKLOG = 5
def serve_forever(host, port):
# create, bind. listen
lstsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# re-use the port
lstsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# put listening socket into non-blocking mode
lstsock.setblocking(0)
lstsock.bind((host, port))
lstsock.listen(BACKLOG)
print 'Listening on port %d ...' % port
# read, write, exception lists with sockets to poll
rlist, wlist, elist = [lstsock], [], []
while True:
# block in select
readables, writables, exceptions = select.select(rlist, wlist, elist)
for sock in readables:
if sock is lstsock: # new client connection, we can accept now
try:
conn, client_address = lstsock.accept()
except IOError as e:
code, msg = e.args
if code == errno.EINTR:
continue
else:
raise
# add the new connection to the 'read' list to poll
# in the next loop cycle
rlist.append(conn)
else:
# read a line that tells us how many bytes to write
bytes = sock.recv(1024)
if not bytes: # connection closed by client
sock.close()
rlist.remove(sock)
else:
print ('Got request to send %s bytes. '
'Sending them all...' % bytes)
# send them all
# XXX: this is cheating, we should use 'select' and wlist
# to determine whether socket is ready to be written to
data = os.urandom(int(bytes))
sock.sendall(data)
def main():
parser = optparse.OptionParser()
parser.add_option(
'-i', '--host', dest='host', default='0.0.0.0',
help='Hostname or IP address. Default is 0.0.0.0'
)
parser.add_option(
'-p', '--port', dest='port', type='int', default=2000,
help='Port. Default is 2000')
options, args = parser.parse_args()
serve_forever(options.host, options.port)
if __name__ == '__main__':
main()
| 34.279279 | 79 | 0.59343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,069 | 0.543758 |
ff8ffce3211e8d216d8eb1a826f3e6a2b3bf5022 | 110 | py | Python | synth/__init__.py | lummax/switching-lattice-synth | 47cf9e64c900cb179c392b46a392049e99dfebab | [
"MIT"
] | null | null | null | synth/__init__.py | lummax/switching-lattice-synth | 47cf9e64c900cb179c392b46a392049e99dfebab | [
"MIT"
] | null | null | null | synth/__init__.py | lummax/switching-lattice-synth | 47cf9e64c900cb179c392b46a392049e99dfebab | [
"MIT"
] | null | null | null | import synth.timer
from synth.base import Function
from synth.dp_construction import DualProductConstruction
| 22 | 57 | 0.872727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ff90cad1c5012214e02b97154f23b7b726789505 | 628 | py | Python | ipsc/2016/g.py | csferng/competitive-programming | f7ae710392c0fd606f735df6efcfca5e8fbd3501 | [
"MIT"
] | null | null | null | ipsc/2016/g.py | csferng/competitive-programming | f7ae710392c0fd606f735df6efcfca5e8fbd3501 | [
"MIT"
] | null | null | null | ipsc/2016/g.py | csferng/competitive-programming | f7ae710392c0fd606f735df6efcfca5e8fbd3501 | [
"MIT"
] | null | null | null | import collections
import itertools
import re
import sys
read_str = lambda : sys.stdin.readline().strip()
read_str_list = lambda : sys.stdin.readline().strip().split()
read_int = lambda : int(read_str())
read_int_list = lambda : map(int, read_str_list())
read_float = lambda : float(read_str())
read_float_list = lambda : map(float, read_str_list())
def solve(S):
dgt = ''.join(re.findall(r'[0-9]+', S))
while len(dgt) > 1 and dgt[0] == '0':
dgt = dgt[1:]
return dgt
def main():
T = read_int()
for _ in xrange(T):
read_str()
S = read_str()
ans = solve(S)
print '%s' % (ans)
if __name__ == "__main__":
main()
| 21.655172 | 61 | 0.659236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.044586 |
ff91159e63cb1bbf4e041487fffe2ea77828c464 | 1,051 | py | Python | book/sunglasses.py | gcvalderrama/python_foundations | 5ac045085dcc6c906729b481f833fa6a7889bd19 | [
"MIT"
] | null | null | null | book/sunglasses.py | gcvalderrama/python_foundations | 5ac045085dcc6c906729b481f833fa6a7889bd19 | [
"MIT"
] | null | null | null | book/sunglasses.py | gcvalderrama/python_foundations | 5ac045085dcc6c906729b481f833fa6a7889bd19 | [
"MIT"
] | null | null | null | import unittest
def navigation(rows, cols, x, y):
if 1 <= x <= rows - 2 and 1 <= y <= cols - 2:
return [(x - 1 , y - 1), ( x - 1, y), ( x - 1, y + 1),
( x , y),
(x + 1 , y - 1), ( x + 1, y), ( x + 1, y + 1)]
return []
def hourglassSum(arr):
if not arr:
return 0
rows = len(arr)
cols = len(arr[0])
maximun = -10000000
for i in range(1, rows):
for j in range(1, cols):
temp = navigation(rows, cols, i, j)
acc = -10000000
for pivot in temp:
acc += arr[pivot[0]][pivot[1]]
maximun = max(acc, maximun)
return maximun
class Test(unittest.TestCase):
def test_case(self):
arr = [[-1, -1, 0, -9, -2, -2],
[-2, -1, -6, -8, -2, -5],
[-1, -1, -1, -2, -3, -4],
[-1, -9, -2, -4, -4, -5],
[-7, -3, -3, -2, -9, -9],
[-1, -3, -1, -2, -4, -5]]
result = hourglassSum(arr)
print(result)
| 25.634146 | 62 | 0.385347 | 358 | 0.340628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ff936e93a7e1cf7cfc3a635b054cc4fd8c3b5c44 | 4,126 | py | Python | TakeBlipPosTagger/mlflowlogger.py | guireis1/testpos | efe22724d4c3512dd5336dc9788704793793122d | [
"MIT"
] | null | null | null | TakeBlipPosTagger/mlflowlogger.py | guireis1/testpos | efe22724d4c3512dd5336dc9788704793793122d | [
"MIT"
] | null | null | null | TakeBlipPosTagger/mlflowlogger.py | guireis1/testpos | efe22724d4c3512dd5336dc9788704793793122d | [
"MIT"
] | null | null | null | import os
import psutil
import mlflow
import seaborn as sns
import matplotlib.pyplot as plt
def save_confusion_matrix_from_tensor(confusion_matrix, labels,
current_epoch, save_dir):
"""Receive a confusion matrix from tensor, generate a image
with seaborn and save as .png in mlflow experiment
Args:
confusion_matrix (torch.Tensor): Tensor of confusion matrix
labels (list): Classification labels
current_epoch (int): Current epoch number
save_dir (str): Directory to save
"""
image_file_name = 'confusion_matrix_validation_{}.png'.format(
current_epoch)
plt.figure(figsize=(16, 10))
matrix = sns.heatmap(confusion_matrix.long().numpy(), annot=True,
cmap=plt.cm.Blues, xticklabels=labels, yticklabels=labels,
fmt='d')
plt.yticks(rotation=0)
plt.savefig(os.path.join(save_dir, image_file_name))
mlflow.log_artifact(os.path.join(
save_dir, image_file_name), artifact_path="images")
def save_metrics(confusion_matrix, labels):
"""Receive a confusion matrix from tensor,
calculates desirable metrics and log in mlflow experiment
Args:
confusion_matrix (torch.Tensor): Tensor of confusion matrix
labels (list): Classification labels
"""
precision = confusion_matrix.diag() / confusion_matrix.sum(dim=0)
recall = confusion_matrix.diag() / confusion_matrix.sum(dim=1)
f1_score = 2*(precision*recall / (precision + recall))
for index, label in enumerate(labels):
mlflow.log_metric(label + ' F1-score', f1_score[index].numpy().item())
mlflow.log_metric('Model Precision',
precision[precision >= 0].mean().numpy().item())
mlflow.log_metric(
'Model Recall', recall[recall >= 0].mean().numpy().item())
mlflow.log_metric('Model F1-score',
f1_score[f1_score >= 0].mean().numpy().item())
def save_report(report):
"""Receive a metric report and log in mlflow experiment
Args:
report (dict): Dictionary of calculated metrics
"""
mlflow.log_metric('Accuracy', report['accuracy'])
mlflow.log_metric('Precision - Macro Avg',
report['macro avg']['precision'])
mlflow.log_metric('Recall - Macro Avg', report['macro avg']['recall'])
mlflow.log_metric('F1-score - Macro Avg', report['macro avg']['f1-score'])
mlflow.log_metric('Precision - Weighted Avg',
report['weighted avg']['precision'])
mlflow.log_metric('Recall - Weighted Avg',
report['weighted avg']['recall'])
mlflow.log_metric('F1-score - Weighted Avg',
report['weighted avg']['f1-score'])
def save_system_metrics():
"""Log system metrics in mlflow experiment
"""
mlflow.log_metric('cpu_percent', float(psutil.cpu_percent()))
mlflow.log_metric('memory_percent', float(psutil.virtual_memory().percent))
def save_model(model, model_name):
"""Save model as artifact in mlflow experiment
Args:
model: Trained LSTM model on pytorch
model_name (str): Name of the saved model
"""
mlflow.sklearn.log_model(model, artifact_path='models',
registered_model_name=model_name)
def save_predict(save_dir, file_name):
"""Save predict file as artifact in mlflow experiment
Args:
save_dir (str): Directory of the file
file_name (str): File name
"""
mlflow.log_artifact(os.path.join(save_dir, file_name),
artifact_path="data")
def save_param(name, variable):
"""Save parameter in mlflow experiment
Args:
name (str): Name to log on mlflow
variable (Union[int, float]): Variable that is going to be logged
"""
mlflow.log_param(name, variable)
def save_metric(name, variable):
"""Save metric in mlflow experiment
Args:
name (str): Name to log on mlflow
variable (Union[int, float]): Variable that is going to be logged
"""
mlflow.log_metric(name, variable)
| 34.383333 | 83 | 0.646631 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,852 | 0.448861 |
ff948cf51a93bea99afde69a0e455b73c180d9a7 | 2,219 | py | Python | scripts/camera_module/camera_module.py | dos-group/gardening-adventure | 1d5eb99dedd3426b6451a17c1a3b62a5fd767d72 | [
"Apache-2.0"
] | 5 | 2019-11-08T17:11:54.000Z | 2020-04-19T17:32:07.000Z | scripts/camera_module/camera_module.py | dos-group/gardening-adventure | 1d5eb99dedd3426b6451a17c1a3b62a5fd767d72 | [
"Apache-2.0"
] | 48 | 2019-11-07T19:39:49.000Z | 2020-03-27T20:44:51.000Z | scripts/camera_module/camera_module.py | citlab/gardening-adventure | 1d5eb99dedd3426b6451a17c1a3b62a5fd767d72 | [
"Apache-2.0"
] | 3 | 2019-11-11T18:58:51.000Z | 2020-09-30T20:47:52.000Z | from argparse import ArgumentParser
from picamera import PiCamera
from time import sleep
from pathlib import Path
import os
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--resolution',
dest='res',
default='1920-1080',
help='Supported resolutions: 1920-1080, 3280-2464, 1640-1232, 1640-922, 1280-720, 640-480')
parser.add_argument('--output',
dest='out_folder',
default='/camera_output/continuous_captures/',
help='Location to store captured photos.')
parser.add_argument('--interval',
dest='interval',
default=10,
help='Time interval between capture. Default value is 10 seconds.')
parser.add_argument('--iso',
dest='iso',
default=100,
help='Camera ISO value. Default value is 100')
# parse command line arguments
args = parser.parse_args()
# parse resolution
res = args.res.split('-')
res_width = int(res[0])
res_height = int(res[1])
# parse output location
output_folder = args.out_folder
# parse time interval
interval = int(args.interval)
# parse Camera ISO
iso = int(args.iso)
# initialize camera
camera = PiCamera()
camera_wakeup = camera_cooldown = 2
# set camera resolution
camera.resolution = (res_width, res_height)
# set camera ISO
camera.iso = iso
# wait for automatic gain control to settle
sleep(camera_wakeup)
# set output folder
Path(output_folder).mkdir(parents=True, exist_ok=True)
os.chdir(output_folder)
camera.start_preview()
sleep(camera_wakeup)
while True:
for filename in camera.capture_continuous(os.environ['DEVICE_NAME'] + '_img{timestamp:%Y-%m-%d-%H-%M-%S}.jpg'):
camera.start_preview()
sleep(camera_wakeup)
print('image captured... %s' % filename)
sleep(camera_cooldown)
camera.stop_preview()
sleep(interval - camera_wakeup - camera_cooldown)
| 28.818182 | 119 | 0.587652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 662 | 0.298333 |
ff95ea9c8a58457600ab644dcc9310c524b6fa2e | 11,719 | py | Python | nova/scheduler/solvers/pluggable_hosts_pulp_solver.py | rishabh1jain1/schedwise | b8c56362b11767cc583ea96b42dc28c408bb76cd | [
"Apache-2.0"
] | null | null | null | nova/scheduler/solvers/pluggable_hosts_pulp_solver.py | rishabh1jain1/schedwise | b8c56362b11767cc583ea96b42dc28c408bb76cd | [
"Apache-2.0"
] | 5 | 2020-06-05T17:58:28.000Z | 2022-02-11T03:39:35.000Z | nova/scheduler/solvers/pluggable_hosts_pulp_solver.py | rishabh1jain1/schedwise | b8c56362b11767cc583ea96b42dc28c408bb76cd | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pulp import constants
from pulp import pulp
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler import solvers as scheduler_solver
LOG = logging.getLogger(__name__)
class HostsPulpSolver(scheduler_solver.BaseHostSolver):
"""A LP based pluggable LP solver implemented using PULP modeler."""
def __init__(self):
self.cost_classes = self._get_cost_classes()
self.constraint_classes = self._get_constraint_classes()
self.cost_weights = self._get_cost_weights()
def update_with_soft_affinity_constraints_and_objective(self,variables,prob, num_hosts, num_instances):
#Adding column sum variables whose value is 1 if there is any instance on that host
column_sum_var = []
for i in range(num_hosts):
column_sum_var.append(pulp.LpVariable("Normalised_Column_Sum_Host_"+str(i), 0, 1, constants.LpInteger))
#Adding normalisation constraint
for i in range(num_hosts):
prob += pulp.lpSum([variables[i][j]] for j in range(num_instances)) <= num_instances*column_sum_var[i]
prob += column_sum_var[i] <= pulp.lpSum([variables[i][j]] for j in range(num_instances))
z_variables =[]
#Adding 'z' variables
for i in range(num_hosts):
for j in range(num_hosts):
if i != j:
z_variables.append(pulp.LpVariable("Z_variable_Col_"+str(i)+"Col_"+str(j), 0, 1, constants.LpInteger))
temp = 0
for i in range(num_hosts):
for j in range(num_hosts):
if i != j:
prob += column_sum_var[i] + column_sum_var[j] <= z_variables[temp] + 1
prob += 2 * z_variables[temp]<=column_sum_var[i] + column_sum_var[j]
#print str(temp) + " " + str(z_variables[temp])
temp = temp + 1
#Adding the objective
prob+=z_variables[0] * 0 + z_variables[1] * 3 + z_variables[2] * 1 + z_variables[4] * 3 + z_variables[5] * 5 + z_variables[8] * 4
return prob
def update_with_strict_affinity_constraints_and_objective(self,variables,prob, num_hosts, num_instances):
#Adding column sum variables whose value is 1 if there is any instance on that host
column_sum_var = []
for i in range(num_hosts):
column_sum_var.append(pulp.LpVariable("Normalised_Column_Sum_Host_"+str(i), 0, 1, constants.LpInteger))
#Adding normalisation constraint
for i in range(num_hosts):
prob += pulp.lpSum([variables[i][j]] for j in range(num_instances)) <= num_instances*column_sum_var[i]
prob += column_sum_var[i] <= pulp.lpSum([variables[i][j]] for j in range(num_instances))
prob += pulp.lpSum([column_sum_var[i]] for i in range(num_hosts)) == 1
return prob
def update_with_strict_anti_affinity_constraints_and_objective(self,variables,prob, num_hosts, num_instances):
#Adding column sum variables whose value is 1 if there is any instance on that host
column_sum_var = []
for i in range(num_hosts):
column_sum_var.append(pulp.LpVariable("Normalised_Column_Sum_Host_"+str(i), 0, 1, constants.LpInteger))
#Adding normalisation constraint
for i in range(num_hosts):
prob += pulp.lpSum([variables[i][j]] for j in range(num_instances)) <= num_instances*column_sum_var[i]
prob += column_sum_var[i] <= pulp.lpSum([variables[i][j]] for j in range(num_instances))
prob += pulp.lpSum([column_sum_var[i]] for i in range(num_hosts)) == num_instances
return prob
def update_with_soft_anti_affinity_constraints_and_objective(self, variables, prob, num_hosts, num_instances):
    """Prefer, but do not require, spreading instances across hosts.

    Adds per-host occupancy indicators linked to the placement matrix,
    plus an objective term that rewards using more hosts (the problem is
    a minimisation, so the occupied-host count enters negated).
    """
    host_used = []
    for idx in range(num_hosts):
        host_used.append(pulp.LpVariable("Normalised_Column_Sum_Host_" + str(idx), 0, 1, constants.LpInteger))
    for idx in range(num_hosts):
        col_total = pulp.lpSum(variables[idx][j] for j in range(num_instances))
        # host_used[idx] == 1 exactly when this column carries any instance.
        prob += col_total <= num_instances * host_used[idx]
        prob += host_used[idx] <= col_total
    # Objective contribution: maximise occupied hosts by minimising the negation.
    prob += -1 * pulp.lpSum(host_used)
    return prob
def update_with_host_count_constraints_and_objective(self, variables, prob, num_hosts, num_instances, limit):
    """Cap the number of distinct hosts that may receive instances.

    :param limit: maximum number of hosts allowed to be occupied
        (coerced to int before use)
    """
    used = [
        pulp.LpVariable("Normalised_Column_Sum_Host_" + str(i), 0, 1, constants.LpInteger)
        for i in range(num_hosts)
    ]
    for i in range(num_hosts):
        total = pulp.lpSum(variables[i][j] for j in range(num_instances))
        # used[i] mirrors whether host i's column is non-empty.
        prob += total <= num_instances * used[i]
        prob += used[i] <= total
    # At most `limit` hosts may be occupied.
    prob += pulp.lpSum(used) <= int(limit)
    return prob
def host_solve(self, hosts, instance_uuids, request_spec,
               filter_properties):
    """This method returns a list of tuples - (host, instance_uuid)
    that are returned by the solver. Here the assumption is that
    all instance_uuids have the same requirement as specified in
    filter_properties.

    Builds a 0/1 integer program over a hosts x instances placement
    matrix, folds in weighted cost matrices and pluggable constraint
    objects, applies the (anti-)affinity policy named in
    filter_properties['instance_type']['constraint'], solves with
    PuLP's default solver and decodes the optimal assignment.
    """
    host_instance_tuples_list = []
    # Python 2 print statement (module is Python 2); debug output only.
    print filter_properties['instance_type']['memory_mb']
    if instance_uuids:
        num_instances = len(instance_uuids)
    else:
        num_instances = request_spec.get('num_instances', 1)
        # Setting a unset uuid string for each instance.
        instance_uuids = ['unset_uuid' + str(i)
                          for i in xrange(num_instances)]
    num_hosts = len(hosts)
    LOG.debug(_("All Hosts: %s") % [h.host for h in hosts])
    for host in hosts:
        LOG.debug(_("Host state: %s") % host)
    # Create dictionaries mapping host/instance IDs to hosts/instances.
    # The synthetic IDs ('Host0', 'Instance3', ...) are embedded in the LP
    # variable names and recovered again when decoding the solution below.
    host_ids = ['Host' + str(i) for i in range(num_hosts)]
    host_id_dict = dict(zip(host_ids, hosts))
    instance_ids = ['Instance' + str(i) for i in range(num_instances)]
    instance_id_dict = dict(zip(instance_ids, instance_uuids))
    # Create the 'prob' variable to contain the problem data.
    prob = pulp.LpProblem("Host Instance Scheduler Problem",
                          constants.LpMinimize)
    # Create the 'variables' matrix to contain the referenced variables.
    # variables[i][j] == 1 means "instance j is placed on host i".
    variables = [[pulp.LpVariable("IA" + "_Host" + str(i) + "_Instance" +
                 str(j), 0, 1, constants.LpInteger) for j in
                 range(num_instances)] for i in range(num_hosts)]
    # Get costs and constraints and formulate the linear problem.
    self.cost_objects = [cost() for cost in self.cost_classes]
    self.constraint_objects = [constraint(variables, hosts,
                               instance_uuids, request_spec, filter_properties)
                               for constraint in self.constraint_classes]
    # Accumulate the weighted sum of all normalised cost matrices.
    costs = [[0 for j in range(num_instances)] for i in range(num_hosts)]
    for cost_object in self.cost_objects:
        cost = cost_object.get_cost_matrix(hosts, instance_uuids,
                                           request_spec, filter_properties)
        cost = cost_object.normalize_cost_matrix(cost, 0.0, 1.0)
        weight = float(self.cost_weights[cost_object.__class__.__name__])
        costs = [[costs[i][j] + weight * cost[i][j]
                 for j in range(num_instances)] for i in range(num_hosts)]
    # Objective: minimise total weighted placement cost.
    prob += (pulp.lpSum([costs[i][j] * variables[i][j]
             for i in range(num_hosts) for j in range(num_instances)]),
             "Sum_of_Host_Instance_Scheduling_Costs")
    # Each constraint object contributes coefficient/variable vectors and a
    # comparison operation per row, which are rebuilt as pulp constraints.
    for constraint_object in self.constraint_objects:
        coefficient_vectors = constraint_object.get_coefficient_vectors(
            variables, hosts, instance_uuids,
            request_spec, filter_properties)
        variable_vectors = constraint_object.get_variable_vectors(
            variables, hosts, instance_uuids,
            request_spec, filter_properties)
        operations = constraint_object.get_operations(
            variables, hosts, instance_uuids,
            request_spec, filter_properties)
        for i in range(len(operations)):
            operation = operations[i]
            len_vector = len(variable_vectors[i])
            # NOTE: "Costraint" typo is kept — it is part of the runtime
            # constraint-name string.
            prob += (operation(pulp.lpSum([coefficient_vectors[i][j]
                     * variable_vectors[i][j] for j in range(len_vector)])),
                     "Costraint_Name_%s" % constraint_object.__class__.__name__
                     + "_No._%s" % i)
    # Dump the formulated LP for offline debugging.
    prob.writeLP('test.lp')
    # Apply the requested (anti-)affinity policy; each helper mutates and
    # returns the same problem object.
    if filter_properties['instance_type']['constraint'] == "soft_affinity":
        prob = self.update_with_soft_affinity_constraints_and_objective(variables,prob,num_hosts,num_instances)
    elif filter_properties['instance_type']['constraint'] == "strict_affinity":
        prob = self.update_with_strict_affinity_constraints_and_objective(variables,prob,num_hosts,num_instances)
    elif filter_properties['instance_type']['constraint'] == "strict_antiaffinity":
        prob = self.update_with_strict_anti_affinity_constraints_and_objective(variables,prob,num_hosts,num_instances)
    elif filter_properties['instance_type']['constraint'] == "soft_antiaffinity":
        prob = self.update_with_soft_anti_affinity_constraints_and_objective(variables,prob,num_hosts,num_instances)
    else:
        # Fallback: a host-count policy; the limit is assumed to be the 4th
        # underscore-separated token of the constraint string — TODO confirm
        # the expected format against the caller.
        temp = filter_properties['instance_type']['constraint']
        temp = temp.split("_")
        prob = self.update_with_host_count_constraints_and_objective(variables,prob,num_hosts,num_instances,temp[3])
    print prob
    # The problem is solved using PULP's choice of Solver.
    prob.solve()
    # Create host-instance tuples from the solutions.
    if pulp.LpStatus[prob.status] == 'Optimal':
        for v in prob.variables():
            if v.name.startswith('IA'):
                # lstrip strips *characters* 'I'/'A' (not a prefix); this
                # works because the '_' separator stops the strip at 'IA'.
                (host_id, instance_id) = v.name.lstrip('IA').lstrip(
                    '_').split('_')
                if v.varValue == 1.0:
                    host_instance_tuples_list.append(
                        (host_id_dict[host_id],
                         instance_id_dict[instance_id]))
    return host_instance_tuples_list
| 49.65678 | 138 | 0.631112 | 10,839 | 0.924908 | 0 | 0 | 0 | 0 | 0 | 0 | 2,632 | 0.224593 |
ff96d3a3b7aacb1f5da4484d7bd890372ab32f78 | 1,092 | py | Python | run/gen-explicit-fee-schedules.py | three-Vs/hedera-services | 3e3c6e1815ecb545d5a65e5ff4d87c46d365112e | [
"Apache-2.0"
] | 164 | 2020-08-06T17:02:44.000Z | 2022-03-30T15:56:55.000Z | run/gen-explicit-fee-schedules.py | three-Vs/hedera-services | 3e3c6e1815ecb545d5a65e5ff4d87c46d365112e | [
"Apache-2.0"
] | 2,439 | 2020-08-06T17:06:15.000Z | 2022-03-31T23:38:38.000Z | run/gen-explicit-fee-schedules.py | shemnon/hedera-services | 042d0763738bfc839ecea81c38094fe9181e803b | [
"Apache-2.0"
] | 51 | 2020-08-06T18:53:48.000Z | 2022-02-03T13:12:37.000Z | ###
# A script to convert the Services-consumable feeSchedules.json
# into the "typed" format used by the public pricing calculator.
###
import json

# Per-entry price providers expected in every typed price record.
providers = ['nodedata', 'networkdata', 'servicedata']
typed_schedules = {}
with open('hedera-node/src/main/resources/feeSchedules.json', 'r') as fin:
    cur_and_next_schedules = json.load(fin)
# Only the current schedule is converted; the "next" schedule is ignored.
schedules = cur_and_next_schedules[0]['currentFeeSchedule']
for tfs in schedules:
    # The trailing expiry marker terminates the schedule list.
    if 'expiryTime' in tfs:
        break
    tfs = tfs['transactionFeeSchedule']
    function = tfs['hederaFunctionality']
    prices_list = tfs['fees']
    # Re-key the fee list by its subType (defaulting to 'DEFAULT'),
    # keeping only the three provider price maps per type.
    prices_by_type = {}
    for typed_prices in prices_list:
        this_type = typed_prices.get('subType', 'DEFAULT')
        this_type_prices = {}
        for provider in providers:
            this_type_prices[provider] = typed_prices[provider]
        prices_by_type[this_type] = this_type_prices
    typed_schedules[function] = prices_by_type
with open('typedFeeSchedules.json', 'w') as fout:
    json.dump(typed_schedules, fout, indent=2)
| 36.4 | 74 | 0.667582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 353 | 0.32326 |
ff97c39597a36001944f8a9c37b2bf4f5888de12 | 839 | py | Python | secparse/config.py | clwatkins/sec_edgar_parser | 6ccf74eae14407e64ad033bb74d422d7f1c91495 | [
"MIT"
] | 8 | 2019-05-18T21:11:18.000Z | 2021-05-22T02:04:03.000Z | secparse/config.py | clwatkins/sec_edgar_parser | 6ccf74eae14407e64ad033bb74d422d7f1c91495 | [
"MIT"
] | null | null | null | secparse/config.py | clwatkins/sec_edgar_parser | 6ccf74eae14407e64ad033bb74d422d7f1c91495 | [
"MIT"
] | 3 | 2020-06-24T13:47:15.000Z | 2021-03-26T14:20:15.000Z | from pathlib import Path
import requests_oauthlib

# Define where filing data will be stored -- defaults to folder within user's home directory
ROOT_DIR = Path.home().joinpath("sec_parse_data")
DB_FILE_LOC = ROOT_DIR.joinpath("sec_parse_db.sqlite3")

# Yahoo API key (some finance tables require authorisation)
# NOTE(review): OAuth1 credentials are committed in source; they should be
# loaded from the environment or a secrets store instead.
AUTH = requests_oauthlib.OAuth1(
    'dj0yJmk9anpzUDNHSjdoaEZvJmQ9WVdrOVIwZDBXRlpTTkdNbWNHbzlNQS0tJnM9Y29uc3VtZXJzZWNyZXQmeD0xOA--',
    'f839901ff46492b372e81fa8325ab61483f0e538'
)

# Parallelisation config -- number of parallel workers used by the parser.
MULTIPROCESSING_NUMBER = 25

# Valid form types to try parsing -- changing not recommended
VALID_FORMS = ['10-Q', '10-K', '10-Q/A', 'S-4', '8-K']

# DB table name -- changing not recommended
DB_FILING_TABLE = 'filing_info'
DB_FILING_DATA_TABLE = 'filing_data'
DB_COMPANY_TABLE = 'company_info'
DB_SIC_TABLE = 'sic_info'
| 33.56 | 99 | 0.789035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 533 | 0.63528 |
ff99f867f26c3a4bc29359318147a0a261c378c3 | 5,157 | py | Python | howdy/core/core_admin.py | tanimislam/plexstuff | 811dd504e8464df1270a27084ef465c15299b00a | [
"BSD-2-Clause"
] | 9 | 2019-11-10T16:41:24.000Z | 2020-06-17T12:35:42.000Z | howdy/core/core_admin.py | tanimislam/plexstuff | 811dd504e8464df1270a27084ef465c15299b00a | [
"BSD-2-Clause"
] | 2 | 2020-06-27T15:52:22.000Z | 2020-07-29T20:36:07.000Z | howdy/core/core_admin.py | tanimislam/howdy | 81fbe8b5d6a8320f4279d3d29c93405540eaba28 | [
"BSD-2-Clause"
] | 2 | 2019-10-28T10:03:06.000Z | 2020-05-22T18:32:40.000Z | import requests, os, sys, numpy, requests
from plexapi.server import PlexServer
from tqdm import tqdm
#
from howdy.core import core, return_error_raw
def get_tautulli_apikey(username, password, endpoint):
    """
    Retrieve the Tautulli_ API key for the given account credentials.

    :param str username: the Tautulli_ username.
    :param str password: the Tautulli_ password.
    :param str endpoint: the Tautulli_ server endpoint.
    :returns: the Tautulli_ API key.
    :rtype: str
    :raises ValueError: if the server does not answer with HTTP 200.

    .. _Tautulli: https://tautulli.com
    """
    # API reference:
    # https://github.com/Tautulli/Tautulli/wiki/Tautulli-API-Reference#get_apikey
    api_url = os.path.join(endpoint, 'api', 'v2')
    params = {
        'username': username,
        'password': password,
        'cmd': 'get_apikey',
    }
    resp = requests.get(api_url, params=params)
    if resp.status_code != 200:
        raise ValueError("Error, could not find the Tautulli API key.")
    return resp.json()['response']['data']
def get_tautulli_activity(endpoint, apikey):
    """
    Return the current stream activity on the Plex_ server via Tautulli_.

    :param str endpoint: the Tautulli_ server endpoint.
    :param str apikey: the Tautulli_ API Key.
    :returns: a list of per-session dicts (title, type, username,
        progress, and optionally friendly name); empty when nothing
        is streaming.
    :raises ValueError: if the server does not answer with HTTP 200.
    """
    # API reference:
    # https://github.com/Tautulli/Tautulli/wiki/Tautulli-API-Reference#get_activity
    api_url = os.path.join(endpoint, 'api', 'v2')
    resp = requests.get(api_url, params={'apikey': apikey, 'cmd': 'get_activity'})
    if resp.status_code != 200:
        raise ValueError("Error, could not get the activity from the Plex server.")
    payload = resp.json()['response']['data']
    if payload['stream_count'] == 0:
        return []

    def _summarize(session_info):
        # Reduce a raw Tautulli session record to the fields we expose.
        summary = {
            'title': session_info['title'],
            'type': session_info['media_type'].upper(),
            'username': session_info['username'],
            'progress': int(session_info['progress_percent']),
        }
        if 'friendly_name' in session_info:
            summary['friendly name'] = session_info['friendly_name']
        return summary

    return [_summarize(sess) for sess in payload['sessions']]
def plex_check_for_update(token, fullURL='http://localhost:32400'):
    """
    Ask the Plex_ server whether a newer server release exists.

    :param str token: the Plex_ server access token.
    :param str fullURL: the Plex_ server address.
    :returns: on success, a tuple of the release object and "SUCCESS";
        otherwise the error tuple produced by
        :py:meth:`return_error_raw <howdy.core.return_error_raw>`.
    :rtype: tuple

    .. _Plex: https://plex.tv
    """
    try:
        server = PlexServer(fullURL, token)
        return server.checkForUpdate(), "SUCCESS"
    except Exception as exc:
        return return_error_raw(str(exc))
def plex_download_release(release, destination_dir=None, do_progress=False):
    """
    Downloads the Plex_ update into a specific directory, with optional progress bar.

    :param release: the :py:class:`Release <plexapi.server.Release>` containing
       the Plex_ update information.
    :param str destination_dir: the destination directory. Defaults to the
       current working directory *at call time* (the original evaluated
       ``os.getcwd()`` once at import, freezing a stale directory).
    :param bool do_progress: whether to show the progress bar. Default ``False``.
    :returns: if unsuccessful an error message; if successful, the full path
       of the downloaded file.
    :rtype: str
    """
    if destination_dir is None:
        destination_dir = os.getcwd()
    downloadURL = release.downloadURL
    response = requests.get(downloadURL, stream=True)
    if not response.ok:
        # Close the streamed connection before bailing out (the original
        # leaked it).
        response.close()
        return "ERROR, %s IS NOT ACCESSIBLE" % downloadURL
    try:
        #
        ## destination of the PLEX download: the HEAD redirect target's
        ## basename names the local file.
        r2 = requests.head(downloadURL)
        if not r2.ok:
            return "ERROR, %s IS NOT ACCESSIBLE WITH REQUESTS.HEAD" % downloadURL
        destination = os.path.join(destination_dir, os.path.basename(r2.headers['Location']))
        #
        ## following instructions from https://stackoverflow.com/a/37573701/3362358
        total_size_in_bytes = int(response.headers.get('content-length', 0))
        block_size = 1 << 16
        if not do_progress:
            with open(destination, 'wb') as openfile:
                for chunk in response.iter_content(block_size):
                    openfile.write(chunk)
            return destination
        #
        ## have a progress bar
        with tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True) as progress_bar, open(destination, 'wb') as openfile:
            for chunk in response.iter_content(block_size):
                progress_bar.update(len(chunk))
                openfile.write(chunk)
        if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
            return "ERROR, something went wrong"
        return destination
    finally:
        # Always release the streamed HTTP connection.
        response.close()
| 42.975 | 221 | 0.643397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,469 | 0.478767 |
ff9a9ebc21d387b55d1ddf8725dbdb3f48e0a128 | 4,756 | py | Python | halolib/models.py | yoramk2/halolib | c05bc9f9c37d09700c5a42dcd3b9a74c0c5c0c29 | [
"MIT"
] | 2 | 2020-07-22T13:28:47.000Z | 2021-02-08T04:38:06.000Z | halolib/models.py | yoramk2/halolib | c05bc9f9c37d09700c5a42dcd3b9a74c0c5c0c29 | [
"MIT"
] | 2 | 2021-06-10T20:59:03.000Z | 2021-11-15T17:47:59.000Z | halolib/models.py | yoramk2/halolib | c05bc9f9c37d09700c5a42dcd3b9a74c0c5c0c29 | [
"MIT"
] | 1 | 2021-02-08T04:38:09.000Z | 2021-02-08T04:38:09.000Z | from __future__ import print_function
import datetime
import functools
import hashlib
import logging
from abc import ABCMeta

from pynamodb.attributes import UnicodeAttribute
from pynamodb.models import Model

from halolib.exceptions import DbIdemError
from halolib.logs import log_json
from .settingsx import settingsx
settings = settingsx()

# Local DynamoDB for development, e.g.:
# java -Djava.library.path=./DynamoDBLocal_lib -jar DynamoDBLocal.jar -sharedDb -port 8600
# java -D"java.library.path"=./DynamoDBLocal_lib -jar DynamoDBLocal.jar -sharedDb -port 8600

logger = logging.getLogger(__name__)

# Database settings pulled from the active configuration.
ver = settings.DB_VER
uri = settings.DB_URL
tbl = False
page_size = settings.PAGE_SIZE
class AbsDbMixin(object):
    """Mixin that intercepts every method call on a DB-access object and
    logs its wall-clock duration as a ``performance_data`` record.
    """
    __metaclass__ = ABCMeta
    # intercept db calls

    # request context forwarded into the structured log payload
    req_context = None

    def __init__(self, req_context):
        self.req_context = req_context

    def __getattribute__(self, name):
        """Return the attribute; callables come back wrapped with timing/logging."""
        attr = object.__getattribute__(self, name)
        if not callable(attr):
            return attr

        # functools.wraps preserves the wrapped callable's __name__/__doc__
        # (the original wrapper hid them behind 'newfunc').
        @functools.wraps(attr)
        def timed(*args, **kwargs):
            start = datetime.datetime.now()
            result = attr(*args, **kwargs)
            elapsed = datetime.datetime.now() - start
            logger.info("performance_data", extra=log_json(
                self.req_context,
                {"type": "DBACCESS",
                 "milliseconds": int(elapsed.total_seconds() * 1000),
                 "function": str(attr.__name__)}))
            return result

        return timed
class AbsModel(Model):
    """PynamoDB base model enforcing idempotent writes.

    Every ``save``/``update`` stamps ``halo_request_id`` with an MD5 digest
    derived from the request id and the item's key(s), and conditions the
    write on that attribute not yet existing, so replaying the same request
    cannot write the item twice.
    """
    __metaclass__ = ABCMeta

    # idempotency marker; populated automatically by save()/update()
    halo_request_id = UnicodeAttribute(null=False)

    @classmethod
    def get_pre(cls):
        """Return the attribute names of the hash key and optional range key.

        :return: tuple ``(hash_key_name, range_key_name)``; the second item
            is ``None`` for tables without a range key.
        """
        hash_key_name = super(AbsModel, cls)._hash_key_attribute().attr_name
        range_key_name = None
        attr = super(AbsModel, cls)._range_key_attribute()
        if attr:
            range_key_name = attr.attr_name
        logger.debug("\nhash_key_name=" + str(hash_key_name))
        logger.debug("\nrange_key_name=" + str(range_key_name))
        return hash_key_name, range_key_name

    def get_pre_val(self):
        """Return this item's hash-key value and optional range-key value.

        :return: tuple ``(hash_key_val, range_key_val)``; the second item is
            ``None`` when the table has no range key.
        """
        hash_key_name, range_key_name = self.get_pre()
        hash_key_val = super(AbsModel, self).__getattribute__(hash_key_name)
        range_key_val = None
        if range_key_name:
            range_key_val = super(AbsModel, self).__getattribute__(range_key_name)
        logger.debug("\nhash_key_name=" + hash_key_name + "=" + str(hash_key_val))
        if range_key_val:
            logger.debug("\nrange_key_val=" + range_key_name + "=" + str(range_key_val))
        return hash_key_val, range_key_val

    def get_idempotent_id(self, halo_request_id):  # return fixed size id of 128 bit hash value
        """Derive the idempotency token for this item and request.

        :param halo_request_id: the request's unique id (must be non-empty)
        :return: hex MD5 digest over the request id plus the item's key(s)
        :raises DbIdemError: when the request id is missing or empty
        """
        if halo_request_id is None or halo_request_id == "":
            raise DbIdemError("empty request id")
        hash_key_val, range_key_val = self.get_pre_val()
        request_id = halo_request_id + "-" + str(hash_key_val)
        if range_key_val:
            request_id = request_id + "-" + str(range_key_val)
        # NOTE(review): request_id already embeds halo_request_id, so the
        # request id is hashed twice — presumably intentional; confirm before
        # changing, since existing stored digests depend on it.
        idempotent_id = hashlib.md5(halo_request_id.encode() + request_id.encode()).hexdigest()
        return idempotent_id

    def save(self, halo_request_id, condition=None, conditional_operator=None, **expected_values):
        """Save with an idempotency guard.

        The write is conditioned on ``halo_request_id`` not existing (ANDed
        with any caller-supplied condition), so a replayed request fails
        instead of double-writing.

        :param halo_request_id: unique id of the originating request
        :param condition: optional extra pynamodb condition
        :param conditional_operator: passed through to pynamodb
        :param expected_values: passed through to pynamodb
        :return: the result of ``Model.save``
        """
        if condition is None:
            condition = AbsModel.halo_request_id.does_not_exist()
        else:
            condition = condition & (AbsModel.halo_request_id.does_not_exist())
        self.halo_request_id = self.get_idempotent_id(halo_request_id)
        return super(AbsModel, self).save(condition, conditional_operator, **expected_values)

    def update(self, halo_request_id, attributes=None, actions=None, condition=None, conditional_operator=None,
               **expected_values):
        """Update with the same idempotency guard as :meth:`save`.

        :param halo_request_id: unique id of the originating request
        :param attributes: passed through to pynamodb
        :param actions: passed through to pynamodb
        :param condition: optional extra pynamodb condition
        :param conditional_operator: passed through to pynamodb
        :param expected_values: passed through to pynamodb
        :return: the result of ``Model.update``
        """
        if condition is None:
            condition = AbsModel.halo_request_id.does_not_exist()
        else:
            condition = condition & (AbsModel.halo_request_id.does_not_exist())
        self.halo_request_id = self.get_idempotent_id(halo_request_id)
        return super(AbsModel, self).update(attributes, actions, condition, conditional_operator, **expected_values)
| 33.971429 | 116 | 0.631623 | 4,111 | 0.864382 | 0 | 0 | 474 | 0.099664 | 0 | 0 | 921 | 0.19365 |
ff9c87881ced0c2a95f22bc798cffd2d0381f7e0 | 721 | py | Python | cblue/data/__init__.py | dfhby0/CBLUE | 36bdb52f17c4379d4a5f8b407890ba294017b5e2 | [
"Apache-2.0"
] | 293 | 2021-06-07T06:04:37.000Z | 2022-03-28T09:38:28.000Z | cblue/data/__init__.py | dfhby0/CBLUE | 36bdb52f17c4379d4a5f8b407890ba294017b5e2 | [
"Apache-2.0"
] | 6 | 2021-06-11T09:50:15.000Z | 2022-03-18T07:33:56.000Z | cblue/data/__init__.py | dfhby0/CBLUE | 36bdb52f17c4379d4a5f8b407890ba294017b5e2 | [
"Apache-2.0"
] | 61 | 2021-06-07T06:38:42.000Z | 2022-03-30T07:16:46.000Z | from .data_process import EEDataProcessor, REDataProcessor, ERDataProcessor, CTCDataProcessor, \
CDNDataProcessor, STSDataProcessor, QQRDataProcessor, QICDataProcessor, QTRDataProcessor
from .dataset import EEDataset, REDataset, ERDataset, CTCDataset, CDNDataset, STSDataset, \
QQRDataset, QICDataset, QTRDataset
# Public API: one (DataProcessor, Dataset) pair per CBLUE task.
__all__ = ['EEDataProcessor', 'EEDataset',
           'REDataProcessor', 'REDataset',
           'ERDataProcessor', 'ERDataset',
           'CDNDataProcessor', 'CDNDataset',
           'CTCDataProcessor', 'CTCDataset',
           'STSDataProcessor', 'STSDataset',
           'QQRDataProcessor', 'QQRDataset',
           'QICDataProcessor', 'QICDataset',
           'QTRDataProcessor', 'QTRDataset']
| 48.066667 | 96 | 0.696255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.366158 |
ff9cd6711e3c12c38e5a0b8a5852f05f78df64e8 | 3,315 | py | Python | write_input_files.py | jensengroup/take_elementary_step | 63dd4caecd54a2458cad72352221e61102a455b2 | [
"MIT"
] | 4 | 2018-03-05T07:58:04.000Z | 2019-10-24T20:03:25.000Z | write_input_files.py | jensengroup/take_elementary_step | 63dd4caecd54a2458cad72352221e61102a455b2 | [
"MIT"
] | 2 | 2018-03-05T11:29:31.000Z | 2018-08-09T12:29:57.000Z | write_input_files.py | jensengroup/take_elementary_step | 63dd4caecd54a2458cad72352221e61102a455b2 | [
"MIT"
] | 6 | 2018-08-02T12:29:21.000Z | 2021-04-24T10:29:02.000Z | from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import rdMolDescriptors
def get_fragments(mol, name):
    """Split *mol* into its disconnected fragments and label them.

    Fragment names are *name* plus an uppercase letter suffix
    ('A', 'B', 'C', ...), one per fragment.

    :param mol: RDKit Mol, possibly containing several disconnected pieces
    :param name: base name shared by all fragment names
    :returns: tuple ``(fragments, fragment_names)`` — parallel sequences
    """
    fragments = Chem.GetMolFrags(mol, asMols=True)
    # Generate one label per fragment; the original hard-coded
    # ["A", "B", "C"] and silently dropped any fragment past the third.
    labels = [chr(ord("A") + k) for k in range(len(fragments))]
    fragment_names = [name + label for label in labels]
    return fragments, fragment_names
def generate_conformations(fragments, max_confs=20):
    """Embed 3D conformers for every fragment, in place.

    The conformer count scales with molecular flexibility:
    3 + 3 * (rotatable bonds), capped at *max_confs*.
    """
    for frag in fragments:
        n_rot = rdMolDescriptors.CalcNumRotatableBonds(frag)
        n_confs = min(3 + 3 * n_rot, max_confs)
        AllChem.EmbedMultipleConfs(frag, numConfs=n_confs)
    return fragments
def write_mopac_input_file(fragment, fragment_name, keywords):
    """Write one MOPAC .mop input file per conformer of *fragment*.

    Files are named ``<fragment_name>+<conformer_index>.mop``. The first
    line holds the MOPAC keywords with the fragment's formal charge
    appended, followed by two (nearly) blank lines, then one line per atom.

    :param fragment: RDKit Mol with embedded conformers
    :param fragment_name: base name for the output files
    :param keywords: MOPAC keyword string ending in ``charge=`` (the
        charge value is appended directly)
    """
    # The original computed fragment.GetNumAtoms() here but never used it.
    charge = Chem.GetFormalCharge(fragment)
    symbols = [a.GetSymbol() for a in fragment.GetAtoms()]
    for i, conf in enumerate(fragment.GetConformers()):
        file_name = fragment_name + "+" + str(i) + ".mop"
        with open(file_name, "w") as file:
            file.write(keywords + str(charge) + "\n")
            file.write(" \n")
            file.write(" \n")
            for atom, symbol in enumerate(symbols):
                p = conf.GetAtomPosition(atom)
                # The '1' flags mark each coordinate as free to optimise.
                line = " ".join((symbol, str(p.x), "1", str(p.y), "1", str(p.z), "1", "\n"))
                file.write(line)
def write_xtb_input_file(fragment, fragment_name):
    """Write one GFN-xTB .xyz input file per conformer of *fragment*.

    Files are named ``<fragment_name>+<index>.xyz``. A ``$set``/``chrg``
    block is appended only when the fragment carries a net formal charge.
    """
    n_atoms = fragment.GetNumAtoms()
    charge = Chem.GetFormalCharge(fragment)
    symbols = [atom.GetSymbol() for atom in fragment.GetAtoms()]
    for idx, conformer in enumerate(fragment.GetConformers()):
        out_path = fragment_name + "+" + str(idx) + ".xyz"
        with open(out_path, "w") as out:
            out.write(str(n_atoms) + "\n")
            out.write("title\n")
            for atom_idx, symbol in enumerate(symbols):
                pos = conformer.GetAtomPosition(atom_idx)
                out.write(" ".join((symbol, str(pos.x), str(pos.y), str(pos.z), "\n")))
            if charge != 0:
                out.write("$set\n")
                out.write("chrg " + str(charge) + "\n")
                out.write("$end")
# GFN-xTB and MOPAC automatically switches to UHF if the number of electrons is odd, so there is no need
# to specify the multiplicity.
# If you need to do that for another program you can compute the number of electrons by
# atomic_numbers = [a.GetAtomicNum() for a in fragment.GetAtoms()]
# number_of_electrons = sum(atomic_numbers) - charge
def write_input_files(mol, name, method, keywords):
    """Generate QM input files for every fragment of *mol*.

    This version writes input files for GFN-xTB and MOPAC; to support
    another program, add a corresponding writer function.
    """
    frags, frag_names = get_fragments(mol, name)
    frags = generate_conformations(frags)
    for frag, frag_name in zip(frags, frag_names):
        if method == "xtb":
            write_xtb_input_file(frag, frag_name)
        if method == "mopac":
            write_mopac_input_file(frag, frag_name, keywords)
if __name__ == "__main__":
    # Example driver: two disconnected molecules (propane + ethane),
    # written out as MOPAC PM3 inputs after adding explicit hydrogens.
    smiles = "CCC.CC"
    name = "test"
    #method = "xtb"
    #keywords = ""
    method = "mopac"
    keywords = "pm3 cycles=200 charge="
    mol = Chem.MolFromSmiles(smiles)
    mol = Chem.AddHs(mol)
    write_input_files(mol,name,method,keywords)
| 38.103448 | 104 | 0.645551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 673 | 0.203017 |
ff9dcee27fd788e74a17caecd46d9f95eb475438 | 1,008 | py | Python | djapi/authtoken/models.py | Suor/djapi | 929b266d01aacc49f805c3ac7eec55766634babe | [
"BSD-2-Clause"
] | 12 | 2017-10-23T10:52:30.000Z | 2021-09-06T19:08:57.000Z | djapi/authtoken/models.py | Suor/djapi | 929b266d01aacc49f805c3ac7eec55766634babe | [
"BSD-2-Clause"
] | null | null | null | djapi/authtoken/models.py | Suor/djapi | 929b266d01aacc49f805c3ac7eec55766634babe | [
"BSD-2-Clause"
] | null | null | null | import binascii
import os
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class Token(models.Model):
    """
    The default authorization token model.

    Each user owns at most one token (one-to-one); the 40-character hex
    key doubles as the primary key and is generated lazily on first save.
    """
    key = models.CharField(_("Key"), max_length=40, primary_key=True)
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL, related_name='auth_token',
        on_delete=models.CASCADE, verbose_name=_("User")
    )
    created = models.DateTimeField(_("Created"), auto_now_add=True)

    class Meta:
        verbose_name = _("Token")
        verbose_name_plural = _("Tokens")

    def save(self, *args, **kwargs):
        # Generate the key on first save only; an existing key is preserved.
        if not self.key:
            self.key = self.generate_key()
        return super(Token, self).save(*args, **kwargs)

    def generate_key(self):
        # 20 random bytes -> 40 hex characters, matching key's max_length.
        return binascii.hexlify(os.urandom(20)).decode()

    def __str__(self):
        return self.key
| 28 | 69 | 0.686508 | 769 | 0.762897 | 0 | 0 | 798 | 0.791667 | 0 | 0 | 101 | 0.100198 |
ff9f80880a91fdc6467f39d9797eb0e52dc6a441 | 2,035 | py | Python | app/lib/docker_config.py | joshburt/com.shapeandshare.therowantree.client.api | 6f17f469482a91071bedb7eedd158539cebe5639 | [
"MIT"
] | null | null | null | app/lib/docker_config.py | joshburt/com.shapeandshare.therowantree.client.api | 6f17f469482a91071bedb7eedd158539cebe5639 | [
"MIT"
] | null | null | null | app/lib/docker_config.py | joshburt/com.shapeandshare.therowantree.client.api | 6f17f469482a91071bedb7eedd158539cebe5639 | [
"MIT"
] | null | null | null | # Allow over-riding the defaults with non-secure ENV variables, or secure docker secrets
# Each setting defaults to the bundled therowantree_config module and may be
# overridden per-setting through environment variables (e.g. injected by
# Docker).
import therowantree_config as default_config
import os

###############################################################################
# Directory Options
###############################################################################
LOGS_DIR = default_config.LOGS_DIR
if 'LOGS_DIR' in os.environ:
    LOGS_DIR = os.environ['LOGS_DIR']

TMP_DIR = default_config.TMP_DIR
if 'TMP_DIR' in os.environ:
    TMP_DIR = os.environ['TMP_DIR']

###############################################################################
# Server Options
###############################################################################
API_ACCESS_KEY = default_config.API_ACCESS_KEY
if 'API_ACCESS_KEY' in os.environ:
    API_ACCESS_KEY = os.environ['API_ACCESS_KEY']

API_VERSION = default_config.API_VERSION
if 'API_VERSION' in os.environ:
    API_VERSION = os.environ['API_VERSION']

LISTENING_HOST = default_config.LISTENING_HOST
if 'LISTENING_HOST' in os.environ:
    LISTENING_HOST = os.environ['LISTENING_HOST']

FLASK_DEBUG = default_config.FLASK_DEBUG
if 'FLASK_DEBUG' in os.environ:
    # bool() of any non-empty string is True, so the previous
    # bool(os.environ['FLASK_DEBUG']) treated FLASK_DEBUG=False as enabled.
    # Parse the text explicitly instead.
    FLASK_DEBUG = os.environ['FLASK_DEBUG'].strip().lower() in ('1', 'true', 'yes', 'on')

###############################################################################
# Database Options
###############################################################################
API_DATABASE_SERVER = default_config.API_DATABASE_SERVER
if 'API_DATABASE_SERVER' in os.environ:
    API_DATABASE_SERVER = os.environ['API_DATABASE_SERVER']

API_DATABASE_NAME = default_config.API_DATABASE_NAME
if 'API_DATABASE_NAME' in os.environ:
    API_DATABASE_NAME = os.environ['API_DATABASE_NAME']

API_DATABASE_USERNAME = default_config.API_DATABASE_USERNAME
if 'API_DATABASE_USERNAME' in os.environ:
    API_DATABASE_USERNAME = os.environ['API_DATABASE_USERNAME']

API_DATABASE_PASSWORD = default_config.API_DATABASE_PASSWORD
if 'API_DATABASE_PASSWORD' in os.environ:
    API_DATABASE_PASSWORD = os.environ['API_DATABASE_PASSWORD']
| 37 | 88 | 0.611794 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 943 | 0.463391 |
ff9f8f871223cbf21633f777d013d3d15bbc6700 | 66 | py | Python | run/__init__.py | ealcobaca/optimizer_pool | e93ac72c1547bc3813a0edf822d5fd453f22ce49 | [
"MIT"
] | 1 | 2022-03-10T21:46:07.000Z | 2022-03-10T21:46:07.000Z | run/__init__.py | ealcobaca/optimizer_pool | e93ac72c1547bc3813a0edf822d5fd453f22ce49 | [
"MIT"
] | null | null | null | run/__init__.py | ealcobaca/optimizer_pool | e93ac72c1547bc3813a0edf822d5fd453f22ce49 | [
"MIT"
] | 1 | 2022-03-10T21:46:09.000Z | 2022-03-10T21:46:09.000Z | from run.run_real import Run_real
from run.run_PSO import Run_PSO
| 22 | 33 | 0.848485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ffa09b777a2b94bab6e5aebe14cc717ea0c9bb89 | 2,715 | py | Python | scripts/util/test_cloudlab.py | neilgiri/hotstuff | d6dfb482706393d725225736e63b5e5d7aa99d2b | [
"MIT"
] | 2 | 2020-12-03T20:50:16.000Z | 2021-03-24T02:53:59.000Z | scripts/util/test_cloudlab.py | neilgiri/hotstuff | d6dfb482706393d725225736e63b5e5d7aa99d2b | [
"MIT"
] | null | null | null | scripts/util/test_cloudlab.py | neilgiri/hotstuff | d6dfb482706393d725225736e63b5e5d7aa99d2b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import cloudlab_util as cl
from geni.rspec import pg as rspec
from geni.util import loadContext
def see_slice():
    """Load a CloudLab context, list projects, and release one experiment.

    NOTE(review): the key passphrase (and the credentials in the commented
    block below) are hard-coded in source; move them out of version control.
    """
    c = loadContext("/tmp/context.json", key_passphrase='TeFxy^FVv8Z5')
    print(c.cf.listProjects(c))
    cl.do_release(c, 'testing138', ['cl-utah'])
    #cl.release(experiment_name='testing136',
    #           cloudlab_user='giridhn',
    #           cloudlab_password='TeFxy^FVv8Z5',
    #           cloudlab_project='Consensus',
    #           cloudlab_cert_path='cloudlab.pem',
    #           cloudlab_key_path='~/.ssh/id_ed25519.pub')
    print("Available slices: {}".format(c.cf.listSlices(c).keys()))
def setup():
    """Request one m400 node on CloudLab ('cl-utah') and print its manifest.

    NOTE(review): credentials are hard-coded below; move them out of source.
    """
    node = rspec.RawPC("node")
    #img = "urn:publicid:IDN+apt.emulab.net+image+schedock-PG0:docker-ubuntu16:0"
    #node.disk_image = img
    node.hardware_type = 'm400'
    iface1 = node.addInterface("if1")
    # Specify the component id and the IPv4 address
    iface1.component_id = "eth1"
    iface1.addAddress(rspec.IPv4Address("192.168.1.1", "255.255.255.0"))
    # Put the interface on a LAN so it appears in the request rspec.
    link = rspec.LAN("lan")
    link.addInterface(iface1)
    r = rspec.Request()
    r.addResource(node)
    # One request keyed per aggregate (only the Utah cluster here).
    request = {}
    request['cl-utah'] = r
    m = cl.request(experiment_name='testing138',
                   requests=request,
                   expiration=960,
                   timeout=15,
                   cloudlab_user='giridhn',
                   cloudlab_password='TeFxy^FVv8Z5',
                   cloudlab_project='Consensus',
                   cloudlab_cert_path='cloudlab.pem',
                   cloudlab_key_path='~/.ssh/id_ed25519.pub')
    # read info in manifests to introspect allocation
    print(m['cl-utah'].nodes)
    for node in m['cl-utah'].nodes:
        print("Node")
        print(node)
        print(node.component_id)
        for iface in node.interfaces:
            print("Interface")
            print(iface)
            print(node.hostipv4)
            print(iface.address_info)
            print(iface.sliver_id)
# run your experiment...
# once done with experiment, release resources
#m = cl.request(experiment_name='myexp',
#               cloudlab_user='myuser',
#               cloudlab_password='mypassword',
#               cloudlab_project='myproject',
#               cloudlab_cert_path='/path/to/cloudlab.pem',
#               cloudlab_pubkey_path='/path/to/cloudlab_rsa.pub')
#cl.print_slivers(experiment_name = 'testing131', cloudlab_user='giridhn',
#                 cloudlab_password='TeFxy^FVv8Z5', cloudlab_project='Consensus',
#                 cloudlab_cert_path='/Users/neilgiridharan/Downloads/cloudlab.pem', cloudlab_key_path='/Users/neilgiridharan/.ssh/id_ed25519.pub')
#setup()
# Module-level side effect: importing/running this script releases the
# 'testing138' experiment via see_slice().
see_slice()
ffa0bb48d7404e573b6c9e5fa52446d5003efd93 | 99 | py | Python | cdp_scrapers/instances/__init__.py | dhanya-shraddha/cdp-scrapers | 7e0d841a2a64963405a075cd91985d24e3dedfa6 | [
"MIT"
] | null | null | null | cdp_scrapers/instances/__init__.py | dhanya-shraddha/cdp-scrapers | 7e0d841a2a64963405a075cd91985d24e3dedfa6 | [
"MIT"
] | 1 | 2021-10-01T05:27:21.000Z | 2021-10-01T05:27:21.000Z | cdp_scrapers/instances/__init__.py | dhanya-shraddha/cdp-scrapers | 7e0d841a2a64963405a075cd91985d24e3dedfa6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Individual, scratchpad-style CDP instance scrapers; they may not be fully up to date.
"""
| 16.5 | 65 | 0.656566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.969697 |
ffa18167feceb603d67181577f1faa242f575b75 | 3,195 | py | Python | src/lager/core.py | jessekrubin/lager | 942d8b158495b5d782a159f36ef801abb972a87f | [
"BSD-2-Clause"
] | 1 | 2020-05-14T03:51:40.000Z | 2020-05-14T03:51:40.000Z | src/lager/core.py | dynamic-graphics-inc/lager | 942d8b158495b5d782a159f36ef801abb972a87f | [
"MIT"
] | 6 | 2020-05-02T18:20:18.000Z | 2020-06-16T23:31:25.000Z | src/lager/core.py | jessekrubin/lager | 942d8b158495b5d782a159f36ef801abb972a87f | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Python lager brewed by a loguru"""
import asyncio
from functools import wraps
from time import time
from typing import Union
from loguru import logger
from lager.const import LOG_LEVELS
__all__ = ['loglevel', 'flog', 'handlers', 'logger', 'log', 'LOG', 'ln', 'LN']

# Single-letter shortcuts for the loguru level methods.
logger.t = logger.trace
logger.d = logger.debug
logger.i = logger.info
logger.s = logger.success
logger.w = logger.warning
logger.e = logger.error
logger.c = logger.critical

# commonly used dgpy aliases
log = logger
LOG = logger

# ln => natural log (pun alias for the same logger)
ln = logger
LN = logger
def loglevel(level: Union[str, int]) -> str:
"""Convert log-level abrev to a valid loguru log level"""
return LOG_LEVELS[str(level).strip("'").strip('"').lower()]
def flog(funk=None, level="debug", enter=True, exit=True):
"""Log function (sync/async) enter and exit using this decorator
Args:
funk (Callable): Function to decorate
level (Union[int, str]): Log level
enter (bool): Log function entry if True
exit (bool): Log function exit if False
Returns:
A wrapped function that now has logging!
Usage:
# SYNC
@flog
def add(a, b):
return a + b
add(1, 4)
# ASYNC
@flog
async def add_async(a, b):
return a + b
import asyncio
asyncio.run(add_async(1, 4))
"""
def _flog(funk):
name = funk.__name__
@wraps(funk)
def _flog_decorator(*args, **kwargs):
logger_ = logger.opt(depth=1)
if enter:
logger_.log(
loglevel(level),
"FLOG-ENTER > '{}' (args={}, kwargs={})",
name,
args,
kwargs,
)
ti = time()
result = funk(*args, **kwargs)
tf = time()
if exit:
logger_.log(
loglevel(level),
"FLOG-EXIT < '{}' (return={}, dt_sec={})",
name,
result,
tf - ti,
)
return result
@wraps(funk)
async def _flog_decorator_async(*args, **kwargs):
logger_ = logger.opt(depth=7)
if enter:
logger_.log(
loglevel(level),
"FLOG-ENTER > '{}' (args={}, kwargs={})",
name,
args,
kwargs,
)
ti = time()
result = await funk(*args, **kwargs)
tf = time()
if exit:
logger_.log(
loglevel(level),
"FLOG-EXIT < '{}' (return={}, dt_sec={})",
name,
result,
tf - ti,
)
return result
if asyncio.iscoroutinefunction(funk) or asyncio.iscoroutine(funk):
return _flog_decorator_async
return _flog_decorator
return _flog(funk) if funk else _flog
def handlers():
"""Return all handlers"""
return logger._core.handlers
| 25.357143 | 78 | 0.489202 | 0 | 0 | 0 | 0 | 1,450 | 0.453834 | 713 | 0.223161 | 1,007 | 0.31518 |
ffa241b51a436f0dcd5e0dcb3336517e8a723091 | 1,626 | py | Python | Tests/Plot/LamHole/test_Hole_54_plot.py | PMSMcqut/pyleecan-of-manatee | 3efa06e8bc53c81a3e35457c108290e1d9ec1373 | [
"Apache-2.0"
] | 2 | 2020-06-29T13:48:37.000Z | 2021-06-15T07:34:05.000Z | Tests/Plot/LamHole/test_Hole_54_plot.py | PMSMcqut/pyleecan-of-manatee | 3efa06e8bc53c81a3e35457c108290e1d9ec1373 | [
"Apache-2.0"
] | null | null | null | Tests/Plot/LamHole/test_Hole_54_plot.py | PMSMcqut/pyleecan-of-manatee | 3efa06e8bc53c81a3e35457c108290e1d9ec1373 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@date Created on Wed Jan 13 17:45:15 2016
@copyright (C) 2015-2016 EOMYS ENGINEERING.
@author pierre_b
"""
from os.path import join
from unittest import TestCase
import matplotlib.pyplot as plt
from numpy import pi
from pyleecan.Classes.Frame import Frame
from pyleecan.Classes.LamHole import LamHole
from pyleecan.Classes.Lamination import Lamination
from pyleecan.Classes.Machine import Machine
from pyleecan.Classes.Magnet import Magnet
from pyleecan.Classes.Shaft import Shaft
from pyleecan.Classes.MatLamination import MatLamination
from pyleecan.Classes.HoleM54 import HoleM54
from pyleecan.Tests.Plot import save_path
class test_Hole_54_plot(TestCase):
"""unittest for Lamination with Hole plot"""
def test_Lam_Hole_54_plot(self):
"""Test machine plot hole 54"""
plt.close("all")
test_obj = Machine()
test_obj.rotor = LamHole(
is_internal=True, Rint=0.1, Rext=0.2, is_stator=False, L1=0.7
)
test_obj.rotor.hole = list()
test_obj.rotor.hole.append(
HoleM54(Zh=8, W0=pi / 4, H0=50e-3, H1=10e-3, R1=100e-3)
)
test_obj.rotor.hole.append(
HoleM54(Zh=8, W0=pi / 6, H0=25e-3, H1=10e-3, R1=100e-3)
)
test_obj.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s54-Rotor.png"))
self.assertEqual(len(fig.axes[0].patches), 18)
test_obj.rotor.hole[0].plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s54-Rotor hole.png"))
self.assertEqual(len(fig.axes[0].patches), 1)
| 30.679245 | 73 | 0.675892 | 967 | 0.594711 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.169742 |
ffa3b283be454bc2ceeae7e1c5b1524c04eaba07 | 5,694 | py | Python | invenio_rdm_records/config.py | kprzerwa/invenio-rdm-records | d5919b5f1b5f453074fcb2b63929a60bfcb240ed | [
"MIT"
] | null | null | null | invenio_rdm_records/config.py | kprzerwa/invenio-rdm-records | d5919b5f1b5f453074fcb2b63929a60bfcb240ed | [
"MIT"
] | null | null | null | invenio_rdm_records/config.py | kprzerwa/invenio-rdm-records | d5919b5f1b5f453074fcb2b63929a60bfcb240ed | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
# Copyright (C) 2019 Northwestern University.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""DataCite-based data model for Invenio."""
from .utils import previewer_record_file_factory
def _(x):
"""Identity function for string extraction."""
return x
# Records REST API endpoints.
# NOTE: We have to keep this until invenio-records-files and
# invenio-communities use the new records-resources way of creating APIs
RECORDS_REST_ENDPOINTS = {}
"""REST API for invenio_rdm_records."""
# Files REST
# FILES_REST_PERMISSION_FACTORY = record_files_permission_factory
"""Set default files permission factory."""
# Invenio-IIIF
# =================
# See https://invenio-iiif.readthedocs.io/en/latest/configuration.html
IIIF_PREVIEW_TEMPLATE = "invenio_rdm_records/iiif_preview.html"
"""Template for IIIF image preview."""
# Invenio-Previewer
# =================
# See https://github.com/inveniosoftware/invenio-previewer/blob/master/invenio_previewer/config.py # noqa
PREVIEWER_PREFERENCE = [
'csv_dthreejs',
# TODO: IIIF checks bucket-level permissions, and thus won't work
# 'iiif_image',
'simple_image',
'json_prismjs',
'xml_prismjs',
'mistune',
'pdfjs',
'ipynb',
'zip',
]
"""Preferred previewers."""
# Invenio-Records-UI
# ==================
# See https://invenio-records-ui.readthedocs.io/en/latest/configuration.html
RECORDS_UI_ENDPOINTS = {
'recid': {
'pid_type': 'recid',
'record_class': 'invenio_rdm_records.records:BibliographicRecord',
'route': '/records/<pid_value>',
'template': 'invenio_rdm_records/record_landing_page.html'
},
'recid_files': {
'pid_type': 'recid',
'record_class': 'invenio_rdm_records.records:BibliographicRecord',
'route': '/records/<pid_value>/files/<path:filename>',
'view_imp': 'invenio_rdm_records.theme.views.file_download_ui',
},
'recid_previewer': {
'pid_type': 'recid',
'record_class': 'invenio_rdm_records.records:BibliographicRecord',
'route': '/records/<pid_value>/preview/<path:filename>',
'view_imp': 'invenio_previewer.views.preview',
},
}
"""Records UI for RDM Records."""
# Invenio-Formatter
# =================
FORMATTER_BADGES_ALLOWED_TITLES = ['DOI', 'doi']
"""List of allowed titles in badges."""
FORMATTER_BADGES_TITLE_MAPPING = {'doi': 'DOI'}
"""Mapping of titles."""
# Invenio-RDM-Records
# ===================
RDM_RECORDS_LOCAL_DOI_PREFIXES = ['10.9999']
"""List of locally managed DOI prefixes."""
RDM_RECORDS_METADATA_NAMESPACES = {}
"""Namespaces for fields *added* to the metadata schema.
Of the shape:
.. code-block:: python
{
'<prefix1>': {
'@context': '<url>'
},
# ...
'<prefixN>': {
'@context': '<url>'
}
}
For example:
.. code-block:: python
{
'dwc': {
'@context': 'http://rs.tdwg.org/dwc/terms/'
},
'z':{
'@context': 'https://zenodo.org/terms'
}
}
Use :const:`invenio_rdm_records.config.RDM_RECORDS_METADATA_EXTENSIONS` to
define the added fields.
See :class:`invenio_rdm_records.services.schemas.\
metadata_extensions.MetadataExtensions` for
how this configuration variable is used.
"""
RDM_RECORDS_METADATA_EXTENSIONS = {}
"""Fields added to the metadata schema.
Of the shape:
.. code-block:: python
{
'<prefix1>:<field1>': {
'elasticsearch': '<allowed elasticsearch type>'
'marshmallow': '<allowed marshmallow type>'
},
# ...
'<prefixN>:<fieldN>': {
'elasticsearch': '<allowed elasticsearch type>'
'marshmallow': '<allowed marshmallow type>'
}
}
For example:
.. code-block:: python
{
'dwc:family': {
'elasticsearch': 'keyword',
'marshmallow': SanitizedUnicode()
},
'dwc:behavior': {
'elasticsearch': 'text',
'marshmallow': SanitizedUnicode()
},
'z:department': {
'elasticsearch': 'text',
'marshmallow': SanitizedUnicode()
}
}
Use :const:`invenio_rdm_records.config.RDM_RECORDS_METADATA_NAMESPACES` to
define the prefixes.
See :class:`invenio_rdm_records.services.schemas.\
metadata_extensions.MetadataExtensions` for
allowed types and how this configuration variable is used.
"""
RDM_RECORDS_CUSTOM_VOCABULARIES = {}
"""Paths to custom controlled vocabularies.
Of the shape:
.. code-block:: python
{
'<dotted>.<path>.<to field1>': {
'path': '<absolute path to CSV file containing it>'
},
# ...
'<dotted>.<path>.<to fieldN>': {
'path': '<absolute path to CSV file containing it>'
}
}
For example:
.. code-block:: python
{
'resource_type': {
'path': os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'my_resource_types.csv'
)
},
'contributors.role': {
'path': os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'my_contributor_roles.csv'
)
}
}
"""
RDM_RECORDS_UI_EDIT_URL = "/uploads/<pid_value>"
"""Default UI URL for the edit page of a Bibliographic Record."""
#: Default site URL (used only when not in a context - e.g. like celery tasks).
THEME_SITEURL = "http://localhost:5000"
PREVIEWER_RECORD_FILE_FACOTRY = previewer_record_file_factory
| 25.19469 | 106 | 0.619073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,868 | 0.854935 |
ffa5465bfb3e2c7f5391b04142db505e1022b582 | 424 | py | Python | tests/dictionary1.py | Oshanath/lpython | 582e1718c04fcccd2b6e444d85dda3aae3e3292e | [
"BSD-3-Clause"
] | null | null | null | tests/dictionary1.py | Oshanath/lpython | 582e1718c04fcccd2b6e444d85dda3aae3e3292e | [
"BSD-3-Clause"
] | null | null | null | tests/dictionary1.py | Oshanath/lpython | 582e1718c04fcccd2b6e444d85dda3aae3e3292e | [
"BSD-3-Clause"
] | null | null | null | def test_Dict():
x: dict[i32, i32]
x = {1: 2, 3: 4}
# x = {1: "2", "3": 4} -> sematic error
y: dict[str, i32]
y = {"a": -1, "b": -2}
z: i32
z = y["a"]
z = y["b"]
z = x[1]
def test_dict_insert():
y: dict[str, i32]
y = {"a": -1, "b": -2}
y["c"] = -3
def test_dict_get():
y: dict[str, i32]
y = {"a": -1, "b": -2}
x: i32
x = y.get("a")
x = y.get("a", 0)
| 15.703704 | 43 | 0.379717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.169811 |
ffa5af6affd31cfe1fe0ccc78cba42510b4fe8d2 | 521 | py | Python | mysite/addOne.py | xinkaiwang/robotJump | 622e97451f450b755aecbd60e15b2cd47d875f47 | [
"MIT"
] | null | null | null | mysite/addOne.py | xinkaiwang/robotJump | 622e97451f450b755aecbd60e15b2cd47d875f47 | [
"MIT"
] | null | null | null | mysite/addOne.py | xinkaiwang/robotJump | 622e97451f450b755aecbd60e15b2cd47d875f47 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
import django
django.setup()
# your imports, e.g. Django models
from buckets.models import *
from buckets.name2date import name2date
bucket = BucketInfo.objects.get(name='xinkaibuk1')
allInDb = set()
allInDb.update(ImageInfo.objects.all())
file_name='20180227.062043.749.jpg'
date = name2date(file_name)
# print date
img = ImageInfo(file_name=file_name, date_time=date, bucket=bucket)
img.save()
print 'image saved' | 21.708333 | 67 | 0.773512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 158 | 0.303263 |
ffa7676c4e5ed4bfd01c893490761de5a7bbcf0c | 255 | py | Python | backend/test.py | Harin329/Masterplan | 0e06d596751c5073b104d0e4aa6c1e8fd0a4fb0a | [
"Unlicense"
] | null | null | null | backend/test.py | Harin329/Masterplan | 0e06d596751c5073b104d0e4aa6c1e8fd0a4fb0a | [
"Unlicense"
] | null | null | null | backend/test.py | Harin329/Masterplan | 0e06d596751c5073b104d0e4aa6c1e8fd0a4fb0a | [
"Unlicense"
] | null | null | null | import boto3
import os
from dotenv import load_dotenv
load_dotenv()
client = boto3.client(
'dynamodb',
aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),
region_name="us-east-1"
)
| 19.615385 | 61 | 0.752941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.247059 |
ffa7a56bf9b9188e3506ddbbb4e8bf32ea7b9845 | 321 | py | Python | buildingspy/__init__.py | marcusfuchs/BuildingsPy | 954f23597585e6674c03097ec2085e1326e9b820 | [
"BSD-3-Clause-LBNL"
] | 66 | 2015-01-26T15:57:05.000Z | 2022-03-24T18:43:01.000Z | buildingspy/__init__.py | marcusfuchs/BuildingsPy | 954f23597585e6674c03097ec2085e1326e9b820 | [
"BSD-3-Clause-LBNL"
] | 259 | 2015-01-06T21:37:52.000Z | 2022-03-07T18:02:38.000Z | buildingspy/__init__.py | marcusfuchs/BuildingsPy | 954f23597585e6674c03097ec2085e1326e9b820 | [
"BSD-3-Clause-LBNL"
] | 34 | 2015-01-14T11:35:57.000Z | 2022-03-15T22:10:25.000Z | """
:mod:`buildingspy` Modules for post-processing simulation output files
======================================================================
"""
import os
# Version.
version_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'VERSION'))
with open(version_path) as f:
__version__ = f.read().strip()
| 26.75 | 82 | 0.563863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.523364 |
ffa7ba43fb77d122b3328d05f195ebbbdbe93ef0 | 7,577 | py | Python | kuryr_kubernetes/tests/unit/controller/drivers/test_public_ip.py | BoringWenn/kuryr-kubernetes | 625ddb0a39ab5e8752b83565e6b9689c2ab1775f | [
"Apache-2.0"
] | null | null | null | kuryr_kubernetes/tests/unit/controller/drivers/test_public_ip.py | BoringWenn/kuryr-kubernetes | 625ddb0a39ab5e8752b83565e6b9689c2ab1775f | [
"Apache-2.0"
] | null | null | null | kuryr_kubernetes/tests/unit/controller/drivers/test_public_ip.py | BoringWenn/kuryr-kubernetes | 625ddb0a39ab5e8752b83565e6b9689c2ab1775f | [
"Apache-2.0"
] | 1 | 2018-08-01T13:41:55.000Z | 2018-08-01T13:41:55.000Z | # Copyright (c) 2017 RedHat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutronclient.common import exceptions as n_exc
from kuryr_kubernetes.controller.drivers import public_ip\
as d_public_ip
from kuryr_kubernetes.tests import base as test_base
from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix
class TestFipPubIpDriver(test_base.TestCase):
def test_is_ip_available_none_param(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
fip_ip_addr = None
fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
self.assertIsNone(fip_id)
def test_is_ip_available_empty_param(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
fip_ip_addr = None
fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
self.assertIsNone(fip_id)
def test_is_ip_available_ip_not_exist(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
floating_ip = {'floating_ip_address': '1.2.3.4', 'port_id': None,
'id': 'a2a62ea7-e3bf-40df-8c09-aa0c29876a6b'}
neutron.list_floatingips.return_value = {'floatingips': [floating_ip]}
fip_ip_addr = '1.1.1.1'
fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
self.assertIsNone(fip_id)
def test_is_ip_available_empty_fip_list(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
floating_ip = None
neutron.list_floatingips.return_value = {'floatingips': [floating_ip]}
fip_ip_addr = '1.1.1.1'
fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
self.assertIsNone(fip_id)
def test_is_ip_available_occupied_fip(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
floating_ip = {'floating_ip_address': '1.2.3.4',
'port_id': 'ec29d641-fec4-4f67-928a-124a76b3a8e6'}
neutron.list_floatingips.return_value = {'floatingips': [floating_ip]}
fip_ip_addr = '1.2.3.4'
fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
self.assertIsNone(fip_id)
def test_is_ip_available_ip_exist_and_available(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
floating_ip = {'floating_ip_address': '1.2.3.4', 'port_id': None,
'id': 'a2a62ea7-e3bf-40df-8c09-aa0c29876a6b'}
neutron.list_floatingips.return_value = {'floatingips': [floating_ip]}
fip_ip_addr = '1.2.3.4'
fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
self.assertEqual(fip_id, 'a2a62ea7-e3bf-40df-8c09-aa0c29876a6b')
def test_allocate_ip_all_green(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
pub_net_id = mock.sentinel.pub_net_id
pub_subnet_id = mock.sentinel.pub_subnet_id
project_id = mock.sentinel.project_id
description = mock.sentinel.description
neutron = self.useFixture(k_fix.MockNeutronClient()).client
floating_ip = {'floating_ip_address': '1.2.3.5',
'id': 'ec29d641-fec4-4f67-928a-124a76b3a888'}
neutron.create_floatingip.return_value = {'floatingip': floating_ip}
fip_id, fip_addr = cls.allocate_ip(
m_driver, pub_net_id, project_id, pub_subnet_id, description)
self.assertEqual(fip_id, floating_ip['id'])
self.assertEqual(fip_addr, floating_ip['floating_ip_address'])
def test_allocate_ip_neutron_exception(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
pub_net_id = mock.sentinel.pub_net_id
pub_subnet_id = mock.sentinel.pub_subnet_id
project_id = mock.sentinel.project_id
description = mock.sentinel.description
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.create_floatingip.side_effect = n_exc.NeutronClientException
self.assertRaises(
n_exc.NeutronClientException, cls.allocate_ip,
m_driver, pub_net_id, project_id, pub_subnet_id, description)
def test_free_ip_neutron_exception(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
res_id = mock.sentinel.res_id
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.delete_floatingip.side_effect = n_exc.NeutronClientException
rc = cls.free_ip(m_driver, res_id)
self.assertEqual(rc, False)
def test_free_ip_succeeded(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
res_id = mock.sentinel.res_id
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.delete_floatingip.return_value = None
rc = cls.free_ip(m_driver, res_id)
self.assertEqual(rc, True)
# try:
# cls.free_ip(m_driver, res_id)
# except Exception:
# self.fail("Encountered an unexpected exception.")
def test_associate_neutron_exception(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
res_id = mock.sentinel.res_id
vip_port_id = mock.sentinel.vip_port_id
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.update_floatingip.side_effect = n_exc.NeutronClientException
retcode = cls.associate(m_driver, res_id, vip_port_id)
self.assertIsNone(retcode)
def test_associate_succeeded(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
res_id = mock.sentinel.res_id
vip_port_id = mock.sentinel.vip_port_id
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.update_floatingip.return_value = None
retcode = cls.associate(m_driver, res_id, vip_port_id)
self.assertIsNone(retcode)
def test_disassociate_neutron_exception(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
res_id = mock.sentinel.res_id
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.update_floatingip.side_effect = n_exc.NeutronClientException
self.assertIsNone(cls.disassociate
(m_driver, res_id))
def test_disassociate_succeeded(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
res_id = mock.sentinel.res_id
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.update_floatingip.return_value = None
self.assertIsNone(cls.disassociate
(m_driver, res_id))
| 39.463542 | 78 | 0.685628 | 6,682 | 0.881879 | 0 | 0 | 0 | 0 | 0 | 0 | 1,234 | 0.162861 |
ffa7d2f631804521b98eedfa063b62785267c23b | 1,879 | py | Python | day/day19part1.py | swemoney/adventofcode2020 | 94c3c032e68b182881dc545771a79fd884b7cf93 | [
"MIT"
] | 1 | 2020-12-07T13:18:09.000Z | 2020-12-07T13:18:09.000Z | day/day19part1.py | swemoney/adventofcode2020 | 94c3c032e68b182881dc545771a79fd884b7cf93 | [
"MIT"
] | null | null | null | day/day19part1.py | swemoney/adventofcode2020 | 94c3c032e68b182881dc545771a79fd884b7cf93 | [
"MIT"
] | null | null | null | from functools import cached_property
from puzzle import Puzzle # pylint: disable=import-error
class Day19Part1:
puzzle = None
def run(self):
self.rules, messages = self.parsed_input
print(sum([self.match('0', message) for message in messages]))
def match(self, rule_id, message):
return any(m == '' for m in self.traverse_rule(rule_id, message))
def traverse_rule(self, rule_id, message):
if isinstance(self.rules[rule_id], list):
yield from self.traverse_expand(self.rules[rule_id], message)
else:
if message and message[0] == self.rules[rule_id]:
yield message[1:]
def traverse_expand(self, rule_list, message):
for new_rule_list in rule_list:
yield from self.check(new_rule_list, message)
def check(self, rule_list, message):
if not rule_list: yield message
else:
i, *rule_list = rule_list
for message in self.traverse_rule(i, message):
yield from self.check(rule_list, message)
@cached_property
def parsed_input(self):
all_input = [line.strip() for line in self.puzzle.input]
separator = all_input.index('')
messages = all_input[separator+1:]
rules = {}
for rule in all_input[:separator]:
rule_id = rule.split(": ")[0]
rule_contents = rule.split(": ")[1]
if '"' in rule_contents:
rules[rule_id] = rule_contents.replace('"','')
else:
rule_ids = rule_contents.split(" ")
if '|' in rule_ids:
pipe_idx = rule_ids.index('|')
rules[rule_id] = [list(rule_ids[:pipe_idx]), list(rule_ids[pipe_idx+1:])]
else:
rules[rule_id] = [list(rule_ids)]
return (rules, messages)
| 36.843137 | 93 | 0.583821 | 1,782 | 0.948377 | 665 | 0.353912 | 804 | 0.427887 | 0 | 0 | 62 | 0.032996 |
ffa80060b230b8e11785d9bbb7f5c79cb5159e64 | 7,523 | py | Python | volatility/volatility/plugins/linux/pslist.py | williamclot/MemoryVisualizer | 2ff9f30f07519d6578bc36c12f8d08acc9cb4383 | [
"MIT"
] | 2 | 2018-07-16T13:30:40.000Z | 2018-07-17T12:02:05.000Z | volatility/volatility/plugins/linux/pslist.py | williamclot/MemoryVisualizer | 2ff9f30f07519d6578bc36c12f8d08acc9cb4383 | [
"MIT"
] | null | null | null | volatility/volatility/plugins/linux/pslist.py | williamclot/MemoryVisualizer | 2ff9f30f07519d6578bc36c12f8d08acc9cb4383 | [
"MIT"
] | null | null | null | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.obj as obj
import volatility.utils as utils
import volatility.plugins.linux.common as linux_common
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
class linux_pslist(linux_common.AbstractLinuxCommand):
"""Gather active tasks by walking the task_struct->task list"""
def __init__(self, config, *args, **kwargs):
linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs)
config.add_option('PID', short_option = 'p', default = None,
help = 'Operate on these Process IDs (comma-separated)',
action = 'store', type = 'str')
@staticmethod
def virtual_process_from_physical_offset(addr_space, offset):
pspace = utils.load_as(addr_space.get_config(), astype = 'physical')
task = obj.Object("task_struct", vm = pspace, offset = offset)
parent = obj.Object("task_struct", vm = addr_space, offset = task.parent)
for child in parent.children.list_of_type("task_struct", "sibling"):
if child.obj_vm.vtop(child.obj_offset) == task.obj_offset:
return child
return obj.NoneObject("Unable to bounce back from task_struct->parent->task_struct")
def allprocs(self):
linux_common.set_plugin_members(self)
init_task_addr = self.addr_space.profile.get_symbol("init_task")
init_task = obj.Object("task_struct", vm = self.addr_space, offset = init_task_addr)
# walk the ->tasks list, note that this will *not* display "swapper"
for task in init_task.tasks:
yield task
def calculate(self):
linux_common.set_plugin_members(self)
pidlist = self._config.PID
if pidlist:
pidlist = [int(p) for p in self._config.PID.split(',')]
for task in self.allprocs():
if not pidlist or task.pid in pidlist:
yield task
def unified_output(self, data):
return TreeGrid([("Offset", Address),
("Name", str),
("Pid", int),
("Uid", str),
("Gid", str),
("DTB", Address),
("StartTime", str)],
self.generator(data))
def _get_task_vals(self, task):
if task.parent.is_valid():
ppid = str(task.parent.pid)
else:
ppid = "-"
uid = task.uid
if uid == None or uid > 10000:
uid = "-"
gid = task.gid
if gid == None or gid > 100000:
gid = "-"
start_time = task.get_task_start_time()
if start_time == None:
start_time = "-"
if task.mm.pgd == None:
dtb = task.mm.pgd
else:
dtb = self.addr_space.vtop(task.mm.pgd) or task.mm.pgd
task_offset = None
if hasattr(self, "wants_physical") and task.obj_vm.base:
task_offset = self.addr_space.vtop(task.obj_offset)
if task_offset == None:
task_offset = task.obj_offset
return task_offset, dtb, ppid, uid, gid, str(start_time)
def generator(self, data):
for task in data:
task_offset, dtb, ppid, uid, gid, start_time = self._get_task_vals(task)
yield (0, [Address(task_offset),
str(task.comm),
int(task.pid),
str(uid),
str(gid),
Address(dtb),
start_time])
def render_text(self, outfd, data):
self.table_header(outfd, [("Offset", "[addrpad]"),
("Name", "20"),
("Pid", "15"),
("PPid", "15"),
("Uid", "15"),
("Gid", "6"),
("DTB", "[addrpad]"),
("Start Time", "")])
for task in data:
task_offset, dtb, ppid, uid, gid, start_time = self._get_task_vals(task)
self.table_row(outfd, task_offset,
task.comm,
str(task.pid),
str(ppid),
str(uid),
str(gid),
dtb,
str(start_time))
class linux_memmap(linux_pslist):
"""Dumps the memory map for linux tasks"""
def unified_output(self, data):
return TreeGrid([("Task", str),
("Pid", int),
("Virtual", Address),
("Physical", Address),
("Size", Address)],
self.generator(data))
def generator(self, data):
for task in data:
task_space = task.get_process_address_space()
pagedata = task_space.get_available_pages()
if pagedata:
for p in pagedata:
pa = task_space.vtop(p[0])
# pa can be 0, according to the old memmap, but can't == None(NoneObject)
if pa != None:
yield (0, [str(task.comm), int(task.pid), Address(p[0]), Address(pa), Address(p[1])])
else:
yield(0, [str(task.comm), int(task.pid), Address(-1), Address(-1), Address(-1)])
def render_text(self, outfd, data):
self.table_header(outfd, [("Task", "16"),
("Pid", "8"),
("Virtual", "[addrpad]"),
("Physical", "[addrpad]"),
("Size", "[addr]")])
for task in data:
task_space = task.get_process_address_space()
pagedata = task_space.get_available_pages()
if pagedata:
for p in pagedata:
pa = task_space.vtop(p[0])
# pa can be 0, according to the old memmap, but can't == None(NoneObject)
if pa != None:
self.table_row(outfd, task.comm, task.pid, p[0], pa, p[1])
#else:
# outfd.write("0x{0:10x} 0x000000 0x{1:12x}\n".format(p[0], p[1]))
else:
outfd.write("Unable to read pages for {0} pid {1}.\n".format(task.comm, task.pid))
| 38.382653 | 109 | 0.510833 | 6,452 | 0.857637 | 1,770 | 0.235278 | 597 | 0.079357 | 0 | 0 | 1,770 | 0.235278 |
ffa81bd84acea21fe770bfd3605b4b7cde9424f3 | 266 | py | Python | demo/code/2016-11-08/asyncio_demo/2.py | uxlsl/uxlsl.github.io | 91b62eb01fad38b913016236bb687eb6f0fe2012 | [
"MIT"
] | null | null | null | demo/code/2016-11-08/asyncio_demo/2.py | uxlsl/uxlsl.github.io | 91b62eb01fad38b913016236bb687eb6f0fe2012 | [
"MIT"
] | null | null | null | demo/code/2016-11-08/asyncio_demo/2.py | uxlsl/uxlsl.github.io | 91b62eb01fad38b913016236bb687eb6f0fe2012 | [
"MIT"
] | null | null | null |
import asyncio
async def f1():
print("f1")
return "f1"
async def f2():
result = await f1()
print(result)
return "f2"
loop = asyncio.get_event_loop()
try:
result = loop.run_until_complete(f2())
print(result)
finally:
print("exit")
| 14 | 42 | 0.620301 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.451128 | 18 | 0.067669 |
ffa8a6dc6156e32b6503b1d0530884e4f28a8bfa | 5,600 | py | Python | src/opendr/control/mobile_manipulation/mobileRL/envs/eeplanner.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | [
"Apache-2.0"
] | 3 | 2021-06-24T01:54:25.000Z | 2021-12-12T16:21:24.000Z | src/opendr/control/mobile_manipulation/mobileRL/envs/eeplanner.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | [
"Apache-2.0"
] | 79 | 2021-06-23T10:40:10.000Z | 2021-12-16T07:59:42.000Z | src/opendr/control/mobile_manipulation/mobileRL/envs/eeplanner.py | makistsantekidis/opendr | 07dee3b59d3487b9c5a93d6946317178a02c9890 | [
"Apache-2.0"
] | 5 | 2021-07-04T07:38:50.000Z | 2021-12-12T16:18:47.000Z | # Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pybindings import RobotObs, EEObs, LinearPlanner, GMMPlanner
MIN_PLANNER_VELOCITY = 0.001
MAX_PLANNER_VELOCITY = 0.1
# also defined in robot_env.cpp!
TIME_STEP_TRAIN = 0.1
class EEPlanner:
def __init__(self,
gripper_goal_tip,
gripper_goal_wrist,
head_start,
map):
self.gripper_goal_tip = gripper_goal_tip
self.gripper_goal_wrist = gripper_goal_wrist
self._head_start = head_start
self._map = map
def reset(self,
robot_obs: RobotObs,
slow_down_factor: float,
is_analytic_env: bool,
success_thres_dist: float,
success_thres_rot: float) -> EEObs:
raise NotImplementedError()
def step(self, robot_obs: RobotObs, learned_vel_norm: float) -> EEObs:
raise NotImplementedError()
def generate_obs_step(self, robot_state: RobotObs) -> EEObs:
raise NotImplementedError()
class LinearPlannerWrapper(EEPlanner):
def __init__(self,
gripper_goal_tip,
gripper_goal_wrist,
head_start,
map):
super(LinearPlannerWrapper, self).__init__(gripper_goal_tip,
gripper_goal_wrist,
head_start,
map)
self._planner = None
def reset(self,
robot_obs: RobotObs,
slow_down_factor: float,
is_analytic_env: bool,
success_thres_dist: float,
success_thres_rot: float) -> EEObs:
self._planner = LinearPlanner(self.gripper_goal_wrist,
robot_obs.gripper_tf,
[0, 0, 0, 0, 0, 0, 1],
robot_obs.base_tf,
success_thres_dist,
success_thres_rot,
MIN_PLANNER_VELOCITY,
MAX_PLANNER_VELOCITY,
slow_down_factor,
self._head_start,
TIME_STEP_TRAIN,
is_analytic_env)
return self.generate_obs_step(robot_obs)
def step(self, robot_obs: RobotObs, learned_vel_norm: float) -> EEObs:
return self._planner.step(robot_obs, learned_vel_norm)
def generate_obs_step(self, robot_state: RobotObs) -> EEObs:
return self._planner.generate_obs_step(robot_state)
class GMMPlannerWrapper(EEPlanner):
    """EEPlanner backed by the C++ ``GMMPlanner`` (plans from a learned GMM model).

    :param gmm_model_path: path to the serialized GMM model file (must exist)
    :param robot_config: dict with robot-specific offsets; keys used here:
        'tip_to_gripper_offset', 'gripper_to_base_rot_offset', 'gmm_base_offset'
    """

    def __init__(self,
                 gripper_goal_tip,
                 gripper_goal_wrist,
                 head_start,
                 map,
                 gmm_model_path: str,
                 robot_config):
        super(GMMPlannerWrapper, self).__init__(gripper_goal_tip,
                                                gripper_goal_wrist,
                                                head_start,
                                                map)
        self._planner = None
        # Fail fast with an explicit exception instead of `assert`, which is
        # silently stripped when Python runs with -O.
        if not os.path.exists(gmm_model_path):
            raise FileNotFoundError(f"Path {gmm_model_path} doesn't exist")
        self._gmm_model_path = gmm_model_path
        self._robot_config = robot_config

    def reset(self,
              robot_obs: RobotObs,
              slow_down_factor: float,
              is_analytic_env: bool,
              success_thres_dist,
              success_thres_rot) -> EEObs:
        """Instantiate a fresh GMMPlanner from the current robot state."""
        # NOTE: planners either take in the goal for the tip or the wrist, but always output plans for the wrist!
        self._planner = GMMPlanner(self.gripper_goal_wrist,
                                   robot_obs.gripper_tf,
                                   [0, 0, 0, 0, 0, 0, 1],
                                   robot_obs.base_tf,
                                   success_thres_dist,
                                   success_thres_rot,
                                   MIN_PLANNER_VELOCITY,
                                   MAX_PLANNER_VELOCITY,
                                   slow_down_factor,
                                   self._head_start,
                                   TIME_STEP_TRAIN,
                                   is_analytic_env,
                                   self._robot_config["tip_to_gripper_offset"],
                                   self._robot_config["gripper_to_base_rot_offset"],
                                   str(self._gmm_model_path),
                                   self._robot_config["gmm_base_offset"])
        return self.generate_obs_step(robot_obs)

    def step(self, robot_obs: RobotObs, learned_vel_norm: float) -> EEObs:
        return self._planner.step(robot_obs, learned_vel_norm)

    def generate_obs_step(self, robot_state: RobotObs) -> EEObs:
        return self._planner.generate_obs_step(robot_state)
| 40.875912 | 113 | 0.531964 | 4,809 | 0.85875 | 0 | 0 | 0 | 0 | 0 | 0 | 822 | 0.146786 |
ffac029080c8e95478b5522f00cf420e582609ac | 27,042 | py | Python | spectral_clustering_audio.py | gamaievsky/DescripteursHarmoniquesAudio | 551e253058502049a91803da8b0412b5ffb1bd60 | [
"MIT"
] | null | null | null | spectral_clustering_audio.py | gamaievsky/DescripteursHarmoniquesAudio | 551e253058502049a91803da8b0412b5ffb1bd60 | [
"MIT"
] | null | null | null | spectral_clustering_audio.py | gamaievsky/DescripteursHarmoniquesAudio | 551e253058502049a91803da8b0412b5ffb1bd60 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
======================
Laplacian segmentation
======================
This script implements the laplacian segmentation method of
`McFee and Ellis, 2014 <http://bmcfee.github.io/papers/ismir2014_spectral.pdf>`_,
with a couple of minor stability improvements.
This implementation is available at https://librosa.github.io/librosa/auto_examples/plot_segmentation.html
Additional functions have been added to the core segmentation:
- unsupervised determination of the number of clusters suitable for the running task
- different feature packages: spectral, cepstral and chroma.
- a cosine distance between the different clusters that is plot together with cluster segmentation
- a set of parameters reported in params.py file necessary for tuning the segmentation model.
usage:
python3 spectral_clustering_audio.py audiofilename.wav [.mp3]
Input:
- name of audio file to be analyzed
Output:
- Segmentation and grouping of the different musical sections synchronized on user-chosen onsets
- Optional plots of similarity and recurrence matrix
- Optional timestamps text file with parameters and time boundaries
"""
# Code source by Marie Tahon (2018) adapted from Brian McFee (2014)
# License: ISC
###################################
# Imports
# - numpy for basic functionality
# - scipy for graph Laplacian
# - matplotlib for visualization
# - sklearn.cluster for K-Means, for metrics and scaling.
# - warnings to delete warning message for scipy package
from __future__ import division
import numpy as np
import scipy
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
import sys, os
import argparse
import matplotlib.pyplot as plt
from matplotlib import gridspec
import sklearn.cluster
from sklearn.preprocessing import scale
import sklearn.metrics
import sklearn.utils
import librosa
import librosa.display
import cluster_rotate
import params
plt.rcParams.update({'font.size': 8})  # global matplotlib font size
# analysis parameters, all taken from params.py
BINS_PER_OCTAVE = params.BINS_PER_OCTAVE  # CQT frequency resolution (bins per octave)
N_OCTAVES = params.N_OCTAVES  # number of octaves spanned by the CQT
NFFT = int(params.NFFT)  # analysis window length in samples
STEP = int(params.STEP)  # hop length in samples
#######################################
def detect_onsets(y, sr, M):
    """Detect onsets from the spectrogram and synchronize the CQT on them.

    :param y: audio waveform (unused: the onset envelope is computed from M)
    :param sr: sampling rate
    :param M: CQT spectrogram in dB, shape (n_bins, n_frames)
    :return: (onset_raw, onset_times, Msync) — raw onset frame indices,
        onset times in seconds (padded with frames 0 and n_frames-1),
        and the CQT aggregated (median) between consecutive onsets
    """
    #detect onsets
    oenv = librosa.onset.onset_strength(S=M, sr=sr)
    # Detect events without backtracking
    onset_raw = librosa.onset.onset_detect(onset_envelope=oenv, backtrack=False)
    ## Backtrack the events using the onset envelope (only used for plotting below)
    onset_bt = librosa.onset.onset_backtrack(onset_raw, oenv)
    # we fix_frames to include non-beat frames 0 and C.shape[1] (final frame)
    onset_frames = librosa.util.fix_frames(onset_raw, x_min=0, x_max=M.shape[1]-1)
    onset_times = librosa.frames_to_time(onset_frames, sr=sr, hop_length = STEP)
    # To reduce dimensionality, aggregate the CQT between consecutive onsets (median)
    Msync = librosa.util.sync(M, onset_raw, aggregate=np.median)
    if params.onset_plot:
        plt.figure(figsize=(12, 4))
        plt.plot(oenv, label='Onset strength')
        plt.vlines(onset_raw, 0, oenv.max(), label='Raw onsets')
        plt.vlines(onset_bt, 0, oenv.max(), label='Backtracked', color='r')
        plt.legend(frameon=True, framealpha=0.75)
        plt.tight_layout()
        plt.figure(figsize=(12, 4))
        plt.subplot(2,1,1)
        plt.title('CQT spectrogram')
        librosa.display.specshow(M, y_axis='cqt_hz', sr=sr, hop_length= STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time')
        plt.tight_layout()
        plt.subplot(2,1,2)
        plt.title('CQT spectrogram synchronized on onsets')
        librosa.display.specshow(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis='cqt_hz', x_axis='time', x_coords=onset_times)
        plt.tight_layout()
    return onset_raw, onset_times, Msync
##############################################
def detect_beats(y, sr, M):
    """Track beats in the signal and synchronize the CQT on them.

    :param y: audio waveform
    :param sr: sampling rate
    :param M: CQT spectrogram in dB, shape (n_bins, n_frames)
    :return: (beats_frames, beat_times, Msync) — beat frame indices
        (padded with frames 0 and n_frames-1), beat times in seconds,
        and the CQT aggregated (median) between consecutive beats
    """
    tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length = STEP, trim=False)
    print('Detected tempo: {0:.2f} bpm'.format(tempo))
    beat_period = np.diff(librosa.frames_to_time(beats, sr=sr, hop_length= STEP))
    # NOTE(review): 60/mean(period) is a tempo in bpm, although the label
    # says "mean beat period"; the std is a period in seconds.
    print('mean beat period: {0:.2f} ; std beat period: {1:.2f}'.format(60/np.mean(beat_period), np.std(beat_period)))
    beats_frames = librosa.util.fix_frames(beats, x_min=0, x_max=M.shape[1]-1)
    beat_times = librosa.frames_to_time(beats_frames, sr=sr, hop_length = STEP)
    Msync = librosa.util.sync(M, beats_frames, aggregate=np.median)
    if params.onset_plot:
        plt.figure(figsize=(12, 4))
        plt.subplot(2,1,1)
        plt.title('CQT spectrogram')
        librosa.display.specshow(M, y_axis='cqt_hz', sr=sr, hop_length=STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time')
        plt.tight_layout()
        # For plotting purposes, we'll need the timing of the beats
        # we fix_frames to include non-beat frames 0 and C.shape[1] (final frame)
        plt.subplot(2,1,2)
        plt.title('CQT spectrogram synchronized on beats')
        librosa.display.specshow(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis='cqt_hz', x_axis='time', x_coords=beat_times)
        plt.tight_layout()
    return beats_frames, beat_times, Msync
##############################################
def no_onsets(sr, M):
    """Use every CQT frame as an 'onset' (no temporal aggregation).

    :param sr: sampling rate
    :param M: CQT spectrogram in dB, shape (n_bins, n_frames)
    :return: (frame indices, frame times in seconds, M unchanged)
    """
    n_frames = M.shape[1]
    onsets = np.arange(0, n_frames)
    # one "onset" per hop: convert frame indices directly to seconds
    onset_times = librosa.samples_to_time(onsets, sr=sr/STEP)
    if params.onset_plot:
        plt.figure(figsize=(12, 4))
        plt.title('CQT spectrogram')
        librosa.display.specshow(M, y_axis='cqt_hz', sr=sr, bins_per_octave=BINS_PER_OCTAVE, x_axis='time', x_coords=onset_times)
        plt.tight_layout()
    return onsets, onset_times, M
def get_manual_beats(sr, M, filename):
    """Read manually annotated onset times and synchronize the CQT on them.

    :param sr: sampling rate
    :param M: CQT spectrogram in dB, shape (n_bins, n_frames)
    :param filename: text file with one timestamp (seconds) per line;
        the first line is treated as a header and skipped
    :return: (onsets, onset_times, Msync) — onset frame indices
        (padded with frames 0 and n_frames-1), their times in seconds,
        and the CQT aggregated (median) between consecutive onsets
    """
    with open(filename, 'r') as f:
        data = f.readlines()
    # skip the header line, then convert seconds -> frame indices
    times = np.array([float(x.strip()) for x in data[1:]])
    frames = np.array([int(x * sr / STEP) for x in times])
    onsets = librosa.util.fix_frames(frames, x_min=0, x_max=M.shape[1]-1)
    onset_times = librosa.frames_to_time(onsets, sr=sr, hop_length = STEP)
    Msync = librosa.util.sync(M, onsets, aggregate=np.median)
    if params.onset_plot:
        plt.figure(figsize=(12, 4))
        plt.subplot(2,1,1)
        plt.title('CQT spectrogram')
        librosa.display.specshow(M, y_axis='cqt_hz', sr=sr, hop_length=STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time')
        plt.tight_layout()
        plt.subplot(2,1,2)
        plt.title('CQT spectrogram synchronized on beats')
        librosa.display.specshow(Msync, bins_per_octave=BINS_PER_OCTAVE, y_axis='cqt_hz', x_axis='time', x_coords=onset_times)
        plt.tight_layout()
    return onsets, onset_times, Msync
def extract_onsets(y, sr, manual_opt):
    """Compute the CQT and synchronize it on onsets chosen by params.onset.

    :param y: audio waveform
    :param sr: sampling rate
    :param manual_opt: path to a manual annotation file (used when
        params.onset == 'manual')
    :return: (onset, onset_times, Csync)
    """
    method = params.onset
    #compute the CQT transform C: np.array((252, Tmax*sr/STEP))
    C = librosa.amplitude_to_db(librosa.core.magphase(librosa.cqt(y=y, sr=sr, bins_per_octave=BINS_PER_OCTAVE, n_bins=N_OCTAVES * BINS_PER_OCTAVE, hop_length = STEP))[0], ref=np.max)
    #to reduce dimensionality, we'll onset-synchronous the CQT
    #onset is a vector of onset indexes np.array((N+1,)) including 0
    #onset_times is a vector of onset times np.array((N+1,)) including 0
    #Csync is the CQT transform synchronized on onsets np.array((252, N))
    if method == 'no':
        onset, onset_times, Csync = no_onsets(sr, C)
    elif method == 'onset':
        onset, onset_times, Csync = detect_onsets(y, sr, C)
    elif method == 'beat':
        onset, onset_times, Csync = detect_beats(y, sr, C)
    elif method == 'manual':
        onset, onset_times, Csync = get_manual_beats(sr, C, manual_opt)
    else:
        # unknown setting: abort the whole run (CLI tool behaviour)
        print('onset parameter is not well-defined')
        sys.exit()
    return onset, onset_times, Csync
def build_weighted_rec_matrix(M):
    """Build a median-filtered weighted recurrence (affinity) matrix from M.

    The width=3 constraint suppresses trivial self-similarity near the
    diagonal; the (1, 7) median filter in the time-lag domain smooths small
    gaps and prevents linkage errors.
    """
    affinity = librosa.segment.recurrence_matrix(M, width=3, mode='affinity',sym=True)
    # enhance diagonals with a median filter applied in the time-lag domain
    lag_filter = librosa.segment.timelag_filter(scipy.ndimage.median_filter)
    return lag_filter(affinity, size=(1, 7))
def build_seq_matrix(M, x):
    """Build the local path-similarity matrix from the acoustic features.

    :param M: feature matrix np.array((nb_features, nb_frames))
    :param x: onset frame indices used to synchronize the features
    :return: R_path, a matrix that is non-zero only on the two first
        off-diagonals: Rpath[i, i+/-1] = exp(-|M[i] - M[i+/-1]|^2 / sigma^2)
    """
    #build the sequence matrix using feature-similarity
    #Rpath[i, i+/-1] = \exp(- |M[i] - C[i+/-1]|^2 / sigma^2)`
    #synchronize features with onsets
    Msync = librosa.util.sync(M, x, aggregate=np.median)
    #Msync = M # (disabled) no synchronization
    #normalize (rescale) features between 0 and 1
    Msync_normed = scale(Msync)
    #constant scaling
    path_distance = np.sum(np.diff(Msync_normed, axis=1)**2, axis=0)
    #sigma is the median distance between successive beats/onsets.
    sigma = np.median(path_distance)
    path_sim = np.exp(-path_distance / sigma)
    #local scaling from A Spectral Clustering Approach to Speaker Diarization, Huazhong Ning, Ming Liu, Hao Tang, Thomas Huang
    R_path = np.diag(path_sim, k=1) + np.diag(path_sim, k=-1)
    return R_path
def build_laplacian_and_evec(Rf, R_path, opt, onsets):
    """Combine recurrence and path similarities, then eigendecompose the Laplacian.

    :param Rf: long-range recurrence affinity matrix
    :param R_path: local path similarity matrix
    :param opt: if True, plot the similarity matrices
    :param onsets: onset times (for plot axes only)
    :return: (Cnorm, evals, evecs) — cumulative norm of the eigenvectors,
        eigenvalues and median-filtered eigenvectors of the normalized Laplacian
    """
    # And compute the balanced combination A of the two similarity matrices Rf and R_path
    deg_path = np.sum(R_path, axis=1)
    deg_rec = np.sum(Rf, axis=1)
    # mu balances the two graphs from their degree distributions (McFee & Ellis 2014)
    mu = deg_path.dot(deg_path + deg_rec) / np.sum((deg_path + deg_rec)**2)
    print('Optimal weight value (mu): {0:.2f}'.format(mu))
    A = mu * Rf + (1 - mu) * R_path
    # Plot the resulting graphs
    if opt: plot_similarity(Rf, R_path, A, onsets)
    # L: symetrized normalized Laplacian
    L = scipy.sparse.csgraph.laplacian(A, normed=True)
    # and its spectral decomposition (Find eigenvalues w and optionally eigenvectors v of matrix L)
    evals, evecs = np.linalg.eigh(L)
    print('L shape:', L.shape)
    # We can clean this up further with a median filter.
    # This can help smooth over small discontinuities
    evecs = scipy.ndimage.median_filter(evecs, size=(9, 1))
    # cumulative normalization is needed for symmetric normalize laplacian eigenvectors
    Cnorm = np.cumsum(evecs**2, axis=1)**0.5
    return Cnorm, evals, evecs
################################################
def compute_nb_clusters(method, evals, evecs, Tmax):
    """Determine candidate numbers of clusters for the segmentation.

    :param method: one of 'fixed', 'max', 'evals', 'silhouette',
        'davies_bouldin', 'calinski_harabaz'
    :param evals: eigenvalues of the normalized Laplacian
    :param evecs: (median-filtered) eigenvectors of the normalized Laplacian
    :param Tmax: total signal length in samples (currently unused)
    :return: a list of candidate cluster counts (all branches return a list,
        so the caller can iterate over it)
    """
    if method == 'fixed':
        c = params.cluster_nb  # already a list
    elif method == 'max':
        # BUGFIX: `Cnorm` was referenced here without ever being defined
        # (NameError); recompute it locally from the eigenvectors.
        Cnorm = np.cumsum(evecs**2, axis=1)**0.5
        nc = []
        for it in range(params.cluster_max):
            nc.append(cluster_rotate.cluster_rotate(evecs/Cnorm, evals, range(1,10), 1, False))
        c = [int(np.mean(nc))+1]
    elif method == 'evals':
        # number of eigenvalues clearly below 0.25 (i.e. 1 - eval > 0.75)
        ind = np.where(1- evals > 0.75)[0]
        return [len(ind)+1 ]
    elif method in ['silhouette', 'davies_bouldin', 'calinski_harabaz']:
        list_k = range(2,50,2)
        # BUGFIX: the original used an undefined name `e` here (NameError);
        # the eigenvectors are `evecs`.
        Cnorm = np.cumsum(evecs**2, axis=1)**0.5
        # BUGFIX: `score` was reset inside the loop, so the arg-min/arg-max
        # below always saw a single value and returned list_k[0]; collect the
        # scores across all candidate k instead.
        score = []
        for k in list_k:
            print('nb of clusters:', k)
            X = evecs[:, :k] / Cnorm[:, k-1:k]
            # Let's use these k components to cluster beats into segments
            # (Algorithm 1)
            KM = sklearn.cluster.KMeans(n_clusters=k)
            seg_ids = KM.fit_predict(X)
            if method == 'silhouette':
                score.append(sklearn.metrics.silhouette_score(X, seg_ids, metric='euclidean'))  # best is max (close to 1)
            elif method == 'davies_bouldin':
                score.append(davies_bouldin_score(X, seg_ids))  # best is min
            elif method == 'calinski_harabaz':
                score.append(sklearn.metrics.calinski_harabaz_score(X, seg_ids))  # best is max
        if method == 'davies_bouldin':
            best_k = list_k[int(np.argmin(score))]
        else:
            best_k = list_k[int(np.argmax(score))]
        # BUGFIX: return a list like every other branch (main() does len(kl)
        # and iterates, which fails on a bare int)
        return [best_k]
    else:
        print('method for finding the right number of clusters is unknown')
        sys.exit()
    print('nb of clusters:', c)
    return c
def davies_bouldin_score(X, labels):
    """Computes the Davies-Bouldin score.

    The score is defined as the ratio of within-cluster distances to
    between-cluster distances (lower is better).

    Parameters
    ----------
    X : array-like, shape (``n_samples``, ``n_features``)
        List of ``n_features``-dimensional data points. Each row corresponds
        to a single data point.
    labels : array-like, shape (``n_samples``,)
        Predicted labels for each sample.

    Returns
    -------
    score: float
        The resulting Davies-Bouldin score.

    References
    ----------
    .. [1] `Davies, David L.; Bouldin, Donald W. (1979).
       "A Cluster Separation Measure". IEEE Transactions on
       Pattern Analysis and Machine Intelligence. PAMI-1 (2): 224-227`_
    """
    X, labels = sklearn.utils.check_X_y(X, labels)
    le = sklearn.preprocessing.LabelEncoder()
    labels = le.fit_transform(labels)
    n_samples, _ = X.shape
    n_labels = len(le.classes_)
    if not 1 < n_labels < n_samples:
        raise ValueError("Number of labels is %d. Valid values are 2 to n_samples - 1 (inclusive)" % n_labels)
    intra_dists = np.zeros(n_labels)
    # BUGFIX: np.float was removed in NumPy >= 1.24; the builtin float is the
    # exact equivalent.
    centroids = np.zeros((n_labels, len(X[0])), dtype=float)
    for k in range(n_labels):
        # BUGFIX: sklearn.utils.safe_indexing was removed in scikit-learn 0.24;
        # X is a validated ndarray here, so boolean masking is equivalent.
        cluster_k = X[labels == k]
        centroid = cluster_k.mean(axis=0)
        centroids[k] = centroid
        # mean distance of the cluster's points to its centroid
        intra_dists[k] = np.average(sklearn.metrics.pairwise.pairwise_distances(cluster_k, [centroid]))
    centroid_distances = sklearn.metrics.pairwise.pairwise_distances(centroids)
    if np.allclose(intra_dists, 0) or np.allclose(centroid_distances, 0):
        return 0.0
    # pairwise (s_i + s_j) / d(c_i, c_j); diagonal becomes NaN via inf masking
    score = (intra_dists[:, None] + intra_dists) / centroid_distances
    score[score == np.inf] = np.nan
    return np.mean(np.nanmax(score, axis=1))
def plot_similarity(Rf, R_path, A, onset_times):
    """Plot the recurrence, path and combined affinity matrices side by side."""
    panels = [
        (Rf, {'y_axis': 'time', 'y_coords': onset_times}, 'Long-range recurrence similarity (Rrec)'),
        (R_path, {}, 'Local path similarity (Rloc)'),
        (A, {}, 'Combined graph (A = m Rrec + (1-m) Rloc)'),
    ]
    plt.figure(figsize=(12, 4))
    for position, (matrix, extra_kwargs, title) in enumerate(panels, start=1):
        plt.subplot(1, 3, position)
        librosa.display.specshow(matrix, cmap='inferno_r', **extra_kwargs)
        plt.title(title)
    plt.tight_layout()
def plot_structure(Rf, X, seg_ids, k, onset_times):
    """Plot eigenvector structure components, recurrence matrix and segment labels.

    :param Rf: recurrence affinity matrix
    :param X: first k normalized eigenvectors (one row per onset)
    :param seg_ids: cluster label of each onset
    :param k: number of clusters (colormap size)
    :param onset_times: onset times in seconds (y axis of the components plot)
    """
    fig_s = plt.figure(figsize=(12, 4))
    # one distinct color per cluster label
    colors = plt.get_cmap('Paired', k)
    ax_s1 = fig_s.add_subplot(1, 3, 2)
    librosa.display.specshow(Rf, cmap='inferno_r')
    ax_s1.set_title('Long-range recurrence similarity (Rrec)')
    ax_s2 =fig_s.add_subplot(1, 3, 1)
    librosa.display.specshow(X, y_axis='time', y_coords=onset_times)
    ax_s2.set_title('Structure components (Eigen vectors)')
    ax_s3 = fig_s.add_subplot(1, 3, 3)
    # labels drawn as a one-column image, one colored cell per onset
    librosa.display.specshow(np.atleast_2d(seg_ids).T, cmap=colors)
    ax_s3.set_title('Estimated segments')
    plt.colorbar(ticks=range(k))
    plt.tight_layout()
#################################################
def compute_musical_density(C, onset_times, w, alpha):
    """Count, per frame, the features exceeding a windowed relative threshold.

    For each frame n, the threshold is alpha times the maximum of C over a
    sliding window of duration w seconds around onset_times[n] (clamped so
    the window stays inside the signal).

    :param C: feature matrix, shape (n_features, n_frames)
    :param onset_times: time in seconds of each frame, shape (n_frames,)
    :param w: window duration in seconds
    :param alpha: relative threshold factor in [0, 1]
    :return: list of per-frame counts (the "density")
    """
    n_frames = C.shape[1]
    last_time = onset_times[-1]
    density = []
    for frame in range(n_frames):
        # clamp the window [win_start, win_end) inside the signal
        win_end = min(last_time, onset_times[frame] + w)
        win_start = min(last_time - w, onset_times[frame])
        in_window = np.where((onset_times < win_end) & (onset_times >= win_start))
        peak = np.max(C[:, in_window])
        active = np.where(C[:, frame] > alpha * peak)
        density.append(len(active[0]))
    return density
def plot_features(X, onsets, onset_times):
    """Plot the onset-synchronous features and their musical density.

    Only the 'chroma' and 'cepstral' feature packages are supported;
    other selections just print a message.

    :param X: feature matrix, shape (n_features, n_frames)
    :param onsets: onset frame indices used for synchronization
    :param onset_times: onset times in seconds (x axis)
    """
    Xsync = librosa.util.sync(X, onsets, aggregate=np.median)
    #print(X.shape, Xsync.shape)
    #print(onset_times)
    if params.feat[0] == 'chroma':
        fig_c = plt.figure(figsize=(12, 6))
        ax0_c = fig_c.add_subplot(3,1,1)
        ax0_c.set_title('onset-synchronous chroma (12)')
        #ax0_c.pcolor(distance, cmap = 'plasma')
        librosa.display.specshow(Xsync[:12,:], y_axis='chroma', x_axis='time', x_coords=onset_times, cmap = 'OrRd')
        #plt.colorbar()
        ax1_c = fig_c.add_subplot(3,1,2, sharex = ax0_c)
        ax1_c.set_title('onset-synchronous delta chroma (12)')
        # rows 12: are the delta-chroma features (see feature_extraction)
        librosa.display.specshow(np.abs(Xsync[12:,:]), y_axis='chroma', x_axis='time', x_coords=onset_times, cmap = 'OrRd')
        #plt.colorbar()
        density = compute_musical_density(Xsync[:12,:], onset_times, params.norm_density_win, params.alpha)
        print(len(onset_times), len(density))
        ax2_c = fig_c.add_subplot(3,1,3, sharex = ax0_c)
        ax2_c.set_title('musical density')
        ax2_c.plot(onset_times, density)
        plt.tight_layout()
    elif params.feat[0] == 'cepstral':
        fig_s = plt.figure(figsize=(12, 6))
        ax0_s = fig_s.add_subplot(3,1,1)
        ax0_s.set_title('onset-synchronous MFCC (20)')
        librosa.display.specshow(Xsync[:21,:], x_axis='time', x_coords=onset_times)
        #plt.colorbar()
        #plt.tight_layout()
        ax1_s = fig_s.add_subplot(3,1,2, sharex = ax0_s)
        ax1_s.set_title('onset-synchronous delta MFCC (20)')
        librosa.display.specshow(np.abs(Xsync[20:,:]), x_axis='time', x_coords=onset_times)
        #plt.colorbar()
        density = compute_musical_density(Xsync[:21,:], onset_times, params.norm_density_win, params.alpha)
        # NOTE(review): this reuses subplot (3,1,2) and so draws over the
        # delta-MFCC axes — possibly intended to be (3,1,3); confirm
        ax2_s = fig_s.add_subplot(3,1,2, sharex = ax0_s)
        ax2_s.set_title('musical density')
        ax2_s.plot(onset_times, density)
        plt.tight_layout()
    else:
        print('these parameters can not be plot')
def load_wav_percu(filename, start, duration, opt_percussive_part):
    """Load an audio excerpt, optionally keeping only its percussive part.

    :param filename: path of the audio file
    :param start: offset in seconds at which to start reading
    :param duration: duration in seconds to read
    :param opt_percussive_part: if True, run harmonic/percussive separation,
        write both parts next to the input file and return the percussive one
    :return: (waveform, sampling rate)
    """
    y, sr = librosa.load(filename, offset=start, duration = duration)
    if opt_percussive_part:
        # separate harmonics and percussives into two wavforms
        # BUGFIX: the percussive signal was unpacked as `yo` but then written
        # out (and implicitly returned) as the undefined name `y_percu`,
        # raising NameError whenever this branch ran.
        y_harmo, y_percu = librosa.effects.hpss(y)
        librosa.output.write_wav(filename + '_harmo.wav', y_harmo, sr)
        librosa.output.write_wav(filename + '_percu.wav', y_percu, sr)
        return y_percu, sr
    else:
        return y, sr
################################################
def feature_extraction(y, sr, opt_tuning):
    """Compute the frame-level feature matrix used for the local similarity graph.

    The feature packages are selected through params.feat ('cepstral',
    'chroma', 'spectral'); each package is stacked with its delta features.

    :param y: audio waveform
    :param sr: sampling rate
    :param opt_tuning: if True, estimate the deviation from A440 and pass it
        as tuning to the chroma extractor
    :return: (full, idx_chroma) — feature matrix np.array((nb_features, nb_frames))
        and the index of the chroma package in the selection order
        (0 when chroma is not selected)
    """
    if opt_tuning:
        #extraction of tuning
        A440 = librosa.estimate_tuning(y=y, sr=sr, resolution=1e-3)
        print('Deviation from A440 is : {0:.2f}'.format(A440))
    else:
        A440 = 0.0
    print('Features for local similarity: ', ' '.join(params.feat))
    full = []
    idx_chroma = 0
    if 'cepstral' in params.feat:
        mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc = 20, n_fft = NFFT, hop_length = STEP)
        mfcc_delta = librosa.feature.delta(mfcc)
        fcep = np.concatenate((mfcc, mfcc_delta), axis=0)
        full.append(fcep)
    if 'chroma' in params.feat:
        chroma = librosa.feature.chroma_cqt(y=y, sr=sr, n_chroma = 12, n_octaves = N_OCTAVES, hop_length = STEP, norm = None, tuning= A440)
        chroma_delta = librosa.feature.delta(chroma)
        fchr = np.concatenate((chroma, chroma_delta), axis=0)
        idx_chroma = len(full)
        full.append(fchr)
    if 'spectral' in params.feat:
        centroid = librosa.feature.spectral_centroid(y=y, sr=sr, n_fft = NFFT, hop_length = STEP)
        contrast = librosa.feature.spectral_contrast(y=y, sr=sr, n_fft = NFFT, n_bands=6, hop_length = STEP)
        flatness = librosa.feature.spectral_flatness(y=y, n_fft = NFFT, hop_length = STEP)
        rolloff05 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.05)
        rolloff25 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.25)
        rolloff50 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.50)
        rolloff75 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.75)
        rolloff95 = librosa.feature.spectral_rolloff(y=y, sr= sr, n_fft = NFFT, hop_length = STEP, roll_percent= 0.95)
        spec = np.concatenate((centroid, contrast, flatness, rolloff05,rolloff25,rolloff50,rolloff75,rolloff95), axis=0)
        spec_delta = librosa.feature.delta(spec)
        fspec = np.concatenate((spec, spec_delta), axis = 0)
        full.append(fspec)
    # BUGFIX: `np.array(full)[0]` silently kept only the FIRST selected feature
    # package (and building a ragged np.array fails on recent NumPy when
    # several packages are selected). Row-wise concatenation is identical when
    # one package is selected and correct when several are.
    full = np.concatenate(full, axis=0)
    print('feature shape', full.shape)
    return full, idx_chroma
def extract_time_boundaries(cluster_ids, onsets, nb_frames, sr):
    """Convert per-onset cluster labels into segment boundaries in seconds.

    :param cluster_ids: cluster label of each onset, np.array((N,))
    :param onsets: onset frame indices, np.array((N,))
    :param nb_frames: total number of frames of the feature matrix
    :param sr: sampling rate
    :return: (bound_times, bound_labels) — boundary times in seconds
        (covering the whole track) and the label of each started segment
    """
    # Locate segment boundaries from the label sequence
    bound_beats = 1 + np.flatnonzero(cluster_ids[:-1] != cluster_ids[1:])
    # Count beat 0 as a boundary
    bound_beats = librosa.util.fix_frames(bound_beats, x_min=0)
    # Compute the segment label for each boundary
    bound_labels = list(cluster_ids[bound_beats])
    # Convert beat indices to frames
    bound_frames = onsets[bound_beats]
    # Make sure we cover to the end of the track
    bound_frames = librosa.util.fix_frames(bound_frames, x_min=None, x_max=nb_frames-1)
    bound_times = librosa.frames_to_time(bound_frames, sr=sr, hop_length = STEP)
    return bound_times, bound_labels
##################################
def extract_cosine_distance_clusters(center_clusters, distance_ref, type_dist = 'cos'):
    """Distance from each cluster centroid to a reference vector.

    :param center_clusters: iterable of centroid vectors
    :param distance_ref: reference vector
    :param type_dist: 'cos' for cosine distance, 'eucl' for Euclidean
        (any other value yields an empty list)
    :return: list of distances, one per centroid
    """
    if type_dist == 'cos':
        return [scipy.spatial.distance.cosine(centroid, distance_ref)
                for centroid in center_clusters]
    elif type_dist == 'eucl':
        return [np.sqrt(np.sum((centroid - distance_ref)**2))
                for centroid in center_clusters]
    return []
def extract_distance_between_clusters(center_clusters, type_dist = 'cos'):
    """Pairwise distance matrix between cluster centroids, shown as a heatmap.

    :param center_clusters: np.array((n_clusters, n_features)) of centroids
    :param type_dist: 'cos' for cosine distance, 'eucl' for Euclidean
    :return: np.array((n_clusters, n_clusters)) distance matrix
    """
    n_clusters = center_clusters.shape[0]
    # BUGFIX: the matrix was allocated with `center_clusters.shape`, i.e.
    # (n_clusters, n_features); it must be square (n_clusters, n_clusters)
    # and raised IndexError whenever n_features < n_clusters.
    distance = np.zeros((n_clusters, n_clusters))
    for i, center_i in enumerate(center_clusters):
        for j, center_j in enumerate(center_clusters):
            if type_dist == 'cos':
                distance[i,j] = scipy.spatial.distance.cosine( center_i, center_j)
            elif type_dist == 'eucl':
                distance[i,j] = np.sqrt( np.sum( (center_i - center_j)**2) )
    # tick positions centred on each heatmap cell, labelled by cluster number
    xloc = [c + 0.5 for c in range(n_clusters)]
    cx = [str(c) for c in range(n_clusters)]
    fig_d, ax_d = plt.subplots(figsize=(5, 4))
    p_d = ax_d.pcolor(distance, cmap = 'inferno_r')
    cb = fig_d.colorbar(p_d)
    ax_d.xaxis.set_ticks(xloc)
    ax_d.xaxis.set_ticklabels(cx)
    ax_d.yaxis.set_ticks(xloc)
    ax_d.yaxis.set_ticklabels(cx)
    ax_d.set_title('Distance between clusters')
    ax_d.set_xlabel('clusters numbers')
    plt.tight_layout()
    return distance
def extract_ref_signal(X, onset_times):
    """Select the rows of X whose onset time lies in the reference interval
    [params.begin_ref, params.end_ref)."""
    in_ref_interval = (onset_times >= params.begin_ref) & (onset_times < params.end_ref)
    ind = np.where(in_ref_interval)
    return X[ind,:]
def main():
    """Run the full pipeline: load audio, extract features and onsets, build the
    affinity graph, cluster onsets into segments, and plot/write the results.
    All tuning parameters come from params.py; the audio file (and optionally a
    manual onset annotation file) come from the command line.
    """
    parser = argparse.ArgumentParser(description='Segmentation and clustering of musical sections with spectral clustering (Laplacian matrix and eigen values)')
    parser.add_argument('filename', type=str, help='name of audio file')
    parser.add_argument('manual_onset', nargs='?', type=str, help='name of the file containing manual annotations for onset timestamps (with method=manual)')
    args = parser.parse_args()
    #==================
    # Signal processing
    #==================
    #extract waveform from audio signal of given duration and begining. If onset_percu is True, extract only percussive part of the signal.
    y, sr = load_wav_percu(args.filename, params.begin, params.duration, params.onset_percu)
    print('signal shape:', y.shape, ' sr=', sr, 'win duration=%.2f' %(NFFT / sr))
    #extract acoustic feature from audio signal feat is a matrix np.array((nb features, Tmax*sr/STEP))
    feat, idx_chroma = feature_extraction(y, sr, params.opt_tuning)
    #extract onset indexes and times + onset-synchronous CQT transform on onsets.
    onsets, onset_times, Csync = extract_onsets(y, sr, args.manual_onset)
    #if 'chroma' in params.feat:
    #   compute_musical_density(Csync, onset_times, idx_chroma, params.norm_density_win, params.alpha, sr)
    if params.plot_features: plot_features(feat, onsets, onset_times)
    #================
    # Affinity matrix
    #================
    #compute a non-negative affinity matrix using onset-synchronous CQT (with Gaussian kernel)
    #represent local consistency of timbral (CQT) features
    Rf = build_weighted_rec_matrix(Csync)
    #compute a non-negative affinity matrix using onset-synchronous feature matrix (with Gaussian kernel)
    #represent long-range repeating forms of harmonic features
    R_path = build_seq_matrix(feat, onsets)
    #compute Laplacian (sequence augmented affinity matrix) as a linear combination of Rf and Rpath and extract eigenvalues and vectors.
    Cnorm, evals, evecs = build_laplacian_and_evec(Rf, R_path, params.plot_simi, onset_times)
    #===========
    # Clustering
    #===========
    #determine number of clusters kl is a list of potential numbers of cluster.
    kl = compute_nb_clusters(params.cluster_method, evals, evecs, y.shape[0]*sr)
    N_CLUST = len(kl)
    #=================
    # Start plotting
    #=================
    import matplotlib.patches as patches
    fig_f = plt.figure(figsize = (12, 3+2*N_CLUST))
    #fig.subplots_adjust(hspace=.5)
    #plot onset-synchronous CQT
    # first row (the spectrogram) twice as tall as each segmentation row
    hr = [1] * (N_CLUST +1)
    hr[0] = 2
    gs = gridspec.GridSpec(1 + N_CLUST,1, height_ratios=hr)
    ax_f0 = fig_f.add_subplot(gs[0])
    librosa.display.specshow(Csync, y_axis='cqt_hz', sr=sr, hop_length = STEP, bins_per_octave=BINS_PER_OCTAVE, x_axis='time', x_coords=onset_times)
    #librosa.display.specshow(feat, y_axis='chroma', x_axis='time') #or
    ax_f0.set_title('CQT spectrogram synchronized {0}'.format(params.onset))
    # one segmentation row per candidate number of clusters
    for it, k in enumerate(kl):
        #limit the number of clusters per second
        if k > params.cluster_nb_max*sr*y.shape[0]:
            k = params.cluster_nb_max*sr*y.shape[0]
        print('nb of clusters: {} for it {}/{}'.format(k, it, N_CLUST))
        #for k clusters, use the first k normalized eigenvectors.
        #X can be interpretable as an onset-synchronous matrix containing relevant feature information for local and log-range structure segmentation
        X = evecs[:, :k] / Cnorm[:, k-1:k]
        #onsets are grouped into k clusters, each cluster having its own acoustic characteristics
        KM = sklearn.cluster.KMeans(n_clusters=k)
        #seg_ids is a np.array((label)) label being a number corresponding to one cluster seg_ids[i] is the label of onset i
        seg_ids = KM.fit_predict(X)
        #if needed compute the cosine distance between each cluster and a reference taken at the very begining of th signal
        #KM.cluster_centers_ : array, [n_clusters, n_features]
        if params.cluster_dist:
            ref_signal = extract_ref_signal(X, onset_times)
            distance_cosine_cluster = extract_cosine_distance_clusters( KM.cluster_centers_, np.mean(X[:10*NFFT,:], axis=0))
        else:
            distance_cosine_cluster = None
        if params.plot_dist:
            distance_between_clusters = extract_distance_between_clusters( KM.cluster_centers_ )
        # and plot the resulting structure representation
        if params.plot_struct: plot_structure(Rf, X, seg_ids, k, onset_times)
        bound_times, bound_labels = extract_time_boundaries(seg_ids, onsets, feat.shape[1], sr)
        freqs = librosa.cqt_frequencies(n_bins=Csync.shape[0], fmin=librosa.note_to_hz('C1'), bins_per_octave=BINS_PER_OCTAVE)
        timestamps_name = os.path.splitext(args.filename)[0] + '_timestamps.txt'
        #=============
        # Plot results
        #=============
        cmap = plt.get_cmap('Paired', k)
        #write header of text file with parameters.
        if params.timestamps:
            f = open(timestamps_name, 'a')
            f.write('WIN = {0:.2f} sec, NFFT = {1}, STEP = {2}, begin = {3}, duration = {4}\n'.format(NFFT / sr, NFFT, STEP, params.begin, params.duration))
            f.write('Nb of clusters: {0} obtained with method {1} and features {2}\n'.format(k, params.cluster_method, '-'.join(params.feat)))
        #plot onset-synchronous CQT
        #if it == 0:
        #plot segmentation and clusters grouping (+ cosine distance.)
        #also write obtained boundaries in the text file.
        ax_f1 = fig_f.add_subplot(gs[it + 1], sharex = ax_f0)
        for interval, label in zip(zip(bound_times, bound_times[1:]), bound_labels):
            if params.timestamps: f.write('{0:.2f} \t {1:.2f} \t {2} \n'.format(interval[0], interval[1], label))
            if params.cluster_dist: ax_f1.plot([interval[0], interval[1]],[distance_cosine_cluster[label], distance_cosine_cluster[label]], 'k')
            ax_f1.add_patch(patches.Rectangle((interval[0], 0), interval[1] - interval[0], 1, facecolor=cmap(label), alpha=1))
            ax_f1.text(interval[0]+(interval[1]-interval[0])/2, 0.9, label, fontsize=8)
        if params.timestamps: f.close()
    #plt.subplots_adjust(hspace=.0)
    plt.tight_layout()
    plt.show()
if __name__ == '__main__':
    main()

# BUGFIX / NOTE(review): the statements below used to execute unconditionally
# at import time and always crashed: `load` is not defined in this module,
# `main()` takes no arguments, and the path is user-specific. They are kept
# commented out for reference only.
# title = 'Palestrina'
# # Palestrina, AccordsMajeurs, AccordsMineur, Majeur3et4notes, Majeur3et4notes, Accords3Notes, DispoMajeurMineur, Tension
# # Cadence3V, Cadence4VMaj, Cadence4Vmin,
# audio = load('/Users/manuel/Dropbox (TMG)/Thèse/code/DescripteursHarmoniquesAudio/'+title+'.wav')
# main(audio)
| 35.912351 | 179 | 0.717329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,698 | 0.358614 |
ffac5d6d4f8a223b64490743a33929b7ee781bea | 8,851 | py | Python | src/distributed_cache.py | kontexia/soam | 5c2dc97420804ae601780e6aa0d827bafadd64b7 | [
"BSD-3-Clause"
] | 1 | 2020-11-15T18:35:48.000Z | 2020-11-15T18:35:48.000Z | src/distributed_cache.py | kontexia/soam | 5c2dc97420804ae601780e6aa0d827bafadd64b7 | [
"BSD-3-Clause"
] | null | null | null | src/distributed_cache.py | kontexia/soam | 5c2dc97420804ae601780e6aa0d827bafadd64b7 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from typing import Optional, Dict, Union, List
from dask.distributed import Client, Variable, Lock, ActorFuture, TimeoutError
from src.kv_cache import KVGraphCache, KVCacheValueType
class DistributedCache:
def __init__(self, config: Optional[Dict[str, Union[str, bool]]], dask_client=None):
"""
Class that implements a simple distributed key value cache that takes care of persisting and restoring to database
:param config: keys required:\n
db_username: the username to login to db with
db_system: the db type - arango-db
db_name: the name of the database
db_config_file_path: the path with the database configuration
db_queries_file_path: the path with the query definitions
named_query_collection - postfix for a store's named query collection
scheduler_address - the address of the Dask Cluster Scheduler
"""
self.config = config
if dask_client is None:
self.client = Client(config['scheduler_address'])
else:
self.client = dask_client
@staticmethod
def lock_store(store_name: str) -> Lock:
"""
method to create a distributed lock on a complete store
:param store_name: the store to lock
:return: lock object to use within a with statement
"""
distributed_store_name = 'KVCache:{}'.format(store_name)
return Lock(distributed_store_name)
@staticmethod
def lock_key(store_name: str, key: str) -> Lock:
"""
method to create a distributed lock on a key within a store
:param store_name: the store to lock
:param key: the key in the store to lock
:return: lock object to use within a with statement
"""
distributed_store_name_key = 'KVCache:{}:{}'.format(store_name, key)
return Lock(distributed_store_name_key)
def create_distributed_store(self, store_name: str, key: Optional[Union[List[str], str]] = None, restore: bool = True):
"""
method to initialise a store and restore
:param store_name: the store_name
:param key: a key or list of keys to restore
:param restore: If true then restore from database
:return: True if restored else false
"""
store_lock = self.lock_store(store_name=store_name)
store_lock.acquire()
# create a distributed variable with the kv_cache object
#
distributed_store_name = 'KVCache_{}'.format(store_name)
# flag indicating if restored from DB
#
result = False
try:
# try to get the cache
#
Variable(distributed_store_name).get(timeout=0.1)
except TimeoutError:
# the distributed variable does not exist so create it
#
cache_future = self.client.submit(KVGraphCache, self.config, actor=True)
# create the distributed variable and store the new cache actor
#
Variable(distributed_store_name).set(cache_future)
# only restore if not in cache and asked to
#
if restore:
cache = cache_future.result()
cache.restore(store_name=store_name, key=key, include_history=False)
result = True
store_lock.release()
return result
def set_kv(self, store_name: str, key: str, value: KVCacheValueType, set_update_flag: bool = True, persist: bool = True, keep_cache: bool = True, lock_cache: bool = True) -> ActorFuture:
"""
method to add a key value pair to a store. If key already exists then will overwrite
:param store_name: the store name
:param key: the unique key within the store
:param value: the data to save
:param set_update_flag: If true then the update flag will be set
:param persist: If true the changes will be persisted in database
:param lock_cache: If True the cache will be locked before accessing. If False it assumes a lock has already been acquired
:param keep_cache: If False then cache will be deleted after being persisted
:return: ActorFuture with True if success else False
"""
distributed_store_name = 'KVCache_{}'.format(store_name)
try:
dist_cache_var = Variable(distributed_store_name).get(timeout=0.1)
cache = dist_cache_var.result()
if lock_cache:
cache_lock = self.lock_key(store_name=store_name, key=key)
cache_lock.acquire()
else:
cache_lock = None
result = cache.set_kv(store_name=store_name, key=key, value=value, set_update_flag=set_update_flag)
if persist:
result = cache.persist(store_name=store_name, keep_cache=keep_cache)
if cache_lock is not None:
cache_lock.release()
except TimeoutError:
result = None
return result
def del_kv(self, store_name: str, key: Optional[str] = None, persist=True, lock_cache: bool = True) -> ActorFuture:
"""
method to delete a key within a store or the whole store. If the store/key has been persisted then the delete will be propagated when persist() is called
:param store_name: the store name
:param key: the key to delete
:param persist: If true the changes will be persisted in database
:param lock_cache: If True the cache will be locked before accessing. If False it assumes a lock has already been acquired
:return: ActorFuture with True if success else False
"""
distributed_store_name = 'KVCache_{}'.format(store_name)
try:
dist_cache_var = Variable(distributed_store_name).get(timeout=0.1)
cache = dist_cache_var.result()
if lock_cache:
cache_lock = self.lock_key(store_name=store_name, key=key)
cache_lock.acquire()
else:
cache_lock = None
result = cache.del_kv(store_name=store_name, key=key, delete_from_db=persist)
if persist:
result = cache.persist(store_name=store_name)
if cache_lock is not None:
cache_lock.release()
except TimeoutError:
result = None
return result
def get_kv(self, store_name: str, key: Optional[str] = None, lock_cache: bool = True) -> ActorFuture:
"""
method to retrieve a stored value
:param store_name: the store name
:param key: the key within the store
:param lock_cache: If True the cache will be locked before accessing. If False it assumes a lock has already been acquired
:return: ActorFuture with either KVCacheValueType or None if it doesnt exist
"""
distributed_store_name = 'KVCache_{}'.format(store_name)
try:
dist_cache_var = Variable(distributed_store_name).get(timeout=0.1)
cache = dist_cache_var.result()
if lock_cache:
cache_lock = self.lock_key(store_name=store_name, key=key)
cache_lock.acquire()
else:
cache_lock = None
result = cache.get_kv(store_name=store_name, key=key)
if cache_lock is not None:
cache_lock.release()
except TimeoutError:
result = None
return result
def restore(self, store_name, include_history: bool = False, lock_cache: bool = True) -> bool:
"""
method to restore a store_name and optionally specific key
:param store_name: the store_name to restore
:param include_history: If True restore all history
:param lock_cache: If True the cache will be locked before accessing. If False it assumes a lock has already been acquired
:return: ActorFuture with True if restored else False
"""
distributed_store_name = 'KVCache_{}'.format(store_name)
try:
dist_cache_var = Variable(distributed_store_name).get(timeout=0.1)
cache = dist_cache_var.result()
if lock_cache:
cache_lock = self.lock_store(store_name=store_name)
cache_lock.acquire()
else:
cache_lock = None
result = cache.restore(store_name=store_name, include_history=include_history)
if cache_lock is not None:
cache_lock.release()
except TimeoutError:
result = None
return result
| 38.482609 | 190 | 0.619817 | 8,617 | 0.973562 | 0 | 0 | 797 | 0.090046 | 0 | 0 | 3,825 | 0.432155 |
ffae59649ff2b990843a197023e765523f170d67 | 7,220 | py | Python | snaps/openstack/tests/create_user_tests.py | hashnfv/hashnfv-snaps | 0dfca494ef7c2778babfac48d9b701953860b54f | [
"Apache-2.0"
] | null | null | null | snaps/openstack/tests/create_user_tests.py | hashnfv/hashnfv-snaps | 0dfca494ef7c2778babfac48d9b701953860b54f | [
"Apache-2.0"
] | null | null | null | snaps/openstack/tests/create_user_tests.py | hashnfv/hashnfv-snaps | 0dfca494ef7c2778babfac48d9b701953860b54f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Cable Television Laboratories, Inc. ("CableLabs")
# and others. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import uuid
from snaps.openstack.create_user import OpenStackUser, UserSettings
from snaps.openstack.tests.os_source_file_test import OSComponentTestCase
from snaps.openstack.utils import keystone_utils
__author__ = 'spisarski'
class UserSettingsUnitTests(unittest.TestCase):
    """
    Unit tests covering construction of the UserSettings class
    """

    def _assert_name_pass_defaults(self, settings):
        # Shared checks for settings built from only a name and password.
        self.assertEqual('foo', settings.name)
        self.assertEqual('bar', settings.password)
        self.assertIsNone(settings.project_name)
        self.assertIsNone(settings.email)
        self.assertTrue(settings.enabled)

    def _assert_full_values(self, settings):
        # Shared checks for settings built with every supported attribute.
        self.assertEqual('foo', settings.name)
        self.assertEqual('bar', settings.password)
        self.assertEqual('proj-foo', settings.project_name)
        self.assertEqual('foo@bar.com', settings.email)
        self.assertFalse(settings.enabled)

    def test_no_params(self):
        with self.assertRaises(Exception):
            UserSettings()

    def test_empty_config(self):
        with self.assertRaises(Exception):
            UserSettings(**dict())

    def test_name_only(self):
        with self.assertRaises(Exception):
            UserSettings(name='foo')

    def test_config_with_name_only(self):
        with self.assertRaises(Exception):
            UserSettings(**{'name': 'foo'})

    def test_name_pass_enabled_str(self):
        with self.assertRaises(Exception):
            UserSettings(name='foo', password='bar', enabled='true')

    def test_config_with_name_pass_enabled_str(self):
        with self.assertRaises(Exception):
            UserSettings(
                **{'name': 'foo', 'password': 'bar', 'enabled': 'true'})

    def test_name_pass_only(self):
        self._assert_name_pass_defaults(
            UserSettings(name='foo', password='bar'))

    def test_config_with_name_pass_only(self):
        self._assert_name_pass_defaults(
            UserSettings(**{'name': 'foo', 'password': 'bar'}))

    def test_all(self):
        self._assert_full_values(
            UserSettings(name='foo', password='bar', project_name='proj-foo',
                         email='foo@bar.com', enabled=False))

    def test_config_all(self):
        self._assert_full_values(
            UserSettings(**{'name': 'foo', 'password': 'bar',
                            'project_name': 'proj-foo',
                            'email': 'foo@bar.com',
                            'enabled': False}))
class CreateUserSuccessTests(OSComponentTestCase):
    """
    Integration tests for the OpenStackUser class defined in create_user.py.

    These tests require a live OpenStack deployment (keystone) reachable
    through the credentials supplied by OSComponentTestCase.
    """

    def setUp(self):
        """
        Creates the UserSettings object and keystone client that each test
        uses to create an OpenStack user.
        """
        guid = str(uuid.uuid4())[:-19]
        guid = self.__class__.__name__ + '-' + guid
        # Unique per-run name/password so parallel runs cannot collide.
        self.user_settings = UserSettings(
            name=guid + '-name',
            password=guid + '-password',
            roles={'admin': self.os_creds.project_name},
            domain_name=self.os_creds.user_domain_name)

        self.keystone = keystone_utils.keystone_client(self.os_creds)

        # Initialize for cleanup
        self.user_creator = None

    def tearDown(self):
        """
        Cleans up the user created during the test (if any).
        """
        if self.user_creator:
            self.user_creator.clean()

    def test_create_user(self):
        """
        Tests the creation of an OpenStack user.
        """
        self.user_creator = OpenStackUser(self.os_creds, self.user_settings)
        created_user = self.user_creator.create()
        self.assertIsNotNone(created_user)

        retrieved_user = keystone_utils.get_user(self.keystone,
                                                 self.user_settings.name)
        self.assertIsNotNone(retrieved_user)
        self.assertEqual(created_user, retrieved_user)

    def test_create_user_2x(self):
        """
        Tests the creation of an OpenStack user twice to ensure it only creates
        one.
        """
        self.user_creator = OpenStackUser(self.os_creds, self.user_settings)
        created_user = self.user_creator.create()
        self.assertIsNotNone(created_user)

        retrieved_user = keystone_utils.get_user(self.keystone,
                                                 self.user_settings.name)
        self.assertIsNotNone(retrieved_user)
        self.assertEqual(created_user, retrieved_user)

        # Create user for the second time to ensure it is the same
        user2 = OpenStackUser(self.os_creds, self.user_settings).create()
        self.assertEqual(retrieved_user, user2)

    def test_create_delete_user(self):
        """
        Tests the creation of an OpenStack user then delete.
        """
        # Create user
        self.user_creator = OpenStackUser(self.os_creds, self.user_settings)
        created_user = self.user_creator.create()
        self.assertIsNotNone(created_user)

        keystone_utils.delete_user(self.keystone, created_user)

        # Delete user
        self.user_creator.clean()
        self.assertIsNone(self.user_creator.get_user())

    def test_create_admin_user(self):
        """
        Tests the creation of an OpenStack user with the admin role and
        verifies the role assignment on the project.
        """
        self.user_creator = OpenStackUser(self.os_creds, self.user_settings)
        created_user = self.user_creator.create()
        self.assertIsNotNone(created_user)

        retrieved_user = keystone_utils.get_user(self.keystone,
                                                 self.user_settings.name)
        self.assertIsNotNone(retrieved_user)
        self.assertEqual(created_user, retrieved_user)

        role = keystone_utils.get_role_by_name(self.keystone, 'admin')
        self.assertIsNotNone(role)

        os_proj = keystone_utils.get_project(
            keystone=self.keystone, project_name=self.os_creds.project_name)
        user_roles = keystone_utils.get_roles_by_user(
            self.keystone, retrieved_user, os_proj)
        self.assertIsNotNone(user_roles)
        self.assertEqual(1, len(user_roles))
        self.assertEqual(role.id, user_roles[0].id)
| 37.604167 | 79 | 0.645429 | 6,295 | 0.871884 | 0 | 0 | 0 | 0 | 0 | 0 | 1,786 | 0.247368 |
ffaec0f610e83eef313804d44ec449835d851f4a | 1,443 | py | Python | eveonline-assistant/users/menus.py | wengole/eveonline-assistant | 35041952509bd347c5c9458630404726d7ddd5d8 | [
"BSD-3-Clause"
] | 1 | 2016-07-01T03:15:16.000Z | 2016-07-01T03:15:16.000Z | eveonline-assistant/users/menus.py | wengole/eveonline-assistant | 35041952509bd347c5c9458630404726d7ddd5d8 | [
"BSD-3-Clause"
] | null | null | null | eveonline-assistant/users/menus.py | wengole/eveonline-assistant | 35041952509bd347c5c9458630404726d7ddd5d8 | [
"BSD-3-Clause"
] | null | null | null | from django.core.urlresolvers import reverse
from menu import Menu, MenuItem
def profile_title(request):
    """
    Return a personalized title for our profile menu item
    """
    # No authentication check is needed here: the menu item that uses this
    # callable already has a check that verifies the user is logged in.
    display_name = request.user.get_full_name() or request.user
    return str(display_name)
def profile_menu(request):
    """Build the dropdown entries shown under the profile menu item."""
    return [
        MenuItem(
            title=profile_title,
            url='#',
            classes='dropdown-header'
        ),
        MenuItem(
            title='Update My Info',
            url=reverse('users:update'),
        ),
        MenuItem(
            title='Manage E-Mail Addresses',
            url=reverse('account_email')
        ),
    ]
# Profile dropdown: only shown to authenticated users; its children come
# from profile_menu above.
Menu.add_item(
    'top_nav_right',
    MenuItem(
        title=profile_title,
        url='#',
        children=profile_menu,
        classes='dropdown',
        check=lambda request: request.user.is_authenticated()
    )
)

# Logout link: only shown to authenticated users.
Menu.add_item(
    'top_nav_right',
    MenuItem(
        title='Logout',
        url=reverse('account_logout'),
        check=lambda request: request.user.is_authenticated()
    )
)

# Sign-up link: only shown to anonymous (not logged in) visitors.
Menu.add_item(
    'top_nav_right',
    MenuItem(
        title='Sign Up',
        url=reverse('account_signup'),
        check=lambda request: request.user.is_anonymous()
    )
)
ffb0437102c25d65c5fe21edead0730f028adb91 | 5,367 | py | Python | thetagang_notifications/earnings.py | major/thetagang-notifications | 30800e6edd68c00eb9d9a523b126bad115c445ea | [
"Apache-2.0"
] | null | null | null | thetagang_notifications/earnings.py | major/thetagang-notifications | 30800e6edd68c00eb9d9a523b126bad115c445ea | [
"Apache-2.0"
] | 6 | 2022-02-01T22:00:41.000Z | 2022-03-14T12:19:30.000Z | thetagang_notifications/earnings.py | major/thetagang-notifications | 30800e6edd68c00eb9d9a523b126bad115c445ea | [
"Apache-2.0"
] | null | null | null | """Handle earnings updates."""
from functools import cached_property
import logging
import re
from discord_webhook import DiscordWebhook, DiscordEmbed
import tweepy
from thetagang_notifications import config, utils
log = logging.getLogger(__name__)
# Earnings notification colors.
EARNINGS_COLOR_BEAT = "20d420"
EARNINGS_COLOR_MISSED = "d42020"
EARNINGS_COLOR_NO_CONSENSUS = "000000"
# Wide, transparent PNG that helps to keep the earnings embeds the same width.
TRANSPARENT_PNG = "https://cdn.major.io/wide-empty-transparent.png"
class EarningsReport:
    """Parse an earnings tweet and publish a Discord notification for it."""

    def __init__(self, tweet):
        """Initialize the basics of the class.

        :param tweet: raw text of the earnings tweet to parse
        """
        self.tweet = tweet

    @property
    def consensus(self):
        """Find the consensus rating in the tweet (if present).

        :return: consensus EPS as a float (negative for an expected loss),
            or None when the tweet has no analyst consensus.
        """
        regex = r"consensus was (\(?\$[0-9\.]+\)?)"
        result = re.findall(regex, self.tweet)

        # Some earnings reports, especially for smaller stocks, don't have an
        # analyst consensus number.
        if not result:
            return None

        # Parse the consensus and handle negative numbers: a value wrapped in
        # parentheses, e.g. ($0.10), denotes an expected loss.
        raw_consensus = result[0]
        if "(" in raw_consensus:
            # We have an expected loss.
            consensus = float(re.findall(r"[0-9\.]+", raw_consensus)[0]) * -1
        else:
            # We have an expected gain.
            consensus = float(re.findall(r"[0-9\.]+", raw_consensus)[0])

        return consensus

    @property
    def discord_color(self):
        """Choose a color based on the consensus and earnings relationship."""
        # Compare against None explicitly: an EPS of exactly $0.00 is falsy
        # but is a real value and must not be treated as "no consensus".
        # Also guard against a tweet whose earnings figure could not be
        # parsed, which would otherwise make the comparison below raise.
        if self.consensus is None or self.earnings is None:
            return EARNINGS_COLOR_NO_CONSENSUS
        if self.earnings < self.consensus:
            return EARNINGS_COLOR_MISSED
        return EARNINGS_COLOR_BEAT

    @property
    def discord_description(self):
        """Generate a description for the Discord notification."""
        description = [f"**Earnings:** {self.earnings} ({self.consensus})"]

        if self.finviz:
            description = [
                f"**Sector:** {self.finviz['Sector']} - {self.finviz['Industry']}"
            ] + description

        return "\n".join(description)

    @property
    def earnings(self):
        """Find the earnings number in the tweet.

        :return: reported EPS as a float (negative for a loss), or None when
            the tweet carries no recognizable earnings figure.
        """
        # Look for positive earnings by default. 🎉
        regex = r"reported (?:earnings of )?\$([0-9\.]+)"
        multiplier = 1

        # Sometimes there's a loss and we make the number negative. 😞
        if "reported a loss of" in self.tweet:
            regex = r"reported a loss of \$([0-9\.]+)"
            multiplier = -1

        # Search for earnings results and exit early if they are missing.
        result = re.findall(regex, self.tweet)
        if not result:
            return None

        return float(result[0]) * multiplier

    @cached_property
    def finviz(self):
        """Get data from finviz.com about our trending symbol."""
        return utils.get_finviz_stock(self.ticker)

    @property
    def logo(self):
        """Get the stock logo."""
        return utils.get_stock_logo(self.ticker)

    @property
    def discord_title(self):
        """Generate a title for the notification."""
        if self.finviz:
            return f"{self.ticker}: {self.finviz['Company']}"
        return self.ticker

    def notify(self):
        """Send notification to Discord.

        :return: the executed webhook, or None when no ticker was found.
        """
        # Exit early if we couldn't find a ticker in the tweet.
        if self.ticker is None:
            return None

        webhook = DiscordWebhook(
            url=config.WEBHOOK_URL_EARNINGS,
            rate_limit_retry=True,
            username=config.DISCORD_USERNAME,
        )
        webhook.add_embed(self.prepare_embed())
        webhook.execute()

        return webhook

    def prepare_embed(self):
        """Prepare the webhook embed data."""
        embed = DiscordEmbed(
            title=self.discord_title,
            color=self.discord_color,
            description=self.discord_description,
        )
        embed.set_thumbnail(url=self.logo)
        # Temporarily disable the transparent image to make the embed less tall.
        # embed.set_image(url=TRANSPARENT_PNG)
        return embed

    @property
    def ticker(self):
        """Find the ticker in the earnings tweet.

        :return: the ticker symbol, or None if the tweet does not start with
            a cashtag like ``$AAPL``.
        """
        result = re.findall(r"^\$([A-Z]+)", self.tweet)
        return result[0] if result else None
# Stream handler that reacts to tweets from the earnings account.
class EarningsStream(tweepy.Stream):  # pragma: no cover
    """Extending the tweepy.Stream class to do earnings things."""

    def on_connect(self):
        log.debug("Earnings: Tweepy stream connected")
        return super().on_connect()

    def on_status(self, status):
        """Parse tweets and send notifications."""
        # Ignore anything that is not from the followed earnings account.
        if status.user.id != 55395551:
            return
        log.info("Earnings: %s", status.text)
        EarningsReport(status.text).notify()
def main():  # pragma: no cover
    """Run the earnings notifications."""
    earnings_stream = EarningsStream(
        config.TWITTER_CONSUMER_KEY,
        config.TWITTER_CONSUMER_SECRET,
        config.TWITTER_ACCESS_TOKEN,
        config.TWITTER_ACCESS_TOKEN_SECRET,
    )
    # Follow the earnings account in a background thread so the main thread
    # can continue past this call.
    earnings_stream.filter(follow=["55395551"], threaded=True)
    print("Streaming!")
if __name__ == "__main__": # pragma: no cover
main()
| 30.668571 | 85 | 0.619899 | 4,381 | 0.815373 | 0 | 0 | 2,781 | 0.517588 | 0 | 0 | 1,959 | 0.364601 |
ffb099ff1d0c02aa50b56099d003bd1e52a1170d | 8,795 | py | Python | client/Client.py | DSteam1/Sudoku | 4b16d1606712eac76898cd397ee2c3d9fdb4efcf | [
"MIT"
] | null | null | null | client/Client.py | DSteam1/Sudoku | 4b16d1606712eac76898cd397ee2c3d9fdb4efcf | [
"MIT"
] | 2 | 2017-11-03T15:34:27.000Z | 2017-11-14T20:53:41.000Z | client/Client.py | DSteam1/Sudoku | 4b16d1606712eac76898cd397ee2c3d9fdb4efcf | [
"MIT"
] | null | null | null | from mtTkinter import *
import tkMessageBox
from socket import AF_INET, SOCK_STREAM, socket, error
import OtherViews as OV
import MainView as MV
import GameView as GV
from threading import Thread
from common.utils import init_logging
from common.constants import *
import common.protocol as protocol
LOG = init_logging()
_WIDTH = 666
_HEIGHT = 300
_GAME_HEIGHT = 600
_GAME_WIDTH = 800
class Application():
    """Tkinter GUI client for the multiplayer Sudoku game.

    Owns the root window and a single frame container whose contents are
    swapped between the nickname, server-address, main (lobby) and game
    views. Networking runs over a plain TCP socket; incoming server
    messages are handled by a ClientListener background thread which calls
    back into this class to update the views.
    """

    def __init__(self):
        # Root window and the single container that all views are placed in.
        self.root = Tk()
        self.root.minsize(width=_WIDTH, height=_HEIGHT)
        self.root.title("Sudoku")
        self.frame_container = Frame(self.root)
        self.frame_container.place(relx=0.5, rely=0.5, anchor=CENTER)
        # View handles and game-state flags used by listener callbacks.
        self.existing_main_view = None
        self.existing_game_view = None
        self.game_started = False
        self.game_open = False
        self.nickname_view() # Show nickname initially
        self.root.mainloop()
        # After exiting from main loop
        self.exit_game()
        self.disconnect()

    # Connect to server
    def connect(self):
        """Open the TCP connection, start the listener and request games.

        Assumes ``self.server`` and ``self.nickname`` were set beforehand
        (presumably by the nickname / server-address views) -- confirm.
        """
        success = self.tryCreateConnection()
        if success:
            self.listener = ClientListener(self.socket, self)
            self.send_nickname()
            self.get_games()
        else:
            tkMessageBox.showinfo("Error", "Error connecting to server")
            # NOTE(review): ``self.count`` is never initialized in this
            # class, so this line raises AttributeError on the first failed
            # connection attempt -- confirm intent and initialize it.
            self.count += 1

    def disconnect(self):
        """Close the server socket if it is still open."""
        try:
            # fileno() raises once the socket has already been closed.
            self.socket.fileno()
        except:
            return
        LOG.info("Disconnected from server.")
        self.socket.close()

    def tryCreateConnection(self):
        """Create and connect the TCP socket; return True on success."""
        LOG.info("Connecting to %s." % self.server)
        # Fixed server port used by the Sudoku server.
        port = 7777
        try:
            self.socket = socket(AF_INET, SOCK_STREAM)
            LOG.info("TCP socket created")
            server_address = (self.server, port)
            self.socket.connect(server_address)
            LOG.info('Socket connected to %s:%d' % self.socket.getpeername())
            LOG.info('Local end-point is bound to %s:%d' % self.socket.getsockname())
            return True
        except:
            self.socket.close()
            # NOTE(review): ``self.master`` is not defined on this class;
            # this cleanup path looks like it should destroy ``self.root``
            # instead -- confirm.
            self.master.destroy()
            return False

    def send_nickname(self):
        """Send the chosen nickname to the server (fire-and-forget)."""
        LOG.info("Sending nickname to server")
        protocol.send(self.socket, NICKNAME_MSG, self.nickname)
        LOG.info("Client is not expecting response for nickname message")

    def get_games(self):
        """Ask the server for the list of available games."""
        LOG.info("Requesting available games from server")
        protocol.send(self.socket, REQ_GAMES_MSG, "")
        LOG.info("Waiting response for games request")

    def create_game(self, players):
        """Ask the server to create a new game for ``players`` players."""
        LOG.info("Requesting new game creation")
        protocol.send(self.socket, CREATE_GAME_MSG, players)
        self.game_started = False
        LOG.info("Waiting response for new game creation")

    def join_game(self, id):
        """Ask the server to join the game with the given id."""
        LOG.info("Requesting joining a game")
        protocol.send(self.socket, JOIN_GAME_MSG, str(id))
        LOG.info("Waiting response for join game")

    def insert_number(self, row, column, digit):
        """Ask the server to insert ``digit`` at (row, column) on the board."""
        LOG.info("Requesting number insertion")
        msg = protocol.assemble_insert_msg_content(row, column, digit)
        protocol.send(self.socket, INSERT_MSG, msg)
        LOG.info("Waiting response for number insertion")

    # VIEWS
    def nickname_view(self):
        """Show the initial nickname entry view."""
        self.window_resize(_WIDTH, _HEIGHT)
        self.empty_frame(self.frame_container)
        OV.NicknameView(self.frame_container, self)

    def server_address_view(self):
        """Show the server address entry view."""
        self.window_resize(_WIDTH, _HEIGHT)
        self.empty_frame(self.frame_container)
        OV.ServerAddressView(self.frame_container, self)

    def main_view(self, games):
        """Show the lobby view listing the available games."""
        self.selected_game = None
        self.window_resize(_WIDTH, _HEIGHT)
        self.empty_frame(self.frame_container)
        self.existing_main_view = MV.MainView(self.frame_container, self, games)

    def update_main_view(self, games):
        """Create or refresh the lobby view with a new game list."""
        if self.existing_main_view is None:
            self.main_view(games)
        else:
            self.existing_main_view.games = games
            self.existing_main_view.fill_games()

    def game_join_fault(self):
        """Tell the lobby view that joining a game failed."""
        if self.existing_main_view is not None:
            self.existing_main_view.display_join_fault()

    def game_full_fault(self):
        """Tell the lobby view that the selected game was full."""
        if self.existing_main_view is not None:
            self.existing_main_view.display_game_full()

    def game_view(self, digitsTypes = "", scores = ""):
        """Show the game board view, replacing the lobby view."""
        self.window_resize(_GAME_WIDTH, _GAME_HEIGHT)
        self.empty_frame(self.frame_container)
        self.existing_game_view = GV.GameView(self.frame_container, self, digitsTypes, scores, self.game_started)
        self.existing_main_view = None

    def update_game_view(self, digitsTypes, scores):
        """Create or refresh the game view with board and/or score updates.

        Empty-string arguments mean "no update for this part".
        """
        if self.existing_game_view is None:
            self.game_view(digitsTypes, scores)
        else:
            if digitsTypes != "" :
                self.existing_game_view.update_board(digitsTypes)
            if scores != "":
                self.existing_game_view.fill_players(scores)

    def start_game(self):
        """Mark the game as started and hide the waiting text."""
        self.game_started = True
        if self.existing_game_view is not None:
            self.existing_game_view.hide_waiting_txt()

    def exit_game(self):
        """Leave the current game and go back to the lobby."""
        LOG.info("Sending exit game message")
        protocol.send(self.socket, EXIT_GAME_MSG, "")
        LOG.info("Client is not expecting response for exit game message")
        self.game_open = False
        self.existing_game_view = None
        self.get_games()

    def show_end(self, content):
        """Forward the game-over message to the game view."""
        self.existing_game_view.show_end(content)

    def empty_frame(self, frame):
        """Destroy all widgets inside ``frame`` before showing a new view."""
        for widget in frame.winfo_children():
            widget.destroy()

    def window_resize(self, width, height):
        """Resize the root window to at least the given dimensions."""
        self.root.minsize(width=width, height=height)
class ClientListener(Thread):
    """Background thread that receives and dispatches server messages.

    Runs for the lifetime of the connection; each message received from
    the server socket is parsed and routed to the matching Application
    callback on the GUI side.
    """

    def __init__(self, socket, app):
        Thread.__init__(self)
        self.socket = socket
        self.app = app
        # Daemon thread: do not keep the process alive after the GUI exits.
        self.daemon = True
        self.start()

    def run(self):
        """Receive messages until the socket closes or a socket error occurs."""
        try:
            while True:
                msg = protocol.receive(self.socket)
                if msg:
                    self.parse_and_handle_message(msg)
                else:
                    # An empty message means the server closed the connection.
                    self.shut_down()
                    break
        except error:
            return

    def parse_and_handle_message(self, msg):
        """Dispatch one server message to the matching Application handler.

        Board-state, score and game-over messages are only applied while a
        game view is open (``app.game_open``) to avoid updating a view that
        the user has already left.
        """
        message_type, content = protocol.parse(msg)
        LOG.info("Received response with type " + message_type)
        if message_type == GAME_LIST_MSG:
            games = content.split(CONTENT_SEPARATOR)
            self.app.update_main_view(games)
            LOG.info("Handled response with type " + message_type)
        elif message_type == SUCCESSFUL_JOIN_MSG:
            self.app.game_started = False
            LOG.info("Handled response with type " + message_type)
        elif message_type == START_GAME_MSG:
            self.app.start_game()
            LOG.info("Handled response with type " + message_type)
        elif message_type == BOARD_STATE_MSG:
            if self.app.game_open:
                digits, types = protocol.separate_board_state_msg_content(content)
                self.app.update_game_view((digits, types), "")
            LOG.info("Handled response with type " + message_type)
        elif message_type == SEND_SCORES_MSG:
            if self.app.game_open:
                scores = protocol.parse_score_message(content)
                self.app.update_game_view("", scores)
            LOG.info("Handled response with type " + message_type)
        elif message_type == SUCCESSFUL_INS_MSG:
            LOG.info("Handled response with type " + message_type + ": " + content)
        elif message_type == FAILED_INS_MSG:
            LOG.info("Handled response with type " + message_type + ": " + content)
        elif message_type == GAME_OVER_VICTORY_MSG:
            if self.app.game_open:
                self.app.show_end(content)
            LOG.info("Handled response with type " + message_type + ": " + content)
        elif message_type == GAME_OVER_LOSS_MSG:
            if self.app.game_open:
                self.app.show_end(content)
            LOG.info("Handled response with type " + message_type + ": " + content)
        elif message_type == FAILED_JOIN_MSG:
            self.app.game_join_fault()
            LOG.info("Handled response with type " + message_type + ": " + content)
        elif message_type == GAME_FULL_MSG:
            self.app.game_full_fault()
            LOG.info("Handled response with type " + message_type + ": " + content)
        else:
            # Unknown message types are logged and otherwise ignored.
            LOG.info("Unknown message with type " + message_type)
            pass

    def shut_down(self):
        """Close the application's socket after the server disconnects."""
        self.app.disconnect()
if __name__ == '__main__':
Application()
| 34.490196 | 113 | 0.624787 | 8,350 | 0.949403 | 0 | 0 | 0 | 0 | 0 | 0 | 1,119 | 0.127231 |
ffb0b6ed4e63e9dd129dd43fed181495e316a61a | 856 | py | Python | idact/detail/deployment_sync/nodes/get_nodes_deployment_definition.py | intdata-bsc/idact | 54cb65a711c145351e205970c27c83e6393cccf5 | [
"MIT"
] | 5 | 2018-12-06T15:40:34.000Z | 2019-06-19T11:22:58.000Z | idact/detail/deployment_sync/nodes/get_nodes_deployment_definition.py | garstka/idact | b9c8405c94db362c4a51d6bfdf418b14f06f0da1 | [
"MIT"
] | 9 | 2018-12-06T16:35:26.000Z | 2019-04-28T19:01:40.000Z | idact/detail/deployment_sync/nodes/get_nodes_deployment_definition.py | intdata-bsc/idact | 54cb65a711c145351e205970c27c83e6393cccf5 | [
"MIT"
] | 2 | 2019-04-28T19:18:58.000Z | 2019-06-17T06:56:28.000Z | from idact.detail.deployment_sync.deployment_definition import \
DeploymentDefinition
from idact.detail.deployment_sync.nodes.get_expiration_date_from_nodes \
import get_expiration_date_from_nodes
from idact.detail.nodes.nodes_impl import NodesImpl
# pylint: disable=bad-continuation
def get_nodes_deployment_definition(
        deployment: NodesImpl) -> DeploymentDefinition:  # noqa
    """Obtains a definition from an allocation deployment.

        Expiration date is the minimum node allocation end date,
        or one day from now if the nodes were not yet allocated.

        :param deployment: Deployment to obtain the definition of.

    """
    return DeploymentDefinition(
        value=deployment.serialize(),
        expiration_date=get_expiration_date_from_nodes(nodes=deployment))
| 38.909091 | 72 | 0.760514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.351636 |
ffb11c0fb609d6059063047b9dfca31cac6fe559 | 276 | py | Python | setup.py | muhammad-alaref/CAM2RequestsCLI | 83911b52de7fe575682184ef9714cbb2b6fc5af4 | [
"Apache-2.0"
] | null | null | null | setup.py | muhammad-alaref/CAM2RequestsCLI | 83911b52de7fe575682184ef9714cbb2b6fc5af4 | [
"Apache-2.0"
] | null | null | null | setup.py | muhammad-alaref/CAM2RequestsCLI | 83911b52de7fe575682184ef9714cbb2b6fc5af4 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
setup(
name='CAM2RequestsCLI',
version='v1.0-rc0',
packages=find_packages(),
zip_safe=False,
install_requires=[
'click',
'requests',
],
entry_points='''
[console_scripts]
CAM2RequestsCLI=CAM2RequestsCLI:cli
''',
)
| 16.235294 | 43 | 0.717391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.398551 |
ffb26bbf8c3177f73bd85828c0a16edb1e85eea5 | 2,006 | py | Python | work/fromcsv.py | ujjvlh/pdf-glyph-mapping | e9978f7103e8a3c540cd6cecb3158bf7c37ff687 | [
"MIT"
] | null | null | null | work/fromcsv.py | ujjvlh/pdf-glyph-mapping | e9978f7103e8a3c540cd6cecb3158bf7c37ff687 | [
"MIT"
] | 8 | 2021-07-30T05:45:07.000Z | 2021-10-06T04:22:25.000Z | work/fromcsv.py | ujjvlh/pdf-glyph-mapping | e9978f7103e8a3c540cd6cecb3158bf7c37ff687 | [
"MIT"
] | 1 | 2021-07-30T05:49:07.000Z | 2021-07-30T05:49:07.000Z | # Downloaded from Google Sheets, edited out the first line, and then...
# reader = csv.DictReader(open('from-sheets.csv', newline=''))
import csv
import toml
reader = csv.reader(open('from-sheets.csv', newline=''))
rows = [row for row in reader]

final_bold = {}
final_regular = {}


def get_final(glyph_id, cols):
    """Resolve the final mapping for a glyph from three candidate columns.

    ``cols`` is ordered by preference: [preferred, second, third]. When
    multiple sources are present they must agree (asserted), except for one
    known conflicting glyph which is special-cased below. Returns None when
    no source has a value. ``glyph_id`` is only used in assertion messages.
    """
    if cols[0]:
        if cols[1] and cols[2]:
            assert cols[0] == cols[1] == cols[2], (glyph_id, cols[0], cols[1], cols[2])
            return cols[0]
        if not cols[1] and not cols[2]:
            return cols[0]
        if cols[1] and not cols[2]:
            assert cols[0] == cols[1]
            return cols[0]
        if not cols[1] and cols[2]:
            assert cols[0] == cols[2]
            return cols[0]
    else:
        if cols[1] and cols[2]:
            # Known conflict between the two sources; prefer the second column.
            if cols[1] == 'ों' and cols[2] == 'र्<CCprec>े':
                return cols[1]
            assert cols[1] == cols[2], (cols[1], cols[2])
            return cols[1]
        if cols[1] and not cols[2]:
            return cols[1]
        if not cols[1] and cols[2]:
            return cols[2]
        if not cols[1] and not cols[2]:
            return None


for (i, row) in enumerate(rows):
    if i == 0:
        # Skip the header row.
        continue
    # Columns:
    # 0: glyph_id
    # 1: Bold, from PDF
    # 2: Regular, from PDF
    # 3: Bold, from S
    # 4: Regular, from S
    # 5 and 6: Images
    # 7: Bold, from U
    # 8: Regular, from U
    glyph_id = row[0]
    # Order of preference: Bold 1 > 7 > 3, and Regular; 2 > 8 > 4
    final_bold[glyph_id] = get_final(glyph_id, [row[1], row[7], row[3]])
    final_regular[glyph_id] = get_final(glyph_id, [row[2], row[8], row[4]])

# Manually add these
final_bold['0003'] = final_regular['0003'] = ' '
final_bold['0262'] = final_regular['025E'] = ''

toml.dump(final_bold, open('from-csv-bold.toml', 'w'))
toml.dump(final_regular, open('from-csv-regular.toml', 'w'))
| 34.586207 | 91 | 0.506481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 525 | 0.260417 |
ffb3693a744924e8d030e03e9bfbc5d9ff55cdda | 5,767 | py | Python | agnostic/postgres.py | a6patch/agnostic | 9f9cdc8c54e92e9e3f52e340aaddafb6eb33d787 | [
"MIT"
] | null | null | null | agnostic/postgres.py | a6patch/agnostic | 9f9cdc8c54e92e9e3f52e340aaddafb6eb33d787 | [
"MIT"
] | null | null | null | agnostic/postgres.py | a6patch/agnostic | 9f9cdc8c54e92e9e3f52e340aaddafb6eb33d787 | [
"MIT"
] | null | null | null | import subprocess
import pg8000
from agnostic import AbstractBackend
class PostgresBackend(AbstractBackend):
''' Support for PostgreSQL. '''
def backup_db(self, backup_file):
'''
Return a ``Popen`` instance that will backup the database to the
``backup_file`` handle.
'''
env = {'PGPASSWORD': self._password}
command = [
'pg_dump',
'-h', self._host,
'-U', self._user,
]
if self._port is not None:
command.append('-p')
command.append(str(self._port))
for schema in self._split_schema():
command.append('-n')
command.append(schema)
command.append(self._database)
process = subprocess.Popen(
command,
env=env,
stdout=backup_file,
stderr=subprocess.PIPE
)
return process
    def clear_db(self, cursor):
        ''' Remove all objects from the database.

        Drops (with CASCADE) the current user's tables, then all sequences,
        then custom ENUM types, and finally any non-public schemas listed in
        the configured search path.
        '''

        # Drop tables.
        cursor.execute('''
            SELECT schemaname, tablename FROM pg_tables
            WHERE tableowner = %s
            AND schemaname != 'pg_catalog'
            AND schemaname != 'information_schema'
        ''', (self._user,))

        # Quote schema and table names so mixed-case identifiers survive.
        tables = ['"{}"."{}"'.format(r[0], r[1]) for r in cursor.fetchall()]

        if len(tables) > 0:
            sql = 'DROP TABLE {} CASCADE'.format(', '.join(tables))
            cursor.execute(sql)

        # Drop sequences.
        cursor.execute('''
            SELECT relname FROM pg_class
            WHERE relkind = 'S'
        ''')

        sequences = ['"{}"'.format(row[0]) for row in cursor.fetchall()]

        if len(sequences) > 0:
            sql = 'DROP SEQUENCE {} CASCADE'.format(','.join(sequences))
            cursor.execute(sql)

        # Drop custom types, e.g. ENUM types.
        cursor.execute('''
            SELECT typname FROM pg_type
            WHERE typtype = 'e'
        ''')

        types = ['"{}"'.format(row[0]) for row in cursor.fetchall()]

        if len(types) > 0:
            sql = 'DROP TYPE {} CASCADE'.format(','.join(types))
            cursor.execute(sql)

        # Drop schema objects. The 'public' schema is left in place since it
        # is built in.
        for schema in self._split_schema():
            if schema != 'public':
                sql = 'DROP SCHEMA IF EXISTS {} CASCADE'.format(schema)
                cursor.execute(sql)
def connect_db(self):
''' Connect to PostgreSQL. '''
connect_args = {
'host': self._host,
'user': self._user,
'password': self._password,
'database': self._database,
}
if self._port is not None:
connect_args['port'] = self._port
db = pg8000.connect(**connect_args)
db.autocommit = True
if self._schema is not None:
cursor = db.cursor()
cursor.execute("SET SCHEMA '{}'".format(self._schema))
return db
def get_schema_command(self):
''' Return a command that will set the current schema. '''
if self._schema is None:
return 'SET search_path = "$user",public;\n'
else:
return 'SET search_path = {};\n'.format(self._schema)
def restore_db(self, backup_file):
'''
Return a ``Popen`` instance that will restore the database from the
``backup_file`` handle.
'''
env = {'PGPASSWORD': self._password}
command = [
'psql',
'-h', self._host,
'-U', self._user,
'-v', 'ON_ERROR_STOP=1', # Fail fast if an error occurs.
]
if self._port is not None:
command.append('-p')
command.append(str(self._port))
command.append(self._database)
process = subprocess.Popen(
command,
env=env,
stdin=backup_file,
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE
)
return process
def snapshot_db(self, snapshot_file):
'''
Return a ``Popen`` instance that writes a snapshot to ``snapshot_file``.
'''
env = {'PGPASSWORD': self._password}
command = [
'pg_dump',
'-h', self._host,
'-U', self._user,
'-s', # dump schema only
'-x', # don't dump grant/revoke statements
'-O', # don't dump ownership commands
'--no-tablespaces',
]
if self._port is not None:
command.append('-p')
command.append(str(self._port))
if self._schema is not None:
for schema in self._split_schema():
command.extend(('-n', schema))
command.append(self._database)
process = subprocess.Popen(
command,
env=env,
stdout=snapshot_file,
stderr=subprocess.PIPE
)
return process
def _split_schema(self):
'''
Split schema string into separate schema names.
PostgreSQL allows specifying the schema name as a search path that
look for objects in more than one schema. This method breaks that
search path into individual schema names.
It also replaces the special schema name ``"$user"`` (quotes included)
with the current username, mimicking the ``SET SEARCH PATH TO ...``
behavior in PostgreSQL.
'''
schemas = list()
if self._schema is not None:
for schema in map(str.strip, self._schema.split(',')):
if schema == '"$user"':
schemas.append(self._user)
else:
schemas.append(schema)
return schemas
| 27.593301 | 80 | 0.524363 | 5,693 | 0.987168 | 0 | 0 | 0 | 0 | 0 | 0 | 1,981 | 0.343506 |
ffb40f832f9264e117197fcc4b4e443a97fdff44 | 2,825 | py | Python | vasppy/scripts/fat_bands.py | cajfisher/vasppy | a460db14163b7db3bce54d754dd476c45a3ed85b | [
"MIT"
] | 28 | 2017-02-16T13:22:34.000Z | 2021-04-29T06:10:10.000Z | vasppy/scripts/fat_bands.py | cajfisher/vasppy | a460db14163b7db3bce54d754dd476c45a3ed85b | [
"MIT"
] | 15 | 2016-05-09T13:08:42.000Z | 2021-08-09T10:59:58.000Z | vasppy/scripts/fat_bands.py | cajfisher/vasppy | a460db14163b7db3bce54d754dd476c45a3ed85b | [
"MIT"
] | 25 | 2015-10-12T11:29:22.000Z | 2021-08-20T17:33:27.000Z | #! /usr/bin/env python3
from vasppy import procar
from vasppy.outcar import reciprocal_lattice_from_outcar
import argparse
#def x_axis( cartesian_k_points ):
# if cartesian_k_points is not None:
# x_axis = [ 0.0 ]
# for i in range( 1, len(cartesian_k_points) ):
# d = cartesian_k_points[i - 1] - cartesian_k_points[i]
# d = np.sqrt( np.dot( d, d) )
# x_axis.append( d + x_axis[-1] )
# x_axis = np.array( x_axis )
# else:
# x_axis = np.arange( len( cartesian_k_points ) )
# return x_axis
def orbitals_with_l(l):
    """Return the PROCAR orbital indices with angular momentum *l*.

    ``l`` is one of ``'s'``, ``'p'``, ``'d'``, ``'f'`` or ``'all'``;
    ``'all'`` maps to ``None``, meaning "sum over every orbital".
    """
    index_ranges = {
        's': [0],
        'p': [1, 2, 3],
        'd': [4, 5, 6, 7, 8],
        'f': [9, 10, 11, 12, 13],
        'all': None,
    }
    return index_ranges[l]
def main():
    """Command-line entry point: print a weighted ("fat") band structure.

    Band weights are read from a PROCAR file; the x-axis can optionally be
    scaled using reciprocal lattice vectors read from an OUTCAR file.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-i', '--ions', help='ion indices for band projection (default: sum over all ions)', nargs='+', type=int)
    arg_parser.add_argument('-s', '--spins', help='spin indices for band projection (default [ 1 ])', nargs='+', type=int, default=[1])
    arg_parser.add_argument('-o', '--orbitals', help='orbital indices for band projection (default: sum over all orbitals)', nargs='+', type=int)
    arg_parser.add_argument('-e', '--efermi', help='set fermi energy as reference for energy scale', type=float, default=0.0)
    arg_parser.add_argument('-l', '--l-angular-momentum', help='select all orbitals with angular momentum L for band projection. This supercedes the --orbitals option', choices=['s', 'p', 'd', 'f', 'all'])
    arg_parser.add_argument('-f', '--procar', help='PROCAR filename (default PROCAR)', type=str, default='PROCAR')
    arg_parser.add_argument('--scaling', help='Energy scaling for band widths (default 0.2 eV)', type=float, default=0.2)
    arg_parser.add_argument('-x', '--xscaling', help='Automatic scaling of x-axis using reciprocal lattice vectors read from OUTCAR', action='store_true', default=False)
    options = arg_parser.parse_args()
    if options.l_angular_momentum:
        # -l overrides any explicit orbital index list.
        options.orbitals = orbitals_with_l(options.l_angular_momentum)
    # TODO: move reading the reciprocal lattice into procar.py
    lattice = reciprocal_lattice_from_outcar('OUTCAR') if options.xscaling else None
    band_data = procar.Procar()
    band_data.read_from_file(options.procar)
    band_data.print_weighted_band_structure(
        spins=options.spins,
        ions=options.ions,
        orbitals=options.orbitals,
        scaling=options.scaling,
        e_fermi=options.efermi,
        reciprocal_lattice=lattice,
    )
if __name__ == '__main__':
main()
| 48.706897 | 205 | 0.593274 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,196 | 0.423363 |
ffb515697db4de0f590ab614acfa4b387f024c15 | 482 | py | Python | io_scene_vrm/editor/extension.py | 989onan/VRM_Addon_for_Blender | af894a87bdf2bc02b69842c6de082f097f1e0939 | [
"MIT"
] | 344 | 2021-02-09T15:06:26.000Z | 2022-03-31T07:13:40.000Z | io_scene_vrm/editor/extension.py | 989onan/VRM_Addon_for_Blender | af894a87bdf2bc02b69842c6de082f097f1e0939 | [
"MIT"
] | 47 | 2021-02-06T23:44:43.000Z | 2022-03-22T21:45:21.000Z | io_scene_vrm/editor/extension.py | 989onan/VRM_Addon_for_Blender | af894a87bdf2bc02b69842c6de082f097f1e0939 | [
"MIT"
] | 44 | 2021-02-21T14:15:38.000Z | 2022-03-31T01:53:12.000Z | import bpy
from .vrm0.property_group import Vrm0PropertyGroup
class VrmAddonArmatureExtensionPropertyGroup(bpy.types.PropertyGroup):  # type: ignore[misc]
    """Add-on data stored on each armature via Blender's PropertyGroup system.

    The annotations below are Blender's declaration syntax for persisted
    properties; they are registered by Blender, not evaluated as plain
    Python type hints.
    """
    # Three-component version tuple -- presumably the add-on version that
    # last wrote this data (TODO confirm against the registration code).
    addon_version: bpy.props.IntVectorProperty(  # type: ignore[valid-type]
        size=3  # noqa: F722
    )
    # Nested VRM 0.x extension properties.
    vrm0: bpy.props.PointerProperty(  # type: ignore[valid-type]
        name="VRM 0.x", type=Vrm0PropertyGroup  # noqa: F722
    )
    # Name of the armature datablock this extension belongs to.
    armature_data_name: bpy.props.StringProperty()  # type: ignore[valid-type]
| 30.125 | 92 | 0.715768 | 416 | 0.863071 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.271784 |
ffb738b58324008d07a0a6382006195a88d42d5f | 2,014 | py | Python | examples/data/data.py | chenyangh/pytorch-struct | 2c87ef20c615c27194f85723e78b9269e6c8da14 | [
"MIT"
] | null | null | null | examples/data/data.py | chenyangh/pytorch-struct | 2c87ef20c615c27194f85723e78b9269e6c8da14 | [
"MIT"
] | null | null | null | examples/data/data.py | chenyangh/pytorch-struct | 2c87ef20c615c27194f85723e78b9269e6c8da14 | [
"MIT"
] | null | null | null | from torchtext.legacy import data
import torch
def token_pre(tokenizer, q):
    """Encode the word sequence *q* with a subword *tokenizer* and build a
    subword-to-word alignment.

    Returns ``(ids, out)`` where ``ids`` is the encoded sequence (with
    special tokens) and ``out[i]`` gives the 1-based index of the word that
    subword ``i`` begins, or -1 for ``##`` continuation pieces; position 0
    corresponds to the leading special token. If the walk fails to consume
    exactly all words, two zero-filled lists of length ``len(q) + 2`` are
    returned instead.
    """
    st = " ".join(q)
    s = tokenizer.tokenize(st)
    out = [0]
    cur = 0  # 1-based index of the word currently being consumed
    expect = ""  # characters of the current word not yet matched
    first = True  # True while at the first subword piece of a word
    for i, w in enumerate(s):
        if len(expect) == 0:
            # Current word fully consumed: advance to the next word.
            cur += 1
            expect = q[cur - 1].lower()
            first = True
        if w.startswith("##"):
            # Continuation piece: mark with -1 and consume its characters
            # (minus the two-character "##" prefix).
            out.append(-1)
            expect = expect[len(w) - 2 :]
        elif first:
            out.append(cur)
            expect = expect[len(w) :]
            first = False
        else:
            # NOTE(review): a non-## piece mid-word is mapped to the *next*
            # word index -- confirm this matches the tokenizer's behaviour.
            expect = expect[len(w) :]
            out.append(cur + 1)
    # assert cur == len(q)-1, "%s %s \n%s\n%s"%(len(q), cur, q, s)
    if cur != len(q):
        print("error")
        return [0] * (len(q) + 2), [0] * (len(q) + 2)
    return tokenizer.encode(st, add_special_tokens=True), out
def token_post(ls):
    """Collate a batch of ``(token_ids, word_alignment)`` pairs.

    Returns ``(ids, alignment, lengths)``: a LongTensor of token ids
    zero-padded to the longest sequence, a long (batch, tokens, words)
    one-hot alignment tensor in which ``-1`` entries (subword
    continuations) are left as all-zero rows, and the list of unpadded
    sequence lengths.
    """
    lengths = [len(pair[0]) for pair in ls]
    max_tokens = max(lengths)
    padded_ids = [pair[0] + [0] * (max_tokens - len(pair[0])) for pair in ls]
    max_words = max(max(pair[1]) + 1 for pair in ls)
    alignment = torch.zeros(len(ls), max_tokens, max_words)
    for batch_idx, pair in enumerate(ls):
        for token_idx, word_idx in enumerate(pair[1]):
            if word_idx != -1:
                alignment[batch_idx, token_idx, word_idx] = 1
    return torch.LongTensor(padded_ids), alignment.long(), lengths
def SubTokenizedField(tokenizer):
    """
    Field for use with pytorch-transformer: preprocesses each example with
    ``token_pre`` (subword encoding + word alignment) and collates batches
    with ``token_post``.
    """
    field = data.RawField(
        preprocessing=lambda words: token_pre(tokenizer, words),
        postprocessing=token_post,
    )
    field.is_target = False
    return field
def TokenBucket(
    train, batch_size, device="cuda:0", key=lambda x: max(len(x.word[0]), 5)
):
    """Return an endless, shuffled BucketIterator over *train* whose batches
    are capped by accumulated token count (via *key*) rather than by a
    fixed number of examples."""
    def accumulated_size(example, _, running_total):
        # Batch "size" so far plus this example's token count.
        return running_total + key(example)

    return data.BucketIterator(
        train,
        train=True,
        sort=False,
        sort_within_batch=True,
        shuffle=True,
        batch_size=batch_size,
        sort_key=lambda x: key(x),
        repeat=True,
        batch_size_fn=accumulated_size,
        device=device,
    )
| 25.175 | 82 | 0.53426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.069513 |
ffb8ae45bde268b15c5f3fd6b4ea8944eb386fd4 | 2,497 | py | Python | start.py | FoerMaster/GachiBot-Discord | bfafb7b989c0181332724e8a8975ceceb3e0acaf | [
"MIT"
] | 1 | 2020-09-05T15:30:51.000Z | 2020-09-05T15:30:51.000Z | start.py | FoerMaster/GachiBot-Discord | bfafb7b989c0181332724e8a8975ceceb3e0acaf | [
"MIT"
] | null | null | null | start.py | FoerMaster/GachiBot-Discord | bfafb7b989c0181332724e8a8975ceceb3e0acaf | [
"MIT"
] | null | null | null |
import gachi
import discord
import functions
import kachalochka
# Shared singletons used by the event handlers below.
commands = functions.commands()  # command handler implementations
gachi = gachi.gachi()  # gachi class: wraps the Discord client and command registry
kachalaka = kachalochka.kachalka()  # per-user stats (see user_strong_up in on_message)
################################################################
## FUNCS INITIALIZATION ##
################################################################
def setup_functions():
    """Register every chat sub-command with its handler and usage text."""
    registrations = (
        ("помоги", commands.show_help, "`- Показать все доступные команды"),
        ("цитата", commands.show_citate, "`- Попросить великую цитату от гачи-бота"),
        ("шар", commands.predirect_ball, "[вопрос]` - Спросить что-либо у бота"),
        ("стата", commands.gachi_stata, "[пользователь]` - Узнать статистику"),
        ("имя", commands.gachi_changename, "[имя]` - Сменить имя"),
        ("борьба", commands.gachi_punch, "[пользователь]` - Сделать fisting другому пользователю"),
        ("создатель", commands.show_creators, "` - узнать создателей"),
    )
    for name, handler, usage in registrations:
        gachi.register(name, handler, usage)
################################################################
## BOT INITIALIZATION ##
################################################################
@gachi.discord.event
async def on_ready():
    """Log the login, register the chat commands, and set the bot presence."""
    gachi.msg('We have logged in as {0.user}'.format(gachi.discord))
    setup_functions()
    # Advertise a "playing" activity and show the bot as idle.
    activity = discord.Game(name="anal fisting")
    await gachi.discord.change_presence(status=discord.Status.idle, activity=activity)
@gachi.discord.event
async def on_message(message):
    """Dispatch chat messages that start with the bot prefix.

    Every message bumps the author's stats. Messages beginning with
    'гачи' are parsed as `гачи <команда> [аргументы...]` and routed to the
    registered handler; an embed reply is sent for unknown commands, and a
    usage hint when no sub-command was given.
    """
    # Ignore the bot's own messages to avoid feedback loops.
    if message.author == gachi.discord.user:
        return
    channel = message.channel
    command = message.content.split(' ')
    kachalaka.user_strong_up(message.author.id, 1)
    if not command[0].lower().startswith('гачи'):
        return
    if len(command) < 2:
        # No sub-command given: show usage. This replaces the previous bare
        # `except:` around the dispatch, which caught the IndexError here
        # but also silently swallowed real errors raised by handlers.
        embed_obj = discord.Embed()
        embed_obj.color = 0xff00d4
        embed_obj.description = "Используй **/гачи [команда] [аргументы...]**"
        await channel.send(embed=embed_obj)
        return
    if gachi.has(command[1].lower()):
        await channel.send(embed=gachi.activate(message, command[1], command))
    else:
        embed_obj = discord.Embed()
        embed_obj.description = "Я не знаю такой команды, пиши **/гачи помоги** для подробностей!"
        embed_obj.color = 0xff00d4
        await channel.send(embed=embed_obj)
# Start the Discord client (blocking). NOTE(review): the token is a
# hard-coded placeholder -- load the real token from an environment
# variable or config file rather than committing it to source.
gachi.discord.run('TOKEN')
| 37.268657 | 108 | 0.578694 | 0 | 0 | 0 | 0 | 1,305 | 0.466738 | 1,261 | 0.451001 | 1,195 | 0.427396 |
ffba6d1b0d4c9fc9bf206561d7474e75ff42b979 | 343 | py | Python | pushit/backends/mpns.py | rhblind/django-pushit | 491ab14b7b1004eb73420d23bcc2b74fafaaf7a3 | [
"MIT"
] | null | null | null | pushit/backends/mpns.py | rhblind/django-pushit | 491ab14b7b1004eb73420d23bcc2b74fafaaf7a3 | [
"MIT"
] | null | null | null | pushit/backends/mpns.py | rhblind/django-pushit | 491ab14b7b1004eb73420d23bcc2b74fafaaf7a3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from pushit.backends import PushBackend
class MPNSPushBackend(PushBackend):
"""
"""
def __init__(self, connection_alias, **options):
super(MPNSPushBackend, self).__init__(connection_alias, **options)
raise NotImplementedError()
| 21.4375 | 74 | 0.71137 | 217 | 0.632653 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.102041 |
ffbb0fd9a15722477aa695b38c34143fbc58e500 | 1,133 | py | Python | hic_covid_repository/versions/002_Virology.py | LCBRU/hic_covid | eb5a37339185ed71246235e307a81d91dc91f9ec | [
"MIT"
] | null | null | null | hic_covid_repository/versions/002_Virology.py | LCBRU/hic_covid | eb5a37339185ed71246235e307a81d91dc91f9ec | [
"MIT"
] | null | null | null | hic_covid_repository/versions/002_Virology.py | LCBRU/hic_covid | eb5a37339185ed71246235e307a81d91dc91f9ec | [
"MIT"
] | null | null | null | from sqlalchemy import (
MetaData,
Table,
Column,
Integer,
NVARCHAR,
DateTime,
Date,
Boolean,
)
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
t = Table(
"virology",
meta,
Column("id", Integer, primary_key=True),
Column("uhl_system_number", NVARCHAR(50), index=True, nullable=False),
Column("test_id", Integer, nullable=False, index=True, unique=True),
Column("laboratory_code", NVARCHAR(50)),
Column("order_code", NVARCHAR(50)),
Column("order_name", NVARCHAR(50)),
Column("test_code", NVARCHAR(50)),
Column("test_name", NVARCHAR(50)),
Column("organism", NVARCHAR(50)),
Column("test_result", NVARCHAR(50)),
Column("sample_collected_date_time", DateTime),
Column("sample_received_date_time", DateTime),
Column("sample_available_date_time", DateTime),
Column("order_status", NVARCHAR(50)),
)
t.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
t = Table("virology", meta, autoload=True)
t.drop()
| 25.75 | 78 | 0.624007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.207414 |
4400856d181d08220fa50547c19d7bab6d8e6fbd | 2,393 | py | Python | marlgrid/pz_envs/contentFB.py | aivaslab/marlgrid | 10b53d27ce224fadeeb5830d6034350a69feb4b4 | [
"Apache-2.0"
] | null | null | null | marlgrid/pz_envs/contentFB.py | aivaslab/marlgrid | 10b53d27ce224fadeeb5830d6034350a69feb4b4 | [
"Apache-2.0"
] | null | null | null | marlgrid/pz_envs/contentFB.py | aivaslab/marlgrid | 10b53d27ce224fadeeb5830d6034350a69feb4b4 | [
"Apache-2.0"
] | null | null | null | from ..base_AEC import *
from ..objects import *
from random import randrange
import random
import math
class ContentFBEnv(para_MultiGridEnv):
    """Multi-agent grid with a row of 5x5 walled rooms, each containing a
    colored goal behind a door of the same color.

    Loosely based on DoorKeyEnv in
    https://github.com/maximecb/gym-minigrid/blob/master/gym_minigrid/envs/doorkey.py
    """

    mission = "use the key to open the door and then get to the goal"
    metadata = {}

    def init_agents(self, arg, agent_kwargs):
        # arg == 0 selects the default single-agent spawn description.
        if arg == 0:
            self.apos = [(6, 11, 3)]
        # One agent per spawn entry, all built from the same kwargs.
        for _spawn in self.apos:
            self.add_agent(GridAgentInterface(**agent_kwargs))

    def _gen_grid(self, width, height):
        # Start from an empty grid with a surrounding wall.
        self.grid = MultiGrid((width, height))
        palette = random.sample(
            ['green', 'purple', 'orange', 'yellow', 'blue', 'pink', 'red'], 4
        )
        self.grid.wall_rect(0, 0, width - 2, height)
        # One 5x5 room every 4 cells, each with a goal and a door colored
        # from the randomly drawn palette.
        for room_idx, left in enumerate(range(0, width - 4, 4)):
            self.grid.wall_rect(left, 0, 5, 5)
            self.put_obj(Goal(color=palette[room_idx], reward=1), left + 2, 2)
            self.put_obj(Door(color=palette[room_idx]), left + 2, 4)
        self.agent_spawn_kwargs = {'top': (1, 1)}
        self.place_agents(**self.agent_spawn_kwargs)
class ContentFBEnv2(para_MultiGridEnv):
    """Multi-agent grid with a row of 5x5 walled rooms, each containing a
    colored ball behind a wall segment of the next palette color (cyclic).

    Loosely based on DoorKeyEnv in
    https://github.com/maximecb/gym-minigrid/blob/master/gym_minigrid/envs/doorkey.py
    """

    mission = "use the key to open the door and then get to the goal"
    metadata = {}

    def init_agents(self, arg, agent_kwargs):
        # arg == 0 selects the default single-agent spawn description.
        if arg == 0:
            self.apos = [(6, 11, 3)]
        # One agent per spawn entry, all built from the same kwargs.
        for _spawn in self.apos:
            self.add_agent(GridAgentInterface(**agent_kwargs))

    def _gen_grid(self, width, height):
        # Start from an empty grid with a surrounding wall.
        self.grid = MultiGrid((width, height))
        palette = ['green', 'purple', 'orange']
        self.grid.wall_rect(0, 0, width - 2, height)
        # One 5x5 room every 4 cells: a ball in one color, blocked by a
        # wall segment in the next color of the (cyclic) palette.
        for room_idx, left in enumerate(range(0, width - 4, 4)):
            self.grid.wall_rect(left, 0, 5, 5)
            self.put_obj(Ball(color=palette[room_idx]), left + 2, 2)
            self.put_obj(Wall(color=palette[(room_idx + 1) % 3]), left + 2, 4)
        self.agent_spawn_kwargs = {}
        self.place_agents(**self.agent_spawn_kwargs)
| 32.337838 | 91 | 0.610113 | 2,284 | 0.95445 | 0 | 0 | 0 | 0 | 0 | 0 | 704 | 0.294191 |