hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3bd9caeeddf847dd9546e4e833234ce3cce7f394 | 28 | py | Python | patton_server/service/__init__.py | directionless/patton-server | da39cb8b09029dbcf4edd5c78abb150dc53e8ebe | [
"Apache-2.0"
] | null | null | null | patton_server/service/__init__.py | directionless/patton-server | da39cb8b09029dbcf4edd5c78abb150dc53e8ebe | [
"Apache-2.0"
] | null | null | null | patton_server/service/__init__.py | directionless/patton-server | da39cb8b09029dbcf4edd5c78abb150dc53e8ebe | [
"Apache-2.0"
] | null | null | null | from .make_web_app import *
| 14 | 27 | 0.785714 |
3bdb6220329725e793142bac8d5000ba99303cc3 | 989 | py | Python | common/permissions.py | pedro-hs/financial-account | 7e8e4d0f3ac888fa36a091d0e733a8e1926180d2 | [
"MIT"
] | null | null | null | common/permissions.py | pedro-hs/financial-account | 7e8e4d0f3ac888fa36a091d0e733a8e1926180d2 | [
"MIT"
] | null | null | null | common/permissions.py | pedro-hs/financial-account | 7e8e4d0f3ac888fa36a091d0e733a8e1926180d2 | [
"MIT"
] | null | null | null | from rest_framework.permissions import BasePermission, IsAuthenticated
| 34.103448 | 107 | 0.706775 |
3bdba7505ba48dff77d09ed882c1ad53fae133f6 | 956 | py | Python | mcp_generation/mcqa_formatter.py | yuchenlin/XCSR | ace4336de98a8567fcad43498907e0efefe70de4 | [
"MIT"
] | 16 | 2021-06-14T00:54:28.000Z | 2022-03-06T08:52:21.000Z | mcp_generation/mcqa_formatter.py | yuchenlin/XCSR | ace4336de98a8567fcad43498907e0efefe70de4 | [
"MIT"
] | null | null | null | mcp_generation/mcqa_formatter.py | yuchenlin/XCSR | ace4336de98a8567fcad43498907e0efefe70de4 | [
"MIT"
] | 2 | 2021-08-02T18:54:33.000Z | 2021-09-20T05:37:02.000Z | import json
# Load the raw multilingual probes: one JSON object per line.
# NOTE: rstrip('\n|\r') strips the *characters* '\n', '|' and '\r' from the
# line end (not the literal sequence) -- reproduced as-is from the original.
with open("./multilingual_probes.jsonl",'r') as source:
    probes = [json.loads(raw_line.rstrip('\n|\r')) for raw_line in source]

# Convert each probe into a multiple-choice QA record.
results = []
for probe in probes:
    # Debug trace of the gold-answer index (kept from the original).
    print(probe["truth_id"])
    labels = ["A", "B", "C", "D", "E", "F", "G", "H"]
    record = {
        "id": probe["id"],
        "lang": probe["langs"],
        "question": {
            "stem": " ",
            "choices": [
                {"label": label, "text": text}
                for label, text in zip(labels, probe["probes"])
            ],
        },
        # Letter label of the correct choice.
        "answerKey": labels[probe["truth_id"]],
    }
    results.append(record)

# Last 1000 records become the dev split; everything before them is train.
with open('/path/to/mcp_data/train.jsonl','w') as train_file:
    for record in results[:-1000]:
        json.dump(record, train_file, ensure_ascii=False)
        train_file.write('\n')
with open('/path/to/mcp_data/dev.jsonl','w') as dev_file:
    for record in results[-1000:]:
        json.dump(record, dev_file, ensure_ascii=False)
        dev_file.write('\n')
3bdc4d0f00442b263a279d7821b9572ea9833620 | 2,016 | py | Python | tests/test_behavior.py | beskyfil/labels | 0a1e4831621ce2027ebc9af3e4161f03ff946a6d | [
"MIT"
] | null | null | null | tests/test_behavior.py | beskyfil/labels | 0a1e4831621ce2027ebc9af3e4161f03ff946a6d | [
"MIT"
] | null | null | null | tests/test_behavior.py | beskyfil/labels | 0a1e4831621ce2027ebc9af3e4161f03ff946a6d | [
"MIT"
] | null | null | null | import pytest
from labelsync.github import Github
from labelsync.helpers import HTTPError
from tests.helpers import fl, FIXTURES_PATH, create_cfg_env, get_labels
# Shared module-level fixtures for the label-sync tests.
# create_cfg_env comes from tests.helpers (definition not visible in this
# excerpt); presumably it builds a config object from 'good.cfg' -- confirm.
c = create_cfg_env('good.cfg')
# Github client under test, pointed at the real GitHub REST API base URL.
github = Github(c, name='github', api_url='https://api.github.com/repos')
# An arbitrary label that does not exist in a fresh GitHub repository.
label = {
    'name':'blabla',
    'color':'aa11bb',
    'description':'whatever'
}
# GitHub's stock "bug" label (its default name, color and description).
label_bug = {
    'name':'bug',
    'color':'d73a4a',
    'description':'Something isn\'t working'
}
# Alternative payload for the "bug" label; presumably used by update/rename
# tests defined elsewhere in this file -- confirm against those tests.
label_new_bug = {
    'name':'ERROR',
    'color':'ffffff',
    'description':'ERROR'
}
| 28.8 | 73 | 0.695933 |
3bdfbd90f140aef1f2b7005698a05751030fadf0 | 4,249 | py | Python | authentication/cryptosign/function/authenticator.py | oberstet/crossbar-examples | 852680eee646cf5479bff18ec727a8026d9bdcda | [
"Apache-2.0"
] | null | null | null | authentication/cryptosign/function/authenticator.py | oberstet/crossbar-examples | 852680eee646cf5479bff18ec727a8026d9bdcda | [
"Apache-2.0"
] | null | null | null | authentication/cryptosign/function/authenticator.py | oberstet/crossbar-examples | 852680eee646cf5479bff18ec727a8026d9bdcda | [
"Apache-2.0"
] | null | null | null | import copy
import random
from pprint import pformat
from txaio import make_logger
from autobahn.wamp.exception import ApplicationError
from autobahn.util import hl, hltype, hlid, hlval
# A simple in-memory principals database. In real-world use, this would
# likely be replaced by some persistent database used to store principals.
#
# Each entry assigns an identity (authid), realm, role and extra data to any
# session that authenticates with one of the listed public keys.
PRINCIPALS = [
    {
        # when a session is authenticating use one of the authorized_keys,
        # then assign it all the data below
        "authid": "client01@example.com",
        "realm": "devices",
        "role": "device",
        # application-specific payload handed to the authenticated session
        "extra": {
            "foo": 23
        },
        # hex-encoded public key(s) accepted for this principal
        "authorized_keys": [
            "545efb0a2192db8d43f118e9bf9aee081466e1ef36c708b96ee6f62dddad9122"
        ]
    },
    {
        # a principal may list several authorized keys (e.g. one per device)
        "authid": "client02@example.com",
        "realm": "devices",
        "role": "device",
        "extra": {
            "foo": 42,
            "bar": "baz"
        },
        "authorized_keys": [
            "9c194391af3bf566fc11a619e8df200ba02efb35b91bdd98b424f20f4163875e",
            "585df51991780ee8dce4766324058a04ecae429dffd786ee80839c9467468c28"
        ]
    }
]
# Module-level logger for this authenticator component.
log = make_logger()
| 33.195313 | 138 | 0.589786 |
3bdfdc921f29e9f07e8dacf34bfc075882611de3 | 1,368 | py | Python | syd/syd_stitch_image.py | OpenSyd/syd | 0f7478c7dedb623ab955e906c103cb64a7abb4b3 | [
"Apache-2.0"
] | 4 | 2015-07-29T19:10:35.000Z | 2020-11-17T07:48:41.000Z | syd/syd_stitch_image.py | OpenSyd/syd | 0f7478c7dedb623ab955e906c103cb64a7abb4b3 | [
"Apache-2.0"
] | 9 | 2015-05-14T09:07:37.000Z | 2022-03-15T10:13:59.000Z | syd/syd_stitch_image.py | OpenSyd/syd | 0f7478c7dedb623ab955e906c103cb64a7abb4b3 | [
"Apache-2.0"
] | 3 | 2016-09-07T06:26:52.000Z | 2016-10-04T12:29:03.000Z | #!/usr/bin/env python3
import itk
import syd
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
| 23.186441 | 79 | 0.557749 |
3be0272cc4ef59d691881a66ca66a56c66ec41a0 | 2,617 | py | Python | pylinear/h5table/ddt.py | npirzkal/pyLINEAR | 00419dcbd91ea7b64386e6fe4f3164cd141333f2 | [
"MIT"
] | null | null | null | pylinear/h5table/ddt.py | npirzkal/pyLINEAR | 00419dcbd91ea7b64386e6fe4f3164cd141333f2 | [
"MIT"
] | null | null | null | pylinear/h5table/ddt.py | npirzkal/pyLINEAR | 00419dcbd91ea7b64386e6fe4f3164cd141333f2 | [
"MIT"
] | null | null | null | import numpy as np
import pdb
from . import columns
from . import h5utils
from .base import Base
| 24.231481 | 75 | 0.534964 |
3be1c731ef6e27de1ae8fcab0e00a570b8b671ef | 851 | py | Python | tools/auto_freeze.py | airacid/pruned-face-detector | ef587e274ccf87633af653694890eb6712d6b3eb | [
"MIT"
] | 1 | 2021-11-01T02:39:36.000Z | 2021-11-01T02:39:36.000Z | tools/auto_freeze.py | airacid/pruned-face-detector | ef587e274ccf87633af653694890eb6712d6b3eb | [
"MIT"
] | null | null | null | tools/auto_freeze.py | airacid/pruned-face-detector | ef587e274ccf87633af653694890eb6712d6b3eb | [
"MIT"
] | 1 | 2021-11-01T02:39:37.000Z | 2021-11-01T02:39:37.000Z |
import os
import subprocess
import tensorflow as tf  # kept from the original; the freeze step requires TF
import argparse

# CLI: checkpoint to freeze and where to write the frozen graph.
parser = argparse.ArgumentParser()
parser.add_argument('--ckpt_path', type=str)
parser.add_argument('--output_path', type=str)
args = parser.parse_args()

# Force CPU-only execution for the freeze step.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

checkpoint = args.ckpt_path
##input_checkpoint
input_checkpoint = checkpoint
##input_graph
input_meta_graph = input_checkpoint + '.meta'
##output_node_names
output_node_names='tower_0/images,tower_0/boxes,tower_0/scores,tower_0/labels,tower_0/num_detections,training_flag'
#output_graph
output_graph = os.path.join(args.output_path,'detector.pb')
print('executed')  # fixed typo in the original 'excuted' status message
# Build argv as a list and run without a shell. The original interpolated the
# user-supplied paths into an os.system() string, which breaks on paths with
# spaces and is vulnerable to shell injection.
subprocess.run(
    [
        "python", "tools/freeze.py",
        "--input_checkpoint", input_checkpoint,
        "--input_meta_graph", input_meta_graph,
        "--output_node_names", output_node_names,
        "--output_graph", output_graph,
    ],
    check=False,  # the original ignored os.system()'s exit status
)
3be1c8da8fb0704e33d69f4791863e002d5b116a | 2,045 | py | Python | examples/nowcoder/SQL3/models.py | zhengtong0898/django-decode | 69680853a4a5b07f6a9c4b65c7d86b2d401a92b1 | [
"MIT"
] | 5 | 2020-07-14T07:48:10.000Z | 2021-12-20T21:20:10.000Z | examples/nowcoder/SQL3/models.py | zhengtong0898/django-decode | 69680853a4a5b07f6a9c4b65c7d86b2d401a92b1 | [
"MIT"
] | 7 | 2021-03-26T03:13:38.000Z | 2022-03-12T00:42:03.000Z | examples/nowcoder/SQL3/models.py | zhengtong0898/django-decode | 69680853a4a5b07f6a9c4b65c7d86b2d401a92b1 | [
"MIT"
] | 1 | 2021-02-16T07:04:25.000Z | 2021-02-16T07:04:25.000Z | from django.db import models
# 1. Django(Composite Primary Key).
# 2. Django(Disable Primary Key),
# Primary Key ,
# Djangoid, primary keyid.
#
#
# , InnoDB, Primary Key,
# Unique Key , InnoDBUnique Key.
# Unique Key, InnoDBPrimaryKey().
#
#
# , , Model.
# CREATE TABLE `salaries` (
# `emp_no` int(11) NOT NULL,
# `salary` int(11) NOT NULL,
# `from_date` date NOT NULL,
# `to_date` date NOT NULL,
# PRIMARY KEY (`emp_no`,`from_date`)
# );
| 33.52459 | 109 | 0.684597 |
3be3d29eecfe1ea6c347859c1388d314f37ccbc5 | 1,247 | py | Python | concat_csv.py | jweckman/vim | c225f36ab05c2bdcedfc9866c367c1ddc4cd3646 | [
"MIT"
] | null | null | null | concat_csv.py | jweckman/vim | c225f36ab05c2bdcedfc9866c367c1ddc4cd3646 | [
"MIT"
] | null | null | null | concat_csv.py | jweckman/vim | c225f36ab05c2bdcedfc9866c367c1ddc4cd3646 | [
"MIT"
] | null | null | null | import pandas as pd
from pathlib import Path
import sys
''' Concatenates all csv files in the folder passed to stdin '''
path = Path(sys.argv[1])
if __name__ == '__main__':
csv_files = get_csv_paths(path)
encoding, delimiter = ask_details()
try:
frames = get_frames(csv_files, encoding, delimiter)
concat_output(frames)
except Exception as e:
print('Seems like there were files that could not be read\n')
print(str(e))
encoding, delimiter = ask_details()
frames = get_frames(csv_files, encoding, delimiter)
concat_output(frames)
| 30.414634 | 94 | 0.653569 |
3be3ffc19dbd5fc20c5420fc3ab9c6320aeeee0a | 2,589 | py | Python | catkin_ws/src/rostest_example/tests/duckiecall_tester_node.py | DiegoOrtegoP/Software | 4a07dd2dab29db910ca2e26848fa6b53b7ab00cd | [
"CC-BY-2.0"
] | 12 | 2016-04-14T12:21:46.000Z | 2021-06-18T07:51:40.000Z | catkin_ws/src/rostest_example/tests/duckiecall_tester_node.py | DiegoOrtegoP/Software | 4a07dd2dab29db910ca2e26848fa6b53b7ab00cd | [
"CC-BY-2.0"
] | 14 | 2017-03-03T23:33:05.000Z | 2018-04-03T18:07:53.000Z | catkin_ws/src/rostest_example/tests/duckiecall_tester_node.py | DiegoOrtegoP/Software | 4a07dd2dab29db910ca2e26848fa6b53b7ab00cd | [
"CC-BY-2.0"
] | 113 | 2016-05-03T06:11:42.000Z | 2019-06-01T14:37:38.000Z | #!/usr/bin/env python
import rospy
import unittest, rostest
from rostest_example.Quacker import *
from std_msgs.msg import String, Int32
if __name__ == '__main__':
    # Register this script as a ROS node with a fixed (non-anonymous) name.
    rospy.init_node('duckiecall_tester_node', anonymous=False)
    # Hand control to rostest, which runs the DuckiecallTesterNode test case
    # (defined elsewhere in this file, not visible in this excerpt).
    rostest.rosrun('rostest_example', 'duckiecall_tester_node', DuckiecallTesterNode)
| 43.15 | 127 | 0.696794 |
3be4dea35fbe812684c863cfb56967cde0971e92 | 1,679 | py | Python | buildroot/support/testing/tests/init/test_busybox.py | rbrenton/hassos | fa6f7ac74ddba50e76f5779c613c56d937684844 | [
"Apache-2.0"
] | 617 | 2015-01-04T14:33:56.000Z | 2022-03-24T22:42:25.000Z | buildroot/support/testing/tests/init/test_busybox.py | rbrenton/hassos | fa6f7ac74ddba50e76f5779c613c56d937684844 | [
"Apache-2.0"
] | 631 | 2015-01-01T22:53:25.000Z | 2022-03-17T18:41:00.000Z | buildroot/support/testing/tests/init/test_busybox.py | rbrenton/hassos | fa6f7ac74ddba50e76f5779c613c56d937684844 | [
"Apache-2.0"
] | 133 | 2015-03-03T18:40:05.000Z | 2022-03-18T13:34:26.000Z | import infra.basetest
from tests.init.base import InitSystemBase as InitSystemBase
| 25.830769 | 69 | 0.659321 |
3be559b23f04ad4fbb4310964aaa62522258d721 | 8,529 | py | Python | mayan/apps/linking/api_views.py | darrenflexxu/Mayan-EDMS | 6707365bfacd137e625ddc1b990168012246fa07 | [
"Apache-2.0"
] | null | null | null | mayan/apps/linking/api_views.py | darrenflexxu/Mayan-EDMS | 6707365bfacd137e625ddc1b990168012246fa07 | [
"Apache-2.0"
] | 5 | 2021-03-19T22:59:52.000Z | 2022-03-12T00:13:16.000Z | mayan/apps/linking/api_views.py | Sumit-Kumar-Jha/mayan | 5b7ddeccf080b9e41cc1074c70e27dfe447be19f | [
"Apache-2.0"
] | 1 | 2020-07-29T21:03:27.000Z | 2020-07-29T21:03:27.000Z | from __future__ import absolute_import, unicode_literals
from django.shortcuts import get_object_or_404
from mayan.apps.acls.models import AccessControlList
from mayan.apps.documents.models import Document
from mayan.apps.documents.permissions import permission_document_view
from mayan.apps.rest_api import generics
from .models import SmartLink
from .permissions import (
permission_smart_link_create, permission_smart_link_delete,
permission_smart_link_edit, permission_smart_link_view
)
from .serializers import (
ResolvedSmartLinkDocumentSerializer, ResolvedSmartLinkSerializer,
SmartLinkConditionSerializer, SmartLinkSerializer,
WritableSmartLinkSerializer
)
| 30.569892 | 85 | 0.653183 |
3be6d032aab66cc3f999f8f1017e760af49f209f | 4,013 | py | Python | download_stats.py | zhengsipeng/kinetics-downloader | c85c6946a4408d1f9219441ae3f7aed679b10458 | [
"MIT"
] | 263 | 2018-03-10T15:44:35.000Z | 2022-03-16T10:57:30.000Z | download_stats.py | zhengsipeng/kinetics-downloader | c85c6946a4408d1f9219441ae3f7aed679b10458 | [
"MIT"
] | 17 | 2018-09-13T00:30:22.000Z | 2021-07-26T17:42:33.000Z | download_stats.py | zhengsipeng/kinetics-downloader | c85c6946a4408d1f9219441ae3f7aed679b10458 | [
"MIT"
] | 85 | 2018-07-12T03:45:38.000Z | 2022-03-21T23:11:36.000Z | import argparse, os
import lib.config as config
import lib.utils as utils
def count_present_and_missing(cls, directory, metadata):
  """
  Count how many videos of a class are present on disk and how many are missing.

  :param cls: The class label to count. If None, count every video
              (used for testing videos, which have no classes).
  :param directory: Directory expected to contain the ``<key>.mp4`` files.
  :param metadata: Kinetics metadata dict (key -> annotation record).
  :return: Tuple: number of present videos, number of missing videos
  """

  # Keys whose annotated label matches the requested class (or all keys).
  selected = (
    key for key, record in metadata.items()
    if cls is None or record["annotations"]["label"] == cls
  )

  found = 0
  absent = 0
  for video_id in selected:
    video_path = os.path.join(directory, "{}.mp4".format(video_id))
    if os.path.isfile(video_path):
      found += 1
    else:
      absent += 1

  return found, absent
if __name__ == "__main__":
  # CLI entry point: parse options and delegate to main(), which is defined
  # elsewhere in this file (not visible in this excerpt).
  parser = argparse.ArgumentParser("Print statistics about downloaded videos.")
  # -d: per-class breakdown instead of just the totals.
  parser.add_argument("-d", "--details", action="store_true", default=False, help="detailed stats for each found class")
  # -s: restrict the statistics to a JSON-listed subset of classes.
  parser.add_argument("-s", "--subset", help="path to a JSON file containing a subset of Kinetics classes")
  parsed = parser.parse_args()

  main(parsed)
3beac65b5cb6099092c07d4a94aab675261b906d | 3,885 | py | Python | e2e/test_accessbot_show_resources.py | arthurSena0704/accessbot | 5097453c45c5193e6516bc1f9441e90e49b3d324 | [
"Apache-2.0"
] | null | null | null | e2e/test_accessbot_show_resources.py | arthurSena0704/accessbot | 5097453c45c5193e6516bc1f9441e90e49b3d324 | [
"Apache-2.0"
] | null | null | null | e2e/test_accessbot_show_resources.py | arthurSena0704/accessbot | 5097453c45c5193e6516bc1f9441e90e49b3d324 | [
"Apache-2.0"
] | 3 | 2021-08-16T22:34:05.000Z | 2021-09-22T02:51:13.000Z | # pylint: disable=invalid-name
import pytest
import sys
from unittest.mock import MagicMock
from test_common import create_config, DummyResource
sys.path.append('plugins/sdm')
from lib import ShowResourcesHelper
pytest_plugins = ["errbot.backends.test"]
extra_plugin_dir = 'plugins/sdm'
def test_show_resources_when_hide_resource_tag_false(self, mocked_testbot_hide_resource_false):
    # With the hide-resource tag disabled, "show available resources" should
    # list every resource. The mocked_testbot_hide_resource_false fixture is
    # defined elsewhere in this file (not visible in this excerpt).
    # NOTE(review): takes `self` at module level -- presumably it belongs to a
    # test class whose header was stripped from this view.
    mocked_testbot_hide_resource_false.push_message("show available resources")
    message = mocked_testbot_hide_resource_false.pop_message()
    assert "Aaa (type: DummyResource)" in message
    assert "Bbb (type: DummyResource)" in message
class Test_show_resources_by_role:
    # Tests for listing resources scoped to the requesting user's role.
    def test_show_resources_command(self, mocked_testbot):
        # The bot's reply to "show available resources" is expected to include
        # both role-scoped dummy resources. The mocked_testbot fixture is
        # defined elsewhere in this file (not visible in this excerpt).
        mocked_testbot.push_message("show available resources")
        message = mocked_testbot.pop_message()
        assert "Aaa in role (type: DummyResource)" in message
        assert "Bbb in role (type: DummyResource)" in message
# pylint: disable=dangerous-default-value
| 45.174419 | 111 | 0.7426 |
3beb73cbef34b508a909878716873d4472cedd74 | 64 | py | Python | tftf/layers/activations/tanh.py | yusugomori/tftf | e98b9ddffdbaa1fe04320437a47f12f3182ab6f3 | [
"Apache-2.0"
] | 35 | 2018-08-11T05:01:41.000Z | 2021-01-29T02:28:47.000Z | tftf/layers/activations/tanh.py | yusugomori/tftf | e98b9ddffdbaa1fe04320437a47f12f3182ab6f3 | [
"Apache-2.0"
] | null | null | null | tftf/layers/activations/tanh.py | yusugomori/tftf | e98b9ddffdbaa1fe04320437a47f12f3182ab6f3 | [
"Apache-2.0"
] | 4 | 2018-10-19T14:12:04.000Z | 2021-01-29T02:28:49.000Z | import tensorflow as tf
| 10.666667 | 24 | 0.671875 |
3becb3cb8a9347c5c892e9c12331df179e27be0f | 406 | py | Python | game/migrations/0011_onlinegame_playersready.py | dimamelnik22/drawfulru | da2d21ef4c0b6776fc7c1059dbdf617f591c4ef8 | [
"Apache-2.0"
] | null | null | null | game/migrations/0011_onlinegame_playersready.py | dimamelnik22/drawfulru | da2d21ef4c0b6776fc7c1059dbdf617f591c4ef8 | [
"Apache-2.0"
] | 7 | 2020-06-05T20:14:47.000Z | 2021-09-22T18:18:06.000Z | game/migrations/0011_onlinegame_playersready.py | dimamelnik22/drawfulru | da2d21ef4c0b6776fc7c1059dbdf617f591c4ef8 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0 on 2019-12-23 06:19
from django.db import migrations, models
| 21.368421 | 50 | 0.576355 |
3bed882365f0c947238e86347d95e522a56968a9 | 2,380 | py | Python | deprecated.py | tungr/CoeusBot | 90bdc869a1f8c077a1f88dcf1335d20a19d49fee | [
"MIT"
] | null | null | null | deprecated.py | tungr/CoeusBot | 90bdc869a1f8c077a1f88dcf1335d20a19d49fee | [
"MIT"
] | null | null | null | deprecated.py | tungr/CoeusBot | 90bdc869a1f8c077a1f88dcf1335d20a19d49fee | [
"MIT"
] | null | null | null | #### Transfer data from JSON file to MongoDB ####
# @client.command()
# async def qupload(self, ctx):
# mclient = MongoClient(host="localhost", port=27017)
# db = mclient.coeusbot
# quotesdb = db.quotes
# with open('quotes.json', 'r') as f:
# quotes = json.load(f)
# for quotenum in range(1, len(quotes)):
# datetime = quotes[str(quotenum)]['date_time']
# author = quotes[str(quotenum)]['author']
# quote = quotes[str(quotenum)]['quote']
# guild = ctx.guild.id
# qamount = quotesdb.find({"guild": ctx.guild.id}) # Grab all quotes of same guild id
# qid = 1
# # Increment qid based on # of quotes in guild
# for qnum in qamount:
# qid += 1
# mquote = {
# "datetime": datetime,
# "author": author,
# "quote": quote,
# "guild": guild,
# "qid": qid
# }
# result = quotesdb.insert_one(mquote)
# mclient.close()
# await ctx.reply(f'Quotes transferred')
#### Add quote to JSON file ####
# @client.command(aliases=['qua'])
# async def quoteadd(self, ctx, *quote):
# with open('quotes.json', 'r') as f:
# quotes = json.load(f)
# if str(len(quotes)+1) not in quotes:
# now = dt.datetime.now()
# date_time = now.strftime("%m/%d/%Y, %I:%M%p")
# q_amount = len(quotes) + 1
# quotes[str(q_amount)] = {}
# quotes[str(q_amount)]['quote'] = quote
# quotes[str(q_amount)]['date_time'] = date_time
# quotes[str(q_amount)]['author'] = str(ctx.author)
# with open('quotes.json', 'w') as f:
# json.dump(quotes, f)
# await ctx.reply(f'Quote added')
#### Grab quote from JSON file ####
# @client.command()
# async def quotes(self, ctx):
# with open('quotes.json', 'r') as f:
# quotes = json.load(f)
# randquote = random.randint(1,len(quotes))
# quote = quotes[str(randquote)]['quote']
# date_time = quotes[str(randquote)]['date_time']
# author = quotes[str(randquote)]['author']
# quote_embed = discord.Embed(title=f' Quote #{randquote}', color=0x03fcce)
# newquote = ' '.join(quote)
# quote_embed.add_field(name='\u200b', value=f'{newquote}', inline=False)
# quote_embed.set_footer(text=f'{date_time}')
# await ctx.send(embed=quote_embed) | 32.162162 | 93 | 0.561765 |
3bedf4765622764f7282bd201ee9a488ae9fdbd2 | 370 | py | Python | packaging/scripts/collect_matlab.py | robotraconteur/robotraconteur | ff997351761a687be364234684202e3348c4083c | [
"Apache-2.0"
] | 37 | 2019-01-31T06:05:17.000Z | 2022-03-21T06:56:18.000Z | packaging/scripts/collect_matlab.py | robotraconteur/robotraconteur | ff997351761a687be364234684202e3348c4083c | [
"Apache-2.0"
] | 14 | 2019-07-18T04:09:45.000Z | 2021-08-31T02:04:22.000Z | packaging/scripts/collect_matlab.py | robotraconteur/robotraconteur | ff997351761a687be364234684202e3348c4083c | [
"Apache-2.0"
] | 3 | 2018-11-23T22:03:22.000Z | 2021-11-02T10:03:39.000Z | import shutil
import pathlib
asset_dirs = ["artifacts/main", "artifacts/build_python_version"]
pathlib.Path("distfiles").mkdir(exist_ok=True)
for asset_dir in asset_dirs:
for fname in list(pathlib.Path(asset_dir).glob('**/RobotRaconteur-*-MATLAB*')):
print(fname)
dest = pathlib.Path(fname)
shutil.copy(str(fname),"distfiles/" + dest.name) | 30.833333 | 83 | 0.705405 |
3bee8a2e3ce8d0e0dbf5627d1dd4f2bc366b92ab | 821 | py | Python | setup.py | bryan-he/closek | b0367e09d7fa1a096580d762db6fd948e04c1d9e | [
"MIT"
] | null | null | null | setup.py | bryan-he/closek | b0367e09d7fa1a096580d762db6fd948e04c1d9e | [
"MIT"
] | null | null | null | setup.py | bryan-he/closek | b0367e09d7fa1a096580d762db6fd948e04c1d9e | [
"MIT"
] | null | null | null | """Metadata for package to allow installation with pip."""
import setuptools
exec(open("closek/version.py").read())
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="closek",
description="Scikit-learn-style implementation of the close-k classifier.",
long_description=long_description,
long_description_content_type="text/markdown",
author="Bryan He",
author_email="bryanhe@stanford.edu",
url="https://github.com/bryan-he/close-k",
version=__version__,
packages=setuptools.find_packages(),
install_requires=[
"torch",
"numpy",
"sklearn",
],
tests_require=[
"pmlb",
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
]
)
| 24.878788 | 79 | 0.6419 |
3beed423b84aed994aacbe9098f28892995cd210 | 491 | py | Python | ramda/memoize_with_test.py | jakobkolb/ramda.py | 982b2172f4bb95b9a5b09eff8077362d6f2f0920 | [
"MIT"
] | 56 | 2018-08-06T08:44:58.000Z | 2022-03-17T09:49:03.000Z | ramda/memoize_with_test.py | jakobkolb/ramda.py | 982b2172f4bb95b9a5b09eff8077362d6f2f0920 | [
"MIT"
] | 28 | 2019-06-17T11:09:52.000Z | 2022-02-18T16:59:21.000Z | ramda/memoize_with_test.py | jakobkolb/ramda.py | 982b2172f4bb95b9a5b09eff8077362d6f2f0920 | [
"MIT"
] | 5 | 2019-09-18T09:24:38.000Z | 2021-07-21T08:40:23.000Z | from ramda.memoize_with import memoize_with
from ramda.product import product
from ramda.private.asserts import assert_equal as e
count = 0
| 20.458333 | 51 | 0.631365 |
3beefe8b0cd9218be467b3453fa033b4d6ace79a | 18,821 | py | Python | cdlib/evaluation/comparison.py | xing-lab-pitt/cdlib | 590e145429cda1db4d3671c994c502bedd77f108 | [
"BSD-2-Clause"
] | 248 | 2019-02-17T05:31:22.000Z | 2022-03-30T04:57:20.000Z | cdlib/evaluation/comparison.py | xing-lab-pitt/cdlib | 590e145429cda1db4d3671c994c502bedd77f108 | [
"BSD-2-Clause"
] | 130 | 2019-02-10T19:35:55.000Z | 2022-03-31T10:58:39.000Z | cdlib/evaluation/comparison.py | xing-lab-pitt/cdlib | 590e145429cda1db4d3671c994c502bedd77f108 | [
"BSD-2-Clause"
] | 70 | 2019-02-15T19:04:29.000Z | 2022-03-27T12:58:50.000Z | import numpy as np
from cdlib.evaluation.internal import onmi
from cdlib.evaluation.internal.omega import Omega
from nf1 import NF1
from collections import namedtuple, defaultdict
__all__ = [
"MatchingResult",
"normalized_mutual_information",
"overlapping_normalized_mutual_information_LFK",
"overlapping_normalized_mutual_information_MGH",
"omega",
"f1",
"nf1",
"adjusted_rand_index",
"adjusted_mutual_information",
"variation_of_information",
"partition_closeness_simple",
]
# MatchingResult = namedtuple("MatchingResult", ['mean', 'std'])
MatchingResult = namedtuple("MatchingResult", "score std")
MatchingResult.__new__.__defaults__ = (None,) * len(MatchingResult._fields)
def normalized_mutual_information(
first_partition: object, second_partition: object
) -> MatchingResult:
"""
Normalized Mutual Information between two clusterings.
Normalized Mutual Information (NMI) is an normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.normalized_mutual_information(louvain_communities,leiden_communities)
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
first_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(first_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
second_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(second_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
from sklearn.metrics import normalized_mutual_info_score
return MatchingResult(
score=normalized_mutual_info_score(first_partition_c, second_partition_c)
)
def overlapping_normalized_mutual_information_LFK(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """
    Overlapping Normalized Mutual Information between two clusterings.

    Extension of NMI able to cope with overlapping partitions, in the variant
    proposed by Lancichinetti et al. (1).

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.overlapping_normalized_mutual_information_LFK(louvain_communities,leiden_communities)

    :Reference:

    1. Lancichinetti, A., Fortunato, S., & Kertesz, J. (2009). Detecting the overlapping and hierarchical community structure in complex networks. New Journal of Physics, 11(3), 033015.
    """
    # onmi expects each community as a set of nodes.
    first_cover = [set(community) for community in first_partition.communities]
    second_cover = [set(community) for community in second_partition.communities]
    return MatchingResult(score=onmi.onmi(first_cover, second_cover))
def overlapping_normalized_mutual_information_MGH(
    first_partition: object, second_partition: object, normalization: str = "max"
) -> MatchingResult:
    """
    Overlapping Normalized Mutual Information between two clusterings.

    Extension of NMI able to cope with overlapping partitions, in the variant
    proposed by McDaid et al. (1), which normalizes differently from the
    original LFK formulation.

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :param normalization: one of "max" or "LFK". Default "max" (corresponds to the main method described in the article)
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.overlapping_normalized_mutual_information_MGH(louvain_communities,leiden_communities)

    :Reference:

    1. McDaid, A. F., Greene, D., & Hurley, N. (2011). Normalized mutual information to evaluate overlapping community finding algorithms. arXiv preprint arXiv:1110.2515. Chicago
    """
    # Map the public normalization names onto the variants understood by the
    # internal onmi implementation.
    variant_by_normalization = {"max": "MGH", "LFK": "MGH_LFK"}
    if normalization not in variant_by_normalization:
        raise ValueError(
            "Wrong 'normalization' value. Please specify one among [max, LFK]."
        )

    first_cover = [set(community) for community in first_partition.communities]
    second_cover = [set(community) for community in second_partition.communities]
    return MatchingResult(
        score=onmi.onmi(
            first_cover,
            second_cover,
            variant=variant_by_normalization[normalization],
        )
    )
def omega(first_partition: object, second_partition: object) -> MatchingResult:
    """
    Omega index: a resemblance measure for overlapping, complete-coverage
    network clusterings.

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.omega(louvain_communities,leiden_communities)

    :Reference:

    1. Gabriel Murray, Giuseppe Carenini, and Raymond Ng. 2012. `Using the omega index for evaluating abstractive algorithms detection. <https://pdfs.semanticscholar.org/59d6/5d5aa09d789408fd9fd3c009a1b070ff5859.pdf/>`_ In Proceedings of Workshop on Evaluation Metrics and System Comparison for Automatic Summarization. Association for Computational Linguistics, Stroudsburg, PA, USA, 10-18.
    """
    # Both clusterings must cover every node of the network.
    __check_partition_coverage(first_partition, second_partition)

    # The Omega implementation expects {community_id: [nodes]} mappings.
    clustering_a = dict(enumerate(first_partition.communities))
    clustering_b = dict(enumerate(second_partition.communities))

    return MatchingResult(score=Omega(clustering_a, clustering_b).omega_score)
def f1(first_partition: object, second_partition: object) -> MatchingResult:
    """
    Average F1 score of the optimal community matches among the partitions in
    input. Works on overlapping/non-overlapping complete/partial coverage
    partitions.

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.f1(louvain_communities,leiden_communities)

    :Reference:

    1. Rossetti, G., Pappalardo, L., & Rinzivillo, S. (2016). `A novel approach to evaluate algorithms detection internal on ground truth. <https://www.researchgate.net/publication/287204505_A_novel_approach_to_evaluate_community_detection_algorithms_on_ground_truth/>`_ In Complex Networks VII (pp. 133-144). Springer, Cham.
    """
    # NF1's summary carries both the mean and std of the matched F1 scores.
    matcher = NF1(first_partition.communities, second_partition.communities)
    details = matcher.summary()["details"]
    return MatchingResult(
        score=details["F1 mean"][0], std=details["F1 std"][0]
    )
def nf1(first_partition: object, second_partition: object) -> MatchingResult:
    """
    Normalized F1 score of the optimal community matches among the partitions
    in input. Works on overlapping/non-overlapping complete/partial coverage
    partitions.

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.nf1(louvain_communities,leiden_communities)

    :Reference:

    1. Rossetti, G., Pappalardo, L., & Rinzivillo, S. (2016). `A novel approach to evaluate algorithms detection internal on ground truth. <https://www.researchgate.net/publication/287204505_A_novel_approach_to_evaluate_community_detection_algorithms_on_ground_truth/>`_

    2. Rossetti, G. (2017). : `RDyn: graph benchmark handling algorithms dynamics. Journal of Complex Networks. <https://academic.oup.com/comnet/article-abstract/5/6/893/3925036?redirectedFrom=PDF/>`_ 5(6), 893-912.
    """
    # The NF1 summary stores the normalized score in the "scores" frame under
    # the "NF1" row label.
    summary = NF1(first_partition.communities, second_partition.communities).summary()
    return MatchingResult(score=summary["scores"].loc["NF1"][0])
def adjusted_rand_index(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """Rand index adjusted for chance.

    The Rand Index computes a similarity measure between two clusterings
    by considering all pairs of samples and counting pairs that are
    assigned in the same or different clusters in the predicted and
    true clusterings. The raw RI score is then "adjusted for chance" into
    the ARI score using the following scheme::

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    ARI is close to 0.0 for random labelings and exactly 1.0 for identical
    clusterings (up to a permutation), and is symmetric::

        adjusted_rand_index(a, b) == adjusted_rand_index(b, a)

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.adjusted_rand_index(louvain_communities,leiden_communities)

    :Reference:

    1. Hubert, L., & Arabie, P. (1985). `Comparing partitions. <https://link.springer.com/article/10.1007/BF01908075/>`_ Journal of classification, 2(1), 193-218.
    """
    # Both partitions must cover the same node set and be non-overlapping.
    __check_partition_coverage(first_partition, second_partition)
    __check_partition_overlap(first_partition, second_partition)

    def _node_ordered_labels(partition):
        # Flatten the clustering into (node, community-id) pairs, then read
        # the community ids back sorted by node so both label vectors are
        # aligned on the same node ordering.
        pairs = [
            (node, cid)
            for cid, community in enumerate(partition.communities)
            for node in community
        ]
        pairs.sort(key=lambda pair: pair[0])
        return [cid for _, cid in pairs]

    from sklearn.metrics import adjusted_rand_score

    return MatchingResult(
        score=adjusted_rand_score(
            _node_ordered_labels(first_partition),
            _node_ordered_labels(second_partition),
        )
    )
def adjusted_mutual_information(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """Adjusted Mutual Information between two clusterings.

    Adjusted Mutual Information (AMI) is an adjustment of the Mutual
    Information (MI) score to account for chance. It accounts for the fact
    that the MI is generally higher for two clusterings with a larger number
    of clusters, regardless of whether there is actually more information
    shared. For two clusterings :math:`U` and :math:`V`::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]

    The metric is independent of the absolute label values and symmetric.
    Be mindful that it is an order of magnitude slower than other metrics,
    such as the Adjusted Rand Index.

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.adjusted_mutual_information(louvain_communities,leiden_communities)

    :Reference:

    1. Vinh, N. X., Epps, J., & Bailey, J. (2010). `Information theoretic measures for clusterings comparison: Variants, properties, normalization and correction for chance. <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf/>`_ Journal of Machine Learning Research, 11(Oct), 2837-2854.
    """
    # Both partitions must cover the same node set and be non-overlapping.
    __check_partition_coverage(first_partition, second_partition)
    __check_partition_overlap(first_partition, second_partition)

    def _node_ordered_labels(partition):
        # Community id of every node, listed in sorted node order so the two
        # label vectors are aligned on the same nodes.
        pairs = [
            (node, cid)
            for cid, community in enumerate(partition.communities)
            for node in community
        ]
        pairs.sort(key=lambda pair: pair[0])
        return [cid for _, cid in pairs]

    from sklearn.metrics import adjusted_mutual_info_score

    return MatchingResult(
        score=adjusted_mutual_info_score(
            _node_ordered_labels(first_partition),
            _node_ordered_labels(second_partition),
        )
    )
def variation_of_information(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """Variation of Information among two nodes partitions.

    $$ H(p)+H(q)-2MI(p, q) $$

    where MI is the mutual information, H the partition entropy and p,q are the algorithms sets

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.variation_of_information(louvain_communities,leiden_communities)

    :Reference:

    1. Meila, M. (2007). `Comparing clusterings - an information based distance. <https://www.sciencedirect.com/science/article/pii/S0047259X06002016/>`_ Journal of Multivariate Analysis, 98, 873-895. doi:10.1016/j.jmva.2006.11.013
    """
    # Both partitions must cover the same node set and be non-overlapping.
    __check_partition_coverage(first_partition, second_partition)
    __check_partition_overlap(first_partition, second_partition)

    # Total node count; community sizes divided by it give the marginal
    # probabilities p and q, overlaps give the joint probability.
    total_nodes = float(
        sum(len(community) for community in first_partition.communities)
    )
    first_sets = [set(community) for community in first_partition.communities]
    second_sets = [set(community) for community in second_partition.communities]

    vi = 0.0
    for community_a in first_sets:
        p = len(community_a) / total_nodes
        for community_b in second_sets:
            q = len(community_b) / total_nodes
            joint = len(community_a & community_b) / total_nodes
            # Only intersecting pairs contribute (log of 0 is undefined).
            if joint > 0.0:
                vi += joint * (np.log2(joint / p) + np.log2(joint / q))
    return MatchingResult(score=abs(vi))
def partition_closeness_simple(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """Community size density closeness.
    Simple implementation that does not leverage kernel density estimator.

    $$ S_G(A,B) = \frac{1}{2} \Sum_{i=1}^{r}\Sum_{j=1}^{s} min(\frac{n^a(x^a_i)}{N^a}, \frac{n^b_j(x^b_j)}{N^b}) \delta(x_i^a,x_j^b) $$

    where:

    $$ N^a $$ total number of communities in A of any size;
    $$ x^a $$ ordered list of community sizes for A;
    $$ n^a $$ multiplicity of community sizes for A.

    (symmetrically for B)

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.partition_closeness_simple(louvain_communities,leiden_communities)

    :Reference:

    1. Dao, Vinh-Loc, Ccile Bothorel, and Philippe Lenca. "Estimating the similarity of community detection methods based on cluster size distribution." International Conference on Complex Networks and their Applications. Springer, Cham, 2018.
    """

    def _size_distribution(partition):
        # Histogram of community sizes: size -> number of communities having
        # that size (the multiplicity n(x) of the reference formula).
        histogram = defaultdict(int)
        for community in partition.communities:
            histogram[len(community)] += 1
        sizes = sorted(histogram)
        freqs = [histogram[size] for size in sizes]
        # Normalization constant of the formula (sum of size * multiplicity).
        total = sum(size * freq for size, freq in zip(sizes, freqs))
        return sizes, freqs, total

    # Fix: multiplicities must be counted over *all* communities. The former
    # implementation deduplicated the sizes with set() before counting, which
    # collapsed every multiplicity to 1 and skewed the weights whenever two
    # communities shared the same size.
    coms_a, freq_a, n_a = _size_distribution(first_partition)
    coms_b, freq_b, n_b = _size_distribution(second_partition)

    closeness = 0
    for i in range(len(coms_a)):
        for j in range(len(coms_b)):
            # delta(x_i^a, x_j^b): only identical sizes contribute.
            if coms_a[i] == coms_b[j]:
                closeness += min(
                    (coms_a[i] * freq_a[i]) / n_a, (coms_b[j] * freq_b[j]) / n_b
                )
    closeness *= 0.5
    return MatchingResult(score=closeness)
| 36.263969 | 391 | 0.692684 |
3bef530282cd351acc8d5d5fce296f7123e0bfe8 | 56 | py | Python | node/views/__init__.py | mohamedmansor/path-detector | 14954795ea47109d404b54f74575337f86d6134f | [
"MIT"
] | null | null | null | node/views/__init__.py | mohamedmansor/path-detector | 14954795ea47109d404b54f74575337f86d6134f | [
"MIT"
] | null | null | null | node/views/__init__.py | mohamedmansor/path-detector | 14954795ea47109d404b54f74575337f86d6134f | [
"MIT"
] | null | null | null | from .node_view import ConnectNodesViewSet, PathViewSet
| 28 | 55 | 0.875 |
3bf1bbdf44b6a8b3ce4f31f26290f905b3426047 | 1,193 | py | Python | tests/modules/extra/fastapi/controller/integration/test_fastapi_app_with_controller.py | alice-biometrics/petisco | b96e697cc875f67a28e60b4fc0d9ed9fc646cd86 | [
"MIT"
] | 19 | 2019-11-01T09:27:17.000Z | 2021-12-15T10:52:31.000Z | tests/modules/extra/fastapi/controller/integration/test_fastapi_app_with_controller.py | alice-biometrics/petisco | b96e697cc875f67a28e60b4fc0d9ed9fc646cd86 | [
"MIT"
] | 68 | 2020-01-15T06:55:00.000Z | 2022-02-22T15:57:24.000Z | tests/modules/extra/fastapi/controller/integration/test_fastapi_app_with_controller.py | alice-biometrics/petisco | b96e697cc875f67a28e60b4fc0d9ed9fc646cd86 | [
"MIT"
] | 2 | 2019-11-19T10:40:25.000Z | 2019-11-28T07:12:07.000Z | from typing import Optional
import pytest
from fastapi import FastAPI, Header
from fastapi.testclient import TestClient
from meiga import BoolResult, Failure, isFailure, isSuccess
from petisco import NotFound, assert_http
from petisco.extra.fastapi import FastAPIController
app = FastAPI(title="test-app")
result_from_expected_behavior = {
"success": isSuccess,
"failure_generic": isFailure,
"failure_not_found": Failure(NotFound()),
}
| 29.097561 | 78 | 0.75943 |
3bf3ea019e2b8d99252bef80157556503f118e91 | 438 | py | Python | component/reminder/tasks.py | pablo0723/just-a-test | 31e8157a5d1f50b30d83d945b77caaa2b7b717ba | [
"MIT"
] | null | null | null | component/reminder/tasks.py | pablo0723/just-a-test | 31e8157a5d1f50b30d83d945b77caaa2b7b717ba | [
"MIT"
] | null | null | null | component/reminder/tasks.py | pablo0723/just-a-test | 31e8157a5d1f50b30d83d945b77caaa2b7b717ba | [
"MIT"
] | null | null | null | from django.core.mail import send_mail
from component.reminder.models import Reminder
from server.celery import app
| 27.375 | 53 | 0.636986 |
3bf45f24ab2dd0e2ee1d2a8a4c89e7d8442c50d9 | 1,203 | py | Python | skmine/tests/test_base.py | remiadon/scikit-mine | 769d7d5ea0dda5d4adea33236733f4ce1ea0c815 | [
"BSD-3-Clause"
] | null | null | null | skmine/tests/test_base.py | remiadon/scikit-mine | 769d7d5ea0dda5d4adea33236733f4ce1ea0c815 | [
"BSD-3-Clause"
] | null | null | null | skmine/tests/test_base.py | remiadon/scikit-mine | 769d7d5ea0dda5d4adea33236733f4ce1ea0c815 | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
import pytest
from ..base import BaseMiner, MDLOptimizer
| 20.741379 | 70 | 0.591022 |
3bf5e5434eef73539dca4c83819a0c06da30de79 | 893 | py | Python | src/season/data/websrc/modules/intro/controller/index.py | season-framework/season-flask-wiz | 95d75758a6036d387c1b803bd6a68f238ec430e0 | [
"MIT"
] | 6 | 2021-12-09T05:06:49.000Z | 2022-01-18T02:38:03.000Z | src/season/data/websrc/modules/intro/controller/index.py | season-framework/season-flask-wiz | 95d75758a6036d387c1b803bd6a68f238ec430e0 | [
"MIT"
] | 2 | 2022-02-18T02:00:36.000Z | 2022-03-22T05:18:30.000Z | src/season/data/websrc/modules/intro/controller/index.py | season-framework/season-flask-wiz | 95d75758a6036d387c1b803bd6a68f238ec430e0 | [
"MIT"
] | 2 | 2022-01-07T00:26:00.000Z | 2022-03-07T06:24:27.000Z | import season
import random | 33.074074 | 80 | 0.571109 |
3bf63b37e1c270fbc81e663a1141ad00744d52eb | 11,770 | py | Python | crypten/nn/onnx_converter.py | chenfar/CrypTen | 9a11b79f1fa9d707eb38abf7d812911980520559 | [
"MIT"
] | null | null | null | crypten/nn/onnx_converter.py | chenfar/CrypTen | 9a11b79f1fa9d707eb38abf7d812911980520559 | [
"MIT"
] | null | null | null | crypten/nn/onnx_converter.py | chenfar/CrypTen | 9a11b79f1fa9d707eb38abf7d812911980520559 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import io
import onnx
import torch
import torch.onnx.symbolic_helper as sym_help
import torch.onnx.symbolic_registry as sym_registry
import torch.onnx.utils
from onnx import numpy_helper
from torch.onnx import OperatorExportTypes
from . import module
try:
import tensorflow as tf # noqa
import tf2onnx
TF_AND_TF2ONNX = True
except ImportError:
TF_AND_TF2ONNX = False
def from_onnx(onnx_string_or_file):
    """
    Converts an ONNX model serialized in an `onnx_string_or_file` to a CrypTen model.
    """
    # Deserialize the ONNX payload, then translate its graph into crypten.nn
    # modules.
    return _to_crypten(_load_onnx_model(onnx_string_or_file))
def from_pytorch(pytorch_model, dummy_input):
    """
    Converts a PyTorch model `pytorch_model` into a CrypTen model by tracing it
    using the input `dummy_input`.
    """
    # Trace the model to an in-memory ONNX graph and rebuild it in crypten.nn.
    onnx_stream = _from_pytorch_to_bytes(pytorch_model, dummy_input)
    crypten_model = from_onnx(onnx_stream)
    onnx_stream.close()

    # Keep a copy of the source architecture so the model can later be
    # exported back to PyTorch.
    crypten_model.pytorch_model = copy.deepcopy(pytorch_model)

    # Mirror the train/eval flag of the source model.
    crypten_model.train(mode=pytorch_model.training)
    return crypten_model
def from_tensorflow(tensorflow_graph_def, inputs, outputs):
    """
    Function that converts Tensorflow model into CrypTen model based on
    https://github.com/onnx/tensorflow-onnx/blob/master/tf2onnx/convert.py
    The model is returned in evaluation mode.
    Args:
        `tensorflow_graph_def`: Input Tensorflow GraphDef to be converted
        `inputs`: input nodes
        `outputs`: output nodes
    """
    # This entry point is intentionally disabled: it unconditionally raises.
    raise DeprecationWarning(
        "crypten.nn.from_tensorflow is deprecated. ",
        "CrypTen will no longer support model conversion from TensorFlow.",
    )
    # NOTE(review): everything below is unreachable because of the raise
    # above; it is retained for reference only.
    # Exporting model to ONNX graph
    if not TF_AND_TF2ONNX:
        raise ImportError("Please install both tensorflow and tf2onnx packages")
    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(tensorflow_graph_def, name="")
    with tf2onnx.tf_loader.tf_session(graph=tf_graph):
        g = tf2onnx.tfonnx.process_tf_graph(
            tf_graph,
            opset=10,
            continue_on_error=False,
            input_names=inputs,
            output_names=outputs,
        )
    onnx_graph = tf2onnx.optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model(
        "converted from {}".format(tensorflow_graph_def)
    )
    # Serialize the ONNX model into an in-memory stream for from_onnx.
    f = io.BytesIO()
    f.write(model_proto.SerializeToString())
    # construct CrypTen model
    # Note: We don't convert crypten model to training mode, as Tensorflow
    # models are used for both training and evaluation without the specific
    # conversion of one mode to another
    f.seek(0)
    crypten_model = from_onnx(f)
    return crypten_model
def _from_pytorch_to_bytes(pytorch_model, dummy_input):
    """
    Exports `pytorch_model` (traced with `dummy_input`) to ONNX and returns
    the serialized graph as a binary I/O stream positioned at the start.
    """
    # Throwaway export whose only purpose is to populate the PyTorch-to-ONNX
    # symbolic registry before it is patched.
    with io.BytesIO() as scratch:
        _export_pytorch_model(scratch, pytorch_model, dummy_input)

    # Patch the registry with CrypTen-specific symbolic functions.
    _update_onnx_symbolic_registry()

    # Re-export so the graph is built with the patched registry.
    stream = _export_pytorch_model(io.BytesIO(), pytorch_model, dummy_input)
    stream.seek(0)
    return stream
def _export_pytorch_model(f, pytorch_model, dummy_input):
    """
    Traces `pytorch_model` with `dummy_input`, writes the resulting ONNX
    graph to the binary stream `f`, and returns `f`.
    """
    torch.onnx.export(
        pytorch_model,
        dummy_input,
        f,
        do_constant_folding=False,
        export_params=True,
        enable_onnx_checker=True,
        input_names=["input"],
        operator_export_type=OperatorExportTypes.ONNX,
        output_names=["output"],
    )
    return f
# mapping from ONNX to crypten.nn for modules with different names:
# Keys are op-type strings as they appear in the exported graph; values are
# the crypten.nn module classes to instantiate when the crypten class name
# does not match the op type itself.
ONNX_TO_CRYPTEN = {
    "adaptive_avg_pool2d": module.AdaptiveAvgPool2d,
    "adaptive_max_pool2d": module.AdaptiveMaxPool2d,
    "AveragePool": module.AvgPool2d,
    "Clip": module.Hardtanh,
    "MaxPool": module.MaxPool2d,
    "Pad": module._ConstantPad,
    "Relu": module.ReLU,
    "ReduceMean": module.Mean,
    "ReduceSum": module.Sum,
}
def _to_crypten(onnx_model):
    """
    Function that converts an `onnx_model` to a CrypTen model.

    Builds a `module.Graph` containing one `module.Parameter` per ONNX
    initializer tensor and one crypten module per ONNX graph node.
    """
    # create graph:
    input_names, output_names = _get_input_output_names(onnx_model)
    assert len(output_names) == 1, "Only one output per model supported."
    crypten_model = module.Graph(input_names, output_names[0])
    # create nodes for the parameters:
    # initializers hold the model weights; each becomes a parameter node
    # without inputs, keyed by its tensor name.
    for node in onnx_model.graph.initializer:
        param = torch.from_numpy(numpy_helper.to_array(node))
        crypten_model.add_module(node.name, module.Parameter(param), [])
    # loop over all nodes:
    for node in onnx_model.graph.node:
        # get attributes and node type:
        attributes = {attr.name: _get_attribute_value(attr) for attr in node.attribute}
        crypten_class = _get_operator_class(node.op_type, attributes)
        # add CrypTen module to graph:
        # modules are keyed by their first output name so downstream nodes
        # can reference them via their input names.
        crypten_module = crypten_class.from_onnx(attributes=attributes)
        input_names = list(node.input)
        output_names = list(node.output)
        if node.op_type == "Dropout":
            output_names = [output_names[0]]  # do not output Dropout mask
        crypten_model.add_module(
            output_names[0], crypten_module, input_names, output_names=output_names
        )
    # return final model:
    # single-module graphs are unwrapped so callers get the module directly
    crypten_model = _get_model_or_module(crypten_model)
    return crypten_model
def _load_onnx_model(onnx_string_or_file):
    """
    Loads ONNX model from file or string.
    """
    # File-like inputs are detected by the presence of a `seek` method; they
    # are rewound before loading so prior reads do not matter.
    seek = getattr(onnx_string_or_file, "seek", None)
    if seek is not None:
        seek(0)
        return onnx.load(onnx_string_or_file)
    return onnx.load_model_from_string(onnx_string_or_file)
def _get_input_output_names(onnx_model):
    """
    Return input and output names of the ONNX graph.
    """
    graph = onnx_model.graph
    input_names = [tensor.name for tensor in graph.input]
    output_names = [tensor.name for tensor in graph.output]
    assert len(input_names) >= 1, "number of inputs should be at least 1"
    assert len(output_names) == 1, "number of outputs should be 1"
    return input_names, output_names
def _get_model_or_module(crypten_model):
    """
    Returns `Module` if model contains only one module. Otherwise returns model.
    """
    # Unwrap trivial graphs: a single-module model is replaced by the module
    # itself.
    submodules = list(crypten_model.modules())
    if len(submodules) == 1:
        return submodules[0]
    return crypten_model
def _get_attribute_value(attr):
    """
    Extracts the Python value stored in an ONNX attribute.
    """
    # Scalar fields, probed in the original precedence order.
    if attr.HasField("f"):  # floating-point attribute
        return attr.f
    if attr.HasField("i"):  # integer attribute
        return attr.i
    if attr.HasField("s"):  # string attribute
        return attr.s  # TODO: Sanitize string.
    if attr.HasField("t"):  # tensor attribute
        return torch.from_numpy(numpy_helper.to_array(attr.t))
    # Repeated fields: integer lists take precedence over float lists.
    if len(attr.ints) > 0:
        return list(attr.ints)
    if len(attr.floats) > 0:
        return list(attr.floats)
    raise ValueError("Unknown attribute type for attribute %s." % attr.name)
def _get_operator_class(node_op_type, attributes):
    """
    Resolves an ONNX op type to the corresponding `crypten.nn.Module` class.
    """
    # Prefer a crypten module with the same name as the op type; otherwise
    # fall back to the explicit ONNX_TO_CRYPTEN name mapping.
    fallback = ONNX_TO_CRYPTEN.get(node_op_type, None)
    crypten_class = getattr(module, node_op_type, fallback)
    if crypten_class is None:
        raise ValueError(f"CrypTen does not support ONNX op {node_op_type}.")
    return crypten_class
def _update_onnx_symbolic_registry():
"""
Updates the ONNX symbolic registry for operators that need a CrypTen-specific
implementation and custom operators.
"""
# update PyTorch's symbolic ONNX registry to output different functions:
for version_key, version_val in sym_registry._registry.items():
for function_key in version_val.keys():
if function_key == "softmax":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_softmax
if function_key == "log_softmax":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_logsoftmax
if function_key == "dropout":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_dropout
if function_key == "feature_dropout":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_feature_dropout
| 34.925816 | 87 | 0.693203 |
3bf70a1a9f2bab5e2d13cf95f5bb6e7cbc23fec9 | 3,582 | py | Python | examples/pipeline.py | nicolay-r/AREk | 19c39ec0dc9a17464cade03b9c4da0c6d1d21191 | [
"MIT"
] | null | null | null | examples/pipeline.py | nicolay-r/AREk | 19c39ec0dc9a17464cade03b9c4da0c6d1d21191 | [
"MIT"
] | null | null | null | examples/pipeline.py | nicolay-r/AREk | 19c39ec0dc9a17464cade03b9c4da0c6d1d21191 | [
"MIT"
] | null | null | null | from arekit.common.data.input.providers.label.multiple import MultipleLabelProvider
from arekit.common.data.row_ids.multiple import MultipleIDProvider
from arekit.common.data.storages.base import BaseRowsStorage
from arekit.common.data.views.samples import BaseSampleStorageView
from arekit.common.experiment.data_type import DataType
from arekit.common.labels.scaler import BaseLabelScaler
from arekit.contrib.experiment_rusentrel.labels.scalers.three import ThreeLabelScaler
from arekit.contrib.networks.context.architectures.pcnn import PiecewiseCNN
from arekit.contrib.networks.context.configurations.cnn import CNNConfig
from arekit.contrib.networks.core.ctx_inference import InferenceContext
from arekit.contrib.networks.core.feeding.bags.collection.single import SingleBagsCollection
from arekit.contrib.networks.core.input.helper_embedding import EmbeddingHelper
from arekit.contrib.networks.core.model import BaseTensorflowModel
from arekit.contrib.networks.core.model_io import NeuralNetworkModelIO
from arekit.contrib.networks.core.predict.provider import BasePredictProvider
from arekit.contrib.networks.core.predict.tsv_writer import TsvPredictWriter
from arekit.contrib.networks.shapes import NetworkInputShapes
from examples.input import EXAMPLES
from examples.repository import pipeline_serialize
if __name__ == '__main__':
text = EXAMPLES["simple"]
labels_scaler = ThreeLabelScaler()
label_provider = MultipleLabelProvider(label_scaler=labels_scaler)
pipeline_serialize(sentences_text_list=text, label_provider=label_provider)
pipeline_infer(labels_scaler)
| 40.704545 | 105 | 0.769961 |
3bf77e53ccae2099f5deb07947c3ee02b77cf7b8 | 9,038 | py | Python | python/lapack_like/reflect.py | justusc/Elemental | 145ccb28411f3f0c65ca30ecea776df33297e4ff | [
"BSD-3-Clause"
] | null | null | null | python/lapack_like/reflect.py | justusc/Elemental | 145ccb28411f3f0c65ca30ecea776df33297e4ff | [
"BSD-3-Clause"
] | null | null | null | python/lapack_like/reflect.py | justusc/Elemental | 145ccb28411f3f0c65ca30ecea776df33297e4ff | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
from ..core import *
import ctypes
# Apply packed reflectors
# =======================
lib.ElApplyPackedReflectors_s.argtypes = \
lib.ElApplyPackedReflectors_d.argtypes = \
lib.ElApplyPackedReflectors_c.argtypes = \
lib.ElApplyPackedReflectors_z.argtypes = \
lib.ElApplyPackedReflectorsDist_s.argtypes = \
lib.ElApplyPackedReflectorsDist_d.argtypes = \
lib.ElApplyPackedReflectorsDist_c.argtypes = \
lib.ElApplyPackedReflectorsDist_z.argtypes = \
[c_uint,c_uint,c_uint,c_uint,iType,c_void_p,c_void_p,c_void_p]
# Expand packed reflectors
# ========================
lib.ElExpandPackedReflectors_s.argtypes = \
lib.ElExpandPackedReflectors_d.argtypes = \
lib.ElExpandPackedReflectors_c.argtypes = \
lib.ElExpandPackedReflectors_z.argtypes = \
lib.ElExpandPackedReflectorsDist_s.argtypes = \
lib.ElExpandPackedReflectorsDist_d.argtypes = \
lib.ElExpandPackedReflectorsDist_c.argtypes = \
lib.ElExpandPackedReflectorsDist_z.argtypes = \
[c_uint,c_uint,iType,c_void_p,c_void_p]
# Hyperbolic reflector
# ====================
# Left application
# ----------------
lib.ElLeftHyperbolicReflector_s.argtypes = \
[POINTER(sType),c_void_p,POINTER(sType)]
lib.ElLeftHyperbolicReflector_d.argtypes = \
[POINTER(dType),c_void_p,POINTER(dType)]
lib.ElLeftHyperbolicReflector_c.argtypes = \
[POINTER(cType),c_void_p,POINTER(cType)]
lib.ElLeftHyperbolicReflector_z.argtypes = \
[POINTER(zType),c_void_p,POINTER(zType)]
lib.ElLeftHyperbolicReflectorDist_s.argtypes = \
[POINTER(sType),c_void_p,POINTER(sType)]
lib.ElLeftHyperbolicReflectorDist_d.argtypes = \
[POINTER(dType),c_void_p,POINTER(dType)]
lib.ElLeftHyperbolicReflectorDist_c.argtypes = \
[POINTER(cType),c_void_p,POINTER(cType)]
lib.ElLeftHyperbolicReflectorDist_z.argtypes = \
[POINTER(zType),c_void_p,POINTER(zType)]
# Right application
# -----------------
lib.ElRightHyperbolicReflector_s.argtypes = \
[POINTER(sType),c_void_p,POINTER(sType)]
lib.ElRightHyperbolicReflector_d.argtypes = \
[POINTER(dType),c_void_p,POINTER(dType)]
lib.ElRightHyperbolicReflector_c.argtypes = \
[POINTER(cType),c_void_p,POINTER(cType)]
lib.ElRightHyperbolicReflector_z.argtypes = \
[POINTER(zType),c_void_p,POINTER(zType)]
lib.ElRightHyperbolicReflectorDist_s.argtypes = \
[POINTER(sType),c_void_p,POINTER(sType)]
lib.ElRightHyperbolicReflectorDist_d.argtypes = \
[POINTER(dType),c_void_p,POINTER(dType)]
lib.ElRightHyperbolicReflectorDist_c.argtypes = \
[POINTER(cType),c_void_p,POINTER(cType)]
lib.ElRightHyperbolicReflectorDist_z.argtypes = \
[POINTER(zType),c_void_p,POINTER(zType)]
# Householder reflector
# =====================
# Left application
# ----------------
lib.ElLeftReflector_s.argtypes = [POINTER(sType),c_void_p,POINTER(sType)]
lib.ElLeftReflector_d.argtypes = [POINTER(dType),c_void_p,POINTER(dType)]
lib.ElLeftReflector_c.argtypes = [POINTER(cType),c_void_p,POINTER(cType)]
lib.ElLeftReflector_z.argtypes = [POINTER(zType),c_void_p,POINTER(zType)]
lib.ElLeftReflectorDist_s.argtypes = [POINTER(sType),c_void_p,POINTER(sType)]
lib.ElLeftReflectorDist_d.argtypes = [POINTER(dType),c_void_p,POINTER(dType)]
lib.ElLeftReflectorDist_c.argtypes = [POINTER(cType),c_void_p,POINTER(cType)]
lib.ElLeftReflectorDist_z.argtypes = [POINTER(zType),c_void_p,POINTER(zType)]
# Right application
# -----------------
lib.ElRightReflector_s.argtypes = [POINTER(sType),c_void_p,POINTER(sType)]
lib.ElRightReflector_d.argtypes = [POINTER(dType),c_void_p,POINTER(dType)]
lib.ElRightReflector_c.argtypes = [POINTER(cType),c_void_p,POINTER(cType)]
lib.ElRightReflector_z.argtypes = [POINTER(zType),c_void_p,POINTER(zType)]
lib.ElRightReflectorDist_s.argtypes = [POINTER(sType),c_void_p,POINTER(sType)]
lib.ElRightReflectorDist_d.argtypes = [POINTER(dType),c_void_p,POINTER(dType)]
lib.ElRightReflectorDist_c.argtypes = [POINTER(cType),c_void_p,POINTER(cType)]
lib.ElRightReflectorDist_z.argtypes = [POINTER(zType),c_void_p,POINTER(zType)]
| 42.834123 | 78 | 0.731356 |
3bf87ad7597d41df2c5bff20fab72d6e34dbefa1 | 2,443 | py | Python | src/PointClasses/Bisector.py | Lovely-XPP/tkzgeom | bf68e139dc05f759542d6611f4dc07f4f2727b92 | [
"MIT"
] | 41 | 2021-11-24T05:54:08.000Z | 2022-03-26T10:19:30.000Z | src/PointClasses/Bisector.py | Lovely-XPP/tkzgeom | bf68e139dc05f759542d6611f4dc07f4f2727b92 | [
"MIT"
] | 1 | 2022-02-28T04:34:51.000Z | 2022-03-07T10:49:27.000Z | src/PointClasses/Bisector.py | Lovely-XPP/tkzgeom | bf68e139dc05f759542d6611f4dc07f4f2727b92 | [
"MIT"
] | 10 | 2021-11-24T07:35:17.000Z | 2022-03-25T18:42:14.000Z | from Point import Point
import Constant as c
from GeometryMath import bisector_point
| 40.04918 | 119 | 0.56447 |
3bf883b35e2fe868219f30a0db3d466b114010f3 | 354 | py | Python | custom_components/fitx/const.py | Raukze/home-assistant-fitx | 2808200e0e87a0559b927dc013765bf1cd20030e | [
"MIT"
] | 3 | 2022-03-02T07:49:47.000Z | 2022-03-18T08:59:05.000Z | custom_components/fitx/const.py | Raukze/home-assistant-fitx | 2808200e0e87a0559b927dc013765bf1cd20030e | [
"MIT"
] | null | null | null | custom_components/fitx/const.py | Raukze/home-assistant-fitx | 2808200e0e87a0559b927dc013765bf1cd20030e | [
"MIT"
] | null | null | null | DOMAIN = "fitx"
ICON = "mdi:weight-lifter"
CONF_LOCATIONS = 'locations'
CONF_ID = 'id'
ATTR_ADDRESS = "address"
ATTR_STUDIO_NAME = "studioName"
ATTR_ID = CONF_ID
ATTR_URL = "url"
DEFAULT_ENDPOINT = "https://www.fitx.de/fitnessstudios/{id}"
REQUEST_METHOD = "GET"
REQUEST_AUTH = None
REQUEST_HEADERS = None
REQUEST_PAYLOAD = None
REQUEST_VERIFY_SSL = True | 25.285714 | 60 | 0.762712 |
3bf93c870b2bc30c3baf9567a64d06171558f06b | 1,894 | py | Python | youtube_dl/extractor/scivee.py | Logmytech/youtube-dl-QT | 1497297719a95c4f70fbfa32e0fa4e38cdd475dc | [
"MIT"
] | 5 | 2016-04-25T16:26:07.000Z | 2021-04-28T16:10:29.000Z | youtube_dl/extractor/scivee.py | Logmytech/youtube-dl-QT | 1497297719a95c4f70fbfa32e0fa4e38cdd475dc | [
"MIT"
] | 5 | 2016-04-22T01:33:31.000Z | 2016-08-04T15:33:19.000Z | youtube_dl/extractor/scivee.py | Logmytech/youtube-dl-QT | 1497297719a95c4f70fbfa32e0fa4e38cdd475dc | [
"MIT"
] | 5 | 2020-10-25T09:18:58.000Z | 2021-05-23T22:57:55.000Z | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
| 33.22807 | 116 | 0.541711 |
3bfa7757212343833fdcee31409e1364ca82a73d | 11,790 | py | Python | examples/plot_tuh_eeg_corpus.py | SciMK/braindecode | 65b8de3e8a542e299996c0917ea3383aea5a9a69 | [
"BSD-3-Clause"
] | null | null | null | examples/plot_tuh_eeg_corpus.py | SciMK/braindecode | 65b8de3e8a542e299996c0917ea3383aea5a9a69 | [
"BSD-3-Clause"
] | null | null | null | examples/plot_tuh_eeg_corpus.py | SciMK/braindecode | 65b8de3e8a542e299996c0917ea3383aea5a9a69 | [
"BSD-3-Clause"
] | null | null | null | """Process a big data EEG resource (TUH EEG Corpus)
===================================================
In this example, we showcase usage of the Temple University Hospital EEG Corpus
(https://www.isip.piconepress.com/projects/tuh_eeg/html/downloads.shtml#c_tueg)
including simple preprocessing steps as well as cutting of compute windows.
"""
# Author: Lukas Gemein <l.gemein@gmail.com>
#
# License: BSD (3-clause)
import os
import tempfile
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn')
import mne
from braindecode.datasets import TUH
from braindecode.preprocessing import preprocess, Preprocessor, create_fixed_length_windows
from braindecode.datautil.serialization import load_concat_dataset
mne.set_log_level('ERROR') # avoid messages everytime a window is extracted
###############################################################################
# If you want to try this code with the actual data, please delete the next
# section. We are required to mock some dataset functionality, since the data
# is not available at creation time of this example.
from unittest import mock
FAKE_PATHS = {
'tuh_eeg/v1.1.0/edf/01_tcp_ar/000/00000000/s001_2015_12_30/00000000_s001_t000.edf': b'0 00000000 M 01-JAN-1978 00000000 Age:37 ', # noqa E501
'tuh_eeg/v1.1.0/edf/02_tcp_le/000/00000058/s001_2003_02_05/00000058_s001_t000.edf': b'0 00000058 M 01-JAN-2003 00000058 Age:0.0109 ', # noqa E501
'tuh_eeg/v1.2.0/edf/03_tcp_ar_a/149/00014928/s004_2016_01_15/00014928_s004_t007.edf': b'0 00014928 F 01-JAN-1933 00014928 Age:83 ', # noqa E501
}
tuh = mock_get_data()
###############################################################################
# We start by creating a TUH dataset. First, the class generates a description
# of the recordings in `TUH_PATH` (which is later accessible as
# `tuh.description`) without actually touching the files. This will parse
# information from file paths such as patient id, recording data, etc and should
# be really fast. Afterwards, the files are sorted chronologically by year,
# month, day, patient id, recording session and segment.
# In the following, a subset of the description corresponding to `recording_ids`
# is used.
# Afterwards, the files will be iterated a second time, slower than before.
# The files are now actually touched. Additional information about subjects
# like age and gender are parsed directly from the EDF file header. If existent,
# the physician report is added to the description. Furthermore, the recordings
# are read with `mne.io.read_raw_edf` with `preload=False`. Finally, we will get
# a `BaseConcatDataset` of `BaseDatasets` each holding a single
# `nme.io.Raw` which is fully compatible with other braindecode functionalities.
# Uncomment the lines below to actually run this code on real data.
# tuh = TUH(
# path=<TUH_PATH>, # please insert actual path to data here
# recording_ids=None,
# target_name=None,
# preload=False,
# add_physician_reports=False,
# )
###############################################################################
# We can easily create descriptive statistics using the description `DataFrame`,
# for example an age histogram split by gender of patients.
fig, ax = plt.subplots(1, 1, figsize=(15, 5))
genders = tuh.description.gender.unique()
x = [tuh.description.age[tuh.description.gender == g] for g in genders]
ax.hist(
x=x,
stacked=True,
bins=np.arange(100, dtype=int),
alpha=.5,
)
ax.legend(genders)
ax.set_xlabel('Age [years]')
ax.set_ylabel('Count')
###############################################################################
# Next, we will perform some preprocessing steps. First, we will do some
# selection of available recordings based on the duration. We will select those
# recordings, that have at least five minutes duration. Data is not loaded here.
tmin = 5 * 60
tmax = None
tuh = select_by_duration(tuh, tmin, tmax)
###############################################################################
# Next, we will discard all recordings that have an incomplete channel
# configuration (wrt the channels that we are interested in, i.e. the 21
# channels of the international 10-20-placement). The dataset is subdivided into
# recordings with 'le' and 'ar' reference which we will have to consider. Data
# is not loaded here.
short_ch_names = sorted([
'A1', 'A2',
'FP1', 'FP2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2',
'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'FZ', 'CZ', 'PZ'])
ar_ch_names = sorted([
'EEG A1-REF', 'EEG A2-REF',
'EEG FP1-REF', 'EEG FP2-REF', 'EEG F3-REF', 'EEG F4-REF', 'EEG C3-REF',
'EEG C4-REF', 'EEG P3-REF', 'EEG P4-REF', 'EEG O1-REF', 'EEG O2-REF',
'EEG F7-REF', 'EEG F8-REF', 'EEG T3-REF', 'EEG T4-REF', 'EEG T5-REF',
'EEG T6-REF', 'EEG FZ-REF', 'EEG CZ-REF', 'EEG PZ-REF'])
le_ch_names = sorted([
'EEG A1-LE', 'EEG A2-LE',
'EEG FP1-LE', 'EEG FP2-LE', 'EEG F3-LE', 'EEG F4-LE', 'EEG C3-LE',
'EEG C4-LE', 'EEG P3-LE', 'EEG P4-LE', 'EEG O1-LE', 'EEG O2-LE',
'EEG F7-LE', 'EEG F8-LE', 'EEG T3-LE', 'EEG T4-LE', 'EEG T5-LE',
'EEG T6-LE', 'EEG FZ-LE', 'EEG CZ-LE', 'EEG PZ-LE'])
assert len(short_ch_names) == len(ar_ch_names) == len(le_ch_names)
ar_ch_mapping = {ch_name: short_ch_name for ch_name, short_ch_name in zip(
ar_ch_names, short_ch_names)}
le_ch_mapping = {ch_name: short_ch_name for ch_name, short_ch_name in zip(
le_ch_names, short_ch_names)}
ch_mapping = {'ar': ar_ch_mapping, 'le': le_ch_mapping}
tuh = select_by_channels(tuh, ch_mapping)
###############################################################################
# Next, we will chain several preprocessing steps that are realized through
# `mne`. Data will be loaded by the first preprocessor that has a mention of it
# in brackets:
#
# #. crop the recordings to a region of interest
# #. re-reference all recordings to 'ar' (requires load)
# #. rename channels to short channel names
# #. pick channels of interest
# #. scale signals to microvolts (requires load)
# #. resample recordings to a common frequency (requires load)
# #. create compute windows
tmin = 1 * 60
tmax = 6 * 60
sfreq = 100
preprocessors = [
Preprocessor(custom_crop, tmin=tmin, tmax=tmax, include_tmax=False,
apply_on_array=False),
Preprocessor('set_eeg_reference', ref_channels='average', ch_type='eeg'),
Preprocessor(custom_rename_channels, mapping=ch_mapping,
apply_on_array=False),
Preprocessor('pick_channels', ch_names=short_ch_names, ordered=True),
Preprocessor(lambda x: x * 1e6),
Preprocessor('resample', sfreq=sfreq),
]
###############################################################################
# The preprocessing loop works as follows. For every recording, we apply the
# preprocessors as defined above. Then, we update the description of the rec,
# since we have altered the duration, the reference, and the sampling
# frequency. Afterwards, we store each recording to a unique subdirectory that
# is named corresponding to the rec id. To save memory we delete the raw
# dataset after storing. This gives us the option to try different windowing
# parameters after reloading the data.
OUT_PATH = tempfile.mkdtemp()  # please insert actual output directory here
# Split the concatenated dataset into one subset per recording so each can be
# preprocessed and serialized independently.
tuh_splits = tuh.split([[i] for i in range(len(tuh.datasets))])
for rec_i, tuh_subset in tuh_splits.items():
    preprocess(tuh_subset, preprocessors)
    # update description of the recording(s): preprocessing changed the
    # sampling frequency, the reference, and the number of samples
    tuh_subset.set_description({
        'sfreq': len(tuh_subset.datasets) * [sfreq],
        'reference': len(tuh_subset.datasets) * ['ar'],
        'n_samples': [len(d) for d in tuh_subset.datasets],
    }, overwrite=True)
    # create one directory for every recording (named after its id)
    rec_path = os.path.join(OUT_PATH, str(rec_i))
    if not os.path.exists(rec_path):
        os.makedirs(rec_path)
    tuh_subset.save(rec_path)
    # save memory by deleting raw recording after it has been written to disk
    del tuh_subset.datasets[0].raw
###############################################################################
# We reload the preprocessed data again in a lazy fashion (`preload=False`).
tuh_loaded = load_concat_dataset(OUT_PATH, preload=False)
###############################################################################
# We generate compute windows. The resulting dataset is now ready to be used
# for model training.
# 1000 samples at the resampled sfreq of 100 Hz correspond to 10 s windows;
# stride == size, so consecutive windows do not overlap.
window_size_samples = 1000
window_stride_samples = 1000
# generate compute windows here and store them to disk
tuh_windows = create_fixed_length_windows(
    tuh_loaded,
    start_offset_samples=0,
    stop_offset_samples=None,  # None: window until the end of each recording
    window_size_samples=window_size_samples,
    window_stride_samples=window_stride_samples,
    drop_last_window=False
)
# store the number of windows required for loading later on
tuh_windows.set_description({
    "n_windows": [len(d) for d in tuh_windows.datasets]})
| 39.966102 | 195 | 0.653605 |
3bfae0d38025f9ed469b1477352c2cbb4d204cae | 9,752 | py | Python | tests/metrics/test_metrics.py | HiromuHota/emmental | eb1e29b3406fc0ac301b2d29e06db5e6774eb9f0 | [
"MIT"
] | null | null | null | tests/metrics/test_metrics.py | HiromuHota/emmental | eb1e29b3406fc0ac301b2d29e06db5e6774eb9f0 | [
"MIT"
] | null | null | null | tests/metrics/test_metrics.py | HiromuHota/emmental | eb1e29b3406fc0ac301b2d29e06db5e6774eb9f0 | [
"MIT"
] | null | null | null | import logging
import numpy as np
from emmental.metrics.accuracy import accuracy_scorer
from emmental.metrics.accuracy_f1 import accuracy_f1_scorer
from emmental.metrics.fbeta import f1_scorer, fbeta_scorer
from emmental.metrics.matthews_correlation import (
matthews_correlation_coefficient_scorer,
)
from emmental.metrics.mean_squared_error import mean_squared_error_scorer
from emmental.metrics.pearson_correlation import pearson_correlation_scorer
from emmental.metrics.pearson_spearman import pearson_spearman_scorer
from emmental.metrics.precision import precision_scorer
from emmental.metrics.recall import recall_scorer
from emmental.metrics.roc_auc import roc_auc_scorer
from emmental.metrics.spearman_correlation import spearman_correlation_scorer
from tests.utils import isequal
def test_accuracy(caplog):
    """Unit test of accuracy_scorer."""
    caplog.set_level(logging.INFO)

    # Gold labels, both as hard classes and as per-class probabilities.
    hard_golds = np.array([0, 1, 0, 1, 0, 1])
    soft_golds = np.array(
        [[0.6, 0.4], [0.1, 0.9], [0.7, 0.3], [0.2, 0.8], [0.9, 0.1], [0.4, 0.6]]
    )
    # Model outputs, as probabilities and as hard predictions.
    pred_probs = np.array(
        [[0.9, 0.1], [0.6, 0.4], [1.0, 0.0], [0.8, 0.2], [0.6, 0.4], [0.05, 0.95]]
    )
    pred_labels = np.array([0, 0, 0, 0, 0, 1])

    # Scoring from hard predictions and from probabilities gives the same ratio.
    assert isequal(accuracy_scorer(hard_golds, None, pred_labels), {"accuracy": 0.6666666666666666})
    assert isequal(accuracy_scorer(hard_golds, pred_probs, None), {"accuracy": 0.6666666666666666})
    # Top-2 accuracy is perfect on this toy data.
    assert isequal(accuracy_scorer(hard_golds, pred_probs, pred_labels, topk=2), {"accuracy@2": 1.0})
    # Probabilistic golds behave the same as hard golds.
    assert isequal(accuracy_scorer(soft_golds, None, pred_labels), {"accuracy": 0.6666666666666666})
    assert isequal(accuracy_scorer(soft_golds, pred_probs, pred_labels, topk=2), {"accuracy@2": 1.0})
    # With normalize=False raw counts are returned instead of ratios.
    assert isequal(accuracy_scorer(hard_golds, None, pred_labels, normalize=False), {"accuracy": 4})
    assert isequal(
        accuracy_scorer(soft_golds, pred_probs, pred_labels, topk=2, normalize=False),
        {"accuracy@2": 6},
    )
def test_precision(caplog):
    """Unit test of precision_scorer."""
    caplog.set_level(logging.INFO)

    hard_golds = np.array([0, 1, 0, 1, 0, 1])
    soft_golds = np.array(
        [[0.6, 0.4], [0.1, 0.9], [0.7, 0.3], [0.2, 0.8], [0.9, 0.1], [0.4, 0.6]]
    )
    pred_labels = np.array([0, 0, 0, 0, 0, 1])

    # Precision w.r.t. either class, computed from hard golds...
    assert isequal(precision_scorer(hard_golds, None, pred_labels, pos_label=1), {"precision": 1})
    assert isequal(precision_scorer(hard_golds, None, pred_labels, pos_label=0), {"precision": 0.6})
    # ...and from probabilistic golds, with identical results.
    assert isequal(precision_scorer(soft_golds, None, pred_labels, pos_label=1), {"precision": 1})
    assert isequal(precision_scorer(soft_golds, None, pred_labels, pos_label=0), {"precision": 0.6})
def test_recall(caplog):
    """Unit test of recall_scorer."""
    caplog.set_level(logging.INFO)

    hard_golds = np.array([0, 1, 0, 1, 0, 1])
    soft_golds = np.array(
        [[0.6, 0.4], [0.1, 0.9], [0.7, 0.3], [0.2, 0.8], [0.9, 0.1], [0.4, 0.6]]
    )
    pred_labels = np.array([0, 0, 0, 0, 0, 1])

    # Recall w.r.t. either class, computed from hard golds...
    assert isequal(recall_scorer(hard_golds, None, pred_labels, pos_label=1), {"recall": 0.3333333333333333})
    assert isequal(recall_scorer(hard_golds, None, pred_labels, pos_label=0), {"recall": 1})
    # ...and from probabilistic golds, with identical results.
    assert isequal(recall_scorer(soft_golds, None, pred_labels, pos_label=1), {"recall": 0.3333333333333333})
    assert isequal(recall_scorer(soft_golds, None, pred_labels, pos_label=0), {"recall": 1})
def test_f1(caplog):
    """Unit test of f1_scorer."""
    caplog.set_level(logging.INFO)

    hard_golds = np.array([0, 1, 0, 1, 0, 1])
    soft_golds = np.array(
        [[0.6, 0.4], [0.1, 0.9], [0.7, 0.3], [0.2, 0.8], [0.9, 0.1], [0.4, 0.6]]
    )
    pred_labels = np.array([0, 0, 0, 0, 0, 1])

    # F1 w.r.t. either class, computed from hard golds...
    assert isequal(f1_scorer(hard_golds, None, pred_labels, pos_label=1), {"f1": 0.5})
    assert isequal(f1_scorer(hard_golds, None, pred_labels, pos_label=0), {"f1": 0.7499999999999999})
    # ...and from probabilistic golds, with identical results.
    assert isequal(f1_scorer(soft_golds, None, pred_labels, pos_label=1), {"f1": 0.5})
    assert isequal(f1_scorer(soft_golds, None, pred_labels, pos_label=0), {"f1": 0.7499999999999999})
def test_fbeta(caplog):
    """Unit test of fbeta_scorer."""
    caplog.set_level(logging.INFO)

    hard_golds = np.array([0, 1, 0, 1, 0, 1])
    soft_golds = np.array(
        [[0.6, 0.4], [0.1, 0.9], [0.7, 0.3], [0.2, 0.8], [0.9, 0.1], [0.4, 0.6]]
    )
    pred_labels = np.array([0, 0, 0, 0, 0, 1])

    # F-beta with beta=2 weights recall higher than precision.
    assert isequal(fbeta_scorer(hard_golds, None, pred_labels, pos_label=1, beta=2), {"f2": 0.3846153846153846})
    assert isequal(fbeta_scorer(hard_golds, None, pred_labels, pos_label=0, beta=2), {"f2": 0.8823529411764706})
    # Probabilistic golds give identical results.
    assert isequal(fbeta_scorer(soft_golds, None, pred_labels, pos_label=1, beta=2), {"f2": 0.3846153846153846})
    assert isequal(fbeta_scorer(soft_golds, None, pred_labels, pos_label=0, beta=2), {"f2": 0.8823529411764706})
def test_matthews_corrcoef(caplog):
    """Unit test of matthews_correlation_coefficient_scorer."""
    caplog.set_level(logging.INFO)

    hard_golds = np.array([0, 1, 0, 1, 0, 1])
    pred_labels = np.array([0, 0, 0, 0, 0, 1])

    assert isequal(
        matthews_correlation_coefficient_scorer(hard_golds, None, pred_labels),
        {"matthews_corrcoef": 0.4472135954999579},
    )
def test_mean_squared_error(caplog):
    """Unit test of mean_squared_error_scorer."""
    caplog.set_level(logging.INFO)

    # Single-output regression.
    targets = np.array([3, -0.5, 2, 7])
    estimates = np.array([2.5, 0.0, 2, 8])
    assert isequal(mean_squared_error_scorer(targets, estimates, None), {"mean_squared_error": 0.375})

    # Multi-output regression (error is averaged over all entries).
    targets = np.array([[0.5, 1], [-1, 1], [7, -6]])
    estimates = np.array([[0, 2], [-1, 2], [8, -5]])
    assert isequal(
        mean_squared_error_scorer(targets, estimates, None),
        {"mean_squared_error": 0.7083333333333334},
    )
def test_pearson_correlation(caplog):
    """Unit test of pearson_correlation_scorer."""
    caplog.set_level(logging.INFO)

    targets = np.array([1, 0, 1, 0, 1, 0])
    scores = np.array([0.8, 0.6, 0.9, 0.7, 0.7, 0.2])

    assert isequal(
        pearson_correlation_scorer(targets, scores, None),
        {"pearson_correlation": 0.6764814252025461},
    )
    # With return_pvalue=True the p-value is reported alongside the statistic.
    assert isequal(
        pearson_correlation_scorer(targets, scores, None, return_pvalue=True),
        {
            "pearson_correlation": 0.6764814252025461,
            "pearson_pvalue": 0.14006598491201777,
        },
    )
def test_spearman_correlation(caplog):
    """Unit test of spearman_correlation_scorer."""
    caplog.set_level(logging.INFO)

    targets = np.array([1, 0, 1, 0, 1, 0])
    scores = np.array([0.8, 0.6, 0.9, 0.7, 0.7, 0.2])

    assert isequal(
        spearman_correlation_scorer(targets, scores, None),
        {"spearman_correlation": 0.7921180343813395},
    )
    # With return_pvalue=True the p-value is reported alongside the statistic.
    assert isequal(
        spearman_correlation_scorer(targets, scores, None, return_pvalue=True),
        {
            "spearman_correlation": 0.7921180343813395,
            "spearman_pvalue": 0.06033056705743058,
        },
    )
def test_pearson_spearman(caplog):
    """Unit test of pearson_spearman_scorer."""
    caplog.set_level(logging.INFO)

    targets = np.array([1, 0, 1, 0, 1, 0])
    scores = np.array([0.8, 0.6, 0.9, 0.7, 0.7, 0.2])

    assert isequal(
        pearson_spearman_scorer(targets, scores, None),
        {"pearson_spearman": 0.7342997297919428},
    )
def test_roc_auc(caplog):
    """Unit test of roc_auc_scorer."""
    caplog.set_level(logging.INFO)

    hard_golds = np.array([[1], [0], [1], [0], [1], [0]])
    soft_golds = np.array(
        [[0.4, 0.6], [0.9, 0.1], [0.3, 0.7], [0.8, 0.2], [0.1, 0.9], [0.6, 0.4]]
    )
    pred_probs = np.array(
        [[0.2, 0.8], [0.4, 0.6], [0.1, 0.9], [0.3, 0.7], [0.3, 0.7], [0.8, 0.2]]
    )
    pred_scores = np.array([[0.8], [0.6], [0.9], [0.7], [0.7], [0.2]])

    # Every combination of gold/score encodings yields the same AUC here.
    expected = {"roc_auc": 0.9444444444444444}
    assert isequal(roc_auc_scorer(hard_golds, pred_probs, None), expected)
    assert isequal(roc_auc_scorer(soft_golds, pred_probs, None), expected)
    assert isequal(roc_auc_scorer(hard_golds, pred_scores, None), expected)
    assert isequal(roc_auc_scorer(soft_golds, pred_scores, None), expected)

    # With only one class present the AUC is undefined (NaN).
    single_class_golds = np.array([1, 1, 1, 1, 1, 1])
    assert isequal(roc_auc_scorer(single_class_golds, pred_probs, None), {"roc_auc": float("nan")})
def test_accuracy_f1(caplog):
    """Unit test of accuracy_f1_scorer."""
    caplog.set_level(logging.INFO)

    hard_golds = np.array([0, 1, 0, 1, 0, 1])
    soft_golds = np.array(
        [[0.6, 0.4], [0.1, 0.9], [0.7, 0.3], [0.2, 0.8], [0.9, 0.1], [0.4, 0.6]]
    )
    pred_labels = np.array([0, 0, 0, 0, 0, 1])

    # Without an explicit pos_label the result matches pos_label=1 on this data.
    assert isequal(accuracy_f1_scorer(hard_golds, None, pred_labels), {"accuracy_f1": 0.5833333333333333})
    assert isequal(accuracy_f1_scorer(hard_golds, None, pred_labels, pos_label=1), {"accuracy_f1": 0.5833333333333333})
    assert isequal(accuracy_f1_scorer(hard_golds, None, pred_labels, pos_label=0), {"accuracy_f1": 0.7083333333333333})
    # Probabilistic golds give identical results.
    assert isequal(accuracy_f1_scorer(soft_golds, None, pred_labels), {"accuracy_f1": 0.5833333333333333})
| 31.869281 | 85 | 0.658839 |
3bfbc45da374cdb7d8360321c18d5a979fdef4e1 | 3,986 | py | Python | vesper/command/job_logging_manager.py | HaroldMills/NFC | 356b2234dc3c7d180282a597fa1e039ae79e03c6 | [
"MIT"
] | null | null | null | vesper/command/job_logging_manager.py | HaroldMills/NFC | 356b2234dc3c7d180282a597fa1e039ae79e03c6 | [
"MIT"
] | 1 | 2015-01-12T12:41:29.000Z | 2015-01-12T12:41:29.000Z | vesper/command/job_logging_manager.py | HaroldMills/NFC | 356b2234dc3c7d180282a597fa1e039ae79e03c6 | [
"MIT"
] | null | null | null | """Module containing class `JobLoggingManager`."""
from collections import defaultdict
from logging import FileHandler, Handler
from logging.handlers import QueueHandler, QueueListener
from multiprocessing import Queue
import logging
import vesper.util.logging_utils as logging_utils
import vesper.util.os_utils as os_utils
# TODO: Add record count fields to the `Job` model class, and modify
# the record counts handler to update the fields both while a job is
# running and upon completion.
def shut_down_logging(self):
    """Stop the job's logging machinery.

    Stops ``self._listener`` (presumably a ``logging.handlers.QueueListener``
    created elsewhere in this class -- confirm at the construction site) and
    then flushes and closes all handlers via ``logging.shutdown()``.
    """
    # Tell logging listener to terminate, and wait for it to do so.
    self._listener.stop()
    logging.shutdown()
| 34.068376 | 76 | 0.659809 |
3bfc3d39f5d7c8e9a54f0fc8a5c3d30aa858a4b2 | 4,837 | py | Python | evaluation/metrics.py | victorperezpiqueras/MONRP | f20bbde8895867d37b735dec7a5fd95ee90fadf6 | [
"MIT"
] | null | null | null | evaluation/metrics.py | victorperezpiqueras/MONRP | f20bbde8895867d37b735dec7a5fd95ee90fadf6 | [
"MIT"
] | 2 | 2021-05-05T14:41:24.000Z | 2022-01-18T09:08:06.000Z | evaluation/metrics.py | victorperezpiqueras/MONRP | f20bbde8895867d37b735dec7a5fd95ee90fadf6 | [
"MIT"
] | null | null | null | import math
from typing import List
import numpy as np
from datasets.Dataset import Dataset
from models.Solution import Solution
| 30.23125 | 92 | 0.64999 |
3bfc6525bf99e8218a93653bc016cb8baae15ea1 | 3,803 | py | Python | networkx/classes/tests/test_digraph_historical.py | KyleBenson/networkx | 26ccb4a380ba0e5304d7bbff53eb9859c6e4c93a | [
"BSD-3-Clause"
] | null | null | null | networkx/classes/tests/test_digraph_historical.py | KyleBenson/networkx | 26ccb4a380ba0e5304d7bbff53eb9859c6e4c93a | [
"BSD-3-Clause"
] | null | null | null | networkx/classes/tests/test_digraph_historical.py | KyleBenson/networkx | 26ccb4a380ba0e5304d7bbff53eb9859c6e4c93a | [
"BSD-3-Clause"
] | 1 | 2019-01-30T17:57:36.000Z | 2019-01-30T17:57:36.000Z | #!/usr/bin/env python
"""Original NetworkX graph tests"""
from nose.tools import *
import networkx
import networkx as nx
from networkx.testing.utils import *
from historical_tests import HistoricalTests
| 34.889908 | 79 | 0.519853 |
3bfc66ab6394f443698742193984f19425d0486f | 6,325 | py | Python | older/fn_res_to_icd/fn_res_to_icd/components/res_to_icd_function.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 65 | 2017-12-04T13:58:32.000Z | 2022-03-24T18:33:17.000Z | older/fn_res_to_icd/fn_res_to_icd/components/res_to_icd_function.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 48 | 2018-03-02T19:17:14.000Z | 2022-03-09T22:00:38.000Z | older/fn_res_to_icd/fn_res_to_icd/components/res_to_icd_function.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 95 | 2018-01-11T16:23:39.000Z | 2022-03-21T11:34:29.000Z | # -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2019. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
import logging
import re
import sys
import requests
from bs4 import BeautifulSoup as bsoup
from resilient_circuits import ResilientComponent, function, handler
from resilient_circuits import StatusMessage, FunctionResult, FunctionError
from resilient_lib import ResultPayload, readable_datetime
from resilient_lib.components.resilient_common import validate_fields
# The lowest priority an ICD ticket can have as a default setting for escalation
MIN_PRIORITY_ICD = 4
| 49.031008 | 191 | 0.591146 |
ce017638896c04f18c2cb7532f41f9850780cdae | 28,484 | py | Python | nimbleclient/v1/api/groups.py | prachiruparelia-hpe/nimble-python-sdk | a3e99d89e647291caf7936300ae853d21d94d6e5 | [
"Apache-2.0"
] | 1 | 2020-05-28T19:48:59.000Z | 2020-05-28T19:48:59.000Z | nimbleclient/v1/api/groups.py | prachiruparelia-hpe/nimble-python-sdk | a3e99d89e647291caf7936300ae853d21d94d6e5 | [
"Apache-2.0"
] | null | null | null | nimbleclient/v1/api/groups.py | prachiruparelia-hpe/nimble-python-sdk | a3e99d89e647291caf7936300ae853d21d94d6e5 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2020 Hewlett Packard Enterprise Development LP
#
# This file was auto-generated by the Python SDK generator; DO NOT EDIT.
#
from ...resource import Resource, Collection
from ...exceptions import NimOSAPIOperationUnsupported
| 44.5759 | 179 | 0.584082 |
ce02069f82a4f0531c7597c44775348bc1d10f18 | 309 | py | Python | sdk_client/scripts/cards2json.py | victorlacorte/MTG-SDK-Client | 33fdbfbf545e9f3961369b123a2f7fe783ce8f12 | [
"DOC"
] | null | null | null | sdk_client/scripts/cards2json.py | victorlacorte/MTG-SDK-Client | 33fdbfbf545e9f3961369b123a2f7fe783ce8f12 | [
"DOC"
] | null | null | null | sdk_client/scripts/cards2json.py | victorlacorte/MTG-SDK-Client | 33fdbfbf545e9f3961369b123a2f7fe783ce8f12 | [
"DOC"
] | null | null | null | import json
import mtgsdk as mtg
magic_sets = ('grn',)
if __name__ == '__main__':
main()
| 19.3125 | 62 | 0.585761 |
ce027805c06db61c04f315262615e01faa30ae5a | 18,148 | py | Python | metview/param.py | ecmwf/metview-python | 641e57716ac1bb105394dd3a871ccd1e5ed60b26 | [
"Apache-2.0"
] | 88 | 2018-06-08T14:21:18.000Z | 2022-03-31T12:25:59.000Z | metview/param.py | ecmwf/metview-python | 641e57716ac1bb105394dd3a871ccd1e5ed60b26 | [
"Apache-2.0"
] | 37 | 2018-11-01T09:50:07.000Z | 2022-02-24T12:20:16.000Z | metview/param.py | ecmwf/metview-python | 641e57716ac1bb105394dd3a871ccd1e5ed60b26 | [
"Apache-2.0"
] | 26 | 2018-06-08T14:21:28.000Z | 2022-01-28T12:55:16.000Z | # (C) Copyright 2017- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import logging
from metview import dataset
import re
import pandas as pd
import metview as mv
from metview.indexer import GribIndexer
# logging.basicConfig(level=logging.INFO, format="%(levelname)s - %(message)s")
# logging.basicConfig(level=logging.DEBUG, format="%(levelname)s - %(message)s")
LOG = logging.getLogger(__name__)
PANDAS_ORI_OPTIONS = {}
class ParamDesc:
    """Descriptor for a single parameter: metadata and level information.

    NOTE(review): the ``name`` constructor argument is currently unused
    (its assignment is commented out) -- confirm whether that is intended.
    """

    def __init__(self, name):
        self.db = None  # presumably attached later by the owner -- confirm
        # self.name = name
        self.md = {}  # metadata mapping, populated outside __init__
        self.levels = {}  # level information, populated outside __init__
        # Identification fields start unset; likely resolved on demand -- confirm
        self._short_name = None
        self._param_id = None
        self._long_name = None
        self._units = None
ce03e73e55d15e74f86d8e0bd047fcc03b6a00ce | 316 | py | Python | flask_edu_1/file1.py | fulkgl/Flask_edu_1 | cccb70742949577fce5ed279a9d70e6348465643 | [
"MIT"
] | 1 | 2019-12-16T21:55:53.000Z | 2019-12-16T21:55:53.000Z | flask_edu_1/file1.py | fulkgl/Flask_edu_1 | cccb70742949577fce5ed279a9d70e6348465643 | [
"MIT"
] | null | null | null | flask_edu_1/file1.py | fulkgl/Flask_edu_1 | cccb70742949577fce5ed279a9d70e6348465643 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: UTF-8
'''!
module description
@author <A href="email:fulkgl@gmail.com">George L Fulk</A>
'''
__version__ = 0.01
def main():
    """Print a greeting to stdout and report success.

    Returns:
        int: 0, the conventional success status code.
    """
    exit_status = 0
    print("Hello world")
    return exit_status
if __name__ == "__main__":
# command line entry point
main()
# END #
| 13.166667 | 58 | 0.598101 |
ce07948f6f31a33c9447bac9ba7da84e0cc0cfdb | 25 | py | Python | write_grok/__init__.py | namedyangfan/Python_practice | 7f7394d82bb5afc13b039eec286b9485a775ae39 | [
"MIT"
] | null | null | null | write_grok/__init__.py | namedyangfan/Python_practice | 7f7394d82bb5afc13b039eec286b9485a775ae39 | [
"MIT"
] | null | null | null | write_grok/__init__.py | namedyangfan/Python_practice | 7f7394d82bb5afc13b039eec286b9485a775ae39 | [
"MIT"
] | null | null | null | from .write_grok import * | 25 | 25 | 0.8 |
ce079ba915fb3b960bd7c0c9b579e190a8341d22 | 1,883 | py | Python | backend/usuarios/views.py | alfmorais/pi-univesp | 45a149e9a404f7b0238b84eb335db7111cd15ebb | [
"MIT"
] | 1 | 2021-12-24T20:32:51.000Z | 2021-12-24T20:32:51.000Z | backend/usuarios/views.py | alfmorais/pi-univesp | 45a149e9a404f7b0238b84eb335db7111cd15ebb | [
"MIT"
] | null | null | null | backend/usuarios/views.py | alfmorais/pi-univesp | 45a149e9a404f7b0238b84eb335db7111cd15ebb | [
"MIT"
] | null | null | null | from hashlib import sha256
from django.http import HttpResponse
from django.shortcuts import redirect, render
from .models import Usuarios
| 25.445946 | 70 | 0.627722 |
ce07ca9cf794023383e230a89ff64c045e2a41a9 | 2,737 | py | Python | textclf/tester/dl_tester.py | lswjkllc/textclf | e4e7504989dd5d39c9376eafda1abc580c053913 | [
"MIT"
] | 146 | 2020-02-20T02:29:55.000Z | 2022-01-21T09:49:40.000Z | textclf/tester/dl_tester.py | lswjkllc/textclf | e4e7504989dd5d39c9376eafda1abc580c053913 | [
"MIT"
] | 4 | 2020-03-08T03:24:16.000Z | 2021-03-26T05:34:09.000Z | textclf/tester/dl_tester.py | lswjkllc/textclf | e4e7504989dd5d39c9376eafda1abc580c053913 | [
"MIT"
] | 16 | 2020-02-26T04:45:40.000Z | 2021-05-08T03:52:38.000Z | import torch
from transformers import BertTokenizer
from .base_tester import Tester
from textclf.utils.raw_data import create_tokenizer
from textclf.utils.create import create_instance
from textclf.config import DLTesterConfig
from textclf.data.dictionary import Dictionary
| 38.549296 | 85 | 0.652905 |
ce0890d24a487d376e2478b4bdab9793e27e76ac | 3,303 | py | Python | scripts/pughpore/randomwalk/get_D_old.py | jhwnkim/nanopores | 98b3dbb5d36464fbdc03f59d224d38e4255324ce | [
"MIT"
] | 8 | 2016-09-07T01:59:31.000Z | 2021-03-06T12:14:31.000Z | scripts/pughpore/randomwalk/get_D_old.py | jhwnkim/nanopores | 98b3dbb5d36464fbdc03f59d224d38e4255324ce | [
"MIT"
] | null | null | null | scripts/pughpore/randomwalk/get_D_old.py | jhwnkim/nanopores | 98b3dbb5d36464fbdc03f59d224d38e4255324ce | [
"MIT"
] | 4 | 2017-12-06T17:43:01.000Z | 2020-05-01T05:41:14.000Z | import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt
import numpy as np
import os
from nanopores.tools import fields
from scipy.interpolate import interp1d
HOME = os.path.expanduser("~")
DATADIR = os.path.join(HOME, "Dropbox", "nanopores", "fields")
fields.set_dir(DATADIR)
data = fields.get_fields("pugh_diff3D_cross", bulkbc=True, rMolecule=2.0779)
x = [z[0] for z in data["x"]]
data, x = fields._sorted(data, x)
eps=5e-3
x_=x[:]
#x_.extend([1.,1.+eps,1.+2*eps,1.+3*eps])
x.extend([(x[-1]+1.)/2.,1.,1.+eps,1.+2*eps,1.+3*eps,1.+4*eps,1.+5*eps])
dstr = ["x", "y", "z"]
Dxx = [D[0][0] for D in data["D"]]
Dyy = [D[1][1] for D in data["D"]]
Dzz = [D[2][2] for D in data["D"]]
Dxx_ = [D[0][0] for D in data["D"]]
Dyy_ = [D[1][1] for D in data["D"]]
Dzz_ = [D[2][2] for D in data["D"]]
Dxx.extend([0.,0.,0.,0.,0.,0.,0.])
Dyy.extend([Dyy[-1]/2.,0.,0.,0.,0.,0.,0.])
Dzz.extend([Dzz[-1]/2.,0.,0.,0.,0.,0.,0.])
#Dxx_.extend([0.,0.,0.,0.])
#Dyy_.extend([0.,0.,0.,0.])
#Dzz_.extend([0.,0.,0.,0.])
Dxx=smooth5(smooth3(Dxx))
Dyy=smooth5(smooth3(Dyy))
Dzz=smooth5(smooth3(Dzz))
# 1-D interpolants of the (smoothed) diagonal diffusivity components over x.
Dx = interp1d(x,Dxx)
Dy = interp1d(x,Dyy)
Dz = interp1d(x,Dzz)
# Central finite differences approximate dD/dx at the interior points; the
# derivative is pinned to 0. at both endpoints.
DDxx = [0.]+[(Dxx[i+1]-Dxx[i-1])/(x[i+1]-x[i-1]) for i in range(1,len(x)-1)]+[0.]
DDyy = [0.]+[(Dyy[i+1]-Dyy[i-1])/(x[i+1]-x[i-1]) for i in range(1,len(x)-1)]+[0.]
DDzz = [0.]+[(Dzz[i+1]-Dzz[i-1])/(x[i+1]-x[i-1]) for i in range(1,len(x)-1)]+[0.]
# Interpolants of the derivative profiles.
dDx = interp1d(x,DDxx)
dDy = interp1d(x,DDyy)
dDz = interp1d(x,DDzz)
if __name__=='__main__':
xc=np.linspace(0.,1.,100)
plt.plot(x_,Dxx_,color='blue',linestyle=':')
plt.scatter(x_,Dxx_,color='blue')
plt.scatter(x,Dxx,color='blue')
#plt.plot(x,Dxx,color='blue')
plt.plot(xc,Dx(xc),color='blue',label=r"$D_{%s%s}$" % (dstr[0], dstr[0]))
plt.scatter(x,DDxx,color='blue')
#plt.plot(x,DDxx,color='blue')
plt.plot(xc,dDx(xc),color='blue')
plt.plot(x_,Dyy_,color='red',linestyle=':')
plt.scatter(x_,Dyy_,color='red')
plt.scatter(x,Dyy,color='red')
#plt.plot(x,Dyy,color='red')
plt.plot(xc,Dy(xc),color='red',label=r"$D_{%s%s}$" % (dstr[1], dstr[1]))
plt.scatter(x,DDyy,color='red')
#plt.plot(x,DDyy,color='red')
plt.plot(xc,dDy(xc),color='red')
plt.plot(x_,Dzz_,color='green',linestyle=':')
plt.scatter(x_,Dzz_,color='green')
plt.scatter(x,Dzz,color='green')
#plt.plot(x,Dzz,color='green')
plt.plot(xc,Dz(xc),color='green',label=r"$D_{%s%s}$" % (dstr[2], dstr[2]))
plt.scatter(x,DDzz,color='green')
#plt.plot(x,DDzz,color='green')
plt.plot(xc,dDz(xc),color='green')
plt.xlabel('distance from pore center [nm]')
plt.ylabel('diffusivity relative to bulk')
plt.legend(loc='lower left')
plt.tight_layout()
plt.savefig('get_new.png')
| 28.230769 | 81 | 0.599758 |
ce08db747b526cc7a8cef1e5d71b70335cd56cae | 7,885 | py | Python | scripts/unseen_pairs_prepare.py | dhh1995/SCL | 6b481709c11acc10909fed2105a7b485dab0887c | [
"MIT"
] | 32 | 2020-07-10T04:50:03.000Z | 2021-11-26T16:57:01.000Z | scripts/unseen_pairs_prepare.py | dhh1995/SCL | 6b481709c11acc10909fed2105a7b485dab0887c | [
"MIT"
] | 5 | 2020-07-10T07:55:34.000Z | 2021-11-24T02:45:32.000Z | scripts/unseen_pairs_prepare.py | dhh1995/SCL | 6b481709c11acc10909fed2105a7b485dab0887c | [
"MIT"
] | 3 | 2020-08-20T15:10:35.000Z | 2022-02-20T16:31:01.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : unseen_pairs_prepare.py
# Author : Honghua Dong
# Email : dhh19951@gmail.com
# Date : 02/04/2019
#
# Distributed under terms of the MIT license.
'''
To split dataset into {train/val/test}_split_{rel}_{attr}_{args}.pkl
It will produce a set of indexes stored in pkls and can be used by specifying
both --index-file-dir and --split args of the main.py program.
[NOTE] It may require more examples to fulfill the 6k,2k,2k split regime.
# Usage
python3 unseen_pairs_prepare.py $DATA_DIR $NUM -r $REL(s) -a $ATTR(s)
# [NOTE] '-indenp' can prepare all required split for a table result
# (only held-out a certain pair)
'''
import argparse
import collections
import numpy as np
import os
import os.path as osp
import pickle
from utils import get_rule_pairs_from_meta_matrix
# from IPython import embed
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', type=str, help='the dataset file')
parser.add_argument('num', type=int, help='the dataset size')
# parser.add_argument('--task', '-t', type=str, required=True,
# choices=['center_single', 'up_down', 'left_right', 'in_out',
# 'distribute_four', 'distribute_nine'], help='the task')
parser.add_argument('--relations', '-r', type=int, nargs='+', required=True,
help='the held-out relations for (rel, attr) pairs, 0:Const, 1:Pro, 2:Arith, 3:Union')
parser.add_argument('--attributes', '-a', type=int, nargs='+', required=True,
help='the helo-out attributes for (rel, attr) pairs, 0:Num, 1:Pos, 2:Type, 3:Size, 4:Color')
parser.add_argument('--list-format', '-lf', action='store_true',
help='regard the rels and attrs as list of pairs, rather the tensor prod, if True')
parser.add_argument('--all-belong-to', '-all', action='store_true',
help='split to val when all (instead of any) rule_pairs of data belong to held-out-pairs, if True')
parser.add_argument('--dump-dir', '-du', type=str, required=True,
help='the dump dir for inds')
parser.add_argument('--use-visual-inputs', '-v', action='store_true',
help='Use visual inputs if True')
parser.add_argument('--independent-split', '-indenp', action='store_true',
help='regard the held-out pairs independently, and split for each of them')
# exclude
parser.add_argument('--exclude-relations', '-er', type=int, nargs='+', default=[],
help='the exclude relations for (rel, attr) pairs, 0:Const, 1:Pro, 2:Arith, 3:Union')
parser.add_argument('--exclude-attributes', '-ea', type=int, nargs='+', default=[],
help='the exclude attributes for (rel, attr) pairs, 0:Num, 1:Pos, 2:Type, 3:Size, 4:Color')
parser.add_argument('--exclude-list-format', '-elf', action='store_true',
help='regard the ex-rels and ex-attrs as list of pairs, rather the tensor prod, if True')
args = parser.parse_args()
# Fixed 6/2/2 partition of the 10 original data folds (indices 0-9),
# matching the "6k,2k,2k split regime" mentioned in the module docstring.
ORIGIN_DATA_SPLIT = {
    'train': [0, 1, 2, 3, 4, 5],
    'val': [6, 7],
    'test': [8, 9],
}
# relations are represented by a 8x9 meta matrix
# Meta matrix format
# ["Constant", "Progression", "Arithmetic", "Distribute_Three",
# "Number", "Position", "Type", "Size", "Color"]
# check whether this data-point should be held out
# Script entry point; main() is defined elsewhere in this file.
if __name__ == '__main__':
    main()
| 36.50463 | 103 | 0.625491 |
ce0afbb54da9c5cda767047eb0fb4add36a18205 | 1,533 | py | Python | apis/common/models/movie.py | sunil28rana/flask-imdb-sample-project | df28655327a42c0ec28e485d64ebbc5d525275e7 | [
"MIT"
] | null | null | null | apis/common/models/movie.py | sunil28rana/flask-imdb-sample-project | df28655327a42c0ec28e485d64ebbc5d525275e7 | [
"MIT"
] | null | null | null | apis/common/models/movie.py | sunil28rana/flask-imdb-sample-project | df28655327a42c0ec28e485d64ebbc5d525275e7 | [
"MIT"
] | 1 | 2020-10-22T10:31:00.000Z | 2020-10-22T10:31:00.000Z | from datetime import datetime
from sqlalchemy import UniqueConstraint
from apis.initialization import db
| 35.651163 | 92 | 0.703849 |
ce0ca8f2fe98f3ab332870eee82d60c59dac39aa | 719 | py | Python | setup.py | DewMaple/toolkit | a1f04d1b53420c64e15f684c83acb54276031346 | [
"BSD-3-Clause"
] | null | null | null | setup.py | DewMaple/toolkit | a1f04d1b53420c64e15f684c83acb54276031346 | [
"BSD-3-Clause"
] | null | null | null | setup.py | DewMaple/toolkit | a1f04d1b53420c64e15f684c83acb54276031346 | [
"BSD-3-Clause"
] | null | null | null | # from distutils.core import setup
from setuptools import setup, find_packages
# Packaging metadata for the py-toolkit distribution (setuptools entry point).
setup(
    name='py-toolkit',
    version='0.0.3',
    # Ship every discovered package except the test tree.
    packages=find_packages(exclude=("tests",)),
    url='https://github.com/DewMaple/toolkit',
    description='python toolkit for common usage',
    author='DewMaple',
    author_email='dewmaple@gmail.com',
    license='',
    keywords=['python', "schema meta"],
    classifiers=['Programming Language :: Python :: 3.6'],
    project_urls={
        'Bug Reports': 'https://github.com/DewMaple/toolkit/issues',
        'Source': 'https://github.com/DewMaple/toolkit',
    },
    # NOTE(review): tests_require is deprecated in modern setuptools;
    # kept as-is to avoid changing packaging behavior.
    tests_require=[
        "pytest",
        "pytest-cov",
        "pytest-xprocess",
    ],
    zip_safe=True
) | 28.76 | 68 | 0.631433 |
ce0da279383850a16ffabcd3fe15ce7341142e46 | 3,934 | py | Python | ui.py | xKynn/zerox-assistant | 292525bf55cd08f930338310869dba1c25a00cf4 | [
"MIT"
] | 1 | 2021-11-07T14:49:13.000Z | 2021-11-07T14:49:13.000Z | ui.py | xKynn/pyTunes | 292525bf55cd08f930338310869dba1c25a00cf4 | [
"MIT"
] | null | null | null | ui.py | xKynn/pyTunes | 292525bf55cd08f930338310869dba1c25a00cf4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'hnc.ui'
#
# Created by: PyQt5 UI code generator 5.15.6
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
| 42.76087 | 87 | 0.543976 |
ce0dbcf0753017f4de48e972ead2feb9166619cc | 6,373 | py | Python | text_clf/data_load.py | kejunxiao/TextClf | aa1c195cb5908c32a3e6ed6891142603cb198d87 | [
"BSD-3-Clause"
] | 2 | 2018-05-13T13:00:10.000Z | 2018-05-13T13:00:12.000Z | text_clf/data_load.py | kejunxiao/TextClf | aa1c195cb5908c32a3e6ed6891142603cb198d87 | [
"BSD-3-Clause"
] | null | null | null | text_clf/data_load.py | kejunxiao/TextClf | aa1c195cb5908c32a3e6ed6891142603cb198d87 | [
"BSD-3-Clause"
] | null | null | null | """
data preprocessing and get batch
"""
import os
import re
import logging
import itertools
from collections import Counter
import numpy as np
import pandas as pd
# Smoke test: build a DataLoad (defined earlier in this module) over the
# San Francisco Crime training archive and pull a single batch to verify
# its shapes.
if __name__ == '__main__':
    # Hyper-parameters for the quick check; forced_seq_len pads/truncates
    # each example to a fixed token length.
    params = {
        'data_path': '../dataset/San_Francisco_Crime/train.csv.zip',
        'batch_size': 32,
        'num_epochs': 200,
        'forced_seq_len': 14,
        'dev_sample_rate':0.05
    }
    data = DataLoad(data_path=params['data_path'],
                    batch_size=params['batch_size'],
                    num_epochs=params['num_epochs'],
                    forced_seq_len=params['forced_seq_len'],
                    dev_sample_rate=params['dev_sample_rate'])
    batches = data.train_batch_iter()
    # Take one (x, y) batch from the iterator and report its dimensions.
    batch_x, batch_y = next(batches)
    # print(len(batches))
    print(batch_x.shape)
    print(batch_y.shape)
| 35.209945 | 100 | 0.557822 |
ce0e92d74f72ee04e6c2fbb871130425f6c911e3 | 11,629 | py | Python | pydsm/audio_weightings.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | pydsm/audio_weightings.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | pydsm/audio_weightings.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2013, Sergio Callegari
# All rights reserved.
# This file is part of PyDSM.
# PyDSM is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PyDSM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PyDSM. If not, see <http://www.gnu.org/licenses/>.
"""
Acoustic weighting functions (:mod:`pydsm.audio_weightings`)
============================================================
Some standard acoustic weighting functions.
This module includes the A-, B- and C-weightings from the
ANSI Standards S1.4-1983 and S1.42-2001.
It also includes the D-weighting from the now withdrawn IEC 537.
It also includes the F-weighting proposed by R. A. Wannamaker.
The weighting functions can be expressed either in terms of
acoustic power or in terms of signal amplitude.
The weighting functions are also available in terms of a filter-based
implementation. In this case, be careful since no normalization is
present so that the gain at 1 kHz can be arbitrary. The filter
transfer function is referred to a signal amplitude weighting.
.. currentmodule:: pydsm.audio_weightings
Weighting functions
-------------------
.. autosummary::
:toctree: generated/
a_weighting
b_weighting
c_weighting
d_weighting
f_weighting
Filter implementation of weighting functions
--------------------------------------------
.. autodata:: a_zpk
:annotation:
.. autodata:: b_zpk
:annotation:
.. autodata:: c_zpk
:annotation:
.. autodata:: d_zpk
:annotation:
.. autodata:: f_zpk
:annotation:
Normalization constants
-----------------------
.. autodata:: a_weighting_gain
:annotation:
.. autodata:: b_weighting_gain
:annotation:
.. autodata:: c_weighting_gain
:annotation:
.. autodata:: d_weighting_gain
:annotation:
.. autodata:: f_weighting_gain
:annotation:
Notes
-----
The ANSI and IEC weightings are also described in Wikipedia [1]
and summarized in some illustrative web pages such as [2]_ and
[3]_. The F-weighting is documented in [4]_.
The filter-based implementation of the F-weighting is so high-order that
evaluation of the transfer function may require special care.
.. [1] Wikipedia (http://en.wikipedia.org/wiki/A-weighting)
.. [2] Cross spectrum (http://www.cross-spectrum.com/audio/weighting.html)
.. [3] Product Technology Parters "Noise Measurement Briefing"
(http://www.ptpart.co.uk/noise-measurement-briefing/)
.. [4] Robert A. Wannamaker "Psychoacoustically Optimal Noise
Shaping," J. Audio Eng. Soc., Vol. 40 No. 7/8 1992 July/August
"""
from __future__ import division, print_function
import numpy as np
__all__ = ["a_zpk", "a_weighting", "b_zpk", "b_weighting",
"c_zpk", "c_weighting", "d_zpk", "d_weighting",
"f_zpk", "f_weighting"]
a_zpk = (2*np.pi*np.asarray([0., 0., 0., 0.]),
2*np.pi*np.asarray([-20.6, -20.6, -107.7, -739.9, -12200., -12200.]),
(2*np.pi*12200.)**2)
"""A-weighting filter in zpk form."""
b_zpk = (2*np.pi*np.asarray([0., 0., 0.]),
2*np.pi*np.asarray([-20.6, -20.6, -158.5, -12200., -12200.]),
(2*np.pi*12200.)**2)
"""B-weighting filter in zpk form."""
c_zpk = (2*np.pi*np.asarray([0., 0.]),
2*np.pi*np.asarray([-20.6, -20.6, -12200., -12200.]),
(2*np.pi*12200.)**2)
"""C-weighting filter in zpk form."""
d_zpk = (2*np.pi*np.asarray([0., -519.8+876.2j, -519.8-876.2j]),
2*np.pi*np.asarray([-282.7, -1160., -1712+2628j, -1712-2628j]),
91104.32)
"""D-weighting filter in zpk form."""
f_zpk = (2*np.pi*np.asarray([0., 0., 0.,
-580+1030j, -580-1030j,
-3180+8750j, -3180-8750j,
-3180+8750j, -3180-8750j,
-3180+8750j, -3180-8750j]),
2*np.pi*np.asarray([-180., -180., -180.,
-1630., -1630.,
-2510+3850j, -2510-3850j,
-2510+3850j, -2510-3850j,
-2510+3850j, -2510-3850j,
-2510+3850j, -2510-3850j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j,
-6620+14290j, -6620-14290j]),
1.6810544531883432e+207)
"""F-weighting filter in zpk form."""
# Note: evaluating the transfer function of f_zpk may require special care
# since the high order implies that for many frequency values both the
# numerator and the denominator take very large values (in magnitude). Taking
# the ratio of large complex values may lead to overflow in numpy even if
# individually the numerator, the denominator and the result should not
# overflow.
def a_weighting(f, normal=True, power=True):
    """Evaluate the ANSI S1.4 A-weighting curve at frequency ``f``.

    Parameters
    ----------
    f : float or array of floats
        frequency (Hz) at which the weighting is evaluated
    normal : bool
        if True, scale the result so the weighting equals 1 at 1 kHz
    power : bool
        if True, return the acoustic-power weighting (the square of the
        signal-amplitude weighting); otherwise return the amplitude one

    Returns
    -------
    w : float or array of floats
        value of the weighting function
    """
    if power:
        # The power weighting is simply the amplitude weighting squared.
        return a_weighting(f, normal, power=False)**2
    fsq = f**2
    numerator = 12200.0**2*f**4
    denominator = ((fsq+20.6**2) *
                   np.sqrt((fsq+107.7**2) *
                           (fsq+737.9**2))*(fsq+12200.0**2))
    w = numerator/denominator
    if normal:
        w = w*a_weighting_gain
    return w

a_weighting_gain = 1/a_weighting(1000, normal=False, power=False)
"""Normalization gain for the A-weighting filter: the reciprocal of the
unnormalized amplitude response at 1 kHz."""
def b_weighting(f, normal=True, power=True):
    """Evaluate the ANSI S1.4 B-weighting curve at frequency ``f``.

    Parameters
    ----------
    f : float or array of floats
        frequency (Hz) at which the weighting is evaluated
    normal : bool
        if True, scale the result so the weighting equals 1 at 1 kHz
    power : bool
        if True, return the acoustic-power weighting (the square of the
        signal-amplitude weighting); otherwise return the amplitude one

    Returns
    -------
    w : float or array of floats
        value of the weighting function
    """
    if power:
        # The power weighting is simply the amplitude weighting squared.
        return b_weighting(f, normal, power=False)**2
    fsq = f**2
    numerator = 12200.0**2*f**3
    denominator = (fsq+20.6**2) * np.sqrt(fsq+158.5**2)*(fsq+12200.0**2)
    w = numerator/denominator
    if normal:
        w = w*b_weighting_gain
    return w

b_weighting_gain = 1/b_weighting(1000, normal=False, power=False)
"""Normalization gain for the B-weighting filter: the reciprocal of the
unnormalized amplitude response at 1 kHz."""
def c_weighting(f, normal=True, power=True):
    """Evaluate the ANSI S1.4 C-weighting curve at frequency ``f``.

    Parameters
    ----------
    f : float or array of floats
        frequency (Hz) at which the weighting is evaluated
    normal : bool
        if True, scale the result so the weighting equals 1 at 1 kHz
    power : bool
        if True, return the acoustic-power weighting (the square of the
        signal-amplitude weighting); otherwise return the amplitude one

    Returns
    -------
    w : float or array of floats
        value of the weighting function
    """
    if power:
        # The power weighting is simply the amplitude weighting squared.
        return c_weighting(f, normal, power=False)**2
    fsq = f**2
    w = (12200.0**2*fsq)/((fsq+20.6**2)*(fsq+12200.0**2))
    return w*c_weighting_gain if normal else w

c_weighting_gain = 1/c_weighting(1000, normal=False, power=False)
"""Normalization gain for the C-weighting filter: the reciprocal of the
unnormalized amplitude response at 1 kHz."""
def d_weighting(f, normal=True, power=True):
    """Evaluate the IEC 537 D-weighting curve at frequency ``f``.

    Parameters
    ----------
    f : float or array of floats
        frequency (Hz) at which the weighting is evaluated
    normal : bool
        accepted for interface symmetry with the other weightings but
        ignored: this weighting is already normalized at 1 kHz
    power : bool
        if True, return the acoustic-power weighting (the square of the
        signal-amplitude weighting); otherwise return the amplitude one

    Returns
    -------
    w : float or array of floats
        value of the weighting function
    """
    if power:
        # The power weighting is simply the amplitude weighting squared.
        return d_weighting(f, normal, power=False)**2
    # h() is an auxiliary rational function defined elsewhere in this module.
    response = h(f)/((f**2+79919.29)*(f**2+1345600.0))
    return f/6.8966888496476E-5 * np.sqrt(response)

d_weighting_gain = 1.
"""Normalization gain for the D-weighting filter (unity: the curve is
already normalized at 1 kHz)."""
def f_weighting(f, normal=True, power=True):
    """Evaluate Wannamaker's psychoacoustic F-weighting at frequency ``f``.

    Parameters
    ----------
    f : float or array of floats
        frequency (Hz) at which the weighting is evaluated
    normal : bool
        if True, scale the result so the weighting equals 1 at 1 kHz
    power : bool
        if True, return the acoustic-power weighting; otherwise return
        the signal-amplitude weighting (its square root)

    Returns
    -------
    w : float or array of floats
        value of the weighting function

    Notes
    -----
    See R. A. Wannamaker, "Psychoacoustically Optimal Noise Shaping,"
    J. Audio Eng. Soc., Vol. 40 No. 7/8, 1992.
    """
    if not power:
        # Amplitude weighting is the square root of the power weighting.
        return np.sqrt(f_weighting(f, normal, power=True))
    # Work with the squared frequency in kHz to keep the magnitudes sane.
    khz = f/1000.
    s = khz**2
    # Squared magnitudes of the zero/pole pairs of the rational response.
    zero2 = ((0.58**2)+(1.03**2)-s)**2 + 4.0*(0.58**2)*s
    zero3 = ((3.18**2)+(8.75**2)-s)**2 + 4.0*(3.18**2)*s
    pole1 = 0.18**2+s
    pole2 = 1.63**2+s
    pole3 = ((2.51**2)+(3.85**2)-s)**2 + 4.0*(2.51**2)*s
    pole4 = ((6.62**2)+(14.29**2)-s)**2 + 4.0*(6.62**2)*s
    w = ((2.536e-5*((s**3)*zero2*(zero3**3)) /
          ((pole1**3)*(pole2**2)*(pole3**4))*((1e5/pole4)**20)))
    if normal:
        w = w*f_weighting_gain
    return w

# Set normalization gain
f_weighting_gain = 1/f_weighting(1000, normal=False, power=True)
"""Normalization gain for the F-weighting filter: the reciprocal of the
unnormalized power response at 1 kHz."""
| 33.707246 | 78 | 0.600224 |
ce0ea9cd4625661b89c457658572716294eaef3b | 1,258 | py | Python | data_custom/data_load.py | icon-lab/provoGAN | e4abee668ca5a5733a04c0e27e379a0434b0270f | [
"BSD-3-Clause"
] | 1 | 2022-03-27T09:16:22.000Z | 2022-03-27T09:16:22.000Z | data_custom/data_load.py | icon-lab/provoGAN | e4abee668ca5a5733a04c0e27e379a0434b0270f | [
"BSD-3-Clause"
] | null | null | null | data_custom/data_load.py | icon-lab/provoGAN | e4abee668ca5a5733a04c0e27e379a0434b0270f | [
"BSD-3-Clause"
] | null | null | null | import os
import nibabel
import numpy as np
import random
from scipy import ndimage
import SimpleITK as sitk
def load_nifty_volume_as_array(filename, with_header = False):
    """Load a NIfTI image into a numpy array ordered as [z, y, x].

    The on-disk [x, y, z] axes are transposed so the returned array has
    shape [Depth, Height, Width].

    inputs:
        filename: the input file name, should be *.nii or *.nii.gz
        with_header: if True, also return the affine matrix and header
    outputs:
        data: a numpy data array, or (data, affine, header) when
        with_header is True
    """
    img = nibabel.load(filename)
    # img.get_data() is deprecated and removed in nibabel >= 5.0;
    # np.asanyarray(img.dataobj) is the documented drop-in replacement
    # that preserves the on-disk dtype (unlike get_fdata(), which always
    # casts to floating point).
    data = np.asanyarray(img.dataobj)
    data = np.transpose(data, [2,1,0])
    if(with_header):
        return data, img.affine, img.header
    else:
        return data
def save_array_as_nifty_volume(data, filename, reference_name = None):
    """Write a [Depth, Height, Width] numpy array to a NIfTI file.

    inputs:
        data: a numpy array with shape [Depth, Height, Width]
        filename: the ouput file name
        reference_name: optional reference image whose spatial metadata
            (affine and header information) is copied onto the output
    outputs: None
    """
    out_img = sitk.GetImageFromArray(data)
    if reference_name is not None:
        # Copy origin/spacing/direction from the reference image.
        out_img.CopyInformation(sitk.ReadImage(reference_name))
    sitk.WriteImage(out_img, filename)
ce0ffdd605799570a773639f27bdbc3a5cc51708 | 8,802 | py | Python | project/server/user/views.py | kangusrm/XML-parser | adb2a7049b5946fb6293f58e20c860fbb07a6806 | [
"MIT"
] | 1 | 2016-09-20T09:07:34.000Z | 2016-09-20T09:07:34.000Z | project/server/user/views.py | kangusrm/XML-parser | adb2a7049b5946fb6293f58e20c860fbb07a6806 | [
"MIT"
] | null | null | null | project/server/user/views.py | kangusrm/XML-parser | adb2a7049b5946fb6293f58e20c860fbb07a6806 | [
"MIT"
] | 1 | 2016-09-20T09:07:37.000Z | 2016-09-20T09:07:37.000Z | # project/server/user/views.py
#################
#### imports ####
#################
from flask import render_template, Blueprint, url_for, \
redirect, flash, request, session
from flask_login import login_user, logout_user, login_required
from project.server import bcrypt, db
from project.server.models import User, Data, prevod
from project.server.user.forms import LoginForm, RegisterForm, UploadForm, ConnectForm
import xml.etree.ElementTree as ET
import pymysql
import pymysql.cursors
import tempfile
import os
################
#### config ####
################
user_blueprint = Blueprint('user', __name__, )
################
#### routes ####
################
| 32.360294 | 123 | 0.538287 |
ce10a73d0706d4c9c4b471fbf0c74937c35cf813 | 5,477 | py | Python | active_feature_extractor/experiments/linear_q_learner.py | benblack769/atari_q_learner | adae53e91ec6013ffaeefc9a058c7ab933593cea | [
"MIT"
] | null | null | null | active_feature_extractor/experiments/linear_q_learner.py | benblack769/atari_q_learner | adae53e91ec6013ffaeefc9a058c7ab933593cea | [
"MIT"
] | null | null | null | active_feature_extractor/experiments/linear_q_learner.py | benblack769/atari_q_learner | adae53e91ec6013ffaeefc9a058c7ab933593cea | [
"MIT"
] | null | null | null | import torch
import numpy as np
from torch import nn
# class DefaultModel(nn.Model):
# def __init__(self)
#
| 40.57037 | 125 | 0.651999 |
ce1252998459ede1ce9e5326a029f03393ec65ef | 660 | py | Python | Qualification/lazyLoader.py | monisjaved/Facebook-Hacker-Cup | 569052ecf1c94162cfbbef2533519b46d73d9328 | [
"MIT"
] | null | null | null | Qualification/lazyLoader.py | monisjaved/Facebook-Hacker-Cup | 569052ecf1c94162cfbbef2533519b46d73d9328 | [
"MIT"
] | null | null | null | Qualification/lazyLoader.py | monisjaved/Facebook-Hacker-Cup | 569052ecf1c94162cfbbef2533519b46d73d9328 | [
"MIT"
] | null | null | null | # https://www.facebook.com/hackercup/problem/169401886867367/
__author__ = "Moonis Javed"
__email__ = "monis.javed@gmail.com"
# Driver (Python 2 era: note xrange): reads a case count and, for each
# case, an array length followed by that many integers, one value per
# line of input2.txt; writes "Case #i: <numberOfDays(arr)>" lines to
# output2.txt.  numberOfDays is defined elsewhere in this file --
# TODO confirm its contract there.
if __name__ == "__main__":
	f = open("input2.txt").read().split("\n")
	writeF = open("output2.txt","w")
	n = int(f[0])
	del f[0]
	for i in range(1,n+1):
		t = int(f[0])
		del f[0]
		arr =[None]*t
		for j in xrange(t):
			arr[j] = int(f[0])
			del f[0]
		writeF.write("Case #%d: %d\n" % (i,numberOfDays(arr)))
		# print i
ce14ba7248ea553bc8bf340da9e895166445335c | 47 | py | Python | libs/messaging_service/__init__.py | wip-abramson/aries-jupyter-playground | 872f1a319f9072d7160298fcce82fb64c93d7397 | [
"Apache-2.0"
] | 6 | 2021-05-27T12:51:32.000Z | 2022-01-11T05:49:12.000Z | libs/messaging_service/__init__.py | SoftwareImpacts/SIMPAC-2021-64 | 4089946109e05516bbea70359d3bf1d02b245f4a | [
"Apache-2.0"
] | 2 | 2021-10-05T07:38:05.000Z | 2022-02-10T11:38:18.000Z | libs/messaging_service/__init__.py | SoftwareImpacts/SIMPAC-2021-64 | 4089946109e05516bbea70359d3bf1d02b245f4a | [
"Apache-2.0"
] | 7 | 2021-04-22T14:18:06.000Z | 2022-02-14T10:30:52.000Z | from .messaging_service import MessagingService | 47 | 47 | 0.914894 |
ce1563691214ec353e2ec66f0c158ddd18f4c456 | 556 | py | Python | Ex087.py | andrade-lcs/ex_curso_em_video_python | f2d029efe7a20cdf0fcb5b602f9992e27d37c263 | [
"MIT"
] | null | null | null | Ex087.py | andrade-lcs/ex_curso_em_video_python | f2d029efe7a20cdf0fcb5b602f9992e27d37c263 | [
"MIT"
] | null | null | null | Ex087.py | andrade-lcs/ex_curso_em_video_python | f2d029efe7a20cdf0fcb5b602f9992e27d37c263 | [
"MIT"
] | null | null | null | from random import randint
s = t = ma = 0
m = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for l in range(0, 3):
for c in range(0, 3):
m[l][c] = randint(0, 100)
print('-='*15)
for l in range(0, 3):
t += m[l][2]
for c in range(0, 3):
print(f'[{m[l][c]:^5}]', end='')
if m[l][c] % 2 == 0:
s += m[l][c]
if m[1][c] > ma:
ma = m[1][c]
print()
print('-='*15)
print(f'A soma dos nemros pares {s}')
print(f'A soma dos valores da terceira coluna {t}')
print(f'O maior valor da segunda linha {ma}') | 27.8 | 53 | 0.47482 |
ce1579bf8768e7cef70aebd7b3896b98ea1a0187 | 54 | py | Python | networkx-d3-v2/networkx/tests/__init__.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | [
"Apache-2.0"
] | null | null | null | networkx-d3-v2/networkx/tests/__init__.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | [
"Apache-2.0"
] | null | null | null | networkx-d3-v2/networkx/tests/__init__.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | [
"Apache-2.0"
] | null | null | null | from .utils_tests import *
from .views_tests import *
| 18 | 26 | 0.777778 |
ce1581e90ef98f01e93c9852612c4c137d683a10 | 7,851 | py | Python | ros/src/waypoint_updater/waypoint_updater.py | dan-fern/CarND-Capstone-P9 | 004853c7a14dfd5e99563c4082e7609885b4f6b2 | [
"MIT"
] | null | null | null | ros/src/waypoint_updater/waypoint_updater.py | dan-fern/CarND-Capstone-P9 | 004853c7a14dfd5e99563c4082e7609885b4f6b2 | [
"MIT"
] | null | null | null | ros/src/waypoint_updater/waypoint_updater.py | dan-fern/CarND-Capstone-P9 | 004853c7a14dfd5e99563c4082e7609885b4f6b2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy as rp
import numpy as np
import math as math
from geometry_msgs.msg import PoseStamped, TwistStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
from std_msgs.msg import Int32
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
# Number of waypoints we will publish.
LOOKAHEAD_WPS = 150
MAX_DECEL = 0.5
# ROS node entry point.  Constructing WaypointUpdater (defined earlier in
# this file) presumably starts the node and blocks until shutdown --
# confirm against the class definition; a ROSInterruptException during
# startup is logged rather than propagated.
if __name__ == '__main__':
    try:
        WaypointUpdater()
    except rp.ROSInterruptException:
        rp.logerr('Could not start waypoint updater node.')
| 31.154762 | 98 | 0.610495 |
ce1627eb06d19834ba84ea0cd7b1055080fe6187 | 595 | py | Python | oandapy/exceptions.py | extreme4all/oandapy | 48dcfbe154316a83ca6e62e6b939062165cabc3e | [
"MIT"
] | null | null | null | oandapy/exceptions.py | extreme4all/oandapy | 48dcfbe154316a83ca6e62e6b939062165cabc3e | [
"MIT"
] | null | null | null | oandapy/exceptions.py | extreme4all/oandapy | 48dcfbe154316a83ca6e62e6b939062165cabc3e | [
"MIT"
] | null | null | null | """Exceptions."""
| 29.75 | 103 | 0.67563 |
ce1666960c0a0228d2a06407d11294362e8b8691 | 4,444 | py | Python | synthesis/reverse_map/reverse_map_ast.py | jajajaqlt/nsg | 1873f2b5e10441110c3c69940ceb4650f9684ac0 | [
"Apache-2.0"
] | 10 | 2021-11-02T18:30:38.000Z | 2022-03-21T06:31:33.000Z | synthesis/reverse_map/reverse_map_ast.py | rohanmukh/nag | f2c4b8e60a97c58a6a1c549cc8b4753ebfe8a5e3 | [
"Apache-2.0"
] | 2 | 2021-11-05T18:40:42.000Z | 2022-03-30T04:33:08.000Z | synthesis/reverse_map/reverse_map_ast.py | rohanmukh/nag | f2c4b8e60a97c58a6a1c549cc8b4753ebfe8a5e3 | [
"Apache-2.0"
] | 2 | 2021-11-03T19:14:06.000Z | 2021-11-03T23:47:09.000Z | # Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from program_helper.ast.ops import DAPIInvoke
from synthesis.ops.candidate_ast import SYMTAB_MOD, TYPE_NODE, API_NODE, VAR_NODE, OP_NODE, METHOD_NODE, CLSTYPE_NODE, \
VAR_DECL_NODE
| 36.727273 | 120 | 0.599685 |
ce17307a9a0665319fcd15ea71bb54693784de3c | 135 | py | Python | ch10/myproject_virtualenv/src/django-myproject/myproject/settings/production.py | PacktPublishing/Django-3-Web-Development-Cookbook | 6ffe6e0add93a43a9abaff62e0147dc1f4f5351a | [
"MIT"
] | 159 | 2019-11-13T14:11:39.000Z | 2022-03-24T05:47:10.000Z | ch10/myproject_virtualenv/src/django-myproject/myproject/settings/production.py | PacktPublishing/Django-3-Web-Development-Cookbook | 6ffe6e0add93a43a9abaff62e0147dc1f4f5351a | [
"MIT"
] | 34 | 2019-11-06T08:32:48.000Z | 2022-01-14T11:31:29.000Z | ch10/myproject_virtualenv/src/django-myproject/myproject/settings/production.py | PacktPublishing/Django-3-Web-Development-Cookbook | 6ffe6e0add93a43a9abaff62e0147dc1f4f5351a | [
"MIT"
] | 103 | 2019-08-15T21:35:26.000Z | 2022-03-20T05:29:11.000Z | from ._base import *
DEBUG = False
WEBSITE_URL = "https://example.com" # without trailing slash
MEDIA_URL = f"{WEBSITE_URL}/media/"
| 19.285714 | 61 | 0.718519 |
ce1739ffa8890ca468f44112dbe677b551c2a05c | 1,657 | py | Python | v2/gui.py | appills/pyascii | 525411327ecb8835e14f8f84b3ac19f059dbd0bc | [
"MIT"
] | null | null | null | v2/gui.py | appills/pyascii | 525411327ecb8835e14f8f84b3ac19f059dbd0bc | [
"MIT"
] | null | null | null | v2/gui.py | appills/pyascii | 525411327ecb8835e14f8f84b3ac19f059dbd0bc | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter import filedialog
from pyascii import main
# Build the Tk root window, set its title, and attach the App controller
# (defined earlier in this file); root.mainloop() below then enters the
# Tk event loop.
root=Tk()
root.wm_title("Pyascii")
app = App(root)
root.mainloop() | 30.127273 | 108 | 0.677127 |
ce1986e97c39f7b0d9070c20a8cf44a57d43a5a3 | 13,093 | py | Python | tests/integration_tests/test_solution/test_solution_interior.py | cwentland0/perform | e08771cb776a7e6518c43350746e2ca72f79b153 | [
"MIT"
] | 6 | 2021-03-24T21:42:06.000Z | 2022-01-28T20:00:13.000Z | tests/integration_tests/test_solution/test_solution_interior.py | cwentland0/perform | e08771cb776a7e6518c43350746e2ca72f79b153 | [
"MIT"
] | 38 | 2021-04-15T15:30:21.000Z | 2022-01-29T01:23:57.000Z | tests/integration_tests/test_solution/test_solution_interior.py | cwentland0/perform | e08771cb776a7e6518c43350746e2ca72f79b153 | [
"MIT"
] | 1 | 2021-07-03T03:13:36.000Z | 2021-07-03T03:13:36.000Z | import unittest
import os
import numpy as np
from constants import (
del_test_dir,
gen_test_dir,
get_output_mode,
solution_domain_setup,
CHEM_DICT_REACT,
SOL_PRIM_IN_REACT,
TEST_DIR,
)
from perform.constants import REAL_TYPE
from perform.system_solver import SystemSolver
from perform.input_funcs import read_restart_file
from perform.gas_model.calorically_perfect_gas import CaloricallyPerfectGas
from perform.time_integrator.implicit_integrator import BDF
from perform.solution.solution_interior import SolutionInterior
| 39.796353 | 120 | 0.612465 |
ce1a18c48b194d0b3451c941f83d9e8945a1714d | 4,139 | py | Python | tests/system/post_cars_positive_test.py | ikostan/REST_API_AUTOMATION | cdb4d30fbc7457b2a403b4dad6fe1efa2e754681 | [
"Unlicense"
] | 8 | 2020-03-17T09:15:28.000Z | 2022-01-29T19:50:45.000Z | tests/system/post_cars_positive_test.py | ikostan/REST_API_AUTOMATION | cdb4d30fbc7457b2a403b4dad6fe1efa2e754681 | [
"Unlicense"
] | 1 | 2021-06-02T00:26:58.000Z | 2021-06-02T00:26:58.000Z | tests/system/post_cars_positive_test.py | ikostan/REST_API_AUTOMATION | cdb4d30fbc7457b2a403b4dad6fe1efa2e754681 | [
"Unlicense"
] | 1 | 2021-11-22T16:10:27.000Z | 2021-11-22T16:10:27.000Z | #!/path/to/interpreter
"""
Flask App REST API testing: POST
"""
# Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
import allure
import requests
from tests.system.base_test import BaseTestCase
from api.cars_app import USER_LIST
| 31.59542 | 64 | 0.490457 |
ce1a3fb80b6bbd849c64cd660cd72979f447cba6 | 1,165 | py | Python | bin/h5zero.py | ickc/dautil-py | 9cdd87080ec85774d7386e3cd2f55c2bc6b6aadd | [
"BSD-3-Clause"
] | null | null | null | bin/h5zero.py | ickc/dautil-py | 9cdd87080ec85774d7386e3cd2f55c2bc6b6aadd | [
"BSD-3-Clause"
] | null | null | null | bin/h5zero.py | ickc/dautil-py | 9cdd87080ec85774d7386e3cd2f55c2bc6b6aadd | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
'''Assert HDF5 input is non-zero.
Print to stderr if not.
For example,
find . -iname '*.hdf5' -exec h5zero.py {} +
'''
from __future__ import print_function
import argparse
import sys
import h5py
from dautil.IO.h5 import h5assert_nonzero
__version__ = '0.1'
# Script entry point: delegate to cli(), defined earlier in this file.
if __name__ == "__main__":
    cli()
| 23.3 | 82 | 0.612017 |
ce1a78c4b8b64234867f3d62b124351c7a4de964 | 195 | py | Python | cla_backend/apps/core/validators.py | uk-gov-mirror/ministryofjustice.cla_backend | 4d524c10e7bd31f085d9c5f7bf6e08a6bb39c0a6 | [
"MIT"
] | 3 | 2019-10-02T15:31:03.000Z | 2022-01-13T10:15:53.000Z | cla_backend/apps/core/validators.py | uk-gov-mirror/ministryofjustice.cla_backend | 4d524c10e7bd31f085d9c5f7bf6e08a6bb39c0a6 | [
"MIT"
] | 206 | 2015-01-02T16:50:11.000Z | 2022-02-16T20:16:05.000Z | cla_backend/apps/core/validators.py | uk-gov-mirror/ministryofjustice.cla_backend | 4d524c10e7bd31f085d9c5f7bf6e08a6bb39c0a6 | [
"MIT"
] | 6 | 2015-03-23T23:08:42.000Z | 2022-02-15T17:04:44.000Z | from django.core.exceptions import ValidationError
| 27.857143 | 82 | 0.733333 |
ce1ae0dcedfa059f4a8bffab465b0fca2f146769 | 51 | py | Python | app/_version.py | sunhailin-Leo/myMacAssistant | 30ba955a4f91a800197cbfdc2ab5d3a5cd993eef | [
"MIT"
] | 63 | 2020-11-02T00:58:49.000Z | 2022-03-20T21:39:02.000Z | fastapi_profiler/_version.py | sunhailin-Leo/fastapi_profiler | b414af6f0b2d92e7b509b6b3e54cde13ec5795e2 | [
"MIT"
] | 10 | 2021-02-23T11:00:39.000Z | 2022-02-07T02:44:05.000Z | app/_version.py | sunhailin-Leo/myMacAssistant | 30ba955a4f91a800197cbfdc2ab5d3a5cd993eef | [
"MIT"
] | 7 | 2020-11-24T08:34:46.000Z | 2022-01-10T12:58:51.000Z | __version__ = "1.0.0"
__author__ = "sunhailin-Leo"
| 17 | 28 | 0.705882 |
ce1e707dde07e49cd3190510d21820c11fc3a580 | 1,525 | py | Python | Week6/GFG(Day8-14)/Day14/Day14 - Solution.py | ShreyaPanale/100DaysOfCode | de7832d97fca36f783812868b867676b6f77c7b3 | [
"MIT"
] | 22 | 2021-05-25T16:01:31.000Z | 2021-06-07T06:32:27.000Z | Week6/GFG(Day8-14)/Day14/Day14 - Solution.py | shreya-panale/100DaysOfCode | de7832d97fca36f783812868b867676b6f77c7b3 | [
"MIT"
] | null | null | null | Week6/GFG(Day8-14)/Day14/Day14 - Solution.py | shreya-panale/100DaysOfCode | de7832d97fca36f783812868b867676b6f77c7b3 | [
"MIT"
] | null | null | null | #User function Template for python3
#{
# Driver Code Starts
#Initial Template for Python 3
import atexit
import io
import sys
_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER
if __name__ == '__main__':
test_cases = int(input())
for cases in range(test_cases) :
n = int(input())
a = list(map(int,input().strip().split()))
obj = Solution()
ans = obj.calculateSpan(a, n);
print(*ans) # print space seperated elements of span array
# } Driver Code Ends
| 28.773585 | 117 | 0.593443 |
ce1e9aca26ecdef56f6ff4c3c6d9a23230b8bd4f | 2,768 | py | Python | test/tests.py | jasedit/papers_base | af8aa6e9a164861ad7b44471ce543002fa7129d9 | [
"MIT"
] | 8 | 2016-08-17T14:40:49.000Z | 2020-03-05T00:08:07.000Z | test/tests.py | jasedit/scriptorium | af8aa6e9a164861ad7b44471ce543002fa7129d9 | [
"MIT"
] | 35 | 2016-08-07T19:58:02.000Z | 2021-05-09T10:08:06.000Z | test/tests.py | jasedit/scriptorium | af8aa6e9a164861ad7b44471ce543002fa7129d9 | [
"MIT"
] | 2 | 2017-09-21T17:57:46.000Z | 2019-06-30T13:06:21.000Z | #!python
# -*- coding: utf-8 -*-
"""Unit testing for scriptorium"""
import os
import tempfile
import shutil
import textwrap
import unittest
import scriptorium
def testCreation(self):
"""Test simple paper creation."""
example_config = {
'author': 'John Doe',
'title': 'Example Report'
}
old_dir = os.getcwd()
os.chdir(TestScriptorium.paper_dir)
self.assertEqual(scriptorium.create('ex_report', 'report', config=example_config), set())
os.chdir('ex_report')
self.assertEqual(scriptorium.paper_root('.'), 'paper.mmd')
self.assertEqual(scriptorium.get_template('paper.mmd'), 'report')
example_text = textwrap.dedent("""\n
# Introduction
This is an example paper.
# Conclusion
This paper is awesome.
""")
with open('paper.mmd', 'a') as fp:
fp.write(example_text)
pdf_path = scriptorium.to_pdf('.')
self.assertTrue(os.path.exists(pdf_path))
os.chdir(old_dir)
def testConfigLoading(self):
"""Test saving and loading configuration."""
config = scriptorium.CONFIG.copy()
scriptorium.save_config()
scriptorium.read_config()
self.assertEqual(config, scriptorium.CONFIG)
def testConfiguration(self):
"""Test configuration option issues"""
test_template_dir = "~/.scriptorium"
scriptorium.CONFIG['TEMPLATE_DIR'] = test_template_dir
scriptorium.save_config()
scriptorium.read_config()
self.assertEqual(scriptorium.CONFIG['TEMPLATE_DIR'], os.path.expanduser(test_template_dir))
scriptorium.CONFIG['TEMPLATE_DIR'] = self.template_dir
if __name__ == '__main__':
unittest.main()
| 31.816092 | 101 | 0.688223 |
ce1f98db217162180757b8a6044a17804f866924 | 4,794 | py | Python | imblearn/combine/tests/test_smote_enn.py | themrzmaster/imbalanced-learn | e1be8695b22ca58aa5443057b9ae3f2885a45d60 | [
"MIT"
] | 2 | 2019-09-14T23:23:35.000Z | 2019-09-16T18:17:19.000Z | imblearn/combine/tests/test_smote_enn.py | themrzmaster/imbalanced-learn | e1be8695b22ca58aa5443057b9ae3f2885a45d60 | [
"MIT"
] | null | null | null | imblearn/combine/tests/test_smote_enn.py | themrzmaster/imbalanced-learn | e1be8695b22ca58aa5443057b9ae3f2885a45d60 | [
"MIT"
] | 1 | 2021-04-23T04:46:10.000Z | 2021-04-23T04:46:10.000Z | """Test the module SMOTE ENN."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import pytest
import numpy as np
from sklearn.utils.testing import assert_allclose, assert_array_equal
from imblearn.combine import SMOTEENN
from imblearn.under_sampling import EditedNearestNeighbours
from imblearn.over_sampling import SMOTE
RND_SEED = 0
X = np.array([[0.11622591, -0.0317206], [0.77481731, 0.60935141], [
1.25192108, -0.22367336
], [0.53366841, -0.30312976], [1.52091956,
-0.49283504], [-0.28162401, -2.10400981],
[0.83680821,
1.72827342], [0.3084254, 0.33299982], [0.70472253, -0.73309052],
[0.28893132, -0.38761769], [1.15514042, 0.0129463], [
0.88407872, 0.35454207
], [1.31301027, -0.92648734], [-1.11515198, -0.93689695], [
-0.18410027, -0.45194484
], [0.9281014, 0.53085498], [-0.14374509, 0.27370049], [
-0.41635887, -0.38299653
], [0.08711622, 0.93259929], [1.70580611, -0.11219234]])
Y = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
R_TOL = 1e-4
| 38.047619 | 79 | 0.635378 |
ce21a48448d28f3cf598b5cbc7c2ecedcc9ebfb2 | 46,925 | py | Python | tests/unittests/test_mock_network_plugin_public_nat.py | cloudify-cosmo/tosca-vcloud-plugin | c5196abd066ba5315b66911e5390b0ed6c15988f | [
"Apache-2.0"
] | 4 | 2015-02-25T12:39:01.000Z | 2018-02-14T15:14:16.000Z | tests/unittests/test_mock_network_plugin_public_nat.py | cloudify-cosmo/tosca-vcloud-plugin | c5196abd066ba5315b66911e5390b0ed6c15988f | [
"Apache-2.0"
] | 45 | 2015-01-13T13:55:10.000Z | 2020-02-04T15:06:15.000Z | tests/unittests/test_mock_network_plugin_public_nat.py | cloudify-cosmo/tosca-vcloud-plugin | c5196abd066ba5315b66911e5390b0ed6c15988f | [
"Apache-2.0"
] | 21 | 2015-01-21T17:17:18.000Z | 2021-05-05T14:08:25.000Z | # Copyright (c) 2014-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from cloudify import exceptions as cfy_exc
from tests.unittests import test_mock_base
from vcloud_network_plugin import public_nat
from vcloud_network_plugin import utils
import vcloud_network_plugin
import vcloud_plugin_common
from IPy import IP
if __name__ == '__main__':
unittest.main()
| 36.919748 | 79 | 0.542248 |
ce21d57f1cc21cb2e5990bffc69d3403f42d2835 | 519 | py | Python | Taller_estruturas_de_control_secuenciales/Python_yere/Ejercicio_17.py | Matieljimenez/Algoritmos_y_programacion | cdc381478581e6842c6672d4840dd948833c4ec7 | [
"MIT"
] | null | null | null | Taller_estruturas_de_control_secuenciales/Python_yere/Ejercicio_17.py | Matieljimenez/Algoritmos_y_programacion | cdc381478581e6842c6672d4840dd948833c4ec7 | [
"MIT"
] | null | null | null | Taller_estruturas_de_control_secuenciales/Python_yere/Ejercicio_17.py | Matieljimenez/Algoritmos_y_programacion | cdc381478581e6842c6672d4840dd948833c4ec7 | [
"MIT"
] | null | null | null | """
Entradas
monto de dinero presupuestal-->float-->a
Salidas
dinero correspondiente para ginecologia-->float-->b
dinero correspondiente para traumatologia-->float-->c
dinero correspondiente para pediatria-->float-->d
"""
a=float(input("Presupuesto anual al Hospital rural "))
b=a*0.40
c=a*0.30
d=a*0.30
print("El presupuesto del hospital rural para ginecologa es: "+str(b))
print("El presupuesto del hospital rural para traumatologa es: "+str(c))
print("El presupuesto del hospital rural para pediatra es: "+str(d)) | 34.6 | 73 | 0.759152 |
ce2288a47d9c672cc8785e5719f15a00192e23e2 | 5,926 | py | Python | tools/generate_things/generate_navigation.py | akalenuk/wordsandbuttons | c8ad9e8668fc49f4c39ae3b510e36a5a52ec3c91 | [
"Unlicense"
] | 367 | 2018-01-29T17:45:00.000Z | 2022-03-08T03:50:52.000Z | tools/generate_things/generate_navigation.py | akalenuk/wordsandbuttons | c8ad9e8668fc49f4c39ae3b510e36a5a52ec3c91 | [
"Unlicense"
] | 9 | 2017-12-21T16:48:08.000Z | 2021-01-23T17:20:20.000Z | tools/generate_things/generate_navigation.py | akalenuk/wordsandbuttons | c8ad9e8668fc49f4c39ae3b510e36a5a52ec3c91 | [
"Unlicense"
] | 20 | 2018-02-18T11:52:36.000Z | 2021-11-22T09:46:53.000Z | import os
import subprocess
PAGES_DIR = "../../pages"
keyword_note = {
'tutorials': '',
'demos': '',
'quizzes': '',
'mathematics': '',
'algorithms': '',
'programming': 'By the way, if you prefer books to blogs, <a href="https://wordsandbuttons.online/SYTYKC.pdf">there is a free book</a> that was originally made from this section.'
}
index_title = 'Hello, world!'
index_description = 'This is <i>Words and Buttons Online</i>a growing collection of interactive tutorials, demos, and quizzes about maths, algorithms, and programming.'
all_span_ids = []
date_link_title_description_keywords = []
all_keywords = set()
for filename in os.listdir(PAGES_DIR):
if filename == 'index.html':
continue
if filename == 'faq.html':
continue
if filename.endswith(".html"):
f = open(PAGES_DIR + "/" + filename, 'rt')
content = f.read()
f.close
if content.find("meta name=\"keywords\"") == -1:
continue
date_from_git = subprocess.run(["git", "log", "--reverse", "--date=iso", "--format=%cd", "--", filename], \
cwd=PAGES_DIR, \
stdout=subprocess.PIPE)
full_date = date_from_git.stdout.decode('utf-8')
date = full_date.split(' ')[0]
title = content.split("<title>")[1].split("</title>")[0]
description = content.split('<meta name="description" content="')[1].split('">')[0]
keywords = content.split('<meta name="keywords" content="')[1].split('">')[0].split(', ')
if keywords[0] == "":
continue
date_link_title_description_keywords += [(date, filename, title, description, keywords)]
all_keywords.update(keywords)
date_link_title_description_keywords.sort()
# index
f = open('index.template')
template = f.read()
f.close()
index = '%s' % template
f = open('links.txt')
links = f.readlines()
f.close()
links_html = '<h1>More interactive learning</h1>'
for link in links:
if link.strip().find(' ') != -1:
url = link.split(' ')[0]
title_chunks = link.split(' ')[1:]
title = title_chunks[0]
for chunk in title_chunks[1:]: # no hanging short words
if len(chunk) < 2:
title += ' ' + chunk
else:
title += ' ' + chunk
links_html += '<p style="margin-bottom: 12pt;">'+title+'<br><a href="'+url+'">'+url+'</a></p>\n'
menu = '<p class="links" style="width: 555pt;">'
for (kw, _) in keyword_note.items():
menu += '<nobr><a style="padding-right: 4pt;" href="all_' + kw + '.html">#' + kw + '</a></nobr> '
menu += '</p>'
# index is now real index not a timeline
the_index = '<h1 title="A real index on index.html! How cool is that!">Index</h1>'
spans = read_index_spans(PAGES_DIR)
cur_letter = ''
for (f, i, t) in sorted(spans, key = lambda fit: fit[2].upper()):
letter = t[0].upper()
if cur_letter != letter:
if cur_letter != '':
the_index += '</p>\n'
the_index += '<h2>'+letter+'</h2>\n'
the_index += '<p class="index_items">\n'
cur_letter = letter
the_index += '<nobr><a style="padding-right: 24pt;" href="' + f + '#' + i + '">' + t + '</a></nobr>\n'
the_index += '</p>\n'
index = index.replace('<h1>Title</h1>', '<h1>' + index_title + '</h1>')
index = index.replace('<p>Description</p>', '<p style="width: 555pt;">' + index_description + '</p>')
index = index.replace('<div id="menu"></div>', '\n' + menu + '\n')
index = index.replace('<p>Note</p>', '')
index = index.replace('<div id="timeline"></div>', '\n' + the_index + '\n')
index = index.replace('<div id="links"></div>', '\n' + links_html + '\n')
f = open('../../pages/' + 'index.html', 'w')
f.write(index)
f.close
# tag's all_* pages
for title in list(all_keywords):
page = '%s' % template
timeline = ''
menu = '<p class="links" style="width: 555pt;">'
for (kw, _) in keyword_note.items():
if kw == title:
menu += '<nobr><span style="padding-right: 4pt; color: #999;">#' + kw + '</span></nobr> '
else:
menu += '<nobr><a style="padding-right: 4pt;" href="all_' + kw + '.html">#' + kw + '</a></nobr> '
menu += '</p>'
for (d, l, t, desc, kwds) in date_link_title_description_keywords[::-1]:
if not title in kwds:
continue
timeline += '<p class="title">' + '<a href="' + l + '">' + t + '</a></p>\n'
timeline += '<p class="description">' + desc + '</p>\n'
timeline += '<p class="links">'
for kw in sorted(list(kwds)):
if kw == title:
timeline += '<span style="padding-right: 8pt; color: #999;">#' + kw + '</span> '
else:
timeline += '<a style="padding-right: 8pt;" href="all_' + kw + '.html">#' + kw + '</a> '
timeline += '</p>\n'
page = page.replace('<h1>Title</h1>', '<h1><a href="index.html">Words and Buttons</a>: ' + title + '</h1>')
page = page.replace('<p>Description</p>', '')
page = page.replace('<div id="menu"></div>', '\n' + menu + '\n')
page = page.replace('<p>Note</p>', '<p style="width: 555pt;">' + keyword_note[title] + '</p>')
page = page.replace('<div id="timeline"></div>', '\n' + timeline + '\n')
page = page.replace('<div id="links"></div>', '')
f = open('../../pages/all_' + title + '.html', 'w')
f.write(page)
f.close
| 35.48503 | 179 | 0.602599 |
ce25004f312bc46b4d6a3d278373562bc87e4202 | 316 | py | Python | apps/listings/migrations/0002_remove_post_author.py | favours-io/favours | 6f26a207d2684e752857aa21e5fafa607a4707e6 | [
"MIT"
] | 11 | 2020-07-23T19:07:32.000Z | 2021-11-18T17:16:29.000Z | apps/listings/migrations/0002_remove_post_author.py | favours-io/favours | 6f26a207d2684e752857aa21e5fafa607a4707e6 | [
"MIT"
] | 16 | 2020-08-29T01:57:05.000Z | 2022-01-13T03:16:41.000Z | apps/listings/migrations/0002_remove_post_author.py | favours-io/favours | 6f26a207d2684e752857aa21e5fafa607a4707e6 | [
"MIT"
] | 4 | 2020-09-18T18:40:12.000Z | 2021-11-09T06:36:36.000Z | # Generated by Django 3.0.7 on 2020-09-22 05:14
from django.db import migrations
| 17.555556 | 47 | 0.575949 |
ce2713a447d11afd7d04a70a5793ef6b8c8b2009 | 303 | py | Python | venv/Lib/site-packages/bootstrap4/widgets.py | HRangelov/gallery | 3ccf712ef2e1765a6dfd6567d58e6678e0b2ff6f | [
"MIT"
] | 3 | 2021-02-02T11:13:15.000Z | 2021-02-10T07:26:10.000Z | venv/Lib/site-packages/bootstrap4/widgets.py | HRangelov/gallery | 3ccf712ef2e1765a6dfd6567d58e6678e0b2ff6f | [
"MIT"
] | 3 | 2021-03-30T14:15:20.000Z | 2021-09-22T19:31:57.000Z | cypher_venv/Lib/site-packages/bootstrap4/widgets.py | FrancisLangit/cypher | 4921e2f53ef8154ad63ff4de7f8068b27f29f485 | [
"MIT"
] | null | null | null | from django.forms import RadioSelect
| 25.25 | 99 | 0.762376 |
ce2810e264659103f1cf2c4c793eb498a673a023 | 2,990 | py | Python | workflower/services/workflow/loader.py | dmenezesgabriel/workflower | db2358abdd2d133b85baea726e013e71171e5cf3 | [
"MIT"
] | null | null | null | workflower/services/workflow/loader.py | dmenezesgabriel/workflower | db2358abdd2d133b85baea726e013e71171e5cf3 | [
"MIT"
] | null | null | null | workflower/services/workflow/loader.py | dmenezesgabriel/workflower | db2358abdd2d133b85baea726e013e71171e5cf3 | [
"MIT"
] | null | null | null | import logging
import os
import traceback
from typing import List
from workflower.adapters.sqlalchemy.setup import Session
from workflower.adapters.sqlalchemy.unit_of_work import SqlAlchemyUnitOfWork
from workflower.application.event.commands import CreateEventCommand
from workflower.application.workflow.commands import (
ActivateWorkflowCommand,
LoadWorkflowFromYamlFileCommand,
SetWorkflowTriggerCommand,
)
from workflower.domain.entities.workflow import Workflow
logger = logging.getLogger("workflower.loader")
| 32.5 | 79 | 0.596321 |
ce281d8807b114456a5700d5486fb898099afb81 | 2,492 | py | Python | setup.py | neuroticnerd/dragoncon-bot | 44c4d96743cf11ea0e8eaa567100e42afa4de565 | [
"Apache-2.0"
] | 2 | 2015-12-18T05:28:02.000Z | 2018-05-24T04:18:26.000Z | setup.py | neuroticnerd/dragoncon-bot | 44c4d96743cf11ea0e8eaa567100e42afa4de565 | [
"Apache-2.0"
] | 11 | 2016-08-27T22:05:18.000Z | 2021-12-13T19:41:44.000Z | setup.py | neuroticnerd/dragoncon-bot | 44c4d96743cf11ea0e8eaa567100e42afa4de565 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import io
import os
import re
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
PROJECT_MODULE = 'dragonite'
PROJECT = 'dragonite'
AUTHOR = 'Bryce Eggleton'
EMAIL = 'eggleton.bryce@gmail.com'
DESC = 'Dragon Con command line utility'
LONG_DESC = ''
KEYWORDS = ('dragonite', 'dragoncon', 'dragon', 'con')
URL = "https://github.com/neuroticnerd/dragoncon-bot"
REQUIRES = []
EXTRAS = {
'dev': (
'flake8 >= 2.5.0',
'twine >= 1.8.1',
'pytest >= 2.8.4',
'coverage >= 4.0.3',
),
# 'caching': (
# 'redis>=2.10.3',
# 'hiredis>=0.2.0',
# ),
}
SCRIPTS = {
"console_scripts": [
'dragonite = dragonite.cli:dragonite',
]}
LICENSE = 'Apache License, Version 2.0'
VERSION = ''
CLASSIFIERS = [
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Topic :: Utilities',
]
# Pull the version string out of <PROJECT_MODULE>/__init__.py with a regex
# instead of importing the package: importing at setup time could fail when
# the package's dependencies are not installed yet.
version_file = os.path.join(here, '{0}/__init__.py'.format(PROJECT_MODULE))
ver_find = r'^\s*__version__\s*=\s*[\"\'](.*)[\"\']$'
with io.open(version_file, 'r', encoding='utf-8') as ver_file:
    VERSION = re.search(ver_find, ver_file.read(), re.MULTILINE).group(1)
# Reuse the README verbatim as the long description (shown on PyPI).
readme_file = os.path.join(here, 'README.rst')
with io.open(readme_file, 'r', encoding='utf-8') as f:
    LONG_DESC = f.read()
# Build REQUIRES from requirements.txt: direct-URL entries are skipped
# (they cannot be expressed in install_requires), and exact pins are
# relaxed so 'pkg==1.2' becomes 'pkg >= 1.2'.
requirements_file = os.path.join(here, 'requirements.txt')
with io.open(requirements_file, 'r') as reqs_file:
    for rawline in reqs_file:
        line = rawline.strip()
        if line.startswith('http'):
            continue
        REQUIRES.append(' >= '.join(line.split('==')))
if __name__ == '__main__':
setup(
name=PROJECT,
version=VERSION,
packages=find_packages(include=[PROJECT_MODULE + '*']),
author=AUTHOR,
author_email=EMAIL,
url=URL,
description=DESC,
long_description=LONG_DESC,
classifiers=CLASSIFIERS,
platforms=('any',),
license=LICENSE,
keywords=KEYWORDS,
install_requires=REQUIRES,
extras_require=EXTRAS,
entry_points=SCRIPTS,
)
| 28 | 75 | 0.617978 |
ce282a6ed0fc710a4b6a368e5d2307c23cfaf901 | 3,427 | py | Python | backend/api.py | RuiL1904/Hackathon | 94eed04b2fa3fb48b3479045a0b279b0217744fb | [
"MIT"
] | 5 | 2022-02-20T12:59:19.000Z | 2022-02-20T17:30:49.000Z | backend/api.py | RuiL1904/Hackathon | 94eed04b2fa3fb48b3479045a0b279b0217744fb | [
"MIT"
] | null | null | null | backend/api.py | RuiL1904/Hackathon | 94eed04b2fa3fb48b3479045a0b279b0217744fb | [
"MIT"
] | 1 | 2022-03-08T20:21:03.000Z | 2022-03-08T20:21:03.000Z | import random
import database
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
# Instantiate FastAPI
app = FastAPI()
# Whitelist origins
app.add_middleware(
CORSMiddleware,
allow_origins = ["*"],
allow_credentials = True,
allow_methods = ["*"],
allow_headers = ["*"]
)
# POST
# GET
# POST
def check_colide(schedule: list) -> list:
    """Return index pairs (i, j), with i < j, of events that overlap in time.

    Each event in *schedule* is a mapping with 'date_start' and 'date_end'
    keys; any mutually comparable values (datetimes, numbers) work.
    All pairs are checked, so this is O(n^2) in the schedule length.
    """
    colided = []
    for i in range(len(schedule)):
        for j in range(i + 1, len(schedule)):
            if check_colide_aux(schedule[i], schedule[j]):
                colided.append((i, j))
    return colided

def check_colide_aux(h1, h2) -> bool:
    """Return True when the two events' [start, end) intervals overlap.

    Events that merely touch (one ends exactly when the other starts) do
    not collide, matching the strict comparisons used previously.
    """
    start1 = h1['date_start']
    end1 = h1['date_end']
    start2 = h2['date_start']
    end2 = h2['date_end']
    # Identical intervals always collide (also covers zero-length duplicates).
    if start1 == start2 and end1 == end2:
        return True
    # Standard interval-overlap test. The previous chain of comparisons
    # duplicated two of its conditions verbatim and missed the case where
    # h1 starts inside h2 but ends at or after h2's end (and the mirrored
    # equal-start, different-end case), reporting those as non-colliding.
    return start1 < end2 and start2 < end1
if __name__ == "__main__":
uvicorn.run("api:app", host = "0.0.0.0", port = 8000, reload = True)
| 26.160305 | 121 | 0.569594 |
ce282fdf98dc253cf62921347890761e924022a6 | 1,211 | py | Python | lfs/portlet/models/pages.py | zhammami/django-lfs | b921295e71fe827377a67b5e7ae1a8bf7f72a1e6 | [
"BSD-3-Clause"
] | null | null | null | lfs/portlet/models/pages.py | zhammami/django-lfs | b921295e71fe827377a67b5e7ae1a8bf7f72a1e6 | [
"BSD-3-Clause"
] | null | null | null | lfs/portlet/models/pages.py | zhammami/django-lfs | b921295e71fe827377a67b5e7ae1a8bf7f72a1e6 | [
"BSD-3-Clause"
] | null | null | null | # django imports
from django import forms
from django.conf import settings
from django.core.cache import cache
from django.template.loader import render_to_string
# portlets imports
from portlets.models import Portlet
# lfs imports
from lfs.page.models import Page
| 24.714286 | 85 | 0.641618 |
ce2b25ff23e864e881234a2380df580d2b3d114d | 829 | py | Python | feed/models.py | kassupto007/photo-sharing-app | 97ed237815134fd3d53431be348a050c505db499 | [
"Apache-2.0"
] | null | null | null | feed/models.py | kassupto007/photo-sharing-app | 97ed237815134fd3d53431be348a050c505db499 | [
"Apache-2.0"
] | null | null | null | feed/models.py | kassupto007/photo-sharing-app | 97ed237815134fd3d53431be348a050c505db499 | [
"Apache-2.0"
] | null | null | null | from django.conf import settings
from django.db import models
from django.utils import timezone
from users.models import Profile
| 37.681818 | 106 | 0.77684 |
ce2ba9ff2aa3d5ef4daa942e79661e4a012dddf3 | 2,168 | py | Python | zampol/osoba/admin.py | VadymRud/zampolit | 80bbd5dc197041c3595831a8d0ddae130e10418c | [
"Apache-2.0"
] | null | null | null | zampol/osoba/admin.py | VadymRud/zampolit | 80bbd5dc197041c3595831a8d0ddae130e10418c | [
"Apache-2.0"
] | null | null | null | zampol/osoba/admin.py | VadymRud/zampolit | 80bbd5dc197041c3595831a8d0ddae130e10418c | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from django.db import models
from django.utils.translation import gettext as _
from .models import (MilitaryRank, Platoon, ServiseID, Unit, OfficialPosition, Company,
Education, Creed, Nationality, Command)
from osoba.widgets import CustomDatePickerInput
admin.site.register(Company)
admin.site.register(MilitaryRank)
admin.site.register(Platoon)
admin.site.register(ServiseID, ServiseIDAdmin)
admin.site.register(Unit)
admin.site.register(OfficialPosition)
admin.site.register(Creed)
admin.site.register(Nationality)
admin.site.register(Education)
admin.site.register(Command) | 31.882353 | 95 | 0.571494 |
ce2c3b1def15247a90a747a7d6db93245d2f364a | 725 | py | Python | python/src/problem/leetcode/easy/leetcode_189.py | yipwinghong/Algorithm | e594df043c9d965dbfbd958554e88c533c844a45 | [
"MIT"
] | 9 | 2019-10-31T16:58:31.000Z | 2022-02-08T08:42:30.000Z | python/src/problem/leetcode/easy/leetcode_189.py | yipwinghong/Algorithm | e594df043c9d965dbfbd958554e88c533c844a45 | [
"MIT"
] | null | null | null | python/src/problem/leetcode/easy/leetcode_189.py | yipwinghong/Algorithm | e594df043c9d965dbfbd958554e88c533c844a45 | [
"MIT"
] | null | null | null | # coding=utf-8
from typing import List
| 18.589744 | 63 | 0.411034 |
ce2e1eeb2d14e83f19c6e30702d48f326de87b43 | 931 | py | Python | brainmix_register/display/display.py | ThunderShiviah/brainmix-register | fd42445ed2649ae8bdbb3c3e653adc4465190052 | [
"MIT",
"Unlicense"
] | 4 | 2015-07-10T01:13:43.000Z | 2018-07-08T09:05:05.000Z | brainmix_register/display/display.py | ThunderShiviah/brainmix-register | fd42445ed2649ae8bdbb3c3e653adc4465190052 | [
"MIT",
"Unlicense"
] | 3 | 2015-04-08T17:51:36.000Z | 2015-06-01T04:19:33.000Z | brainmix_register/display/display.py | ThunderShiviah/brainmix_register | fd42445ed2649ae8bdbb3c3e653adc4465190052 | [
"MIT",
"Unlicense"
] | null | null | null | import sys, os, glob
from skimage import io
from skimage import viewer
import registration as reg
from skimage import data
if __name__ == "__main__":
# ------------------Create input ndarray------------------------
inputDir = '../data/test/'
imageFiles = glob.glob(os.path.join(inputDir, '*.jpg'))
imageVolume = io.ImageCollection(imageFiles, as_grey=True).concatenate()
stack = imageVolume
# ------------------Check that single image registration works----
src = stack[0]
dst = stack[1]
reg_dst = reg.reg(src, dst)
# ------------- Check that stack registration works -----------
reg_stack = reg.registration(stack)
merged = [reg.overlay_pics(stack[0], img) for img in stack]
merged_reg = [reg.overlay_pics(reg_stack[0], img) for img in reg_stack]
image = data.coins()
viewer = viewer.CollectionViewer(merged_reg)
viewer.show()
| 25.861111 | 76 | 0.61869 |
ce30447567aca3b3740596e2dcf70ae66968d0b3 | 1,605 | py | Python | lib/datasets/LFW2G.py | blacknwhite5/facial-anonymizer | 48878f0b704cc9203b6e13b962f0b53cecae78c6 | [
"MIT"
] | 10 | 2019-04-18T03:30:55.000Z | 2021-04-03T22:51:50.000Z | lib/datasets/LFW2G.py | blacknwhite5/facial-anonymizer | 48878f0b704cc9203b6e13b962f0b53cecae78c6 | [
"MIT"
] | 3 | 2020-05-28T15:04:05.000Z | 2020-12-16T10:31:42.000Z | lib/datasets/LFW2G.py | blacknwhite5/facial-anonymizer | 48878f0b704cc9203b6e13b962f0b53cecae78c6 | [
"MIT"
] | 6 | 2019-04-15T11:16:02.000Z | 2021-09-08T03:16:49.000Z | import numpy as np
import torch
import torch.utils.data as data
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import os, random, glob, cv2
if __name__ == "__main__":
main()
| 32.1 | 91 | 0.63053 |
ce307dac43c76b9afca0ff0e962a64169f480199 | 4,536 | py | Python | questions.py | lasyasreepada/iplaw-for-digital-teens | a1ac53f7b3438876db644450413f78ec8d612bac | [
"MIT"
] | null | null | null | questions.py | lasyasreepada/iplaw-for-digital-teens | a1ac53f7b3438876db644450413f78ec8d612bac | [
"MIT"
] | null | null | null | questions.py | lasyasreepada/iplaw-for-digital-teens | a1ac53f7b3438876db644450413f78ec8d612bac | [
"MIT"
] | null | null | null | """Set of questions for the IP Law quiz
questions.py
Lasya Sreepada
Yale College '19
May 6, 2017
"""
from random import shuffle
import time
quiz()
| 49.846154 | 334 | 0.665785 |
ce313caa11cce1219bbc0ca784238958335d4a0b | 529 | py | Python | Python/leetcode/Triangle.py | darrencheng0817/AlgorithmLearning | aec1ddd0c51b619c1bae1e05f940d9ed587aa82f | [
"MIT"
] | 2 | 2015-12-02T06:44:01.000Z | 2016-05-04T21:40:54.000Z | Python/leetcode/Triangle.py | darrencheng0817/AlgorithmLearning | aec1ddd0c51b619c1bae1e05f940d9ed587aa82f | [
"MIT"
] | null | null | null | Python/leetcode/Triangle.py | darrencheng0817/AlgorithmLearning | aec1ddd0c51b619c1bae1e05f940d9ed587aa82f | [
"MIT"
] | null | null | null | '''
Created on 1.12.2016
@author: Darren
''''''
Given a triangle, find the minimum path sum from top to bottom. Each step you may move to adjacent numbers on the row below.
For example, given the following triangle
[
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).
Note:
Bonus point if you are able to do this using only O(n) extra space, where n is the total number of rows in the triangle.
"
'''
| 18.892857 | 126 | 0.587902 |
ce31a76d07584d9441c2b8024946e9ee56bc2a7f | 8,286 | py | Python | regulations/tests/layers_toc_applier_tests.py | contolini/regulations-site | c31a9ce3097910877657f61b4c19a4ccbd0f967f | [
"CC0-1.0"
] | 18 | 2015-01-14T15:58:45.000Z | 2019-08-17T06:15:59.000Z | regulations/tests/layers_toc_applier_tests.py | contolini/regulations-site | c31a9ce3097910877657f61b4c19a4ccbd0f967f | [
"CC0-1.0"
] | 142 | 2015-01-08T15:28:50.000Z | 2018-07-16T16:48:07.000Z | regulations/tests/layers_toc_applier_tests.py | contolini/regulations-site | c31a9ce3097910877657f61b4c19a4ccbd0f967f | [
"CC0-1.0"
] | 45 | 2015-01-26T16:24:46.000Z | 2021-02-20T10:50:59.000Z | from unittest import TestCase
from regulations.generator.layers.toc_applier import *
| 40.223301 | 79 | 0.506758 |
ce33d42b39da049e5244eeed1b27927c33f5fb8c | 1,929 | py | Python | array_range.py | fasiha/array-range-slices-py | 940bfd1879a7e041b59349f6d9cbc2d79dacb891 | [
"Unlicense"
] | 1 | 2021-02-03T14:01:56.000Z | 2021-02-03T14:01:56.000Z | array_range.py | fasiha/array-range-slices-py | 940bfd1879a7e041b59349f6d9cbc2d79dacb891 | [
"Unlicense"
] | null | null | null | array_range.py | fasiha/array-range-slices-py | 940bfd1879a7e041b59349f6d9cbc2d79dacb891 | [
"Unlicense"
] | null | null | null | """
Numpy's `split` can split a multidimensional array into non-overlapping
sub-arrays. However, this is not a memory-efficient way of dealing with
non-overlapping partitions of an array because it effectively doubles
memory usage.
This module provides an iterable generator that produces tuples of slices,
each of which can be used to index into a Numpy array and obtain a small
view into it. It is very memory-efficient since no copy of the array is
ever created.
This all works because Numpy ndarrays can be indexed using a tuple of
slices: that is, `arr[a:b, c:d, e:f]` is equivalent to
`arr[(slice(a, b), slice(c, d), slice(e, f))]`.
This module doesn't import Numpy at all since it generates Python slices.
"""
from itertools import product
from typing import List, Iterable, Tuple
def array_range(start: List[int], stop: List[int], step: List[int]) -> Iterable[Tuple]:
    """
    Makes an iterable of non-overlapping slices, e.g., to partition an array

    Returns an iterable of tuples of slices, each of which can be used to
    index into a multidimensional array such as Numpy's ndarray:

        [arr[tup] for tup in array_range([0, 0], arr.shape, [5, 7])]

    where `arr` can be indexed with a tuple of slices (e.g., Numpy), will
    evaluate to a list of sub-array views.

    Same arguments as `range` except all three arguments are required and
    expected to be list-like of same length. `start` indicates the indexes
    to start each dimension. `stop` indicates the stop index for each
    dimension. `step` is the size of the chunk in each dimension; chunks at
    the far edge are clipped to `stop`, just like `range`.
    """
    assert len(start) == len(stop)
    assert len(stop) == len(step)
    assert all(size > 0 for size in step)
    # Per-dimension sequences of chunk start indexes.
    axis_starts = [range(lo, hi, size) for lo, hi, size in zip(start, stop, step)]

    def corner_to_slices(corner: Tuple[int, ...]) -> Tuple:
        # Clip each chunk's end to `stop` so edge chunks are never oversized.
        return tuple(slice(i, min(i + size, hi))
                     for i, hi, size in zip(corner, stop, step))

    # `product` enumerates every corner (Cartesian product of the per-axis
    # starts); mapping lazily keeps this memory-free, as the module promises.
    return map(corner_to_slices, product(*axis_starts))
| 41.042553 | 87 | 0.729912 |
ce34ebaf15612703873e6a27020070246ab042d8 | 7,197 | py | Python | test-framework/test-suites/integration/tests/add/test_add_host_bonded.py | knutsonchris/stacki | 33087dd5fa311984a66ccecfeee6f9c2c25f665d | [
"BSD-3-Clause"
] | 123 | 2015-05-12T23:36:45.000Z | 2017-07-05T23:26:57.000Z | test-framework/test-suites/integration/tests/add/test_add_host_bonded.py | knutsonchris/stacki | 33087dd5fa311984a66ccecfeee6f9c2c25f665d | [
"BSD-3-Clause"
] | 177 | 2015-06-05T19:17:47.000Z | 2017-07-07T17:57:24.000Z | test-framework/test-suites/integration/tests/add/test_add_host_bonded.py | knutsonchris/stacki | 33087dd5fa311984a66ccecfeee6f9c2c25f665d | [
"BSD-3-Clause"
] | 32 | 2015-06-07T02:25:03.000Z | 2017-06-23T07:35:35.000Z | import json
from textwrap import dedent
import pytest
| 28.559524 | 106 | 0.632208 |
ce3501af1f45e1223934bba47fc0e9a49f9b32bd | 1,669 | py | Python | BITs/2014/Kozlov_A_D/task_8_11.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | [
"Apache-2.0"
] | null | null | null | BITs/2014/Kozlov_A_D/task_8_11.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | [
"Apache-2.0"
] | null | null | null | BITs/2014/Kozlov_A_D/task_8_11.py | YukkaSarasti/pythonintask | eadf4245abb65f4400a3bae30a4256b4658e009c | [
"Apache-2.0"
] | null | null | null | # 8. 11.
#1-50. "" (. . Python. .4) , . , . , , , , .
# ..
#04.04.2016
import random
words = ("","","","","","")
word=random.choice(words)
correct=word
score=10;
i=0
jumble=""
while word:
position=random.randrange(len(word))
jumble+=word[position]
word=word[:position]+word[(position+1):]
print("""
''!
, .
: .
( Enter, .)
""")
print(" : ", jumble)
guess=input(" : ")
if guess=="":
score-=1
print(str(i+1),": ",correct[i])
i+=1
while guess !=correct and guess!="":
guess=input(" : ")
if guess=="":
if i==len(correct):
print(" .")
continue
score-=1
print(str(i+1),": ",correct[i])
i+=1
continue
if guess==correct:
print(". ! ! ",score," !")
else:
print(" , .")
print(" .")
input("\n\n Enter, ")
| 37.088889 | 362 | 0.656681 |
ce358cccd6bb9246d24f50b9e468818c256a0701 | 1,254 | py | Python | master/teachkids-master/teachkids-master/ch09/Challenge2_ColorPaint.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 4 | 2018-09-07T15:35:24.000Z | 2019-03-27T09:48:12.000Z | master/teachkids-master/teachkids-master/ch09/Challenge2_ColorPaint.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 371 | 2020-03-04T21:51:56.000Z | 2022-03-31T20:59:11.000Z | master/teachkids-master/teachkids-master/ch09/Challenge2_ColorPaint.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 3 | 2019-06-18T19:57:17.000Z | 2020-11-06T03:55:08.000Z | # ColorPaint.py
# ColorPaint.py
# Click-and-drag painting demo: while a mouse button is held down, circles
# are drawn under the cursor, with a different colour per button.
import pygame  # setup
import random  # NOTE(review): unused in this script; kept to avoid touching the header
pygame.init()
screen = pygame.display.set_mode([800, 600])
pygame.display.set_caption('Click and drag to draw, using up to 3 mouse buttons')
keepGoing = True
# RGB color triplets for the 3 mouse-button colors.
# Fix: the original defined ORANGE as (255,255,0), which is yellow;
# true orange is (255,165,0).
ORANGE = (255,165,0)
GREEN = (0,255,0)
PURPLE = (128,0,128)
radius = 15          # radius in pixels of each painted circle
mousedown = False    # True while any mouse button is held down
while keepGoing: # game loop
    for event in pygame.event.get(): # handling events
        if event.type == pygame.QUIT:
            keepGoing = False
        if event.type == pygame.MOUSEBUTTONDOWN:
            mousedown = True
        if event.type == pygame.MOUSEBUTTONUP:
            mousedown = False
    if mousedown: # draw/update graphics
        spot = pygame.mouse.get_pos()
        # get_pressed() returns booleans for (left, middle, right).
        if pygame.mouse.get_pressed()[0] : # boolean for button1 (left)
            button_color = ORANGE
        elif pygame.mouse.get_pressed()[1]: # boolean for button2 (middle)
            button_color = GREEN
        else: # must be button3 (right)
            button_color = PURPLE
        pygame.draw.circle(screen, button_color, spot, radius)
    pygame.display.update() # update display every frame
pygame.quit() # exit
| 36.882353 | 81 | 0.587719 |
ce35c483fa1d1e28e070fa3ddb8145549538c79c | 14,508 | py | Python | eventmanager/events/tests.py | karinakozarova/EventManager | b09fa7a788b4aa11761fc34096cc711304c288c7 | [
"MIT"
] | 4 | 2019-01-06T16:58:20.000Z | 2019-04-08T10:20:46.000Z | eventmanager/events/tests.py | EventManagerTeam/EventManager | b09fa7a788b4aa11761fc34096cc711304c288c7 | [
"MIT"
] | 297 | 2018-11-14T13:59:19.000Z | 2022-03-11T23:33:28.000Z | eventmanager/events/tests.py | karinakozarova/EventManager | b09fa7a788b4aa11761fc34096cc711304c288c7 | [
"MIT"
] | 1 | 2019-04-22T15:17:32.000Z | 2019-04-22T15:17:32.000Z | import datetime
import unittest
from accounts.models import AccountDetails
from categories.models import Category
from django.contrib.auth.models import User
from django.test import Client
from django.test import TestCase
from django.urls import reverse
from events.models import Comment
from events.models import Event
from events.models import Invite
from tasks.models import Task
| 30.543158 | 79 | 0.565826 |
ce35f5d501c181ecbb1339e8615379517cb18794 | 159 | py | Python | billing/tests/views.py | hkhanna/django-stripe-billing | 75a53c183ff86b1c7edf741683ffe3330e733d87 | [
"MIT"
] | 1 | 2022-03-29T20:16:34.000Z | 2022-03-29T20:16:34.000Z | billing/tests/views.py | hkhanna/django-stripe-billing | 75a53c183ff86b1c7edf741683ffe3330e733d87 | [
"MIT"
] | 2 | 2022-02-21T17:38:22.000Z | 2022-02-22T20:56:39.000Z | billing/tests/views.py | hkhanna/django-stripe-billing | 75a53c183ff86b1c7edf741683ffe3330e733d87 | [
"MIT"
] | null | null | null | from django.views.generic import TemplateView
from .. import mixins
| 22.714286 | 53 | 0.792453 |
ce36dcd7976f6556078f7dfa2fbd33e0565d593e | 4,225 | py | Python | core/model/meta/mtl.py | Aamer98/LibFewShot_NoAffine | 1203d2a9f5cb4705038748dbda03a4b7c37bf647 | [
"MIT"
] | 1 | 2021-11-07T03:34:41.000Z | 2021-11-07T03:34:41.000Z | core/model/meta/mtl.py | taylor1355/LibFewShot | c53b4ee3772c5c8033fd54aa73586091eee2d0b0 | [
"MIT"
] | null | null | null | core/model/meta/mtl.py | taylor1355/LibFewShot | c53b4ee3772c5c8033fd54aa73586091eee2d0b0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@inproceedings{DBLP:conf/cvpr/SunLCS19,
author = {Qianru Sun and
Yaoyao Liu and
Tat{-}Seng Chua and
Bernt Schiele},
title = {Meta-Transfer Learning for Few-Shot Learning},
booktitle = {{IEEE} Conference on Computer Vision and Pattern Recognition, {CVPR}
2019, Long Beach, CA, USA, June 16-20, 2019},
pages = {403--412},
year = {2019},
url = {http://openaccess.thecvf.com/content_CVPR_2019/html/Sun_Meta-Transfer_Learning_for_Few
-Shot_Learning_CVPR_2019_paper.html},
doi = {10.1109/CVPR.2019.00049}
}
https://arxiv.org/abs/1812.02391
Adapted from https://github.com/yaoyao-liu/meta-transfer-learning.
"""
import torch
from torch import digamma, nn
import torch.nn.functional as F
import copy
from core.utils import accuracy
from .meta_model import MetaModel
from ..backbone.utils import convert_mtl_module
| 32.751938 | 101 | 0.64568 |
ce378179f8b40837991f7c71e128ec7eb52c6132 | 1,023 | py | Python | game.py | gustavonaldoni/command-line-hangman | a740a446ce1dfad2100ab7e6ea1db817c6a57a47 | [
"MIT"
] | null | null | null | game.py | gustavonaldoni/command-line-hangman | a740a446ce1dfad2100ab7e6ea1db817c6a57a47 | [
"MIT"
] | null | null | null | game.py | gustavonaldoni/command-line-hangman | a740a446ce1dfad2100ab7e6ea1db817c6a57a47 | [
"MIT"
] | null | null | null | from capture_words import capture_words_from_file
import random
| 21.765957 | 72 | 0.691105 |
ce37b76dcc82f7204803dfa179451058b3f38a92 | 4,895 | py | Python | src/OTLMOW/OTLModel/Classes/DwarseMarkeringVerschuind.py | davidvlaminck/OTLClassPython | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | 2 | 2022-02-01T08:58:11.000Z | 2022-02-08T13:35:17.000Z | src/OTLMOW/OTLModel/Classes/DwarseMarkeringVerschuind.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | src/OTLMOW/OTLModel/Classes/DwarseMarkeringVerschuind.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | # coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.DwarseMarkeringToegang import DwarseMarkeringToegang
from OTLMOW.OTLModel.Datatypes.KlDwarseMarkeringVerschuindCode import KlDwarseMarkeringVerschuindCode
from OTLMOW.OTLModel.Datatypes.KlDwarseMarkeringVerschuindSoort import KlDwarseMarkeringVerschuindSoort
from OTLMOW.OTLModel.Datatypes.KwantWrdInDecimaleGraden import KwantWrdInDecimaleGraden
from OTLMOW.OTLModel.Datatypes.KwantWrdInVierkanteMeter import KwantWrdInVierkanteMeter
# Generated with OTLClassCreator. To modify: extend, do not edit
| 49.444444 | 158 | 0.608784 |
ce37e19c6bb3e23ffae3d35e78de1e2b5a16ea5f | 549 | py | Python | backend/reviews/forms.py | ranwise/djangochannel | 9c719d292b5c1d0fd008a16a64509a309bdd642e | [
"BSD-3-Clause"
] | 45 | 2019-10-04T10:12:54.000Z | 2022-03-29T18:12:34.000Z | backend/reviews/forms.py | ranwise/djangochannel | 9c719d292b5c1d0fd008a16a64509a309bdd642e | [
"BSD-3-Clause"
] | 6 | 2019-10-09T07:37:14.000Z | 2022-01-27T16:41:16.000Z | backend/reviews/forms.py | ranwise/djangochannel | 9c719d292b5c1d0fd008a16a64509a309bdd642e | [
"BSD-3-Clause"
] | 35 | 2019-10-04T10:18:48.000Z | 2022-01-14T22:40:38.000Z | from django import forms
from .models import Review
| 26.142857 | 74 | 0.495446 |