hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2a54b836b10d0f5d5aa4979c845c18b9275fdecd | 3,287 | py | Python | insights/parsers/ceph_insights.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 121 | 2017-05-30T20:23:25.000Z | 2022-03-23T12:52:15.000Z | insights/parsers/ceph_insights.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 1,977 | 2017-05-26T14:36:03.000Z | 2022-03-31T10:38:53.000Z | insights/parsers/ceph_insights.py | lhuett/insights-core | 1c84eeffc037f85e2bbf60c9a302c83aa1a50cf8 | [
"Apache-2.0"
] | 244 | 2017-05-30T20:22:57.000Z | 2022-03-26T10:09:39.000Z | """
ceph_insights - command ``ceph insights``
=========================================
"""
import json
import re
from .. import CommandParser, parser
from insights.specs import Specs
@parser(Specs.ceph_insights)
class CephInsights(CommandParser):
    """
    Parse the output of the ``ceph insights`` command.

    Attributes:
        version (dict): version information of the Ceph cluster.
        data (dict): a dictionary of the parsed output.

    The ``data`` attribute is a dictionary containing the parsed output of the
    ``ceph insights`` command. The following are available in ``data``:

    * ``crashes`` - summary of daemon crashes for the past 24 hours
    * ``health`` - the current and historical (past 24 hours) health checks
    * ``config`` - cluster and daemon configuration settings
    * ``osd_dump`` - osd and pool information
    * ``df`` - storage usage statistics
    * ``osd_tree`` - osd topology
    * ``fs_map`` - file system map
    * ``crush_map`` - the CRUSH map
    * ``mon_map`` - monitor map
    * ``service_map`` - service map
    * ``manager_map`` - manager map
    * ``mon_status`` - monitor status
    * ``pg_summary`` - placement group summary
    * ``osd_metadata`` - per-OSD metadata
    * ``version`` - ceph software version
    * ``errors`` - any errors encountered collecting this data

    The ``version`` attribute contains a normalized view of ``self.data["version"]``.

    Examples:
    >>> ceph_insights.version["release"] == 14
    True
    >>> ceph_insights.version["major"] == 0
    True
    >>> ceph_insights.version["minor"] == 0
    True
    >>> isinstance(ceph_insights.data["crashes"], dict)
    True
    >>> isinstance(ceph_insights.data["health"], dict)
    True
    """
    # Fix: these patterns contain regex escapes (\*, \d) that are invalid
    # escape sequences in plain string literals (a SyntaxWarning on recent
    # Python); raw strings keep the intended regex text unchanged.
    IGNORE_RE = [
        r"\*\*\* DEVELOPER MODE",
        r"\d+-\d+-\d+.+WARNING: all dangerous"
    ]
    bad_lines = [
        "module 'insights' is not enabled",
        "no valid command found"
    ]

    def __init__(self, *args, **kwargs):
        # Forward the known bad lines so CommandParser rejects broken output.
        kwargs.update(dict(extra_bad_lines=self.bad_lines))
        super(CephInsights, self).__init__(*args, **kwargs)

    def _sanitize_content(self, content):
        """Drop empty lines and lines matching IGNORE_RE at the start of content."""
        slice_point = 0
        ignore_re = re.compile('|'.join(CephInsights.IGNORE_RE))
        for line in content:
            if not line or ignore_re.match(line):
                slice_point += 1
                continue
            break
        return content[slice_point:]

    def _parse_version(self):
        """
        Add a Ceph version property as a dictionary with the keys "release",
        "major", "minor" containing numeric values, and the key "full" with the
        full version string. If Ceph is not compiled with version information
        (this should never be the case in a production system), then "release",
        "major", and "minor" are set to None.
        """
        self.version = {
            "release": None,
            "major": None,
            "minor": None
        }
        self.version.update(self.data["version"])

    def parse_content(self, content):
        """Strip banner lines, then decode the remaining output as JSON."""
        content = self._sanitize_content(content)
        self.data = json.loads(''.join(content))
        self._parse_version()
| 33.540816 | 85 | 0.60146 | 3,071 | 0.934287 | 0 | 0 | 3,100 | 0.943109 | 0 | 0 | 2,201 | 0.669608 |
2a54c4b02d79ac04b72ebbd0cef513843105c1f0 | 227 | py | Python | src/learners/__init__.py | OscarPedaVendere/sc2MultiAgentES | 54df34eb94f00294c2bcd1ac5a0c14386c0bbceb | [
"Apache-2.0"
] | null | null | null | src/learners/__init__.py | OscarPedaVendere/sc2MultiAgentES | 54df34eb94f00294c2bcd1ac5a0c14386c0bbceb | [
"Apache-2.0"
] | 4 | 2021-03-19T03:43:34.000Z | 2022-01-13T01:39:12.000Z | src/learners/__init__.py | OscarPedaVendere/sc2MultiAgentES | 54df34eb94f00294c2bcd1ac5a0c14386c0bbceb | [
"Apache-2.0"
] | null | null | null | from .q_learner import QLearner
from .coma_learner import COMALearner
from .es_learner import ESLearner
# Registry mapping learner names (as used in configuration) to their classes.
REGISTRY = {}
REGISTRY["q_learner"] = QLearner
REGISTRY["coma_learner"] = COMALearner
REGISTRY["es_learner"] = ESLearner
| 22.7 | 38 | 0.792952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.162996 |
2a56f5a13c5e84abb272e6b2ff55a131592699d7 | 3,582 | py | Python | ietf/utils/management/base.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 25 | 2022-03-05T08:26:52.000Z | 2022-03-30T15:45:42.000Z | ietf/utils/management/base.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 219 | 2022-03-04T17:29:12.000Z | 2022-03-31T21:16:14.000Z | ietf/utils/management/base.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 22 | 2022-03-04T15:34:34.000Z | 2022-03-28T13:30:59.000Z | # Copyright The IETF Trust 2013-2020, All Rights Reserved
# -*- coding: utf-8 -*-
from email.message import EmailMessage
from textwrap import dedent
from traceback import format_exception, extract_tb
from django.conf import settings
from django.core.management.base import BaseCommand
from ietf.utils.mail import send_smtp
import debug # pyflakes:ignore
class EmailOnFailureCommand(BaseCommand):
    """Management command base class that emails admins when the command fails.

    If an exception escapes the command, a notification built by
    ``make_failure_message()`` is mailed to ``failure_recipients`` and the
    command exits cleanly instead of printing a traceback.  Subclasses may
    override ``failure_message``, ``failure_subject`` and
    ``failure_recipients``; both subject and message are formatted with
    ``{error}`` (the exception) and ``{error_summary}`` (a short
    type/file/line summary), plus any extras passed to
    ``make_failure_message()``.  The full traceback is attached unless
    ``failure_email_includes_traceback`` is set to False.

    For debugging, the ``--no-failure-email`` option restores normal
    exception propagation; it is only available when the subclass calls
    ``super().add_arguments()`` from its own ``add_arguments()``.
    """
    failure_message = dedent("""\
        An exception occurred:
        {error}
        """)
    failure_subject = 'Exception in management command'
    failure_email_includes_traceback = True

    @property
    def failure_recipients(self):
        """Default recipients: the email addresses from settings.ADMINS."""
        return tuple(address for _name, address in settings.ADMINS)

    def execute(self, *args, **options):
        """Run the command; on exception, mail the report unless disabled."""
        try:
            super().execute(*args, **options)
        except Exception as err:
            if not options['email_on_failure']:
                raise
            send_smtp(self.make_failure_message(err))

    def _summarize_error(self, error):
        """Build the {error_summary} text from the deepest traceback frame."""
        last_frame = extract_tb(error.__traceback__)[-1]
        err_cls = type(error)
        return "\n".join([
            "Error details:",
            f"  Exception type: {err_cls.__module__}.{err_cls.__name__}",
            f"  File: {last_frame.filename}",
            f"  Line: {last_frame.lineno}",
        ])

    def make_failure_message(self, error, **extra):
        """Generate an EmailMessage reporting *error*."""
        format_values = {
            'error': error,
            'error_summary': self._summarize_error(error),
            **extra,
        }
        msg = EmailMessage()
        msg['To'] = self.failure_recipients
        msg['From'] = settings.SERVER_EMAIL
        msg['Subject'] = self.failure_subject.format(**format_values)
        msg.set_content(self.failure_message.format(**format_values))
        if self.failure_email_includes_traceback:
            msg.add_attachment(
                ''.join(format_exception(None, error, error.__traceback__)),
                filename='traceback.txt',
            )
        return msg

    def add_arguments(self, parser):
        """Expose --no-failure-email so failures raise normally when debugging."""
        parser.add_argument(
            '--no-failure-email', dest='email_on_failure', action='store_false',
            help='Disable sending email on failure',
        )
2a572edc22a1007d158ae57f509ba56d4cf54a6e | 603 | py | Python | lab01/calculadora/urls.py | josepilco7501/TECSUP-DAE-2021-2 | f6e433193edd2b9547a7385f0e03b0aacdb4dcd0 | [
"MIT"
] | null | null | null | lab01/calculadora/urls.py | josepilco7501/TECSUP-DAE-2021-2 | f6e433193edd2b9547a7385f0e03b0aacdb4dcd0 | [
"MIT"
] | null | null | null | lab01/calculadora/urls.py | josepilco7501/TECSUP-DAE-2021-2 | f6e433193edd2b9547a7385f0e03b0aacdb4dcd0 | [
"MIT"
] | null | null | null | from django.urls import path
from.import views
urlpatterns = [
    # ex: localhost:8080/app/
    path('', views.index, name='index'),
    # ex: localhost:8080/app/sumar/<n1>/<n2>
    path('sumar/<int:n1>/<int:n2>', views.sumar, name='suma'),
    # ex: localhost:8080/app/restar/<n1>/<n2>
    path('restar/<int:n1>/<int:n2>', views.restar, name='resta'),
    # ex: localhost:8080/app/multiplicar/<n1>/<n2>
    path('multiplicar/<int:n1>/<int:n2>', views.multiplicar, name='multiplicacion'),
    # ex: localhost:8080/app/dividir/<n1>/<n2>
    path('dividir/<int:n1>/<int:n2>', views.dividir, name='division'),
]
| 33.5 | 84 | 0.650083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.558872 |
2a58d724de9a2eb16bfc33ebafdd4ad0b59e5837 | 688 | py | Python | kmeans_poc/data/generate_dataset.py | katyamust/with_git_actions | 73d9213be1a1bb626b0062afa165ea73d3283443 | [
"MIT"
] | null | null | null | kmeans_poc/data/generate_dataset.py | katyamust/with_git_actions | 73d9213be1a1bb626b0062afa165ea73d3283443 | [
"MIT"
] | null | null | null | kmeans_poc/data/generate_dataset.py | katyamust/with_git_actions | 73d9213be1a1bb626b0062afa165ea73d3283443 | [
"MIT"
] | 1 | 2020-07-03T02:08:43.000Z | 2020-07-03T02:08:43.000Z | # generate dataset : random data in a two-dimensional space
#https://towardsdatascience.com/understanding-k-means-clustering-in-machine-learning-6a6e67336aa1
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
#%matplotlib inline
def generate_x():
    """Generate a 100x2 random dataset forming two clusters.

    The first 50 points are drawn uniformly from [-2, 0)^2; the second 50
    points overwrite rows 50..99 with values drawn from [1, 3)^2.
    """
    points = -2 * np.random.rand(100, 2)
    shifted_cluster = 1 + 2 * np.random.rand(50, 2)
    points[50:100, :] = shifted_cluster
    return points
def plot_x(X):
    """Scatter-plot the (n, 2) dataset and show it in a blocking window."""
    plt.scatter(X[ : , 0], X[ :, 1], s = 50)  # optionally pass c='b' for blue markers
    plt.show()
    pass
def save_x(X):
    """Persist the dataset to ``x_poc.csv`` (comma-separated) in the CWD."""
    # Fix: removed the redundant trailing ``pass`` (dead code after a statement).
    np.savetxt("x_poc.csv", X, delimiter=",")
def main():
    """Generate the random dataset, display it, and save it to disk."""
    X = generate_x()
    plot_x(X)
    save_x(X)
# Script entry point: build, plot, and persist the demo dataset.
if __name__ == '__main__':
    main()
| 17.641026 | 97 | 0.632267 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 214 | 0.309249 |
2a5b794eee3881243d82a5403699324690ff176a | 115 | py | Python | stere/__init__.py | thomasmost/stere | f10ae1a40269c1642b9da49df06936fafb9b4a5b | [
"MIT"
] | 17 | 2017-12-11T15:49:10.000Z | 2021-06-16T19:29:45.000Z | stere/__init__.py | thomasmost/stere | f10ae1a40269c1642b9da49df06936fafb9b4a5b | [
"MIT"
] | 328 | 2018-02-06T02:29:21.000Z | 2022-03-30T10:47:33.000Z | stere/__init__.py | thomasmost/stere | f10ae1a40269c1642b9da49df06936fafb9b4a5b | [
"MIT"
] | 3 | 2019-04-05T19:40:55.000Z | 2021-11-04T06:50:32.000Z | from .browserenabled import BrowserEnabled as Stere
from .page import Page
# Public names re-exported at the package level.
__all__ = [
    "Stere",
    "Page",
]
| 14.375 | 51 | 0.686957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.113043 |
2a5f82639377b0ea5dadf1fcd65d906ed85ab902 | 2,190 | py | Python | antlr/SevenBillionHumansParser.py | behrmann/7billionhumans | 36d53daf278ef4f3729bc5cba2f2398d5411bd6d | [
"MIT"
] | 45 | 2018-09-05T04:56:59.000Z | 2021-11-22T08:57:26.000Z | antlr/SevenBillionHumansParser.py | behrmann/7billionhumans | 36d53daf278ef4f3729bc5cba2f2398d5411bd6d | [
"MIT"
] | 36 | 2018-09-01T11:34:26.000Z | 2021-05-19T23:20:49.000Z | antlr/SevenBillionHumansParser.py | behrmann/7billionhumans | 36d53daf278ef4f3729bc5cba2f2398d5411bd6d | [
"MIT"
] | 36 | 2018-09-01T07:44:19.000Z | 2021-09-10T19:07:35.000Z | from antlr4 import *
from antlr4.error.ErrorListener import ErrorListener
from antlr.SBHasmLexer import SBHasmLexer
from antlr.SBHasmListener import SBHasmListener
from antlr.SBHasmParser import SBHasmParser
class MyErrorListener(ErrorListener):
    """ANTLR error listener that escalates every reported problem to an exception."""

    def __init__(self):
        super(MyErrorListener, self).__init__()

    def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
        raise Exception(f"SyntaxError in {line},{column} msg={msg}")

    def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs):
        raise Exception("reportAmbiguity")

    def reportAttemptingFullContext(self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs):
        raise Exception("reportAttemptingFullContext")

    def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs):
        raise Exception("reportContextSensitivity")
class SBHCodeSizeListener(SBHasmListener):
    """Parse-tree listener that counts commands to measure program size."""

    def __init__(self):
        # Running total of commands encountered during the tree walk.
        self.cmd_cnt = 0

    def enterCmd(self, ctx):
        self.cmd_cnt += 1

    def enterSonst(self, ctx):
        # 'Sonst' (else) branches count towards the size as well.
        self.cmd_cnt += 1
class Pickup:
    """Marker object for a pickup instruction; carries the picked-up item."""

    def __init__(self, item):
        self.item = item

    def __str__(self):
        # str() intentionally hides the item; callers rely on the bare label.
        return "Pickup"

    def __repr__(self):
        # Added for debuggability: show the wrapped item.
        return f"Pickup({self.item!r})"
class Mem:
    """Memory-slot reference; its str() form is the slot name itself."""

    def __init__(self, slot):
        self.slot = slot

    def __str__(self):
        return self.slot

    def __repr__(self):
        # Added for debuggability: unambiguous representation of the slot.
        return f"Mem({self.slot!r})"
class Dir:
    """Direction marker wrapping a direction token in ``dir``."""

    def __init__(self, direction):
        self.dir = direction

    def __repr__(self):
        # Added for debuggability; the class previously had no printable form.
        return f"Dir({self.dir!r})"
class SevenBillionHumansParser:
    """Parse a 7 Billion Humans asm program and record its command count.

    Accepts either a raw source string or a path to a source file; after
    construction, ``cmd_size`` holds the number of commands found.
    """

    def __init__(self, filepath=None, source=None):
        if source:
            self.parse(InputStream(source))
        elif filepath:
            self.parse(FileStream(filepath))

    def parse(self, source_stream):
        """Lex and parse *source_stream*, then walk the tree counting commands."""
        token_stream = CommonTokenStream(SBHasmLexer(source_stream))
        parser = SBHasmParser(token_stream)
        # parser._listeners = [ MyErrorListener() ]
        tree = parser.asm()
        size_listener = SBHCodeSizeListener()
        ParseTreeWalker().walk(size_listener, tree)
        self.cmd_size = size_listener.cmd_cnt
# Ad-hoc manual check: parse one sample solution file.
if __name__ == '__main__':
    s = SevenBillionHumansParser("../solutions/55 - Data Flowers/size-10_speed-23.asm")
| 26.707317 | 108 | 0.682192 | 1,845 | 0.842466 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.098174 |
2a5ff21418d48ac09c9e93cc6ba5b5b67b0da4ce | 442 | py | Python | pig-latin/pig_latin.py | cmccandless/ExercismSolutions-python | d80bf441c842daa2eb446bdba9c03d3e8864ea58 | [
"MIT"
] | null | null | null | pig-latin/pig_latin.py | cmccandless/ExercismSolutions-python | d80bf441c842daa2eb446bdba9c03d3e8864ea58 | [
"MIT"
] | null | null | null | pig-latin/pig_latin.py | cmccandless/ExercismSolutions-python | d80bf441c842daa2eb446bdba9c03d3e8864ea58 | [
"MIT"
# Consonant onsets, multi-character clusters first so the longest match wins.
consonents = ['sch', 'squ', 'thr', 'qu', 'th', 'sc', 'sh', 'ch', 'st', 'rh']
consonents.extend('bcdfghjklmnpqrstvwxyz')


def prefix(word):
    """Split *word* into (consonant onset, remainder).

    Words starting with 'xr' or 'yt' are treated as vowel-initial, so the
    onset is empty.
    """
    if word[:2] in ('xr', 'yt'):
        return ('', word)
    onset = next((c for c in consonents if word.startswith(c)), '')
    return (onset, word[len(onset):])


def translate(phrase):
    """Translate a whitespace-separated phrase into Pig Latin."""
    words = []
    for word in phrase.split():
        onset, remainder = prefix(word)
        words.append(remainder + onset + 'ay')
    return ' '.join(words)
| 27.625 | 77 | 0.488688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.187783 |
2a6147abbf7955e413192c373cc1c16dc8668901 | 5,315 | py | Python | amalia/simulation/PoissonSimulation.py | Aganonce/AMALIA-lite | a9c854b45cc6486763349c262e44ee4d27d6bfac | [
"MIT"
] | null | null | null | amalia/simulation/PoissonSimulation.py | Aganonce/AMALIA-lite | a9c854b45cc6486763349c262e44ee4d27d6bfac | [
"MIT"
] | null | null | null | amalia/simulation/PoissonSimulation.py | Aganonce/AMALIA-lite | a9c854b45cc6486763349c262e44ee4d27d6bfac | [
"MIT"
] | 2 | 2021-05-21T07:55:54.000Z | 2021-09-23T12:58:50.000Z | import logging
from tools.EventGeneration import convert_date, generate_random_time, generate_random_node_id
logger = logging.getLogger(__name__.split('.')[-1])
from features.ResponseTypeFeature import ResponseTypeFeature
from features.ReplayTimeSeriesFeature import ReplayTimeSeriesFeature
import tools.Cache as Cache
import random
import pandas as pd
import warnings
from scipy.sparse import SparseEfficiencyWarning
warnings.simplefilter('ignore', SparseEfficiencyWarning)
random.seed(1234)
class PoissonSimulation:
    '''
    Simple event simulation. Given a replay of base events
    and probabilities of responses, generate arbitrary single-layer
    event cascades.

    Parameters
    ----------
    cfg : configuration object providing ``limits.*`` and
        ``poisson_simulation.*`` settings.
    generate_replies : bool or None
        When None, read from ``poisson_simulation.generate_replies``
        (default True); otherwise overrides the configured value.
    '''
    def __init__(self, cfg, generate_replies=None, **kwargs):
        self.start_date = cfg.get("limits.start_date", type=convert_date)
        self.end_date = cfg.get("limits.end_date", type=convert_date)
        self.time_delta = cfg.get("limits.time_delta", type=pd.Timedelta).total_seconds()
        if generate_replies is None:
            self.generate_replies = cfg.get("poisson_simulation.generate_replies", True)
        else:
            self.generate_replies = generate_replies
        self.cfg = cfg

    @Cache.amalia_cache
    def compute(self, dfs, train_dfs=None):
        """Generate events for every platform and return them as a DataFrame.

        Raises:
            ValueError: if no events were produced at all.
        """
        # Retrieve replay time-series feature and response type feature
        ts = ReplayTimeSeriesFeature(self.cfg).compute(dfs)
        responses = ResponseTypeFeature(self.cfg).compute(dfs)
        res = []
        platforms = dfs.get_platforms()
        logger.warning('Very slow for dense data generation. Use ParallelPoissonSimulation to reduce runtime.')
        for platform in platforms:
            # Bug fix: the originals rebound ``ts``/``responses`` to the
            # per-platform values inside this loop, clobbering the feature
            # dicts and breaking every platform after the first. Use
            # separate local names instead.
            platform_ts = ts[platform]
            platform_responses = responses[platform]
            node_map = dfs.get_node_map(platform)

            # For all users that have a nonzero row in their ts, generate events
            logger.info('Generating new events.')
            nonzero_rows, __ = platform_ts.nonzero()
            res = res + _generate_base_event(platform_ts, node_map, nonzero_rows, self.start_date,
                                             platform_responses, self.generate_replies, platform)

        # Return a pandas DataFrame sorted by time
        # Feed into the output module for actual result generation
        res = pd.DataFrame(res)
        if len(res) == 0:
            logger.error('PoissonSimulation produced no events. Terminating.')
            raise ValueError('PoissonSimulation produced no events.')
        return res.sort_values(by=['nodeTime']).reset_index(drop=True)
def _generate_base_event(ts, node_map, nonzero_rows, start_date, responses, generate_replies, platform):
    """Create base ('tweet') events, and optionally their replies, for each user.

    ``ts`` is a sparse matrix of per-user, per-day event counts; ``start_date``
    is a Unix timestamp and each column index is interpreted as a day offset.
    Returns a list of event dicts in AMALIA's output schema.
    """
    res = []
    for root_user_id in nonzero_rows:
        ts_row = ts.getrow(root_user_id)
        __, events = ts_row.nonzero()

        # For each user, get event counts and the time index in which those events occurred
        event_counts = [ts_row.getcol(event).toarray()[0][0] for event in events]
        for i in range(len(event_counts)):
            for j in range(event_counts[i]):
                # Generate the base event (86400 seconds per day offset)
                current_day_time = int(start_date + events[i] * 86400)
                root_event_id = generate_random_node_id()
                res.append({'nodeID': root_event_id, 'nodeUserID': node_map[root_user_id], 'parentID': root_event_id,
                            'rootID': root_event_id, 'actionType': 'tweet', 'nodeTime': current_day_time,
                            'platform': platform})

                # Generate responses to the base event
                if generate_replies:
                    generated_responses = _generate_responses(root_event_id, root_user_id, current_day_time, responses,
                                                              node_map, platform)
                    # if len(generated_responses) == 0:
                    #     msg = 'Root user ID ' + str(root_user_id) + ' generated no responses.'
                    #     logger.warning(msg)
                    res = res + generated_responses
    return res
def _generate_responses(root_event_id, root_user_id, current_day_time, responses, node_map, platform):
    """Generate response events to one base event from per-user probabilities.

    ``responses`` maps each response type to a sparse matrix of
    user-to-user response probabilities. NOTE: the same random draw is
    compared against an entire probability row at once (one draw per
    response type), which is the intended thresholding here — do not
    reorder the RNG calls.
    """
    res = []
    # For each event type generate responses using associated probabilities
    for response_type in responses:
        # Get the user response probabilities for the given event type and root user id
        response_row = responses[response_type].getrow(root_user_id)

        # If the probability is below some threshold, zero it out
        # Have the users associated with the nonzero indices generate an event
        response_row[response_row < random.random()] = 0
        __, acting_indices = response_row.nonzero()

        # Generate random timestamps and find the associated user id for each new event
        time_stamps = [generate_random_time(current_day_time) for x in acting_indices]
        node_user_ids = [node_map[x] for x in acting_indices]

        res = res + [{'nodeID': generate_random_node_id(), 'nodeUserID': node_user_id, 'parentID': root_event_id,
                      'rootID': root_event_id, 'actionType': response_type, 'nodeTime': node_time,
                      'platform': platform} for
                     node_user_id, node_time in zip(node_user_ids, time_stamps)]
    return res
| 40.572519 | 133 | 0.660207 | 2,066 | 0.388711 | 0 | 0 | 1,303 | 0.245155 | 0 | 0 | 1,548 | 0.291251 |
2a6482a0a0ce793fafb8fa2a355bc1331003895e | 13,174 | py | Python | flee/pmicro_flee.py | jataware/flee | 67c00c4572e71dd2bbfb390d7d7ede13ffb9594e | [
"BSD-3-Clause"
] | null | null | null | flee/pmicro_flee.py | jataware/flee | 67c00c4572e71dd2bbfb390d7d7ede13ffb9594e | [
"BSD-3-Clause"
] | null | null | null | flee/pmicro_flee.py | jataware/flee | 67c00c4572e71dd2bbfb390d7d7ede13ffb9594e | [
"BSD-3-Clause"
] | null | null | null | import datetime
import math
import os
import sys
from functools import wraps
from typing import Optional, Tuple
from flee import pflee
from flee.SimulationSettings import SimulationSettings # noqa, pylint: disable=W0611
# Runtime type checking is opt-in: when FLEE_TYPE_CHECK=true, use beartype to
# enforce the annotations; otherwise check_args_type is a no-op decorator.
if os.getenv("FLEE_TYPE_CHECK") is not None and os.environ["FLEE_TYPE_CHECK"].lower() == "true":
    from beartype import beartype as check_args_type
else:

    def check_args_type(func):
        # Pass-through decorator preserving the wrapped function's metadata.
        @wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        return wrapper
class MPIManager(pflee.MPIManager):
    """
    Thin pmicro_flee alias of :class:`pflee.MPIManager`; adds no behaviour of
    its own, but gives this module its own class hierarchy to extend later.
    """

    def __init__(self):
        super().__init__()
class Person(pflee.Person):
    """
    Thin pmicro_flee alias of :class:`pflee.Person`; behaviour is inherited
    unchanged from the pflee implementation.
    """

    def __init__(self, e, location):
        super().__init__(e, location)
class Location(pflee.Location):
    """
    Thin pmicro_flee alias of :class:`pflee.Location`; forwards every
    constructor argument unchanged to the pflee implementation.
    """

    @check_args_type
    def __init__(
        self,
        e,
        cur_id: int,
        name: str,
        x: float = 0.0,
        y: float = 0.0,
        location_type: Optional[str] = None,
        movechance: float = 0.001,
        capacity: int = -1,
        pop: int = 0,
        foreign: bool = False,
        country: str = "unknown",
    ) -> None:
        # Pure delegation; keyword form keeps the mapping explicit.
        super().__init__(
            e=e,
            cur_id=cur_id,
            name=name,
            x=x,
            y=y,
            location_type=location_type,
            movechance=movechance,
            capacity=capacity,
            pop=pop,
            foreign=foreign,
            country=country,
        )
class Ecosystem(pflee.Ecosystem):
    """pflee.Ecosystem whose linkUp creates type-tagged (weather-aware) links."""

    def __init__(self):
        super().__init__()

    @check_args_type
    def linkUp(
        self,
        endpoint1: str,
        endpoint2: str,
        distance: float = 1.0,
        forced_redirection: bool = False,
        link_type: str = None,
    ) -> None:
        """
        Creates a bidirectional link between two endpoint locations.

        Only the forward link carries ``forced_redirection`` and
        ``link_type``; the return link is a plain link. Exits the process
        if either endpoint name is unknown.
        """
        def last_index_of(target):
            # Scan the whole list so a duplicate name resolves to its
            # last occurrence, matching the original lookup semantics.
            found = -1
            for idx, known_name in enumerate(self.locationNames):
                if known_name == target:
                    found = idx
            return found

        src_index = last_index_of(endpoint1)
        dst_index = last_index_of(endpoint2)

        if src_index < 0:
            print("Diagnostic: Ecosystem.locationNames: ", self.locationNames)
            print(
                "Error: link created to non-existent source: {} with dest {}".format(
                    endpoint1, endpoint2
                )
            )
            sys.exit()
        if dst_index < 0:
            print("Diagnostic: Ecosystem.locationNames: ", self.locationNames)
            print(
                "Error: link created to non-existent destination: {} with source {}".format(
                    endpoint2, endpoint1
                )
            )
            sys.exit()

        forward_link = Link(
            startpoint=self.locations[src_index],
            endpoint=self.locations[dst_index],
            distance=distance,
            forced_redirection=forced_redirection,
            link_type=link_type,
        )
        return_link = Link(
            startpoint=self.locations[dst_index],
            endpoint=self.locations[src_index],
            distance=distance,
        )
        self.locations[src_index].links.append(forward_link)
        self.locations[dst_index].links.append(return_link)
# -------------------------------------------------------------------------
# modified version of class Link for weather coupling
# -------------------------------------------------------------------------
class Link(pflee.Link):
    """
    pflee.Link extended with a ``link_type`` tag (e.g. "crossing"), which the
    weather-coupled link variant below uses to pick its distance model.
    """

    @check_args_type
    def __init__(
        self,
        startpoint,
        endpoint,
        distance: float,
        forced_redirection: bool = False,
        link_type: str = None,
    ):
        super().__init__(startpoint, endpoint, distance, forced_redirection)
        self.link_type = link_type
# Module-level registry of weather input data (precipitation / river-discharge
# DataFrames, conflict start date); populated by the driver before links exist.
weather_source_files = {}
class Link_weather_coupling(pflee.Link):
    """
    A link whose effective distance is modulated by weather data: local
    precipitation for ordinary links, and river discharge for links tagged
    ``link_type == "crossing"``.  Reads its inputs from the module-level
    ``weather_source_files`` registry, which must be populated beforehand.
    """

    @check_args_type
    def __init__(
        self,
        startpoint,
        endpoint,
        distance: float,
        forced_redirection: bool = False,
        link_type: Optional[str] = None,
    ):
        self.name = "__link__"
        self.closed = False

        # distance in km.
        self.__distance = float(distance)

        # links for now always connect two endpoints
        self.startpoint = startpoint
        self.endpoint = endpoint

        # number of agents that are in transit.
        self.numAgents = 0

        # refugee population on current rank (for pflee).
        self.numAgentsOnRank = 0

        # if True, then all Persons will go down this link.
        self.forced_redirection = forced_redirection

        self.link_type = link_type

        # Geographic midpoint of the link and the two precipitation
        # thresholds used by get_distance().
        self.latMid, self.lonMid = self.midpoint()
        self.X1, self.X2 = self.X1_X2()

        # Keep only the precipitation column(s) named after this link,
        # in either direction ("A - B" or "B - A").
        df = weather_source_files["precipitation"]
        link_direct = self.startpoint.name + " - " + self.endpoint.name
        link_reverse = self.endpoint.name + " - " + self.startpoint.name
        self.prec = df.loc[:, df.columns.isin([link_direct, link_reverse])]

        if self.link_type == "crossing":
            # For river crossings, pre-select the discharge series of the
            # gauging point closest to the link midpoint.
            self.discharge = weather_source_files["river_discharge"]
            self.discharge_dict = self.discharge[["lat", "lon"]].to_dict("records")
            self.closest_location = self.closest(
                data=self.discharge_dict, v={"lat": self.latMid, "lon": self.lonMid}
            )
            self.dl = self.discharge[
                (self.discharge["lat"] == self.closest_location["lat"])
                & (self.discharge["lon"] == self.closest_location["lon"])
            ]

    def DecrementNumAgents(self):
        """Decrease the number of agents currently in transit on this link."""
        self.numAgents -= 1

    def IncrementNumAgents(self):
        """Increase the number of agents currently in transit on this link."""
        self.numAgents += 1

    @check_args_type
    def get_start_date(self, time: int):
        """
        Return the calendar date ("YYYY-MM-DD") that is *time* days after the
        configured conflict start date.

        Args:
            time (int): simulation day offset.

        Returns:
            str: the corresponding date string.
        """
        start_date = weather_source_files["conflict_start_date"]
        date = datetime.datetime.strptime(start_date, "%Y-%m-%d").date()
        date += datetime.timedelta(time)
        date = date.strftime("%Y-%m-%d")
        return date

    @check_args_type
    def midpoint(self) -> Tuple[float, float]:
        """
        This function returns the geographical midpoint of the two endpoint
        locations, rounded to whole degrees (to match the weather grid).

        Returns:
            Tuple[float, float]: (latitude, longitude) in degrees.
        """
        lat1 = math.radians(self.get_latitude(location_name=self.startpoint.name))
        lon1 = math.radians(self.get_longitude(location_name=self.startpoint.name))
        lat2 = math.radians(self.get_latitude(location_name=self.endpoint.name))
        lon2 = math.radians(self.get_longitude(location_name=self.endpoint.name))

        # Great-circle midpoint formula.
        bx = math.cos(lat2) * math.cos(lon2 - lon1)
        by = math.cos(lat2) * math.sin(lon2 - lon1)
        latMid = math.atan2(
            math.sin(lat1) + math.sin(lat2),
            math.sqrt((math.cos(lat1) + bx) * (math.cos(lat1) + bx) + by ** 2),
        )
        lonMid = lon1 + math.atan2(by, math.cos(lat1) + bx)

        latMid = round(math.degrees(latMid), 2)
        lonMid = round(math.degrees(lonMid), 2)

        # Rounded again to whole degrees — presumably to snap onto the
        # weather dataset's grid resolution. TODO confirm.
        latMid = float(round(latMid))
        lonMid = float(round(lonMid))

        return latMid, lonMid

    @check_args_type
    def get_longitude(self, location_name: str) -> float:
        """
        This function returns the longitude of given location name based on 40 years dataset of
        South Sudan total precipitation.

        Args:
            location_name (str): name of the location to look up.

        Returns:
            float: mean longitude recorded for that location.
        """
        history = weather_source_files["40yrs_total_precipitation"]
        coordination = history[history["names"] == location_name]
        longitude = coordination["longitude"].mean()
        return longitude

    @check_args_type
    def get_latitude(self, location_name: str) -> float:
        """
        This function returns the latitude of given location name based on 40 years dataset of
        South Sudan total precipitation.

        Args:
            location_name (str): name of the location to look up.

        Returns:
            float: mean latitude recorded for that location.
        """
        history = weather_source_files["40yrs_total_precipitation"]
        coordination = history[history["names"] == location_name]
        latitude = coordination["latitude"].mean()
        return latitude

    @check_args_type
    def X1_X2(self) -> Tuple[float, float]:
        """
        This function returns the two thresholds X1 and X2, taken as the 15th
        and 75th percentiles of the historical precipitation at the grid cell
        nearest to the link midpoint.
        The way of calculating threshholds needs to be discussed!

        Returns:
            Tuple[float, float]: (X1, X2) precipitation thresholds.
        """
        # print(link)
        X1 = []
        X2 = []
        history = weather_source_files["40yrs_total_precipitation"]

        # Select the history rows at the midpoint latitude, falling back to
        # the nearest available latitude when there is no exact match.
        latitude = history[history["latitude"] == self.latMid]
        if latitude.empty:
            result_index = history.iloc[(history["latitude"] - self.latMid).abs().argsort()[:1]]
            latitude_index = result_index["latitude"].to_numpy()
            latitude = history[history["latitude"] == float(latitude_index)]

        # Same nearest-match fallback for longitude.
        treshhold_tp = latitude[latitude["longitude"] == self.lonMid]
        if treshhold_tp.empty:
            result_index = latitude.iloc[(latitude["longitude"] - self.lonMid).abs().argsort()[:1]]
            longitude_index = result_index["longitude"].to_numpy()
            treshhold_tp = latitude[latitude["longitude"] == float(longitude_index)]

        X1 = treshhold_tp["tp"].quantile(q=0.15)
        X2 = treshhold_tp["tp"].quantile(q=0.75)

        return X1, X2

    @check_args_type
    def haversine_distance(self, lat1: float, lon1: float, lat2: float, lon2: float) -> float:
        """
        Great-circle distance (haversine) between two points, in km.

        Args:
            lat1 (float): latitude of the first point, degrees.
            lon1 (float): longitude of the first point, degrees.
            lat2 (float): latitude of the second point, degrees.
            lon2 (float): longitude of the second point, degrees.

        Returns:
            float: distance in kilometres (Earth diameter taken as 12742 km).
        """
        p = 0.017453292519943295  # pi / 180: degrees-to-radians factor
        a = (
            0.5
            - math.cos((lat2 - lat1) * p) / 2
            + math.cos(lat1 * p) * math.cos(lat2 * p) * (1 - math.cos((lon2 - lon1) * p)) / 2
        )
        return 12742 * math.asin(math.sqrt(a))

    def closest(self, data, v):
        """
        Return the record in *data* (dicts with "lat"/"lon") geographically
        closest to point *v*.

        Args:
            data (list[dict]): candidate points.
            v (dict): reference point with "lat" and "lon" keys.

        Returns:
            dict: the closest candidate.
        """
        return min(
            data,
            key=lambda p: self.haversine_distance(
                lat1=v["lat"], lon1=v["lon"], lat2=p["lat"], lon2=p["lon"]
            ),
        )

    @check_args_type
    def get_distance(self, time: int) -> float:
        """
        Effective link distance on simulation day *time*: the base distance
        scaled by a weather multiplier (x1, x2, or x10000 for an effectively
        closed link).

        Args:
            time (int): simulation day offset.

        Returns:
            float: the weather-adjusted distance.
        """
        if len(weather_source_files) == 0:
            print("Error!!! there is NO input file names for weather coupling")
            sys.exit()
        elif self.link_type == "crossing":
            # River crossings close (x10000) when discharge at the nearest
            # gauge exceeds the threshold.
            date = self.get_start_date(time=time)
            dis_level = self.dl[self.dl["time"] == date].iloc[0]["dis24"]
            dis_threshold = 8000
            # log_flag = False
            if dis_level < dis_threshold:
                new_distance = self.__distance * 1
            else:
                new_distance = self.__distance * 10000
                # log_flag = True
        else:
            # Ordinary links scale with the day's precipitation relative to
            # the historical thresholds X1/X2.
            # log_flag = False
            tp = self.prec.loc[self.prec.index[time]].values[0]
            if tp <= self.X1:
                new_distance = self.__distance * 1
            elif tp <= self.X2:
                new_distance = self.__distance * 2
                # log_flag = True
            elif tp > self.X2 and tp > 15:
                new_distance = self.__distance * 10000
                # log_flag = True
            else:
                new_distance = self.__distance * 2
                # log_flag = True

        """
        if log_flag is True:
            log_file = weather_source_files["output_log"]
            with open(log_file, "a+") as f:
                f.write(
                    "day {} distance between {} - {} "
                    "change from {} --> {}\n".format(
                        time, self.startpoint.name,
                        self.endpoint.name, self.__distance, new_distance
                    )
                )
                f.flush()
        """

        return new_distance
# This module is library code; there is no standalone demo yet.
if __name__ == "__main__":
    print("No testing functionality here yet.")
| 29.340757 | 99 | 0.546759 | 12,300 | 0.933657 | 0 | 0 | 11,110 | 0.843328 | 0 | 0 | 3,959 | 0.300516 |
2a6744398cdf360569c0ec830105f233ecaacbeb | 2,211 | py | Python | src/my_pubsub.py | shirosweets/vosk-speech-to-text | 4667b107dd3ba174435e8deab1c122d83381e902 | [
"MIT"
] | 1 | 2021-04-16T01:49:39.000Z | 2021-04-16T01:49:39.000Z | src/my_pubsub.py | shirosweets/vosk-speech-to-text | 4667b107dd3ba174435e8deab1c122d83381e902 | [
"MIT"
] | null | null | null | src/my_pubsub.py | shirosweets/vosk-speech-to-text | 4667b107dd3ba174435e8deab1c122d83381e902 | [
"MIT"
] | null | null | null | import os
import datetime
from json import dumps
from google.cloud import pubsub_v1
from config_speech import config as conf
from google.api_core.exceptions import AlreadyExists
class Pubsub():
def __init__(self):
PROJECT_PATH = "projects/project_name"
TOPIC_NAME = 'topic_name'
self.TOPIC_PATH = f"{PROJECT_PATH}/topics/"+TOPIC_NAME
self.SUBSCRIPTION_PATH = f"{PROJECT_PATH}/subscriptions/"+'topic_name'
self._set_google_credentials()
def _set_google_credentials(self):
print("Pubsub _set_google...")
credentials_path = conf.FILE_PATH_GOOGLE_CREDENTIALS
gkey = "GOOGLE_APPLICATION_CREDENTIALS"
if os.getenv(gkey, default=None) is None:
try:
CRED_DIR = os.getcwd() + credentials_path
os.environ[gkey] = CRED_DIR
except Exception as e:
print(f"Error : {e}\n")
print(f"I can't use {credentials_path}")
raise(e)
class Subscriber(Pubsub):
def __init__(self, callback):
Pubsub.__init__(self)
self.callback = callback
def _subscribe(self):
self.subscriber = pubsub_v1.SubscriberClient()
self.flow_control = pubsub_v1.types.FlowControl(max_messages=6)
try:
self.subscriber.create_subscription(
name=self.SUBSCRIPTION_PATH,
topic=self.TOPIC_PATH)
except AlreadyExists:
print(f"Already Exists")
except Exception as e:
print(f"Error: {e}")
# Wrap subscriber
self.f = self.subscriber.subscribe(
self.SUBSCRIPTION_PATH,
callback=self.callback,
flow_control=self.flow_control)
def listen_messages(self):
# Wrap subscriber
self._subscribe()
with self.subscriber:
try:
print("Listening messages...")
self.f.result()
print(self.f.result())
except KeyboardInterrupt:
self.f.cancel()
print("\nInterrumpted listening messages...")
except Exception as e:
print(f"Error listening messages: {e}")
| 33 | 78 | 0.602895 | 2,026 | 0.916327 | 0 | 0 | 0 | 0 | 0 | 0 | 365 | 0.165084 |
2a67605a117277819275759c2973c2e8735861bc | 2,497 | py | Python | ui/_alert.py | isbm/pybug | 406e47d0b75961d8b8a984b02053a9182f96c56d | [
"MIT"
] | null | null | null | ui/_alert.py | isbm/pybug | 406e47d0b75961d8b8a984b02053a9182f96c56d | [
"MIT"
] | null | null | null | ui/_alert.py | isbm/pybug | 406e47d0b75961d8b8a984b02053a9182f96c56d | [
"MIT"
] | 1 | 2020-03-04T10:11:41.000Z | 2020-03-04T10:11:41.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'xml/alert.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(287, 171)
Dialog.setWindowOpacity(0.8)
Dialog.setAutoFillBackground(False)
Dialog.setSizeGripEnabled(False)
Dialog.setModal(True)
self.verticalLayout_2 = QtGui.QVBoxLayout(Dialog)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(Dialog)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.verticalLayout_2.addLayout(self.verticalLayout)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.okButton = QtGui.QPushButton(Dialog)
self.okButton.setObjectName(_fromUtf8("okButton"))
self.horizontalLayout.addWidget(self.okButton)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.okButton, QtCore.SIGNAL(_fromUtf8("clicked()")), Dialog.on_alert)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Alert", None))
self.label.setText(_translate("Dialog", "TextLabel", None))
self.okButton.setText(_translate("Dialog", "Close", None))
| 40.934426 | 103 | 0.716059 | 1,841 | 0.737285 | 0 | 0 | 0 | 0 | 0 | 0 | 327 | 0.130957 |
2a68fb1e3fb5aa04aab2b0e5a4bef92bf6a0aa37 | 689 | py | Python | scripts/client_lidar.py | Paulllit/projet_proto_lidar | a84c80261c81ab08e365c575c416863c06355b71 | [
"BSD-2-Clause"
] | null | null | null | scripts/client_lidar.py | Paulllit/projet_proto_lidar | a84c80261c81ab08e365c575c416863c06355b71 | [
"BSD-2-Clause"
] | null | null | null | scripts/client_lidar.py | Paulllit/projet_proto_lidar | a84c80261c81ab08e365c575c416863c06355b71 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import rospy
import math
from sensor_msgs.msg import LaserScan #import des donnes laser du lidar
import time
from rplidar_ros.srv import *
class Client():
vel=None
angle_est=None
#Init
def __init__(self):
rospy.init_node('client_lidar')
def listen_angle(self):
r = rospy.Rate(10)
while not rospy.is_shutdown():
self.angle_est=rospy.ServiceProxy('angle',angle)
self.angle_est()
print(self.angle_est().angle_mes)
########################################################## Main ######################################################
if __name__ == "__main__":
l=Client()
l.listen_angle()
rospy.spin()
| 23.758621 | 118 | 0.56894 | 325 | 0.471698 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.301887 |
2a695f1df515fd4839a432790f5921c3bfa4f906 | 681 | py | Python | tests/util/config_mixin.py | pruh/backee | 3e88d799a50f75033ae15d6c16655da8ae791886 | [
"MIT"
] | null | null | null | tests/util/config_mixin.py | pruh/backee | 3e88d799a50f75033ae15d6c16655da8ae791886 | [
"MIT"
] | 2 | 2020-01-22T22:21:02.000Z | 2020-01-22T22:28:37.000Z | tests/util/config_mixin.py | pruh/backee | 3e88d799a50f75033ae15d6c16655da8ae791886 | [
"MIT"
] | null | null | null | import os
import logging
from unittest import mock
from backee.parser.config_parser import parse_config
class ConfigMixin:
def __get_config_file_contents(self, filename: str):
config_file = os.path.join(
os.path.dirname(__file__), os.pardir, "resources", filename
)
with open(config_file) as f:
return f.read()
def _get_parsed_config(self, filename: str):
mock_config = mock.mock_open(
read_data=self.__get_config_file_contents(filename)
)
with mock.patch("builtins.open", mock_config, create=True):
parsed_config = parse_config(filename=None)
return parsed_config
| 29.608696 | 71 | 0.674009 | 573 | 0.84141 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.038179 |
2a6a3ba6e47587bb91c1059380ff1f854f20f597 | 2,022 | py | Python | utils/__init__.py | KpaBap/palbot | 38d2b7958e310f45a28cf1b3173967b92f819946 | [
"MIT"
] | null | null | null | utils/__init__.py | KpaBap/palbot | 38d2b7958e310f45a28cf1b3173967b92f819946 | [
"MIT"
] | null | null | null | utils/__init__.py | KpaBap/palbot | 38d2b7958e310f45a28cf1b3173967b92f819946 | [
"MIT"
] | null | null | null | #from utils import ordinal
import re
from urllib.parse import quote as uriquote
import asyncio
from bs4 import BeautifulSoup
import collections
from utils.context import MoreContext
from utils.context import Location
from utils.paginator import Paginator
from utils.units import units
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(n//10%10!=1)*(n%10<4)*n%10::4])
tagregex = re.compile(r'<.*?>')
def remove_html_tags(data):
#removes all html tags from a given string
return tagregex.sub('', data)
async def google_for_urls(bot, search_term, *, url_regex=None, return_full_data=False):
url = 'https://www.googleapis.com/customsearch/v1?key={}&cx={}&q={}'
url = url.format(bot.config.gsearchapi,
bot.config.gsearchcx, uriquote(search_term))
async with bot.session.get(url) as resp:
json = await resp.json()
if resp.status != 200:
print(resp, json)
return
if return_full_data:
return json['items']
if 'items' not in json:
return None
results = []
for result in json['items']:
if url_regex:
check = re.search(url_regex, result['link'])
if not check:
continue
results.append(result['link'].replace('%25', '%'))
return results
async def bs_from_url(bot, url, return_url=False):
headers = {'User-Agent': 'Mozilla/5.0 PalBot'}
async with bot.session.get(url, headers=headers) as resp:
assert "text" in resp.headers['Content-Type']
data = await resp.read()
page = BeautifulSoup(data, 'lxml')
if return_url:
return page, resp.url
else:
return page
def dict_merge(dct, merge_dct):
for k, v in merge_dct.items():
if (k in dct and isinstance(dct[k], dict)
and isinstance(merge_dct[k], collections.Mapping)):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
| 30.636364 | 87 | 0.607319 | 0 | 0 | 0 | 0 | 0 | 0 | 1,231 | 0.608803 | 255 | 0.126113 |
2a6ac7f9df2f1802e7060c999c2722525fb5cfb6 | 647 | py | Python | tests/expectations/cat-hs-x-cat-date-smoothed-col-pct-w3.py | Crunch-io/crunch-cube | 80986d5b2106c774f05176fb6c6a5ea0d840f09d | [
"MIT"
] | 3 | 2021-01-22T20:42:31.000Z | 2021-06-02T17:53:19.000Z | tests/expectations/cat-hs-x-cat-date-smoothed-col-pct-w3.py | Crunch-io/crunch-cube | 80986d5b2106c774f05176fb6c6a5ea0d840f09d | [
"MIT"
] | 331 | 2017-11-13T22:41:56.000Z | 2021-12-02T21:59:43.000Z | tests/expectations/cat-hs-x-cat-date-smoothed-col-pct-w3.py | Crunch-io/crunch-cube | 80986d5b2106c774f05176fb6c6a5ea0d840f09d | [
"MIT"
] | 1 | 2021-02-19T02:49:00.000Z | 2021-02-19T02:49:00.000Z | [
[float("NaN"), float("NaN"), 73.55631426, 76.45173763],
[float("NaN"), float("NaN"), 71.11031587, 73.6557548],
[float("NaN"), float("NaN"), 11.32891221, 9.80444014],
[float("NaN"), float("NaN"), 10.27812002, 8.15602626],
[float("NaN"), float("NaN"), 21.60703222, 17.9604664],
[float("NaN"), float("NaN"), 2.44599839, 2.79598283],
[float("NaN"), float("NaN"), 0.61043458, 0.41397093],
[float("NaN"), float("NaN"), 2.65390256, 2.9383991],
[float("NaN"), float("NaN"), 1.36865038, 1.84961059],
[float("NaN"), float("NaN"), 0.20366599, 0.38581535],
[float("NaN"), float("NaN"), 1.57231637, 2.23542594],
]
| 46.214286 | 59 | 0.585781 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.170015 |
2a6c2e36e5eed7008fca91c8906b878bedc5fb28 | 242 | py | Python | 02-Tuples-and-sets/count_same_values.py | nmoskova/Python-advanced | 007f496e868aa151e39d79446b055e76ffb2db95 | [
"MIT"
] | null | null | null | 02-Tuples-and-sets/count_same_values.py | nmoskova/Python-advanced | 007f496e868aa151e39d79446b055e76ffb2db95 | [
"MIT"
] | null | null | null | 02-Tuples-and-sets/count_same_values.py | nmoskova/Python-advanced | 007f496e868aa151e39d79446b055e76ffb2db95 | [
"MIT"
] | null | null | null | numbers = [float(num) for num in input().split()]
nums_dict = {}
for el in numbers:
if el not in nums_dict:
nums_dict[el] = 0
nums_dict[el] += 1
[print(f"{el:.1f} - {occurence} times") for el, occurence in nums_dict.items()] | 26.888889 | 79 | 0.628099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.128099 |
2a6d411eb9f892a9e63182fd8d2cb98e424999b0 | 3,021 | py | Python | main.py | maxc0d3r/google-workspace-manager | d2a4e561ae8278455cd5b0c03283a413ebeaff54 | [
"MIT"
] | null | null | null | main.py | maxc0d3r/google-workspace-manager | d2a4e561ae8278455cd5b0c03283a413ebeaff54 | [
"MIT"
] | null | null | null | main.py | maxc0d3r/google-workspace-manager | d2a4e561ae8278455cd5b0c03283a413ebeaff54 | [
"MIT"
] | null | null | null | """
Usage:
main.py domains list
main.py domains add [--body=<request_body>] [--file=<input_file>]
main.py users list [--domain=<domain_name>]
main.py users get [--email=<email>]
main.py users add [--body=<request_body>] [--file=<input_file>]
main.py users update [--email=<email>] [--body=<request_body>] [--file=<input_file>]
main.py (-h | --help)
main.py (-V | --version)
Options:
-h --help Show this screen
-v --version Show version
--body=<request_body> Request body in JSON format
--domain=<domain_name> Domain name
--file=<input_file> JSON file
"""
from docopt import docopt
import csv
import os
import json
import random
import string
import gwm.directory.domains
import gwm.directory.users
import gwm.directory.groups
from gwm.directory.domains import list_domains, add_domain, delete_domain
from gwm.directory.users import list_users, add_user, delete_user, get_user, update_user
APP_VERSION='0.0.0'
def generate_password():
source = string.ascii_letters + string.digits
password = ''.join((random.choice(source) for i in range(10)))
return password
def main(args):
customer_id = os.getenv('CUSTOMER_ID')
if args['domains']:
if args['list']:
domains = list_domains(customer_id)
print(json.dumps(domains,indent=1))
elif args['add']:
if args['--file']:
with open(args['--file'],'r') as f:
body = json.load(f)
response = add_domain(customer_id, body)
else:
response = add_domain(customer_id, json.loads(args['--body']))
print(json.dumps(response,indent=1))
elif args['get']:
pass
elif args['delete']:
pass
elif args['users']:
if args['list']:
users = list_users(args['--domain'])
print(json.dumps(users,indent=1))
elif args['add']:
if args['--file']:
with open(args['--file'],'r') as f:
body = json.load(f)
response = add_user(body)
else:
request_body = json.loads(args['--body'])
response = add_user(request_body)
print(json.dumps(response,indent=1))
elif args['get']:
response = get_user(args['--email'])
print(json.dumps(response,indent=1))
elif args['update']:
if args['--file']:
with open(args['--file'],'r') as f:
body = json.load(f)
response = update_user(args['--email'],body)
else:
request_body = json.loads(args['--body'])
response = update_user(args['--email'],request_body)
print(json.dumps(response,indent=1))
else:
pass
if __name__ == "__main__":
arguments = docopt(__doc__, version='Google Workspace Manager {}'.format(APP_VERSION))
main(arguments)
| 34.329545 | 90 | 0.568355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 883 | 0.292287 |
2a6e1a8bb73dc390e5d24fbf0db6bcb3ee7ba1cf | 266 | py | Python | run_tests.py | Knitschi/CPFBuildscripts | ee20e4b08b3e063535750d019c6e730e55426cce | [
"MIT"
] | 1 | 2018-02-12T14:30:39.000Z | 2018-02-12T14:30:39.000Z | run_tests.py | Knitschi/CPFBuildscripts | ee20e4b08b3e063535750d019c6e730e55426cce | [
"MIT"
] | 8 | 2018-04-18T12:40:13.000Z | 2019-05-19T17:00:56.000Z | run_tests.py | Knitschi/CPFBuildscripts | ee20e4b08b3e063535750d019c6e730e55426cce | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import unittest
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from python.buildautomat_unit_tests import *
from python.filesystemaccess_unit_tests import *
if __name__ == '__main__':
unittest.main()
| 19 | 60 | 0.766917 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.105263 |
2a6ec9b3a897e3fc51ab524a28ca41d20795c9f3 | 1,790 | py | Python | django_formset_vuejs/forms.py | fatse/django-formsets-vuejs | c2d6e134e58c3905539eb7c5322aeeda1db7fad4 | [
"MIT"
] | 11 | 2020-10-24T04:00:01.000Z | 2022-03-07T01:14:28.000Z | django_formset_vuejs/forms.py | fatse/django-formsets-vuejs | c2d6e134e58c3905539eb7c5322aeeda1db7fad4 | [
"MIT"
] | 5 | 2021-03-19T08:08:16.000Z | 2021-06-21T01:26:25.000Z | django_formset_vuejs/forms.py | fatse/django-formsets-vuejs | c2d6e134e58c3905539eb7c5322aeeda1db7fad4 | [
"MIT"
] | 2 | 2021-05-02T18:43:32.000Z | 2022-03-10T08:23:16.000Z | from django.forms import ModelForm, inlineformset_factory, BaseInlineFormSet
from . import models
class AuthorContainerForm(ModelForm):
class Meta:
model = models.AuthorContainer
exclude = ('id',)
class AuthorForm(ModelForm):
class Meta:
model = models.Author
fields = ('first_name', 'last_name')
class BookForm(ModelForm):
class Meta:
model = models.Book
fields = ('title', 'isbn',)
BookFormset = inlineformset_factory(models.Author, models.Book, form=BookForm, can_delete=True, extra=0)
class BaseAuthorFormset(BaseInlineFormSet):
def add_fields(self, form, index):
super(BaseAuthorFormset, self).add_fields(form, index)
form.nested_book = BookFormset(
instance=form.instance,
data=form.data if form.is_bound else None,
files=form.files if form.is_bound else None,
prefix='nested_book-%s-%s' % (
form.prefix,
BookFormset.get_default_prefix()))
def is_valid(self):
result = super(BaseAuthorFormset, self).is_valid()
if self.is_bound:
for form in self.forms:
if hasattr(form, 'nested_book'):
result = result and form.nested_book.is_valid()
return result
def save(self, commit=True):
result = super(BaseAuthorFormset, self).save(commit=commit)
for form in self.forms:
if hasattr(form, 'nested_book'):
if not self._should_delete_form(form):
form.nested_book.save(commit=commit)
return result
AuthorsFormset = inlineformset_factory(models.AuthorContainer, models.Author, formset=BaseAuthorFormset,
form=AuthorForm, extra=0)
| 29.833333 | 104 | 0.62905 | 1,401 | 0.782682 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.047486 |
2a6ed08ae7b12c0e3073c649e6772dd81c550a9f | 74 | py | Python | test/__init__.py | shikajiro/picosdk-python-wrappers | c441870e1d39aa6a85b3cfd5acb1b3968669a7cf | [
"0BSD"
] | 114 | 2018-10-05T07:11:20.000Z | 2022-03-14T15:51:15.000Z | test/__init__.py | shikajiro/picosdk-python-wrappers | c441870e1d39aa6a85b3cfd5acb1b3968669a7cf | [
"0BSD"
] | 26 | 2018-09-24T13:43:55.000Z | 2021-05-03T21:18:02.000Z | test/__init__.py | shikajiro/picosdk-python-wrappers | c441870e1d39aa6a85b3cfd5acb1b3968669a7cf | [
"0BSD"
] | 84 | 2018-09-14T16:05:32.000Z | 2022-03-30T19:25:56.000Z | #
# Copyright (C) 2018 Pico Technology Ltd. See LICENSE file for terms.
#
| 18.5 | 69 | 0.716216 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.959459 |
2a6f14b3af74899266a2956f8a6a90c52813e8dd | 6,105 | py | Python | BMES_exps/BMES.py | volgachen/Chinese-Tokenization | 467e08da6fe271b6e33258d5aa6682c0405a3f32 | [
"Apache-2.0"
] | null | null | null | BMES_exps/BMES.py | volgachen/Chinese-Tokenization | 467e08da6fe271b6e33258d5aa6682c0405a3f32 | [
"Apache-2.0"
] | null | null | null | BMES_exps/BMES.py | volgachen/Chinese-Tokenization | 467e08da6fe271b6e33258d5aa6682c0405a3f32 | [
"Apache-2.0"
] | 1 | 2020-07-12T10:38:34.000Z | 2020-07-12T10:38:34.000Z | from collections import Counter
from math import log
from tqdm import tqdm
import re
from evaluation import evaluateSet
def build_model(train_set):
hmm_model = {i:Counter() for i in 'SBME'}
trans = {'SS':0,
'SB':0,
'BM':0,
'BE':0,
'MM':0,
'ME':0,
'ES':0,
'EB':0
}
with open(train_set,'r',encoding='utf-8') as f:
cha = []
tag = []
for l in f:
l = l.split()
if (len(l) == 0) :
cha += " "
tag += " "
else:
cha += l[0]
tag += l[1]
for i in range(len(tag)):
if tag[i] != ' ':
hmm_model[tag[i]][cha[i]] += int(1)
if i+1<len(tag) and tag[i+1] != ' ':
trans[tag[i]+tag[i+1]] +=1
s_ = trans['SS'] + trans['SB']
trans['SS'] /= s_
trans['SB'] /= s_
b_ = trans['BM'] + trans['BE']
trans['BM'] /= b_
trans['BE'] /= b_
m_ = trans['MM'] + trans['ME']
trans['MM'] /= m_
trans['ME'] /= m_
e_ = trans['ES'] + trans['EB']
trans['ES'] /= e_
trans['EB'] /= e_
log_total = {i:log(sum(hmm_model[i].values())) for i in 'SBME'}
trans = {i:log(j) for i,j in trans.items()}
return hmm_model, trans, log_total
def viterbi(nodes):
paths = nodes[0]
for l in range(1, len(nodes)):
paths_ = paths
paths = {}
for i in nodes[l]:
nows = {}
for j in paths_:
if j[-1]+i in trans:
nows[j+i]=paths_[j]+nodes[l][i]+trans[j[-1]+i]
k = list(nows.values()).index(max(nows.values()))
paths[list(nows.keys())[k]] = list(nows.values())[k]
return list(paths.keys())[list(paths.values()).index(max(list(paths.values())))]
def hmm_cut(s):
nodes = [{i:log(j[t]+1)-log_total[i] for i,j in hmm_model.items()} for t in s]
tags = viterbi(nodes)
words = [s[0]]
for i in range(1, len(s)):
if tags[i] in ['B', 'S']:
words.append(s[i])
else:
words[-1] += s[i]
return words
def changenum(ustring):
rstr = ""
for uchar in ustring:
unic=ord(uchar)
if unic == 12288:
unic = 32
elif (65296 <= unic <= 65305) or (65345 <= unic <= 65370) or (65313 <= unic <= 65338):
unic -= 65248
rstr += chr(unic)
# 所有数字改为 0
rstr = re.sub(r"\d+\.?\d*", "0", rstr)
# 所有英文单词改为 1
rstr = re.sub(r"[a-zA-Z]+\/", "1/", rstr)
return rstr
if __name__ == '__main__':
print("Train Set: PKU; Test Set: Weibo, w/o re-replacement")
hmm_model, trans, log_total = build_model("BMES_corpus/rmrb_BMES.txt")
# load test set without number and english replace
nlpcc_f = open('data/nlpcc2016-wordseg-dev.dat', 'r', encoding='utf-8')
lines = nlpcc_f.readlines()
lines = [line.strip().split() for line in lines]
nlpcc_f.close()
# Test with Simple 2-gram model
results = []
for line in tqdm(lines):
ori_line = ''.join(line)
res = hmm_cut(ori_line)
results.append(res)
evaluateSet(results, lines)
print("Train Set: PKU; Test Set: Weibo, w/ re-replacement")
hmm_model, trans, log_total = build_model("BMES_corpus/rmrb_BMES_nonum.txt")
# load test set without number and english replace
nlpcc_f = open('data/nlpcc2016-wordseg-dev.dat', 'r', encoding='utf-8')
lines = nlpcc_f.readlines()
lines = [changenum(line) for line in lines]
lines = [line.strip().split() for line in lines]
nlpcc_f.close()
# Test with Simple 2-gram model
results = []
for line in tqdm(lines):
ori_line = ''.join(line)
res = hmm_cut(ori_line)
results.append(res)
evaluateSet(results, lines)
print("Train Set: MSR; Test Set: PKU, w/ re-replacement")
hmm_model, trans, log_total = build_model("BMES_corpus/msr_BMES_nonum.txt")
# load test set without number and english replace
nlpcc_f = open('data/nlpcc2016-wordseg-dev.dat', 'r', encoding='utf-8')
lines = nlpcc_f.readlines()
lines = [changenum(line) for line in lines]
lines = [line.strip().split() for line in lines]
nlpcc_f.close()
# Test with Simple 2-gram model
results = []
for line in tqdm(lines):
ori_line = ''.join(line)
res = hmm_cut(ori_line)
results.append(res)
evaluateSet(results, lines)
print("Train Set: PKU; Test Set: PKU, w/ re-replacement")
hmm_model, trans, log_total = build_model("BMES_corpus/rmrb_BMES_nonum.txt")
# load test set without number and english replace
nlpcc_f = open('BMES_corpus/pku_training.utf8', 'r', encoding='utf-8')
lines = nlpcc_f.readlines()
lines = [changenum(line) for line in lines]
lines = [line.strip().split() for line in lines]
lines = [line for line in lines if len(line)]
nlpcc_f.close()
# Test with Simple 2-gram model
results = []
for line in tqdm(lines):
ori_line = ''.join(line)
res = hmm_cut(ori_line)
results.append(res)
evaluateSet(results, lines)
print("Train Set: MSR; Test Set: MSR, w/ re-replacement")
hmm_model, trans, log_total = build_model("BMES_corpus/msr_BMES_nonum.txt")
# load test set without number and english replace
nlpcc_f = open('BMES_corpus/msr_training.utf8', 'r', encoding='utf-8')
lines = nlpcc_f.readlines()
lines = [changenum(line) for line in lines]
lines = [line.strip().split() for line in lines]
lines = [line for line in lines if len(line)]
nlpcc_f.close()
# Test with Simple 2-gram model
results = []
for line in tqdm(lines):
ori_line = ''.join(line)
res = hmm_cut(ori_line)
results.append(res)
evaluateSet(results, lines)
| 30.989848 | 95 | 0.53923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,282 | 0.209033 |
2a6f4b7eb3da6c0aa67838ecc1441c566fdf5e21 | 5,770 | py | Python | third_party/nucleus/io/bedgraph.py | llevar/deepvariant | aba55537eec832e6cea678349422124ef50680f4 | [
"BSD-3-Clause"
] | 1 | 2020-11-28T13:11:08.000Z | 2020-11-28T13:11:08.000Z | third_party/nucleus/io/bedgraph.py | hcxiong/deepvariant | aba55537eec832e6cea678349422124ef50680f4 | [
"BSD-3-Clause"
] | null | null | null | third_party/nucleus/io/bedgraph.py | hcxiong/deepvariant | aba55537eec832e6cea678349422124ef50680f4 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2018 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Classes for reading and writing BedGraph files.
The BedGraph format is described at
https://genome.ucsc.edu/goldenpath/help/bedgraph.html
API for reading:
```python
from third_party.nucleus.io import bedgraph
# Iterate through all records.
with bed.BedGraphReader(input_path) as reader:
for record in reader:
print(record)
```
where `record` is a `nucleus.genomics.v1.BedGraphRecord` protocol buffer.
API for writing:
```python
from third_party.nucleus.io import bedgraph
from third_party.nucleus.protos import bedgraph_pb2
# records is an iterable of nucleus.genomics.v1.BedGraphRecord protocol buffers.
records = ...
# Write all records to the desired output path.
with bed.BedGraphWriter(output_path) as writer:
for record in records:
writer.write(record)
```
For both reading and writing, if the path provided to the constructor contains
'.tfrecord' as an extension, a `TFRecord` file is assumed and attempted to be
read or written. Otherwise, the filename is treated as a true BedGraph file.
Files that end in a '.gz' suffix cause the file to be treated as compressed
(with BGZF if it is a BedGraph file, and with gzip if it is a TFRecord file).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from third_party.nucleus.io import genomics_reader
from third_party.nucleus.io import genomics_writer
from third_party.nucleus.io.python import bedgraph_reader
from third_party.nucleus.io.python import bedgraph_writer
from third_party.nucleus.protos import bedgraph_pb2
class NativeBedGraphReader(genomics_reader.GenomicsReader):
"""Class for reading from native BedGraph files.
Most users will want to use BedGraphReader instead, because it dynamically
dispatches between reading native BedGraph files and TFRecord files based on
the filename's extension.
"""
def __init__(self, input_path, num_fields=0):
"""Initializes a NativeBedGraphReader.
Args:
input_path: string. A path to a resource containing BedGraph records.
num_fields: int. The number of fields to read in the BedGraph. If unset or
set to zero, all fields in the input are read.
"""
super(NativeBedGraphReader, self).__init__()
bedgraph_path = input_path.encode('utf8')
self._reader = bedgraph_reader.BedGraphReader.from_file(bedgraph_path)
def query(self):
"""Returns an iterator for going through the records in the region.
NOTE: This function is not currently implemented by NativeBedGraphReader
though it could be implemented for sorted, tabix-indexed BedGraph files.
"""
raise NotImplementedError('Can not currently query a BedGraph file')
def iterate(self):
"""Returns an iterable of BedGraphRecord protos in the file."""
return self._reader.iterate()
def __exit__(self, exit_type, exit_value, exit_traceback):
self._reader.__exit__(exit_type, exit_value, exit_traceback)
class BedGraphReader(genomics_reader.DispatchingGenomicsReader):
"""Class for reading BedGraphRecord protos from BedGraph or TFRecord files."""
def _native_reader(self, input_path, **kwargs):
return NativeBedGraphReader(input_path, **kwargs)
def _record_proto(self):
return bedgraph_pb2.BedGraphRecord
class NativeBedGraphWriter(genomics_writer.GenomicsWriter):
"""Class for writing to native BedGraph files.
Most users will want BedGraphWriter, which will write to either native
BedGraph files or TFRecord files, based on the output filename's extension.
"""
def __init__(self, output_path, header=None):
"""Initializer for NativeBedGraphWriter.
Args:
output_path: str. The path to which to write the BedGraph file.
"""
super(NativeBedGraphWriter, self).__init__()
self._writer = bedgraph_writer.BedGraphWriter.to_file(output_path)
def write(self, proto):
self._writer.write(proto)
def __exit__(self, exit_type, exit_value, exit_traceback):
self._writer.__exit__(exit_type, exit_value, exit_traceback)
class BedGraphWriter(genomics_writer.DispatchingGenomicsWriter):
"""Class for writing BedGraphRecord protos to BedGraph or TFRecord files."""
def _native_writer(self, output_path):
return NativeBedGraphWriter(output_path)
| 36.987179 | 80 | 0.775043 | 2,674 | 0.463432 | 0 | 0 | 0 | 0 | 0 | 0 | 4,009 | 0.694801 |
2a6f89f48cd8c7b16e844264141418335e7aca6a | 199 | py | Python | code/bqutils/auth.py | victoria-cds-sig/explore_mimiciv | 856705ab1af7ecbfe30d5e8054747b61de8af031 | [
"MIT"
] | 3 | 2020-11-23T04:29:16.000Z | 2021-05-29T06:22:00.000Z | code/bqutils/auth.py | victoria-cds-sig/explore_mimiciv | 856705ab1af7ecbfe30d5e8054747b61de8af031 | [
"MIT"
] | null | null | null | code/bqutils/auth.py | victoria-cds-sig/explore_mimiciv | 856705ab1af7ecbfe30d5e8054747b61de8af031 | [
"MIT"
] | 4 | 2020-11-23T05:18:54.000Z | 2021-05-29T06:23:12.000Z | import google.auth
def get_gcreds(scopes = None):
if scopes == None:
scopes = ["https://www.googleapis.com/auth/bigquery"]
return google.auth.default(
scopes = scopes )
| 18.090909 | 61 | 0.628141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.211055 |
2a6fdaada48dd95b31346bb6ce7bd0eeab1a0b16 | 12,312 | py | Python | solnml/components/evaluators/rgs_evaluator.py | zhengjian2322/soln-ml | ceff6c432921df9809f9552aaa1b0bd5a1b7e8d2 | [
"MIT"
] | null | null | null | solnml/components/evaluators/rgs_evaluator.py | zhengjian2322/soln-ml | ceff6c432921df9809f9552aaa1b0bd5a1b7e8d2 | [
"MIT"
] | null | null | null | solnml/components/evaluators/rgs_evaluator.py | zhengjian2322/soln-ml | ceff6c432921df9809f9552aaa1b0bd5a1b7e8d2 | [
"MIT"
] | null | null | null | from ConfigSpace import ConfigurationSpace, CategoricalHyperparameter
import time
import warnings
import os
import numpy as np
import pickle as pkl
from sklearn.metrics.scorer import balanced_accuracy_scorer
from solnml.utils.logging_utils import get_logger
from solnml.components.evaluators.base_evaluator import _BaseEvaluator
from solnml.components.evaluators.evaluate_func import validation
from solnml.components.feature_engineering.task_space import get_task_hyperparameter_space
from solnml.components.feature_engineering.parse import parse_config, construct_node
from solnml.components.utils.topk_saver import CombinedTopKModelSaver
from solnml.components.utils.class_loader import get_combined_candidtates
from solnml.components.models.regression import _regressors, _addons
from solnml.components.utils.constants import *
def get_estimator(config, estimator_id):
regressor_type = estimator_id
config_ = config.copy()
config_['%s:random_state' % regressor_type] = 1
hpo_config = dict()
for key in config_:
key_name = key.split(':')[0]
if regressor_type == key_name:
act_key = key.split(':')[1]
hpo_config[act_key] = config_[key]
_candidates = get_combined_candidtates(_regressors, _addons)
estimator = _candidates[regressor_type](**hpo_config)
if hasattr(estimator, 'n_jobs'):
setattr(estimator, 'n_jobs', 1)
return regressor_type, estimator
def get_hpo_cs(estimator_id, task_type=REGRESSION):
_candidates = get_combined_candidtates(_regressors, _addons)
if estimator_id in _candidates:
rgs_class = _candidates[estimator_id]
else:
raise ValueError("Algorithm %s not supported!" % estimator_id)
cs = rgs_class.get_hyperparameter_search_space()
return cs
def get_cash_cs(include_algorithms=None, task_type=REGRESSION):
    """Build the CASH (combined algorithm selection + HPO) configuration space.

    *include_algorithms* optionally restricts the candidate pool; a ValueError
    is raised when none of the requested names is a known algorithm.
    """
    candidates = get_combined_candidtates(_regressors, _addons)
    if include_algorithms is not None:
        # Restrict the candidate pool to the requested algorithm names.
        candidates = set(include_algorithms) & set(candidates.keys())
        if not candidates:
            raise ValueError("No algorithms included! Please check the spelling of the included algorithms!")
    cs = ConfigurationSpace()
    algo_hp = CategoricalHyperparameter('algorithm', list(candidates))
    cs.add_hyperparameter(algo_hp)
    # Attach each estimator's sub-space, conditioned on the 'algorithm' choice.
    for estimator_id in candidates:
        cs.add_configuration_space(estimator_id,
                                   get_hpo_cs(estimator_id),
                                   parent_hyperparameter={'parent': algo_hp, 'value': estimator_id})
    return cs
def get_fe_cs(task_type=REGRESSION, include_image=False, include_text=False, include_preprocessors=None):
    """Return the feature-engineering configuration space for the given task."""
    return get_task_hyperparameter_space(task_type=task_type,
                                         include_image=include_image,
                                         include_text=include_text,
                                         include_preprocessors=include_preprocessors)
def get_combined_cs(task_type=REGRESSION, include_image=False, include_text=False,
                    include_preprocessors=None):
    """Merge the CASH space and the feature-engineering space into one space.

    Returns a ConfigurationSpace containing the algorithm-selection/HPO
    hyperparameters plus all feature-engineering hyperparameters, conditions
    and forbidden clauses.
    """
    # Bug fix: task_type was previously passed positionally, which bound it to
    # get_cash_cs's *first* parameter `include_algorithms` instead of `task_type`.
    cash_cs = get_cash_cs(task_type=task_type)
    fe_cs = get_fe_cs(task_type,
                      include_image=include_image, include_text=include_text,
                      include_preprocessors=include_preprocessors)
    # Copy hyperparameters, conditions and forbidden clauses into the CASH space.
    for hp in fe_cs.get_hyperparameters():
        cash_cs.add_hyperparameter(hp)
    for cond in fe_cs.get_conditions():
        cash_cs.add_condition(cond)
    for bid in fe_cs.get_forbiddens():
        cash_cs.add_forbidden_clause(bid)
    return cash_cs
class RegressionEvaluator(_BaseEvaluator):
    """Evaluate one (feature-engineering + regressor) configuration on a data node.

    Calling the instance with a configuration trains the configured pipeline
    under the chosen resampling strategy ('holdout', 'cv' or 'partial') and
    returns the *negated* validation score (the optimizer minimizes).
    """
    def __init__(self, fixed_config=None, scorer=None, data_node=None, task_type=REGRESSION, resampling_strategy='cv',
                 resampling_params=None, timestamp=None, output_dir=None, seed=1):
        """Store evaluation settings and make working copies of the data node.

        fixed_config: entries merged into every evaluated configuration.
        scorer: sklearn scorer; NOTE(review): the default is
            balanced_accuracy_scorer, a *classification* metric, which looks
            odd for a regression evaluator — confirm intended default.
        output_dir/timestamp: used to derive the on-disk model cache path.
        """
        self.resampling_strategy = resampling_strategy
        self.resampling_params = resampling_params
        self.fixed_config = fixed_config
        self.scorer = scorer if scorer is not None else balanced_accuracy_scorer
        self.task_type = task_type
        self.data_node = data_node
        self.output_dir = output_dir
        self.seed = seed
        self.onehot_encoder = None
        self.logger = get_logger(self.__module__ + "." + self.__class__.__name__)
        self.continue_training = False
        # Working copies that get their .data overwritten per split below.
        self.train_node = data_node.copy_()
        self.val_node = data_node.copy_()
        self.timestamp = timestamp
    def __call__(self, config, **kwargs):
        """Train and score *config*; returns -score (minimization objective).

        kwargs may carry 'resource_ratio' (used only by the 'partial'
        strategy to subsample the training set).
        """
        start_time = time.time()
        return_dict = dict()
        self.seed = 1
        downsample_ratio = kwargs.get('resource_ratio', 1.0)
        # Convert Configuration into dictionary
        if not isinstance(config, dict):
            config = config.get_dictionary().copy()
        else:
            config = config.copy()
        if self.fixed_config is not None:
            config.update(self.fixed_config)
        self.estimator_id = config['algorithm']
        if 'holdout' in self.resampling_strategy:
            # Prepare data node.
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                if self.resampling_params is None or 'test_size' not in self.resampling_params:
                    test_size = 0.33
                else:
                    test_size = self.resampling_params['test_size']
                from sklearn.model_selection import ShuffleSplit
                # Single shuffled train/validation split.
                ss = ShuffleSplit(n_splits=1, test_size=test_size, random_state=self.seed)
                for train_index, test_index in ss.split(self.data_node.data[0], self.data_node.data[1]):
                    _x_train, _x_val = self.data_node.data[0][train_index], self.data_node.data[0][test_index]
                    _y_train, _y_val = self.data_node.data[1][train_index], self.data_node.data[1][test_index]
                self.train_node.data = [_x_train, _y_train]
                self.val_node.data = [_x_val, _y_val]
                # Fit the feature-engineering ops on train, replay them on validation.
                data_node, op_list = parse_config(self.train_node, config, record=True)
                _val_node = self.val_node.copy_()
                _val_node = construct_node(_val_node, op_list)
                _x_train, _y_train = data_node.data
                _x_val, _y_val = _val_node.data
                config_dict = config.copy()
                # regression gadgets
                regressor_id, clf = get_estimator(config_dict, self.estimator_id)
                score = validation(clf, self.scorer, _x_train, _y_train, _x_val, _y_val,
                                   random_state=self.seed)
                # Cache the fitted pipeline on disk, keeping only the best score per config.
                if np.isfinite(score):
                    model_path = CombinedTopKModelSaver.get_path_by_config(self.output_dir, config, self.timestamp)
                    if not os.path.exists(model_path):
                        with open(model_path, 'wb') as f:
                            pkl.dump([op_list, clf, score], f)
                    else:
                        with open(model_path, 'rb') as f:
                            _, _, perf = pkl.load(f)
                        if score > perf:
                            with open(model_path, 'wb') as f:
                                pkl.dump([op_list, clf, score], f)
                    self.logger.info("Model saved to %s" % model_path)
        elif 'cv' in self.resampling_strategy:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                if 'cv' in self.resampling_strategy:
                    if self.resampling_params is None or 'folds' not in self.resampling_params:
                        folds = 5
                    else:
                        folds = self.resampling_params['folds']
                from sklearn.model_selection import KFold
                kfold = KFold(n_splits=folds, random_state=self.seed, shuffle=False)
                scores = list()
                # NOTE(review): unlike the other branches, CV does not persist a model.
                for train_index, test_index in kfold.split(self.data_node.data[0], self.data_node.data[1]):
                    _x_train, _x_val = self.data_node.data[0][train_index], self.data_node.data[0][test_index]
                    _y_train, _y_val = self.data_node.data[1][train_index], self.data_node.data[1][test_index]
                    self.train_node.data = [_x_train, _y_train]
                    self.val_node.data = [_x_val, _y_val]
                    data_node, op_list = parse_config(self.train_node, config, record=True)
                    _val_node = self.val_node.copy_()
                    _val_node = construct_node(_val_node, op_list)
                    _x_train, _y_train = data_node.data
                    _x_val, _y_val = _val_node.data
                    config_dict = config.copy()
                    # regressor gadgets
                    regressor_id, clf = get_estimator(config_dict, self.estimator_id)
                    _score = validation(clf, self.scorer, _x_train, _y_train, _x_val, _y_val,
                                        random_state=self.seed)
                    scores.append(_score)
                score = np.mean(scores)
        elif 'partial' in self.resampling_strategy:
            # Prepare data node.
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                if self.resampling_params is None or 'test_size' not in self.resampling_params:
                    test_size = 0.33
                else:
                    test_size = self.resampling_params['test_size']
                from sklearn.model_selection import ShuffleSplit
                ss = ShuffleSplit(n_splits=1, test_size=test_size, random_state=self.seed)
                for train_index, test_index in ss.split(self.data_node.data[0], self.data_node.data[1]):
                    _x_train, _x_val = self.data_node.data[0][train_index], self.data_node.data[0][test_index]
                    _y_train, _y_val = self.data_node.data[1][train_index], self.data_node.data[1][test_index]
                self.train_node.data = [_x_train, _y_train]
                self.val_node.data = [_x_val, _y_val]
                data_node, op_list = parse_config(self.train_node, config, record=True)
                _val_node = self.val_node.copy_()
                _val_node = construct_node(_val_node, op_list)
                _x_train, _y_train = data_node.data
                # Subsample the training set to the requested resource ratio.
                if downsample_ratio != 1:
                    down_ss = ShuffleSplit(n_splits=1, test_size=downsample_ratio,
                                           random_state=self.seed)
                    for _, _val_index in down_ss.split(_x_train, _y_train):
                        _act_x_train, _act_y_train = _x_train[_val_index], _y_train[_val_index]
                else:
                    _act_x_train, _act_y_train = _x_train, _y_train
                    _val_index = list(range(len(_x_train)))
                _x_val, _y_val = _val_node.data
                config_dict = config.copy()
                # Regressor gadgets
                regressor_id, clf = get_estimator(config_dict, self.estimator_id)
                score = validation(clf, self.scorer, _act_x_train, _act_y_train, _x_val, _y_val,
                                   random_state=self.seed)
                # Persist only full-resource evaluations.
                if np.isfinite(score) and downsample_ratio == 1:
                    model_path = CombinedTopKModelSaver.get_path_by_config(self.output_dir, config, self.timestamp)
                    if not os.path.exists(model_path):
                        with open(model_path, 'wb') as f:
                            pkl.dump([op_list, clf, score], f)
                    else:
                        with open(model_path, 'rb') as f:
                            _, _, perf = pkl.load(f)
                        if score > perf:
                            with open(model_path, 'wb') as f:
                                pkl.dump([op_list, clf, score], f)
                    self.logger.info("Model saved to %s" % model_path)
        else:
            raise ValueError('Invalid resampling strategy: %s!' % self.resampling_strategy)
        # Logging is best-effort: any of the referenced locals may be unbound
        # if the evaluation failed part-way through.
        try:
            self.logger.info('Evaluation<%s> | Score: %.4f | Time cost: %.2f seconds | Shape: %s' %
                             (regressor_id,
                              self.scorer._sign * score,
                              time.time() - start_time, _x_train.shape))
        except:
            pass
        # Turn it into a minimization problem.
        return_dict['objective_value'] = -score
        return -score
| 44.608696 | 118 | 0.619964 | 8,751 | 0.71077 | 0 | 0 | 0 | 0 | 0 | 0 | 671 | 0.0545 |
2a71ca14c7810cf8c3b652bf81d516b7e465ec6e | 5,830 | py | Python | testing/MLDB-1104-input-data-spec.py | kstepanmpmg/mldb | f78791cd34d01796705c0f173a14359ec1b2e021 | [
"Apache-2.0"
] | 665 | 2015-12-09T17:00:14.000Z | 2022-03-25T07:46:46.000Z | testing/MLDB-1104-input-data-spec.py | tomzhang/mldb | a09cf2d9ca454d1966b9e49ae69f2fe6bf571494 | [
"Apache-2.0"
] | 797 | 2015-12-09T19:48:19.000Z | 2022-03-07T02:19:47.000Z | testing/MLDB-1104-input-data-spec.py | matebestek/mldb | f78791cd34d01796705c0f173a14359ec1b2e021 | [
"Apache-2.0"
] | 103 | 2015-12-25T04:39:29.000Z | 2022-02-03T02:55:22.000Z | #
# MLDB-1104-input-data-spec.py
# mldb.ai inc, 2015
# This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
#
import unittest
import datetime
import random
from mldb import mldb, ResponseException
class InputDataSpecTest(unittest.TestCase):
    """Exercise MLDB training procedures with both string and structured
    (dict) input-data specifications, including expected error cases."""
    @classmethod
    def setUpClass(cls):
        # Datasets are created once and shared by all test methods.
        cls.load_kmeans_dataset()
        cls.load_classifier_dataset()
    @classmethod
    def load_kmeans_dataset(cls):
        """Create 'kmeans_example': 100 rows of random integer (x, y) points."""
        kmeans_example = mldb.create_dataset({
            "type": "sparse.mutable",
            'id' : 'kmeans_example'
        })
        now = datetime.datetime.now()
        for i in range(100):
            val_x = float(random.randint(-5, 5))
            val_y = float(random.randint(-5, 5))
            row = [['x', val_x, now], ['y', val_y, now]]
            kmeans_example.record_row('row_%d' % i, row)
        kmeans_example.commit()
    def train_kmeans(self, training_data):
        """Run a kmeans.train procedure with the given trainingData spec."""
        metric = "euclidean"
        mldb.put("/v1/procedures/kmeans", {
            'type' : 'kmeans.train',
            'params' : {
                'trainingData' : training_data,
                'centroidsDataset' : {
                    'id' : 'kmeans_centroids',
                    'type' : 'embedding',
                    'params': {
                        'metric': metric
                    }
                },
                'numClusters' : 2,
                'metric': metric
            }
        })
    def train_svd(self, training_data):
        """Run an svd.train procedure with the given trainingData spec."""
        mldb.put("/v1/procedures/svd", {
            'type' : 'svd.train',
            'params' : {
                'trainingData' : training_data,
                'runOnCreation' : True
            }
        })
    @classmethod
    def load_classifier_dataset(cls):
        """Create 'iris_dataset' from the bundled iris.data CSV file.

        The sepal columns are deliberately named 'label'/'labels' and the
        class column 'features' to test that such column names do not
        confuse the classifier procedures.
        """
        dataset = mldb.create_dataset({
            "type": "sparse.mutable",
            "id": "iris_dataset"
        })
        with open("./mldb/testing/dataset/iris.data") as f:
            for i, line in enumerate(f):
                cols = []
                line_split = line.split(',')
                # Skip malformed/blank lines.
                if len(line_split) != 5:
                    continue
                # Confusing names on purpose: what if a feature is named "label"?
                cols.append(["label", float(line_split[0]), 0]) # sepal length
                cols.append(["labels", float(line_split[1]), 0]) # sepal width
                cols.append(["petal length", float(line_split[2]), 0])
                cols.append(["petal width", float(line_split[3]), 0])
                cols.append(["features", line_split[4].strip('\n"'), 0]) #class
                dataset.record_row(str(i+1), cols)
        dataset.commit()
    def train_classifier(self, training_data):
        """Run a categorical decision-tree classifier.train procedure."""
        result = mldb.put("/v1/procedures/classifier", {
            'type' : 'classifier.train',
            'params' : {
                'trainingData' : training_data,
                "configuration": {
                    "type": "decision_tree",
                    "max_depth": 8,
                    "verbosity": 3,
                    "update_alg": "prob"
                },
                "modelFileUrl": "file://tmp/MLDB-1104.cls",
                "mode": "categorical",
                "functionName": "classifier_apply",
                'runOnCreation' : True
            }
        })
        return result.json()
    def test_train_kmeans(self):
        # KMEANS TRAIN PROCEDURE WITH BOTH TYPE OF INPUT DATA
        self.train_kmeans('select * from kmeans_example')
        self.train_kmeans('select x + y as x, y + x as y from kmeans_example')
        self.train_kmeans({'select' : '*', 'from' : {'id' : 'kmeans_example'}})
        # TEST ERROR CASE: GROUP BY is not a valid input spec for kmeans.
        with self.assertRaises(ResponseException):
            self.train_kmeans(
                'select x, y from kmeans_example group by x')
        with self.assertRaises(ResponseException):
            self.train_kmeans(
                'select x, y from kmeans_example group by x having y > 2')
    def test_train_svd(self):
        # Valid input specs: wildcard, column lists, aliases, exclusions,
        # structured dicts and computed expressions.
        self.train_svd('select * from kmeans_example')
        self.train_svd('select x, y from kmeans_example')
        self.train_svd('select x AS z, y from kmeans_example')
        self.train_svd('select * EXCLUDING(x) from kmeans_example')
        self.train_svd({'select' : '*', 'from' : {'id' : 'kmeans_example'}})
        self.train_svd('select x + 1, y from kmeans_example')
        # GROUP BY queries must be rejected.
        with self.assertRaises(ResponseException):
            self.train_svd('select x, y from kmeans_example group by x')
        with self.assertRaises(ResponseException):
            self.train_svd(
                'select x, y from kmeans_example group by x having y > 2')
    def test_train_classifier(self):
        mldb.log(self.train_classifier(
            "select {label, labels} as features, features as label "
            "from iris_dataset"))
        result = mldb.get(
            "/v1/query",
            q="SELECT classifier_apply({{label, labels} as features}) as *, features from iris_dataset")
        rows = result.json()
        mldb.log("-------------------------------");
        mldb.log(rows)
        # compare the classifier results on the train data with the original
        # label
        count = 0
        for row in rows:
            _max = 0
            category = ""
            # Pick the category with the highest score among columns 1..3.
            for column in row['columns'][1:4]:
                if column[1] > _max:
                    _max = column[1]
                    # remove the leading "scores." prefix and quotation marks
                    category = column[0][10:-3]
            if category != row['columns'][0][1]:
                count += 1
        # misclassified results should be a small fraction of the training set
        self.assertTrue(
            float(count) / len(rows) < 0.2,
            'the classifier results on the train data are strangely low')
# Run the suite through MLDB's own test runner when executed directly.
if __name__ == '__main__':
    mldb.run_tests()
| 35.766871 | 104 | 0.527444 | 5,558 | 0.953345 | 0 | 0 | 1,529 | 0.262264 | 0 | 0 | 1,971 | 0.338079 |
2a73ad7dadbf46c49b812817e0143b14c755fc20 | 6,000 | py | Python | code/python/external/pi3d/shape/EnvironmentCube.py | rec/echomesh | be668971a687b141660fd2e5635d2fd598992a01 | [
"MIT"
] | 30 | 2015-02-18T14:07:00.000Z | 2021-12-11T15:19:01.000Z | code/python/external/pi3d/shape/EnvironmentCube.py | silky/echomesh | 2fe5a00a79c215b4aca4083e5252fcdcbd0507aa | [
"MIT"
] | 16 | 2015-01-01T23:17:24.000Z | 2015-04-18T23:49:27.000Z | code/python/external/pi3d/shape/EnvironmentCube.py | silky/echomesh | 2fe5a00a79c215b4aca4083e5252fcdcbd0507aa | [
"MIT"
] | 31 | 2015-03-11T20:04:07.000Z | 2020-11-02T13:56:59.000Z | import os.path
from pi3d import *
from pi3d.Buffer import Buffer
from pi3d.Shape import Shape
from pi3d.Texture import Texture
# Face order used both for locating texture files and for the Buffer list.
CUBE_PARTS = ['front', 'right', 'top', 'bottom', 'left', 'back']
# Index of the 'bottom' face within CUBE_PARTS (skipped when nobottom=True).
BOTTOM_INDEX = 3
def loadECfiles(path, fname, suffix='jpg', nobottom=False):
  """Load the six (or five, with *nobottom*) environment-cube face textures.

  Arguments:
    *path*
      directory of the image files relative to the top directory.
    *fname*
      stem of each file name, without the '_top', '_bottom', etc. part.

  Keyword arguments:
    *suffix*
      file extension appended after the face name (default 'jpg').
    *nobottom*
      when True the bottom face is skipped and only five textures are
      returned; the cube will then reuse the previous (top) image for the
      bottom instead of a blank texture.
  """
  parts = CUBE_PARTS if not nobottom else [p for p in CUBE_PARTS if p != 'bottom']
  return [Texture(os.path.join(path, '%s_%s.%s' % (fname, p, suffix)))
          for p in parts]
class EnvironmentCube(Shape):
  """3d skybox cube model; inherits from Shape.

  The cube is textured either from a single cross-layout image
  ("CROSS"/"HALFCROSS") or from six separate face images (any other
  maptype), in which case one Buffer per face is created."""
  def __init__(self, camera=None, light=None, size=500.0, maptype="HALFCROSS", name="", x=0.0, y=0.0, z=0.0,
               rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0, nobottom=False):
    """uses standard constructor for Shape extra Keyword arguments:

      *size*
        Dimensions of the cube
      *maptype*
        HALFCROSS (default) or CROSS any other defaults to CUBE type
        and will require 6 (or 5 with nobottom) image files to render it
      *nobottom*
        when True the bottom face reuses the texture of the previous face
        (see set_draw_details below).
    """
    super(EnvironmentCube,self).__init__(camera, light, name, x, y, z, rx, ry, rz,
                                         1.0, 1.0, 1.0, cx, cy, cz)
    self.width = size
    self.height = size
    self.depth = size
    self.ssize = 36
    self.ttype = GL_TRIANGLES
    self.nobottom = nobottom
    # Half-extents: vertices are centred on the origin.
    ww = size / 2.0
    hh = size / 2.0
    dd = size / 2.0
    #cuboid data - faces are separated out for texturing..
    self.vertices = ((-ww, hh, dd), (ww, hh, dd), (ww,-hh, dd), (-ww, -hh, dd),
                     (ww, hh, dd), (ww, hh, -dd), (ww, -hh, -dd), (ww, -hh, dd),
                     (-ww, hh, dd), (-ww, hh, -dd), (ww, hh, -dd), (ww, hh, dd),
                     (ww, -hh, dd), (ww, -hh, -dd), (-ww, -hh, -dd),(-ww, -hh, dd),
                     (-ww, -hh, dd),(-ww, -hh, -dd),(-ww, hh, -dd), (-ww, hh, dd),
                     (-ww, hh, -dd),(ww, hh, -dd), (ww, -hh, -dd), (-ww,-hh,-dd))
    self.normals = ((0.0, 0.0, 1), (0.0, 0.0, 1), (0.0, 0.0, 1), (0.0, 0.0, 1),
                    (1, 0.0, 0), (1, 0.0, 0), (1, 0.0, 0), (1, 0.0, 0),
                    (0.0, 1, 0), (0.0, 1, 0), (0.0, 1, 0), (0.0, 1, 0),
                    (0.0, -1, 0), (0,- 1, 0), (0.0, -1, 0), (0.0, -1, 0),
                    (-1, 0.0, 0), (-1, 0.0, 0), (-1, 0.0, 0), (-1, 0.0, 0),
                    (0.0, 0.0, -1),(0.0, 0.0, -1),(0.0, 0.0, -1), (0.0, 0.0, -1))
    self.indices = ((3, 0, 1), (2, 3, 1), (7, 4, 5), (6, 7, 5),
                    (11, 8, 9), (10, 11, 9), (15, 12, 13), (14, 15, 13),
                    (17, 18, 19),(16, 17, 19),(22, 21, 20), (23, 22, 20))
    if maptype == "CROSS":
      # Single texture laid out as an unfolded cross: one shared Buffer.
      self.tex_coords = ((1.0, 0.34), (0.75, 0.34), (0.75, 0.661), (1.0, 0.661), #front
                         (0.75, 0.34), (0.5, 0.34), (0.5, 0.661), (0.75, 0.661), #right
                         (0.251, 0.0), (0.251, 0.34), (0.498, 0.34), (0.498, 0.0), #top
                         (0.498, 0.998), (0.498, 0.66), (0.251, 0.66), (0.251, 0.998), #bottom
                         (0.0, 0.661), (0.25, 0.661), (0.25, 0.34), (0.0, 0.34), #left
                         (0.25, 0.34), (0.5, 0.34), (0.5, 0.661), (0.25, 0.661)) #back
      self.buf = []
      self.buf.append(Buffer(self, self.vertices, self.tex_coords, self.indices, self.normals))
    elif maptype == "HALFCROSS":
      # Half-cross layout; coordinates outside [0,1] rely on texture wrapping.
      self.tex_coords = ((0.25,0.25), (0.25,0.75), (-0.25,0.75), (-0.25,0.25), #front
                         (0.25,0.75), (0.75,0.75), (0.75,1.25), (0.25,1.25), #right
                         (0.25,0.25), (0.75,0.25), (0.75,0.75), (0.25,0.75), #top
                         (0,0), (1,0), (1,1), (0,1), #bottom
                         (0.25,-0.25), (0.75,-0.25), (0.75,0.25), (0.25,0.25), #left
                         (0.75,0.25), (0.75,0.75), (1.25,0.75), (1.25,0.25)) #back
      self.buf = []
      self.buf.append(Buffer(self, self.vertices, self.tex_coords, self.indices, self.normals))
    else:
      # One image per face: each face becomes its own Buffer so a different
      # texture can be bound to each (order matches CUBE_PARTS).
      self.tex_coords = ((0.002,0.002), (0.998,0.002), (0.998,0.998),(0.002,0.998),
                         (0.002,0.002), (0.998,0.002), (0.998,0.998), (0.002,0.998),
                         (0.002,0.998), (0.002,0.002), (0.998,0.002), (0.998,0.998),
                         (0.998,0.002), (0.998,0.998), (0.002,0.998), (0.002,0.002),
                         (0.998,0.998), (0.002,0.998), (0.002,0.002), (0.998,0.002),
                         (0.998,0.002), (0.002,0.002), (0.002,0.998), (0.998,0.998))
      self.buf = []
      self.buf.append(Buffer(self, self.vertices[0:4], self.tex_coords[0:4], ((3,0,1), (2,3,1)), self.normals[0:4])) #front
      self.buf.append(Buffer(self, self.vertices[4:8], self.tex_coords[4:8], ((3,0,1), (2,3,1)), self.normals[4:8])) #right
      self.buf.append(Buffer(self, self.vertices[8:12], self.tex_coords[8:12], ((3,0,1), (2,3,1)), self.normals[8:12])) #top
      self.buf.append(Buffer(self, self.vertices[12:16], self.tex_coords[12:16], ((3,0,1), (2,3,1)), self.normals[12:16])) #bottom
      self.buf.append(Buffer(self, self.vertices[16:20], self.tex_coords[16:20], ((3,0,1), (2,3,1)), self.normals[16:20])) #left
      self.buf.append(Buffer(self, self.vertices[20:24], self.tex_coords[20:24], ((3,1,0), (2,1,3)), self.normals[20:24])) #back
  def set_draw_details(self, shader, textures, ntiles=0.0, shiny=0.0, umult=1.0, vmult=1.0):
    """overrides this method in Shape to cope with nobottom option

    When only five textures are supplied the bottom Buffer re-uses the
    texture of the face before it (the top), shifting later indices by one.
    """
    if not (type(textures) is list):
      textures = [textures]
    elif len(textures) == 5:
      # this should be the only circumstance. Saves setting it in the constructor
      self.nobottom = True
    for i, b in enumerate(self.buf):
      # From the bottom face onwards, reuse the previous texture index.
      j = i - 1 if (self.nobottom and i >= BOTTOM_INDEX) else i
      b.set_draw_details(shader, [textures[j]], ntiles, shiny, umult, vmult)
| 45.801527 | 130 | 0.5405 | 4,924 | 0.820667 | 0 | 0 | 0 | 0 | 0 | 0 | 1,281 | 0.2135 |
2a74193bd4405cdd9a1190cbabe2ed90d97be2b2 | 5,678 | py | Python | views.py | margish100/Api-json | 82b0561f6335708702b997dc8dd1e62c7df5d03d | [
"MIT"
] | null | null | null | views.py | margish100/Api-json | 82b0561f6335708702b997dc8dd1e62c7df5d03d | [
"MIT"
] | null | null | null | views.py | margish100/Api-json | 82b0561f6335708702b997dc8dd1e62c7df5d03d | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_list_or_404
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt

from rest_framework import generics
from rest_framework import mixins
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import SessionAuthentication,TokenAuthentication, BasicAuthentication
from rest_framework.decorators import api_view
from rest_framework.parsers import JSONParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView

from .models import Article
from .serializers import ArticleSerializer
class ArticleAPIViewSet(viewsets.ViewSet):
    """ViewSet exposing list/create/retrieve/update actions for Article."""

    def list(self, request):
        """Return all articles serialized as JSON."""
        articles = Article.objects.all()
        serializer = ArticleSerializer(articles, many=True)
        return Response(serializer.data)

    def create(self, request):
        """Create a new article from the request payload (400 on invalid data)."""
        serializer = ArticleSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def retrieve(self, request, pk=None):
        """Return one article by primary key, or a 404 if it does not exist."""
        queryset = Article.objects.all()
        # Bug fix: get_list_or_404 returns a *list*, which cannot be fed to a
        # non-many serializer; a single-object lookup is intended here.
        article = get_object_or_404(queryset, pk=pk)
        serializer = ArticleSerializer(article)
        return Response(serializer.data)

    def update(self, request, pk=None):
        """Replace an existing article with the request payload."""
        article = Article.objects.get(pk=pk)
        serializer = ArticleSerializer(article, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class GenericAPIView(generics.GenericAPIView, mixins.ListModelMixin, mixins.CreateModelMixin,
                     mixins.UpdateModelMixin, mixins.RetrieveModelMixin,
                     mixins.DestroyModelMixin):
    """Generic CRUD endpoint for Article assembled from DRF mixins.

    Token authentication is required and URL lookups use the ``id`` field.
    """

    serializer_class = ArticleSerializer
    queryset = Article.objects.all()
    lookup_field = 'id'
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request, id=None):
        # With an id, fetch a single record; otherwise list everything.
        return self.retrieve(request) if id else self.list(request)

    def post(self, request):
        return self.create(request)

    def put(self, request, id=None):
        return self.update(request, id)

    def delete(self, request, id):
        return self.destroy(request, id)
class ArticleAPIView(APIView):
    """Collection endpoint: GET lists all articles, POST creates one."""

    def get(self, request):
        queryset = Article.objects.all()
        return Response(ArticleSerializer(queryset, many=True).data)

    def post(self, request):
        serializer = ArticleSerializer(data=request.data)
        # Guard clause: reject invalid payloads before saving.
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class ArticleDetails(APIView):
    """Detail endpoint for a single Article addressed by ``id``."""

    def get_object(self, id):
        """Fetch the article, or return a 404 response if it does not exist.

        NOTE(review): returning a response object here means callers will try
        to serialize it; consider raising Http404 instead — confirm intent.
        """
        try:
            return Article.objects.get(id=id)
        # Bug fix: the exception class was misspelled "DoesNotExit", which
        # would itself raise AttributeError instead of producing the 404.
        except Article.DoesNotExist:
            return HttpResponse(status=status.HTTP_404_NOT_FOUND)

    def get(self, request, id):
        """Return the serialized article."""
        article = self.get_object(id)
        serializer = ArticleSerializer(article)
        return Response(serializer.data)

    def put(self, request, id):
        """Replace the article with the request payload (400 on invalid data)."""
        article = self.get_object(id)
        serializer = ArticleSerializer(article, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, id):
        """Delete the article and return 204 No Content."""
        article = self.get_object(id)
        article.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['GET', 'POST'])
def article_list(request):
    """List all articles (GET) or create a new article (POST)."""
    if request.method == 'GET':
        queryset = Article.objects.all()
        return Response(ArticleSerializer(queryset, many=True).data)
    elif request.method == 'POST':
        serializer = ArticleSerializer(data=request.data)
        # Guard clause: reject invalid payloads before saving.
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
@api_view(['GET', 'PUT', 'DELETE'])
def article_detail(request, pk):
    """Retrieve (GET), replace (PUT) or remove (DELETE) a single article."""
    try:
        article = Article.objects.get(pk=pk)
    # Bug fix: the exception class was misspelled "DoesNotExit", which would
    # itself raise AttributeError instead of producing the intended 404.
    except Article.DoesNotExist:
        return HttpResponse(status=status.HTTP_404_NOT_FOUND)

    if request.method == 'GET':
        serializer = ArticleSerializer(article)
        return Response(serializer.data)
    elif request.method == 'PUT':
        serializer = ArticleSerializer(article, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        article.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
| 30.202128 | 105 | 0.66608 | 3,442 | 0.606199 | 0 | 0 | 1,385 | 0.243924 | 0 | 0 | 168 | 0.029588 |
2a748e6927d352016f97afb5ed56a3003dfed71b | 4,101 | py | Python | Hangman.py | Draxsis/hangman-game | ca7cbb353d25b3d4def331d150e202e43940da97 | [
"MIT"
] | null | null | null | Hangman.py | Draxsis/hangman-game | ca7cbb353d25b3d4def331d150e202e43940da97 | [
"MIT"
] | null | null | null | Hangman.py | Draxsis/hangman-game | ca7cbb353d25b3d4def331d150e202e43940da97 | [
"MIT"
] | null | null | null | import random
import time
# Welcome and start: brief pauses make the greeting feel less abrupt.
time.sleep(1)
player_name = input('Please enter your player name : ')
time.sleep(1)
print(f'Welcome to hangman game {player_name}')
# main function
def main():
    """Reset the shared game state and pick a fresh secret word."""
    global count
    global word
    global display
    global already_guessed
    global length
    global play_game

    word_pool = ['time', 'cloth', 'ale', 'queen', 'beer', 'man', 'watermelon',
                 'gta', 'corona', 'home', 'german', 'yellow', 'usa', 'loop',
                 'water', 'cola']
    word = random.choice(word_pool)
    length = len(word)
    count = 0                  # wrong guesses so far
    display = '_' * length     # masked word shown to the player
    already_guessed = []       # letters already found
    play_game = ''
# Ask whether the player wants another round, then restart or quit.
def play_loop():
    """Prompt for replay; 'y' restarts the game, 'n' exits the program."""
    global play_game

    prompt = "Do You want to play again? y = yes, n = no \n"
    play_game = input(prompt)
    # Keep asking until one of the accepted answers is given.
    while play_game not in ["y", "n", "Y", "N"]:
        play_game = input(prompt)
    if play_game == "y":
        main()
    elif play_game == "n":
        print("Thanks For Playing! We expect you back again!")
        exit()
# hangman game function
def hangman():
    """Play one guess of hangman, then recurse until win or loss.

    Reads a single letter, updates the shared game state, draws the gallows
    after each wrong guess, and hands off to play_loop() on win or loss.
    """
    global count
    global display
    global word
    global already_guessed
    global play_game
    limit = 5
    guess = input("This is the Hangman Word: " + display + " Enter your guess: \n")
    guess = guess.strip()
    # Reject empty input, multi-character input and anything sorting at or
    # below "9" (digits, punctuation) — only single letters are accepted.
    if len(guess.strip()) == 0 or len(guess.strip()) >= 2 or guess <= "9":
        print("Invalid Input, Try a letter\n")
    elif guess in word:
        # Reveal the first remaining occurrence of the letter; repeated
        # guesses reveal subsequent occurrences one at a time.
        already_guessed.extend([guess])
        index = word.find(guess)
        word = word[:index] + "_" + word[index + 1:]
        display = display[:index] + guess + display[index + 1:]
        print(display + "\n")
    elif guess in already_guessed:
        print("Try another letter.\n")
    else:
        # Wrong guess: advance the gallows drawing one stage.
        count += 1
        if count == 1:
            time.sleep(1)
            print(" _____ \n"
                  " | \n"
                  " | \n"
                  " | \n"
                  " | \n"
                  " | \n"
                  " | \n"
                  "__|__\n")
            print("Wrong guess. " + str(limit - count) + " guesses remaining\n")
        elif count == 2:
            time.sleep(1)
            print(" _____ \n"
                  " | | \n"
                  " | |\n"
                  " | \n"
                  " | \n"
                  " | \n"
                  " | \n"
                  "__|__\n")
            print("Wrong guess. " + str(limit - count) + " guesses remaining\n")
        elif count == 3:
            time.sleep(1)
            print(" _____ \n"
                  " | | \n"
                  " | |\n"
                  " | | \n"
                  " | \n"
                  " | \n"
                  " | \n"
                  "__|__\n")
            print("Wrong guess. " + str(limit - count) + " guesses remaining\n")
        elif count == 4:
            time.sleep(1)
            print(" _____ \n"
                  " | | \n"
                  " | |\n"
                  " | | \n"
                  " | O \n"
                  " | \n"
                  " | \n"
                  "__|__\n")
            print("Wrong guess. " + str(limit - count) + " last guess remaining\n")
        elif count == 5: # lose expression
            time.sleep(1)
            print(" _____ \n"
                  " | | \n"
                  " | |\n"
                  " | | \n"
                  " | O \n"
                  " | /|\ \n"
                  " | / \ \n"
                  "__|__\n")
            print("Wrong guess. You are hanged!!!\n")
            # `word` has its found letters blanked out by now, so this shows
            # the guessed letters plus the unsolved remainder.
            print("The word was:",already_guessed,word)
            play_loop()
    # All letters found: every position of `word` has been blanked out.
    if word == '_' * length: # win expression
        print("Congrats! You have guessed the word correctly!")
        play_loop()
    elif count != limit:
        hangman()
# Set up the initial game state before the guessing loop starts.
main()
hangman() | 31.546154 | 146 | 0.407462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,363 | 0.332358 |
2a75088725eb73c79e1b36480f5ee8a4db2071f4 | 7,601 | py | Python | vermin_rules_3.9.py | gousaiyang/python-change-parser | 94195e42d3e5106b69792e352d225b8b2b484fe1 | [
"MIT"
] | 3 | 2020-01-01T03:19:34.000Z | 2020-01-11T13:59:58.000Z | vermin_rules_3.9.py | gousaiyang/python-change-parser | 94195e42d3e5106b69792e352d225b8b2b484fe1 | [
"MIT"
] | null | null | null | vermin_rules_3.9.py | gousaiyang/python-change-parser | 94195e42d3e5106b69792e352d225b8b2b484fe1 | [
"MIT"
] | null | null | null | modules_rules = {
"graphlib": (None, (3, 9)),
"test.support.socket_helper": (None, (3, 9)),
"zoneinfo": (None, (3, 9)),
}
# Classes mapped to their minimum interpreter versions. Each value appears to
# be a (py2_min, py3_min) pair where None means "not available in that major
# version" — this matches Vermin's rule convention; confirm against Vermin docs.
classes_rules = {
    "asyncio.BufferedProtocol": (None, (3, 7)),
    "asyncio.PidfdChildWatcher": (None, (3, 9)),
    "importlib.abc.Traversable": (None, (3, 9)),
    "importlib.abc.TraversableReader": (None, (3, 9)),
    "pstats.FunctionProfile": (None, (3, 9)),
    "pstats.StatsProfile": (None, (3, 9)),
    }
# No exception classes were added in this release cycle; kept for symmetry
# with the other rule tables.
exceptions_rules = {
    }
# Functions/methods mapped to their minimum interpreter versions
# (same (py2_min, py3_min) convention as above).
functions_rules = {
    "ast.unparse": (None, (3, 9)),
    "asyncio.loop.shutdown_default_executor": (None, (3, 9)),
    "asyncio.to_thread": (None, (3, 9)),
    "bytearray.removeprefix": (None, (3, 9)),
    "bytearray.removesuffix": (None, (3, 9)),
    "bytes.removeprefix": (None, (3, 9)),
    "bytes.removesuffix": (None, (3, 9)),
    "curses.get_escdelay": (None, (3, 9)),
    "curses.get_tabsize": (None, (3, 9)),
    "curses.set_escdelay": (None, (3, 9)),
    "curses.set_tabsize": (None, (3, 9)),
    "gc.is_finalized": (None, (3, 9)),
    "imaplib.IMAP4.unselect": (None, (3, 9)),
    "importlib.machinery.FrozenImporter.create_module": (None, (3, 4)),
    "importlib.machinery.FrozenImporter.exec_module": (None, (3, 4)),
    "importlib.resources.files": (None, (3, 9)),
    "keyword.issoftkeyword": (None, (3, 9)),
    "logging.StreamHandler.setStream": (None, (3, 7)),
    "math.lcm": (None, (3, 9)),
    "math.nextafter": (None, (3, 9)),
    "math.ulp": (None, (3, 9)),
    "multiprocessing.SimpleQueue.close": (None, (3, 9)),
    "os.pidfd_open": (None, (3, 9)),
    "os.waitstatus_to_exitcode": (None, (3, 9)),
    "pathlib.Path.link_to": (None, (3, 8)),
    "pathlib.Path.readlink": (None, (3, 9)),
    "pathlib.PurePath.is_relative_to": (None, (3, 9)),
    "pathlib.PurePath.with_stem": (None, (3, 9)),
    "pkgutil.resolve_name": (None, (3, 9)),
    "pstats.Stats.get_stats_profile": (None, (3, 9)),
    "random.randbytes": (None, (3, 9)),
    "signal.pidfd_send_signal": (None, (3, 9)),
    "socket.recv_fds": (None, (3, 9)),
    "socket.send_fds": (None, (3, 9)),
    "statistics.NormalDist.zscore": (None, (3, 9)),
    "str.removeprefix": (None, (3, 9)),
    "str.removesuffix": (None, (3, 9)),
    "test.support.print_warning": (None, (3, 9)),
    "test.support.wait_process": (None, (3, 9)),
    "tracemalloc.reset_peak": (None, (3, 9)),
    "types.CodeType.replace": (None, (3, 8)),
    "typing.get_origin": (None, (3, 8)),
    "venv.EnvBuilder.upgrade_dependencies": (None, (3, 9)),
    "xml.etree.ElementTree.indent": (None, (3, 9)),
    }
# Module/class attributes and constants mapped to their minimum interpreter
# versions (same (py2_min, py3_min) convention as above).
variables_and_constants_rules = {
    "decimal.HAVE_CONTEXTVAR": (None, (3, 7)),
    "difflib.SequenceMatcher.bjunk": (None, (3, 2)),
    "difflib.SequenceMatcher.bpopular": (None, (3, 2)),
    "fcntl.F_GETPATH": (None, (3, 9)),
    "fcntl.F_OFD_GETLK": (None, (3, 9)),
    "fcntl.F_OFD_SETLK": (None, (3, 9)),
    "fcntl.F_OFD_SETLKW": (None, (3, 9)),
    "http.HTTPStatus.EARLY_HINTS": (None, (3, 9)),
    "http.HTTPStatus.IM_A_TEAPOT": (None, (3, 9)),
    "http.HTTPStatus.TOO_EARLY": (None, (3, 9)),
    "keyword.softkwlist": (None, (3, 9)),
    "logging.StreamHandler.terminator": (None, (3, 2)),
    "os.CLD_KILLED": (None, (3, 9)),
    "os.CLD_STOPPED": (None, (3, 9)),
    "os.P_PIDFD": (None, (3, 9)),
    "socket.CAN_J1939": (None, (3, 9)),
    "socket.CAN_RAW_JOIN_FILTERS": (None, (3, 9)),
    "socket.IPPROTO_UDPLITE": (None, (3, 9)),
    "sys.__unraisablehook__": (None, (3, 8)),
    "sys.flags.dev_mode": (None, (3, 7)),
    "sys.flags.utf8_mode": (None, (3, 7)),
    "sys.platlibdir": (None, (3, 9)),
    "time.CLOCK_TAI": (None, (3, 9)),
    "token.COLONEQUAL": (None, (3, 8)),
    "token.TYPE_COMMENT": (None, (3, 8)),
    "token.TYPE_IGNORE": (None, (3, 8)),
    "tracemalloc.Traceback.total_nframe": (None, (3, 9)),
    "typing.Annotated": (None, (3, 9)),
    "unittest.mock.Mock.call_args.args": (None, (3, 8)),
    "unittest.mock.Mock.call_args.kwargs": (None, (3, 8)),
    "urllib.response.addinfourl.status": (None, (3, 9)),
    }
# Decorators mapped to their minimum interpreter versions
# (same (py2_min, py3_min) convention as above).
decorators_rules = {
    "functools.cache": (None, (3, 9)),
    }
# (callable, keyword-argument) pairs mapped to the minimum interpreter version
# in which that keyword is accepted (same (py2_min, py3_min) convention).
kwargs_rules = {
    ("argparse.ArgumentParser", "exit_on_error"): (None, (3, 9)),
    ("ast.dump", "indent"): (None, (3, 9)),
    ("asyncio.Future.cancel", "msg"): (None, (3, 9)),
    ("asyncio.Task.cancel", "msg"): (None, (3, 9)),
    ("asyncio.loop.create_connection", "happy_eyeballs_delay"): (None, (3, 8)),
    ("asyncio.loop.create_connection", "interleave"): (None, (3, 8)),
    ("bytearray.hex", "bytes_per_sep"): (None, (3, 8)),
    ("bytearray.hex", "sep"): (None, (3, 8)),
    ("compileall.compile_dir", "hardlink_dupes"): (None, (3, 9)),
    ("compileall.compile_dir", "limit_sl_dest"): (None, (3, 9)),
    ("compileall.compile_dir", "prependdir"): (None, (3, 9)),
    ("compileall.compile_dir", "stripdir"): (None, (3, 9)),
    ("compileall.compile_file", "hardlink_dupes"): (None, (3, 9)),
    ("compileall.compile_file", "limit_sl_dest"): (None, (3, 9)),
    ("compileall.compile_file", "prependdir"): (None, (3, 9)),
    ("compileall.compile_file", "stripdir"): (None, (3, 9)),
    ("concurrent.futures.Executor.shutdown", "cancel_futures"): (None, (3, 9)),
    ("ftplib.FTP", "encoding"): (None, (3, 9)),
    ("ftplib.FTP_TLS", "encoding"): (None, (3, 9)),
    ("hashlib.blake2b", "usedforsecurity"): (None, (3, 9)),
    ("hashlib.blake2s", "usedforsecurity"): (None, (3, 9)),
    ("hashlib.md5", "usedforsecurity"): (None, (3, 9)),
    ("hashlib.new", "usedforsecurity"): (None, (3, 9)),
    ("hashlib.sha1", "usedforsecurity"): (None, (3, 9)),
    ("hashlib.sha224", "usedforsecurity"): (None, (3, 9)),
    ("hashlib.sha256", "usedforsecurity"): (None, (3, 9)),
    ("hashlib.sha384", "usedforsecurity"): (None, (3, 9)),
    ("hashlib.sha3_224", "usedforsecurity"): (None, (3, 9)),
    ("hashlib.sha3_256", "usedforsecurity"): (None, (3, 9)),
    ("hashlib.sha3_384", "usedforsecurity"): (None, (3, 9)),
    ("hashlib.sha3_512", "usedforsecurity"): (None, (3, 9)),
    ("hashlib.sha512", "usedforsecurity"): (None, (3, 9)),
    ("hashlib.shake_128", "usedforsecurity"): (None, (3, 9)),
    ("hashlib.shake_256", "usedforsecurity"): (None, (3, 9)),
    ("imaplib.IMAP4", "timeout"): (None, (3, 9)),
    ("imaplib.IMAP4.open", "timeout"): (None, (3, 9)),
    ("imaplib.IMAP4_SSL", "ssl_context"): (None, (3, 3)),
    ("imaplib.IMAP4_SSL", "timeout"): (None, (3, 9)),
    ("logging.basicConfig", "encoding"): (None, (3, 9)),
    ("logging.basicConfig", "errors"): (None, (3, 9)),
    ("logging.handlers.RotatingFileHandler", "errors"): (None, (3, 9)),
    ("logging.handlers.TimedRotatingFileHandler", "errors"): (None, (3, 9)),
    ("logging.handlers.WatchedFileHandler", "errors"): (None, (3, 9)),
    ("memoryview.hex", "bytes_per_sep"): (None, (3, 8)),
    ("memoryview.hex", "sep"): (None, (3, 8)),
    ("os.sendfile", "in_fd"): (None, (3, 9)),
    ("os.sendfile", "out_fd"): (None, (3, 9)),
    ("pow", "base"): (None, (3, 8)),
    ("pow", "exp"): (None, (3, 8)),
    ("pow", "mod"): (None, (3, 8)),
    ("random.sample", "counts"): (None, (3, 9)),
    ("smtplib.LMTP", "timeout"): (None, (3, 9)),
    ("subprocess.Popen", "extra_groups"): (None, (3, 9)),
    ("subprocess.Popen", "group"): (None, (3, 9)),
    ("subprocess.Popen", "umask"): (None, (3, 9)),
    ("subprocess.Popen", "user"): (None, (3, 9)),
    ("threading.Semaphore.release", "n"): (None, (3, 9)),
    ("typing.get_type_hints", "include_extras"): (None, (3, 9)),
    ("venv.EnvBuilder", "upgrade_deps"): (None, (3, 9)),
    ("xml.etree.ElementInclude.include", "base_url"): (None, (3, 9)),
    ("xml.etree.ElementInclude.include", "max_depth"): (None, (3, 9)),
    }
| 45.51497 | 79 | 0.572951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,976 | 0.523089 |
2a75f505d31db107fb926b596e2c6197ac0613a9 | 48,831 | py | Python | src/pynvraw/nvapi_api.py | JustAMan/pynvraw | 9a860c547256cfc6f527bf4be3aeeed27d95fd85 | [
"MIT"
] | 8 | 2021-07-29T22:35:31.000Z | 2022-01-27T06:41:36.000Z | src/pynvraw/nvapi_api.py | JustAMan/pynvraw | 9a860c547256cfc6f527bf4be3aeeed27d95fd85 | [
"MIT"
] | 1 | 2021-12-19T05:28:29.000Z | 2021-12-19T05:28:29.000Z | src/pynvraw/nvapi_api.py | JustAMan/pynvraw | 9a860c547256cfc6f527bf4be3aeeed27d95fd85 | [
"MIT"
] | 1 | 2021-10-05T09:24:54.000Z | 2021-10-05T09:24:54.000Z | '''Low-level API working through obscure nvapi.dll.'''
import ctypes
import typing
import sys
import collections
import enum
from .status import NvStatus, NvError, NVAPI_OK
# Load the NVAPI library matching the interpreter bitness: 64-bit Python needs
# nvapi64.dll, 32-bit Python needs nvapi.dll.
nvapi = ctypes.CDLL('nvapi64.dll' if sys.maxsize > 2**32 else 'nvapi.dll')
# nvapi_QueryInterface is the only stable export; every other NVAPI function is
# resolved through it by a 32-bit interface id (see Method below).
_nvapi_QueryInterface = nvapi.nvapi_QueryInterface
_nvapi_QueryInterface.restype = ctypes.c_void_p
_nvapi_QueryInterface.argtypes = [ctypes.c_int]

# Limits and well-known constants mirrored from nvapi.h / community headers.
NVAPI_MAX_PHYSICAL_GPUS = 64
NVAPI_MAX_THERMAL_SENSORS_PER_GPU = 3
NVAPI_THERMAL_TARGET_GPU = 1
NVAPI_THERMAL_TARGET_ALL = 15
NVAPI_SHORT_STRING_MAX = 64
NVAPI_LONG_STRING_MAX = 256
NVAPI_COOLER_POLICY_USER = 1
NVAPI_MAX_GPU_PUBLIC_CLOCKS = 32
# Selector values for NV_GPU_CLOCK_FREQUENCIES.ClockType.
NV_GPU_CLOCK_FREQUENCIES_CURRENT_FREQ = 0
NV_GPU_CLOCK_FREQUENCIES_BASE_CLOCK = 1
NV_GPU_CLOCK_FREQUENCIES_BOOST_CLOCK = 2
# Indices of well-known public clock domains inside the clock arrays.
NVAPI_GPU_PUBLIC_CLOCK_GRAPHICS = 0
NVAPI_GPU_PUBLIC_CLOCK_MEMORY = 4
NVAPI_GPU_PUBLIC_CLOCK_PROCESSOR = 7
NVAPI_GPU_PUBLIC_CLOCK_VIDEO = 8
NVAPI_MAX_GPU_PSTATE20_PSTATES = 16
NVAPI_MAX_GPU_PSTATE20_CLOCKS = 8
NVAPI_MAX_GPU_PSTATE20_BASE_VOLTAGES = 4
NVAPI_MAX_GPU_TOPOLOGY_ENTRIES = 4
NVAPI_MAX_NUMBER_OF_APPLICATIONS = 128

# Fixed-size character buffers used by NVAPI string outputs.
NvAPI_ShortString = ctypes.c_char * NVAPI_SHORT_STRING_MAX
NvAPI_LongString = ctypes.c_char * NVAPI_LONG_STRING_MAX
class StrMixin:
    '''Mixin rendering ctypes structures/unions as an indented, readable tree.

    Subclasses are expected to be ctypes types with a ``_fields_`` list.
    Fields named ``reserved*`` are hidden, and a leading-underscore field is
    replaced by the matching public property (e.g. ``_voltage`` -> ``voltage``)
    when one exists.
    '''
    def __str__(self):
        dct = self.as_dict()
        # First line is the type name; each nesting level adds one tab.
        result = [dct.pop('__name__') + ':']
        for k, v in dct.items():
            if isinstance(v, list):
                result.append(f'\t{k}=[')
                for e in v:
                    result.extend(f'\t\t{l}' for l in str(e).splitlines())
                result.append('\t]')
            elif isinstance(v, (dict, collections.OrderedDict, StrMixin)):
                # Nested mapping/struct: first line inline, rest indented below.
                v = str(v).splitlines()
                result.append(f'\t{k}={v[0]}')
                result.extend(f'\t{e}' for e in v[1:])
            else:
                result.append(f'\t{k}={v!s}')
        return '\n'.join(result)
    def __repr__(self):
        return self.__str__()
    @classmethod
    def _cast(cls, obj):
        # as_dict helper: recurse into nested StrMixin values, stringify the rest.
        if isinstance(obj, cls):
            return obj.as_dict()
        return str(obj)
    def as_dict(self):
        '''Collect displayable field values (and properties) into an ordered dict.'''
        result = collections.OrderedDict(__name__=self.__class__.__name__)
        shown = []
        # Walk the MRO base-first so inherited fields come before subclass ones.
        for base in reversed(self.__class__.__mro__):
            fields = getattr(base, '_fields_', [])
            for fld in fields:
                name = fld[0]
                if name.startswith('reserved'):
                    continue
                # Prefer the public property over its private backing field.
                if name.startswith('_') and hasattr(self, name[1:]):
                    name = name[1:]
                if name not in shown:
                    shown.append(name)
            for name, value in base.__dict__.items():
                if isinstance(value, property) and name not in shown:
                    shown.append(name)
        for name in shown:
            value = getattr(self, name)
            if value is None:
                continue
            if isinstance(value, (list, tuple, ctypes.Array)):
                if len(value) == 0:
                    value = '[]'
                elif isinstance(value[0], (int, float, str, ctypes._SimpleCData)):
                    # Scalar arrays render inline on one line.
                    value = f'[{", ".join(map(str, value))}]'
                else:
                    value = [self._cast(e) for e in value]
            result[name] = value
        return result
class StrStructure(StrMixin, ctypes.Structure):
    '''ctypes.Structure with the human-readable str()/repr() from StrMixin.'''
    pass
class StrUnion(StrMixin, ctypes.Union):
    '''ctypes.Union with the human-readable str()/repr() from StrMixin.'''
    pass
class NvPhysicalGpu(ctypes.Structure):
    '''Opaque NVAPI handle to one physical GPU; contents are never interpreted.'''
    _pack_ = 8
    _fields_ = [('unused', ctypes.c_int),
                ('pad', ctypes.c_int8)]
# Output buffer type for NvAPI_EnumPhysicalGPUs (fixed-size handle array).
NV_ENUM_GPUS = NvPhysicalGpu * NVAPI_MAX_PHYSICAL_GPUS
class NvVersioned(StrStructure):
    '''Base for NVAPI structs whose first dword is a "version" value.

    NVAPI encodes it as (struct_version << 16) | sizeof(struct); it is filled
    in automatically on construction from the subclass' ``_nv_version_``.
    '''
    def __init__(self):
        self.version = ctypes.sizeof(self) + (self._nv_version_ << 16)
class NV_THERMAL_SENSOR(StrStructure):
    '''One thermal sensor entry of NV_GPU_THERMAL_SETTINGS (temps in degrees C).'''
    _fields_ = [('controller', ctypes.c_int),
                ('defaultMinTemp', ctypes.c_int32),
                ('defaultMaxTemp', ctypes.c_int32),
                ('currentTemp', ctypes.c_int32),
                ('target', ctypes.c_int)]
class NV_GPU_THERMAL_SETTINGS(NvVersioned):
    '''Result of NvAPI_GPU_GetThermalSettings; only the first `count` sensors are valid.'''
    _nv_version_ = 2
    _fields_ = [('version', ctypes.c_uint32),
                ('count', ctypes.c_uint32),
                ('sensor', NV_THERMAL_SENSOR * NVAPI_MAX_THERMAL_SENSORS_PER_GPU)]
class NV_GPU_THERMAL_EX(NvVersioned):
    '''Buffer for the private NvAPI_GPU_QueryThermalSensors call.

    `mask` selects which of the 32 sensor slots to query (one bit per slot).
    '''
    _nv_version_ = 2
    _pack_ = 1
    _fields_ = [('version', ctypes.c_uint32),
                ('mask', ctypes.c_uint32),
                ('pad', ctypes.c_uint32 * 8),
                ('_sensors', ctypes.c_uint32 * 32)]
    @property
    def sensors(self):
        # Raw readings are divided by 256 (fixed-point with 8 fractional bits);
        # presumably degrees Celsius — TODO confirm against NVAPI headers.
        return tuple(x / 256.0 for x in self._sensors)
class NV_COOLER_TARGET(enum.IntFlag):
    '''Bitmask of what a cooler cools (selector for cooler queries).'''
    NONE = 0
    GPU = 1
    MEMORY = 2
    POWER_SUPPLY = 4
    ALL = 7
class NvCoolerLevels(NvVersioned):
    '''Input for NvAPI_GPU_SetCoolerLevels: per-cooler level (percent) and policy.'''
    class _NvCoolerLevel(StrStructure):
        _pack_ = 1
        _fields_ = [('level', ctypes.c_uint32),
                    ('policy', ctypes.c_uint32)]
    _nv_version_ = 1
    _fields_ = [('version', ctypes.c_uint32),
                ('levels', _NvCoolerLevel * 20)]
class NV_GPU_COOLER_SETTINGS(NvVersioned):
    '''Result of NvAPI_GPU_GetCoolerSettings; only the first `count` coolers are valid.'''
    class NV_SINGLE_COOLER(StrStructure):
        # Levels are duty-cycle percentages; policy values match NVAPI_COOLER_POLICY_*.
        _fields_ = [('type', ctypes.c_int32),
                    ('controller', ctypes.c_int32),
                    ('default_min', ctypes.c_int32),
                    ('default_max', ctypes.c_int32),
                    ('current_min', ctypes.c_int32),
                    ('current_max', ctypes.c_int32),
                    ('current_level', ctypes.c_int32),
                    ('default_policy', ctypes.c_int32),
                    ('current_policy', ctypes.c_int32),
                    ('_target', ctypes.c_int32),
                    ('control_type', ctypes.c_int32),
                    ('active', ctypes.c_int32)]
        @property
        def target(self):
            '''What this cooler cools, as a NV_COOLER_TARGET flag set.'''
            return NV_COOLER_TARGET(self._target)
    _nv_version_ = 2
    _fields_ = [('version', ctypes.c_uint32),
                ('count', ctypes.c_uint32),
                ('coolers', NV_SINGLE_COOLER * 20)]
class NV_GPU_FAN_COOLERS_INFO(NvVersioned):
    '''Static fan-cooler info (private NvAPI_GPU_ClientFanCoolersGetInfo).'''
    class NV_GPU_FAN_COOLERS_INFO_ENTRY(StrStructure):
        _pack_ = 8
        _fields_ = [('coolerId', ctypes.c_uint32),
                    ('unknown', ctypes.c_uint32 * 2),
                    ('maxRpm', ctypes.c_uint32),
                    ('reserved', ctypes.c_uint32 * 8)]
    _nv_version_ = 1
    _pack_ = 8
    _fields_ = [('version', ctypes.c_uint32),
                ('supported', ctypes.c_bool),
                ('count', ctypes.c_uint32),
                ('reserved', ctypes.c_uint32 * 8),
                ('_entries', NV_GPU_FAN_COOLERS_INFO_ENTRY * 32)]
    @property
    def entries(self):
        # Only the first `count` slots are populated by the driver.
        return self._entries[:self.count]
class NV_GPU_FAN_COOLERS_STATUS(NvVersioned):
    '''Live fan-cooler state (private NvAPI_GPU_ClientFanCoolersGetStatus).'''
    class NV_GPU_FAN_COOLERS_STATUS_ENTRY(StrStructure):
        _pack_ = 8
        _fields_ = [('coolerId', ctypes.c_uint32),
                    ('currentRpm', ctypes.c_uint32),
                    ('minLevel', ctypes.c_uint32),
                    ('maxLevel', ctypes.c_uint32),
                    ('level', ctypes.c_uint32),
                    ('reserved', ctypes.c_uint32 * 8)]
    _nv_version_ = 1
    _pack_ = 8
    _fields_ = [('version', ctypes.c_uint32),
                ('count', ctypes.c_uint32),
                ('reserved', ctypes.c_uint32 * 8),
                ('_entries', NV_GPU_FAN_COOLERS_STATUS_ENTRY * 32)]
    @property
    def entries(self):
        # Only the first `count` slots are populated by the driver.
        return self._entries[:self.count]
class FAN_COOLER_CONTROL_MODE(enum.IntEnum):
    '''Fan control mode: driver-managed curve (AUTO) or fixed user level (MANUAL).'''
    AUTO = 0
    MANUAL = 1
class NV_GPU_FAN_COOLERS_CONTROL(NvVersioned):
    '''Fan control block (private NvAPI_GPU_ClientFanCoolersGet/SetControl).'''
    class NV_GPU_FAN_COOLERS_CONTROL_ENTRY(StrStructure):
        _pack_ = 8
        _fields_ = [('coolerId', ctypes.c_uint32),
                    ('level', ctypes.c_uint32),
                    ('_mode', ctypes.c_uint32),
                    ('reserved', ctypes.c_uint32 * 8)]
        @property
        def mode(self):
            return FAN_COOLER_CONTROL_MODE(self._mode)
        @mode.setter
        def mode(self, value):
            self._mode = int(value)
    _nv_version_ = 1
    _pack_ = 8
    _fields_ = [('version', ctypes.c_uint32),
                ('unknown', ctypes.c_uint32),
                ('count', ctypes.c_uint32),
                ('reserved', ctypes.c_uint32 * 8),
                ('_entries', NV_GPU_FAN_COOLERS_CONTROL_ENTRY * 32)]
    @property
    def entries(self):
        # Only the first `count` slots are meaningful.
        return self._entries[:self.count]
class NV_GPU_CLOCK_DOMAIN(StrStructure):
    '''One clock-domain slot of NV_GPU_CLOCK_FREQUENCIES (frequency in kHz).'''
    _fields_ = [('bIsPresent', ctypes.c_uint32, 1),
                ('reserved', ctypes.c_uint32, 31),
                ('frequency', ctypes.c_uint32)]
class NV_GPU_CLOCK_FREQUENCIES(NvVersioned):
    '''In/out block for NvAPI_GPU_GetAllClockFrequencies.

    Set ClockType to one of NV_GPU_CLOCK_FREQUENCIES_* before the call.
    '''
    _nv_version_ = 3
    _fields_ = [('version', ctypes.c_uint32),
                ('ClockType', ctypes.c_uint32, 4),
                ('reserved', ctypes.c_uint32, 20),
                ('reserved1', ctypes.c_uint32, 8),
                ('domain', NV_GPU_CLOCK_DOMAIN * NVAPI_MAX_GPU_PUBLIC_CLOCKS)]
class NV_GPU_CLOCKS_INFO(NvVersioned):
    '''Raw clock array for the legacy NvAPI_GPU_GetAllClocks call.'''
    _nv_version_ = 2
    _fields_ = [('version', ctypes.c_uint32),
                ('clocks', ctypes.c_uint32 * 288)]
class NV_GPU_PERF_PSTATES20_PARAM_DELTA(StrStructure):
    '''An adjustable offset with its allowed [valueMin, valueMax] range.'''
    _fields_ = [('value', ctypes.c_int32),
                ('valueMin', ctypes.c_int32),
                ('valueMax', ctypes.c_int32)]
class NV_GPU_PSTATE20_BASE_VOLTAGE_ENTRY_V1(StrStructure):
    '''Base voltage of one domain within a P-state (volt_uV in microvolts).'''
    _fields_ = [('domainId', ctypes.c_int),
                ('bIsEditable', ctypes.c_uint32, 1),
                ('reserved', ctypes.c_uint32, 31),
                ('volt_uV', ctypes.c_uint32),
                ('voltDelta_uV', NV_GPU_PERF_PSTATES20_PARAM_DELTA)]
class ClockType(enum.IntEnum):
    '''How a P-state clock entry is expressed: one fixed frequency or a range.'''
    SINGLE = 0
    RANGE = 1
class NV_GPU_PUBLIC_CLOCK_ID(enum.IntEnum):
    '''Public clock domain ids used by the P-state / boost-table structures.'''
    GRAPHICS = 0
    MEMORY = 1
    PROCESSOR = 2
    VIDEO = 3
    MEMORY2 = 4
    UNDEFINED1 = 5
    UNDEFINED2 = 6
class PerformanceStateId(enum.IntEnum):
    '''GPU performance state ids (P0 = highest performance, P12 = idle).'''
    P0_3DPerformance = 0
    P1_3DPerformance = 1
    P2_Balanced = 2
    P3_Balanced = 3
    P4 = 4
    P5 = 5
    P6 = 6
    P7 = 7
    P8_HDVideoPlayback = 8
    P9 = 9
    P10_DVDPlayback = 10
    P11 = 11
    P12_Idle = 12
    P13 = 13
    P14 = 14
    P15 = 15
    Undefined = NVAPI_MAX_GPU_PSTATE20_PSTATES
    All = 16
class NV_GPU_PERF_PSTATES20_INFO(NvVersioned):
    '''P-state 2.0 table (NvAPI_GPU_Get/SetPstates20): clocks, voltages, overvolt.'''
    class _NV_GPU_PSTATE(StrStructure):
        class NV_GPU_PSTATE20_CLOCK_ENTRY_V1(StrStructure):
            class _NV_GPU_PSTATE_DATA_U(StrUnion):
                class _NV_GPU_PSTATE_DATA_RANGE(StrStructure):
                    # Raw units: frequencies in kHz, voltages in microvolts;
                    # properties below convert to MHz and volts.
                    _fields_ = [('_minFreq', ctypes.c_uint32),
                                ('_maxFreq', ctypes.c_uint32),
                                ('domainId', ctypes.c_int),
                                ('_minVoltage', ctypes.c_uint32),
                                ('_maxVoltage', ctypes.c_uint32)]
                    @property
                    def minFreq(self):
                        return self._minFreq / 1000.0
                    @property
                    def maxFreq(self):
                        return self._maxFreq / 1000.0
                    @property
                    def minVoltage(self):
                        return self._minVoltage / 1000000.0
                    @property
                    def maxVoltage(self):
                        return self._maxVoltage / 1000000.0
                _fields_ = [('_singleFreq', ctypes.c_uint32),
                            ('range', _NV_GPU_PSTATE_DATA_RANGE)]
                @property
                def singleFreq(self):
                    # kHz -> MHz.
                    return self._singleFreq / 1000.0
            _fields_ = [('_domainId', ctypes.c_int),
                        ('_typeId', ctypes.c_int),
                        ('bIsEditable', ctypes.c_uint32, 1),
                        ('reserved', ctypes.c_uint32, 31),
                        ('freqDelta_kHz', NV_GPU_PERF_PSTATES20_PARAM_DELTA),
                        ('_data', _NV_GPU_PSTATE_DATA_U)]
            @property
            def domainId(self):
                return NV_GPU_PUBLIC_CLOCK_ID(self._domainId)
            @property
            def typeId(self):
                return ClockType(self._typeId)
            @property
            def data(self):
                # Pick the union member that matches the entry's clock type.
                attr = {ClockType.SINGLE: 'singleFreq',
                        ClockType.RANGE: 'range'}[self.typeId]
                return getattr(self._data, attr)
        _fields_ = [('_pstateId', ctypes.c_int),
                    ('bIsEditable', ctypes.c_uint32, 1),
                    ('reserved', ctypes.c_uint32, 31),
                    ('_clocks', NV_GPU_PSTATE20_CLOCK_ENTRY_V1 * NVAPI_MAX_GPU_PSTATE20_CLOCKS),
                    ('_baseVoltages', NV_GPU_PSTATE20_BASE_VOLTAGE_ENTRY_V1 * NVAPI_MAX_GPU_PSTATE20_BASE_VOLTAGES)]
        def _get_nums_from_parent_pstate20(self):
            # Walk ctypes' internal parent links (_b_base_) up to the enclosing
            # NV_GPU_PERF_PSTATES20_INFO to learn how many clock/voltage slots
            # are valid; fall back to the full array sizes when detached.
            pstates_array = self._b_base_
            if pstates_array is None:
                return NVAPI_MAX_GPU_PSTATE20_CLOCKS, NVAPI_MAX_GPU_PSTATE20_BASE_VOLTAGES
            pstate20 = pstates_array._b_base_
            if pstate20 is None or not isinstance(pstate20, NV_GPU_PERF_PSTATES20_INFO):
                return NVAPI_MAX_GPU_PSTATE20_CLOCKS, NVAPI_MAX_GPU_PSTATE20_BASE_VOLTAGES
            return pstate20.numClocks, pstate20.numBaseVoltages
        @property
        def pstateId(self):
            return PerformanceStateId(self._pstateId)
        @property
        def clocks(self):
            return self._clocks[:self._get_nums_from_parent_pstate20()[0]]
        @property
        def baseVoltages(self):
            return self._baseVoltages[:self._get_nums_from_parent_pstate20()[1]]
    class _NV_GPU_OVERVOLT(StrStructure):
        '''Overvoltage settings appended to the P-state table.'''
        _fields_ = [('numVoltages', ctypes.c_uint32),
                    ('_voltages', NV_GPU_PSTATE20_BASE_VOLTAGE_ENTRY_V1 * NVAPI_MAX_GPU_PSTATE20_BASE_VOLTAGES)]
        @property
        def voltages(self):
            return self._voltages[:self.numVoltages]
    _nv_version_ = 2
    _fields_ = [('version', ctypes.c_uint32),
                ('bIsEditable', ctypes.c_uint32, 1),
                ('reserved', ctypes.c_uint32, 31),
                ('numPstates', ctypes.c_uint32),
                ('numClocks', ctypes.c_uint32),
                ('numBaseVoltages', ctypes.c_uint32),
                ('_pstates', _NV_GPU_PSTATE * NVAPI_MAX_GPU_PSTATE20_PSTATES),
                ('ov', _NV_GPU_OVERVOLT)]
    @property
    def pstates(self):
        return self._pstates[:self.numPstates]
class _NV_GPU_POWER_INFO_ENTRY(StrStructure):
    '''Power policy limits for one P-state (values presumably in 1/1000 percent
    of TDP, as commonly documented for this private API — TODO confirm).'''
    _fields_ = [('pstate', ctypes.c_uint32),
                ('pad0', ctypes.c_uint32 * 2),
                ('min_power', ctypes.c_uint32),
                ('pad1', ctypes.c_uint32 * 2),
                ('def_power', ctypes.c_uint32),
                ('pad2', ctypes.c_uint32 * 2),
                ('max_power', ctypes.c_uint32),
                ('pad3', ctypes.c_uint32)]
class NV_GPU_POWER_INFO(NvVersioned):
    '''Result of the private NvAPI_GPU_ClientPowerPoliciesGetInfo call.'''
    _nv_version_ = 1
    _fields_ = [('version', ctypes.c_uint32),
                ('valid', ctypes.c_uint8),
                ('count', ctypes.c_uint8),
                ('padding', ctypes.c_uint8 * 2),
                ('_entries', _NV_GPU_POWER_INFO_ENTRY * 4)]
    @property
    def entries(self):
        # Only the first `count` slots are populated by the driver.
        return self._entries[:self.count]
class _NV_GPU_POWER_STATUS_ENTRY(StrStructure):
    '''One power-limit slot of NV_GPU_POWER_STATUS.'''
    _fields_ = [('pad0', ctypes.c_uint32),
                ('pad1', ctypes.c_uint32),
                ('power', ctypes.c_uint32),
                ('pad2', ctypes.c_uint32)]
class NV_GPU_POWER_STATUS(NvVersioned):
    '''In/out block for NvAPI_GPU_ClientPowerPoliciesGet/SetStatus.'''
    _nv_version_ = 1
    _fields_ = [('version', ctypes.c_uint32),
                ('count', ctypes.c_uint32),
                ('_entries', _NV_GPU_POWER_STATUS_ENTRY * 4)]
    @property
    def entries(self):
        return self._entries[:self.count]
class NV_GPU_TOPOLOGY_ENTRY(StrStructure):
    '''One power-topology reading; raw power is in milliunits.'''
    _fields_ = [('domain', ctypes.c_int),
                ('reserved1', ctypes.c_int),
                ('_power', ctypes.c_uint32),
                ('reserved2', ctypes.c_int)]
    @property
    def power(self) -> float:
        # milli -> whole units.
        return self._power / 1000.0
class NV_GPU_TOPOLOGY_STATUS(NvVersioned):
    '''Result of NvAPI_GPU_ClientPowerTopologyGetStatus.'''
    _nv_version_ = 1
    _fields_ = [('version', ctypes.c_uint32),
                ('count', ctypes.c_uint32),
                ('_entries', NV_GPU_TOPOLOGY_ENTRY * NVAPI_MAX_GPU_TOPOLOGY_ENTRIES)]
    @property
    def entries(self):
        return self._entries[:self.count]
class PowerChannelType(enum.IntEnum):
    '''How a power-monitor channel obtains its value (sensor, estimate, sum, ...).'''
    DEFAULT = 0
    SUMMATION = 1
    ESTIMATION = 2
    SLOW = 3
    GEMINI_CORRECTION = 4
    C1X = 5
    SENSOR = 6
    PSTATE_ESTIMATION_LUT = 7
    SENSOR_CLIENT_ALIGNED = 8
class PowerRailType(enum.IntEnum):
    '''Power rail a monitor channel measures (OUT_* = rail output, IN_* = input).'''
    UNKNOWN = 0
    OUT_NVVDD = 1
    OUT_FBVDD = 2
    OUT_FBVDDQ = 3
    OUT_FBVDD_Q = 4
    OUT_PEXVDD = 5
    OUT_A3V3 = 6
    OUT_3V3NV = 7
    OUT_TOTAL_GPU = 8
    OUT_FBVDDQ_GPU = 9
    OUT_FBVDDQ_MEM = 10
    OUT_SRAM = 11
    IN_PEX12V1 = 222
    IN_TOTAL_BOARD2 = 223
    IN_HIGH_VOLT0 = 224
    IN_HIGH_VOLT1 = 225
    IN_NVVDD1 = 226
    IN_NVVDD2 = 227
    IN_EXT12V_8PIN2 = 228
    IN_EXT12V_8PIN3 = 229
    IN_EXT12V_8PIN4 = 230
    IN_EXT12V_8PIN5 = 231
    IN_MISC0 = 232
    IN_MISC1 = 233
    IN_MISC2 = 234
    IN_MISC3 = 235
    IN_USBC0 = 236
    IN_USBC1 = 237
    IN_FAN0 = 238
    IN_FAN1 = 239
    IN_SRAM = 240
    IN_PWR_SRC_PP = 241
    IN_3V3_PP = 242
    IN_3V3_MAIN = 243
    IN_3V3_AON = 244
    IN_TOTAL_BOARD = 245
    IN_NVVDD = 246
    IN_FBVDD = 247
    IN_FBVDDQ = 248
    IN_FBVDD_Q = 249
    IN_EXT12V_8PIN0 = 250
    IN_EXT12V_8PIN1 = 251
    IN_EXT12V_6PIN0 = 252
    IN_EXT12V_6PIN1 = 253
    IN_PEX3V3 = 254
    IN_PEX12V = 255
class NV_POWER_MONITOR_INFO(NvVersioned):
    '''Static description of the GPU power monitor (private NvAPI_GPU_PowerMonitorGetInfo).

    `channelMask` selects valid entries in `channels`, `channelRelationMask`
    in `relations`.
    '''
    class NV_POWER_MONITOR_INFO_CHANNEL_INFO(StrStructure):
        class NV_POWER_MONITOR_INFO_CHANNEL_INFO_DATA(StrUnion):
            # Per-channel-type payload; the active member is chosen by the
            # enclosing channel's `type` (see TYPES below).
            class NV_GPU_POWER_MONITOR_POWER_CHANNEL_1X_INFO(StrStructure):
                _fields_ = [('powerDeviceMask', ctypes.c_uint32),
                            ('powerLimit_mW', ctypes.c_uint32)]
            class NV_GPU_POWER_MONITOR_POWER_CHANNEL_SLOW_INFO(NV_GPU_POWER_MONITOR_POWER_CHANNEL_1X_INFO):
                pass
            class NV_GPU_POWER_MONITOR_POWER_CHANNEL_GEMINI_CORRECTION_INFO(NV_GPU_POWER_MONITOR_POWER_CHANNEL_SLOW_INFO):
                pass
            class NV_GPU_POWER_MONITOR_POWER_CHANNEL_SENSOR_INFO(StrStructure):
                _fields_ = [('powerDeviceIndex', ctypes.c_uint8),
                            ('powerDeviceProviderIndex', ctypes.c_uint8)]
            class NV_GPU_POWER_MONITOR_POWER_CHANNEL_SUMMATION_INFO(StrStructure):
                # Range of relation indices whose values are summed.
                _fields_ = [('relationIndexFirst', ctypes.c_uint8),
                            ('relationIndexLast', ctypes.c_uint8)]
            class NV_GPU_POWER_MONITOR_POWER_CHANNEL_PSTATE_ESTIMATION_LUT_INFO(StrStructure):
                class NV_GPU_POWER_MONITOR_POWER_CHANNEL_PSTATE_ESTIMATION_LUT_ENTRY_INFO(StrStructure):
                    _fields_ = [('pstateId', ctypes.c_int32),
                                ('powerOffset', ctypes.c_uint32)]
                _fields_ = [('entries', NV_GPU_POWER_MONITOR_POWER_CHANNEL_PSTATE_ESTIMATION_LUT_ENTRY_INFO * 2)]
            _fields_ = [('c1x', NV_GPU_POWER_MONITOR_POWER_CHANNEL_1X_INFO),
                        ('gemmCorr', NV_GPU_POWER_MONITOR_POWER_CHANNEL_GEMINI_CORRECTION_INFO),
                        ('sensor', NV_GPU_POWER_MONITOR_POWER_CHANNEL_SENSOR_INFO),
                        ('slow', NV_GPU_POWER_MONITOR_POWER_CHANNEL_SLOW_INFO),
                        ('sum', NV_GPU_POWER_MONITOR_POWER_CHANNEL_SUMMATION_INFO),
                        ('pstateEstLUT', NV_GPU_POWER_MONITOR_POWER_CHANNEL_PSTATE_ESTIMATION_LUT_INFO),
                        ('reserved', ctypes.c_uint8 * 16)]
        _fields_ = [('deviceMask', ctypes.c_uint32),
                    ('_offset', ctypes.c_uint32),
                    ('_limit', ctypes.c_uint32),
                    ('_type', ctypes.c_uint32),
                    ('_rail', ctypes.c_uint32),
                    ('_voltFixed', ctypes.c_uint32),
                    ('powerCorrectSlope', ctypes.c_uint32),
                    ('currentCorrectSlope', ctypes.c_uint32),
                    ('currentOffset_mA', ctypes.c_int32),
                    ('reserved', ctypes.c_uint8 * 8),
                    ('_data', NV_POWER_MONITOR_INFO_CHANNEL_INFO_DATA)]
        # Maps channel type -> union member name holding that type's payload.
        TYPES = {
            PowerChannelType.SUMMATION: 'sum',
            PowerChannelType.SLOW: 'slow',
            PowerChannelType.GEMINI_CORRECTION: 'gemmCorr',
            PowerChannelType.C1X: 'c1x',
            PowerChannelType.SENSOR: 'sensor',
            PowerChannelType.PSTATE_ESTIMATION_LUT: 'pstateEstLUT',
        }
        @property
        def rail(self):
            return PowerRailType(self._rail)
        @property
        def type(self):
            return PowerChannelType(self._type)
        @property
        def data(self):
            # None for channel types without a dedicated payload.
            attr = self.TYPES.get(self.type)
            if attr:
                return getattr(self._data, attr)
            return None
        @property
        def offset(self):
            '''Returns power offset in Watts.'''
            return self._offset / 1000.0
        @property
        def limit(self):
            '''Returns power limit in Watts.'''
            return self._limit / 1000.0
        @property
        def volt(self):
            '''Returns fixed voltage in Volts.'''
            return self._voltFixed / 1000.0
    class NV_GPU_POWER_MONITOR_POWER_CHANNEL_RELATIONSHIP_INFO(StrStructure):
        class NV_GPU_POWER_MONITOR_POWER_CHANNEL_RELATIONSHIP_INFO_DATA(StrUnion):
            class NV_GPU_POWER_MONITOR_POWER_CHANNEL_RELATIONSHIP_WEIGHT_INFO(StrStructure):
                _fields_ = [('weight', ctypes.c_int32)]
            class NV_GPU_POWER_MONITOR_POWER_CHANNEL_RELATIONSHIP_BALANCED_PHASE_EST_INFO(StrStructure):
                _fields_ = [('numTotalPhases', ctypes.c_uint8),
                            ('numStaticPhases', ctypes.c_uint8),
                            ('balancedPhasePolicyRelationIndexFirst', ctypes.c_uint8),
                            ('balancedPhasePolicyRelationIndexLast', ctypes.c_uint8)]
            class NV_GPU_POWER_MONITOR_POWER_CHANNEL_RELATIONSHIP_BALANCING_PWM_WEIGHT_INFO(StrStructure):
                _fields_ = [('balancingRelationIndex', ctypes.c_uint8),
                            ('bPrimary', ctypes.c_uint8)]
            class NV_GPU_POWER_MONITOR_POWER_CHANNEL_RELATIONSHIP_REGULATOR_LOSS_EST_INFO(StrStructure):
                _fields_ = [('regulatorType', ctypes.c_uint8),
                            ('coefficients', ctypes.c_int32 * 6)]
            class NV_GPU_POWER_MONITOR_POWER_CHANNEL_RELATIONSHIP_REGULATOR_LOSS_DYN_INFO(StrStructure):
                _fields_ = [('thermMonIdx', ctypes.c_uint8),
                            ('voltDomain', ctypes.c_uint8)]
            _fields_ = [('weight', NV_GPU_POWER_MONITOR_POWER_CHANNEL_RELATIONSHIP_WEIGHT_INFO),
                        ('balancedPhaseEst', NV_GPU_POWER_MONITOR_POWER_CHANNEL_RELATIONSHIP_BALANCED_PHASE_EST_INFO),
                        ('balancingPwmWeight', NV_GPU_POWER_MONITOR_POWER_CHANNEL_RELATIONSHIP_BALANCING_PWM_WEIGHT_INFO),
                        ('regulatorLossEst', NV_GPU_POWER_MONITOR_POWER_CHANNEL_RELATIONSHIP_REGULATOR_LOSS_EST_INFO),
                        ('regulatorLossDyn', NV_GPU_POWER_MONITOR_POWER_CHANNEL_RELATIONSHIP_REGULATOR_LOSS_DYN_INFO),
                        ('reserved', ctypes.c_uint8 * 32)]
        _fields_ = [('_type', ctypes.c_int32),
                    ('channelIndex', ctypes.c_uint8),
                    ('_data', NV_GPU_POWER_MONITOR_POWER_CHANNEL_RELATIONSHIP_INFO_DATA)
                    ]
        # Maps raw relationship type -> (display name, union member name).
        TYPES = {0: ('WEIGHT', 'weight'),
                 1: ('BALANCED_PHASE_EST', 'balancedPhaseEst'),
                 2: ('BALANCING_PWM_WEIGHT', 'balancingPwmWeight'),
                 3: ('REGULATOR_LOSS_EST', 'regulatorLossEst'),
                 4: ('REGULATOR_LOSS_DYN', 'regulatorLossDyn'),
                 -1: ('UNKNOWN', None)}
        @property
        def type(self):
            return self.TYPES.get(self._type, [str(self._type)])[0]
        @property
        def data(self):
            attr = self.TYPES.get(self._type, [None, None])[1]
            if attr:
                return getattr(self._data, attr)
            return None
    _nv_version_ = 3
    #_pack_ = 4
    _fields_ = [('version', ctypes.c_uint32),
                ('isSupported', ctypes.c_bool),
                ('_samplingPeriod', ctypes.c_uint32),
                ('samplingCount', ctypes.c_uint32),
                ('channelMask', ctypes.c_uint32),
                ('channelRelationMask', ctypes.c_uint32),
                ('totalGpuPowerChannelMask', ctypes.c_uint32),
                ('totalGpuChannelIndex', ctypes.c_uint8),
                ('reserved', ctypes.c_uint8 * 8),
                ('channels', NV_POWER_MONITOR_INFO_CHANNEL_INFO * 32),
                ('relations', NV_GPU_POWER_MONITOR_POWER_CHANNEL_RELATIONSHIP_INFO * 32)]
    @property
    def samplingPeriod(self) -> float:
        '''Sampling period in seconds.'''
        return self._samplingPeriod / 1000.0
class NV_POWER_MONITOR_STATUS(NvVersioned):
    '''Live power-monitor readings (private NvAPI_GPU_PowerMonitorGetStatus).

    `channelMask` selects which `entries` slots carry data.
    '''
    class NV_POWER_MONITOR_STATUS_ENTRY(StrStructure):
        _pack_ = 1
        _fields_ = [('_powerAvg', ctypes.c_uint32),
                    ('_powerMin', ctypes.c_uint32),
                    ('_powerMax', ctypes.c_uint32),
                    ('_current', ctypes.c_uint32),
                    ('_voltage', ctypes.c_uint32),
                    ('_energy', ctypes.c_uint64),
                    ('reserved', ctypes.c_uint8 * 16)]
        @property
        def power(self) -> float:
            '''Power consumption in Watts.'''
            return self._powerAvg / 1000.0
        @property
        def current(self):
            '''Current in Amperes.'''
            return self._current / 1000.0
        @property
        def voltage(self):
            '''Voltage in Volts.'''
            return self._voltage / 1000000.0
    _nv_version_ = 1
    # check: version == 0x1059C
    _fields_ = [('version', ctypes.c_uint32),
                ('channelMask', ctypes.c_uint32),
                ('_totalPower', ctypes.c_uint32),
                ('reserved', ctypes.c_uint8 * 16),
                ('entries', NV_POWER_MONITOR_STATUS_ENTRY * 32)]
    @property
    def totalPower(self) -> float:
        # milliwatts -> Watts.
        return self._totalPower / 1000.0
class NV_GPU_VOLTAGE_STATUS(NvVersioned):
    '''Current core voltage (private NvAPI_GPU_GetCurrentVoltage).'''
    _nv_version_ = 1
    _fields_ = [('version', ctypes.c_uint32),
                ('reserved1', ctypes.c_uint32),
                ('reserved2', ctypes.c_uint32 * 8),
                ('_voltage', ctypes.c_uint32),
                ('reserved3', ctypes.c_uint32 * 8)]
    @property
    def voltage(self):
        # microvolts -> Volts.
        return self._voltage / 1000000.0
class NV_GPU_CLOCKBOOST_MASK(NvVersioned):
    '''VF-point mask (private NvAPI_GPU_GetClockBoostMask).'''
    class NV_GPU_CLOCKBOOST_MASK_CLOCK(StrStructure):
        _fields_ = [('_type', ctypes.c_uint32),
                    ('enabled', ctypes.c_bool),
                    ('reserved', ctypes.c_uint32 * 4)]
        @property
        def type(self):
            return NV_GPU_PUBLIC_CLOCK_ID(self._type)
    _nv_version_ = 1
    _pack_ = 8
    _fields_ = [('version', ctypes.c_uint32),
                ('masks', ctypes.c_uint32 * 16),
                ('clocks', NV_GPU_CLOCKBOOST_MASK_CLOCK * 255)]
class NV_GPU_CLOCKBOOST_TABLE(NvVersioned):
    '''Per-VF-point frequency offsets (private NvAPI_GPU_Get/SetClockBoostTable).'''
    class NV_GPU_CLOCKBOOST_TABLE_CLOCK(StrStructure):
        _fields_ = [('_type', ctypes.c_uint32),
                    ('reserved1', ctypes.c_uint32 * 4),
                    ('_freqDelta', ctypes.c_int32),
                    ('reserved2', ctypes.c_uint32 * 3)]
        @property
        def freqDelta(self):
            # kHz -> MHz; may be negative (underclock).
            return self._freqDelta / 1000.0
        @property
        def type(self):
            return NV_GPU_PUBLIC_CLOCK_ID(self._type)
    _nv_version_ = 1
    _pack_ = 8
    _fields_ = [('version', ctypes.c_uint32),
                ('masks', ctypes.c_uint32 * 16),
                ('clocks', NV_GPU_CLOCKBOOST_TABLE_CLOCK * 255)]
class NV_GPU_VFP_CURVE(NvVersioned):
    '''Voltage/frequency curve points (private NvAPI_GPU_GetVFPCurve).'''
    class NV_GPU_VFP_CURVE_CLOCK(StrStructure):
        _fields_ = [('_type', ctypes.c_uint32),
                    ('_frequency', ctypes.c_uint32),
                    ('_voltage', ctypes.c_uint32),
                    ('reserved', ctypes.c_uint32 * 4)]
        @property
        def frequency(self):
            # kHz -> MHz.
            return self._frequency / 1000.0
        @property
        def voltage(self):
            # microvolts -> Volts.
            return self._voltage / 1000000.0
        @property
        def type(self):
            return NV_GPU_PUBLIC_CLOCK_ID(self._type)
    _nv_version_ = 1
    _fields_ = [('version', ctypes.c_uint32),
                ('masks', ctypes.c_uint32 * 16),
                ('clocks', NV_GPU_VFP_CURVE_CLOCK * 255)]
class PerfCapReason(enum.IntFlag):
    '''Why the GPU is currently limiting its clocks (bit flags, combinable).'''
    NONE = 0
    POWER = 1
    TEMPERATURE = 2
    VOLTAGE = 4
    UNKNOWN = 8
    NO_LOAD = 16
class NV_GPU_PERFORMANCE_STATUS(NvVersioned):
    '''Perf-policy status (private NvAPI_GPU_PerfPoliciesGetStatus).'''
    _nv_version_ = 1
    _pack_ = 8
    _fields_ = [('version', ctypes.c_uint32),
                ('unknown1', ctypes.c_uint32),
                ('_timer', ctypes.c_ulonglong),
                ('_limit', ctypes.c_uint32),
                ('unknown2', ctypes.c_uint32 * 3),
                ('_timers', ctypes.c_ulonglong * 3),
                ('unknown3', ctypes.c_uint32 * 326)]
    @property
    def timer(self):
        # nanoseconds -> seconds.
        return self._timer / 1e9
    @property
    def limit(self):
        '''Active throttle reasons as PerfCapReason flags.'''
        return PerfCapReason(self._limit)
class RamType(enum.IntEnum):
    '''Video memory technology codes returned by NvAPI_GPU_GetRamType.'''
    Unknown = 0
    SDRAM = 1
    DDR1 = 2
    DDR2 = 3
    GDDR2 = 4
    GDDR3 = 5
    GDDR4 = 6
    DDR3 = 7
    GDDR5 = 8
    LPDDR2 = 9
    GDDR5X = 10
    HBM2 = 12
    GDDR6 = 14
    GDDR6X = 15
class DisplayDriverMemoryInfoV1(NvVersioned):
    '''Video memory info v1 (NvAPI_GPU_GetMemoryInfo); raw values are in KiB,
    properties convert to MiB.'''
    _nv_version_ = 1
    _fields_ = [('version', ctypes.c_uint32),
                ('_dedicatedVideoMemory', ctypes.c_uint32),
                ('_availableDedicatedVideoMemory', ctypes.c_uint32),
                ('_systemVideoMemory', ctypes.c_uint32),
                ('_sharedSystemMemory', ctypes.c_uint32)]
    @property
    def dedicatedVideoMemory(self):
        return self._dedicatedVideoMemory / 1024
    @property
    def availableDedicatedVideoMemory(self):
        return self._availableDedicatedVideoMemory / 1024
    @property
    def systemVideoMemory(self):
        return self._systemVideoMemory / 1024
    @property
    def sharedSystemMemory(self):
        return self._sharedSystemMemory / 1024
class DisplayDriverMemoryInfoV2(DisplayDriverMemoryInfoV1):
    '''v2 appends the currently available dedicated memory (ctypes appends
    subclass _fields_ after the inherited layout).'''
    _nv_version_ = 2
    _fields_ = [('_currentAvailableDedicatedVideoMemory', ctypes.c_uint32)]
    @property
    def currentAvailableDedicatedVideoMemory(self):
        return self._currentAvailableDedicatedVideoMemory / 1024
class DisplayDriverMemoryInfoV3(DisplayDriverMemoryInfoV2):
    '''v3 appends eviction statistics.'''
    _nv_version_ = 3
    _fields_ = [('_dedicatedVideoMemoryEvictionsSize', ctypes.c_uint32),
                ('dedicatedVideoMemoryEvictionCount', ctypes.c_uint32)]
    @property
    def dedicatedVideoMemoryEvictionsSize(self):
        return self._dedicatedVideoMemoryEvictionsSize / 1024
class ClockLockMode(enum.IntEnum):
    '''Clock-lock state: NONE (unlocked) or MANUAL (pinned to a voltage point).'''
    NONE = 0
    MANUAL = 3
class PrivateClockBoostLockV2(NvVersioned):
    '''Per-domain clock lock info (private NvAPI_GPU_GetClockBoostLock).'''
    class ClockBoostLock(StrStructure):
        _fields_ = [('_domain', ctypes.c_uint32),
                    ('reserved1', ctypes.c_uint32),
                    ('_lockMode', ctypes.c_uint32),
                    ('reserved2', ctypes.c_uint32),
                    ('_voltage', ctypes.c_uint32),
                    ('reserved3', ctypes.c_uint32)]
        @property
        def domain(self):
            return NV_GPU_PUBLIC_CLOCK_ID(self._domain)
        @property
        def lockMode(self):
            return ClockLockMode(self._lockMode)
        @property
        def voltage(self):
            # microvolts -> Volts.
            return self._voltage / 1e6
    _nv_version_ = 2
    _fields_ = [('version', ctypes.c_uint32),
                ('reserved', ctypes.c_uint32),
                ('count', ctypes.c_uint32),
                ('_locks', ClockBoostLock * NVAPI_MAX_GPU_PUBLIC_CLOCKS)
                ]
    @property
    def locks(self):
        return self._locks[:self.count]
class UtilizationDomain(enum.IntEnum):
    '''Units whose utilization NvAPI_GPU_GetDynamicPstatesInfoEx reports.'''
    GPU = 0
    FrameBuffer = 1
    VideoEngine = 2
    BusInterface = 3
class DynamicPerformanceStatesInfoV1(NvVersioned):
    '''Utilization per domain (NvAPI_GPU_GetDynamicPstatesInfoEx).'''
    class UtilizationDomainInfo(StrStructure):
        _fields_ = [('_present', ctypes.c_uint32),
                    ('percent', ctypes.c_uint32)]
        @property
        def present(self):
            # Bit 0 flags whether this domain is reported by the hardware.
            return bool(self._present & 0b1)
    _nv_version_ = 1
    _fields_ = [('version', ctypes.c_uint32),
                ('_flags', ctypes.c_uint32),
                ('_utilization', UtilizationDomainInfo * 8)]
    @property
    def is_dynamic_pstate_enabled(self):
        return bool(self._flags & 0b1)
    @property
    def utilization(self):
        '''Mapping of UtilizationDomain -> UtilizationDomainInfo.'''
        return {domain: self._utilization[domain.value] for domain in UtilizationDomain.__members__.values()}
class PrivateActiveApplicationV2(NvVersioned):
    '''One process using the GPU (private NvAPI_GPU_QueryActiveApps).'''
    _nv_version_ = 2
    _fields_ = [('version', ctypes.c_uint32),
                ('pid', ctypes.c_uint32),
                ('_name', NvAPI_LongString)]
    @property
    def name(self):
        '''Process/executable name decoded to str.'''
        return self._name.decode('utf8')
class PrivateActiveApplicationArray(PrivateActiveApplicationV2 * NVAPI_MAX_NUMBER_OF_APPLICATIONS):
    '''Fixed-size application array whose elements get their NVAPI version set.'''
    def __init__(self):
        super().__init__()
        # Iterating a ctypes array yields views into the buffer, so calling
        # __init__ on each element writes the version dword in place.
        for app in self:
            app.__init__()
class Method:
    '''Lazily-resolved pointer to an NVAPI function.

    NVAPI does not export its functions by name; they are obtained at runtime
    from the hidden nvapi_QueryInterface entry point by a 32-bit offset
    (interface id). Resolution happens on first call and is cached.

    :param offset: the nvapi_QueryInterface interface id of the function.
    :param restype: ctypes return type of the function.
    :param argtypes: ctypes argument types of the function.
    '''
    def __init__(self, offset, restype, *argtypes):
        # CFUNCTYPE = cdecl; errno/last_error are captured so that
        # ctypes.get_errno()/get_last_error() remain meaningful after a call.
        self.proto = ctypes.CFUNCTYPE(restype, *argtypes, use_errno=True, use_last_error=True)
        self.offset = offset
        self.func = None  # filled in by the first __call__

    def __call__(self, *args):
        '''Resolve (once) and invoke the underlying NVAPI function.

        Raises RuntimeError when the driver does not provide a function for
        this offset.
        '''
        if self.func is None:
            addr = _nvapi_QueryInterface(self.offset)
            # BUGFIX: with restype=c_void_p ctypes maps a NULL result to None,
            # not 0, so the old `addr == 0` check never fired and a missing
            # function produced a confusing ctypes error instead.
            if not addr:
                raise RuntimeError(f'Cannot get nvapi function by offset {self.offset}')
            self.func = self.proto(addr)
        return self.func(*args)
class NvMethod(Method):
    '''An NVAPI function that reports success through an NvStatus code.

    Calling it invokes the underlying function and raises NvError unless the
    returned status is NVAPI_OK or one of the explicitly allowed statuses.
    '''
    def __init__(self, offset, name, *argtypes, allowed_returns=()):
        # All NVAPI calls return a 32-bit status code.
        super().__init__(offset, ctypes.c_int, *argtypes)
        self.name = name
        # NVAPI_OK is always acceptable on top of the caller's whitelist.
        self.allowed_returns = {NvStatus.cast(item) for item in allowed_returns} | {NVAPI_OK}

    def __call__(self, *args):
        result = NvStatus.by_value(super().__call__(*args))
        if result not in self.allowed_returns:
            raise NvError(f'Error in {self.name}: {result}', result)
        return result
class NvAPI:
NvAPI_Initialize = NvMethod(0x0150E828, 'NvAPI_Initialize')
NvAPI_Unload = NvMethod(0xD22BDD7E, 'NvAPI_Unload')
NvAPI_EnumPhysicalGPUs = NvMethod(0xE5AC921F, 'NvAPI_EnumPhysicalGPUs', NV_ENUM_GPUS, ctypes.POINTER(ctypes.c_int))
NvAPI_SYS_GetDriverAndBranchVersion = NvMethod(0x2926AAAD, 'NvAPI_SYS_GetDriverAndBranchVersion', ctypes.POINTER(ctypes.c_uint32), ctypes.POINTER(NvAPI_ShortString))
NvAPI_GPU_GetBusId = NvMethod(0x1BE0B8E5, 'NvAPI_GPU_GetBusId', NvPhysicalGpu, ctypes.POINTER(ctypes.c_uint32))
NvAPI_GPU_GetBusSlotId = NvMethod(0x2A0A350F, 'NvAPI_GPU_GetBusSlotId', NvPhysicalGpu, ctypes.POINTER(ctypes.c_uint32))
NvAPI_GPU_GetThermalSettings = NvMethod(0xE3640A56, 'NvAPI_GPU_GetThermalSettings', NvPhysicalGpu, ctypes.c_uint32, ctypes.POINTER(NV_GPU_THERMAL_SETTINGS))
NvAPI_GPU_QueryThermalSensors = NvMethod(0x65FE3AAD, 'NvAPI_GPU_QueryThermalSensors ', NvPhysicalGpu, ctypes.POINTER(NV_GPU_THERMAL_EX))
NvAPI_GPU_GetFullName = NvMethod(0xCEEE8E9F, 'NvAPI_GPU_GetFullName', NvPhysicalGpu, ctypes.POINTER(NvAPI_ShortString))
NvAPI_GPU_SetCoolerLevels = NvMethod(0x891FA0AE, 'NvAPI_GPU_SetCoolerLevels', NvPhysicalGpu, ctypes.c_int32, ctypes.POINTER(NvCoolerLevels))
NvAPI_GPU_GetCoolerSettings = NvMethod(0xDA141340, 'NvAPI_GPU_GetCoolerSettings', NvPhysicalGpu, ctypes.c_int32, ctypes.POINTER(NV_GPU_COOLER_SETTINGS))
NvAPI_GPU_GetAllClockFrequencies = NvMethod(0xDCB616C3, 'NvAPI_GPU_GetAllClockFrequencies', NvPhysicalGpu, ctypes.POINTER(NV_GPU_CLOCK_FREQUENCIES))
NvAPI_GPU_GetAllClocks = NvMethod(0x1BD69F49, 'NvAPI_GPU_GetAllClocks', NvPhysicalGpu, ctypes.POINTER(NV_GPU_CLOCKS_INFO))
NvAPI_GPU_RestoreCoolerSettings = NvMethod(0x8F6ED0FB, 'NvAPI_GPU_RestoreCoolerSettings', NvPhysicalGpu, ctypes.POINTER(ctypes.c_uint32), ctypes.c_uint32)
NvAPI_GPU_GetPstates20 = NvMethod(0x6FF81213, 'NvAPI_GPU_GetPstates20', NvPhysicalGpu, ctypes.POINTER(NV_GPU_PERF_PSTATES20_INFO))
NvAPI_GPU_SetPstates20 = NvMethod(0x0F4DAE6B, 'NvAPI_GPU_SetPstates20', NvPhysicalGpu, ctypes.POINTER(NV_GPU_PERF_PSTATES20_INFO))
NvAPI_GPU_ClientPowerPoliciesGetInfo = NvMethod(0x34206D86, 'NvAPI_GPU_ClientPowerPoliciesGetInfo', NvPhysicalGpu, ctypes.POINTER(NV_GPU_POWER_INFO))
NvAPI_GPU_ClientPowerPoliciesGetStatus = NvMethod(0x70916171, 'NvAPI_GPU_ClientPowerPoliciesGetStatus', NvPhysicalGpu, ctypes.POINTER(NV_GPU_POWER_STATUS))
NvAPI_GPU_ClientPowerPoliciesSetStatus = NvMethod(0xAD95F5ED, 'NvAPI_GPU_ClientPowerPoliciesSetStatus', NvPhysicalGpu, ctypes.POINTER(NV_GPU_POWER_STATUS))
NvAPI_GPU_ClientPowerTopologyGetStatus = NvMethod(0xEDCF624E, 'NvAPI_GPU_ClientPowerTopologyGetStatus', NvPhysicalGpu, ctypes.POINTER(NV_GPU_TOPOLOGY_STATUS))
NvAPI_GPU_PowerMonitorGetInfo = NvMethod(0xC12EB19E, 'NvAPI_GPU_PowerMonitorGetInfo', NvPhysicalGpu, ctypes.POINTER(NV_POWER_MONITOR_INFO))
NvAPI_GPU_PowerMonitorGetStatus = NvMethod(0xF40238EF, 'NvAPI_GPU_PowerMonitorGetStatus', NvPhysicalGpu, ctypes.POINTER(NV_POWER_MONITOR_STATUS))
NvAPI_GPU_ClientFanCoolersGetInfo = NvMethod(0xFB85B01E, 'NvAPI_GPU_ClientFanCoolersGetInfo', NvPhysicalGpu, ctypes.POINTER(NV_GPU_FAN_COOLERS_INFO))
NvAPI_GPU_ClientFanCoolersGetStatus = NvMethod(0x35AED5E8, 'NvAPI_GPU_ClientFanCoolersGetStatus', NvPhysicalGpu, ctypes.POINTER(NV_GPU_FAN_COOLERS_STATUS))
NvAPI_GPU_ClientFanCoolersGetControl = NvMethod(0x814B209F, 'NvAPI_GPU_ClientFanCoolersGetControl', NvPhysicalGpu, ctypes.POINTER(NV_GPU_FAN_COOLERS_CONTROL))
NvAPI_GPU_ClientFanCoolersSetControl = NvMethod(0xA58971A5, 'NvAPI_GPU_ClientFanCoolersSetControl', NvPhysicalGpu, ctypes.POINTER(NV_GPU_FAN_COOLERS_CONTROL))
NvAPI_GPU_GetCurrentVoltage = NvMethod(0x465F9BCF, 'NvAPI_GPU_GetCurrentVoltage', NvPhysicalGpu, ctypes.POINTER(NV_GPU_VOLTAGE_STATUS))
NvAPI_GPU_GetClockBoostMask = NvMethod(0x507B4B59, 'NvAPI_GPU_GetClockBoostMask', NvPhysicalGpu, ctypes.POINTER(NV_GPU_CLOCKBOOST_MASK))
NvAPI_GPU_GetVFPCurve = NvMethod(0x21537AD4, 'NvAPI_GPU_GetVFPCurve', NvPhysicalGpu, ctypes.POINTER(NV_GPU_VFP_CURVE))
NvAPI_GPU_GetClockBoostTable = NvMethod(0x23F1B133, 'NvAPI_GPU_GetClockBoostTable', NvPhysicalGpu, ctypes.POINTER(NV_GPU_CLOCKBOOST_TABLE))
NvAPI_GPU_SetClockBoostTable = NvMethod(0x733E009, 'NvAPI_GPU_SetClockBoostTable', NvPhysicalGpu, ctypes.POINTER(NV_GPU_CLOCKBOOST_TABLE))
NvAPI_GPU_PerfPoliciesGetStatus = NvMethod(0x3D358A0C, 'NvAPI_GPU_PerfPoliciesGetStatus', NvPhysicalGpu, ctypes.POINTER(NV_GPU_PERFORMANCE_STATUS))
NvAPI_RestartDisplayDriver = NvMethod(0xB4B26B65, 'NvAPI_RestartDisplayDriver')
NvAPI_GPU_GetRamType = NvMethod(0x57F7CAAC, 'NvAPI_GPU_GetRamType', NvPhysicalGpu, ctypes.POINTER(ctypes.c_uint32))
NvAPI_GPU_GetMemoryInfo = NvMethod(0x7F9B368, 'NvAPI_GPU_GetMemoryInfo', NvPhysicalGpu, ctypes.POINTER(DisplayDriverMemoryInfoV1))
NvAPI_GPU_GetClockBoostLock = NvMethod(0xE440B867, 'NvAPI_GPU_GetClockBoostLock', NvPhysicalGpu, ctypes.POINTER(PrivateClockBoostLockV2))
NvAPI_GPU_GetCurrentPstate = NvMethod(0x927DA4F6, 'NvAPI_GPU_GetCurrentPstate', NvPhysicalGpu, ctypes.POINTER(ctypes.c_int))
NvAPI_GPU_GetDynamicPstatesInfoEx = NvMethod(0x60DED2ED, 'NvAPI_GPU_GetDynamicPstatesInfoEx', NvPhysicalGpu, ctypes.POINTER(DynamicPerformanceStatesInfoV1))
NvAPI_GPU_QueryActiveApps = NvMethod(0x65B1C5F5, 'NvAPI_GPU_QueryActiveApps', NvPhysicalGpu, PrivateActiveApplicationArray, ctypes.POINTER(ctypes.c_uint32))
    def __init__(self):
        '''Initialize NVAPI and cache the driver version and branch string.

        Raises NvError if initialization fails and AssertionError when the
        driver is older than the supported minimum (0x4650 == 18000, i.e.
        driver 180.00 — presumably; TODO confirm the encoding).
        '''
        self.NvAPI_Initialize()
        self.__gpus = None  # filled lazily by the gpu_handles property
        version = ctypes.c_uint32(0)
        branch = NvAPI_ShortString()
        self.NvAPI_SYS_GetDriverAndBranchVersion(ctypes.pointer(version), branch)
        self.__version = version.value
        self.__branch = branch.value.decode('utf8')
        # NOTE(review): `assert` is stripped under `python -O`; an explicit
        # raise would make the minimum-driver check unconditional.
        assert self.__version > 0x4650, f'Too old NVidia drivers (version={self.__version}, branch={self.__branch}): unsupported'
    def __del__(self):
        # Unload NVAPI when the wrapper is garbage-collected.
        # NOTE(review): if __init__ failed this may raise during finalization;
        # Python only prints such exceptions, it does not propagate them.
        self.NvAPI_Unload()
    def get_driver_version(self) -> typing.Tuple[int, str]:
        '''Returns driver version as int and branch as str.'''
        return self.__version, self.__branch
    @property
    def gpu_handles(self) -> typing.List[NvPhysicalGpu]:
        """Physical GPU handles, enumerated once and cached for later calls."""
        if self.__gpus is None:
            gpus = NV_ENUM_GPUS()
            gpuCount = ctypes.c_int(-1)
            self.NvAPI_EnumPhysicalGPUs(gpus, ctypes.pointer(gpuCount))
            # Only the first gpuCount entries of the enum array are valid.
            self.__gpus = [gpus[i] for i in range(gpuCount.value)]
        return self.__gpus
    def get_gpu_by_bus(self, busId: int, slotId: int) -> NvPhysicalGpu:
        """Return the GPU handle matching a PCI bus/slot pair, or raise ValueError."""
        for gpu in self.gpu_handles:
            devBusId = ctypes.c_uint32(0)
            devSlotId = ctypes.c_uint32(0)
            self.NvAPI_GPU_GetBusId(gpu, ctypes.pointer(devBusId))
            self.NvAPI_GPU_GetBusSlotId(gpu, ctypes.pointer(devSlotId))
            if devBusId.value == busId and devSlotId.value == slotId:
                return gpu
        raise ValueError(f'Cannot find a GPU with bus={busId} and slot={slotId}')
    def read_thermal_sensors(self, dev: NvPhysicalGpu, sensor_hint=None) -> typing.Tuple[int, typing.Tuple[float]]:
        """Read the extended thermal sensors of *dev*.

        Without *sensor_hint* the sensor count is probed by trying masks from
        32 sensors down to 2 until the driver accepts one.  Returns the count
        that worked together with the raw sensor value tuple.
        """
        exc = None
        counts = [sensor_hint] if sensor_hint is not None else range(32, 1, -1)
        for count in counts:
            thermal = NV_GPU_THERMAL_EX()
            thermal.mask = (1 << count) - 1  # request the first `count` sensors
            try:
                self.NvAPI_GPU_QueryThermalSensors(dev, ctypes.pointer(thermal))
            except NvError as ex:
                exc = ex
                continue
            break
        else:
            # Every candidate count failed; re-raise the last driver error.
            raise exc
        return count, thermal.sensors
    def set_cooler_duty(self, dev: NvPhysicalGpu, cooler: int, duty: int):
        """Force every cooler level of *dev* to *duty* percent (clamped to 0..100)."""
        duty = max(min(duty, 100), 0)
        levels = NvCoolerLevels()
        for i in range(len(levels.levels)):
            levels.levels[i].level = duty
            levels.levels[i].policy = NVAPI_COOLER_POLICY_USER
        self.NvAPI_GPU_SetCoolerLevels(dev, cooler, ctypes.pointer(levels))
    def get_cooler_settings(self, dev: NvPhysicalGpu, cooler: NV_COOLER_TARGET=NV_COOLER_TARGET.ALL) -> NV_GPU_COOLER_SETTINGS:
        """Return cooler settings of *dev* for the given cooler target (default: all)."""
        value = NV_GPU_COOLER_SETTINGS()
        self.NvAPI_GPU_GetCoolerSettings(dev, int(cooler), ctypes.pointer(value))
        return value
    def get_freqs(self, dev: NvPhysicalGpu, type: int) -> NV_GPU_CLOCK_FREQUENCIES:
        """Return clock frequencies of *dev* for the requested clock type."""
        value = NV_GPU_CLOCK_FREQUENCIES()
        value.ClockType = type
        self.NvAPI_GPU_GetAllClockFrequencies(dev, ctypes.pointer(value))
        return value
    def restore_coolers(self, dev: NvPhysicalGpu):
        """Restore the driver-managed cooler settings of *dev*."""
        self.NvAPI_GPU_RestoreCoolerSettings(dev, None, 0)
    def get_pstates(self, dev: NvPhysicalGpu) -> NV_GPU_PERF_PSTATES20_INFO:
        """Return the performance-state (pstates 2.0) table of *dev*."""
        value = NV_GPU_PERF_PSTATES20_INFO()
        self.NvAPI_GPU_GetPstates20(dev, ctypes.pointer(value))
        return value
    def get_power_info(self, dev: NvPhysicalGpu) -> NV_GPU_POWER_INFO:
        """Return the power-policy limits of *dev*."""
        value = NV_GPU_POWER_INFO()
        self.NvAPI_GPU_ClientPowerPoliciesGetInfo(dev, ctypes.pointer(value))
        return value
    def get_power_status(self, dev: NvPhysicalGpu) -> NV_GPU_POWER_STATUS:
        """Return the current power-policy status of *dev*."""
        value = NV_GPU_POWER_STATUS()
        self.NvAPI_GPU_ClientPowerPoliciesGetStatus(dev, ctypes.pointer(value))
        return value
    def get_topology_status(self, dev: NvPhysicalGpu) -> NV_GPU_TOPOLOGY_STATUS:
        """Return the power-topology status of *dev*."""
        value = NV_GPU_TOPOLOGY_STATUS()
        self.NvAPI_GPU_ClientPowerTopologyGetStatus(dev, ctypes.pointer(value))
        return value
    def get_power_monitor_info(self, dev: NvPhysicalGpu) -> NV_POWER_MONITOR_INFO:
        """Return static power-monitor channel information of *dev*."""
        value = NV_POWER_MONITOR_INFO()
        self.NvAPI_GPU_PowerMonitorGetInfo(dev, ctypes.pointer(value))
        return value
    def get_power_monitor_status(self, dev: NvPhysicalGpu, info: NV_POWER_MONITOR_INFO) -> NV_POWER_MONITOR_STATUS:
        """Sample the power-monitor channels advertised by *info*."""
        value = NV_POWER_MONITOR_STATUS()
        value.channelMask = info.channelMask  # query the same channels info exposes
        self.NvAPI_GPU_PowerMonitorGetStatus(dev, ctypes.pointer(value))
        return value
def get_coolers_info(self, dev: NvPhysicalGpu) -> NV_GPU_FAN_COOLERS_INFO:
if self.__version < 0x9C40:
raise ValueError('This feature requires new drivers')
value = NV_GPU_FAN_COOLERS_INFO()
self.NvAPI_GPU_ClientFanCoolersGetInfo(dev, ctypes.pointer(value))
return value
def get_coolers_status(self, dev: NvPhysicalGpu) -> NV_GPU_FAN_COOLERS_STATUS:
if self.__version < 0x9C40:
raise ValueError('This feature requires new drivers')
value = NV_GPU_FAN_COOLERS_STATUS()
self.NvAPI_GPU_ClientFanCoolersGetStatus(dev, ctypes.pointer(value))
return value
def get_coolers_control(self, dev: NvPhysicalGpu) -> NV_GPU_FAN_COOLERS_CONTROL:
if self.__version < 0x9C40:
raise ValueError('This feature requires new drivers')
value = NV_GPU_FAN_COOLERS_CONTROL()
self.NvAPI_GPU_ClientFanCoolersGetControl(dev, ctypes.pointer(value))
return value
def set_coolers_control(self, dev: NvPhysicalGpu, control: NV_GPU_FAN_COOLERS_CONTROL):
if self.__version < 0x9C40:
raise ValueError('This feature requires new drivers')
self.NvAPI_GPU_ClientFanCoolersSetControl(dev, ctypes.pointer(control))
    def get_core_voltage(self, dev: NvPhysicalGpu) -> float:
        """Return the current core voltage reading of *dev*."""
        value = NV_GPU_VOLTAGE_STATUS()
        self.NvAPI_GPU_GetCurrentVoltage(dev, ctypes.pointer(value))
        return value.voltage
    def get_boost_mask(self, dev: NvPhysicalGpu) -> NV_GPU_CLOCKBOOST_MASK:
        """Return the clock-boost mask of *dev*."""
        value = NV_GPU_CLOCKBOOST_MASK()
        self.NvAPI_GPU_GetClockBoostMask(dev, ctypes.pointer(value))
        return value
    def get_boost_table(self, dev: NvPhysicalGpu, boost_mask: NV_GPU_CLOCKBOOST_MASK) -> NV_GPU_CLOCKBOOST_TABLE:
        """Return the clock-boost table for the entries selected by *boost_mask*."""
        value = NV_GPU_CLOCKBOOST_TABLE()
        # The request must echo the mask words obtained from get_boost_mask().
        for i, m in enumerate(boost_mask.masks):
            value.masks[i] = m
        self.NvAPI_GPU_GetClockBoostTable(dev, ctypes.pointer(value))
        return value
    def get_vfp_curve(self, dev: NvPhysicalGpu, boost_mask: NV_GPU_CLOCKBOOST_MASK) -> NV_GPU_VFP_CURVE:
        """Return the voltage/frequency curve for the entries selected by *boost_mask*."""
        value = NV_GPU_VFP_CURVE()
        # Same mask-echo protocol as get_boost_table().
        for i, m in enumerate(boost_mask.masks):
            value.masks[i] = m
        self.NvAPI_GPU_GetVFPCurve(dev, ctypes.pointer(value))
        return value
    def get_performance_limit(self, dev: NvPhysicalGpu) -> PerfCapReason:
        """Return the reason the GPU is currently performance-capped."""
        value = NV_GPU_PERFORMANCE_STATUS()
        self.NvAPI_GPU_PerfPoliciesGetStatus(dev, ctypes.pointer(value))
        return value.limit
    def get_ram_type(self, dev: NvPhysicalGpu) -> RamType:
        """Return the memory (RAM) type of *dev*."""
        value = ctypes.c_uint32(0)
        self.NvAPI_GPU_GetRamType(dev, ctypes.pointer(value))
        return RamType(value.value)
    def get_memory_info(self, dev: NvPhysicalGpu) -> typing.Union[DisplayDriverMemoryInfoV3, DisplayDriverMemoryInfoV2, DisplayDriverMemoryInfoV1]:
        """Return memory info, trying the newest struct version first."""
        for klass in (DisplayDriverMemoryInfoV3, DisplayDriverMemoryInfoV2, DisplayDriverMemoryInfoV1):
            value = klass()
            try:
                self.NvAPI_GPU_GetMemoryInfo(dev, ctypes.pointer(value))
            except NvError as ex:
                # NOTE(review): relies on NvError comparing equal to its status
                # string -- confirm NvError.__eq__ supports string comparison.
                if ex == 'NVAPI_INCOMPATIBLE_STRUCT_VERSION':
                    continue
                raise
            return value
        raise NvError('Not found suitable memory info struct', 'NVAPI_INCOMPATIBLE_STRUCT_VERSION')
    def get_clocklock(self, dev: NvPhysicalGpu) -> PrivateClockBoostLockV2:
        """Return the clock-boost lock state of *dev*."""
        value = PrivateClockBoostLockV2()
        self.NvAPI_GPU_GetClockBoostLock(dev, ctypes.pointer(value))
        return value
    def get_current_pstate(self, dev: NvPhysicalGpu) -> PerformanceStateId:
        """Return the currently active performance state of *dev*."""
        value = ctypes.c_int()
        self.NvAPI_GPU_GetCurrentPstate(dev, ctypes.pointer(value))
        return PerformanceStateId(value.value)
    def get_dynamic_pstates_info(self, dev: NvPhysicalGpu) -> DynamicPerformanceStatesInfoV1:
        """Return the dynamic performance-state (utilization) info of *dev*."""
        value = DynamicPerformanceStatesInfoV1()
        self.NvAPI_GPU_GetDynamicPstatesInfoEx(dev, ctypes.pointer(value))
        return value
    def get_active_apps(self, dev: NvPhysicalGpu) -> typing.Tuple[PrivateActiveApplicationV2]:
        """Return the applications currently active on *dev*."""
        count = ctypes.c_uint32()
        apps = PrivateActiveApplicationArray()
        self.NvAPI_GPU_QueryActiveApps(dev, apps, ctypes.pointer(count))
        # Only the first `count` array entries are valid.
        return tuple(apps[:count.value])
| 41.347163 | 169 | 0.636379 | 47,448 | 0.971678 | 0 | 0 | 6,368 | 0.130409 | 0 | 0 | 5,363 | 0.109828 |
2a7713ec88b506d2773b4218843ce5cb75b2a38f | 5,537 | py | Python | prototype/ukwa/lib/sip/ids.py | GilHoggarth/ukwa-manage | 5893e9ea16c02e76eb81b2ccf7e161eeb183db9a | [
"Apache-2.0"
] | 1 | 2021-05-18T21:47:29.000Z | 2021-05-18T21:47:29.000Z | prototype/ukwa/lib/sip/ids.py | GilHoggarth/ukwa-manage | 5893e9ea16c02e76eb81b2ccf7e161eeb183db9a | [
"Apache-2.0"
] | 67 | 2017-11-22T11:13:18.000Z | 2022-03-25T09:48:49.000Z | prototype/ukwa/lib/sip/ids.py | GilHoggarth/ukwa-manage | 5893e9ea16c02e76eb81b2ccf7e161eeb183db9a | [
"Apache-2.0"
] | 4 | 2020-01-17T17:23:54.000Z | 2021-04-11T09:46:09.000Z | #!/usr/bin/env python
"""
Generic methods used for verifying/indexing SIPs.
"""
from __future__ import absolute_import
import re
import logging
import tarfile
import hdfs
from lxml import etree
from StringIO import StringIO
# import the Celery app context
#from crawl.celery import app
#from crawl.celery import cfg
# HDFS endpoint and the root folder holding the SIP tarballs.
HDFS_URL='http://hdfs:14000'
HDFS_USER='hdfs'
SIP_ROOT="/heritrix/sips/2015-domain-crawl"
# XML namespaces used when querying METS/PREMIS documents with XPath.
NS={"mets": "http://www.loc.gov/METS/", "premis": "info:lc/xmlns/premis-v2"}
XLINK="{http://www.w3.org/1999/xlink}"
# Console logging for the whole process (attached to the root logger).
handler = logging.StreamHandler()
formatter = logging.Formatter("[%(asctime)s] %(levelname)s %(filename)s.%(funcName)s: %(message)s")
handler.setFormatter(formatter)
# attach to root logger
logging.root.addHandler(handler)
logging.root.setLevel(logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel( logging.INFO )
logger.info("INFO LOGGING ENABLED")
logger.debug("DEBUG LOGGING ENABLED")
def get_warc_identifiers(sip):
    """Yield the identifier records of *sip* whose mimetype is WARC.

    ``get_all_identifiers()`` yields a single ``None`` for an empty
    (zero-byte) SIP package; such entries are skipped here instead of
    crashing on the subscript access.
    """
    for item in get_all_identifiers(sip):
        if item is not None and item['mimetype'] == "application/warc":
            yield item
def get_all_identifiers(sip):
    """Parses the SIP in HDFS and retrieves FILE/ARK tuples.

    Yields one dict per METS file entry that has a matching amdSec, with
    keys: path, mimetype, size, checksum_type, checksum and ark.
    NOTE(review): an empty (zero-byte) package yields a single ``None`` --
    callers must be prepared to skip it.
    """
    # client = hdfs.InsecureClient(cfg.get('hdfs', 'url'), user=cfg.get('hdfs', 'user'))
    client = hdfs.InsecureClient(HDFS_URL, HDFS_USER)
    tar = "%s/%s.tar.gz" % (SIP_ROOT, sip)
    status = client.status(tar,strict=False)
    if status:
        # Catch empty packages:
        if status['length'] == 0:
            logger.warning("Empty (zero byte) SIP package: %s" % tar)
            yield None
        else:
            # Read the whole package into memory before un-tarring it.
            with client.read(tar) as reader:
                t = reader.read()
            # Open the package:
            tar = tarfile.open(mode="r:gz", fileobj=StringIO(t))
            foundMets = False
            for i in tar.getmembers():
                logger.debug("Examining %s" % i.name)
                if i.name.endswith(".xml"):
                    foundMets = True
                    xml = tar.extractfile(i).read()
                    try:
                        tree = etree.fromstring(xml)
                        files = {}
                        n_files = 0
                        # First pass: collect every mets:file keyed by its ADMID.
                        for mfile in tree.xpath("//mets:file", namespaces=NS):
                            #logger.debug("Found mets:file = %s " % etree.tostring(mfile))
                            admid = mfile.attrib["ADMID"]
                            logger.info("Found mets:file admid = %s " % admid)
                            path = mfile.xpath("mets:FLocat", namespaces=NS)[0].attrib["%shref" % XLINK]
                            files[admid] = { "path": path, "mimetype": mfile.attrib["MIMETYPE"], "size": mfile.attrib["SIZE"],
                                "checksum_type": mfile.attrib["CHECKSUMTYPE"], "checksum": mfile.attrib["CHECKSUM"] }
                            n_files = n_files + 1
                        if len(files.keys()) != n_files:
                            logger.error("ERROR, more files than IDs")
                        n_amdsecs = 0
                        # Second pass: attach the ARK from each matching mets:amdSec
                        # and yield the now-complete record.
                        for amdsec in tree.xpath("//mets:amdSec", namespaces=NS):
                            #logger.debug("Found mets:amdSec = %s " % etree.tostring(amdsec))
                            admid = amdsec.attrib["ID"]
                            logger.info("Found mets:amdSec id = %s " % admid)
                            oiv = amdsec.xpath("mets:digiprovMD/mets:mdWrap/mets:xmlData/premis:object/premis:objectIdentifier/premis:objectIdentifierValue", namespaces=NS)
                            if oiv and len(oiv) == 1:
                                files[admid]['ark'] = oiv[0].text
                                n_amdsecs = n_amdsecs + 1
                                logger.debug("Yielding %s" % files[admid] )
                                yield files[admid]
                            else:
                                logger.info("Skipping amdSec ID=%s" % admid)
                        if n_files != n_amdsecs:
                            logger.error("ERROR finding all amdSec elements")
                    except IndexError as i:
                        # NOTE(review): `i` shadows the tar-member loop variable.
                        logger.error("Problem parsing METS for SIP: %s" % sip)
                        logger.exception(i)
            if not foundMets:
                logger.error("No METS XML file found!")
    else:
        logger.warning("Could not find SIP: hdfs://%s" % tar)
def find_identifiers(output_file):
    """Walk SIP_ROOT on HDFS and write '<sip> <identifier-dict>' lines to *output_file*."""
    with open(output_file, 'w') as f:
        # client = hdfs.InsecureClient(cfg.get('hdfs', 'url'), user=cfg.get('hdfs', 'user'))
        client = hdfs.InsecureClient(HDFS_URL, HDFS_USER)
        for (path, dirs, files) in client.walk(SIP_ROOT):
            logger.info("Looking at path "+path)
            for file in files:
                logger.info("Looking at file " + file)
                if file.endswith('.tar.gz'):
                    sip = "%s/%s" % (path, file)
                    # Strip the SIP_ROOT prefix and the '.tar.gz' suffix
                    # to recover the SIP identifier expected downstream.
                    sip = sip[len(SIP_ROOT) + 1:]
                    sip = sip[:-7]
                    logger.info("Scanning %s..." % sip)
                    for waid in get_all_identifiers(sip):
                        f.write("%s %s\n" % (sip, waid) )
def main():
    # Default entry point: dump every SIP/identifier pair to a flat file.
    find_identifiers('identifiers.txt')
# Test
#for waid in get_all_identifiers("weekly-wed2300/20141210230151"):
#    print(waid)
#sys.exit(0)
if __name__ == "__main__":
    main()
| 42.267176 | 176 | 0.519234 | 0 | 0 | 3,634 | 0.656312 | 0 | 0 | 0 | 0 | 1,690 | 0.305219 |
2a7afbc42adc7196fbe7873b0d70fede0a2ca8a1 | 186 | py | Python | gentelella/registeration/forms.py | horoyoii/admin_dashboard_edgex | 9aea5e43eeb3da17d9e9c65c3ed0337fe7694cb8 | [
"MIT"
] | 2 | 2020-05-24T20:34:41.000Z | 2021-08-28T07:27:45.000Z | dashboard/registeration/forms.py | horoyoii/graduation_piece | 4f907a10636e3862d09e950c6eb5f12e95b1a8e5 | [
"MIT"
] | 5 | 2021-03-19T09:14:10.000Z | 2021-06-10T19:54:28.000Z | gentelella/registeration/forms.py | horoyoii/admin_dashboard_edgex | 9aea5e43eeb3da17d9e9c65c3ed0337fe7694cb8 | [
"MIT"
] | 1 | 2020-04-04T07:58:51.000Z | 2020-04-04T07:58:51.000Z | from django import forms
from .models import *
class ProfileFormModel(forms.ModelForm):
    """ModelForm exposing only the device-profile file upload field."""
    class Meta:
        model = Device_profile
        fields = ['device_profile_file']
| 14.307692 | 40 | 0.672043 | 128 | 0.688172 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.112903 |
2a7c07919fb5a1ec10d504c1070cf336089772fc | 1,334 | py | Python | tests/tensors/similarity_functions/dot_product_test.py | richarajpal/deep_qa | d918335a1bed71b9cfccf1d5743321cee9c61952 | [
"Apache-2.0"
] | 459 | 2017-02-08T13:40:17.000Z | 2021-12-12T12:57:48.000Z | deep_qa-master/tests/tensors/similarity_functions/dot_product_test.py | RTHMaK/RPGOne | 3f3ada7db1762781668bfb2377154fdc00e17212 | [
"Apache-2.0"
] | 176 | 2017-01-26T01:19:41.000Z | 2018-04-22T19:16:01.000Z | deep_qa-master/tests/tensors/similarity_functions/dot_product_test.py | RTHMaK/RPGOne | 3f3ada7db1762781668bfb2377154fdc00e17212 | [
"Apache-2.0"
] | 154 | 2017-01-26T01:00:30.000Z | 2021-02-05T10:44:42.000Z | # pylint: disable=no-self-use,invalid-name
import numpy
from numpy.testing import assert_almost_equal
import keras.backend as K
from deep_qa.tensors.similarity_functions.dot_product import DotProduct
class TestDotProductSimilarityFunction:
    # One shared, stateless similarity function is enough for all tests.
    dot_product = DotProduct(name='dot_product')
    def test_initialize_weights_returns_empty(self):
        # A plain dot product has no trainable parameters.
        weights = self.dot_product.initialize_weights(3, 3)
        assert isinstance(weights, list) and len(weights) == 0
    def test_compute_similarity_does_a_dot_product(self):
        a_vectors = numpy.asarray([[1, 1, 1], [-1, -1, -1]])
        b_vectors = numpy.asarray([[1, 0, 1], [1, 0, 0]])
        result = K.eval(self.dot_product.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
        assert result.shape == (2,)
        assert numpy.all(result == [2, -1])
    def test_compute_similarity_works_with_higher_order_tensors(self):
        a_vectors = numpy.random.rand(5, 4, 3, 6, 7)
        b_vectors = numpy.random.rand(5, 4, 3, 6, 7)
        result = K.eval(self.dot_product.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
        assert result.shape == (5, 4, 3, 6)
        # Spot-check one cell against numpy's dot product over the last axis.
        assert_almost_equal(result[3, 2, 1, 3],
                            numpy.dot(a_vectors[3, 2, 1, 3], b_vectors[3, 2, 1, 3]),
                            decimal=6)
| 44.466667 | 106 | 0.664168 | 1,130 | 0.847076 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.041229 |
2a7c4f4b2ed2f4f224397934a4adf80a74d5d652 | 8,699 | py | Python | kiwi/cogs/Mod.py | Andres2055/Kiwi-bot | 1f76ad886972de89fe0e6e8b1078ff01cc76ae7d | [
"MIT"
] | 1 | 2021-03-30T05:15:36.000Z | 2021-03-30T05:15:36.000Z | kiwi/cogs/Mod.py | Andres2055/Kiwi-bot | 1f76ad886972de89fe0e6e8b1078ff01cc76ae7d | [
"MIT"
] | null | null | null | kiwi/cogs/Mod.py | Andres2055/Kiwi-bot | 1f76ad886972de89fe0e6e8b1078ff01cc76ae7d | [
"MIT"
] | 1 | 2018-09-23T07:04:23.000Z | 2018-09-23T07:04:23.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
He copiado y modificado software ajeno. Gran parte de este script es una
modificación y/o mejora del original, por ello doy los debidos créditos al
autor del software original:
MIT License
Copyright (c) 2016 - 2017 Eduard Nikoleisen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
ORIGINAL SCRIPT: []
"""
import discord
from discord.ext import commands
import kiwi_config
from .scpUtils import UTC_TIME
class Mod:
    '''Comandos generales para administradores y moderadores'''

    def __init__(self, bot):
        self.bot = bot
        # Channel where every moderation action is logged.
        self.log = kiwi_config.__log_channel__

    async def _recording(self, msg):
        """Send a timestamped moderation log entry to the log channel."""
        await self.log.send('**{}**'.format(UTC_TIME.get_time()))
        await self.log.send(msg)

    @commands.group(hidden=True, aliases=['purgame'],
                    description="Elimina mis odiosos mensajes (MOD)")
    # FIX: has_any_role() takes role names as separate arguments, not a list.
    @commands.has_any_role('AT Mod', 'AT Admin')
    async def eliminar(self, ctx, limit: int):
        """Ahora podrás hacer la limpieza del chat mucho más rápida
        y sencilla con este comando. ¡Si! Borro tonebytes de mensajes
        por ti, bag@-sempai! ^^

        **::Sintaxis::**
        -----------
        keliminar <n de mensajes> (Véase Subcomandos)

        **::Ejemplo::**
        ---------
        >>> keliminar 200
        Esto debería eliminar los primeros 200 mensajes del canal en
        donde se haya activado el comando.

        **::Subcomandos::**
        ---------
        * Con "keliminar me <n de mensajes>" eliminaras mis mensajes.
        * Con "keliminar a <miembro> <n de mensajes>" eliminaras los mensajes de
        esas odiosas ratas llamadas miembros.
        """
        if ctx.invoked_subcommand is None:
            # FIX: purge() is a bound method (do not pass the channel again);
            # its return value is the list of deleted messages, which was
            # previously referenced as an undefined name `deleted`.
            deleted = await ctx.channel.purge(limit=limit)
            await ctx.send('Fueron eliminados {} mensaje(s)'.format(len(deleted)))

    @eliminar.command(name='me', description='Borra mis mensajes')
    async def me(self, ctx, limit: int):
        '''Con esto borraras mis odiosos mensajes.
        Misma sintaxis que eliminar.'''
        def is_me(m):
            # Keep only messages authored by this bot.
            return m.author.id == ctx.bot.user.id
        deleted = await ctx.channel.purge(limit=limit, check=is_me)
        await ctx.send('Fueron eliminados {} mensaje(s)'.format(len(deleted)))

    @eliminar.command(name='a', description='Borra los mensajes de un miembro')
    async def a(self, ctx, member: discord.Member, limit: int):
        '''Con esto borraras los mensajes de aquellos hijos del averno que nunca
        debieron pisar un pie en la tierra.
        Misma sintaxis que eliminar, sólo que agregando una mención miembro al
        que le deseas borrar los mensajes.'''
        def is_member(m):
            # Keep only messages authored by the targeted member.
            return m.author.id == member.id
        deleted = await ctx.channel.purge(limit=limit, check=is_member)
        await ctx.send('Fueron eliminados {} mensaje(s)'.format(len(deleted)))

    @commands.command(hidden=True, aliases=['patear'],
                      description="Kickea a tus enemigos-nya (MOD)")
    @commands.has_any_role('AT Mod', 'AT Admin')
    async def kick(self, ctx, member: discord.Member = None, *, reason):
        """
        (COMANDO DE MODERACIÓN)

        **::Sintaxis::**
        ---------
        kkick <mención al usuario> <razón>

        **::Ejemplo::**
        ---------
        >>> kkick <@kiwi> Por ser mala
        Esto debería enviarme un Mensaje Privado con la razón del kick.
        """
        if member is not None and member in ctx.message.guild.members:
            if reason:
                await member.kick(reason=reason)
                re = '{} ha sido kickeado del {} debido a: {}'
                await self._recording(re.format(member.name, ctx.message.guild.name, reason))
            else:
                await member.kick()
                # FIX: the template previously had a third '{}' with no
                # matching format() argument (IndexError at runtime).
                re = '{} ha sido kickeado del {}'
                await self._recording(re.format(member.name, ctx.message.guild.name))
        else:
            # FIX: `{1.name.mention}` accessed .mention on a str and crashed;
            # mention the member only when one was actually resolved.
            objetivo = member.mention if member is not None else 'ese usuario'
            msg = ("Jeje {0.message.author.mention}-tan, "
                   "{1} no es miembro de este servidor, tal vez ni existe ^^").format(ctx, objetivo)
            await ctx.send(msg)

    @commands.command(hidden=True, description="DESTROZA a tus enemigos-nya (MOD)")
    @commands.has_any_role('AT Mod', 'AT Admin')
    async def ban(self, ctx, member: discord.Member, delete_message_days=0, *, reason):
        """
        (COMANDO DE MODERACIÓN)

        **::Sintaxis::**
        ---------
        kban <mención al usuario> <mensajes a eliminar> <razón>

        <mensajes a eliminar> es igual al número de días desde cuando se quiere borrar los mensajes
        del usuario a banear.

        **::Ejemplo::**
        ---------
        >>> kban <@kiwi> 2 Por ser mala
        Borrará los mensajes desde hace 2 días al presente y me enviará un MP con la razón del baneo.
        """
        if member in ctx.message.guild.members:
            if delete_message_days != 0:
                if reason:
                    await member.ban(delete_message_days=int(delete_message_days), reason=reason)
                    # FIX: the log message said "kickeado ... por" for a ban.
                    re = '{} ha sido baneado del {} debido a: {}'
                    await self._recording(re.format(member.name, ctx.message.guild.name, reason))
                else:
                    await member.ban(delete_message_days=int(delete_message_days))
                    re = '{} ha sido banneado del {}, las razones no fueron especificadas'
                    await self._recording(re.format(member.name, ctx.message.guild.name))
            else:
                # FIX: use the Member's own mention (was `{1.name.mention}`).
                msg = ("{0.message.author.mention}-tan, ¿quieres borrar de la existencia a "
                       "{1.mention}-nya? Si es así, dígame de 0 a 7 desde cuantos días atrás "
                       "empiezo a borrar sus mensajes.").format(ctx, member)
                await ctx.send(msg)
        else:
            msg = ("Jeje {0.message.author.mention}-tan, "
                   "{1.mention} no es miembro de este servidor, tal vez ni existe ^^").format(ctx, member)
            await ctx.send(msg)

    @commands.command(hidden=True, description="Renombra a tus sirvientes o3o")
    @commands.has_any_role('AT Mod', 'AT Admin')
    async def renombrar(self, ctx, member: discord.Member, *, new_name):
        # FIX: command callbacks receive the context as their first argument
        # (it was missing), the log template formatted `member` with
        # `{0.message.author}`, and `bot.change_nickname` does not exist in
        # the rewrite API used by the rest of this cog -- use Member.edit().
        old_name = member.name
        log_msg = "{0.message.author} a renombrado a {1} como {2}"
        await self._recording(log_msg.format(ctx, old_name, new_name))
        await member.edit(nick=new_name)

    @commands.command(hidden=True, aliases=['darr'],
                      description='Dale roles a tus compañeros')
    @commands.has_any_role('AT Mod', 'AT Admin')
    async def darRol(self, ctx, member: discord.Member = None, *, rankName: str):
        '''¡Con este comando puedes darle roles a los miembros!
        Esto sin mover mucho las manos ^.^

        **::Sintaxis::**
        -----------
        kdarRol <mención al usuario> <rango a dar>

        **::Ejemplo::**
        ---------
        >>> kdarRol <@kiwi> Kiwi-sempai
        ¡Esto me dará el rango Kiwi-sempai si tu servidor lo tiene! OuO
        '''
        rank = discord.utils.get(ctx.guild.roles, name=rankName)
        if member is not None:
            await member.add_roles(rank)
            # FIX: the f-string prefix was missing, so the literal braces
            # were sent to the channel instead of the mention/role name.
            await ctx.send(f'¡**{member.mention}**! Has obtenido el rango **{rank.name}**')
        else:
            await ctx.send('¿Eh? No encuentro a ese miembro')

    @commands.command(hidden=True, aliases=['rmrole'],
                      description='Quitale los roles a quienes no se los merecen')
    @commands.has_any_role('AT Mod', 'AT Admin')
    async def quitarRol(self, ctx, member: discord.Member = None, *, rankName: str):
        '''Con este comando podrás desterrar de sus escaños a los miembros con roles
        prestigiosos! Igual que con darRol, no moverás mucho las manos ^.^

        **::Sintaxis::**
        -----------
        kquitarRol <mención al usuario> <rango a quitar>

        **::Ejemplo::**
        ---------
        >>> kquitarRol <@kiwi> Kiwi-sempai
        Ya no seré tu sempai T.T
        '''
        rank = discord.utils.get(ctx.guild.roles, name=rankName)
        if member is not None:
            await member.remove_roles(rank)
            # FIX: missing f-string prefix (same defect as darRol).
            await ctx.send(f'¡**{member.mention}**! Ya no tienes el rango **{rank.name}**')
        else:
            await ctx.send('¿Eh? No encuentro a ese miembro')
def setup(bot):
    # discord.py extension entry point: register the Mod cog on load.
    bot.add_cog(Mod(bot))
| 37.175214 | 102 | 0.676859 | 7,217 | 0.82546 | 0 | 0 | 6,824 | 0.78051 | 5,966 | 0.682374 | 5,121 | 0.585726 |
2a7c896a391a02f788ac4dbbb70ff15ab41ff427 | 8,566 | py | Python | scripts/genotype/GetReadDepthDiff.py | EichlerLab/smrtsv2 | b71db8b66a2b701835998982f4fa6ec84ed46504 | [
"MIT"
] | 45 | 2019-01-25T20:01:30.000Z | 2022-01-11T23:56:24.000Z | scripts/genotype/GetReadDepthDiff.py | EichlerLab/smrtsv2 | b71db8b66a2b701835998982f4fa6ec84ed46504 | [
"MIT"
] | 58 | 2018-11-21T00:07:44.000Z | 2022-01-18T13:45:03.000Z | scripts/genotype/GetReadDepthDiff.py | EichlerLab/smrtsv2 | b71db8b66a2b701835998982f4fa6ec84ed46504 | [
"MIT"
] | 8 | 2019-01-27T18:49:55.000Z | 2019-11-26T14:21:19.000Z | #!/usr/bin/env python3
import argparse
import gc
import numpy as np
import os
import pandas as pd
import pysam
# Number of SVs to process before resetting pysam (close and re-open file). Avoids a memory leak in pysam.
PYSAM_RESET_INTERVAL = 1000
def get_read_depth(df_subset, bam_file_name, mapq, ref_filename=None):
    """
    Get read depths over one or more breakpoints.

    :param df_subset: Subset dataframe with a column for contigs (first column) and one or more columns for the
        location of breakpoints to quantify.
    :param bam_file_name: Name of alignment file to query.
    :param mapq: Minimum mapping quality.
    :param ref_filename: Optional reference FASTA (needed e.g. for CRAM input).

    :return: A Series with one element for each row of `df_subset` containing the average of read depths over
        the breakpoints for each variant.
    """
    # Init pysam query count (for memory leak prevention)
    pysam_count = 0
    bam_file = pysam.AlignmentFile(bam_file_name, 'r', reference_filename=ref_filename)
    # Init dataframe
    df_subset = df_subset.copy()
    n_loc_cols = df_subset.shape[1] - 1  # Number of location columns; depth is averaged for each
    df_subset.columns = ['CONTIG'] + ['LOC_{}'.format(col) for col in range(n_loc_cols)]
    # Init count
    df_subset['N'] = np.zeros(df_subset.shape[0], np.float64)
    n_index = df_subset.shape[1] - 1
    # Count
    for subset_index in range(n_loc_cols):
        # Use numeric index, skip chromosome column
        subset_index += 1
        for row_index in range(df_subset.shape[0]):
            n_reads = 0
            # Get position
            contig = df_subset.iloc[row_index, 0]
            pos = df_subset.iloc[row_index, subset_index]
            # Reset pysam periodically (avoids memory leak)
            pysam_count += 1
            if pysam_count >= PYSAM_RESET_INTERVAL:
                if bam_file is not None:
                    bam_file.close()
                gc.collect()
                bam_file = pysam.AlignmentFile(bam_file_name, 'r', reference_filename=ref_filename)
                pysam_count = 0
            # Count properly-paired reads meeting the MAPQ threshold that
            # overlap the one-base window at `pos`.
            for segment in bam_file.fetch(str(contig), pos, pos + 1):
                if segment.mapping_quality >= mapq and segment.is_proper_pair:
                    n_reads += 1
            df_subset.iloc[row_index, n_index] += n_reads
    # Return mean of depths (divide by the number of locations)
    return df_subset['N'] / n_loc_cols
def get_ref_contig_sizes(altref_file):
    """
    Get a Series of contigs lengths. Includes primary and alt contigs.

    :param altref_file: BED file of contig information where each record spans the whole contig. Must contain
        columns "#CHROM" and "END".

    :return: Series of contig lengths indexed by the contig name.
    """

    # Each BED record spans a whole contig, so END is the contig length.
    contig_table = pd.read_csv(altref_file, sep='\t', header=0)

    return contig_table.set_index('#CHROM')['END']
def annotate_variant_info(variant_table, ref_len_series, flank):
    """
    Annotate variant info with locations reads will be extracted from.

    :param variant_table: Variant info table.
    :param ref_len_series: Series of contig sizes.
    :param flank: Number of bases from variant breakpoints.

    :return: `variant_table` with additional fields.
    """

    # Per-record contig lengths for the reference and the assembly contig.
    ref_limit = variant_table['#CHROM'].map(ref_len_series)
    ctg_limit = variant_table['CONTIG'].map(ref_len_series)

    # Flank windows, clamped to [0, contig length] on both sides.
    variant_table['FLANK_L_REF'] = (variant_table['POS'] - flank).clip(lower=0)
    variant_table['FLANK_R_REF'] = np.minimum(variant_table['END'] + flank, ref_limit)
    variant_table['FLANK_L_CTG'] = (variant_table['CONTIG_START'] - flank).clip(lower=0)
    variant_table['FLANK_R_CTG'] = np.minimum(variant_table['CONTIG_END'] + flank, ctg_limit)

    # Midpoint of the variant sequence: deletions are located on the reference
    # contig, all other variant types on the assembled contig.
    is_del = variant_table['SVTYPE'] == 'DEL'
    variant_table['VAR_CONTIG'] = np.where(is_del, variant_table['#CHROM'], variant_table['CONTIG'])
    midpoint = np.where(
        is_del,
        (variant_table['POS'] + variant_table['END']) / 2,
        (variant_table['CONTIG_START'] + variant_table['CONTIG_END']) / 2
    )
    variant_table['VAR_MIDPOINT'] = midpoint.astype(np.int64)

    return variant_table
# Main
if __name__ == '__main__':
    # Get arguments
    arg_parser = argparse.ArgumentParser(description='Get insert size deltas on the reference over the SV breakpoints.')
    arg_parser.add_argument('bam', help='BAM file of short read alignments.')
    arg_parser.add_argument('bed', help='SV info BED file with columns "#CHROM", "POS", "END", "SVTYPE", "CONTIG", '
                                        '"CONTIG_START", and "CONTIG_END", including a header line.')
    arg_parser.add_argument('alt_info', help='BED file of contigs in the reference.')
    arg_parser.add_argument('out', help='Output file.')
    arg_parser.add_argument('--out_stats',
                            help='Output depth distribution statistics.')
    arg_parser.add_argument('--mapq', type=int, default=20,
                            help='Minimum mapping quality of aligned reads.')
    arg_parser.add_argument('--flank', type=int, default=100,
                            help='Number of reference bases on each side of the SV for flanking regions.')
    arg_parser.add_argument('--ref', nargs='?',
                            default=None, help='Reference for records are aligned against.')
    args = arg_parser.parse_args()
    # Check arguments
    if not os.path.isfile(args.bam):
        raise RuntimeError('Input BAM file does not exist or is not a regular file: {}'.format(args.bam))
    if args.mapq < 0:
        raise RuntimeError('Mapping quality is negative: {}'.format(args.mapq))
    if args.flank < 0:
        raise RuntimeError('Flank is negative: {}'.format(args.flank))
    args.out = args.out.strip()
    if not args.out:
        raise RuntimeError('Output file name is empty.')
    # Get variant info
    df_bed = pd.read_table(args.bed, header=0)
    # Get reference chromosome sizes
    ref_len = get_ref_contig_sizes(args.alt_info)
    # Annotate variant info with locations reads are extracted from
    df_bed = annotate_variant_info(df_bed, ref_len, args.flank)
    # Count reads over variant midpoint
    df_bed['DP_N_VAR'] =\
        get_read_depth(df_bed.loc[:, ['VAR_CONTIG', 'VAR_MIDPOINT']], args.bam, args.mapq, ref_filename=args.ref)
    # Count reads over reference flank
    df_bed['DP_N_PROX_REF'] =\
        get_read_depth(df_bed.loc[:, ['#CHROM', 'FLANK_L_REF', 'FLANK_R_REF']], args.bam, args.mapq, ref_filename=args.ref)
    # Count reads over contig flank
    df_bed['DP_N_PROX_CTG'] =\
        get_read_depth(df_bed.loc[:, ['CONTIG', 'FLANK_L_CTG', 'FLANK_R_CTG']], args.bam, args.mapq, ref_filename=args.ref)
    # Get global stats
    ref_mean = np.mean(df_bed['DP_N_PROX_REF'])
    ref_sd = np.std(df_bed['DP_N_PROX_REF'])
    if ref_mean == 0:
        raise RuntimeError('Cannot compute global depth stats: Global mean of proximal reference breakpoint depths is 0')
    # Combine total depths
    df_bed['DP_N_VAR_PROX_REF'] = df_bed['DP_N_VAR'] + df_bed['DP_N_PROX_REF']
    df_bed['DP_N_VAR_PROX_CTG'] = df_bed['DP_N_VAR'] + df_bed['DP_N_PROX_CTG']
    # Set relative ratios (guarding against division by zero per record)
    df_bed['DP_VAR_REF'] = df_bed.apply(
        lambda row: row['DP_N_VAR'] / row['DP_N_VAR_PROX_REF'] if row['DP_N_VAR_PROX_REF'] > 0 else 0,
        axis=1
    )
    df_bed['DP_VAR_CTG'] = df_bed.apply(
        lambda row: row['DP_N_VAR'] / row['DP_N_VAR_PROX_CTG'] if row['DP_N_VAR_PROX_CTG'] > 0 else 0,
        axis=1
    )
    df_bed['DP_VAR_GLOBAL'] = df_bed['DP_N_VAR'] / ref_mean
    # Write
    df_features = df_bed.loc[
        :, ('INDEX', 'DP_VAR_REF', 'DP_VAR_CTG', 'DP_VAR_GLOBAL', 'DP_N_VAR', 'DP_N_PROX_REF', 'DP_N_PROX_CTG')
    ]
    df_features.to_csv(
        args.out, sep='\t', index=False
    )
    # Write stats
    if args.out_stats:
        with open(args.out_stats, 'w') as stats_out:
            stats_out.write('ref_mean\t{:.6f}\n'.format(ref_mean))
            stats_out.write('ref_sd\t{:.6f}\n'.format(ref_sd))
| 35.991597 | 133 | 0.661919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,680 | 0.429605 |
2a7e107792c23c49f4303524f074d2e06ddfaa6a | 16,017 | py | Python | examples/cpcdata.py | zhmcclient/python-zhmcclient | 7d200afb0343a02535c52dc8b6ba0d224010075c | [
"Apache-2.0"
] | 30 | 2016-08-24T10:02:19.000Z | 2021-11-25T10:44:26.000Z | examples/cpcdata.py | zhmcclient/python-zhmcclient | 7d200afb0343a02535c52dc8b6ba0d224010075c | [
"Apache-2.0"
] | 883 | 2016-08-23T12:32:12.000Z | 2022-03-28T13:18:24.000Z | examples/cpcdata.py | zhmcclient/python-zhmcclient | 7d200afb0343a02535c52dc8b6ba0d224010075c | [
"Apache-2.0"
] | 25 | 2017-06-23T18:10:51.000Z | 2022-03-28T02:53:29.000Z | #!/usr/bin/env python
# Copyright 2016-2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Display information about CPCs and their basic resources in the data center.
"""
from __future__ import absolute_import, print_function
import os
import platform
import sys
import logging
import argparse
from datetime import datetime
from platform import python_version
from requests.packages import urllib3
import yaml
import zhmcclient
# Program name, also used as the top-level key in the YAML config file.
MYNAME = 'cpcdata'
# Model information:
MACH_TYPE_INFO = {
    # mach-type: (name, max-partitions)
    '2064': ('z900', 15),
    '2084': ('z990', 30),
    '2094': ('z9 EC', 60),
    '2097': ('z10 EC', 60),
    '2817': ('z196', 60),
    '2827': ('zEC12', 60),
    '2964': ('z13', 85),  # Also LinuxONE Emperor
    '3906': ('z14', 85),  # Also LinuxONE Emperor II
    '3907': ('z14-ZR1', 40),  # Also LinuxONE Rockhopper II
    '2066': ('z800', 15),
    '2086': ('z890', 30),
    '2096': ('z9 BC', 30),  # Some models have only 15 partitions
    '2098': ('z10 BC', 30),
    '2818': ('z114', 30),
    '2828': ('zBC12', 30),
    '2965': ('z13s', 40),  # Also LinuxONE Rockhopper
}
# Status values for "running" partitions:
# (DPM-mode partitions use these status strings)
PARTITION_RUNNING_STATI = (
    'starting',
    'active',
    'stopping',
    'degraded',
    'reservation-error',
    'paused',
)
# (classic-mode LPARs use these status strings)
LPAR_RUNNING_STATI = (
    'operating',
    'exceptions',
)
# Defines order of columns in CSV output.
# The names are used both as column headings in the CSV output, and as
# names in the cpc_info dictionary.
CSV_FIELDS = (
    'timestamp',
    'hmc',
    'name',
    'description',
    'machine-type',
    'machine-model',
    'machine-type-name',
    'dpm-enabled',
    'is-ensemble-member',
    'iml-mode',
    'processors-ifl',
    'processors-cp',
    'memory-total',
    'memory-available',
    'partitions-maximum',
    'partitions-defined',
    'partitions-running',
)
def main():
    """
    Main function.

    Reads the YAML config file named on the command line, sets up optional
    logging, then gathers and prints CPC data for each HMC listed in the
    config. Exits with status 1 on zhmcclient or config errors raised while
    processing HMCs.

    Raises:
      ConfigError: Required top-level items are missing from the config file
        (raised before the pretty-printing try block, so it propagates).
    """
    urllib3.disable_warnings()
    args = parse_args()
    config_file = args.config_file
    with open(args.config_file, 'r') as fp:
        config_root = yaml.safe_load(fp)
    config_this = config_root.get(MYNAME, None)
    if config_this is None:
        raise ConfigError("'%s' item not found in config file %s" %
                          (MYNAME, config_file))
    config_hmcs = config_this.get("hmcs", None)
    if config_hmcs is None:
        raise ConfigError("'%s' / 'hmcs' item not found in config "
                          "file %s" % (MYNAME, config_file))
    # Collect the effective settings in one namespace for easy passing around.
    config = argparse.Namespace()
    config.loglevel = config_this.get("loglevel", None)
    config.logmodule = config_this.get("logmodule", 'zhmcclient')
    config.timestats = config_this.get("timestats", False)
    config.verbose = args.verbose
    config.csv_file = args.csv_file
    # One timestamp (minute resolution) shared by all CSV rows of this run.
    config.timestamp = datetime.now().replace(second=0, microsecond=0)
    if config.loglevel is not None:
        level = getattr(logging, config.loglevel.upper(), None)
        if level is None:
            raise ConfigError("Invalid value for 'loglevel' item in "
                              "config file %s: %s" %
                              (config_file, config.loglevel))
        logmodule = config.logmodule
        if config.logmodule is None:
            config.logmodule = ''  # root logger
        handler = logging.StreamHandler()  # log to stdout
        format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        handler.setFormatter(logging.Formatter(format_string))
        logger = logging.getLogger(logmodule)
        logger.addHandler(handler)
        logger.setLevel(level)
        if config.verbose:
            print("Logging to stdout for module %s with level %s" %
                  (config.logmodule, config.loglevel))
    try:
        print_csv_header(config)
        for hmc_host in config_hmcs:
            config_hmc = config_root.get(hmc_host, None)
            if config_hmc is None:
                # Bug fix: these format strings contain two %s conversions,
                # so a 2-tuple must be passed. Previously a single string was
                # passed, which raised TypeError instead of ConfigError.
                raise ConfigError("'%s' item (credentials for that HMC) not "
                                  "found in config file %s" %
                                  (hmc_host, config_file))
            hmc_userid = config_hmc.get('userid', None)
            if hmc_userid is None:
                raise ConfigError("'%s' / 'userid' item not found in config "
                                  "file %s" % (hmc_host, config_file))
            hmc_password = config_hmc.get('password', None)
            if hmc_password is None:
                raise ConfigError("'%s' / 'password' item not found in config "
                                  "file %s" % (hmc_host, config_file))
            process_hmc(config, hmc_host, hmc_userid, hmc_password)
    except zhmcclient.Error as exc:
        print("%s: %s" % (exc.__class__.__name__, exc))
        # traceback.print_exc()
        sys.exit(1)
    except ConfigError as exc:
        print("%s: %s" % (exc.__class__.__name__, exc))
        sys.exit(1)
def process_hmc(config, hmc_host, hmc_userid, hmc_password):
    """
    Process the HMC and display info about it.

    Pings the HMC first, then opens a zhmcclient session and processes each
    managed CPC (sorted by name). All failures are reported as warnings and
    cause an early return; they never propagate to the caller.

    Parameters:
      config (argparse.Namespace): Effective settings (verbose, timestats, ...).
      hmc_host (string): IP address or host name of the HMC.
      hmc_userid (string): Userid for logging on to the HMC.
      hmc_password (string): Password for logging on to the HMC.
    """
    if config.verbose:
        print("Processing HMC %s" % hmc_host)
    # Test whether we can ping the HMC
    if config.verbose:
        print("Attempting to ping HMC ...")
    reachable = ping(hmc_host)
    if not reachable:
        print("Warning: Cannot ping HMC %s" % hmc_host)
        return
    try:
        session = zhmcclient.Session(hmc_host, hmc_userid, hmc_password)
        client = zhmcclient.Client(session)
        if config.timestats:
            session.time_stats_keeper.enable()
        # Test whether we can use an operation that does not require logon
        try:
            if config.verbose:
                print("Attempting to get HMC version ...")
            client.version_info()
        except zhmcclient.ConnectionError:
            print("Warning: Cannot connect to API on HMC %s" % hmc_host)
            return
        # This is the first operation that requires logon
        if config.verbose:
            print("Attempting to list managed CPCs ...")
        cpcs = client.cpcs.list()
        for cpc in sorted(cpcs, key=lambda cpc: cpc.prop('name', '')):
            process_cpc(config, cpc, hmc_host)
        # NOTE(review): logoff is skipped when a zhmcclient.Error occurs in
        # the loop above (the except below returns without logging off) --
        # confirm whether that is acceptable session hygiene here.
        session.logoff()
        if config.timestats:
            print(session.time_stats_keeper)
    except zhmcclient.Error as exc:
        print("Warning: %s on HMC %s: %s" %
              (exc.__class__.__name__, hmc_host, exc))
        return
def process_cpc(config, cpc, hmc_host):
    """
    Collect the properties of one CPC into a dict and emit it as text and CSV.

    Parameters:
      config (argparse.Namespace): Effective settings for this run.
      cpc (zhmcclient.Cpc): The CPC to process.
      hmc_host (string): Host of the HMC managing this CPC.
    """
    if config.verbose:
        print("Attempting to list partitions on CPC %s ..." % cpc.prop('name'))
    # DPM-mode CPCs expose partitions; classic-mode CPCs expose LPARs.
    partitions = cpc.partitions.list() if cpc.dpm_enabled else cpc.lpars.list()
    if config.verbose:
        print("Attempting to retrieve properties of CPC %s ..." %
              cpc.prop('name'))
    cpc_info = {
        'timestamp': config.timestamp,
        'hmc': hmc_host,
        'name': cpc.prop('name'),
        'description': cpc.prop('description'),
        'status': cpc.prop('status'),
        'machine-type': cpc.prop('machine-type'),
        'machine-model': cpc.prop('machine-model'),
        'machine-type-name': model_name(cpc),
        'dpm-enabled': cpc.prop('dpm-enabled', False),
        'is-ensemble-member': cpc.prop('is-ensemble-member', False),
        'iml-mode': cpc.prop('iml-mode'),
        'processors-ifl': cpc.prop('processor-count-ifl'),
        'processors-cp': cpc.prop('processor-count-general-purpose'),
        # in MiB; may be None on older models:
        'memory-total': cpc.prop('storage-customer', None),
        # in MiB; may be None on older models:
        'memory-available': cpc.prop('storage-customer-available', None),
        # may be "?" if the machine type is unknown:
        'partitions-maximum': max_partitions(cpc),
        'partitions-defined': defined_partitions(partitions),
        'partitions-running': running_partitions(partitions),
    }
    print_cpc_as_text(config, cpc_info)
    print_cpc_as_csv(config, cpc_info)
def print_cpc_as_text(config, cpc_info):
    # pylint: disable=unused-argument
    """
    Print the collected CPC properties as human-readable text.
    """
    def _as_gib(mib_value):
        # Memory properties are in MiB and may be None on older machines.
        return "{} GiB".format(mib_value / 1024) if mib_value else "N/A"

    output = [
        "CPC {name} managed by HMC {hmc}:".format(**cpc_info),
        " Description: {description}".format(**cpc_info),
        " Machine: {machine-type}-{machine-model} ({machine-type-name})".format(**cpc_info),
        " DPM enabled: {dpm-enabled}".format(**cpc_info),
        " Member of ensemble: {is-ensemble-member}".format(**cpc_info),
        " IML mode: {iml-mode}".format(**cpc_info),
        " Status: {status}".format(**cpc_info),
        " Processors: CPs: {processors-cp}, IFLs: {processors-ifl}".format(**cpc_info),
        " Memory for partitions: total: {}, available: {}".format(
            _as_gib(cpc_info['memory-total']),
            _as_gib(cpc_info['memory-available'])),
        " Partitions: max-active: {}, defined: {}, running: {}".format(
            cpc_info['partitions-maximum'] or "N/A",
            cpc_info['partitions-defined'] or "N/A",
            cpc_info['partitions-running'] or "N/A"),
    ]
    # One print call with joined lines produces the same bytes as one
    # print call per line.
    print("\n".join(output))
def print_cpc_as_csv(config, cpc_info):
    """
    Append the collected CPC properties as one quoted CSV row, if a CSV
    output file was specified.
    """
    if not config.csv_file:
        return
    with open(config.csv_file, "a") as fp:
        # Column order is defined by the module-level CSV_FIELDS tuple.
        row = ','.join('"{}"'.format(cpc_info[field]) for field in CSV_FIELDS)
        fp.write(row)
        fp.write('\n')
def print_csv_header(config):
    """
    Create the CSV output file with a header row, unless it exists already.
    """
    if not config.csv_file:
        return
    if os.path.isfile(config.csv_file):
        # Existing file: keep it and append data rows later.
        if config.verbose:
            print("Appending to existing CSV output file: %s" %
                  config.csv_file)
        return
    if config.verbose:
        print("Creating new CSV output file: %s" % config.csv_file)
    with open(config.csv_file, "w") as fp:
        fp.write(','.join('"{}"'.format(field) for field in CSV_FIELDS))
        fp.write('\n')
def parse_args():
    """
    Parse command line arguments and return the parsed args.

    In case of argument errors, print an error message and exit.
    """
    version = zhmcclient.__version__  # pylint: disable=no-member
    desc = "Gather data about all CPCs managed by a set of HMCs. The data " \
        "is displayed and optionally written to a CSV-formatted spreadsheet."
    argparser = argparse.ArgumentParser(
        prog=MYNAME, usage="%(prog)s [options] CONFIGFILE",
        description=desc, epilog="", add_help=False)
    pos_arggroup = argparser.add_argument_group('Positional arguments')
    pos_arggroup.add_argument(
        'config_file', metavar='CONFIGFILE', nargs='?', default=None,
        help='File path of config file for this tool. See --help-config '
             'for details about the config file format.')
    general_arggroup = argparser.add_argument_group('Options')
    general_arggroup.add_argument(
        '--csv', dest='csv_file', metavar='CSVFILE',
        help='Write/append data to a CSV spreadsheet file.')
    general_arggroup.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true',
        help='Show more messages while processing.')
    general_arggroup.add_argument(
        '--version', action='version',
        version='%s/zhmcclient %s, Python %s' % (MYNAME, version,
                                                 python_version()),
        help='Show the relevant versions and exit.')
    general_arggroup.add_argument(
        '-h', '--help', action='help',
        help='Show this help message and exit.')
    general_arggroup.add_argument(
        '-hc', '--help-config', dest='help_config', action='store_true',
        help='Show help about the config file format and exit.')
    args = argparser.parse_args()
    if args.help_config:
        help_config()
    if not args.config_file:
        argparser.error('No config file specified (--help-config for details)')
    return args
def help_config():
    """
    Display help about the config file format and exit with status 2.
    """
    print("""
Format of config file.
The config file is a YAML file with the following entries. Unknown entries
are ignored. This format is compatible to the HMC credential file format
used by the zhmcclient examples, so the same file can be used.
The following template shows the format. Anything in angle brackets <> is
meant to be replaced by a real value::
    {myname}:
      hmcs:
        - "<hmc-host-1>"
        - "<hmc-host-2>"
        - ...
      loglevel: <null|info|warning|error|debug>
      logmodule: <zhmcclient|...>
      timestats: <no|yes>
    "<hmc-host-1>":
      userid: <userid for hmc-host-1>
      password: <password for hmc-host-1>
    "<hmc-host-2>":
      userid: <userid for hmc-host-1>
      password: <password for hmc-host-1>
    ...
Notes:
- HMC hosts can be specified as IP v4/v6 addresses or long or short host names.
- The "hmcs" entry defines which HMCs are contacted. All CPCs managed by these
  HMCs are shown.
- If multiple choices are shown (e.g. for loglevel), the first choice is always
  the default.
""".format(myname=MYNAME))
    sys.exit(2)
class ConfigError(Exception):
    """
    Indicates an issue in the config file.

    The exception message describes the missing or invalid config item.
    """
def ping(host, timeout=10):
    """
    Ping a host with one ICMP packet and return whether it responded to the
    ping request.

    Parameters:
      host (string): IP address or host name.
      timeout (integer): Timeout in seconds.

    Returns:
      bool: Host has responded.
    """
    if platform.system() == "Windows":
        # Windows ping: -n is the packet count, -w the timeout in ms.
        options = "-n 1 -w %d" % (timeout * 1000)
        drop_output = ">nul 2>&1"
    else:
        # Linux / OS-X ping: -c is the packet count, -W the timeout in s.
        options = "-c 1 -W %d" % timeout
        drop_output = ">/dev/null 2>&1"
    command = "ping %s %s %s" % (options, host, drop_output)
    return os.system(command) == 0
def model_name(cpc):
    """
    Return the marketing name for a CPC's machine type, or "unknown" if the
    machine type is not in the MACH_TYPE_INFO table.
    """
    info = MACH_TYPE_INFO.get(cpc.prop('machine-type'))
    if info and info[0]:
        return info[0]
    return "unknown"
def max_partitions(cpc):
    """
    Return the maximum number of user partitions or LPARs for a CPC, or "?"
    if the machine type is not in the MACH_TYPE_INFO table.
    """
    info = MACH_TYPE_INFO.get(cpc.prop('machine-type'))
    if info and info[1]:
        return info[1]
    return "?"
def defined_partitions(partitions):
    """
    Return the defined number of user partitions or LPARs.

    Parameters:
      partitions (list): Partition or Lpar resources of the CPC.
    """
    return len(partitions)
def running_partitions(partitions):
    """
    Return the number of running user partitions or LPARs.

    A resource counts as running when its 'status' property is one of the
    "running" status values for its kind.
    """
    def _is_running(part):
        # DPM partitions and classic-mode LPARs use different status values.
        if isinstance(part, zhmcclient.Partition):
            running_stati = PARTITION_RUNNING_STATI
        else:
            running_stati = LPAR_RUNNING_STATI
        return part.prop('status') in running_stati

    return sum(1 for part in partitions if _is_running(part))
# Script entry point.
if __name__ == '__main__':
    main()
| 30.566794 | 79 | 0.617157 | 97 | 0.006056 | 0 | 0 | 0 | 0 | 0 | 0 | 6,992 | 0.436536 |
2a7eae6a3cfc5393e3f489827ec889c44c331afe | 5,991 | py | Python | model-training/mtl_processor.py | imdiptanu/MUDESv2 | 9cab6571a381e076c1174a600c5a27858ef78f53 | [
"Apache-2.0"
] | null | null | null | model-training/mtl_processor.py | imdiptanu/MUDESv2 | 9cab6571a381e076c1174a600c5a27858ef78f53 | [
"Apache-2.0"
] | null | null | null | model-training/mtl_processor.py | imdiptanu/MUDESv2 | 9cab6571a381e076c1174a600c5a27858ef78f53 | [
"Apache-2.0"
] | null | null | null | from farm.data_handler.processor import Processor
from tokenizers.pre_tokenizers import WhitespaceSplit
from farm.data_handler.samples import (
Sample,
SampleBasket,
)
from farm.data_handler.utils import expand_labels
import ast
import numpy as np
import pandas as pd
class MTLProcessor(Processor):
    """
    FARM data processor for multi-task learning on toxic-language data.

    Reads a CSV with columns 'post_tokens', 'post_label' and 'toxic_tokens'
    and produces features for two tasks per sample: a document-level
    classification task (key "document_level_task_label") and a token-level
    tagging task (key "token_level_task_label").
    """
    def __init__(
        self,
        tokenizer,
        max_seq_len,
        data_dir,
        train_filename,
        test_filename,
        delimiter,
        dev_split=0.15,
        dev_filename=None,
        label_list=None,
        metric=None,
        proxies=None,
        **kwargs,
    ):
        # NOTE(review): label_list, metric and **kwargs are accepted but not
        # used here; presumably tasks are registered later via add_task --
        # confirm against the training script.
        self.delimiter = delimiter
        super(MTLProcessor, self).__init__(
            tokenizer=tokenizer,
            max_seq_len=max_seq_len,
            train_filename=train_filename,
            dev_filename=dev_filename,
            test_filename=test_filename,
            dev_split=dev_split,
            data_dir=data_dir,
            tasks={},
            proxies=proxies,
        )
    def file_to_dicts(self, file: str):
        """
        Read the CSV file and convert each row into a FARM-style dict.

        'post_tokens' and 'toxic_tokens' hold Python-list literals as
        strings (e.g. "['a', 'b']"), hence the literal_eval calls.
        """
        dicts = list()
        df = pd.read_csv(file)
        for text, label, tokens in zip(
            df.post_tokens.values, df.post_label.values, df.toxic_tokens.values
        ):
            columns = dict()
            text = ast.literal_eval(text)
            tokens = ast.literal_eval(tokens)
            columns["text"] = " ".join(text)
            columns["document_level_task_label"] = label  # Key hard-coded
            columns["token_level_task_label"] = list(map(str, tokens))  # Key hard-coded
            dicts.append(columns)
        return dicts
    @staticmethod
    def _get_start_of_word(word_ids):
        """
        Return a 0/1 list marking tokens that start a new word.

        word_ids maps each token to its word index (None for special
        tokens); None is replaced by -1 so the positive diff marks word
        boundaries.
        """
        words = np.array(word_ids)
        words[words == None] = -1
        start_of_word_single = [0] + list(np.ediff1d(words) > 0)
        start_of_word_single = [int(x) for x in start_of_word_single]
        return start_of_word_single
    # Most of the code is copied from NERProcessor - dataset_from_dicts()
    def dataset_from_dicts(
        self, dicts, indices=None, return_baskets=False, non_initial_token="X"
    ):
        """
        Convert sample dicts into a PyTorch dataset of tensorized features.

        Whitespace-pre-tokenizes each text, encodes the words with the
        subword tokenizer, and builds label ids for every registered task.
        Returns (dataset, tensor_names, problematic_sample_ids[, baskets]).
        """
        self.baskets = []
        self.pre_tokenizer = WhitespaceSplit()
        texts = [x["text"] for x in dicts]
        words_and_spans = [self.pre_tokenizer.pre_tokenize_str(x) for x in texts]
        words = [[x[0] for x in y] for y in words_and_spans]
        word_spans_batch = [[x[1] for x in y] for y in words_and_spans]
        tokenized_batch = self.tokenizer.batch_encode_plus(
            words,
            return_offsets_mapping=True,
            return_special_tokens_mask=True,
            return_token_type_ids=True,
            return_attention_mask=True,
            truncation=True,
            max_length=self.max_seq_len,
            padding="max_length",
            is_split_into_words=True,
        )
        for i in range(len(dicts)):
            tokenized = tokenized_batch[i]
            d = dicts[i]
            id_external = self._id_from_dict(d)
            if indices:
                id_internal = indices[i]
            else:
                id_internal = i
            input_ids = tokenized.ids
            segment_ids = tokenized.type_ids
            # 0/1 mask of tokens that begin a word (needed for label expansion).
            initial_mask = self._get_start_of_word(tokenized.words)
            assert len(initial_mask) == len(input_ids)
            padding_mask = tokenized.attention_mask
            if return_baskets:
                token_to_word_map = tokenized.words
                word_spans = word_spans_batch[i]
                tokenized_dict = {
                    "tokens": tokenized.tokens,
                    "word_spans": word_spans,
                    "token_to_word_map": token_to_word_map,
                    "start_of_word": initial_mask,
                }
            else:
                tokenized_dict = {}
            feature_dict = {
                "input_ids": input_ids,
                "padding_mask": padding_mask,
                "segment_ids": segment_ids,
                "initial_mask": initial_mask,
            }
            for task_name, task in self.tasks.items():
                try:
                    label_name = task["label_name"]
                    labels_word = d[label_name]
                    label_list = task["label_list"]
                    label_tensor_name = task["label_tensor_name"]
                    if task["task_type"] == "classification":
                        label_ids = [label_list.index(labels_word)]
                    elif task["task_type"] == "ner":
                        # Spread word-level labels over subword tokens; only
                        # initial tokens keep the real label.
                        labels_token = expand_labels(
                            labels_word, initial_mask, non_initial_token
                        )
                        label_ids = [label_list.index(lt) for lt in labels_token]
                except ValueError:
                    label_ids = None
                    # NOTE(review): labels_token is only bound in the "ner"
                    # branch; a ValueError from the classification branch
                    # would raise NameError here -- confirm intended.
                    problematic_labels = set(labels_token).difference(set(label_list))
                    print(
                        f"[Task: {task_name}] Could not convert labels to ids via label_list!"
                        f"\nWe found a problem with labels {str(problematic_labels)}"
                    )
                except KeyError:
                    label_ids = None
                if label_ids:
                    feature_dict[label_tensor_name] = label_ids
            curr_sample = Sample(
                id=None, clear_text=d, tokenized=tokenized_dict, features=[feature_dict]
            )
            curr_basket = SampleBasket(
                id_internal=id_internal,
                raw=d,
                id_external=id_external,
                samples=[curr_sample],
            )
            self.baskets.append(curr_basket)
        # Log one sample only for the first chunk (index 0), matching FARM.
        if indices and 0 not in indices:
            pass
        else:
            self._log_samples(1)
        dataset, tensor_names = self._create_dataset()
        ret = [dataset, tensor_names, self.problematic_sample_ids]
        if return_baskets:
            ret.append(self.baskets)
        return tuple(ret)
| 34.630058 | 94 | 0.55333 | 5,712 | 0.95343 | 0 | 0 | 291 | 0.048573 | 0 | 0 | 505 | 0.084293 |
2a7ffe46901a396481978f7847250513748371b6 | 611 | py | Python | knightmovs_test.py | rcolomina/pythonchess | 1b12ea4a1668da6c47dd39ff16d1e48af33ea2f5 | [
"MIT"
] | null | null | null | knightmovs_test.py | rcolomina/pythonchess | 1b12ea4a1668da6c47dd39ff16d1e48af33ea2f5 | [
"MIT"
] | 2 | 2016-11-01T09:57:36.000Z | 2016-11-01T10:05:50.000Z | knightmovs_test.py | rcolomina/pythonchess | 1b12ea4a1668da6c47dd39ff16d1e48af33ea2f5 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from piece import Piece
from gameNode import GameNode
from knightmovs import *
from functions import *
# Set up a small test position: four white pieces and one black queen.
listPiecesWhite=[]
listPiecesBlack=[]
w1=Piece('Q',[3,1])
w2=Piece('N',[3,2])
w3=Piece('N',[4,2])
w4=Piece('B',[2,3])
listPiecesWhite.append(w1)
listPiecesWhite.append(w2)
listPiecesWhite.append(w3)
listPiecesWhite.append(w4)
b1=Piece('q',[3,4])
listPiecesBlack.append(b1)
gameNode=GameNode(listPiecesWhite,listPiecesBlack,"white")
# In this position the knight w3 must have exactly one target: [3,4].
assert(listTargetsKnight(gameNode,w3)==[[3,4]])
# NOTE(review): the assertion checks w3 but the message prints w2.coordenates
# -- looks like a copy/paste slip; confirm which knight is intended.
# (Python 2 print statement: this script targets Python 2.)
print "Checked knight movements on ",w2.coordenates," which should be ",listTargetsKnight(gameNode,w3)
| 21.821429 | 102 | 0.752864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.144026 |
2a828d4b761c0ddb67592e07965d549c25c16293 | 1,519 | py | Python | Rendering/Core/Testing/Python/ImageActor.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 3 | 2020-06-20T23:31:06.000Z | 2021-01-11T02:17:16.000Z | Rendering/Core/Testing/Python/ImageActor.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 1 | 2020-12-01T23:21:02.000Z | 2020-12-02T23:44:43.000Z | Rendering/Core/Testing/Python/ImageActor.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 5 | 2015-10-09T04:12:29.000Z | 2021-12-15T16:57:11.000Z | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
# Root directory of the VTK test data tree.
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# load in the image
#
pnmReader = vtk.vtkTIFFReader()
pnmReader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/beach.tif")
# "beach.tif" image contains ORIENTATION tag which is
# ORIENTATION_TOPLEFT (row 0 top, col 0 lhs) type. The TIFF
# reader parses this tag and sets the internal TIFF image
# orientation accordingly. To overwrite this orientation with a vtk
# convention of ORIENTATION_BOTLEFT (row 0 bottom, col 0 lhs ), invoke
# SetOrientationType method with parameter value of 4.
pnmReader.SetOrientationType(4)
# Convert the RGB image to a single-component luminance image.
lum = vtk.vtkImageLuminance()
lum.SetInputConnection(pnmReader.GetOutputPort())
ia = vtk.vtkImageActor()
ia.GetMapper().SetInputConnection(lum.GetOutputPort())
# Add the actors to the renderer, set the background and size
ren1.AddActor(ia)
ren1.SetBackground(0.1,0.2,0.4)
renWin.SetSize(400,400)
# render the image
renWin.Render()
# switch from greyscale input to RGB to test against an old bug
ia.GetMapper().SetInputConnection(pnmReader.GetOutputPort())
cam1 = ren1.GetActiveCamera()
cam1.Elevation(-30)
cam1.Roll(-20)
ren1.ResetCameraClippingRange()
renWin.Render()
# prevent the tk window from showing up then start the event loop
# NOTE(review): the interactor is never started here; presumably the VTK
# regression-test harness drives rendering and image comparison -- confirm.
# --- end of script --
| 35.325581 | 70 | 0.780118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 704 | 0.463463 |
2a8363a38588e48c3e8be09e652671f8d40a05b1 | 152 | py | Python | bot/typings/common.py | Axelancerr/Life | 1e214af2a46439a756c442967be4bfa8b05fd99c | [
"MIT"
] | 27 | 2020-10-18T04:35:00.000Z | 2021-08-03T13:21:27.000Z | dashboard/typings/common.py | Axelancerr/Life-dashboard | ecde503c1a90fdedd680ae19d22b3f5c9da4c4c2 | [
"MIT"
] | 19 | 2020-12-04T23:03:51.000Z | 2021-08-14T20:21:53.000Z | dashboard/typings/common.py | Axelancerr/Life-dashboard | ecde503c1a90fdedd680ae19d22b3f5c9da4c4c2 | [
"MIT"
] | 7 | 2020-10-26T18:51:17.000Z | 2021-07-07T05:39:01.000Z | # Future
from __future__ import annotations
# Standard Library
from typing import Literal
ImageFormat = Literal["webp", "jpeg", "jpg", "png", "gif"]
| 16.888889 | 58 | 0.723684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.348684 |
2a84c203c8e242c343a0a2986ab07c180e14f97b | 6,920 | py | Python | boggle.py | tron32213021/boggle-game | 3203e4bb51ab9ccee09cc292106e0385d1cfbb4c | [
"MIT"
] | null | null | null | boggle.py | tron32213021/boggle-game | 3203e4bb51ab9ccee09cc292106e0385d1cfbb4c | [
"MIT"
] | null | null | null | boggle.py | tron32213021/boggle-game | 3203e4bb51ab9ccee09cc292106e0385d1cfbb4c | [
"MIT"
] | null | null | null | #!/usr/local/anaconda/bin/python
#copyright: Zhenye Jiang, tronsupernova@outlook.com
from random import random
from tkinter import *
from copy import deepcopy
class Boggle():
    '''
    A Boggle-style word game with a Tkinter GUI.

    Attributes:
      F (dict): per-character probability over the 5-letter word list
      T (dict): nested dict (trie) of all 5-letter words; a leaf maps the
        last character to the full word string
      size (int): board side length (5x5)
      cellWidth (int): cell width in pixels
      clone: reference to the generated board (used by resetGame)
      soln (list): current path of selected (row, col) cells
      ActionNow: last clicked cell as (row, col), or None
      ActionNow_correct (bool): whether the last click extended a valid path
    '''
    def __init__(self, file='words.dat'):
        self.readData(file)
        self.size=5
        self.cellWidth=30
        self.newGame()
    def readData(self, file):
        # Build the character frequency table F and the word trie T from
        # the word file (one 5-letter word per line).
        self.F=dict()
        self.T=dict()
        obj=open(file)
        lines=obj.readlines()
        words_number=len(lines)
        for word in lines:
            word=word.strip()
            for c in word:
                if c in self.F.keys(): # accumulate total character counts
                    self.F[c]+=1
                else:
                    self.F[c]=1
            t=self.T # build the nested trie dict T
            for i in range(4):
                if word[i] not in t.keys():
                    t[word[i]]=dict()
                t=t[word[i]]
            t[word[4]]=word
        for key in self.F.keys():
            # Normalize counts to probabilities (5 characters per word).
            self.F[key]/=words_number*5
    def ckSoln(self,soln): # Returns False if the path is invalid; the word (str) if the path spells a complete word; otherwise the remaining sub-trie (dict)
        for index in range(len(soln)-1):
            # Consecutive cells must be orthogonally adjacent
            # (Manhattan distance 1).
            if abs(soln[index][0]-soln[index+1][0])+abs(soln[index][1]-soln[index+1][1])>1:
                return False
        t=self.T
        for (x,y) in soln:
            c=self.board[x][y]
            if c not in t:
                return False
            t=t[c]
        return t
    def resetGame(self):
        # Restore the board and clear the current selection path.
        self.board=self.clone
        self.soln=list()
        self.ActionNow=None
        self.ActionNow_correct=None
    def newGame(self):
        # Generate a fresh random board and clear all selection state.
        self.board=list()
        for i in range(self.size):
            t=list()
            self.board.append(t)
            for j in range(self.size):
                t.append(self.randChoice())
        #self.board=[['b','y','u','u','n'],['s','x','o','y','r'],['h','s','l','o','r'],['t','y','a','f','n'],['b','c','r','o','c']] # for testing
        self.soln=list()
        self.ActionNow=None
        self.ActionNow_correct=None
        # NOTE(review): this is an alias, not a copy (deepcopy is imported
        # but unused). Harmless as written, since board cells are never
        # mutated after creation -- confirm if that ever changes.
        self.clone=self.board
    # Return one letter chosen randomly according to the frequency weights
    def randChoice(self):
        z=random()
        p=0
        for (key,value) in self.F.items():
            p+=value
            if p>=z:
                return key
        # NOTE(review): if floating-point rounding leaves the cumulative sum
        # below z, this falls through and returns None -- confirm acceptable.
    def playTK(self):
        self.initTK()
        print("<Left Click>: Choose character\n<Mid Click>: New Game\n<Right Click>: Reset Game\n<Triple Click>: Show all solutions")
        self.win.mainloop()
    def initTK(self):
        # Create the window object
        self.win =Tk()
        self.win.title('Boggle')
        # Create the canvas
        self.canvas=Canvas(self.win,width=self.size*self.cellWidth,height=self.size*self.cellWidth,bg='white')
        self.canvas.pack()
        self.drawCanvas()
        # Bind mouse events
        self.canvas.bind("<Button-1>",self.extend)
        self.canvas.bind("<Button-2>",self.new)
        self.canvas.bind("<Button-3>",self.reset)
        self.canvas.bind("<Triple-Button-1>",self.getAllSolutions)
        self.canvas.focus_set()
        self.updateTK()
    # Draw the game board
    def drawCanvas(self):
        self.canvas.create_rectangle(0,0,self.size*self.cellWidth,self.size*self.cellWidth,fill='white')
        # draw the grid cells
        for i in range(self.size):
            for j in range(self.size):
                self.canvas.create_rectangle(i*self.cellWidth,j*self.cellWidth,(i+1)*self.cellWidth,(j+1)*self.cellWidth)
        for i in range(self.size):
            for j in range(self.size):
                self.canvas.create_text(j*self.cellWidth+self.cellWidth/2,
                                        i*self.cellWidth+self.cellWidth/2,
                                        text=self.board[i][j].upper(),
                                        fill='black')
    # Update the marker circle: green for a valid extension, red otherwise
    def updateTK(self):
        if self.ActionNow != None:
            if self.ActionNow_correct:
                color='green'
            else:
                color='red'
            x=self.ActionNow[0]
            y=self.ActionNow[1]
            self.canvas.create_oval(y*self.cellWidth+1,
                                    x*self.cellWidth+1,
                                    (y+1)*self.cellWidth-1,
                                    (x+1)*self.cellWidth-1,
                                    fill=color)
            self.canvas.create_text(y*self.cellWidth+self.cellWidth/2,
                                    x*self.cellWidth+self.cellWidth/2,
                                    text=self.board[x][y].upper(),
                                    fill='black')
    # Select one cell and try to extend the current path with it
    def extend(self,event):
        row=event.y//self.cellWidth
        col=event.x//self.cellWidth
        if (row,col) in self.soln:
            return
        self.ActionNow=(row,col)
        self.soln.append((row,col))
        result=self.ckSoln(self.soln)
        if type(result)==type(False):
            # Invalid extension: undo the append and mark red.
            self.soln.pop()
            self.ActionNow_correct=False
        else:
            self.ActionNow_correct=True
        self.updateTK()
        if type(result)==type(''):
            # The path spells a complete word.
            self.findASolution(result)
    # Start a new game (middle click)
    def new(self,event):
        self.newGame()
        self.drawCanvas()
    # Reset the current game (right click)
    def reset(self,event):
        self.resetGame()
        self.drawCanvas()
    # Print all solutions of the current game (triple click)
    def getAllSolutions(self,event):
        self.solve()
    # Called when the user finds a word
    def findASolution(self,result):
        print("You find a word:",result)
        self.resetGame()
        self.drawCanvas()
    # Find all solutions by depth-first search from every cell
    def solve(self):
        self.allSolutions=list()
        for i in range(self.size):
            for j in range(self.size):
                self.solutionPath=list()
                self.findAllSolutions((i,j))
        print("All solutions:"+str(self.allSolutions)+'\n')
    def findAllSolutions(self,coor):
        # DFS step: each call appends exactly one cell; the caller pops it
        # after the recursive call returns.
        x=coor[0]
        y=coor[1]
        self.solutionPath.append(coor)
        result=self.ckSoln(self.solutionPath)
        if type(result)==type(False):
            return
        if type(result)==type(''):
            self.allSolutions.append(result)
            return
        # Recurse into the four orthogonal neighbours not yet on the path.
        if(x>0 and (x-1,y) not in self.solutionPath):
            self.findAllSolutions((x-1,y))
            self.solutionPath.pop()
        if(y>0 and (x,y-1) not in self.solutionPath):
            self.findAllSolutions((x,y-1))
            self.solutionPath.pop()
        if(y<4 and (x,y+1) not in self.solutionPath):
            self.findAllSolutions((x,y+1))
            self.solutionPath.pop()
        if(x<4 and (x+1,y) not in self.solutionPath):
            self.findAllSolutions((x+1,y))
            self.solutionPath.pop()
# Script entry point: start the Tkinter game loop.
if __name__ == "__main__":
    Boggle().playTK()
| 31.454545 | 137 | 0.506647 | 6,975 | 0.969828 | 0 | 0 | 0 | 0 | 0 | 0 | 1,142 | 0.158788 |
2a84cbc2017bac94e59b7e27f9e01644f4ef565c | 7,826 | py | Python | chart/message_passing_tree/message_passing_tree/decorators.py | JoeyBF/sseq | d553df5e2466aaad47f4a36bf5e051a3922b0dd0 | [
"Apache-2.0",
"MIT"
] | 7 | 2021-04-22T04:06:09.000Z | 2022-01-25T04:05:49.000Z | chart/message_passing_tree/message_passing_tree/decorators.py | JoeyBF/sseq | d553df5e2466aaad47f4a36bf5e051a3922b0dd0 | [
"Apache-2.0",
"MIT"
] | 68 | 2020-03-21T22:37:24.000Z | 2022-03-31T02:51:35.000Z | chart/message_passing_tree/message_passing_tree/decorators.py | JoeyBF/sseq | d553df5e2466aaad47f4a36bf5e051a3922b0dd0 | [
"Apache-2.0",
"MIT"
] | 5 | 2021-02-17T06:37:43.000Z | 2022-02-01T03:53:22.000Z | from copy import copy
import functools
import inspect
import sys
import traceback
from . import ansi
from .agent import Agent
def reset_global_handlers():
global HANDLERS
HANDLERS = {
"in" : { },
"out" : { },
}
reset_global_handlers()
def subscribe_to(subs):
def helper(cls):
if subs == "*":
cls.subscriptions = set(["*"])
elif type(subs) is list:
cls.subscriptions = set(subs)
else:
raise TypeError(f"""Subscribe decorator argument expected to be either "*" or a list, not "{subs}".""")
return cls
return helper
def add_inherited_handlers(cls):
outward_handlers = {}
inward_handlers = {}
for super in cls.__bases__:
if hasattr(super, "outward_handlers") and super.outward_handlers is not None:
outward_handlers.update(super.outward_handlers)
if hasattr(super, "inward_handlers") and super.inward_handlers is not None:
inward_handlers.update(super.inward_handlers)
outward_handlers.update(cls.outward_handlers)
inward_handlers.update(cls.inward_handlers)
cls.outward_handlers = outward_handlers
cls.inward_handlers = inward_handlers
return cls
def collect_handlers(*, inherit):
def helper(cls):
cls.outward_handlers = HANDLERS["out"]
cls.inward_handlers = HANDLERS["in"]
reset_global_handlers()
if inherit:
add_inherited_handlers(cls)
return cls
return helper
def handle_inbound_messages(func):
return handle("in")(func)
def handle_outbound_messages(func):
return handle("out")(func)
handler_source_agent_argument_name = {"in" : "source_agent_path", "out" : "source_agent_id"}
def declared_at(func):
filename = inspect.getsourcefile(func)
lineno = inspect.getsourcelines(func)[1]
ctx = inspect.getframeinfo(inspect.stack()[3][0])
try:
cls = ctx.function
finally:
del ctx
return f""""{ansi.info(func.__name__)}" was declared:\n""" +\
f""" in file "{ansi.info(filename)}"\n""" +\
f""" in class "{ansi.info(cls)}"\n""" +\
f""" on line {ansi.info(lineno)}"""
def handle(in_or_out : str):
if in_or_out not in HANDLERS:
raise ValueError(f"""Second argument "in_or_out" should be "in" or "out" not "{in_or_out}".""")
def helper(func):
colored_func_name = f"{ansi.info(func.__name__)}"
func_args = inspect.getargspec(func).args
second_argument_name = handler_source_agent_argument_name[in_or_out]
def get_sample_declaration(colored_positions):
subs = [ansi.INFO]*6
for i, pos in enumerate(["async", "self", "envelope"]):
if pos in colored_positions:
subs[2*i] = ansi.CORRECTION
return f""""{colored_func_name}" should be declared as"""+\
f""" "{ansi.INFO}%sasync%s def {func.__name__}(%sself%s, %senvelope%s, ...){ansi.NOCOLOR}".""" % tuple(subs)
if not inspect.iscoroutinefunction(func):
raise TypeError(
f"""Handler method "{colored_func_name}" """ +\
f"""should be defined with the "{ansi.correction("async")}" keyword.\n""" +\
get_sample_declaration(["async"]) + "\n" +\
declared_at(func) + "\n" +\
declared_at(func)
)
prefix = "handle__"
suffix = "__a"
if not func.__name__.startswith(prefix):
raise TypeError(
f"""Handler method name "{ansi.mistake(func.__name__)}" """ +\
f"""should start with "{ansi.correction(prefix)}".""" + "\n" +\
declared_at(func)
)
if not func.__name__.endswith(suffix):
raise TypeError(
f"""Handler method name "{ansi.mistake(func.__name__)}" """ +\
f"""should end with "{ansi.correction(suffix)}".""" + "\n" +\
declared_at(func)
)
if len(func_args) < 2:
raise TypeError(
f"""Handler method "{colored_func_name}" """ +\
f"""should have at least two positional arguments.\n""" +\
get_sample_declaration(["self", "envelope"]) + "\n" +\
declared_at(func)
)
if func_args[0] != "self":
raise TypeError(
f"""The first argument of handler method "{colored_func_name}" """ +\
f"""should be named "{ansi.correction("self")}" not "{ansi.mistake(func_args[0])}".\n""" +\
get_sample_declaration(["self"]) + "\n" +\
declared_at(func)
)
if func_args[1] != "envelope":
raise TypeError(
f"""The second argument of handler function "{colored_func_name}" """ +\
f"""should be named "{ansi.correction("envelope")}" not "{ansi.mistake(func_args[1])}".\n""" +\
get_sample_declaration(["envelope"]) + "\n" +\
declared_at(func)
)
handler_cmd = get_handler_cmd(func)
wrapper = get_handler_wrapper(in_or_out, func)
HANDLERS[in_or_out][handler_cmd] = wrapper
return wrapper
return helper
# Given a function named "handle__cmd__sub_cmd__a" return "cmd.sub_cmd"
def get_handler_cmd(func):
    """Derive the wire command name from a handler method's name.

    Strips the mandatory ``handle__`` prefix and ``__a`` suffix, maps the
    remaining double underscores to dots, and translates the special name
    ``all`` into the wildcard ``"*"``.

    Raises:
        ValueError: if the name lacks the required prefix or suffix.
    """
    name = func.__name__
    prefix = "handle__"
    suffix = "__a"
    if not name.startswith(prefix):
        raise ValueError(f"""Method name {name} should start with "{prefix}".""")
    if not name.endswith(suffix):
        raise ValueError(f"""Method name {name} should end with "{suffix}".""")
    core = name[len(prefix):-len(suffix)].replace("__", ".")
    return "*" if core == "all" else core
def get_handler_wrapper(in_or_out, func_a):
    """Wrap handler coroutine `func_a` so it is invoked with the envelope's
    message args/kwargs splatted in, with logging and improved TypeError
    stack traces.

    Args:
        in_or_out: either "in" or "out"; used for the log label and to decide
            whether the envelope's message is copied after handling.
        func_a: the handler coroutine being wrapped.

    Returns:
        The async wrapper suitable for registration in HANDLERS.
    """
    async def handler_wrapper_a(self, envelope):
        # Log before dispatching so failures can be correlated with the envelope.
        self.log_envelope_task(f"handle_{in_or_out}bound_method", envelope)
        try:
            await func_a(self,
                envelope,
                *envelope.msg.args, **envelope.msg.kwargs
            )
        except TypeError as e:
            # A TypeError here may be a signature mismatch between the message
            # and the handler; attach the wrapped function's location so the
            # trace points at the real handler, then re-raise unchanged.
            add_wrapped_func_to_stack_trace_if_necessary(e, handler_wrapper_a, func_a)
            raise
        if in_or_out == "out":
            # Shallow-copy the outbound message (and its cmd) back onto the
            # envelope — presumably so later mutation does not affect other
            # holders of the original msg object. TODO(review): confirm intent.
            msg = envelope.msg
            new_msg = copy(msg)
            new_msg.cmd = copy(msg.cmd)
            envelope.msg = new_msg
    return handler_wrapper_a
class MockTraceback:
    """Duck-typed traceback exposing just the attributes that
    ``traceback.extract_tb`` reads (``tb_frame``, ``tb_lineno``, ``tb_next``)."""

    def __init__(self, tb_frame, tb_lineno):
        self.tb_frame, self.tb_lineno = tb_frame, tb_lineno
        # Single synthetic frame: terminate the chain immediately.
        self.tb_next = None
class MockFrame:
    """Duck-typed frame object carrying a code object plus globals, for use
    with :class:`MockTraceback`."""

    def __init__(self, code):
        self.f_code = code
        # Reuse this module's globals for the synthetic frame.
        self.f_globals = globals()
def add_wrapped_func_to_stack_trace_if_necessary(exception, wrapper, func):
    """ If either the message is wrong or the argspec of the handler function is wrong,
    then we might get a TypeError reporting that the wrapped function has incorrect arguments.
    By default, the resulting stacktrace only mentions "func" leaving the identity of the wrapped
    function completely unclear.
    If there is an error
    """
    # Only act when the exception was raised directly in the wrapper itself
    # (i.e. the last traceback frame is the wrapper); otherwise the trace
    # already points at real handler code.
    if traceback.extract_tb(exception.__traceback__)[-1].name != wrapper.__name__:
        return
    # exc_type, exc_instance, exc_traceback = exc_info
    # NOTE(review): `filename` is computed but never used below — the code
    # object already carries the filename; confirm it can be removed.
    filename = inspect.getsourcefile(func)
    lineno = inspect.getsourcelines(func)[1]
    # Attach a synthetic one-frame traceback pointing at the wrapped handler's
    # definition so error reporting can surface its identity.
    exception.extra_traceback = traceback.extract_tb(
        MockTraceback(
            tb_lineno=lineno,
            tb_frame=MockFrame(func.__code__)
        )
    )
2a860262b33282503fccf38e95544724adfe85c7 | 347 | py | Python | Python/bolha_curta.py | robertogoes/exercicios-coursera-python | 5093cc31ccda764b6131987fa81601b7179a9bfc | [
"MIT"
] | null | null | null | Python/bolha_curta.py | robertogoes/exercicios-coursera-python | 5093cc31ccda764b6131987fa81601b7179a9bfc | [
"MIT"
] | null | null | null | Python/bolha_curta.py | robertogoes/exercicios-coursera-python | 5093cc31ccda764b6131987fa81601b7179a9bfc | [
"MIT"
] | null | null | null | def bolha_curta(self, lista):
fim = len(lista)
for i in range(fim-1, 0, -1):
trocou = False
for j in range(i):
if lista[j] > lista[j+1]:
lista[j], lista[j+1] = lista[j+1], lista[j]
trocou = True
if trocou== False:
return
| 26.692308 | 63 | 0.414986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2a8616811d43ccd05d2af0c119eb8d355bf9f7bd | 7,740 | py | Python | tests/lightning/utils/helpers.py | jexio/fulmo | daa4bd4f1cf3b8bd785a9024a413db9a0238f10c | [
"MIT"
] | null | null | null | tests/lightning/utils/helpers.py | jexio/fulmo | daa4bd4f1cf3b8bd785a9024a413db9a0238f10c | [
"MIT"
] | 80 | 2021-07-13T12:58:25.000Z | 2022-03-24T03:17:08.000Z | tests/lightning/utils/helpers.py | jexio/fulmo | daa4bd4f1cf3b8bd785a9024a413db9a0238f10c | [
"MIT"
] | null | null | null | # flake8: noqa
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Tuple
import numpy as np
import torch
from pytorch_lightning import LightningDataModule, LightningModule
from torch.utils.data import DataLoader, Dataset, IterableDataset, Subset
from fulmo.readers import IReader
class RandomDictDataset(Dataset):
    """Map-style dataset of random vectors, served as ``{"a": x, "b": x + 2}``."""

    def __init__(self, size: int, length: int):
        self.len = length
        self.data = torch.randn(length, size)

    def __getitem__(self, index):
        sample = self.data[index]
        return {"a": sample, "b": sample + 2}

    def __len__(self):
        return self.len
class RandomDataset(Dataset):
    """Map-style dataset holding ``length`` random vectors of ``size`` floats."""

    def __init__(self, size: int, length: int):
        self.len = length
        self.data = torch.randn(length, size)

    def __getitem__(self, index):
        # Index straight into the pre-generated tensor.
        return self.data[index]

    def __len__(self):
        return self.len
class RandomIterableDataset(IterableDataset):
    """Iterable dataset yielding ``count`` fresh random vectors of ``size``."""

    def __init__(self, size: int, count: int):
        self.count = count
        self.size = size

    def __iter__(self):
        # Generate lazily; a new random tensor per item, no length reported.
        return (torch.randn(self.size) for _ in range(self.count))
class RandomIterableDatasetWithLen(IterableDataset):
    """Like :class:`RandomIterableDataset`, but it also reports ``len()``."""

    def __init__(self, size: int, count: int):
        self.count = count
        self.size = size

    def __iter__(self):
        return iter(torch.randn(self.size) for _ in range(len(self)))

    def __len__(self):
        return self.count
class NpyGenerator(IReader):
    """Reader stub that fabricates a random array instead of loading one.

    Mimics a ``csv``-backed npy reader for tests.
    """

    def __init__(
        self,
        input_key: Optional[str] = None,
        output_key: Optional[str] = None,
        shape: Tuple[int, int, int] = (256, 256, 3),
    ) -> None:
        """Create a new instance of NpyReader.

        Args:
            input_key: key to use from annotation dict
            output_key: key to use to store the result
            shape: shape of the generated random array
        """
        super().__init__(input_key, output_key or input_key)
        self.shape = shape

    def __call__(self, element: Dict[str, Any]) -> Dict[str, np.ndarray]:
        """Generate a random array and return it under ``output_key``.

        Args:
            element: elem in your datasets (ignored; data is synthesized)

        Returns:
            Dict[`output_key`, np.ndarray]
        """
        return {self.output_key: np.random.rand(*self.shape)}
class TestModel(torch.nn.Module):
    """Small backbone + head MLP mapping 32-dim inputs to 2-dim outputs."""

    def __init__(self):
        super().__init__()
        nn = torch.nn
        # 32 -> 32 feature extractor.
        self.backbone = nn.Sequential(
            nn.Linear(32, 32),
            nn.ReLU(),
            nn.Dropout(),
        )
        # 32 -> 2 classifier head.
        self.head = nn.Sequential(
            nn.Linear(32, 16),
            nn.Dropout(0.5),
            nn.ReLU(),
            nn.Linear(16, 8),
            nn.Dropout(0.5),
            nn.ReLU(),
            nn.Linear(8, 2),
        )

    def forward(self, x):
        features = self.backbone(x)
        return self.head(features)
class BoringModel(LightningModule):
    def __init__(self):
        """
        Testing PL Module

        Use as follows:
        - subclass
        - modify the behavior for what you want

        class TestModel(BaseTestModel):
            def training_step(...):
                # do your own thing

        or:

        model = BaseTestModel()
        model.training_epoch_end = None
        """
        super().__init__()
        self.model = TestModel()
        # Default 64-sample random loaders for every stage; each can be
        # overridden via the set_*_data setters below.
        self._train_data = DataLoader(RandomDataset(32, 64))
        self._val_data = DataLoader(RandomDataset(32, 64))
        self._test_data = DataLoader(RandomDataset(32, 64))
        self._predict_data = DataLoader(RandomDataset(32, 64))

    def set_train_data(self, value) -> None:
        # Replace the training dataloader.
        self._train_data = value

    def set_val_data(self, value) -> None:
        # Replace the validation dataloader.
        self._val_data = value

    def set_test_data(self, value) -> None:
        # Replace the test dataloader.
        self._test_data = value

    def set_predict_data(self, value) -> None:
        # Replace the predict dataloader.
        self._predict_data = value

    def forward(self, x):
        return self.model(x)

    def loss(self, batch, prediction):
        # An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
        return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))

    def step(self, x):
        # Forward pass followed by an MSE against an all-ones target.
        x = self(x)
        out = torch.nn.functional.mse_loss(x, torch.ones_like(x))
        return out

    def training_step(self, batch, batch_idx):
        output = self(batch)
        loss = self.loss(batch, output)
        return {"loss": loss}

    def training_step_end(self, training_step_outputs):
        # Pass-through hook (kept so subclasses/tests can override/remove it).
        return training_step_outputs

    def training_epoch_end(self, outputs) -> None:
        # Aggregate losses; result is discarded, hook exists for test coverage.
        torch.stack([x["loss"] for x in outputs]).mean()

    def validation_step(self, batch, batch_idx):
        output = self(batch)
        loss = self.loss(batch, output)
        return {"x": loss}

    def validation_epoch_end(self, outputs) -> None:
        # Aggregate "x" values; result discarded (hook-coverage only).
        torch.stack([x["x"] for x in outputs]).mean()

    def test_step(self, batch, batch_idx):
        output = self(batch)
        loss = self.loss(batch, output)
        return {"y": loss}

    def test_epoch_end(self, outputs) -> None:
        # Aggregate "y" values; result discarded (hook-coverage only).
        torch.stack([x["y"] for x in outputs]).mean()

    def configure_optimizers(self):
        # Plain SGD with a step-decay scheduler.
        optimizer = torch.optim.SGD(self.model.parameters(), lr=0.1)
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
        return [optimizer], [lr_scheduler]

    def train_dataloader(self):
        return self._train_data

    def val_dataloader(self):
        return self._val_data

    def test_dataloader(self):
        return self._test_data

    def predict_dataloader(self):
        return self._predict_data
class BoringDataModule(LightningDataModule):
    """Minimal LightningDataModule backed by a single RandomDataset that is
    split into four disjoint 64-sample subsets (train/val/test/predict)."""

    def __init__(self, data_dir: str = "./"):
        super().__init__()
        self.data_dir = data_dir
        self.non_picklable = None
        self.checkpoint_state: Optional[str] = None

    def prepare_data(self):
        # One shared pool of 256 random samples; setup() slices it per stage.
        self.random_full = RandomDataset(32, 64 * 4)

    def setup(self, stage: Optional[str] = None):
        # Each stage takes its own contiguous 64-sample slice of random_full;
        # `stage is None` sets up everything (Lightning's convention).
        if stage == "fit" or stage is None:
            self.random_train = Subset(self.random_full, indices=range(64))
            self.dims = self.random_train[0].shape

        if stage in ("fit", "validate") or stage is None:
            self.random_val = Subset(self.random_full, indices=range(64, 64 * 2))

        if stage == "test" or stage is None:
            self.random_test = Subset(self.random_full, indices=range(64 * 2, 64 * 3))
            # Keep previously computed dims if "fit" already set them.
            self.dims = getattr(self, "dims", self.random_test[0].shape)

        if stage == "predict" or stage is None:
            self.random_predict = Subset(self.random_full, indices=range(64 * 3, 64 * 4))
            self.dims = getattr(self, "dims", self.random_predict[0].shape)

    def train_dataloader(self):
        return DataLoader(self.random_train)

    def val_dataloader(self):
        return DataLoader(self.random_val)

    def test_dataloader(self):
        return DataLoader(self.random_test)

    def predict_dataloader(self):
        return DataLoader(self.random_predict)
__all__ = ["BoringModel", "NpyGenerator"]
| 30 | 100 | 0.62584 | 6,816 | 0.88062 | 191 | 0.024677 | 0 | 0 | 0 | 0 | 1,536 | 0.19845 |
2a863733ace28ef6349e137d5a4a527f5a173db6 | 423 | py | Python | bootstrapvz/providers/virtualbox/tasks/boot.py | qqshfox/bootstrap-vz | 38fc7c52407d015d3c75867bfea266d0aec6d7e2 | [
"Apache-2.0"
] | 1 | 2016-07-21T15:10:26.000Z | 2016-07-21T15:10:26.000Z | bootstrapvz/providers/virtualbox/tasks/boot.py | qqshfox/bootstrap-vz | 38fc7c52407d015d3c75867bfea266d0aec6d7e2 | [
"Apache-2.0"
] | 3 | 2017-05-10T15:04:10.000Z | 2017-06-02T18:14:50.000Z | bootstrapvz/providers/virtualbox/tasks/boot.py | qqshfox/bootstrap-vz | 38fc7c52407d015d3c75867bfea266d0aec6d7e2 | [
"Apache-2.0"
] | 14 | 2016-12-15T09:29:10.000Z | 2021-01-28T13:06:14.000Z | from bootstrapvz.base import Task
from bootstrapvz.common import phases
from bootstrapvz.common.tasks import grub
class AddVirtualConsoleGrubOutputDevice(Task):
    # Human-readable description shown by bootstrap-vz.
    description = 'Adding `tty0\' as output device for grub'
    # Runs during system modification, and must complete before the grub
    # config is written out (declared via `successors`).
    phase = phases.system_modification
    successors = [grub.WriteGrubConfig]

    @classmethod
    def run(cls, info):
        """Append `console=tty0` to the default kernel command line so boot
        output is directed to the tty0 virtual console."""
        info.grub_config['GRUB_CMDLINE_LINUX_DEFAULT'].append('console=tty0')
| 30.214286 | 77 | 0.763593 | 306 | 0.723404 | 0 | 0 | 114 | 0.269504 | 0 | 0 | 84 | 0.198582 |
2a89f5934d16a217f8db591d993837148debfab4 | 12,444 | py | Python | tensorflow_similarity/distances.py | djm2131/similarity | 1431e39b77b17b72513d6ccb582f8af871ba76d0 | [
"Apache-2.0"
] | null | null | null | tensorflow_similarity/distances.py | djm2131/similarity | 1431e39b77b17b72513d6ccb582f8af871ba76d0 | [
"Apache-2.0"
] | null | null | null | tensorflow_similarity/distances.py | djm2131/similarity | 1431e39b77b17b72513d6ccb582f8af871ba76d0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vectorized embedding pairwise distances computation functions"""
from abc import ABC, abstractmethod
from typing import List, Optional, Union

import tensorflow as tf

from .types import FloatTensor
class Distance(ABC):
    """Base class for pairwise embedding distances.

    Note: don't forget to add your distance to the DISTANCES list
    and add alias names in it.
    """

    def __init__(self, name: str, aliases: Optional[List[str]] = None):
        """Record the canonical name and lookup aliases.

        Args:
            name: Canonical distance name.
            aliases: Alternate names accepted by `distance_canonicalizer`.
        """
        self.name = name
        # Fix: the previous `aliases: List[str] = []` default was a single
        # mutable list shared by every instance created with the default.
        self.aliases = aliases if aliases is not None else []

    @abstractmethod
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """

    def __call__(self, embeddings: FloatTensor):
        return self.call(embeddings)

    def __str__(self) -> str:
        return self.name

    def get_config(self):
        # No serializable configuration by default; subclasses may extend.
        return {}
@tf.keras.utils.register_keras_serializable(package="Similarity")
class InnerProductSimilarity(Distance):
    """Compute the pairwise inner product between embeddings.

    The [Inner product](https://en.wikipedia.org/wiki/Inner_product_space) is
    a measure of similarity where the more similar vectors have the largest
    values.

    NOTE! This is not a distance and is likely not what you want to use with
    the built in losses. At the very least this will flip the sign on the
    margin in many of the losses. This is likely meant to be used with custom
    loss functions that expect a similarity instead of a distance.
    """

    def __init__(self):
        "Init Inner product similarity"
        super().__init__('inner_product', ['ip'])

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise similarities for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # Gram matrix: entry (i, j) is <e_i, e_j>.
        gram: FloatTensor = tf.linalg.matmul(
            embeddings, embeddings, transpose_b=True)
        return gram
@tf.keras.utils.register_keras_serializable(package="Similarity")
class CosineDistance(Distance):
    """Compute pairwise cosine distances between embeddings.

    The [Cosine Distance](https://en.wikipedia.org/wiki/Cosine_similarity) is
    an angular distance that varies from 0 (similar) to 1 (dissimilar).
    """

    def __init__(self):
        "Init Cosine distance"
        super().__init__('cosine')

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one. The embeddings
            are expected to be normalized.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # For normalized embeddings, cosine distance = 1 - cosine similarity;
        # clip tiny negatives introduced by floating-point error.
        similarities = tf.linalg.matmul(
            embeddings, embeddings, transpose_b=True)
        clipped: FloatTensor = tf.math.maximum(1 - similarities, 0.0)
        return clipped
@tf.keras.utils.register_keras_serializable(package="Similarity")
class EuclideanDistance(Distance):
    """Compute pairwise euclidean distances between embeddings.

    The [Euclidean Distance](https://en.wikipedia.org/wiki/Euclidean_distance)
    is the standard distance to measure the line segment between two embeddings
    in the Cartesian point. The larger the distance the more dissimilar
    the embeddings are.

    **Alias**: L2 Norm, Pythagorean
    """

    def __init__(self):
        "Init Euclidean distance"
        super().__init__('euclidean', ['l2', 'pythagorean'])

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # ||x||^2 per row, kept 2-D (keepdims) so it broadcasts below.
        squared_norm = tf.math.square(embeddings)
        squared_norm = tf.math.reduce_sum(squared_norm, axis=1, keepdims=True)

        # ||x - y||^2 = ||x||^2 - 2<x, y> + ||y||^2 for all pairs.
        distances: FloatTensor = 2.0 * tf.linalg.matmul(
            embeddings, embeddings, transpose_b=True)
        distances = squared_norm - distances + tf.transpose(squared_norm)

        # Avoid NaN and inf gradients when back propagating through the sqrt.
        # values smaller than 1e-18 produce inf for the gradient, and 0.0
        # produces NaN. All values smaller than 1e-13 should produce a gradient
        # of 1.0.
        dist_mask = tf.math.greater_equal(distances, 1e-18)
        distances = tf.math.maximum(distances, 1e-18)
        distances = tf.math.sqrt(distances) * tf.cast(dist_mask, tf.float32)

        return distances
@tf.keras.utils.register_keras_serializable(package="Similarity")
class ModifiedEuclideanDistance(Distance):
    """Compute pairwise euclidean distances between embeddings.
    Assumes last element of embedding vector is output of adjustment network.

    The [Euclidean Distance](https://en.wikipedia.org/wiki/Euclidean_distance)
    is the standard distance to measure the line segment between two embeddings
    in the Cartesian point. The larger the distance the more dissimilar
    the embeddings are.
    """

    def __init__(self):
        "Init Modified Euclidean distance"
        super().__init__('modified_euclidean', ['modl2', 'modeuclidean'])

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # Squared norm of the FULL vector (including the adjustment element).
        squared_norm = tf.math.square(embeddings)
        squared_norm = tf.math.reduce_sum(squared_norm, axis=1, keepdims=True)

        # Cross term uses only the first D-1 elements (adjustment excluded).
        # NOTE(review): because squared_norm includes the last element while
        # the cross term excludes it, the diagonal is nonzero (2 * adj^2) —
        # confirm this asymmetry is the intended "modified" behavior.
        distances: FloatTensor = 2.0 * tf.linalg.matmul(
            embeddings[:, :-1], embeddings[:, :-1], transpose_b=True)
        distances = squared_norm - distances + tf.transpose(squared_norm)

        # Avoid NaN and inf gradients when back propagating through the sqrt.
        # values smaller than 1e-18 produce inf for the gradient, and 0.0
        # produces NaN. All values smaller than 1e-13 should produce a gradient
        # of 1.0.
        dist_mask = tf.math.greater_equal(distances, 1e-18)
        distances = tf.math.maximum(distances, 1e-18)
        distances = tf.math.sqrt(distances) * tf.cast(dist_mask, tf.float32)

        return distances
@tf.keras.utils.register_keras_serializable(package="Similarity")
class SquaredEuclideanDistance(Distance):
    """Compute pairwise squared Euclidean distance.

    The [Squared Euclidean Distance](https://en.wikipedia.org/wiki/Euclidean_distance#Squared_Euclidean_distance) is
    a distance that varies from 0 (similar) to infinity (dissimilar).
    """

    def __init__(self):
        super().__init__('squared_euclidean', ['sql2', 'sqeuclidean'])

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # Row-wise ||x||^2, kept 2-D so it broadcasts across the pair matrix.
        row_sq = tf.math.reduce_sum(
            tf.math.square(embeddings), axis=1, keepdims=True)
        # ||x - y||^2 = ||x||^2 - 2<x, y> + ||y||^2; clip negatives from
        # floating-point error.
        cross: FloatTensor = 2.0 * tf.linalg.matmul(
            embeddings, embeddings, transpose_b=True)
        pairwise = row_sq - cross + tf.transpose(row_sq)
        return tf.math.maximum(pairwise, 0.0)
@tf.keras.utils.register_keras_serializable(package="Similarity")
class ManhattanDistance(Distance):
    """Compute pairwise Manhattan distances between embeddings.

    The [Manhattan Distance](https://en.wikipedia.org/wiki/Euclidean_distance)
    is the sum of the lengths of the projections of the line segment between
    two embeddings onto the Cartesian axes. The larger the distance the more
    dissimilar the embeddings are.
    """

    def __init__(self):
        "Init Manhattan distance"
        super().__init__('manhattan', ['l1', 'taxicab'])

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # Flatten each embedding, form all pairwise difference vectors via
        # broadcasting, then take the L1 norm of each difference.
        flat = tf.reshape(embeddings, shape=[tf.shape(embeddings)[0], -1])
        diffs = tf.expand_dims(flat, axis=1) - tf.expand_dims(flat, axis=0)
        result: FloatTensor = tf.norm(diffs, 1, axis=2)
        return result
@tf.keras.utils.register_keras_serializable(package="Similarity")
class SNRDistance(Distance):
    """
    Computes pairwise SNR distances between embeddings.

    The [Signal-to-Noise Ratio distance](https://arxiv.org/abs/1904.02616)
    is the ratio of noise variance to the feature variance.
    """

    def __init__(self):
        "Init SNR distance"
        super().__init__('snr')

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise snr distances for a given batch of embeddings.
        SNR(i, j): anchor i and compared feature j
        SNR(i,j) may not be equal to SNR(j, i)

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # Calculating feature variance for each example
        # var(x) = E[x^2] - E[x]^2, per anchor row.
        embed_mean = tf.math.reduce_mean(embeddings, axis=1)
        embed_square = tf.math.square(embeddings)
        embed_sq_mean = tf.math.reduce_mean(embed_square, axis=1)
        anchor_var = embed_sq_mean - tf.square(embed_mean)

        # Calculating pairwise noise variances
        # delta[i, j] = x_i - x_j; noise variance is var over feature dim.
        x_rs = tf.reshape(embeddings, shape=[tf.shape(embeddings)[0], -1])
        delta = tf.expand_dims(x_rs, axis=1) - tf.expand_dims(x_rs, axis=0)
        delta_mean = tf.math.reduce_mean(delta, axis=2)
        delta_sq = tf.math.square(delta)
        delta_sq_mean = tf.math.reduce_mean(delta_sq, axis=2)
        noise_var = delta_sq_mean - tf.square(delta_mean)

        # SNR(i, j) = noise_var(i, j) / anchor_var(i) — asymmetric by design.
        distances: FloatTensor = tf.divide(noise_var,
                                           tf.expand_dims(anchor_var, axis=1))
        return distances
# List of implemented distances
DISTANCES = [
InnerProductSimilarity(),
EuclideanDistance(),
ModifiedEuclideanDistance(),
SquaredEuclideanDistance(),
ManhattanDistance(),
CosineDistance(),
SNRDistance()
]
def distance_canonicalizer(user_distance: Union[Distance, str]) -> Distance:
    """Normalize user requested distance to its matching Distance object.

    Args:
        user_distance: Requested distance either by name or by object

    Returns:
        Distance: Requested object name.
    """
    if isinstance(user_distance, Distance):
        # User supplied a concrete distance object: use it as-is.
        return user_distance

    if isinstance(user_distance, str):
        # Build alias -> canonical name and canonical name -> object tables
        # from the registered distances.
        alias_to_name = {}
        name_to_obj = {}
        for dist in DISTANCES:
            alias_to_name[dist.name] = dist.name
            name_to_obj[dist.name] = dist
            for alias in dist.aliases:
                alias_to_name[alias] = dist.name

        key = user_distance.lower().strip()
        if key not in alias_to_name:
            raise ValueError('Metric not supported by the framework')
        return name_to_obj[alias_to_name[key]]

    raise ValueError('Unknown distance: must either be a MetricDistance\
 or a known distance function')
| 35.452991 | 116 | 0.676712 | 9,785 | 0.786323 | 0 | 0 | 9,803 | 0.787769 | 0 | 0 | 6,351 | 0.510366 |
2a8a638716da9a921d2678fd212392c39c24f195 | 547 | py | Python | satang_pro_signer/signer.py | thebevrishot/satang-pro-signer | e0d6affdaf3b3bf5a670bda160f8a7d341b41707 | [
"MIT"
] | null | null | null | satang_pro_signer/signer.py | thebevrishot/satang-pro-signer | e0d6affdaf3b3bf5a670bda160f8a7d341b41707 | [
"MIT"
] | null | null | null | satang_pro_signer/signer.py | thebevrishot/satang-pro-signer | e0d6affdaf3b3bf5a670bda160f8a7d341b41707 | [
"MIT"
] | null | null | null | import hashlib
import hmac
from satang_pro_signer import preparer
class Signer:
    """Signs arbitrary objects with HMAC-SHA512 using a shared secret."""

    def __init__(self, secret: bytes):
        self.secret = secret

    def sign(self, obj) -> bytes:
        """Return the HMAC-SHA512 digest of the canonically encoded object."""
        encoded = preparer.Preparer(obj).encode()
        payload = bytes(encoded, encoding='utf-8')
        try:
            # better performance
            return hmac.digest(self.secret, payload, 'sha512')
        except AttributeError:
            # compatible with Python 3.6
            return hmac.new(self.secret, payload, hashlib.sha512).digest()
2a8b148c32e02ed52154cfd52c0605d99687fd17 | 76 | py | Python | atcoder/abc/a035.py | tomato-300yen/coding | db6f440a96d8c83f486005c650461a69f27e3926 | [
"MIT"
] | null | null | null | atcoder/abc/a035.py | tomato-300yen/coding | db6f440a96d8c83f486005c650461a69f27e3926 | [
"MIT"
] | null | null | null | atcoder/abc/a035.py | tomato-300yen/coding | db6f440a96d8c83f486005c650461a69f27e3926 | [
"MIT"
] | null | null | null | W, H = map(int, input().split())
print("4:3" if 4 * H == 3 * W else "16:9")
| 25.333333 | 42 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.144737 |
2a8c3235521952c85b8f72cbb5f29d72cf790334 | 24,976 | py | Python | carbondesign/tests/test_multi_select_html.py | dozymoe/django-carbondesign | 34aed0cfdccfa90fcb5bf2bbd347229815f1417b | [
"MIT"
] | null | null | null | carbondesign/tests/test_multi_select_html.py | dozymoe/django-carbondesign | 34aed0cfdccfa90fcb5bf2bbd347229815f1417b | [
"MIT"
] | null | null | null | carbondesign/tests/test_multi_select_html.py | dozymoe/django-carbondesign | 34aed0cfdccfa90fcb5bf2bbd347229815f1417b | [
"MIT"
] | null | null | null | # pylint:disable=missing-module-docstring,missing-class-docstring,missing-function-docstring
from django import forms
#-
from .base import compare_template, SimpleTestCase
class DummyForm(forms.Form):
    """Fixture form with two optional choice fields used by the
    MultiSelect rendering tests below."""

    # No choices: renders an empty (but valid) multi-select menu.
    choice1 = forms.ChoiceField(
        required=False,
        help_text="Optional helper text here")
    # Five options, the last deliberately long to exercise text wrapping.
    choice2 = forms.ChoiceField(
        required=False,
        choices=(
            ('one', 'Option 1'), ('two', 'Option 2'),
            ('three', 'Option 3'), ('four', 'Option 4'),
            ('five', "An example option that is really long to show what "
                "should be done to handle long text"),
        ),
        help_text="Optional helper text here")
class MultiSelectHtmlTest(SimpleTestCase):
maxDiff = None
def test_filterable(self):
    """Filterable MultiSelect over an empty choice field renders a closed
    Carbon list box with a filter input and helper text."""
    form = DummyForm(data={'choice2': ['one']})
    context = {'form': form}
    template = """
{% load carbondesign %}
{% MultiSelect form.choice1 mode="filterable" label="Multi-Select label" %}
"""
    expected = """
<div class="bx--form-item">
  <div class="bx--list-box__wrapper ">
    <label class="bx--label">
      Multi-Select label
    </label>
    <div class="bx--multi-select bx--list-box bx--combo-box bx--multi-select--filterable">
      <div role="button" class="bx--list-box__field" tabindex="0"
          aria-label="Open menu" aria-haspopup="true" aria-expanded="false">
        <input class="bx--text-input" placeholder="Filter...">
        <div class="bx--list-box__menu-icon">
          <svg focusable="false" preserveAspectRatio="xMidYMid meet"
              xmlns="http://www.w3.org/2000/svg" fill="currentColor"
              aria-label="Open menu" width="16" height="16" viewBox="0 0 16 16"
              role="img">
            <path d="M8 11L3 6 3.7 5.3 8 9.6 12.3 5.3 13 6z"></path>
          </svg>
        </div>
      </div>
      <fieldset class="bx--list-box__menu" role="listbox">
        <legend class="bx--assistive-text">
          Multi-Select label
        </legend>
      </fieldset>
    </div>
    <div id="hint-id_choice1" class="bx--form__helper-text">
      Optional helper text here
    </div>
  </div>
</div>
"""
    rendered = compare_template(template, expected, context)
    self.assertEqual(*rendered)
def test_filterable_expanded(self):
    """Expanded filterable MultiSelect with one selection renders the open
    menu, the selection-count tag, and every checkbox option (the bound
    'one' option checked)."""
    form = DummyForm(data={'choice2': ['one']})
    context = {'form': form}
    template = """
{% load carbondesign %}
{% MultiSelect form.choice2 mode="filterable" label="Multi-Select label" expanded=True %}
"""
    expected = """
<div class="bx--form-item">
  <div class="bx--list-box__wrapper">
    <label class="bx--label">
      Multi-Select label
    </label>
    <div class="bx--multi-select bx--list-box bx--combo-box bx--multi-select--filterable bx--list-box--expanded bx--multi-select--selected">
      <div role="button" class="bx--list-box__field" tabindex="0"
          aria-label="Close menu" aria-haspopup="true" aria-expanded="true">
        <div role="button"
            class="bx--list-box__selection bx--list-box__selection--multi bx--tag--filter"
            tabindex="0" title="Clear all selected items">
          1
          <svg focusable="false" preserveAspectRatio="xMidYMid meet"
              xmlns="http://www.w3.org/2000/svg" fill="currentColor"
              aria-label="Clear selection" width="16" height="16"
              viewBox="0 0 32 32" role="img">
            <path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
          </svg>
        </div>
        <input class="bx--text-input" placeholder="Filter...">
        <div class="bx--list-box__menu-icon">
          <svg focusable="false" preserveAspectRatio="xMidYMid meet"
              xmlns="http://www.w3.org/2000/svg" fill="currentColor"
              aria-label="Close menu" width="16" height="16" viewBox="0 0 16 16"
              role="img">
            <path d="M8 5L13 10 12.3 10.7 8 6.4 3.7 10.7 3 10z"></path>
          </svg>
        </div>
      </div>
      <fieldset class="bx--list-box__menu" role="listbox">
        <legend class="bx--assistive-text">
          Multi-Select label
        </legend>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 1" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-1" value="one" checked>
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 1
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 2" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-2" value="two">
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 2
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 3" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-3" value="three">
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 3
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 4" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-4" value="four">
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 4
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="An example option that is really long to show what should be done to handle long text" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-5" value="five" >
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  An example option that is really long to show what should be done to handle long text
                </span>
              </label>
            </div>
          </div>
        </div>
      </fieldset>
    </div>
    <div id="hint-id_choice2" class="bx--form__helper-text">
      Optional helper text here
    </div>
  </div>
</div>
"""
    rendered = compare_template(template, expected, context)
    self.assertEqual(*rendered)
def test_inline(self):
    """Inline MultiSelect over an empty choice field renders a closed
    inline list box with the default 'Multi select options' label."""
    form = DummyForm(data={'choice2': ['one']})
    context = {'form': form}
    template = """
{% load carbondesign %}
{% MultiSelect form.choice1 mode="inline" label="Multi-Select label" %}
"""
    expected = """
<div class="bx--form-item">
  <div class="bx--list-box__wrapper bx--list-box__wrapper--inline">
    <label class="bx--label">
      Multi-Select label
    </label>
    <div class="bx--multi-select bx--list-box bx--list-box--inline">
      <div role="button" class="bx--list-box__field" tabindex="0"
          aria-label="Open menu" aria-haspopup="true" aria-expanded="false">
        <span class="bx--list-box__label">Multi select options</span>
        <div class="bx--list-box__menu-icon">
          <svg focusable="false" preserveAspectRatio="xMidYMid meet"
              xmlns="http://www.w3.org/2000/svg" fill="currentColor"
              aria-label="Open menu" width="16" height="16" viewBox="0 0 16 16"
              role="img">
            <path d="M8 11L3 6 3.7 5.3 8 9.6 12.3 5.3 13 6z"></path>
          </svg>
        </div>
      </div>
      <fieldset class="bx--list-box__menu" role="listbox">
        <legend class="bx--assistive-text">
          Multi-Select label
        </legend>
      </fieldset>
    </div>
    <div id="hint-id_choice1" class="bx--form__helper-text">
      Optional helper text here
    </div>
  </div>
</div>
"""
    rendered = compare_template(template, expected, context)
    self.assertEqual(*rendered)
    def test_inline_expanded(self):
        """Inline MultiSelect with expanded=True: menu is open, the selection
        counter tag shows 1 (choice2='one' is bound), and all five checkbox
        menu items are rendered with the first one checked."""
        form = DummyForm(data={'choice2': ['one']})
        context = {'form': form}
        template = """
{% load carbondesign %}
{% MultiSelect form.choice2 mode="inline" label="Multi-Select label" expanded=True %}
"""
        expected = """
<div class="bx--form-item">
  <div class="bx--list-box__wrapper bx--list-box__wrapper--inline">
    <label class="bx--label">
      Multi-Select label
    </label>
    <div class="bx--multi-select bx--list-box bx--list-box--inline bx--list-box--expanded bx--multi-select--selected">
      <div role="button" class="bx--list-box__field" tabindex="0"
          aria-label="Close menu" aria-haspopup="true" aria-expanded="true">
        <div role="button"
            class="bx--list-box__selection bx--list-box__selection--multi bx--tag--filter"
            tabindex="0" title="Clear all selected items">
          1
          <svg focusable="false" preserveAspectRatio="xMidYMid meet"
              xmlns="http://www.w3.org/2000/svg" fill="currentColor"
              aria-label="Clear selection" width="16" height="16"
              viewBox="0 0 32 32" role="img">
            <path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
          </svg>
        </div>
        <span class="bx--list-box__label">Multi select options</span>
        <div class="bx--list-box__menu-icon">
          <svg focusable="false" preserveAspectRatio="xMidYMid meet"
              xmlns="http://www.w3.org/2000/svg" fill="currentColor"
              aria-label="Close menu" width="16" height="16" viewBox="0 0 16 16"
              role="img">
            <path d="M8 5L13 10 12.3 10.7 8 6.4 3.7 10.7 3 10z"></path>
          </svg>
        </div>
      </div>
      <fieldset class="bx--list-box__menu" role="listbox">
        <legend class="bx--assistive-text">
          Multi-Select label
        </legend>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 1" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-1" value="one" checked>
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 1
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 2" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-2" value="two">
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 2
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 3" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-3" value="three">
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 3
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 4" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-4" value="four">
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 4
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="An example option that is really long to show what should be done to handle long text" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-5" value="five">
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  An example option that is really long to show what should be done to handle long text
                </span>
              </label>
            </div>
          </div>
        </div>
      </fieldset>
    </div>
    <div id="hint-id_choice2" class="bx--form__helper-text">
      Optional helper text here
    </div>
  </div>
</div>
"""
        rendered = compare_template(template, expected, context)
        self.assertEqual(*rendered)
    def test_light(self):
        """Collapsed MultiSelect with light=True: the list box carries the
        ``bx--list-box--light`` modifier and no ``--inline`` wrapper class."""
        form = DummyForm(data={'choice2': ['one']})
        context = {'form': form}
        template = """
{% load carbondesign %}
{% MultiSelect form.choice1 label="Multi-Select label" light=True %}
"""
        expected = """
<div class="bx--form-item">
  <div class="bx--list-box__wrapper">
    <label class="bx--label">
      Multi-Select label
    </label>
    <div class="bx--multi-select bx--list-box bx--list-box--light">
      <div role="button" class="bx--list-box__field" tabindex="0"
          aria-label="Open menu" aria-haspopup="true" aria-expanded="false">
        <span class="bx--list-box__label">Multi select options</span>
        <div class="bx--list-box__menu-icon">
          <svg focusable="false" preserveAspectRatio="xMidYMid meet"
              xmlns="http://www.w3.org/2000/svg" fill="currentColor"
              aria-label="Open menu" width="16" height="16" viewBox="0 0 16 16"
              role="img">
            <path d="M8 11L3 6 3.7 5.3 8 9.6 12.3 5.3 13 6z"></path>
          </svg>
        </div>
      </div>
      <fieldset class="bx--list-box__menu" role="listbox">
        <legend class="bx--assistive-text">
          Multi-Select label
        </legend>
      </fieldset>
    </div>
    <div id="hint-id_choice1" class="bx--form__helper-text">
      Optional helper text here
    </div>
  </div>
</div>
"""
        rendered = compare_template(template, expected, context)
        self.assertEqual(*rendered)
    def test_light_expanded(self):
        """Light MultiSelect with expanded=True: open menu plus the
        ``--light``, ``--expanded`` and ``--selected`` modifiers; all five
        checkbox options rendered, Option 1 checked."""
        form = DummyForm(data={'choice2': ['one']})
        context = {'form': form}
        template = """
{% load carbondesign %}
{% MultiSelect form.choice2 label="Multi-Select label" expanded=True light=True %}
"""
        expected = """
<div class="bx--form-item">
  <div class="bx--list-box__wrapper">
    <label class="bx--label">
      Multi-Select label
    </label>
    <div class="bx--multi-select bx--list-box bx--list-box--light bx--list-box--expanded bx--multi-select--selected">
      <div role="button" class="bx--list-box__field" tabindex="0"
          aria-label="Close menu" aria-haspopup="true" aria-expanded="true">
        <div role="button"
            class="bx--list-box__selection bx--list-box__selection--multi bx--tag--filter"
            tabindex="0" title="Clear all selected items">
          1
          <svg focusable="false" preserveAspectRatio="xMidYMid meet"
              xmlns="http://www.w3.org/2000/svg" fill="currentColor"
              aria-label="Clear selection" width="16" height="16"
              viewBox="0 0 32 32" role="img">
            <path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
          </svg>
        </div>
        <span class="bx--list-box__label">Multi select options</span>
        <div class="bx--list-box__menu-icon">
          <svg focusable="false" preserveAspectRatio="xMidYMid meet"
              xmlns="http://www.w3.org/2000/svg" fill="currentColor"
              aria-label="Close menu" width="16" height="16" viewBox="0 0 16 16"
              role="img">
            <path d="M8 5L13 10 12.3 10.7 8 6.4 3.7 10.7 3 10z"></path>
          </svg>
        </div>
      </div>
      <fieldset class="bx--list-box__menu" role="listbox">
        <legend class="bx--assistive-text">
          Multi-Select label
        </legend>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 1" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-1" value="one" checked>
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 1
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 2" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-2" value="two">
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 2
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 3" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-3" value="three">
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 3
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 4" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-4" value="four">
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 4
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="An example option that is really long to show what should be done to handle long text" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-5" value="five" >
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  An example option that is really long to show what should be done to handle long text
                </span>
              </label>
            </div>
          </div>
        </div>
      </fieldset>
    </div>
    <div id="hint-id_choice2" class="bx--form__helper-text">
      Optional helper text here
    </div>
  </div>
</div>
"""
        rendered = compare_template(template, expected, context)
        self.assertEqual(*rendered)
    def test_default(self):
        """Default collapsed MultiSelect: no mode/light modifiers, empty menu
        fieldset while closed."""
        form = DummyForm(data={'choice2': ['one']})
        context = {'form': form}
        template = """
{% load carbondesign %}
{% MultiSelect form.choice1 label="Multi-Select label" %}
"""
        expected = """
<div class="bx--form-item">
  <div class="bx--list-box__wrapper ">
    <label class="bx--label">
      Multi-Select label
    </label>
    <div class="bx--multi-select bx--list-box">
      <div role="button" class="bx--list-box__field" tabindex="0"
          aria-label="Open menu" aria-haspopup="true" aria-expanded="false">
        <span class="bx--list-box__label">Multi select options</span>
        <div class="bx--list-box__menu-icon">
          <svg focusable="false" preserveAspectRatio="xMidYMid meet"
              xmlns="http://www.w3.org/2000/svg" fill="currentColor"
              aria-label="Open menu" width="16" height="16" viewBox="0 0 16 16"
              role="img">
            <path d="M8 11L3 6 3.7 5.3 8 9.6 12.3 5.3 13 6z"></path>
          </svg>
        </div>
      </div>
      <fieldset class="bx--list-box__menu" role="listbox">
        <legend class="bx--assistive-text">
          Multi-Select label
        </legend>
      </fieldset>
    </div>
    <div id="hint-id_choice1" class="bx--form__helper-text">
      Optional helper text here
    </div>
  </div>
</div>
"""
        rendered = compare_template(template, expected, context)
        self.assertEqual(*rendered)
    def test_default_expanded(self):
        """Default MultiSelect with expanded=True: open menu with selection
        counter tag (1) and all five checkbox options, Option 1 checked."""
        form = DummyForm(data={'choice2': ['one']})
        context = {'form': form}
        template = """
{% load carbondesign %}
{% MultiSelect form.choice2 label="Multi-Select label" expanded=True %}
"""
        expected = """
<div class="bx--form-item">
  <div class="bx--list-box__wrapper">
    <label class="bx--label">
      Multi-Select label
    </label>
    <div class="bx--multi-select bx--list-box bx--list-box--expanded bx--multi-select--selected">
      <div role="button" class="bx--list-box__field" tabindex="0"
          aria-label="Close menu" aria-haspopup="true" aria-expanded="true">
        <div role="button"
            class="bx--list-box__selection bx--list-box__selection--multi bx--tag--filter"
            tabindex="0" title="Clear all selected items">
          1
          <svg focusable="false" preserveAspectRatio="xMidYMid meet"
              xmlns="http://www.w3.org/2000/svg" fill="currentColor"
              aria-label="Clear selection" width="16" height="16"
              viewBox="0 0 32 32" role="img">
            <path d="M24 9.4L22.6 8 16 14.6 9.4 8 8 9.4 14.6 16 8 22.6 9.4 24 16 17.4 22.6 24 24 22.6 17.4 16 24 9.4z"></path>
          </svg>
        </div>
        <span class="bx--list-box__label">Multi select options</span>
        <div class="bx--list-box__menu-icon">
          <svg focusable="false" preserveAspectRatio="xMidYMid meet"
              xmlns="http://www.w3.org/2000/svg" fill="currentColor"
              aria-label="Close menu" width="16" height="16" viewBox="0 0 16 16"
              role="img">
            <path d="M8 5L13 10 12.3 10.7 8 6.4 3.7 10.7 3 10z"></path>
          </svg>
        </div>
      </div>
      <fieldset class="bx--list-box__menu" role="listbox">
        <legend class="bx--assistive-text">
          Multi-Select label
        </legend>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 1" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-1" value="one" checked>
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 1
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 2" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-2" value="two">
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 2
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 3" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-3" value="three">
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 3
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="Option 4" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-4" value="four">
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  Option 4
                </span>
              </label>
            </div>
          </div>
        </div>
        <div class="bx--list-box__menu-item">
          <div class="bx--list-box__menu-item__option">
            <div class="bx--form-item bx--checkbox-wrapper">
              <label title="An example option that is really long to show what should be done to handle long text" class="bx--checkbox-label">
                <input type="checkbox" name="choice2" readonly class="bx--checkbox"
                    id="id_choice2-5" value="five">
                <span class="bx--checkbox-appearance"></span>
                <span class="bx--checkbox-label-text">
                  An example option that is really long to show what should be done to handle long text
                </span>
              </label>
            </div>
          </div>
        </div>
      </fieldset>
    </div>
    <div id="hint-id_choice2" class="bx--form__helper-text">
      Optional helper text here
    </div>
  </div>
</div>
"""
        rendered = compare_template(template, expected, context)
        self.assertEqual(*rendered)
| 36.621701 | 140 | 0.61427 | 24,800 | 0.992953 | 0 | 0 | 0 | 0 | 0 | 0 | 22,570 | 0.903668 |
2a8e83999cf4e97fafb8dbeb31077fa54eca387c | 7,826 | py | Python | visualization/POF/utils/keypoint_conversion.py | alvaro-budria/body2hands | 0eba438b4343604548120bdb03c7e1cb2b08bcd6 | [
"BSD-3-Clause"
] | 63 | 2021-05-14T02:55:16.000Z | 2022-03-13T01:51:12.000Z | visualization/POF/utils/keypoint_conversion.py | human2b/body2hands | 8ab4b206dc397c3b326f2b4ec9448c84ee8801fe | [
"BSD-3-Clause"
] | 9 | 2021-06-24T09:59:41.000Z | 2021-12-31T08:15:20.000Z | visualization/POF/utils/keypoint_conversion.py | human2b/body2hands | 8ab4b206dc397c3b326f2b4ec9448c84ee8801fe | [
"BSD-3-Clause"
] | 9 | 2021-05-17T03:33:28.000Z | 2022-02-17T02:30:44.000Z | import numpy as np
import numpy.linalg as nl
from utils.general import connMat
# ---------------------------------------------------------------------------
# Joint-order conversion tables.
# Each array maps keypoints between an external dataset/format ordering and
# this project's "main" ordering; the dict key names the body part the table
# applies to ('body', 'left_hand', ...).  Index 17 (body) / the max index is
# sometimes used as a "virtual" slot for joints that have no counterpart.
# ---------------------------------------------------------------------------
a4_to_main = {
    'body': np.array([1, 0, 9, 10, 11, 3, 4, 5, 12, 13, 14, 6, 7, 8, 17, 15, 18, 16, 19, 20], dtype=np.int64),  # convert to order of openpose
    '1_body': np.array([1, 0, 9, 10, 11, 3, 4, 5, 12, 13, 14, 6, 7, 8, 17, 15, 18, 16, 19, 20], dtype=np.int64),  # convert to order of openpose
    '2_body': np.array([1, 0, 9, 10, 11, 3, 4, 5, 12, 13, 14, 6, 7, 8, 17, 15, 18, 16, 19, 20], dtype=np.int64),  # convert to order of openpose
    'left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),  # convert to order of freiburg
    '1_left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),  # convert to order of freiburg
    '2_left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),  # convert to order of freiburg
    'right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),  # convert to order of freiburg
    '1_right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),  # convert to order of freiburg
    '2_right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),  # convert to order of freiburg
    'openpose_lhand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'openpose_rhand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'openpose_lhand_score': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'openpose_rhand_score': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
}
# Human3.6M body ordering (17 joints; 17 marks joints absent from "main").
human36m_to_main = {
    'body': np.array([9, 8, 14, 15, 16, 11, 12, 13, 4, 5, 6, 1, 2, 3, 17, 17, 17, 17, 10, 17], dtype=np.int64)
}
# MPI-INF-3DHP body ordering (28 marks joints absent from "main").
mpi3d_to_main = {
    'body': np.array([6, 5, 14, 15, 16, 9, 10, 11, 23, 24, 25, 18, 19, 20, 28, 28, 28, 28, 7], dtype=np.int64)
}
# Adam (Total Capture) model body joints used by this project.
adam_to_main = {
    'body': np.array([12, 17, 19, 21, 16, 18, 20, 2, 5, 8, 1, 4, 7], dtype=np.int64),
    'select_body_main': np.arange(1, 14, dtype=np.int64)
}
# COCO keypoint ordering (with the extra neck joint at 17).
COCO_to_main = {
    'body': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64),
    'body_valid': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64),
    'all_body': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64),
    'all_body_valid': np.array([0, 17, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3, 18, 19], dtype=np.int64)
}
SMPL_to_main = {  # actually COCOPLUS regressor to main
    'body': np.array([14, 12, 8, 7, 6, 9, 10, 11, 2, 1, 0, 3, 4, 5, 16, 15, 18, 17, 13], dtype=np.int64)
}
# Stereo Tracking Benchmark (STB) hand ordering.
STB_to_main = {
    'left_hand': np.array([0, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1], dtype=np.int64)
}
# MPII human pose ordering (16 marks joints absent from "main").
MPII_to_main = {
    'body': np.array([16, 8, 12, 11, 10, 13, 14, 15, 2, 1, 0, 3, 4, 5, 16, 16, 16, 16, 9], dtype=np.int64),
    'body_valid': np.array([16, 8, 12, 11, 10, 13, 14, 15, 2, 1, 0, 3, 4, 5, 16, 16, 16, 16, 9], dtype=np.int64)
}
# T. Simon hand-keypoint dataset ordering.
tsimon_to_main = {
    'left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'right_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'left_hand_valid': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'right_hand_valid': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
}
# GANerated hands dataset ordering (left is remapped, right is identity).
GAnerated_to_main = {
    'left_hand': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'left_hand_valid': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'left_hand_3d': np.array([0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9, 16, 15, 14, 13, 20, 19, 18, 17], dtype=np.int64),
    'right_hand': np.arange(21, dtype=np.int64),
    'right_hand_valid': np.arange(21, dtype=np.int64),
    'right_hand_3d': np.arange(21, dtype=np.int64)
}
# Canonical skeleton sizes (summed reference-segment lengths) used for
# scale normalization in assemble_total_3d.
std_body_size = 267.807
std_hand_size = (82.2705 + 79.8843) / 2
def compute_size(joint3d, type_str):
    """Return the summed length of reference skeleton segments.

    joint3d: (J, 3) array of 3D joints in "main" order.
    type_str: 'body' or 'hand' -- selects the connection table in connMat.
    For 'body' only a fixed subset of limb segments contributes; for 'hand'
    every connection does.  Used as the scale reference for normalization.
    """
    body_segments = (2, 3, 5, 6, 8, 9, 11, 12)  # limb bones measured for 'body'
    total = 0.0
    for idx, conn in enumerate(connMat[type_str]):
        if type_str != 'body':
            assert type_str == 'hand'
        elif idx not in body_segments:
            continue  # skip non-limb body connections
        total += nl.norm(joint3d[conn[0]] - joint3d[conn[1]])
    return total
def main_to_a4(joint):
    """Convert 20 body joints from "main" order to the 21-joint A4 order.

    Row 2 of the output (absent from "main") is synthesized as the midpoint
    of rows 6 and 12.
    """
    assert joint.shape[0] == 20
    output = np.zeros((21, joint.shape[1]), dtype=joint.dtype)
    # The mapping indices are all distinct, so a single fancy-index
    # assignment is equivalent to the element-wise scatter loop.
    output[a4_to_main['body'], :] = joint
    output[2, :] = (output[6, :] + output[12, :]) / 2
    return output
def main_to_a4_hand(joint):
    """Convert 21 hand joints from "main" order to the A4 hand order.

    The wrist (row 0) stays in place; each finger's 4 joints (rows 1-4,
    5-8, 9-12, 13-16, 17-20) are reversed in place.
    """
    assert joint.shape[0] == 21
    out = np.zeros(joint.shape, dtype=joint.dtype)
    out[0] = joint[0]
    for base in range(1, 21, 4):  # finger starts: 1, 5, 9, 13, 17
        out[base:base + 4] = joint[base:base + 4][::-1]
    return out
def assemble_total_3d(body, lhand, rhand):
    """Scale body and hands to canonical sizes, convert them to A4 joint
    order, and translate each hand so its wrist coincides with the body's
    wrist joint.

    Returns (stacked joints: 21 body + 21 left hand + 21 right hand,
    the body scale factor std_body_size / len_b).
    """
    # Rescale each part to its canonical skeleton size; a zero length means
    # the part is missing/degenerate and is left untouched.
    len_b = compute_size(body, 'body')
    if len_b > 0:
        sbody = (std_body_size / len_b) * body
    else:
        sbody = body
    len_l = compute_size(lhand, 'hand')
    if len_l > 0:
        slhand = (std_hand_size / len_l) * lhand
    else:
        slhand = lhand
    len_r = compute_size(rhand, 'hand')
    if len_r > 0:
        srhand = (std_hand_size / len_r) * rhand
    else:
        srhand = rhand
    # Reorder all parts into the A4 joint convention.
    sbody = main_to_a4(sbody)
    slhand = main_to_a4_hand(slhand)
    srhand = main_to_a4_hand(srhand)
    # A joint at exactly (0, 0, 0) is treated as missing.
    slhand_invalid = (slhand[:, 0] == 0) * (slhand[:, 1] == 0) * (slhand[:, 2] == 0)
    srhand_invalid = (srhand[:, 0] == 0) * (srhand[:, 1] == 0) * (srhand[:, 2] == 0)
    # A missing wrist (joint 0) invalidates the whole hand.
    if not slhand[0].any():
        slhand_invalid[:] = True
    if not srhand[0].any():
        srhand_invalid[:] = True
    # Attach each hand: translate so the hand wrist lands on the body wrist
    # (A4 indices 5 and 11 -- presumably left/right wrist; TODO confirm).
    lhand_idx_a4 = 5
    rhand_idx_a4 = 11
    shift_lhand = sbody[lhand_idx_a4] - slhand[0]
    shift_rhand = sbody[rhand_idx_a4] - srhand[0]
    slhand += shift_lhand
    srhand += shift_rhand
    # Re-zero the joints that were invalid before the shift.
    slhand[slhand_invalid] = 0
    srhand[srhand_invalid] = 0
    # NOTE(review): if len_b == 0 this final division raises
    # ZeroDivisionError -- confirm callers always pass a valid body.
    return np.concatenate([sbody, slhand, srhand], axis=0), std_body_size / len_b
def assemble_total_2d(body_2d, lhand_2d, rhand_2d):
    """Map crop-local 2D keypoints back to full-image coordinates and stack
    them in A4 order (21 body + 21 left-hand + 21 right-hand rows).

    Each argument is a dict with 'uv_local' (crop-space coordinates),
    'scale2d', 'crop_center2d' and a per-joint 'valid' mask.
    """
    keypoint_list = []
    for i, item in enumerate((body_2d, lhand_2d, rhand_2d)):
        keypoint = item['uv_local']
        # Undo the crop transform; 184 is presumably half of a 368-px crop
        # (network input size) -- TODO confirm against the preprocessing code.
        keypoint = (keypoint - 184) / item['scale2d'] + item['crop_center2d']
        valid = item['valid']
        keypoint = keypoint * np.stack([valid, valid], axis=1)  # remove those invalid values
        if i == 0:
            keypoint = main_to_a4(keypoint)
        else:
            keypoint = main_to_a4_hand(keypoint)
        keypoint_list.append(keypoint)
    ret = np.concatenate(keypoint_list, axis=0)
    ret[np.isnan(ret)] = 0.0  # nan when the whole joint is zero
    return ret
def main_to_human36m(joint):
    """Convert joints from "main" order to the 17-joint Human3.6M order.

    Mapping entries equal to 17 are virtual (no Human3.6M counterpart) and
    are skipped; mid-hip (0) and abdomen (7) are synthesized afterwards.
    """
    out = np.zeros((17, 3), dtype=joint.dtype)
    for src, dst in enumerate(human36m_to_main['body']):
        if dst == 17:  # virtual zero joint, nothing to copy
            continue
        out[dst] = np.copy(joint[src, :])
    out[0] = (out[1] + out[4]) / 2  # middle hip
    out[7] = (out[1] + out[4] + out[11] + out[14]) / 4  # abdomen (average of l/r hip, l/r shoulder)
    return out
| 43.966292 | 153 | 0.564528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,065 | 0.136085 |
2a8e849bd418c1e14d8f941c5af6a81e0f4c5254 | 15,584 | py | Python | rubika/__init__.py | sajjadsoleimani/rubika | 328e60b76dd04be695285b0735c36e151e65ef5d | [
"MIT"
] | 1 | 2022-01-22T10:29:12.000Z | 2022-01-22T10:29:12.000Z | rubika/__init__.py | Mahdiblackstar/rubika | 328e60b76dd04be695285b0735c36e151e65ef5d | [
"MIT"
] | null | null | null | rubika/__init__.py | Mahdiblackstar/rubika | 328e60b76dd04be695285b0735c36e151e65ef5d | [
"MIT"
] | 1 | 2022-03-21T07:44:49.000Z | 2022-03-21T07:44:49.000Z | from requests import post
from random import randint
from json import loads, dumps
import asyncio,base64,glob,json,math,urllib3,os,pathlib,random,sys,concurrent.futures,time
from tqdm import tqdm
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class encryption:
def __init__(self, auth):
self.key = bytearray(self.secret(auth), "UTF-8")
self.iv = bytearray.fromhex('00000000000000000000000000000000')
def replaceCharAt(self, e, t, i):
return e[0:t] + i + e[t + len(i):]
def secret(self, e):
t = e[0:8]
i = e[8:16]
n = e[16:24] + t + e[24:32] + i
s = 0
while s < len(n):
e = n[s]
if e >= '0' and e <= '9':
t = chr((ord(e[0]) - ord('0') + 5) % 10 + ord('0'))
n = self.replaceCharAt(n, s, t)
else:
t = chr((ord(e[0]) - ord('a') + 9) % 26 + ord('a'))
n = self.replaceCharAt(n, s, t)
s += 1
return n
def encrypt(self, text):
raw = pad(text.encode('UTF-8'), AES.block_size)
aes = AES.new(self.key, AES.MODE_CBC, self.iv)
enc = aes.encrypt(raw)
result = base64.b64encode(enc).decode('UTF-8')
return result
def decrypt(self, text):
aes = AES.new(self.key, AES.MODE_CBC, self.iv)
dec = aes.decrypt(base64.urlsafe_b64decode(text.encode('UTF-8')))
result = unpad(dec, AES.block_size).decode('UTF-8')
return result
class Bot:
    def __init__(self, auth):
        """Create a client bound to the rubika session token *auth*.

        The token authenticates every request and also seeds the AES helper
        used to encrypt/decrypt the `data_enc` payloads.
        """
        self.auth = auth  # session token sent with every API call
        self.enc = encryption(auth)  # payload cipher derived from the token
def sendMessage(self, chat_id, text, message_id=None):
if message_id == None:
return post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"text":text,
"reply_to_message_id":message_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/")
else:
return post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"text":text,
"reply_to_message_id":message_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/")
def deleteMessages(self, chat_id, message_ids):
return post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"deleteMessages",
"input":{
"object_guid":chat_id,
"message_ids":message_ids,
"type":"Global"
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c66.iranlms.ir/")
def getUserInfo(self, chat_id):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getUserInfo",
"input":{
"user_guid":chat_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c37.iranlms.ir/").json()["data_enc"]))
def getMessages(self, chat_id,min_id):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getMessagesInterval",
"input":{
"object_guid":chat_id,
"middle_message_id":min_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c67.iranlms.ir/").json().get("data_enc"))).get("data").get("messages")
def getInfoByUsername(self, username):
''' username should be without @ '''
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getObjectByUsername",
"input":{
"username":username
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c23.iranlms.ir/").json().get("data_enc")))
def banGroupMember(self, chat_id, user_id):
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"banGroupMember",
"input":{
"group_guid": chat_id,
"member_guid": user_id,
"action":"Set"
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c21.iranlms.ir/")
def invite(self, chat_id, user_ids):
return post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"addGroupMembers",
"input":{
"group_guid": chat_id,
"member_guids": user_ids
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c22.iranlms.ir/")
def getGroupAdmins(self, chat_id):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"client":{
"app_name":"Main",
"app_version":"2.9.5",
"lang_code":"fa",
"package":"ir.resaneh1.iptv",
"platform":"Android"
},
"input":{
"group_guid":chat_id
},
"method":"getGroupAdminMembers"
}))},url="https://messengerg2c22.iranlms.ir/").json().get("data_enc")))
def getMessagesInfo(self, chat_id, message_ids):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getMessagesByID",
"input":{
"object_guid": chat_id,
"message_ids": message_ids
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))}, url="https://messengerg2c24.iranlms.ir/").json()["data_enc"])).get("data").get("messages")
def setMembersAccess(self, chat_id, access_list):
return post(json={
"api_version": "4",
"auth": self.auth,
"client": {
"app_name": "Main",
"app_version": "2.9.5",
"lang_code": "fa",
"package": "ir.resaneh1.iptv",
"platform": "Android"
},
"data_enc": self.enc.encrypt(dumps({
"access_list": access_list,
"group_guid": chat_id
})),
"method": "setGroupDefaultAccess"
}, url="https://messengerg2c24.iranlms.ir/")
def getGroupMembers(self, chat_id):
return loads(self.enc.decrypt(post(json={
"api_version":"5",
"auth": self.auth,
"data_enc": self.enc.encrypt(dumps({
"method":"getGroupAllMembers",
"input":{
"group_guid": chat_id,
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))
}, url="https://messengerg2c17.iranlms.ir/").json()["data_enc"]))["data"]["in_chat_members"]
def getGroupInfo(self, chat_id):
return loads(self.enc.decrypt(post(
json={
"api_version":"5",
"auth": self.auth,
"data_enc": self.enc.encrypt(dumps({
"method":"getGroupInfo",
"input":{
"group_guid": chat_id,
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))}, url="https://messengerg2c24.iranlms.ir/").json()["data_enc"]))
def getGroupLink(self, chat_id):
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getGroupLink",
"input":{
"group_guid":chat_id
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c67.iranlms.ir/").json().get("data_enc"))).get("data").get("join_link")
# thanks for Sajjad Soleymani
def get_updates_all_chats(self):
time_stamp = str(random._floor(datetime.datetime.today().timestamp()) - 200)
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getChatsUpdates",
"input":{
"state":time_stamp,
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c67.iranlms.ir/").json().get("data_enc"))).get("data").get("chats")
def get_updates_chat(self, chat_id):
time_stamp = str(random._floor(datetime.datetime.today().timestamp()) - 200)
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getMessagesUpdates",
"input":{
"object_guid":chat_id,
"state":time_stamp
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c67.iranlms.ir/").json().get("data_enc"))).get("data").get("updated_messages")
def my_sticker_set(self):
time_stamp = str(random._floor(datetime.datetime.today().timestamp()) - 200)
return loads(self.enc.decrypt(post(json={"api_version":"5","auth": self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"getMyStickerSets",
"input":{},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c67.iranlms.ir/").json().get("data_enc"))).get("data")
def requestFile(name, size , mime):
o = ''
while str(o) != '<Response [200]>':
o = post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"requestSendFile",
"input":{
"file_name":name,
"size":size,
"mime":mime
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c66.iranlms.ir/")
try:
k = loads(self.enc.decrypt(o.json()["data_enc"]))
if k['status'] != 'OK' or k['status_det'] != 'OK':
o = '502'
except:
o = '502'
return k['data']
def fileUpload(bytef ,hash_send ,file_id ,url):
if len(bytef) <= 131072:
h = {
'auth':self.auth,
'chunk-size':str(len(bytef)),
'file-id':str(file_id),
'access-hash-send':hash_send,
'total-part':str(1),
'part-number':str(1)
}
t = False
while t == False:
try:
j = post(data=bytef,url=url,headers=h).text
j = loads(j)['data']['access_hash_rec']
t = True
except:
t = False
return j
else:
t = len(bytef) / 131072
t += 1
t = random._floor(t)
for i in range(1,t+1):
if i != t:
k = i - 1
k = k * 131072
t2 = False
while t2 == False:
try:
o = post(data=bytef[k:k + 131072],url=url,headers={
'auth':self.auth,
'chunk-size':str(131072),
'file-id':file_id,
'access-hash-send':hash_send,
'total-part':str(t),
'part-number':str(i)
}).text
o = loads(o)['data']
t2 = True
except:
t2 = False
j = k + 131072
j = round(j / 1024)
j2 = round(len(bytef) / 1024)
print(str(j) + 'kb / ' + str(j2) + ' kb')
else:
k = i - 1
k = k * 131072
t2 = False
while t2 == False:
try:
p = post(data=bytef[k:],url=url,headers={
'auth':self.auth,
'chunk-size':str(len(bytef[k:])),
'file-id':file_id,
'access-hash-send':hash_send,
'total-part':str(t),
'part-number':str(i)
}).text
p = loads(p)['data']['access_hash_rec']
t2 = True
except:
t2 = False
j2 = round(len(bytef) / 1024)
print(str(j2) + 'kb / ' + str(j2) + ' kb')
return p
def sendFile(chat_id, file_id , mime , dc_id, access_hash_rec, file_name, size, text=None, message_id=None):
if text == None:
if message_id == None:
t = False
while t == False:
try:
p = loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"File",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
t = True
except:
t = False
return p
else:
return loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"reply_to_message_id":message_id,
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"File",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
else:
if message_id == None:
return loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"text":text,
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"File",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc']))
else:
return loads(self.enc.decrypt(loads(post(json={"api_version":"5","auth":self.auth,"data_enc":self.enc.encrypt(dumps({
"method":"sendMessage",
"input":{
"object_guid":chat_id,
"rnd":f"{randint(100000,900000)}",
"text":text,
"reply_to_message_id":message_id,
"file_inline":{
"dc_id":str(dc_id),
"file_id":str(file_id),
"type":"File",
"file_name":file_name,
"size":size,
"mime":mime,
"access_hash_rec":access_hash_rec
}
},
"client":{
"app_name":"Main",
"app_version":"3.2.1",
"platform":"Web",
"package":"web.rubika.ir",
"lang_code":"fa"
}
}))},url="https://messengerg2c17.iranlms.ir/").text)['data_enc'])) | 28.752768 | 121 | 0.57681 | 15,242 | 0.978054 | 0 | 0 | 0 | 0 | 0 | 0 | 6,138 | 0.393866 |
2a8ef90a770448ffb4442173cc6d0ca697f44aeb | 3,201 | py | Python | colab_zirc_dims/non_std_cfgs/non_std_cfgs.py | MCSitar/colab-zirc-dims | 1669bbac762e0ee117e27d189313547b8e9ca404 | [
"Apache-2.0"
] | 2 | 2021-11-04T01:15:21.000Z | 2021-11-07T02:27:35.000Z | colab_zirc_dims/non_std_cfgs/non_std_cfgs.py | MCSitar/colab-zirc-dims | 1669bbac762e0ee117e27d189313547b8e9ca404 | [
"Apache-2.0"
] | null | null | null | colab_zirc_dims/non_std_cfgs/non_std_cfgs.py | MCSitar/colab-zirc-dims | 1669bbac762e0ee117e27d189313547b8e9ca404 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Functions to install dependencies for non-standard models (e.g., Centermask2)
and get compatible Detectron2 configs for them.
"""
import sys
import subprocess
try:
from detectron2.config import get_cfg
except ModuleNotFoundError:
print('WARNING: Detectron2 not installed on (virtual?) machine;',
'colab_zirc_dims model loading functions unavailable')
__all__ = ['get_czd_swint_cfg',
'get_czd_centermask2_cfg']
def get_czd_swint_cfg():
    """Install dependencies for swint_detectron2 (if missing) and return a
    Swin-T Mask RCNN Detectron2 config.

    Returns
    -------
    out_cfg : Detectron2 Config instance
        Config for a MaskRCNN model with a Swin-T backbone (see
        swint_detectron2). Lacks a usable weights path; that must be added
        in the main Notebook.
    """
    from detectron2.config import get_cfg

    def _run_and_echo(cmd):
        # run a shell command, printing its stdout; on failure print the error
        try:
            completed = subprocess.run(cmd, capture_output=True, check=True)
            print(str(completed.stdout.decode('UTF-8')))
        except subprocess.CalledProcessError as err:
            print(err)

    try:
        import timm.utils as test_timm
    except ModuleNotFoundError:
        print('Installing module: timm')
        _run_and_echo(["pip", "install", "timm"])
    try:
        import swint
    except ModuleNotFoundError:
        print('Cloning module: Swint_detectron2')
        _run_and_echo(["git", "clone",
                       "https://github.com/xiaohu2015/SwinT_detectron2",
                       "swinT_repo"])
        sys.path.insert(0, '/content/swinT_repo')
        import swint
    cfg = get_cfg()
    swint.add_swint_config(cfg)
    cfg.merge_from_file('/content/swinT_repo/configs/SwinT/mask_rcnn_swint_T_FPN_3x.yaml')
    return cfg
def get_czd_centermask2_cfg():
    """Clone the Centermask2 repo (if missing) and return a Detectron2 config
    for a Centermask2 model with a VoVNet2 backbone.

    Returns
    -------
    out_cfg : Detectron2 Config instance
        Config lacking a usable weights path; that must be added in the main
        Notebook.
    """
    try:
        import centermask
    except ModuleNotFoundError:
        print('Cloning module: Centermask2')
        try:
            clone_proc = subprocess.run(
                ["git", "clone",
                 "https://github.com/youngwanLEE/centermask2.git",
                 "centermask"],
                capture_output=True, check=True)
            print(str(clone_proc.stdout.decode('UTF-8')))
        except subprocess.CalledProcessError as clone_err:
            print(clone_err)
        sys.path.insert(0, '/content/centermask')
        import centermask
    # use centermask's own get_cfg, which knows the extra config keys
    from centermask.config import get_cfg
    cfg = get_cfg()
    cfg.merge_from_file('/content/centermask/configs/centermask/centermask_V_99_eSE_FPN_ms_3x.yaml')
    return cfg
| 35.566667 | 104 | 0.619494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,420 | 0.443611 |
2a8f1bffef647e648e98b668cdc27ae2039961fc | 1,870 | py | Python | main_train_brf.py | MahshidJabari/accident-prediction-montreal | f496269cbf6fada2bbff66325328f98d11a60c68 | [
"MIT"
] | 9 | 2019-10-26T20:42:34.000Z | 2021-10-04T14:33:24.000Z | main_train_brf.py | MahshidJabari/accident-prediction-montreal | f496269cbf6fada2bbff66325328f98d11a60c68 | [
"MIT"
] | 5 | 2019-05-16T20:01:39.000Z | 2021-12-13T20:01:57.000Z | main_train_brf.py | MahshidJabari/accident-prediction-montreal | f496269cbf6fada2bbff66325328f98d11a60c68 | [
"MIT"
] | 10 | 2019-04-26T21:26:12.000Z | 2021-06-18T14:27:28.000Z | #!/usr/bin/env python
from preprocess import get_negative_samples, get_positive_samples
from utils import init_spark
from preprocess import get_dataset_df
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit, \
CrossValidator
from pyspark.ml import Pipeline
from class_weighter import ClassWeighter
from random_forest import get_feature_importances
from export_results import *
# Output directory for this balanced-random-forest run.
# NOTE(review): create_result_dir/write_params/write_results presumably come
# from the wildcard import of export_results — confirm against that module.
result_dir = create_result_dir('brf')

spark = init_spark()

# Down-sample the negatives to 50% before computing the residual imbalance.
neg_samples = get_negative_samples(spark).sample(0.5)
pos_samples = get_positive_samples(spark)
imbalance_ratio = (neg_samples.count()/pos_samples.count())

train_set, test_set = get_dataset_df(spark, pos_samples, neg_samples)
# Cache both splits: they are each consumed twice (fit/transform) below.
train_set, test_set = train_set.persist(), test_set.persist()

# Random forest configured for the accident data; per-row weights are read
# from the 'weight' column produced by the ClassWeighter stage below.
brf = RandomForestClassifier(labelCol="label",
                             featuresCol="features",
                             cacheNodeIds=True,
                             maxDepth=25,
                             impurity='entropy',
                             featureSubsetStrategy='13',
                             weightCol='weight',
                             minInstancesPerNode=10,
                             numTrees=100,
                             subsamplingRate=1.0,
                             maxMemoryInMB=256)
# Class weights [1/imbalance_ratio, 1.0] — presumably ordered
# [negative, positive] so the classes contribute equally; verify ClassWeighter.
cw = ClassWeighter().setClassWeight([1/imbalance_ratio, 1.0])
pipeline = Pipeline().setStages([cw, brf])
model = pipeline.fit(train_set)

predictions = model.transform(test_set).persist()
train_predictions = model.transform(train_set).persist()

write_params(model, result_dir)
write_results(predictions, train_predictions, result_dir)

# Write feature importances (pipeline stage 1 is the forest itself)
feature_importances = get_feature_importances(model.stages[1])
feature_importances.to_csv(result_dir + '/feature_importances.csv')
| 40.652174 | 71 | 0.683422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.062567 |
2a904c8d13c0a4e09fe39c713c7e34d6fd595da9 | 1,302 | py | Python | operations/tests/rally/custom_runner/ocss_search_runner.py | irin4eto/open-commerce-search | 79b58b93c2c028ba5bade5ff9228cc63d6f3394b | [
"Apache-2.0"
] | 22 | 2020-04-24T19:13:29.000Z | 2022-02-15T13:09:37.000Z | operations/tests/rally/custom_runner/ocss_search_runner.py | irin4eto/open-commerce-search | 79b58b93c2c028ba5bade5ff9228cc63d6f3394b | [
"Apache-2.0"
] | 7 | 2019-12-06T15:08:16.000Z | 2022-03-31T09:13:52.000Z | operations/tests/rally/custom_runner/ocss_search_runner.py | irin4eto/open-commerce-search | 79b58b93c2c028ba5bade5ff9228cc63d6f3394b | [
"Apache-2.0"
] | 2 | 2021-08-05T13:30:42.000Z | 2022-01-24T22:53:13.000Z | # -*- coding: utf-8 -*-
import json
class OCSSSearchRunner:
    """Rally custom runner that replays OCSS search-log queries against
    Elasticsearch.

    Queries are read once from a JSON-lines source file (one search request
    per line) and consumed from the end of the list, one per invocation.
    """

    # Class-level query cache: loaded on the first initialize() and then
    # shared (and consumed) by every invocation. Kept class-level to
    # preserve the original runner's load-once semantics.
    search_data = []

    def initialize(self, params):
        """Validate ``params`` and lazily load the recorded search bodies.

        Raises RuntimeError when 'index' or 'source-file' is missing or not
        a string (fix: isinstance checks instead of ``type(...) is str``,
        and the RuntimeErrors now carry messages).
        """
        if "index" in params and isinstance(params["index"], str):
            self.index = params["index"]
        else:
            raise RuntimeError("missing or invalid 'index' parameter") from None

        if "source-file" not in params or not isinstance(params["source-file"], str):
            print("ERROR no source data file given, or wrong format", end=". ")
            raise RuntimeError("missing or invalid 'source-file' parameter") from None

        # load the search data only when the cache is still empty
        if self.search_data is None or len(self.search_data) < 1:
            with open(params["source-file"]) as json_file:
                for line in json_file:
                    self.search_data.append(json.loads(line))

    async def __call__(self, es, params):
        """Pop one recorded query and run it; return the service-side latency."""
        self.initialize(params=params)
        search = self.search_data.pop()
        search_body = search["query"]
        search_response = await es.search(body=search_body, index=self.index)
        # 'took' is Elasticsearch's reported processing time in milliseconds
        return search_response["took"], "ms"

    def __repr__(self, *args, **kwargs):
        return "ocss-search"
2a91308d7165b5bb29f38552b30075037b3d559a | 8,574 | py | Python | Packages/LiveReload/server/PluginAPI.py | kangTaehee/st3 | 34aa17bcdac88b94cc38d37276fdc4983b27c76d | [
"Apache-2.0"
] | 4 | 2018-06-08T23:18:47.000Z | 2020-02-24T06:14:06.000Z | Packages/LiveReload/server/PluginAPI.py | kangTaehee/st3 | 34aa17bcdac88b94cc38d37276fdc4983b27c76d | [
"Apache-2.0"
] | 3 | 2021-05-10T18:59:14.000Z | 2021-09-02T01:50:15.000Z | Packages/LiveReload/server/PluginAPI.py | kangTaehee/st3 | 34aa17bcdac88b94cc38d37276fdc4983b27c76d | [
"Apache-2.0"
] | 2 | 2019-04-10T01:02:42.000Z | 2021-02-05T08:41:38.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import LiveReload
import json
import sublime
try:
from .Settings import Settings
except ValueError:
from Settings import Settings
def log(msg):
    """No-op debug logger; call sites stay in place so a real implementation
    (e.g. print) can be swapped in when debugging."""
    pass
class PluginFactory(type):
    """Self-registering metaclass for LiveReload plug-ins.

    The first class created with this metaclass becomes the registry holder;
    every class created afterwards registers itself, replacing any earlier
    registration with the same name. Pattern after
    http://martyalchin.com/2008/jan/10/simple-plugin-framework/
    """

    def __init__(
        mcs,
        name,
        bases,
        attrs,
    ):
        if not hasattr(mcs, 'plugins'):
            # first time through: this is the registry/base class itself
            mcs.settings = Settings()
            mcs.plugins = []
            mcs.enabled_plugins = mcs.settings.get('enabled_plugins', [])
        else:
            log('LiveReload new plugin: ' + mcs.__name__)
            # drop a previously registered version of the same plug-in
            for registered in mcs.plugins:
                if registered.__name__ == mcs.__name__:
                    mcs.plugins.remove(registered)
            mcs.plugins.append(mcs)

    def togglePlugin(mcs, index):
        """Enable/disable the plug-in at *index* and persist the new state."""
        plugin = mcs.plugins[index]()
        if plugin.name in mcs.enabled_plugins:
            mcs.enabled_plugins.remove(plugin.name)
            sublime.set_timeout(
                lambda: sublime.status_message(
                    '"%s" the LiveReload plug-in has been disabled!'
                    % plugin.title), 100)
            plugin.onDisabled()
        else:
            mcs.enabled_plugins.append(plugin.name)
            sublime.set_timeout(
                lambda: sublime.status_message(
                    '"%s" the LiveReload plug-in has been enabled!'
                    % plugin.title), 100)
            plugin.onEnabled()

        # persist only plug-ins that are not session-scoped
        permanent = []
        for enabled_name in mcs.enabled_plugins:
            try:
                if mcs.getPlugin(enabled_name).this_session_only is not True:
                    permanent.append(enabled_name)
            except Exception:
                pass
        mcs.settings.set('enabled_plugins', permanent)

    def getPlugin(mcs, className):
        """Return a fresh instance of the plug-in named *className*, or False."""
        for candidate in mcs.plugins:
            if candidate.__name__ == className:
                return candidate()
        return False

    def listAllDefinedFilters(mcs):
        """Collect the file-extension filters of every enabled plug-in."""
        extensions = []
        for plugin in mcs.plugins:
            if plugin.__name__ not in mcs.enabled_plugins:
                continue
            # identity comparison kept from the original (relies on '*' interning)
            if not plugin.file_types is '*':
                extensions.extend(plugin.file_types.split(','))
        return extensions

    def listPlugins(mcs):
        """Build the quick-panel entries: [label, optional description] per plug-in."""
        menu = []
        for plugin in mcs.plugins:
            entry = []
            if plugin.__name__ in mcs.enabled_plugins:
                entry.append('Disable - ' + str(plugin.title))
            elif plugin.this_session_only is not True:
                entry.append('Enable - ' + str(plugin.title))
            else:
                entry.append('Enable - ' + str(plugin.title) + ' (this session)')
            if plugin.description:
                entry.append(str(plugin.description) + ' ('
                             + str(plugin.file_types) + ')')
            menu.append(entry)
        return menu

    def dispatch_OnReceive(mcs, data, origin):
        """Fan browser data out to every plug-in, then to a WS callback if any.

        NOTE(review): references ``sys`` which is not imported in the visible
        part of the file; the failure is swallowed by the outer except.
        """
        log(data)
        for plugin in mcs.plugins:
            try:
                plugin().onReceive(data, origin)
            except Exception as err:
                log(err)
        try:
            callback = LiveReload.API.has_callback(data.path)
            if callback:
                try:
                    owner = sys.modules['LiveReload'].Plugin.getPlugin(callback['mcs'])
                    handler = getattr(owner, callback['name'], None)
                    if handler:
                        handler(data)
                except Exception as err:
                    log(err)
        except Exception:
            log('no WS handler')
class PluginClass:
    """
    Base class for custom LiveReload plug-ins (sublime_plugins compatible).

    Plug-ins implementing this reference should provide the following attributes:

    - description (string) describing your plug-in
    - title (string) naming your plug-in
    - file_types (string) comma-separated extensions which should trigger a
      refresh for this plug-in, or '*' for every file
    """

    @property
    def name(self):
        """Plug-in class name, derived from the class repr."""
        return str(self.__class__).split('.')[1].rstrip("'>")

    @property
    def isEnabled(self):
        """True when this plug-in is in the enabled list (set by the factory)."""
        return self.name in self.enabled_plugins

    def should_run(self, filename=False):
        """Return True if *filename* is allowed for this plug-in and the
        plug-in itself is enabled."""
        if not self.isEnabled:
            return False

        all_filters = LiveReload.Plugin.listAllDefinedFilters()

        def otherPluginsWithFilter():
            # True when no enabled plug-in claims this file via an explicit filter
            for f in all_filters:
                if filename.endswith(f):
                    return False
            return True

        this_plugin = self.file_types.split(',')
        if [f for f in this_plugin if filename.endswith(f)]:
            return True
        # Fix: compare string values with ==, not identity (`is`), which only
        # worked by accident of CPython literal interning.
        elif self.file_types == '*' and otherPluginsWithFilter():
            return True
        else:
            return False

    def addResource(
        self,
        req_path,
        blob,
        content_type='text/plain',
    ):
        """
        - (string) req_path; browser path to file you want to serve. Ex: /yourfile.js
        - (string/file) blob; string or file instance to file you want to serve
        - (string) content_type; Mime-type of file you want to serve
        """
        LiveReload.API.add_static_file(req_path, blob, content_type)

    def sendCommand(
        self,
        command,
        settings,
        filename=False,
    ):
        """
        - (string) command; to trigger in livereload.js (refresh, info, or one of the plugins)
        - (object) settings; additional data that gets passed to command (should be json parsable)
        - (string) filename; original name of file
        """
        if not self.isEnabled:
            return
        # Fix: value comparison instead of identity (`is`) comparison.
        if command == 'refresh':  # to support new protocol
            settings['command'] = 'reload'
        try:
            if not filename:
                filename = settings['path'].strip(' ')
        except Exception:
            log('Missing path definition')

        if self.should_run(filename):
            sublime.set_timeout(lambda: \
                sublime.status_message('LiveReload refresh from %s'
                                       % self.name), 100)
            LiveReload.API.send(json.dumps(settings))
        else:
            log('Skipping '+ self.name)

    def refresh(self, filename, settings=None):
        """
        Generic refresh command

        - (string) filename; file to refresh (.css, .js, jpg ...)
        - (object) settings; how to reload(entire page or just parts)
        """
        if not settings:
            settings = {
                'path': filename,
                'apply_js_live': self.settings.get('apply_js_live'),
                'apply_css_live': self.settings.get('apply_css_live'),
                'apply_images_live': self.settings.get('apply_images_live'
                                                       ),
            }
        self.sendCommand('refresh', settings)

    def listClients(self):
        """Return a list of all connected clients with their req_url and origin."""
        return LiveReload.API.list_clients()

    def onReceive(self, data, origin):
        """
        Event handler which fires when browser plug-ins sends data

        - (string) data sent by browser
        - (string) origin of data
        """
        pass

    def onEnabled(self):
        """Runs when the plug-in is enabled via the menu."""
        pass

    def onDisabled(self):
        """Runs when the plug-in is disabled via the menu."""
        pass

    @property
    def this_session_only(self):
        """Should it stay enabled forever (False) or this session only (True)."""
        return False

    @property
    def file_types(self):
        """Run the plug-in only for these extensions; defaults to all ('*')."""
        return '*'
# Create the plug-in base class through the metaclass; Python 3 rejects the
# (object, PluginClass) bases layout that Python 2 required, hence the
# TypeError fallback.
try:
    PluginInterface = PluginFactory('PluginInterface', (object,
                                    PluginClass), {})
except TypeError:
    PluginInterface = PluginFactory('PluginInterface', (PluginClass, ),
                                    {})
| 30.29682 | 104 | 0.533357 | 8,072 | 0.941451 | 0 | 0 | 447 | 0.052134 | 0 | 0 | 2,456 | 0.286447 |
2a91414de57b627b89516d25ee369f8bbf7d2897 | 431 | py | Python | team/migrations/0014_auto_20200608_1855.py | Aleccc/gtcrew | 7e6e7024afdbf48ee796cb1f9a86b913e6843dda | [
"MIT"
] | null | null | null | team/migrations/0014_auto_20200608_1855.py | Aleccc/gtcrew | 7e6e7024afdbf48ee796cb1f9a86b913e6843dda | [
"MIT"
] | 21 | 2019-02-14T02:47:34.000Z | 2022-01-23T02:22:54.000Z | team/migrations/0014_auto_20200608_1855.py | Aleccc/gtcrew | 7e6e7024afdbf48ee796cb1f9a86b913e6843dda | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-06-08 22:55
from django.db import migrations
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('team', '0013_auto_20200608_1824'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='bio',
field=wagtail.core.fields.RichTextField(blank=True, max_length=1500),
),
]
| 21.55 | 81 | 0.62413 | 319 | 0.740139 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.213457 |
2a920b7ccb1c0d280a100c971c21a949a0ed335a | 408 | py | Python | discussions/migrations/0006_channel_is_deleted.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 32 | 2016-03-25T01:03:13.000Z | 2022-01-15T19:35:42.000Z | discussions/migrations/0006_channel_is_deleted.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 4,858 | 2016-03-03T13:48:30.000Z | 2022-03-29T22:09:51.000Z | discussions/migrations/0006_channel_is_deleted.py | umarmughal824/micromasters | ea92d3bcea9be4601150fc497302ddacc1161622 | [
"BSD-3-Clause"
] | 20 | 2016-08-18T22:07:44.000Z | 2021-11-15T13:35:35.000Z | # Generated by Django 2.1.5 on 2019-03-22 07:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('discussions', '0005_timestamped_discussions_models'),
]
operations = [
migrations.AddField(
model_name='channel',
name='is_deleted',
field=models.BooleanField(default=False),
),
]
| 21.473684 | 63 | 0.620098 | 315 | 0.772059 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.289216 |
2a9522b6d5b9307922a2f1edce02700ed9d5c7b1 | 8,562 | py | Python | exabel_data_sdk/client/api/search_service.py | burk/python-sdk | 83fb81d09e0d6a407c8907a75bebb895decc7edc | [
"MIT"
] | null | null | null | exabel_data_sdk/client/api/search_service.py | burk/python-sdk | 83fb81d09e0d6a407c8907a75bebb895decc7edc | [
"MIT"
] | null | null | null | exabel_data_sdk/client/api/search_service.py | burk/python-sdk | 83fb81d09e0d6a407c8907a75bebb895decc7edc | [
"MIT"
] | null | null | null | import itertools
from typing import Mapping, Sequence, Tuple, TypeVar, Union
from exabel_data_sdk.client.api.api_client.entity_api_client import EntityApiClient
from exabel_data_sdk.client.api.data_classes.entity import Entity
from exabel_data_sdk.stubs.exabel.api.data.v1.all_pb2 import (
SearchEntitiesRequest,
SearchEntitiesResponse,
SearchTerm,
)
_COMPANY_ENTITY_TYPE = "entityTypes/company"
_SECURITY_ENTITY_TYPE = "entityTypes/security"
_LISTING_ENTITY_TYPE = "entityTypes/listing"
TKey = TypeVar("TKey")
class SearchService:
    """
    Service for entity search.
    """

    def __init__(self, client: EntityApiClient):
        self.client = client

    def company_by_isin(self, *isins: str) -> Mapping[str, Entity]:
        """
        Look up companies by ISIN (International Securities Identification Number).

        The return value is a dict with the input values as keys and with the corresponding Entity
        objects as values. ISINs which did not return any results, are not included.
        """
        return self._company_by_field("isin", *isins)

    def security_by_isin(self, *isins: str) -> Mapping[str, Entity]:
        """
        Look up securities by ISIN (International Securities Identification Number).

        The return value is a dict with the input values as keys and with the corresponding Entity
        objects as values. ISINs which did not return any results, are not included.
        """
        return self._security_by_field("isin", *isins)

    def company_by_bloomberg_ticker(self, *tickers: str) -> Mapping[str, Entity]:
        """
        Look up companies by Bloomberg tickers.

        The return value is a dict with the input values as keys and with the corresponding Entity
        objects as values. Tickers which did not return any results, are not included.
        """
        return self._company_by_field("bloomberg_ticker", *tickers)

    def company_by_bloomberg_symbol(self, *symbols: str) -> Mapping[str, Entity]:
        """
        Look up companies by Bloomberg symbols.

        The return value is a dict with the input values as keys and with the corresponding Entity
        objects as values. Symbols which did not return any results, are not included.
        """
        return self._company_by_field("bloomberg_symbol", *symbols)

    def company_by_figi(self, *symbols: str) -> Mapping[str, Entity]:
        """
        Look up companies by FIGI (Financial Instrument Global Identifier).

        The return value is a dict with the input values as keys and with the corresponding Entity
        objects as values. Symbols which did not return any results, are not included.
        """
        return self._company_by_field("figi", *symbols)

    def company_by_factset_identifier(self, *identifiers: str) -> Mapping[str, Entity]:
        """
        Look up companies by FactSet identifiers.

        The return value is a dict with the input values as keys and with the corresponding Entity
        objects as values. Identifiers which did not return any results, are not included.
        """
        return self._company_by_field("factset_identifier", *identifiers)

    def companies_by_text(self, *texts: str) -> Mapping[str, Sequence[Entity]]:
        """
        Search for companies based on text search.

        The method searches for ISINs, tickers and company names, and if the search term is
        sufficiently long, a prefix search is performed.
        A maximum of five companies is returned for each search.

        The return value is a dict with the input values as keys and with a sequence of Entity
        objects as values. Search terms which did not return any results, are not included.
        """
        return self._companies_by_field("text", *texts)

    def company_by_mic_and_ticker(
        self, *mic_and_ticker: Tuple[str, str]
    ) -> Mapping[Tuple[str, str], Entity]:
        """
        Look up companies by MIC (Market Identifier Code) and ticker.

        The return value is a dict with the input values as keys and with the corresponding Entity
        objects as values. MICs and tickers which did not return any results, are not included.
        """
        return self._by_mic_and_ticker(_COMPANY_ENTITY_TYPE, *mic_and_ticker)

    def security_by_mic_and_ticker(
        self, *mic_and_ticker: Tuple[str, str]
    ) -> Mapping[Tuple[str, str], Entity]:
        """
        Look up securities by MIC (Market Identifier Code) and ticker.

        The return value is a dict with the input values as keys and with the corresponding Entity
        objects as values. MICs and tickers which did not return any results, are not included.
        """
        return self._by_mic_and_ticker(_SECURITY_ENTITY_TYPE, *mic_and_ticker)

    def listing_by_mic_and_ticker(
        self, *mic_and_ticker: Tuple[str, str]
    ) -> Mapping[Tuple[str, str], Entity]:
        """
        Look up listings by MIC (Market Identifier Code) and ticker.

        The return value is a dict with the input values as keys and with the corresponding Entity
        objects as values. MICs and tickers which did not return any results, are not included.
        """
        return self._by_mic_and_ticker(_LISTING_ENTITY_TYPE, *mic_and_ticker)

    def entities_by_terms(
        self, entity_type: str, terms: Sequence[Union[SearchTerm, Tuple[str, str]]]
    ) -> Sequence[SearchEntitiesResponse.SearchResult]:
        """
        Look up entities of a given type based on a series of search terms.

        The searches that are performed are determined by the input terms. In most cases one search
        term defines a single query. The exception to this are the 'MIC' and 'ticker' fields, which
        must come in pairs, with 'MIC' immediately before 'ticker'. One such pair is treated as one
        search query.

        The return value contains one SearchResult for every query.
        """
        request = SearchEntitiesRequest(
            parent=entity_type,
            terms=[
                term if isinstance(term, SearchTerm) else SearchTerm(field=term[0], query=term[1])
                for term in terms
            ],
        )
        response = self.client.search_entities(request)
        return response.results

    def _company_by_field(self, field: str, *values: str) -> Mapping[str, Entity]:
        """Single-result company search on one field."""
        return self._single_result(self._companies_by_field(field, *values))

    def _companies_by_field(self, field: str, *values: str) -> Mapping[str, Sequence[Entity]]:
        """Multi-result company search on one field."""
        return self._by_field(_COMPANY_ENTITY_TYPE, field, *values)

    def _security_by_field(self, field: str, *values: str) -> Mapping[str, Entity]:
        """Single-result security search on one field."""
        return self._single_result(self._by_field(_SECURITY_ENTITY_TYPE, field, *values))

    def _by_mic_and_ticker(
        self, entity_type: str, *mic_and_ticker: Tuple[str, str]
    ) -> Mapping[Tuple[str, str], Entity]:
        """Search by (MIC, ticker) pairs, expecting at most one hit per pair."""
        results = self._by_fields(entity_type, ("mic", "ticker"), *itertools.chain(*mic_and_ticker))
        return self._single_result(results)  # type: ignore[arg-type]

    def _single_result(self, results: Mapping[TKey, Sequence[Entity]]) -> Mapping[TKey, Entity]:
        """Unwrap single-element result lists; each key must map to exactly one entity."""
        new_results = {}
        for key, value in results.items():
            assert len(value) == 1
            new_results[key] = value[0]
        return new_results

    def _by_field(
        self, entity_type: str, field: str, *values: str
    ) -> Mapping[str, Sequence[Entity]]:
        """Search a single field; keys of the result are the plain query strings."""
        result: Mapping[Tuple[str, ...], Sequence[Entity]] = self._by_fields(
            entity_type, [field], *values
        )
        return {query[0]: entities for query, entities in result.items()}

    def _by_fields(
        self, entity_type: str, fields: Sequence[str], *values: str
    ) -> Mapping[Tuple[str, ...], Sequence[Entity]]:
        """Search using repeated groups of ``fields`` filled from ``values``.

        ``values`` must contain a whole number of groups, e.g. for
        fields=("mic", "ticker") the values come as mic1, ticker1, mic2, ...

        Raises ValueError if no values are given or if the number of values
        is not a multiple of the number of fields.
        """
        if not values:
            raise ValueError("No search terms provided.")
        if len(values) % len(fields) != 0:
            # Fix: previously such input was silently mis-paired by cycling
            # over the fields, producing nonsense queries.
            raise ValueError(
                f"Number of values ({len(values)}) must be a multiple of the "
                f"number of fields ({len(fields)})."
            )
        tuples = list(zip(itertools.cycle(fields), values))
        results = self.entities_by_terms(entity_type, tuples)
        to_return = {}
        for result in results:
            # every result must echo the field group we queried with
            assert len(result.terms) == len(fields)
            assert list(fields) == [
                term.field for term in result.terms
            ], f"{fields} != {[term.field for term in result.terms]}"
            if result.entities:
                to_return[tuple(term.query for term in result.terms)] = [
                    Entity.from_proto(e) for e in result.entities
                ]
        return to_return
| 42.597015 | 100 | 0.666083 | 8,033 | 0.938215 | 0 | 0 | 0 | 0 | 0 | 0 | 3,709 | 0.433193 |
2a97aa991dbbe490d3f89c2502461d05c3f3477d | 3,808 | py | Python | test/test_CustomLayers.py | tomasheiskanen/pro_gan_pytorch | af44df67a2e771207ed96af4c0948980edd9d3d7 | [
"MIT"
] | 2 | 2020-11-19T14:20:51.000Z | 2020-12-10T10:54:39.000Z | test/test_CustomLayers.py | tomasheiskanen/pro_gan_pytorch | af44df67a2e771207ed96af4c0948980edd9d3d7 | [
"MIT"
] | null | null | null | test/test_CustomLayers.py | tomasheiskanen/pro_gan_pytorch | af44df67a2e771207ed96af4c0948980edd9d3d7 | [
"MIT"
] | 2 | 2020-11-19T11:32:46.000Z | 2020-12-10T23:32:18.000Z | import torch as th
from unittest import TestCase
from pro_gan_pytorch import CustomLayers as cL
# Run the tests on GPU when available, otherwise fall back to CPU.
device = th.device("cuda" if th.cuda.is_available() else "cpu")
class Test_equalized_conv2d(TestCase):
    """Tests for the equalized learning-rate 2D convolution layer."""

    def setUp(self):
        # Fix: move the module to `device`; test_forward creates its inputs
        # on `device`, so on a CUDA machine the original CPU-resident module
        # raised a device-mismatch error.
        self.conv_block = cL._equalized_conv2d(21, 3, k_size=(3, 3), pad=1).to(device)

        # print the Equalized conv block
        print("\nEqualized conv block:\n%s" % str(self.conv_block))

    def test_forward(self):
        mock_in = th.randn(32, 21, 16, 16).to(device)
        mock_out = self.conv_block(mock_in)

        # check output shape and numerical sanity
        self.assertEqual(mock_out.shape, (32, 3, 16, 16))
        self.assertEqual(th.isnan(mock_out).sum().item(), 0)
        self.assertEqual(th.isinf(mock_out).sum().item(), 0)

        # the equalized-lr scheme keeps the raw weights near unit std
        self.assertAlmostEqual(self.conv_block.weight.data.std(), 1, delta=1e-1)

    def tearDown(self):
        # delete the computational resources
        del self.conv_block
class Test_equalized_deconv2d(TestCase):
    """Tests for the equalized learning-rate 2D transposed convolution layer."""

    def setUp(self):
        # Fix: move the module to `device` so it matches the device of the
        # inputs created in test_forward (avoids a mismatch on CUDA machines).
        self.deconv_block = cL._equalized_deconv2d(21, 3, k_size=(3, 3), pad=1).to(device)

        # print the Equalized conv block
        print("\nEqualized conv block:\n%s" % str(self.deconv_block))

    def test_forward(self):
        mock_in = th.randn(32, 21, 16, 16).to(device)
        mock_out = self.deconv_block(mock_in)

        # check output shape and numerical sanity
        self.assertEqual(mock_out.shape, (32, 3, 16, 16))
        self.assertEqual(th.isnan(mock_out).sum().item(), 0)
        self.assertEqual(th.isinf(mock_out).sum().item(), 0)

        # the equalized-lr scheme keeps the raw weights near unit std
        self.assertAlmostEqual(self.deconv_block.weight.data.std(), 1, delta=1e-1)

    def tearDown(self):
        # delete the computational resources
        del self.deconv_block
class Test_equalized_linear(TestCase):
    """Tests for the equalized learning-rate fully-connected layer."""

    def setUp(self):
        # Fix: move the module to `device` so it matches the device of the
        # inputs created in test_forward (avoids a mismatch on CUDA machines).
        self.lin_block = cL._equalized_linear(13, 52).to(device)

        # print the Equalized linear block
        print("\nEqualized linear block:\n%s" % str(self.lin_block))

    def test_forward(self):
        # test the forward for the first res block
        mock_in = th.randn(32, 13).to(device)
        mock_out = self.lin_block(mock_in)

        # check output shape and numerical sanity
        self.assertEqual(mock_out.shape, (32, 52))
        self.assertEqual(th.isnan(mock_out).sum().item(), 0)
        self.assertEqual(th.isinf(mock_out).sum().item(), 0)

        # the equalized-lr scheme keeps the raw weights near unit std
        self.assertAlmostEqual(self.lin_block.weight.data.std(), 1, delta=1e-1)

    def tearDown(self):
        # delete the computational resources
        del self.lin_block
class Test_PixelwiseNorm(TestCase):
    """Tests for the pixelwise feature-vector normalization layer."""

    def setUp(self):
        # Fix: keep the module on `device` like the test inputs (harmless for
        # a parameter-free layer, consistent with the other fixtures).
        self.normalizer = cL.PixelwiseNorm().to(device)

    def test_forward(self):
        mock_in = th.randn(1, 13, 1, 1).to(device)
        mock_out = self.normalizer(mock_in)

        # normalization preserves the input shape and stays finite
        self.assertEqual(mock_out.shape, mock_in.shape)
        self.assertEqual(th.isnan(mock_out).sum().item(), 0)
        self.assertEqual(th.isinf(mock_out).sum().item(), 0)
        # we cannot comment that the norm of the output tensor
        # will always be less than the norm of the input tensor
        # so no more checking can be done

    def tearDown(self):
        # delete the computational resources
        del self.normalizer
class Test_MinibatchStdDev(TestCase):
    """Tests for the minibatch standard-deviation layer."""

    def setUp(self):
        # Fix: keep the module on `device` like the test inputs (harmless for
        # a parameter-free layer, consistent with the other fixtures).
        self.minStdD = cL.MinibatchStdDev().to(device)

    def test_forward(self):
        mock_in = th.randn(1, 13, 16, 16).to(device)
        mock_out = self.minStdD(mock_in)

        # the layer appends exactly one statistics channel and stays finite
        self.assertEqual(mock_out.shape[1], mock_in.shape[1] + 1)
        self.assertEqual(th.isnan(mock_out).sum().item(), 0)
        self.assertEqual(th.isinf(mock_out).sum().item(), 0)

    def tearDown(self):
        # delete the computational resources
        del self.minStdD
| 30.464 | 82 | 0.642595 | 3,631 | 0.953519 | 0 | 0 | 0 | 0 | 0 | 0 | 708 | 0.185924 |
aa48f57729bf1af92cafcf77cd0d806243d6e2f7 | 511 | py | Python | onlinecourse/tests.py | vicbolo78/Week-8-Lab | 39e086c2d8a07875218bce461b5ab1ca63b3b931 | [
"Apache-2.0"
] | null | null | null | onlinecourse/tests.py | vicbolo78/Week-8-Lab | 39e086c2d8a07875218bce461b5ab1ca63b3b931 | [
"Apache-2.0"
] | null | null | null | onlinecourse/tests.py | vicbolo78/Week-8-Lab | 39e086c2d8a07875218bce461b5ab1ca63b3b931 | [
"Apache-2.0"
] | null | null | null | import unittest
from django.test import TestCase
class TestQuestion(unittest.TestCasel):
def test_is_get_score(self, selected_ids):
self.assertNotEqual(all_answers = self.choice_set.filter(is_correct=True).count()
self.assertEqual(selected_correct = self.choice_set.filter(is_correct=True, id__in=selected_ids).count()
if all_answers == selected_correct:
return True
else:
return False
if __name__=='__main__':
unittest.main()
| 28.388889 | 112 | 0.688845 | 396 | 0.774951 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.019569 |
aa49195189dacb2ef252f543c4131fc6f7dbb7a4 | 1,482 | py | Python | trnsystor/statement/overwritecheck.py | samuelduchesne/pyTrnsys | f2deb5eb340a2814722eead5f8b6278a945c730d | [
"MIT"
] | 5 | 2021-07-20T16:07:06.000Z | 2022-02-09T07:57:21.000Z | trnsystor/statement/overwritecheck.py | samuelduchesne/pyTrnsys | f2deb5eb340a2814722eead5f8b6278a945c730d | [
"MIT"
] | 50 | 2021-02-12T07:36:55.000Z | 2022-03-21T10:40:47.000Z | trnsystor/statement/overwritecheck.py | samuelduchesne/pyTrnsys | f2deb5eb340a2814722eead5f8b6278a945c730d | [
"MIT"
] | 4 | 2019-06-28T17:56:05.000Z | 2020-02-24T16:33:28.000Z | """OverwriteCheck Statement."""
from trnsystor.statement.statement import Statement
class OverwriteCheck(Statement):
"""OverwriteCheck Statement.
A common error in non standard and user written TRNSYS Type routines is
to reserve too little space in the global output array. By default, each
Type is accorded 20 spots in the global TRNSYS output array. However, there
is no way to prevent the Type from then writing in (for example) the 21st
spot; the entire global output array is always accessible. By activating the
OVERWRITE_CHECK statement, the TRNSYS kernel checks to make sure that each
Type did not write outside its allotted space. As with the NAN_CHECK
statement, OVERWRITE_CHECK is a time consuming process and should only be
used as a debugging tool when a simulation is ending in error.
"""
def __init__(self, n=0):
"""Initialize an OVERWRITE_CHECK object.
Hint:
OVERWRITE_CHECK is a time consuming process and should only be used
as a debugging tool when a simulation is ending in error.
Args:
n (int): Is 0 if the OVERWRITE_CHECK feature is not desired or 1 if
OVERWRITE_CHECK feature is desired.
"""
super().__init__()
self.n = int(n)
self.doc = "The OVERWRITE_CHECK Statement"
def _to_deck(self):
"""Return deck representation of self."""
return "OVERWRITE_CHECK {}".format(self.n)
| 39 | 80 | 0.691633 | 1,394 | 0.940621 | 0 | 0 | 0 | 0 | 0 | 0 | 1,213 | 0.818489 |
aa4ca18a2fac84c67cbd77ed0d5889f5f97ea255 | 3,012 | py | Python | src/python/serif/model/impl/relation_mention/aida_relation_mention_model.py | BBN-E/text-open | c508f6caeaa51a43cdb0bc27d8ed77e5750fdda9 | [
"Apache-2.0"
] | 2 | 2022-03-24T14:37:51.000Z | 2022-03-24T19:56:45.000Z | src/python/serif/model/impl/relation_mention/aida_relation_mention_model.py | BBN-E/text-open | c508f6caeaa51a43cdb0bc27d8ed77e5750fdda9 | [
"Apache-2.0"
] | null | null | null | src/python/serif/model/impl/relation_mention/aida_relation_mention_model.py | BBN-E/text-open | c508f6caeaa51a43cdb0bc27d8ed77e5750fdda9 | [
"Apache-2.0"
] | null | null | null | import os
from serif.model.relation_mention_model import RelationMentionModel
from serif.theory.enumerated_type import Tense, Modality
# Modified from DogFoodFinderRelationMentionModel
class AIDARelationMentionModel(RelationMentionModel):
'''adds TACRED relations to TACRED entities'''
def __init__(self, mapping_file, **kwargs):
super(AIDARelationMentionModel,self).__init__(**kwargs)
self.words2anno = self.load_words2anno_dict(mapping_file)
self.anno_dict = self.load_anno_dict(self.words2anno)
self.external_tag_file = True # to permit the model to accept annotations file as argument
def get_relation_mention_info(self, sentence):
#annotations = self.anno_dict[serif_doc_name][sent_index_in_doc]
annotations = self.anno_dict[sentence.document.docid][sentence.sent_no]
[subj_start, subj_end, subj_type, obj_start, obj_end, obj_type, relation] = annotations
# each TACRED sentence should have exactly two entity mentions created
print(sentence.mention_set)
l_mention = sentence.mention_set[0]
r_mention = sentence.mention_set[1]
tuples = [(relation, l_mention, r_mention, Tense.Unspecified, Modality.Asserted)]
return tuples
def load_words2anno_dict(self, mapping_file):
'''
:param mapping_file: tab-separated file of "doc[.words] doc.annotations" line for each doc to be processed
TACRED relation annotations consist of 7 fields per line: "subj_start","subj_end","subj_type", "obj_start","obj_end","obj_type", "relation"
:return: {"doc":"doc.annotations" for each doc}
'''
words2anno = dict()
with open(mapping_file, 'r') as f:
for l in f.readlines():
words_file = l.strip().split()[0]
tags_file = l.strip().split()[1]
words2anno[os.path.basename(words_file)] = tags_file
return words2anno
def load_anno_dict(self, mapping_dict):
'''
:param mapping_dict: created by self.load_mapping_dict
:return: {train_doc=":[["subj_start","subj_end","subj_type", "obj_start","obj_end","obj_type", "relation"],
["subj_start","subj_end","subj_type", "obj_start","obj_end","obj_type", "relation"],
...]], for each train, dev, test }
'''
anno_dict = dict()
for words_file,anno_file in mapping_dict.items():
anno_sents = self.preprocess_anno_file(anno_file)
anno_dict[os.path.basename(words_file)] = anno_sents
return anno_dict
def preprocess_anno_file(self, anno_file):
'''processes supplementary .annotations file into sents to provide as labelling info to entity indices, types and relations to doc'''
anno_sents = [s.strip().split() for s in open(anno_file).readlines()]
return anno_sents
| 40.702703 | 154 | 0.648406 | 2,824 | 0.937583 | 0 | 0 | 0 | 0 | 0 | 0 | 1,227 | 0.407371 |
aa4ce841fb5c0a7a5017a31ae4183d5366d1cbf9 | 1,209 | py | Python | datamodel_code_generator/model/pydantic/__init__.py | languitar/datamodel-code-generator | ddd909746a66df5c8268d782f3ae24bee636be92 | [
"MIT"
] | null | null | null | datamodel_code_generator/model/pydantic/__init__.py | languitar/datamodel-code-generator | ddd909746a66df5c8268d782f3ae24bee636be92 | [
"MIT"
] | null | null | null | datamodel_code_generator/model/pydantic/__init__.py | languitar/datamodel-code-generator | ddd909746a66df5c8268d782f3ae24bee636be92 | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import List, Optional
from jinja2 import Environment, FileSystemLoader, Template
from pydantic import BaseModel as _BaseModel
from ..base import TEMPLATE_DIR
from .base_model import BaseModel, DataModelField
from .custom_root_type import CustomRootType
from .dataclass import DataClass
def dump_resolve_reference_action(class_names: List[str]) -> str:
return '\n'.join(
f'{class_name}.update_forward_refs()' for class_name in class_names
)
class Config(_BaseModel):
extra: Optional[str] = None
title: Optional[str] = None
allow_population_by_field_name: Optional[bool] = None
# def get_validator_template() -> Template:
# template_file_path: Path = Path('pydantic') / 'one_of_validator.jinja2'
# loader = FileSystemLoader(str(TEMPLATE_DIR / template_file_path.parent))
# environment: Environment = Environment(loader=loader, autoescape=True)
# return environment.get_template(template_file_path.name)
#
#
# VALIDATOR_TEMPLATE: Template = get_validator_template()
__all__ = [
'BaseModel',
'DataModelField',
'CustomRootType',
'DataClass',
'dump_resolve_reference_action',
'VALIDATOR_TEMPLATE',
]
| 28.116279 | 78 | 0.753515 | 147 | 0.121588 | 0 | 0 | 0 | 0 | 0 | 0 | 541 | 0.447477 |
aa4cf8c4cd4ceb51b8a674e955be5c558d98e6e8 | 2,313 | py | Python | tools/test_everything.py | cdleary/bazel_rules_hdl | 3ef172ce63cecf43bb4f3648bfc8cd887030d1a4 | [
"Apache-2.0"
] | null | null | null | tools/test_everything.py | cdleary/bazel_rules_hdl | 3ef172ce63cecf43bb4f3648bfc8cd887030d1a4 | [
"Apache-2.0"
] | null | null | null | tools/test_everything.py | cdleary/bazel_rules_hdl | 3ef172ce63cecf43bb4f3648bfc8cd887030d1a4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This script builds and tests everything that rules_hdl has.
This is what the CI builds do, and the script also works locally. Run it with
`tools/test_everything.py`.'''
import os
import subprocess
import sys
ALL_TARGETS = [
'//...',
'@at_clifford_icestorm//...',
'@at_clifford_yosys//...',
'@com_github_westes_flex//...',
'@com_github_yosyshq_nextpnr//...',
'@com_github_yosyshq_prjtrellis//...',
'@com_github_yosyshq_prjtrellis_db//...',
'@com_google_skywater_pdk//...',
'@com_google_skywater_pdk_sky130_fd_sc_ms//...',
'@com_google_skywater_pdk_sky130_fd_sc_ls//...',
'@com_google_skywater_pdk_sky130_fd_sc_lp//...',
'@com_google_skywater_pdk_sky130_fd_sc_hvl//...',
'@com_google_skywater_pdk_sky130_fd_sc_hs//...',
'@com_google_skywater_pdk_sky130_fd_sc_hdll//...',
'@com_google_skywater_pdk_sky130_fd_sc_hd//...',
'@com_google_skywater_pdk_sky130_fd_pr//...',
'@com_google_skywater_pdk_sky130_fd_io//...',
'@com_icarus_iverilog//...',
'@com_opencircuitdesign_magic//...',
'@com_opencircuitdesign_netgen//...',
'@edu_berkeley_abc//...',
'@net_sourceforge_ngspice//...',
'@net_zlib//...',
'@org_fftw//...',
'@org_gnu_bison//...',
'@org_gnu_gperf//...',
'@org_gnu_m4//...',
'@org_gnu_readline//...',
'@org_sourceware_bzip2//...',
'@org_sourceware_libffi//...',
'@org_tuxfamily_eigen//...',
'@pybind11//...',
'@tk_tcl//...',
]
for action in ['build', 'test']:
command = ' '.join([
'bazel', action, os.environ.get('EXTRA_BAZEL_ARGS', ''), os.environ.get('EXTRA_%s_BAZEL_ARGS' % action.upper(), '')
] + ALL_TARGETS)
print(command)
return_code = subprocess.call(command, shell=True)
if return_code != 0:
sys.exit(return_code)
| 33.521739 | 119 | 0.695633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,857 | 0.802853 |
aa4ec0e530ddfb88006f27f4a9ccb49f5c973bdc | 3,039 | py | Python | A069675/A069675_helper.py | sethtroisi/OEIS | 2c10b86d8a8be69aa8020623d4802e3d68772ede | [
"Apache-2.0"
] | 3 | 2019-05-25T23:08:48.000Z | 2021-12-11T03:59:42.000Z | A069675/A069675_helper.py | sethtroisi/OEIS | 2c10b86d8a8be69aa8020623d4802e3d68772ede | [
"Apache-2.0"
] | 1 | 2019-03-07T21:22:52.000Z | 2019-03-07T21:22:52.000Z | A069675/A069675_helper.py | sethtroisi/OEIS | 2c10b86d8a8be69aa8020623d4802e3d68772ede | [
"Apache-2.0"
] | 1 | 2021-04-29T06:35:07.000Z | 2021-04-29T06:35:07.000Z | import math
import gmpy2
# How many you want to find
MAX_COUNT = 500
K_COUNT = 3.7 # d = 1000 yields ~264
#for parallel C++
K_COST = 4.14 * 1e-11 # d = 5000 takes ~400s
K_FILTER_COST = 1.0 * 1e-9 # d = 5000, sieve = 30M takes 10.3s
def optimal_sieve(d, expected_cost):
non_trivial_a_b = d * 23 # removes 2, 3, 5,
expected_after_sieve = non_trivial_a_b
sieve_cost = 0
best_cost = expected_cost + 1.0
prime_pi = 3
current_prime = gmpy2.mpz(5)
while True:
if current_prime < 1e5:
group_size = 1
current_prime = int(gmpy2.next_prime(current_prime))
else:
# do groups of primes at the same time
group_size = int(current_prime / 10000)
current_prime += group_size * math.log(current_prime)
prime_pi += group_size
filter_rate = (1 - (0.99 / current_prime)) ** group_size
expected_after_sieve *= filter_rate
calc_cost = group_size * d * K_FILTER_COST
sieve_cost += calc_cost
filter_ratio = expected_after_sieve / non_trivial_a_b
new_cost = sieve_cost + filter_ratio * expected_cost
if new_cost > best_cost:
break
best_cost = new_cost
return (sieve_cost,
expected_cost * filter_ratio,
int(current_prime),
prime_pi,
int(expected_after_sieve))
def cost_test_d(d):
log_d = d * math.log(10)
# log_a is trivial compared to log_d
log_num = log_d # + log_a
# In theory log_num ^ 2
# In practice log_num ^ 2.3
d_cost = log_num ** 2.3
d_count = 1 / log_num
# 24 a,b pairs are valid
t_cost = 24 * K_COST * d_cost
t_count = 24 * K_COUNT * d_count
return t_cost, t_count
def maybe_M(n):
if n < 1e7:
return n
if n < 1e9:
return "{:.1f}M".format(n / 1e6)
if n < 1e12:
return "{:.1f}B".format(n / 1e9)
return "{:.1f}T".format(n / 1e12)
def maybe_H(n):
if n < 3 * 3600:
return "{:.1f} seconds".format(n)
if n < 2 * 86400:
return "{:.1f} hours".format(n / 3600.0)
if n < 365 * 86400:
return "{:.1f} days".format(n / 86400.0)
return "{:.1f} years".format(n / 86400.0 / 365.0)
expected_count = 170 # count below a googol
expected_cost = 0
last_print_count = 0
# paired with expected_count = 170 this helps with the initial
# not-quite-so normal zone of the function.
d = 100
while expected_count < MAX_COUNT:
mult = 1 if d < 1000 else int(math.sqrt(d))
t_cost, t_count = cost_test_d(d)
expected_cost += mult * t_cost
expected_count += mult * t_count
if int(expected_count) > int(last_print_count):
sieve_cost, post_sieve_cost, sieve_limit, prime_pi, to_check = \
optimal_sieve(d, expected_cost)
sieve_stats = "optimal sieve: PrimePi({}) ~= {}, leaves {} cost ~~{}".format(
maybe_M(sieve_limit), maybe_M(prime_pi),
to_check,
maybe_H(sieve_cost))
print ("expect {:.0f} around 10^{} ({}) cost: ~~{}".format(
expected_count, d, sieve_stats, maybe_H(post_sieve_cost)))
last_print_count = expected_count
d += mult
| 25.537815 | 81 | 0.636064 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 610 | 0.200724 |
aa4ef2d9ea34cac394dfd068462eacae94338cce | 8,849 | py | Python | devilry/devilry_examiner/views/assignment/bulkoperations/bulk_feedback.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 29 | 2015-01-18T22:56:23.000Z | 2020-11-10T21:28:27.000Z | devilry/devilry_examiner/views/assignment/bulkoperations/bulk_feedback.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 786 | 2015-01-06T16:10:18.000Z | 2022-03-16T11:10:50.000Z | devilry/devilry_examiner/views/assignment/bulkoperations/bulk_feedback.py | devilry/devilry-django | 9ae28e462dfa4cfee966ebacbca04ade9627e715 | [
"BSD-3-Clause"
] | 15 | 2015-04-06T06:18:43.000Z | 2021-02-24T12:28:30.000Z |
from django import forms
from django.contrib import messages
from django.db import models
from django.db import transaction
from django.http import HttpResponseRedirect, Http404
from django.utils import timezone
from django.utils.translation import gettext_lazy, pgettext_lazy
from django.views.generic import View
import django_rq
from devilry.apps.core import models as core_models
from devilry.devilry_comment import models as comment_models
from devilry.devilry_cradmin import devilry_listbuilder
from devilry.devilry_examiner.views.assignment.bulkoperations import bulk_operations_grouplist
from devilry.devilry_group import models as group_models
from devilry.devilry_email.feedback_email import feedback_email
class AssignPointsForm(bulk_operations_grouplist.SelectedAssignmentGroupForm):
"""
Subclassed the select form and adds a ``IntegerField`` for points.
"""
#: Set the amount of points.
points = forms.IntegerField(
min_value=0,
help_text='Add a score that will be given to all selected assignment groups.',
required=True,
label=pgettext_lazy('Points'))
def get_grading_points(self):
return self.cleaned_data['points']
class PointsTargetRenderer(bulk_operations_grouplist.AssignmentGroupTargetRenderer):
def get_field_layout(self):
layout = super(PointsTargetRenderer, self).get_field_layout()
layout.append('points')
return layout
class AssignPassedFailedForm(bulk_operations_grouplist.SelectedAssignmentGroupForm):
"""
Subclassed the select form and adds a ``Boolean`` field to provide a
passed/failed grade.
"""
#: Set delivery as passed or failed.
passed = forms.BooleanField(
label=pgettext_lazy('grading', 'Passed?'),
help_text=pgettext_lazy('grading', 'Check to provide a passing grade.'),
initial=True,
required=False)
def get_grading_points(self):
if self.cleaned_data['passed']:
return self.assignment.max_points
else:
return 0
class PassedFailedTargetRenderer(bulk_operations_grouplist.AssignmentGroupTargetRenderer):
def get_field_layout(self):
layout = super(PassedFailedTargetRenderer, self).get_field_layout()
layout.append('passed')
return layout
class AbstractBulkFeedbackListView(bulk_operations_grouplist.AbstractAssignmentGroupMultiSelectListFilterView):
"""
Base class that handles all the logic of bulk creating feedbacks.
Extend this class with a subclass that uses a form suited for the
:attr:``~.devilry.apps.core.models.Assignment.grading_system_plugin_id``.
Example:
Bulk feedback class points based Assignment::
class BulkFeedbackPassedFailedView(AbstractBulkFeedbackListView):
def get_filterlist_url(self, filters_string):
return self.request.cradmin_app.reverse_appurl(
'bulk-feedback-passedfailed-filter', kwargs={'filters_string': filters_string})
def get_target_renderer_class(self):
return PassedFailedTargetRenderer
def get_form_class(self):
return AssignPassedFailedForm
"""
value_renderer_class = devilry_listbuilder.assignmentgroup.ExaminerMultiselectItemValue
template_name = 'devilry_examiner/assignment/bulk_create_feedback.django.html'
def get_pagetitle(self):
return gettext_lazy('Bulk create feedback')
def get_filterlist_url(self, filters_string):
raise NotImplementedError()
def get_unfiltered_queryset_for_role(self, role):
queryset = super(AbstractBulkFeedbackListView, self).get_unfiltered_queryset_for_role(role)
return queryset\
.filter_examiner_has_access(user=self.request.user) \
.exclude(cached_data__last_published_feedbackset=models.F('cached_data__last_feedbackset'))
def __create_grading_groupcomment(self, feedback_set_id, published_time, text):
"""
Create an entry of :class:`~.devilry.devilry_group.models.GroupComment` as part of grading
for the :class:`~.devilry.devilry_group.models.FeedbackSet` that received feedback.
Args:
feedback_set_id: comment for this feedback.
published_time: Time the comment was published.
text: Text provided by examiner.
"""
group_models.GroupComment.objects.create(
feedback_set_id=feedback_set_id,
part_of_grading=True,
visibility=group_models.GroupComment.VISIBILITY_VISIBLE_TO_EVERYONE,
user=self.request.user,
user_role=comment_models.Comment.USER_ROLE_EXAMINER,
text=text,
comment_type=comment_models.Comment.COMMENT_TYPE_GROUPCOMMENT,
published_datetime=published_time
)
def form_valid(self, form):
"""
Creates entries of :class:`~.devilry.devilry_group.models.GroupComment`s for all the
:class:`~.devilry.devilry_group.models.FeedbackSet`s that is given a bulk feedback.
Note:
Using ``transaction.atomic()`` for single transaction when creating ``GroupComment``s and
updating the ``FeedbackSet``s.
If anything goes wrong, the transaction is rolled back and nothing is saved to the database.
Args:
form: cleaned form.
"""
feedback_set_ids = self.get_feedbackset_ids_from_posted_ids(form=form)
points = form.get_grading_points()
text = form.cleaned_data['feedback_comment_text']
# Cache anonymous display names before transaction. Needed for django messages.
displaynames = self.get_group_displaynames(form=form)
now_without_microseconds = timezone.now().replace(microsecond=0)
with transaction.atomic():
for feedback_set_id in feedback_set_ids:
self.__create_grading_groupcomment(
feedback_set_id=feedback_set_id,
published_time=now_without_microseconds,
text=text)
group_models.FeedbackSet.objects\
.filter(id__in=feedback_set_ids)\
.update(
grading_published_by=self.request.user,
grading_published_datetime=now_without_microseconds + timezone.timedelta(microseconds=1),
grading_points=points)
feedback_email.bulk_send_feedback_created_email(
assignment_id=self.assignment.id,
feedbackset_id_list=feedback_set_ids,
domain_url_start=self.request.build_absolute_uri('/'))
self.add_success_message(displaynames)
return super(AbstractBulkFeedbackListView, self).form_valid(form=form)
def add_success_message(self, anonymous_display_names):
message = gettext_lazy('Bulk added feedback for %(group_names)s') % {
'group_names': ', '.join(anonymous_display_names)}
messages.success(self.request, message=message)
class BulkFeedbackPointsView(AbstractBulkFeedbackListView):
"""
Handles bulkfeedback for assignment with points-based grading system.
"""
def get_filterlist_url(self, filters_string):
return self.request.cradmin_app.reverse_appurl(
'bulk-feedback-points-filter', kwargs={'filters_string': filters_string})
def get_target_renderer_class(self):
return PointsTargetRenderer
def get_form_class(self):
return AssignPointsForm
class BulkFeedbackPassedFailedView(AbstractBulkFeedbackListView):
"""
Handles bulkfeedback for assignment with passed/failed grading system.
"""
def get_filterlist_url(self, filters_string):
return self.request.cradmin_app.reverse_appurl(
'bulk-feedback-passedfailed-filter', kwargs={'filters_string': filters_string})
def get_target_renderer_class(self):
return PassedFailedTargetRenderer
def get_form_class(self):
return AssignPassedFailedForm
class BulkFeedbackRedirectView(View):
"""
Redirects to the appropriate view based on the assignments grading system type.
"""
def dispatch(self, request, *args, **kwargs):
grading_plugin_id = self.request.cradmin_role.grading_system_plugin_id
if grading_plugin_id == core_models.Assignment.GRADING_SYSTEM_PLUGIN_ID_POINTS:
return HttpResponseRedirect(request.cradmin_app.reverse_appurl('bulk-feedback-points'))
grading_plugin_id = self.request.cradmin_role.grading_system_plugin_id
if grading_plugin_id == core_models.Assignment.GRADING_SYSTEM_PLUGIN_ID_PASSEDFAILED:
return HttpResponseRedirect(request.cradmin_app.reverse_appurl('bulk-feedback-passedfailed'))
return Http404()
| 40.591743 | 111 | 0.711154 | 8,104 | 0.91581 | 0 | 0 | 0 | 0 | 0 | 0 | 2,821 | 0.318793 |
aa4fab7f9af5d5e5f5046cccd34fc0d3664a73ba | 3,258 | py | Python | tests/test_util.py | NickVeld/battery_handyman | 9be3f5b2fd5990a266f709853ef87508e47d4b6a | [
"Apache-2.0"
] | 2 | 2021-07-19T11:08:04.000Z | 2021-11-16T19:01:07.000Z | tests/test_util.py | NickVeld/battery_handyman | 9be3f5b2fd5990a266f709853ef87508e47d4b6a | [
"Apache-2.0"
] | null | null | null | tests/test_util.py | NickVeld/battery_handyman | 9be3f5b2fd5990a266f709853ef87508e47d4b6a | [
"Apache-2.0"
] | null | null | null | # Copyright [2021] [Nikolay Veld]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The tests for battery_handyman/util.py"""
import unittest.mock
import psutil
import pytest
import battery_handyman.constants
import battery_handyman.util
def test_get_battery_info():
"""Test that get_battery_info provides the needed data"""
actual_result = battery_handyman.util.get_battery_info()
# pylint: disable=no-member
assert actual_result.is_charging is not None
assert actual_result.left_in_percents is not None
@pytest.mark.parametrize(
"mock_value, expected_charging, expected_left_in_percents", [
pytest.param(
psutil._common.sbattery(
percent=98, secsleft=9898, power_plugged=False
), False, 98,
), pytest.param(
psutil._common.sbattery(
percent=42, secsleft=psutil._common.BatteryTime.POWER_TIME_UNLIMITED,
power_plugged=True
), True, 42
),
]
)
@unittest.mock.patch('psutil.sensors_battery')
# The arg order: mock, from the parametrization, the fixtures
def test_get_battery_info_with_mock(
mock_psutil_sensors_battery, mock_value, expected_charging, expected_left_in_percents
):
"""Test that get_battery_info process the value from the sensors correctly"""
mock_psutil_sensors_battery.return_value = mock_value
actual_result = battery_handyman.util.get_battery_info()
# pylint: disable=no-member
assert actual_result.is_charging == expected_charging
assert actual_result.left_in_percents == expected_left_in_percents
@pytest.mark.parametrize(
"request_template, expected_result", [
pytest.param("/power/{needs_charging}", ["needs_charging"]),
pytest.param("/cm?cmnd=Power%20{needs_charging}", ["needs_charging"]),
]
)
def test_parse_request_data_key_list(request_template, expected_result):
"""Test that parse_request_data_key_list parse the provided template correctly"""
actual_result = battery_handyman.util.parse_request_data_key_list(request_template)
assert actual_result == expected_result
def test_do_not_toogle_charging_exception():
"""Tests that default DoNotToogleChargingException provides the right message"""
try:
raise battery_handyman.util.DoNotToogleChargingException()
except battery_handyman.util.DoNotToogleChargingException as error:
assert str(error) == battery_handyman.constants.MSG_CHARGING_MUST_NOT_TO_BE_TOOGLED
test_message = "Test message"
try:
raise battery_handyman.util.DoNotToogleChargingException(test_message)
except battery_handyman.util.DoNotToogleChargingException as error:
assert str(error) == test_message
| 39.253012 | 93 | 0.741559 | 0 | 0 | 0 | 0 | 1,585 | 0.486495 | 0 | 0 | 1,274 | 0.391037 |
aa517365a385efd5eb9f84b7a97466471cc557a7 | 19,978 | py | Python | tests/http_wrappers/test_response.py | pabarros/asgard-api | 3c10d5f99f584df5e8011558cf42e8b201d567e9 | [
"MIT"
] | 3 | 2020-01-10T02:16:09.000Z | 2020-02-19T18:42:37.000Z | tests/http_wrappers/test_response.py | pabarros/asgard-api | 3c10d5f99f584df5e8011558cf42e8b201d567e9 | [
"MIT"
] | 13 | 2020-01-15T18:22:35.000Z | 2021-03-31T19:21:54.000Z | tests/http_wrappers/test_response.py | rockerbacon/asgard-api | 1c1eb19225ace4bbecb06b65b1b9c4ab131eb24a | [
"MIT"
] | 6 | 2020-03-07T09:49:19.000Z | 2021-07-25T03:14:10.000Z | import json
import unittest
from copy import deepcopy
from http import HTTPStatus
from unittest.mock import call, patch
from flask import Response as FlaskResponse
from marathon import MarathonApp
from marathon.models.group import MarathonGroup
from marathon.models.task import MarathonTask
from asgard.models.account import AccountDB as Account
from hollowman.app import application
from hollowman.http_wrappers import Response
from hollowman.marathon.group import AsgardAppGroup
from hollowman.marathonapp import AsgardApp
from hollowman.models import User
from tests.utils import with_json_fixture
class ResponseTest(unittest.TestCase):
def test_remove_namespace_if_exists(self):
response = Response(None, None)
self.assertEqual("", response._remove_namespace_if_exists("dev", ""))
self.assertEqual("/", response._remove_namespace_if_exists("dev", "/"))
self.assertEqual(
"/", response._remove_namespace_if_exists("dev", "/dev/")
)
self.assertEqual(
"", response._remove_namespace_if_exists("dev", "/dev")
)
self.assertEqual(
"/foo", response._remove_namespace_if_exists("dev", "/dev/foo")
)
self.assertEqual(
"/foo/dev",
response._remove_namespace_if_exists("dev", "/dev/foo/dev"),
)
self.assertEqual(
"/dev", response._remove_namespace_if_exists("dev", "/dev/dev")
)
self.assertEqual(
None, response._remove_namespace_if_exists("dev", None)
)
class SplitTests(unittest.TestCase):
def setUp(self):
self.empty_ok_response = FlaskResponse(
response=b"{}", status=HTTPStatus.OK, headers={}
)
self.user = User(tx_name="User One", tx_email="user@host.com")
self.user.current_account = Account(
name="Dev", namespace="dev", owner="company"
)
@with_json_fixture("single_full_app.json")
def test_a_single_app_response_returns_a_single_marathonapp(self, fixture):
with application.test_request_context(
"/v2/apps//foo", method="GET", data=b""
) as ctx:
flask_response = FlaskResponse(
response=json.dumps({"app": fixture}),
status=HTTPStatus.OK,
headers={},
)
response = Response(ctx.request, flask_response)
with patch.object(response, "marathon_client") as client:
client.get_app.return_value = AsgardApp.from_json(fixture)
apps = list(response.split())
self.assertEqual([call("/foo")], client.get_app.call_args_list)
self.assertEqual(
apps,
[(AsgardApp.from_json(fixture), client.get_app.return_value)],
)
@with_json_fixture("single_full_app.json")
def test_multiapp_response_returns_multiple_marathonapp_instances(
self, fixture
):
modified_app = fixture.copy()
modified_app["id"] = "/xablau"
apps = [fixture, modified_app]
with application.test_request_context(
"/v2/apps/", method="GET", data=b""
) as ctx:
response = FlaskResponse(
response=json.dumps({"apps": apps}),
status=HTTPStatus.OK,
headers={},
)
response = Response(ctx.request, response)
with patch.object(response, "marathon_client") as client:
original_apps = [MarathonApp.from_json(app) for app in apps]
client.get_app.side_effect = original_apps
apps = list(response.split())
self.assertEqual(
apps,
[
(AsgardApp.from_json(fixture), original_apps[0]),
(AsgardApp.from_json(modified_app), original_apps[1]),
],
)
@with_json_fixture("single_full_app.json")
def test_a_response_for_restart_operation_with_appid_in_url_path_does_not_split_response(
self, fixture
):
"""
Quando o response retorna um Deployment, não fazemos split.
"""
with application.test_request_context(
"/v2/apps/xablau/restart", method="PUT", data=b'{"force": true}'
) as ctx:
response = FlaskResponse(
response=b"{}", status=HTTPStatus.OK, headers={}
)
response = Response(ctx.request, response)
apps = list(response.split())
self.assertEqual(0, len(apps))
@with_json_fixture("../fixtures/group_dev_namespace_with_apps.json")
def test_split_groups_read_on_root_group(self, group_dev_namespace_fixture):
with application.test_request_context(
"/v2/groups/", method="GET"
) as ctx:
response = FlaskResponse(
response=json.dumps(group_dev_namespace_fixture),
status=HTTPStatus.OK,
headers={},
)
ctx.request.user = self.user
response = Response(ctx.request, response)
groups_tuple = list(response.split())
self.assertEqual(5, len(groups_tuple))
expected_groups = [
AsgardAppGroup(g)
for g in AsgardAppGroup(
MarathonGroup.from_json(group_dev_namespace_fixture)
).iterate_groups()
]
# Compara com os groups originais
self.assertEqual(expected_groups, [g[1] for g in groups_tuple])
@with_json_fixture("../fixtures/group_dev_namespace_with_apps.json")
def test_split_group_nonroot_empty_group(self, group_dev_namespace_fixture):
with application.test_request_context(
"/v2/groups/group-c", method="GET"
) as ctx:
response = FlaskResponse(
response=json.dumps(group_dev_namespace_fixture["groups"][2]),
status=HTTPStatus.OK,
headers={},
)
ctx.request.user = self.user
response = Response(ctx.request, response)
groups_tuple = list(response.split())
self.assertEqual(1, len(groups_tuple))
expected_groups = [
AsgardAppGroup(g)
for g in AsgardAppGroup(
MarathonGroup.from_json(
group_dev_namespace_fixture["groups"][2]
)
).iterate_groups()
]
# Compara com os groups originais
self.assertEqual(expected_groups, [g[1] for g in groups_tuple])
@unittest.skip("A ser implementado")
def test_split_groups_write_PUT_on_group(self):
self.fail()
@with_json_fixture("../fixtures/group_dev_namespace_with_apps.json")
def test_split_groups_read_on_specific_group(
self, group_dev_namespace_fixture
):
with application.test_request_context(
"/v2/groups/group-b", method="GET"
) as ctx:
response = FlaskResponse(
response=json.dumps(group_dev_namespace_fixture["groups"][1]),
status=HTTPStatus.OK,
headers={},
)
ctx.request.user = self.user
response = Response(ctx.request, response)
groups_tuple = list(response.split())
self.assertEqual(2, len(groups_tuple))
expected_groups = [
AsgardAppGroup(g)
for g in AsgardAppGroup(
MarathonGroup.from_json(
group_dev_namespace_fixture["groups"][1]
)
).iterate_groups()
]
# Compara com os groups originais
self.assertEqual(expected_groups, [g[1] for g in groups_tuple])
@with_json_fixture("../fixtures/tasks/get.json")
def test_split_tasks_GET(self, tasks_get_fixture):
"""
No cado de um GET, o retorno sempre é uma lista de apps.
"""
with application.test_request_context(
"/v2/tasks/", method="GET"
) as ctx:
response = FlaskResponse(
response=json.dumps(tasks_get_fixture), status=HTTPStatus.OK
)
ctx.request.user = self.user
response = Response(ctx.request, response)
tasks_tuple = list(response.split())
self.assertEqual(
[
MarathonTask.from_json(task)
for task in tasks_get_fixture["tasks"]
],
[task[0] for task in tasks_tuple],
)
@with_json_fixture("../fixtures/tasks/get.json")
def test_split_staks_POST_scale_false(self, tasks_get_fixture):
"""
No caso do POST com `?scale=false` o retorno é:
- Lista de apps que foram killed
Por isso usamos a fixture de tasks/get.json aqui
"""
with application.test_request_context(
"/v2/tasks/delete?scale=false", method="POST"
) as ctx:
response = FlaskResponse(
response=json.dumps(tasks_get_fixture), status=HTTPStatus.OK
)
ctx.request.user = self.user
response = Response(ctx.request, response)
tasks_tuple = list(response.split())
self.assertEqual(
[
MarathonTask.from_json(task)
for task in tasks_get_fixture["tasks"]
],
[task[0] for task in tasks_tuple],
)
@with_json_fixture("../fixtures/tasks/post?scale=true.json")
def test_split_staks_POST_scale_true(self, tasks_post_fixture):
"""
No caso do POST com `?scale=true` o retorno é:
- Deployment Id
Isso significa que não faremos split do response
"""
with application.test_request_context(
"/v2/tasks/delete?scale=true", method="POST"
) as ctx:
response = FlaskResponse(
response=json.dumps(tasks_post_fixture), status=HTTPStatus.OK
)
ctx.request.user = self.user
response = Response(ctx.request, response)
tasks_tuple = list(response.split())
self.assertEqual(0, len(tasks_tuple))
@with_json_fixture("../fixtures/queue/get.json")
def test_split_queue_GET(self, queue_get_fixture):
with application.test_request_context("/v2/queue", method="GET") as ctx:
response = FlaskResponse(
response=json.dumps(queue_get_fixture), status=HTTPStatus.OK
)
ctx.request.user = self.user
response = Response(ctx.request, response)
queue_tuples = list(response.split())
self.assertEqual(2, len(queue_tuples))
class JoinTests(unittest.TestCase):
    """Tests for Response.join(): reassembling a single upstream payload
    from the items previously produced by Response.split().
    """

    def setUp(self):
        # User bound to the "dev" namespace account; join() relies on it
        # when re-rendering namespaced resources.
        self.user = User(tx_name="User One", tx_email="user@host.com")
        self.user.current_account = Account(
            name="Dev", namespace="dev", owner="company"
        )

    def test_join_a_uknown_response(self):
        """
        Since the response handling runs for every request that returned 200
        from the upstream, it often sees requests it does not "handle",
        i.e. requests for which it has nothing to do.
        This test makes sure join() does not break in such cases.
        """
        with application.test_request_context(
            "/v2/apps/myapp/restart", method="POST"
        ) as ctx:
            response = FlaskResponse(
                response=json.dumps({"deploymentId": "myId"}),
                status=HTTPStatus.OK,
            )
            ctx.request.user = self.user
            response = Response(ctx.request, response)
            joined_response = response.join([])
            joined_response_data = json.loads(joined_response.data)
            self.assertEqual("myId", joined_response_data["deploymentId"])

    @with_json_fixture("single_full_app.json")
    def test_it_recreates_a_get_response_for_a_single_app(self, fixture):
        with application.test_request_context(
            "/v2/apps//foo", method="GET", data=b""
        ) as ctx:
            response = FlaskResponse(
                response=json.dumps({"app": fixture}),
                status=HTTPStatus.OK,
                headers={},
            )
            response = Response(ctx.request, response)
            # The marathon client is mocked so join() re-renders the app
            # from the same fixture data split() consumed.
            with patch.object(response, "marathon_client") as client:
                client.get_app.return_value = AsgardApp.from_json(deepcopy(fixture))
                apps = list(response.split())
                joined_response = response.join(apps)
            self.assertIsInstance(joined_response, FlaskResponse)
            self.assertDictEqual(
                json.loads(joined_response.data), {"app": fixture}
            )

    @with_json_fixture("single_full_app.json")
    def test_it_recreates_a_get_response_for_multiple_apps(self, fixture):
        modified_app = deepcopy(fixture)
        modified_app["id"] = "/xablau"
        fixtures = [fixture, modified_app]
        expected_response = deepcopy(fixtures)
        with application.test_request_context(
            "/v2/apps/", method="GET", data=b""
        ) as ctx:
            response = FlaskResponse(
                response=json.dumps({"apps": fixtures}),
                status=HTTPStatus.OK,
                headers={},
            )
            response = Response(ctx.request, response)
            # side_effect feeds one original app per get_app() call, in order.
            with patch.object(response, "marathon_client") as client:
                original_apps = [AsgardApp.from_json(app) for app in fixtures]
                client.get_app.side_effect = original_apps
                apps = list(response.split())
                joined_response = response.join(apps)
            self.assertIsInstance(joined_response, FlaskResponse)
            self.assertDictEqual(
                json.loads(joined_response.data), {"apps": expected_response}
            )

    @with_json_fixture("single_full_app.json")
    def test_should_join_an_empty_list_into_an_empty_response_single_app(
        self, single_full_app_fixture
    ):
        # Joining nothing for a single-app request yields an empty app body.
        with application.test_request_context(
            "/v2/apps//foo", method="GET", data=b""
        ) as ctx:
            response = FlaskResponse(
                response=json.dumps({"app": single_full_app_fixture}),
                status=HTTPStatus.OK,
                headers={},
            )
            response = Response(ctx.request, response)
            joined_response = response.join([])
            self.assertIsInstance(joined_response, FlaskResponse)
            self.assertDictEqual(json.loads(joined_response.data), {"app": {}})

    @with_json_fixture("single_full_app.json")
    def test_should_join_an_empty_list_into_an_empty_response_multi_app(
        self, single_full_app_fixture
    ):
        # Joining nothing for a multi-app request yields an empty app list.
        modified_app = deepcopy(single_full_app_fixture)
        modified_app["id"] = "/other-app"
        fixtures = [single_full_app_fixture, modified_app]
        expected_response = deepcopy(fixtures)
        with application.test_request_context(
            "/v2/apps/", method="GET", data=b""
        ) as ctx:
            response = FlaskResponse(
                response=json.dumps({"apps": fixtures}),
                status=HTTPStatus.OK,
                headers={},
            )
            response = Response(ctx.request, response)
            joined_response = response.join([])
            self.assertIsInstance(joined_response, FlaskResponse)
            self.assertDictEqual(json.loads(joined_response.data), {"apps": []})

    @with_json_fixture("../fixtures/group_dev_namespace_with_one_full_app.json")
    def test_join_groups(self, group_dev_namespace_fixture):
        with application.test_request_context(
            "/v2/groups/", method="GET"
        ) as ctx:
            response = FlaskResponse(
                response=json.dumps(group_dev_namespace_fixture),
                status=HTTPStatus.OK,
                headers={},
            )
            ctx.request.user = self.user
            response = Response(ctx.request, response)
            groups_tuple = list(response.split())
            joined_response = response.join(groups_tuple)
            joined_response_data = json.loads(joined_response.data)
            self.assertEqual("/dev", joined_response_data["id"])
            self.assertEqual(
                "/dev/group-b", joined_response_data["groups"][0]["id"]
            )
            self.assertEqual(
                [], joined_response_data["dependencies"]
            )  # Groups should be rendered in full
            self.assertEqual(1, len(joined_response_data["groups"][0]["apps"]))
            self.assertEqual(
                [], joined_response_data["groups"][0]["apps"][0]["constraints"]
            )  # Apps should also be rendered in full

    @with_json_fixture("../fixtures/tasks/get_single_namespace.json")
    def test_join_tasks_GET(self, tasks_single_namespace_fixture):
        with application.test_request_context(
            "/v2/tasks/", method="GET"
        ) as ctx:
            response = FlaskResponse(
                response=json.dumps(tasks_single_namespace_fixture),
                status=HTTPStatus.OK,
            )
            ctx.request.user = self.user
            response = Response(ctx.request, response)
            tasks_tuple = list(response.split())
            joined_response = response.join(tasks_tuple)
            joined_response_data = json.loads(joined_response.data)
            self.assertEqual(3, len(joined_response_data["tasks"]))

    def test_join_tasks_empty_list_GET(self):
        """
        If the request is a GET and the task list is empty, every task was
        filtered out of the response, which means we must return an empty
        response.
        """
        with application.test_request_context(
            "/v2/tasks/", method="GET"
        ) as ctx:
            response = FlaskResponse(
                response=json.dumps({"tasks": [{"id": "some-filtered-task"}]}),
                status=HTTPStatus.OK,
            )
            ctx.request.user = self.user
            response = Response(ctx.request, response)
            joined_response = response.join([])
            joined_response_data = json.loads(joined_response.data)
            self.assertEqual(0, len(joined_response_data["tasks"]))

    @with_json_fixture("../fixtures/tasks/post?scale=true.json")
    def test_join_tasks_POST_scale_true(self, tasks_post_fixture):
        # With ?scale=true the upstream answer is a deployment id, which
        # join() must pass through untouched.
        with application.test_request_context(
            "/v2/tasks/delete?scale=true", method="POST"
        ) as ctx:
            response = FlaskResponse(
                response=json.dumps(tasks_post_fixture), status=HTTPStatus.OK
            )
            ctx.request.user = self.user
            response = Response(ctx.request, response)
            tasks_tuple = list(response.split())
            joined_response = response.join(tasks_tuple)
            joined_response_data = json.loads(joined_response.data)
            self.assertEqual(
                "5ed4c0c5-9ff8-4a6f-a0cd-f57f59a34b43",
                joined_response_data["deploymentId"],
            )

    @with_json_fixture("../fixtures/tasks/get.json")
    def test_join_tasks_POST_scale_false(self, tasks_get_fixture):
        # With ?scale=false the answer is a task list, rebuilt by join().
        with application.test_request_context(
            "/v2/tasks/delete?scale=false", method="POST"
        ) as ctx:
            response = FlaskResponse(
                response=json.dumps(tasks_get_fixture), status=HTTPStatus.OK
            )
            ctx.request.user = self.user
            response = Response(ctx.request, response)
            tasks_tuple = list(response.split())
            joined_response = response.join(tasks_tuple)
            joined_response_data = json.loads(joined_response.data)
            self.assertEqual(3, len(joined_response_data["tasks"]))
| 38.642166 | 93 | 0.598659 | 19,374 | 0.969379 | 0 | 0 | 15,991 | 0.80011 | 0 | 0 | 2,946 | 0.147403 |
aa51ffea60a8f3722501b0b1a0f4252d675d1628 | 1,481 | py | Python | airbyte-integrations/connectors/source-chargebee/unit_tests/conftest.py | onaio/airbyte | 38302e82a25f1b66742c3febfbff0668556920f2 | [
"MIT"
] | 22 | 2020-08-27T00:47:20.000Z | 2020-09-17T15:39:39.000Z | airbyte-integrations/connectors/source-chargebee/unit_tests/conftest.py | onaio/airbyte | 38302e82a25f1b66742c3febfbff0668556920f2 | [
"MIT"
] | 116 | 2020-08-27T01:11:27.000Z | 2020-09-19T02:47:52.000Z | airbyte-integrations/connectors/source-chargebee/unit_tests/conftest.py | onaio/airbyte | 38302e82a25f1b66742c3febfbff0668556920f2 | [
"MIT"
] | 1 | 2020-09-15T06:10:01.000Z | 2020-09-15T06:10:01.000Z | #
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import json
import os
from pytest import fixture
def load_file(fn):
    """Return the contents of response fixture ``fn`` as a string.

    Fixtures live under ``unit_tests/responses`` relative to the test
    working directory. The original implementation leaked the file handle
    (``open(...).read()`` without closing); a context manager guarantees
    the file is closed even if reading fails.
    """
    with open(os.path.join("unit_tests", "responses", fn)) as f:
        return f.read()
# Keys shared by both connector-config fixtures; only the product catalog
# version differs between v1 and v2.
_BASE_CONFIG = {
    "site": "airbyte-test",
    "site_api_key": "site_api_key",
    "start_date": "2021-05-22T06:57:44Z",
}


def _response_json(filename):
    """Load a canned API response fixture and parse it as JSON."""
    return json.loads(load_file(filename))


@fixture
def test_config_v1():
    return {**_BASE_CONFIG, "product_catalog": "1.0"}


@fixture
def test_config_v2():
    return {**_BASE_CONFIG, "product_catalog": "2.0"}


@fixture
def addons_response():
    return _response_json("addons.json")


@fixture
def plans_response():
    return _response_json("plans.json")


@fixture
def coupons_response():
    return _response_json("coupons.json")


@fixture
def customers_response():
    return _response_json("customers.json")


@fixture
def invoices_response():
    return _response_json("invoices.json")


@fixture
def orders_response():
    return _response_json("orders.json")


@fixture
def events_response():
    return _response_json("events.json")


@fixture
def subscriptions_response():
    return _response_json("subscriptions.json")


@fixture
def items_response():
    return _response_json("items.json")


@fixture
def item_prices_response():
    return _response_json("item_prices.json")


@fixture
def attached_items_response():
    return _response_json("attached_items.json")
| 18.987179 | 131 | 0.717083 | 0 | 0 | 0 | 0 | 1,241 | 0.837947 | 0 | 0 | 456 | 0.3079 |
aa5257c3d2bbc31453ce0957dc8133ceb385c1ff | 3,855 | py | Python | tests/test_rfc8226.py | alvistack/etingof-pyasn1-modules | bdbcc5d9650a8e8382979f089df3307dd4121b49 | [
"BSD-2-Clause"
] | 34 | 2016-04-03T09:10:31.000Z | 2022-02-12T20:38:31.000Z | tests/test_rfc8226.py | alvistack/etingof-pyasn1-modules | bdbcc5d9650a8e8382979f089df3307dd4121b49 | [
"BSD-2-Clause"
] | 138 | 2017-05-31T09:25:10.000Z | 2022-02-07T09:00:19.000Z | tests/test_rfc8226.py | alvistack/etingof-pyasn1-modules | bdbcc5d9650a8e8382979f089df3307dd4121b49 | [
"BSD-2-Clause"
] | 36 | 2016-03-16T00:37:04.000Z | 2021-11-12T12:09:43.000Z | #
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.codec.der import encoder as der_encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc8226
class JWTClaimConstraintsTestCase(unittest.TestCase):
jwtcc_pem_text = ("MD2gBzAFFgNmb2+hMjAwMBkWA2ZvbzASDARmb28xDARmb28yDARmb2"
"8zMBMWA2JhcjAMDARiYXIxDARiYXIy")
def setUp(self):
self.asn1Spec = rfc8226.JWTClaimConstraints()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.jwtcc_pem_text)
asn1Object, rest = der_decoder.decode(
substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder.encode(asn1Object))
class TNAuthorizationListTestCase(unittest.TestCase):
tnal_pem_text = ("MCugBxYFYm9ndXOhEjAQFgo1NzE1NTUxMjEyAgIDFKIMFgo3MDM1NTU"
"xMjEy")
def setUp(self):
self.asn1Spec = rfc8226.TNAuthorizationList()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.tnal_pem_text)
asn1Object, rest = der_decoder.decode(
substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder.encode(asn1Object))
class CertificateOpenTypesTestCase(unittest.TestCase):
cert_pem_text = """\
MIICkTCCAhegAwIBAgIJAKWzVCgbsG4+MAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
dXMgQ0EwHhcNMTkwNzE4MTUwNzQ5WhcNMjAwNzE3MTUwNzQ5WjBxMQswCQYDVQQG
EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xKDAmBgNVBAoTH0Zh
a2UgVGVsZXBob25lIFNlcnZpY2UgUHJvdmlkZXIxGTAXBgNVBAMTEGZha2UuZXhh
bXBsZS5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARLyLhnsvrS9WBY29tmN2LI
CF/wuX4ohhUy3sxO0ynCplHHojpDg+tghGzusf0aLtMDu1II915O8YK5XVL+KZJD
C82jybxWIKjjzX2qc5/O06joUttdEDzkTaD0kgbcXl6jgawwgakwCwYDVR0PBAQD
AgeAMEIGCWCGSAGG+EIBDQQ1FjNUaGlzIGNlcnRpZmljYXRlIGNhbm5vdCBiZSB0
cnVzdGVkIGZvciBhbnkgcHVycG9zZS4wHQYDVR0OBBYEFHOI3GpDt9dWsTAZxhcj
96uyL2aIMB8GA1UdIwQYMBaAFPI12zQE2qVV8r1pA5mwYuziFQjBMBYGCCsGAQUF
BwEaBAowCKAGFgRmYWtlMAoGCCqGSM49BAMDA2gAMGUCMQCy+qFhT7X1i18jcyIa
Jkgz/tumrPsaBA2RihkooTEr4GbqC650Z4Cwt7+x2xZq37sCMFSM6fRueLyV5StG
yEFWA6G95b/HbtPMTjLpPKtrOjhofc4LyVCDYhFhKzpvHh1qeA==
"""
def setUp(self):
self.asn1Spec = rfc5280.Certificate()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.cert_pem_text)
asn1Object, rest = der_decoder.decode(
substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder.encode(asn1Object))
extn_list = []
for extn in asn1Object['tbsCertificate']['extensions']:
extn_list.append(extn['extnID'])
if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
extnValue, rest = der_decoder.decode(
extn['extnValue'],
asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
self.assertEqual(
extn['extnValue'], der_encoder.encode(extnValue))
if extn['extnID'] == rfc8226.id_pe_TNAuthList:
self.assertEqual('fake', extnValue[0]['spc'])
self.assertIn(rfc8226.id_pe_TNAuthList, extn_list)
# Collect every TestCase defined in this module so the file can be run
# directly as a script.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)
    outcome = runner.run(suite)
    # Exit code 0 on success, 1 on any failure or error.
    sys.exit(not outcome.wasSuccessful())
| 36.714286 | 78 | 0.751751 | 3,234 | 0.838911 | 0 | 0 | 0 | 0 | 0 | 0 | 1,327 | 0.344228 |
aa53b31e51246da4e5cbdd1eaa930f83edb36fc7 | 533 | py | Python | Maior_e_Menor.py | PedroFelipe-G-Arruda/Grupo-python | a43e6575cf3b7c0de22aa3b19995a86ccab37ad2 | [
"MIT"
] | null | null | null | Maior_e_Menor.py | PedroFelipe-G-Arruda/Grupo-python | a43e6575cf3b7c0de22aa3b19995a86ccab37ad2 | [
"MIT"
] | null | null | null | Maior_e_Menor.py | PedroFelipe-G-Arruda/Grupo-python | a43e6575cf3b7c0de22aa3b19995a86ccab37ad2 | [
"MIT"
] | null | null | null | a = float(input('digite um numero:'))
b = float(input('digite um numero:'))
c = float(input('digite um numero:'))
if a > b:
if a > c:
ma = a
if b > c:
mi = c
if c > b:
mi = b
if b > a:
if b > c:
ma = b
if a > c:
mi = c
if c > a:
mi = a
if c > b:
if c > a:
ma = c
if b > a:
mi = a
if a > b:
mi = b
print('O menor numero e : {} e o maior e {}'.format(mi, ma)) | 18.37931 | 60 | 0.348968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 95 | 0.178236 |
aa580b5cf5cb0ae3973ea18eba36047b23561408 | 699 | py | Python | nosai/numbers.py | kurianbenoy/Reading_like_AI | 47eaa1353c86a8e5b3b8b6a2b3833e08039d47f8 | [
"MIT"
] | null | null | null | nosai/numbers.py | kurianbenoy/Reading_like_AI | 47eaa1353c86a8e5b3b8b6a2b3833e08039d47f8 | [
"MIT"
] | 3 | 2020-11-15T16:14:18.000Z | 2020-11-15T16:16:08.000Z | nosai/numbers.py | kurianbenoy/Reading_like_AI | 47eaa1353c86a8e5b3b8b6a2b3833e08039d47f8 | [
"MIT"
] | null | null | null | """
1999- Nineteen Ninty Nine
1888 - Eighteen Eighty Eight
1777 - Seventeen Seventy Seven
1111 - Oneeen Onety One
Not fully huristics
"""
from num2words import num2words
def spell(N):
    """Spell a year-like number, e.g. 1999 -> "nineteen ninety-nine".

    The module docstring's examples (1111 -> "Oneeen Onety One") show the
    intentionally invented words used for the 11/12 and "onety" cases.

    NOTE(review): numbers >= 10000 fall through every branch and the
    function implicitly returns None — confirm whether callers expect
    that, or whether an explicit return/error is wanted.
    """
    # Only handle numbers below 10_000 (note: 1_0000 == 10000).
    if N // 1_0000 ==0:
        # Split e.g. 1999 into top=19 (leading pair) and bot=99.
        N = divmod(N, 100)
        top, bot = N[0], N[1]
        result = [ ]
        # Invented "teen" words so 11xx/12xx read like other centuries.
        if top == 11:
            result.append("OneTeen")
        elif top == 12:
            result.append("TwoTeen")
        else:
            result.append(num2words(top))
        botx, boty = divmod(bot, 10)
        # Invented "Onety" prefix for a bottom half in the 10-19 range.
        if botx == 1:
            result.append("Onety")
            result.append(num2words(boty))
        else:
            result.append(num2words(bot))
        return ' '.join(result)
| 17.923077 | 42 | 0.532189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.237482 |
aa5994c29326d8c20f2aaf6fae735a3edfc657c4 | 3,711 | py | Python | src/preprocessing.py | kushagrasharma/bilstm-for-seq2f | 6e576cb5bbc3d0a3c2e0a4735f0f1cc95187690f | [
"MIT"
] | null | null | null | src/preprocessing.py | kushagrasharma/bilstm-for-seq2f | 6e576cb5bbc3d0a3c2e0a4735f0f1cc95187690f | [
"MIT"
] | null | null | null | src/preprocessing.py | kushagrasharma/bilstm-for-seq2f | 6e576cb5bbc3d0a3c2e0a4735f0f1cc95187690f | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
# Integer class label for each protein family; 0 is the negative class.
function2idx = {"negative": 0, "ferritin": 1, "gpcr": 2, "p450": 3, "protease": 4}
# Locations of the raw UniProt exports and of the processed output,
# relative to the directory this script is run from.
input_dir = '../data/raw/'
data_dir = '../data/processed/'
# Sequences are truncated (and long ones mirrored) to this many residues.
max_seq_len = 800
def _load_positive_tsv(filename, function_name):
    """Load one positive-class UniProt TSV export and normalize it to the
    ``(id, sequence, function)`` column layout shared by all inputs."""
    df = pd.read_csv(input_dir + filename, sep='\t', skiprows=(0), header=(0))
    df.drop(['Entry name', "Status"], axis=1, inplace=True)
    df.columns = ['id', 'sequence']
    df['function'] = function2idx[function_name]
    return df


def read_and_concat_data():
    """Read every raw UniProt export and build one labeled DataFrame.

    Returns a DataFrame with columns ``['id', 'sequence', 'function']``
    where ``function`` is the integer class label from ``function2idx``.
    Positives are sorted first so that any id present in both the negative
    and a positive export keeps its positive label after deduplication.
    """
    df_cysteine = _load_positive_tsv(
        'uniprot-cysteine+protease+AND+reviewed_yes.tab', 'protease')
    df_serine = _load_positive_tsv(
        'uniprot-serine+protease+AND+reviewed_yes.tab', 'protease')
    df_gpcr = _load_positive_tsv(
        'uniprot-gpcr+AND+reviewed_yes.tab', 'gpcr')
    df_p450 = _load_positive_tsv(
        'uniprot-p450+AND+reviewed_yes.tab', 'p450')
    df_f = _load_positive_tsv(
        'uniprot-ferritin-filtered-reviewed_yes.tab', 'ferritin')

    df_positive = pd.concat(
        [df_cysteine, df_serine, df_f, df_gpcr, df_p450], ignore_index=True)
    duplicates = list(df_positive[df_positive.duplicated('id')].id)

    # Negative (background) class: the full reviewed UniProt export.
    df_uniprot = pd.read_csv(
        input_dir + 'uniprot-reviewed_yes.tab', sep='\t', skiprows=(0),
        header=(0))
    df_uniprot = df_uniprot.drop(
        ["Entry name", "Status", "Gene names",
         "Gene ontology (molecular function)", "Gene ontology IDs",
         "Gene ontology (cellular component)",
         "Gene ontology (biological process)", "Gene ontology (GO)"],
        axis=1)
    df_uniprot['function'] = function2idx['negative']
    df_uniprot.columns = ['id', 'sequence', 'function']
    # BUG FIX: this filter's result was previously discarded (the
    # expression was never assigned back), so it had no effect and the
    # duplicated ids were only removed by the later drop_duplicates call.
    df_uniprot = df_uniprot[~df_uniprot.id.isin(duplicates)]

    df_all = pd.concat([df_uniprot, df_positive], ignore_index=True)
    # Descending sort puts positive labels first so drop_duplicates (which
    # keeps the first occurrence) prefers the positive copy of an id.
    df_all.sort_values(by='function', inplace=True, ascending=False)
    df_all = df_all.drop_duplicates(subset='id').reset_index(drop=True)
    print("Finished reading raw data and concating")
    return df_all
def clean_sequence_length(dataframe):
    """Cap every sequence at ``max_seq_len`` residues.

    For proteins longer than the cap, a second row (id suffixed ``_r``)
    is added holding the reversed sequence, so the C-terminal residues
    are also represented after truncation. Adds a ``length`` column and
    returns the frame sorted by it.
    """
    too_long = dataframe[dataframe.sequence.apply(len) > max_seq_len]
    mirrored = pd.DataFrame(
        [[row.id + '_r', row.sequence[::-1], row.function]
         for _, row in too_long.iterrows()],
        columns=['id', 'sequence', 'function'],
    )
    combined = pd.concat([dataframe, mirrored], ignore_index=True)
    # Truncate every sequence (original and mirrored) to the cap.
    combined['sequence'] = combined.sequence.apply(lambda seq: seq[:max_seq_len])
    combined['length'] = combined.sequence.apply(len)
    combined = combined.sort_values(by='length').reset_index(drop=True)
    print("Finished cleaning sequences by length")
    return combined
# Build the full labeled dataset and persist it as two parallel text files
# (one sequence per line, one label per line) for downstream training.
# NOTE(review): this runs at import time; wrap in a __main__ guard if this
# module is ever imported elsewhere.
df = read_and_concat_data()
df = clean_sequence_length(df)
np.savetxt(data_dir + 'sequence.txt', df.sequence.values, fmt='%s')
np.savetxt(data_dir + 'function.txt', df.function.values, fmt='%s')
print("Saved sequence and function to txt")
aa599682efaab603093212095b2252a141edf8de | 509 | py | Python | errores.py | facmartoni/python_exercises | 7f05c7491a0eee05e32f04c7f07ddc1ba688b7a2 | [
"Apache-2.0"
] | null | null | null | errores.py | facmartoni/python_exercises | 7f05c7491a0eee05e32f04c7f07ddc1ba688b7a2 | [
"Apache-2.0"
] | null | null | null | errores.py | facmartoni/python_exercises | 7f05c7491a0eee05e32f04c7f07ddc1ba688b7a2 | [
"Apache-2.0"
] | 1 | 2021-10-11T00:25:14.000Z | 2021-10-11T00:25:14.000Z |
def run():
    """Interactively report the population (in millions) of a country.

    Loops forever; an unknown country name prints an apology instead of
    crashing.
    """
    countries = {
        'mexico': 122,
        'colombia': 49,
        'argentina': 43,
        'chile': 18,
        'peru': 31
    }
    while True:
        country = input(
            '¿De que país quieres saber la población?: ').lower()
        if country in countries:
            print(
                f'La población de {country} es {countries[country]} millones\n')
        else:
            print('No tenemos ese país en la base de datos :(\n')
# Script entry point: only start the interactive loop when run directly.
if __name__ == '__main__':
    run()
| 20.36 | 80 | 0.479371 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.40856 |
aa5cac558a4f7f7ce93e05bc22da8c8c1d651763 | 88 | py | Python | Ara_Ara/apps.py | ksdfg/ARA-ARA | 408c5d55f670279c6d92826fdf78d5f26f8b6bcc | [
"MIT"
] | null | null | null | Ara_Ara/apps.py | ksdfg/ARA-ARA | 408c5d55f670279c6d92826fdf78d5f26f8b6bcc | [
"MIT"
] | null | null | null | Ara_Ara/apps.py | ksdfg/ARA-ARA | 408c5d55f670279c6d92826fdf78d5f26f8b6bcc | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class AraAraConfig(AppConfig):
name = "Ara_Ara"
| 14.666667 | 33 | 0.75 | 51 | 0.579545 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.102273 |
aa5fa5ff3ffffa84887da1cb448b70b03ad5cd67 | 45 | py | Python | ahmed-package/__init__.py | gokcelahmed/ahmed-package | a6c34fb2d85105ad33063b840c84f70ff7a0aa4d | [
"MIT"
] | null | null | null | ahmed-package/__init__.py | gokcelahmed/ahmed-package | a6c34fb2d85105ad33063b840c84f70ff7a0aa4d | [
"MIT"
] | null | null | null | ahmed-package/__init__.py | gokcelahmed/ahmed-package | a6c34fb2d85105ad33063b840c84f70ff7a0aa4d | [
"MIT"
] | null | null | null | from ahmed-package.functions import printName | 45 | 45 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
aa60b0079dcb6f5a27a15460a7a773255408abdc | 91 | py | Python | fsdviz/stocking/forms/__init__.py | AdamCottrill/fsdivz | 98dd1f35a08dba26424e2951a40715e01399478c | [
"MIT"
] | null | null | null | fsdviz/stocking/forms/__init__.py | AdamCottrill/fsdivz | 98dd1f35a08dba26424e2951a40715e01399478c | [
"MIT"
] | 6 | 2020-02-12T00:03:40.000Z | 2020-11-30T01:20:56.000Z | fsdviz/stocking/forms/__init__.py | AdamCottrill/fsdviz | 98dd1f35a08dba26424e2951a40715e01399478c | [
"MIT"
] | null | null | null | from .FindEventsForm import *
from .StockingEventForm import *
from .XlsEventForm import *
| 22.75 | 32 | 0.802198 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
aa61678d6fbbb6831ab89015e270d094a885eab5 | 9,034 | py | Python | Chapter 16/Lookup/algorithms.py | codered-by-ec-council/Micro-Degree-in-Python-Security | cb16ed78ee38dad32e3909371edec8ff3ce6e6a7 | [
"MIT"
] | 4 | 2020-09-25T05:57:22.000Z | 2021-02-27T14:56:23.000Z | Chapter 16/Lookup/algorithms.py | codered-by-ec-council/Micro-Degree-in-Python-Security | cb16ed78ee38dad32e3909371edec8ff3ce6e6a7 | [
"MIT"
] | 4 | 2021-06-08T23:01:11.000Z | 2022-03-12T00:54:16.000Z | Chapter 16/Lookup/algorithms.py | codered-by-ec-council/Micro-Degree-in-Python-Security | cb16ed78ee38dad32e3909371edec8ff3ce6e6a7 | [
"MIT"
] | 5 | 2020-10-15T10:22:04.000Z | 2021-11-16T22:17:50.000Z | #!/usr/bin/env python3
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
---------------------------------------------------------------------
The first step in creating a cryptographic hash lookup table.
Creates a file of the following format:
[HASH_PART][WORDLIST_OFFSET][HASH_PART][WORDLIST_OFFSET]...
HASH_PART is the first 64 BITS of the hash, right-padded with zeroes if
necessary. WORDLIST_OFFSET is the position of the first character of the
word in the dictionary encoded as a 48-bit LITTLE ENDIAN integer.
'''
import sys
import hashlib
from binascii import hexlify, unhexlify
try:
import passlib
# from passlib.utils.handlers import MAX_PASSWORD_SIZE
from passlib.hash import nthash, lmhash, mysql41, oracle10, mysql323
from passlib.hash import msdcc, msdcc2, postgres_md5
except ImportError:
err = "\nFailed to import passlib"
sys.stderr.write(err)
sys.stderr.flush()
passlib = None
try:
import whirlpool
except ImportError:
sys.stderr.write("\nFailed to import whirlpool")
sys.stderr.flush()
whirlpool = None
class BaseAlgorithm(object):
    '''
    Uniform wrapper giving passlib- and hashlib-backed digests one API.
    '''

    _data = None

    def __init__(self, data=None):
        # Default to an empty byte string when no data is supplied.
        self.data = b'' if data is None else data

    @property
    def data(self):
        '''Bytes that will be hashed by digest()/hexdigest().'''
        return self._data

    @data.setter
    def data(self, value):
        self._data = self._as_bytes(value)

    def update(self, data):
        '''Append more input, mirroring hashlib's update() semantics.'''
        self._data += self._as_bytes(data)

    @staticmethod
    def _as_bytes(value):
        '''Coerce str to UTF-8 bytes; reject anything that is not bytes.'''
        if isinstance(value, str):
            value = value.encode()
        if not isinstance(value, bytes):
            raise TypeError('Data must be bytes')
        return value

    def digest(self):
        '''Subclasses must return the raw digest of self.data.'''
        raise NotImplementedError()

    def hexdigest(self):
        '''Hex-encoded digest, returned as bytes (binascii.hexlify).'''
        return hexlify(self.digest())
##########################################################
# > HASHLIB
##########################################################
# Thin BaseAlgorithm wrappers over hashlib. Each class carries a display
# `name`, the registry `key` used by the `algorithms` dict below, and the
# length of the hex digest it produces.


class Md4(BaseAlgorithm):
    '''MD4 via hashlib/OpenSSL.'''
    name = 'Message Digest 4'
    key = 'md4'
    hex_length = 32

    def digest(self):
        # NOTE(review): hashlib.new('md4') raises ValueError on builds
        # where OpenSSL disables MD4 — confirm the target platforms.
        return hashlib.new('md4', self.data).digest()


class Md5(BaseAlgorithm):
    '''MD5 (128-bit).'''
    name = 'Message Digest 5'
    key = 'md5'
    hex_length = 32

    def digest(self):
        return hashlib.md5(self.data).digest()


class Sha1(BaseAlgorithm):
    '''SHA-1 (160-bit).'''
    name = 'Secure Hashing Algorithm 1'
    key = 'sha1'
    hex_length = 40

    def digest(self):
        return hashlib.sha1(self.data).digest()


class Sha224(BaseAlgorithm):
    '''SHA-2, 224-bit variant.'''
    name = 'Secure Hashing Algorithm 2 (224 bit)'
    key = 'sha2-224'
    hex_length = 56

    def digest(self):
        return hashlib.sha224(self.data).digest()


class Sha256(BaseAlgorithm):
    '''SHA-2, 256-bit variant.'''
    name = 'Secure Hashing Algorithm 2 (256 bit)'
    key = 'sha2-256'
    hex_length = 64

    def digest(self):
        return hashlib.sha256(self.data).digest()


class Sha384(BaseAlgorithm):
    '''SHA-2, 384-bit variant.'''
    name = 'Secure Hashing Algorithm 2 (384 bit)'
    key = 'sha2-384'
    hex_length = 96

    def digest(self):
        return hashlib.sha384(self.data).digest()


class Sha512(BaseAlgorithm):
    '''SHA-2, 512-bit variant.'''
    name = 'Secure Hashing Algorithm 2 (512 bit)'
    key = 'sha2-512'
    hex_length = 128

    def digest(self):
        return hashlib.sha512(self.data).digest()


class Ripemd160(BaseAlgorithm):
    '''RIPEMD-160 via OpenSSL; registered below only when available.'''
    name = "RACE Integrity Primitives Evaluation Message Digest (160 bit)"
    key = "ripemd160"
    hex_length = 40

    def digest(self):
        md = hashlib.new('ripemd160')
        md.update(self._data)
        return md.digest()


##########################################################
# > SHA3
##########################################################


class Sha3_224(BaseAlgorithm):
    '''SHA-3, 224-bit variant.'''
    name = 'Secure Hashing Algorithm 3 (224 bit)'
    key = 'sha3-224'
    hex_length = 56

    def digest(self):
        return hashlib.sha3_224(self.data).digest()


class Sha3_256(BaseAlgorithm):
    '''SHA-3, 256-bit variant.'''
    name = 'Secure Hashing Algorithm 3 (256 bit)'
    key = 'sha3-256'
    hex_length = 64

    def digest(self):
        return hashlib.sha3_256(self.data).digest()


class Sha3_384(BaseAlgorithm):
    '''SHA-3, 384-bit variant.'''
    name = 'Secure Hashing Algorithm 3 (384 bit)'
    key = 'sha3-384'
    hex_length = 96

    def digest(self):
        return hashlib.sha3_384(self._data).digest()


class Sha3_512(BaseAlgorithm):
    '''SHA-3, 512-bit variant.'''
    name = 'Secure Hashing Algorithm 3 (512 bit)'
    key = 'sha3-512'
    hex_length = 128

    def digest(self):
        return hashlib.sha3_512(self.data).digest()
##########################################################
# > PASSLIB
##########################################################
# Wrappers around passlib hashes; only registered in the `algorithms`
# dict below when the passlib import at the top succeeded.
# NOTE(review): `.encrypt()` is passlib's legacy alias (renamed `.hash()`
# in passlib 1.7) — confirm the pinned passlib version still provides it.
# The input slices below exist to keep passlib from rejecting over-long
# data; confirm the exact limits against the passlib docs.


class Lm(BaseAlgorithm):
    '''Windows LAN Manager hash.'''
    name = 'LM'
    key = 'lm'
    hex_length = 32

    def digest(self):
        return unhexlify(lmhash.encrypt(self.data[:15]))


class Ntlm(BaseAlgorithm):
    '''Windows NT hash.'''
    name = 'NTLM'
    key = 'ntlm'
    hex_length = 32

    def digest(self):
        return unhexlify(nthash.encrypt(self.data[:127]))


class MySql323(BaseAlgorithm):
    '''Legacy MySQL password hash.'''
    name = 'MySQL v3.2.3'
    key = 'mysql323'
    hex_length = 16

    def digest(self):
        return unhexlify(mysql323.encrypt(self.data[:64]))


class MySql41(BaseAlgorithm):
    '''MySQL 4.1 hash; the leading "*" is stripped before decoding.'''
    name = 'MySQL v4.1'
    key = 'mysql41'
    hex_length = 40

    def digest(self):
        # [1:] drops the "*" prefix passlib includes in the hash string.
        return unhexlify(mysql41.encrypt(self.data[:64])[1:])


class Oracle10(BaseAlgorithm):
    '''
    Base Oracle 10g algorithm; this algorithm is salted with a username.
    Subclasses fix common usernames.
    '''
    hex_length = 16
    _user = ''

    def digest(self):
        return unhexlify(oracle10.encrypt(self.data[:64], user=self._user))


class Oracle10_Sys(Oracle10):
    name = 'Oracle 10g (SYS)'
    key = 'oracle10g-sys'
    _user = 'SYS'


class Oracle10_System(Oracle10):
    name = 'Oracle 10g (SYSTEM)'
    key = 'oracle10g-system'
    _user = 'SYSTEM'


class PostgresMd5(BaseAlgorithm):
    # Salted with the login role name; subclasses fix common usernames.
    hex_length = 32
    _user = ''

    def digest(self):
        '''Removes the "md5" prefix from the passlib hash string.'''
        return unhexlify(postgres_md5.encrypt(self._data[:64], user=self._user)[3:])


class PostgresMd5_Root(PostgresMd5):
    name = 'Postgres MD5 (root)'
    key = 'postgres_md5-root'
    _user = 'root'


class PostgresMd5_Postgres(PostgresMd5):
    name = 'Postgres MD5 (postgres)'
    key = 'postgres_md5-postgres'
    _user = 'postgres'


class PostgresMd5_Admin(PostgresMd5):
    name = 'Postgres MD5 (admin)'
    key = 'postgres_md5-admin'
    _user = 'admin'


class Msdcc_Administrator(BaseAlgorithm):
    '''MS Domain Cached Credentials, salted with user "administrator".'''
    name = 'MS Domain Cached Credentials'
    key = 'msdcc-administrator'
    hex_length = 32
    _user = "administrator"

    def digest(self):
        return unhexlify(msdcc.encrypt(self._data[:64], user=self._user))


class Msdcc2_Administrator(BaseAlgorithm):
    '''MS Domain Cached Credentials v2, salted with user "administrator".'''
    name = 'MS Domain Cached Credentials v2'
    key = 'msdcc2-administrator'
    hex_length = 32
    _user = "administrator"

    def digest(self):
        return unhexlify(msdcc2.encrypt(self._data[:64], user=self._user))
##########################################################
# > Whirlpool
##########################################################


class Whirlpool(BaseAlgorithm):
    '''512-bit Whirlpool digest; needs the third-party `whirlpool` module.'''
    name = "Whirlpool"
    key = "whirlpool"
    hex_length = 128

    def digest(self):
        return whirlpool.new(self._data).digest()
# Base algorithms
# Registry of always-available, hashlib-backed algorithms keyed by their
# short identifier.
algorithms = {
    Md4.key: Md4,
    Md5.key: Md5,
    Sha1.key: Sha1,
    Sha224.key: Sha224,
    Sha256.key: Sha256,
    Sha384.key: Sha384,
    Sha512.key: Sha512,
    Sha3_224.key: Sha3_224,
    Sha3_256.key: Sha3_256,
    Sha3_384.key: Sha3_384,
    Sha3_512.key: Sha3_512
}
# Optional algorithms: only registered when the backing implementation
# (OpenSSL ripemd160, passlib, whirlpool) is actually available.
if hasattr(hashlib, "algorithms_available"):
    if 'ripemd160' in hashlib.algorithms_available:
        algorithms[Ripemd160.key] = Ripemd160
if passlib is not None:
    algorithms[Lm.key] = Lm
    algorithms[Ntlm.key] = Ntlm
    algorithms[MySql323.key] = MySql323
    algorithms[MySql41.key] = MySql41
    algorithms[Oracle10_Sys.key] = Oracle10_Sys
    algorithms[Oracle10_System.key] = Oracle10_System
    algorithms[Msdcc_Administrator.key] = Msdcc_Administrator
    algorithms[Msdcc2_Administrator.key] = Msdcc2_Administrator
    algorithms[PostgresMd5_Admin.key] = PostgresMd5_Admin
    algorithms[PostgresMd5_Postgres.key] = PostgresMd5_Postgres
    algorithms[PostgresMd5_Root.key] = PostgresMd5_Root
if whirlpool is not None:
    algorithms[Whirlpool.key] = Whirlpool
| 23.464935 | 84 | 0.627629 | 5,705 | 0.631503 | 0 | 0 | 282 | 0.031215 | 0 | 0 | 3,086 | 0.341598 |
aa61d3193d1f93969b18dcfab958b79720287b12 | 3,643 | py | Python | neurolang/region_solver.py | hndgzkn/NeuroLang | a3178d47f80bc0941440d9bb09e06c2f217b9566 | [
"BSD-3-Clause"
] | 1 | 2021-01-07T02:00:22.000Z | 2021-01-07T02:00:22.000Z | neurolang/region_solver.py | hndgzkn/NeuroLang | a3178d47f80bc0941440d9bb09e06c2f217b9566 | [
"BSD-3-Clause"
] | 207 | 2020-11-04T12:51:10.000Z | 2022-03-30T13:42:26.000Z | neurolang/region_solver.py | hndgzkn/NeuroLang | a3178d47f80bc0941440d9bb09e06c2f217b9566 | [
"BSD-3-Clause"
] | 6 | 2020-11-04T13:59:35.000Z | 2021-03-19T05:28:10.000Z | import typing
import re
from .CD_relations import cardinal_relation, inverse_directions
from .regions import Region, region_union
from .expression_walker import PatternWalker
from .expressions import Constant
REFINE_OVERLAPPING = True
class RegionSolver(PatternWalker[Region]):
    """Pattern walker answering cardinal-relation queries over Regions.

    Per-relation predicate methods (``function_<relation>`` plus the
    stricter ``function_anatomical_<relation>`` variants) are attached to
    the class dynamically in ``__new__``.
    """
    type_name = 'Region'
    def __new__(cls, *args, **kwargs):
        # Relation name -> single-letter direction code understood by
        # cardinal_relation().
        cardinal_operations = {
            'inferior_of': 'I', 'superior_of': 'S',
            'posterior_of': 'P', 'anterior_of': 'A',
            'left_of': 'L', 'right_of': 'R',
            'overlapping': 'O'
        }
        refine_overlapping = kwargs.get(
            'refine_overlapping',
            REFINE_OVERLAPPING
        )
        max_tree_depth_level = kwargs.get(
            'max_tree_depth_level',
            None
        )
        def build_function(relation, refine_overlapping=False):
            # Plain predicate: True when x stands in `relation` to y.
            def fun(self, x: Region, y: Region) -> bool:
                return bool(cardinal_relation(
                    x, y, relation,
                    refine_overlapping=refine_overlapping,
                    stop_at=max_tree_depth_level
                ))
            return fun
        def anatomical_direction_function(relation, refine_overlapping=False):
            # Strict predicate: the relation must hold while neither the
            # inverse direction nor an overlap does.
            def func(self, x: Region, y: Region) -> bool:
                return bool(
                    cardinal_relation(
                        x, y, relation,
                        refine_overlapping=refine_overlapping,
                        stop_at=max_tree_depth_level
                    ) and not (
                        cardinal_relation(
                            x, y, inverse_directions[relation],
                            refine_overlapping=refine_overlapping,
                            stop_at=max_tree_depth_level
                        ) or
                        cardinal_relation(
                            x, y, cardinal_operations['overlapping'],
                            refine_overlapping=refine_overlapping,
                            stop_at=max_tree_depth_level
                        )
                    )
                )
            return func
        for key, value in cardinal_operations.items():
            setattr(
                cls, f'function_{key}',
                build_function(value, refine_overlapping=refine_overlapping)
            )
        # Only the four in-plane directions get the strict anatomical
        # variant; left/right and overlapping are excluded.
        anatomical_correct_operations = {
            k: cardinal_operations[k] for k in (
                'inferior_of', 'superior_of',
                'posterior_of', 'anterior_of'
            )
        }
        for key, value in anatomical_correct_operations.items():
            setattr(
                cls, f'function_anatomical_{key}',
                anatomical_direction_function(
                    value, refine_overlapping=refine_overlapping
                )
            )
        return PatternWalker.__new__(cls)
    def function_regexp(
        self, regexp: typing.Text
    ) -> typing.AbstractSet[Region]:
        """Return the frozenset of Region symbols whose name matches
        ``regexp`` (re.search semantics)."""
        regions = []
        for k in self.symbol_table.symbols_by_type(Region):
            if re.search(regexp, k.name):
                regions.append(k)
        return frozenset(regions)
    def function_region_union(
        self, region_set: typing.AbstractSet[Region]
    ) -> Region:
        """Union a set of region expressions into a single Region.

        Every element is walked first and must resolve to a Constant;
        otherwise a ValueError is raised.
        """
        new_region_set = []
        for region in region_set:
            region = self.walk(region)
            if not isinstance(region, Constant):
                raise ValueError(
                    "Region union can only be evaluated on resolved regions"
                )
            new_region_set.append(region.value)
        return region_union(new_region_set)
| 31.678261 | 78 | 0.533626 | 3,402 | 0.933846 | 0 | 0 | 0 | 0 | 0 | 0 | 323 | 0.088663 |
aa64678b26839c7ab3d6b1c16d9887e4d904607c | 4,868 | py | Python | More/E00_Publications/Infographics/conditionsHierarchy.py | freder/PageBotExamples | eb4ced53a673b9376e8357afa9ea0795b022b13c | [
"Ruby",
"MIT"
] | 5 | 2020-06-20T22:01:23.000Z | 2021-08-06T04:39:50.000Z | More/E00_Publications/Infographics/conditionsHierarchy.py | freder/PageBotExamples | eb4ced53a673b9376e8357afa9ea0795b022b13c | [
"Ruby",
"MIT"
] | 5 | 2020-05-17T09:32:27.000Z | 2021-03-15T19:45:52.000Z | More/E00_Publications/Infographics/conditionsHierarchy.py | freder/PageBotExamples | eb4ced53a673b9376e8357afa9ea0795b022b13c | [
"Ruby",
"MIT"
] | 2 | 2021-02-25T19:07:45.000Z | 2022-01-09T21:14:06.000Z | # -----------------------------------------------------------------------------
#
# P A G E B O T E X A M P L E S
#
# Copyright (c) 2017 Thom Janssen <https://github.com/thomgb>
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# conditionsHierarchy.py
#
import sys, inspect
from pagebot import getContext
from pagebot.toolbox.units import *
from pagebot.toolbox.color import Color, blackColor, blueColor, greenColor
from pagebot.fonttoolbox.objects.font import findFont
from pagebot.conditions import *
context = getContext()
# Canvas origin of the first hierarchy column and overall page size.
X0 = 100
Y0 = 100
WIDTH = 1600
HEIGHT = 1400
# Class-box geometry: box height/width, horizontal gap between boxes,
# vertical gap between MRO levels, and inner text padding.
HBOX = 34
WBOX = 170
GAP = 20
HGAP = 60
P = 15
TEXTSIZE = pt(12)
# Horizontal offset applied to the bezier control points of connections.
OFFSET = 9
titleFont = findFont('BungeeInline-Regular')
font = findFont('Roboto-Regular')
boldFont = findFont('BungeeOutline-Regular')
def drawClassHierarchy(obj, colorRange, i):
    """Draw ``obj``'s MRO from the root base down to ``obj`` itself.

    One box per class, stacked vertically from (X0, Y0) in HGAP steps,
    with consecutive classes in the chain connected. ``i`` indexes into
    ``colorRange`` and wraps around; the advanced index is returned so the
    caller can continue the color cycle across hierarchies.
    """
    y = Y0
    previous = None
    for klass in reversed(obj.__mro__):
        name = klass.__name__
        if i >= len(colorRange):
            i = 0  # wrap the color cycle
        drawClass(name, X0, y, colorRange[i])
        if previous is not None:
            drawConnection(name, previous)
        previous = name
        y += HGAP
        i += 1
    return i
def drawConnection(current, previous):
    """Draw a bezier connection between the boxes of two drawn classes.

    Entry/exit anchors are picked from the relative vertical positions of
    the two boxes (above / below / same row). Each unordered pair of
    classes is connected at most once, tracked in the module-level
    ``connections`` list.
    """
    if sorted([current, previous]) in connections:
        return
    pos0 = drawnclasses[current]
    p0x, p0y = pos0
    pos1 = drawnclasses[previous]
    p1x, p1y = pos1
    context.stroke(blueColor)
    # Determines box entry / exit points.
    if p0y > p1y:
        p0x += WBOX / 2
        p1x += WBOX / 2
        p1y += HBOX
    elif p0y < p1y:
        # Bug fix: these two lines were no-op expressions
        # (`p0x + WBOX / 2`); they must shift the anchors to the box
        # centers, mirroring the branch above.
        p0x += WBOX / 2
        p1x += WBOX / 2
        p0y += HBOX
    elif p0y == p1y:
        # Same row: connect through the facing vertical edges.
        p0y += HBOX / 2
        p1y += HBOX / 2
        if p1x > p0x:
            p0x += WBOX
        elif p1x < p0x:
            p1x += WBOX
    # TODO: draw only once for any location.
    context.circle(p0x, p0y, 3)
    context.circle(p1x, p1y, 3)
    # Curve between the two anchors; control points are offset
    # horizontally by OFFSET and vertically by a third of the span.
    path = context.newPath()
    context.moveTo((p0x, p0y))
    cp0x = p0x - OFFSET
    cp0y = p0y - (p0y - p1y) / 3
    context.stroke(greenColor)
    cp1x = p1x + OFFSET
    cp1y = p1y + (p0y - p1y) / 3
    context.stroke(greenColor)
    context.fill(None)
    context.stroke((1, 0, 1, 0.5))
    context.curveTo((cp0x, cp0y), (cp1x, cp1y), (p1x, p1y))
    # NOTE(review): `drawPath` is an unqualified name here — presumably
    # the DrawBot global; confirm it should not be context.drawPath(path).
    drawPath(path)
    connections.append(sorted([current, previous]))
def drawClass(name, x, y, color):
    """Draw one class box labelled ``name``, avoiding occupied slots.

    Starts at (x, y) and slides right — wrapping to the next row at the
    right margin — until a free position is found. The chosen position is
    recorded in the module-level ``drawnclasses``/``positions`` state; a
    class that was already drawn is skipped.
    """
    if name in drawnclasses:
        return
    pos = (x, y)
    # Probe for a free slot: step one box width right, wrap to a new row
    # at the right edge of the canvas.
    while pos in positions:
        col, row = pos
        col += GAP + WBOX
        if col >= WIDTH - WBOX - GAP:
            col = X0
            row += HGAP
        pos = (col, row)
    context.fill(blackColor)
    context.fontSize(TEXTSIZE)
    boxx, boxy = pos
    context.stroke(None)
    context.fill(color)
    context.roundedRect(boxx, boxy, WBOX, HBOX)
    context.fill(blackColor)
    bs = context.newString(
        name, style=dict(font=font.path, fontSize=TEXTSIZE, textFill=0.1))
    context.text(bs, (boxx + P, boxy + P))
    drawnclasses[name] = pos
    positions.append(pos)
def getColorRange(l):
    """Return ``l`` colors sweeping the blue channel from 0 to (l-1)/l."""
    return [Color(0.7, 0.7, index / l) for index in range(l)]
def drawClasses(inspected):
    """Draw the hierarchy of every class found in ``inspected``.

    ``inspected`` is a sequence of (name, object) pairs as produced by
    ``inspect.getmembers``; non-class members are ignored. A single shared
    color range is built so colors stay distinct across hierarchies.
    """
    found = [member for _, member in inspected if inspect.isclass(member)]
    palette = getColorRange(len(found))
    index = 0
    for klass in found:
        index = drawClassHierarchy(klass, palette, index)
# Module-level drawing state shared by the helpers above:
#   connections  - unordered class-name pairs already linked
#   drawnclasses - class name -> (x, y) box position
#   positions    - occupied box positions
context.newPage(pt(WIDTH), pt(HEIGHT))
connections = []
drawnclasses = {}
positions = []
classes = []
classes.extend(inspect.getmembers(sys.modules['pagebot.conditions']))
drawClasses(classes)
# Page title: plain title text followed by a bold outlined part.
context.fill(0)
context.stroke(None)
context.fontSize(42)
msg = 'PageBot Alignment Conditions '
msg1 = 'Object Hierarchy'
style = dict(font=titleFont.path, fontSize=36, textFill=0.5)
boldStyle = dict(font=boldFont.path, fontSize=36, textFill=0)
bs = context.newString(msg, style=style)
bs += context.newString(msg1, style=boldStyle)
context.text(bs, (100, HEIGHT - 100))
context.saveDrawing('_export/conditionObjectHierarchy.png')
context.saveDrawing('_export/conditionObjectHierarchy.pdf')
aa64bf35cc2af88a116c7474f047c294202fae37 | 1,955 | py | Python | mcarch/model/file.py | Scotsguy/MCArchive | 89847bab722c6782fa53c7b11ee83f1f5b2d9f05 | [
"MIT"
] | null | null | null | mcarch/model/file.py | Scotsguy/MCArchive | 89847bab722c6782fa53c7b11ee83f1f5b2d9f05 | [
"MIT"
] | null | null | null | mcarch/model/file.py | Scotsguy/MCArchive | 89847bab722c6782fa53c7b11ee83f1f5b2d9f05 | [
"MIT"
] | null | null | null | import os
import enum
import hashlib
from urllib.parse import urljoin
from flask import url_for, current_app as app
from mcarch.app import db, get_b2bucket
class StoredFile(db.Model):
    """Represents a file stored in some sort of storage medium."""
    __tablename__ = 'stored_file'
    id = db.Column(db.Integer, primary_key=True)
    # Display/file name as uploaded.
    name = db.Column(db.String(80), nullable=False)
    # Hex-encoded SHA-256 of the file contents.
    sha256 = db.Column(db.String(130), nullable=False)
    # User who uploaded the file; nullable for anonymous/unknown uploads.
    upload_by_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=True)
    upload_by = db.relationship('User')
    # Path to this file within the B2 bucket. Null if file is not on B2.
    b2_path = db.Column(db.String(300), nullable=True)
    def b2_download_url(self):
        """Gets the URL to download this file from the archive's B2 bucket.

        Returns None when the file has no B2 path (not stored on B2).
        """
        if self.b2_path:
            return urljoin(app.config['B2_PUBLIC_URL'], self.b2_path)
def gen_b2_path(filename, sha):
    """Generate the in-bucket storage path for a file: ``<sha>/<filename>``.

    Uses posixpath so the separator is always '/', matching B2 object-key
    conventions and the urljoin() done in ``StoredFile.b2_download_url``;
    os.path.join would emit backslashes on Windows.
    """
    import posixpath
    return posixpath.join(sha, filename)
def sha256_file(path):
    """Return the hex-encoded SHA-256 digest of the file at ``path``.

    The file is read in 64 KiB chunks so arbitrarily large files can be
    hashed without loading them into memory.
    """
    digest = hashlib.sha256()
    with open(path, 'rb') as fobj:
        for chunk in iter(lambda: fobj.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def upload_b2_file(path, name, user=None):
    """Upload a local file to B2 and record it as a StoredFile row.

    Computes the file's SHA-256, uploads it to the configured B2 bucket at
    the hash-derived path, then inserts and commits a StoredFile.

    @param path: path to the file on disk
    @param name: name of the file as it should be in B2
    @param user: user to associate the stored file with. Can be None
    """
    digest = sha256_file(path)
    dest = gen_b2_path(name, digest)
    get_b2bucket().upload_local_file(path, dest)
    record = StoredFile(name=name, sha256=digest, b2_path=dest, upload_by=user)
    db.session.add(record)
    db.session.commit()
    return record
| 31.031746 | 88 | 0.679284 | 742 | 0.37954 | 0 | 0 | 0 | 0 | 0 | 0 | 649 | 0.331969 |
aa66b44f404f54a5640fb09e472c1518a47d7552 | 5,142 | py | Python | taxi/migrations/0001_initial.py | alifelan/taxi-unico-web | edcbe8a8a9584350452fcea04a83247c5676b8f6 | [
"MIT"
] | 1 | 2021-05-26T09:06:08.000Z | 2021-05-26T09:06:08.000Z | taxi/migrations/0001_initial.py | alifelan/taxi-unico-web | edcbe8a8a9584350452fcea04a83247c5676b8f6 | [
"MIT"
] | 10 | 2019-04-13T00:08:17.000Z | 2019-05-09T10:43:06.000Z | taxi/migrations/0001_initial.py | alifelan/taxi-unico-web | edcbe8a8a9584350452fcea04a83247c5676b8f6 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2019-05-09 06:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the taxi app.

    Creates the geography models (State/City/Location), Taxi and User
    accounts, and the BusTrip/TaxiTrip trip records linking them.
    Auto-generated by Django; do not edit operations by hand.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='BusTrip',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('first_departure_date', models.DateTimeField()),
                ('first_arrival_date', models.DateTimeField()),
                ('second_departure_date', models.DateTimeField(blank=True, null=True)),
                ('second_arrival_date', models.DateTimeField(blank=True, null=True)),
                ('round_trip', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('city', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=50)),
                ('address', models.CharField(max_length=50)),
                ('latitude', models.FloatField()),
                ('longitude', models.FloatField()),
                ('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='locations', to='taxi.City')),
            ],
        ),
        migrations.CreateModel(
            name='State',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('state', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Taxi',
            fields=[
                ('driver_name', models.CharField(max_length=50)),
                ('email', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('password', models.CharField(max_length=50)),
                ('plate', models.CharField(max_length=15)),
                ('model', models.CharField(max_length=50)),
                ('brand', models.CharField(max_length=50)),
                ('taxi_number', models.IntegerField()),
                ('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='taxis', to='taxi.City')),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('name', models.CharField(max_length=50)),
                ('email', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('password', models.CharField(max_length=50)),
                ('card', models.CharField(max_length=16)),
            ],
        ),
        migrations.CreateModel(
            name='TaxiTrip',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('departure_date', models.DateTimeField()),
                ('arrival_date', models.DateTimeField()),
                ('price', models.FloatField(blank=True, null=True)),
                ('taxi_rating', models.PositiveSmallIntegerField(blank=True, null=True)),
                ('user_rating', models.PositiveSmallIntegerField(blank=True, null=True)),
                ('distance_meters', models.FloatField()),
                ('distance_string', models.CharField(max_length=100)),
                ('time_seconds', models.BigIntegerField()),
                ('time_string', models.CharField(max_length=100)),
                ('status', models.CharField(max_length=9)),
                ('bus_trip', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='taxiTrips', to='taxi.BusTrip')),
                ('destination', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='taxiTripsD', to='taxi.Location')),
                ('origin', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='taxiTripsO', to='taxi.Location')),
                ('taxi', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='trips', to='taxi.Taxi')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='taxiTrips', to='taxi.User')),
            ],
        ),
        migrations.AddField(
            model_name='city',
            name='state',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cities', to='taxi.State'),
        ),
        migrations.AddField(
            model_name='bustrip',
            name='destination',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='busTripsD', to='taxi.Location'),
        ),
        migrations.AddField(
            model_name='bustrip',
            name='origin',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='busTripsO', to='taxi.Location'),
        ),
    ]
| 46.745455 | 154 | 0.571373 | 5,018 | 0.975885 | 0 | 0 | 0 | 0 | 0 | 0 | 829 | 0.161221 |
aa678b1085b17ea09b4872f0d8cdbfe36718c786 | 1,181 | py | Python | mainapp/migrations/0006_auto_20200308_1745.py | Raistlin11123/inquiry-soso | 380d27219a5855f9e25da90db47f85e46e5e8b5d | [
"MIT"
] | null | null | null | mainapp/migrations/0006_auto_20200308_1745.py | Raistlin11123/inquiry-soso | 380d27219a5855f9e25da90db47f85e46e5e8b5d | [
"MIT"
] | null | null | null | mainapp/migrations/0006_auto_20200308_1745.py | Raistlin11123/inquiry-soso | 380d27219a5855f9e25da90db47f85e46e5e8b5d | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-03-08 16:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0005_auto_20200308_1735'),
]
operations = [
migrations.AlterField(
model_name='clue',
name='paragraph1',
field=models.TextField(blank=True, null=True, verbose_name='paragraph1'),
),
migrations.AlterField(
model_name='clue',
name='paragraph2',
field=models.TextField(blank=True, null=True, verbose_name='paragraph2'),
),
migrations.AlterField(
model_name='clue',
name='paragraph3',
field=models.TextField(blank=True, null=True, verbose_name='paragraph3'),
),
migrations.AlterField(
model_name='clue',
name='paragraph4',
field=models.TextField(blank=True, null=True, verbose_name='paragraph4'),
),
migrations.AlterField(
model_name='clue',
name='paragraph5',
field=models.TextField(blank=True, null=True, verbose_name='paragraph5'),
),
]
| 30.282051 | 85 | 0.58171 | 1,090 | 0.922947 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.193903 |
aa6888c3b91fb826ed9f4b53bf760fb19c052e84 | 1,362 | py | Python | utils/youtube.py | ramrathi/baritone | 6a57aab99362b740903c0cd52f1f6cbb0e32909b | [
"MIT"
] | 3 | 2020-04-03T15:11:26.000Z | 2020-05-03T06:52:09.000Z | utils/youtube.py | ramrathi/baritone | 6a57aab99362b740903c0cd52f1f6cbb0e32909b | [
"MIT"
] | null | null | null | utils/youtube.py | ramrathi/baritone | 6a57aab99362b740903c0cd52f1f6cbb0e32909b | [
"MIT"
] | null | null | null | import subprocess
import scipy.io.wavfile as wav
import sys
import numpy as np
# import pyaudio
import time
import wave
import os
from pydub import AudioSegment
import pafy
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_dl import YoutubeDL
# Make sibling modules importable regardless of the working directory.
dirname = os.path.dirname(os.path.abspath(__file__))
sys.path.append(dirname)
def get_youtube_cc(url):
    """Fetch the caption track for a YouTube video.

    Returns ``(captions, True)`` on success — where captions is the
    transcript text joined with leading spaces — or
    ``("Can't fetch from youtube captions", False)`` on any failure.
    """
    try:
        from urllib.parse import urlparse, parse_qs
        # Parse the v= query parameter instead of naive string splitting,
        # so URLs with extra parameters (e.g. ...?v=ID&t=42) still work.
        video_id = parse_qs(urlparse(url).query)['v'][0]
        # Same language preference as before: German or English tracks.
        transcripts, _ = YouTubeTranscriptApi.get_transcripts(
            [video_id], languages=['de', 'en'])
        captions = ''.join(' ' + line['text'] for line in transcripts[video_id])
        return (captions, True)
    except Exception:
        return ("Can't fetch from youtube captions", False)
def get_youtube_audio(url):
    """Download a YouTube video's audio track as mp3 into ``../temp/``.

    Returns ``(video_id, True)`` on success or ``(exception, False)`` on
    any failure.
    """
    try:
        dirname = os.path.dirname(os.path.dirname(__file__))
        video_ids = [url.split('?v=')[1]]
        id = video_ids[0]
        ydl_opts = {
            'format': 'bestaudio/best',
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '192',
            }],
            # Bug fix: the template used '%(etx)s' (typo), which youtube-dl
            # renders as the literal 'NA'; '%(ext)s' is the real extension
            # field. The FFmpeg postprocessor still renames the result to
            # .mp3, so the final artifact name is unchanged.
            'outtmpl': dirname+'/temp/%(id)s.%(ext)s',
            'quiet': False
        }
        ydl = YoutubeDL(ydl_opts)
        ydl.download(video_ids)
        return (id,True)
    except Exception as e:
        return (e,False)
| 25.698113 | 86 | 0.607195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.165198 |
aa6a2735bc18f7d76cf04368018e139d17c5dd19 | 1,116 | py | Python | pepdb/cms_pages/migrations/0006_auto_20151024_2019.py | dchaplinsky/pep.org.ua | 8633a65fb657d7f04dbdb12eb8ae705fa6be67e3 | [
"MIT"
] | 7 | 2015-12-21T03:52:46.000Z | 2020-07-24T19:17:23.000Z | pepdb/cms_pages/migrations/0006_auto_20151024_2019.py | dchaplinsky/pep.org.ua | 8633a65fb657d7f04dbdb12eb8ae705fa6be67e3 | [
"MIT"
] | 12 | 2016-03-05T18:11:05.000Z | 2021-06-17T20:20:03.000Z | pepdb/cms_pages/migrations/0006_auto_20151024_2019.py | dchaplinsky/pep.org.ua | 8633a65fb657d7f04dbdb12eb8ae705fa6be67e3 | [
"MIT"
] | 4 | 2016-07-17T20:19:38.000Z | 2021-03-23T12:47:20.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('cms_pages', '0005_auto_20150829_1516'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='body_en',
field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[EN] \u0422\u0435\u043a\u0441\u0442 \u043d\u0430 \u0431\u043b\u0430\u043a\u0438\u0442\u043d\u0456\u0439 \u043f\u0430\u043d\u0435\u043b\u0456'),
),
migrations.AddField(
model_name='homepage',
name='title_en',
field=models.CharField(default='', max_length=255),
),
migrations.AlterField(
model_name='homepage',
name='body',
field=wagtail.wagtailcore.fields.RichTextField(default='', verbose_name='[UA] \u0422\u0435\u043a\u0441\u0442 \u043d\u0430 \u0431\u043b\u0430\u043a\u0438\u0442\u043d\u0456\u0439 \u043f\u0430\u043d\u0435\u043b\u0456'),
),
]
| 36 | 228 | 0.648746 | 973 | 0.871864 | 0 | 0 | 0 | 0 | 0 | 0 | 404 | 0.362007 |
aa6aeec0b7ed691ded9f765e25e1d736dda9bbf4 | 1,255 | py | Python | autorop/leak/puts.py | Tanson/autorop | 0d2fc71cdcc9649a6006aee641a3808f884d7fc4 | [
"MIT"
] | null | null | null | autorop/leak/puts.py | Tanson/autorop | 0d2fc71cdcc9649a6006aee641a3808f884d7fc4 | [
"MIT"
] | null | null | null | autorop/leak/puts.py | Tanson/autorop | 0d2fc71cdcc9649a6006aee641a3808f884d7fc4 | [
"MIT"
] | null | null | null | from autorop import PwnState, arutil
from pwn import ROP
def puts(state: PwnState) -> PwnState:
    """Leak libc function addresses through ``puts``.

    Resolves the runtime addresses of ``__libc_start_main`` and ``puts``
    by building rop chains that call ``puts`` on each address of interest,
    storing the results in ``state.leaks``.

    Arguments:
        state: The current ``PwnState`` with the following set
            - ``target``: What we want to exploit.
            - ``_elf``: pwntools ``ELF`` of ``state.binary_name``.
            - ``overwriter``: Function which writes rop chain to the
              "right place".
            - ``vuln_function``: Name of vulnerable function in binary,
              which we can return to repeatedly.

    Returns:
        Mutated ``PwnState``, with the following updated
            - ``target``: The instance of target from which we got a
              successful leak. Hopefully it can still be interacted with.
            - ``leaks``: Updated with ``"symbol": address`` pairs for each
              function address of libc that was leaked.
    """
    def build_chain(rop: ROP, address: int) -> ROP:
        # One aligned call: puts(address) prints the pointed-to bytes.
        arutil.align_call(rop, "puts", [address])
        return rop

    wanted_symbols = ["__libc_start_main", "puts"]
    return arutil.leak_helper(state, build_chain, wanted_symbols)
| 35.857143 | 85 | 0.619124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 961 | 0.765737 |
aa6d58be81bd3ec465617d1b907661af609ecd48 | 2,515 | py | Python | tests/unit/drivers/simple_vlan.py | gsilvis/haas | 70d4b5dec081b803c87f0295e7ae8dc3b2a76ce9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/unit/drivers/simple_vlan.py | gsilvis/haas | 70d4b5dec081b803c87f0295e7ae8dc3b2a76ce9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/unit/drivers/simple_vlan.py | gsilvis/haas | 70d4b5dec081b803c87f0295e7ae8dc3b2a76ce9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2013-2014 Massachusetts Open Cloud Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Unit tests for VLAN helper functions"""
from functools import wraps
from haas import model, api
from haas.test_common import *
import pytest
from haas.config import cfg
from haas.drivers.simple_vlan import *
def vlan_test(vlan_list):
    """A decorator for tests of the simple_vlan driver. Pass in a string for
    the vlan_list configuration option, which determines which vlans can be
    used for networking.
    """
    def dec(f):
        def config_initialize():
            # Configure the simple_vlan driver for these tests. (The
            # original comment said 'dell', but the driver set here is
            # simple_vlan with a null switch.)
            cfg.add_section('general')
            cfg.set('general', 'driver', 'simple_vlan')
            cfg.add_section('vlan')
            cfg.set('vlan', 'vlans', vlan_list)
            cfg.add_section('driver simple_vlan')
            cfg.set('driver simple_vlan', 'switch', '{"switch":"null"}')
            # dry_run avoids touching real switch hardware during tests.
            cfg.add_section('devel')
            cfg.set('devel', 'dry_run', 'True')
        @wraps(f)
        @clear_configuration
        def wrapped(self):
            # Fresh config and a fresh database per test, released after.
            config_initialize()
            db = newDB()
            f(self, db)
            releaseDB(db)
        return wrapped
    return dec
class TestSimpleVLAN:
    """Tests basic operation of Simple VLAN driver"""
    @vlan_test('84')
    def test_simple_vlan_network_operations(self, db):
        # Create a project with one network, register three nodes with a
        # nic/port each, then exercise detach and network-connect paths.
        api.project_create('anvil-nextgen')
        network_create_simple('hammernet', 'anvil-nextgen')
        for k in range(97,100):
            nodename = 'node-' + str(k)
            api.node_register(nodename, 'ipmihost', 'root', 'tapeworm')
            api.node_register_nic(nodename, 'eth0', 'DE:AD:BE:EF:20:14')
            api.project_connect_node('anvil-nextgen', nodename)
            api.port_register(nodename)
            api.port_connect_nic(nodename, nodename, 'eth0')
        # Detach one node and attach another's nic to the network.
        api.project_detach_node('anvil-nextgen', 'node-97')
        api.node_connect_network('node-98', 'eth0', 'hammernet')
aa6e9617b11941b64bd3afef174d9947aebe3d32 | 403 | py | Python | norm_test.py | szmybs/GOES-R-2017-HurricaneExtraction | 2bacc7e035183d89448835e30947f639cff2776c | [
"MIT"
] | null | null | null | norm_test.py | szmybs/GOES-R-2017-HurricaneExtraction | 2bacc7e035183d89448835e30947f639cff2776c | [
"MIT"
] | null | null | null | norm_test.py | szmybs/GOES-R-2017-HurricaneExtraction | 2bacc7e035183d89448835e30947f639cff2776c | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from extract import HurricaneExtraction
#npy_file = './Data/NpyData/LIDIA/20172450002.npz'
npy_file = './Data/NpyData/IRMA/20172531622.npz'
data = HurricaneExtraction.read_extraction_data(npy_file)
data = HurricaneExtraction.normalize_using_physics(data)
for d in data:
fig = plt.figure()
im = plt.imshow(d, cmap='Greys_r')
plt.show()
| 23.705882 | 57 | 0.759305 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.238213 |
aa7040dd6fc8f362a76b2ed6b5d28ac0731ddd6b | 910 | py | Python | util/get_weather_icons_map.py | multipolygon/esphome-configs | fb574cd6ffadcc6f5a1e1e89da8d4bd0b56845d8 | [
"MIT"
] | null | null | null | util/get_weather_icons_map.py | multipolygon/esphome-configs | fb574cd6ffadcc6f5a1e1e89da8d4bd0b56845d8 | [
"MIT"
] | null | null | null | util/get_weather_icons_map.py | multipolygon/esphome-configs | fb574cd6ffadcc6f5a1e1e89da8d4bd0b56845d8 | [
"MIT"
] | 1 | 2022-03-27T00:18:17.000Z | 2022-03-27T00:18:17.000Z | from urllib.request import urlopen
import json
import re
url = urlopen("https://raw.githubusercontent.com/Templarian/MaterialDesign/master/meta.json")
meta = [(i['name'], i['codepoint']) for i in json.loads(url.read()) if re.search('^weather-', i['name'])]
print('''---
esphome:
# ...
includes:
- weather_icon_map.h
# ...
font:
- file: fonts/materialdesignicons-webfont.ttf
id: ...
size: ...
glyphs:''')
for name, codepoint in meta:
print(' - "\\U000%s" # %s' % (codepoint, name))
with open('weather_icon_map.h', 'w') as h:
h.write('#include <map>\nstd::map<std::string, std::string> weather_icon_map\n')
h.write(' {\n')
for name, codepoint in meta:
h.write(' {"%s", "\\U000%s"},\n' % (name.replace('weather-', ''), codepoint))
h.write(' };\n')
print('---')
for name, codepoint in meta:
print(' "%s",' % (name.replace('weather-', '')))
| 26 | 105 | 0.591209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 476 | 0.523077 |
aa706634207ec5cdf904d41b11723983513e464f | 3,913 | py | Python | network_anomaly/KDDCup99-LSTM/binary/lskerastest.py | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
] | 1 | 2022-01-18T01:53:34.000Z | 2022-01-18T01:53:34.000Z | network_anomaly/UNSW-NB15-LSTM/binary/lskerastest.py | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
] | null | null | null | network_anomaly/UNSW-NB15-LSTM/binary/lskerastest.py | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from sklearn.cross_validation import train_test_split
import pandas as pd
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Embedding
from keras.layers import LSTM, SimpleRNN, GRU
from keras.datasets import imdb
from keras.utils.np_utils import to_categorical
from sklearn.metrics import (precision_score, recall_score,f1_score, accuracy_score,mean_squared_error,mean_absolute_error)
from sklearn import metrics
from sklearn.preprocessing import Normalizer
import h5py
from keras import callbacks
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger
# Load the binary-labelled KDD test split: column 0 is the label,
# columns 1..41 are the 41 features.
testdata = pd.read_csv('kdd/binary/kddtest.csv', header=None)
C = testdata.iloc[:,0]
T = testdata.iloc[:,1:42]
scaler = Normalizer().fit(T)
testT = scaler.transform(T)
# summarize transformed data
np.set_printoptions(precision=3)
#print(testT[0:5,:])
y_test = np.array(C)
# reshape input to be [samples, time steps, features]
X_train = np.reshape(testT, (testT.shape[0], 1, testT.shape[1]))
batch_size = 32
# 1. define the network
model = Sequential()
model.add(LSTM(4,input_dim=41)) # try using a GRU instead, for fun
model.add(Dropout(0.1))
model.add(Dense(1))
model.add(Activation('sigmoid'))
# Load previously-trained weights and evaluate on the test split.
model.load_weights("kddresults/lstm1layer/checkpoint-51.hdf5")
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
y_train1 = y_test
y_pred = model.predict_classes(X_train)
accuracy = accuracy_score(y_train1, y_pred)
recall = recall_score(y_train1, y_pred , average="binary")
precision = precision_score(y_train1, y_pred , average="binary")
f1 = f1_score(y_train1, y_pred, average="binary")
print("confusion matrix")
print("----------------------------------------------")
print("accuracy")
print("%.3f" %accuracy)
print("racall")
print("%.3f" %recall)
print("precision")
print("%.3f" %precision)
print("f1score")
print("%.3f" %f1)
cm = metrics.confusion_matrix(y_train1, y_pred)
print("==============================================")
print("==============================================")
print(cm)
# NOTE(review): with sklearn's row/column convention cm[0][0] counts
# label-0 hits, so the tp/fp/tn/fn naming below is only correct if the
# positive class is encoded as label 0 — confirm against the dataset.
tp = cm[0][0]
fp = cm[0][1]
tn = cm[1][1]
fn = cm[1][0]
print("tp")
print(tp)
print("fp")
print(fp)
print("tn")
print(tn)
print("fn")
print(fn)
print("tpr")
tpr = float(tp)/(tp+fn)
print("fpr")
fpr = float(fp)/(fp+tn)
print("LSTM acc")
print(tpr)
print(fpr)
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
loss, accuracy = model.evaluate(X_train, y_train1)
print("\nLoss: %.2f, Accuracy: %.2f%%" % (loss, accuracy*100))
# Per-sample prediction probabilities, saved for later analysis.
t_probs = model.predict_proba(X_train)
print(t_probs)
np.savetxt('prob.txt', t_probs)
print(t_probs.shape)
'''
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
checkpointer = callbacks.ModelCheckpoint(filepath="kddresults/lstm1layer/checkpoint-{epoch:02d}.hdf5", verbose=1, save_best_only=True, monitor='val_acc',mode='max')
csv_logger = CSVLogger('training_set_iranalysis.csv',separator=',', append=False)
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=1000, validation_data=(X_test, y_test),callbacks=[checkpointer,csv_logger])
model.save("kddresults/lstm1layer/fullmodel/lstm1layer_model.hdf5")
loss, accuracy = model.evaluate(X_test, y_test)
print("\nLoss: %.2f, Accuracy: %.2f%%" % (loss, accuracy*100))
y_pred = model.predict_classes(X_test)
np.savetxt('kddresults/lstm1layer/lstm1predicted.txt', y_pred, fmt='%01d')
'''
'''
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
loss, accuracy = model.evaluate(X_train, y_train1)
print("\nLoss: %.2f, Accuracy: %.2f%%" % (loss, accuracy*100))
'''
| 29.201493 | 164 | 0.729108 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,748 | 0.446716 |
aa707bae2c10ff673808c3e9ddea8f24a1136fa7 | 4,463 | py | Python | train.py | MannyKayy/PlayableVideoGeneration | 14133f94e14a40fc17f283e5b01c168bc45990b6 | [
"MIT"
] | 1 | 2021-06-19T11:34:29.000Z | 2021-06-19T11:34:29.000Z | train.py | MannyKayy/PlayableVideoGeneration | 14133f94e14a40fc17f283e5b01c168bc45990b6 | [
"MIT"
] | null | null | null | train.py | MannyKayy/PlayableVideoGeneration | 14133f94e14a40fc17f283e5b01c168bc45990b6 | [
"MIT"
] | null | null | null | import argparse
import importlib
import os
import torch
import torch.nn as nn
import torchvision
import numpy as np
from dataset.dataset_splitter import DatasetSplitter
from dataset.transforms import TransformsGenerator
from dataset.video_dataset import VideoDataset
from evaluation.action_sampler import OneHotActionSampler, GroundTruthActionSampler
from evaluation.evaluator import Evaluator
from training.trainer import Trainer
from utils.configuration import Configuration
from utils.logger import Logger
torch.backends.cudnn.benchmark = True
if __name__ == "__main__":
    # Loads configuration file: the only CLI argument is the config path.
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, required=True)
    arguments = parser.parse_args()
    config_path = arguments.config
    # Validate the config and create the experiment directory tree before
    # anything else writes to disk.
    configuration = Configuration(config_path)
    configuration.check_config()
    configuration.create_directory_structure()
    config = configuration.get_config()
    logger = Logger(config)
    # The model is resolved dynamically: the configured module must expose a
    # module-level `model` attribute (a callable taking the config).
    search_name = config["model"]["architecture"]
    model = getattr(importlib.import_module(search_name), 'model')(config)
    model.cuda()
    # Build one VideoDataset per split, each with its own batching config
    # and transform pipeline.
    datasets = {}
    dataset_splits = DatasetSplitter.generate_splits(config)
    transformations = TransformsGenerator.get_final_transforms(config)
    for key in dataset_splits:
        path, batching_config, split = dataset_splits[key]
        transform = transformations[key]
        datasets[key] = VideoDataset(path, batching_config, transform, split)
    # Creates trainer and evaluator (both resolved dynamically from config,
    # like the model above).
    trainer = getattr(importlib.import_module(config["training"]["trainer"]), 'trainer')(config, model, datasets["train"], logger)
    # Evaluators will be assigned their specific action samplers to implement the evaluation strategy
    evaluator_inferred_actions = getattr(importlib.import_module(config["evaluation"]["evaluator"]), 'evaluator')(config, datasets["validation"], logger, action_sampler=None, logger_prefix="validation_inferred_actions")
    evaluator_inferred_actions_onehot = getattr(importlib.import_module(config["evaluation"]["evaluator"]), 'evaluator')(config, datasets["validation"], logger, action_sampler=OneHotActionSampler(), logger_prefix="validation_inferred_actions_onehot")
    evaluator_ground_truth_actions = getattr(importlib.import_module(config["evaluation"]["evaluator"]), 'evaluator')(config, datasets["validation"], logger, action_sampler=None, logger_prefix="validation_gt_actions")
    # Resume training: the checkpoint is loaded into the *raw* model, before
    # the DataParallel wrap below. A missing/corrupt checkpoint only warns.
    try:
        trainer.load_checkpoint(model)
    except Exception as e:
        logger.print(e)
        logger.print("- Warning: training without loading saved checkpoint")
    model = nn.DataParallel(model)
    model.cuda()
    logger.get_wandb().watch(model, log='all')
    # Step counters used to throttle periodic checkpointing/evaluation.
    last_save_step = 0
    last_eval_step = 0
    # Makes the model parallel and train
    while trainer.global_step < config["training"]["max_steps"]:
        model.train()
        trainer.train_epoch(model)
        # Saves the model (rolling checkpoint after every epoch).
        trainer.save_checkpoint(model)
        # Additionally keep a named snapshot every `save_freq` steps.
        if trainer.global_step > last_save_step + config["training"]["save_freq"]:
            trainer.save_checkpoint(model, f"checkpoint_{trainer.global_step}")
            last_save_step = trainer.global_step
        model.eval()
        # Evaluates the model every `eval_freq` steps.
        if trainer.global_step > last_eval_step + config["evaluation"]["eval_freq"]:
            # Evaluates with actions predicted from the model
            evaluator_inferred_actions.evaluate(model, trainer.global_step)
            # Evaluates with actions predicted from the model in one hot version
            # Disabled to improve evaluation time
            #evaluator_inferred_actions_onehot.evaluate(model, trainer.global_step)
            if config["data"]["ground_truth_available"]:
                # Evaluates with ground truth actions translated to the model action space
                # Uses the mapping between inferred and ground truth actions to configure the
                # ground truth action space -> model action space translation function
                action_mapping = evaluator_inferred_actions.get_best_action_mappings()
                ground_truth_action_sampler = GroundTruthActionSampler(action_mapping)
                evaluator_ground_truth_actions.set_action_sampler(ground_truth_action_sampler)
                evaluator_ground_truth_actions.evaluate(model, trainer.global_step)
            last_eval_step = trainer.global_step
| 40.572727 | 250 | 0.734484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,190 | 0.266637 |
aa70ac474ebd39afb7c876d6f6c1c99f03f6a7de | 2,545 | py | Python | src/gripit/edgelib/maxlinedev.py | yor1001/GripIt | a06b300df56473f692cbb9154d60525d35137ee3 | [
"MIT"
] | null | null | null | src/gripit/edgelib/maxlinedev.py | yor1001/GripIt | a06b300df56473f692cbb9154d60525d35137ee3 | [
"MIT"
] | null | null | null | src/gripit/edgelib/maxlinedev.py | yor1001/GripIt | a06b300df56473f692cbb9154d60525d35137ee3 | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
# MAXLINEDEV - Finds max deviation from a line in an edge contour.
#
# Function finds the point of maximum deviation from a line joining the
# endpoints of an edge contour.
#
# Usage: maxlinedev(x, y)
#
#
# Arguments:
# x, y - lists of x,y (row, col) indicies of connected pixels
# on the contour.
# Returns:
# maxdev = Maximum deviation of contour point from the line
# joining the end points of the contour (pixels).
#
# May 2016 - Original version
from future import standard_library
standard_library.install_aliases()
import math
import numpy as np
def maxlinedev(x, y):
    """Find the point of maximum deviation from the chord of an edge contour.

    Parameters
    ----------
    x, y : numpy arrays of row/col indices of connected pixels on the contour.

    Returns
    -------
    (maxdev, index_max) : the maximum perpendicular distance (in pixels) from
        the straight line joining the contour's end points, and the index at
        which it occurs.

    Degenerate inputs: a single-point contour returns (0, 0); an empty
    contour prints an error and returns None (behaviour preserved from the
    original callers' expectations).
    """
    num_pts = len(x)

    if num_pts == 1:
        # A one-point contour deviates nowhere. The only valid index is 0;
        # the previous return value of 1 was a 1-based (MATLAB) leftover and
        # out of bounds for a length-1 array.
        return 0, 0
    elif num_pts == 0:
        print("error: contour of length 0.")
        return

    # Index of the last point: end points are (x[0], y[0]) and (x[last], y[last]).
    last = num_pts - 1

    # Distance between the two contour end points.
    endpt_dist = math.sqrt(np.power(x[0] - x[last], 2) + np.power(y[0] - y[last], 2))

    eps = np.finfo(float).eps
    if endpt_dist > eps:
        # Line through (x1, y1), (x2, y2) parameterised as
        #   x*(y1-y2) + y*(x2-x1) + y2*x1 - y1*x2 = 0
        # (Jain, Rangachar and Schunck, "Machine Vision", McGraw-Hill 1996,
        # pp 194-196). Point-to-line distance is |expr| / endpoint distance.
        y1my2 = y[0] - y[last]
        x2mx1 = x[last] - x[0]
        contour = y[last] * x[0] - y[0] * x[last]
        dist_contour = abs(x * y1my2 + y * x2mx1 + contour) / endpt_dist
    else:
        # End points coincide (closed contour): fall back to distances from
        # the first point.
        dist_contour = np.sqrt(np.power(x - x[0], 2) + np.power(y - y[0], 2))

    # Index where the maximum deviation occurs, so callers can split there.
    index_max = np.argmax(dist_contour)
    maxdev = np.amax(dist_contour)
    return (maxdev, index_max)
| 31.419753 | 91 | 0.630648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,339 | 0.52613 |
aa70d111c58a251e0d85de6d7587ec453c5870c3 | 997 | py | Python | common.py | tim-moody/mdwiki-cacher | a9aaa386c77c34b8e39f0b5a8a070e6ceff21560 | [
"CC0-1.0"
] | null | null | null | common.py | tim-moody/mdwiki-cacher | a9aaa386c77c34b8e39f0b5a8a070e6ceff21560 | [
"CC0-1.0"
] | null | null | null | common.py | tim-moody/mdwiki-cacher | a9aaa386c77c34b8e39f0b5a8a070e6ceff21560 | [
"CC0-1.0"
] | null | null | null | # common functions
import sys
import json
# taken from sp_lib
def read_json_file(file_path):
    """Parse the JSON file at *file_path* and return the decoded object.

    Prints a message and re-raises if the file cannot be opened or read;
    JSON decoding errors propagate unchanged (as before).
    """
    try:
        with open(file_path, 'r') as json_file:
            # json.load reads and decodes in one step — no intermediate
            # whole-file string needed.
            return json.load(json_file)
    except OSError as e:
        print('Unable to read url json file', e)
        raise
def write_json_file(src_dict, target_file, sort_keys=False):
    """Serialize *src_dict* as pretty-printed UTF-8 JSON to *target_file*.

    A trailing newline is appended because json.dump does not emit one.
    Prints a message and re-raises on OSError, matching the read helpers
    in this module (the original re-raised silently).
    """
    try:
        with open(target_file, 'w', encoding='utf8') as json_file:
            json.dump(src_dict, json_file, ensure_ascii=False, indent=2, sort_keys=sort_keys)
            json_file.write("\n")  # Add newline cause Py JSON does not
    except OSError as e:
        # Consistent with read_json_file/read_file: report before re-raising.
        print('Unable to write json file', e)
        raise
def write_list(data, file):
    """Write each item of *data* to *file*, one item per line."""
    with open(file, 'w') as out:
        out.writelines(item + '\n' for item in data)
def read_file(file_path, mode='rt'):
    """Return the entire contents of *file_path* opened with *mode*.

    Prints a message and re-raises if the file cannot be opened or read.
    """
    try:
        with open(file_path, mode) as handle:
            return handle.read()
    except OSError as err:
        print('Unable to read file', err)
        raise
aa7171ed1f15768b83ea9a034a29465b15d2d581 | 1,039 | py | Python | integration-test/291-483-suppress-historical-closed.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | [
"MIT"
] | null | null | null | integration-test/291-483-suppress-historical-closed.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | [
"MIT"
] | null | null | null | integration-test/291-483-suppress-historical-closed.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
import dsl
from shapely.wkt import loads as wkt_loads
from . import FixtureTest
class SuppressHistoricalClosed(FixtureTest):
    """Check zoom-gating of a POI whose name marks it as closed: it must be
    absent at mid zoom and appear only at high zoom with kind 'closed'."""

    def test_cartoon_museum(self):
        # Cartoon Art Museum (closed)
        # Fixture: an OSM way tagged tourism=museum whose name ends in
        # "(closed)".
        self.generate_fixtures(dsl.way(368173967, wkt_loads('POINT (-122.400856246311 37.78696485494709)'), {u'name': u'Cartoon Art Museum (closed)', u'gnis:reviewed': u'no', u'addr:state': u'CA', u'ele': u'7',
u'source': u'openstreetmap.org', u'wikidata': u'Q1045990', u'gnis:import_uuid': u'57871b70-0100-4405-bb30-88b2e001a944', u'gnis:feature_id': u'1657282', u'tourism': u'museum', u'gnis:county_name': u'San Francisco'}))
        # POI shouldn't be visible early (z15 tile containing the point).
        self.assert_no_matching_feature(
            15, 5242, 12664, 'pois',
            {'id': 368173967})
        # but POI should be present at z17 and marked as closed
        self.assert_has_feature(
            16, 10485, 25328, 'pois',
            {'id': 368173967, 'kind': 'closed', 'min_zoom': 17})
| 43.291667 | 247 | 0.636189 | 929 | 0.894129 | 0 | 0 | 0 | 0 | 0 | 0 | 507 | 0.487969 |
aa718898352cee72928a11efe02c9cc6a37ed12e | 2,454 | py | Python | dots_testsuite.py | nguyent2/Practice-with-Modules | a075447ceb00b2e2b6812775b2b8c069511c53e2 | [
"MIT"
] | null | null | null | dots_testsuite.py | nguyent2/Practice-with-Modules | a075447ceb00b2e2b6812775b2b8c069511c53e2 | [
"MIT"
] | null | null | null | dots_testsuite.py | nguyent2/Practice-with-Modules | a075447ceb00b2e2b6812775b2b8c069511c53e2 | [
"MIT"
] | null | null | null | /Learn more or give us feedback
######################################################################
# Author: Thy H. Nguyen
# TODO: Change this to your names
# Username: nguyent2
# TODO: Change this to your usernames
#
# Assignment: A06: It's in your Genes
#
# Purpose: A test suite for testing the a06_genes.py program
#
######################################################################
# Acknowledgements:
# Original Author: Dr. Jan Pearce
#
# Idea from: http://www.cs.uni.edu/~schafer/1140/assignments/pa9/index.htm
#
# licensed under a Creative Commons
# Attribution-Noncommercial-Share Alike 3.0 United States License.
####################################################################################
import sys
from t12_dots import *
# You may get red squiggly lines from PyCharm; it'll be okay though.
# To remove them, right click the folder where this file is located, and select
# "Mark directory as" and then "Sources Root"
def testit(did_pass):
"""
Print the result of a unit test.
:param did_pass: a boolean representing the test
:return: None
"""
# This function works correctly--it is verbatim from the text
linenum = sys._getframe(1).f_lineno # Get the caller's line number.
if did_pass:
msg = "Test at line {0} ok.".format(linenum)
else:
msg = ("Test at line {0} FAILED.".format(linenum))
print(msg)
def dots_test_suite():
    """
    The dots_test_suite() is designed to test the following:
    calculate_size(num_dots)
    is_valid_size(dot_width, dot_height, distance, screen_width, screen_height)

    Results are printed through testit(), which reports the *caller's line
    number* — so each test call below must stay on its own line.
    :return: None
    """
    # The following tests test the calculate_size() function:
    # perfect-square dot counts are expected to map to square grids
    # (presumably (sqrt(n), sqrt(n)) — confirm against t12_dots).
    print("\nTesting calculate_size")
    testit(calculate_size(25) == (5,5))
    testit(calculate_size(36) == (6,6))
    testit(calculate_size(49) == (7,7))
    testit(calculate_size(64)==(8,8))
    #testit(calculate_size("hat") == "Error")
    # The following tests test the is_valid_size() function:
    # a grid too large for the screen is invalid; small grids are valid.
    print("\n is_valid_size")
    testit(is_valid_size(45,100,20,1100,650)== False)
    testit(is_valid_size(5, 7, 20, 1100, 650) == True)
    testit(is_valid_size(3, 3, 20, 1100, 650) == True)
    testit(is_valid_size(3,3,44,1100,650)==True)
# Run the suite at import/run time — deliberately not guarded by
# `if __name__ == "__main__"`, see the note below.
dots_test_suite()
# Notice the lack of a main? This is because this is a test suite; it's not intended to be run as a main() program.
# However, to run the test suite, you run this file. It should just work.
| 32.72 | 115 | 0.624287 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,708 | 0.696007 |
aa718ac8fc1ac872735a24111d8686444d3abf4f | 1,023 | py | Python | comparison_order_butter_cheby.py | ivanpauno/filter_design_examples | 1665db877251a4f9794e048cd099081af57d737b | [
"Apache-2.0"
] | null | null | null | comparison_order_butter_cheby.py | ivanpauno/filter_design_examples | 1665db877251a4f9794e048cd099081af57d737b | [
"Apache-2.0"
] | null | null | null | comparison_order_butter_cheby.py | ivanpauno/filter_design_examples | 1665db877251a4f9794e048cd099081af57d737b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 13 11:03:51 2019
@author: ivanpauno
"""
import matplotlib.pyplot as plt
import numpy as np
def main():
    """Plot the required Butterworth vs Chebyshev filter order as a function
    of the attenuation ratio A, one figure per stop-band edge ws.

    Black curve: Butterworth order. Red curve: Chebyshev order.
    """
    # A = sqrt(10^(.1*alpha_min-1)/10^(.1*alpha_max-1))
    attenuation = np.logspace(np.log10(2), np.log10(100), num=200)
    stopband_edges = [1.1, 1.5, 2, 3]

    # Analytic order formulas, rounded up to the next integer order.
    butter_orders = np.ceil([np.log(attenuation) / np.log(ws) for ws in stopband_edges])
    cheby_orders = np.ceil([np.arccosh(attenuation) / np.arccosh(ws) for ws in stopband_edges])

    for ws, n_butter, n_cheby in zip(stopband_edges, butter_orders, cheby_orders):
        fig, ax = plt.subplots()
        ax.ticklabel_format(useOffset=False)
        ax.set_xlabel('A')
        ax.set_ylabel('n')
        ax.grid(True)
        ax.plot(attenuation, n_butter, 'k')
        ax.plot(attenuation, n_cheby, 'r')
        title = 'Order comparison ws={}'.format(ws)
        fig.suptitle(title)
        fig.canvas.set_window_title(title)

    plt.show()
if __name__ == '__main__':
main()
| 27.648649 | 67 | 0.607038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 268 | 0.261975 |
aa71d3a9bb29857114e644eeaaa3f518e6a6ce22 | 1,090 | py | Python | zipline/pipeline/common.py | lv-cha/zipline-chinese | 65928d98db46568409add0a9a404bc41729d0bb5 | [
"Apache-2.0"
] | 606 | 2017-04-07T03:49:37.000Z | 2022-03-21T06:56:14.000Z | zipline/pipeline/common.py | pm58/zipline-chinese | 86904cac4b6e928271f640910aa83675ce945b8b | [
"Apache-2.0"
] | 6 | 2017-10-04T06:03:14.000Z | 2020-06-05T06:49:49.000Z | zipline/pipeline/common.py | pm58/zipline-chinese | 86904cac4b6e928271f640910aa83675ce945b8b | [
"Apache-2.0"
] | 251 | 2017-04-07T00:55:17.000Z | 2022-03-31T05:33:14.000Z | """
Common constants for Pipeline.
"""
# Field-name string constants shared across Pipeline loaders/datasets.
# As-of / announcement timestamp columns.
AD_FIELD_NAME = 'asof_date'
ANNOUNCEMENT_FIELD_NAME = 'announcement_date'
# Cash and buyback event columns.
CASH_FIELD_NAME = 'cash'
CASH_AMOUNT_FIELD_NAME = 'cash_amount'
BUYBACK_ANNOUNCEMENT_FIELD_NAME = 'buyback_date'
# Derived "days since / days until" output column names.
DAYS_SINCE_PREV = 'days_since_prev'
DAYS_SINCE_PREV_DIVIDEND_ANNOUNCEMENT = 'days_since_prev_dividend_announcement'
DAYS_SINCE_PREV_EX_DATE = 'days_since_prev_ex_date'
DAYS_TO_NEXT = 'days_to_next'
DAYS_TO_NEXT_EX_DATE = 'days_to_next_ex_date'
# Dividend lifecycle columns (next/previous event views).
EX_DATE_FIELD_NAME = 'ex_date'
NEXT_AMOUNT = 'next_amount'
NEXT_ANNOUNCEMENT = 'next_announcement'
NEXT_EX_DATE = 'next_ex_date'
NEXT_PAY_DATE = 'next_pay_date'
PAY_DATE_FIELD_NAME = 'pay_date'
PREVIOUS_AMOUNT = 'previous_amount'
PREVIOUS_ANNOUNCEMENT = 'previous_announcement'
PREVIOUS_BUYBACK_ANNOUNCEMENT = 'previous_buyback_announcement'
PREVIOUS_BUYBACK_CASH = 'previous_buyback_cash'
PREVIOUS_BUYBACK_SHARE_COUNT = 'previous_buyback_share_count'
PREVIOUS_EX_DATE = 'previous_ex_date'
PREVIOUS_PAY_DATE = 'previous_pay_date'
SHARE_COUNT_FIELD_NAME = 'share_count'
# Core identity / record-timestamp columns.
SID_FIELD_NAME = 'sid'
TS_FIELD_NAME = 'timestamp'
| 36.333333 | 79 | 0.844954 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 488 | 0.447706 |
aa73ceee10ce4e7d09848f6bc69f8d1e2190fd5c | 2,220 | py | Python | akaocr/utils/runtime.py | qai-research/Efficient_Text_Detection | e5cfe51148cc4fbf4c4f3afede040e4ebd624e8b | [
"MIT"
] | 2 | 2021-04-28T04:13:09.000Z | 2021-06-05T04:11:11.000Z | akaocr/utils/runtime.py | qai-research/Efficient_Text_Detection | e5cfe51148cc4fbf4c4f3afede040e4ebd624e8b | [
"MIT"
] | 2 | 2021-05-06T13:49:52.000Z | 2021-05-14T08:45:13.000Z | akaocr/utils/runtime.py | qai-research/Efficient_Text_Detection | e5cfe51148cc4fbf4c4f3afede040e4ebd624e8b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
_____________________________________________________________________________
Created By : Nguyen Viet Bac - Bacnv6
Created Date: Mon November 03 10:00:00 VNT 2020
Project : AkaOCR core
_____________________________________________________________________________
This file contain runtime utilities
_____________________________________________________________________________
"""
import sys
import signal
import torch, time, gc
from contextlib import contextmanager
class Color:  # pylint: disable=W0232
    """Integer colour codes (30-38) consumed by colorize() below, which
    wraps them in '\\x1b[...m' terminal escape sequences."""
    GRAY = 30
    RED = 31
    GREEN = 32
    YELLOW = 33
    BLUE = 34
    MAGENTA = 35
    CYAN = 36
    WHITE = 37
    CRIMSON = 38
def colorize(num, string, bold=False, highlight=False):
    """Wrap *string* in ANSI SGR escape codes.

    *num* is a Color code (30-38); *highlight* shifts it by +10 and *bold*
    adds the bold attribute. Returns the decorated string.
    """
    assert isinstance(num, int)
    code = num + 10 if highlight else num
    attrs = [str(code)]
    if bold:
        attrs.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attrs), string)
def colorprint(colorcode, text, o=sys.stdout, bold=False):
    # Write colorized *text* to stream *o* (no trailing newline).
    # NOTE: the default stream is bound to sys.stdout at import time.
    o.write(colorize(colorcode, text, bold=bold))
def warn(msg):
    # Print *msg* in yellow.
    print(colorize(Color.YELLOW, msg))
def error(msg):
    # Print *msg* in red.
    print(colorize(Color.RED, msg))
# http://stackoverflow.com/questions/366682/how-to-limit-execution-time-of-a-function-call-in-python
# Raised by time_limit() when the wrapped block exceeds its deadline.
class TimeoutException(Exception): pass
@contextmanager
def time_limit(seconds):
    """Context manager raising TimeoutException if its body runs longer than
    *seconds* (an integer, per signal.alarm).

    Unix-only: relies on SIGALRM, and signal handlers can only be installed
    from the main thread.
    """
    def signal_handler(signum, frame):
        raise TimeoutException(colorize(Color.RED, " *** Timed out!", highlight=True))
    signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)  # always cancel the pending alarm on exit
# Timing utilities
# Module-level wall-clock anchor: set by start_timer(), read by
# end_timer_and_print().
start_time = None
def start_timer():
    """Reset GC and CUDA memory counters, then record the start time.

    Requires a CUDA device; synchronises it so queued kernels don't skew
    the timestamp.
    """
    global start_time
    gc.collect()
    torch.cuda.empty_cache()
    # NOTE(review): reset_max_memory_allocated() is deprecated in newer
    # torch in favour of reset_peak_memory_stats() — confirm pinned version.
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.synchronize()
    start_time = time.time()
def end_timer_and_print(local_msg):
    """Stop the CUDA-synchronised timer started by start_timer() and print
    *local_msg* followed by elapsed wall time and peak tensor memory."""
    torch.cuda.synchronize()  # ensure all queued kernels have finished
    stop = time.time()
    print("\n" + local_msg)
    print("Total execution time = {:.3f} sec".format(stop - start_time))
    print("Max memory used by tensors = {} bytes".format(torch.cuda.max_memory_allocated()))
aa7564f0bdcd97403180bc4f639bc31610a2dc78 | 456 | py | Python | sample_problems/problems_with_solution23.py | adi01trip01/adi_workspace | f493b3ba84645eec3a57607243760a826880d1a3 | [
"MIT"
] | null | null | null | sample_problems/problems_with_solution23.py | adi01trip01/adi_workspace | f493b3ba84645eec3a57607243760a826880d1a3 | [
"MIT"
] | null | null | null | sample_problems/problems_with_solution23.py | adi01trip01/adi_workspace | f493b3ba84645eec3a57607243760a826880d1a3 | [
"MIT"
] | null | null | null | # Write a Python program to get the n (non-negative integer) copies of the first 2 characters of a given string.
# Return the n copies of the whole string if the length is less than 2
s = input("Enter a string: ")
def copies(string, number):
copy = ""
for i in range(number):
copy += string[0] + string[1]
return copy
if len(s) > 2:
n = int(input("Enter the number of copies: "))
print(copies(s, n))
else:
print(s + s)
| 24 | 112 | 0.638158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.508772 |