hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
943c183a8cf29871bd50107c7e20c5b1b41d6909 | 15,977 | py | Python | oyProjectManager/models/entity.py | gcodebackups/oyprojectmanager | 3085ecbe1cc04a73ec69b4848b789009546feae7 | [
"BSD-2-Clause"
] | 1 | 2020-10-22T00:14:01.000Z | 2020-10-22T00:14:01.000Z | oyProjectManager/models/entity.py | gcodebackups/oyprojectmanager | 3085ecbe1cc04a73ec69b4848b789009546feae7 | [
"BSD-2-Clause"
] | null | null | null | oyProjectManager/models/entity.py | gcodebackups/oyprojectmanager | 3085ecbe1cc04a73ec69b4848b789009546feae7 | [
"BSD-2-Clause"
] | 2 | 2016-12-02T10:15:08.000Z | 2020-02-15T15:51:21.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Erkan Ozgur Yilmaz
#
# This module is part of oyProjectManager and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
from exceptions import TypeError
import os
import jinja2
from sqlalchemy import UniqueConstraint, Column, String, Integer, ForeignKey
from sqlalchemy.ext.declarative import synonym_for
from sqlalchemy.orm import relationship, validates, backref
from oyProjectManager import conf
from oyProjectManager.db import Base
from oyProjectManager.models.version import Version
# create a logger
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
class VersionableBase(Base):
"""A base class for :class:`~oyProjectManager.models.shot.Shot` and
:class:`~oyProjectManager.models.asset.Asset` classes.
It will supply the base attributes to be able to attach a
:class:`~oyProjectManager.models.version.Version` to the
:class:`~oyProjectManager.models.shot.Shot` and
:class:`~oyProjectManager.models.asset.Asset` instances.
It doesn't need any parameter while initialization.
It supplies only one read-only attribute called
:attr:`~oyProjectManager.models.entity.VersionableBase.versions` which is a
list and holds :class:`~oyProjectManager.models.version.Version` instances.
"""
__tablename__ = "Versionables"
__table_args__ = (
UniqueConstraint("_code", "project_id"),
UniqueConstraint("_name", "project_id"),
{"extend_existing": True}
)
versionable_type = Column(String(64), nullable=False)
__mapper_args__ = {
"polymorphic_on": versionable_type,
"polymorphic_identity": "VersionableBase"
}
id = Column(Integer, primary_key=True)
_versions = relationship(
"Version",
cascade='all, delete, delete-orphan'
)
project_id = Column(
Integer, ForeignKey("Projects.id", ondelete='CASCADE'),
nullable=False
)
_project = relationship(
"Project",
backref=backref("versionables", cascade="all, delete")
)
_code = Column(
String(128),
doc="""The nicely formatted version of the
:attr:`~oyProjectManager.models.asset.Asset.name` attribute or
:attr:`~oyProjectManager.models.shot.Shot.number` attribute. It will
be overloaded in the :class:`~oyProjectManager.models.asset.Asset` or
:class:`~oyProjectManager.models.shot.Shot` class.
"""
)
_name = Column(String(128))
description = Column(String)
@synonym_for("_versions")
@property
def versions(self):
"""the Version instances attached to this object
It is a read-only attribute
"""
return self._versions
@synonym_for("_project")
@property
def project(self):
"""the Project instance which this object is related to
It is a read-only attribute
"""
return self._project
@validates("description")
def _validate_description(self, key, description):
"""validates the given description value
"""
if not isinstance(description, (str, unicode)):
raise TypeError("Asset.description should be an instance of "
"string or unicode")
return description
@property
def thumbnail_full_path(self):
"""returns the thumbnail full path for this versionable
"""
from oyProjectManager.models.asset import Asset
from oyProjectManager.models.shot import Shot
# just render a thumbnail path
template_vars = {}
# define the template for the versionable type (asset or shot)
path_template = ''
filename_template = ''
if isinstance(self, Asset):
path_template = jinja2.Template(conf.asset_thumbnail_path)
filename_template = jinja2.Template(conf.asset_thumbnail_filename)
template_vars.update(
{
"project": self.project,
"asset": self,
"extension": conf.thumbnail_format
}
)
elif isinstance(self, Shot):
path_template = jinja2.Template(conf.shot_thumbnail_path)
filename_template = jinja2.Template(conf.shot_thumbnail_filename)
template_vars.update(
{
"project": self.project,
"sequence": self.sequence,
"shot": self,
"extension": conf.thumbnail_format
}
)
# render the templates
path = path_template.render(**template_vars)
filename = filename_template.render(**template_vars)
# the path should be $REPO relative
thumbnail_full_path = os.path.join(
os.environ[conf.repository_env_key], path, filename
).replace('\\', '/')
return thumbnail_full_path
class EnvironmentBase(object):
"""Connects the environment (the host program) to the oyProjectManager.
In oyProjectManager, an Environment is a host application like Maya, Nuke,
Houdini etc.
Generally a GUI for the end user is given an environment which helps
the QtGui to be able to open, save, import or export a Version without
knowing the details of the environment.
.. note::
For now the :class:`~oyProjectManager.models.entity.EnvironmentBase`
inherits from the Python object class. There were no benefit to inherit
it from the ``DeclarativeBase``.
To create a new environment for you own program, just instantiate this
class and override the methods as necessary. And call the UI with by
giving an environment instance to it, so the interface can call the correct
methods as needed.
Here is an example how to create an environment for a program and use the
GUI::
from oyProjectManager.core import EnvironmentBase
class MyProgram(EnvironmentBase):
\"""This is a class which will be used by the UI
\"""
def open():
\"""uses the programs own Python API to open a version of an
asset
\"""
# do anything that needs to be done before opening the file
my_programs_own_python_api.open(filepath=self.version.full_path)
def save():
\"""uses the programs own Python API to save the current file
as a new version.
\"""
# do anything that needs to be done before saving the file
my_programs_own_python_api.save(filepath=self.version.full_path)
# do anything that needs to be done after saving the file
and that is it.
The environment class by default has a property called ``version``.
Holding the current open version. It is None for a new scene and a
:class:`~oyProjectManager.models.version.Version` instance in any other
case.
:param name: To initialize the class the name of the environment should be
given in the name argument. It can not be skipped or None or an empty
string.
"""
# __tablename__ = "Environments"
# id = Column(Integer, primary_key=True)
name = "EnvironmentBase"
def __str__(self):
"""the string representation of the environment
"""
return self._name
@property
def version(self):
"""returns the current Version instance which is open in the
environment
"""
return self.get_current_version()
@property
def name(self):
"""returns the environment name
"""
return self._name
@name.setter
def name(self, name):
"""sets the environment name
"""
self._name = name
def save_as(self, version):
"""The save as action of this environment. It should save the current
scene or file to the given version.full_path
"""
raise NotImplemented
def export_as(self, version):
"""Exports the contents of the open document as the given version.
:param version: A :class:`~oyProjectManager.models.version.Version`
instance holding the desired version.
"""
raise NotImplemented
def open_(self, version, force=False):
"""the open action
"""
raise NotImplemented
def post_open(self, version):
"""Runs after opening a file
"""
raise NotImplemented
def import_(self, asset):
"""the import action
"""
raise NotImplemented
def reference(self, asset):
"""the reference action
"""
raise NotImplemented
def trim_server_path(self, path_in):
"""Trims the server_path value from the given path_in
:param path_in: The path that wanted to be trimmed
:return: str
"""
server_path = os.environ['REPO'].replace('\\', '/')
if path_in.startswith(server_path):
length = len(server_path)
if not server_path.endswith('/'):
length += 1
path_in = path_in[length:]
return path_in
def get_versions_from_path(self, path):
"""Finds Version instances from the given path value.
Finds and returns the :class:`~oyProjectManager.models.version.Version`
instances from the given path value.
Returns an empth list if it can't find any matching.
This method is different than
:meth:`~oyProjectManager.models.entity.EnvironmentBase.get_version_from_full_path`
because it returns a list of
:class:`~oyProjectManager.models.version.Version` instances which are
residing in that path. The list is ordered by the ``id``\ s of the
instances.
:param path: A path which has possible
:class:`~oyProjectManager.models.version.Version` instances.
:return: A list of :class:`~oyProjectManager.models.version.Version`
instances.
"""
if path is None or path == "":
return None
# get the path by trimming the server_path
path = path.replace('\\', '/')
path = self.trim_server_path(path)
# get all the version instance at that path
return Version.query()\
.filter(Version.path.startswith(path))\
.order_by(Version.id.desc())\
.all()
def get_version_from_full_path(self, full_path):
"""Finds the Version instance from the given full_path value.
Finds and returns a :class:`~oyProjectManager.models.version.Version`
instance from the given full_path value.
Returns None if it can't find any matching.
:param full_path: The full_path of the desired
:class:`~oyProjectManager.models.version.Version` instance.
:return: :class:`~oyProjectManager.models.version.Version`
"""
path, filename = os.path.split(full_path)
path = self.trim_server_path(path)
logger.debug('path: %s' % path)
# try to get a version with that info
version = Version.query()\
.filter(Version.path==path)\
.filter(Version.filename==filename)\
.first()
return version
def get_current_version(self):
"""Returns the current Version instance from the environment.
:returns: :class:`~oyProjectManager.models.version.Version` instance or
None
"""
raise NotImplemented
def get_last_version(self):
"""Returns the last opened Version instance from the environment.
* It first looks at the current open file full path and tries to match
it with a Version instance.
* Then searches for the recent files list.
* Still not able to find any Version instances, will return the version
instance with the highest id which has the current workspace path in
its path
* Still not able to find any Version instances returns None
:returns: :class:`~oyProjectManager.models.version.Version` instance or
None
"""
raise NotImplemented
def get_project(self):
"""returns the current project from environment
"""
raise NotImplemented
def set_project(self, version):
"""Sets the project to the given Versions project.
:param version: A :class:`~oyProjectManager.models.version.Version`.
"""
raise NotImplemented
def check_referenced_versions(self):
"""Checks the referenced versions
returns list of asset objects
"""
raise NotImplemented
def get_referenced_versions(self):
"""Returns the :class:`~oyProjectManager.models.version.Version`
instances which are referenced in to the current scene
:returns: list of :class:`~oyProjectManager.models.version.Version`
instances
"""
raise NotImplemented
def get_frame_range(self):
"""Returns the frame range from the environment
:returns: a tuple of integers containing the start and end frame
numbers
"""
raise NotImplemented
def set_frame_range(self, start_frame=1, end_frame=100,
adjust_frame_range=False):
"""Sets the frame range in the environment to the given start and end
frames
"""
raise NotImplemented
def get_fps(self):
"""Returns the frame rate of this current environment
"""
raise NotImplemented
def set_fps(self, fps=25):
"""Sets the frame rate of the environment. The default value is 25.
"""
raise NotImplemented
@property
def extensions(self):
"""Returns the valid native extensions for this environment.
:returns: a list of strings
"""
return self._extensions
@extensions.setter
def extensions(self, extensions):
"""Sets the valid native extensions of this environment.
:param extensions: A list of strings holding the extensions. Ex:
["ma", "mb"] for Maya
"""
self._extensions = extensions
def has_extension(self, filename):
"""Returns True if the given file names extension is in the extensions
list false otherwise.
accepts:
* a full path with extension or not
* a file name with extension or not
* an extension with a dot on the start or not
:param filename: A string containing the filename
"""
if filename is None:
return False
return filename.split('.')[-1].lower() in self.extensions
def load_referenced_versions(self):
"""loads all the references
"""
raise NotImplemented
def replace_version(self, source_version, target_version):
"""Replaces the source_version with the target_version
:param source_version: A
:class:`~oyProjectManager.models.version.Version` instance holding
the version to be replaced
:param target_version: A
:class:`~oyProjectManager.models.version.Version`
instance holding the new version replacing the source one
"""
raise NotImplemented
def replace_external_paths(self, mode=0):
"""Replaces the external paths (which are not starting with the
environment variable) with a proper path. The mode controls if the
resultant path should be absolute or relative to the project dir.
:param mode: Controls the resultant path is absolute or relative.
mode 0: absolute (a path which starts with $REPO)
mode 1: relative (to project path)
:return:
"""
raise NotImplemented
| 32.018036 | 90 | 0.632972 | 15,291 | 0.957063 | 0 | 0 | 3,256 | 0.203793 | 0 | 0 | 9,773 | 0.611692 |
943cbbdb31ae448fb7d75fccc1ef80e65961de28 | 718 | py | Python | setup.py | Omerdan03/dog_scraper | 85c22ec7b378bb2736fd7c8ef0314616c957dbc1 | [
"MIT"
] | null | null | null | setup.py | Omerdan03/dog_scraper | 85c22ec7b378bb2736fd7c8ef0314616c957dbc1 | [
"MIT"
] | null | null | null | setup.py | Omerdan03/dog_scraper | 85c22ec7b378bb2736fd7c8ef0314616c957dbc1 | [
"MIT"
] | null | null | null | import os
from setuptools import setup, find_namespace_packages
requirements = open('requirements.txt').readlines()
with open(os.path.normpath(os.path.join(__file__, '../scraper/VERSION'))) as f:
__version__ = f.readline(0)
setup(name='dog-scraper',
version=__version__,
description='A repo for scrapping dog and pets from Israel Ministry of agriculture',
url='https://github.com/Omerdan03/dog_scraper.git',
author='Omer Danziger',
author_email='Omer.d@razor-labs.com',
license='MIT License',
packages=find_namespace_packages(),
install_requires=requirements,
package_data={
'': ['*.txt', '*.json', '*.yml', '*.yaml', 'VERSION', '*.pkl']
})
| 34.190476 | 90 | 0.667131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.371866 |
943e1d6391bc96c958af3f46e434cc6fac7f09c7 | 568 | py | Python | uni_ticket/migrations/0176_ticketcategorywsprotocollo_protocollo_uo_rpa_matricola.py | biotech2021/uniTicket | 8c441eac18e67a983e158326b1c4b82f00f1f1ef | [
"Apache-2.0"
] | 15 | 2019-09-06T06:47:08.000Z | 2022-01-17T06:39:54.000Z | uni_ticket/migrations/0176_ticketcategorywsprotocollo_protocollo_uo_rpa_matricola.py | biotech2021/uniTicket | 8c441eac18e67a983e158326b1c4b82f00f1f1ef | [
"Apache-2.0"
] | 69 | 2019-09-06T12:03:19.000Z | 2022-03-26T14:30:53.000Z | uni_ticket/migrations/0176_ticketcategorywsprotocollo_protocollo_uo_rpa_matricola.py | biotech2021/uniTicket | 8c441eac18e67a983e158326b1c4b82f00f1f1ef | [
"Apache-2.0"
] | 13 | 2019-09-11T10:54:20.000Z | 2021-11-23T09:09:19.000Z | # Generated by Django 3.2.7 on 2021-11-11 09:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('uni_ticket', '0175_alter_ticketcategorywsprotocollo_protocollo_uo_rpa'),
]
operations = [
migrations.AddField(
model_name='ticketcategorywsprotocollo',
name='protocollo_uo_rpa_matricola',
field=models.CharField(blank=True, default='', help_text='Matricola RPA sul sistema di protocollo', max_length=255, verbose_name='RPA matricola'),
),
]
| 29.894737 | 158 | 0.684859 | 475 | 0.836268 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.40669 |
943eeb9749e082db09f5227b018be34955839421 | 1,015 | py | Python | slowfast/config/custom_config.py | bqhuyy/SlowFast-clean | 3dc000dc9fe1951ab70cb835bfb91b71a07d8f63 | [
"Apache-2.0"
] | null | null | null | slowfast/config/custom_config.py | bqhuyy/SlowFast-clean | 3dc000dc9fe1951ab70cb835bfb91b71a07d8f63 | [
"Apache-2.0"
] | null | null | null | slowfast/config/custom_config.py | bqhuyy/SlowFast-clean | 3dc000dc9fe1951ab70cb835bfb91b71a07d8f63 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Add custom configs and default values"""
from fvcore.common.config import CfgNode
def add_custom_config(_C):
# Knowledge distillation
_C.KD = CfgNode()
# If True enable KD, else skip KD.
_C.KD.ENABLE = False
# Teacher's config
_C.KD.CONFIG = ""
# Alpha
_C.KD.ALPHA = 0.95
# Temperature
_C.KD.TEMPERATURE = 6
# Teacher's config
_C.KD.CONFIG = "configs/Kinetics/SLOWFAST_8x8_R50.yaml"
# Path to the checkpoint to load the initial weight.
_C.KD.CHECKPOINT_FILE_PATH = ""
# Checkpoint types include `caffe2` or `pytorch`.
_C.KD.CHECKPOINT_TYPE = "pytorch"
_C.KD.TEACHER_TRANS_FUNC = 'bottleneck_transform'
# TSM
_C.TSM = CfgNode()
# n_div for TSM
_C.TSM.N_DIV = [[8, 8], [8, 8], [8, 8], [8, 8]]
# fusion n_div
_C.TSM.FUSION_N_DIV = [8, 8, 8, 8]
_C.TEST.CLASS_LIST = 'filenames/kinetics-40'
| 22.555556 | 71 | 0.628571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 483 | 0.475862 |
9440856b2d7276c60d2cc96015a0362bb8a2a18e | 2,425 | py | Python | edan.py | Smithsonian/EDAN-python | 096dfba39fd3585ae773f01751f2f91616084775 | [
"Apache-2.0"
] | 1 | 2021-11-08T09:46:01.000Z | 2021-11-08T09:46:01.000Z | edan.py | Smithsonian/EDAN-python | 096dfba39fd3585ae773f01751f2f91616084775 | [
"Apache-2.0"
] | null | null | null | edan.py | Smithsonian/EDAN-python | 096dfba39fd3585ae773f01751f2f91616084775 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Search metadata in the EDAN API
# v0.1
#
import urllib.parse
import urllib.request
import datetime
import email.utils
import uuid
import hashlib
import json
from base64 import b64encode
#for testing
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
def queryEDAN(edan_q, url, AppID, AppKey):
"""
Execute the query
"""
#Date of request
dt = datetime.datetime.now()
RequestDate = email.utils.format_datetime(dt)
#Generated uniquely for this request
Nonce = str(uuid.uuid4()).replace('-', '')
#This will be the value of X-AuthContent, each element is joined by a single newline
StringToSign = "{}\n{}\n{}\n{}".format(Nonce, edan_q, RequestDate, AppKey)
#First hash using SHA1
HashedString = hashlib.sha1(StringToSign.encode('utf-8')).hexdigest()
#Base64 encode
EncodedString = b64encode(HashedString.encode('utf-8')).decode('utf-8')
#Set headers
headers = {'X-AppId': AppID, 'X-Nonce': Nonce, 'X-RequestDate': RequestDate, 'X-AuthContent': EncodedString}
#Make request
req = urllib.request.Request(url = url, headers = headers, method = "GET")
try:
response = urlopen(req)
except HTTPError as e:
print('The server couldn\'t fulfill the request.')
print('Error: {} ({})'.format(e.reason, e.code))
return False
except URLError as e:
print('We failed to reach a server.')
print('Reason: ', e.reason)
return False
else:
data = response.read().decode('utf-8')
return json.loads(data)
def searchEDAN(edan_query, AppID, AppKey, rows = 10, start = 0):
"""
Search EDAN
"""
#Request
edan_query = urllib.parse.quote_plus(edan_query)
edan_q = "q={}&rows={}&start={}&facet=true".format(edan_query, rows, start)
#Put whole thing together
url = 'https://edan.si.edu/metadata/v2.0/collections/search.htm?{}'.format(edan_q)
#Execute query
result = queryEDAN(edan_q, url, AppID, AppKey)
return result
def getContentEDAN(edan_id, AppID, AppKey):
"""
Get details from an item using an EDAN ID
"""
#Request
edan_q = "url={}".format(edan_id)
#Put whole thing together
url = 'https://edan.si.edu/content/v2.0/content/getContent.htm?{}'.format(edan_q)
#Execute query
result = queryEDAN(edan_q, url, AppID, AppKey)
return result
| 28.529412 | 112 | 0.659794 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 848 | 0.349691 |
9443b3059c2ab393a05388896493bc8f4737abcb | 1,166 | py | Python | dash_docs/chapters/dash_bio/examples/ideogram.py | joelostblom/dash-docs | 7be5aed7795f61ac32375ce33a18046b8f2f5254 | [
"MIT"
] | 379 | 2017-06-21T14:35:52.000Z | 2022-03-20T01:47:14.000Z | dash_docs/chapters/dash_bio/examples/ideogram.py | joelostblom/dash-docs | 7be5aed7795f61ac32375ce33a18046b8f2f5254 | [
"MIT"
] | 746 | 2017-06-21T19:58:17.000Z | 2022-03-23T14:51:24.000Z | dash_docs/chapters/dash_bio/examples/ideogram.py | joelostblom/dash-docs | 7be5aed7795f61ac32375ce33a18046b8f2f5254 | [
"MIT"
] | 201 | 2017-06-21T21:53:19.000Z | 2022-03-17T13:23:55.000Z | import dash
import dash_bio as dashbio
import dash_html_components as html
import dash_core_components as dcc
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
'Select which chromosomes to display on the ideogram below:',
dcc.Dropdown(
id='displayed-chromosomes',
options=[{'label': str(i), 'value': str(i)} for i in range(1, 23)],
multi=True,
value=[str(i) for i in range(1, 23)]
),
dashbio.Ideogram(
id='my-dashbio-ideogram'
),
html.Div(id='ideogram-rotated')
])
@app.callback(
dash.dependencies.Output('my-dashbio-ideogram', 'chromosomes'),
[dash.dependencies.Input('displayed-chromosomes', 'value')]
)
def update_ideogram(value):
return value
@app.callback(
dash.dependencies.Output('ideogram-rotated', 'children'),
[dash.dependencies.Input('my-dashbio-ideogram', 'rotated')]
)
def update_ideogram_rotated(rot):
return 'You have {} selected a chromosome.'.format(
'' if rot else 'not')
if __name__ == '__main__':
app.run_server(debug=True)
| 26.5 | 75 | 0.683533 | 0 | 0 | 0 | 0 | 455 | 0.390223 | 0 | 0 | 355 | 0.30446 |
9445502dd31059686a0626a1b245765a02b8c798 | 6,860 | py | Python | blockchain/node.py | EnniOne/minimum_viable_block_chain | 3e0bb1ea5e63f3d22958806a37ead2ab94b827cd | [
"MIT"
] | null | null | null | blockchain/node.py | EnniOne/minimum_viable_block_chain | 3e0bb1ea5e63f3d22958806a37ead2ab94b827cd | [
"MIT"
] | null | null | null | blockchain/node.py | EnniOne/minimum_viable_block_chain | 3e0bb1ea5e63f3d22958806a37ead2ab94b827cd | [
"MIT"
] | null | null | null | from blockchain import Blockchain, Transaction
from nacl.signing import SigningKey
from hashlib import sha256
from time import sleep
from threading import Thread
import random
class Node:
"""Represent a Node."""
def __init__(self, neighbours, unverified_transactions_pool):
"""
Initialize the Node.
:param neighbours: Other nodes that take part in the network.
:param unverified_transactions_pool: Pool of unverified transactions
"""
self.private_key = SigningKey.generate()
self.public_key = self.private_key.verify_key
self.id = sha256(self.public_key.encode()).hexdigest()
self.name = self.id
self.blockchain = Blockchain()
self.neighbours = neighbours
self.unverified_transactions_pool = unverified_transactions_pool
def log(self, message):
"""Log a message to stdout, adding this node's identifier"""
print("[{id}]: {msg}".format(id=self.name, msg=message))
def mine(self):
"""Mine a new block"""
try:
transaction = self.unverified_transactions_pool.pop()
except IndexError:
self.log("No transaction new transaction found")
return False
self.consensus() # ensure consensus
if not transaction.is_valid(self.blockchain.ledger):
self.log("Transaction invalid")
return False
# Get proof of last block
last_block = self.blockchain.last_block
last_proof = last_block.dict["proof"]
proof = self.blockchain.proof_of_work(last_proof) # compute new proof
block = self.blockchain.new_block(proof, transaction) # Add new block to ledger
self.log("New block forged: {}".format(block.hash))
return True
def consensus(self):
"""Replace the blockchain with the longest valid in the network."""
for node in self.neighbours:
min_length = len(self.blockchain.ledger)
current_neighbour_chain = node.blockchain
# Only replace ledger if the neighbours chain is longer and valid
if len(current_neighbour_chain.ledger) > min_length and current_neighbour_chain.is_valid():
self.blockchain.ledger = current_neighbour_chain.ledger
class MiningNode(Node, Thread):
"""Represent a Thread that mines new blocks"""
def __init__(self, neighbours, unverified_transactions_pool):
Thread.__init__(self)
super().__init__(neighbours, unverified_transactions_pool)
self.daemon = True
def run(self):
"""Mine and never stop (unless there is an evil alien that demands you to stop. Then stop.)"""
while True:
if not self.mine():
sleep(5)
class WalletNode(Node, Thread):
"""Represent a Person using a simple wallet."""
def __init__(self, neighbours, unverified_transactions_pool, name):
Thread.__init__(self)
super().__init__(neighbours, unverified_transactions_pool)
self.daemon = True
self.name = name
self.friends = []
def add_friends(self, *friend_nodes):
for node in friend_nodes:
self.friends.append(node)
def new_transaction(self, recipient, amount):
"""Send an amount of coins to a recipient"""
self.consensus()
if recipient not in [x.name for x in self.friends]:
self.log("I don't know {}".format(recipient))
return False
if amount > self.balance:
self.log("I don't have enough money to send {} {} Coins.".format(recipient, amount))
return False
self.log("I'm sending {} {} Coins.".format(recipient, amount))
outputs = []
spent_outputs = []
for block in self.blockchain.ledger:
for output in block.transaction.outputs: # Sum all earnings
if output["public_key"] == self.public_key:
outputs.append((block.transaction.hash, block.transaction.outputs.index(output)))
for input in block.transaction.inputs: # Detect outgoings
if input["public_key"] == self.public_key:
spent_outputs.append((input["hash"], input["output_index"]))
outputs_for_t_input = []
for output in outputs:
if output not in spent_outputs:
outputs_for_t_input.append(output)
outputs = outputs_for_t_input
output_amount = 0
for b in self.blockchain.ledger:
for output in outputs:
if b.transaction.hash == output[0]:
output_amount += b.transaction.outputs[output[1]]["amount"]
for friend in self.friends:
if friend.name == recipient:
recipient = friend.public_key
inputs = []
for output in outputs: # Generate inputs
sig = self.private_key.sign(output[0].encode())
inputs.append({"hash": output[0], "output_index": output[1],
"signature": sig, "public_key": self.public_key})
outputs = [{"public_key": recipient, "amount": amount}]
if amount < output_amount:
outputs.append({"public_key": self.public_key, "amount": output_amount - amount})
transaction = Transaction(inputs=inputs.copy(), outputs=outputs.copy())
self.unverified_transactions_pool.append(transaction)
def go_to_work(self):
"""Add a new generating transaction for 50 coins"""
self.consensus()
transaction = Transaction([], [{"public_key": self.public_key, "amount": 50}])
self.unverified_transactions_pool.append(transaction)
@property
def balance(self):
"""Return the Node's balance"""
self.consensus() # update
balance = 0
outgoings = []
for block in self.blockchain.ledger:
for output in block.transaction.outputs: # Sum all earnings
if output["public_key"] == self.public_key:
balance += output["amount"]
for input in block.transaction.inputs: # Detect outgoings
if input["public_key"] == self.public_key:
outgoings.append((input["hash"], input["output_index"]))
# Sub outgoings
for block in self.blockchain.ledger:
for outgoing in outgoings:
if block.transaction.hash == outgoing[0]:
balance -= block.transaction.outputs[outgoing[1]]["amount"]
return balance
def run(self):
while True:
self.go_to_work()
self.log("Balance {}".format(self.balance))
sleep(5)
recipient = random.choice(self.friends).name
amount = random.randint(1, 100)
self.new_transaction(recipient, amount)
| 36.489362 | 103 | 0.617347 | 6,675 | 0.973032 | 0 | 0 | 857 | 0.124927 | 0 | 0 | 1,362 | 0.198542 |
94464711b51aaed6bb644bb94d8782573a3c211b | 302 | py | Python | mmocr/models/common/__init__.py | yangrisheng/mmocr | 3ad4a8d3f8d2d22b7854b72ee68a7977a3f3631f | [
"Apache-2.0"
] | 2 | 2022-01-02T13:33:10.000Z | 2022-02-08T07:40:30.000Z | mmocr/models/common/__init__.py | yangrisheng/mmocr | 3ad4a8d3f8d2d22b7854b72ee68a7977a3f3631f | [
"Apache-2.0"
] | null | null | null | mmocr/models/common/__init__.py | yangrisheng/mmocr | 3ad4a8d3f8d2d22b7854b72ee68a7977a3f3631f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
from . import backbones, layers, losses, modules
from .backbones import * # NOQA
from .layers import * # NOQA
from .losses import * # NOQA
from .modules import * # NOQA
__all__ = backbones.__all__ + losses.__all__ + layers.__all__ + modules.__all__
| 33.555556 | 79 | 0.731788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.235099 |
9446aade498fa199059711760c1a3d5ae32f881f | 994 | py | Python | Task1B.py | lhliew/flood-warning | 234bb3f7ec7174fc91963d8b7e64df1893694e1b | [
"MIT"
] | null | null | null | Task1B.py | lhliew/flood-warning | 234bb3f7ec7174fc91963d8b7e64df1893694e1b | [
"MIT"
] | null | null | null | Task1B.py | lhliew/flood-warning | 234bb3f7ec7174fc91963d8b7e64df1893694e1b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 29 16:13:48 2017
@author: laide
"""
"""prints a list of tuples (station name, town, distance) for the 10 closest
and the 10 furthest stations from the Cambridge city centre, (52.2053, 0.1218)."""
from floodsystem.geo import stations_by_distance
from floodsystem.stationdata import build_station_list
def run():
    """Print the 10 closest and 10 furthest stations from Cambridge city centre.

    Stations are ranked by distance from (52.2053, 0.1218) and reported as
    (station name, town, distance) tuples.
    """
    cambridge_centre = (52.2053, 0.1218)
    ranked = stations_by_distance(build_station_list(), cambridge_centre)
    # (station name, town, distance) for the ten nearest and ten most distant.
    closest = [(station.name, station.town, distance) for station, distance in ranked[:10]]
    furthest = [(station.name, station.town, distance) for station, distance in ranked[-10:]]
    print("The closest 10 stations are:")
    print(closest)
    print("The furthest 10 stations are:")
    print(furthest)


if __name__ == "__main__":
    run()
9446b9ff584bcbd9ff011026042c303a28b108ae | 7,988 | py | Python | tests/run-tests.py | notofonts/NotoSansDuployan | d3bf2d4436d376501d0720e5d9f1fe032f8c5ffc | [
"Apache-2.0"
] | 6 | 2020-04-06T02:14:07.000Z | 2022-03-22T09:13:47.000Z | tests/run-tests.py | dscorbett/duployan-font | 966e4e233f56d818bbbcb4548f1cf232cd3fe4a1 | [
"Apache-2.0"
] | 2 | 2021-07-19T10:20:41.000Z | 2021-12-16T01:25:02.000Z | tests/run-tests.py | notofonts/NotoSansDuployan | d3bf2d4436d376501d0720e5d9f1fe032f8c5ffc | [
"Apache-2.0"
] | 1 | 2019-08-04T03:40:57.000Z | 2019-08-04T03:40:57.000Z | #!/usr/bin/env python3
# Copyright 2018-2019 David Corbett
# Copyright 2020-2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import difflib
import json
import os
import re
import subprocess
import sys
# True when running under CI (the environment variable CI is set to "true").
CI = os.getenv('CI') == 'true'
# Trailing "._XXXX" disambiguation suffixes appended to glyph names.
DISAMBIGUATION_SUFFIX_PATTERN = re.compile(r'\._[0-9A-F]+$')
# "@x,y" glyph-position annotations produced by parse_json().
GLYPH_POSITION_PATTERN = re.compile(r'@-?[0-9]+,-?[0-9]+')
# A ".notdef" glyph at the start of a token (right after '[' or '|').
NOTDEF_PATTERN = re.compile(r'[\[|]\.notdef@')
# Space-character glyph names (NBSP, general punctuation spaces, NNBSP, MMSP, ideographic space).
SPACE_NAME_COMPONENT_PATTERN = re.compile('(?<=[\[|])(?:uni00A0|uni200[0-9A]|uni202F|uni205F|uni3000)(?![0-9A-Za-z_])')
# Code points whose tests only pass against the full font (U+034F).
FULL_FONT_CODE_POINTS = [0x034F]
# Glyph-name prefix encoding the source code point(s): "uniXXXX...", "uXXXXXX", or "dupl".
NAME_PREFIX = r'(?:(?:dupl|u(?:ni(?:[0-9A-F]{4})+|[0-9A-F]{4,6})(?:_[^.]*)?)\.)'
# Any glyph-name component whose exact spelling may differ between font builds.
UNSTABLE_NAME_COMPONENT_PATTERN = re.compile(fr'(?<=[\[|])(?:{NAME_PREFIX}[0-9A-Za-z_]+|(?!{NAME_PREFIX})[0-9A-Za-z_]+)')
def parse_color(color):
    """Translate a ``--color`` option value into a boolean.

    ``'yes'``/``'no'`` force color on/off; ``'auto'`` enables it under CI or
    when stdout is a terminal. Any other value raises ValueError.
    """
    if color == 'yes':
        return True
    if color == 'no':
        return False
    if color == 'auto':
        return CI or sys.stdout.isatty()
    raise ValueError(f'Invalid --color value: {color}')
def parse_json(s):
    """Yield ``name@x,y`` tokens for each visible glyph in hb-shape JSON output.

    Glyphs whose names start with ``'_'`` are treated as invisible and are
    skipped. A final ``_@x,y`` token records the total advance.
    """
    x = 0
    y = 0
    for glyph in json.loads(s):
        name = glyph['g']
        if not name.startswith('_'):
            clean_name = DISAMBIGUATION_SUFFIX_PATTERN.sub('', name)
            yield f'{clean_name}@{x + glyph["dx"]},{y + glyph["dy"]}'
        x += int(glyph['ax'])
        y += int(glyph['ay'])
    yield f'_@{x},{y}'
def munge(output, regular, incomplete):
    """Normalize one shaping-result line before comparison.

    For incomplete fonts, unstable glyph-name components are collapsed to
    ``'dupl'``; for non-Regular fonts, glyph positions are stripped.
    """
    text = output
    if incomplete:
        text = UNSTABLE_NAME_COMPONENT_PATTERN.sub('dupl', text)
    if regular:
        return text
    return GLYPH_POSITION_PATTERN.sub('', text)
def print_diff(code_points, options, actual_output, expected_output, color):
    """Print the actual vs. expected shaping output for a failed test case.

    When *color* is true, differing spans are wrapped in ANSI escapes:
    bright cyan ('\\x1B[1;96m') in the actual line, bright yellow
    ('\\x1B[1;93m') in the expected line.
    """
    if color:
        highlighted_actual_output = []
        highlighted_expected_output = []
        matcher = difflib.SequenceMatcher(None, actual_output, expected_output, False)
        for tag, i1, i2, j1, j2 in matcher.get_opcodes():
            if tag == 'equal':
                highlighted_actual_output.append(actual_output[i1:i2])
                highlighted_expected_output.append(expected_output[j1:j2])
            elif tag == 'delete':
                # Text present only in the actual output.
                highlighted_actual_output.append('\x1B[1;96m')
                highlighted_actual_output.append(actual_output[i1:i2])
                highlighted_actual_output.append('\x1B[0m')
            elif tag == 'insert':
                # Text present only in the expected output.
                highlighted_expected_output.append('\x1B[1;93m')
                highlighted_expected_output.append(expected_output[j1:j2])
                highlighted_expected_output.append('\x1B[0m')
            elif tag == 'replace':
                # Differing text on both sides: highlight each independently.
                highlighted_actual_output.append('\x1B[1;96m')
                highlighted_actual_output.append(actual_output[i1:i2])
                highlighted_actual_output.append('\x1B[0m')
                highlighted_expected_output.append('\x1B[1;93m')
                highlighted_expected_output.append(expected_output[j1:j2])
                highlighted_expected_output.append('\x1B[0m')
            else:
                assert False, f'Unknown tag: {tag}'
        actual_output = ''.join(highlighted_actual_output)
        expected_output = ''.join(highlighted_expected_output)
    print()
    print(f'Input: {code_points}:{options}')
    print('Actual: ' + actual_output)
    print('Expected: ' + expected_output)
def run_test(font, line, png_file, color, incomplete, view_all):
    """Shape one test line with ``hb-shape`` and compare with the expectation.

    *line* has the form ``<code points>:<hb-shape options>:<expected output>``.
    Returns ``(passed, actual_line)`` where *actual_line* is the input line
    rewritten with the actual output (used to refresh failed test files).
    """
    code_points, options, expected_output = line.split(':')
    p = subprocess.Popen(
        [
            'hb-shape',
            font,
            '-u',
            code_points,
            '-O',
            'json',
            '--remove-default-ignorables',
            *options.split(),
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout_data, stderr_data = p.communicate()
    print(stderr_data.decode('utf-8'), end='', file=sys.stderr)
    actual_output = f'[{"|".join(parse_json(stdout_data.decode("utf-8")))}]'
    regular = font.endswith('-Regular.otf')
    # Compare after normalization; for incomplete fonts several classes of
    # expected mismatch are forgiven: missing (.notdef) glyphs, space glyph
    # names, and code points only present in the full font.
    passed = (munge(actual_output, regular, incomplete) == munge(expected_output, regular, incomplete)
        or incomplete and (
            NOTDEF_PATTERN.search(actual_output)
            or SPACE_NAME_COMPONENT_PATTERN.search(expected_output)
            or any(int(cp, 16) in FULL_FONT_CODE_POINTS for cp in code_points.split())
        )
    )
    if not passed or view_all:
        if not passed:
            print_diff(code_points, options, actual_output, expected_output, color)
        # Render a PNG of the case for local inspection; skipped under CI.
        if not CI:
            os.makedirs(os.path.dirname(png_file), exist_ok=True)
            png_file = '{}-{}.png'.format(png_file, code_points.replace(' ', '-'))
            p = subprocess.Popen(
                [
                    'hb-view',
                    '--font-file',
                    font,
                    '--font-size',
                    'upem',
                    '-u',
                    f'E000 {code_points} E000',
                    '--remove-default-ignorables',
                    '-o',
                    png_file,
                    '-O',
                    'png',
                    '--margin',
                    '800 0',
                    *options.split(),
                ],
                stderr=subprocess.PIPE,
                stdout=subprocess.PIPE)
            p.wait()
            print(p.stderr.read().decode('utf-8'), end='', file=sys.stderr)
    return (passed, ':'.join([code_points, options, actual_output]))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run shaping tests.')
    parser.add_argument('--color', default='auto', help='Whether to print diffs in color: "yes", "no", or "auto".')
    parser.add_argument('--incomplete', action='store_true', help='Whether the font is less than the complete font. Do not fail a test if the actual result contains `.notdef`. Ignore the parts of glyph names that indicate code points.')
    parser.add_argument('--view', action='store_true', help='Render all test cases, not just the failures.')
    parser.add_argument('font', help='The path to a font.')
    parser.add_argument('tests', nargs='*', help='The paths to test files.')
    args = parser.parse_args()
    color = parse_color(args.color.lower())
    passed_all = True
    # Failed results are collected under <script dir>/failed/<font file name>/.
    failed_dir = os.path.join(os.path.dirname(sys.argv[0]), 'failed', os.path.basename(args.font))
    os.makedirs(failed_dir, exist_ok=True)
    for fn in args.tests:
        result_lines = []
        passed_file = True
        with open(fn) as f:
            for line_number, line in enumerate(f, start=1):
                line = line.rstrip()
                # Blank lines and '#' comment lines are copied through as-is.
                if line and line[0] != '#':
                    passed_line, result_line = run_test(
                        args.font,
                        line,
                        os.path.join(failed_dir, 'png', os.path.basename(fn), '{:03}'.format(line_number)),
                        color,
                        args.incomplete,
                        args.view,
                    )
                    passed_file = passed_file and passed_line
                    result_lines.append(result_line + '\n')
                else:
                    result_lines.append(line + '\n')
        # On failure, write the file rewritten with actual outputs so the
        # expectations can be diffed/refreshed easily.
        if not passed_file:
            with open(os.path.join(failed_dir, os.path.basename(fn)), 'w') as f:
                f.writelines(result_lines)
        passed_all = passed_all and passed_file
    if not passed_all:
        sys.exit(1)
| 40.964103 | 236 | 0.578117 | 0 | 0 | 403 | 0.050451 | 0 | 0 | 0 | 0 | 2,121 | 0.265523 |
944721eaafb8ae53ae88021d8e7ade1d8c08d23c | 1,969 | py | Python | TagScreen.py | CMPUT-291-Miniproject/MiniProject-1 | e56b3acf6867278613a2a05d1fbf453f909fa45b | [
"MIT"
] | null | null | null | TagScreen.py | CMPUT-291-Miniproject/MiniProject-1 | e56b3acf6867278613a2a05d1fbf453f909fa45b | [
"MIT"
] | null | null | null | TagScreen.py | CMPUT-291-Miniproject/MiniProject-1 | e56b3acf6867278613a2a05d1fbf453f909fa45b | [
"MIT"
] | null | null | null | from Tag import Tag
from CheckInput import CheckInput
from PostQuery import QuestionQuery
from PostQuery import AnswerQuery
class TagScreen:
    """
    A screen which handles adding a tag to a post
    This module is responsible for providing the UI of the screen
    which the user can interface with to add tags to posts
    """
    def __init__(self, terminal, post):
        """
        Creates an instance of TagScreen
        Parameters:
            terminal:
                A Terminal object allowing for this module to interface
                with the OS terminal
            post:
                A PostQuery Object which contains information about the post
                who is getting a tag
        Returns:
            An instance of TagScreen
        """
        self.__chkinp__ = CheckInput()  # input validation (escape detection)
        self.__terminal__ = terminal
        self.__post__ = post
        self.__tag__ = Tag(terminal.getDBName())  # data-access object for tag inserts
    def printTitle(self):
        """
        Prints text identifying this screen to the user and providing some information
        """
        self.__terminal__.clear()
        self.__terminal__.printCenter("Tag the post")
        self.__terminal__.printCenter("The post you are currently tagging has the title " + self.__post__.title)
    def printScreen(self):
        """
        Serves as the main loop of the module. Allowing the user
        to interface with the program by providing a tag
        """
        self.printTitle()
        invalidInput = True  # NOTE(review): never read; candidate for removal
        try:
            userInput = input("Enter tag you would like to add to the post: ")
            if self.__chkinp__.checkEscape(userInput):
                # Early exit still runs the ``finally`` prompt below.
                return None
            self.__tag__.addTag(self.__post__.pid, userInput)
        except Exception as e:
            # Surface database/validation errors without crashing the UI.
            print(e)
        else:
            print("Tag successfully added!")
        finally:
            input("Type enter to continue: ")
if __name__ == "__main__":
    from Terminal import Terminal
    from SearchForPostsScreen import SearchForPostsScreen
    from SelectedPostScreen import SelectedPostScreen
    # Manual smoke test: search for a post, open it, and — if the user picks
    # option 3 on the post screen — tag it.
    sfps = SearchForPostsScreen(Terminal())
    post = sfps.printScreen()
    sps = SelectedPostScreen(Terminal(), post, True)
    if sps.printScreen() == 3:
        t = TagScreen(Terminal(), post)
        t.printScreen()
| 26.608108 | 106 | 0.733875 | 1,472 | 0.747588 | 0 | 0 | 0 | 0 | 0 | 0 | 852 | 0.432707 |
9447f67cbc469e7ce4cacfa8e5dbd718fbc8f8cb | 6,096 | py | Python | sta663_project_lda/algorithms/lda_gibbs.py | haofuml/sta663_project_lda | d9d0253f61996fef48e9909aecf583e70e318aff | [
"MIT"
] | 3 | 2019-05-18T13:37:07.000Z | 2021-11-19T07:29:47.000Z | sta663_project_lda/algorithms/lda_gibbs.py | haofuml/sta663_project_lda | d9d0253f61996fef48e9909aecf583e70e318aff | [
"MIT"
] | null | null | null | sta663_project_lda/algorithms/lda_gibbs.py | haofuml/sta663_project_lda | d9d0253f61996fef48e9909aecf583e70e318aff | [
"MIT"
] | null | null | null | """
Collapsed Gibbs Sampling Implementation of LDA
"""
import numpy as np
import sys
import random
from scipy.special import gamma, gammaln, psi
from scipy.stats import *
from scipy import *
import argparse
from sta663_project_lda.visualization.demo_topics import topic_viz
class LDAGibbs(object):
    """Latent Dirichlet Allocation trained with collapsed Gibbs sampling.

    Word-topic assignments are resampled directly from their full
    conditionals; the document-topic distribution ``theta`` and topic-word
    distribution ``phi`` are recovered by averaging posterior samples
    collected after burn-in (the last 20% of iterations).
    """

    def __init__(self, data_path, ntopics):
        """Load a document-term count matrix and initialise the model.

        :param data_path: path to a ``.npy`` file of shape (VOCABS, DOCS);
            it is transposed so rows become documents.
        :param ntopics: number of latent topics.
        """
        self.TOPICS = ntopics
        # Symmetric Dirichlet prior over topics per document.
        self.alpha = np.full(self.TOPICS, 0.1)
        # Symmetric Dirichlet prior over words per topic.
        self.beta = 0.01
        data = np.load(data_path).T
        self.DOCS = data.shape[0]
        self.VOCABS = data.shape[1]
        # Expand the count matrix into shuffled bag-of-words token lists.
        self.documents = {}
        for i, doc in enumerate(data):
            tmp_doc = []
            for j, count in enumerate(doc):
                tmp_doc.extend([j] * int(count))
            random.shuffle(tmp_doc)
            self.documents[i] = tmp_doc
        self.theta = np.zeros([self.DOCS, self.TOPICS])
        self.phi = np.zeros([self.TOPICS, self.VOCABS])
        self.sample_theta = np.zeros([self.DOCS, self.TOPICS])
        self.sample_phi = np.zeros([self.TOPICS, self.VOCABS])

    def Loss(self):
        """Return the collapsed log joint likelihood log p(w, z | alpha, beta)."""
        ll = 0
        for z in range(self.TOPICS):
            # Dirichlet-multinomial term for words given topic z.
            ll += gammaln(self.VOCABS * self.beta)
            ll -= self.VOCABS * gammaln(self.beta)
            ll += np.sum(gammaln(self.cntTW[z] + self.beta))
            ll -= gammaln(np.sum(self.cntTW[z] + self.beta))
        for doc_num, doc in enumerate(self.documents):
            # Dirichlet-multinomial term for topics given the document.
            ll += gammaln(np.sum(self.alpha))
            ll -= np.sum(gammaln(self.alpha))
            ll += np.sum(gammaln(self.cntDT[doc_num] + self.alpha))
            ll -= gammaln(np.sum(self.cntDT[doc_num] + self.alpha))
        return ll

    def gibbs_update(self, d, w, pos):
        """Resample the topic of the word at *pos* in document *d*.

        :param d: document index.
        :param w: vocabulary id of the word.
        :param pos: position of the word within the document's token list.
        """
        z = self.topicAssignments[d][pos]
        # Remove the word's current assignment from all count tables.
        self.cntTW[z, w] -= 1
        self.cntDT[d, z] -= 1
        self.cntT[z] -= 1
        # Full conditional p(z | rest) is proportional to p(z | d) * p(w | z).
        prL = (self.cntDT[d] + self.alpha) / (self.lenD[d] - 1 + np.sum(self.alpha))
        prR = (self.cntTW[:, w] + self.beta) / (self.cntT + self.beta * self.VOCABS)
        prFullCond = prL * prR
        prFullCond /= np.sum(prFullCond)
        new_z = np.random.multinomial(1, prFullCond).argmax()
        # Record the new assignment and restore the counts.
        self.topicAssignments[d][pos] = new_z
        self.cntTW[new_z, w] += 1
        self.cntDT[d, new_z] += 1
        self.cntT[new_z] += 1

    def update_alpha_beta(self):
        """Fixed-point updates for the Dirichlet hyperparameters alpha and beta."""
        # Update beta (shared scalar).
        x = 0
        y = 0
        for z in range(self.TOPICS):
            x += np.sum(psi(self.cntTW[z] + self.beta) - psi(self.beta))
            y += psi(np.sum(self.cntTW[z] + self.beta)) - psi(self.VOCABS * self.beta)
        self.beta = (self.beta * x) / (self.VOCABS * y)
        # Update alpha (one value per topic).
        x = 0
        y = 0
        for d in range(self.DOCS):
            y += psi(np.sum(self.cntDT[d] + self.alpha)) - psi(np.sum(self.alpha))
            x += psi(self.cntDT[d] + self.alpha) - psi(self.alpha)
        self.alpha *= x / y

    def update_phi_theta(self):
        """Compute posterior-mean estimates of theta and phi from the counts."""
        # Vectorized equivalents of the per-element formulas:
        # theta[d, z] = (cntDT[d, z] + alpha[z]) / (lenD[d] + sum(alpha))
        # phi[z, w]   = (cntTW[z, w] + beta) / (cntT[z] + beta * VOCABS)
        self.sample_theta = (self.cntDT + self.alpha) / (
            self.lenD[:, None] + np.sum(self.alpha))
        self.sample_phi = (self.cntTW + self.beta) / (
            self.cntT[:, None] + self.beta * self.VOCABS)

    def print_alpha_beta(self):
        """Print the current hyperparameter values."""
        print('Alpha')
        for i in range(self.TOPICS):
            print(self.alpha[i])
        print('Beta: {}'.format(self.beta))

    def run(self, max_iter=50):
        """Run collapsed Gibbs sampling and average the post-burn-in samples.

        :param max_iter: total sampling iterations; the last 20% are averaged
            into ``self.theta`` and ``self.phi``.
        """
        burnin = max_iter * 0.8
        self.topicAssignments = {}
        self.cntTW = np.zeros([self.TOPICS, self.VOCABS])  # topic-word counts
        self.cntDT = np.zeros([self.DOCS, self.TOPICS])    # doc-topic counts
        self.cntT = np.zeros(self.TOPICS)                  # words per topic
        self.lenD = np.zeros(self.DOCS)                    # words per document
        # Random initial assignment of every token to a topic.
        for doc_num, doc in enumerate(self.documents):
            doc_size = len(self.documents[doc])
            tmp = np.random.randint(0, self.TOPICS, size=doc_size)
            self.topicAssignments[doc_num] = tmp
            for i, word in enumerate(self.documents[doc]):
                self.cntTW[tmp[i], word] += 1
                self.cntDT[doc_num, tmp[i]] += 1
                self.cntT[tmp[i]] += 1
                # BUG FIX: was ``self.lenD[word] += 1``, which indexed the
                # per-document length array by vocabulary id (wrong lengths,
                # and an IndexError whenever VOCABS > DOCS).
                self.lenD[doc_num] += 1
        print('LIKELIHOOD:\n', self.Loss())
        self.print_alpha_beta()
        for s in range(max_iter):
            print('Iter: {}'.format(s))
            for doc_num, doc in enumerate(self.documents):
                for i, word in enumerate(self.documents[doc]):
                    self.gibbs_update(doc_num, word, i)
            self.update_alpha_beta()
            print('Loss{}'.format(self.Loss()))
            self.print_alpha_beta()
            if s > burnin:
                self.update_phi_theta()
                self.theta += self.sample_theta
                self.phi += self.sample_phi
        self.theta /= (max_iter - burnin - 1)
        self.phi /= (max_iter - burnin - 1)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Gibbs Sampling Training Paramters.')
    parser.add_argument('-K', dest='K', type=int,
                        help='number of Topics', default=2)
    parser.add_argument('--datadir', dest='datadir', action='store',
                        help='Path of the genearted data', default='./data/toydata_mat.npy')
    args = parser.parse_args()
    lda = LDAGibbs(args.datadir, args.K)
    lda.run()
    # Visualize the top-5 words of each learned topic.
    vocabulary = np.load('./data/toydata_voc.npy')
    topic_viz(lda.phi.T,vocabulary,topk=5)
| 38.1 | 130 | 0.563156 | 5,199 | 0.852854 | 0 | 0 | 0 | 0 | 0 | 0 | 690 | 0.113189 |
9448ae4b1a5ba29919ea2751ef6ab4b81defaac0 | 1,971 | py | Python | FrontEnd/app/socket_client_side.py | ahmobayen/image_processing | d616b2ca2dafba1fb519f025044481eb1a2e760b | [
"MIT"
] | null | null | null | FrontEnd/app/socket_client_side.py | ahmobayen/image_processing | d616b2ca2dafba1fb519f025044481eb1a2e760b | [
"MIT"
] | null | null | null | FrontEnd/app/socket_client_side.py | ahmobayen/image_processing | d616b2ca2dafba1fb519f025044481eb1a2e760b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os.path
import pickle
import struct
import socket
import selectors
sel = selectors.DefaultSelector()  # NOTE(review): unused in this module — candidate for removal
messages = [b'Video Request']  # NOTE(review): unused; the request bytes are sent inline below
def video_receive():
    """Stream video frames from the local backend as multipart MJPEG chunks.

    Each message on the wire is length-prefixed: an 8-byte unsigned long long
    ("Q") carrying the payload size, followed by the pickled frame bytes.
    Yields ready-to-send ``--frame`` chunks for an HTTP multipart response;
    the generator ends when the server closes the connection.
    """
    HOST = "127.0.0.1"  # The server's hostname or IP address
    PORT = 65432  # The port used by the server
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_socket:
        try:
            client_socket.connect((HOST, PORT))
            client_socket.sendall(b'Video Request')
            data = b""
            payload_size = struct.calcsize("Q")
            while True:
                # Read until the 8-byte size header is complete.
                while len(data) < payload_size:
                    packet = client_socket.recv(4 * 1024)  # 4K
                    if not packet:
                        # BUG FIX: a bare ``break`` here only left the inner
                        # loop and then unpacked a short buffer; stop the
                        # generator cleanly when the peer closes instead.
                        return
                    data += packet
                packed_msg_size = data[:payload_size]
                data = data[payload_size:]
                msg_size = struct.unpack("Q", packed_msg_size)[0]
                # Read until the whole frame payload has arrived.
                while len(data) < msg_size:
                    packet = client_socket.recv(4 * 1024)
                    if not packet:
                        # BUG FIX: previously looped without an EOF check,
                        # spinning forever on a closed connection.
                        return
                    data += packet
                frame_data = data[:msg_size]
                data = data[msg_size:]
                # SECURITY: pickle.loads on network data can execute arbitrary
                # code if the peer is untrusted; acceptable here only because
                # the server is the localhost backend we control.
                frame = pickle.loads(frame_data)
                if frame:
                    yield b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n'
        except ConnectionError:
            # The ``with`` block closes the socket; just stop the generator.
            return
def simple_receive():
    """Send an API request to the local backend and return its raw reply bytes."""
    server_address = ("127.0.0.1", 65431)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as api_socket:
        api_socket.connect(server_address)
        api_socket.sendall(b'api Request')
        reply = api_socket.recv(1024)
        api_socket.close()
    return reply
if __name__ == '__main__':
    from ast import literal_eval
    # Parse the reply as a Python literal; literal_eval only evaluates
    # literals, never arbitrary expressions, so this is safe on the payload.
    data = str(simple_receive().decode('utf-8') )
    python_dict = literal_eval(data)
    print(python_dict)
| 32.311475 | 97 | 0.584982 | 0 | 0 | 1,216 | 0.616946 | 0 | 0 | 0 | 0 | 312 | 0.158295 |
944b3519824a1ec44b0b5915e648a3cea1b18227 | 5,156 | py | Python | notebooks/myhmm.py | RonDen/HanTokenization | 7fd61f3de33a1fc094784fcc49bd4b2808195c89 | [
"MIT"
] | 3 | 2021-04-04T03:28:41.000Z | 2021-04-23T02:57:20.000Z | notebooks/myhmm.py | RonDen/HanTokenization | 7fd61f3de33a1fc094784fcc49bd4b2808195c89 | [
"MIT"
] | null | null | null | notebooks/myhmm.py | RonDen/HanTokenization | 7fd61f3de33a1fc094784fcc49bd4b2808195c89 | [
"MIT"
] | 2 | 2021-04-13T11:34:35.000Z | 2021-04-20T00:51:41.000Z | import re
import os
from prob import trans_P, emit_P, start_P
from preprocess import preprocess, recov, UNK
# Hard-coded project paths — NOTE(review): consider deriving these from the
# repository root or CLI arguments for portability.
DATAROOT = '/home/luod/class/nlp/HanTokenization/datasets'
RESULTROOT = '/home/luod/class/nlp/HanTokenization/results'
VOCAB_FILE = os.path.join(DATAROOT, 'training_vocab.txt')
VOCAB_FREQ = os.path.join(RESULTROOT, 'vocab-freq.txt')
TRAIN_FILE = os.path.join(DATAROOT, 'training.txt')
TEST_FILE = os.path.join(DATAROOT, 'test.txt')
MIN_FLOAT = -3.14e100  # acts as log(0): floor for unseen emissions/transitions
# Allowed predecessor states for each BMES tag (e.g. 'B' may only follow 'E' or 'S').
PrevStatus = {
    'B': 'ES',
    'M': 'MB',
    'S': 'SE',
    'E': 'BM'
}
Force_Split_Words = set([])  # words cut() must always emit character by character
def add_force_split(word):
    """Register *word* so that cut() always splits it into single characters."""
    # Mutating the module-level set needs no ``global`` declaration.
    Force_Split_Words.add(word)
def viterbi(obs, states, start_p, trans_p, emit_p):
    """Log-domain Viterbi decoding over the BMES tag set.

    :param obs: observation sequence (a sentence string).
    :param states: iterable of states, e.g. 'BMES'.
    :param start_p: dict state -> log start probability.
    :param trans_p: dict state -> {state: log transition probability}.
    :param emit_p: dict state -> {char: log emission probability}.
    :return: (best log probability, best state path) ending in 'E' or 'S'.
    """
    V = [{}]  # V[t][state] = best log probability of any path ending in state at step t
    path = {}
    for y in states:  # initialization with the first character
        V[0][y] = start_p[y] + emit_p[y].get(obs[0], MIN_FLOAT)
        path[y] = [y]
    for t in range(1, len(obs)):
        V.append({})
        newpath = {}
        for y in states:
            em_p = emit_p[y].get(obs[t], MIN_FLOAT)
            # Only states allowed by PrevStatus can precede y.
            (prob, state) = max(
                [(V[t - 1][y0] + trans_p[y0].get(y, MIN_FLOAT) + em_p, y0) for y0 in PrevStatus[y]])
            V[t][y] = prob
            newpath[y] = path[state] + [y]
        path = newpath
    # A valid segmentation must end on a word-final ('E') or single ('S') tag.
    (prob, state) = max((V[len(obs) - 1][y], y) for y in 'ES')
    return (prob, path[state])
def hmm_cut(sentence):
    """Yield the words of *sentence* segmented by HMM Viterbi decoding.

    NOTE(review): the ``global emit_P`` declaration is unnecessary — emit_P
    is only read here, never rebound.
    """
    global emit_P
    prob, pos_list = viterbi(sentence, 'BMES', start_P, trans_P, emit_P)
    begin, nexti = 0, 0
    for i, char in enumerate(sentence):
        pos = pos_list[i]
        if pos == 'B':
            begin = i  # start of a multi-character word
        elif pos == 'E':
            yield sentence[begin:i + 1]  # end of a multi-character word
            nexti = i + 1
        elif pos == 'S':
            yield char  # single-character word
            nexti = i + 1
    if nexti < len(sentence):
        # Flush a trailing fragment whose final tag was 'B' or 'M'.
        yield sentence[nexti:]
re_han = re.compile("([\u4E00-\u9FD5]+)")  # runs of CJK unified ideographs
re_skip = re.compile("([a-zA-Z0-9]+(?:\.\d+)?%?)")  # latin/digit tokens, decimals, percentages
def cut(sentence):
    """Segment *sentence*, chunking it into han and non-han spans first.

    Runs of CJK characters are segmented with hmm_cut(); other spans are
    split into alphanumeric tokens. Words in Force_Split_Words are always
    emitted character by character.
    """
    if not sentence:
        # NOTE(review): yields None for empty input; callers joining the
        # result should guard against empty lines (as make_cut does).
        yield None
    blocks = re_han.split(sentence)
    for blk in blocks:
        if re_han.match(blk):
            for word in hmm_cut(blk):
                if word not in Force_Split_Words:
                    yield word
                else:
                    # Forced split: emit each character separately.
                    for c in word:
                        yield c
        else:
            tmp = re_skip.split(blk)
            for x in tmp:
                if x:
                    yield x
# Load the gold corpora once at import time; each line is a sentence whose
# tokens are separated by single spaces.
with open(TRAIN_FILE, 'r', encoding='utf-8') as f:
    train_set = list(map(str.strip, f.readlines()))
with open(TEST_FILE, 'r', encoding='utf-8') as f:
    test_set = list(map(str.strip, f.readlines()))
# Token lists per sentence, and the unsegmented sentences rebuilt from them.
train_set_split = [line.split(' ') for line in train_set]
test_set_split = [line.split(' ') for line in test_set]
train_raw = [''.join(line) for line in train_set_split]
test_raw = [''.join(line) for line in test_set_split]
def eval(file_path, train=False):
    """Score a segmentation file with the official perl scorer.

    NOTE(review): shadows the builtin ``eval`` and hard-codes absolute paths;
    renaming would require updating the callers in this module.

    :param file_path: path of the segmented output to score.
    :param train: score against the training gold standard instead of test.
    """
    if not train:
        os.system('perl /home/luod/class/nlp/HanTokenization/scripts/score /home/luod/class/nlp/HanTokenization/datasets/training_vocab.txt /home/luod/class/nlp/HanTokenization/datasets/test.txt %s ' % file_path)
    else:
        os.system('perl /home/luod/class/nlp/HanTokenization/scripts/score /home/luod/class/nlp/HanTokenization/datasets/training_vocab.txt /home/luod/class/nlp/HanTokenization/datasets/training.txt %s' % file_path)
def pre_make_cut(cut_func, result_file):
    """Segment the preprocessed test corpus with *cut_func* and score it.

    Each raw test sentence is normalised by preprocess(); UNK markers split
    the sentence into chunks that are segmented independently with
    *cut_func*, and the original tokens recorded by preprocess() are
    re-inserted in place of each marker.

    :param cut_func: callable(str) -> iterable of tokens (e.g. hmm_cut or cut).
    :param result_file: output file name under RESULTROOT.
    """
    file_path = os.path.join(RESULTROOT, result_file)
    with open(file_path, 'w+', encoding='utf-8') as f:
        for line in test_raw:
            if not line:
                f.write('\n')  # preserve blank lines so line numbers align
                continue
            sens, rec = preprocess(line)
            res, idx = [], 0
            le, ri = 0, 0
            while ri < len(sens):
                if sens[ri] == UNK:
                    if sens[le: ri]:
                        # BUG FIX: previously hard-coded hmm_cut here (and
                        # below), silently ignoring the cut_func parameter —
                        # the "chunk" variant in get_result() produced the
                        # same output as the no-chunk one.
                        res += cut_func(sens[le: ri])
                    le = ri + 1
                    # Re-insert the original token this UNK marker replaced.
                    if idx < len(rec):
                        res += [rec[idx]]
                    idx += 1
                ri += 1
                if ri == len(sens) and sens[-1] != UNK:
                    res += cut_func(sens[le:])
            res = ' '.join(res)
            f.write(res)
            f.write('\n')
    eval(file_path)
def make_cut(cut_func, result_file, train=False):
    """Segment a whole corpus with *cut_func*, write it out, and score it.

    :param cut_func: callable(str) -> iterable of tokens.
    :param result_file: output file name under RESULTROOT.
    :param train: segment the training corpus instead of the test corpus.
    """
    file_path = os.path.join(RESULTROOT, result_file)
    line_list = test_raw
    if train:
        line_list = train_raw
    with open(os.path.join(RESULTROOT, result_file), 'w+', encoding='utf-8') as f:
        for line in line_list:
            if not line:
                f.write('\n')  # preserve blank lines so line numbers align
                continue
            sen = line
            res = cut_func(sen)
            res = ' '.join(res)
            f.write(res)
            f.write('\n')
    eval(file_path, train)
def get_result():
    """Run pre_make_cut for the no-chunk and chunk configurations and score both."""
    pre_make_cut(hmm_cut, 'pre_test_hmm_no_chunk.txt')
    pre_make_cut(cut, 'pre_test_hmm_chunk.txt')
def make_test_file():
    """Dump the unsegmented test sentences to datasets/raw_test.txt, one per line."""
    with open('../datasets/raw_test.txt', 'w', encoding='utf8') as f:
        f.writelines(line + '\n' for line in test_raw)
if __name__ == '__main__':
    # Export the raw (unsegmented) test sentences; uncomment to run scoring.
    make_test_file()
    # get_result()
| 29.803468 | 215 | 0.554112 | 0 | 0 | 987 | 0.191427 | 0 | 0 | 0 | 0 | 846 | 0.164081 |
944b3da25cd5066da7daf13b3dab234d95f0e9bd | 9,727 | py | Python | tests/test_vidkl.py | ziatdinovmax/gpax | a35374c178b66a3ea5640063a479b0b6be8d57db | [
"MIT"
] | 13 | 2021-11-18T20:20:18.000Z | 2022-03-23T12:53:51.000Z | tests/test_vidkl.py | ziatdinovmax/gpax | a35374c178b66a3ea5640063a479b0b6be8d57db | [
"MIT"
] | 5 | 2022-02-25T09:50:44.000Z | 2022-03-26T21:10:26.000Z | tests/test_vidkl.py | ziatdinovmax/gpax | a35374c178b66a3ea5640063a479b0b6be8d57db | [
"MIT"
] | null | null | null | import sys
import pytest
import numpy as onp
import jax.numpy as jnp
import jax
import haiku as hk
import numpyro
from numpy.testing import assert_equal, assert_array_equal
sys.path.insert(0, "../gpax/")
from gpax.vidkl import viDKL, MLP
from gpax.utils import get_keys
def get_dummy_data(jax_ndarray=True):
    """Return a random (21, 36) feature matrix and (21,) target vector.

    The arrays are converted to jax ndarrays unless *jax_ndarray* is False.
    """
    features = onp.random.randn(21, 36)
    targets = onp.random.randn(21,)
    if not jax_ndarray:
        return features, targets
    return jnp.array(features), jnp.array(targets)
def get_dummy_image_data(jax_ndarray=True):
    """Return random single-channel images (21, 16, 16, 1) and 21 targets.

    The arrays are converted to jax ndarrays unless *jax_ndarray* is False.
    """
    images = onp.random.randn(21, 16, 16, 1)
    targets = onp.random.randn(21,)
    if not jax_ndarray:
        return images, targets
    return jnp.array(images), jnp.array(targets)
def get_dummy_vector_data(jax_ndarray=True):
    """Return dummy data replicated 3x along a new leading axis: X (3, 21, 36), y (3, 21)."""
    X, y = get_dummy_data(jax_ndarray)
    return X[None].repeat(3, axis=0), y[None].repeat(3, axis=0)
class CustomConvNet(hk.Module):
    """Small Haiku CNN used to exercise viDKL with a user-supplied network.

    Two conv+relu stages (with one max-pool after the first) followed by a
    linear projection to an *embedim*-dimensional embedding.
    """
    def __init__(self, embedim=2):
        super().__init__()
        self._embedim = embedim  # output embedding dimension

    def __call__(self, x):
        x = hk.Conv2D(32, 3)(x)
        x = jax.nn.relu(x)
        x = hk.MaxPool(2, 2, 'SAME')(x)
        x = hk.Conv2D(64, 3)(x)
        x = jax.nn.relu(x)
        x = hk.Flatten()(x)
        x = hk.Linear(self._embedim)(x)
        return x
@pytest.mark.parametrize("jax_ndarray", [True, False])
def test_single_fit(jax_ndarray):
    # single_fit should return network params, kernel params, and the loss
    # history, for both numpy and jax input arrays.
    X, y = get_dummy_data(jax_ndarray)
    rng_key = get_keys()[0]
    m = viDKL(X.shape[-1])
    nn_params, kernel_params, losses = m.single_fit(
        rng_key, X, y, num_steps=100, step_size=0.05)
    assert isinstance(kernel_params, dict)
    assert isinstance(nn_params, dict)
    assert isinstance(losses, jnp.ndarray)
@pytest.mark.parametrize("jax_ndarray", [True, False])
def test_single_fit_custom_net(jax_ndarray):
X, y = get_dummy_image_data(jax_ndarray)
rng_key = get_keys()[0]
m = viDKL(X.shape[1:], nn=CustomConvNet)
nn_params, kernel_params, losses = m.single_fit(
rng_key, X, y, num_steps=100, step_size=0.05)
for i, val in enumerate(nn_params.values()):
for k, v in val.items():
if 'w' in k and i < 2:
assert_equal(v.ndim, 4) # confirm that this is a 4-dim weights tensor of CNN
def test_get_mvn_posterior():
    # The MVN posterior should have a mean of shape (n_test,) and a full
    # covariance of shape (n_test, n_test).
    rng_key = get_keys()[0]
    X, y = get_dummy_data()
    X_test, _ = get_dummy_data()
    net = hk.transform(lambda x: MLP()(x))
    nn_params = net.init(rng_key, X)
    kernel_params = {"k_length": jnp.array([1.0]),
                     "k_scale": jnp.array(1.0),
                     "noise": jnp.array(0.1)}
    m = viDKL(X.shape[-1])
    mean, cov = m.get_mvn_posterior(X, y, X_test, nn_params, kernel_params)
    assert isinstance(mean, jnp.ndarray)
    assert isinstance(cov, jnp.ndarray)
    assert_equal(mean.shape, (X_test.shape[0],))
    assert_equal(cov.shape, (X_test.shape[0], X_test.shape[0]))
def test_get_mvn_posterior_noiseless():
    # The noiseless flag must leave the posterior mean unchanged and only
    # alter the covariance; repeated noisy calls must be deterministic.
    rng_key = get_keys()[0]
    X, y = get_dummy_data()
    X_test, _ = get_dummy_data()
    net = hk.transform(lambda x: MLP()(x))
    nn_params = net.init(rng_key, X)
    kernel_params = {"k_length": jnp.array([1.0]),
                     "k_scale": jnp.array(1.0),
                     "noise": jnp.array(0.1)}
    m = viDKL(X.shape[-1])
    mean1, cov1 = m.get_mvn_posterior(X, y, X_test, nn_params, kernel_params, noiseless=False)
    mean1_, cov1_ = m.get_mvn_posterior(X, y, X_test, nn_params, kernel_params, noiseless=False)
    mean2, cov2 = m.get_mvn_posterior(X, y, X_test, nn_params, kernel_params, noiseless=True)
    assert_array_equal(mean1, mean1_)
    assert_array_equal(cov1, cov1_)
    assert_array_equal(mean1, mean2)
    assert onp.count_nonzero(cov1 - cov2) > 0
def test_fit_scalar_target():
X, y = get_dummy_data()
rng_key = get_keys()[0]
m = viDKL(X.shape[-1])
m.fit(rng_key, X, y, num_steps=100, step_size=0.05)
for v in m.kernel_params.values():
assert v.ndim < 2
for val in m.nn_params.values():
for v in val.values():
assert v.ndim < 3
def test_fit_vector_target():
X, y = get_dummy_vector_data()
rng_key = get_keys()[0]
m = viDKL(X.shape[-1])
m.fit(rng_key, X, y, num_steps=100, step_size=0.05)
for v in m.kernel_params.values():
assert v.ndim > 0
assert_equal(v.shape[0], 3)
for val in m.nn_params.values():
for v in val.values():
assert v.ndim > 1
assert_equal(v.shape[0], 3)
def test_predict_scalar():
rng_key = get_keys()[0]
X, y = get_dummy_data()
X_test, _ = get_dummy_data()
net = hk.transform(lambda x: MLP()(x))
nn_params = net.init(rng_key, X)
kernel_params = {"k_length": jnp.array([1.0]),
"k_scale": jnp.array(1.0),
"noise": jnp.array(0.1)}
m = viDKL(X.shape[-1])
m.X_train = X
m.y_train = y
m.nn_params = nn_params
m.kernel_params = kernel_params
mean, var = m.predict(rng_key, X_test)
assert isinstance(mean, jnp.ndarray)
assert isinstance(var, jnp.ndarray)
assert_equal(mean.shape, (len(X_test),))
assert_equal(var.shape, (len(X_test),))
def test_predict_vector():
rng_key = get_keys()[0]
X, y = get_dummy_vector_data()
X_test, _ = get_dummy_vector_data()
net = hk.transform(lambda x: MLP()(x))
clone = lambda x: net.init(rng_key, x)
nn_params = jax.vmap(clone)(X)
kernel_params = {"k_length": jnp.array([[1.0], [1.0], [1.0]]),
"k_scale": jnp.array([1.0, 1.0, 1.0]),
"noise": jnp.array([0.1, 0.1, 0.1])}
m = viDKL(X.shape[-1])
m.X_train = X
m.y_train = y
m.nn_params = nn_params
m.kernel_params = kernel_params
mean, var = m.predict(rng_key, X_test)
assert isinstance(mean, jnp.ndarray)
assert isinstance(var, jnp.ndarray)
assert_equal(mean.shape, X_test.shape[:-1])
assert_equal(var.shape, X_test.shape[:-1])
def test_predict_in_batches_scalar():
rng_key = get_keys()[0]
X, y = get_dummy_data()
X_test, _ = get_dummy_data()
net = hk.transform(lambda x: MLP()(x))
nn_params = net.init(rng_key, X)
kernel_params = {"k_length": jnp.array([1.0]),
"k_scale": jnp.array(1.0),
"noise": jnp.array(0.1)}
m = viDKL(X.shape[-1])
m.X_train = X
m.y_train = y
m.nn_params = nn_params
m.kernel_params = kernel_params
mean, var = m.predict_in_batches(rng_key, X_test, batch_size=10)
assert isinstance(mean, jnp.ndarray)
assert isinstance(var, jnp.ndarray)
assert_equal(mean.shape, (len(X_test),))
assert_equal(var.shape, (len(X_test),))
def test_predict_in_batches_vector():
rng_key = get_keys()[0]
X, y = get_dummy_vector_data()
X_test, _ = get_dummy_vector_data()
net = hk.transform(lambda x: MLP()(x))
clone = lambda x: net.init(rng_key, x)
nn_params = jax.vmap(clone)(X)
kernel_params = {"k_length": jnp.array([[1.0], [1.0], [1.0]]),
"k_scale": jnp.array([1.0, 1.0, 1.0]),
"noise": jnp.array([0.1, 0.1, 0.1])}
m = viDKL(X.shape[-1])
m.X_train = X
m.y_train = y
m.nn_params = nn_params
m.kernel_params = kernel_params
mean, var = m.predict_in_batches(rng_key, X_test, batch_size=10)
assert isinstance(mean, jnp.ndarray)
assert isinstance(var, jnp.ndarray)
assert_equal(mean.shape, X_test.shape[:-1])
assert_equal(var.shape, X_test.shape[:-1])
def test_fit_predict_scalar():
rng_key = get_keys()[0]
X, y = get_dummy_data()
X_test, _ = get_dummy_data()
m = viDKL(X.shape[-1])
mean, var = m.fit_predict(
rng_key, X, y, X_test, num_steps=100, step_size=0.05, batch_size=10)
assert isinstance(mean, jnp.ndarray)
assert isinstance(var, jnp.ndarray)
assert_equal(mean.shape, (len(X_test),))
assert_equal(var.shape, (len(X_test),))
def test_fit_predict_vector():
rng_key = get_keys()[0]
X, y = get_dummy_vector_data()
X_test, _ = get_dummy_vector_data()
m = viDKL(X.shape[-1])
mean, var = m.fit_predict(
rng_key, X, y, X_test, num_steps=100, step_size=0.05, batch_size=10)
assert isinstance(mean, jnp.ndarray)
assert isinstance(var, jnp.ndarray)
assert_equal(mean.shape, X_test.shape[:-1])
assert_equal(var.shape, X_test.shape[:-1])
def test_fit_predict_scalar_ensemble():
rng_key = get_keys()[0]
X, y = get_dummy_data()
X_test, _ = get_dummy_data()
m = viDKL(X.shape[-1])
mean, var = m.fit_predict(
rng_key, X, y, X_test, n_models=4,
num_steps=100, step_size=0.05, batch_size=10)
assert isinstance(mean, jnp.ndarray)
assert isinstance(var, jnp.ndarray)
assert_equal(mean.shape, (4, len(X_test),))
assert_equal(var.shape, (4, len(X_test),))
def test_fit_predict_vector_ensemble():
rng_key = get_keys()[0]
X, y = get_dummy_vector_data()
X_test, _ = get_dummy_vector_data()
m = viDKL(X.shape[-1])
mean, var = m.fit_predict(
rng_key, X, y, X_test, n_models=2,
num_steps=100, step_size=0.05, batch_size=10)
assert isinstance(mean, jnp.ndarray)
assert isinstance(var, jnp.ndarray)
assert_equal(mean.shape, (2, *X_test.shape[:-1]))
assert_equal(var.shape, (2, *X_test.shape[:-1]))
def test_fit_predict_scalar_ensemble_custom_net():
rng_key = get_keys()[0]
X, y = get_dummy_image_data()
X_test, _ = get_dummy_image_data()
m = viDKL(X.shape[1:], nn=CustomConvNet)
mean, var = m.fit_predict(
rng_key, X, y, X_test, n_models=2,
num_steps=100, step_size=0.05, batch_size=10)
assert isinstance(mean, jnp.ndarray)
assert isinstance(var, jnp.ndarray)
assert_equal(mean.shape, (2, len(X_test),))
assert_equal(var.shape, (2, len(X_test),))
| 33.197952 | 96 | 0.631027 | 399 | 0.04102 | 0 | 0 | 948 | 0.097461 | 0 | 0 | 253 | 0.02601 |
944b49db9922d734d432d502d558b6874b8edee9 | 350 | py | Python | module3-nosql-and-document-oriented-databases/mongoDB.py | cocoisland/DS-Unit-3-Sprint-2-SQL-and-Databases | 063af0488fa5694ee233298ed76de71b4229fe4c | [
"MIT"
] | null | null | null | module3-nosql-and-document-oriented-databases/mongoDB.py | cocoisland/DS-Unit-3-Sprint-2-SQL-and-Databases | 063af0488fa5694ee233298ed76de71b4229fe4c | [
"MIT"
] | null | null | null | module3-nosql-and-document-oriented-databases/mongoDB.py | cocoisland/DS-Unit-3-Sprint-2-SQL-and-Databases | 063af0488fa5694ee233298ed76de71b4229fe4c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os

import pymongo

# SECURITY: the fallback URI below embeds real credentials (user + password)
# committed to source control. Rotate these credentials and supply the URI
# via the MONGODB_CONN_STRING environment variable; the literal is kept only
# as a backward-compatible default.
conn_string = os.environ.get(
    "MONGODB_CONN_STRING",
    "mongodb://dbUser19:LSVyKnHW@cluster0-shard-00-00-nadgn.mongodb.net:27017,"
    "cluster0-shard-00-01-nadgn.mongodb.net:27017,"
    "cluster0-shard-00-02-nadgn.mongodb.net:27017/test"
    "?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true",
)

client = pymongo.MongoClient(conn_string)
# Default database handle used by the rest of the script.
db = client.test
| 31.818182 | 252 | 0.814286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.745714 |
944bf54b73dad52bc537f98cbad4c7acf0d3d271 | 10,744 | py | Python | tests/test_helper.py | ajctrl/pysesameos2 | 045d785f028e737eab67146d1c7ca7a033df0b0e | [
"Apache-2.0"
] | 16 | 2021-06-30T00:13:00.000Z | 2022-02-16T14:58:48.000Z | tests/test_helper.py | ajctrl/pysesameos2 | 045d785f028e737eab67146d1c7ca7a033df0b0e | [
"Apache-2.0"
] | 32 | 2021-06-26T23:16:48.000Z | 2022-03-07T12:55:22.000Z | tests/test_helper.py | ajctrl/pysesameos2 | 045d785f028e737eab67146d1c7ca7a033df0b0e | [
"Apache-2.0"
] | 6 | 2021-10-31T07:05:39.000Z | 2022-02-05T07:28:35.000Z | #!/usr/bin/env python
"""Tests for `pysesameos2` package."""
import pytest
from pysesameos2.helper import (
CHProductModel,
CHSesame2MechSettings,
CHSesame2MechStatus,
CHSesameBotButtonMode,
CHSesameBotLockSecondsConfiguration,
CHSesameBotMechSettings,
CHSesameBotMechStatus,
CHSesameBotUserPreDir,
CHSesameProtocolMechStatus,
HistoryTagHelper,
)
class TestCHProductModel:
    """Tests for the CHProductModel enumeration (device model registry)."""

    def test_CHProductModel_raises_exception_on_invalid_model(self):
        with pytest.raises(AttributeError):
            CHProductModel.SS99

    def test_CHProductModel_SS2(self):
        ss2 = CHProductModel.SS2
        assert ss2.deviceModel() == "sesame_2"
        assert ss2.isLocker()
        assert ss2.productType() == 0
        assert ss2.deviceFactory().__name__ == "CHSesame2"

    def test_CHProductModel_SS4(self):
        # Fixed misleading local name: this is the SS4 model, not SS2.
        ss4 = CHProductModel.SS4
        assert ss4.deviceModel() == "sesame_4"
        assert ss4.isLocker()
        assert ss4.productType() == 4
        assert ss4.deviceFactory().__name__ == "CHSesame2"

    def test_CHProductModel_WM2(self):
        wm2 = CHProductModel.WM2
        assert wm2.deviceModel() == "wm_2"
        assert not wm2.isLocker()
        assert wm2.productType() == 1
        # The WiFi module has no device class yet.
        with pytest.raises(NotImplementedError):
            wm2.deviceFactory()

    def test_CHProductModel_getByModel_raises_exception_on_invalid_arguments(self):
        with pytest.raises(TypeError):
            CHProductModel.getByModel(123)

    def test_CHProductModel_getByModel_returns_None_for_unknown_model(self):
        with pytest.raises(NotImplementedError):
            CHProductModel.getByModel("sesame_99")

    def test_CHProductModel_getByModel_returns_SS2(self):
        assert CHProductModel.getByModel("sesame_2") is CHProductModel.SS2

    def test_CHProductModel_getByValue_raises_exception_on_invalid_arguments(self):
        with pytest.raises(TypeError):
            CHProductModel.getByValue("0")

    def test_CHProductModel_getByValue_returns_None_for_unknown_model(self):
        with pytest.raises(NotImplementedError):
            CHProductModel.getByValue(999)

    def test_CHProductModel_getByValue_returns_SS2(self):
        assert CHProductModel.getByValue(0) is CHProductModel.SS2
class TestCHSesameProtocolMechStatus:
    """Tests for the protocol-level mechanical status parser."""

    # Renamed from *_on_emtry_arguments (typo).
    def test_CHSesameProtocolMechStatus_raises_exception_on_empty_arguments(self):
        with pytest.raises(TypeError):
            CHSesameProtocolMechStatus()

    def test_CHSesameProtocolMechStatus_raises_exception_on_non_string_argument(self):
        with pytest.raises(TypeError):
            CHSesameProtocolMechStatus(10)

    def test_CHSesameProtocolMechStatus(self):
        # Both hex-string and raw-bytes input forms must parse identically.
        status = CHSesameProtocolMechStatus(rawdata="60030080f3ff0002")
        assert status.isInLockRange()

        status = CHSesameProtocolMechStatus(rawdata=bytes.fromhex("60030080f3ff0002"))
        assert status.isInLockRange()
class TestCHSesame2MechStatus:
    """Tests for the Sesame 2 mechanical status parser."""

    # Renamed from *_on_emtry_arguments (typo).
    def test_CHSesame2MechStatus_raises_exception_on_empty_arguments(self):
        with pytest.raises(TypeError):
            CHSesame2MechStatus()

    def test_CHSesame2MechStatus_raises_exception_on_non_string_argument(self):
        with pytest.raises(TypeError):
            CHSesame2MechStatus(10)

    def test_CHSesame2MechStatus_rawdata_locked(self):
        status = CHSesame2MechStatus(rawdata="60030080f3ff0002")

        assert status.getBatteryPrecentage() == 100.0
        assert status.getBatteryVoltage() == 6.0809384164222875
        assert status.getPosition() == -13
        assert status.getRetCode() == 0
        assert status.getTarget() == -32768
        assert status.isInLockRange()
        assert not status.isInUnlockRange()
        assert (
            str(status)
            == "CHSesame2MechStatus(Battery=100% (6.08V), isInLockRange=True, isInUnlockRange=False, Position=-13)"
        )

        # Raw-bytes input must parse identically to the hex-string form.
        status = CHSesame2MechStatus(rawdata=bytes.fromhex("60030080f3ff0002"))
        assert (
            str(status)
            == "CHSesame2MechStatus(Battery=100% (6.08V), isInLockRange=True, isInUnlockRange=False, Position=-13)"
        )

    def test_CHSesame2MechStatus_rawdata_unlocked(self):
        status = CHSesame2MechStatus(rawdata="5c030503e3020004")

        assert status.getBatteryPrecentage() == 100.0
        assert status.getBatteryVoltage() == 6.052785923753666
        assert status.getPosition() == 739
        assert status.getRetCode() == 0
        assert status.getTarget() == 773
        assert not status.isInLockRange()
        assert status.isInUnlockRange()
        assert (
            str(status)
            == "CHSesame2MechStatus(Battery=100% (6.05V), isInLockRange=False, isInUnlockRange=True, Position=739)"
        )

    def test_CHSesame2MechStatus_rawdata_lowpower(self):
        status = CHSesame2MechStatus(rawdata="30030080f3ff0002")
        assert status.getBatteryPrecentage() == 44
        assert status.getBatteryVoltage() == 5.743108504398827

        # Very low voltage clamps the percentage at 0.
        status2 = CHSesame2MechStatus(rawdata="48020080f3ff0002")
        assert status2.getBatteryPrecentage() == 0
class TestCHSesame2MechSettings:
    """Tests for the Sesame 2 mechanical settings parser."""

    # Renamed from *_on_emtry_arguments (typo).
    def test_CHSesame2MechSettings_raises_exception_on_empty_arguments(self):
        with pytest.raises(TypeError):
            CHSesame2MechSettings()

    def test_CHSesame2MechSettings_raises_exception_on_non_string_argument(self):
        with pytest.raises(TypeError):
            CHSesame2MechSettings(10)

    def test_CHSesame2MechSettings(self):
        setting = CHSesame2MechSettings(
            rawdata=bytes.fromhex("efff1c0159ff85008600b201")
        )
        assert setting.isConfigured is True
        assert setting.getLockPosition() == -17
        assert setting.getUnlockPosition() == 284
        assert (
            str(setting)
            == "CHSesame2MechSettings(LockPosition=-17, UnlockPosition=284, isConfigured=True)"
        )
class TestCHSesameBotMechStatus:
    """Tests for the Sesame Bot mechanical status parser."""

    # Renamed from *_on_emtry_arguments (typo).
    def test_CHSesameBotMechStatus_raises_exception_on_empty_arguments(self):
        with pytest.raises(TypeError):
            CHSesameBotMechStatus()

    def test_CHSesameBotMechStatus_raises_exception_on_non_string_argument(self):
        with pytest.raises(TypeError):
            CHSesameBotMechStatus(10)

    def test_CHSesameBotMechStatus_rawdata_locked(self):
        status = CHSesameBotMechStatus(rawdata="5503000000000102")

        assert status.getBatteryPrecentage() == 100.0
        assert status.getBatteryVoltage() == 3.001759530791789
        assert status.isInLockRange()
        assert not status.isInUnlockRange()
        assert status.getMotorStatus() == 0
        assert (
            str(status) == "CHSesameBotMechStatus(Battery=100% (3.00V), motorStatus=0)"
        )

        # Raw-bytes input must parse identically to the hex-string form.
        status = CHSesameBotMechStatus(rawdata=bytes.fromhex("5503000000000102"))
        assert (
            str(status) == "CHSesameBotMechStatus(Battery=100% (3.00V), motorStatus=0)"
        )

    def test_CHSesameBotMechStatus_rawdata_unlocked(self):
        status = CHSesameBotMechStatus(rawdata="5503000000000104")

        assert status.getBatteryPrecentage() == 100.0
        assert status.getBatteryVoltage() == 3.001759530791789
        assert not status.isInLockRange()
        assert status.isInUnlockRange()
        assert status.getMotorStatus() == 0
        assert (
            str(status) == "CHSesameBotMechStatus(Battery=100% (3.00V), motorStatus=0)"
        )

    def test_CHSesameBotMechStatus_rawdata_lowpower(self):
        status = CHSesameBotMechStatus(rawdata="3003000000000102")
        assert status.getBatteryPrecentage() == 44
        assert status.getBatteryVoltage() == 2.8715542521994135

        # Very low voltage clamps the percentage at 0.
        status2 = CHSesameBotMechStatus(rawdata="4802000000000102")
        assert status2.getBatteryPrecentage() == 0
class TestCHSesameBotMechSettings:
    """Tests for the Sesame Bot mechanical settings parser."""

    # Renamed from *_on_emtry_arguments (typo).
    def test_CHSesameBotMechSettings_raises_exception_on_empty_arguments(self):
        with pytest.raises(TypeError):
            CHSesameBotMechSettings()

    def test_CHSesameBotMechSettings_raises_exception_on_non_string_argument(self):
        with pytest.raises(TypeError):
            CHSesameBotMechSettings(10)

    def test_CHSesameBotMechSettings(self):
        setting = CHSesameBotMechSettings(
            rawdata=bytes.fromhex("010a0a0a140f000000000000")
        )
        assert setting.getUserPrefDir() == CHSesameBotUserPreDir.reversed
        assert setting.getLockSecConfig().getLockSec() == 10
        assert setting.getLockSecConfig().getUnlockSec() == 10
        assert setting.getLockSecConfig().getClickLockSec() == 10
        assert setting.getLockSecConfig().getClickHoldSec() == 20
        assert setting.getLockSecConfig().getClickUnlockSec() == 15
        assert setting.getButtonMode() == CHSesameBotButtonMode.click
        assert (
            str(setting)
            == "CHSesameBotMechSettings(userPrefDir=CHSesameBotUserPreDir.reversed, lockSec=10, unlockSec=10, clickLockSec=10, clickHoldSec=20, clickUnlockSec=15, buttonMode=CHSesameBotButtonMode.click)"
        )
class TestCHSesameBotLockSecondsConfiguration:
    """Tests for the Sesame Bot lock-timing configuration parser."""

    # Renamed from *_on_emtry_arguments (typo).
    def test_CHSesameBotLockSecondsConfiguration_raises_exception_on_empty_arguments(
        self,
    ):
        with pytest.raises(TypeError):
            CHSesameBotLockSecondsConfiguration()

    def test_CHSesameBotLockSecondsConfiguration_raises_exception_on_non_string_argument(
        self,
    ):
        with pytest.raises(TypeError):
            CHSesameBotLockSecondsConfiguration(10)

    def test_CHSesameBotLockSecondsConfiguration(self):
        c = CHSesameBotLockSecondsConfiguration(rawdata="0a0a0a140f")
        assert c.getLockSec() == 10
        assert c.getUnlockSec() == 10
        assert c.getClickLockSec() == 10
        assert c.getClickHoldSec() == 20
        assert c.getClickUnlockSec() == 15
class TestHistoryTagHelper:
    """Tests for HistoryTagHelper's UTF-8-safe splitting and history-tag
    encoding."""

    def test_split_utf8(self):
        # 25 bytes ends exactly on a character boundary; 26 bytes cuts a
        # 3-byte CJK character in half and must fail to decode.
        text_bytes = "適当に 分割すると最後の文字が壊れてしまう".encode("utf-8")
        assert text_bytes[:25].decode("utf-8") == "適当に 分割すると"
        with pytest.raises(UnicodeDecodeError) as excinfo:
            text_bytes[:26].decode("utf-8")
        assert "unexpected end of data" in str(excinfo.value)

        # split_utf8 must back off to the previous character boundary, so a
        # 25- and a 26-byte limit produce the same chunking here.
        test_25 = HistoryTagHelper.split_utf8(text_bytes, 25)
        test_26 = HistoryTagHelper.split_utf8(text_bytes, 26)
        desired_split = [
            "適当に 分割すると".encode("utf-8"),
            "最後の文字が壊れ".encode("utf-8"),
            "てしまう".encode("utf-8"),
        ]
        assert list(test_25) == desired_split
        assert list(test_26) == desired_split

    def test_create_htag(self):
        # NOTE(review): the leading byte 0x15 (= 21) looks like the byte
        # length of the UTF-8-encoded tag, followed by the (truncated)
        # payload — confirm against the HistoryTagHelper implementation.
        assert (
            HistoryTagHelper.create_htag(history_tag="適当な日本語で OK")
            == b"\x15\xe9\x81\xa9\xe5\xbd\x93\xe3\x81\xaa\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe3\x81\xa7"
        )
| 37.048276 | 203 | 0.696203 | 10,440 | 0.961857 | 0 | 0 | 0 | 0 | 0 | 0 | 1,499 | 0.138106 |
944c02065ba04cb19712b6f22f00a4dabb48ce05 | 3,049 | py | Python | test_migrations/contrib/pytest_plugin/plugin.py | skarzi/django-test-migrations | 9b61f36fd91900334a29ff26dc60228842a86da8 | [
"MIT"
] | 4 | 2019-07-26T12:42:42.000Z | 2020-01-27T07:45:09.000Z | test_migrations/contrib/pytest_plugin/plugin.py | skarzi/django-test-migrations | 9b61f36fd91900334a29ff26dc60228842a86da8 | [
"MIT"
] | 37 | 2019-10-18T18:10:14.000Z | 2020-01-31T07:46:23.000Z | test_migrations/contrib/pytest_plugin/plugin.py | skarzi/django-test-migrations | 9b61f36fd91900334a29ff26dc60228842a86da8 | [
"MIT"
] | 1 | 2019-08-03T15:54:50.000Z | 2019-08-03T15:54:50.000Z | import pytest
from test_migrations import constants
from .fixtures import migrator # pylint: disable=W0611
pytest_plugins = ['pytest_django'] # pylint: disable=C0103
def pytest_load_initial_conftests(early_config):
    """Register the migration-test marker so pytest does not warn about it.

    :param early_config: pytest's early ``Config`` object.
    """
    # Register the marks.  Note the trailing space after "a": without it the
    # concatenated description read "as aDjango migration test".
    early_config.addinivalue_line(
        'markers',
        (
            "{marker}: Mark the test as a "
            "Django migration test. Dynamically add `transactional_db` "
            "fixture to marked item. Migration tests are run only when "
            "`--test-migrations` pytest's CLI option passed."
        ).format(marker=constants.MIGRATIONS_TEST_MARKER),
    )
def pytest_addoption(parser):
    """Add the ``--test-migrations`` CLI option for running migration tests.

    :param parser: pytest's argument ``Parser``.
    """
    group = parser.getgroup('django_test_migrations')
    # Use the public OptionGroup.addoption API instead of the private
    # ``_addoption``; the public method additionally guards against
    # duplicate option registration.
    group.addoption(
        '--test-migrations',
        action='store_true',
        dest='test_migrations',
        default=False,
        help=(
            "Run Django migration tests. This does the following: "
            " ensure migrations are enabled, skip all test not marked with "
            "`{marker}` marker."
        ).format(marker=constants.MIGRATIONS_TEST_MARKER)
    )
def pytest_sessionstart(session):
    # When migration tests were requested on the command line, force-enable
    # Django migrations so pytest-django does not run in --no-migrations mode
    # (migration tests obviously need real migrations to execute).
    if session.config.getoption('test_migrations', False):
        # TODO: consider raising AssertionError when `nomigration` falsy
        session.config.option.nomigrations = False
def pytest_collection_modifyitems(session, items):
    """Mark migrator-based tests and, when ``--test-migrations`` is passed,
    skip every test that is not a migration test.

    :param session: the pytest ``Session``.
    :param items: list of collected test items (mutated in place).
    """
    # Fixed skip reason: it is the NON-migration tests that get skipped
    # when the option is passed (the old text said the opposite).
    migration_test_skip_marker = pytest.mark.skip(
        reason=(
            'Non-migration test skipped, because `--test-migrations` option '
            'was passed.'
        ),
    )
    # Hoisted out of the loop: the option value is loop-invariant.
    run_migration_tests = session.config.getoption('test_migrations', False)
    for item in items:
        # mark all tests using `migrator` fixture with `MIGRATION_TEST_MARKER`
        if 'migrator' in getattr(item, 'fixturenames', ()):
            item.add_marker(constants.MIGRATIONS_TEST_MARKER)
        # skip all no migration tests when option `--test-migrations` passed
        if (
            run_migration_tests
            and not item.get_closest_marker(constants.MIGRATIONS_TEST_MARKER)
        ):
            item.add_marker(migration_test_skip_marker)
@pytest.fixture(autouse=True, scope='function')
def _django_migration_marker(request):
    """Implement the migration marker, internal to `django_test_migrations`.

    This will dynamically request the `transactional_db` fixture
    and skip tests marked with migration marker if not
    explicitly requested by passing `--test-migrations` option.
    """
    marker = request.node.get_closest_marker(constants.MIGRATIONS_TEST_MARKER)
    if marker:
        if request.config.getoption('test_migrations', False):
            request.getfixturevalue('transactional_db')
        else:
            # Pass the reason positionally: the ``msg=`` keyword was
            # deprecated in pytest 7 and removed in pytest 8, while the
            # positional form works on every pytest version.
            pytest.skip(
                'Migration tests require `migrations` enabled and can '
                'be slow, hence they should be run separately with pytest '
                '`--test-migrations` option.'
            )
| 35.870588 | 81 | 0.651361 | 0 | 0 | 0 | 0 | 871 | 0.285667 | 0 | 0 | 1,361 | 0.446376 |
944d0a8ff7bd49103305170067ede2c52eda684a | 19,307 | py | Python | hrp/models.py | paleocore/paleocore110 | 754f3248ab22a2996b43bd224bd4ba15462edf7d | [
"MIT"
] | null | null | null | hrp/models.py | paleocore/paleocore110 | 754f3248ab22a2996b43bd224bd4ba15462edf7d | [
"MIT"
] | 7 | 2020-02-05T20:54:24.000Z | 2021-12-13T20:13:20.000Z | hrp/models.py | paleocore/paleocore110 | 754f3248ab22a2996b43bd224bd4ba15462edf7d | [
"MIT"
] | null | null | null | import os
from django.contrib.gis.db import models
from hrp.ontologies import *
# from hrp.ontologies import ITEM_TYPE_VOCABULARY, HRP_COLLECTOR_CHOICES, \
# HRP_COLLECTING_METHOD_VOCABULARY, HRP_BASIS_OF_RECORD_VOCABULARY, HRP_COLLECTION_CODES
from django.contrib.gis.geos import Point
import projects.models
class TaxonRank(projects.models.TaxonRank):
    """Taxonomic rank (e.g. genus, species) for the HRP project.

    Concrete subclass of the shared project model; only the admin display
    names are customized here.
    """
    class Meta:
        verbose_name = "HRP Taxon Rank"
        verbose_name_plural = "HRP Taxon Ranks"
class Taxon(projects.models.Taxon):
    """A taxonomic name for the HRP project, arranged in a parent tree."""
    # on_delete made explicit: CASCADE matches the implicit default of
    # Django < 2.0 (which this project targets) and the argument is
    # mandatory from Django 2.0 on.
    parent = models.ForeignKey('self', null=True, blank=True,
                               on_delete=models.CASCADE)
    rank = models.ForeignKey(TaxonRank, on_delete=models.CASCADE)

    class Meta:
        verbose_name = "HRP Taxon"
        verbose_name_plural = "HRP Taxa"
class IdentificationQualifier(projects.models.IdentificationQualifier):
    """Identification qualifier (e.g. cf., aff.) for the HRP project.

    Concrete subclass of the shared project model; only the admin display
    names are customized here.
    """
    class Meta:
        verbose_name = "HRP ID Qualifier"
        verbose_name_plural = "HRP ID Qualifiers"
# Locality Class
class Locality(projects.models.PaleoCoreLocalityBaseClass):
    """A collecting locality (place on the ground), identified by
    collection code + locality number + optional sublocality."""
    # String primary key overrides the default integer id.
    id = models.CharField(primary_key=True, max_length=255)
    collection_code = models.CharField(null=True, blank=True, choices=HRP_COLLECTION_CODES, max_length=10)
    locality_number = models.IntegerField(null=True, blank=True)
    sublocality = models.CharField(null=True, blank=True, max_length=50)
    description = models.TextField(null=True, blank=True, max_length=255)
    # Stratigraphic placement of the locality within a measured section.
    stratigraphic_section = models.CharField(null=True, blank=True, max_length=50)
    upper_limit_in_section = models.IntegerField(null=True, blank=True)
    lower_limit_in_section = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    error_notes = models.CharField(max_length=255, null=True, blank=True)
    notes = models.CharField(max_length=254, null=True, blank=True)
    geom = models.PointField(srid=4326, blank=True, null=True)
    date_last_modified = models.DateTimeField("Date Last Modified", auto_now=True)
    objects = models.GeoManager()

    def __str__(self):
        """Return e.g. "A.L. 166a"; missing parts are concatenated as the
        string "None" and then stripped out by the replace() calls below."""
        nice_name = str(self.collection_code) + " " + str(self.locality_number) + str(self.sublocality)
        return nice_name.replace("None", "").replace("--", "")

    class Meta:
        verbose_name = "HRP Locality"
        verbose_name_plural = "HRP Localities"
        ordering = ("locality_number", "sublocality")
class Person(projects.models.Person):
    """A person associated with the HRP project (collector, finder, etc.)."""
    last_name = models.CharField("Last Name", null=True, blank=True, max_length=256)
    first_name = models.CharField("First Name", null=True, blank=True, max_length=256)

    class Meta:
        verbose_name = "HRP Person"
        verbose_name_plural = "HRP People"
        ordering = ["last_name", "first_name"]

    def __str__(self):
        """Return "Last, First" when both parts exist.

        __str__ must always return a str: both name fields are nullable, and
        the old code returned None (a TypeError) when last_name was NULL.
        """
        if self.last_name and self.first_name:
            return self.last_name + ', ' + self.first_name
        # Fall back to whichever part is available, or an empty string.
        return self.last_name or self.first_name or ''
# Occurrence Class and Subclasses
class Occurrence(projects.models.PaleoCoreOccurrenceBaseClass):
    """
    Occurrence == Specimen, a general class for things discovered in the field.
    Finds have three subtypes: Archaeology, Biology, Geology.
    Fields are grouped by comments into logical sets (i.e. ontological classes).
    """
    basis_of_record = models.CharField("Basis of Record", max_length=50, blank=True, null=False,
                                       help_text='e.g. Observed item or Collected item',
                                       choices=HRP_BASIS_OF_RECORD_VOCABULARY)  # NOT NULL dwc:basisOfRecord
    field_number = models.CharField("Field Number", max_length=50, null=True, blank=True)
    item_type = models.CharField("Item Type", max_length=255, blank=True, null=False,
                                 choices=ITEM_TYPE_VOCABULARY)  # NOT NULL
    # TODO merge with taxon
    item_scientific_name = models.CharField("Sci Name", max_length=255, null=True, blank=True)
    # TODO merge with element
    item_description = models.CharField("Description", max_length=255, blank=True, null=True)
    item_count = models.IntegerField("Item Count", blank=True, null=True, default=1)
    collector = models.CharField("Collector", max_length=50, blank=True, null=True, choices=HRP_COLLECTOR_CHOICES)
    recorded_by = models.ForeignKey("Person", null=True, blank=True, related_name="occurrence_recorded_by")
    finder = models.CharField("Finder", null=True, blank=True, max_length=50, choices=HRP_COLLECTOR_CHOICES)
    found_by = models.ForeignKey("Person", null=True, blank=True, related_name="occurrence_found_by")
    collecting_method = models.CharField("Collecting Method", max_length=50,
                                         choices=HRP_COLLECTING_METHOD_VOCABULARY,
                                         null=True, blank=True)
    locality = models.ForeignKey("Locality", null=True, blank=True)  # dwc:sampling_protocol
    item_number = models.IntegerField("Item #", null=True, blank=True)
    item_part = models.CharField("Item Part", max_length=10, null=True, blank=True)
    cat_number = models.CharField("Cat Number", max_length=255, blank=True, null=True)
    disposition = models.CharField("Disposition", max_length=255, blank=True, null=True)
    preparation_status = models.CharField("Prep Status", max_length=50, blank=True, null=True)
    # TODO rename collection remarks to find remarks
    collection_remarks = models.TextField("Collection Remarks", null=True, blank=True, max_length=255)

    # Geological Context
    stratigraphic_formation = models.CharField("Formation", max_length=255, blank=True, null=True)
    stratigraphic_member = models.CharField("Member", max_length=255, blank=True, null=True)
    analytical_unit_1 = models.CharField(max_length=255, blank=True, null=True)
    analytical_unit_2 = models.CharField(max_length=255, blank=True, null=True)
    analytical_unit_3 = models.CharField(max_length=255, blank=True, null=True)
    analytical_unit_found = models.CharField(max_length=255, blank=True, null=True)
    analytical_unit_likely = models.CharField(max_length=255, blank=True, null=True)
    analytical_unit_simplified = models.CharField(max_length=255, blank=True, null=True)
    in_situ = models.BooleanField(default=False)
    ranked = models.BooleanField(default=False)
    weathering = models.SmallIntegerField(blank=True, null=True)
    surface_modification = models.CharField("Surface Mod", max_length=255, blank=True, null=True)
    geology_remarks = models.TextField("Geol Remarks", max_length=500, null=True, blank=True)

    # Location
    collection_code = models.CharField("Collection Code", max_length=20, blank=True, null=True)
    drainage_region = models.CharField("Drainage Region", null=True, blank=True, max_length=255)

    # Media
    image = models.FileField(max_length=255, blank=True, upload_to="uploads/images/hrp", null=True)

    class Meta:
        verbose_name = "HRP Occurrence"
        verbose_name_plural = "HRP Occurrences"
        ordering = ["collection_code", "locality", "item_number", "item_part"]

    def catalog_number(self):
        """
        Generate a pretty string formatted catalog number from constituent fields
        :return: catalog number as string
        """
        if self.basis_of_record == 'Collection':
            # Create catalog number string. Null values become None when converted to string
            if self.item_number:
                if self.item_part:
                    item_text = '-' + str(self.item_number) + str(self.item_part)
                else:
                    item_text = '-' + str(self.item_number)
            else:
                item_text = ''

            catalog_number_string = str(self.collection_code) + " " + str(self.locality_id) + item_text
            return catalog_number_string.replace('None', '').replace('- ', '')  # replace None with empty string
        else:
            return None

    @staticmethod
    def fields_to_display():
        """Fields shown in list displays."""
        fields = ("id", "barcode")
        return fields

    @staticmethod
    def method_fields_to_export():
        """
        Method to store a list of fields that should be added to data exports.
        Called by export admin actions.
        These fields are defined in methods and are not concrete fields in the DB so have to be declared.
        :return:
        """
        return ['longitude', 'latitude', 'easting', 'northing', 'catalog_number', 'photo']

    def get_all_field_names(self):
        """
        Field names from model
        :return: list with all field names
        """
        field_list = self._meta.get_fields()  # produce a list of field objects
        return [f.name for f in field_list]  # return a list of names from each field

    def get_foreign_key_field_names(self):
        """
        Get foreign key fields
        :return: returns a list of for key field names
        """
        field_list = self._meta.get_fields()  # produce a list of field objects
        return [f.name for f in field_list if f.is_relation]  # return a list of names for fk fields

    def get_concrete_field_names(self):
        """
        Get field names that correspond to columns in the DB
        :return: returns a lift
        """
        field_list = self._meta.get_fields()
        return [f.name for f in field_list if f.concrete]

    def photo(self):
        """Return an HTML link/img tag for the full-size image, or None."""
        try:
            return u'<a href="%s"><img src="%s" style="width:600px" /></a>' \
                   % (os.path.join(self.image.url), os.path.join(self.image.url))
        # Was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; image.url raises ValueError when no file is set.
        except Exception:
            return None
    photo.short_description = 'Photo'
    photo.allow_tags = True
    photo.mark_safe = True

    def thumbnail(self):
        """Return an HTML link/img tag for a thumbnail of the image, or None."""
        try:
            return u'<a href="%s"><img src="%s" style="width:100px" /></a>' \
                   % (os.path.join(self.image.url), os.path.join(self.image.url))
        # Was a bare ``except:``; see photo() above.
        except Exception:
            return None
    thumbnail.short_description = 'Thumb'
    thumbnail.allow_tags = True
    thumbnail.mark_safe = True
class Biology(Occurrence):
    """A biological (faunal) find; extends Occurrence with taxonomy,
    skeletal-element data, a per-tooth inventory, and dental measurements."""
    # Biology
    sex = models.CharField("Sex", null=True, blank=True, max_length=50)
    life_stage = models.CharField("Life Stage", null=True, blank=True, max_length=50, choices=HRP_LIFE_STAGE_CHOICES)
    size_class = models.CharField("Size Class", null=True, blank=True, max_length=50, choices=HRP_SIZE_CLASS_CHOICES)
    # Taxon
    taxon = models.ForeignKey(Taxon,
                              default=0, on_delete=models.SET_DEFAULT,  # prevent deletion when taxa deleted
                              related_name='hrp_taxon_bio_occurrences')
    identification_qualifier = models.ForeignKey(IdentificationQualifier, null=True, blank=True,
                                                 on_delete=models.SET_NULL,
                                                 related_name='hrp_id_qualifier_bio_occurrences')
    qualifier_taxon = models.ForeignKey(Taxon, null=True, blank=True,
                                        on_delete=models.SET_NULL,
                                        related_name='hrp_qualifier_taxon_bio_occurrences')
    verbatim_taxon = models.CharField(null=True, blank=True, max_length=1024)
    verbatim_identification_qualifier = models.CharField(null=True, blank=True, max_length=255)
    taxonomy_remarks = models.TextField(max_length=500, null=True, blank=True)
    # Identification
    identified_by = models.CharField(null=True, blank=True, max_length=100, choices=HRP_IDENTIFIER_CHOICES)
    year_identified = models.IntegerField(null=True, blank=True)
    type_status = models.CharField(null=True, blank=True, max_length=50)
    fauna_notes = models.TextField(null=True, blank=True, max_length=64000)
    # Element
    side = models.CharField("Side", null=True, blank=True, max_length=50, choices=HRP_SIDE_CHOICES)
    element = models.CharField("Element", null=True, blank=True, max_length=50, choices=HRP_ELEMENT_CHOICES)
    # TODO add element_modifier choices once field is cleaned
    element_modifier = models.CharField("Element Mod", null=True, blank=True, max_length=50,
                                        choices=HRP_ELEMENT_MODIFIER_CHOICES)
    # TODO populate portion after migrate
    element_portion = models.CharField("Element Portion", null=True, blank=True, max_length=50,
                                       choices=HRP_ELEMENT_PORTION_CHOICES)
    # TODO populate number choices after migrate
    element_number = models.CharField(null=True, blank=True, max_length=50, choices=HRP_ELEMENT_NUMBER_CHOICES)
    element_remarks = models.TextField(max_length=500, null=True, blank=True)

    tooth_upper_or_lower = models.CharField(null=True, blank=True, max_length=50)
    tooth_number = models.CharField(null=True, blank=True, max_length=50)
    tooth_type = models.CharField(null=True, blank=True, max_length=50)

    # Tooth inventory flags: u/l = upper/lower, l/r = left/right,
    # i/c/p/m = incisor/canine/premolar/molar, trailing digit = position.
    # upper dentition fields
    uli1 = models.BooleanField(default=False)
    uli2 = models.BooleanField(default=False)
    uli3 = models.BooleanField(default=False)
    uli4 = models.BooleanField(default=False)
    uli5 = models.BooleanField(default=False)
    uri1 = models.BooleanField(default=False)
    uri2 = models.BooleanField(default=False)
    uri3 = models.BooleanField(default=False)
    uri4 = models.BooleanField(default=False)
    uri5 = models.BooleanField(default=False)
    ulc = models.BooleanField(default=False)
    urc = models.BooleanField(default=False)
    ulp1 = models.BooleanField(default=False)
    ulp2 = models.BooleanField(default=False)
    ulp3 = models.BooleanField(default=False)
    ulp4 = models.BooleanField(default=False)
    urp1 = models.BooleanField(default=False)
    urp2 = models.BooleanField(default=False)
    urp3 = models.BooleanField(default=False)
    urp4 = models.BooleanField(default=False)
    ulm1 = models.BooleanField(default=False)
    ulm2 = models.BooleanField(default=False)
    ulm3 = models.BooleanField(default=False)
    urm1 = models.BooleanField(default=False)
    urm2 = models.BooleanField(default=False)
    urm3 = models.BooleanField(default=False)
    # lower dentition fields
    lli1 = models.BooleanField(default=False)
    lli2 = models.BooleanField(default=False)
    lli3 = models.BooleanField(default=False)
    lli4 = models.BooleanField(default=False)
    lli5 = models.BooleanField(default=False)
    lri1 = models.BooleanField(default=False)
    lri2 = models.BooleanField(default=False)
    lri3 = models.BooleanField(default=False)
    lri4 = models.BooleanField(default=False)
    lri5 = models.BooleanField(default=False)
    llc = models.BooleanField(default=False)
    lrc = models.BooleanField(default=False)
    llp1 = models.BooleanField(default=False)
    llp2 = models.BooleanField(default=False)
    llp3 = models.BooleanField(default=False)
    llp4 = models.BooleanField(default=False)
    lrp1 = models.BooleanField(default=False)
    lrp2 = models.BooleanField(default=False)
    lrp3 = models.BooleanField(default=False)
    lrp4 = models.BooleanField(default=False)
    llm1 = models.BooleanField(default=False)
    llm2 = models.BooleanField(default=False)
    llm3 = models.BooleanField(default=False)
    lrm1 = models.BooleanField(default=False)
    lrm2 = models.BooleanField(default=False)
    lrm3 = models.BooleanField(default=False)
    # indeterminate dental fields
    indet_incisor = models.BooleanField(default=False)
    indet_canine = models.BooleanField(default=False)
    indet_premolar = models.BooleanField(default=False)
    indet_molar = models.BooleanField(default=False)
    indet_tooth = models.BooleanField(default=False)
    deciduous = models.BooleanField(default=False)

    # Measurements (um = upper molar, lm = lower molar)
    um_tooth_row_length_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    um_1_length_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    um_1_width_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    um_2_length_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    um_2_width_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    um_3_length_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    um_3_width_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    lm_tooth_row_length_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    lm_1_length = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    lm_1_width = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    lm_2_length = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    lm_2_width = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    lm_3_length = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    lm_3_width = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)

    # TODO delete attributes, preparations and morphobank number
    attributes = models.CharField(null=True, blank=True, max_length=50)
    preparations = models.CharField(null=True, blank=True, max_length=50)
    morphobank_number = models.IntegerField(null=True, blank=True)  # empty, ok to delete

    def __str__(self):
        return str(self.taxon.__str__())

    class Meta:
        verbose_name = "HRP Biology"
        verbose_name_plural = "HRP Biology"
class Archaeology(Occurrence):
    """An archaeological find; extends Occurrence with artifact type and
    basic linear measurements."""
    find_type = models.CharField(null=True, blank=True, max_length=255)
    length_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    width_mm = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)

    class Meta:
        verbose_name = "HRP Archaeology"
        verbose_name_plural = "HRP Archaeology"
class Geology(Occurrence):
    """A geological observation; extends Occurrence with structural
    (dip/strike) and lithological attributes."""
    find_type = models.CharField(null=True, blank=True, max_length=255)
    dip = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    strike = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    color = models.CharField(null=True, blank=True, max_length=255)
    texture = models.CharField(null=True, blank=True, max_length=255)

    class Meta:
        verbose_name = "HRP Geology"
        verbose_name_plural = "HRP Geology"
# Hydrology Class
class Hydrology(models.Model):
    """A drainage (stream/river) line feature from the project map data."""
    length = models.DecimalField(max_digits=38, decimal_places=8, null=True, blank=True)
    name = models.CharField(null=True, blank=True, max_length=50)
    size = models.IntegerField(null=True, blank=True)
    map_sheet = models.CharField(null=True, blank=True, max_length=50)
    geom = models.LineStringField(srid=4326)
    objects = models.GeoManager()

    def __str__(self):
        # name may be NULL; str() guarantees a string is returned.
        return str(self.name)

    class Meta:
        verbose_name = "HRP Hydrology"
        verbose_name_plural = "HRP Hydrology"
# Media Classes
class Image(models.Model):
    """An image attached to an Occurrence."""
    # on_delete made explicit: CASCADE matches the implicit Django < 2.0
    # default and is mandatory from Django 2.0 on.
    occurrence = models.ForeignKey("Occurrence", on_delete=models.CASCADE,
                                   related_name='hrp_occurrences')
    image = models.ImageField(upload_to="uploads/images", null=True, blank=True)
    description = models.TextField(null=True, blank=True)
class File(models.Model):
    """An arbitrary file attached to an Occurrence."""
    # on_delete made explicit: CASCADE matches the implicit Django < 2.0
    # default and is mandatory from Django 2.0 on.
    occurrence = models.ForeignKey("Occurrence", on_delete=models.CASCADE)
    file = models.FileField(upload_to="uploads/files", null=True, blank=True)
    description = models.TextField(null=True, blank=True)
| 48.147132 | 117 | 0.70187 | 18,870 | 0.977366 | 0 | 0 | 504 | 0.026105 | 0 | 0 | 3,381 | 0.175118 |
944d49dda9afc349c390f4947052a89775f74de8 | 6,781 | py | Python | sfepy/discrete/dg/limiters.py | BubuLK/sfepy | 3e8e2082c26d574dc334fe3a0e0eeb723f7a6657 | [
"BSD-3-Clause"
] | 2 | 2017-05-24T17:14:11.000Z | 2018-05-15T14:10:14.000Z | sfepy/discrete/dg/limiters.py | Moyunning/sfepy | 127ab753a2f4f24ed359d0152088d11227c3dd49 | [
"BSD-3-Clause"
] | null | null | null | sfepy/discrete/dg/limiters.py | Moyunning/sfepy | 127ab753a2f4f24ed359d0152088d11227c3dd49 | [
"BSD-3-Clause"
] | 1 | 2018-05-15T16:06:52.000Z | 2018-05-15T16:06:52.000Z | # -*- coding: utf-8 -*-
"""
Limiters for high order DG methods
"""
import numpy as nm
from sfepy.discrete.dg.poly_spaces import iter_by_order
from sfepy.discrete.dg.fields import get_raveler, get_unraveler
from sfepy.base.base import output
MACHINE_EPS = 1e-30
def minmod(a, b, c):
    """Vectorized three-argument minmod function.

    Element-wise::

        sign(a) * min(|a|, |b|, |c|)  where sign(a) == sign(b) == sign(c)
        0                             elsewhere

    Parameters
    ----------
    a, b, c : array_like
        Arrays of identical shape.

    Returns
    -------
    out : ndarray
        Float array with the shape of `a`.
    """
    same_sign = (nm.sign(a) == nm.sign(b)) & (nm.sign(b) == nm.sign(c))

    out = nm.zeros(nm.shape(a))
    out[same_sign] = nm.sign(a[same_sign]) * nm.minimum.reduce(
        [nm.abs(v[same_sign]) for v in (a, b, c)])
    return out
def minmod_seq(abc):
    """Minmod function of an arbitrary number of arrays.

    Returns zero wherever the signs of the inputs disagree, and
    ``sign(abc[0]) * min(|abc[0]|, ..., |abc[n-1]|)`` wherever every input
    shares the same sign.

    Parameters
    ----------
    abc : sequence of array_like

    Returns
    -------
    out : ndarray
    """
    stacked_signs = nm.hstack([nm.sign(arr) for arr in abc])
    agree = (stacked_signs[:, 0, None] == stacked_signs).prod(axis=1)
    agree = agree.astype(bool)

    out = nm.zeros(nm.shape(abc[0]))
    smallest = nm.minimum.reduce([nm.abs(arr[agree]) for arr in abc])
    out[agree] = nm.sign(abc[0][agree]) * smallest
    return out
class DGLimiter:
    """Abstract base class for DG solution limiters.

    Caches per-field sizes and the ravel/unravel converters shared by the
    concrete limiters; subclasses implement ``__call__``.
    """
    name = "abstract DG limiter"

    def __init__(self, field, verbose=False):
        # Cache geometry info and build converters between the flat DOF
        # vector and the (cell, dof) layout used by the limiters.
        self.field = field
        self.extended = field.extended
        self.n_el_nod = field.n_el_nod
        self.n_cell = field.n_cell
        self.ravel = get_raveler(self.n_el_nod, self.n_cell)
        self.unravel = get_unraveler(self.n_el_nod, self.n_cell)
        self.verbose = verbose
        output("Setting up limiter: {} for {}.".format(self.name,
                                                       self.field.family_name))

    def __call__(self, u):
        # Must be overridden: limit the raveled solution vector `u`.
        raise NotImplementedError("Called abstract limiter")
class IdentityLimiter(DGLimiter):
    """No-op limiter: hands the solution back exactly as received."""
    name = "identity"

    def __call__(self, u):
        if self.verbose:
            output(self.name + " limiter")
        return u
class MomentLimiter1D(DGLimiter):
    """Moment limiter for 1D based on [1]_

    .. [1] Krivodonova (2007):
    Limiters for high-order discontinuous Galerkin methods"""
    name = "moment_1D_limiter"

    def __call__(self, u):
        """
        Parameters
        ----------
        u : array_like
            raveled solution at time step n in shape (order * n_space_nod, ...)
        Returns
        -------
        u : ndarray
            unraveled limited solution
        """
        # for convenience do not try to limit FV
        if self.n_el_nod == 1:
            if self.verbose: output(self.name + " no limiting for FV.")
            return u
        # Work in (dof, cell, ...) layout; the two boundary cells are skipped.
        u = self.unravel(u).swapaxes(0, 1)
        idx = nm.arange(nm.shape(u[0, 1:-1])[0])  # interior cells still being limited
        idx_bc = nm.arange(nm.shape(u[0, :])[0])  # all cells, incl. boundary
        nu = nm.copy(u)
        tilu = nm.zeros(u.shape[1:])
        # Limit the moments from the highest down; per cell, stop once the
        # minmod leaves a moment numerically unchanged.
        for ll in range(self.n_el_nod - 1, 0, -1):
            tilu[idx] = minmod(nu[ll, 1:-1][idx],
                               nu[ll - 1, 2:][idx] - nu[ll - 1, 1:-1][idx],
                               nu[ll - 1, 1:-1][idx] - nu[ll - 1, :-2][idx])
            # Keep only the cells where limiting actually changed the moment.
            idx = idx[nm.where(abs(tilu[idx] - nu[ll, 1:-1][idx])
                               > MACHINE_EPS)[0]]
            if self.verbose:
                # NOTE(review): this reports len(idx_bc), which never shrinks,
                # and the break below can only fire for an empty mesh --
                # len(idx) looks like the intended quantity; confirm.
                output("{} limiting in {} cells out of {} :".
                       format(self.name, len(idx_bc), self.n_cell))
                output(idx_bc)
            if len(idx_bc) == 0:
                break
            nu[ll, 1:-1][idx] = tilu[idx]
        return self.ravel(nu.swapaxes(0, 1))[:, 0]
class MomentLimiter2D(DGLimiter):
    """Moment limiter for 2D based on [1]_

    .. [1] Krivodonova (2007):
    Limiters for high-order discontinuous Galerkin methods"""
    name = "moment_limiter_2D"

    def __call__(self, u):
        """
        Parameters
        ----------
        u : array_like
            raveled solution at time step n in shape (order * n_space_nod, ...)
        Returns
        -------
        u : ndarray
            unraveled limited solution
        """
        if self.n_el_nod == 1:
            if self.verbose: output(self.name + " no limiting for FV.")
            return u
        ex = self.extended
        # Limit only cells whose facet neighbours all exist (interior cells).
        nbrhd_idx = self.field.get_facet_neighbor_idx()
        inner_mask = nbrhd_idx[:, :, 0] > 0
        idx = nm.where(inner_mask.prod(axis=1))[0]
        nbrhd_idx = nbrhd_idx[:, :, 0]
        u = self.unravel(u).swapaxes(0, 1)
        # Re-index the DOFs by their (ii, jj) moment orders.
        nu = nm.zeros((self.field.approx_order + 1,) * 2 + u.shape[1:])
        tilu = nm.zeros(u.shape[1:])
        for ll, (ii, jj) in enumerate(iter_by_order(self.field.approx_order, 2,
                                                    extended=ex)):
            nu[ii, jj, ...] = u[ll]
        # Sweep the moments from highest to lowest; per cell, stop once the
        # minmod leaves a moment numerically unchanged.
        for ii, jj in reversed(list(iter_by_order(self.field.approx_order, 2,
                                                  extended=ex))):
            minmod_args = [nu[ii, jj, idx]]
            nbrhs = nbrhd_idx[idx]
            if ii - 1 >= 0:
                alf = nm.sqrt((2 * ii - 1) / (2 * ii + 1))
                # right difference in x axis
                dx_r = alf * (nu[ii - 1, jj, nbrhs[:, 1]] - nu[ii - 1, jj, idx])
                # left difference in x axis
                dx_l = alf * (nu[ii - 1, jj, idx] - nu[ii - 1, jj, nbrhs[:, 3]])
                minmod_args += [dx_r, dx_l]
            if jj - 1 >= 0:
                alf = nm.sqrt((2 * jj - 1) / (2 * jj + 1))
                # right i.e. element "up" difference in y axis
                dy_up = alf * (nu[ii, jj - 1, nbrhs[:, 2]] - nu[ii, jj - 1, idx])
                # left i.e. element "down" difference in y axis
                dy_dn = alf * (nu[ii, jj - 1, idx] - nu[ii, jj - 1, nbrhs[:, 0]])
                minmod_args += [dy_up, dy_dn]
            tilu[idx] = minmod_seq(minmod_args)
            # Narrow to the cells where limiting changed the moment.
            idx = idx[nm.where(abs(tilu[idx] - nu[ii, jj, idx]) > MACHINE_EPS)[0]]
            if self.verbose:
                output("{} limiting {} in {} cells out of {} :".
                       format(self.name, (ii, jj), len(idx), self.n_cell))
                output(idx)
            if len(idx) == 0:
                break
            nu[ii, jj, idx] = tilu[idx]
        # Map the (ii, jj) moments back to the original DOF ordering.
        resu = nm.zeros(u.shape)
        for ll, (ii, jj) in enumerate(iter_by_order(self.field.approx_order, 2,
                                                    extended=ex)):
            resu[ll] = nu[ii, jj]
        return self.ravel(resu.swapaxes(0, 1))[:, 0]
| 32.290476 | 83 | 0.493585 | 5,309 | 0.782923 | 0 | 0 | 0 | 0 | 0 | 0 | 1,875 | 0.276508 |
944e1897e78e31a1756131e5df7add361d007cab | 3,042 | py | Python | yarn_list_utils/Utils.py | windgeek/bigdata_cus | c876fc445cbb7c867179bcdd0612bc9f880b597a | [
"Apache-2.0"
] | null | null | null | yarn_list_utils/Utils.py | windgeek/bigdata_cus | c876fc445cbb7c867179bcdd0612bc9f880b597a | [
"Apache-2.0"
] | null | null | null | yarn_list_utils/Utils.py | windgeek/bigdata_cus | c876fc445cbb7c867179bcdd0612bc9f880b597a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by wind on 2021/4/21
import requests
import json
import time
class Utils:
    """Static helpers for fetching JSON over HTTP and formatting timestamps.

    Python 3 fix: the original used ``dict.has_key()``, which was removed in
    Python 3, although the module already relied on Python 3
    ``except ... as ex`` syntax; all such calls now use the ``in`` operator.
    A dead, commented-out Python 2 variant of fetchJsonToList was dropped.
    """

    @staticmethod
    def fetchJsonToListItems(url, root, subitem):
        """GET `url` and return the parsed JSON at [root][subitem]; [] on any failure."""
        response = requests.get(url)
        try:
            json_items = json.loads(response.text)
            return json_items[root][subitem]
        except Exception as ex:
            items = []
            return items

    @staticmethod
    def fetchJsonItems(url, root, subitem):
        """Same lookup as fetchJsonToListItems, written with a single exit point."""
        response = requests.get(url)
        try:
            json_items = json.loads(response.text)
            items = json_items[root][subitem]
        except Exception as ex:
            items = []
        return items

    @staticmethod
    def fetchJsonKvItems(url, root):
        """GET `url` and return the JSON object under `root`; {} on any failure."""
        response = requests.get(url)
        try:
            json_items = json.loads(response.text)
            items = json_items[root]
        except Exception as ex:
            items = {}
        return items

    @staticmethod
    def fetchJsonToList(url):
        """GET `url` and return the whole parsed JSON document; [] on any failure."""
        response = requests.get(url)
        try:
            json_items = json.loads(response.text)
            items = json_items
        except Exception as ex:
            items = []
        return items

    @staticmethod
    def fetchRMJsonKvItems(url, root, key, value):
        """Map item[key] -> item[value] over the items under `root`.

        Entries whose "modelerType" contains "user" are skipped. Unlike the
        fetchJson* helpers above, JSON/HTTP errors propagate to the caller.
        """
        result = {}
        response = requests.get(url)
        text = json.loads(response.text)
        items = text[root]
        for item in items:
            # Skip per-user metric beans.
            if "modelerType" in item:
                if item["modelerType"].find("user") != -1:
                    continue
            if key in item and value in item:
                result[item[key]] = item[value]
        return result

    @staticmethod
    def fetchJsonKvsItems(url, root, key, value):
        """Map item[key] -> [item[v] for v in value.split(",")] over items under `root`.

        Entries whose "modelerType" contains "user" are skipped; errors propagate.
        """
        result = {}
        response = requests.get(url)
        text = json.loads(response.text)
        items = text[root]
        for item in items:
            # Skip per-user metric beans.
            if "modelerType" in item:
                if item["modelerType"].find("user") != -1:
                    continue
            if key in item:
                result[item[key]] = [item[i] for i in value.split(",")]
        return result

    @staticmethod
    def timeToDatetime(timestamp):
        """Format a Unix timestamp as a local 'YYYY-MM-DD HH:MM:SS' string."""
        t = time.localtime(timestamp)
        return time.strftime('%Y-%m-%d %H:%M:%S', t)

    @staticmethod
    def currentTimeToDatetime():
        """Format the current local time as 'YYYY-MM-DD HH:MM:SS'."""
        t = time.localtime(time.time())
        return time.strftime('%Y-%m-%d %H:%M:%S', t)

    @staticmethod
    def getOrDefault(map, key, default):
        """Return map[key] if the key is present, else `default`."""
        if key in map:
            return map[key]
        return default

    @staticmethod
    def getOrDefaultByList(list, key, value, default):
        """Return the first item with item[key] == value, else `default`."""
        for item in list:
            if item[key] == value:
                return item
        return default
| 26.920354 | 71 | 0.56476 | 2,921 | 0.960224 | 0 | 0 | 2,563 | 0.842538 | 0 | 0 | 460 | 0.151216 |
944e5d01bc95a8da54644cdff9222609c9befc8d | 1,695 | py | Python | api/views/errors.py | rubycho/webpi | 49163427d6a56548da415ec58f150c610cd031be | [
"MIT"
] | 5 | 2020-01-31T13:07:15.000Z | 2020-04-02T05:23:51.000Z | api/views/errors.py | rubycho/webpi | 49163427d6a56548da415ec58f150c610cd031be | [
"MIT"
] | 10 | 2020-02-06T08:36:08.000Z | 2021-06-10T18:21:23.000Z | api/views/errors.py | rubycho/webpi | 49163427d6a56548da415ec58f150c610cd031be | [
"MIT"
] | null | null | null | import enum
from typing import List
from rest_framework import status
from rest_framework.response import Response
class DataType(enum.Enum):
    """
    Constants, used on generating error strings: name where in the request
    the offending item lives (query string, URL param, or request body).
    """
    QUERY = 'query'
    PARAM = 'param'
    BODY = 'body'
def extract_data(data: dict, keys: List[str]) -> (List[str], dict):
    """
    Pull the requested keys out of a dict-like source.
    :param data: dict-similar-typed source
    :param keys: keys to extract values from the source
    :return: (keys that were absent from data, extracted data)
    """
    absent = []
    found = {}
    for name in keys:
        if name in data:
            found[name] = data[name]
        else:
            absent.append(name)
    return absent, found
def missing_keys(data_type: DataType, keys: List[str]) -> Response:
    """
    Build a BAD REQUEST Response reporting absent query/param/body items.
    :param data_type: missing data type
    :param keys: missing item names
    :return: Response
    """
    error_messages = []
    for key in keys:
        error_messages.append('missing {} {}'.format(data_type.value, key))
    return Response(data={'errors': error_messages},
                    status=status.HTTP_400_BAD_REQUEST)
def wrong_keys(data_type: DataType, keys: List[str]) -> Response:
    """
    Build a BAD REQUEST Response reporting invalid query/param/body items.
    :param data_type: invalid data type
    :param keys: invalid item names
    :return: Response
    """
    error_messages = []
    for key in keys:
        error_messages.append('wrong {} {}'.format(data_type.value, key))
    return Response(data={'errors': error_messages},
                    status=status.HTTP_400_BAD_REQUEST)
| 24.214286 | 68 | 0.631858 | 149 | 0.087906 | 0 | 0 | 0 | 0 | 0 | 0 | 747 | 0.440708 |
9451476f11677b46be97a0f4978515f3acb6cf52 | 1,049 | py | Python | object-oriented-programming/src/oop-interface.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | 1 | 2019-01-02T15:04:08.000Z | 2019-01-02T15:04:08.000Z | object-oriented-programming/src/oop-interface.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | null | null | null | object-oriented-programming/src/oop-interface.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | null | null | null | class MLModelInterface:
def fit(self, features, labels):
raise NotImplementedError
def predict(self, data):
raise NotImplementedError
class KNeighborsClassifier(MLModelInterface):
    """Stub k-nearest-neighbours classifier (no learning implemented)."""
    def fit(self, features, labels):
        pass

    def predict(self, data):
        pass

class LinearRegression(MLModelInterface):
    """Stub linear-regression model (no learning implemented)."""
    def fit(self, features, labels):
        pass

    def predict(self, data):
        pass

class LogisticsRegression(MLModelInterface):
    """Stub logistic-regression model.

    NOTE(review): unlike the two stubs above, this inherits fit/predict from
    MLModelInterface, so calling either raises NotImplementedError.
    """
    pass
# Input to the classifier: iris measurements
# (sepal length, sepal width, petal length, petal width).
features = [
    (5.1, 3.5, 1.4, 0.2),
    (4.9, 3.0, 1.4, 0.2),
    (4.7, 3.2, 1.3, 0.2),

    (7.0, 3.2, 4.7, 1.4),
    (6.4, 3.2, 4.5, 1.5),
    (6.9, 3.1, 4.9, 1.5),

    (6.3, 3.3, 6.0, 2.5),
    (5.8, 2.7, 5.1, 1.9),
    (7.1, 3.0, 5.9, 2.1),
]

# Class labels for `features`, one per row:
# 0: I. setosa
# 1: I. versicolor
# 2: I. virginica
labels = [0, 0, 0, 1, 1, 1, 2, 2, 2]

# Unlabeled samples we want predictions for.
to_predict = [
    (5.7, 2.8, 4.1, 1.3),
    (4.9, 2.5, 4.5, 1.7),
    (4.6, 3.4, 1.4, 0.3),
]

model = LinearRegression()
model.fit(features, labels)
model.predict(to_predict)
# [1, 2, 0]  <- expected result for a real model; the stub returns None.
| 18.732143 | 45 | 0.553861 | 483 | 0.460439 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.08103 |
94538901fe8b5906394cf7db13b524c5e05fce62 | 2,933 | py | Python | main.py | celikmustafa89/streaming-standardization | 190c5fcde7ed55fc3c4f674ea0d26992b41fd8d1 | [
"MIT"
] | null | null | null | main.py | celikmustafa89/streaming-standardization | 190c5fcde7ed55fc3c4f674ea0d26992b41fd8d1 | [
"MIT"
] | null | null | null | main.py | celikmustafa89/streaming-standardization | 190c5fcde7ed55fc3c4f674ea0d26992b41fd8d1 | [
"MIT"
] | null | null | null | import random
import math
listes = []
"""for i in range(3):
# listes.append(random.sample(range(5, 50), random.randint(5,1000)))
listes.append(random.sample(range(1, 100), 10))
"""
listes = [
[10,20,30,90,30,54,123,34,656,246,24,842,6784,2,56,4,5,7423,6,6,3,345,6,7,345,46],
[10,20,30,90],
[10,20,30,90,30,54,123,34,656,246,24,842,6784,2,56,4]
]
def sum(liste):
    """Total of all values in `liste`. (Shadows the builtin sum.)"""
    accumulated = 0
    for value in liste:
        accumulated += value
    return accumulated
def mean(liste):
    """Arithmetic mean of `liste` (uses the module-level sum())."""
    return sum(liste) / len(liste)
def std(liste):
    """Sample standard deviation of `liste` (n-1 denominator)."""
    average = mean(liste)
    squared_deviation = 0.0
    for value in liste:
        squared_deviation += (average - value) ** 2
    return math.sqrt(squared_deviation / (len(liste) - 1))
def variance(samples):
    """Sample variance via Welford's online algorithm (n-1 denominator).

    After every element past the first, prints the running variance
    estimate, mirroring the batch/streaming comparison output.
    """
    running_mean = 0
    aggregate = 0
    for position, value in enumerate(samples):
        previous_mean = running_mean
        running_mean = running_mean + (value - running_mean) / (position + 1)
        aggregate = aggregate + (value - running_mean) * (value - previous_mean)
        if position != 0:
            print("---- {}".format(aggregate / position))
    return aggregate / (len(samples) - 1)
def evaluate(nums):
    """Print batch statistics for `nums` and compare with the streaming value.

    NOTE(review): variance(nums) returns the sample *variance*, yet it is
    stored in `streaming_std` and subtracted from the batch standard
    deviation, so the reported "error" mixes units -- confirm intent.
    """
    print("list: ")
    print(nums)
    print("sum: {}".format(sum(nums)))
    print("mean: {}".format(mean(nums)))
    print("size: {}".format(len(nums)))
    batch_std = std(nums)
    print("batch_std: {}".format(batch_std))
    streaming_std = variance(nums)
    print("streaming_std: {}".format(streaming_std))
    difference = batch_std - streaming_std
    print("batch_std - streaming_std = {}".format(difference))
    # Relative difference, in percent of the batch value.
    error = 100 * (difference / batch_std)
    print("original error: {}%".format((error)))
    print("float error: {}%".format(float("%0.9f" % error)))
    print("int error: {}%".format(int(error)))
    print("\n")
def main():
    """Run std() and evaluate() over growing prefixes of the first sample list."""
    #for liss in listes:
    liss = listes[0]
    print("liste:{}".format(liss) )
    """
    for i in range(len(liss)):
        if i==0 or i==1:
            continue
        sub_liste = liss[:i]
        print("original: {}".format(std(sub_liste)))
    """
    # For every prefix of length >= 2, print the batch std and a full report.
    for i in range(len(liss)):
        if i == 0 or i == 1:
            continue
        sub_liste = liss[:i]
        print("original: {}".format(std(sub_liste)))
        evaluate(sub_liste)
# Evaluate every fixed sample list in full.
for liste in listes:
    evaluate(liste)
#main()
"""
for i in range(len(listes[0])):
    if i == 0 or i == 1:
        continue
    sub_liste = listes[0][:i]
    print("original[{}] S: {} - standardization: {}".format(i,std(sub_liste)))
"""
# Batch standardization: score each element against the stats of the prefix
# seen so far (prefix of length >= 2, to keep std() well defined).
i = 0
for x in listes[0]:
    i += 1
    if i == 1:
        continue
    print("standardization({}) : {}".format(x, ((x-mean(listes[0][:i]))/(std(listes[0][:i])))))

# Interactive streaming standardization: read integers from stdin forever and
# standardize each against the Welford running mean/variance.
# NOTE(review): `sum` here rebinds the module-level sum() to an int accumulator.
M = S = count = sum = 0
while(True):
    val = input("sayı: ")
    val = int(val)
    sum += val
    count += 1
    oldM = M
    M = M + (val - M) / (count)
    S = S + (val - M) * (val - oldM)
    if count != 1:
        print("S = {}".format(S / (count - 1)))
        print("stream standardization({}) : {}".format(val, (val-(sum/count))/math.sqrt(S/(count-1))))
| 23.094488 | 102 | 0.527446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 815 | 0.277778 |
9453f91d17fbd3f26c0b9b47751d7481f19b12e8 | 109 | gyp | Python | binding.gyp | artik-snu/node-addon-gpio | 9530478d8543b0ffbfbcf8780a7b91ccbd56a111 | [
"MIT"
] | null | null | null | binding.gyp | artik-snu/node-addon-gpio | 9530478d8543b0ffbfbcf8780a7b91ccbd56a111 | [
"MIT"
] | null | null | null | binding.gyp | artik-snu/node-addon-gpio | 9530478d8543b0ffbfbcf8780a7b91ccbd56a111 | [
"MIT"
] | null | null | null | {
"targets": [
{
"target_name": "gpio",
"sources": ["gpio.cc", "tizen-gpio.cc"]
}
]
} | 13.625 | 45 | 0.422018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.559633 |
945432debee6e2dc09fae0ff0cc88d5b42bb1d00 | 2,208 | py | Python | webauthn/helpers/parse_client_data_json.py | MasterKale/py_webauthn | fe97b9841328aa84559bd2a282c07d20145845c1 | [
"BSD-3-Clause"
] | null | null | null | webauthn/helpers/parse_client_data_json.py | MasterKale/py_webauthn | fe97b9841328aa84559bd2a282c07d20145845c1 | [
"BSD-3-Clause"
] | null | null | null | webauthn/helpers/parse_client_data_json.py | MasterKale/py_webauthn | fe97b9841328aa84559bd2a282c07d20145845c1 | [
"BSD-3-Clause"
] | null | null | null | import json
from json.decoder import JSONDecodeError
from .base64url_to_bytes import base64url_to_bytes
from .exceptions import InvalidClientDataJSONStructure
from .structs import CollectedClientData, TokenBinding
def parse_client_data_json(val: bytes) -> CollectedClientData:
    """
    Break apart `response.clientDataJSON` buffer into structured data.

    :param val: raw clientDataJSON bytes as received from the authenticator
    :return: a populated CollectedClientData
    :raises InvalidClientDataJSONStructure: when the bytes are not JSON or a
        required property ("type", "challenge", "origin", or tokenBinding's
        "status") is missing
    """
    try:
        json_dict = json.loads(val)
    except JSONDecodeError:
        raise InvalidClientDataJSONStructure(
            "Unable to decode client_data_json bytes as JSON"
        )

    # Ensure required values are present in client data
    for required in ("type", "challenge", "origin"):
        if required not in json_dict:
            raise InvalidClientDataJSONStructure(
                'client_data_json missing required property "{}"'.format(required)
            )

    client_data = CollectedClientData(
        type=json_dict["type"],
        challenge=base64url_to_bytes(json_dict["challenge"]),
        origin=json_dict["origin"],
    )

    # Populate optional values if set
    if "crossOrigin" in json_dict:
        client_data.cross_origin = bool(json_dict["crossOrigin"])

    if "tokenBinding" in json_dict:
        token_binding_dict = json_dict["tokenBinding"]
        # Some U2F devices set a string to `token_binding`, in which case ignore it
        # (isinstance replaces the exact `type(...) is dict` check).
        if isinstance(token_binding_dict, dict):
            if "status" not in token_binding_dict:
                raise InvalidClientDataJSONStructure(
                    'token_binding missing required property "status"'
                )
            token_binding = TokenBinding(status=token_binding_dict["status"])
            # Handle optional values; coerce id to str without shadowing builtins.
            if "id" in token_binding_dict:
                token_binding.id = str(token_binding_dict["id"])
            client_data.token_binding = token_binding

    return client_data
| 33.454545 | 83 | 0.665308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 658 | 0.298007 |
9454ae456058a5c984b91c0a770883c558f45cd2 | 1,934 | py | Python | tests/python/pants_test/backend/jvm/tasks/jvm_compile/test_resource_mapping.py | WamBamBoozle/pants | 98cadfa1a5d337146903eb66548cfe955f2627b3 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/backend/jvm/tasks/jvm_compile/test_resource_mapping.py | WamBamBoozle/pants | 98cadfa1a5d337146903eb66548cfe955f2627b3 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/backend/jvm/tasks/jvm_compile/test_resource_mapping.py | WamBamBoozle/pants | 98cadfa1a5d337146903eb66548cfe955f2627b3 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.tasks.jvm_compile.resource_mapping import ResourceMapping
from pants_test.base_test import BaseTest
class ResourceMappingTest(BaseTest):
  """Tests for ResourceMapping parsing of resource-mapping test-data files.

  The original repeated the long fixture path five times; it is now built
  from a single base-directory constant, keeping each test method's
  behavior and name unchanged.
  """

  # Directory holding the resource-mapping fixtures, relative to the repo root.
  _TEST_DATA_DIR = 'tests/python/pants_test/backend/jvm/tasks/jvm_compile/test-data'

  def _mapping_for(self, fixture):
    """Build a ResourceMapping over the named fixture directory."""
    return ResourceMapping('{}/{}'.format(self._TEST_DATA_DIR, fixture))

  def _assert_broken(self, fixture, exception_type):
    """Assert that reading mappings from the fixture raises exception_type."""
    resource_mapping = self._mapping_for(fixture)
    with self.assertRaises(exception_type):
      resource_mapping.mappings

  def test_resource_mapping_ok(self):
    resource_mapping = self._mapping_for('resource_mapping')
    self.assertEquals(2, len(resource_mapping.mappings))

  def test_resource_mapping_short(self):
    self._assert_broken('resource_mapping-broken-short',
                        ResourceMapping.TruncatedFileException)

  def test_resource_mapping_long(self):
    self._assert_broken('resource_mapping-broken-long',
                        ResourceMapping.TooLongFileException)

  def test_resource_mapping_mangled(self):
    self._assert_broken('resource_mapping-broken-mangled',
                        ResourceMapping.UnparseableLineException)

  def test_resource_mapping_noitems(self):
    self._assert_broken('resource_mapping-broken-missing-items',
                        ResourceMapping.MissingItemsLineException)
| 39.469388 | 117 | 0.798345 | 1,497 | 0.774043 | 0 | 0 | 0 | 0 | 0 | 0 | 614 | 0.317477 |
9457ac0da5564b2ab782626c607ee29294300e54 | 1,480 | py | Python | examples/basic_events.py | Shadofer/dogey | 1d9f1b82aa7ecfe6d9776feb03364ef9eb00bd63 | [
"MIT"
] | 3 | 2021-05-18T09:46:30.000Z | 2022-03-26T14:23:24.000Z | examples/basic_events.py | Shadofer/dogey | 1d9f1b82aa7ecfe6d9776feb03364ef9eb00bd63 | [
"MIT"
] | null | null | null | examples/basic_events.py | Shadofer/dogey | 1d9f1b82aa7ecfe6d9776feb03364ef9eb00bd63 | [
"MIT"
] | null | null | null | from dogey import Dogey
from dogey.classes import Message, User, Room, Context
from dogey.exceptions import DogeyCommandError
# Create the bot client; replace the placeholder tokens with real credentials.
# Commands use the '.' prefix.
dogey = Dogey(token='your token', refresh_token='your refresh token', prefix='.')
bot = dogey.bot

@dogey.event
async def on_ready():
    # Fired once the client is connected; announce and create the demo room.
    print(f'{bot.name} is up! (prefix is {bot.prefix})')
    await dogey.create_room('dogey.py', description='A simple event example bot', is_private=False)

@dogey.event
async def on_room_created(room: Room):
    # Dogey auto saves both room details and room members when you get in a room
    print(f'Created room: {room.name}')

@dogey.event
async def on_user_join(user: User, room: Room):
    # Greet every user who enters the room.
    print(f'{user.username} has joined {room.name}')
    await dogey.send(f'Welcome {user.username} to {room.name}!')

@dogey.event
async def on_user_leave(user: User, room: Room):
    print(f'{user.username} has left {room.name}')

@dogey.event
async def on_message(message: Message):
    # Resolve the author from the member cache Dogey keeps for the room.
    author: User = dogey.room_members[message.sent_from]
    print(f'A message has been sent by {author.username}: {message.content}')

@dogey.event
async def on_hand_raised(user: User):
    # Promote anyone who raises their hand to speaker.
    await dogey.add_speaker(user.id)
    await dogey.send(f'Gave {user.username} permission to speak.')
@dogey.event
async def on_room_leave(room: Room):
    # Bug fix: the original f'I\ve left: ...' contained '\v', a vertical-tab
    # escape, printing "I<VT>e left"; the intended text is "I've left".
    print(f"I've left: {room.name}")
@dogey.event
async def on_command_error(ctx: Context, error: DogeyCommandError):
    # Report command failures back into the room chat.
    await dogey.send(f'{error.command_name}: {error.message}')

# Blocking call: connects and runs the event loop until shutdown.
dogey.start()
| 31.489362 | 99 | 0.728378 | 0 | 0 | 0 | 0 | 1,223 | 0.826351 | 1,119 | 0.756081 | 519 | 0.350676 |
94581e6740d331c24508ac853a1aafc12d13bcc6 | 380 | py | Python | algorithm/deep_learning/neural_network6.py | kake777/python_sample | 3e69c0e89a67f81ced56193524c2f69913262dda | [
"MIT"
] | null | null | null | algorithm/deep_learning/neural_network6.py | kake777/python_sample | 3e69c0e89a67f81ced56193524c2f69913262dda | [
"MIT"
] | null | null | null | algorithm/deep_learning/neural_network6.py | kake777/python_sample | 3e69c0e89a67f81ced56193524c2f69913262dda | [
"MIT"
] | null | null | null | #ミニバッチ学習
import numpy as np
from dataset.mnist import load_mnist

# Mini-batch learning demo: draw a random batch of (image, label) pairs
# from the MNIST training set.
(x_train, t_train), (x_test, t_test) =\
    load_mnist(normalize=True, one_hot_label=True)

print(x_train.shape)
print(t_train.shape)

train_size = x_train.shape[0]
batch_size = 10
# Sample `batch_size` random indices from [0, train_size).
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
# Bug fix: labels must come from t_train (the original used x_train), so each
# image in x_batch is paired with its own one-hot label.
t_batch = t_train[batch_mask]

print(batch_mask)
| 21.111111 | 53 | 0.778947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.055838 |
9459da79cb451077424b3e724720da8c0a06c9ac | 1,281 | py | Python | openport/common/tee.py | Deshdeepak1/openport | 4aefb3281c2a483ed7b02c8574321b5c0d9a954a | [
"MIT"
] | 5 | 2018-06-04T14:45:15.000Z | 2022-01-07T09:02:35.000Z | openport/common/tee.py | Deshdeepak1/openport | 4aefb3281c2a483ed7b02c8574321b5c0d9a954a | [
"MIT"
] | 9 | 2019-11-24T19:58:33.000Z | 2022-03-29T21:54:03.000Z | openport/common/tee.py | Deshdeepak1/openport | 4aefb3281c2a483ed7b02c8574321b5c0d9a954a | [
"MIT"
] | 5 | 2019-08-04T17:56:15.000Z | 2022-02-06T20:15:03.000Z | import sys
class TeeStdOut(object):
    """Duplicate sys.stdout: everything written goes to a log file and to
    the original stdout, like the Unix `tee` command.

    Constructing an instance replaces sys.stdout until close() is called.
    """

    def __init__(self, name, mode):
        # Open the log file and hijack sys.stdout, keeping the original
        # stream so close() can restore it.
        self.file = open(name, mode)
        self.stdout = sys.stdout
        sys.stdout = self

    def close(self):
        # NOTE(review): `self` can never be None here; this guard looks like
        # leftover defensive code.
        if self is None:
            return
        if self.stdout is not None:
            sys.stdout = self.stdout
            self.stdout = None
        if self.file is not None:
            self.file.close()
            self.file = None

    def write(self, data):
        # Fan each write out to both the log file and the real stdout.
        self.file.write(data)
        self.stdout.write(data)

    def flush(self):
        self.file.flush()
        self.stdout.flush()

    def __del__(self):
        # NOTE(review): the bare `return` makes the close() below unreachable,
        # i.e. automatic cleanup on garbage collection is disabled -- confirm
        # this is intentional.
        return
        self.close()
class TeeStdErr(object):
    """Duplicate sys.stderr to a log file and the original stderr
    (counterpart of TeeStdOut)."""

    def __init__(self, name, mode):
        # Open the log file and hijack sys.stderr, keeping the original.
        self.file = open(name, mode)
        self.stderr = sys.stderr
        sys.stderr = self

    def close(self):
        # Restore the original stderr and release the log file.
        if self.stderr is not None:
            sys.stderr = self.stderr
            self.stderr = None
        if self.file is not None:
            self.file.close()
            self.file = None

    def write(self, data):
        self.file.write(data)
        self.stderr.write(data)

    def flush(self):
        self.file.flush()
        self.stderr.flush()

    def __del__(self):
        # NOTE(review): mirrors TeeStdOut.__del__ -- the bare `return` leaves
        # the self.close() on the following line unreachable.
        return
self.close() | 22.875 | 37 | 0.530055 | 1,266 | 0.98829 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9459faf5f1ae64d251e582624318b24b8d865ed6 | 10,624 | py | Python | main.py | vk02169/photobooth_raspi3 | fe2374a3b5359b2bdf908a8383c4ae7ae4ff2598 | [
"Apache-2.0"
] | 1 | 2020-12-18T17:27:10.000Z | 2020-12-18T17:27:10.000Z | main.py | vk02169/photobooth_raspi3 | fe2374a3b5359b2bdf908a8383c4ae7ae4ff2598 | [
"Apache-2.0"
] | null | null | null | main.py | vk02169/photobooth_raspi3 | fe2374a3b5359b2bdf908a8383c4ae7ae4ff2598 | [
"Apache-2.0"
] | null | null | null |
##################################################################################################################################
# The original photobooth application was written by WYLUM. I have borrowed from their code base and the proceeded to enhance/subtract.
# Especially the part where the application uploads to Google Picasa, the code within listalbums.py and credentials.py remains
# unchanged from the original. As does tkkb.py. I claim no credit for it.
##################################################################################################################################
#Imports
import time
import tkMessageBox
import logging
import PIL
from PIL import ImageTk
from Tkinter import *
from camera import *
from util import *
from camconfig import *
from upload import uploadImages
from upload import cleanupUploaders
config=Configurator.instance()
WIDTH = config.getScreenWidth()
HEIGHT = config.getScreenHeight()
#
# main() - to initialize configurator and logging
#
def main():
    """Configure logging to <install>/log/photobooth.log and log startup."""
    logfile = config.getInstallDir() + "/log/photobooth.log"
    logging.basicConfig( filename=logfile,
                         level=logging.INFO,
                         format='%(asctime)s %(levelname)s %(message)s')
    logging.info("--------------------------------")
    logging.info("------STARTING PHOTOBOOTH-------")
    logging.info("--------------------------------")
if __name__ == '__main__':
    main()

# This is a simple GUI, so we allow the main_win singleton to do the legwork
main_win = Tk()
main_win.attributes("-fullscreen",True)

# Base thumbnail position: pixel offset of the top-left thumbnail slot.
BASE_TN_X_COORD=120
BASE_TN_Y_COORD=10
IMG_BORDER_PIXELS = 5

# Thumbnail characteristics: cap thumbnails at roughly a quarter of the
# screen so a 2x2 grid fits. (Integer division under Python 2.)
MAX_TN_WIDTH = WIDTH/2
MAX_TN_HEIGHT = HEIGHT/2-30
MAX_TN_SIZE = (MAX_TN_WIDTH, MAX_TN_HEIGHT)
##################################################################################################################################
# name: displayImage(imgfile, i)
# Displays a thumbnail of the incoming image onto a (grid location) specified by
# the parameter 'i'
##################################################################################################################################
# Keep-alive list: Tkinter only renders PhotoImages that are still referenced
# from Python, so displayImage() appends every created image here.
img_to_disp_array=[]
def displayImage(imgfile, i):
    """Render a thumbnail of `imgfile` at grid slot `i` (two-column layout)."""
    logging.info("In displayImage():...")
    img = PIL.Image.open(imgfile)
    #Create thumbnail of incoming image...
    img.thumbnail(MAX_TN_SIZE, PIL.Image.ANTIALIAS)
    #Calculate coordinates of where to display thumbnail:
    # column from i%2, row from i/2 (integer division under Python 2).
    tn_size = img.size
    x = BASE_TN_X_COORD + (i%2) * (tn_size[0] + IMG_BORDER_PIXELS)
    if (i%2 == 1):
        # Odd slots share the row of the preceding even slot.
        i=i-1
    y = BASE_TN_Y_COORD + (i/2) * (tn_size[1] + IMG_BORDER_PIXELS)
    #...and format it to a displayable format.
    global img_to_disp_array
    img_to_disp = ImageTk.PhotoImage(img)
    img_to_disp_array.append(img_to_disp) #To keep reference alive
    # Finally, display the thumbnail at appropriate locations
    img_display_canvas.create_image([x, y], image=img_to_disp, tags="image", anchor=NW)
    img_display_canvas.update()
    logging.info("Exiting displayImage()")
##################################################################################################################################
##################################################################################################################################
# name: displayImages(picsarray)
# Receives an array of imagefiles in the incoming parameter.
# -for each imagefile
# -invokes the displayImage() method to display image appropriately
##################################################################################################################################
def displayImages(picsarray):
    """Show each captured image in `picsarray` as a thumbnail on the canvas.

    Bug fix: the original assigned `img_to_disp_array=[]` to a *local* name,
    so the module-level keep-alive list (appended to by displayImage(), which
    declares it global) was never actually cleared and PhotoImage references
    accumulated across sessions. Declaring it global makes the reset work as
    the original comment intended.
    """
    global img_to_disp_array
    img_to_disp_array = []  # Reset results of prior thumbnail display
    for i, imgfile in enumerate(picsarray):
        displayImage(imgfile, i)
# Font used for all text drawn on the canvas.
CANVAS_TEXT_FONT = ("Helvetica", 50)

def startCountdown(camera, canvas, countdown_text="Smile!"):
    """Run the on-screen countdown while the camera preview is live.

    Starts the preview, clears the canvas, animates `countdown_text` one
    letter at a time via the CountdownText utility class, then clears the
    text and stops the preview.
    """
    camera.start_preview()
    clearCanvas(canvas, "all")
    logging.info("Calling CountdownText()...")
    CountdownText(text=countdown_text,
                  font=CANVAS_TEXT_FONT,
                  canvas=canvas,
                  fill="purple",
                  x=WIDTH/2,
                  y=HEIGHT/2+155,
                  anchor=Tkinter.CENTER)
    logging.info("Back from CountdownText()")
    clearCanvas(canvas, "text")
    camera.stop_preview()
##################################################################################################################################
# name: startBooth()
# Kicks off the capture process:
# -for each picture to be taken
# -starts countdown.
# -snaps the picture (which automatically archives the pictures locally)
# -once all pictures are snapped, displays them in a grid on the canvas.
# -finally uploads these pictures to external cloud(s)
# @param
# @return
##################################################################################################################################
# Image files captured during the current session (as returned by camera.snap()).
picsarray=[]
def startBooth():
    """Run one photobooth session: count down, snap, display, upload.

    For each configured picture (capped at 4): show a "Get ready!" blink,
    run the countdown, snap and archive the shot. Afterwards the thumbnails
    are shown in a grid and the images uploaded to the external cloud(s).
    The splash screen is restored in all cases.
    """
    logging.info("In startBooth()...")
    num_pics = config.getNumPics()
    if num_pics > 4: #capping off the number of pictures (within a selfie) to 4 irrespective of configuration
        num_pics = 4
    i=0
    global picsarray
    picsarray=[] #reset
    camera=getCamera()
    clearCanvas(img_display_canvas)
    try:
        # Capture the pics first.
        # For each pic...
        while i < num_pics:
            #...get ready for (next) picture in the session.
            BlinkingText(text="Get ready!",
                         font=CANVAS_TEXT_FONT,
                         fill="purple",
                         blink_freq=.5,
                         num_blinks=3,
                         canvas=img_display_canvas,
                         x=WIDTH/2, y=HEIGHT/2,
                         anchor=Tkinter.CENTER)
            #Count it down and...
            startCountdown(camera, img_display_canvas, countdown_text=config.getCountdownText())
            #...snap the picture. Add it to the list of images and then...
            picsarray.append( camera.snap() )
            if picsarray[i] is None:
                messageBox("Error", "Error", "No pictures were taken!")
                break
            clearCanvas(img_display_canvas);
            i=i+1
        ## End while
        clearCanvas(img_display_canvas, "all");
        # ...display and...
        displayImages(picsarray)
        # ...upload
        uploadImages(picsarray)
        time.sleep(5) # Arbitrary time to allow people to view thumbnail(s)
        clearCanvas(img_display_canvas, "all");
        logging.info("Exiting startBooth()")
    except Exception as e:
        printExceptionTrace("In startBooth()!", e)
    finally:
        camera.close()
        splashText()
##################################################################################################################################
# onClose() - Called in response to the Exit button
##################################################################################################################################
# Handle to a pending Tkinter `after` callback (None when nothing is scheduled).
main_win.after_id = None
def onClose(*args, **kw):
    """Exit handler: cancel pending callbacks, clean up uploaders, quit Tk."""
    if main_win.after_id is not None:
        main_win.after_cancel(main_win.after_id)
    logging.info("onClose(): Cleaning up uploaders...")
    cleanupUploaders()
    main_win.quit()
##################################################################################################################################
##################################################################################################################################
# name - configure()
# Bring up the Configurator UI to maintain configurations
##################################################################################################################################
def configure(main_win):
    """Open the configuration UI on top of the given main window."""
    Configurator.instance().displayConfigUI(main_win)
#################################################################
# Bottom button bar: Exit / Configure buttons, shown only when enabled
# in the configuration.
frame = Frame(main_win)
if config.showExitConfigureBtns():
    exit_btn=Button(frame, text="Exit", command=onClose)
    exit_btn.grid(row=1, column=0)
    # lambda discards the Tk event args before calling configure()
    customize_btn=Button(frame, text="Configure", command=lambda *args: configure(main_win))
    customize_btn.grid(row=1, column=1)
frame.pack()
# This is to catch the WM_DELETE_WINDOW arriving when the 'X' is clicked
# on the main window. We want a clean shutdown, and so we're trapping it.
main_win.protocol('WM_DELETE_WINDOW', onClose)
##################################################################################################################################
# onClick()
# This method is called in response to a mouse click or a touch on the
# touch screen.
##################################################################################################################################
def onClick(*args):
    """Canvas click/touch handler: start a photo-booth session."""
    startBooth()
##################################################################################################################################
# Create the canvas on which to draw image
# Register the onClick() handler with the canvas
# Main display canvas; all UI (splash text, countdown, thumbnails) draws here.
img_display_canvas = Canvas(main_win, width=WIDTH, height=HEIGHT, borderwidth=10, relief="ridge")
img_display_canvas.pack()
img_display_canvas.bind('<Button-1>', onClick) #Register handler for any click (or touch if touchscreen)
def splashText():
    # Idle-state splash: orange background plus an invitation message.
    img_display_canvas.config(background="orange")
    img_display_canvas.create_text(WIDTH/2, HEIGHT/2 , text="Touch/Click here to snap!", fill="purple", font=CANVAS_TEXT_FONT, tags="text")
splashText()
#The main windows loop
main_win.wm_title("VKFamily Photobooth")
main_win.mainloop()
################################################################################################################################## | 38.078853 | 139 | 0.481645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,878 | 0.553276 |
945a434c2fd10a32fcc164824e6dff4647c862a3 | 3,251 | py | Python | globalCounter/server/counter_server.py | aratz-lasa/globalCounter | 9ac0841b0e7d1bc71cd6205649c1b07bcf77e01f | [
"MIT"
] | 2 | 2019-03-24T19:09:59.000Z | 2019-03-25T07:15:06.000Z | globalCounter/server/counter_server.py | aratz-lasa/globalCounter | 9ac0841b0e7d1bc71cd6205649c1b07bcf77e01f | [
"MIT"
] | null | null | null | globalCounter/server/counter_server.py | aratz-lasa/globalCounter | 9ac0841b0e7d1bc71cd6205649c1b07bcf77e01f | [
"MIT"
] | null | null | null | import socket
from multiprocessing import Pool, Queue, Manager, cpu_count
from ..protocol.methods import *
from ..protocol.models import *
from ..various.abc import CounterServer
MAX_WORKERS = cpu_count()
class UDPCounterServer(CounterServer):
def __init__(self, ip="0.0.0.0", port=0, max_workers=MAX_WORKERS):
self.ip = ip
self.port = port
self.sock = None
self.is_running = False
# workers
self.manager = Manager()
self.topic_sum_map = self.manager.dict()
self.pending_requests = Queue()
self.workers_pool = Pool(
processes=max_workers, initializer=self.worker_loop)
def run(self) -> None:
self.bind_socket()
self.is_running = True
try:
while self.is_running:
msg, addr = self.sock.recvfrom(MSG_MAXIMUM_LENGTH)
self.pending_requests.put((msg, addr))
except Exception as err:
if self.is_running:
raise err
def worker_loop(self) -> None:
while True:
msg, addr = self.pending_requests.get()
re_msg = get_response(msg, self.topic_sum_map)
self.send_response(re_msg, addr)
def send_response(self, message: bytes, addr) -> None:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(message, addr)
def bind_socket(self) -> None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.ip, self.port))
def stop(self) -> None:
self.is_running = False
self.workers_pool.terminate()
self.workers_pool.join()
self.sock.close()
class TCPCounterServer(CounterServer):
    """TCP counter server: an accept loop queues connections which a process
    pool answers via :func:`get_response` over a shared topic->sum map."""

    def __init__(self, ip="0.0.0.0", port=0, max_workers=MAX_WORKERS):
        self.ip = ip
        self.port = port
        self.sock = None
        self.is_running = False
        # workers: shared state lives in a Manager so worker processes see it
        self.manager = Manager()
        self.topic_sum_map = self.manager.dict()
        self.pending_requests = Queue()
        self.workers_pool = Pool(
            processes=max_workers, initializer=self.worker_loop)

    def run(self) -> None:
        """Bind, listen and pump accepted connections into the work queue."""
        self.bind_socket()
        self.is_running = True
        try:
            while self.is_running:
                conn, addr = self.sock.accept()
                self.pending_requests.put((conn, addr))
        except Exception as err:
            # stop() closes the socket, which makes accept raise; only
            # re-raise when the failure was not a deliberate shutdown.
            if self.is_running:
                raise err

    def worker_loop(self) -> None:
        """Worker-process body: serve queued connections forever."""
        while True:
            conn, addr = self.pending_requests.get()
            msg = conn.recv(MSG_MAXIMUM_LENGTH)
            re_msg = get_response(msg, self.topic_sum_map)
            self.send_response(re_msg, conn)

    def send_response(self, message: bytes, conn) -> None:
        """Send the reply on *conn* and close it.

        Fix: close in a finally clause so a failed send no longer leaks
        the accepted connection.
        """
        try:
            conn.send(message)
        finally:
            conn.close()

    def bind_socket(self) -> None:
        """Create, bind and listen on the TCP socket (SO_REUSEADDR set)."""
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.ip, self.port))
        self.sock.listen(1)

    def stop(self) -> None:
        """Stop the accept loop, tear down workers and close the socket.

        Fix: the worker pool was never terminated/joined here (unlike
        UDPCounterServer.stop), leaking the worker processes on shutdown.
        """
        self.is_running = False
        self.workers_pool.terminate()
        self.workers_pool.join()
        self.sock.close()
| 31.563107 | 71 | 0.609966 | 3,038 | 0.934482 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.011074 |
945a5840ff79964bce5f89f0237c5ec63c7f6bc4 | 4,364 | py | Python | src/fparser/two/tests/fortran2003/test_format_item_c1002.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | [
"BSD-3-Clause"
] | 33 | 2017-08-18T16:31:27.000Z | 2022-03-28T09:43:50.000Z | src/fparser/two/tests/fortran2003/test_format_item_c1002.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | [
"BSD-3-Clause"
] | 319 | 2017-01-12T14:22:07.000Z | 2022-03-23T20:53:25.000Z | src/fparser/two/tests/fortran2003/test_format_item_c1002.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | [
"BSD-3-Clause"
] | 17 | 2017-10-13T07:12:28.000Z | 2022-02-11T14:42:18.000Z | # Copyright (c) 2019 Science and Technology Facilities Council
# All rights reserved.
# Modifications made as part of the fparser project are distributed
# under the following license:
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Test Fortran 2003 constraint C1002 : This file tests the support
for a format specification. The standard C1002 tests are performed via
test_format_specification_r1002.py as the constraints are associated
with R1002. This file picks up any tests that need to act directly on
this class.
'''
import pytest
from fparser.two.Fortran2003 import Format_Item_C1002
from fparser.two.utils import InternalError, NoMatchError
def test_data_edit_descriptor_error(f2003_create):
    '''If the item following a P edit descriptor is a Format_Item whose
    second entry is not a Data_Edit_Descriptor, matching must fail. A
    format-item-list is the only other construct that yields a Format_Item
    instance, and it exercises exactly that code path.

    '''
    source = "2P ('hello')"
    with pytest.raises(NoMatchError) as err:
        _ = Format_Item_C1002(source)
    assert "Format_Item_C1002: '2P ('hello')'" in str(err.value)
def test_internal_errors1(f2003_create, monkeypatch):
    '''str() assumes the node's ``items`` list has exactly two entries, so
    any other length must raise an InternalError reporting the actual size.

    '''
    node = Format_Item_C1002("2P F2.2")
    monkeypatch.setattr(node, "items", [None, None, None])
    with pytest.raises(InternalError) as err:
        str(node)
    assert "should be length 2 but found '3'" in str(err.value)
def test_internal_error2(f2003_create, monkeypatch):
    '''str() assumes entry 0 of ``items`` holds content, so an empty/None
    entry 0 must raise an InternalError.

    '''
    node = Format_Item_C1002("2P F2.2")
    monkeypatch.setattr(node, "items", [None, node.items[1]])
    with pytest.raises(InternalError) as err:
        str(node)
    message = str(err.value)
    assert ("items entry 0 should contain a format items object but it "
            "is empty or None") in message
def test_internal_error3(f2003_create, monkeypatch):
    '''str() assumes entry 1 of ``items`` holds content, so an empty/None
    entry 1 must raise an InternalError.

    '''
    node = Format_Item_C1002("2P F2.2")
    monkeypatch.setattr(node, "items", [node.items[0], None])
    with pytest.raises(InternalError) as err:
        str(node)
    message = str(err.value)
    assert ("items entry 1 should contain a format items object but it "
            "is empty or None") in message
| 40.785047 | 74 | 0.745417 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,195 | 0.732126 |
945a6543444e52c5a044844b767d07e928876dbc | 1,447 | py | Python | nomaj/nj/nj_fixed.py | monomonedula/nomaj | d81e8919966bd2fc649594be647d8d98f0ff6a8f | [
"MIT"
] | null | null | null | nomaj/nj/nj_fixed.py | monomonedula/nomaj | d81e8919966bd2fc649594be647d8d98f0ff6a8f | [
"MIT"
] | null | null | null | nomaj/nj/nj_fixed.py | monomonedula/nomaj | d81e8919966bd2fc649594be647d8d98f0ff6a8f | [
"MIT"
] | null | null | null | from typing import Callable, Awaitable, Dict
from koda import Result, Ok
from nvelope import JSON
from nomaj.nomaj import Nomaj, Req, Resp
class NjFixed(Nomaj):
    """Nomaj that always answers with one pre-built response."""

    def __init__(self, resp: Resp):
        # Wrapped once in Ok so respond_to can return it unchanged.
        self._resp: Ok[Resp] = Ok(resp)

    async def respond_to(self, request: Req) -> Result[Resp, Exception]:
        return self._resp

    def meta(self) -> Dict[str, JSON]:
        resp = self._resp.val
        return {
            "nomaj": {"type": self.__class__.__name__},
            "response": {
                "status": resp.status,
                "headers": list(resp.headers.items()),
            },
        }
class NjCallable(Nomaj):
    """Adapter turning an async callable into a Nomaj."""

    def __init__(self, cb: Callable[[Req], Awaitable[Result[Resp, Exception]]]):
        self._cb: Callable[[Req], Awaitable[Result[Resp, Exception]]] = cb

    async def respond_to(self, request: Req) -> Result[Resp, Exception]:
        return await self._cb(request)

    def meta(self) -> Dict[str, JSON]:
        kind = self.__class__.__name__
        return {"nomaj": {"type": kind}}
class NjWithMeta(Nomaj):
    """Decorator that forwards requests to a wrapped Nomaj while exposing a
    fixed, caller-supplied metadata mapping."""

    def __init__(self, nj: Nomaj, meta: Dict[str, JSON]):
        self._nj: Nomaj = nj
        self._meta: Dict[str, JSON] = meta

    async def respond_to(self, request: Req) -> Result[Resp, Exception]:
        return await self._nj.respond_to(request)

    def meta(self) -> Dict[str, JSON]:
        return self._meta
| 27.301887 | 80 | 0.580511 | 1,297 | 0.896337 | 0 | 0 | 0 | 0 | 319 | 0.220456 | 53 | 0.036628 |
945b7f901e1c11d43f4642fe9b5624c95774e9b1 | 2,542 | py | Python | flask_app/dash/orange/models.py | julien-bonnefoy/website | a00d70697cc3a367dcdb32ca62ed29493029cf91 | [
"Apache-2.0"
] | null | null | null | flask_app/dash/orange/models.py | julien-bonnefoy/website | a00d70697cc3a367dcdb32ca62ed29493029cf91 | [
"Apache-2.0"
] | null | null | null | flask_app/dash/orange/models.py | julien-bonnefoy/website | a00d70697cc3a367dcdb32ca62ed29493029cf91 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from sqlalchemy import Column, Integer, String, Sequence, Text, Table
from sqlalchemy import ForeignKey, Boolean, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
import redis
import rq
import datetime as dt
from flask_app.database import PkModel, reference_col, db
from flask_app.extensions import bcrypt
import base64
from datetime import datetime, timedelta
from hashlib import md5
import json
import os
from time import time
from flask import current_app, url_for
from flask_login import UserMixin
import jwt
from flask_app.extensions import login_manager
Base = declarative_base()
# Many-to-many association between jobs and skills.
# Fix: ForeignKey targets must name existing tables. In this module the job
# and skill tables are "jobs_table" (class Jobs) and "skills_table" (class
# Skill); the previous targets "job.job_id" / "skill.skill_id" referenced
# tables not defined here, consistent with nothing else in this file (all
# other FKs — 'types.type_ix', 'suppliers.supplier_ix', 'categories.…' —
# reference local tablenames).
jobs_skills = Table(
    "jobs_skills",
    Base.metadata,
    Column("job_id", Integer, ForeignKey("jobs_table.job_id")),
    Column("skill_id", Integer, ForeignKey("skills_table.skill_id"))
)
class LearningObjects(Base):
    """Learning-object catalogue entries (courses, materials, ...)."""
    __tablename__ = "learning_objects"
    index = Column(Integer, Sequence('lo_ix_seq'), primary_key=True)  # surrogate key
    lo_id = Column(String)  # external identifier of the learning object
    lo_title = Column(Text)
    lo_description = Column(Text)
    lemma_lo_title = Column(Text)  # lemmatised title (for text matching)
    lemma_lo_description = Column(Text)  # lemmatised description
    active_status = Column(Boolean)
    lo_type = Column(Integer, ForeignKey('types.type_ix'))
    supplier = Column(Integer, ForeignKey('suppliers.supplier_ix'))
    parent_subject = Column(Integer, ForeignKey('categories.category_ix'))
class Suppliers(Base):
    """Lookup table of learning-object suppliers."""
    __tablename__ = "suppliers"
    supplier_ix = Column(Integer, Sequence('supplier_ix_seq'), primary_key=True)  # surrogate key
    supplier_name = Column(String)
class Types(Base):
    """Lookup table of learning-object types."""
    __tablename__ = "types"
    type_ix = Column(Integer, Sequence('type_ix_seq'), primary_key=True)  # surrogate key
    type_name = Column(String)
class Categories(Base):
    """Lookup table of subject categories."""
    __tablename__ = "categories"
    category_ix = Column(Integer, Sequence('category_ix_seq'), primary_key=True)  # surrogate key
    category_name = Column(String)
class Jobs(Base):
    """Job positions, linked many-to-many to skills via jobs_skills.

    NOTE(review): several mapper inconsistencies to confirm/fix:
    - relationship("LearningObjects", back_populates="jobs") expects a
      'jobs' attribute on LearningObjects, which this module does not define.
    - the jobs_skills association's FKs reference tables 'job'/'skill',
      while this class maps to 'jobs_table'.
    """
    __tablename__: str = "jobs_table"
    job_id = Column(Integer, primary_key=True)
    jon_name = Column(String)  # NOTE(review): likely a typo for 'job_name' — confirm before renaming
    skills = relationship(
        "Skill", secondary=jobs_skills, back_populates="jobs"
    )
    learning_objects = relationship(
        "LearningObjects", back_populates="jobs"
    )
class Skill(Base):
    """Skills, linked many-to-many to jobs via jobs_skills.

    NOTE(review): relationship target names do not match the classes defined
    in this module — "Job" (class here is Jobs) and "LearningObject" (class
    here is LearningObjects); if those classes are not declared elsewhere,
    mapper configuration will fail. Also back_populates="skills" expects a
    'skills' attribute on the learning-object class, not defined here.
    """
    __tablename__: str = "skills_table"
    skill_id = Column(String, primary_key=True)
    skill_name = Column(String)
    jobs = relationship(
        "Job", secondary=jobs_skills, back_populates="skills"
    )
    learning_objects = relationship(
        "LearningObject", back_populates="skills"
    )
| 28.561798 | 80 | 0.734068 | 1,674 | 0.658537 | 0 | 0 | 0 | 0 | 0 | 0 | 349 | 0.137293 |
945db23a1d2728c5507f15d764b787a34c73c68a | 252 | py | Python | cms/templatetags/watch.py | sandmark/DjangoPerfectSquare | 490686a1780c27d1b592b1771450f2a86ac861cf | [
"MIT"
] | null | null | null | cms/templatetags/watch.py | sandmark/DjangoPerfectSquare | 490686a1780c27d1b592b1771450f2a86ac861cf | [
"MIT"
] | 3 | 2020-02-11T21:47:12.000Z | 2021-06-10T18:24:43.000Z | cms/templatetags/watch.py | sandmark/DjangoPerfectSquare | 490686a1780c27d1b592b1771450f2a86ac861cf | [
"MIT"
] | null | null | null | from django import template
from django.utils.http import urlquote
import re
register = template.Library()


@register.filter
def quote_filepath(url):
    """Percent-encode the path portion of *url*, preserving any leading
    http(s):// scheme prefix unescaped.

    Fix: the original unpacked ``re.split`` into exactly three parts, which
    raised ValueError when the URL had no scheme prefix (1 part) or when
    'http://' occurred again later in the string (>3 parts). Splitting at
    most once and falling back to quoting the whole string handles both.
    """
    parts = re.split(r'(https?://)', url, maxsplit=1)
    if len(parts) == 3:
        _, scheme, path = parts
    else:
        scheme, path = '', url
    return '{}{}'.format(scheme, urlquote(path))
| 22.909091 | 51 | 0.710317 | 0 | 0 | 0 | 0 | 142 | 0.563492 | 0 | 0 | 20 | 0.079365 |
945de4aea7a3b88d286047de11642d31b0128f82 | 7,774 | py | Python | model/transformer.py | scut-bds/exampe_repo_from_scutbds | 6528eeb25d6da53dd4f7eb6b92d534631794aa80 | [
"Apache-2.0"
] | null | null | null | model/transformer.py | scut-bds/exampe_repo_from_scutbds | 6528eeb25d6da53dd4f7eb6b92d534631794aa80 | [
"Apache-2.0"
] | null | null | null | model/transformer.py | scut-bds/exampe_repo_from_scutbds | 6528eeb25d6da53dd4f7eb6b92d534631794aa80 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 South China University of Technology and
# Engineering Research Ceter of Minstry of Education on Human Body Perception.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Model example
# Author: Chen Xiaofeng, Chen Yirong
# Date: 2021.01.05
# Reference:
# [1] https://github.com/tensorflow/tensor2tensor
# [2] https://github.com/huggingface/transformers
import math
import torch
import torch.nn as nn
def gelu(x):
    """Gaussian Error Linear Unit, tanh approximation (as in GPT/BERT)."""
    inner = math.sqrt(2.0 / math.pi) * (x + 0.044715 * x * x * x)
    return 0.5 * x * (1.0 + torch.tanh(inner))
class LayerNorm(nn.Module):
    """Layer normalisation over the last dimension with learnable gain/bias:
    y = a_2 * (x - mean) / (std + eps) + b_2."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normed = (x - mu) / (sigma + self.eps)
        return self.a_2 * normed + self.b_2
class Embedding(nn.Module):
    """Token + sinusoidal position (+ optional token-type) embedding.

    The positional table lays out [sin half | cos half] along the feature
    axis (concatenation, not the interleaved Vaswani layout). Token id 1 is
    the padding token (``padding_idx=1`` and the padding count below).

    Fix: the full (max_len, n_embd) positional matrix was recomputed on CPU
    on every forward pass; it is constant, so it is now built once in
    __init__ and registered as a non-persistent buffer (moves with the
    module, absent from state_dict, values unchanged).
    """

    def __init__(self, config):
        super(Embedding, self).__init__()
        self.max_len = config["n_positions"]
        self.embedding_size = config["n_embd"]
        self.word_embedding = nn.Embedding(config["vocab_size"], self.embedding_size, padding_idx=1)
        # Precompute the sinusoidal position table once (same math as before).
        position_seq = torch.arange(0, self.max_len).unsqueeze(1).repeat(1, int(self.embedding_size/2)).float()
        div = torch.arange(0, self.embedding_size, 2).repeat(self.max_len, 1).float()
        posi_matrix = position_seq/10000**(div/self.embedding_size)
        pe = torch.cat([torch.sin(posi_matrix), torch.cos(posi_matrix)], dim=-1)
        self.register_buffer("position_embedding", pe, persistent=False)

    def forward(self, ids, token_type_ids=None):
        """Return (embeddings, padding_len).

        padding_len[i] counts occurrences of token id 1 in sequence i —
        assumes id 1 appears only as padding (TODO confirm with tokenizer).
        """
        batch_size = ids.size(0)
        sequence_len = ids.size(1)
        position_vector = self.position_embedding[: sequence_len].repeat(batch_size, 1, 1).to(ids.device)
        word_vector = self.word_embedding(ids)
        if token_type_ids is None:
            token_type_vector = torch.zeros_like(word_vector, device=word_vector.device)
        else:
            token_type_vector = self.word_embedding(token_type_ids)
        padding_len = [torch.sum(ids[i] == 1).item() for i in range(batch_size)]
        return (word_vector+position_vector+token_type_vector), padding_len
class FeedForward(nn.Module):
    """Position-wise feed-forward sub-layer with residual + layer norm.

    Computes layer_norm(x + W_out(dropout(gelu(W_in(x))))) with the usual
    4x hidden expansion. Note dropout is applied between the activation and
    the output projection.
    """

    def __init__(self, config):
        super(FeedForward, self).__init__()
        self.ff_in = nn.Linear(config["n_embd"], 4*config["n_embd"])
        self.ff_out = nn.Linear(4*config["n_embd"], config["n_embd"])
        self.acti_func = gelu
        self.resid_dropout = nn.Dropout(config["resid_pdrop"])
        self.layer_norm = LayerNorm(config["n_embd"])

    def forward(self, x):
        expanded = self.acti_func(self.ff_in(x))
        projected = self.ff_out(self.resid_dropout(expanded))
        return self.layer_norm(x + projected)
class MultiHeadAttn(nn.Module):
    """Multi-head scaled dot-product attention with residual + layer norm.

    ``mask`` entries that are True are *masked out* (filled with -inf
    before the softmax). Output is layer_norm(query + proj(attention)).
    """
    def __init__(self, config, qkv_size=None):
        super(MultiHeadAttn, self).__init__()
        self.num_head = config["n_head"]
        # Per-head dimension defaults to n_embd / n_head.
        if qkv_size:
            self.qkv_size = qkv_size
        else:
            self.qkv_size = int(config["n_embd"] / config["n_head"])
        self.Q = nn.Linear(config["n_embd"], self.qkv_size * self.num_head)
        self.K = nn.Linear(config["n_embd"], self.qkv_size * self.num_head)
        self.V = nn.Linear(config["n_embd"], self.qkv_size * self.num_head)
        self.outputlinear = nn.Linear(self.qkv_size * self.num_head, config["n_embd"])
        self.attn_dropout = nn.Dropout(config["attn_pdrop"])
        self.layer_norm = LayerNorm(config["n_embd"])
    def forward(self, query, key, value, mask=None):
        """query: (B, Lq, E); key/value: (B, Lk, E); mask: (B, Lq, Lk) bool,
        True = do not attend. Returns (B, Lq, E)."""
        batch_size = query.size(0)
        query_sl = query.size(1)
        key_sl = key.size(1)
        q, k, v = self.Q(query), self.K(key), self.V(value)
        # Split heads: q/v -> (B, H, L, d); k -> (B, H, d, Lk) for the matmul.
        q = q.view(batch_size, query_sl, self.num_head, self.qkv_size).permute(0, 2, 1, 3)
        k = k.view(batch_size, key_sl, self.num_head, self.qkv_size).permute(0, 2, 3, 1)
        v = v.view(batch_size, key_sl, self.num_head, self.qkv_size).permute(0, 2, 1, 3)
        attn_score = torch.matmul(q, k) / self.qkv_size**0.5
        if mask is not None:
            # Tile the (B, Lq, Lk) mask once per head, then view as
            # (B, H, Lq, Lk); each head gets an identical copy.
            mask = mask.repeat(1, self.num_head, 1)
            mask = mask.view(batch_size, self.num_head, query_sl, key_sl)
            attn_score.masked_fill_(mask, -float("inf"))
        attn_weight = self.attn_dropout(torch.softmax(attn_score, dim=-1))
        # Merge heads back to (B, Lq, H*d) and project to n_embd.
        output = torch.matmul(attn_weight, v).permute(0, 2, 1, 3)
        output = output.contiguous().view(batch_size, -1, self.num_head * self.qkv_size)
        output = self.outputlinear(output)
        return self.layer_norm(query + output)
class Encoder(nn.Module):
    """Single transformer encoder layer: self-attention then feed-forward."""

    def __init__(self, config):
        super(Encoder, self).__init__()
        self.context_attn = MultiHeadAttn(config)
        self.encoder_ff = FeedForward(config)

    def forward(self, context_vector, con_mask):
        """Self-attend over the context (con_mask True = masked) and apply
        the position-wise feed-forward sub-layer."""
        attended = self.context_attn(context_vector, context_vector, context_vector, con_mask)
        # Fix: encoder_ff was constructed but never applied, leaving it as
        # dead (yet trained) parameters; a standard encoder layer applies
        # attention followed by the FFN, as Decoder.forward already does.
        return self.encoder_ff(attended)
class Decoder(nn.Module):
    """Single transformer decoder layer: masked self-attention over the
    response, cross-attention into the encoded context, then feed-forward."""

    def __init__(self, config):
        super(Decoder, self).__init__()
        self.response_attn = MultiHeadAttn(config)
        self.decode_attn = MultiHeadAttn(config)
        self.decoder_ff = FeedForward(config)

    def forward(self, context_vector, response_vector, enc_mask, dec_mask):
        resp_len = response_vector.size(1)
        hidden = self.response_attn(response_vector, response_vector, response_vector, dec_mask)
        # Broadcast the encoder padding mask's first row across every
        # response position for the cross-attention.
        cross_mask = enc_mask[:, :1].repeat(1, resp_len, 1)
        hidden = self.decode_attn(hidden, context_vector, context_vector, cross_mask)
        return self.decoder_ff(hidden)
class Transformer(nn.Module):
    """Encoder-decoder transformer for context->response generation."""
    def __init__(self, config):
        super(Transformer, self).__init__()
        self.config = config
        self.embedding = Embedding(config)  # shared between encoder and decoder inputs
        self.context_encoder = nn.ModuleList([Encoder(config) for _ in range(config["n_layer"])])
        self.response_decoder = nn.ModuleList([Decoder(config) for _ in range(config["n_layer"])])
        self.pro_linear = nn.Linear(config["n_embd"], config["vocab_size"])  # vocabulary logits
    def forward(self, context_ids, token_type_ids, response_ids):
        """Return per-position vocabulary logits of shape (B, L_resp, V).

        Masks are built from pad-token counts, so they assume padding
        (token id 1) occupies only the tail of each sequence — TODO confirm.
        """
        con_seq_len = context_ids.size(1)
        context_vector, con_padding_len = self.embedding(context_ids, token_type_ids)
        # Padding mask: True (==0 after compare) marks padded key positions.
        con_mask = [[[1]*(con_seq_len-con_padding_len[i])+[0]*con_padding_len[i]] for i in range(len(con_padding_len))]
        con_mask = torch.tensor(con_mask, device=context_ids.device).repeat(1, con_seq_len, 1) == 0
        for encoder in self.context_encoder:
            context_vector = encoder(context_vector, con_mask)
        resp_seq_len = response_ids.size(1)
        response_vector, resp_padding_len = self.embedding(response_ids)
        # Causal + padding mask: tril keeps only current-and-earlier tokens.
        resp_mask = [[[1]*(resp_seq_len-resp_padding_len[i])+[0]*resp_padding_len[i]] for i in range(len(resp_padding_len))]
        resp_mask = torch.tril(torch.tensor(resp_mask, device=response_ids.device).repeat(1, resp_seq_len, 1)) == 0
        for decoder in self.response_decoder:
            response_vector = decoder(context_vector, response_vector, con_mask, resp_mask)
        return self.pro_linear(response_vector)
| 46.831325 | 138 | 0.664651 | 6,654 | 0.85593 | 0 | 0 | 0 | 0 | 0 | 0 | 1,090 | 0.140211 |
945e1051e675a474b42fb803eb11282480b657ba | 264 | py | Python | src/lib/idol/dataclass/codegen/schema/primitive_type.py | lyric-com/idol | 285005e9ddaa92b2284b7e9c28cd12f1e34746ec | [
"MIT"
] | null | null | null | src/lib/idol/dataclass/codegen/schema/primitive_type.py | lyric-com/idol | 285005e9ddaa92b2284b7e9c28cd12f1e34746ec | [
"MIT"
] | 2 | 2020-03-24T18:03:10.000Z | 2020-03-31T10:41:56.000Z | src/lib/idol/dataclass/codegen/schema/primitive_type.py | lyric-com/idol | 285005e9ddaa92b2284b7e9c28cd12f1e34746ec | [
"MIT"
] | null | null | null | # DO NOT EDIT
# This file was generated by idol_data, any changes will be lost when idol_data is rerun again
from enum import Enum
class SchemaPrimitiveTypeEnum(Enum):
INT = "int"
DOUBLE = "double"
STRING = "string"
BOOL = "bool"
ANY = "any"
| 22 | 94 | 0.67803 | 130 | 0.492424 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.526515 |
9460da662c249dbfe258e1da02ea4d17876fff61 | 485 | py | Python | tests/test_requirements_files/app.py | Robinson04/serverlesspack | 271fdbc4bcc769a778e369b14e5f491360824512 | [
"MIT"
] | 1 | 2022-03-07T22:32:32.000Z | 2022-03-07T22:32:32.000Z | tests/test_requirements_files/app.py | Robinson04/serverlesspack | 271fdbc4bcc769a778e369b14e5f491360824512 | [
"MIT"
] | 3 | 2021-03-01T19:12:46.000Z | 2021-05-09T11:09:27.000Z | tests/test_requirements_files/app.py | Robinson04/serverlesspack | 271fdbc4bcc769a778e369b14e5f491360824512 | [
"MIT"
] | null | null | null | from pydantic import BaseModel
from typing import Optional
class RequestDataModel(BaseModel):
    """Validated request payload carrying the client's Google sign-in token."""
    loginToken: str  # Google OAuth2 ID token sent by the client
def login_with_google(data: dict):
    """Validate *data* (must contain 'loginToken') and verify it as a
    Google OAuth2 ID token, printing the decoded claims.

    NOTE(review): audience='token_id' looks like a placeholder for the real
    OAuth client id — confirm.
    """
    request_data = RequestDataModel(**data)
    # Imports deferred so the heavy google libraries load only when used.
    from google.oauth2 import id_token
    from google.auth.transport.requests import Request as GoogleRequest
    # Raises if the token's signature, expiry or audience check fails.
    user_infos: Optional[dict] = id_token.verify_oauth2_token(
        id_token=request_data.loginToken, request=GoogleRequest(), audience='token_id'
    )
    print(user_infos)
| 30.3125 | 86 | 0.769072 | 54 | 0.11134 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.020619 |
94611d8fb6626acc66a8ff1fb61998cb3024f470 | 274 | py | Python | nickleback.py | helanan/kill_nickleback | 6cbc37f58cf3e9aedc42d4e2718b8c7549692a15 | [
"MIT"
] | null | null | null | nickleback.py | helanan/kill_nickleback | 6cbc37f58cf3e9aedc42d4e2718b8c7549692a15 | [
"MIT"
] | null | null | null | nickleback.py | helanan/kill_nickleback | 6cbc37f58cf3e9aedc42d4e2718b8c7549692a15 | [
"MIT"
] | null | null | null | songs = { ('Nickelback', 'How You Remind Me'), ('Will.i.am', 'That Power'), ('Miles Davis', 'Stella by Starlight'), ('Nickelback', 'Animals') }
# Using a set comprehension, create a new set that contains all songs that were not performed by Nickelback.
nonNickelback = {}
| 54.8 | 144 | 0.689781 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.791971 |
94619449453a285926187d511d9d1aa00b91410b | 8,761 | py | Python | siam_tracker/benchmarks/otb/butil/seq_config.py | songheony/SPM-Tracker | 41fd91ec42cf9072fe44d45c5bb68993f28a12ad | [
"MIT"
] | 32 | 2019-08-30T09:50:03.000Z | 2021-10-12T08:36:25.000Z | siam_tracker/benchmarks/otb/butil/seq_config.py | songheony/SPM-Tracker | 41fd91ec42cf9072fe44d45c5bb68993f28a12ad | [
"MIT"
] | 3 | 2019-09-05T09:45:52.000Z | 2020-12-02T02:42:08.000Z | siam_tracker/benchmarks/otb/butil/seq_config.py | songheony/SPM-Tracker | 41fd91ec42cf9072fe44d45c5bb68993f28a12ad | [
"MIT"
] | 16 | 2019-09-10T09:04:53.000Z | 2021-09-13T12:44:47.000Z | import zipfile
import shutil
import copy
import json
from PIL import Image
from ..config import *
from ..model import *
from .split_seq import split_seq_TRE
from .shift_bbox import shift_init_BB
import logging
logging.getLogger().setLevel(logging.INFO)
def get_sub_seqs(s, numSeg, evalType):
    """Split sequence *s* into sub-sequences for the given evaluation type.

    TRE keeps all numSeg temporal splits; OPE keeps only the first; SRE
    keeps the first split but replicates it once per spatial shift in
    shiftTypeSet, shifting its initial bounding box. Returns
    (subSeqs, subAnno); also sets s.len, s.s_frames and s.subAnno.
    """
    # Build the absolute frame paths for the full sequence.
    s.len = s.endFrame - s.startFrame + 1
    s.s_frames = [None] * s.len
    for i in range(s.len):
        image_no = s.startFrame + i
        _id = s.imgFormat.format(image_no)
        s.s_frames[i] = s.path + _id
    rect_anno = s.gtRect
    subSeqs, subAnno = split_seq_TRE(s, numSeg, rect_anno)
    s.subAnno = subAnno
    if evalType == 'OPE':
        # One-Pass Evaluation: only the first (full-length) split.
        subS = subSeqs[0]
        subSeqs = []
        subSeqs.append(subS)
        subA = subAnno[0]
        subAnno = []
        subAnno.append(subA)
    elif evalType == 'SRE':
        # Spatial Robustness: one copy per initial-bbox shift type.
        subS = subSeqs[0]
        subA = subAnno[0]
        subSeqs = []
        subAnno = []
        r = subS.init_rect
        # First frame gives the image bounds used to clamp shifted boxes.
        img = Image.open(s.s_frames[0])
        (imgWidth, imgHeight) = img.size
        for i in range(len(shiftTypeSet)):
            # NOTE: rebinds the loop-local 's' to a deep copy of the split.
            s = copy.deepcopy(subS)
            shiftType = shiftTypeSet[i]
            s.init_rect = shift_init_BB(s.init_rect, shiftType,
                                        imgHeight, imgWidth)
            s.shiftType = shiftType
            subSeqs.append(s)
            subAnno.append(subA)
    return subSeqs, subAnno
def setup_seqs(loadSeqs):
    """Build configs for the selected sequences and persist each to disk."""
    for seq in make_seq_configs(loadSeqs):
        logging.info("\t%s\t%s", seq.name, seq.path)
        save_seq_config(seq)
def save_seq_config(seq):
    """Serialize *seq*'s attributes as JSON to <SEQ_SRC>/<name>/cfg.json.

    Fix: open the file in a with-block so it is closed even if the write
    fails, and build the path with os.path.join instead of concatenation.
    """
    string = json.dumps(seq.__dict__, indent=2)
    src = os.path.join(SEQ_SRC, seq.name)
    with open(os.path.join(src, 'cfg.json'), 'wb') as configFile:
        configFile.write(string.encode())
def load_seq_config(seqName):
    """Load <SEQ_SRC>/<seqName>/cfg.json into a Sequence object.

    Fix: the config file handle was opened and never closed; a with-block
    guarantees closure. json.load replaces the read()+loads pair.
    The path is normalised to an absolute directory with a trailing slash.
    """
    src = os.path.join(SEQ_SRC, seqName)
    with open(os.path.join(src, 'cfg.json')) as configFile:
        j = json.load(configFile)
    seq = Sequence(**j)
    seq.path = os.path.join(os.path.abspath(seq.path), '')
    return seq
def load_all_seq_configs():
    """Load the saved configuration of every known sequence."""
    return [load_seq_config(name) for name in get_seq_names('ALL')]
def load_seq_configs(seqNames):
    """Load the saved configuration of each named sequence, in order."""
    return list(map(load_seq_config, seqNames))
def get_seq_names(loadSeqs):
    """Resolve a sequence-selection spec to sequence names.

    A list is returned unchanged. 'all' lists the sequence directory
    (minus the bookkeeping files, unsorted, as before). 'tb50'/'tb100'/
    'tc'/'cvpr13' read the corresponding benchmark list file and return
    sorted names. Any other string is returned as-is (legacy behaviour).

    Fix: the benchmark list files were opened and never closed; the four
    near-identical branches are collapsed into one table-driven read
    inside a with-block.
    """
    if type(loadSeqs) is list:
        return loadSeqs
    key = loadSeqs.lower()
    if key == 'all':
        names = os.listdir(SEQ_SRC)
        for excluded in (ATTR_LIST_FILE, ATTR_DESC_FILE, TB_50_FILE,
                         TB_100_FILE, TC_FILE):
            names.remove(excluded)
        return names
    list_files = {'tb50': TB_50_FILE, 'tb100': TB_100_FILE,
                  'tc': TC_FILE, 'cvpr13': CVPR_13_FILE}
    if key in list_files:
        with open(SEQ_SRC + list_files[key]) as list_file:
            seq_list = list_file.readlines()
        return sorted([x.split('\t')[0].strip() for x in seq_list])
    return loadSeqs
def make_seq_configs(loadSeqs):
    """Build a Sequence object per selected sequence, downloading any
    sequence whose img/ directory is missing (when DOWNLOAD_SEQS is set),
    and reading its attribute list and ground-truth rectangles.

    NOTE(review): the attribute/ground-truth files below are opened without
    being closed — consider with-blocks.
    """
    names = get_seq_names(loadSeqs)
    seqList = []
    for name in names:
        src = SEQ_SRC + name
        imgSrc = src + '/img/'
        path = imgSrc
        if not os.path.exists(src):
            os.makedirs(src)
        if not os.path.exists(imgSrc):
            logging.info(name + ' does not have img directory')
            if DOWNLOAD_SEQS:
                download_sequence(name)
            else:
                logging.error('If you want to download sequences,\n' \
                    + 'check if config.py\'s DOWNLOAD_SEQS is True')
                sys.exit(1)
        # Keep only jpg/png frames, sorted by zero-padded filename.
        imgfiles = sorted(os.listdir(imgSrc))
        imgfiles = [x for x in imgfiles if x.split('.')[1] in ['jpg', 'png']]
        nz, ext, startFrame, endFrame = get_format(name, imgfiles)
        attrSrc = os.path.join(src, ATTR_FILE)
        if not os.path.exists(attrSrc):
            # Extract this sequence's attribute line from the master list
            # and cache it next to the sequence.
            attrlist_src = os.path.join(SEQ_SRC, ATTR_LIST_FILE)
            attrlistFile = open(attrlist_src)
            lines = attrlistFile.readlines()
            attrs = None
            for line in lines:
                if name.lower() in line.lower():
                    attrs = line.split('\t')[1]
                    attrFile = open(attrSrc, 'w')
                    attrFile.write(attrs)
                    attrFile.close()
                    break
            if attrs == None:
                sys.exit(name + ' does not have attrlist')
        attrFile = open(attrSrc)
        lines = attrFile.readline()
        attributes = [x.strip() for x in lines.split(', ')]
        # Frame filename template, e.g. "{0:04d}.jpg".
        imgFormat = "{0}{1}{2}{3}".format("{0:0",nz,"d}.",ext)
        gtFile = open(os.path.join(src, GT_FILE))
        gtLines = gtFile.readlines()
        gtRect = []
        # Ground-truth rows may be tab-, comma- or space-separated.
        for line in gtLines:
            if '\t' in line:
                gtRect.append(list(map(int,line.strip().split('\t'))))
            elif ',' in line:
                gtRect.append(list(map(int,line.strip().split(','))))
            elif ' ' in line:
                gtRect.append(list(map(int,line.strip().split(' '))))
        init_rect = [0,0,0,0]
        seq = Sequence(name, path, startFrame, endFrame,
            attributes, nz, ext, imgFormat, gtRect, init_rect)
        seqList.append(seq)
    return seqList
def get_format(name, imgfiles):
    """Derive frame-file naming info from the first frame filename.

    Returns (nz, ext, startFrame, endFrame): zero-pad width, file
    extension, and the inclusive frame range — with per-sequence
    ground-truth range overrides for a few OTB sequences.
    """
    parts = imgfiles[0].split('.')
    nz = len(parts[0])
    ext = parts[1]
    startFrame = int(parts[0])
    endFrame = startFrame + len(imgfiles) - 1
    # Sequences whose annotated range differs from the frames on disk.
    # (start, end); start None means "keep the derived start frame".
    overrides = {
        "David": (300, 770),
        "Football1": (None, 74),
        "Freeman3": (None, 460),
        "Freeman4": (None, 283),
        "Diving": (None, 215),
    }
    if name in overrides:
        start, end = overrides[name]
        if start is not None:
            startFrame = start
        endFrame = end
    return nz, ext, startFrame, endFrame
def download_sequence(seqName):
    """Download and unpack one OTB sequence archive into SEQ_SRC.

    Sequences that share a single archive (Jogging, Skating2, Human4) are
    split into their per-target directories after extraction; all other
    sequences map one-to-one onto their archive.
    """

    def _split_shared_archive(archive_name, targets, move_init_omit=False):
        # ``targets`` maps the ground-truth suffix ('1'/'2') to the
        # destination directory; None skips that target (Human4-1 has no
        # ground truth and is intentionally not materialized).
        url = DOWNLOAD_URL.format(archive_name)
        download_and_extract_file(url, file_name, SEQ_SRC)
        src = SEQ_SRC + archive_name + '/'
        for suffix, dst in targets.items():
            if dst is None:
                continue
            if not os.path.exists(dst + 'img'):
                shutil.copytree(src + 'img', dst + 'img')
            shutil.move(src + 'groundtruth_rect.%s.txt' % suffix, dst + GT_FILE)
            if move_init_omit:
                # e.g. 'jogging-1.txt' -> <dst>/INIT_OMIT_FILE
                shutil.move(src + '%s-%s.txt' % (archive_name.lower(), suffix),
                            dst + INIT_OMIT_FILE)
        shutil.rmtree(src)

    file_name = SEQ_SRC + seqName + '.zip'
    if seqName in ('Jogging-1', 'Jogging-2'):
        _split_shared_archive('Jogging',
                              {'1': SEQ_SRC + 'Jogging-1/',
                               '2': SEQ_SRC + 'Jogging-2/'},
                              move_init_omit=True)
    elif seqName in ('Skating2-1', 'Skating2-2'):
        _split_shared_archive('Skating2',
                              {'1': SEQ_SRC + 'Skating2-1/',
                               '2': SEQ_SRC + 'Skating2-2/'})
    elif seqName in ('Human4-1', 'Human4-2'):
        # Only Human4-2 has ground truth; Human4-1 is skipped on purpose.
        _split_shared_archive('Human4',
                              {'1': None,
                               '2': SEQ_SRC + 'Human4-2/'})
    else:
        url = DOWNLOAD_URL.format(seqName)
        download_and_extract_file(url, file_name, SEQ_SRC)
        # Archives created on macOS may carry a __MACOSX metadata folder.
        if os.path.exists(SEQ_SRC + '__MACOSX'):
            shutil.rmtree(SEQ_SRC + '__MACOSX')
def download_and_extract_file(url, dst, ext_dst):
    """Placeholder downloader: only logs the URL for manual download.

    ``dst`` (zip path) and ``ext_dst`` (extraction dir) are currently
    unused -- automatic downloading appears to be disabled on purpose.
    """
    logging.info('You can download sequence from {} and unzip it by yourself.'.format(url))
| 34.089494 | 91 | 0.576304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,033 | 0.117909 |
9462eb2500bdbcb6be261010cdb1088e435cd696 | 4,099 | py | Python | main.py | mneeman/Removing_atmospheric_turbulence | cf0dc0ab503d6fe9dfa4f48d2fffc69f1e0899b6 | [
"MIT"
] | 4 | 2020-06-25T15:21:51.000Z | 2021-08-11T14:04:43.000Z | main.py | mneeman/Removing_atmospheric_turbulence | cf0dc0ab503d6fe9dfa4f48d2fffc69f1e0899b6 | [
"MIT"
] | 1 | 2021-05-29T08:22:30.000Z | 2021-05-29T08:22:30.000Z | main.py | mneeman/Removing_atmospheric_turbulence | cf0dc0ab503d6fe9dfa4f48d2fffc69f1e0899b6 | [
"MIT"
] | 2 | 2020-09-23T02:42:59.000Z | 2021-08-11T14:04:45.000Z | import multiprocessing
multiprocessing.set_start_method('spawn', True)
import argparse
import os
import numpy as np
import math
import sys
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.utils import make_grid
from torch.utils.tensorboard import SummaryWriter
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
import torch
from train import train
from test import test, test_moving
from models import UNet, Discriminator, weights_init
from functions import *
from data_util import MyDataset
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
def main(opt):
    """Entry point: build the GAN, then train and/or evaluate per ``opt.mode``."""
    writer = SummaryWriter()
    log_dir = writer.get_logdir()
    for sub_dir in ("images", "test"):
        os.makedirs(os.path.join(log_dir, sub_dir), exist_ok=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Build the generator / discriminator pair and move both to the device.
    generator = UNet(opt.sample_num, opt.channels, opt.batch_size, opt.alpha)
    discriminator = Discriminator(opt.batch_size, opt.alpha)
    generator.to(device=device)
    discriminator.to(device=device)

    # One Adam optimizer per network, with separate learning rates.
    optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr_g, betas=(opt.b1, opt.b2))
    optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr_d, betas=(opt.b1, opt.b2))

    if opt.mode == 'train':
        generator = train(writer, log_dir, device, generator, discriminator, optimizer_G, optimizer_D, opt)
        test(opt, log_dir, generator=generator)
    if opt.mode == 'test':
        test(opt, log_dir)
        test_moving(opt, log_dir)
if __name__ == '__main__':
    # Command-line configuration for the de-turbulence GAN experiment.
    parser = argparse.ArgumentParser()
    # Optimization / training-schedule hyper-parameters.
    parser.add_argument("--n_epochs", type=int, default=100, help="number of epochs of training")
    parser.add_argument("--n_epochs_g", type=int, default=3, help="number of epochs of training only g")
    parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
    parser.add_argument("--lr_d", type=float, default=0.000001, help="adam: learning rate d")
    parser.add_argument("--lr_g", type=float, default=0.00004, help="adam: learning rate g")
    parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
    parser.add_argument("--b2", type=float, default=0.99, help="adam: decay of first order momentum of gradient")
    parser.add_argument("--alpha", type=float, default=0.2, help="Randomized Leaky ReLU activation layer")
    parser.add_argument("--lambda_gp", type=float, default=10, help="Loss weight for gradient penalty")
    # Data / model shape parameters.
    parser.add_argument("--img_size", type=int, default=256, help="size of each image dimension")
    parser.add_argument("--channels", type=int, default=3, help="number of image channels")
    parser.add_argument("--sample_interval", type=int, default=30, help="interval betwen image samples")
    parser.add_argument("--load_model", type=str, default='', help="path to model to continue training")
    parser.add_argument("--windows", type=bool, default=False, help="run on windows")
    parser.add_argument("--mode", type=str, default='test', help="train, test")
    # Dataset locations (defaults are machine-specific; override them on other hosts).
    parser.add_argument("--dataset_dir", type=str, default='/Users/Maayan/Documents/databases/mit_100_frames', help="path to dataset directory")
    parser.add_argument("--reference_dataset_path", type=str, default='/Users/Maayan/Documents/databases/mit', help="path to ground thruth dataset")
    parser.add_argument("--test_dataset_path", type=str, default='/Users/Maayan/Documents/databases/test/frames_256', help="path to test_dataset")
    parser.add_argument("--num_workers_dataloader", type=int, default=0, help="num workers for dataloader")
    parser.add_argument("--sample_num", type=int, default=20, help="number of images to random sample from each video")
    opt = parser.parse_args()
    print(opt)
    main(opt)
| 44.554348 | 148 | 0.732618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,140 | 0.278117 |
94644175cac90d22e5db804ffcb4e25dad573381 | 857 | py | Python | change_brightness.py | qLethon/bin_picking_robot | 807025442925e82cdf5558eaa766edb479c7619a | [
"MIT"
] | null | null | null | change_brightness.py | qLethon/bin_picking_robot | 807025442925e82cdf5558eaa766edb479c7619a | [
"MIT"
] | 1 | 2019-10-28T13:46:40.000Z | 2019-11-09T11:29:16.000Z | change_brightness.py | qLethon/bin_picking_robot | 807025442925e82cdf5558eaa766edb479c7619a | [
"MIT"
] | null | null | null | from PIL import Image, ImageEnhance
import os
import argparse
def change_brightness(source_dir, save_dir, brightness):
    """Write brightness-adjusted copies of every image in ``source_dir``.

    Each output file is named ``<original stem>_<brightness>.jpg`` and is
    saved into ``save_dir`` (created if missing).
    """
    os.makedirs(save_dir, exist_ok=True)
    image_pathes = [f for f in os.scandir(source_dir) if f.is_file()]
    for image_path in image_pathes:
        # Bug fix: str.rstrip('.jpg') strips *characters* from the set
        # {'.', 'j', 'p', 'g'} (e.g. 'img.jpg' -> 'im'), corrupting file
        # stems; use splitext to drop only the extension.
        stem = os.path.splitext(image_path.name)[0]
        save_path = os.path.join(save_dir, stem + "_" + str(brightness) + ".jpg")
        ImageEnhance.Brightness(Image.open(image_path.path)).enhance(brightness).save(save_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--imagedir', type=str, required=True)
parser.add_argument('-s', '--savedir', type=str, required=True)
args = parser.parse_args()
for brightness in (0.5, 0.75, 1.25, 1.5):
change_brightness(args.imagedir, os.path.join(args.savedir, str(brightness)), brightness) | 40.809524 | 107 | 0.70245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.065344 |
946461fc3a4fd6828fbc72c73b2d0410d8ef7f35 | 858 | py | Python | handlers/changing_stickerpack_handl.py | bbt-t/bot-pet-project | 6b0d7862b14fe739be52d87ff8c8610a3f4548e1 | [
"Apache-2.0"
] | null | null | null | handlers/changing_stickerpack_handl.py | bbt-t/bot-pet-project | 6b0d7862b14fe739be52d87ff8c8610a3f4548e1 | [
"Apache-2.0"
] | null | null | null | handlers/changing_stickerpack_handl.py | bbt-t/bot-pet-project | 6b0d7862b14fe739be52d87ff8c8610a3f4548e1 | [
"Apache-2.0"
] | null | null | null | from aiogram.dispatcher import FSMContext
from aiogram.types import CallbackQuery
from handlers.states_in_handlers import UserSettingStates
from loader import dp
from utils.keyboards.start_handl_choice_kb import get_start_keyboard
@dp.callback_query_handler(text='set_skin', state=UserSettingStates.settings)
async def choose_a_sticker_pack(call: CallbackQuery, state: FSMContext) -> None:
    """Ask the user which sticker pack to switch to, in their saved language.

    Triggered by the 'set_skin' callback while in the settings FSM state;
    the state is finished after the prompt is sent.
    """
    async with state.proxy() as data:
        if data.get('lang') == 'ru':
            await call.message.answer(
                # Russian: "Which one do we change to?"
                "На какой меняем?",
                reply_markup=await get_start_keyboard(is_choice_skin=True)
            )
        else:
            await call.message.answer(
                "What are we changing to?",
                reply_markup=await get_start_keyboard(is_choice_skin=True, lang='en')
            )
    await state.finish()
| 37.304348 | 85 | 0.681818 | 0 | 0 | 0 | 0 | 636 | 0.730195 | 558 | 0.640643 | 81 | 0.092997 |
9465b84b222240f9c7b3a1cae7f082ad76af9e0f | 256 | py | Python | DebugLibrary/robotvar.py | lobinho/robotframework-debuglibrary | 7e6f9a005ad60a56bc47820c215ac8efd115a88e | [
"BSD-3-Clause"
] | 93 | 2015-02-06T16:40:27.000Z | 2022-02-10T14:26:23.000Z | DebugLibrary/robotvar.py | lobinho/robotframework-debuglibrary | 7e6f9a005ad60a56bc47820c215ac8efd115a88e | [
"BSD-3-Clause"
] | 55 | 2015-05-21T02:48:01.000Z | 2021-08-13T15:00:01.000Z | DebugLibrary/robotvar.py | stdedos/robotframework-debuglibrary | 5f48ce7e603c096bf258b2118b4ac3db0172675b | [
"BSD-3-Clause"
] | 50 | 2015-05-20T13:52:26.000Z | 2021-11-19T21:23:05.000Z | def assign_variable(robot_instance, variable_name, args):
"""Assign a robotframework variable."""
variable_value = robot_instance.run_keyword(*args)
robot_instance._variables.__setitem__(variable_name, variable_value)
return variable_value
| 42.666667 | 72 | 0.792969 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.152344 |
94698a7593ec8f7eb2b02572abe4aa120407e92f | 1,653 | py | Python | objectModel/Python/tests/storage/test_github.py | rt112000/CDM | 34bd34f9260140a8f8aa02bd87c23033f3daad4c | [
"CC-BY-4.0",
"MIT"
] | 884 | 2019-05-10T02:09:10.000Z | 2022-03-31T14:02:00.000Z | objectModel/Python/tests/storage/test_github.py | spbast/CDM | bf97a3720c97ee4c9df3625084cf8b3bc65ff9c7 | [
"CC-BY-4.0",
"MIT"
] | 171 | 2019-06-10T11:34:37.000Z | 2022-03-31T22:50:12.000Z | objectModel/Python/tests/storage/test_github.py | spbast/CDM | bf97a3720c97ee4c9df3625084cf8b3bc65ff9c7 | [
"CC-BY-4.0",
"MIT"
] | 340 | 2019-05-07T18:00:16.000Z | 2022-03-31T12:00:15.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import json
import unittest
import unittest.mock as mock
import random
from tests.common import async_test
from cdm.storage.github import GithubAdapter
class GithubStorageAdapterTestCase(unittest.TestCase):
    """Unit tests for the CDM GithubAdapter storage adapter."""

    def test_make_corpus_path(self):
        """create_corpus_path maps only the canonical raw-GitHub schemaDocuments URL."""
        adapter = GithubAdapter()
        adapter.timeout = 2000
        adapter.maximum_timeout = 5000
        adapter.number_of_retries = 0

        # Valid path.
        self.assertEqual(adapter.create_corpus_path(
            'https://raw.githubusercontent.com/Microsoft/CDM/master/schemaDocuments/dir1/dir2/file.json'), '/dir1/dir2/file.json')

        # Invalid path (note: 'schemaDocument' without the trailing 's').
        self.assertIsNone(adapter.create_corpus_path('https://raw.githubusercontent.com/Microsoft/CDM/master/schemaDocument/dir1/dir2/file.json'))

    # urlopen is mocked so no network access happens; read() yields JSON bytes.
    @mock.patch('cdm.utilities.network.cdm_http_client.urllib.request.urlopen', new_callable=mock.mock_open, read_data=json.dumps({'Ḽơᶉëᶆ': 'ȋṕšᶙṁ'}).encode())
    @async_test
    async def test_read(self, mock_urlopen):
        """read_async builds the raw-GitHub URL and returns the file contents."""
        adapter = GithubAdapter()
        adapter.timeout = 2000
        adapter.maximum_timeout = 5000

        raw_data = await adapter.read_async('/dir1/dir2/file.json')
        data = json.loads(raw_data)

        # Verify URL.
        self.assertEqual(mock_urlopen.call_args[0][0].full_url, 'https://raw.githubusercontent.com/Microsoft/CDM/master/schemaDocuments/dir1/dir2/file.json')

        self.assertEqual(data, {'Ḽơᶉëᶆ': 'ȋṕšᶙṁ'})  # Verify data.
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 37.568182 | 159 | 0.717483 | 1,323 | 0.784232 | 0 | 0 | 704 | 0.417309 | 512 | 0.303497 | 659 | 0.390634 |
946a29f0318d7d5fe69b7d46e7d524cff9ab803e | 1,162 | py | Python | speedTester/logs/average.py | saurabhcommand/Hello-world | 647bad9da901a52d455f05ecc37c6823c22dc77e | [
"MIT"
] | 1,428 | 2018-10-03T15:15:17.000Z | 2019-03-31T18:38:36.000Z | speedTester/logs/average.py | saurabhcommand/Hello-world | 647bad9da901a52d455f05ecc37c6823c22dc77e | [
"MIT"
] | 1,162 | 2018-10-03T15:05:49.000Z | 2018-10-18T14:17:52.000Z | speedTester/logs/average.py | saurabhcommand/Hello-world | 647bad9da901a52d455f05ecc37c6823c22dc77e | [
"MIT"
] | 3,909 | 2018-10-03T15:07:19.000Z | 2019-03-31T18:39:08.000Z | file1 = open("./logs/pythonlog.txt", 'r+')
avg1 = 0.0
lines1 = 0.0
for line in file1:
lines1 = lines1 + 1.0
avg1 = (avg1 + float(line))
avg1 = avg1/lines1
print(avg1, "for Python with", lines1, "lines")
file2 = open("./logs/clog.txt", 'r+')
avg2 = 0.0
lines2 = 0.0
for line in file2:
lines2 = lines2 + 1.0
avg2 = (avg2 + float(line))
avg2 = avg2/lines2
print(avg2, "for C with", lines2, "lines")
file3 = open("./logs/cpplog.txt", 'r+')
avg3 = 0.0
lines3 = 0.0
for line in file3:
lines3 = lines3 + 1.0
avg3 = (avg3 + float(line))
avg3 = avg3/lines3
print(avg3, "for C++ with", lines3, "lines")
file4 = open("./logs/javalog.txt", 'r+')
avg4 = 0.0
lines4 = 0.0
for line in file4:
lines4 = lines4 + 1.0
avg4 = (avg4 + float(line))
avg4 = avg4/lines4
print(avg4, "for Java with", lines4, "lines")
word = ""
while(word.lower() != "y" and word.lower() != "n"):
word = input("Do you want to wipe the previous log? [Y/N]")
if(word.lower() == "y"):
file1.truncate(0)
file3.truncate(0)
file2.truncate(0)
file4.truncate(0)
print("Done.")
file4.close()
file3.close()
file2.close()
file1.close()
| 21.518519 | 63 | 0.598107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.209122 |
946a4a73330228004fdde94d6a2c3a081179bb5a | 768 | py | Python | test/test_handlers.py | abstractR/ext_logging | b8b6078261644ec46449de70450fd440e3083ffb | [
"Apache-2.0"
] | null | null | null | test/test_handlers.py | abstractR/ext_logging | b8b6078261644ec46449de70450fd440e3083ffb | [
"Apache-2.0"
] | null | null | null | test/test_handlers.py | abstractR/ext_logging | b8b6078261644ec46449de70450fd440e3083ffb | [
"Apache-2.0"
] | null | null | null | import json
import errno
import os
import ext_logging
from . import BaseTestCase, log
class TraceCase(BaseTestCase):
    """Smoke test: configure two handlers on one module and emit a record."""

    def test_multiple_handlers(self):
        """A syslog-style handler and an ELK file handler can coexist for 'test'."""
        log_conf_sysl = {
            'handler': 'ext_logging.handlers.StdOutExtendedSysLogHandler',
            'level': 'DEBUG',
            'json_serializer': json.JSONEncoder,
        }
        log_conf_elk = {
            'handler': 'ext_logging.handlers.ELKFileHandler',
            'level': 'DEBUG',
            'json_serializer': json.JSONEncoder,
            # The ELK handler writes its log files into the current directory.
            'elkdir': '.'
        }
        ext_logging.configure_logs({
            'MODULES': {
                'test': [log_conf_sysl, log_conf_elk],
            }
        })
        # Emitting through both handlers must not raise.
        log.info('here test', json_data={'this': {'does not': 'fail'}})
946bb5cefd90ff90d24a9f15626ae9d52df1b370 | 1,136 | py | Python | setup.py | opacam/python3-tmdb3 | 6d287389f1e09e7e034591782c1a287b816c73bb | [
"BSD-3-Clause"
] | 2 | 2019-03-17T20:31:28.000Z | 2020-10-15T22:03:17.000Z | setup.py | opacam/python3-tmdb3 | 6d287389f1e09e7e034591782c1a287b816c73bb | [
"BSD-3-Clause"
] | 2 | 2019-09-04T10:12:50.000Z | 2020-06-17T07:54:51.000Z | setup.py | opacam/python3-tmdb3 | 6d287389f1e09e7e034591782c1a287b816c73bb | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import sys
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Run from the project directory so relative paths (README, package) resolve.
wd = os.path.dirname(os.path.abspath(__file__))
os.chdir(wd)
sys.path.insert(1, wd)

name = 'tmdb3'

# Pull metadata (author, version, classifiers) straight out of the package.
pkg = __import__('tmdb3')
author, email = pkg.__author__.rsplit(' ', 1)
email = email.strip('<>')
maintainer, maintainer_email = pkg.__maintainer__.rsplit(' ', 1)
maintainer_email = maintainer_email.strip('<>')
version = pkg.__version__
classifiers = pkg.__classifiers__

with open('README.md') as f:
    long_description = f.read()

# requirements.txt is optional; fall back to no install requirements.
try:
    reqs = open(os.path.join(os.path.dirname(__file__),
                             'requirements.txt')).read()
except (IOError, OSError):
    reqs = ''

setup(
    name=name,
    version=version,
    author=author,
    author_email=email,
    maintainer=maintainer,
    maintainer_email=maintainer_email,
    description='TheMovieDB.org APIv3 interface (Python 3.6+)',
    long_description=long_description,
    # README.md is Markdown; without this PyPI renders it as plain text.
    long_description_content_type='text/markdown',
    classifiers=classifiers,
    install_requires=reqs,
    packages=['tmdb3'],
    keywords='themoviedb.org',
    python_requires='>=3.6',
)
| 21.846154 | 64 | 0.68662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.137324 |
946c090b0b918ab6c06ef2242175541fc692fd5e | 1,645 | py | Python | Linked List/142. Linked List Cycle II.py | xli1110/LC | 3c18b8809c5a21a62903060eef659654e0595036 | [
"MIT"
] | 2 | 2021-04-02T11:57:46.000Z | 2021-04-02T11:57:47.000Z | Linked List/142. Linked List Cycle II.py | xli1110/LC | 3c18b8809c5a21a62903060eef659654e0595036 | [
"MIT"
] | null | null | null | Linked List/142. Linked List Cycle II.py | xli1110/LC | 3c18b8809c5a21a62903060eef659654e0595036 | [
"MIT"
] | null | null | null | # Definition for singly-linked list.
class ListNode:
    """Singly-linked list node holding a value and a next pointer."""
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
    def detectCycle(self, head: ListNode) -> ListNode:
        """Return the node where the cycle begins, or None if acyclic.

        Classic Floyd cycle detection: once the slow and fast pointers
        meet inside the cycle, a second pointer restarted from the head
        meets the slow pointer exactly at the cycle entry.
        """
        # Phase 1: detect whether a cycle exists at all.
        slow = head
        fast = head
        has_cycle = False
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
            if slow is fast:
                has_cycle = True
                break
        if not has_cycle:
            return None

        # Phase 2: advance a probe from the head and the slow pointer from
        # the meeting point in lockstep; they converge at the cycle start.
        probe = head
        while probe is not slow:
            probe = probe.next
            slow = slow.next
        return probe
if __name__ == "__main__":
head = ListNode(1)
p = head
node = ListNode(2)
p.next = node
p = p.next
node = ListNode(3)
p.next = node
p = p.next
node = ListNode(4)
p.next = node
p = p.next
start = node
node = ListNode(5)
p.next = node
p = p.next
node = ListNode(6)
p.next = node
p = p.next
node = ListNode(7)
p.next = node
p = p.next
p.next = start
sol = Solution()
print(sol.detectCycle(head).val)
| 19.583333 | 54 | 0.491185 | 1,102 | 0.669909 | 0 | 0 | 0 | 0 | 0 | 0 | 200 | 0.121581 |
946e26227518ff513554910aa81069946aa27b6e | 858 | py | Python | simpleml/models/classifiers/sklearn/mixture.py | ptoman/SimpleML | a829ee05da01a75b64982d91a012e9274b6f7c6e | [
"BSD-3-Clause"
] | 15 | 2018-08-19T19:36:23.000Z | 2021-11-09T17:47:18.000Z | simpleml/models/classifiers/sklearn/mixture.py | ptoman/SimpleML | a829ee05da01a75b64982d91a012e9274b6f7c6e | [
"BSD-3-Clause"
] | 75 | 2020-10-11T17:58:59.000Z | 2022-03-29T22:34:54.000Z | simpleml/models/classifiers/sklearn/mixture.py | ptoman/SimpleML | a829ee05da01a75b64982d91a012e9274b6f7c6e | [
"BSD-3-Clause"
] | 4 | 2018-04-30T23:09:42.000Z | 2022-01-19T08:03:18.000Z | '''
Wrapper module around `sklearn.mixture`
'''
__author__ = 'Elisha Yadgaran'
from .base_sklearn_classifier import SklearnClassifier
from simpleml.models.classifiers.external_models import ClassificationExternalModelMixin
from sklearn.mixture import BayesianGaussianMixture, GaussianMixture
'''
Gaussian Mixture
'''
class WrappedSklearnBayesianGaussianMixture(BayesianGaussianMixture, ClassificationExternalModelMixin):
    """sklearn BayesianGaussianMixture combined with the SimpleML classification mixin."""
    pass
class SklearnBayesianGaussianMixture(SklearnClassifier):
    """SimpleML model wrapper around sklearn's BayesianGaussianMixture."""
    def _create_external_model(self, **kwargs):
        # Instantiate the wrapped sklearn estimator with the given kwargs.
        return WrappedSklearnBayesianGaussianMixture(**kwargs)
class WrappedSklearnGaussianMixture(GaussianMixture, ClassificationExternalModelMixin):
    """sklearn GaussianMixture combined with the SimpleML classification mixin."""
    pass
class SklearnGaussianMixture(SklearnClassifier):
    """SimpleML model wrapper around sklearn's GaussianMixture."""
    def _create_external_model(self, **kwargs):
        # Instantiate the wrapped sklearn estimator with the given kwargs.
        return WrappedSklearnGaussianMixture(**kwargs)
| 26.8125 | 103 | 0.825175 | 526 | 0.613054 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.102564 |
946e637bb8b70153f7f53eb205e32d60fd2680ee | 1,355 | py | Python | Configuration/Generator/python/ZPrime5000JJ_8TeV_TuneCUETP8M1_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | Configuration/Generator/python/ZPrime5000JJ_8TeV_TuneCUETP8M1_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | Configuration/Generator/python/ZPrime5000JJ_8TeV_TuneCUETP8M1_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
#pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.0),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
crossSection = cms.untracked.double(0.00002497),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'NewGaugeBoson:ffbar2gmZZprime = on',
'Zprime:gmZmode = 0',
'32:m0 =5000',
'32:onMode = off',
'32:onIfAny = 1',
'32:onIfAny = 2',
'32:onIfAny = 3',
'32:onIfAny = 4',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
| 42.34375 | 75 | 0.512177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 290 | 0.214022 |
946e6ae2d052be8d2efbe4c1190acf5f785c81d2 | 32,747 | py | Python | fairseq/modules/Z_layer/multi_phrase_attention.py | salvation-z/fairseq | baea7343dfa1a8f0f316c6978ba66a9a60c6130d | [
"MIT"
] | 1 | 2020-03-26T02:33:19.000Z | 2020-03-26T02:33:19.000Z | fairseq/modules/Z_layer/multi_phrase_attention.py | salvation-z/fairseq | baea7343dfa1a8f0f316c6978ba66a9a60c6130d | [
"MIT"
] | null | null | null | fairseq/modules/Z_layer/multi_phrase_attention.py | salvation-z/fairseq | baea7343dfa1a8f0f316c6978ba66a9a60c6130d | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copied from multihead_attention.py
# Change it for phrase level gaussian attention
# TODO:
# 1. Graph based function
# 2. Convlution based function
# Phrase_args
# 1. generate_function
# 2. parse_function
# 3. center_first
# 4. window_size
# Phrase_info
# Notimplemented yet
import math
from typing import Dict, Optional, Tuple
from math import ceil
import torch
import torch.nn.functional as F
from fairseq import utils
from torch import Tensor, nn
from torch.nn import Parameter
from fairseq.incremental_decoding_utils import with_incremental_state
# import torchsnooper
# from torch_geometric.nn import GATConv, GCNConv
class PhraseGenerator(nn.Module):
    """
    Phrase level representation generator.

    1. Parses the token sequence into phrases via :class:`PhraseBuilder`.
    2. Reduces each phrase's tokens to one vector with ``generate_function``.
    """

    def __init__(
        self,
        phrase_args,
    ):
        """
        Args:
            phrase_args: namespace carrying at least ``generate_function``
                (reduction applied over each phrase's tokens) and
                ``center_first`` (whether the 1st token is the phrase
                center; only checked for the graph-based variants).
        """
        super().__init__()
        generate_function = phrase_args.generate_function
        center_first = phrase_args.center_first
        self.__parse_func__ = PhraseBuilder(phrase_args)
        # Basic pooling functions: reduce over the within-phrase token dim (2).
        if generate_function == 'max-pooling':
            self.__type__ = generate_function
            # torch.max over a dim returns (values, indices); keep the values.
            self.__repr_func__ = lambda tokens: torch.max(tokens, 2)[0]
        elif generate_function in ('average-pooling', 'averate-pooling'):
            # The misspelled 'averate-pooling' is kept for backward
            # compatibility with existing configs.
            self.__type__ = generate_function
            # Bug fix: torch.mean over a dim returns a plain tensor (no
            # indices), so the original trailing ``[0]`` wrongly dropped
            # the batch dimension instead of selecting pooled values.
            self.__repr_func__ = lambda tokens: torch.mean(tokens, 2)
        # Graph based functions -- not implemented yet.
        elif generate_function == 'GAT':
            assert type(center_first) == bool
            self.__type__ = generate_function
            raise NotImplementedError
        elif generate_function == 'GCN':
            assert type(center_first) == bool
            self.__type__ = generate_function
            raise NotImplementedError
        # Convolution based function -- not implemented yet.
        elif generate_function == 'CNN':
            raise NotImplementedError
        else:
            # Fallback path.  NOTE(review): ``tokens[0]`` indexes the batch
            # dimension of the parsed tensor, not the first token of each
            # phrase -- looks suspicious but is kept as-is.
            self.__repr_func__ = lambda tokens: tokens[0]
        return

    def forward(
        self,
        x,
        phrase_info,
    ):
        """
        Args:
            x (Tensor): (bsz*head_num, seq_len, head_dim) attention-layer input
            phrase_info (dict): parsing metadata, updated by the parser

        Returns:
            Tensor: (bsz*head_num, phrase_num, head_dim) phrase representations
            dict: the updated ``phrase_info``
        """
        parsed, phrase_info = self.__parse_func__(x, phrase_info)
        output = self.__repr_func__(parsed)
        return output, phrase_info
# Undone
# 1. fixed_window √
# 2. graph based ×
class PhraseBuilder:
    def __init__(self, phrase_args):
        """Split token sequences into phrase chunks.

        Args:
            phrase_args: namespace with ``parse_function`` and, for the
                'fixed_window' strategy, ``window_size``.
        """
        self.parse_function = phrase_args.parse_function
        if self.parse_function == 'fixed_window':
            assert hasattr(phrase_args, 'window_size'), (
                'Using fixed window, but the size of window is not indicated'
            )
            self.window_size = phrase_args.window_size

    def __call__(self, x, phrase_info):
        """Chunk ``x`` along its sequence dimension into fixed-size phrases.

        Args:
            x (Tensor): (bsz*head_num, seq_len, head_dim) attention input
            phrase_info (dict): receives 'fixed_mu', 'fixed_sigma' and
                'padding_size' entries describing the chunking.

        Returns:
            Tensor of shape (bsz*head_num, phrase_num, window_size, head_dim)
            plus the updated ``phrase_info`` dict.
        """
        if self.parse_function != 'fixed_window':
            # Only the fixed-window strategy is implemented.
            return None

        device = x.device
        win = self.window_size
        batch = x.size(0)          # actually bsz * head_num
        seq_len = x.size(1)
        num_chunks = ceil(seq_len / win)
        padded_len = win * num_chunks
        extra = padded_len - seq_len

        # Zero-pad the sequence dimension up to a multiple of the window,
        # then split it into equally sized chunks stacked on a new dim.
        padded = F.pad(x.transpose(1, -1), (0, extra)).transpose(1, -1)
        result = torch.stack(padded.chunk(num_chunks, dim=1), dim=1)

        # Gaussian prior per chunk: mean at the chunk center, std = win / 4.
        centers = torch.arange(int(win / 2), padded_len, win, device=device)
        fixed_mu = centers.repeat(batch, seq_len, 1)
        fixed_sigma = torch.full((batch, seq_len, num_chunks), win / 4, device=device)

        phrase_info['fixed_mu'] = fixed_mu
        phrase_info['fixed_sigma'] = fixed_sigma
        phrase_info['padding_size'] = extra
        assert fixed_mu.size(2) == num_chunks
        assert fixed_sigma.size(2) == num_chunks
        return result, phrase_info
# Undone
# 1. reset para (for max/mean pooling there is no para ~~)
# 2. forward √
# 3. init √
@with_incremental_state
class MultiPhraseAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
Note:
1. By default the torch version MHA is turned on in MultiHeadAttention, but it is deleted here
2. The add_zero_attention is also deleted here, because i have no idea what it is
"""
    def __init__(
        self,
        embed_dim,
        num_heads,
        kdim=None,
        vdim=None,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        self_attention=False,
        encoder_decoder_attention=False,
        phrase_args=None,
        apply_phrase=False,
    ):
        """Build projections for the base multi-head and/or gaussian attention.

        ``phrase_args`` is mandatory and selects which of the two attention
        variants are active (``gaussian_attention`` / ``multihead_attention``);
        at least one must be on.  ``apply_phrase`` additionally builds a
        PhraseGenerator and is only valid together with gaussian attention.
        Note: ``add_zero_attn`` is accepted but never read in this class.
        """
        super().__init__()
        # what ever mode is running, phrase args should be given
        assert phrase_args is not None
        self.phrase_args = phrase_args
        # if both attention is turned on, there will be two W_k and W_q (W_v will remain the same as origin)
        self.gaussian_attention = self.phrase_args.gaussian_attention
        self.multihead_attention = self.phrase_args.multihead_attention
        assert self.multihead_attention or self.gaussian_attention, (
            'At least one attention should be added'
        )
        # init for phrase repr
        self.apply_phrase = apply_phrase
        # If apply_phrase is set True, we suppose that the key is tokens
        # If apply_phrase is set False, we suppose that the key is phrases
        if(self.apply_phrase):
            self.phrase_encoder = PhraseGenerator(phrase_args)
            assert self.gaussian_attention
        # original args
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        # Standard 1/sqrt(d) attention scaling factor.
        self.scaling = self.head_dim ** -0.5
        # Note:
        # 1. if self_attention&gaussian_attention = True, apply_phrase should also be True
        # 2. if encoder_decoder_attention=True, apply_phrase should be False
        self.self_attention = self_attention
        if(self.self_attention and self.gaussian_attention):
            assert self.apply_phrase
        self.encoder_decoder_attention = encoder_decoder_attention
        if(self.encoder_decoder_attention):
            assert not self.apply_phrase
        assert not self.self_attention or self.qkv_same_dim, (
            "Self-attention requires query, key and " "value to be of the same size"
        )
        # projection layers: separate k/q projections per active attention
        # variant; v and the output projection are shared between them.
        if(self.gaussian_attention):
            self.k_proj_gauss = nn.Linear(self.kdim, embed_dim, bias=bias)
            self.q_proj_gauss = nn.Linear(embed_dim, embed_dim, bias=bias)
        if(self.multihead_attention):
            self.k_proj_base = nn.Linear(self.kdim, embed_dim, bias=bias)
            self.q_proj_base = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        # Optional learned key/value biases, one key bias per active variant.
        if add_bias_kv:
            if(self.gaussian_attention):
                self.bias_k_gauss = Parameter(torch.Tensor(1, 1, embed_dim))
            if(self.multihead_attention):
                self.bias_k_base = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k_gauss = self.bias_v = self.bias_k_base = None
        self.reset_parameters()
        self.onnx_trace = False
    def prepare_for_onnx_export_(self):
        """Switch the module into ONNX-export mode (sets a flag read elsewhere)."""
        self.onnx_trace = True
    def reset_parameters(self):
        """(Re-)initialize all projection weights and optional kv biases.

        When q/k/v share the same dimensionality, the xavier gain is scaled
        by 1/sqrt(2), which was empirically observed to converge better.
        Only the projections for the active attention variants exist.
        """
        if self.qkv_same_dim:
            # Empirically observed the convergence to be much better with
            # the scaled initialization
            if(self.gaussian_attention):
                nn.init.xavier_uniform_(
                    self.k_proj_gauss.weight, gain=1 / math.sqrt(2))
                nn.init.xavier_uniform_(
                    self.q_proj_gauss.weight, gain=1 / math.sqrt(2))
            if(self.multihead_attention):
                nn.init.xavier_uniform_(
                    self.k_proj_base.weight, gain=1 / math.sqrt(2))
                nn.init.xavier_uniform_(
                    self.q_proj_base.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
        else:
            if(self.gaussian_attention):
                nn.init.xavier_uniform_(self.k_proj_gauss.weight)
                nn.init.xavier_uniform_(self.q_proj_gauss.weight)
            if(self.multihead_attention):
                nn.init.xavier_uniform_(self.k_proj_base.weight)
                nn.init.xavier_uniform_(self.q_proj_base.weight)
            nn.init.xavier_uniform_(self.v_proj.weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.out_proj.bias is not None:
            nn.init.constant_(self.out_proj.bias, 0.)
        # kv bias parameters exist only when add_bias_kv was requested.
        if self.bias_k_gauss is not None:
            nn.init.xavier_normal_(self.bias_k_gauss)
        if self.bias_k_base is not None:
            nn.init.xavier_normal_(self.bias_k_base)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)
def gauss_builder(self, mus, sigmas, weights, seq_length):
"""
Generate Gauss attention
Args:
mus (Tensor): the mu of the gauss attention for each sequence (bsz * heads, src_len, phrase_num)
sigmas (Tensor): the sigma of the gauss attention for each sequence (bsz * heads, src_len, phrase_num)
weights (Tensor): the weight of each gauss distribution (bsz * head_num, src_len, phrase_num)
seq_length (int): the length of sequences
Return:
attention (Tensor): The attention generated by token and phrase repr (bsz * heads, seq_len, seq_len)
"""
def gauss_distribution(mu, sigma, x):
x = x.float()
base = torch.exp(-(x - mu) * (x - mu) / (2 * sigma * sigma))
return base / (math.sqrt(2 * math.pi) * sigma)
device = weights.device
bsz, seq_len, phrase_num = mus.size()
x = [torch.arange(0, seq_length, device=device) for i in range(bsz)]
y = torch.zeros(bsz, seq_len, seq_len, device=device)
# for bsz, src_len, phrase_num
for batch, (m, s, w) in enumerate(zip(mus, sigmas, weights)):
for tok, (mu, sigma, weight, i) in enumerate(zip(m, s, w, x)):
for a, b, c in zip(mu, sigma, weight):
y[batch, tok] += c * gauss_distribution(a, b, i)
gauss_attention = y
return gauss_attention
    def gauss_builder_v2(self, mus, sigmas, weights, seq_length):
        """
        Generate Gauss attention (vectorized version of `gauss_builder`).
        Args:
            mus (Tensor): the mu of the gauss attention for each sequence (bsz * heads, src_len, phrase_num)
            sigmas (Tensor): the sigma of the gauss attention for each sequence (bsz * heads, src_len, phrase_num)
            weights (Tensor): the weight of each gauss distribution (bsz * head_num, src_len, phrase_num)
            seq_length (int): the length of sequences
        Return:
            attention (Tensor): The attention generated by token and phrase repr (bsz * heads, seq_len, seq_len)
        """
        def gauss_distribution(mu, sigma, x):
            # Broadcast mu/sigma over the position axis, then evaluate the
            # normal pdf N(x; mu, sigma) at every position in x.
            mu = mu.unsqueeze(-1).expand(-1, -1, -1, x.size(-1))
            sigma = sigma.unsqueeze(-1).expand(-1, -1, -1, x.size(-1))
            x = x.float()
            base = torch.exp(-(x - mu) * (x - mu) / (2 * sigma * sigma))
            return base / (math.sqrt(2 * math.pi) * sigma)
        device = weights.device
        bsz, seq_len, phrase_num = mus.size()
        # NOTE(review): `weights` is expanded to last-dim `seq_len` while the
        # position grid below uses `seq_length` -- this implicitly assumes
        # seq_len == seq_length; confirm callers always satisfy that.
        weights = weights.unsqueeze(-1).expand(-1, -1, -1, seq_len)
        # size: bsz * head_num, seq_len, phrase_num, seq_len
        x = torch.arange(0., seq_length, device=device).repeat(bsz, seq_len, phrase_num, 1)
        y = gauss_distribution(mus, sigmas, x) * weights
        # Sum the phrase_num Gaussians into one mixture per (token, position).
        y = y.sum(dim=-2)
        gauss_attention = y
        return gauss_attention
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
phrase_info: dict = None,
need_phrase: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
query: tokens(source side: seq, bsz, embed_dim)
key: phrase repr
value: tokens(source/target side)
phrase_info (dict, optional): used for phrase parsing
need_phrase (bool, False): return the phrase repr
"""
if need_head_weights:
need_weights = True
key_phrase = None
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
# Here in self_attention, only query is needed
# project should be applied before multiheads
if self.self_attention:
if(self.multihead_attention):
q_base = self.q_proj_base(query)
k_base = self.k_proj_base(query)
if(self.gaussian_attention):
q_gauss = self.q_proj_gauss(query)
k_gauss = self.k_proj_gauss(query)
v = self.v_proj(query)
# In encoder_decoder attention, phrase(k) and token(v) are provided by encoder
# while token(q) is provided by decoder
elif self.encoder_decoder_attention:
# Basic multihead attention's k&v are provided by encoder and k = v
if(self.multihead_attention):
q_base = self.q_proj_base(query)
if key is None:
assert value is None
k_base = v = None
else:
k_base = self.k_proj_base(key)
v = self.v_proj(key)
# Gaussian attention's key&value are provided by encoder but key!=value
# Not that there is no need to build phrase in decoder, because it is done by the encoder
if(self.gaussian_attention):
q_gauss = self.q_proj_gauss(query)
if key is None:
assert value is None
k_gauss = v = None
else:
assert key is not None
assert value is not None
k_gauss = self.k_proj_gauss(key)
v = self.v_proj(value)
else:
# Note:
# If both key and value are provided, and apply_phrase is set False,
# we supposed that key is phrase repr,
# which means no PhraseEncoder will be added here
assert key is not None and value is not None
if(self.multihead_attention):
q_base = self.q_proj_base(query)
k_base = self.k_proj_base(key)
if(self.gaussian_attention):
q_gauss = self.q_proj_gauss(query)
k_gauss = self.k_proj_gauss(key)
v = self.v_proj(value)
if(self.multihead_attention):
q_base *= self.scaling
if(self.gaussian_attention):
q_gauss *= self.scaling
if self.bias_k_base is not None:
k_base = torch.cat([k_base, self.bias_k_base.repeat(1, bsz, 1)])
if self.bias_k_gauss is not None:
k_gauss = torch.cat([k_gauss, self.bias_k_gauss.repeat(1, bsz, 1)])
if(self.bias_k_base or self.bias_k_gauss):
assert self.bias_v is not None
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(
key_padding_mask.size(0), 1),
],
dim=1,
)
# embed_dim = head_dim * head_num
# q: (tgt_len, bsz, embed_dim) -> (bsz * head_num, tgt_len, head_dim)
# k: (phrase_num, bsz, embed_dim) -> (bsz * head_num, phrase_num, head_dim)
# v: (src_len, bsz, embed_dim) -> (bsz * head_num, scr_len, head_dim)
# Now, the implement suppose fixed window~
# TODO graph based function is not supported yet
if(self.multihead_attention):
q_base = (
q_base.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k_base is not None:
k_base = (
k_base.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if(self.gaussian_attention):
q_gauss = (
q_gauss.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k_gauss is not None:
k_gauss = (
k_gauss.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if(self.apply_phrase):
key_phrase, phrase_info = self.phrase_encoder(k_gauss, phrase_info)
k_gauss = key_phrase
else:
key_phrase = k_gauss
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
# From saved_state get keys
if "prev_key_base" in saved_state:
assert self.multihead_attention
_prev_key_base = saved_state["prev_key_base"]
assert _prev_key_base is not None
prev_key_base = _prev_key_base.view(
bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k_base = prev_key_base
else:
assert k_base is not None
k_base = torch.cat([prev_key_base, k_base], dim=1)
if "prev_key_gauss" in saved_state:
assert self.gaussian_attention
_prev_key_gauss = saved_state["prev_key_gauss"]
assert _prev_key_gauss is not None
prev_key_gauss = _prev_key_gauss.view(
bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k_gauss = prev_key_gauss
else:
assert k_gauss is not None
k_gauss = torch.cat([prev_key_gauss, k_gauss], dim=1)
# From saved_state get values
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(
bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
# apply saved mask
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert v is not None
assert k_base or k_gauss
key_padding_mask = MultiPhraseAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k_base.size(1),
static_kv=static_kv,
)
# save the newest state
if(self.multihead_attention):
saved_state["prev_key_base"] = k_base.view(
bsz, self.num_heads, -1, self.head_dim)
if(self.gaussian_attention):
saved_state["prev_key_gauss"] = k_gauss.view(
bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(
bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(
incremental_state, saved_state)
if(self.multihead_attention):
assert k_base is not None
src_len = k_base.size(1)
else:
assert k_gauss is not None
src_len = k_gauss.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
# calc multihead attention
if(self.multihead_attention):
base_attn = torch.bmm(q_base, k_base.transpose(1, 2))
else:
base_attn = None
# calc gaussian attention
if(self.gaussian_attention):
gauss_weight = torch.bmm(q_gauss, k_gauss.transpose(1, 2))
gauss_attn = self.gauss_builder_v2(
phrase_info['fixed_mu'], phrase_info['fixed_sigma'], gauss_weight, tgt_len)
if(base_attn is None):
base_attn = torch.zeros_like(gauss_attn)
else:
gauss_attn = torch.zeros_like(base_attn)
# add attention together (maybe add after softmax is better? )
gauss_attn = gauss_attn.to(base_attn.device)
attn_weights = gauss_attn + base_attn
attn_weights = MultiPhraseAttention.apply_sparse_mask(
attn_weights, tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [
bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(
2).to(torch.bool), float("-inf")
)
attn_weights = attn_weights.view(
bsz * self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v
# apply softmax and dropout
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(
attn_weights_float.type_as(attn_weights),
p=self.dropout,
training=self.training,
)
# apply attention
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [
bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(
tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
if(need_phrase):
assert key_phrase is not None
return attn, attn_weights, key_phrase
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
    @torch.jit.export
    def reorder_incremental_state(
        self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor
    ):
        """Reorder buffered internal state (for incremental generation).

        Selects the batch dimension of every cached tensor according to
        ``new_order`` (e.g. after beam-search reordering).
        """
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                input_buffer_k = input_buffer[k]
                if input_buffer_k is not None:
                    # NOTE(review): for cross-attention, if the cached batch
                    # dim already matches new_order the whole buffer is
                    # treated as up to date and the loop `break`s (not
                    # `continue`s) -- mirrors fairseq MultiheadAttention;
                    # confirm this is intentional here too.
                    if self.encoder_decoder_attention and input_buffer_k.size(0) == new_order.size(0):
                        break
                    input_buffer[k] = input_buffer_k.index_select(0, new_order)
            incremental_state = self._set_input_buffer(
                incremental_state, input_buffer)
        return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
    def _set_input_buffer(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        buffer: Dict[str, Optional[Tensor]],
    ):
        """Store `buffer` as this layer's cached attention state."""
        return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
    def upgrade_state_dict_named(self, state_dict, name):
        """Split legacy fused ``in_proj_weight``/``in_proj_bias`` checkpoint
        entries into separate q/k/v projection tensors, in place.

        NOTE(review): the upgraded keys are ``q_proj``/``k_proj``/``v_proj``,
        while this module's parameters are named ``q_proj_base``,
        ``q_proj_gauss``, etc. -- confirm such legacy checkpoints are still
        expected to load into this class.
        """
        prefix = name + "." if name != "" else ""
        items_to_add = {}
        keys_to_remove = []
        for k in state_dict.keys():
            if k.endswith(prefix + "in_proj_weight"):
                # in_proj_weight used to be q + k + v with same dimensions
                dim = int(state_dict[k].shape[0] / 3)
                items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
                items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim: 2 * dim]
                items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim:]
                keys_to_remove.append(k)
                k_bias = prefix + "in_proj_bias"
                if k_bias in state_dict.keys():
                    # weight and bias share the same leading (3 * dim) size,
                    # so recomputing dim from the weight tensor is equivalent
                    dim = int(state_dict[k].shape[0] / 3)
                    items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
                    items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
                        dim: 2 * dim
                    ]
                    items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim:]
                    keys_to_remove.append(prefix + "in_proj_bias")
        for k in keys_to_remove:
            del state_dict[k]
        for key, value in items_to_add.items():
            state_dict[key] = value
| 39.984127 | 114 | 0.585458 | 31,764 | 0.969775 | 0 | 0 | 27,279 | 0.832845 | 0 | 0 | 8,192 | 0.250107 |
946ec27fdd23802b4feeaeb4d5ee19100b13e2f5 | 140 | py | Python | tools/bacommon/__init__.py | ritiek/ballistica | 5f909d0b91bfbed3e96c21dbf342616a2d2e7b41 | [
"MIT"
] | null | null | null | tools/bacommon/__init__.py | ritiek/ballistica | 5f909d0b91bfbed3e96c21dbf342616a2d2e7b41 | [
"MIT"
] | null | null | null | tools/bacommon/__init__.py | ritiek/ballistica | 5f909d0b91bfbed3e96c21dbf342616a2d2e7b41 | [
"MIT"
] | null | null | null | # Released under the MIT License. See LICENSE for details.
#
"""Bits of functionality common to ballistica client and server components."""
| 35 | 78 | 0.771429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.978571 |
946f05e7e0483f9687ceb2a81c265b7213be7125 | 12,818 | py | Python | tensorflow_ranking/python/keras/estimator_test.py | renyi533/ranking | 77f280075e220946d0935b44b64770f4dabb37d0 | [
"Apache-2.0"
] | 2,482 | 2018-12-03T20:49:40.000Z | 2022-03-30T19:36:01.000Z | tensorflow_ranking/python/keras/estimator_test.py | wangziliang11/ranking | 6cf8f70a8533ba15abbfb5f50db17cb01fc56410 | [
"Apache-2.0"
] | 304 | 2018-12-04T02:01:40.000Z | 2022-03-29T21:58:08.000Z | tensorflow_ranking/python/keras/estimator_test.py | wangziliang11/ranking | 6cf8f70a8533ba15abbfb5f50db17cb01fc56410 | [
"Apache-2.0"
] | 449 | 2018-12-04T01:16:52.000Z | 2022-03-27T08:29:49.000Z | # Copyright 2021 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for Keras Estimator."""
import os
from absl.testing import parameterized
import tensorflow as tf
from google.protobuf import text_format
from tensorflow_ranking.python import data
from tensorflow_ranking.python.keras import estimator as estimator_lib
from tensorflow_ranking.python.keras import losses
from tensorflow_ranking.python.keras import metrics
from tensorflow_ranking.python.keras import model
from tensorflow_ranking.python.keras import network
from tensorflow_serving.apis import input_pb2
_SIZE = 'example_list_size'
_ELWC_PROTO = text_format.Parse(
"""
context {
features {
feature {
key: "query_length"
value { int64_list { value: 3 } }
}
}
}
examples {
features {
feature {
key: "unigrams"
value { bytes_list { value: "tensorflow" } }
}
feature {
key: "utility"
value { float_list { value: 0.0 } }
}
feature {
key: "dense_feature"
value { float_list { value: -0.5 value: 0.5 } }
}
feature {
key: "doc_weight"
value { float_list { value: 0.0 } }
}
}
}
examples {
features {
feature {
key: "unigrams"
value { bytes_list { value: ["learning", "to", "rank"] } }
}
feature {
key: "utility"
value { float_list { value: 1.0 } }
}
feature {
key: "dense_feature"
value { float_list { value: 0.5 value: 0.5 } }
}
feature {
key: "doc_weight"
value { float_list { value: 1.0 } }
}
}
}
""", input_pb2.ExampleListWithContext())
_LABEL_FEATURE = 'utility'
_PADDING_LABEL = -1.
_EXAMPLE_WEIGHT_FEATURE = 'doc_weight'
def _get_feature_columns():
  """Builds the feature columns used by the tests.

  Returns:
    A (context_feature_columns, example_feature_columns, custom_objects)
    tuple. `custom_objects` maps names to functions (here the query-length
    normalizer) that must be supplied when re-loading the Keras model.
  """
  def _normalizer_fn(t):
    return 2 * t
  context_feature_columns = {
      'query_length':
          tf.feature_column.numeric_column(
              'query_length',
              shape=(1,),
              default_value=0,
              dtype=tf.int64,
              normalizer_fn=_normalizer_fn)
  }
  example_feature_columns = {
      'utility':
          tf.feature_column.numeric_column(
              'utility',
              shape=(1,),
              default_value=_PADDING_LABEL,
              dtype=tf.float32),
      'unigrams':
          tf.feature_column.embedding_column(
              tf.feature_column.categorical_column_with_vocabulary_list(
                  'unigrams',
                  vocabulary_list=[
                      'ranking', 'regression', 'classification', 'ordinal'
                  ]),
              dimension=10),
      'dense_feature':
          tf.feature_column.numeric_column(
              'dense_feature',
              shape=(2,),
              default_value=0.0,
              dtype=tf.float32)
  }
  custom_objects = {'_normalizer_fn': _normalizer_fn}
  return context_feature_columns, example_feature_columns, custom_objects
def _get_example_weight_feature_column():
  """Returns the numeric column for per-example weights (default 1.0)."""
  return tf.feature_column.numeric_column(
      _EXAMPLE_WEIGHT_FEATURE, dtype=tf.float32, default_value=1.)
# This network needs actual layers, otherwise the estimator training fails.
class _DummyUnivariateRankingNetwork(network.UnivariateRankingNetwork):
  """Dummy univariate ranking network with a simple scoring function."""
  def __init__(self,
               context_feature_columns=None,
               example_feature_columns=None,
               name='dummy_ranking_network',
               **kwargs):
    super(_DummyUnivariateRankingNetwork, self).__init__(
        context_feature_columns=context_feature_columns,
        example_feature_columns=example_feature_columns,
        name=name,
        **kwargs)
    # Single dense layer producing one score per example; real trainable
    # variables are required for estimator training to run.
    self._score_layer = tf.keras.layers.Dense(units=1)
  def score(self, context_features=None, example_features=None, training=True):
    """Scores one example: concatenates its flattened features and applies
    the dense layer. Context features are ignored by this dummy network."""
    example_input = [
        tf.keras.layers.Flatten()(example_features[name])
        for name in sorted(self.example_feature_columns)
    ]
    return self._score_layer(tf.concat(example_input, axis=1))
class KerasModelToEstimatorTest(tf.test.TestCase, parameterized.TestCase):
  """End-to-end tests for converting a Keras ranking model to an Estimator."""
  def setUp(self):
    """Builds network/loss/metrics/optimizer and writes a small ELWC
    TFRecord fixture (20 copies of _ELWC_PROTO) to a temp file."""
    super(KerasModelToEstimatorTest, self).setUp()
    (context_feature_columns, example_feature_columns,
     custom_objects) = _get_feature_columns()
    self._context_feature_columns = context_feature_columns
    self._example_feature_columns = example_feature_columns
    # Remove label feature from example feature column.
    del self._example_feature_columns[_LABEL_FEATURE]
    self._custom_objects = custom_objects
    self._network = _DummyUnivariateRankingNetwork(
        context_feature_columns=self._context_feature_columns,
        example_feature_columns=self._example_feature_columns)
    self._loss = losses.get(
        losses.RankingLossKey.SOFTMAX_LOSS,
        reduction=tf.compat.v2.losses.Reduction.SUM_OVER_BATCH_SIZE)
    self._eval_metrics = metrics.default_keras_metrics()
    self._optimizer = tf.keras.optimizers.Adagrad(learning_rate=0.1)
    self._config = tf.estimator.RunConfig(
        keep_checkpoint_max=2, save_checkpoints_secs=2)
    self._data_file = os.path.join(tf.compat.v1.test.get_temp_dir(),
                                   'test_elwc.tfrecord')
    serialized_elwc_list = [
        _ELWC_PROTO.SerializeToString(),
    ] * 20
    if tf.io.gfile.exists(self._data_file):
      tf.io.gfile.remove(self._data_file)
    with tf.io.TFRecordWriter(self._data_file) as writer:
      for serialized_elwc in serialized_elwc_list:
        writer.write(serialized_elwc)
  def tearDown(self):
    """Removes the TFRecord fixture written by setUp."""
    super(KerasModelToEstimatorTest, self).tearDown()
    if tf.io.gfile.exists(self._data_file):
      tf.io.gfile.remove(self._data_file)
    self._data_file = None
  def _make_input_fn(self, weights_feature_name=None):
    """Return an input function, serves weights defined in weights_feature_name.
    Args:
      weights_feature_name: (str) A string defines the weights feature in
        dataset. None if no weights is used.
    Returns:
      A function serves features and labels. Weights will be served in features.
    """
    def _input_fn():
      context_feature_columns, example_feature_columns, _ = (
          _get_feature_columns())
      context_feature_spec = tf.feature_column.make_parse_example_spec(
          list(context_feature_columns.values()))
      label_column = tf.feature_column.numeric_column(
          _LABEL_FEATURE, dtype=tf.float32, default_value=_PADDING_LABEL)
      weight_column = (
          _get_example_weight_feature_column()
          if weights_feature_name == _EXAMPLE_WEIGHT_FEATURE else None)
      example_fc_list = (
          list(example_feature_columns.values()) + [label_column] +
          ([weight_column] if weight_column else []))
      example_feature_spec = tf.feature_column.make_parse_example_spec(
          example_fc_list)
      dataset = data.build_ranking_dataset(
          file_pattern=self._data_file,
          data_format=data.ELWC,
          batch_size=10,
          context_feature_spec=context_feature_spec,
          example_feature_spec=example_feature_spec,
          list_size=2,
          reader=tf.data.TFRecordDataset,
          size_feature_name=_SIZE)
      features = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
      # Labels come out shaped (batch, list_size, 1); squeeze the last dim.
      label = tf.squeeze(features.pop(_LABEL_FEATURE), axis=2)
      return features, label
    return _input_fn
  def test_model_to_estimator_missing_custom_objects(self):
    """Training must fail when custom_objects for the normalizer is omitted."""
    keras_model = model.create_keras_model(
        network=self._network,
        loss=self._loss,
        metrics=self._eval_metrics,
        optimizer=self._optimizer,
        size_feature_name=_SIZE)
    estimator = estimator_lib.model_to_estimator(
        model=keras_model, config=self._config, custom_objects=None)
    self.assertIsInstance(estimator, tf.compat.v1.estimator.Estimator)
    # Train and export model.
    train_spec = tf.estimator.TrainSpec(
        input_fn=self._make_input_fn(), max_steps=1)
    eval_spec = tf.estimator.EvalSpec(
        name='eval', input_fn=self._make_input_fn(), steps=10)
    with self.assertRaises(AttributeError):
      tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
  @parameterized.named_parameters(
      ('without_weights', None, 'predict'),
      ('with_example_weights', _EXAMPLE_WEIGHT_FEATURE, 'predict'),
      ('pointwise_inference', None, 'regress'))
  def test_model_to_estimator(self, weights_feature_name, serving_default):
    """Trains, evaluates and exports the converted estimator end to end."""
    keras_model = model.create_keras_model(
        network=self._network,
        loss=self._loss,
        metrics=self._eval_metrics,
        optimizer=self._optimizer,
        size_feature_name=_SIZE)
    estimator = estimator_lib.model_to_estimator(
        model=keras_model,
        config=self._config,
        weights_feature_name=weights_feature_name,
        custom_objects=self._custom_objects,
        serving_default=serving_default)
    self.assertIsInstance(estimator, tf.compat.v1.estimator.Estimator)
    # Train and export model.
    train_spec = tf.estimator.TrainSpec(
        input_fn=self._make_input_fn(weights_feature_name), max_steps=1)
    eval_spec = tf.estimator.EvalSpec(
        name='eval',
        input_fn=self._make_input_fn(weights_feature_name),
        steps=10)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    context_feature_spec = tf.feature_column.make_parse_example_spec(
        self._context_feature_columns.values())
    example_feature_spec = tf.feature_column.make_parse_example_spec(
        self._example_feature_columns.values())
    def _make_serving_input_fn(serving_default):
      # 'predict' serves listwise ELWC input; anything else builds a
      # pointwise tf.Example receiver.
      if serving_default == 'predict':
        return data.build_ranking_serving_input_receiver_fn(
            data.ELWC,
            context_feature_spec=context_feature_spec,
            example_feature_spec=example_feature_spec,
            size_feature_name=_SIZE)
      else:
        def pointwise_serving_fn():
          serialized = tf.compat.v1.placeholder(
              dtype=tf.string, shape=[None], name='input_ranking_tensor')
          receiver_tensors = {'input_ranking_data': serialized}
          features = data.parse_from_tf_example(
              serialized,
              context_feature_spec=context_feature_spec,
              example_feature_spec=example_feature_spec,
              size_feature_name=_SIZE)
          return tf.estimator.export.ServingInputReceiver(features,
                                                          receiver_tensors)
        return pointwise_serving_fn
    serving_input_receiver_fn = _make_serving_input_fn(serving_default)
    export_dir = os.path.join(tf.compat.v1.test.get_temp_dir(), 'export')
    estimator.export_saved_model(export_dir, serving_input_receiver_fn)
    # Confirm model ran and created checkpoints and saved model.
    final_ckpt_path = os.path.join(estimator.model_dir, 'model.ckpt-1.meta')
    self.assertTrue(tf.io.gfile.exists(final_ckpt_path))
    saved_model_pb = os.path.join(export_dir,
                                  tf.io.gfile.listdir(export_dir)[0],
                                  'saved_model.pb')
    self.assertTrue(tf.io.gfile.exists(saved_model_pb))
  def test_model_to_estimator_wrong_weights_name(self):
    """Training must fail when weights_feature_name is absent from the data."""
    keras_model = model.create_keras_model(
        network=self._network,
        loss=self._loss,
        metrics=self._eval_metrics,
        optimizer=self._optimizer,
        size_feature_name=_SIZE)
    estimator = estimator_lib.model_to_estimator(
        model=keras_model,
        config=self._config,
        weights_feature_name='weights',
        custom_objects=self._custom_objects)
    self.assertIsInstance(estimator, tf.compat.v1.estimator.Estimator)
    # Train and export model.
    train_spec = tf.estimator.TrainSpec(
        input_fn=self._make_input_fn(), max_steps=1)
    eval_spec = tf.estimator.EvalSpec(
        name='eval', input_fn=self._make_input_fn(), steps=10)
    with self.assertRaises(ValueError):
      tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# Run all test cases under TensorFlow's test runner.
if __name__ == '__main__':
  tf.test.main()
| 36.005618 | 80 | 0.677953 | 8,893 | 0.69379 | 0 | 0 | 2,983 | 0.23272 | 0 | 0 | 2,848 | 0.222188 |
946f30f9387c6c8f7e6cd66ecde7c286751d1fc1 | 140 | py | Python | tests/tests/screens/screens/test_screens.py | centergy/flex_ussd | ddc0ccd192e3a0a82e8b7705f088862d59656c28 | [
"MIT"
] | null | null | null | tests/tests/screens/screens/test_screens.py | centergy/flex_ussd | ddc0ccd192e3a0a82e8b7705f088862d59656c28 | [
"MIT"
] | null | null | null | tests/tests/screens/screens/test_screens.py | centergy/flex_ussd | ddc0ccd192e3a0a82e8b7705f088862d59656c28 | [
"MIT"
] | null | null | null | from flex.ussd.screens import UssdScreen
# NOTE(review): "AbsractOne" looks like a typo for "AbstractOne"; renaming
# would change the public interface, so it is only flagged here.
class AbsractOne(UssdScreen):
    # Meta marker: declares this screen abstract (not directly routable).
    class Meta:
        abstract = True
# Concrete home screen; inherits everything from AbsractOne, no overrides.
class Home(AbsractOne):
    pass
| 9.333333 | 40 | 0.742857 | 90 | 0.642857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
946f3f719a9bae48bd6b704f1e5e51de03e9d2b1 | 6,567 | py | Python | k-means.py | JoelRamosC/Algorithms_PYTHON | 07a693f9729e92fd6a70b1840d06f718fc24a436 | [
"MIT"
] | null | null | null | k-means.py | JoelRamosC/Algorithms_PYTHON | 07a693f9729e92fd6a70b1840d06f718fc24a436 | [
"MIT"
] | null | null | null | k-means.py | JoelRamosC/Algorithms_PYTHON | 07a693f9729e92fd6a70b1840d06f718fc24a436 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Fonte https://realpython.com/k-means-clustering-python/
# Clustering is a set of techniques used to partition data into groups, or clusters. Clusters are loosely defined as groups of data objects that are more similar to other objects in their cluster than they are to data objects in other clusters.
# Partitional clustering
# divides data objects into nonoverlapping groups. In other words, no object can be a member of more than one cluster, and every cluster must have at least one object.Two examples of partitional clustering algorithms are k-means and k-medoids.
# Hierarchical clustering
# determines cluster assignments by building a hierarchy. This is implemented by either a bottom-up or a top-down approach
# Density-based clustering
# determines cluster assignments based on the density of data points in a region. Clusters are assigned where there are high densities of data points separated by low-density regions.
# Conventional k-means requires only a few steps. The first step is to randomly select k centroids, where k is equal to the number of clusters you choose. Centroids are data points representing the center of a cluster.
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler
# Generate the synthetic data and labels:
features, true_labels = make_blobs(n_samples=200,centers=3,cluster_std=2.75,random_state=42)
# you’ll use the StandardScaler class. This class implements a type of feature scaling called standardization. Standardization scales, or shifts, the values for each numerical feature in your dataset so that the features have a mean of 0 and standard deviation of 1:
scaler = StandardScaler()
scaled_features = scaler.fit_transform(features)
kmeans = KMeans(
init="random",
n_clusters=3,
n_init=10,
max_iter=300,
random_state=42
)
kmeans.fit(scaled_features)
# The lowest SSE value
kmeans.inertia_
# Final locations of the centroid
kmeans.cluster_centers_
# The number of iterations required to converge
kmeans.n_iter_
kmeans.labels_[:5]
# Choosing the Appropriate Number of Clusters
kmeans_kwargs = {
"init": "random",
"n_init": 10,
"max_iter": 300,
"random_state": 42,
}
# A list holds the SSE values for each k
sse = []
for k in range(1, 11):
kmeans = KMeans(n_clusters=k, **kmeans_kwargs)
kmeans.fit(scaled_features)
sse.append(kmeans.inertia_)
#the best k is elbow point of curve
plt.style.use("fivethirtyeight")
plt.plot(range(1, 11), sse)
plt.xticks(range(1, 11))
plt.xlabel("Number of Clusters")
plt.ylabel("SSE")
plt.show()
# When you plot SSE as a function of the number of clusters, notice that SSE continues to decrease as you increase k. As more centroids are added, the distance from each point to its closest centroid will decrease.
# There’s a sweet spot where the SSE curve starts to bend known as the elbow point. The x-value of this point is thought to be a reasonable trade-off between error and number of clusters. In this example, the elbow is located at x=3:
plt.style.use("fivethirtyeight")
plt.plot(range(1, 11), sse)
plt.xticks(range(1, 11))
plt.xlabel("Number of Clusters")
plt.ylabel("SSE")
plt.show()
# Determining the elbow point in the SSE curve isn’t always straightforward. If you’re having trouble choosing the elbow point of the curve, then you could use a Python package, kneed, to identify the elbow point programmatically:
kl = KneeLocator(
range(1, 11), sse, curve="convex", direction="decreasing"
)
kl.elbow
# The silhouette coefficient is a measure of cluster cohesion and separation. It quantifies how well a data point fits into its assigned cluster based on two factors:
# How close the data point is to other points in the cluster
# How far away the data point is from points in other clusters
# Silhouette coefficient values range between -1 and 1. Larger numbers indicate that samples are closer to their clusters than they are to other clusters.
# In the scikit-learn implementation of the silhouette coefficient, the average silhouette coefficient of all the samples is summarized into one score. The silhouette score() function needs a minimum of two clusters, or it will raise an exception.
# Loop through values of k again. This time, instead of computing SSE, compute the silhouette coefficient:
# A list holds the silhouette coefficients for each k
silhouette_coefficients = []
# Notice you start at 2 clusters for silhouette coefficient
for k in range(2, 11):
kmeans = KMeans(n_clusters=k, **kmeans_kwargs)
kmeans.fit(scaled_features)
score = silhouette_score(scaled_features, kmeans.labels_)
silhouette_coefficients.append(score)
# Plotting the average silhouette scores for each k shows that the best choice for k is 3 since it has the maximum score:
plt.style.use("fivethirtyeight")
plt.plot(range(2, 11), silhouette_coefficients)
plt.xticks(range(2, 11))
plt.xlabel("Number of Clusters")
plt.ylabel("Silhouette Coefficient")
plt.show()
#Evaluating Clustering Performance Using Advanced Techniques
from sklearn.cluster import DBSCAN
from sklearn.datasets import make_moons
from sklearn.metrics import adjusted_rand_score
features, true_labels = make_moons(
n_samples=250, noise=0.05, random_state=42
)
scaled_features = scaler.fit_transform(features)
# Instantiate k-means and dbscan algorithms
kmeans = KMeans(n_clusters=2)
dbscan = DBSCAN(eps=0.3)
# Fit the algorithms to the features
kmeans.fit(scaled_features)
dbscan.fit(scaled_features)
# Compute the silhouette scores for each algorithm
kmeans_silhouette = silhouette_score(
scaled_features, kmeans.labels_
).round(2)
dbscan_silhouette = silhouette_score(
scaled_features, dbscan.labels_
).round (2)
# Print the silhouette coefficient for each of the two algorithms and compare them. A higher silhouette coefficient suggests better clusters, which is misleading in this scenario:
kmeans_silhouette
dbscan_silhouette
# Compare the clustering results of DBSCAN and k-means using ARI as the performance metric:
ari_kmeans = adjusted_rand_score(true_labels, kmeans.labels_)
ari_dbscan = adjusted_rand_score(true_labels, dbscan.labels_)
round(ari_kmeans, 2)
round(ari_dbscan, 2)
| 36.687151 | 267 | 0.753617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,131 | 0.628289 |
946feff5f8d46bf5d118c8553c43a5f9b1acdac8 | 4,660 | py | Python | Imaging/Core/Testing/Python/TestImageProjection.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 3 | 2020-06-20T23:31:06.000Z | 2021-01-11T02:17:16.000Z | Imaging/Core/Testing/Python/TestImageProjection.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 1 | 2020-12-01T23:21:02.000Z | 2020-12-02T23:44:43.000Z | Imaging/Core/Testing/Python/TestImageProjection.py | jasper-yeh/VtkDotNet | 84b56f781cb511694e4380cebfb245bbefe2560b | [
"BSD-3-Clause"
] | 5 | 2015-10-09T04:12:29.000Z | 2021-12-15T16:57:11.000Z | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# this script tests vtkImageSlab with various axes permutations,
# in order to cover a nasty set of "if" statements that check
# the intersections of the raster lines with the input bounding box.
#
# Layout: six different slab projections (mean/max/sum along X, Y and Z)
# of the same volume, each displayed in one viewport of a 3x2 window.
# Image pipeline: read the 64x64x93 "headsq" sample volume that ships with
# the VTK test data (16-bit little-endian, masked down to 15 bits).
reader = vtk.vtkImageReader()
reader.ReleaseDataFlagOff()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
# Slab 1: mean projection along Z, with trapezoid integration enabled.
slab1 = vtk.vtkImageSlab()
slab1.SetInputConnection(reader.GetOutputPort())
slab1.SetOperationToMean()
slab1.TrapezoidIntegrationOn()
slab1.SetOrientationToZ()
# Slab 2: max projection (default orientation), single-slice output kept in
# the input scalar type.
slab2 = vtk.vtkImageSlab()
slab2.SetInputConnection(reader.GetOutputPort())
slab2.SetOperationToMax()
slab2.MultiSliceOutputOff()
slab2.SetOutputScalarTypeToInputScalarType()
# Slab 3: sum projection along X, multi-slice output as doubles; the reslice
# reorients the X projection into the viewing plane.
slab3 = vtk.vtkImageSlab()
slab3.SetInputConnection(reader.GetOutputPort())
slab3.SetOperationToSum()
slab3.SetOrientationToX()
slab3.MultiSliceOutputOn()
slab3.SetOutputScalarTypeToDouble()
reslice3 = vtk.vtkImageReslice()
reslice3.SetInputConnection(slab3.GetOutputPort())
reslice3.SetResliceAxesDirectionCosines([0,1,0,0,0,-1,1,0,0])
reslice3.SetOutputSpacing(3.2,3.2,3.2)
reslice3.SetOutputExtent(0,74,0,74,0,0)
# Slab 4: max projection along X (orientation given by index 0 instead of
# the named setter, to exercise that code path), float output.
slab4 = vtk.vtkImageSlab()
slab4.SetInputConnection(reader.GetOutputPort())
slab4.SetOperationToMax()
slab4.SetOrientation(0)
slab4.MultiSliceOutputOn()
slab4.SetOutputScalarTypeToFloat()
reslice4 = vtk.vtkImageReslice()
reslice4.SetInputConnection(slab4.GetOutputPort())
reslice4.SetResliceAxesDirectionCosines([0,1,0,0,0,-1,1,0,0])
reslice4.SetOutputSpacing(3.2,3.2,3.2)
reslice4.SetOutputExtent(0,74,0,74,0,0)
# Slab 5: mean projection along Y, reoriented for display.
slab5 = vtk.vtkImageSlab()
slab5.SetInputConnection(reader.GetOutputPort())
slab5.SetOperationToMean()
slab5.SetOrientationToY()
slab5.MultiSliceOutputOn()
reslice5 = vtk.vtkImageReslice()
reslice5.SetInputConnection(slab5.GetOutputPort())
reslice5.SetResliceAxesDirectionCosines([1,0,0,0,0,-1,0,1,0])
reslice5.SetOutputSpacing(3.2,3.2,3.2)
reslice5.SetOutputExtent(0,74,0,74,0,0)
# Slab 6: max projection along Y (orientation given by index 1).
slab6 = vtk.vtkImageSlab()
slab6.SetInputConnection(reader.GetOutputPort())
slab6.SetOperationToMax()
slab6.SetOrientation(1)
slab6.MultiSliceOutputOn()
reslice6 = vtk.vtkImageReslice()
reslice6.SetInputConnection(slab6.GetOutputPort())
reslice6.SetResliceAxesDirectionCosines([1,0,0,0,0,-1,0,1,0])
reslice6.SetOutputSpacing(3.2,3.2,3.2)
reslice6.SetOutputExtent(0,74,0,74,0,0)
# One 2D image mapper per projection; window/level map the 16-bit data to
# display range (mapper3 uses a wider window because sums are much larger).
mapper1 = vtk.vtkImageMapper()
mapper1.SetInputConnection(slab1.GetOutputPort())
mapper1.SetColorWindow(2000)
mapper1.SetColorLevel(1000)
mapper1.SetZSlice(0)
mapper2 = vtk.vtkImageMapper()
mapper2.SetInputConnection(slab2.GetOutputPort())
mapper2.SetColorWindow(2000)
mapper2.SetColorLevel(1000)
mapper2.SetZSlice(0)
mapper3 = vtk.vtkImageMapper()
mapper3.SetInputConnection(reslice3.GetOutputPort())
mapper3.SetColorWindow(128000)
mapper3.SetColorLevel(64000)
mapper3.SetZSlice(0)
mapper4 = vtk.vtkImageMapper()
mapper4.SetInputConnection(reslice4.GetOutputPort())
mapper4.SetColorWindow(2000)
mapper4.SetColorLevel(1000)
mapper4.SetZSlice(0)
mapper5 = vtk.vtkImageMapper()
mapper5.SetInputConnection(reslice5.GetOutputPort())
mapper5.SetColorWindow(2000)
mapper5.SetColorLevel(1000)
mapper5.SetZSlice(0)
mapper6 = vtk.vtkImageMapper()
mapper6.SetInputConnection(reslice6.GetOutputPort())
mapper6.SetColorWindow(2000)
mapper6.SetColorLevel(1000)
mapper6.SetZSlice(0)
# Wrap each mapper in a 2D actor.
actor1 = vtk.vtkActor2D()
actor1.SetMapper(mapper1)
actor2 = vtk.vtkActor2D()
actor2.SetMapper(mapper2)
actor3 = vtk.vtkActor2D()
actor3.SetMapper(mapper3)
actor4 = vtk.vtkActor2D()
actor4.SetMapper(mapper4)
actor5 = vtk.vtkActor2D()
actor5.SetMapper(mapper5)
actor6 = vtk.vtkActor2D()
actor6.SetMapper(mapper6)
# One renderer per actor, tiled into a 3x2 grid of normalized viewports.
imager1 = vtk.vtkRenderer()
imager1.AddActor2D(actor1)
imager1.SetViewport(0.0,0.0,0.3333,0.5)
imager2 = vtk.vtkRenderer()
imager2.AddActor2D(actor2)
imager2.SetViewport(0.0,0.5,0.3333,1.0)
imager3 = vtk.vtkRenderer()
imager3.AddActor2D(actor3)
imager3.SetViewport(0.3333,0.0,0.6667,0.5)
imager4 = vtk.vtkRenderer()
imager4.AddActor2D(actor4)
imager4.SetViewport(0.3333,0.5,0.6667,1.0)
imager5 = vtk.vtkRenderer()
imager5.AddActor2D(actor5)
imager5.SetViewport(0.6667,0.0,1.0,0.5)
imager6 = vtk.vtkRenderer()
imager6.AddActor2D(actor6)
imager6.SetViewport(0.6667,0.5,1.0,1.0)
# Single render window containing all six viewports.
imgWin = vtk.vtkRenderWindow()
imgWin.AddRenderer(imager1)
imgWin.AddRenderer(imager2)
imgWin.AddRenderer(imager3)
imgWin.AddRenderer(imager4)
imgWin.AddRenderer(imager5)
imgWin.AddRenderer(imager6)
imgWin.SetSize(225,150)
imgWin.Render()
# --- end of script --
| 33.285714 | 70 | 0.809871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.059227 |
94704cd4de1f0b3c909a1594891c846e45aa5197 | 967 | py | Python | tests/pipelines/test_context_factory.py | dpasse/eeyore | 0420cf9ff6d3ddecfc716e62aa97a00443cb23cb | [
"MIT"
] | null | null | null | tests/pipelines/test_context_factory.py | dpasse/eeyore | 0420cf9ff6d3ddecfc716e62aa97a00443cb23cb | [
"MIT"
] | null | null | null | tests/pipelines/test_context_factory.py | dpasse/eeyore | 0420cf9ff6d3ddecfc716e62aa97a00443cb23cb | [
"MIT"
] | null | null | null | import os
import sys
sys.path.insert(0, os.path.abspath('src'))
from eeyore_nlp.pipelines import ContextFactory, \
TextPipeline, \
ContractionsTextPipe, \
PreTaggedContextFactory
def test_context_factory():
    """A ContextFactory built on a contraction-expanding pipeline tokenizes text."""
    text_pipes = [ContractionsTextPipe()]
    factory = ContextFactory(TextPipeline(pipes=text_pipes))
    context = factory.execute('We aren\'t going to New York.')
    expected_tokens = ['We', 'are', 'not', 'going', 'to', 'New', 'York', '.']
    assert context.get('tokens') == expected_tokens
def test_pretagged_context_factory():
    """PreTaggedContextFactory turns inline <NEG> markup into an IOB tag sequence.

    BUG FIX: this function was also named ``test_context_factory``, which
    redefined (shadowed) the pipeline test above it at module level, so
    pytest only ever collected this one.  Renamed so both tests run.
    """
    factory = PreTaggedContextFactory('negative-scope')
    sentence = '<NEG>Not going to the park</NEG>.'
    context = factory.execute(sentence, id=1)
    # Extra keyword arguments to execute() end up on the context cache.
    assert context.cache['id'] == 1
    # One tag per token: five tokens inside the <NEG> span, trailing '.' outside.
    assert context.get('negative-scope') == [
        'B-NEG',
        'I-NEG',
        'I-NEG',
        'I-NEG',
        'I-NEG',
        ''
    ]
| 26.861111 | 91 | 0.544984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.197518 |
9470c26c016cbc9700c1e8fe9a668a1e2bb75e79 | 722 | py | Python | l5q1.py | gonewithharshwinds/itt-lab | 257eb0d38b09eac7991b490ec64c068ef51d7fb2 | [
"MIT"
] | 1 | 2022-01-06T00:07:36.000Z | 2022-01-06T00:07:36.000Z | l5q1.py | gonewithharshwinds/itt-lab | 257eb0d38b09eac7991b490ec64c068ef51d7fb2 | [
"MIT"
] | null | null | null | l5q1.py | gonewithharshwinds/itt-lab | 257eb0d38b09eac7991b490ec64c068ef51d7fb2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
def add(a,b):
    """Return the sum of a and b."""
    total = a + b
    return total
def sub(a,b):
    """Return a minus b."""
    difference = a - b
    return difference
def mul(a,b):
    """Return the product of a and b."""
    product = a * b
    return product
def div(a,b):
    """Return a divided by b (true division; raises ZeroDivisionError if b == 0)."""
    quotient = a / b
    return quotient
# Script driver: read two integers, ask for an operation, print the result.
a = int(input("Enter first number: "))
b = int(input("Enter second number: "))
print("Please select operation : \n" \
      "1. Addition \n" \
      "2. Subtraction \n" \
      "3. Multiplication \n" \
      "4. Division \n")
select = int(input("Select operations form 1, 2, 3, 4 :"))
if select == 1:
    print(a, "+", b, "=", add(a,b))
elif select == 2:
    print(a, "-", b, "=", sub(a,b))
elif select == 3:
    print(a, "*", b, "=", mul(a,b))
elif select == 4:
    # BUG FIX: the original had a bare `exit` (never called) for b == 0,
    # which is a no-op expression, so dividing by zero silently printed
    # nothing.  Report the problem explicitly instead.
    if b == 0:
        print("Cannot divide by zero")
    else:
        print(a, "/", b, "=", div(a,b))
else:
    print("Invalid input")
| 20.628571 | 59 | 0.506925 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 242 | 0.33518 |
94710d9fc08d583f01866bc48d784b361bc959a7 | 2,467 | py | Python | Task/Sokoban/Python/sokoban.py | LaudateCorpus1/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 1 | 2018-11-09T22:08:38.000Z | 2018-11-09T22:08:38.000Z | Task/Sokoban/Python/sokoban.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | null | null | null | Task/Sokoban/Python/sokoban.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 1 | 2018-11-09T22:08:40.000Z | 2018-11-09T22:08:40.000Z | from array import array
from collections import deque
import psyco
data = []
nrows = 0
px = py = 0
sdata = ""
ddata = ""
def init(board):
    """Parse the ASCII level text into the module-global board state.

    Builds two parallel flat strings, one character per board cell:
      sdata -- static scenery ('#' walls, '.' goal squares)
      ddata -- dynamic state ('@' player, '*' boxes)
    and records the player's start column/row in (px, py).
    NOTE: Python 2 code -- filter() returns a list here.  Rows are assumed
    to all have the same width so nrows works as a flat-index stride
    (true for the sample level at the bottom of this file).
    """
    global data, nrows, sdata, ddata, px, py
    # Drop empty lines from the level text.
    data = filter(None, board.splitlines())
    # Despite the name, nrows is the widest row LENGTH; it is used as the
    # row stride when indexing the flattened sdata/ddata strings.
    nrows = max(len(r) for r in data)
    # Per-character translation tables: maps -> static layer, mapd -> dynamic.
    maps = {' ':' ', '.': '.', '@':' ', '#':'#', '$':' '}
    mapd = {' ':' ', '.': ' ', '@':'@', '#':' ', '$':'*'}
    for r, row in enumerate(data):
        for c, ch in enumerate(row):
            sdata += maps[ch]
            ddata += mapd[ch]
            if ch == '@':
                px = c
                py = r
def push(x, y, dx, dy, data):
    """Try to push the box adjacent to the player one step in (dx, dy).

    The player is at (x, y) and the box at (x+dx, y+dy).  Returns the new
    dynamic-state string with the player on the box's old square and the
    box moved one square further, or None if the square two steps away is
    a wall or occupied.  Reads the module globals sdata and nrows.
    NOTE: Python 2 code -- array("c", ...) and tostring() are byte-string
    APIs removed in Python 3.
    """
    # The square behind the box must be open floor in both layers.
    if sdata[(y+2*dy) * nrows + x+2*dx] == '#' or \
       data[(y+2*dy) * nrows + x+2*dx] != ' ':
        return None
    # Strings are immutable, so mutate a char-array copy of the state.
    data2 = array("c", data)
    data2[y * nrows + x] = ' '
    data2[(y+dy) * nrows + x+dx] = '@'
    data2[(y+2*dy) * nrows + x+2*dx] = '*'
    return data2.tostring()
def is_solved(data):
    """True when the boxes ('*') sit exactly on the goal squares ('.')."""
    return all((sdata[i] == '.') == (d == '*')
               for i, d in enumerate(data))
def solve():
    """Breadth-first search over dynamic states; returns a move string.

    Moves use the Sokoban convention: 'udlr' for plain moves, 'UDLR' for
    pushes.  Each BFS node is (dynamic-state string, moves so far, player
    x, player y); states are deduplicated on the state string.  Returns
    "No solution" if the whole space is exhausted.
    """
    # 'open' shadows the builtin; kept as-is from the original code.
    open = deque([(ddata, "", px, py)])
    visited = set([ddata])
    # (dx, dy, move letter, push letter) for each direction.
    dirs = ((0, -1, 'u', 'U'), ( 1, 0, 'r', 'R'),
            (0, 1, 'd', 'D'), (-1, 0, 'l', 'L'))
    # Local alias avoids a global lookup inside the hot loop.
    lnrows = nrows
    while open:
        cur, csol, x, y = open.popleft()
        for di in dirs:
            temp = cur
            dx, dy = di[0], di[1]
            if temp[(y+dy) * lnrows + x+dx] == '*':
                # A box is adjacent: attempt a push (returns None on failure).
                temp = push(x, y, dx, dy, temp)
                if temp and temp not in visited:
                    if is_solved(temp):
                        return csol + di[3]
                    open.append((temp, csol + di[3], x+dx, y+dy))
                    visited.add(temp)
            else:
                # Plain move: the target must be open floor in both layers.
                if sdata[(y+dy) * lnrows + x+dx] == '#' or \
                   temp[(y+dy) * lnrows + x+dx] != ' ':
                    continue
                data2 = array("c", temp)
                data2[y * lnrows + x] = ' '
                data2[(y+dy) * lnrows + x+dx] = '@'
                temp = data2.tostring()
                if temp not in visited:
                    if is_solved(temp):
                        return csol + di[2]
                    open.append((temp, csol + di[2], x+dx, y+dy))
                    visited.add(temp)
    return "No solution"
# Sample level.  Legend: '#' wall, '.' goal square, '$' box, '@' player start.
level = """\
#######
#     #
#     #
#. #  #
#. $$ #
#.$$  #
#.#  @#
#######"""
# psyco is a Python 2 JIT compiler; full() enables it for the whole program.
psyco.full()
init(level)
# Python 2 print statement: show the level, then the solution move string.
print level, "\n\n", solve()
| 25.43299 | 65 | 0.398865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.091204 |
94722bb27e5ee8fb87a9f33ce242e8abc9022b87 | 6,621 | py | Python | tests/test_thornthwaite.py | gastoneb/PyETo | 8bb427654b144141442a3b88754ba637644e47e9 | [
"BSD-3-Clause"
] | 100 | 2015-05-12T13:48:48.000Z | 2022-03-30T08:16:30.000Z | tests/test_thornthwaite.py | tigerforeveryoung/PyETo | 0b7ac9f149f4c89c5b5759a875010c521aa07f0f | [
"BSD-3-Clause"
] | 11 | 2015-09-11T10:01:46.000Z | 2021-12-09T15:25:47.000Z | tests/test_thornthwaite.py | tigerforeveryoung/PyETo | 0b7ac9f149f4c89c5b5759a875010c521aa07f0f | [
"BSD-3-Clause"
] | 74 | 2015-08-23T20:05:10.000Z | 2022-02-25T12:55:29.000Z | """
Unit test script for pyeto.thornthwaite.py
"""
import unittest
import pyeto
class TestThornthwaite(unittest.TestCase):
    """Unit tests for pyeto's daylight-hour and Thornthwaite PET functions."""

    def test_monthly_mean_daylight_hours(self):
        """Check monthly mean daylight hours against published reference data."""
        # Test against values for latitude 20 deg N from Bautista et al (2009)
        # Calibration of the equations of Hargreaves and Thornthwaite to
        # estimate the potential evapotranspiration in semi-arid and subhumid
        # tropical climates for regional applications. Atmosfera 22(4), 331-
        # 348.
        test_mmdlh = [
            10.9,   # Jan
            11.3,   # Feb
            11.9,   # Mar
            12.5,   # Apr
            12.9,   # May
            13.2,   # Jun
            13.1,   # Jul
            12.7,   # Aug
            12.1,   # Sep
            11.5,   # Oct
            11.0,   # Nov
            10.8,   # Dec
        ]
        mmdlh = pyeto.monthly_mean_daylight_hours(pyeto.deg2rad(20.0))
        # Values were only quoted to 1 decimal place so check they are accurate
        # to within 12 minutes (0.2 hours)
        for m in range(12):
            self.assertAlmostEqual(mmdlh[m], test_mmdlh[m], delta=0.15)
        # Test against values for latitude 46 deg N from Mimikou M. and
        # Baltas E., Technical hydrology, Second edition, NTUA, 2002.
        # cited in PAPADOPOULOU E., VARANOU E., BALTAS E., DASSAKLIS A., and
        # MIMIKOU M. (2003) ESTIMATING POTENTIAL EVAPOTRANSPIRATION AND ITS
        # SPATIAL DISTRIBUTION IN GREECE USING EMPIRICAL METHODS.
        test_mmdlh = [
            8.9,    # Jan
            10.1,   # Feb
            11.6,   # Mar
            13.3,   # Apr
            14.7,   # May
            15.5,   # Jun
            15.2,   # Jul
            13.9,   # Aug
            12.3,   # Sep
            10.7,   # Oct
            9.2,    # Nov
            8.5,    # Dec
        ]
        mmdlh = pyeto.monthly_mean_daylight_hours(pyeto.deg2rad(46.0))
        # Values were only quoted to 1 decimal place so check they are accurate
        # to within 12 minutes (0.2 hours)
        for m in range(12):
            self.assertAlmostEqual(mmdlh[m], test_mmdlh[m], delta=0.15)
        # Test against values obtained for Los Angeles, California,
        # latitude 34 deg 05' N, from
        # http://aa.usno.navy.mil/data/docs/Dur_OneYear.php
        latitude = pyeto.deg2rad(34.0833333)
        la_mmdlh = [
            10.182,  # Jan
            10.973,  # Feb
            11.985,  # Mar
            13.046,  # Apr
            13.940,  # May
            14.388,  # Jun
            14.163,  # Jul
            13.404,  # Aug
            12.374,  # Sep
            11.320,  # Oct
            10.401,  # Nov
            9.928,   # Dec
        ]
        mmdlh = pyeto.monthly_mean_daylight_hours(latitude)
        # Check that the 2 methods are almost the same (within 15 minutes)
        for m in range(12):
            self.assertAlmostEqual(mmdlh[m], la_mmdlh[m], delta=0.25)
        # Test with year set to a non-leap year: should match the default.
        non_leap = pyeto.monthly_mean_daylight_hours(latitude, 2015)
        for m in range(12):
            self.assertEqual(mmdlh[m], non_leap[m])
        # Test with year set to a leap year
        leap = pyeto.monthly_mean_daylight_hours(latitude, 2016)
        for m in range(12):
            if m == 0:
                # January is composed of the same Julian days in both years.
                self.assertEqual(leap[m], non_leap[m])
            elif m == 1:  # Feb
                # Because Feb extends further into the year in a leap year it
                # should have a slightly longer mean day length in the
                # northern hemisphere
                self.assertGreater(leap[m], non_leap[m])
            else:
                # All months after Feb in a leap year will be composed of
                # different Julian days (days of the year) compared to a
                # non-leap year so will have different mean daylengths.
                self.assertNotEqual(leap[m], non_leap[m])
        # Test with bad latitude: anything beyond +/-90 degrees must raise.
        with self.assertRaises(ValueError):
            _ = pyeto.monthly_mean_daylight_hours(
                pyeto.deg2rad(90.01))
        with self.assertRaises(ValueError):
            _ = pyeto.monthly_mean_daylight_hours(
                pyeto.deg2rad(-90.01))
        # Test limits of latitude (exactly +/-90 degrees is still valid).
        _ = pyeto.monthly_mean_daylight_hours(
            pyeto.deg2rad(90.0))
        _ = pyeto.monthly_mean_daylight_hours(
            pyeto.deg2rad(-90.0))

    def test_thornthwaite(self):
        """Check monthly PET estimates and argument validation of thornthwaite()."""
        # Test values obtained from a worked example in Hydrology: An
        # Environmental Approach, pp 435-436 by Ian Watson.
        test_monthly_t = [
            2.1, 2.5, 4.8, 7.1, 8.3, 10.7, 13.4, 14.5, 11.1, 8.2, 5.4, 3.7]
        test_monthly_mean_dlh = [
            9.4, 10.6, 11.9, 13.4, 14.6, 15.2, 14.9, 13.9, 12.6, 11.1, 9.8, 9.1]
        test_pet = [
            10.67, 14.08, 28.49, 45.85, 57.47, 75.20, 89.91, 90.29, 64.26,
            43.34, 26.24, 17.31]
        # NOTE: The test PET was calculated using rounded coefficients, rounded
        # intermediate values and doesn't adjust for the number of days in
        # the month. This results in a small difference in estimated monthly
        # PET of up to +/- 4 mm.
        pet = pyeto.thornthwaite(test_monthly_t, test_monthly_mean_dlh)
        for m in range(12):
            diff = abs(pet[m] - test_pet[m])
            self.assertLess(diff, 4)
        # Test with non-leap year
        pet_non_leap = pyeto.thornthwaite(
            test_monthly_t, test_monthly_mean_dlh, year=2015)
        # Test results are same as above when year argument is set
        for m in range(12):
            self.assertEqual(pet[m], pet_non_leap[m])
        # Test with leap year
        pet_leap = pyeto.thornthwaite(
            test_monthly_t, test_monthly_mean_dlh, year=2016)
        for m in range(12):
            # 29 days in Feb so PET should be higher than in non-leap year
            # results
            if m == 1:  # Feb
                self.assertGreater(pet_leap[m], pet_non_leap[m])
            else:
                self.assertEqual(pet_leap[m], pet_non_leap[m])
        # Test with wrong length args: both inputs must have 12 entries.
        with self.assertRaises(ValueError):
            _ = pyeto.thornthwaite(list(range(11)), test_monthly_mean_dlh)
        with self.assertRaises(ValueError):
            _ = pyeto.thornthwaite(list(range(13)), test_monthly_mean_dlh)
        with self.assertRaises(ValueError):
            _ = pyeto.thornthwaite(test_monthly_t, list(range(11)))
        with self.assertRaises(ValueError):
            _ = pyeto.thornthwaite(test_monthly_t, list(range(13)))
if __name__ == '__main__':
unittest.main()
| 37.619318 | 80 | 0.561547 | 6,487 | 0.979761 | 0 | 0 | 0 | 0 | 0 | 0 | 2,220 | 0.335297 |
947267e1c8550678d00f1bad3cf2d94a7396e900 | 6,619 | py | Python | openmdao.lib/src/openmdao/lib/optproblems/sellar.py | OzanCKN/OpenMDAO-Framework | 05e9d4b9bc41d0ec00a7073545146c925cd33b0b | [
"Apache-2.0"
] | 1 | 2015-11-05T11:14:45.000Z | 2015-11-05T11:14:45.000Z | openmdao.lib/src/openmdao/lib/optproblems/sellar.py | janus/OpenMDAO-Framework | 05e9d4b9bc41d0ec00a7073545146c925cd33b0b | [
"Apache-2.0"
] | null | null | null | openmdao.lib/src/openmdao/lib/optproblems/sellar.py | janus/OpenMDAO-Framework | 05e9d4b9bc41d0ec00a7073545146c925cd33b0b | [
"Apache-2.0"
] | 1 | 2020-07-15T02:45:54.000Z | 2020-07-15T02:45:54.000Z | """
Two discipline components.
From Sellar's analytic problem.
Sellar, R. S., Batill, S. M., and Renaud, J. E., Response Surface Based, Concur-
rent Subspace Optimization for Multidisciplinary System Design," Proceedings
References 79 of the 34th AIAA Aerospace Sciences Meeting and Exhibit, Reno, NV,
January 1996.
"""
from openmdao.main.api import Component, ComponentWithDerivatives
from openmdao.main.problem_formulation import OptProblem
from openmdao.lib.datatypes.api import Float
class Discipline1(Component):
    """First discipline of the Sellar analytic problem:
    y1 = z1**2 + z2 + x1 - 0.2*y2
    """

    # pylint: disable-msg=E1101
    z1 = Float(0.0, iotype='in', desc='Global Design Variable')
    z2 = Float(0.0, iotype='in', desc='Global Design Variable')
    x1 = Float(0.0, iotype='in', desc='Local Design Variable')
    y2 = Float(0.0, iotype='in', desc='Disciplinary Coupling')
    y1 = Float(iotype='out', desc='Output of this Discipline')

    def execute(self):
        """Compute y1 = z1**2 + z2 + x1 - 0.2*y2 from the input traits."""
        self.y1 = (self.z1 ** 2 + self.z2 +
                   self.x1 - 0.2 * self.y2)
class Discipline1_WithDerivatives(ComponentWithDerivatives):
    """First Sellar discipline with analytic first derivatives.

    Computes y1 = z1**2 + z2 + x1 - 0.2*y2 and registers/supplies the
    partial derivatives of y1 with respect to each input.
    """

    # pylint: disable-msg=E1101
    z1 = Float(0.0, iotype='in', desc='Global Design Variable')
    z2 = Float(0.0, iotype='in', desc='Global Design Variable')
    x1 = Float(0.0, iotype='in', desc='Local Design Variable')
    y2 = Float(0.0, iotype='in', desc='Disciplinary Coupling')
    y1 = Float(iotype='out', desc='Output of this Discipline')

    def __init__(self):
        # BUG FIX: the super() call previously referenced
        # SellarDiscipline1_WithDerivatives, a name that does not exist in
        # this module, so instantiating this class raised NameError.
        super(Discipline1_WithDerivatives, self).__init__()
        self.derivatives.declare_first_derivative(self, 'y1', 'z1')
        self.derivatives.declare_first_derivative(self, 'y1', 'z2')
        self.derivatives.declare_first_derivative(self, 'y1', 'x1')
        self.derivatives.declare_first_derivative(self, 'y1', 'y2')

    def calculate_first_derivatives(self):
        """Analytic first derivatives of y1 w.r.t. z1, z2, x1 and y2."""
        self.derivatives.set_first_derivative('y1', 'z1', 2.0*self.z1)
        self.derivatives.set_first_derivative('y1', 'z2', 1.0)
        self.derivatives.set_first_derivative('y1', 'x1', 1.0)
        self.derivatives.set_first_derivative('y1', 'y2', -0.2)

    def execute(self):
        """Evaluates the equation
        y1 = z1**2 + z2 + x1 - 0.2*y2"""
        z1 = self.z1
        z2 = self.z2
        x1 = self.x1
        y2 = self.y2
        self.y1 = z1**2 + z2 + x1 - 0.2*y2
class Discipline2(Component):
    """Second discipline of the Sellar analytic problem:
    y2 = sqrt(y1) + z1 + z2
    """

    # pylint: disable-msg=E1101
    z1 = Float(0.0, iotype='in', desc='Global Design Variable')
    z2 = Float(0.0, iotype='in', desc='Global Design Variable')
    y1 = Float(0.0, iotype='in', desc='Disciplinary Coupling')
    y2 = Float(iotype='out', desc='Output of this Discipline')

    def execute(self):
        """Compute y2 = sqrt(|y1|) + z1 + z2.

        abs() guards against a transiently negative y1 while the coupled
        system converges; the optimization constrains y1 > 3.16, so
        converged solutions are unaffected.
        """
        self.y2 = abs(self.y1) ** 0.5 + self.z1 + self.z2
class Discipline2_WithDerivatives(ComponentWithDerivatives):
    """Second Sellar discipline with analytic first derivatives.

    Computes y2 = sqrt(y1) + z1 + z2 and registers/supplies the partial
    derivatives of y2 with respect to each input.
    """

    # pylint: disable-msg=E1101
    z1 = Float(0.0, iotype='in', desc='Global Design Variable')
    z2 = Float(0.0, iotype='in', desc='Global Design Variable')
    y1 = Float(0.0, iotype='in', desc='Disciplinary Coupling')
    y2 = Float(iotype='out', desc='Output of this Discipline')

    def __init__(self):
        # BUG FIX: the super() call previously referenced
        # SellarDiscipline2_WithDerivatives, a name that does not exist in
        # this module, so instantiating this class raised NameError.
        super(Discipline2_WithDerivatives, self).__init__()
        self.derivatives.declare_first_derivative(self, 'y2', 'z1')
        self.derivatives.declare_first_derivative(self, 'y2', 'z2')
        self.derivatives.declare_first_derivative(self, 'y2', 'y1')

    def calculate_first_derivatives(self):
        """Analytic first derivatives of y2 w.r.t. z1, z2 and y1."""
        self.derivatives.set_first_derivative('y2', 'z1', 1.0)
        self.derivatives.set_first_derivative('y2', 'z2', 1.0)
        # NOTE(review): unlike execute(), this does not take abs(y1) first,
        # so a transiently negative y1 would raise here -- confirm intended.
        self.derivatives.set_first_derivative('y2', 'y1', .5*self.y1**-0.5)

    def execute(self):
        """Evaluates the equation
        y2 = y1**(.5) + z1 + z2"""
        z1 = self.z1
        z2 = self.z2
        # Note: this may cause some issues. However, y1 is constrained to be
        # above 3.16, so let's just let it converge, and the optimizer will
        # throw it out
        y1 = abs(self.y1)
        self.y2 = y1**(.5) + z1 + z2
class SellarProblem(OptProblem):
    """ Sellar test problem definition."""
    def __init__(self):
        """ Creates a new Assembly with this problem
        Optimal Design at (1.9776, 0, 0)
        Optimal Objective = 3.18339"""
        super(SellarProblem, self).__init__()
        #add the discipline components to the assembly
        self.add('dis1', Discipline1())
        self.add('dis2', Discipline2())
        #START OF MDAO Problem Definition
        #Global Des Vars: z1 and z2 are shared, so each parameter drives the
        #matching input on both dis1 and dis2 at once.
        self.add_parameter(("dis1.z1","dis2.z1"),name="z1",low=-10,high=10,start=5.0)
        self.add_parameter(("dis1.z2","dis2.z2"),name="z2",low=0,high=10,start=2.0)
        #Local Des Vars: x1 belongs to discipline 1 only.
        self.add_parameter("dis1.x1",low=0,high=10,start=1.0)
        #Coupling Vars: each pair ties one discipline's output to the other's
        #input (y1: dis1 -> dis2, y2: dis2 -> dis1).
        self.add_coupling_var(("dis2.y1","dis1.y1"),name="y1",start=0.0)
        self.add_coupling_var(("dis1.y2","dis2.y2"),name="y2",start=0.0)
        #self.add_coupling_var(("dis1.y2","dis2.y2"),start=0.0)
        self.add_objective('(dis1.x1)**2 + dis1.z2 + dis1.y1 + math.exp(-dis2.y2)',name="obj1")
        self.add_constraint('3.16 < dis1.y1')
        self.add_constraint('dis2.y2 < 24.0')
        #solution to the opt problem: the known optimum quoted in the
        #docstring above, keyed by parameter/coupling/objective name.
        self.solution = {
            "z1":1.9776,
            "z2":0.0,
            "dis1.x1":0.0,
            "y1":3.16,
            "y2": 3.756,
            'obj1':3.1834
        }
        #END OF MDAO Problem Definition
| 34.118557 | 95 | 0.573803 | 6,023 | 0.909956 | 0 | 0 | 0 | 0 | 0 | 0 | 2,584 | 0.390391 |
947326ec1e6764387f8f688fc871d6938528c208 | 1,266 | py | Python | video/emotion_detection_camera.py | xii1/image-classifier-service | 01d5f2d6ca1436cefa9e3b978caebcc614b9ed34 | [
"MIT"
] | null | null | null | video/emotion_detection_camera.py | xii1/image-classifier-service | 01d5f2d6ca1436cefa9e3b978caebcc614b9ed34 | [
"MIT"
] | null | null | null | video/emotion_detection_camera.py | xii1/image-classifier-service | 01d5f2d6ca1436cefa9e3b978caebcc614b9ed34 | [
"MIT"
] | null | null | null | import cv2
from ml.facial_expression_classification import predict_facial_expression_by_array, IMAGE_WIDTH, IMAGE_HEIGHT
from video.camera import Camera
OPENCV_HAARCASCADE_FRONTALFACE_FILE = 'trained_models/opencv/haarcascades/haarcascade_frontalface_alt.xml'
class EmotionDetectionCamera(Camera):
    """Camera stream that draws a box and a predicted emotion label on each face."""

    def __init__(self):
        """Load the OpenCV frontal-face Haar cascade and choose an overlay font."""
        self.face_cascade = cv2.CascadeClassifier(OPENCV_HAARCASCADE_FRONTALFACE_FILE)
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        super().__init__()

    def get_frame(self):
        """Grab one frame, annotate every detected face, return it JPEG-encoded."""
        _, frame = self.video.read()
        # Haar cascades work on grayscale; histogram equalization helps detection.
        gray = cv2.equalizeHist(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
        detections = self.face_cascade.detectMultiScale(gray, 1.3, 5)
        for x, y, w, h in detections:
            # Outline the face first (the crop below intentionally follows).
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
            # Classify the face crop, resized to the model's expected input size.
            roi = cv2.resize(frame[y:y + h, x:x + w], (IMAGE_WIDTH, IMAGE_HEIGHT))
            label = predict_facial_expression_by_array(roi)
            # Filled banner above the box, then the predicted label on top of it.
            cv2.rectangle(frame, (x, y - 40), (x + w, y), (0, 255, 0), -1)
            cv2.putText(frame, label, (x + 10, y - 10), self.font, 0.7, (0, 0, 0), 2)
        _, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
| 38.363636 | 109 | 0.655608 | 1,001 | 0.790679 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.058452 |
9474293f5ec239c728c0ec0b3c31ea7b0eb35bf9 | 3,221 | py | Python | vb2py/test_at_scale/testheinsega.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null | vb2py/test_at_scale/testheinsega.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null | vb2py/test_at_scale/testheinsega.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null |
import unittest
from vb2py.test_at_scale import file_tester
class Test_heinsega(file_tester.FileTester):
    """Scale test for the VB6-to-Python converter: one test method per
    source file (.bas modules and .frm forms) of the 'heinsega' OX163
    VB6 project.

    NOTE(review): every path is absolute and machine-specific
    (/Users/paul/...), so this suite can only pass on the original
    author's checkout -- presumably auto-generated; confirm before
    relying on it elsewhere.
    """
    def test0(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Module1.bas')
    def test1(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/start.frm')
    def test2(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/ShutDownWin.frm')
    def test3(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/password_win.frm')
    def test4(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_CookiesCtrl.bas')
    def test5(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Parsing.bas')
    def test6(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/BrowserW.frm')
    def test7(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_manifest.bas')
    def test8(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Declare_Function.bas')
    def test9(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_function.bas')
    def test10(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_FileSystem.bas')
    def test11(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Transcoding.bas')
    def test12(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/History_Logs.frm')
    def test13(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/script_from.frm')
    def test14(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/CMDresult.bas')
    def test15(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/variable.bas')
    def test16(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_MouseWheel.bas')
    def test17(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_Finish_Download.frm')
    def test18(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/Ctrl8dot3name.frm')
    def test19(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/ComDialog.frm')
    def test20(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/sys.frm')
    def test21(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX_163_Module.bas')
    def test22(self):
        self._testFile('/Users/paul/Workspace/sandbox/vb2py-git-files/heinsega/OX163_VB6project_Win32/OX163_mainfrm.frm')
if __name__ == '__main__':
unittest.main()
| 40.2625 | 120 | 0.804409 | 3,111 | 0.965849 | 0 | 0 | 0 | 0 | 0 | 0 | 2,190 | 0.679913 |
9474886f597535a540299d4544f2ce99f2545825 | 9,257 | py | Python | scripts/forecast_models_ex.py | chiara87todaro/1C_PYproject | 871dddc3c1e70edc3d6d6470a0f286b6bcf5e7cf | [
"BSD-3-Clause"
] | null | null | null | scripts/forecast_models_ex.py | chiara87todaro/1C_PYproject | 871dddc3c1e70edc3d6d6470a0f286b6bcf5e7cf | [
"BSD-3-Clause"
] | null | null | null | scripts/forecast_models_ex.py | chiara87todaro/1C_PYproject | 871dddc3c1e70edc3d6d6470a0f286b6bcf5e7cf | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 17 10:23:50 2019
@author: chiara
"""
import os
import numpy as np # scientific calculation
import pandas as pd # data analysis
import itertools
import warnings
from statsmodels.tsa.arima_model import ARMA
ts1=list(range(0,500,2))  # toy series: 0, 2, 4, ..., 498 (250 observations)
len(ts1)  # interactive length check (no effect when run as a script)
model=ARMA(ts1,order=(0,1))  # pure MA(1): zero AR terms, one MA term
#model.information()
fit=model.fit(disp=5)  # css-mle estimation; disp controls convergence output
fit.summary()  # produces the results table reproduced in the comments below
# ARMA Model Results
#==============================================================================
#Dep. Variable: y No. Observations: 250
#Model: ARMA(0, 1) Log Likelihood -1428.744
#Method: css-mle S.D. of innovations 72.604
#Date: Thu, 17 Oct 2019 AIC 2863.489
#Time: 10:57:35 BIC 2874.053
#Sample: 0 HQIC 2867.740
#
#==============================================================================
# coef std err z P>|z| [0.025 0.975]
#------------------------------------------------------------------------------
#const 249.0083 9.165 27.169 0.000 231.045 266.972
#ma.L1.y 0.9999 0.010 101.243 0.000 0.981 1.019
# Roots
#=============================================================================
# Real Imaginary Modulus Frequency
#-----------------------------------------------------------------------------
#MA.1 -1.0001 +0.0000j 1.0001 0.5000
#-----------------------------------------------------------------------------
# o) P>\z\ is the p-val
# o) AIC (Akaike Information Criterion) value measures how well a model fits
# the data while taking into account the overall complexity of the model.
# A model that fits the data very well while using lots of features will be
# assigned a larger AIC score than a model that uses fewer features to achieve
# the same goodness-of-fit. Therefore, we are interested in finding the model
# that yields the lowest AIC value.
# One-step out-of-sample forecast at index len(ts1); observed value ~374.49.
pred=fit.predict(len(ts1),len(ts1)) #374.49
pred  # echoes the prediction in an interactive session
from statsmodels.tsa.vector_ar.var_model import VAR
#from statsmodels.tsa.statespace.varmax import VARMAX
ts2=list(range(500,1000,2))  # second toy series, perfectly correlated with ts1
ts=pd.DataFrame({"ts1":ts1,"ts2":ts2})  # VAR expects a multivariate frame
model=VAR(ts) #,order=(0,1)
#model.information()
fit=model.fit()  # default lag-order selection
fit.summary()  # produces the regression summary reproduced in the comments below
# Summary of Regression Results
#==================================
#Model: VAR
#Method: OLS
#Date: Thu, 17, Oct, 2019
#Time: 16:00:22
#--------------------------------------------------------------------
#No. of Equations: 2.00000 BIC: -116.125
#Nobs: 249.000 HQIC: -116.175
#Log likelihood: 13767.4 FPE: 3.39553e-51
#AIC: -116.209 Det(Omega_mle): 3.31516e-51
#--------------------------------------------------------------------
#Results for equation ts1
#=========================================================================
# coefficient std. error t-stat prob
#-------------------------------------------------------------------------
#const -0.001984 NAN NAN NAN
#L1.ts1 0.995996 NAN NAN NAN
#L1.ts2 0.004004 NAN NAN NAN
#=========================================================================
#
#Results for equation ts2
#=========================================================================
# coefficient std. error t-stat prob
#-------------------------------------------------------------------------
#const 0.002016 NAN NAN NAN
#L1.ts1 -0.003996 NAN NAN NAN
#L1.ts2 1.003996 NAN NAN NAN
#=========================================================================
#
#Correlation matrix of residuals
# ts1 ts2
#ts1 1.000000 0.951165
#ts2 0.951165 1.000000
# Joint forecasts; fit.y supplies the lag window the VAR needs as context.
pred=fit.forecast(fit.y,steps=1) #array([[ 500., 1000.]])
pred  # one-step-ahead forecast for (ts1, ts2)
pred=fit.forecast(fit.y,steps=3)
pred #array([[ 500., 1000.],
#            [ 502., 1002.],
#            [ 504., 1004.]])
##################################### SARIMAX
from statsmodels.tsa.statespace.sarimax import SARIMAX
# Create parameters
# Define the p, d and q parameters to take any value between 0 and 2
p = d = q = range(0, 2)  # each of p, d, q ranges over {0, 1}
# Generate all different combinations of (p, d, q) triplets.
pdq = list(itertools.product(p, d, q))
# Seasonal triplets reuse the same grid with a period of 52 (weeks per year).
seasonal_pdq = [(x[0], x[1], x[2], 52) for x in pdq]#list(itertools.product(p, d, q))
warnings.filterwarnings("ignore") # specify to ignore warning messages
param=pdq[0]  # seed values; both are overwritten by the grid-search loop below
param_seasonal=seasonal_pdq[0]
# Grid-search every (p, d, q) x seasonal combination and report its AIC;
# the combination with the lowest AIC fits best for its complexity.
for param in pdq:
    for param_seasonal in seasonal_pdq:
        try:
            mod = SARIMAX(ts1, order=param,seasonal_order=param_seasonal,
                enforce_stationarity=False,
                enforce_invertibility=False)
            results = mod.fit()
            print('ARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))
        except Exception:
            # Some parameter combinations fail to converge or are invalid;
            # skip them.  (The original bare ``except:`` would also have
            # swallowed KeyboardInterrupt/SystemExit.)
            continue
#ARIMA(0, 0, 0)x(0, 0, 0, 52)12 - AIC:3529.4532640333523
#ARIMA(0, 0, 0)x(0, 0, 1, 52)12 - AIC:8524.710121490572
#ARIMA(0, 0, 0)x(0, 1, 0, 52)12 - AIC:2390.951838473629
#ARIMA(0, 0, 0)x(0, 1, 1, 52)12 - AIC:6109.756521634717
#ARIMA(0, 0, 0)x(1, 0, 0, 52)12 - AIC:2132.090287303192
#ARIMA(0, 0, 0)x(1, 0, 1, 52)12 - AIC:2034.1091306333342
#ARIMA(0, 0, 0)x(1, 1, 0, 52)12 - AIC:-3089.4441840755426
#ARIMA(0, 0, 0)x(1, 1, 1, 52)12 - AIC:nan
#ARIMA(0, 0, 1)x(0, 0, 0, 52)12 - AIC:8827.74964853632
#ARIMA(0, 0, 1)x(0, 0, 1, 52)12 - AIC:nan
#ARIMA(0, 0, 1)x(0, 1, 0, 52)12 - AIC:8529.012165403003
#ARIMA(0, 0, 1)x(0, 1, 1, 52)12 - AIC:16764.04877539664
#ARIMA(0, 0, 1)x(1, 0, 0, 52)12 - AIC:9566.733370582071
#ARIMA(0, 0, 1)x(1, 0, 1, 52)12 - AIC:8295.369705647365
#ARIMA(0, 0, 1)x(1, 1, 0, 52)12 - AIC:6356.26416402472
#ARIMA(0, 0, 1)x(1, 1, 1, 52)12 - AIC:6271.2742439695485
#ARIMA(0, 1, 0)x(0, 0, 0, 52)12 - AIC:1049.5945140272559
#ARIMA(0, 1, 0)x(0, 0, 1, 52)12 - AIC:9789.103372012913
#ARIMA(0, 1, 0)x(0, 1, 0, 52)12 - AIC:nan
#ARIMA(0, 1, 0)x(0, 1, 1, 52)12 - AIC:nan
#ARIMA(0, 1, 0)x(1, 0, 0, 52)12 - AIC:-4170.033637108996
#ARIMA(0, 1, 0)x(1, 0, 1, 52)12 - AIC:-4153.431343153703
#ARIMA(0, 1, 0)x(1, 1, 0, 52)12 - AIC:-3013.1187268516032
#ARIMA(0, 1, 0)x(1, 1, 1, 52)12 - AIC:-3202.583612185782
#ARIMA(0, 1, 1)x(0, 0, 0, 52)12 - AIC:10707.71402921827
#ARIMA(0, 1, 1)x(0, 0, 1, 52)12 - AIC:20986.03629024016 worst
#ARIMA(0, 1, 1)x(0, 1, 0, 52)12 - AIC:nan
#ARIMA(0, 1, 1)x(0, 1, 1, 52)12 - AIC:nan
#ARIMA(0, 1, 1)x(1, 0, 0, 52)12 - AIC:8542.970298607246
#ARIMA(0, 1, 1)x(1, 0, 1, 52)12 - AIC:8458.300549382868
#ARIMA(0, 1, 1)x(1, 1, 0, 52)12 - AIC:-3011.1187268516032
#ARIMA(0, 1, 1)x(1, 1, 1, 52)12 - AIC:-3018.8321417660136
#ARIMA(1, 0, 0)x(0, 0, 0, 52)12 - AIC:712.1298895449919
#ARIMA(1, 0, 0)x(0, 0, 1, 52)12 - AIC:10620.112972204352
#ARIMA(1, 0, 0)x(0, 1, 0, 52)12 - AIC:nan
#ARIMA(1, 0, 0)x(0, 1, 1, 52)12 - AIC:6111.756521634712
#ARIMA(1, 0, 0)x(1, 0, 0, 52)12 - AIC:-2365.892284196455
#ARIMA(1, 0, 0)x(1, 0, 1, 52)12 - AIC:-1950.972772140532
#ARIMA(1, 0, 0)x(1, 1, 0, 52)12 - AIC:nan
#ARIMA(1, 0, 0)x(1, 1, 1, 52)12 - AIC:nan
#ARIMA(1, 0, 1)x(0, 0, 0, 52)12 - AIC:372.5044628282068
#ARIMA(1, 0, 1)x(0, 0, 1, 52)12 - AIC:9083.281510795705
#ARIMA(1, 0, 1)x(0, 1, 0, 52)12 - AIC:nan
#ARIMA(1, 0, 1)x(0, 1, 1, 52)12 - AIC:6071.64785596824
#ARIMA(1, 0, 1)x(1, 0, 0, 52)12 - AIC:-2089.2449870039572
#ARIMA(1, 0, 1)x(1, 0, 1, 52)12 - AIC:-1929.925530884988
#ARIMA(1, 0, 1)x(1, 1, 0, 52)12 - AIC:nan
#ARIMA(1, 0, 1)x(1, 1, 1, 52)12 - AIC:nan
#ARIMA(1, 1, 0)x(0, 0, 0, 52)12 - AIC:-5251.66293223826
#ARIMA(1, 1, 0)x(0, 0, 1, 52)12 - AIC:8233.103162467083
#ARIMA(1, 1, 0)x(0, 1, 0, 52)12 - AIC:nan
#ARIMA(1, 1, 0)x(0, 1, 1, 52)12 - AIC:-3202.583612185782
#ARIMA(1, 1, 0)x(1, 0, 0, 52)12 - AIC:-4146.842877252098
#ARIMA(1, 1, 0)x(1, 0, 1, 52)12 - AIC:-5916.636927368082 <====== *
#ARIMA(1, 1, 0)x(1, 1, 0, 52)12 - AIC:-3202.583612185782
#ARIMA(1, 1, 0)x(1, 1, 1, 52)12 - AIC:-3200.583612185782
#ARIMA(1, 1, 1)x(0, 0, 0, 52)12 - AIC:-5242.946995244625
#ARIMA(1, 1, 1)x(0, 0, 1, 52)12 - AIC:8193.128146332323
#ARIMA(1, 1, 1)x(0, 1, 0, 52)12 - AIC:nan
#ARIMA(1, 1, 1)x(0, 1, 1, 52)12 - AIC:-3018.8321417660136
#ARIMA(1, 1, 1)x(1, 0, 0, 52)12 - AIC:-4902.063264828318
#ARIMA(1, 1, 1)x(1, 0, 1, 52)12 - AIC:-5051.314673560011
#ARIMA(1, 1, 1)x(1, 1, 0, 52)12 - AIC:-3200.583612185782
#ARIMA(1, 1, 1)x(1, 1, 1, 52)12 - AIC:-3016.8321417660136
| 44.936893 | 85 | 0.480393 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,952 | 0.859026 |
9474bac774018b60c0a057e186d5951eb0b4920a | 6,798 | py | Python | app/front/forms.py | karilint/TaxonManager | 62969403f349319b1253b53a98dc55f5a597da17 | [
"MIT"
] | null | null | null | app/front/forms.py | karilint/TaxonManager | 62969403f349319b1253b53a98dc55f5a597da17 | [
"MIT"
] | 16 | 2022-01-18T07:16:20.000Z | 2022-03-21T08:49:32.000Z | app/front/forms.py | karilint/TaxonManager | 62969403f349319b1253b53a98dc55f5a597da17 | [
"MIT"
] | null | null | null | # Copyright 2020 Frances M. Skinner, Christian Hill
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cProfile import label
from django import forms
from .models import (
Expert,
GeographicDiv,
Kingdom,
Reference,
SynonymLink,
TaxonAuthorLkp,
TaxonomicUnit,
)
from django_select2.forms import Select2MultipleWidget, ModelSelect2MultipleWidget
from django.contrib.admin import site as admin_site
from django.contrib.admin.widgets import RelatedFieldWidgetWrapper
import re
class DoiForm(forms.Form):
    """Single-field form for entering and validating a DOI string."""

    doi = forms.CharField(widget= forms.TextInput(attrs={'id':'id_autodoi'}))

    # DOIs start with the "10." directory indicator, an optional registrant
    # segment, then a "/" separator (e.g. "10.1000/xyz123").  Compiled once
    # instead of on every clean_doi() call.
    _DOI_PREFIX_RE = re.compile(r"^10(?:\.[^\s\/]+)?\/")

    def clean_doi(self):
        """Return the cleaned DOI; raise ValidationError if blank or malformed."""
        doi = self.cleaned_data['doi']
        if doi is None or doi.strip() == '':
            raise forms.ValidationError('Doi cannot be left blank')
        # ``is None`` replaces the original ``== None`` identity-vs-equality slip.
        if self._DOI_PREFIX_RE.match(doi) is None:
            raise forms.ValidationError('Doi is invalid')
        return self.cleaned_data['doi']
class BibtexForm(forms.Form):
    """Single-field form for pasting and sanity-checking a raw BibTeX entry."""

    bib = forms.CharField(widget=forms.Textarea(attrs={'id': 'id_autobibtex', 'rows': 5, 'cols': 60}))

    def clean_bib(self):
        """Validate that the pasted text is shaped like a BibTeX record."""
        entry = self.cleaned_data['bib']
        if entry is None or not entry.strip():
            raise forms.ValidationError('BibTex cannot be left blank')
        # A BibTeX record is "@type{...}", so check both delimiters.
        if not entry.startswith('@'):
            raise forms.ValidationError('BibTex is invalid, BibTex should start with "@"')
        if not entry.endswith('}'):
            raise forms.ValidationError('BibTex is invalid, BibTex should end with "}"')
        return self.cleaned_data['bib']
class RefForm(forms.ModelForm):
    """ModelForm for Reference rows; keeps DOIs unique across references."""

    class Meta:
        model = Reference
        exclude = ['ris', 'citeproc_json', 'visible', 'note_latex', 'note_html', 'title_html', 'title_latex']

    def clean_doi(self):
        """Reject a DOI already owned by a *different* Reference row."""
        doi = self.cleaned_data['doi']
        try:
            existing = Reference.objects.get(doi=doi)
        except Reference.MultipleObjectsReturned:
            # Several rows already share this DOI; nothing to do here.
            # TODO deal with this case.
            pass
        except Reference.DoesNotExist:
            # Good: no reference with this DOI exists yet.
            pass
        else:
            if existing.pk != self.instance.pk:
                raise forms.ValidationError('A reference with this DOI exists'
                                            'in the database already')
        return self.cleaned_data['doi']
class TaxonForm(forms.ModelForm):
    """Form for creating a new taxonomic unit.

    Several CharFields render as empty <select> elements whose choices are
    filled in client-side (rank, parent and senior synonym depend on the
    chosen kingdom).  Multi-valued relations use Select2 widgets.
    """
    template_name = 'add-taxon.html'
    kingdom_name = forms.ModelChoiceField(queryset=Kingdom.objects.all())
    rank_name = forms.CharField(widget=forms.Select(choices=[]), label="New taxon's parent")
    taxonnomic_types = forms.CharField(widget=forms.Select(choices=[]), label="Rank of the new taxon")
    is_junior_synonym = forms.BooleanField(required=False)
    senior_synonym = forms.CharField(widget=forms.Select(choices=[]), label="Senior synonym", required = False)
    # reference = forms.ModelChoiceField(queryset=Reference.objects.all(), label="References where the taxon is mentioned", empty_label="Please choose reference for this taxon")
    # Multi-select pattern taken from https://stackoverflow.com/a/8538923
    references = forms.ModelMultipleChoiceField(
        queryset=Reference.objects.filter(visible=1),
        widget=Select2MultipleWidget,
    )
    geographic_div = forms.ModelMultipleChoiceField(
        queryset=GeographicDiv.objects.all(),
        widget=Select2MultipleWidget,
        label='Geographic location',
        required=False
    )
    expert = forms.ModelMultipleChoiceField(
        queryset=Expert.objects.all(),
        widget=Select2MultipleWidget,
        label= 'Experts',
        required=False
    )
    taxon_author_id = forms.ModelChoiceField(
        queryset=TaxonAuthorLkp.objects.all(),
        required=False,
        label='Author'
    )
    # Maybe multiplechoicefield from this advice: https://stackoverflow.com/a/56823482
    # FIX: In order to query database and set an author for new unit, add a suitable field
    # other later deemed necessary fields can also be added here
    class Meta:
        model = TaxonomicUnit
        fields = ['kingdom_name' , 'taxonnomic_types', 'rank_name', 'unit_name1', 'unit_name2', 'unit_name3', 'unit_name4', 'references', 'geographic_div', 'expert', 'taxon_author_id']
        exclude = ['unnamed_taxon_ind']

    def __init__(self, *args, **kwargs):
        """Wrap selected widgets with the admin's add/edit related-object controls."""
        super(TaxonForm, self).__init__(*args, **kwargs)
        for field in ["expert", "taxon_author_id", "references"]:
            # RelatedFieldWidgetWrapper adds the admin "+"/pencil shortcuts
            # next to the widget for these relations.
            self.fields[field].widget = RelatedFieldWidgetWrapper(
                self.fields[field].widget,
                self.instance._meta.get_field(field).remote_field,
                admin_site
            )
class JuniorSynonymForm(forms.Form):
    """Form used to pick a junior synonym for an existing taxon."""

    template_name = 'add_junior_synonym.html'

    # Choices are populated client-side, hence the empty Select widget.
    synonym_id = forms.CharField(widget=forms.Select(choices=[]), label="Junior synonym")
class ExpertForm(forms.ModelForm):
    """Form for creating/editing an Expert and the regions they cover."""

    template_name = 'add-expert.html'

    # Multi-select of geographic divisions rendered with Select2.
    geographic_div = forms.ModelMultipleChoiceField(
        widget=Select2MultipleWidget,
        queryset=GeographicDiv.objects.all(),
    )

    class Meta:
        model = Expert
        fields = ['expert', 'geographic_div']
class AuthorForm(forms.ModelForm):
    """Form for adding a taxon author; rejects blank or duplicate names."""

    template_name = 'add-author.html'
    kingdom = forms.ModelChoiceField(queryset=Kingdom.objects.all())

    class Meta:
        model = TaxonAuthorLkp
        fields = ['taxon_author', 'kingdom']

    # Prevent blank or duplicate authors
    def clean_taxon_author(self):
        """Return the cleaned author name; raise ValidationError otherwise."""
        taxon_author = self.cleaned_data['taxon_author']
        if taxon_author is None or taxon_author.strip() == '':
            raise forms.ValidationError('Taxon author cannot be left blank')
        # ``exists()`` replaces the original ``get()``: once duplicate names
        # are already in the table, ``get()`` raises an uncaught
        # MultipleObjectsReturned instead of a validation error.
        if TaxonAuthorLkp.objects.filter(taxon_author=taxon_author).exists():
            raise forms.ValidationError('An author with the name '
                                        + taxon_author + ' already exists in the database')
        return self.cleaned_data['taxon_author']
| 36.945652 | 184 | 0.661518 | 5,759 | 0.847161 | 0 | 0 | 0 | 0 | 0 | 0 | 2,324 | 0.341865 |
9474e2d806d86010d62a12d23d2e46d45b4305af | 1,336 | py | Python | src/reddack/cli.py | diatomicDisaster/Reddit-Slackbot | 4f22af110e72eab19d9162a4428800a1895303f3 | [
"MIT"
] | null | null | null | src/reddack/cli.py | diatomicDisaster/Reddit-Slackbot | 4f22af110e72eab19d9162a4428800a1895303f3 | [
"MIT"
] | 10 | 2022-02-21T01:11:20.000Z | 2022-02-22T18:13:00.000Z | src/reddack/cli.py | diatomicDisaster/redack | 4f22af110e72eab19d9162a4428800a1895303f3 | [
"MIT"
] | null | null | null | # Future imports
from __future__ import (
annotations
)
# Standard imports
import argparse
from typing import (
Sequence
)
from pathlib import Path
# Local imports
import reddack
import reddack.config
import reddack.utils
def create_arg_parser() -> argparse.ArgumentParser:
    """Create the argument parser for the CLI.

    Returns:
        A parser with a required ``--config`` option (stored as
        ``config_path``) and an optional ``--queue`` flag.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Moderate Reddit communities via Slack"
        ),
        argument_default=argparse.SUPPRESS
    )
    parser.add_argument(
        "--config",
        dest="config_path",
        required=True,
        help="The path to the config file."
    )
    parser.add_argument(
        "--queue",
        action="store_true",
        # With argument_default=SUPPRESS the attribute would be missing
        # entirely when the flag is absent, crashing any ``args.queue``
        # access downstream; give it an explicit default instead.
        default=False,
    )
    return parser
def process_args(parsedargs):
    """Act on parsed CLI arguments.

    Loads Reddack objects from a ``.json`` config file and, when ``--queue``
    was passed, syncs each of them.
    """
    # BUG FIX: the parser stores the option under dest="config_path", so the
    # original ``parsedargs.configpath`` always raised AttributeError.  The
    # ``queue`` attribute may be absent (argument_default=SUPPRESS), so read
    # it defensively.
    configpath = Path(parsedargs.config_path)
    if configpath.suffix == ".json":
        reddack_objs = reddack.config.reddack_from_file(configpath)
    if getattr(parsedargs, "queue", False):
        for objs in reddack_objs:
            reddack.utils.sync(objs)
def cli(sys_argv: Sequence[str] | None = None) -> None:
    """Parse the CLI arguments and hand them to the processor."""
    process_args(create_arg_parser().parse_args(sys_argv))
def main(sys_argv: Sequence[str] | None = None) -> None:
    """Run through the CLI.

    ``sys_argv`` is the argument list to parse; ``None`` means argparse
    falls back to ``sys.argv[1:]``.
    """
    cli(sys_argv)
| 22.644068 | 67 | 0.658683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 268 | 0.200599 |
9474e898a0c7ad31351c854ed58654d64d8a886d | 2,815 | py | Python | src/TSP/DP.py | ox4f5da2/TSP | b395806b44fdc2e2b5d7488b77356ccbf30eef21 | [
"MIT"
] | null | null | null | src/TSP/DP.py | ox4f5da2/TSP | b395806b44fdc2e2b5d7488b77356ccbf30eef21 | [
"MIT"
] | null | null | null | src/TSP/DP.py | ox4f5da2/TSP | b395806b44fdc2e2b5d7488b77356ccbf30eef21 | [
"MIT"
] | null | null | null | import time
import numpy as np
import utils # 自定义工具函数包
inf = 10e7 # "infinity" sentinel cost (equals 1e8); must exceed any real tour length
def getMinDistance(point, cityNum, dp):
    """Fill in the Held-Karp dynamic-programming table and return it.

    dp[i][j] is the cheapest cost of starting at city ``i``, visiting every
    city whose bit is set in mask ``j`` (bit k-1 <=> city k), and finishing
    back at city 0.

    :param point: pairwise city-distance matrix (indexable as point[i][k])
    :param cityNum: number of cities (int)
    :param dp: zero-initialised table of shape (cityNum, 2**(cityNum - 1))
    :return: the completed dp table (same object as ``dp``)
    """
    INF = 10e7  # local sentinel; avoids relying on the module-level ``inf``
    column = 1 << (cityNum - 1)  # number of visit-set bitmasks
    # Base column: empty visit set, so just return straight to city 0.
    for i in range(cityNum):
        dp[i][0] = point[i][0]
    # Fill column by column so every sub-mask is ready before it is used.
    for j in range(1, column):
        for i in range(cityNum):
            dp[i][j] = INF
            # dp[i][j] is only defined when city i is NOT in mask j.
            # (The original also tested ``(j << 1) & 1 == 1`` for i == 0,
            # which can never be true — dead code, removed.)
            if i > 0 and ((j >> (i - 1)) & 1) == 1:
                continue
            for k in range(1, cityNum):
                if ((j >> (k - 1)) & 1) == 0:
                    continue  # city k is not in this mask
                candidate = point[i][k] + dp[k][j ^ (1 << (k - 1))]
                if dp[i][j] > candidate:
                    dp[i][j] = candidate
    return dp
def isVisited(visited, cityNum):
    """Return True when every city except city 0 has been visited.

    :param visited: per-city visited flags (index 0 is ignored)
    :param cityNum: number of cities (int)
    :return: bool
    """
    # all() over a generator replaces the original manual flag loop.
    return all(visited[i] for i in range(1, cityNum))
def getPath(point, cityNum, dp):
    """Recover the optimal tour from a completed Held-Karp table.

    Starting at city 0, repeatedly pick the unvisited city that realises the
    dp recurrence until every city has been taken.

    :param point: pairwise city-distance matrix
    :param cityNum: number of cities (int)
    :param dp: table produced by getMinDistance
    :return: list of city indices forming the tour, starting at city 0
    """
    INF = 10e7  # local sentinel; avoids relying on the module-level ``inf``
    path = [0]  # the tour always starts at city 0
    visited = np.zeros(cityNum, dtype=np.bool_)
    S = (1 << (cityNum - 1)) - 1  # bitmask of cities still to visit
    pioneer = 0  # city we are currently standing at
    while not all(visited[i] for i in range(1, cityNum)):
        best_cost = INF
        best_city = None
        for i in range(1, cityNum):
            if visited[i] or (S & (1 << (i - 1))) == 0:
                continue
            cost = point[i][pioneer] + dp[i][S ^ (1 << (i - 1))]
            if cost < best_cost:
                best_cost = cost
                best_city = i
        if best_city is None:
            # Defensive: no reachable candidate.  The original reused a stale
            # loop variable here and could spin forever on malformed input.
            break
        pioneer = best_city
        path.append(pioneer)
        visited[pioneer] = True
        S ^= 1 << (pioneer - 1)
    return path
def dynamicProgramming(cityNum, coordinate, point, ifShowResult):
    """Solve the TSP with the Held-Karp dynamic-programming algorithm.

    :param cityNum: number of cities (int)
    :param coordinate: city coordinates (list)
    :param point: pairwise city-distance matrix (ndarray)
    :param ifShowResult: whether to print the result table and network prompt (bool)
    :return: (minimum tour length rounded to 2 dp (float), elapsed seconds (float))
    """
    start = time.perf_counter()  # algorithm start time
    dp = getMinDistance(point, cityNum, np.zeros((cityNum, 1 << (cityNum - 1))))  # build the DP table; dp[0][full mask] is the optimum
    path = getPath(point, cityNum, dp)  # backtrack through the DP recurrence for the optimal visiting order
    end = time.perf_counter()  # algorithm end time
    if ifShowResult == True:
        utils.printTable(path, 7, end - start, cityNum, round(dp[0][(1 << (cityNum - 1)) - 1], 2))  # print the result table
        # Interactive prompt asking whether to draw the city network graph
        # (prompt strings are intentionally left in Chinese — user-facing).
        utils.showTip({
            "notice": "是否显示城市网络图(Y/N):",
            "warning": "非法输入, 请输入Y/y/N/n"
        },["Y", "y", "N", "n"], ["Y", "y"], utils.drawNetwork, coordinate, point, path, inf)  # show the network-graph prompt
    return round(dp[0][(1 << (cityNum - 1)) - 1], 2), end - start
# if __name__ == "__main__":
# cityNum, coordinate, point = utils.cityInit()
# dynamicProgramming(cityNum, coordinate, point) | 28.15 | 101 | 0.574778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,421 | 0.422791 |
9475e0c18bbe81db935a18b756540e0834fe6a82 | 503 | py | Python | Chapter09/others/convertor.py | PacktPublishing/Machine-Learning-for-Mobile | 89ccd5a8caba5266c5be0a3465e8cb112cbc5a7a | [
"MIT"
] | 13 | 2018-06-28T04:21:55.000Z | 2021-10-05T19:45:48.000Z | Chapter09/screenshots/avinash/convertor.py | PacktPublishing/Machine-Learning-for-Mobile | 89ccd5a8caba5266c5be0a3465e8cb112cbc5a7a | [
"MIT"
] | null | null | null | Chapter09/screenshots/avinash/convertor.py | PacktPublishing/Machine-Learning-for-Mobile | 89ccd5a8caba5266c5be0a3465e8cb112cbc5a7a | [
"MIT"
] | 12 | 2018-12-19T13:59:23.000Z | 2021-08-16T15:47:35.000Z | import tfcoreml as tf_converter
# Convert the retrained (transfer-learned) TensorFlow graph into a Core ML
# model.  image_scale plus the -1 per-channel biases presumably implement the
# network's expected input normalisation — TODO confirm against the model.
tf_converter.convert(tf_model_path = 'retrained_graph.pb',  # frozen TF graph to convert
                     mlmodel_path = 'converted.mlmodel',  # Core ML output file
                     output_feature_names = ['final_result:0'],  # classifier output tensor
                     image_input_names = 'input:0',  # treat this input tensor as an image
                     class_labels = 'retrained_labels.txt',  # one class label per line
                     red_bias = -1,
                     green_bias = -1,
                     blue_bias = -1,
                     image_scale = 2.0/224.0
                     )
| 41.916667 | 63 | 0.475149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.170974 |
94763e73690878ccbfb0b93a8b2aa9df65644db9 | 5,567 | py | Python | pythem-master/pythem/modules/fuzzer.py | Marzooq13579/Hack-Gadgets | 4b9351c4465f3e01fb0390b1e86dfe7c26237a19 | [
"MIT"
] | 8 | 2019-02-17T20:11:46.000Z | 2019-10-18T06:27:16.000Z | pythem-master/pythem/modules/fuzzer.py | Marzooq13579/Hack-Gadgets | 4b9351c4465f3e01fb0390b1e86dfe7c26237a19 | [
"MIT"
] | null | null | null | pythem-master/pythem/modules/fuzzer.py | Marzooq13579/Hack-Gadgets | 4b9351c4465f3e01fb0390b1e86dfe7c26237a19 | [
"MIT"
] | 4 | 2019-02-17T23:00:18.000Z | 2019-10-18T06:27:14.000Z | #!/usr/bin/env python2.7
# Copyright (c) 2016-2018 Angelo Moura
#
# This file is part of the program pythem
#
# pythem is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import os
import sys
import struct
import resource
import time
from netaddr import IPAddress, AddrFormatError
from subprocess import *
import socket
class SimpleFuzz(object):
    """Very small buffer-length fuzzer (Python 2).

    Feeds ever-growing runs of 'A' (0x41) bytes to a target, either on the
    target binary's stdin or over a TCP connection, watching for crashes so
    the approximate instruction-pointer overwrite offset can be read off the
    buffer length.
    """
    name = "Fuzzer"
    desc = "Used in the xploit module. simple 'A' generation through tcp or stdin"
    version = "0.3"

    def __init__(self, target, type, offset):
        """
        :param target: binary path (stdin mode) or host/IP (tcp mode)
        :param type: "stdin", "tcp" or "test" (test constructs without fuzzing)
        :param offset: number of 'A' bytes added to the buffer per iteration
        """
        self.offset = offset
        self.target = target
        if type == "test":
            return
        if type == "tcp":
            # NOTE: Python 2 input() evaluates the typed text, so a typed
            # port number arrives as an int.
            self.port = input("[+]Enter the tcp port to fuzz: ")
            self.tcpfuzz()
        elif type == "stdin":
            self.stdinfuzz()
        else:
            print "[!] Select a valid fuzzer type (stdin or tcp)."

    def stdinfuzz(self):
        """Grow the buffer and pipe it to the target's stdin until it crashes."""
        buf = ''
        while True:
            try:
                first = True  # NOTE(review): flag is never read; kept as-is
                buf += '\x41' * self.offset
                # Lift stack/core limits so the child can grow and dump core.
                resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
                resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
                P = Popen(self.target, stdin=PIPE)
                print "[*] Sending buffer with lenght: " + str(len(buf))
                P.stdin.write(buf + '\n')
                line = sys.stdin.readline()  # wait for the operator to hit enter
                P.poll()
                # Negative returncode means the child was killed by a signal
                # (-4/-7/-11 correspond to SIGILL/SIGBUS/SIGSEGV on Linux).
                ret = P.returncode
                if ret is None:
                    # Child still running: no crash at this buffer length.
                    continue
                else:
                    if ret == -4:
                        print "\n[+] Instruction Pointer may be at: {}\n".format(str(len(buf)))
                        break
                    elif ret == -7:
                        print "\n[+] Instruction Pointer may be near: {}\n".format(str(len(buf)))
                        print "[*] Child program crashed with code: %d\n" % ret
                        continue
                    elif ret == -11:
                        print "[*] Child program crashed with SIGSEGV code: %d\n" % ret
                        print "\n[*] Hit enter to continue.\n"
                        continue
                    else:
                        print "[*] Child program exited with code: %d\n" % ret
                        print "\n[*] Hit enter to continue.\n"
                        continue
            except KeyboardInterrupt:
                break

    def tcpfuzz(self):
        """Grow the buffer and send it over TCP until the service misbehaves."""
        buf = ''
        try:
            self.target = str(IPAddress(self.target))
        except AddrFormatError as e:
            # Not a literal IP address: try resolving it as a hostname.
            try:
                self.target = socket.gethostbyname(self.target)
            except Exception as e:
                print "[-] Select a valid IP Address as target."
                print "[!] Exception caught: {}".format(e)
                return
        buf = '\x41' * self.offset
        print "[+] TCP fuzzing initialized, wait untill crash."
        while True:
            try:
                self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self.socket.settimeout(2)
                self.socket.connect((self.target, self.port))
                print "[+] Fuzzing with [{}] bytes.".format(len(buf))
                try:
                    # Some services send a banner first: read it, then send
                    # the payload and try to read a reply.
                    response = self.socket.recv(1024)
                    print "[*] Response: {}".format(response)
                    self.socket.send(buf)
                    try:
                        response = self.socket.recv(1024)
                        print "[*] Response: {}".format(response)
                        self.socket.close()
                        buf += '\x41' * self.offset
                    except:
                        self.socket.close()
                        buf += '\x41' * self.offset
                except:
                    # No banner: send the payload straight away.
                    self.socket.send(buf)
                    try:
                        response = self.socket.recv(1024)
                        print "[*] Response: {}".format(response)
                        self.socket.close()
                        buf += '\x41' * self.offset
                    except:
                        self.socket.close()
                        buf += '\x41' * self.offset
            except KeyboardInterrupt:
                break
            except Exception as e:
                # NOTE(review): substring-in-exception tests below look like
                # they rely on py2 socket.error behaviour — confirm.
                if 'Connection refused' in e:
                    print "[-] Connection refused."
                    time.sleep(4)
                else:
                    try:
                        response = self.socket.recv(1024)
                        print "[*] Response: {}".format(response)
                    except Exception as e:
                        if 'timed out' in e:
                            print "[-] Timed out."
                            time.sleep(2)
                print "[+] Crash occured with buffer length: {}".format(str(len(buf)))
                print "[!] Exception caught: {}".format(e)
| 36.86755 | 97 | 0.485181 | 4,610 | 0.828094 | 0 | 0 | 0 | 0 | 0 | 0 | 1,692 | 0.303934 |
9476f2d76b635e59e675584f63851e269e61905f | 874 | py | Python | attention/utils/metadata.py | fbickfordsmith/attention-iclr | 3b8744fd344ed7e2e360a601be12f12e69892941 | [
"MIT"
] | null | null | null | attention/utils/metadata.py | fbickfordsmith/attention-iclr | 3b8744fd344ed7e2e360a601be12f12e69892941 | [
"MIT"
] | null | null | null | attention/utils/metadata.py | fbickfordsmith/attention-iclr | 3b8744fd344ed7e2e360a601be12f12e69892941 | [
"MIT"
] | null | null | null | """
Define metadata variables used throughout the repository.
"""
import numpy as np
import pandas as pd
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import cosine_distances
from ..utils.paths import path_metadata, path_representations, path_results
# WordNet IDs of the ImageNet classes, plus index <-> wnid lookup maps.
wnids = np.loadtxt(path_metadata/'imagenet_class_wnids.txt', dtype=str)
ind2wnid = {ind:wnid for ind, wnid in enumerate(wnids)}
wnid2ind = {wnid:ind for ind, wnid in enumerate(wnids)}
# Accuracy tables for the attention baseline and the plain VGG16 model.
acc_baseline = pd.read_csv(path_results/'baseline_attn_results.csv')
acc_vgg = pd.read_csv(path_results/'vgg16_results.csv')
mean_acc = np.mean(acc_vgg['accuracy'])
std_acc = np.std(acc_vgg['accuracy'])
# Pairwise cosine distances between class representations; squareform(...,
# checks=False) condenses the matrix to its off-diagonal entries so the
# mean/std are taken over each pair only once.
distances_represent = cosine_distances(np.load(path_representations))
mean_dist = np.mean(squareform(distances_represent, checks=False))
std_dist = np.std(squareform(distances_represent, checks=False))
| 38 | 75 | 0.803204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.179634 |
9477323d968feefdc5b066b7368d4e3e196160db | 767 | py | Python | Grokking-Algorithms/Greedy/classRoomScheduling.py | javokhirbek1999/AlgorithmsDS | f5f403fed959ac8cf3064c8c852c59f2e67496ab | [
"MIT"
] | 6 | 2021-03-21T02:24:05.000Z | 2021-04-05T01:32:13.000Z | Grokking-Algorithms/Greedy/classRoomScheduling.py | javokhirbek1999/AlgorithmsDS | f5f403fed959ac8cf3064c8c852c59f2e67496ab | [
"MIT"
] | null | null | null | Grokking-Algorithms/Greedy/classRoomScheduling.py | javokhirbek1999/AlgorithmsDS | f5f403fed959ac8cf3064c8c852c59f2e67496ab | [
"MIT"
] | null | null | null |
# The classroom scheduling problem
# Suppose you have a classroom and you want to hold as many classes as possible
# __________________________
#| class | start | end |
#|_______|_________|________|
#| Art | 9:00 am | 10:30am|
#|_______|_________|________|
#| Eng | 9:30am | 10:30am|
#|_______|_________|________|
#| Math | 10 am | 11 am |
#|_______|_________|________|
#| CS | 10:30am | 11.30am|
#|_______|_________|________|
#| Music | 11 am | 12 pm |
#|_______|_________|________|
#|
def schedule(classes):
    """Greedily pick the largest set of non-overlapping classes.

    ``classes`` must be sorted by end time; each entry is indexable as
    (start, end).  A class is kept when it starts at or after the end of the
    last class already selected.

    :param classes: end-time-sorted sequence of (start, end) pairs
    :return: list of the selected classes (empty for empty input)
    """
    if not classes:
        return []  # nothing to schedule (original crashed on classes[0])
    possible_classes = [possible for possible in classes[:1]]
    # BUG FIX: the original loop started at index 2 and never considered
    # classes[1]; it also printed a leftover debug line, removed here.
    for i in range(1, len(classes)):
        if possible_classes[-1][1] <= classes[i][0]:
            possible_classes.append(classes[i])
    return possible_classes
| 27.392857 | 80 | 0.698827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 497 | 0.647979 |
947752ce89babbd55c9b6ee0f3a325aa92e874d3 | 4,462 | py | Python | app/tests/checkout_backend/uses_cases/test_total_amount_processor.py | jcazallasc/lana-python-challenge | 134ef30b6dcc7c323c90df8992b194741f071d95 | [
"MIT"
] | null | null | null | app/tests/checkout_backend/uses_cases/test_total_amount_processor.py | jcazallasc/lana-python-challenge | 134ef30b6dcc7c323c90df8992b194741f071d95 | [
"MIT"
] | null | null | null | app/tests/checkout_backend/uses_cases/test_total_amount_processor.py | jcazallasc/lana-python-challenge | 134ef30b6dcc7c323c90df8992b194741f071d95 | [
"MIT"
] | null | null | null | from django.test import TestCase
from checkout_backend.entities.offer_entity import OfferEntity
from checkout_backend.entities.product_entity import ProductEntity
from checkout_backend.uses_cases.total_amount_processor import TotalAmountProcessor
class OffersTestCase(TestCase):
    """Unit tests for TotalAmountProcessor.

    Prices are integers (cents): PEN=500, TSHIRT=2000, MUG=750.  setUp wires
    two offers: a 2x1 on pens and a 25% discount on 3 or more t-shirts.  The
    "lana case" tests mirror the challenge's worked examples.
    """
    def setUp(self):
        # Product catalogue shared by all the cases below.
        self.product_pen = ProductEntity(
            id=1,
            code='PEN',
            name='PEN',
            price=500,
        )
        self.product_tshirt = ProductEntity(
            id=2,
            code='TSHIRT',
            name='TSHIRT',
            price=2000,
        )
        self.product_mug = ProductEntity(
            id=3,
            code='MUG',
            name='MUG',
            price=750,
        )
        # Buy 2 pens, pay for 1 (discount_unit=1 free unit per 2 bought).
        self.multi_buy_offer = OfferEntity(
            id=1,
            name='2x1',
            product=self.product_pen,
            quantity=2,
            discount_unit=1,
            discount_percent=0,
        )
        # 25% off each t-shirt once 3 or more are bought.
        self.depend_discount_offer = OfferEntity(
            id=2,
            name='3 or more discount 25%',
            product=self.product_tshirt,
            quantity=3,
            discount_unit=0,
            discount_percent=25,
        )
        self.offers = [
            self.multi_buy_offer,
            self.depend_discount_offer,
        ]
        self.total_amount_processor = TotalAmountProcessor(self.offers)

    def test_get_total_amount_with_multi_buy_offer(self):
        """Test get total amount with a multi buy offer"""
        total_amount = self.total_amount_processor.get_total_amount(
            [
                {
                    'quantity': self.multi_buy_offer.quantity,
                    'product': self.multi_buy_offer.product,
                }
            ]
        )
        # 2 pens under 2x1 -> pay for one pen only.
        self.assertEqual(total_amount, 500)

    def test_get_total_amount_with_percent_discount_offer(self):
        """Test get total amount with percent discount amount"""
        total_amount = self.total_amount_processor.get_total_amount(
            [
                {
                    'quantity': self.depend_discount_offer.quantity,
                    'product': self.depend_discount_offer.product,
                }
            ]
        )
        # 3 t-shirts at 2000 with 25% off -> 6000 * 0.75.
        self.assertEqual(total_amount, 4500)

    def test_get_total_amount_with_lane_case_1(self):
        """Test lana case 1"""
        total_amount = self.total_amount_processor.get_total_amount(
            [
                {
                    'quantity': 1,
                    'product': self.product_pen,
                },
                {
                    'quantity': 1,
                    'product': self.product_tshirt,
                },
                {
                    'quantity': 1,
                    'product': self.product_mug,
                },
            ],
        )
        # No offer applies: 500 + 2000 + 750.
        self.assertEqual(total_amount, 3250)

    def test_get_total_amount_with_lane_case_2(self):
        """Test lana case 2"""
        total_amount = self.total_amount_processor.get_total_amount(
            [
                {
                    'quantity': 2,
                    'product': self.product_pen,
                },
                {
                    'quantity': 1,
                    'product': self.product_tshirt,
                },
            ],
        )
        # 2x1 pens (500) + one t-shirt (2000).
        self.assertEqual(total_amount, 2500)

    def test_get_total_amount_with_lane_case_3(self):
        """Test lana case 3"""
        total_amount = self.total_amount_processor.get_total_amount(
            [
                {
                    'quantity': 1,
                    'product': self.product_pen,
                },
                {
                    'quantity': 4,
                    'product': self.product_tshirt,
                },
            ],
        )
        # One pen (500) + 4 t-shirts at 25% off (8000 * 0.75).
        self.assertEqual(total_amount, 6500)

    def test_get_total_amount_with_lane_case_4(self):
        """Test lana case 4"""
        total_amount = self.total_amount_processor.get_total_amount(
            [
                {
                    'quantity': 3,
                    'product': self.product_pen,
                },
                {
                    'quantity': 3,
                    'product': self.product_tshirt,
                },
                {
                    'quantity': 1,
                    'product': self.product_mug,
                },
            ],
        )
        # 3 pens under 2x1 (1000) + 3 t-shirts at 25% off (4500) + mug (750).
        self.assertEqual(total_amount, 6250)
| 27.54321 | 83 | 0.485208 | 4,211 | 0.943747 | 0 | 0 | 0 | 0 | 0 | 0 | 487 | 0.109144 |
94777a5313465c9c8238bf7bf45c469fe3eb78a4 | 315 | py | Python | wavelink/__init__.py | hamza1311/Wavelink | f593c59d3589dea9a5337731ac3daaac4cabbfee | [
"MIT"
] | null | null | null | wavelink/__init__.py | hamza1311/Wavelink | f593c59d3589dea9a5337731ac3daaac4cabbfee | [
"MIT"
] | null | null | null | wavelink/__init__.py | hamza1311/Wavelink | f593c59d3589dea9a5337731ac3daaac4cabbfee | [
"MIT"
] | null | null | null | __title__ = 'WaveLink'
__author__ = 'EvieePy'  # original library author
__license__ = 'MIT'
__copyright__ = 'Copyright 2019-2020 (c) PythonistaGuild'
__version__ = '0.6.0'  # package version string
from .client import Client
from .errors import *
from .eqs import *
from .events import *
from .player import *
from .node import Node
from .websocket import WebSocket
| 22.5 | 57 | 0.752381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.228571 |
9477d09d2f626c9ba5ae9d79fc4fcad2a1d51dc9 | 231 | py | Python | Domain/restaurantvalidator.py | VargaIonut23/restaurant | 3f991f30b03921481142187ef33f81d1dc4fe2ad | [
"MIT"
] | null | null | null | Domain/restaurantvalidator.py | VargaIonut23/restaurant | 3f991f30b03921481142187ef33f81d1dc4fe2ad | [
"MIT"
] | null | null | null | Domain/restaurantvalidator.py | VargaIonut23/restaurant | 3f991f30b03921481142187ef33f81d1dc4fe2ad | [
"MIT"
] | null | null | null | class restaurantvalidator():
def valideaza(self, restaurant):
erori = []
if len(restaurant.nume) == 0:
erori.append('numele nu trb sa fie null')
if erori:
raise ValueError(erori)
| 28.875 | 53 | 0.575758 | 230 | 0.995671 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.116883 |
947bb882a6bca2a6316713846edc3e060262afd0 | 656 | py | Python | test/supporting/test_processor.py | holitics/phenome-extensions | 230d1494d0f0d7a1d747cb8af9413df46d8d0e25 | [
"Apache-2.0"
] | 1 | 2019-11-22T18:15:39.000Z | 2019-11-22T18:15:39.000Z | test/supporting/test_processor.py | holitics/phenome-extensions | 230d1494d0f0d7a1d747cb8af9413df46d8d0e25 | [
"Apache-2.0"
] | 1 | 2020-02-29T03:13:39.000Z | 2020-02-29T03:19:29.000Z | test/supporting/test_processor.py | holitics/phenome-extensions | 230d1494d0f0d7a1d747cb8af9413df46d8d0e25 | [
"Apache-2.0"
] | null | null | null | # test_processor.py, Copyright (c) 2019, Phenome Project - Nicholas Saparoff <nick.saparoff@gmail.com>
from phenome_core.core.base.base_processor import BaseProcessor
class TestProcessor(BaseProcessor):
    """Minimal processor used by tests: records a fixed value for a mock object."""
    __test__ = False  # prevent pytest from collecting this class as a test case
    def __init__(self):
        super(TestProcessor, self).__init__()
    def process(self, results):
        """Simulate polling a device and store test_value=45 for mock object id 1."""
        from phenome.test.supporting.test_mockobject import MockObject
        test_value = 45
        # renamed from `object` to avoid shadowing the builtin
        mock_object = MockObject()
        mock_object.id = 1
        # here we would normally POLL the object and populate the value with 45
        results.set_result(mock_object, 'test_value', test_value)
        return results
| 22.62069 | 102 | 0.685976 | 484 | 0.737805 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.277439 |
947d9383137b613729b449880f7118b84411c66a | 344 | py | Python | Pyspark_Sample_ML_programs/simpleapp.py | mihaque313/pyspark_mllib | 8342108384d15b7f2169aff190038e176d7533a9 | [
"Apache-2.0"
] | 7 | 2018-01-31T07:44:13.000Z | 2021-09-17T11:07:08.000Z | Pyspark_Sample_ML_programs/simpleapp.py | mihaque313/pyspark_mllib | 8342108384d15b7f2169aff190038e176d7533a9 | [
"Apache-2.0"
] | 2 | 2017-11-24T05:57:47.000Z | 2021-06-29T10:42:24.000Z | Pyspark_Sample_ML_programs/simpleapp.py | mihaque313/pyspark_mllib | 8342108384d15b7f2169aff190038e176d7533a9 | [
"Apache-2.0"
] | 6 | 2017-11-02T12:28:37.000Z | 2019-07-06T08:03:33.000Z | from pyspark import SparkContext
# Windows path to the Spark README used as sample input
logFile = "D:/Spark/spark-1.6.1-bin-hadoop2.6/README.md"
# local-mode Spark context named "Simple App"
sc = SparkContext("local", "Simple App")
logData = sc.textFile(logFile).cache()
# count lines containing the letters 'a' and 'b' respectively
numAs = logData.filter(lambda s: 'a' in s).count()
numBs = logData.filter(lambda s: 'b' in s).count()
print("Lines with a: %i, lines with b: %i" % (numAs, numBs)) | 34.4 | 60 | 0.671512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.311047 |
947e361e77f0dbd8f7fdac8ef15859fe0c1d3230 | 1,218 | py | Python | cli/src/commands/Backup.py | cicharka/epiphany | 8f7ac8916ba32919629c792cc7bdb9156d4295d7 | [
"Apache-2.0"
] | 2 | 2019-02-12T14:02:33.000Z | 2019-07-16T08:44:24.000Z | cli/src/commands/Backup.py | cicharka/epiphany | 8f7ac8916ba32919629c792cc7bdb9156d4295d7 | [
"Apache-2.0"
] | null | null | null | cli/src/commands/Backup.py | cicharka/epiphany | 8f7ac8916ba32919629c792cc7bdb9156d4295d7 | [
"Apache-2.0"
] | null | null | null | import os
from cli.src.commands.BackupRecoveryBase import BackupRecoveryBase
from cli.src.helpers.doc_list_helpers import select_single
class Backup(BackupRecoveryBase):
    """Perform backup operations."""
    def __init__(self, input_data):
        # Deliberately skips BackupRecoveryBase.__init__: calling super with
        # BackupRecoveryBase resolves to the next class in the MRO (Step),
        # i.e. a late call of Step.__init__(__name__).
        super(BackupRecoveryBase, self).__init__(__name__) # late call of the Step.__init__(__name__)
        super(Backup, self).__init__(input_data)
    def backup(self):
        """Backup all enabled components. Returns 0 on completion."""
        self._process_input_docs()
        self._process_configuration_docs()
        # Get backup config document
        backup_doc = select_single(self.configuration_docs, lambda x: x.kind == 'configuration/backup')
        self._update_role_files_and_vars('backup', backup_doc)
        # Point Ansible at the config file generated for this run
        self.logger.info(f'ANSIBLE_CONFIG={self.ansible_config_file_path}')
        os.environ["ANSIBLE_CONFIG"] = self.ansible_config_file_path
        # Execute all enabled component playbooks sequentially (sorted by name)
        for component_name, component_config in sorted(backup_doc.specification.components.items()):
            if component_config.enabled:
                self._update_playbook_files_and_run('backup', component_name)
        return 0
| 34.8 | 103 | 0.715107 | 1,078 | 0.885057 | 0 | 0 | 0 | 0 | 0 | 0 | 304 | 0.249589 |
947e404b2d74b25f2cf7a78cf9b4ba6e70f26910 | 228 | py | Python | python/testData/inspections/PyUnresolvedReferencesInspection3K/objectNewAttributes.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/inspections/PyUnresolvedReferencesInspection3K/objectNewAttributes.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/inspections/PyUnresolvedReferencesInspection3K/objectNewAttributes.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | class C(object):
def __new__(cls):
self = object.__new__(cls)
self.foo = 1
return self
x = C()
print(x.foo)
print(x.<warning descr="Unresolved attribute reference 'bar' for class 'C'">bar</warning>)
| 22.8 | 90 | 0.622807 | 114 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.22807 |
9481c1171edc52dfcbc1a9877ebb6348ed166a5e | 84 | py | Python | lumin/nn/ensemble/__init__.py | choisant/lumin | c039136eb096e8f3800f13925f9325b99cf7e76b | [
"Apache-2.0"
] | 43 | 2019-02-11T16:16:42.000Z | 2021-12-13T15:35:20.000Z | lumin/nn/ensemble/__init__.py | choisant/lumin | c039136eb096e8f3800f13925f9325b99cf7e76b | [
"Apache-2.0"
] | 48 | 2020-05-21T02:40:50.000Z | 2021-08-10T11:07:08.000Z | lumin/nn/ensemble/__init__.py | choisant/lumin | c039136eb096e8f3800f13925f9325b99cf7e76b | [
"Apache-2.0"
] | 14 | 2019-05-02T15:09:41.000Z | 2022-01-12T21:13:34.000Z | # from .ensemble import * # noqa 403
# __all__ = [*ensemble.__all__] # noqa F405
| 21 | 44 | 0.654762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.964286 |
9482473a1126c6399fbe546129eda6043f695cbf | 490 | py | Python | scg-scrape.py | josteinstraume/python-capstone | a7cf2e3d6e0cf83686b999f9877d06446c1176af | [
"BSD-3-Clause"
] | 1 | 2017-09-01T22:54:00.000Z | 2017-09-01T22:54:00.000Z | scg-scrape.py | josteinstraume/python-capstone | a7cf2e3d6e0cf83686b999f9877d06446c1176af | [
"BSD-3-Clause"
] | null | null | null | scg-scrape.py | josteinstraume/python-capstone | a7cf2e3d6e0cf83686b999f9877d06446c1176af | [
"BSD-3-Clause"
] | null | null | null | import urllib, csv, numpy
from BeautifulSoup import *
# NOTE: this is Python 2 code (raw_input, print statements, urllib.urlopen).
url = raw_input('Enter URL to crawl: ')
# fall back to the StarCityGames deck database when no URL is entered
if len(url) < 1:
    url = 'http://sales.starcitygames.com//deckdatabase/deckshow.php?&t%5BC1%5D=3&start_num=0&start_num=0&limit=limit'
html = urllib.urlopen(url).read()
soup = BeautifulSoup(html)
# Retrieve all of the anchor tags and dump their details
tags = soup('a')
for tag in tags:
    print 'TAG:', tag
    print 'URL:', tag.get('href', None)
    print 'Contents:', tag.contents[0]
    print 'Attrs:', tag.attrs
| 27.222222 | 115 | 0.685714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.414286 |
9482667f7b3f81e2c39c9eb01e0e0b4cdd7db3f4 | 2,921 | py | Python | wire/messages.py | evuez/stork | bd57b207957a9e2feb6ec253cfd2c125a139e52b | [
"MIT"
] | null | null | null | wire/messages.py | evuez/stork | bd57b207957a9e2feb6ec253cfd2c125a139e52b | [
"MIT"
] | null | null | null | wire/messages.py | evuez/stork | bd57b207957a9e2feb6ec253cfd2c125a139e52b | [
"MIT"
] | null | null | null | """
Messages:
https://wiki.theory.org/BitTorrentSpecification#Messages
<length prefix><message ID><payload>
"""
from collections import namedtuple
from struct import pack
from struct import unpack
# Generic <length><id> prefix; '{}' is filled with a payload item count.
# NOTE(review): FORMAT appears unused by the functions below — verify before removing.
FORMAT = '>IB{}'
Message = namedtuple('Message', 'len id payload')
# Wire message IDs. KEEP_ALIVE is synthetic: a keep-alive has no ID byte on the wire.
KEEP_ALIVE = -1
CHOKE = 0
UNCHOKE = 1
INTERESTED = 2
NOT_INTERESTED = 3
HAVE = 4
BITFIELD = 5
REQUEST = 6
PIECE = 7
CANCEL = 8
PORT = 9
# struct formats per message type (big-endian: I = u32 length prefix, B = u8 id)
FORMAT_KEEP_ALIVE = \
FORMAT_CHOKE = \
FORMAT_UNCHOKE = \
FORMAT_INTERESTED = \
FORMAT_NOT_INTERESTED = '>IB'
FORMAT_HAVE = '>IBI'
FORMAT_BITFIELD = '>IB{}B'
FORMAT_REQUEST = '>IBIII'
FORMAT_PIECE = '>IBII{}c'
FORMAT_CANCEL = '>IBIII'
FORMAT_PORT = '>IBH'
def decode(message):
    """Dispatch a raw peer-wire message to the decoder matching its id byte.

    A 4-byte message is the keep-alive (zero length prefix, no id byte).
    """
    if len(message) == 4:
        return Message(0, KEEP_ALIVE, None)
    len_, id_ = unpack('>IB', message[:5])
    handlers = (
        decode_choke,
        decode_unchoke,
        decode_interested,
        decode_not_interested,
        decode_have,
        decode_bitfield,
        decode_request,
        decode_piece,
        decode_cancel,
        decode_port,
    )
    # handlers receive the whole message plus the payload length (len_ - 1)
    return handlers[id_](message, len_ - 1)
# Messages
def keep_alive():
    """Encode keep-alive: a bare zero length prefix, no id or payload."""
    return b'\x00\x00\x00\x00'
def choke():
    """Encode choke (length 1, id 0, no payload)."""
    return b'\x00\x00\x00\x01\x00'
def unchoke():
    """Encode unchoke (length 1, id 1, no payload)."""
    return b'\x00\x00\x00\x01\x01'
def interested():
    """Encode interested (length 1, id 2, no payload)."""
    return b'\x00\x00\x00\x01\x02'
def not_interested():
    """Encode not-interested (length 1, id 3, no payload)."""
    return b'\x00\x00\x00\x01\x03'
def have(piece_index):
    """Encode have (length 5, id 4) carrying one piece index."""
    return pack(FORMAT_HAVE, 5, 4, piece_index)
def bitfield(bits):
    """Encode bitfield (id 5).

    Bug fix: FORMAT_BITFIELD's '{}' is the number of payload bytes, so it must
    be filled with len(bits) (not the full length prefix), and each byte must
    be passed as its own 'B' argument — the old call made struct.pack raise.
    """
    len_ = 1 + len(bits)
    return pack(FORMAT_BITFIELD.format(len(bits)), len_, 5, *bits)
def request(index, begin, length):
    """Encode request (length 13, id 6): index, begin, length."""
    return pack(FORMAT_REQUEST, 13, 6, index, begin, length)
def piece(index, begin, block):
    """Encode piece (id 7): index, begin, then the block bytes.

    Bug fix: the 'c' count must be len(block) — mirroring decode_piece, which
    formats with paylen - 8 — and each 'c' item must be a 1-byte bytes object.
    """
    len_ = 9 + len(block)
    chars = [block[i:i + 1] for i in range(len(block))]
    return pack(FORMAT_PIECE.format(len(block)), len_, 7, index, begin, *chars)
def cancel(index, begin, length):
    """Encode cancel (length 13, id 8): index, begin, length."""
    return pack(FORMAT_CANCEL, 13, 8, index, begin, length)
def port(listen_port):
    """Encode port (length 3, id 9) carrying the DHT listen port."""
    return pack(FORMAT_PORT, 3, 9, listen_port)
# Decoders
def decode_choke(message, _paylen):
    """Decode a choke message (no payload)."""
    return Message(*unpack(FORMAT_CHOKE, message), None)
def decode_unchoke(message, _paylen):
    """Decode an unchoke message (no payload)."""
    return Message(*unpack(FORMAT_UNCHOKE, message), None)
def decode_interested(message, _paylen):
    """Decode an interested message (no payload)."""
    return Message(*unpack(FORMAT_INTERESTED, message), None)
def decode_not_interested(message, _paylen):
    """Decode a not-interested message (no payload)."""
    return Message(*unpack(FORMAT_NOT_INTERESTED, message), None)
def decode_have(message, _paylen):
    """Decode a have message; the payload is the piece index."""
    return Message(*unpack(FORMAT_HAVE, message))
def decode_bitfield(message, paylen):
    """Decode a bitfield message; the payload is one byte per entry."""
    len_, id_, *payload = unpack(FORMAT_BITFIELD.format(paylen), message)
    return Message(len_, id_, payload)
def decode_request(message, _paylen=None):
    """Decode a request message; payload is (index, begin, length).

    Bug fix: this was a stub taking one argument, but decode() dispatches with
    two, so a REQUEST message raised TypeError. _paylen keeps that call shape
    while defaulting to None for backward compatibility with 1-arg callers.
    """
    len_, id_, index, begin, length = unpack(FORMAT_REQUEST, message)
    return Message(len_, id_, (index, begin, length))
def decode_piece(message, paylen):
    """Decode a piece message; payload is (index, begin, block chars)."""
    len_, id_, index, begin, *block = unpack(
        FORMAT_PIECE.format(paylen - 8),
        message
    )
    return Message(len_, id_, (index, begin, block))
def decode_cancel(message, _paylen=None):
    """Decode a cancel message; payload mirrors decode_request (was a stub)."""
    len_, id_, index, begin, length = unpack(FORMAT_CANCEL, message)
    return Message(len_, id_, (index, begin, length))
def decode_port(message, _paylen=None):
    """Decode a port message; payload is the DHT listen port (was a stub)."""
    len_, id_, listen_port = unpack(FORMAT_PORT, message)
    return Message(len_, id_, listen_port)
| 19.091503 | 73 | 0.683328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 331 | 0.113317 |
9482e836bc889d1e104a509019fa216e2390d9d4 | 1,515 | py | Python | nimrud/minimal/features.py | grayhem/nimrud | 5ced7107f5183ed71fc98c2d9fec178cf74cd988 | [
"MIT"
] | 1 | 2018-12-11T14:17:08.000Z | 2018-12-11T14:17:08.000Z | nimrud/minimal/features.py | grayhem/nimrud | 5ced7107f5183ed71fc98c2d9fec178cf74cd988 | [
"MIT"
] | null | null | null | nimrud/minimal/features.py | grayhem/nimrud | 5ced7107f5183ed71fc98c2d9fec178cf74cd988 | [
"MIT"
] | 3 | 2017-05-21T10:19:20.000Z | 2019-04-09T01:10:06.000Z | """
functions to be mapped over point neighborhoods
"""
import numpy as np
# for handling empty neighborhoods in centroid
np.seterr(invalid="raise")
def take(neighborhood_idx, search_space_cloud):
    """Gather the rows of the search-space cloud selected by the index array."""
    return np.take(search_space_cloud, neighborhood_idx, axis=0)
def centroid(query_point, neighborhood_points):
    """Distance from the query point to the neighborhood's geometric mean.

    Falls back to 0 when the mean is undefined (an empty neighborhood trips
    the module-level np.seterr(invalid="raise") guard).
    """
    try:
        distance = np.linalg.norm(query_point - neighborhood_points.mean(0))
    except FloatingPointError:
        return 0
    return distance
def population(neighborhood_points):
    """Number of points in the neighborhood (a single 1-D point counts as one)."""
    as_matrix = np.atleast_2d(neighborhood_points)
    return len(as_matrix)
def pca(neighborhood_points):
    """
    return the normalized variance of the first two principal components of the neighborhood
    """
    # covariance across coordinates (rows are observations)
    covariance = np.cov(neighborhood_points, rowvar=False)
    # note we use eigvalsh here because it guarantees ascending order of the eigenvalues
    try:
        eigvals = np.linalg.eigvalsh(covariance)
    except np.linalg.LinAlgError:
        # one point in neighborhood
        eigvals = np.zeros(3)
    except FloatingPointError:
        # no points in neighborhood; only raised because the module sets
        # np.seterr(invalid="raise") at import time
        eigvals = np.zeros(3)
    else:
        # normalize to sum to 1
        # NOTE(review): if the eigenvalue sum is 0 this division itself trips
        # the invalid-raise guard — confirm that case cannot reach here
        eigvals /= eigvals.sum()
    # return the two largest, in descending order (ascending array reversed,
    # dropping index 0 — the smallest)
    return eigvals[:0:-1]
| 25.677966 | 92 | 0.688449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 667 | 0.440264 |
948360ee32179bd011bde2331cc1861483d3487f | 4,002 | py | Python | Code Voorraad/Python stages/main.py | MarZwa/smart-kitchen | 2c313119116cd387564e39e63b7b602680ad5187 | [
"MIT"
] | null | null | null | Code Voorraad/Python stages/main.py | MarZwa/smart-kitchen | 2c313119116cd387564e39e63b7b602680ad5187 | [
"MIT"
] | null | null | null | Code Voorraad/Python stages/main.py | MarZwa/smart-kitchen | 2c313119116cd387564e39e63b7b602680ad5187 | [
"MIT"
] | null | null | null | import serial
import os
import json
from pprint import pprint
import mysql.connector
import time
import requests
# MySQL connection for the SmartKitchen database.
# NOTE(review): credentials are hard-coded — consider moving them to config/env.
mydb = mysql.connector.connect(
    host="localhost",
    user="max",
    passwd="maximum123",
    database="SmartKitchenDb"
)
# Two serial links: com carries the RFID reader / mode letters, com2 the barcode scanner.
com = serial.Serial('/dev/ttyUSB1', baudrate=9600, timeout=3.0)
com2 = serial.Serial('/dev/ttyUSB0', baudrate=9600, timeout=3.0)
# Session state: the logged-in user (via RFID) and the last resolved product.
barcode_scanned = False
user = ""
product_name = ""
rfid = ""
# Main polling loop: read both serial ports once per second and react.
while True:
    # Read one line from each serial connection.
    rsc = com.readline().strip()
    rsc2 = com2.readline().strip()
    rsc = rsc.decode('utf-8')
    # The Arduino prints 'UID tag :' before the RFID code; strip that prefix
    # and look the user up through the Laravel API.
    # NOTE(review): lstrip() strips a *character set*, not the literal prefix —
    # an RFID code starting with 'D', 'U', 'a', etc. would be truncated; prefer
    # slicing or str.removeprefix. Confirm the RFID alphabet before relying on this.
    if "UID tag :" in rsc:
        rfid = rsc.lstrip("UID tag :")
        try:
            # GET the user record for this RFID from the Laravel backend.
            r = requests.get(f"http://192.168.1.243:8000/api/rfid/{rfid}")
            r_text = str(r)
            print("RFID lezen: " + r_text)
            r.raise_for_status()
            resp = json.loads(r.text)
            rfid = resp[0]["rfid"]
            user = resp[0]["name"]
        except requests.HTTPError as e:
            print(e.response.text)
    # A barcode arrived while a user is logged in -> mark it pending.
    if rsc2 != b'' and user != '':
        barcode_scanned = True
    # Resolve the pending barcode against the OpenFoodFacts product database.
    if barcode_scanned == True and rsc2 != b'':
        barcode = str(rsc2, 'utf-8')
        try:
            # GET the product record from the OpenFoodFacts API.
            r = requests.get(f'https://world.openfoodfacts.org/api/v0/product/{barcode}.json')
            r_text = str(r)
            print("Gegevens uit OpenFoodFacts API opvragen: " + r_text)
            r.raise_for_status()
            resp = json.loads(r.text)
            # If the product exists, remember its name; otherwise report the
            # status_verbose ("product not found").
            if resp["status_verbose"] != "product not found":
                product_name = str(resp["product"]["product_name"])
                barcode_scanned = False
            else:
                print(resp["status_verbose"])
        except requests.HTTPError as e:
            print(e.response.text)
    # The Arduino sends a single letter to pick a destination list:
    # 'S' -> storage list, 'G' -> grocery list.
    if "S" in rsc and product_name != "":
        # POST the product/user pair; the Laravel API inserts it into the storage list.
        gooi_data = {'product_name':f'{product_name}', 'user_name':f'{user}'}
        d = requests.post(f"http://192.168.1.243:8000/api/rfid/{rfid}/create-storage", data=gooi_data)
        d_text = str(d)
        print("POST request naar de API: " + d_text)
    if "G" in rsc and product_name != "":
        # POST the product/user pair; the Laravel API inserts it into the grocery list.
        gooi_data = {'product_name':f'{product_name}', 'user_name':f'{user}'}
        d = requests.post(f"http://192.168.1.243:8000/api/rfid/{rfid}/create-grocery", data=gooi_data)
        d_text = str(d)
        print("POST request naar de API: " + d_text)
    time.sleep(1)
    mydb.commit()
# NOTE(review): unreachable — the loop above has no break.
mydb.close()
| 41.6875 | 140 | 0.618941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,064 | 0.515742 |
94847930b226d1efe5a63b43542e7df43b0d1b37 | 1,049 | py | Python | docs/sphinx-jupyter-widgets-cleanup.py | jinsanity07git/tmip-emat | ff816cf50f141825078bb276d6da46d92c5028a9 | [
"BSD-3-Clause"
] | null | null | null | docs/sphinx-jupyter-widgets-cleanup.py | jinsanity07git/tmip-emat | ff816cf50f141825078bb276d6da46d92c5028a9 | [
"BSD-3-Clause"
] | null | null | null | docs/sphinx-jupyter-widgets-cleanup.py | jinsanity07git/tmip-emat | ff816cf50f141825078bb276d6da46d92c5028a9 | [
"BSD-3-Clause"
] | null | null | null |
import argparse, os
# CLI: a single positional argument naming the Sphinx HTML output directory.
parser = argparse.ArgumentParser()
parser.add_argument('outdir', type=str, help='sphinx output directory')
args = parser.parse_args()
import re
# Matches the jupyter-widgets embed-amd.js <script> tag (any x.y.z version).
# NOTE(review): the pattern lives in a non-raw string, so \^ and \. rely on
# Python passing unknown escapes through — a raw string would be safer.
duplicate_tag = '''(<script src="https://unpkg.com/@jupyter-widgets/html-manager@\^[0-9]*\.[0-9]*\.[0-9]*/dist/embed-amd.js"></script>)'''
bad1 = re.compile(duplicate_tag)
# NOTE(review): bad2 appears unused by the functions below — verify before removing.
bad2 = re.compile(duplicate_tag+"(.*)"+duplicate_tag)
def dedupe_jupyter_widgets_manager(filename):
    """Remove duplicate jupyter-widgets embed-amd.js script tags, keeping one."""
    with open(filename, 'rt') as handle:
        markup = handle.read()
    hits = len(bad1.findall(markup))
    if hits > 1:
        # drop all but the last occurrence, then rewrite the file in place
        deduped = bad1.sub("", markup, count=hits - 1)
        print(f"FIXING [{hits}]:", filename)
        with open(filename, 'wt') as handle:
            handle.write(deduped)
    else:
        print(f"PASSED [{hits}]:", filename)
def fixing_walker(filename):
    """Apply the dedupe pass to every .html file under filename's directory."""
    root = os.path.dirname(os.path.abspath(filename))
    for folder, _subdirs, names in os.walk(root):
        for name in names:
            if name.endswith(".html"):
                dedupe_jupyter_widgets_manager(os.path.join(folder, name))
fixing_walker(args.outdir)
| 26.897436 | 138 | 0.705434 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.200191 |
9484fa5bc2d2fde4df65c4b2fe52f46809a2781f | 480 | py | Python | finance_manager/database/views/v_input_inc_other.py | jehboyes/finance_manager | d310a3a4c2c6b6e5564e2a83e3f355b23266b773 | [
"MIT"
] | null | null | null | finance_manager/database/views/v_input_inc_other.py | jehboyes/finance_manager | d310a3a4c2c6b6e5564e2a83e3f355b23266b773 | [
"MIT"
] | null | null | null | finance_manager/database/views/v_input_inc_other.py | jehboyes/finance_manager | d310a3a4c2c6b6e5564e2a83e3f355b23266b773 | [
"MIT"
] | null | null | null | from finance_manager.database.replaceable import ReplaceableObject as o
from finance_manager.database.views import account_description, p_list_string, p_sum_string
def _view():
    """Build the v_input_inc_other replaceable view over other-income inputs."""
    # The f-string splices in shared SQL fragments imported at module level
    # (account_description, p_list_string, p_sum_string).
    view = o("v_input_inc_other", f"""
    SELECT i.inc_id, i.account, a.description as account_name, {account_description}, i.description, i.set_id,
    {p_list_string}, {p_sum_string} as amount
    FROM input_inc_other i
    LEFT OUTER JOIN fs_account a ON i.account = a.account""")
    return view
94855633809fd0ec0bf2dcd84d688eb77e81384d | 1,524 | py | Python | examples/example_plugin/example_plugin/signals.py | susanhooks/nautobot | bc3ef5958f0d5decb0be763342c790f26ff1e20e | [
"Apache-2.0"
] | null | null | null | examples/example_plugin/example_plugin/signals.py | susanhooks/nautobot | bc3ef5958f0d5decb0be763342c790f26ff1e20e | [
"Apache-2.0"
] | null | null | null | examples/example_plugin/example_plugin/signals.py | susanhooks/nautobot | bc3ef5958f0d5decb0be763342c790f26ff1e20e | [
"Apache-2.0"
] | null | null | null | """Signal handlers for the example example_plugin."""
def nautobot_database_ready_callback(sender, *, apps, **kwargs):
    """
    Callback function triggered by the nautobot_database_ready signal when the Nautobot database is fully ready.
    This function is connected to that signal in ExamplePluginConfig.ready().
    A plugin could use this callback to add any records to the database that it requires for proper operation,
    such as:
    - Relationship definitions
    - CustomField definitions
    - Webhook definitions
    - etc.
    Args:
        sender (PluginConfig): The ExamplePluginConfig instance that was registered for this callback
        apps (django.apps.apps.Apps): Use this to look up model classes as needed
        **kwargs: See https://docs.djangoproject.com/en/3.1/ref/signals/#post-migrate for additional args
    """
    # Ensure that a desired custom field exists on the Site model
    ContentType = apps.get_model("contenttypes", "ContentType")
    Site = apps.get_model("dcim", "Site")
    CustomField = apps.get_model("extras", "CustomField")
    # imported here rather than at module level — presumably to avoid touching
    # the app registry before it is ready; confirm before hoisting
    from nautobot.extras.choices import CustomFieldTypeChoices
    # idempotent: update_or_create keyed on the field name, so repeated
    # migrations do not create duplicates
    field, _ = CustomField.objects.update_or_create(
        name="example-plugin-auto-custom-field",
        defaults={
            "type": CustomFieldTypeChoices.TYPE_TEXT,
            "label": "Example Plugin Automatically Added Custom Field",
            "default": "Default value",
        },
    )
    field.content_types.set([ContentType.objects.get_for_model(Site)])
94862dcf90686609ce3f0a6f42a3235da01595c7 | 8,988 | py | Python | scrape/scrape_md.py | jesse-peters/people | 7c7efbff65e7db4cc5409748f72f0ed98ae1be96 | [
"CC0-1.0"
] | null | null | null | scrape/scrape_md.py | jesse-peters/people | 7c7efbff65e7db4cc5409748f72f0ed98ae1be96 | [
"CC0-1.0"
] | null | null | null | scrape/scrape_md.py | jesse-peters/people | 7c7efbff65e7db4cc5409748f72f0ed98ae1be96 | [
"CC0-1.0"
] | null | null | null | import re
import lxml.html
import click
import scrapelib
from common import Person
def elem_to_str(item, inside=False):
    """Render an element as "<tag attr='v'> @ line N" for error messages.

    NOTE(review): the `inside` parameter is currently unused.
    """
    rendered_attrs = " ".join(f"{key}='{value}'" for key, value in item.attrib.items())
    return f"<{item.tag} {rendered_attrs}> @ line {item.sourceline}"
class XPath:
    """An XPath query bundled with expectations about how many nodes it yields."""

    def __init__(self, xpath, *, min_items=1, max_items=None, num_items=None):
        self.xpath = xpath
        self.min_items = min_items
        self.max_items = max_items
        self.num_items = num_items

    def match(self, element, *, min_items=None, max_items=None, num_items=None):
        """Run the query on element, enforcing per-call or instance count bounds."""
        found = element.xpath(self.xpath)
        # per-call overrides win over the instance-level defaults
        if num_items is None:
            num_items = self.num_items
        if max_items is None:
            max_items = self.max_items
        if min_items is None:
            min_items = self.min_items
        count = len(found)
        if num_items is not None and count != num_items:
            raise XPathError(
                f"{self.xpath} on {elem_to_str(element)} got {count}, "
                f"expected {num_items}"
            )
        if min_items is not None and count < min_items:
            raise XPathError(
                f"{self.xpath} on {elem_to_str(element)} got {count}, "
                f"expected at least {min_items}"
            )
        if max_items is not None and count > max_items:
            raise XPathError(
                f"{self.xpath} on {elem_to_str(element)} got {count}, "
                f"expected at most {max_items}"
            )
        return found

    def match_one(self, element):
        """Run the query expecting exactly one node; return it as a string."""
        return str(self.match(element, num_items=1)[0])
class NoSuchScraper(Exception):
    """Raised when no scraper exists for the requested chamber/session."""
    pass
class XPathError(ValueError):
    """Raised when an XPath query returns an unexpected number of items."""
    pass
# @attr.s
# class ContactDetail:
# note = attr.ib()
# voice = attr.ib()
# email =attr.ib()
# fax = attr.ib()
# address = attr.ib()
# @attr.s
# class Person:
# name = attr.ib()
# state = attr.ib()
# party = attr.ib()
# district = attr.ib()
# chamber = attr.ib()
# image = attr.ib(default=None)
# given_name = attr.ib(default=None)
# family_name = attr.ib(default=None)
# links = attr.ib(default=attr.Factory(list))
# sources = attr.ib(default=attr.Factory(list))
# capitol_office = attr.ib(default=None)
# district_office = attr.ib(default=None)
class Scraper(scrapelib.Scraper):
    """Drives Page objects: fetches raw data, extracts items, follows subpages."""
    def fetch_page_data(self, page):
        """GET page.url (via scrapelib) and hand the response to the page."""
        print(f"fetching {page.url} for {page.__class__.__name__}")
        data = self.get(page.url)
        page.set_raw_data(data)
    def augment_item(self, item, subpages):
        """Merge data from each subpage (built from the item) into the item dict."""
        for subpage_func in subpages:
            page = subpage_func(item)
            self.fetch_page_data(page)
            page_data = page.get_data()
            item.update(page_data)
        return item
    def scrape(self, chamber, session):
        """Generator: walk every start page, yield finished objects one by one."""
        for page in self.start_scrape(chamber, session):
            self.fetch_page_data(page)
            for item in page.get_data():
                if page.subpages:
                    item = self.augment_item(item, page.subpages)
                # dict items are intermediate; convert them to final objects
                if isinstance(item, dict):
                    item = self.to_object(item)
                yield item
    def to_object(self, item):
        """
        converts intermediate data (often in a dictionary) to a final object to be validated
        """
        return item
    def start_scrape(self, chamber, session):
        """
        yields one or more Page objects that will kick off the scrape.
        It may also raise a ValueError (TBD) when it does not have an appropriate entrypoint
        to scrape the requested data.
        """
        raise NotImplementedError()
class Page:
    """A single fetchable page; subclasses implement get_data."""

    def __init__(self, url):
        # the url (plus future fetch options, TBD) identifies what to retrieve
        self.url = url

    def set_raw_data(self, raw_data):
        """Store the raw response produced by fetching self.url."""
        self.raw_data = raw_data

    def get_data(self):
        """Return the data extracted from this page and this page alone."""
        raise NotImplementedError()
class HtmlPage:
    """Mixin that parses raw HTML into an lxml tree on self.root."""
    def set_raw_data(self, raw_data):
        # raw_data is expected to expose .content (requests-style response) —
        # TODO confirm against Scraper.fetch_page_data's return type
        self.raw_data = raw_data
        self.root = lxml.html.fromstring(raw_data.content)
        # resolve relative hrefs against the page's own url (set by subclasses)
        self.root.make_links_absolute(self.url)
class HtmlListPage(HtmlPage):
    """HTML page holding a list of items.

    Instead of writing get_data, subclasses supply an `xpath` selecting the
    item nodes and override `process_item` to transform each one.
    """
    xpath = None

    def get_data(self):
        if not self.xpath:
            raise NotImplementedError("must either provide xpath or override scrape")
        for node in self.xpath.match(self.root):
            yield self.process_item(node)

    def process_item(self, item):
        # identity by default; subclasses turn the matched node into data here
        return item
class MDPersonDetail(HtmlPage):
    """Detail page for a single Maryland legislator."""

    def __init__(self, url):
        self.url = url

    def parse_address_block(self, block):
        """Split a contact block into address lines, phone numbers and fax numbers.

        Lines are bucketed by state: everything before a line starting with
        "Phone" is address text; "Phone"/"Fax" lines switch the bucket (and are
        kept in it). Numbers are then extracted with a ddd-ddd-dddd pattern.
        """
        buckets = {"address": [], "phone": [], "fax": []}
        section = "address"
        for raw in block.splitlines():
            text = raw.strip()
            if not text:
                continue
            if text.startswith("Phone"):
                section = "phone"
            elif text.startswith("Fax"):
                section = "fax"
            buckets[section].append(text)
        phones = [m for text in buckets["phone"]
                  for m in re.findall(r"\d{3}-\d{3}-\d{4}", text)]
        faxes = [m for text in buckets["fax"]
                 for m in re.findall(r"\d{3}-\d{3}-\d{4}", text)]
        return {"address": "; ".join(buckets["address"]), "phones": phones, "faxes": faxes}

    def get_data(self):
        """Extract the member's name from the page heading (drop the first token)."""
        heading = XPath("//h2/text()").match_one(self.root)
        return dict(
            name=heading.split(" ", 1)[1],
        )
class MDPersonList(HtmlListPage):
    """Roster page listing every member of one chamber."""
    # one cell per member in the index grid
    xpath = XPath("//div[@id='myDIV']//div[@class='p-0 member-index-cell']")
    subpages = [lambda item: MDPersonDetail(item["link"])]
    def __init__(self, url):
        self.url = url
    def process_item(self, item):
        """Turn one roster cell into an intermediate dict for the scraper."""
        dd_text = XPath(".//dd/text()").match(item)
        # NOTE(review): positional indexing assumes a fixed <dd> order —
        # dd_text[2] presumably holds "District N" and dd_text[4] the party;
        # verify against the live page.
        district = dd_text[2].strip().split()[1]
        party = dd_text[4].strip()
        return dict(
            chamber="upper" if "senate" in self.url else "lower",
            image=XPath(".//img/@src").match_one(item),
            district=district,
            party=party,
            link=XPath(".//dd/a[1]/@href").match_one(item),
        )
class MDPersonScraper(Scraper):
    """Scrapes Maryland General Assembly member rosters."""
    def start_scrape(self, chamber, session):
        """ This function yields one or more Page objects that will kick off the scrape.
        It may also raise a ValueError (TBD) when it does not have an appropriate entrypoint
        to scrape the requested data.
        """
        # only the current session is available on the MGA site
        if session:
            raise NoSuchScraper("cannot scrape non-current sessions")
        if chamber == "upper":
            yield MDPersonList("http://mgaleg.maryland.gov/mgawebsite/Members/Index/senate")
        elif chamber == "lower":
            yield MDPersonList("http://mgaleg.maryland.gov/mgawebsite/Members/Index/house")
    def to_object(self, item):
        """Convert the intermediate dict from MDPersonList into a Person record."""
        p = Person(
            state="md",
            chamber=item["chamber"],
            name=item["name"],
            party=item["party"],
            image=item["image"],
            district=item["district"],
        )
        p.add_link(item["link"])
        p.add_source(item["link"])
        return p
@click.group()
def cli():
    """Entry point grouping the scraper's CLI commands."""
    pass
@cli.command()
@click.argument("class_name")
@click.argument("url")
def sample(class_name, url):
    """Fetch URL with the named Page class and print what it extracts."""
    # implementation is a stub, this will be able to accept dotted paths once implemented
    Cls = globals()[class_name]
    page = Cls(url)
    s = Scraper()
    s.fetch_page_data(page)
    print(page.get_data())
@cli.command()
@click.option("--chamber", multiple=True, default=["upper", "lower"])
@click.option("--session", default=None)
def scrape(chamber, session):
    """Scrape the requested chambers and save each person to incoming/md/people."""
    for ch in chamber:
        for item in MDPersonScraper().scrape(ch, session):
            item.save("incoming/md/people")
if __name__ == "__main__":
cli()
| 30.060201 | 92 | 0.584446 | 7,413 | 0.824766 | 1,273 | 0.141633 | 626 | 0.069648 | 0 | 0 | 3,214 | 0.357588 |
9488f4460b704b58bd7868b851740603de9d3660 | 6,962 | py | Python | overwatch/stats/ids.py | jonghwanhyeon/overwatch-stats | 37b19414dc746d0a9bd5a2cf38ee949ffdf62e25 | [
"MIT"
] | 12 | 2017-09-02T10:47:24.000Z | 2018-06-11T16:09:21.000Z | overwatch/stats/ids.py | jonghwanhyeon/overwatch-stats | 37b19414dc746d0a9bd5a2cf38ee949ffdf62e25 | [
"MIT"
] | 5 | 2018-04-01T09:30:39.000Z | 2021-01-14T09:52:04.000Z | overwatch/stats/ids.py | hyeon0145/overwatch-stats | 37b19414dc746d0a9bd5a2cf38ee949ffdf62e25 | [
"MIT"
] | 2 | 2017-09-19T15:11:49.000Z | 2018-03-30T20:02:50.000Z | OVERALL_CATEGORY_ID = '0x02E00000FFFFFFFF'
HERO_CATEGORY_IDS = {
'reaper': '0x02E0000000000002',
'tracer': '0x02E0000000000003',
'mercy': '0x02E0000000000004',
'hanzo': '0x02E0000000000005',
'torbjorn': '0x02E0000000000006',
'reinhardt': '0x02E0000000000007',
'pharah': '0x02E0000000000008',
'winston': '0x02E0000000000009',
'widowmaker': '0x02E000000000000A',
'bastion': '0x02E0000000000015',
'symmetra': '0x02E0000000000016',
'zenyatta': '0x02E0000000000020',
'genji': '0x02E0000000000029',
'roadhog': '0x02E0000000000040',
'mccree': '0x02E0000000000042',
'junkrat': '0x02E0000000000065',
'zarya': '0x02E0000000000068',
'soldier76': '0x02E000000000006E',
'lucio': '0x02E0000000000079',
'dva': '0x02E000000000007A',
'mei': '0x02E00000000000DD',
'sombra': '0x02E000000000012E',
'ana': '0x02E000000000013B',
'orisa': '0x02E000000000013E',
'doomfist': '0x02E000000000012F',
'moira': '0x02E00000000001A2',
'brigitte': '0x02E0000000000195',
'wrecking_ball': '0x02E00000000001CA',
}
INVERTED_HERO_CATEGORY_IDS = {category_id: hero for hero, category_id in HERO_CATEGORY_IDS.items()}
# Taken from https://github.com/SunDwarf/OWAPI/blob/master/owapi/prestige.py
LEVEL_IDS = {
# Bronze
'0x0250000000000918': 0,
'0x0250000000000919': 0,
'0x025000000000091A': 0,
'0x025000000000091B': 0,
'0x025000000000091C': 0,
'0x025000000000091D': 0,
'0x025000000000091E': 0,
'0x025000000000091F': 0,
'0x0250000000000920': 0,
'0x0250000000000921': 0,
'0x0250000000000922': 100,
'0x0250000000000924': 100,
'0x0250000000000925': 100,
'0x0250000000000926': 100,
'0x025000000000094C': 100,
'0x0250000000000927': 100,
'0x0250000000000928': 100,
'0x0250000000000929': 100,
'0x025000000000092B': 100,
'0x0250000000000950': 100,
'0x025000000000092A': 200,
'0x025000000000092C': 200,
'0x0250000000000937': 200,
'0x025000000000093B': 200,
'0x0250000000000933': 200,
'0x0250000000000923': 200,
'0x0250000000000944': 200,
'0x0250000000000948': 200,
'0x025000000000093F': 200,
'0x0250000000000951': 200,
'0x025000000000092D': 300,
'0x0250000000000930': 300,
'0x0250000000000934': 300,
'0x0250000000000938': 300,
'0x0250000000000940': 300,
'0x0250000000000949': 300,
'0x0250000000000952': 300,
'0x025000000000094D': 300,
'0x0250000000000945': 300,
'0x025000000000093C': 300,
'0x025000000000092E': 400,
'0x0250000000000931': 400,
'0x0250000000000935': 400,
'0x025000000000093D': 400,
'0x0250000000000946': 400,
'0x025000000000094A': 400,
'0x0250000000000953': 400,
'0x025000000000094E': 400,
'0x0250000000000939': 400,
'0x0250000000000941': 400,
'0x025000000000092F': 500,
'0x0250000000000932': 500,
'0x025000000000093E': 500,
'0x0250000000000936': 500,
'0x025000000000093A': 500,
'0x0250000000000942': 500,
'0x0250000000000947': 500,
'0x025000000000094F': 500,
'0x025000000000094B': 500,
'0x0250000000000954': 500,
# Silver
'0x0250000000000956': 600,
'0x025000000000095C': 600,
'0x025000000000095D': 600,
'0x025000000000095E': 600,
'0x025000000000095F': 600,
'0x0250000000000960': 600,
'0x0250000000000961': 600,
'0x0250000000000962': 600,
'0x0250000000000963': 600,
'0x0250000000000964': 600,
'0x0250000000000957': 700,
'0x0250000000000965': 700,
'0x0250000000000966': 700,
'0x0250000000000967': 700,
'0x0250000000000968': 700,
'0x0250000000000969': 700,
'0x025000000000096A': 700,
'0x025000000000096B': 700,
'0x025000000000096C': 700,
'0x025000000000096D': 700,
'0x0250000000000958': 800,
'0x025000000000096E': 800,
'0x025000000000096F': 800,
'0x0250000000000970': 800,
'0x0250000000000971': 800,
'0x0250000000000972': 800,
'0x0250000000000973': 800,
'0x0250000000000974': 800,
'0x0250000000000975': 800,
'0x0250000000000976': 800,
'0x0250000000000959': 900,
'0x0250000000000977': 900,
'0x0250000000000978': 900,
'0x0250000000000979': 900,
'0x025000000000097A': 900,
'0x025000000000097B': 900,
'0x025000000000097C': 900,
'0x025000000000097D': 900,
'0x025000000000097E': 900,
'0x025000000000097F': 900,
'0x025000000000095A': 1000,
'0x0250000000000980': 1000,
'0x0250000000000981': 1000,
'0x0250000000000982': 1000,
'0x0250000000000983': 1000,
'0x0250000000000984': 1000,
'0x0250000000000985': 1000,
'0x0250000000000986': 1000,
'0x0250000000000987': 1000,
'0x0250000000000988': 1000,
'0x025000000000095B': 1100,
'0x0250000000000989': 1100,
'0x025000000000098A': 1100,
'0x025000000000098B': 1100,
'0x025000000000098C': 1100,
'0x025000000000098D': 1100,
'0x025000000000098E': 1100,
'0x025000000000098F': 1100,
'0x0250000000000991': 1100,
'0x0250000000000990': 1100,
# Gold
'0x0250000000000992': 1200,
'0x0250000000000993': 1200,
'0x0250000000000994': 1200,
'0x0250000000000995': 1200,
'0x0250000000000996': 1200,
'0x0250000000000997': 1200,
'0x0250000000000998': 1200,
'0x0250000000000999': 1200,
'0x025000000000099A': 1200,
'0x025000000000099B': 1200,
'0x025000000000099C': 1300,
'0x025000000000099D': 1300,
'0x025000000000099E': 1300,
'0x025000000000099F': 1300,
'0x02500000000009A0': 1300,
'0x02500000000009A1': 1300,
'0x02500000000009A2': 1300,
'0x02500000000009A3': 1300,
'0x02500000000009A4': 1300,
'0x02500000000009A5': 1300,
'0x02500000000009A6': 1400,
'0x02500000000009A7': 1400,
'0x02500000000009A8': 1400,
'0x02500000000009A9': 1400,
'0x02500000000009AA': 1400,
'0x02500000000009AB': 1400,
'0x02500000000009AC': 1400,
'0x02500000000009AD': 1400,
'0x02500000000009AE': 1400,
'0x02500000000009AF': 1400,
'0x02500000000009B0': 1500,
'0x02500000000009B1': 1500,
'0x02500000000009B2': 1500,
'0x02500000000009B3': 1500,
'0x02500000000009B4': 1500,
'0x02500000000009B5': 1500,
'0x02500000000009B6': 1500,
'0x02500000000009B7': 1500,
'0x02500000000009B8': 1500,
'0x02500000000009B9': 1500,
'0x02500000000009BA': 1600,
'0x02500000000009BB': 1600,
'0x02500000000009BC': 1600,
'0x02500000000009BD': 1600,
'0x02500000000009BE': 1600,
'0x02500000000009BF': 1600,
'0x02500000000009C0': 1600,
'0x02500000000009C1': 1600,
'0x02500000000009C2': 1600,
'0x02500000000009C3': 1600,
'0x02500000000009C4': 1700,
'0x02500000000009C5': 1700,
'0x02500000000009C6': 1700,
'0x02500000000009C7': 1700,
'0x02500000000009C8': 1700,
'0x02500000000009C9': 1700,
'0x02500000000009CA': 1700,
'0x02500000000009CB': 1700,
'0x02500000000009CC': 1700,
'0x02500000000009CD': 1700,
}
| 31.502262 | 99 | 0.682275 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,517 | 0.648808 |
94895e02f21a684db845d0d46679e9537e49785c | 16,851 | py | Python | dataloaders/custom_transforms.py | JACKYLUO1991/DCBNet | b797584b66ad99fe984f58268befb12ec60ccfae | [
"MIT"
] | 6 | 2021-06-14T15:23:59.000Z | 2022-01-19T12:29:20.000Z | dataloaders/custom_transforms.py | JACKYLUO1991/DCBANet | b797584b66ad99fe984f58268befb12ec60ccfae | [
"MIT"
] | 1 | 2021-06-03T08:08:20.000Z | 2021-06-09T17:24:54.000Z | dataloaders/custom_transforms.py | JACKYLUO1991/DCBANet | b797584b66ad99fe984f58268befb12ec60ccfae | [
"MIT"
] | 1 | 2020-09-19T17:13:36.000Z | 2020-09-19T17:13:36.000Z | import torch
import math
import numbers
import random
import numpy as np
from PIL import Image, ImageOps
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
import cv2
from scipy import ndimage
def to_multilabel(pre_mask, classes=2):
    """Expand an integer label map into a multi-label volume.

    Background pixels stay [0, 0]; value 1 maps to [0, 1] (optic disc) and
    value 2 maps to [1, 1] (optic cup, which is also part of the disc).
    """
    rows, cols = pre_mask.shape[0], pre_mask.shape[1]
    multi = np.zeros((rows, cols, classes))
    multi[pre_mask == 1] = [0, 1]  # optic disc only
    multi[pre_mask == 2] = [1, 1]  # optic cup (contained in the disc)
    return multi
class add_salt_pepper_noise():
    """Randomly sprinkle salt (value 1) or pepper (value 0) pixels.

    With probability 0.25 salt noise is added, with probability 0.25 pepper
    noise is added, otherwise the image copy is returned unchanged.
    """

    def __call__(self, sample):
        noisy = sample['image'].copy()
        salt_vs_pepper = 0.2  # salt-to-pepper ratio
        amount = 0.004        # fraction of values to corrupt
        n_salt = np.ceil(amount * noisy.size * salt_vs_pepper)
        n_pepper = np.ceil(amount * noisy.size * (1.0 - salt_vs_pepper))

        roll = random.random()
        if roll > 0.75:
            # Salt: set random coordinates to 1 across all channels.
            coords = [np.random.randint(0, dim - 1, int(n_salt)) for dim in noisy.shape]
            noisy[coords[0], coords[1], :] = 1
        elif roll > 0.5:
            # Pepper: set random coordinates to 0 across all channels.
            coords = [np.random.randint(0, dim - 1, int(n_pepper)) for dim in noisy.shape]
            noisy[coords[0], coords[1], :] = 0

        return {'image': noisy,
                'label': sample['label'],
                'img_name': sample['img_name']}
class adjust_light():
    """Random gamma adjustment via a lookup table, applied with p=0.5."""

    def __call__(self, sample):
        if random.random() <= 0.5:
            # Leave the sample untouched half the time.
            return sample

        image = sample['image']
        gamma = random.random() * 3 + 0.5  # gamma in [0.5, 3.5)
        inv_gamma = 1.0 / gamma
        # 256-entry inverse-gamma lookup table over the 8-bit value range.
        lut = np.array([((i / 255.0) ** inv_gamma) * 255
                        for i in np.arange(0, 256)]).astype(np.uint8)
        image = cv2.LUT(np.array(image).astype(np.uint8), lut).astype(np.uint8)
        return {'image': image,
                'label': sample['label'],
                'img_name': sample['img_name']}
class eraser():
    """Random-erasing augmentation: blank out one random rectangle (p=0.5).

    The rectangle covers s_l..s_h of the image area with aspect ratio in
    [r_1, r_2], and is filled with a single random value in [v_l, v_h]
    (or per-pixel random values when pixel_level=True).
    """

    def __call__(self, sample, s_l=0.02, s_h=0.06, r_1=0.3, r_2=0.6, v_l=0, v_h=255, pixel_level=False):
        img = sample['image']
        img_h, img_w, img_c = img.shape

        if random.random() > 0.5:
            return sample  # skip erasing half the time

        # Rejection-sample a rectangle that fits inside the image.
        while True:
            area = np.random.uniform(s_l, s_h) * img_h * img_w
            ratio = np.random.uniform(r_1, r_2)
            rect_w = int(np.sqrt(area / ratio))
            rect_h = int(np.sqrt(area * ratio))
            left = np.random.randint(0, img_w)
            top = np.random.randint(0, img_h)
            if left + rect_w <= img_w and top + rect_h <= img_h:
                break

        if pixel_level:
            fill = np.random.uniform(v_l, v_h, (rect_h, rect_w, img_c))
        else:
            fill = np.random.uniform(v_l, v_h)
        img[top:top + rect_h, left:left + rect_w, :] = fill

        return {'image': img,
                'label': sample['label'],
                'img_name': sample['img_name']}
class elastic_transform():
    """
    Elastic deformation of images as described in [Simard2003]_.
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.

    Applied with probability 0.5; otherwise image/label are simply converted
    to numpy arrays.
    """

    def __call__(self, sample):
        img, lbl = sample['image'], sample['label']
        # Deformation strength/smoothness scale with the image extent.
        # NOTE(review): PIL's .size is (width, height) while the code below
        # treats it as array shape — confirm images are square.
        alpha = img.size[1] * 2
        sigma = img.size[1] * 0.08

        if random.random() <= 0.5:
            # No deformation: still normalize to ndarrays.
            return {'image': np.array(sample['image']),
                    'label': np.array(sample['label']),
                    'img_name': sample['img_name']}

        assert len(img.size) == 2
        rng = np.random.RandomState(None)
        shape = img.size[0:2]
        # Smooth random displacement fields, one per axis (dx drawn first).
        dx = gaussian_filter((rng.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
        dy = gaussian_filter((rng.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha

        grid_x, grid_y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
        indices = np.reshape(grid_x + dx, (-1, 1)), np.reshape(grid_y + dy, (-1, 1))

        warped_img = np.zeros([img.size[0], img.size[1], 3])
        for ch in range(3):
            warped_img[:, :, ch] = map_coordinates(
                np.array(img)[:, :, ch], indices, order=1).reshape(shape)
        warped_img = warped_img.astype(np.uint8)

        if lbl is not None:
            warped_lbl = np.zeros([img.size[0], img.size[1]])
            warped_lbl[:, :] = map_coordinates(
                np.array(lbl)[:, :], indices, order=1, mode='nearest').reshape(shape)
            warped_lbl = warped_lbl.astype(np.uint8)
        else:
            warped_lbl = None

        return {'image': warped_img,
                'label': warped_lbl,
                'img_name': sample['img_name']}
class RandomCrop(object):
    """Crop a random (th, tw) window, padding first when the image is too small.

    Padding fills the image with 0 and the mask with 255 (ignore label).
    """

    def __init__(self, size, padding=0):
        # Accept either a single number (square crop) or an (h, w) pair.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size  # (h, w)
        self.padding = padding

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        w, h = img.size

        if self.padding > 0 or w < self.size[0] or h < self.size[1]:
            # Pad enough (plus a 5px margin) so the crop window fits.
            # NOTE(review): width is compared against size[0] (the height
            # target) — confirm this is intended for non-square crops.
            pad = np.maximum(self.padding,
                             np.maximum((self.size[0] - w) // 2 + 5,
                                        (self.size[1] - h) // 2 + 5))
            img = ImageOps.expand(img, border=pad, fill=0)
            mask = ImageOps.expand(mask, border=pad, fill=255)

        assert img.width == mask.width
        assert img.height == mask.height

        w, h = img.size
        th, tw = self.size
        if w == tw and h == th:
            # Already the target size: nothing to crop.
            return {'image': img,
                    'label': mask,
                    'img_name': sample['img_name']}

        left = random.randint(0, w - tw)
        top = random.randint(0, h - th)
        img = img.crop((left, top, left + tw, top + th))
        mask = mask.crop((left, top, left + tw, top + th))
        return {'image': img,
                'label': mask,
                'img_name': sample['img_name']}
class CenterCrop(object):
    """Deterministically crop the central (th, tw) region of image and mask."""

    def __init__(self, size):
        # A single number means a square crop.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        w, h = img.size
        th, tw = self.size
        left = int(round((w - tw) / 2.))
        top = int(round((h - th) / 2.))
        box = (left, top, left + tw, top + th)
        return {'image': img.crop(box),
                'label': mask.crop(box),
                'img_name': sample['img_name']}
class RandomFlip(object):
    """Mirror image and mask horizontally and vertically, each with p=0.5."""

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        if random.random() < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        if random.random() < 0.5:
            img = img.transpose(Image.FLIP_TOP_BOTTOM)
            mask = mask.transpose(Image.FLIP_TOP_BOTTOM)
        return {'image': img,
                'label': mask,
                'img_name': sample['img_name']}
class FixedResize(object):
    """Resize image and mask to a fixed size.

    Bilinear interpolation for the image, nearest-neighbour for the label
    mask so class ids are not blended.
    """

    def __init__(self, size):
        # Callers pass (h, w); PIL's resize wants (w, h).
        self.size = tuple(reversed(size))

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        assert img.width == mask.width
        assert img.height == mask.height
        return {'image': img.resize(self.size, Image.BILINEAR),
                'label': mask.resize(self.size, Image.NEAREST),
                'img_name': sample['img_name']}
class Scale(object):
    """Resize to (oh, ow) unless the image already matches the target extent."""

    def __init__(self, size):
        # A single number means a square target.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        assert img.width == mask.width
        assert img.height == mask.height

        w, h = img.size
        # Already scaled: the longer side matches the corresponding target.
        if (w >= h and w == self.size[1]) or (h >= w and h == self.size[0]):
            return {'image': img,
                    'label': mask,
                    'img_name': sample['img_name']}

        oh, ow = self.size
        return {'image': img.resize((ow, oh), Image.BILINEAR),
                'label': mask.resize((ow, oh), Image.NEAREST),
                'img_name': sample['img_name']}
class RandomSizedCrop(object):
    """Random area/aspect-ratio crop resized to a square, a la Inception.

    Tries up to 10 random rectangles covering 45-100% of the image area
    with aspect ratio in [0.5, 2]; on failure falls back to Scale followed
    by CenterCrop.
    """

    def __init__(self, size):
        self.size = size

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        name = sample['img_name']
        assert img.width == mask.width
        assert img.height == mask.height

        for _ in range(10):
            area = img.size[0] * img.size[1]
            target_area = random.uniform(0.45, 1.0) * area
            aspect_ratio = random.uniform(0.5, 2)

            crop_w = int(round(math.sqrt(target_area * aspect_ratio)))
            crop_h = int(round(math.sqrt(target_area / aspect_ratio)))
            if random.random() < 0.5:
                crop_w, crop_h = crop_h, crop_w

            if crop_w <= img.size[0] and crop_h <= img.size[1]:
                x1 = random.randint(0, img.size[0] - crop_w)
                y1 = random.randint(0, img.size[1] - crop_h)

                img = img.crop((x1, y1, x1 + crop_w, y1 + crop_h))
                mask = mask.crop((x1, y1, x1 + crop_w, y1 + crop_h))
                assert (img.size == (crop_w, crop_h))

                img = img.resize((self.size, self.size), Image.BILINEAR)
                mask = mask.resize((self.size, self.size), Image.NEAREST)
                return {'image': img,
                        'label': mask,
                        'img_name': name}

        # Fallback: deterministic scale + center crop.
        scale = Scale(self.size)
        crop = CenterCrop(self.size)
        return crop(scale(sample))
class RandomRotate(object):
    """Rotate image and mask by a right angle (90, 180 or 270 degrees).

    The angle is drawn once at construction time; at call time the rotation
    is applied with probability 0.5.
    """

    def __init__(self, size=512):
        # BUG FIX: random.randint(1, 4) * 90 could also yield 360 (a no-op
        # rotation), contradicting the documented choices of (90, 180, 270).
        self.degree = random.choice((90, 180, 270))
        self.size = size  # kept for interface compatibility; unused here

    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        seed = random.random()
        if seed > 0.5:
            rotate_degree = self.degree
            # BILINEAR for the image; NEAREST for the mask so label ids
            # are not blended. expand=0 keeps the original canvas size.
            img = img.rotate(rotate_degree, Image.BILINEAR, expand=0)
            mask = mask.rotate(rotate_degree, Image.NEAREST, expand=255)
        sample = {'image': img, 'label': mask, 'img_name': sample['img_name']}
        return sample
class RandomScaleCrop(object):
    """Randomly rescale each axis by 0.5x-1.5x (p=0.5), then random-crop."""

    def __init__(self, size):
        self.size = size
        self.crop = RandomCrop(self.size)

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        name = sample['img_name']
        assert img.width == mask.width
        assert img.height == mask.height

        if random.random() > 0.5:
            # Width and height are scaled independently, so the aspect
            # ratio is jittered as well.
            new_w = int(random.uniform(0.5, 1.5) * img.size[0])
            new_h = int(random.uniform(0.5, 1.5) * img.size[1])
            img = img.resize((new_w, new_h), Image.BILINEAR)
            mask = mask.resize((new_w, new_h), Image.NEAREST)

        sample = {'image': img, 'label': mask, 'img_name': name}
        return self.crop(sample)
# class ResizeImg(object):
# def __init__(self, size):
# self.size = size
#
# def __call__(self, sample):
# img = sample['image']
# mask = sample['label']
# name = sample['img_name']
# assert img.width == mask.width
# assert img.height == mask.height
#
# img = img.resize((self.size, self.size))
#
# sample = {'image': img, 'label': mask, 'img_name': name}
#
# return sample
class Resize(object):
    """Resize both image and mask to a square (size x size) with PIL defaults."""

    def __init__(self, size):
        self.size = size

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        assert img.width == mask.width
        assert img.height == mask.height
        target = (self.size, self.size)
        return {'image': img.resize(target),
                'label': mask.resize(target),
                'img_name': sample['img_name']}
class Normalize(object):
    """Scale a [0, 255] image to [0, 1], then normalize channel-wise.

    Args:
        mean (tuple): per-channel means subtracted after the /255 scaling.
        std (tuple): per-channel standard deviations divided out last.
    """

    def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):
        self.mean = mean
        self.std = std

    def __call__(self, sample):
        img = np.array(sample['image']).astype(np.float32)
        mask = np.array(sample['label']).astype(np.float32)
        # In-place ops keep the float32 dtype.
        img /= 255.0
        img -= self.mean
        img /= self.std
        return {'image': img,
                'label': mask,
                'img_name': sample['img_name']}
class GetBoundary(object):
    """Extract a binary boundary band from a 2-channel (cup, disc) mask.

    Each channel is dilated and eroded by *width* iterations; pixels that
    appear in exactly one of the two results form a band around the contour.
    The cup and disc bands are merged into a single uint8 mask.
    """

    def __init__(self, width=5):
        self.width = width

    def __call__(self, mask):
        cup, disc = mask[:, :, 0], mask[:, :, 1]

        band = None
        for channel in (cup, disc):
            grown = ndimage.binary_dilation(channel, iterations=self.width).astype(channel.dtype)
            shrunk = ndimage.binary_erosion(channel, iterations=self.width).astype(channel.dtype)
            ring = grown + shrunk
            ring[ring == 2] = 0  # interior pixels occur in both -> drop them
            band = ring if band is None else band + ring

        return (band > 0).astype(np.uint8)
class Normalize_tf(object):
    """Map the image to [-1, 1] and build multi-label mask plus soft boundary.

    Args:
        mean (tuple): kept for interface compatibility; not used below.
        std (tuple): kept for interface compatibility; not used below.
    """

    def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):
        self.mean = mean
        self.std = std
        self.get_boundary = GetBoundary()

    def __call__(self, sample):
        img = np.array(sample['image']).astype(np.float32)
        raw = np.array(sample['label']).astype(np.uint8)
        name = sample['img_name']

        # Scale pixel values from [0, 255] to [-1, 1].
        img /= 127.5
        img -= 1.0

        # Quantize the grey-level label into three bands first...
        levels = np.zeros([raw.shape[0], raw.shape[1]])
        levels[raw > 200] = 255
        levels[(raw > 50) & (raw < 201)] = 128

        # ...then relabel: dark -> cup (2), bright -> background (0),
        # mid-grey -> disc (1).
        raw[levels == 0] = 2
        raw[levels == 255] = 0
        raw[levels == 128] = 1

        mask = to_multilabel(raw)  # [0,0] bg; [0,1] disc; [1,1] cup
        boundary = self.get_boundary(mask) * 255
        # Gaussian-blur the hard band into a soft (0, 1) boundary map.
        boundary = ndimage.gaussian_filter(boundary, sigma=3) / 255.0
        boundary = np.expand_dims(boundary, -1)

        return {'image': img,
                'map': mask,
                'boundary': boundary,
                'img_name': name
                }
# class Normalize_cityscapes(object):
# """Normalize a tensor image with mean and standard deviation.
# Args:
# mean (tuple): means for each channel.
# std (tuple): standard deviations for each channel.
# """
#
# def __init__(self, mean=(0., 0., 0.)):
# self.mean = mean
#
# def __call__(self, sample):
# img = np.array(sample['image']).astype(np.float32)
# mask = np.array(sample['label']).astype(np.float32)
# img -= self.mean
# img /= 255.0
#
# return {'image': img,
# 'label': mask,
# 'img_name': sample['img_name']}
class ToTensor(object):
    """Convert the ndarrays in a sample dict to CHW float torch tensors."""

    def __call__(self, sample):
        # numpy images are H x W x C; torch expects C x H x W.
        img = np.array(sample['image']).astype(np.float32).transpose((2, 0, 1))
        # Renamed from `map` to avoid shadowing the builtin.
        seg_map = np.array(sample['map']).astype(np.uint8).transpose((2, 0, 1))
        # BUG FIX: np.float was removed in NumPy 1.24; the builtin float
        # (float64) is the documented replacement.
        boundary = np.array(sample['boundary']).astype(float).transpose((2, 0, 1))

        return {'image': torch.from_numpy(img).float(),
                'map': torch.from_numpy(seg_map).float(),
                'boundary': torch.from_numpy(boundary).float(),
                'img_name': sample['img_name']}
| 32.656977 | 119 | 0.537891 | 15,197 | 0.901846 | 0 | 0 | 0 | 0 | 0 | 0 | 3,131 | 0.185805 |
94898fd94bd3fe0499bb64604b546c928d2c6aa0 | 2,325 | py | Python | tools/Polygraphy/tests/backend/trt/test_profile.py | KaliberAI/TensorRT | 34a167558058bb801176430d37d38a46328ab0d2 | [
"Apache-2.0"
] | 5,249 | 2019-06-17T17:20:34.000Z | 2022-03-31T17:56:05.000Z | tools/Polygraphy/tests/backend/trt/test_profile.py | KaliberAI/TensorRT | 34a167558058bb801176430d37d38a46328ab0d2 | [
"Apache-2.0"
] | 1,721 | 2019-06-17T18:13:29.000Z | 2022-03-31T16:09:53.000Z | tools/Polygraphy/tests/backend/trt/test_profile.py | KaliberAI/TensorRT | 34a167558058bb801176430d37d38a46328ab0d2 | [
"Apache-2.0"
] | 1,414 | 2019-06-18T04:01:17.000Z | 2022-03-31T09:16:53.000Z | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import tensorrt as trt
from polygraphy import mod
from polygraphy.backend.trt import Profile, network_from_onnx_bytes
from tests.models.meta import ONNX_MODELS
@pytest.fixture(scope="session")
def dynamic_identity_network():
    # Session-scoped fixture: parse the dynamic-shape identity ONNX model
    # once and hand (builder, network, parser) to every test that needs it.
    builder, network, parser = network_from_onnx_bytes(ONNX_MODELS["dynamic_identity"].loader)
    with builder, network, parser:
        # Yield inside the context managers so the TensorRT objects are
        # released when the test session finishes.
        yield builder, network, parser
class TestProfile(object):
    """Unit tests for the polygraphy TensorRT optimization Profile wrapper."""

    def test_can_add(self):
        profile = Profile()
        min, opt, max = (1, 1), (2, 2), (4, 4)
        # add() is chainable: it must return the profile itself.
        assert profile.add("input", min=min, opt=opt, max=max) is profile
        shape_tuple = profile["input"]
        assert shape_tuple.min == min
        assert shape_tuple.opt == opt
        assert shape_tuple.max == max

    @pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
    def test_fill_defaults_does_not_overwrite(self, dynamic_identity_network):
        _, network, _ = dynamic_identity_network
        profile = Profile().add("X", (1, 1, 1, 1), (1, 1, 2, 2), (1, 1, 3, 3))
        # BUG FIX: this line previously had no `assert`, so the identity
        # check was silently discarded.
        assert profile.fill_defaults(network) is profile
        # Pre-set shapes must survive fill_defaults untouched.
        assert profile["X"].min == (1, 1, 1, 1)
        assert profile["X"].opt == (1, 1, 2, 2)
        assert profile["X"].max == (1, 1, 3, 3)

    @pytest.mark.skipif(mod.version(trt.__version__) < mod.version("7.0"), reason="Unsupported for TRT 6")
    def test_to_trt(self, dynamic_identity_network):
        builder, network, _ = dynamic_identity_network
        profile = Profile().add("X", (1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4))
        trt_profile = profile.to_trt(builder, network)
        # BUG FIX: the comparison result was previously discarded. Normalize
        # the returned Dims objects to tuples so the assertion is meaningful.
        shapes = tuple(tuple(dims) for dims in trt_profile.get_shape("X"))
        assert shapes == ((1, 2, 1, 1), (1, 2, 2, 2), (1, 2, 4, 4))
| 40.086207 | 106 | 0.673118 | 1,298 | 0.55828 | 200 | 0.086022 | 1,163 | 0.500215 | 0 | 0 | 713 | 0.306667 |
948bbd53e9c46defc98a1d5869983748d03b9a4f | 103 | py | Python | model.py | McHacks-2018/Retro-Reddit | a1620a5c374d535bb95151de466100234367451b | [
"MIT"
] | null | null | null | model.py | McHacks-2018/Retro-Reddit | a1620a5c374d535bb95151de466100234367451b | [
"MIT"
] | 1 | 2019-10-22T02:52:07.000Z | 2019-10-22T02:52:07.000Z | model.py | McHacks-2018/Retro-Reddit | a1620a5c374d535bb95151de466100234367451b | [
"MIT"
class Section:
    """Interface for a displayable, nestable content section."""

    def get_display_text(self):
        """Return the text shown for this section (stub: returns None)."""

    def get_children(self):
        """Return this section's child sections (stub: returns None)."""
| 12.875 | 31 | 0.601942 | 102 | 0.990291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
948d349dd45abeaca4dd80acac76c13c402ebd73 | 1,611 | py | Python | silvapermaculture/search.py | Walachul/SilvaPermaculture | 47c8579f14b22b33ba8b27c03ce244dc2c7b17e8 | [
"MIT"
] | null | null | null | silvapermaculture/search.py | Walachul/SilvaPermaculture | 47c8579f14b22b33ba8b27c03ce244dc2c7b17e8 | [
"MIT"
] | 5 | 2021-03-18T23:28:19.000Z | 2022-03-11T23:45:03.000Z | silvapermaculture/search.py | Walachul/SilvaPermaculture | 47c8579f14b22b33ba8b27c03ce244dc2c7b17e8 | [
"MIT"
] | 1 | 2019-09-08T14:45:13.000Z | 2019-09-08T14:45:13.000Z | from flask import current_app
#This module is created for interaction with the Elasticsearch index
def add_element_index(index, model):
    """Index a SQLAlchemy *model* instance in Elasticsearch under *index*.

    The fields listed in the model's ``__searchit__`` attribute make up the
    document payload. A no-op when no Elasticsearch server is configured,
    so the application keeps running without one.
    """
    if not current_app.elasticsearch:
        return
    payload = {field: getattr(model, field) for field in model.__searchit__}
    current_app.elasticsearch.index(index=index, doc_type=index, id=model.id,
                                    body=payload)
def remove_element_from_index(index, model):
    """Delete the document for *model* from the Elasticsearch *index*.

    Silently does nothing when Elasticsearch is not configured.
    """
    if current_app.elasticsearch:
        current_app.elasticsearch.delete(index=index, doc_type=index, id=model.id)
def search_index(index, query, page, per_page):
    """Full-text search over every indexed field of *index*.

    Returns a tuple ``(ids, total)``: the matching element ids for the
    requested page and the overall hit count. Returns ``([], 0)`` when
    Elasticsearch is not configured.
    """
    if not current_app.elasticsearch:
        return [], 0
    hits = current_app.elasticsearch.search(
        index=index, doc_type=index,
        body={'query': {'multi_match': {'query': query, 'fields': ['*']}},
              'from': (page - 1) * per_page, 'size': per_page})
    # Collect the ids of the elements found on this page.
    matching_ids = [int(hit['_id']) for hit in hits['hits']['hits']]
    return matching_ids, hits['hits']['total']
| 47.382353 | 101 | 0.692117 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 678 | 0.420857 |
948d415b98380c188931709b7710a872ebb746cb | 263 | py | Python | client.py | mguidon/socket-logger | ff74a7974a2965794a4b90068b9ecb4aedf95346 | [
"MIT"
] | null | null | null | client.py | mguidon/socket-logger | ff74a7974a2965794a4b90068b9ecb4aedf95346 | [
"MIT"
] | null | null | null | client.py | mguidon/socket-logger | ff74a7974a2965794a4b90068b9ecb4aedf95346 | [
"MIT"
] | null | null | null | import socketio
# Socket.IO client that connects to a local logging server and echoes
# every "log" event it receives to stdout.
sio = socketio.Client()

@sio.event
def connect():
    # Fired once the Socket.IO handshake with the server succeeds.
    print('connection established')

@sio.event
def log(data):
    # The server pushes log lines via the "log" event.
    print(data)

@sio.event
def disconnect():
    # Fired when the server closes the connection or the network drops.
    print('disconnected from server')

# Connect to the local server; sio.wait() below blocks processing events.
sio.connect('http://localhost:8080')
sio.wait() | 14.611111 | 37 | 0.69962 | 0 | 0 | 0 | 0 | 168 | 0.638783 | 0 | 0 | 73 | 0.277567 |
8469536286b19dfddcc5622a89710f4c22502799 | 3,388 | py | Python | interview/chainlink/problems.py | topliceanu/learn | 6f4bb19fffb7b2baa88f047409075d5765ee6308 | [
"MIT"
] | 24 | 2016-03-21T07:53:54.000Z | 2020-06-29T12:16:36.000Z | interview/chainlink/problems.py | topliceanu/learn | 6f4bb19fffb7b2baa88f047409075d5765ee6308 | [
"MIT"
] | 5 | 2015-09-29T17:12:36.000Z | 2020-03-26T20:51:56.000Z | interview/chainlink/problems.py | topliceanu/learn | 6f4bb19fffb7b2baa88f047409075d5765ee6308 | [
"MIT"
def solution(prices):
    """Total cost given a price sequence: pay the first price, then pay every
    positive difference over the cheapest price seen so far.

    Returns 0 for an empty sequence.
    """
    if not prices:
        return 0
    # We are always paying the first price.
    total = prices[0]
    cheapest = prices[0]
    for price in prices[1:]:
        if price > cheapest:
            total += price - cheapest
        if price < cheapest:
            cheapest = price
    return total
def num_divisors(number, keys):
    """Count how many entries of *keys* evenly divide *number*."""
    return sum(1 for key in keys if number % key == 0)
def encryptionValidity(instructionCount, validityPeriod, keys):
    """Return (is_crackable, encryption_strength) for a set of keys.

    Strength is the largest number of divisors any key has within *keys*,
    times 10**5. The encryption counts as crackable (1) unless the time to
    crack (strength / instructionCount) exceeds the validity period.
    """
    best = float('-inf')
    for key in keys:
        best = max(best, num_divisors(key, keys))
    encryption_strength = best * pow(10, 5)
    is_crackable = 0 if encryption_strength / instructionCount > validityPeriod else 1
    return is_crackable, encryption_strength
# Write a function that accepts as an argument a string of addition/subtraction operations.
# The function should return the result of the operations as an integer
# ex: calculate("1 - 2 + 3") => 2
def apply_op(op, a, b):
    """Apply a named binary op ('plus' or 'minus') to a and b.

    Returns None for any other op name, matching the original fall-through.
    """
    return {'plus': a + b, 'minus': a - b}.get(op)
def calculate(expression):
    """Evaluate a space-separated add/subtract expression, e.g. "1 - 2 + 3" -> 2."""
    acc = 0
    pending = 'plus'  # implicit leading '+' so the first number is added
    for tok in expression.split(" "):
        if not tok:
            continue
        if tok.isdigit():
            acc = apply_op(pending, acc, int(tok))
        if tok == '+':
            pending = 'plus'
        if tok == '-':
            pending = 'minus'
    return acc
# Next, write a function that accepts as an argument a string of addition/subtraction
# operations and also includes parentheses to indicate order of operations. The function
# should return the result of the operations as an integer
# ex: calculate("1 - (2 + 3)") => -4
def parse_number(expression):
    """Split a leading run of digits off *expression*.

    Returns (digits, remainder); digits is "" when the string does not
    start with a digit.
    """
    end = 0
    while end < len(expression) and str.isdigit(expression[end]):
        end += 1
    return expression[:end], expression[end:]
def tokenize(expression):
    """Convert an arithmetic expression string into a list of tokens.

    Numbers become ints; parentheses and +/- stay as one-char strings;
    spaces are skipped. An unrecognized character makes the whole result
    None, matching the original fall-through behavior.
    """
    if not expression:
        return []
    first, remainder = expression[0], expression[1:]
    if first == ' ':
        return tokenize(remainder)
    if str.isdigit(first):
        digits, leftover = parse_number(expression)
        return [int(digits)] + tokenize(leftover)
    if first in ('(', ')', '+', '-'):
        return [first] + tokenize(remainder)
def calculate_two_rec(tokens, result_so_far, last_op):
    """Recursively fold a token stream into a value, honoring parentheses.

    Returns (value, unconsumed_tokens); a ')' token stops the current level
    and hands the remaining tokens back to the caller.
    """
    if not tokens:
        return result_so_far, []
    head, tail = tokens[0], tokens[1:]
    if isinstance(head, int):
        return calculate_two_rec(tail, apply_op(last_op, result_so_far, head), last_op)
    if head == '+':
        return calculate_two_rec(tail, result_so_far, 'plus')
    if head == '-':
        return calculate_two_rec(tail, result_so_far, 'minus')
    if head == '(':
        # Evaluate the parenthesized sub-expression, then fold its value in.
        inner_value, after_paren = calculate_two_rec(tail, 0, 'plus')
        return calculate_two_rec(after_paren,
                                 apply_op(last_op, result_so_far, inner_value),
                                 'plus')
    if head == ')':
        return result_so_far, tail
def calculate_two(expression):
    """Evaluate an expression with +, - and parentheses, e.g. "1 - (2 + 3)" -> -4."""
    value, _ = calculate_two_rec(tokenize(expression), 0, 'plus')
    return value
| 32.266667 | 108 | 0.619835 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 613 | 0.180933 |
84699eaf44ad2cc9577c8cecad8b2d9e9e2a0a25 | 355 | py | Python | example.py | badmutex/CoPipes | b329a381f0dbaef238b9e36c7103fd72bee80138 | [
"BSD-2-Clause"
] | 1 | 2020-11-27T23:18:58.000Z | 2020-11-27T23:18:58.000Z | example.py | badi/CoPipes | b329a381f0dbaef238b9e36c7103fd72bee80138 | [
"BSD-2-Clause"
] | null | null | null | example.py | badi/CoPipes | b329a381f0dbaef238b9e36c7103fd72bee80138 | [
"BSD-2-Clause"
] | null | null | null | from copipes import coroutine, pipeline, null
from copipes.macros.pipe import pipe
@pipe
def putStrLn():
    """Echo each item to stdout, then forward it downstream unchanged."""
    [x]  # copipes pipe-macro binding: x receives the next incoming item
    # Python 2 print statement; the @pipe macro rewrites this body.
    print x
    send(x)
@pipe
def replicate(n):
    """Forward each incoming item downstream n times."""
    [x]  # copipes pipe-macro binding: x receives the next incoming item
    for i in xrange(n):
        send(x)
# Demo pipeline: print each of 0..9, triplicate every item, print again.
if __name__ == '__main__':
    pipeline(
        putStrLn,
        replicate.params(3),
        putStrLn,
    ).feed(range(10))
| 14.791667 | 45 | 0.574648 | 0 | 0 | 0 | 0 | 138 | 0.388732 | 0 | 0 | 19 | 0.053521 |
846a34f97a5d99256a90a1f11c153e6cfb2340e9 | 52 | py | Python | utils.py | whoamins/Ctftime-TelegramBot | e2d309ecdcc4c2e3a484a735771e2edf9cdb2554 | [
"MIT"
] | 2 | 2021-12-17T21:37:45.000Z | 2022-02-05T18:30:55.000Z | utils.py | whoamins/Ctftime-parser | 2760355b5ea41202c50cf4218ff06d79c8ca1d59 | [
"MIT"
] | null | null | null | utils.py | whoamins/Ctftime-parser | 2760355b5ea41202c50cf4218ff06d79c8ca1d59 | [
"MIT"
def extract_arg(arg):
    """Return the first argument after the command in a message string.

    For ``"/cmd foo bar"`` this returns ``"foo"``. Returns ``None`` when
    the message carries no argument (previously this raised IndexError).
    """
    args = arg.split()[1:]
    return args[0] if args else None
| 17.333333 | 29 | 0.634615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
846e06126ad4220ba24d719a68f1fbc8b1349f7e | 1,693 | py | Python | tests/attention/test_attention_layer.py | SamuelCahyawijaya/fast-transformers | 6ae8ed4cc50bd037968db4f5062e4d328aae73fe | [
"MIT"
] | 1,171 | 2020-06-30T01:57:19.000Z | 2022-03-31T15:11:25.000Z | tests/attention/test_attention_layer.py | SamuelCahyawijaya/fast-transformers | 6ae8ed4cc50bd037968db4f5062e4d328aae73fe | [
"MIT"
] | 105 | 2020-06-30T14:40:56.000Z | 2022-02-08T16:31:45.000Z | tests/attention/test_attention_layer.py | SamuelCahyawijaya/fast-transformers | 6ae8ed4cc50bd037968db4f5062e4d328aae73fe | [
"MIT"
] | 127 | 2020-06-26T09:07:48.000Z | 2022-03-25T06:46:37.000Z | #
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
# Apoorv Vyas <avyas@idiap.ch>
#
import unittest
import torch
from fast_transformers.attention.attention_layer import AttentionLayer
class TestAttentionLayer(unittest.TestCase):
def _assert_sizes_attention(self, qshape, kshape, vshape):
def inner(q, k, v, m1, m2, m3):
self.assertEqual(q.shape, qshape)
self.assertEqual(k.shape, kshape)
self.assertEqual(v.shape, vshape)
N, L, H, E = q.shape
_, S, _, D = v.shape
return v.new_zeros((N, L, H, D))
return inner
def test_forward(self):
att = AttentionLayer(
self._assert_sizes_attention(
(10, 5, 4, 25),
(10, 8, 4, 25),
(10, 8, 4, 25)
),
100,
4
)
v = att(
torch.rand(10, 5, 100),
torch.rand(10, 8, 100),
torch.rand(10, 8, 100),
None, None, None
)
self.assertEqual(v.shape, (10, 5, 100))
att = AttentionLayer(
self._assert_sizes_attention(
(10, 5, 4, 32),
(10, 8, 4, 32),
(10, 8, 4, 64)
),
100,
4,
d_keys=32,
d_values=64
)
v = att(
torch.rand(10, 5, 100),
torch.rand(10, 8, 100),
torch.rand(10, 8, 100),
None, None, None
)
self.assertEqual(v.shape, (10, 5, 100))
if __name__ == "__main__":
unittest.main()
| 25.651515 | 70 | 0.489663 | 1,365 | 0.806261 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.104548 |
846eacdff1b10d8e9e80d22c01185eced8ed622a | 54 | py | Python | api_permission/__init__.py | jayvdb/django-api-permission | ba771314b4a9c5c2edc5161b423e257012be8922 | [
"MIT"
] | 3 | 2020-06-01T17:06:16.000Z | 2021-12-12T05:40:04.000Z | api_permission/__init__.py | jayvdb/django-api-permission | ba771314b4a9c5c2edc5161b423e257012be8922 | [
"MIT"
] | 4 | 2020-06-02T06:46:06.000Z | 2020-06-04T17:38:57.000Z | api_permission/__init__.py | tcitry/django-api-permission | 371bd5f2989edf906c91a0e5af60a4da11d0da42 | [
"MIT"
] | null | null | null | default_app_config = 'api_permission.apps.AuthConfig'
| 27 | 53 | 0.851852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.592593 |