max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
kotti/views/util.py | IonicaBizauKitchen/Kotti | 191 | 12765151 | import hashlib
from collections import defaultdict
from datetime import datetime
from urllib.parse import urlencode
from babel.dates import format_date
from babel.dates import format_datetime
from babel.dates import format_time
from babel.numbers import format_currency
from pyramid.decorator import reify
from pyramid.i18n import get_locale_name
from pyramid.interfaces import ILocation
from pyramid.location import inside
from pyramid.location import lineage
from pyramid.renderers import get_renderer
from pyramid.renderers import render
from pyramid.settings import asbool
from sqlalchemy import and_
from sqlalchemy import not_
from sqlalchemy import or_
from kotti import DBSession
from kotti import get_settings
from kotti.events import objectevent_listeners
from kotti.interfaces import INavigationRoot
from kotti.resources import Content
from kotti.resources import Document
from kotti.resources import Node
from kotti.resources import Tag
from kotti.resources import TagsToContents
from kotti.resources import get_root
from kotti.sanitizers import sanitize
from kotti.security import view_permitted
from kotti.util import TemplateStructure
from kotti.util import render_view
from kotti.views.site_setup import CONTROL_PANEL_LINKS
from kotti.views.slots import slot_events
class SettingHasValuePredicate:
    """View predicate: match when a boolean registry setting equals a value.

    ``val`` is a ``(setting_name, expected_bool)`` pair supplied through the
    ``if_setting_has_value=`` view-configuration argument.
    """

    def __init__(self, val, config):
        name, expected = val
        if not isinstance(expected, bool):
            raise ValueError("Only boolean values supported")
        self.name = name
        self.value = expected

    def text(self):
        # Human-readable description used by Pyramid's introspection tools.
        return "if_setting_has_value = {} == {}".format(self.name, self.value)

    phash = text

    def __call__(self, context, request):
        setting = request.registry.settings[self.name]
        return asbool(setting) == self.value
class RootOnlyPredicate:
    """View predicate: match only when (or only unless) the context is the
    request's root object, depending on the configured boolean ``val``."""

    def __init__(self, val, config):
        self.val = val

    def text(self):
        # Description shown by Pyramid's introspection tools.
        return "root_only = {}".format(self.val)

    phash = text

    def __call__(self, context, request):
        is_root = context is request.root
        return is_root == self.val
def template_api(context, request, **kwargs):
    """Return the ``api`` helper object for templates.

    Instantiates whatever class is configured as the first entry of the
    ``kotti.templates.api`` setting (``TemplateAPI`` by default).
    """
    return get_settings()["kotti.templates.api"][0](context, request, **kwargs)
def add_renderer_globals(event):
    """Renderer-globals subscriber: inject ``api`` for non-JSON renderers.

    Reuses ``request.template_api`` when an API object was already created
    for this request; otherwise builds a fresh one from the event's context.
    """
    if event.get("renderer_name") != "json":
        request = event["request"]
        api = getattr(request, "template_api", None)
        if api is None and request is not None:
            api = template_api(event["context"], event["request"])
        event["api"] = api
class Slots:
    """Lazy accessor for slot content (used as ``api.slots.<slot_name>``).

    Attribute access finds the slot event type with the matching name,
    notifies its listeners, collects the returned snippets into a list and
    caches that list on the instance for subsequent lookups.
    """
    def __init__(self, context, request):
        self.context = context
        self.request = request
    def __getattr__(self, key):
        # Only invoked when `key` is not already cached on the instance.
        for event_type in slot_events:
            if event_type.name == key:
                break
        else:
            raise AttributeError(key)
        value = []
        event = event_type(self.context, self.request)
        for snippet in objectevent_listeners(event):
            if snippet is not None:
                if isinstance(snippet, list):
                    value.extend(snippet)
                else:
                    value.append(snippet)
        # Cache so __getattr__ is not triggered again for this slot name.
        setattr(self, key, value)
        return value
class TemplateAPI:
    """This implements the ``api`` object that's passed to all templates.
    Use dict-access as a shortcut to retrieve template macros from templates.
    """
    # Instead of overriding these, consider using the
    # ``kotti.overrides`` variable.
    BARE_MASTER = "kotti:templates/master-bare.pt"
    VIEW_MASTER = "kotti:templates/view/master.pt"
    EDIT_MASTER = "kotti:templates/edit/master.pt"
    SITE_SETUP_MASTER = "kotti:templates/site-setup/master.pt"
    # Extra CSS class(es) for the <body> element; may be overridden per view.
    body_css_class = ""
    def __init__(self, context, request, bare=None, **kwargs):
        self.context, self.request = context, request
        # Register ourselves on the request so other code can reuse this API.
        if getattr(request, "template_api", None) is None:
            request.template_api = self
        self.S = get_settings()
        if request.is_xhr and bare is None:
            bare = True # use bare template that renders just the content area
        self.bare = bare
        self.slots = Slots(context, request)
        # Any extra keyword arguments become attributes of the API object.
        self.__dict__.update(kwargs)
    @staticmethod
    def is_location(context):
        """Does `context` implement :class:`pyramid.interfaces.ILocation`?
        :param context: The context.
        :type context: kotti.interfaces.INode
        :rtype: bool
        :returns: True if the context object implements
                  :class:`pyramid.interfaces.ILocation`.
        """
        return ILocation.providedBy(context)
    @reify
    def edit_needed(self):
        """Trigger the fanstatic resources needed for edit views, if any."""
        if "kotti.fanstatic.edit_needed" in self.S:
            return [r.need() for r in self.S["kotti.fanstatic.edit_needed"]]
    @reify
    def view_needed(self):
        """Trigger the fanstatic resources needed for public views, if any."""
        if "kotti.fanstatic.view_needed" in self.S:
            return [r.need() for r in self.S["kotti.fanstatic.view_needed"]]
    def macro(self, asset_spec, macro_name="main"):
        """Return the named macro from the given template asset.

        When rendering "bare" (e.g. for XHR requests), the three master
        templates are swapped for the bare master so that only the content
        area is rendered.
        """
        if self.bare and asset_spec in (
            self.VIEW_MASTER,
            self.EDIT_MASTER,
            self.SITE_SETUP_MASTER,
        ):
            asset_spec = self.BARE_MASTER
        return get_renderer(asset_spec).implementation().macros[macro_name]
    @reify
    def site_title(self):
        """ The site title.
        :result: Value of the ``kotti.site_title`` setting (if specified) or
                 the root item's ``title`` attribute.
        :rtype: str
        """
        value = get_settings().get("kotti.site_title")
        if not value:
            value = self.root.title
        return value
    @reify
    def page_title(self):
        """
        Title for the current page as used in the ``<head>`` section of the
        default ``master.pt`` template.
        :result: '[Human readable view title ]``context.title`` -
                 :meth:`~TemplateAPI.site_title`''
        :rtype: str
        """
        view_title = self.request.view_name.replace("_", " ").title()
        if view_title:
            view_title += " "
        view_title += self.context.title
        return f"{view_title} - {self.site_title}"
    def url(self, context=None, *elements, **kwargs):
        """
        URL construction helper. Just a convenience wrapper for
        :func:`pyramid.request.resource_url` with the same signature. If
        ``context`` is ``None`` the current context is passed to
        ``resource_url``.
        """
        if context is None:
            context = self.context
        if not ILocation.providedBy(context):
            # Non-location objects have no resource path; fall back to the
            # current request URL.
            return self.request.url
        return self.request.resource_url(context, *elements, **kwargs)
    @reify
    def root(self):
        """
        The site root.
        :result: The root object of the site.
        :rtype: :class:`kotti.resources.Node`
        """
        if ILocation.providedBy(self.context):
            return self.lineage[-1]
        else:
            return get_root()
    @reify
    def navigation_root(self):
        """
        The root node for the navigation.
        :result: Nearest node in the :meth:`lineage` that provides
                 :class:`kotti.interfaces.INavigationRoot` or :meth:`root` if
                 no node provides that interface.
        :rtype: :class:`kotti.resources.Node`
        """
        for o in self.lineage:
            if INavigationRoot.providedBy(o):
                return o
        return self.root
    @reify
    def lineage(self):
        """
        Lineage from current context to the root node.
        :result: List of nodes.
        :rtype: list of :class:`kotti.resources.Node`
        """
        return list(lineage(self.context))
    @reify
    def breadcrumbs(self):
        """
        List of nodes from the :meth:`navigation_root` to the context.
        :result: List of nodes.
        :rtype: list of :class:`kotti.resources.Node`
        """
        breadcrumbs = self.lineage
        if self.root != self.navigation_root:
            # Truncate the lineage at the navigation root before reversing.
            index = breadcrumbs.index(self.navigation_root)
            breadcrumbs = breadcrumbs[: index + 1]
        return reversed(breadcrumbs)
    def has_permission(self, permission, context=None):
        """ Convenience wrapper for :func:`pyramid.security.has_permission`
        with the same signature. If ``context`` is ``None`` the current
        context is passed to ``has_permission``."""
        if context is None:
            context = self.context
        return self.request.has_permission(permission, context)
    def render_view(self, name="", context=None, request=None, secure=True, bare=True):
        """Render another view of ``context`` and return it as markup.

        ``self.bare`` is temporarily set (and restored in ``finally``) so
        the inner view renders without the surrounding master template.
        """
        if context is None:
            context = self.context
        if request is None:
            request = self.request
        before = self.bare
        try:
            self.bare = bare
            html = render_view(context, request, name, secure)
        finally:
            self.bare = before
        return TemplateStructure(html)
    def render_template(self, renderer, **kwargs):
        """Render ``renderer`` with ``kwargs`` and return markup-safe output."""
        return TemplateStructure(render(renderer, kwargs, self.request))
    def list_children(self, context=None, permission="view"):
        """List children of ``context`` that the user may access.

        For non-:class:`Node` containers, fall back to calling ``values()``
        and filtering by ``permission`` (no filtering when falsy).
        """
        if context is None:
            context = self.context
        if isinstance(context, Node):
            if permission is None:
                return context.children
            return context.children_with_permission(self.request, permission)
        return [
            c
            for c in getattr(context, "values", lambda: [])()
            if (not permission or self.request.has_permission(permission, c))
        ]
    # Re-export :func:`pyramid.location.inside` for template convenience.
    inside = staticmethod(inside)
    def avatar_url(self, user=None, size="14", default_image="identicon"):
        """Return the Gravatar URL for ``user`` (default: the current user).

        The Gravatar hash is the MD5 of the email address, falling back to
        the user name when no email is set.
        """
        if user is None:
            user = self.request.user
        email = user.email
        if not email:
            email = user.name
        h = hashlib.md5(email.encode("utf8")).hexdigest()
        query = {"default": default_image, "size": str(size)}
        url = "https://secure.gravatar.com/avatar/{}?{}".format(h, urlencode(query))
        return url
    @reify
    def locale_name(self):
        """Locale name determined for the current request."""
        return get_locale_name(self.request)
    def format_date(self, d, fmt=None):
        """Format a date with Babel; default format from ``kotti.date_format``."""
        if fmt is None:
            fmt = self.S["kotti.date_format"]
        return format_date(d, format=fmt, locale=self.locale_name)
    def format_datetime(self, dt, fmt=None):
        """Format a datetime (or a POSIX timestamp) with Babel."""
        if fmt is None:
            fmt = self.S["kotti.datetime_format"]
        if not isinstance(dt, datetime):
            # Accept raw timestamps as well as datetime objects.
            dt = datetime.fromtimestamp(dt)
        return format_datetime(dt, format=fmt, locale=self.locale_name)
    def format_time(self, t, fmt=None):
        """Format a time with Babel; default format from ``kotti.time_format``."""
        if fmt is None:
            fmt = self.S["kotti.time_format"]
        return format_time(t, format=fmt, locale=self.locale_name)
    def format_currency(self, n, currency, fmt=None):
        """Format a number as currency with Babel for the current locale."""
        return format_currency(n, currency, format=fmt, locale=self.locale_name)
    @staticmethod
    def get_type(name):
        """Return the content class registered under ``name`` (or None)."""
        for class_ in get_settings()["kotti.available_types"]:
            if class_.type_info.name == name:
                return class_
    def find_edit_view(self, item):
        """Find an edit view name of ``item`` the user is permitted to use.

        Falls back from the current view name to ``'edit'`` and finally to
        the default view (empty string).
        """
        view_name = self.request.view_name
        if not view_permitted(item, self.request, view_name):
            view_name = "edit"
        if not view_permitted(item, self.request, view_name):
            view_name = ""
        return view_name
    @reify
    def edit_links(self):
        """Edit action links of the context's type that are visible here."""
        if not hasattr(self.context, "type_info"):
            return []
        return [
            link
            for link in self.context.type_info.edit_links
            if link.visible(self.context, self.request)
        ]
    @reify
    def site_setup_links(self):
        """Control-panel links visible to the current user."""
        return [link for link in CONTROL_PANEL_LINKS
                if link.visible(self.root, self.request)]
    @staticmethod
    def sanitize(html, sanitizer="default"):
        """ Convenience wrapper for :func:`kotti.sanitizers.sanitize`.
        :param html: HTML to be sanitized
        :type html: str
        :param sanitizer: name of the sanitizer to use.
        :type sanitizer: str
        :result: sanitized HTML
        :rtype: str
        """
        return sanitize(html, sanitizer)
class NodesTree:
    """Lightweight proxy wrapping a content node inside the permission-
    filtered tree built by :func:`nodes_tree`.

    Unknown attribute lookups fall through to the wrapped node.
    """
    def __init__(self, node, request, item_mapping, item_to_children, permission):
        self._node = node
        self._request = request
        self._item_mapping = item_mapping
        self._item_to_children = item_to_children
        self._permission = permission
    @property
    def __parent__(self):
        # Returns None for nodes without a parent (falsy parent_id).
        if self.parent_id:
            return self._item_mapping[self.parent_id]
    @property
    def children(self):
        """Permission-filtered child nodes, each wrapped in a NodesTree."""
        return [
            NodesTree(
                child,
                self._request,
                self._item_mapping,
                self._item_to_children,
                self._permission,
            )
            for child in self._item_to_children[self.id]
            if self._request.has_permission(self._permission, child)
        ]
    def _flatten(self, item):
        # Depth-first traversal yielding the raw (unwrapped) nodes.
        # noinspection PyProtectedMember
        yield item._node
        for ch in item.children:
            yield from self._flatten(ch)
    def tolist(self):
        """Return the subtree as a flat, depth-first list of raw nodes."""
        return list(self._flatten(self))
    def __getattr__(self, key):
        # Delegate everything else to the wrapped node.
        return getattr(self._node, key)
def nodes_tree(request, context=None, permission="view"):
    """Build a permission-filtered :class:`NodesTree` over all content.

    Loads every :class:`Content` row in a single polymorphic query, groups
    permitted children by parent id (sorted by position), and roots the
    tree at ``context`` or, when omitted, at the node with no parent.
    """
    item_mapping = {}
    item_to_children = defaultdict(lambda: [])
    for node in DBSession.query(Content).with_polymorphic(Content):
        item_mapping[node.id] = node
        if request.has_permission(permission, node):
            item_to_children[node.parent_id].append(node)
    for children in item_to_children.values():
        children.sort(key=lambda ch: ch.position)
    if context is None:
        # The root is the single permitted node whose parent_id is None.
        node = item_to_children[None][0]
    else:
        node = context
    return NodesTree(node, request, item_mapping, item_to_children, permission)
def search_content(search_term, request=None):
    """Dispatch search to the configured ``kotti.search_content`` backend."""
    return get_settings()["kotti.search_content"][0](search_term, request)
def default_search_content(search_term, request=None):
    """Default full-text search over content.

    Matches ``search_term`` (as a SQL LIKE pattern) against content names,
    titles and descriptions, then folds in tag matches and document-body
    matches that the generic filter missed.  Only results the user may view
    are returned, as plain dicts of name/title/description/path.

    NOTE(review): ``request`` must not actually be ``None`` — it is used
    for permission checks and path construction below.
    """
    # noinspection PyUnresolvedReferences
    searchstring = f"%{search_term}%"
    # generic_filter can be applied to all Node (and subclassed) objects
    generic_filter = or_(
        Content.name.like(searchstring),
        Content.title.like(searchstring),
        Content.description.like(searchstring),
    )
    results = (
        DBSession.query(Content)
        .filter(generic_filter)
        .order_by(Content.title.asc())
        .all()
    )
    # specific result contain objects matching additional criteria
    # but must not match the generic criteria (because these objects
    # are already in the generic_results)
    document_results = DBSession.query(Document).filter(
        and_(Document.body.like(searchstring), not_(generic_filter))
    )
    # Append tag and body matches, de-duplicated against the generic results
    # (previously done with a list comprehension used only for side effects).
    for results_set in [content_with_tags([searchstring]), document_results.all()]:
        for c in results_set:
            if c not in results:
                results.append(c)
    result_dicts = []
    for result in results:
        if request.has_permission("view", result):
            result_dicts.append(
                dict(
                    name=result.name,
                    title=result.title,
                    description=result.description,
                    path=request.resource_path(result),
                )
            )
    return result_dicts
def content_with_tags(tag_terms):
    """Return all content whose tag titles match any of the LIKE patterns."""
    return (
        DBSession.query(Content)
        .join(TagsToContents)
        .join(Tag)
        .filter(or_(*[Tag.title.like(tag_term) for tag_term in tag_terms]))
        .all()
    )
def search_content_for_tags(tags, request=None):
    """Return view-permitted content tagged with any of ``tags``.

    Each match is returned as a dict of name/title/description/path.
    """
    matches = []
    for item in content_with_tags(tags):
        if not request.has_permission("view", item):
            continue
        matches.append(
            {
                "name": item.name,
                "title": item.title,
                "description": item.description,
                "path": request.resource_path(item),
            }
        )
    return matches
def includeme(config):
    """ Pyramid includeme hook.
    :param config: app config
    :type config: :class:`pyramid.config.Configurator`
    """
    # Register the custom view predicates so views can be configured with
    # ``root_only=`` and ``if_setting_has_value=`` arguments.
    config.add_view_predicate("root_only", RootOnlyPredicate)
    config.add_view_predicate("if_setting_has_value", SettingHasValuePredicate)
| 1.820313 | 2 |
IndianNameGenerator/Names.py | bshantherishenoy/indian-name-generator | 0 | 12765152 | import random
#Punjabi
#-----
# Punjabi names are built from a gender-neutral stem plus a suffix.
mainNamePunjabi = ["Gagan", "Har", "Bal", "Man", "Nav", "Sukh", "Kush", "Gur", "Karam", "Karan", "Dil", "Dharam", "Param", "Dal", "Jas", "Par", "Dul"]
maleSuffixPunjabi = ["jeet", "jyot", "vinder", "preet", "meet"]
femaleSuffixPunjabi = ["preet", "jeet", "bir"]
# Backwards-compatible alias for the old misspelled module attribute.
femleSuffixPunjabi = femaleSuffixPunjabi
unionSuffixPunjabi = maleSuffixPunjabi + femaleSuffixPunjabi

def randomPunjabi():
    """Return a random Punjabi name of either gender."""
    return random.choice(mainNamePunjabi) + random.choice(unionSuffixPunjabi)

def malePunjabi():
    """Return a random male Punjabi name."""
    return random.choice(mainNamePunjabi) + random.choice(maleSuffixPunjabi)

def femalePunjabi():
    """Return a random female Punjabi name.

    Bug fix: this previously raised ``NameError`` because it referenced
    ``femaleSuffixPunjabi`` while the list was defined with the misspelled
    name ``femleSuffixPunjabi``.
    """
    return random.choice(mainNamePunjabi) + random.choice(femaleSuffixPunjabi)
#-----
#Marathi
#-----
# Marathi given names, grouped by gender.
maleNameMarathi=["Aarav", "Kshitij", "Shantanu", "Onkar", "Aniket", "Atharva", "Prajwal", "Yash", "Abhijeet", "Ganesh", "Sachin", "Prathamesh", "Vaibhav", "Ninad", "Mihir", "Tejas", "Suyash", "Sanket", "Devang", "Darshan", "Soham", "Rohit", "Manish", "Aadesh", "Siddhesh",
"Aakash", "Anmol", "Chaitanya", "Dharmesh", "Gagan", "Gaurav", "Gopal", "Ishan", "Mehul", "Om", "Rahul", "Sandesh", "Tanmay", "Tushar", "Utkarsh",
"Vedang", "Varun", "Vinay", "Vivek", "Yogesh"]
femaleNameMarathi=["Vaishnavi", "Maithili", "Pooja", "Smital", "Shivani", "Veerja", "Shruti", "Aditi", "Manali", "Anuja", "Pranali", "Saloni",
"Aabha", "Aakriti", "Aruni", "Akanksha", "Akshata", "Aboli", "Ankita", "Chaitrali", "Divya", "Dhriti", "Gargi", "Gayatri", "Gauravi", "Gautami", "Isha", "Ishika",
"Kajal", "Kalyani", "Neha", "Nishi", "Tanvi", "Yuti"]
unionNameMarathi = maleNameMarathi + femaleNameMarathi

def randomMarathi():
    """Return a random Marathi name of either gender."""
    return random.choice(unionNameMarathi)

def maleMarathi():
    """Return a random male Marathi name.

    Bug fix: previously referenced the undefined name ``maleNameMarath``
    (missing trailing 'i') and raised ``NameError``.
    """
    return random.choice(maleNameMarathi)

def femaleMarathi():
    """Return a random female Marathi name.

    Bug fix: previously referenced the undefined name ``femaleNameMarath``
    (missing trailing 'i') and raised ``NameError``.
    """
    return random.choice(femaleNameMarathi)
#-----
#Bengali
#-----
# Bengali given names, grouped by gender.
maleNameBengali=["Abhik", "Abhoy", "Achintya", "Arnab", "Benoy", "Bhaskor",
"Bipin", "Daiwik", "Debesh", "Hrishab", "Indroneel", "Palash", "Paritosh", "Shirshendu", "Shubhang",
"Sourav", "Subrata", "Tapan", "Gairik", "Ujjwal"]
femaleNameBengali=["Ankolika", "Arundhati", "Bidisha", "Bibhuti", "Bipasha", "Chaitali", "Debjani", "Debolina", "Drishti", "Durba", "Joyeeta", "Kajol", "Kshamya", "Indrani", "Lotika", "Mishti",
"Naisha", "Pakhi", "Paromita", "Piyali", "Sagarika", "Shorbari", "Shoma", "Sushmita", "Tavishi", "Tvisha", "Yoshita"]
unionNameBengali = maleNameBengali + femaleNameBengali

def randomBengali():
    """Return a random Bengali name of either gender."""
    return random.choice(unionNameBengali)

def maleBengali():
    """Return a random male Bengali name."""
    return random.choice(maleNameBengali)

def femaleBengali():
    """Return a random female Bengali name."""
    return random.choice(femaleNameBengali)
#-----
#Gujarati
#-----
# Gujarati given names, grouped by gender.
maleNameGujarati=["Dhaval", "Haanish", "Herik", "Jigar", "Jignesh", "Joshil", "Mukund", "Munjal", "Oresh", "Prakat", "Pratul",
"Praful", "Praveen", "Prerit", "Devang", "Pujesh", "Raghubeer", "Sanam", "Yaksh", "Ahem", "Yug", "Yuvan", "Ronak"]
femaleNameGujarati=["Hinal", "Hiral", "Havya", "Jaimini", "Komal", "Jigna", "Raashi", "Kavya", "Nutan", "Pranauthi", "Puruvi",
"Tanishka", "Vaishnavi", "Vanshi", "Vrishti", "Vritika", "Kanchan"]
unionNameGujarati = maleNameGujarati + femaleNameGujarati

def randomGujarati():
    """Return a random Gujarati name of either gender."""
    return random.choice(unionNameGujarati)

def maleGujarati():
    """Return a random male Gujarati name."""
    return random.choice(maleNameGujarati)

def femaleGujarati():
    """Return a random female Gujarati name."""
    return random.choice(femaleNameGujarati)
#-----
#Kannada
#-----
# Kannada given names and surnames; full names are "<given> <surname>".
maleNameKannada=["Shreyas","Ganesh","Rishab","Ritvik","Ramesh","Abhhishek","Nandan","Kishen","Narayan","Aniket","Pawan","Hanumappa","Shiva","Rajath","Prateek","Prajwal","Ujval","Utaam","Mohit","Chetan","Dheeraj","Somshekar","Mahesh","Mallikarjun","Tony","Sukesh","Varun","Nikhil","Vasant","Deepak","Vasudev","Subrmanya","Vinay","hrihari","Santosh","Darshan","Vikshit","Amogh","Govind","Vittal","Jagganath","Shishir","Guru","Girish","Vikunt","Keshaw","Arya","Rahul","Rajesh","Shashidar","Venktash","Raman","Dhanush","Arjun","Karana","Kubera"]
femaleNameKannada=["Rachana","Divya","Renuka","Deeksha","Arpita","Ambruta","Bharathi","Seema","Shantala","Shoba","Kaveri","Priya","Prabha","Saraswati","Yeshaswini","Tejaswini","Sindhu","Ramya","Radhika","Shreya","Sameeksha","Chandana","Ganga","Meera","Nayana","Parvati","Shambavi","Sumana","Sridevi","Rishitha","Sneha","Vidya","Vaishanavi","Geetha","Veena","Kavita","Kavya","Kavana","Keerthi","Lavanya","Vandita","Vinuta","Aishwarya","Soundarya","Ananya","Samveetha","Bhavya","Bhagya","Girija","Gayatri","Anu","Dhanya","Krutika","Anjali"]
surnameKannada = ["Shenoy","Bhat","Pai","Kini","Hegede","Patil","Kotambri","Reddy","Rai","Shetty","Rao","Menasinkai","Ullagaddi" ,"Limbekai", "Ballolli", "Tenginkai", "Byali","Akki", "Doddamani", "Hadimani" , "Kattimani", "Bevinmarad", "Hunasimarad" , "Mirjankar", "Belagavi", "Hublikar" ,"Jamkhandi","Angadi", "Amavasya", "Kage", "Bandi", "Kuri", "Kudari", "Toppige", "Beegadkai", "Pyati" ,"Hanagi" ,"Rotti","Hebbar","Ballal","Rai"]

def maleKannada():
    """Return a random male Kannada full name ("given surname")."""
    given = random.choice(maleNameKannada)
    surname = random.choice(surnameKannada)
    return f"{given} {surname}"

def femaleKannada():
    """Return a random female Kannada full name ("given surname")."""
    given = random.choice(femaleNameKannada)
    surname = random.choice(surnameKannada)
    return f"{given} {surname}"
#----- | 3.1875 | 3 |
bnn/src/training/characters-gen-binary-weights.py | Siraj-Qazi/BNN-PYNQ | 7 | 12765153 | #BSD 3-Clause License
#=======
#
#Copyright (c) 2017, Xilinx
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#* Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import finnthesizer as fth
if __name__ == "__main__":
    # Convert the trained character-recognition network's weights (.npz from
    # training) into binary parameter files for the lfcW1A1 PYNQ overlay.
    bnnRoot = "."
    npzFile = bnnRoot + "/weights/char_parameters.npz"
    targetDirBin = bnnRoot + "/binparam-lfcW1A1-pynq-nist"
    targetDirHLS = bnnRoot + "/binparam-lfcW1A1-pynq-nist/hw"
    # Per-layer SIMD/PE folding factors for the four fully connected layers.
    simdCounts = [64, 32, 64, 8]
    peCounts = [32, 64, 32, 16]
    # W1A1 network: 1 integer bit and 0 fractional bits for weights,
    # activations and inputs on every layer.
    WeightsPrecisions_fractional = [0, 0, 0, 0]
    ActivationPrecisions_fractional = [0, 0, 0, 0]
    InputPrecisions_fractional = [0, 0, 0, 0]
    WeightsPrecisions_integer = [1, 1, 1, 1]
    ActivationPrecisions_integer = [1, 1, 1, 1]
    InputPrecisions_integer = [1, 1, 1, 1]
    #classes = map(lambda x: str(x), range(61))
    # 62 output class labels (NOTE(review): the commented line above used
    # range(61) — confirm the intended class count).
    classes = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", #Digits
            "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", #Upper case
            "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] #Lower case
    fth.convertFCNetwork(npzFile, targetDirBin, targetDirHLS, simdCounts, peCounts, WeightsPrecisions_fractional, ActivationPrecisions_fractional, InputPrecisions_fractional, WeightsPrecisions_integer, ActivationPrecisions_integer, InputPrecisions_integer)
    # Write the class labels next to the binary parameters.
    with open(targetDirBin + "/classes.txt", "w") as f:
        f.write("\n".join(classes))
| 1.078125 | 1 |
magpie/dataset/dataset.py | daemon/magpie | 3 | 12765154 | <gh_stars>1-10
from collections import namedtuple
import enum
import torch.utils.data as data
registry = {}
class DatasetType(enum.Enum):
    # Standard dataset splits.
    TRAINING = enum.auto()
    VALIDATION = enum.auto()
    TEST = enum.auto()
class MagpieDataset(data.Dataset):
    """Base dataset class; subclasses self-register under ``name``.

    Declaring ``class MyDataset(MagpieDataset, name="my")`` adds the
    subclass to the module-level ``registry`` so it can be looked up with
    ``find_dataset``.
    """
    def __init_subclass__(cls, name, **kwargs):
        # Forward remaining keyword arguments so cooperative subclassing
        # (other bases defining __init_subclass__) keeps working; the
        # original swallowed them silently.
        super().__init_subclass__(**kwargs)
        registry[name] = cls
# Summary record for a dataset: its length plus "mean" and "mean2"
# statistics (presumably first and second moments of the samples — confirm
# with the producers of these descriptors).
DatasetDescriptor = namedtuple("DatasetDescriptor", "length, mean, mean2")
def find_dataset(name):
    """Return the dataset class registered under ``name``.

    Raises ``KeyError`` when no dataset with that name has been registered.
    """
    return registry[name]
newtonnet/train/hooks/__init__.py | THGLab/NewtonNet | 0 | 12765155 | <filename>newtonnet/train/hooks/__init__.py
"""
"""
from newtonnet.train.hooks.visualizers import VizMolVectors3D | 0.984375 | 1 |
option_keyboard/core/networks.py | aditimavalankar/option-keyboard | 13 | 12765156 | <filename>option_keyboard/core/networks.py
import torch.nn as nn
class MlpDiscrete(nn.Module):
    """Two-hidden-layer MLP with ReLU activations producing ``output_dim``
    values (e.g. discrete action logits — confirm with callers)."""
    def __init__(self, input_dim, output_dim, hidden=[64, 128]):
        # NOTE(review): mutable default argument; harmless only as long as
        # no caller mutates the `hidden` list.
        super(MlpDiscrete, self).__init__()
        self.hidden1 = nn.Linear(input_dim, hidden[0])
        self.relu = nn.ReLU()
        self.hidden2 = nn.Linear(hidden[0], hidden[1])
        self.output = nn.Linear(hidden[1], output_dim)
    def forward(self, x):
        # NOTE(review): intermediate activations are stored on self, which
        # keeps autograd graph references alive between calls — confirm this
        # is intentional (e.g. for inspection/debugging).
        self.h1 = self.hidden1(x)
        self.r1 = self.relu(self.h1)
        self.h2 = self.hidden2(self.r1)
        self.r2 = self.relu(self.h2)
        self.o = self.output(self.r2)
        return self.o
    def set_weights(self, state_dict):
        """Load parameters from a state dict."""
        self.load_state_dict(state_dict)
    def copy_weights(self, net):
        """Hard-copy all parameters from another network of the same shape."""
        self.load_state_dict(net.state_dict())
    def soft_update(self, net, tau):
        """Polyak update: param <- tau * net_param + (1 - tau) * param."""
        for old_p, new_p in zip(self.parameters(), net.parameters()):
            old_p.data.copy_(tau * new_p + (1 - tau) * old_p)
reasoner/json2f2.py | shiv-io/blawx | 1 | 12765157 | <filename>reasoner/json2f2.py
# Script to Take a JSON object, convert it into a Python structure, and convert the Python structure into Flora-2 code.
# <NAME>
import sys, json, types
def json2flora(key,value,parentname="data",root=False):
    """Recursively render a JSON key/value pair as Flora-2 frame syntax.

    :param key: attribute name for this value.
    :param value: decoded JSON value (scalar, list, dict or tuple).
    :param parentname: name of the enclosing object, used at the root.
    :param root: when True, wrap the output as ``parentname[key->...]``.
    :returns: the Flora-2 text for this key/value pair (without trailing dot).
    """
    retstr = ""
    # If this is not a leaf:
    if isinstance(value, (list,dict,tuple)):
        # for each of the subvariables, json2flora it
        if isinstance(value, list):
            # Lists become {...} sets; at the root they are wrapped in a
            # reified frame with a `list` attribute.
            if root:
                retstr += parentname + "[" + key + "->\\#[list->{"
            else:
                retstr += "{"
            if len(value):
                for i, v in enumerate(value):
                    if isinstance(v, (list,dict,tuple)):
                        retstr += json2flora(key,v,parentname)
                    else:
                        retstr += jsonvalue2flora(v)
                    retstr += ", "
                # Drop the trailing ", " separator.
                retstr = retstr[ :-2 ]
            if root:
                retstr += "}]]"
            else:
                retstr += "}"
        elif isinstance(value, dict):
            # Dicts become anonymous reified frames \#[k1->v1, k2->v2, ...].
            if root:
                retstr += parentname + "[" + key + "->\\#["
            else:
                retstr += "\\#["
            if len(value):
                for k, v in value.items():
                    retstr += k + "->"
                    if isinstance(v, (list,dict,tuple)):
                        retstr += json2flora(k,v,"")
                    else:
                        retstr += jsonvalue2flora(v)
                    retstr += ", "
                # Drop the trailing ", " separator.
                retstr = retstr[ :-2 ]
            retstr += "]"
            if root:
                retstr += "]"
        elif isinstance(value, tuple):
            # Convert tuple to a list, and try again
            # I'm not sure if this is correct... need to test.
            newvalue = list(value)
            retstr += json2flora(key,newvalue,parentname)
    else:
        # Leaf value: emit key->literal (wrapped at the root).
        if root:
            retstr += parentname + "["
        retstr += str(key) + "->" + jsonvalue2flora(value)
        if root:
            retstr += "]"
    return retstr
def jsonvalue2flora(value):
    """Convert a scalar JSON value to its Flora-2 literal representation.

    - strings are wrapped in single quotes unless already quoted;
    - ``None`` becomes the null term ``\\@?``;
    - booleans become ``true`` / ``false`` (checked before numbers, since
      ``bool`` is a subclass of ``int``);
    - ints and floats are rendered verbatim;
    - anything else falls back to ``value:typename``.
    """
    if isinstance(value, str):
        # Quote unless the value already looks like 'quoted' (needs length
        # >= 2 — the original crashed with IndexError on an empty string).
        if len(value) >= 2 and value[0] == "'" and value[-1] == "'":
            return str(value)
        return "'" + str(value) + "'"
    elif isinstance(value, type(None)):
        # Explicit backslash escape: the original "\@?" relied on Python
        # preserving an unknown escape sequence, which is deprecated.
        return "\\@?"
    elif isinstance(value, bool):
        return "true" if value else "false"
    elif isinstance(value, (int, float)):
        return str(value)
    else:
        return str(value) + ":" + type(value).__name__
# Get the input file name from the command line.
filename = sys.argv[1]

# Convert from JSON to a Python structure.  The original opened the file
# without ever closing it; a `with` block releases the handle promptly.
with open(filename, "r") as file:
    dictionary = json.load(file)

# Convert the resulting Python dictionary to a list of Flora-2 entries.
output = []
for k, v in dictionary.items():
    output.append(json2flora(k, v, root=True))

# Output the Flora-2 code, one dot-terminated entry per line.
for o in output:
    print(o + ".\n")
| 3.59375 | 4 |
decorators_ex.py | srknbyrm/Python | 0 | 12765158 | def start_end_decorator(func):
def wrapper():
print('start')
func()
print('end')
return wrapper
@start_end_decorator # print_name = start_end_decorator(print_name)
def print_name():
    # Demo target for the plain decorator above.
    print('serkan')
print_name()
import functools
def sum_of_digits(func):
    """Decorator: replace ``func``'s return value by the sum of its digits.

    The wrapped function's result is converted to its decimal string and
    the individual digits are summed.  ``functools.wraps`` preserves the
    wrapped function's metadata (``__name__``, docstring, ...).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        func_result = func(*args, **kwargs)
        # abs() so a leading minus sign does not break int(); the original
        # raised ValueError whenever the result was negative.
        return sum(int(digit) for digit in str(abs(func_result)))
    return wrapper
@sum_of_digits
def add_five(x):
    # 11 + 5 = 16 -> digits sum to 7 (printed below).
    return x + 5
x = add_five(11)
print(x)
# functools.wraps preserved the wrapped function's metadata:
print(help(add_five))
print(add_five.__name__)
# Decorator function arguments
def repeat(num_times):
    """Decorator factory: call the decorated function ``num_times`` times
    and return the last call's result."""
    def _decorator(func):
        @functools.wraps(func)
        def _runner(*args, **kwargs):
            for _ in range(num_times):
                outcome = func(*args, **kwargs)
            return outcome
        return _runner
    return _decorator
@repeat(num_times=3)
def greet(name):
    # Printed three times by the repeat decorator.
    print(f"Hello {name}")
greet('Alex')
# a decorator function that prints debug information about the wrapped function
def debug(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
args_repr = [repr(a) for a in args]
kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()]
signature = ", ".join(args_repr + kwargs_repr)
print(f"Calling {func.__name__}({signature})")
result = func(*args, **kwargs)
print(f"{func.__name__!r} returned {result!r}")
return result
return wrapper
def start_end_decorator_2(func):
    """Decorator that prints 'End' after calling ``func``.

    Bug fix: the wrapper now returns ``func``'s result.  Previously the
    return value was silently dropped, so decorated functions (such as a
    function returning a greeting) always appeared to return None.
    """
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        print('End')
        return result
    return wrapper
@debug
@start_end_decorator_2
def say_hello(name):
    # Innermost function: builds and prints the greeting.
    greeting = f'Hello {name}'
    print(greeting)
    return greeting
# now `debug` is executed first and calls `@start_end_decorator_4`, which then calls `say_hello`
say_hello(name='Alex')
import functools
class CountCalls:
    """Class-based decorator that counts calls to the wrapped function."""
    # the init needs to have the func as argument and stores it
    def __init__(self, func):
        functools.update_wrapper(self, func)
        self.func = func
        self.num_calls = 0
    # extend functionality, execute function, and return the result
    def __call__(self, *args, **kwargs):
        self.num_calls += 1
        print(f"Call {self.num_calls} of {self.func.__name__!r}")
        # Bug fix: actually invoke the wrapped function and propagate its
        # return value; previously calls were counted but never executed.
        return self.func(*args, **kwargs)
@CountCalls
def say_hello(num):
    # Re-binds the module name `say_hello`; `num` is unused by the body.
    print("Hello!")
say_hello(5)
say_hello(5) | 3.765625 | 4 |
app/main.py | paiv/icfpc2020 | 0 | 12765159 | <reponame>paiv/icfpc2020
#!/usr/bin/env python
import arrival
import requests
import sys
from urllib.parse import urljoin
import logging
logging.basicConfig(level=logging.DEBUG)
class Transport:
    """Thin HTTP transport for the server's ``/aliens/send`` endpoint."""
    def __init__(self, url, api_key=None):
        self.url = url
        self.api_key = api_key
        # Reuse one session (connection pooling) for all requests.
        self.req = requests.Session()
    def send(self, text):
        """POST ``text`` to the server and return the response body.

        Exits the whole process with status 2 on any non-200 reply.
        """
        body = text.encode('utf-8')
        auth = {'apiKey': self.api_key} if self.api_key else None
        r = self.req.post(urljoin(self.url, '/aliens/send'), data=body, params=auth)
        if r.status_code != 200:
            print('Unexpected server response:')
            print('HTTP code:', r.status_code)
            print('Response body:', r.text)
            exit(2)
        print('Server response:', r.text)
        return r.text
class Client:
    """Game client: encodes list messages with the cons codec and sends
    them through :class:`Transport`."""
    def __init__(self, url, player_key, api_key=None):
        self.tr = Transport(url, api_key=api_key)
        self.player_key = player_key
        self.codec = arrival.ConsCodec()
    def send(self, message):
        """Encode ``message``, send it, decode and return the reply.

        Raises when the server answers with the error sentinel ``[0]``.
        """
        print(repr(message))
        text = self.codec.encode(message)
        response = self.tr.send(text)
        r = self.codec.decode(response)
        print(repr(r))
        if r == [0]:
            raise Exception('server [0]')
        return r
    def ping(self):
        # Message type 0: ping.
        m = [0]
        return self.send(m)
    def create_server(self):
        # Message type 1: create a new game (reply carries the player keys,
        # see main()).
        m = [1, 0]
        return self.send(m)
    def join_server(self):
        # Message type 2: join the game using our player key.
        m = [2, int(self.player_key), []]
        return self.send(m)
    def start_game(self, x):
        # Message type 3: start the game with an initial configuration
        # tuple; only the last component varies here.
        m = [3, int(self.player_key), (0,0,0,x)]
        return self.send(m)
    def send_commands(self, commands):
        # Message type 4: submit this turn's commands.
        m = [4, int(self.player_key), commands]
        return self.send(m)
class Player:
    # Placeholder for future game-playing logic (currently unused).
    pass
def main(url, player=None):
    """Connect to the game server and loop submitting empty command lists.

    When no ``player`` key is supplied, a fresh game is created and we join
    with the first ("attack") key; the second ("defend") key is printed so a
    second client can join.
    """
    print('ServerUrl: %s; PlayerKey: %s' % (url, player))
    cli = Client(url=url, player_key=player, api_key=None)
    if not player:
        _, ((_, attack_key), (_, defend_key)) = cli.create_server()
        print('attack key:', attack_key)
        print('defend key:', defend_key)
        cli.player_key = attack_key
    cli.join_server()
    cli.start_game(1 if player else 2)
    # (1, gameStage, staticGameInfo, gameState)
    while True:
        # Runs until the server replies [0] (raises) or the process dies.
        cli.send_commands([])
if __name__ == '__main__':
    # Usage: main.py SERVER_URL [PLAYER_KEY]
    main(*sys.argv[1:])
| 2.9375 | 3 |
models/Transformers.py | 050644zf/sccl | 0 | 12765160 | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved
Author: <NAME> (<EMAIL>)
Date: 02/26/2021
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
# from transformers import AutoModel, AutoTokenizer
class SCCLBert(nn.Module):
    """SCCL model: a sentence-transformer backbone with an Instance-CL
    projection head and a soft clustering head with learnable centers."""
    def __init__(self, bert_model, cluster_centers=None, alpha=1.0):
        super(SCCLBert, self).__init__()
        # NOTE(review): debug prints left in the constructor; consider
        # removing them or switching to logging.
        print(bert_model[0].tokenizer)
        self.tokenizer = bert_model[0].tokenizer
        self.sentbert = bert_model[0].auto_model
        self.emb_size = self.sentbert.config.hidden_size
        self.alpha = alpha
        print(self.emb_size)
        # Instance-CL head
        self.head = nn.Sequential(
            nn.Linear(self.emb_size, self.emb_size),
            nn.ReLU(inplace=True),
            nn.Linear(self.emb_size, 128))
        # Clustering head
        # NOTE(review): cluster_centers=None (the default) would make
        # torch.tensor fail here — callers apparently must pass centers.
        initial_cluster_centers = torch.tensor(
            cluster_centers, dtype=torch.float, requires_grad=True)
        self.cluster_centers = Parameter(initial_cluster_centers)
    def get_embeddings(self, features, pooling="mean"):
        """Mean-pool the backbone's token embeddings over the attention mask.

        ``pooling`` is currently unused; only mean pooling is implemented.
        """
        bert_output = self.sentbert.forward(**features)
        attention_mask = features['attention_mask'].unsqueeze(-1)
        all_output = bert_output[0]
        # Mask out padding tokens, then average over the sequence dimension.
        mean_output = torch.sum(all_output*attention_mask, dim=1) / torch.sum(attention_mask, dim=1)
        return mean_output
    def get_cluster_prob(self, embeddings):
        """Soft cluster assignment: normalized (1 + d^2/alpha)^(-(alpha+1)/2)
        over squared distances to the learnable cluster centers."""
        norm_squared = torch.sum((embeddings.unsqueeze(1) - self.cluster_centers) ** 2, 2)
        numerator = 1.0 / (1.0 + (norm_squared / self.alpha))
        power = float(self.alpha + 1) / 2
        numerator = numerator ** power
        return numerator / torch.sum(numerator, dim=1, keepdim=True)
    def local_consistency(self, embd0, embd1, embd2, criterion):
        """Sum of ``criterion`` between the cluster distributions of the two
        augmented embeddings (embd1, embd2) and the original (embd0)."""
        p0 = self.get_cluster_prob(embd0)
        p1 = self.get_cluster_prob(embd1)
        p2 = self.get_cluster_prob(embd2)
        lds1 = criterion(p1, p0)
        lds2 = criterion(p2, p0)
        return lds1+lds2
| 2.453125 | 2 |
moogle/profiles/migrations/0003_auto__add_driveprofile.py | nimiq/moogle-project | 4 | 12765161 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``DriveProfile`` model.

    ``DriveProfile`` mirrors the existing ``GmailProfile`` (same fields, same
    one-to-one link to ``auth.User``).  The ``models`` dict below is South's
    frozen ORM snapshot and must not be edited by hand.
    """

    def forwards(self, orm):
        # Adding model 'DriveProfile'
        db.create_table('profiles_driveprofile', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(unique=True, to=orm['auth.User'])),
            ('family_name', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
            ('given_name', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=101, blank=True)),
            ('gender', self.gf('django.db.models.fields.CharField')(max_length=10, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
            ('verified_email', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
            ('locale', self.gf('django.db.models.fields.CharField')(max_length=5, blank=True)),
            ('google_id', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
            ('link', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
        ))
        db.send_create_signal('profiles', ['DriveProfile'])

    def backwards(self, orm):
        # Deleting model 'DriveProfile'
        db.delete_table('profiles_driveprofile')

    # Frozen ORM state used by South while this migration runs (generated).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'blank': 'True', 'to': "orm['auth.Permission']"})
        },
        'auth.permission': {
            'Meta': {'object_name': 'Permission', 'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)"},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True', 'related_name': "'user_set'"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True', 'related_name': "'user_set'"}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'object_name': 'ContentType', 'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'profiles.driveprofile': {
            'Meta': {'object_name': 'DriveProfile'},
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'family_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'given_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'google_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'locale': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '101', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'unique': 'True', 'to': "orm['auth.User']"}),
            'verified_email': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
        },
        'profiles.gmailprofile': {
            'Meta': {'object_name': 'GmailProfile'},
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'family_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'given_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'google_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'locale': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '101', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'unique': 'True', 'to': "orm['auth.User']"}),
            'verified_email': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['profiles']
tests/_display/test_line_dot_setting.py | ynsnf/apysc | 16 | 12765162 | from random import randint
from retrying import retry
import apysc as ap
from apysc._display.line_dot_setting import LineDotSetting
class TestLineDotSetting:
    """Tests for apysc's LineDotSetting container."""

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test___init__(self) -> None:
        """The constructor should wrap both plain ints and ap.Int dot sizes."""
        dot_setting: LineDotSetting = LineDotSetting(dot_size=5)
        assert isinstance(dot_setting._value['dot_size'], ap.Int)
        assert dot_setting._value['dot_size'] == 5

        dot_setting = LineDotSetting(dot_size=ap.Int(10))
        assert dot_setting._value['dot_size'] == 10

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_dot_size(self) -> None:
        """The dot_size property should expose the stored value as an ap.Int."""
        dot_setting: LineDotSetting = LineDotSetting(dot_size=5)
        assert isinstance(dot_setting.dot_size, ap.Int)
        assert dot_setting.dot_size == 5
| 2.5625 | 3 |
remote_api.py | PatchPorting/patcher | 2 | 12765163 | """
Copyright (c) 2017 IBM Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import requests
import json
from os import getenv
# Remote API endpoint pieces used to build the base URL in Remote.__init__.
HOST= 'patchport-http.mybluemix.net'
VER = 'v0'
# The access token is read from the environment; API calls fail without it.
TOKEN = getenv("TOKEN")
if not TOKEN:
    print ("Set env variable TOKEN with the access token if you want to access the remote API.")
class Remote(object):
    """Base class for objects backed by the PatchPort REST API.

    Provides thin wrappers around the HTTP verbs (GET/POST/PATCH/DELETE)
    that append the access token, raise on any non-200 response, and decode
    the JSON body.
    """

    def __init__(self):
        self.APIurl = "https://%s/api/%s" % (HOST, VER)
        self.token = TOKEN

    def _requestPOST(self, path, data={}):
        """POST ``data`` (a JSON string) to ``path``; return the decoded response."""
        headers = {'content-type': 'application/json'}
        u = "%s/%s?access_token=%s" % (self.APIurl, path, self.token)
        r = requests.post(u, data=data, headers=headers)
        if r.status_code != 200:
            raise Exception('Error %d: %s\n%s' % (r.status_code, r.reason, u))
        return r.json()

    def _requestPATCH(self, path, data={}):
        """PATCH ``data`` (a JSON string) to ``path``; return the decoded response."""
        headers = {'content-type': 'application/json'}
        u = "%s/%s?access_token=%s" % (self.APIurl, path, self.token)
        r = requests.patch(u, data=data, headers=headers)
        if r.status_code != 200:
            raise Exception('Error %d: %s\n%s' % (r.status_code, r.reason, u))
        return r.json()

    def _requestDELETE(self, path):
        """DELETE ``path``; return the decoded response."""
        u = "%s/%s?access_token=%s" % (self.APIurl, path, self.token)
        r = requests.delete(u)
        if r.status_code != 200:
            raise Exception('Error %d: %s\n%s' % (r.status_code, r.reason, u))
        return r.json()

    def _requestGET(self, path, where=None, order=None, limit=None):
        """GET ``path`` with an optional loopback-style filter (where/order/limit)."""
        query_filter = {}  # renamed from 'filter' to avoid shadowing the builtin
        if where:
            query_filter["where"] = where
        if order:
            query_filter["order"] = order
        if limit:
            query_filter["limit"] = limit
        # NOTE(review): the filter JSON is interpolated without URL-encoding;
        # presumably fine for the values used here, but urlencode would be
        # safer -- confirm before changing.
        u = "%s/%s?filter=%s&access_token=%s" % (self.APIurl, path, json.dumps(query_filter), self.token)
        r = requests.get(u)
        if r.status_code != 200:
            raise Exception('Error %d: %s\n%s' % (r.status_code, r.reason, u))
        return r.json()

    def _findById(self, path, id):
        """Fetch one resource by id; ``path`` is a format string like 'cves/%s'."""
        # Debugging hook: an id of the form '<filename>' loads a local JSON
        # file instead of hitting the remote API.  TODO: remove for production.
        if len(id) > 1 and id[0] == '<' and id[-1] == '>' and id != "<removed>":
            with open(id[1:-1]) as data_file:
                return json.load(data_file)
        return self._requestGET(path=path % id)
        # TODO catch empty

    def reprJSON(self):
        """Return a dict with only the fields whitelisted in ``_postableFields``.

        Fixed for Python 3: ``dict.iteritems()`` no longer exists; ``items()``
        behaves the same on both Python 2 and 3 here.
        """
        return {k: v for k, v in self.__dict__.items() if k in self._postableFields}
class CVE(Remote):
    """A CVE record, constructed either by remote id or by CVE name."""

    def __init__(self, name=None, id=None):
        Remote.__init__(self)
        if id:
            cve = self._findById("cves/%s", id)
        elif name:
            cve = self._findByName(name)
        else:
            raise Exception('You have to give something to construct this CVE')
        self.__dict__.update(cve)

    def _findByName(self, name):
        """Look up exactly one CVE by name; raise if zero or several match."""
        cves = self._requestGET(where={"name": name}, path='cves')
        if len(cves) == 1:
            return cves[0]
        elif len(cves) == 0:
            raise Exception('CVE id not found: %s' % name)
        else:
            raise Exception('Multiple CVEs match %s' % name)

    def __getattr__(self, item):
        # Lazily fetch the related patchsets on first access.
        # Fixed: compare strings with '==', not 'is' -- identity of equal
        # string literals is an implementation detail and not guaranteed.
        if item == "patchsets":
            patchsets = self._requestGET(path="cves/%s/patchsets" % self.id)
            self.patchsets = [Patchsets(json=patchset) for patchset in patchsets]
            return self.patchsets
        else:
            raise AttributeError(item)
class Patchsets(Remote):
    """A patchset belonging to a CVE; wraps the raw JSON record."""

    def __init__(self, json={}):
        Remote.__init__(self)
        self.__dict__.update(json)

    def __getattr__(self, item):
        # Lazily fetch this patchset's patches on first access.
        # Fixed: compare strings with '==', not 'is'.
        if item == "patches":
            patches = self._requestGET(path="patchsets/%s/patches" % self.id)
            # NOTE(review): each patch is wrapped in Hunk, not Patch -- looks
            # suspicious; confirm against the API schema before changing.
            self.patches = [Hunk(json=patch) for patch in patches]
            return self.patches
        else:
            raise AttributeError(item)
class Patch(Remote):
    """A patch belonging to a patchset; wraps the raw JSON record."""

    def __init__(self, json={}):
        Remote.__init__(self)
        self.__dict__.update(json)

    def __getattr__(self, item):
        # Lazily fetch this patch's hunks on first access.
        # Fixed: compare strings with '==', not 'is'.
        if item == "hunks":
            hunks = self._requestGET(path="patches/%s/hunks" % self.id)
            self.hunks = [Hunk(json=patch) for patch in hunks]
            return self.hunks
        else:
            raise AttributeError(item)
class Hunk(Remote):
    """A single patch hunk; whitelists the fields that may be POSTed back."""

    _postableFields = [
        'cveId',
        'data',
        'fileName',
        'id',
        'patchId',
        'patchsetId'
    ]

    def __init__(self, json={}, id=None):
        Remote.__init__(self)
        if id:
            json = self._findById("hunks/%s", id)
        self.__dict__.update(json)

    def __getattr__(self, item):
        # Lazily fetch related objects on first access.
        # Fixed: string comparison with '==' instead of 'is'.
        if item == "hunks":
            hunks = self._requestGET(path="patches/%s/hunks" % self.id)
            self.hunks = [Hunk(json=patch) for patch in hunks]
        elif item == "cve":
            self.cve = CVE(id=self.cveId)
        else:
            # __getattr__ only runs after the normal lookup (which includes
            # self.__dict__) has failed, so the old ``return
            # self.__dict__[item]`` could never succeed -- it always raised
            # KeyError.  Raise the conventional AttributeError instead so that
            # hasattr(), copy, pickle etc. behave correctly.
            raise AttributeError(item)
        return getattr(self, item)
class Setup(Remote):
    """A build setup record: a named configuration blob with an id."""

    def __init__(self, json={}, id=None):
        Remote.__init__(self)
        if id:
            json = self._findById("setups/%s", id)
        record = json
        self.name = record['name']
        self.content = record['content']
        self.id = record['id']
class Build(Remote):
    """A build job record.

    Constructed from explicit JSON, by remote id, or -- by default -- by
    claiming the oldest waiting build on the server.
    """

    # Fields that may be serialised back to the API.
    _postableFields = ['pkgName',
                       'pkgVersion',
                       'mode',
                       'status',
                       'dist',
                       'urgency',
                       'hunks',
                       'results',
                       'cveId',
                       'patchsetId',
                       'cveName',
                       'id']

    def __init__(self, json={}, where=None, id=None):
        '''if empty, get the next waiting build.
        with where, it gives you the next waiting with that filter'''
        Remote.__init__(self)
        if id:
            json = self._findById("builds/%s", id)
        if not json:
            json = self._nextWaiting(where=where)
        if not json:
            from json import dumps
            raise Exception('No pending builds (filter: %s)' % dumps(where))
        self.__dict__.update(json)

    def _nextWaiting(self, json={}, where=None):
        """Return the oldest build whose status is 'waiting', or None."""
        # Bug fix: __init__ passes where=None explicitly, which used to crash
        # on where["status"].  Copy the dict so that neither the caller's dict
        # nor a shared default is mutated.  (The unused ``json`` parameter is
        # kept for backward compatibility.)
        where = dict(where) if where else {}
        where["status"] = "waiting"
        result = self._requestGET(where=where, limit=1, order='timestamp ASC', path='builds')
        return result[0] if len(result) else None

    def updateResults(self, data):
        """POST one result record per entry of ``data`` (a dict or list of dicts)."""
        if not isinstance(data, list):
            data = [data]
        o = {'buildId': self.id, 'data': {}}
        for d in data:
            o['data'] = d
            self._requestPOST(path='results', data=json.dumps(o))
        # TODO update the object with the new data?

    def updateStatus(self, status):
        """PATCH this build's status on the server; return the response."""
        result = self._requestPATCH(path='builds/%s' % self.id, data=json.dumps({'status': status}))
        # TODO update the object with the new data?
        return result

    def updateStarted(self, now):
        """PATCH this build's start timestamp on the server; return the response."""
        result = self._requestPATCH(path='builds/%s' % self.id, data=json.dumps({'started': now}))
        return result

    def postme(self):
        """POST this build's postable fields; return the created Build."""
        data = json.dumps(self.reprJSON(), cls=ComplexEncoder)
        result = self._requestPOST(path='builds', data=data)
        return Build(result)
class RemoteEncoder(json.JSONEncoder):
    """JSON encoder that serialises objects via their ``_postableFields`` whitelist.

    Each whitelisted value is itself dumped recursively with this encoder.
    """

    def default(self, o):
        if type(o) is str:
            return o
        # Fixed for Python 3: dict.iteritems() -> dict.items().
        r = {}
        for k, v in o.__dict__.items():
            if k in o._postableFields:
                r[k] = json.dumps(v, cls=RemoteEncoder)
        return r
class ComplexEncoder(json.JSONEncoder):
    """JSON encoder that delegates to an object's reprJSON() method if present."""

    def default(self, obj):
        if not hasattr(obj, 'reprJSON'):
            # Fall back to the standard behaviour (raises TypeError).
            return json.JSONEncoder.default(self, obj)
        return obj.reprJSON()
class Result(Remote):
    """A build result record; whitelists the fields that may be POSTed back."""

    _postableFields = [
        'children',
        'deb',
        'debdiff',
        'hunkId',
        'id',
        'log',
        'results',
        'status',
        'type']

    def __init__(self, json={}):
        Remote.__init__(self)
        # Adopt all fields of the raw record as attributes.
        self.__dict__.update(json)
class Results(list):
    """A list of Result objects, optionally fetched for a given build id."""

    def __init__(self, results=None, buildId=None):
        """Wrap ``results`` (raw records), or fetch them by ``buildId``."""
        list.__init__(self)
        # Bug fix: avoid the shared mutable default argument (results=[]).
        if results is None:
            results = []
        if buildId:
            r = Remote()
            results = r._requestGET(where={"buildId": buildId}, path='results')
        for result in results:
            self.append(Result(json=result))
zoo_torch/examples/pose_estimation_quanteval.py | quic/aimet-model-zoo | 89 | 12765164 | <reponame>quic/aimet-model-zoo
#!/usr/bin/env python3.6
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved.
#
# @@-COPYRIGHT-END-@@
# =============================================================================
"""
This script applies and evaluates a compressed pose estimation model which has a similar
structure with https://github.com/CMU-Perceptual-Computing-Lab/openpose. Evaluation is
done on 2014 val dataset with person keypoints only. This model is quantization-friendly
so no post-training methods or QAT were applied. For instructions please refer to
zoo_torch/Docs/PoseEstimation.md
"""
import os
import math
import argparse
from functools import partial
from tqdm import tqdm
import cv2
from scipy.ndimage.filters import gaussian_filter
import torch
import torch.nn as nn
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from aimet_torch import quantsim
def get_pre_stage_net():
    """Return the layer spec for the stage shared by both prediction branches.

    The spec maps a block name to a list of layer definitions in the same
    convention consumed by get_model()'s define_base_layers().
    """
    pre_stage = {
        'block_pre_stage': [
            {'sequential_CPM': [[512, 256, (3, 1), 1, (1, 0), False],
                                [256, 256, (1, 3), 1, (0, 1)]]},
            {'conv4_4_CPM': [256, 128, 3, 1, 1]},
        ]
    }
    return pre_stage
def get_shared_network_dict():
    """Build the full layer spec: pre-stage blocks plus two refinement stages.

    For each stage i in {1, 2} three blocks are defined: a shared trunk
    ('block{i}_shared') and two heads -- 'block{i}_1' (PAF branch, 38 output
    channels) and 'block{i}_2' (heatmap branch, 19 output channels).  Layer
    entries follow the [in, out, kernel, stride, padding(, bias)] convention
    used by get_model()'s define_base_layers().
    """
    network_dict = get_pre_stage_net()
    # Per-stage input channel counts (index 0 unused so stages are 1-based).
    stage_channel = [0, 128, 185, 185, 185, 185, 185]
    shared_channel = [0, 112, 128]
    sequential4_channel = [0, 32, 48]
    for i in range(1, 3):
        network_dict['block%d_shared' % i] = \
            [{'sequential1_stage%d_L1' % i:
                  [[stage_channel[i], shared_channel[i], (7, 1), 1, (3, 0), False],
                   [shared_channel[i], 128, (1, 7), 1, (0, 3)]]},
             {'sequential2_stage%d_L1' % i:
                  [[128, 112, (7, 1), 1, (3, 0), False],
                   [112, 128, (1, 7), 1, (0, 3)]]}]
        network_dict['block%d_1' % i] = [{'sequential3_stage%d_L1' % i:
                                              [[128, 32, (3, 1), 1, (1, 0), False],
                                               [32, 128, (1, 3), 1, (0, 1)]]},
                                         {'sequential4_stage%d_L1' % i:
                                              [[128, 32, (3, 1), 1, (1, 0), False],
                                               [32, 128, (1, 3), 1, (0, 1)]]},
                                         {'sequential5_stage%d_L1' % i:
                                              [[128, 32, (3, 1), 1, (1, 0), False],
                                               [32, 128, (1, 3), 1, (0, 1)]]},
                                         {'Mconv6_stage%d_L1' % i: [128, 128, 1, 1, 0]},
                                         {'Mconv7_stage%d_L1' % i: [128, 38, 1, 1, 0]}]
        network_dict['block%d_2' % i] = [{'sequential3_stage%d_L1' % i:
                                              [[128, 32, (3, 1), 1, (1, 0), False],
                                               [32, 128, (1, 3), 1, (0, 1)]]},
                                         {'sequential4_stage%d_L1' % i:
                                              [[128, sequential4_channel[i], (3, 1), 1, (1, 0), False],
                                               [sequential4_channel[i], 128, (1, 3), 1, (0, 1)]]},
                                         {'sequential5_stage%d_L1' % i:
                                              [[128, 48, (3, 1), 1, (1, 0), False],
                                               [48, 128, (1, 3), 1, (0, 1)]]},
                                         {'Mconv6_stage%d_L2' % i: [128, 128, 1, 1, 0]},
                                         {'Mconv7_stage%d_L2' % i: [128, 19, 1, 1, 0]}]
    return network_dict
def get_model(upsample=False):
    """Instantiate the lightweight pose-estimation network.

    Builds the VGG-like backbone spec (``block0``), materialises every block
    of the shared-weight spec from get_shared_network_dict() into
    nn.Sequential modules, and wires them together as a PoseModel.

    :param upsample: if True the returned PoseModel also emits 2x-upsampled
        stage-2 outputs (see PoseModel.forward).
    """
    # Backbone spec; entries are [in, out, kernel, stride, padding(, bias)]
    # or, for pools, [kernel, stride, padding].
    block0 = [{'conv0': [3, 32, 3, 1, 1]},
              {'sequential1':
                   [[32, 16, (3, 1), 1, (1, 0), False],
                    [16, 32, (1, 3), 1, (0, 1)]]}, {'pool1_stage1': [2, 2, 0]},
              {'sequential2':
                   [[32, 32, (3, 1), 1, (1, 0), False],
                    [32, 64, (1, 3), 1, (0, 1)]]},
              {'sequential3':
                   [[64, 32, (3, 1), 1, (1, 0), False],
                    [32, 96, (1, 3), 1, (0, 1)]]}, {'pool2_stage1': [2, 2, 0]},
              {'sequential4':
                   [[96, 80, (3, 1), 1, (1, 0), False],
                    [80, 256, (1, 3), 1, (0, 1)]]},
              {'sequential5':
                   [[256, 80, (3, 1), 1, (1, 0), False],
                    [80, 256, (1, 3), 1, (0, 1)]]},
              {'sequential6':
                   [[256, 48, (3, 1), 1, (1, 0), False],
                    [48, 128, (1, 3), 1, (0, 1)]]},
              {'sequential7':
                   [[128, 48, (3, 1), 1, (1, 0), False],
                    [48, 256, (1, 3), 1, (0, 1)]]}, {'pool3_stage1': [2, 2, 0]},
              {'sequential8':
                   [[256, 96, (3, 1), 1, (1, 0), False],
                    [96, 512, (1, 3), 1, (0, 1)]]},
              {'sequential9':
                   [[512, 192, (3, 1), 1, (1, 0), False],
                    [192, 512, (1, 3), 1, (0, 1)]]}]
    print("defining network with shared weights")
    network_dict = get_shared_network_dict()

    def define_base_layers(block, layer_size):
        """Translate the first ``layer_size`` spec entries into nn layers."""
        layers = []
        for i in range(layer_size):
            one_ = block[i]
            for k, v in zip(one_.keys(), one_.values()):
                if 'pool' in k:
                    layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])]
                elif 'sequential' in k:
                    # A 'sequential' entry is a separable pair of convs
                    # (e.g. 3x1 followed by 1x3) fused into one nn.Sequential.
                    conv2d_1 = nn.Conv2d(in_channels=v[0][0], out_channels=v[0][1], kernel_size=v[0][2],
                                         stride=v[0][3], padding=v[0][4], bias=v[0][5])
                    conv2d_2 = nn.Conv2d(in_channels=v[1][0], out_channels=v[1][1], kernel_size=v[1][2],
                                         stride=v[1][3], padding=v[1][4])
                    sequential = nn.Sequential(conv2d_1, conv2d_2)
                    layers += [sequential, nn.ReLU(inplace=True)]
                else:
                    conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2],
                                       stride=v[3], padding=v[4])
                    layers += [conv2d, nn.ReLU(inplace=True)]
        return layers

    def define_stage_layers(cfg_dict):
        """Like define_base_layers, but the last entry is a bare conv (no ReLU)."""
        layers = define_base_layers(cfg_dict, len(cfg_dict) - 1)
        one_ = cfg_dict[-1].keys()
        k = list(one_)[0]
        v = cfg_dict[-1][k]
        conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3],
                           padding=v[4])
        layers += [conv2d]
        return nn.Sequential(*layers)

    # create all the layers of the model
    base_layers = define_base_layers(block0, len(block0))
    pre_stage_layers = define_base_layers(network_dict['block_pre_stage'],
                                          len(network_dict['block_pre_stage']))
    models = {'block0': nn.Sequential(*base_layers),
              'block_pre_stage': nn.Sequential(*pre_stage_layers)}

    shared_layers_s1 = define_base_layers(network_dict['block1_shared'],
                                          len(network_dict['block1_shared']))
    shared_layers_s2 = define_base_layers(network_dict['block2_shared'],
                                          len(network_dict['block2_shared']))
    models['block1_shared'] = nn.Sequential(*shared_layers_s1)
    models['block2_shared'] = nn.Sequential(*shared_layers_s2)

    # Every remaining block ends in a bare conv head.
    for k, v in zip(network_dict.keys(), network_dict.values()):
        if 'shared' not in k and 'pre_stage' not in k:
            models[k] = define_stage_layers(v)

    model = PoseModel(models, upsample=upsample)
    return model
class PoseModel(nn.Module):
    """
    CMU pose estimation model.

    Based on: "Realtime Multi-Person 2D Pose Estimation using Part Affinity Fields":
    https://arxiv.org/pdf/1611.08050.pdf

    Made lighter and more efficient by Amir (<EMAIL>) in the
    Morpheus team.

    Some layers of the original commented out to reduce model complexity

    forward() returns (stage1_paf, stage1_heatmap, stage2_paf, stage2_heatmap)
    and, when ``upsample`` is set, additionally the 2x-upsampled stage-2 pair.
    """

    def __init__(self, model_dict, upsample=False):
        # model_dict: block name -> nn.Sequential, as built by get_model().
        super(PoseModel, self).__init__()
        self.upsample = upsample
        self.basemodel = model_dict['block0']
        self.pre_stage = model_dict['block_pre_stage']
        # *_1 heads predict PAFs (38 ch); *_2 heads predict heatmaps (19 ch).
        self.stage1_shared = model_dict['block1_shared']
        self.stage1_1 = model_dict['block1_1']
        self.stage2_1 = model_dict['block2_1']
        self.stage2_shared = model_dict['block2_shared']
        self.stage1_2 = model_dict['block1_2']
        self.stage2_2 = model_dict['block2_2']

    def forward(self, x):
        # Backbone + pre-stage features.
        out1_vgg = self.basemodel(x)
        out1 = self.pre_stage(out1_vgg)
        # Stage 1: shared trunk feeding the PAF and heatmap heads.
        out1_shared = self.stage1_shared(out1)
        out1_1 = self.stage1_1(out1_shared)
        out1_2 = self.stage1_2(out1_shared)
        # Stage 2 refines on [stage1 PAF, stage1 heatmap, pre-stage features].
        out2 = torch.cat([out1_1, out1_2, out1], 1)
        out2_shared = self.stage2_shared(out2)
        out2_1 = self.stage2_1(out2_shared)
        out2_2 = self.stage2_2(out2_shared)

        if self.upsample:
            # parameters to check for up-sampling: align_corners = True, mode='nearest'
            upsampler = nn.Upsample(scale_factor=2, mode='bilinear')
            out2_1_up = upsampler(out2_1)
            out2_2_up = upsampler(out2_2)
            return out1_1, out1_2, out2_1, out2_2, out2_1_up, out2_2_up
        else:
            return out1_1, out1_2, out2_1, out2_2
class ModelBuilder(object):
    """Small factory that builds and caches the pose-estimation model."""

    def __init__(self, upsample=False):
        # The built model is cached on the instance after create_model().
        self.model = None
        self.upsample = upsample

    def create_model(self):
        """Instantiate the network (optionally with upsampling heads), cache and return it."""
        self.model = get_model(self.upsample)
        return self.model
def non_maximum_suppression(map, thresh):
    """Find local maxima of a heatmap channel above ``thresh``.

    The map is Gaussian-smoothed first; a peak must be >= all four direct
    neighbours of the smoothed map and strictly above ``thresh``.  Returns a
    list of (x, y, score) tuples where the score is read from the
    *unsmoothed* input map.
    """
    smoothed = gaussian_filter(map, sigma=3)

    # Four neighbour maps: the smoothed heatmap shifted one pixel in each
    # direction, zero-padded at the borders.
    shifted_left = np.zeros(smoothed.shape)
    shifted_left[1:, :] = smoothed[:-1, :]
    shifted_right = np.zeros(smoothed.shape)
    shifted_right[:-1, :] = smoothed[1:, :]
    shifted_up = np.zeros(smoothed.shape)
    shifted_up[:, 1:] = smoothed[:, :-1]
    shifted_down = np.zeros(smoothed.shape)
    shifted_down[:, :-1] = smoothed[:, 1:]

    is_peak = np.logical_and.reduce((smoothed >= shifted_left,
                                     smoothed >= shifted_right,
                                     smoothed >= shifted_up,
                                     smoothed >= shifted_down,
                                     smoothed > thresh))
    rows, cols = np.nonzero(is_peak)
    # Coordinates are reported as (x, y) == (col, row); note the reversal.
    return [(x, y, map[y, x]) for x, y in zip(cols, rows)]
def pad_image(img, stride, padding):
    """Pad ``img`` (H x W x C) on the bottom/right so H and W are multiples of ``stride``.

    Returns (padded_image, pad) where pad = [up, left, down, right]; up and
    left are always 0 in this implementation.
    """
    height, width = img.shape[0], img.shape[1]

    pad = [0, 0, 0, 0]  # up, left, down, right
    if height % stride != 0:
        pad[2] = stride - (height % stride)
    if width % stride != 0:
        pad[3] = stride - (width % stride)

    padded = img
    # Each strip copies the border row/column's shape but is filled with the
    # constant ``padding`` value (the * 0 zeroes the copied pixels).
    strip_up = np.tile(padded[0:1, :, :] * 0 + padding, (pad[0], 1, 1))
    padded = np.concatenate((strip_up, padded), axis=0)
    strip_left = np.tile(padded[:, 0:1, :] * 0 + padding, (1, pad[1], 1))
    padded = np.concatenate((strip_left, padded), axis=1)
    strip_down = np.tile(padded[-2:-1, :, :] * 0 + padding, (pad[2], 1, 1))
    padded = np.concatenate((padded, strip_down), axis=0)
    strip_right = np.tile(padded[:, -2:-1, :] * 0 + padding, (1, pad[3], 1))
    padded = np.concatenate((padded, strip_right), axis=1)

    return padded, pad
def encode_input(image, scale, stride, padding):
    """Scale ``image`` by ``scale`` and pad it to a multiple of ``stride``.

    Returns (scaled_padded_image, pad) exactly as produced by pad_image().
    """
    scaled = cv2.resize(image, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    return pad_image(scaled, stride, padding)
def decode_output(data, stride, padding, input_shape, image_shape):
    """Map a network output tensor back to original-image coordinates.

    Squeezes the batch axis, moves channels last, upsamples by ``stride``,
    crops the bottom/right padding added by pad_image(), then resizes to the
    original image size.
    """
    maps = np.transpose(np.squeeze(data), (1, 2, 0))
    maps = cv2.resize(maps, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
    maps = maps[:input_shape[0] - padding[2], :input_shape[1] - padding[3], :]
    return cv2.resize(maps, (image_shape[1], image_shape[0]), interpolation=cv2.INTER_CUBIC)
def preprocess(image, transforms):
    """Apply a set of named preprocessing steps to ``image``.

    Recognised names in ``transforms``: 'bgr' (reverse the channel axis),
    'tr' (HWC -> CHW transpose), 'mean' (subtract per-channel means),
    'addchannel' (prepend a batch axis), 'normalize' (scale to [-0.5, 0.5)).
    Steps are applied in this fixed order regardless of list order.
    """
    mean_bgr = [34.282957728666474, 32.441979567868017, 24.339757511312481]
    image = image.astype(np.float32)

    if 'bgr' in transforms:
        # Reverse the channel axis wherever it is (first or last).
        if image.shape[0] == 3:
            image = image[::-1, :, :]
        elif image.shape[2] == 3:
            image = image[:, :, ::-1]

    if 'tr' in transforms:
        image = image.transpose((2, 0, 1))

    if 'mean' in transforms:
        # NOTE(review): assumes channels-first layout at this point, i.e. that
        # 'tr' was also requested (or the input was already CHW) -- confirm.
        image[0, :, :] -= mean_bgr[0]
        image[1, :, :] -= mean_bgr[1]
        image[2, :, :] -= mean_bgr[2]

    if 'addchannel' in transforms:
        image = image[np.newaxis, :, :, :]

    if 'normalize' in transforms:
        image = image / 256 - 0.5

    return image
def run_model(model, image, fast=False):
    """Run the pose network on ``image`` and return (heatmap, paf) averaged over scales.

    In ``fast`` mode the image is resized to a fixed 496x384 (or 384x496)
    input and the raw outputs are resized straight back; otherwise the image
    is scaled relative to ``crop``, stride-padded, and the outputs are decoded
    via decode_output().  Outputs are channels-last numpy arrays at the
    original image resolution.
    """
    scale_search = [1.]
    crop = 368
    stride = 8
    padValue = 128
    if fast:
        scales = scale_search
    else:
        # Scale so that the image height roughly matches the training crop.
        scales = [x * crop / image.shape[0] for x in scale_search]
    heatmaps, pafs = [], []
    for scale in scales:
        if fast:
            horiz = image.shape[0] < image.shape[1]
            sz = (496, 384) if horiz else (384, 496)
            image_encoded = cv2.resize(image, dsize=(int(sz[0] * scale), int(sz[1] * scale)))
        else:
            # ``pad`` is only defined on this branch; the fast branch below
            # never reads it, so this is safe.
            image_encoded, pad = encode_input(image, scale, stride,
                                              padValue)
        image_encoded_ = preprocess(image_encoded,
                                    ['addchannel', 'normalize', 'bgr'])
        # NHWC -> NCHW for the torch model.
        image_encoded_ = np.transpose(image_encoded_, (0, 3, 1, 2))
        with torch.no_grad():
            input_image = torch.FloatTensor(torch.from_numpy(image_encoded_).float())
            if next(model.parameters()).is_cuda:
                input_image = input_image.to(device='cuda')
            output = model(input_image)
            # Stage-2 outputs: output[2] is the PAF head, output[3] the heatmap
            # head; move channels last for post-processing.
            paf = output[2].cpu().data.numpy().transpose((0, 2, 3, 1))
            heatmap = output[3].cpu().data.numpy().transpose((0, 2, 3, 1))
        if fast:
            paf = cv2.resize(paf[0], (image.shape[1], image.shape[0]))
            heatmap = cv2.resize(heatmap[0], dsize=(image.shape[1], image.shape[0]))
        else:
            # paf = paf.transpose((0, 3, 1, 2))
            # heatmap = heatmap.transpose((0, 3, 1, 2))
            paf = decode_output(paf, stride, pad, image_encoded.shape,
                                image.shape)
            heatmap = decode_output(heatmap, stride, pad, image_encoded.shape,
                                    image.shape)
        pafs.append(paf)
        heatmaps.append(heatmap)

    # Average the predictions over all search scales.
    return np.asarray(heatmaps).mean(axis=0), np.asarray(pafs).mean(axis=0)
def get_keypoints(heatmap):
    """Run non-maximum suppression on each of the 18 joint heatmap channels.

    Returns a list with one entry per joint type; each entry is a list of
    (x, y, score, global_id) tuples, where global_id is unique across all
    joint types.
    """
    thre1 = 0.1
    all_joints = []
    next_id = 0
    for channel in range(18):
        found = non_maximum_suppression(heatmap[:, :, channel], thre1)
        # Append a running, globally unique id to every detected keypoint.
        found = [pt + (next_id + i,) for i, pt in enumerate(found)]
        all_joints.append(found)
        next_id += len(found)
    return all_joints
def get_limb_consistency(paf, start_keypoint, end_keypoint, image_h, div_num=10):
    """Score how well the PAF supports a limb between two keypoints.

    Samples ``div_num`` points on the segment between the keypoints and dots
    the PAF vector at each sample with the unit limb direction.  Returns the
    per-sample similarities and a mean with a heuristic prior that penalises
    limbs longer than half the image height.
    """
    direction = np.subtract(end_keypoint[:2], start_keypoint[:2])
    length = math.sqrt(direction[0] * direction[0] + direction[1] * direction[1])
    if length == 0:
        length = 1  # coincident keypoints: avoid division by zero
    direction = np.divide(direction, length)

    # Integer sample coordinates along the limb segment (x, then y).
    xs = np.linspace(start_keypoint[0], end_keypoint[0], num=div_num).astype(int)
    ys = np.linspace(start_keypoint[1], end_keypoint[1], num=div_num).astype(int)
    paf_x = np.array([paf[ys[k], xs[k], 0] for k in range(div_num)])
    paf_y = np.array([paf[ys[k], xs[k], 1] for k in range(div_num)])

    # Discrete line integral of <paf(u), direction> along the segment.
    sims = np.multiply(paf_x, direction[0]) + np.multiply(paf_y, direction[1])
    # Heuristic prior punishing very long predicted limbs.
    sims_prior = sims.mean() + min(0.5 * image_h / length - 1, 0)
    return sims, sims_prior
def connect_keypoints(image_shape, keypoints, paf, limbs, limbsInds):
    """For every limb type, pair candidate start/end keypoints using the PAF.

    Returns one entry per limb type: an (n, 3) array with rows
    [start_keypoint_global_id, end_keypoint_global_id, score], or an empty
    list when either endpoint set is empty.
    """
    thre2 = 0.05
    connections = []
    # Joint types considered "small" (neck/eyes/ears area).
    small_limb_list = [1, 15, 16, 17, 18]
    for k in range(len(limbsInds)):
        paf_limb = paf[:, :, limbsInds[k]]
        limb_strs = keypoints[limbs[k][0]]
        limb_ends = keypoints[limbs[k][1]]
        if len(limb_strs) != 0 and len(limb_ends) != 0:
            cands = []
            for i, limb_str in enumerate(limb_strs):
                for j, limb_end in enumerate(limb_ends):
                    # for each potential pair of keypoints which can have a limb in between we
                    # measure a score using the get_limb_consistency function
                    # NOTE(review): both branches below are currently
                    # identical (same div_num); presumably small limbs were
                    # meant to use a different sampling density -- confirm
                    # before merging or changing them.
                    if limbs[k][0] in small_limb_list or limbs[k][1] in small_limb_list:
                        sims, sims_p = get_limb_consistency(paf_limb, limb_str, limb_end,
                                                            image_shape[0], div_num=10)
                    else:
                        sims, sims_p = get_limb_consistency(paf_limb, limb_str, limb_end,
                                                            image_shape[0], div_num=10)
                    # Keep the pair only if >= 80% of the samples support the
                    # limb direction and the length-penalised mean is positive.
                    if len(np.where(sims > thre2)[0]) > int(0.80 * len(sims)) and sims_p > 0:
                        cands.append([i, j, sims_p])
            # Greedy matching: best-scoring pairs first, each keypoint at most once.
            cands = sorted(cands, key=lambda x: x[2], reverse=True)
            connection = np.zeros((0, 3))
            visited_strs, visited_ends = [], []
            for cand in cands:
                i, j, s = cand
                if i not in visited_strs and j not in visited_ends:
                    # Index 3 of a keypoint tuple is its global id.
                    connection = np.vstack([connection, [limb_strs[i][3], limb_ends[j][3], s]])
                    visited_strs.append(i)
                    visited_ends.append(j)
                    if len(connection) >= min(len(limb_strs), len(limb_ends)):
                        break

            connections.append(connection)
        else:
            connections.append([])
    return connections
def create_skeletons(keypoints, connections, limbs):
    """Assemble per-limb connections into whole-person skeletons.

    Each skeleton row has 20 entries: 18 keypoint global ids (-1 for missing),
    then the accumulated score, then the number of detected parts.  Skeletons
    sharing an endpoint are extended or merged; weak skeletons are dropped at
    the end.  Returns {'keypoints': (n, 18), 'scores': (n,)}.
    """
    # last number in each row is the total parts number of that person
    # the second last number in each row is the score of the overall configuration
    skeletons = -1 * np.ones((0, 20))
    # Flat view of all keypoints so global ids can index scores directly.
    keypoints_flatten = np.array([item for sublist in keypoints for item in sublist])

    for k in range(len(limbs)):
        if len(connections[k]) > 0:
            detected_str = connections[k][:, 0]
            detected_end = connections[k][:, 1]
            limb_str, limb_end = np.array(limbs[k])

            for i in range(len(connections[k])):
                # Find existing skeletons already containing either endpoint.
                found = 0
                subset_idx = [-1, -1]
                for j in range(len(skeletons)):
                    if skeletons[j][limb_str] == detected_str[i] or \
                            skeletons[j][limb_end] == detected_end[i]:
                        subset_idx[found] = j
                        found += 1

                if found == 1:
                    # Extend the matching skeleton with the new endpoint.
                    j = subset_idx[0]
                    if skeletons[j][limb_end] != detected_end[i]:
                        skeletons[j][limb_end] = detected_end[i]
                        skeletons[j][-1] += 1
                        skeletons[j][-2] += keypoints_flatten[detected_end[i].astype(int), 2] + \
                                            connections[k][i][2]
                elif found == 2:  # if found 2 and disjoint, merge them
                    j1, j2 = subset_idx
                    membership = ((skeletons[j1] >= 0).astype(int) +
                                  (skeletons[j2] >= 0).astype(int))[:-2]
                    if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                        skeletons[j1][:-2] += (skeletons[j2][:-2] + 1)
                        skeletons[j1][-2:] += skeletons[j2][-2:]
                        skeletons[j1][-2] += connections[k][i][2]
                        skeletons = np.delete(skeletons, j2, 0)
                    else:  # as like found == 1
                        skeletons[j1][limb_end] = detected_end[i]
                        skeletons[j1][-1] += 1
                        skeletons[j1][-2] += keypoints_flatten[detected_end[i].astype(int), 2] + \
                                             connections[k][i][2]

                # if find no partA in the subset, create a new subset
                elif not found and k < 17:
                    row = -1 * np.ones(20)
                    row[limb_str] = detected_str[i]
                    row[limb_end] = detected_end[i]
                    row[-1] = 2
                    row[-2] = sum(keypoints_flatten[connections[k][i, :2].astype(int), 2]) + \
                              connections[k][i][2]
                    skeletons = np.vstack([skeletons, row])

    # delete some rows of subset which has few parts occur
    deleteIdx = []
    for i in range(len(skeletons)):
        if skeletons[i][-1] < 4 or skeletons[i][-2] / skeletons[i][-1] < 0.4:
            deleteIdx.append(i)
    skeletons = np.delete(skeletons, deleteIdx, axis=0)

    return {'keypoints': skeletons[:, :18], 'scores': skeletons[:, 18]}
def estimate_pose(image_shape, heatmap, paf):
    """Full post-processing: heatmap/PAF -> skeletons.

    Runs NMS on the heatmaps, matches keypoint pairs per limb via the PAF,
    and assembles skeletons.  Returns (skeletons_dict, flat_keypoint_array).
    """
    # limbs as pair of keypoints: [start_keypoint, end_keypoint] keypoints index to heatmap matrix
    limbs = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11],
             [11, 12], [12, 13],
             [1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 16], [5, 17]]

    # index where each limb stands in paf matrix. Two consecutive indices for x and y component
    # of paf
    limbsInd = [[12, 13], [20, 21], [14, 15], [16, 17], [22, 23], [24, 25], [0, 1], [2, 3], [4, 5],
                [6, 7], [8, 9],
                [10, 11], [28, 29], [30, 31], [34, 35], [32, 33], [36, 37], [18, 19], [26, 27]]

    # Computing the keypoints using non-max-suppression
    keypoints = get_keypoints(heatmap)

    # Computing which pairs of joints should be connected based on the paf.
    connections = connect_keypoints(image_shape, keypoints, paf, limbs, limbsInd)

    skeletons = create_skeletons(keypoints, connections, limbs)

    return skeletons, np.array([item for sublist in keypoints for item in sublist])
def parse_results(skeletons, points):
    """Convert internal skeleton keypoint indices into COCO-ordered joint dicts.

    Parameters
    ----------
    skeletons : dict
        ``{'keypoints': ..., 'scores': ...}`` as returned by the grouping step;
        each keypoints row holds 18 indices into ``points`` (-1 means missing).
    points : array-like, shape (N, 3)
        Flattened keypoint candidates as (x, y, score) rows.

    Returns
    -------
    dict
        ``{'skeletons': [...], 'scores': [...]}`` where each skeleton is a list
        of ``{'x', 'y', 'score', 'id'}`` dicts with COCO joint ids.
    """
    # Maps internal joint slot -> COCO keypoint id; slot 1 (neck) has no COCO
    # counterpart and is marked -1.
    coco_indices = [0, -1, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3]
    parsed_skeletons = []
    confidences = []
    for total_score, kp_indices in zip(skeletons['scores'], skeletons['keypoints']):
        joints = []
        for slot, kp_val in enumerate(kp_indices):
            if slot == 1:
                # Internal neck joint: skipped, it does not exist in COCO.
                continue
            point_idx = int(kp_val)
            if point_idx >= 0:
                joints.append({'x': points[point_idx, 0],
                               'y': points[point_idx, 1],
                               'score': points[point_idx, 2],
                               'id': coco_indices[slot]})
        parsed_skeletons.append(joints)
        confidences.append(total_score)
    return {'skeletons': parsed_skeletons, 'scores': confidences}
class COCOWrapper:
    """Convenience wrapper around the COCO keypoint ground-truth and evaluation APIs.

    Parameters
    ----------
    coco_path : str
        Dataset root; expected to contain ``annotations/`` and ``images/val2014/``.
    num_imgs : int or None
        When set, restricts evaluation to the first ``num_imgs`` image ids.
    """
    def __init__(self, coco_path, num_imgs=None):
        self.coco_path = coco_path
        self.num_imgs = num_imgs
        # Cache for the lazily-loaded ground-truth object (see `cocoGT`).
        self._cocoGT = None
        # sys.path.append(self.coco_apth + "codes/PythonAPI")
    def get_images(self):
        """Return the image records to evaluate, truncated to `num_imgs` if set."""
        gt = self.cocoGT  # single (cached) lookup instead of two separate loads
        imgs = gt.imgs.values()
        image_ids = sorted(img['id'] for img in imgs)
        if self.num_imgs:
            image_ids = image_ids[:self.num_imgs]
        # Set membership is O(1); the original scanned a list per image.
        keep = set(image_ids)
        return [img for img in imgs if img['id'] in keep]
    def evaluate_json(self, obj):
        """Score detection objects `obj` against the ground truth.

        Returns ``cocoEval.stats[0::5]``, i.e. [AP@OKS=.50:.95, AR@OKS=.50:.95].
        """
        # initialize COCO detections api
        cocoDT = self.cocoGT.loadRes(obj)
        imgIds = sorted(self.cocoGT.getImgIds())
        if self.num_imgs:
            imgIds = imgIds[:self.num_imgs]
        # running evaluation
        cocoEval = COCOeval(self.cocoGT, cocoDT, 'keypoints')
        cocoEval.params.imgIds = imgIds
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        return cocoEval.stats[0::5]
    def get_results_json(self, results, imgs):
        """Convert per-image skeleton results into COCO detection JSON objects.

        `results` and `imgs` must be parallel sequences (one result per image).
        """
        results_obj = []
        for img, result in zip(imgs, results):
            for score, skeleton in zip(result['scores'], result['skeletons']):
                obj = {'image_id': img['id'], 'category_id': 1, 'keypoints': np.zeros(shape=(3, 17))}
                for keypoint in skeleton:
                    # presumably the 0.5 shift converts to COCO pixel-centre
                    # coordinates — TODO confirm against the detector's convention
                    obj['keypoints'][0, keypoint['id']] = keypoint['x'] - 0.5
                    obj['keypoints'][1, keypoint['id']] = keypoint['y'] - 0.5
                    obj['keypoints'][2, keypoint['id']] = 1
                # Column-major flatten -> [x1, y1, v1, x2, y2, v2, ...] layout.
                obj['keypoints'] = list(np.reshape(obj['keypoints'], newshape=(51,), order='F'))
                # NOTE(review): a skeleton with zero joints would raise
                # ZeroDivisionError here — confirm upstream filtering.
                obj['score'] = score / len(skeleton)
                results_obj.append(obj)
        return results_obj
    @property
    def cocoGT(self):
        """COCO ground-truth API object, parsed once and cached.

        Bug fix: the original re-read and re-parsed the large annotation JSON
        on every property access (`get_images` alone accessed it twice).
        """
        if self._cocoGT is None:
            annType = 'keypoints'
            prefix = 'person_keypoints'
            print('Initializing demo for *%s* results.' % (annType))
            # initialize COCO ground truth api
            dataType = 'val2014'
            annFile = os.path.join(self.coco_path, 'annotations/%s_%s.json' % (prefix, dataType))
            cocoGT = COCO(annFile)
            if not cocoGT:
                raise AttributeError('COCO ground truth demo failed to initialize!')
            self._cocoGT = cocoGT
        return self._cocoGT
def evaluate_model(model,
                   coco_path,
                   num_imgs=None,
                   fast=True):
    """Run the pose-estimation model over COCO val2014 and return [mAP, mAR].

    Parameters
    ----------
    model : torch.nn.Module
        Model (or quantized sim model) consumed by ``run_model``.
    coco_path : str
        Dataset root containing ``images/val2014/`` and ``annotations/``.
    num_imgs : int or None
        Evaluate only the first ``num_imgs`` images when set.
    fast : bool
        Forwarded to ``run_model`` (inference mode toggle).
    """
    coco = COCOWrapper(coco_path, num_imgs)
    results = []
    image_path = os.path.join(coco.coco_path, 'images/val2014/')
    imgs = coco.get_images()
    print("Running extended evaluation on the validation set")
    # Iterate images directly (the old enumerate index was unused, and wrapping
    # enumerate in tqdm hid the progress-bar total).
    for img in tqdm(imgs):
        image = cv2.imread(image_path + img['file_name'])  # B,G,R order
        heatmap, paf = run_model(model, image, fast)
        skeletons, keypoints = estimate_pose(image.shape, heatmap, paf)
        results.append(parse_results(skeletons, keypoints))
    try:
        return coco.evaluate_json(coco.get_results_json(results, imgs))
    except Exception:
        # Bug fix: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt. Keep the original best-effort contract of
        # returning zero scores when evaluation fails (e.g. no detections).
        return [0, 0]
def parse_args():
    """Parse command-line arguments for the quantized pose-estimation evaluation.

    Returns
    -------
    argparse.Namespace
        ``model_dir`` (path to the .pth weights file), ``coco_path`` (dataset
        root), optional ``representative_datapath`` and ``quant_scheme``
        ('tf' or 'tf_enhanced', default 'tf_enhanced').
    """
    # Bug fix: the description previously said "SRGAN model" — a copy/paste
    # leftover from a different quanteval script.
    parser = argparse.ArgumentParser(prog='pose_estimation_quanteval',
                                     description='Evaluate the post quantized pose estimation model')
    parser.add_argument('model_dir',
                        help='The location where the .pth file is saved, '
                             'the .pth contains model weights',
                        type=str)
    parser.add_argument('coco_path',
                        help='The location coco images and annotations are saved. '
                             'It assumes a folder structure containing two subdirectories '
                             '`images/val2014` and `annotations`. Right now only val2014 '
                             'dataset with person_keypoints are supported',
                        type=str)
    parser.add_argument('--representative-datapath',
                        '-reprdata',
                        help='The location where representative data are stored. '
                             'The data will be used for computation of encodings',
                        type=str)
    parser.add_argument('--quant-scheme',
                        '-qs',
                        help='Support two schemes for quantization: [`tf` or `tf_enhanced`],'
                             '`tf_enhanced` is used by default',
                        default='tf_enhanced',
                        choices=['tf', 'tf_enhanced'],
                        type=str)
    return parser.parse_args()
def pose_estimation_quanteval(args):
    """Build the pose model, quantize it with AIMET QuantSim and report COCO scores.

    Parameters
    ----------
    args : argparse.Namespace
        Uses ``model_dir`` (path to the .pth weights), ``coco_path``
        (dataset root) and ``quant_scheme``.
    """
    # load the model checkpoint from meta
    model_builder = ModelBuilder()
    model_builder.create_model()
    model = model_builder.model
    # Merge the checkpoint into the freshly built model's state dict, so keys
    # absent from the checkpoint keep their initialized values.
    state_dict = torch.load(args.model_dir)
    state = model.state_dict()
    state.update(state_dict)
    model.load_state_dict(state)
    # create quantsim object which inserts quant ops between layers
    sim = quantsim.QuantizationSimModel(model,
                                        input_shapes=(1, 3, 128, 128),
                                        quant_scheme=args.quant_scheme)
    # Calibration pass: 500 validation images are used to compute encodings.
    # NOTE(review): `fast` is left at its default here — confirm that matches
    # the final-evaluation configuration below.
    evaluate = partial(evaluate_model,
                       num_imgs=500
                       )
    sim.compute_encodings(evaluate, args.coco_path)
    # Full evaluation of the quantized model on the validation set.
    eval_num = evaluate_model(sim.model,
                              args.coco_path
                              )
    print(f'The [mAP, mAR] results are: {eval_num}')
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and run the quantized evaluation.
    args = parse_args()
    pose_estimation_quanteval(args)
| 1.859375 | 2 |
google_analytics/light_gbm_valid_set.py | mathxyz/stock2 | 92 | 12765165 | <reponame>mathxyz/stock2
import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn.model_selection import train_test_split
#run
# Load the pre-built feature matrices and target vector from disk.
X_train = np.load('X_train.dat')
y_train = np.load('y_train.dat')
X_test = np.load('X_test.dat')
# Interactive sanity checks of the array dimensions (no effect when run as a script).
X_train.shape
y_train.shape
X_test.shape
#split off a validation set
X_trainr, X_val, y_trainr, y_val = train_test_split(X_train, y_train,
                                   test_size = 0.2, random_state = 1738)
#basic starting params, can tune via grid search once things moving well
lgb_params = {
    "seed": 1738,
    "bagging_seed" : 42,
    "objective" : "regression",
    "metric" : "rmse",
    "num_leaves" : 25,
    "learning_rate" : 0.01,
    "bagging_fraction" : 0.6,
    "feature_fraction" : 0.6,
}
#load in lgbm matrix fmt
ltrain = lgb.Dataset(X_trainr, label = y_trainr)
lval = lgb.Dataset(X_val, label = y_val)
#build and train the model
# Early stopping on the validation RMSE; up to 5000 boosting rounds.
lgb_model1 = lgb.train(lgb_params, ltrain,
                      num_boost_round = 5000,
                      valid_sets = [ltrain, lval],
                      early_stopping_rounds = 100,
                      verbose_eval = 100)
#make predictions on the test data
test_y = lgb_model1.predict(X_test, num_iteration = lgb_model1.best_iteration)
# sum the predictions using the defined formula to get a revenue by user metric
# aggregate on 'fullVisitorId'
# final_test['fullVisitorId' ]
#issue - the ids in the submission file and the ids in the test aren't a 1:1 match?
#have I jumbled them or something?
#resolved - they were mixed type in the train, some string, some int...
#   I flipped all ids in both sub and test to str... still not all there :/
# Visitor ids are forced to str so they merge cleanly with the submission file.
final_test = pd.read_csv('./data/test_cleaned.csv')
final_test['fullVisitorId'] = final_test['fullVisitorId'].astype('str')
final_pred = final_test[['fullVisitorId']].copy()
final_pred['test_pred'] = test_y
#group by id
# One row per visitor: per-visit predictions are summed per fullVisitorId.
final_by_ind = final_pred.groupby(['fullVisitorId']).sum()
#move index to a col
final_by_ind = final_by_ind.reset_index()
#merge the predictions with the sample sub
submission = pd.read_csv('./data/sample_submission.csv')
submission = submission.merge(final_by_ind, on = 'fullVisitorId', how = 'left')
#fill nas and move to right column name
# Visitors absent from the test predictions get zero predicted revenue.
submission['PredictedLogRevenue'] = submission['test_pred'].fillna(0.0)
submission = submission.drop(['test_pred'], axis = 1)
def set_min_zero(x):
    """Clamp a predicted value to be non-negative.

    Predicted log-revenue cannot meaningfully be negative, so negative model
    outputs are floored at zero.

    Parameters
    ----------
    x : numeric
        Raw model prediction.

    Returns
    -------
    numeric
        ``x`` when non-negative, otherwise ``0``.
    """
    # Idiomatic replacement for the original if/else clamp.
    return max(x, 0)
# Floor negative revenue predictions at zero before writing the submission.
submission['PredictedLogRevenue'] = submission['PredictedLogRevenue'].apply(
    lambda x: set_min_zero(x))
#submit the output
submission.to_csv('cam_lightgbm_pred4_floor.csv', index = False)
# Leaderboard history for successive attempts:
#1.78 first go, worse than all 0s
#1.775 on second... beating the all 0s but barely.
#1.6371 on third... making gains now
"""
changes:
try3 : dropped the categoricals with 50+ options, possibly too much noise in the features
training
[1968]  training's rmse: 1.53077        valid_1's rmse: 1.6381
lb: 1.6371
NOTES:
#train, drop features/ repeat seems like a good way to go... removing the noisy columns
greatly improved the accuracy
#need to figure out what is causing the lack of strength in predictions.
#try:
    #upsampling the #s
    #PCA
    #increase the train size?
#something is still wrong here... shouldn't be getting negative predictions
#need to find out how to ensure the predictions stay positive
#once the above is working try the following:
1. up the iterations to train a little
2. grid search to pick better hyperparams
3. need a faster learning rate to get things working
"""
ems_auth/models.py | Atwinenickson/lendsuphumanresourcemanagement | 36 | 12765166 | <filename>ems_auth/models.py
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import PermissionsMixin
from django.db import models
# Create your models here.
from employees.models import Employee
class UserManager(BaseUserManager):
    """Model manager that creates accounts keyed by e-mail address instead of username."""

    def create_user(self, email, password, **extra_fields):
        """Create, persist and return a regular user account."""
        if not email:
            raise ValueError('The email must be set')
        account = self.model(email=self.normalize_email(email), **extra_fields)
        account.set_password(password)
        account.save()
        return account

    def create_superuser(self, email, password, **extra_fields):
        """Create an administrator account; staff/superuser/active flags are enforced."""
        for flag, value in (('is_staff', True), ('is_superuser', True), ('is_active', True)):
            extra_fields.setdefault(flag, value)
        if extra_fields.get('is_staff') is not True:
            raise ValueError('Superuser must have is_staff=True.')
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True.')
        return self.create_user(email=email, password=password, **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model authenticated by e-mail address; role flags mark org positions."""
    email = models.EmailField(unique=True)
    # Django account/permission flags.
    is_superuser = models.BooleanField(default=False)
    is_staff = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)
    # Organisational role flags (head of department, HR, CFO, CEO, supervisor).
    is_hod = models.BooleanField(default=False)
    # NOTE(review): max_length has no effect on a BooleanField — likely a
    # leftover from an earlier CharField definition; confirm before removing.
    is_hr = models.BooleanField(max_length=10, default=False)
    is_cfo = models.BooleanField(default=False)
    is_ceo = models.BooleanField(default=False)
    is_supervisor = models.BooleanField(default=False)
    # True once the user has replaced the initially issued password.
    password_changed = models.BooleanField(default=False)
    # Authenticate with the e-mail address instead of a username.
    USERNAME_FIELD = 'email'
    objects = UserManager()
    # Return something meaningful
    def __str__(self):
        return '{}'.format(self.email)
    @property
    def status(self):
        """Human-readable account status derived from `is_active`."""
        if self.is_active:
            return 'Active'
        else:
            return 'Inactive'
    @property
    def get_unread_notifications(self):
        """Queryset of this user's notifications still marked 'Unread'."""
        return self.notification_set.filter(status="Unread")
    class Meta:
        # NOTE(review): redundant with unique=True on the email field itself.
        unique_together = ['email']
class SolitonUser(models.Model):
    """One-to-one bridge between a login account (User) and an HR Employee record."""
    # Deleting either the account or the employee cascades to this link row.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    employee = models.OneToOneField(Employee, on_delete=models.CASCADE)
| 2.359375 | 2 |
splunk_ansible_code-master/backend/update_apps_in_branch.py | jfhucka/Configator | 1 | 12765167 | <reponame>jfhucka/Configator
################################################
#
# This is a python that pulls all the splunk apps from teh git:/splunk_apps repo
#
# This script should be attached to a cron job, if the local repo branch is to be kept up to date.
# Or this script can be executed manaully after the user has made a change to the splunk_apps master repo.
#
# It is assumed that this script can be called at multiples by many different users, including the a crontab.
# It is important that "git pulls" do not overlap and are not too burdensome to the Ansible server.
#
# Therefore ....
# Any git pull, will be proceeded by a git pull lock.
# And process must successfully apply the git pull lock before attempting a git pull.
# Only one git pull lock can exist at any time.
# If a git pull is attempted, but another git pull is in process, then wait until the current git pull is done.
# ... if the somoeone else then sets a git repo lock, then exit, Effectively someone has done a git pull on your behalf
# Git pull locks and git push locks can not coincide.
#
###############################################
# Lock bit location relative to location of this script.
lockBitLocation = "lockDirectory"
# Log output directory, also relative to the script's working directory.
logFileLocation = "logs"
# Global debug switch: echoes progress/errors to stdout when True.
debug=True
import argparse
import os
import time
import subprocess
import re
import glob
import shutil
def start_logging(cwd):
    """Create a timestamped log file under ``<cwd>/logs`` and return its open handle.

    NOTE(review): every error path calls exit(0), so shell callers cannot
    distinguish failure from success — confirm whether a non-zero status
    was intended.
    """
    # Get the current time and create the log file
    timestamp = time.strftime("%Y%m%d%H%M%S")
    logFileName = "refresh_splunk_app_repo-"+timestamp
    logDir = cwd+"/"+logFileLocation
    logFileFullPath = logDir+"/"+logFileName
    if not os.path.isdir(logDir):
        os.makedirs(logDir)
        if(debug): print "Created log directory "+logDir
    # A pre-existing file of the same name (same second) aborts the run.
    if os.path.isfile(logFileFullPath):
        if(debug): print "ERROR. Log file exists."
        exit(0)
    else:
        try:
            f = open(logFileFullPath, "w")
        except:
            if (debug): print "ERROR. Not able to open log file "+logFileFullPath
            exit(0)
        # Populate the logfile with an opening event ..
        timestamp = time.strftime("%Y-%m-%d %H:%M:%S")
        f.write(timestamp+" Going to refresh the splunk_apps repo on this server.\n")
    return(f)
def log_message(logFile,message):
    """Append one timestamped event line to an already-open log file handle."""
    stamp = time.strftime("%Y-%m-%d %H:%M:%S")
    line = "{0} message='{1}'\n".format(stamp, message)
    logFile.write(line)
    # Preserve the original quirk of returning an empty tuple.
    return ()
def stop_logging(fileHandle):
    """Write a closing event to the log and close the underlying file handle."""
    # Populate the logfile with an closing event ..
    closing_line = "%s message=Stopped Logging.\n" % time.strftime("%Y-%m-%d %H:%M:%S")
    fileHandle.write(closing_line)
    fileHandle.close()
    # Preserve the original quirk of returning an empty tuple.
    return ()
if __name__ == "__main__":
# Parse arguments prog='splunk_ansible',
parse = argparse.ArgumentParser(usage='%(prog)s ', description='Pull the latest splunk apps down from the splunk_apps repo')
args = parse.parse_args()
# Start a log file
# Create a updating_apps_<time_millsecond>.lck token and wait for the host.lck tokens to drain.
# Check for only one updating_apps token. If 2, then the latest wins. The earliest dies.
# Pull splunk_apps configs to local repo.
# Record the freshness date in a local file
# Close log
cwd = os.getcwd()
cwd = "/var/directory/manage_splunk"
logFile = start_logging(cwd)
# Create a time based "updating_apps_*.lck" token
time_marker = int(round(time.time() * 1000))
lock_file_name = "updating_apps_"+str(time_marker)+".lck"
lockDir = cwd+"/"+lockBitLocation
lockFileFullPath_apps = lockDir+"/"+lock_file_name
try:
if not os.path.isdir(lockDir):
os.makedirs(lockDir)
if (debug): print "Created lock directory "+lockDir
with open(lockFileFullPath_apps, "w") as f:
f.write("")
f.close()
log_message(logFile,"Created a lock file "+lockFileFullPath_apps)
except:
if (debug): print "ERROR. Not able to create "+lockFileFullPath_apps
log_message(logFile,"ERROR. Not able to create "+lockFileFullPath_apps)
stop_logging(logFile)
exit()
# Wait for host .lck file to drain
num_tries = 0
lockFileFullPath = lockDir+"/[!updating_apps]*.lck"
while (num_tries < 30):
num_tries = num_tries+1
host_lock_list = glob.glob(lockFileFullPath)
#print "host_lock_list="+str(host_lock_list)
#host_lock_list = [fn for fn in glob(lockFileFullPath) if not os.path.basename(fn).startswith("updating_apps")]
if len(host_lock_list) > 0:
log_message(logFile,"INFO. Detected "+str(host_lock_list)+". Will wait 5 seconds for these hosts to finish being updated.")
if (debug): print "INFO. Hosts are being updated. Wait 5 seconds and try again."
time.sleep(5)
else:
break
if num_tries == 30:
log_message(logFile,"ERROR. Can not proceed because hosts are STILL being updated.")
if (debug): print "ERROR. Can not proceed because hosts are STILL being updated."
stop_logging(logFile)
os.remove(lockFileFullPath_apps)
exit()
# Host lock files have drained.
log_message(logFile,"INFO. Host lock files have drained.")
#if (debug): print "Host lock files have drained."
# splunk_ansible.py will not proceed until the updating_apps_ lck token is removed.
# Look at the now current queue of updating_apps_<time_millsecond>.lck tokens
# If this token is the earliest (or only token), then proceed.
# If the token queue is > 1, then die, unless this token is the latest.
lockFileFullPath_apps_all = lockDir+"/updating_apps_*.lck"
lockFileFullPath_apps_all_list = glob.glob(lockFileFullPath_apps_all)
log_message(logFile,str(lockFileFullPath_apps_all_list))
#print "lockFileFullPath_apps_all_list="+str(lockFileFullPath_apps_all_list)
earliest = False
latest = False
if len(lockFileFullPath_apps_all_list) == 1:
# Only one detected .lock file
earliest = True
else:
# Parse each lock file and see if this is earliest or latest
earliest = True
latest = True
for item in lockFileFullPath_apps_all_list:
other_time_stamp = re.search('updating_apps_(.+?).lck',item).group(1)
other_time_stamp_int = int(other_time_stamp)
if time_marker > other_time_stamp_int:
earliest = False
if time_marker < other_time_stamp_int:
latest = False
# If earliest in list and list has more than one, then exit and letthe later proceed.
if len(lockFileFullPath_apps_all_list) > 1 and earliest==True:
log_message(logFile,"INFO. We have not started a repo refresh, and there appears to be a pending, later repo refresh request.. So bail on this request.")
if (debug): print "INFO. Other pending requests. Bail."
os.remove(lockFileFullPath_apps)
stop_logging(logFile)
exit()
#print str(earliest)
#print str(latest)
log_message(logFile,"earliest="+str(earliest)+" latest="+str(latest))
if (earliest==True) or (latest==True):
# Pull down the latest splunk_apps
#command = 'cd '+cwd+'/splunk_apps; /usr/local/bin/git pull'
#print command
#log_message(logFile,str(command))
#output=subprocess.check_output(command, shell=True)
# git --git-dir=/var/directory/manage_splunk/splunk_apps/.git --work-tree=/var/directory/manage_splunk/splunk_apps pull
command = ['/usr/local/bin/git','--git-dir=/var/directory/manage_splunk/splunk_apps/.git','--work-tree=/var/directory/manage_splunk/splunk_apps','pull','--no-edit']
#command = ['/bin/sh','-c','"cd /var/directory/manage_splunk/splunk_apps && /usr/local/bin/git pull -q origin master"']
#command = ['ssh','-o','StrictHostKeyChecking no','ansible-control2.snc1','cd /var/directory/manage_splunk/splunk_apps && /usr/local/bin/git pull']
#if (debug): print str(command)
log_message(logFile,str(command))
try:
output=subprocess.Popen(command,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = output.communicate()
#if (debug): print str(stdout)
log_message(logFile,str(stdout))
except Exception,e:
log_message(logFile,"ERROR. "+str(e))
stop_logging(logFile)
os.remove(lockFileFullPath_apps)
exit()
log_message(logFile,"INFO. Refreshed splunk_apps")
if (debug): print "INFO. Refreshed splunk_apps"
os.remove(lockFileFullPath_apps)
stop_logging(logFile)
exit()
| 2.140625 | 2 |
examples/txt2unicode/demo_utf8_2_tscii.py | nv-d/open-tamil | 218 | 12765168 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (C) 2014 Arulalan.T <<EMAIL>>
# (C) 2015 <NAME>
# This file is part of 'open-tamil/txt2unicode' package examples
#
import sys
sys.path.append("../..")
from tamil.txt2unicode import tscii2unicode, unicode2tscii
tscii = """¾¢ÕÅûÙÅ÷ «ÕǢ ¾¢ÕìÌÈû """
uni_1 = tscii2unicode(tscii)
tscii_from_uni = unicode2tscii(uni_1)
uni_2 = tscii2unicode(tscii_from_uni)
f = open("encode-result.txt", "w")
f.write("Initial tscii : " + tscii + "\n\n")
f.write("From tscii to unicode : " + uni_1 + "\n\n")
f.write("From unicode to tscii : " + tscii_from_uni + "\n\n")
f.write("Again back to unicode from above tscii : " + uni_2)
f.close()
assert uni_1 == uni_2, " Both unicode are 'not' same! "
assert tscii == tscii_from_uni, " Both tscii are 'not' same! "
print("tscii original input", tscii)
print("from tscii2unicode", uni_1)
print("from unicode2tscii", tscii_from_uni)
print("back to unicode", uni_2)
print("converted unicode stored in 'encode-result.txt' file")
| 3.203125 | 3 |
APIdata.py | janbrus/px-api-prophet | 0 | 12765169 | """
coding: utf-8
@authour: <NAME>, modified <NAME>
Inspired by:
https://github.com/hmelberg/stats-to-pandas/blob/master/stats_to_pandas/__init__.py
https://github.com/eurostat/prophet
"""
from __future__ import print_function
import pandas as pd
import requests
import ast
from pyjstat import pyjstat
from collections import OrderedDict
from ipywidgets import widgets
from IPython.display import display
# todo: consider using jsonstat instead of pyjstat
class API_to_data:
def __init__(self, language='en', base_url='http://data.ssb.no/api/v0'):
"""
Parameters:
-----------
language: string
default in Statistics Norway: 'en' (Search for English words)
optional in Statistics Norway: 'no' (Search for Norwegian words)
url: string
default in Statistics Norway: 'http://data.ssb.no/api/v0'
different defaults can be specified
"""
self.language = language
self.burl = base_url
self.furl = None
self.variables = None
self.time = None
    def search(self, phrase):
        """
        Search for tables that contain the phrase in Statistics Norway.
        Returns a pandas dataframe with the results.
        Not case sensitive.
        Language sensitive (specified in the language option)
        Example
        -------
        df = search("income")
        Parameters
        ----------
        phrase: string
            The phrase can contain several words (space separated):
                search("export Norwegian parrot")
            It also supports trucation:
                search("pharma*")
        """
        # todo: make converter part of the default specification only for statistics norway
        # Manual percent-encoding of Norwegian letters and URL-special characters.
        convert = {'æ' : '%C3%A6', 'Æ' : '%C3%86', 'ø' : '%C3%B8', 'Ø' : '%C3%98', 'å' : '%C3%A5', 'Å' : '%C3%85',
                   '"' : '%22', '(' : '%28', ')' : '%29', ' ' : '%20'}
        search_str = '{base_url}/{language}/table/?query={phrase}'.format(base_url=self.burl, language=self.language, phrase=phrase)
        for k, v in convert.items():
            search_str = search_str.replace(k, v)
        # The API answers search queries with a JSON list of matching tables.
        df = pd.read_json(search_str)
        if len(df) == 0:
            print("No match")
            return df
        # make the dataframe more readable
        # (is it worth it? increases vulnerability. formats may differ and change)
        # todo: make search and format conditional on the database being searched
        # split the table name into table id and table text
        df['table_id'] = df['title'].str.split(':').str.get(0)
        df['table_title'] = df['title'].str.split(':').str.get(1)
        del df['title']
        # make table_id the index, visually more intuitive with id as first column
        df = df.set_index('table_id')
        # change order of columns to make it more intuitive (table_title is first)
        cols = df.columns.tolist()
        cols.sort(reverse=True)
        df = df[cols[:-2]]
        return df
def get_variables(self, table_id=None):
"""
Returns a list.
Each element of the list is a dictionary that provides more
information about a variable.
For instance, one variable may contain information about the
different years that are available.
Parameters
----------
table_id: string
the unique table_id number, a string including leading zeros.
"""
try:
numb = int(table_id)
if len(str(numb)) == 4:
numb = '0' + str(numb)
except ValueError:
print('table_id mus be of type integer')
if self.furl is None:
self.furl = '{base_url}/{language}/table/{table_id}'.format(base_url=self.burl, language=self.language,
table_id=numb)
df = pd.read_json(self.furl)
variables = [dict(values) for values in df.iloc[:, 1]]
return variables
    def select(self, table_id):
        """
        Selects a table based on the table_id and returns a widget container
        in which the user can select the set of variables and values to be
        included in the final table.
        Example
        --------
        box = select(table_id = '10714')
        Parameters
        ----------
        table_id : string
            the id of the desired table
        """
        # get a list with dictionaries containing information about each variable
        # (this also populates self.furl as a side effect)
        self.variables = self.get_variables(table_id=table_id)
        table_info = pd.read_json(self.furl)
        table_title = table_info.iloc[0, 0]
        # get number of variables (ok, childish approach, can be simplified!)
        nvars = len(self.variables)
        var_list = list(range(nvars))
        # a list of dictionaries of the values available for each variable
        option_list = [OrderedDict(zip(self.variables[var]['valueTexts'],
                                       self.variables[var]['values']))
                       for var in var_list]
        # create a selection widget for each variable
        # todo: skip widget or make it invisible if there is only one option?
        # todo: make first alternative a default selection initially for all tables?
        # todo: add buttons for selecting "all", "latest" , "first" and "none"
        selection_widgets = [widgets.widget_selection.SelectMultiple(
            options=option_list[var],
            rows=8,
            layout={'width' : '500px'}
            )
            for var in var_list]
        # put all the widgets in a container
        variables_container = widgets.Tab(selection_widgets)
        # label each container with the variable label
        for var in var_list:
            title = str(self.variables[var]['text'])
            variables_container.set_title(var, title)
        # build widgets and put in one widget container
        headline = widgets.Label(value = table_title, color = 'blue')
        endline = widgets.Label(value='''Select category and click on elements
            to be included in the table (CTRL-A selects "all")''')
        # The table URL is displayed in the box and later re-read by read_box().
        url_text = widgets.Label(value=self.furl)
        from IPython.display import display
        button = widgets.Button(description="Click when finished")
        selection_container = widgets.VBox([headline,
                                            endline,
                                            variables_container,
                                            url_text,
                                            button])
        selection_container.layout.border = '3px grey solid'
        def clicked(b):
            # Purely informational: selections are read from the widgets later.
            print('Info is saved. You can now run the rest of the code :)')
        button.on_click(clicked)
        return selection_container
    def get_json(self, box=None, out='dict'):
        """
        Takes a widget container as input (where the user has selected varables)
        and returns a json dictionary or string that will fetch these variables.
        The json follows the json-stat format.
        Parameters
        ----------
        box : widget container
            name of widget box with the selected variables
        out : string
            default: 'dict', options: 'str'
            The json can be returned as a dictionary or a string.
            The final end query should use a dict, but some may find it useful to
            get the string and revise it before transforming it back to a dict.
        Example
        -------
        json_query = get_json(box)
        """
        # NOTE(review): table_url is never used in this method; the URL is
        # re-read from the box by read_box() instead.
        table_url = box.children[3].value
        nvars = len(box.children[2].children)
        var_list = list(range(nvars))
        query_element = {}
        # create a dict of strings, one for each variable that specifies
        # the json-stat that selects the variables/values
        for x in var_list:
            # The selected values of the x-th SelectMultiple widget.
            value_list = str(list(box.children[2].children[x].value))
            query_element[x] = '{{"code": "{code}", "selection": {{"filter": "item", "values": {values} }}}}'.format(
                    code = self.variables[x]['code'],
                    values = value_list)
            # JSON requires double quotes, str() produced single quotes.
            query_element[x] = query_element[x].replace("\'", '"')
        all_elements = str(list(query_element.values()))
        all_elements = all_elements.replace("\'", "")
        query = '{{"query": {all_elements} , "response": {{"format": "json-stat" }}}}'.format(all_elements = all_elements)
        if out == 'dict':
            query = ast.literal_eval(query)
        # todo: build it as a dictionary to start with (and not a string that is made into a dict as now)
        # todo: add error message if required variables are not selected
        # todo: avoid repeat downloading of same information
        #    eg. get_variables is sometimes used three times before a table is downloaded
        return query
def to_dict(json_str):
"""
Transforms a string to a dictionary.
Note: Will fail if string is not correctly specified.
"""
# OK, really unnecessary func, but a concession to less experienced users
# todo: use json module instead, json.dumps()
query = ast.literal_eval(json_str)
return query
def read_box(self, from_box):
"""
Takes a widget container as input (where the user has selected varables)
and returns a pandas dataframe with the values for the selected variables.
Example
-------
df = read_box(box)
Parameters:
-----------
from_box: widget container
"""
try:
query = self.get_json(from_box)
url = from_box.children[3].value
data = requests.post(url, json=query)
results = pyjstat.from_json_stat(data.json(object_pairs_hook=OrderedDict))
label = data.json(object_pairs_hook=OrderedDict)
return [results[0], label['dataset']['label']]
except TypeError:
print('You must make choices in the box!')
except:
print('You must make choices in the box!')
def fiksDato(self, dato):
hjdat = int(dato[5:6]) * 3
hjdat2 = str(hjdat)
if hjdat < 12:
dato = dato[0:4] + '-0' + hjdat2
else:
dato = dato[0:4] + '-' + hjdat2
dates = pd.date_range(dato, periods=1, freq='M')
dato = str(dates[0])
return dato
def prepare_dataframe(self, df, val_col='value'):
"""
Parameters:
-----------
time_col : type sting
Name of time column (for SSB-data, usually 'uke', 'måned', 'år' og 'kvartal' in norwegian)
val_col : type string
Name of column containing the values (usually 'value')
df : typ pandas.DataFrame
The dataset one want to make forecast of
"""
self.time = time_col = df.columns[-2]
if self.language == 'no':
df_ret = df[[self.time, val_col]]
if 'M' in df_ret.loc[0, self.time]:
self.time = 'måned'
df_ret = df[[self.time, val_col]]
df_ret.loc[:, self.time] = pd.to_datetime(df[self.time].str.replace('M', '-'))
freq = 'M';
periods = 12;
elif 'U' in df_ret.loc[0, self.time]:
self.time = 'uke'
df_ret = df[[self.time, val_col]]
df_ret.loc[:, self.time] = pd.to_datetime((df[self.time].str.replace('U', '-')).add('-1'), format='%Y-%W-%w')
freq = 'W'
periods = 52
elif 'K' in df_ret.loc[0, self.time]:
self.time = 'kvartal'
df_ret = df[[self.time, val_col]]
df_ret.loc[:, self.time] = pd.to_datetime(df[self.time].str.replace('K', '-'))
df_ret.loc[:, self.time] = df[self.time].apply(self.fiksDato)
freq = 'q'
periods = 4
else:
self.time = 'år'
df_ret = df[[self.time, val_col]]
df_ret.loc[:, self.time] = pd.to_datetime(df[self.time])
freq = 'y'
periods = 1
elif self.language == 'en':
df_ret = df[[self.time, val_col]]
if 'M' in df_ret.loc[0, self.time]:
self.time = 'month'
df_ret = df[[self.time, val_col]]
df_ret.loc[:, self.time] = pd.to_datetime(df[self.time].str.replace('M', '-'))
freq = 'M'
periods = 12
elif 'U' in df_ret.loc[0, self.time]:
self.time = 'week'
df_ret = df[[self.time, val_col]]
df_ret.loc[:, self.time] = pd.to_datetime((df[self.time].str.replace('U', '-')).add('-1'), format='%Y-%W-%w')
freq = 'W'
periods = 52
elif 'K' in df_ret.loc[0, self.time]:
self.time = 'quarter'
df_ret = df[[self.time, val_col]]
df_ret.loc[:, self.time] = pd.to_datetime(df[self.time].str.replace('K', '-'))
df_ret.loc[:, self.time] = df[self.time].apply(self.fiksDato)
freq = 'q'
periods = 4
else:
self.time = 'year'
df_ret = df[[self.time, val_col]]
df_ret.loc[:, self.time] = pd.to_datetime(df[self.time])
freq = 'y'
periods = 1
#the input to `Prophet` is always a `pandas.DataFrame` object, and it must contain two columns: `ds` and `y`:
df_ret.columns = ['ds', 'y']
return [df_ret, freq, periods] | 2.96875 | 3 |
py/maximum-product-of-word-lengths.py | ckclark/leetcode | 0 | 12765170 | <filename>py/maximum-product-of-word-lengths.py<gh_stars>0
from operator import or_
class Solution(object):
def maxProduct(self, words):
"""
:type words: List[str]
:rtype: int
"""
words = filter(None, words)
if not words:
return 0
words.sort(key=len, reverse=True)
lenwords = map(len, words)
bitval = [reduce(or_, map(lambda x:1 << (ord(x) - ord('a')), word)) for word in words]
size = len(words)
ans = 0
for i in xrange(size):
if lenwords[i] * lenwords[i] <= ans: break
for j in xrange(i + 1, size):
if not (bitval[i] & bitval[j]):
ans = max(ans, lenwords[i] * lenwords[j])
break
return ans
| 3.390625 | 3 |
source_ddc/test/unit/test_ddc.py | sansan-inc/econ-source | 26 | 12765171 | import numpy as np
from source_ddc.simulation_tools import simulate
from source_ddc.algorithms import NFXP, NPL, CCP
from source_ddc.probability_tools import StateManager, random_ccp
from test.utils.functional_tools import average_out
n_repetitions = 10
def _utility_fn(theta, choices, states):
    """Per-period utility shared by all tests: theta[0]*ln(state+1) - theta[1]*action.

    Returns an array shaped (n_choices, n_states, 1), the layout the
    source_ddc algorithms expect.
    """
    m_states, m_actions = np.meshgrid(states, choices)
    return (theta[0] * np.log(m_states + 1) - theta[1] * m_actions).reshape((len(choices), -1, 1))


# Data-generating process shared by every estimator test below.
_TRUE_PARAMS = [0.5, 3]
_DISCOUNT_FACTOR = 0.95
_N_CHOICES = 2
_N_STATES = 5
_PARAM_NAMES = ['variable_cost', 'replacement_cost']
# Absolute tolerance for the averaged parameter estimates.
_TOLERANCE = np.array([0.05, 0.05])


def _simulate_panel(transition_matrix):
    """Simulate a 500-agent, 100-period panel; return (DataFrame, true CCPs)."""
    state_manager = StateManager(miles=_N_STATES)
    return simulate(
        500,
        100,
        _N_CHOICES,
        state_manager,
        _TRUE_PARAMS,
        _utility_fn,
        _DISCOUNT_FACTOR,
        transition_matrix,
    )


def _check_estimates(mean_params):
    """Assert the averaged estimates are within tolerance of the true values."""
    assert np.all(np.abs(mean_params - _TRUE_PARAMS) < _TOLERANCE)


def test_nfxp(simple_transition_matrix):
    """NFXP recovers the true structural parameters."""
    @average_out(n_repetitions)
    def run():
        df, _ = _simulate_panel(simple_transition_matrix)
        algorithm = NFXP(
            df['action'].values,
            df['state'].values,
            simple_transition_matrix,
            _utility_fn,
            _DISCOUNT_FACTOR,
            parameter_names=_PARAM_NAMES
        )
        return algorithm.estimate(start_params=[-1, -1], method='bfgs')

    _check_estimates(run())


def test_ccp(simple_transition_matrix):
    """CCP estimation, seeded with the simulated true CCPs, recovers the parameters."""
    @average_out(n_repetitions)
    def run():
        df, ccp = _simulate_panel(simple_transition_matrix)
        algorithm = CCP(
            df['action'].values,
            df['state'].values,
            simple_transition_matrix,
            _utility_fn,
            _DISCOUNT_FACTOR,
            initial_p=ccp,
            parameter_names=_PARAM_NAMES
        )
        return algorithm.estimate(start_params=[1, 1], method='bfgs')

    _check_estimates(run())


def test_npl(simple_transition_matrix):
    """NPL converges to the true parameters from random initial CCPs."""
    @average_out(n_repetitions)
    def run():
        df, _ = _simulate_panel(simple_transition_matrix)
        ccp = random_ccp(_N_STATES, _N_CHOICES)
        algorithm = NPL(
            df['action'].values,
            df['state'].values,
            simple_transition_matrix,
            _utility_fn,
            _DISCOUNT_FACTOR,
            initial_p=ccp,
            parameter_names=_PARAM_NAMES
        )
        return algorithm.estimate(start_params=[1, 1], method='bfgs')

    _check_estimates(run())


def test_npl_relaxation_param(simple_transition_matrix):
    """NPL with a relaxation parameter and an iteration cap still recovers the parameters."""
    @average_out(n_repetitions)
    def run():
        df, _ = _simulate_panel(simple_transition_matrix)
        ccp = random_ccp(_N_STATES, _N_CHOICES)
        algorithm = NPL(
            df['action'].values,
            df['state'].values,
            simple_transition_matrix,
            _utility_fn,
            _DISCOUNT_FACTOR,
            initial_p=ccp,
            relaxation_param=0.9,
            parameter_names=_PARAM_NAMES,
            npl_maxiter=50
        )
        return algorithm.estimate(start_params=[1, 1], method='bfgs')

    _check_estimates(run())
| 2.265625 | 2 |
examples/concatenate.py | nvllsvm/consumerpool | 6 | 12765172 | <filename>examples/concatenate.py<gh_stars>1-10
from consumers import Pool
def concatenate(letters):
    """Join the given letters into a single dash-separated string."""
    separator = '-'
    return separator.join(letters)
# NOTE(review): assumes consumers.Pool fans the queued items out to
# `quantity` worker processes and applies `concatenate` to what each worker
# consumed -- confirm against the consumers package documentation.
with Pool(concatenate, quantity=2) as pool:
    for letter in 'abcdef':
        pool.put(letter)
# `pool.results` appears to hold one return value per consumer.  # TODO confirm
print(pool.results)
| 3.125 | 3 |
run.py | t2wan/DSC180A-CheckPoint1 | 1 | 12765173 | <reponame>t2wan/DSC180A-CheckPoint1
#!/usr/bin/env python
import sys
import json
from etl import get_data
def main(targets):
    """Run the requested data pipeline targets.

    Only the 'DBLP' target is currently supported: it reads the data
    parameters from config/data-params.json and fetches the data.
    """
    if 'DBLP' not in targets:
        return
    with open('config/data-params.json') as fh:
        data_cfg = json.load(fh)
    # Build the data target from the loaded configuration.
    get_data(data_cfg)
if __name__ == '__main__':
    # BUG FIX: `sys.argv[1]` raised IndexError when no target was supplied.
    # Pass the whole (possibly empty) argument list and let `main` decide
    # what to run; note targets are now matched as list elements.
    targets = sys.argv[1:]
    main(targets)
| 2.109375 | 2 |
bot/utils.py | RehanPlayz/YesBot | 1 | 12765174 | <gh_stars>1-10
#
# YesBot utils.py | Copyright (c) 2020 Mrmagicpie
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
#
import discord
from discord.ext import commands
from discord.ext.commands import BucketType
import datetime
#
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
#
class utils(commands.Cog):
    """Moderation and utility commands for YesBot (ban, help, echo)."""

    def __init__(self, bot):
        # Bot reference; used for embed footers (avatar) and reaction waiting.
        self.bot = bot

    @commands.command()
    @commands.cooldown(1, 1, type=BucketType.user)
    @commands.has_guild_permissions(ban_members=True)
    async def ban(self, ctx, o: discord.Member = None, *, reason = None):
        """Ban a member after a reaction-based confirmation.

        With no member argument, posts a usage embed instead.  On
        confirmation the member is banned and notified via DM.
        """
        if o is None:  # was `o == None`; identity check is the correct idiom
            embed = discord.Embed(
                title="Ban usage",
                description=f"""
YesBot Ban Usage:
``{ctx.prefix}ban (member)``
""",
                colour=discord.Colour.blue(),
                timestamp=datetime.datetime.utcnow()
            )
            embed.set_footer(
                icon_url=self.bot.user.avatar_url,
                text="YesBot Moderation"
            )
            await ctx.send(embed=embed)
        else:
            if reason is None:  # was `reason == None`
                reason = "No reason specified."
            embed = discord.Embed(
                title=f"Ban {o.display_name}?",
                description=f"""
React with ✅ to ban this user for ``{reason}``!
""",
                colour=discord.Colour.blue(),
                timestamp=datetime.datetime.utcnow()
            )
            embed.set_footer(
                icon_url=self.bot.user.avatar_url,
                text="YesBot Moderation"
            )
            msg = await ctx.send(embed=embed)
            await msg.add_reaction('✅')

            def ok(reaction, user):
                # Only accept the confirmation reaction from the invoker.
                if ctx.author.id == user.id:
                    return str(reaction.emoji) == "✅"

            reaction, user = await self.bot.wait_for("reaction_add", check=ok)
            if str(reaction.emoji) == "✅":
                await msg.clear_reactions()
                rip = discord.Embed(
                    title=f"You have been banned from {ctx.guild.name}!",
                    description=f"""
You have been banned from {ctx.guild.name} for:
```
{reason}
```
""",
                    colour=discord.Colour.red(),
                    timestamp=datetime.datetime.utcnow()
                )
                # BUG FIX: the footer was previously set on `embed` (the
                # already-sent confirmation) instead of the DM embed `rip`,
                # so the ban notification DM was missing its footer.
                rip.set_footer(
                    icon_url=self.bot.user.avatar_url,
                    text="YesBot Moderation"
                )
                oop = discord.Embed(title=f"Banning {o.display_name}!")
                lmao = discord.Embed(
                    title=f"Banned {o.display_name}",
                    description=f"""
Banned {o.mention} for:
```
{reason}
```
"""
                )
                await msg.edit(embed=oop)
                await o.ban(reason=reason)
                await o.send(embed=rip)
                await msg.edit(embed=lmao)

    @commands.command()
    @commands.cooldown(1, 1, type=BucketType.user)
    async def help(self, ctx):
        """Show the list of currently available YesBot commands."""
        embed = discord.Embed(
            title="YesBot help",
            description=f"""
Below is a list of currently available YesBot commands!
""",
            colour=discord.Colour.blue(),
            timestamp=datetime.datetime.utcnow()
        )
        embed.add_field(
            name="Ban (@user) [reason]",
            value="""
Requires: ``ban_members``
> Ban a user from your guild
"""
        )
        embed.add_field(
            name="Echo [What to echo]",
            value="""
Requires: ``send_messages``
> Echo something as the bot
"""
        )
        embed.set_footer(
            icon_url=self.bot.user.avatar_url,
            text="YesBot Utilities"
        )
        await ctx.send(embed=embed)

    @commands.command()
    @commands.cooldown(1, 2, type=BucketType.user)
    async def echo(self, ctx, *, echo = None):
        """Repeat *echo* as the bot, crediting the invoker."""
        # Disallow pinging anyone through the echoed text.
        mentions = discord.AllowedMentions(everyone=False, users=False, roles=False)
        if echo is None:  # was `echo == None`
            await ctx.send('Please specify something to echo!')
        else:
            await ctx.message.delete()
            await ctx.send(f"""
{echo}
_ _
> {ctx.author.mention}
""", allowed_mentions=mentions)

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Report any command error back to the channel in an embed."""
        embed = discord.Embed(
            title="Uh oh!",
            description=f"""
Uh oh! We ran into a problem.
```css
{error}
```
""",
            colour=discord.Colour.red(),
            timestamp=datetime.datetime.utcnow()
        )
        embed.set_footer(
            icon_url=self.bot.user.avatar_url,
            text="Oh no, we ran into a problem!"
        )
        await ctx.send(embed=embed)
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
#
def setup(bot):
    """Extension entry point: discord.py calls this to register the cog."""
    bot.add_cog(utils(bot))
hackcollab_project/hosts.py | calixo888/hackcollab | 0 | 12765175 | <reponame>calixo888/hackcollab
from django.conf import settings
from django_hosts import patterns, host
# django-hosts routing: map each subdomain to a URLconf.  'www' serves the
# main app; the hackathon subdomains all reuse the shared hackathon URLconf.
host_patterns = patterns('',
    host(r'www', 'hackcollab_app.urls', name='www'),
    host(r'solarhacks', 'hackathon.urls', name='solarhacks'),
    host(r'testhacks', 'hackathon.urls', name='testhacks'),
    host(r'waycoolhacks', 'hackathon.urls', name='waycoolhacks'),
)
| 1.46875 | 1 |
co2-calculator.py | vladetaStoj/VehicleEmissionCalculator | 0 | 12765176 | #SAP DevelopmentChallange solution, written by <NAME> (<EMAIL>)
#Version: 01_28082020
#License: MIT
import sys
import argparse
from emissions import VehicleEmissions
#Important note: ArgumentParser converts any "-" to "_"
# Command-line interface: every option is parsed as a string and validated
# manually below.
ap = argparse.ArgumentParser()
ap.add_argument("--distance", "-dist", help = "Total distance travelled")
ap.add_argument("--unit-of-distance", "-unit-dist", help = "Unit of distance: kilometers (default) or meters")
ap.add_argument("--transportation-method", "-tran-mthd", help = "Type of vehicle used for calculation")
# NOTE(review): this help text says grams is the default, but the logic below
# defaults to kilograms when --output is omitted -- the help string looks wrong.
ap.add_argument("--output", "-out", help = "Output of emissions in either kilograms or grams (default)")
# Parse into a plain dict so lookups use the underscored option names.
args = vars(ap.parse_args())
# --- Input validation -------------------------------------------------------
# Get distance (required)
if args["distance"] is None:  # was `== None`
    sys.exit("No input distance given! Exiting...")
input_distance = float(args["distance"])

# Get distance unit (defaults to kilometers)
if args["unit_of_distance"] is None:
    print("No unit of distance given, using KM by default")
    distance_unit = "km"
else:
    distance_unit = args["unit_of_distance"]

# Get vehicle type (required; name validity is checked inside VehicleEmissions)
if args["transportation_method"] is None:
    sys.exit("No vehicle type given! Exiting...")
vehicle_type = args["transportation_method"]

# Get output unit (optional: "g" or "kg"; kilograms when omitted)
output = args["output"]

# Normalise the distance to kilometers before the emission calculation.
if distance_unit == "m":
    input_distance = input_distance / 1000.0

# Create the vehicle object and calculate the emissions for the trip.
vehicle = VehicleEmissions(vehicle_type)
vehicle.CalculateEmission(input_distance)

if output == "g":
    outputEmission = vehicle.GetEmissionGrams()
    print("Your trip caused " + str(outputEmission) + "g of CO2-equivalent.")
elif output in ("kg", None):
    # The duplicated "kg" and "no flag" branches are merged: both report
    # kilograms.  Any other --output value silently prints nothing, exactly
    # as before.
    outputEmission = vehicle.GetEmmisionKG()
    print("Your trip caused " + str(outputEmission) + "kg of CO2-equivalent.")
mavsdk/transponder_pb2_grpc.py | PML-UCF/MAVSDK-Python | 0 | 12765177 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from . import transponder_pb2 as transponder_dot_transponder__pb2
class TransponderServiceStub(object):
    """
    Allow users to get ADS-B information
    and set ADS-B update rates.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Server-streaming RPC: one request yields a stream of transponder updates.
        self.SubscribeTransponder = channel.unary_stream(
            '/mavsdk.rpc.transponder.TransponderService/SubscribeTransponder',
            request_serializer=transponder_dot_transponder__pb2.SubscribeTransponderRequest.SerializeToString,
            response_deserializer=transponder_dot_transponder__pb2.TransponderResponse.FromString,
        )
        # Unary RPC: set the rate at which transponder updates are emitted.
        self.SetRateTransponder = channel.unary_unary(
            '/mavsdk.rpc.transponder.TransponderService/SetRateTransponder',
            request_serializer=transponder_dot_transponder__pb2.SetRateTransponderRequest.SerializeToString,
            response_deserializer=transponder_dot_transponder__pb2.SetRateTransponderResponse.FromString,
        )
# Generated servicer base class: subclass it and override the methods below
# to implement the server side of the service.
class TransponderServiceServicer(object):
    """
    Allow users to get ADS-B information
    and set ADS-B update rates.
    """

    def SubscribeTransponder(self, request, context):
        """Subscribe to 'transponder' updates.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def SetRateTransponder(self, request, context):
        """Set rate to 'transponder' updates.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_TransponderServiceServicer_to_server(servicer, server):
    """Register *servicer*'s RPC method handlers with a grpc server."""
    rpc_method_handlers = {
        'SubscribeTransponder': grpc.unary_stream_rpc_method_handler(
            servicer.SubscribeTransponder,
            request_deserializer=transponder_dot_transponder__pb2.SubscribeTransponderRequest.FromString,
            response_serializer=transponder_dot_transponder__pb2.TransponderResponse.SerializeToString,
        ),
        'SetRateTransponder': grpc.unary_unary_rpc_method_handler(
            servicer.SetRateTransponder,
            request_deserializer=transponder_dot_transponder__pb2.SetRateTransponderRequest.FromString,
            response_serializer=transponder_dot_transponder__pb2.SetRateTransponderResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'mavsdk.rpc.transponder.TransponderService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| 2.03125 | 2 |
angular_flask/controllers.py | dgolovan/everblog | 1 | 12765178 | <reponame>dgolovan/everblog
import os
from functools import update_wrapper
from flask import Flask, request, Response
from flask import render_template, url_for, redirect, send_from_directory
from flask import send_file, make_response, abort, current_app
from datetime import timedelta, datetime
from bson.json_util import dumps
from angular_flask import app
from slugify import slugify
from random import randint
from evernote.api.client import EvernoteClient
from evernote.edam.notestore.ttypes import NoteFilter, NotesMetadataResultSpec
#
# CORS DECORATOR
#
# NOTE: this is the well-known Flask CORS decorator snippet; the use of
# `basestring` makes it Python-2-only.
def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    """Decorator factory that adds CORS (Access-Control-*) headers to a view.

    origin: allowed origin(s); a non-string iterable is joined with commas.
    methods: allowed HTTP methods; when None, the methods advertised by
        Flask's automatic OPTIONS response are used.
    headers: allowed request headers (string or iterable of strings).
    max_age: seconds a preflight response may be cached (or a timedelta).
    attach_to_all: if False, headers are only added to OPTIONS responses.
    automatic_options: if True, OPTIONS requests get Flask's default
        options response instead of invoking the wrapped view.
    """
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, basestring):
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, basestring):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()

    def get_methods():
        # Honour an explicit method list; otherwise fall back to whatever
        # Flask would advertise on an automatic OPTIONS response.
        if methods is not None:
            return methods

        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']

    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp

            # Attach the CORS headers to the outgoing response.
            h = resp.headers

            h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp

        # Disable Flask's automatic OPTIONS handling so the wrapper sees it.
        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator
# routing for basic pages (pass routing onto the Angular app)
# routing for basic pages (pass routing onto the Angular app)
@app.route('/')
@app.route('/everblog')
@app.route('/everpost/<guid>')
def basic_pages(**kwargs):
    """Serve the Angular single-page-app shell for every client-side route."""
    # BUG FIX: use a context manager so the template file handle is closed
    # promptly; the original `open(...).read()` leaked the handle until GC.
    with open('angular_flask/templates/index.html') as fp:
        return make_response(fp.read())
##########
# REST API for Evernote
#
def _note_to_dict(note_store, dev_token, guid, note):
    """Serialize an Evernote note into the JSON-friendly dict used by the API.

    Shared by both endpoints below; previously this logic was duplicated.
    Each tag GUID requires a getTag round-trip to resolve its name.
    """
    note_tags = []
    tag_guids = note.tagGuids
    if tag_guids is not None:
        for tag_guid in tag_guids:
            tag = note_store.getTag(dev_token, tag_guid)
            note_tags.append({'guid': tag_guid, 'name': tag.name})
    return {
        'guid': guid,
        'title': note.title,
        'date_modified': note.updated,
        'author': note.attributes.author,
        'content': note.content,
        'tags': note_tags,
    }


@app.route('/api/evernote', methods = ['GET'])
@crossdomain(origin='*')
def get_ev_notes():
    """Return every note in the configured blog notebook as JSON."""
    dev_token = app.config['EN_DEV_TOKEN']
    nb_guid = app.config['EN_NB_GUID']
    client = EvernoteClient(token=dev_token)
    note_store = client.get_note_store()

    # Only list notes belonging to the blog notebook (first 100).
    filt = NoteFilter()
    filt.notebookGuid = nb_guid
    spec = NotesMetadataResultSpec()
    spec.includeTitle = True
    notemetalist = note_store.findNotesMetadata(dev_token, filt, 0, 100, spec)

    # (The unused EN_NOTESTORE_URL config read and `url` local were removed.)
    notes = []
    for note_data in notemetalist.notes:
        note = note_store.getNote(dev_token, note_data.guid, True, True, True, True)
        notes.append(_note_to_dict(note_store, dev_token, note_data.guid, note))
    return dumps(notes)


@app.route('/api/evernote/<guid>', methods = ['GET'])
@crossdomain(origin='*')
def get_ev_note(guid):
    """Return a single note, looked up by its GUID, as JSON."""
    dev_token = app.config['EN_DEV_TOKEN']
    client = EvernoteClient(token=dev_token)
    note_store = client.get_note_store()
    note = note_store.getNote(dev_token, guid, True, True, True, True)
    return dumps(_note_to_dict(note_store, dev_token, guid, note))
######
# Misc
#
# special file handlers and error handlers
@app.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the static image folder."""
    static_dir = os.path.join(app.root_path, 'static')
    return send_from_directory(static_dir, 'img/favicon.ico')
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page together with the 404 status code."""
    body = render_template('404.html')
    return body, 404
| 2.046875 | 2 |
oslo_middleware/tests/test_http_proxy_to_wsgi.py | mail2nsrajesh/oslo.middleware | 0 | 12765179 | # Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from wsgiref import util
from oslotest import base as test_base
import webob
from oslo_middleware import http_proxy_to_wsgi
class TestHTTPProxyToWSGI(test_base.BaseTestCase):
    """Tests for HTTPProxyToWSGI with proxy-header parsing enabled.

    The fake WSGI app echoes back the reconstructed application URI (or
    the remote address), so each assertion checks how the middleware
    rewrote the request environ from X-Forwarded-* / Forwarded headers.
    """

    def setUp(self):
        super(TestHTTPProxyToWSGI, self).setUp()

        @webob.dec.wsgify()
        def fake_app(req):
            # Echo the application URI derived from the (possibly rewritten)
            # WSGI environ.
            return util.application_uri(req.environ)

        self.middleware = http_proxy_to_wsgi.HTTPProxyToWSGI(fake_app)
        self.middleware.oslo_conf.set_override('enable_proxy_headers_parsing',
                                               True,
                                               group='oslo_middleware')
        self.request = webob.Request.blank('/foo/bar', method='POST')

    def test_backward_compat(self):
        # The old HTTPProxyToWSGIMiddleware name must keep working.
        @webob.dec.wsgify()
        def fake_app(req):
            return util.application_uri(req.environ)

        self.middleware = http_proxy_to_wsgi.HTTPProxyToWSGIMiddleware(
            fake_app)
        response = self.request.get_response(self.middleware)
        self.assertEqual(b"http://localhost:80/", response.body)

    def test_no_headers(self):
        response = self.request.get_response(self.middleware)
        self.assertEqual(b"http://localhost:80/", response.body)

    def test_url_translate_ssl(self):
        self.request.headers['X-Forwarded-Proto'] = "https"
        response = self.request.get_response(self.middleware)
        self.assertEqual(b"https://localhost:80/", response.body)

    def test_url_translate_ssl_port(self):
        self.request.headers['X-Forwarded-Proto'] = "https"
        self.request.headers['X-Forwarded-Host'] = "example.com:123"
        response = self.request.get_response(self.middleware)
        self.assertEqual(b"https://example.com:123/", response.body)

    def test_url_translate_host_ipv6(self):
        # Bracketed IPv6 host with a port must survive the rewrite intact.
        self.request.headers['X-Forwarded-Proto'] = "https"
        self.request.headers['X-Forwarded-Host'] = "[fdf8:f53e:61e4::18]:123"
        response = self.request.get_response(self.middleware)
        self.assertEqual(b"https://[fdf8:f53e:61e4::18]:123/", response.body)

    def test_url_translate_base(self):
        self.request.headers['X-Forwarded-Prefix'] = "/bla"
        response = self.request.get_response(self.middleware)
        self.assertEqual(b"http://localhost:80/bla", response.body)

    def test_url_translate_port_and_base_and_proto_and_host(self):
        self.request.headers['X-Forwarded-Proto'] = "https"
        self.request.headers['X-Forwarded-Prefix'] = "/bla"
        self.request.headers['X-Forwarded-Host'] = "example.com:8043"
        response = self.request.get_response(self.middleware)
        self.assertEqual(b"https://example.com:8043/bla", response.body)

    def test_rfc7239_invalid(self):
        # A malformed RFC 7239 Forwarded header must be ignored, not crash.
        self.request.headers['Forwarded'] = (
            "iam=anattacker;metoo, I will crash you!!P;m,xx")
        response = self.request.get_response(self.middleware)
        self.assertEqual(b"http://localhost:80/", response.body)

    def test_rfc7239_proto(self):
        # Only the first element of the Forwarded list should be honoured.
        self.request.headers['Forwarded'] = (
            "for=foobar;proto=https, for=foobaz;proto=http")
        response = self.request.get_response(self.middleware)
        self.assertEqual(b"https://localhost:80/", response.body)

    def test_rfc7239_proto_host(self):
        self.request.headers['Forwarded'] = (
            "for=foobar;proto=https;host=example.com, for=foobaz;proto=http")
        response = self.request.get_response(self.middleware)
        self.assertEqual(b"https://example.com/", response.body)

    def test_rfc7239_proto_host_base(self):
        self.request.headers['Forwarded'] = (
            "for=foobar;proto=https;host=example.com:8043, for=foobaz")
        self.request.headers['X-Forwarded-Prefix'] = "/bla"
        response = self.request.get_response(self.middleware)
        self.assertEqual(b"https://example.com:8043/bla", response.body)

    def test_forwarded_for_headers(self):
        @webob.dec.wsgify()
        def fake_app(req):
            return req.environ['REMOTE_ADDR']

        self.middleware = http_proxy_to_wsgi.HTTPProxyToWSGIMiddleware(
            fake_app)
        forwarded_for_addr = '1.2.3.4'
        forwarded_addr = '8.8.8.8'

        # If both X-Forwarded-For and Forwarded headers are present, it should
        # use the Forwarded header and ignore the X-Forwarded-For header.
        self.request.headers['Forwarded'] = (
            "for=%s;proto=https;host=example.com:8043" % (forwarded_addr))
        self.request.headers['X-Forwarded-For'] = forwarded_for_addr
        response = self.request.get_response(self.middleware)
        self.assertEqual(forwarded_addr.encode(), response.body)

        # Now if only X-Forwarded-For header is present, it should be used.
        del self.request.headers['Forwarded']
        response = self.request.get_response(self.middleware)
        self.assertEqual(forwarded_for_addr.encode(), response.body)
class TestHTTPProxyToWSGIDisabled(test_base.BaseTestCase):
    """With parsing disabled, proxy headers must have no effect at all."""

    def setUp(self):
        super(TestHTTPProxyToWSGIDisabled, self).setUp()

        @webob.dec.wsgify()
        def fake_app(req):
            return util.application_uri(req.environ)

        self.middleware = http_proxy_to_wsgi.HTTPProxyToWSGI(fake_app)
        self.middleware.oslo_conf.set_override('enable_proxy_headers_parsing',
                                               False,
                                               group='oslo_middleware')
        self.request = webob.Request.blank('/foo/bar', method='POST')

    def test_no_headers(self):
        response = self.request.get_response(self.middleware)
        self.assertEqual(b"http://localhost:80/", response.body)

    def test_url_translate_ssl_has_no_effect(self):
        # The headers are present but parsing is off, so the URI is unchanged.
        self.request.headers['X-Forwarded-Proto'] = "https"
        self.request.headers['X-Forwarded-Host'] = "example.com:123"
        response = self.request.get_response(self.middleware)
        self.assertEqual(b"http://localhost:80/", response.body)
| 2.0625 | 2 |
make_const.py | creachadair/curled | 0 | 12765180 | <reponame>creachadair/curled<gh_stars>0
#!/usr/bin/env python
##
## Name: make_const.py
## Purpose: Make the "constants" module.
##
## Copyright (c) 2009-2010 <NAME>, All Rights Reserved.
##
import errno, os, sys
# Human-readable section banners for the generated module, keyed by the
# enum/group name emitted by the curlextract helper (see extract_defines).
SECTION_COMMENT = {
    'CURLcode': 'Return codes from curl_easy_* functions',
    'CURLFORMcode': 'Return codes for curl_formadd()',
    'CURLSHcode': 'Return codes for curl_share_stopt()',
    'CURLSHoption': 'Option codes for curl_share_setopt()',
    'CURLformoption': 'Option codes for CURLFORM_ARRAY values',
    'CURLoption': 'Option codes for curl_easy_setopt()',
    'CURLversion': 'Version selectors for curl_version_info()',
    'curlversioncodes': 'Bit masks for curl_version_info_data->features',
    'curl_TimeCond': 'Option codes for CURLOPT_TIMECONDITION',
    'curl_closepolicy': 'Option codes for CURLOPT_CLOSEPOLICY',
    'curl_ftpauth': 'Option codes for CURLOPT_FTPSSLAUTH',
    'curl_ftpccc': 'Option codes for CURLOPT_FTP_SSL_CCC',
    'curl_ftpcreatedir': 'Option codes for CURLOPT_FTP_CREATE_MISSING_DIRS',
    'curl_ftpmethod': 'Option codes for CURLOPT_FTP_FILEMETHOD',
    'curl_infotype':
        'Specifies the kind of data passed to information_callback',
    'curl_lock_access': 'Specifies lock access type for lock functions',
    'curl_lock_data': 'Different data locks for a single share',
    'curl_proxytype': 'Option codes for CURLOPT_PROXYTYPE',
    'curl_usessl': 'Option codes for CURLOPT_USE_SSL',
    'curlauth': 'Option codes for CURLOPT_HTTPAUTH',
    'curlglobal': 'Flags for curl_global_init()',
    'CURLINFO': 'Selector codes for curl_easy_getinfo()',
    'curliocmd': 'Operation codes for ioctl callbacks',
    'curlioerr': 'Return codes for ioctl callbacks',
    'curlopttype': 'Option type codes',
    'curlpause': 'Bit masks for curl_easy_pause()',
    'curlproto':
        'Option bitmasks for CURLOPT_PROTOCOLS and CURLOPT_REDIR_PROTOCOLS',
    'curlsshauth': 'Option bitmasks for CURLOPT_SSH_AUTH_TYPES',
    'curlsocktype': 'Socket type selector for sockopt callback',
    'httpversion': 'Option codes for CURLOPT_HTTP_VERSION',
    'ipresolve': 'Option codes for CURLOPT_IPRESOLVE',
    'netrcoption': 'Option codes for CURLOPT_NETRC',
    'redirflags': 'Option codes for CURLOPT_POSTREDIR',
    'sslversion': 'Option codes for CURLOPT_SSLVERSION',
}
def format_section_comment(com):
    """Return a '# --- <com> ---…' banner, dash-padded to 72 columns.

    Banners already 72 characters or longer are returned unchanged.
    """
    return ('# --- %s ' % com).ljust(72, '-')
def extract_defines(include_dirs=()):
    """Compile and run the curlextract helper; return its output as a dict.

    include_dirs: extra -I directories passed to gcc.
    Returns a mapping of section name -> list of (name, value) pairs; lines
    emitted before any '@section' marker are grouped under the None key
    (which is dropped if empty).
    Side effects: builds and deletes a ./curlextract binary in the CWD.
    """
    if not os.path.isfile("curlextract.c"):
        # NOTE(review): KeyError is an odd exception type for a missing
        # file -- callers probably expect an IOError/OSError.
        raise KeyError("curlextract.c is missing")
    if os.path.exists("curlextract"):
        os.unlink("curlextract")

    # Compile the helper with gcc, adding any extra include directories.
    args = ['gcc', '-o', 'curlextract']
    for v in include_dirs:
        args.append('-I')
        args.append(v)
    args.append("curlextract.c")

    res = os.spawnvp(os.P_WAIT, args[0], args)
    if res != 0:
        raise ValueError("compilation error: %s" % res)

    # Run the helper and parse its '@section' / 'NAME=VALUE' output lines.
    result = {None: []}
    curid = None
    with os.popen('./curlextract', 'r') as cmd:
        for line in cmd:
            if line.startswith('@'):
                curid = line[1:].rstrip()
            else:
                key, val = line.rstrip().split('=', 1)
                result.setdefault(curid, []).append((key, val))
    if not result[None]:
        result.pop(None)
    os.unlink("curlextract")
    return result
def write_module(defs, ofp):
    """Write the generated ``constants.py`` module text to *ofp*.

    defs maps section names to lists of (name, value) pairs; names are
    left-aligned to the width of the longest name across all sections.
    """
    ofp.write('''
##
## Name: constants.py
## Purpose: Symbolic constants used by libcurl.
##
## Copyright (c) <NAME>, All Rights Reserved.
##
## This file was mechanically generated by extracting the names
## defined in <curl/curl.h>
##
##\n'''.lstrip())

    widest = max(max(len(name) for name, _ in entries)
                 for entries in defs.values())
    line_fmt = '%%(key)-%ds = %%(value)s\n' % widest

    for section in sorted(defs, key=str.lower):
        ofp.write('\n')
        banner = SECTION_COMMENT.get(section)
        if banner is not None:
            ofp.write(format_section_comment(banner) + '\n')
        for name, value in defs[section]:
            ofp.write(line_fmt % {'key': name, 'value': value})
    ofp.write('\n# Here there be dragons\n')
def main(argv):
    """Extract the libcurl constants and write them to constants.py."""
    defs = extract_defines(['/opt/local/include'])
    # BUG FIX: `file(...)` is a Python-2-only builtin; `open(...)` is the
    # equivalent and works on both Python 2 and 3.
    with open('constants.py', 'wt') as fp:
        write_module(defs, fp)
    return 0
if __name__ == '__main__':
    # Exit with whatever status main() reports.
    sys.exit(main(sys.argv[1:]))
| 1.414063 | 1 |
wn/_config.py | fushinari/wn | 0 | 12765181 | <gh_stars>0
"""
Local configuration settings.
"""
from typing import Dict
from pathlib import Path
import toml
from wn import Error
from wn._types import AnyPath
from wn._util import is_url, resources, short_hash
# The directory where downloaded and added data will be stored.
DEFAULT_DATA_DIRECTORY = Path.home() / '.wn_data'
# Name of the SQLite database file created inside the data directory.
DEFAULT_DATABASE_FILENAME = 'wn.db'
class WNConfig:
    """Wn's runtime configuration: data-directory paths and the project index."""

    def __init__(self) -> None:
        # Backing field for the data_directory property.
        self._data_directory = DEFAULT_DATA_DIRECTORY
        # Project index: id -> {label, language, versions, license}.
        self._projects: Dict[str, Dict] = {}
        self.database_filename = DEFAULT_DATABASE_FILENAME

    @property
    def data_directory(self) -> Path:
        """The file system directory where Wn's data is stored."""
        dir = self._data_directory
        dir.mkdir(exist_ok=True)
        return dir

    @data_directory.setter
    def data_directory(self, path: AnyPath) -> None:
        # Expand ~ and reject paths that exist but are not directories.
        dir = Path(path).expanduser()
        if dir.exists() and not dir.is_dir():
            raise Error(f'path exists and is not a directory: {dir}')
        self._data_directory = dir

    @property
    def database_path(self) -> Path:
        """The path to the database file."""
        return self.data_directory / self.database_filename

    @property
    def downloads_directory(self) -> Path:
        """The file system directory where downloads are cached."""
        dir = self.data_directory / 'downloads'
        dir.mkdir(exist_ok=True)
        return dir

    @property
    def index(self) -> Dict[str, Dict]:
        """The project index."""
        return self._projects

    def add_project(
            self,
            id: str,
            label: str,
            language: str,
            license: str = None,
    ) -> None:
        """Add a new wordnet project to the index.

        Arguments:
            id: short identifier of the project
            label: full name of the project
            language: `BCP 47`_ language code of the resource
            license: link or name of the project's default license

        .. _BCP 47: https://en.wikipedia.org/wiki/IETF_language_tag
        """
        if id in self._projects:
            raise ValueError(f'project already added: {id}')
        self._projects[id] = {
            'label': label,
            'language': language,
            'versions': {},
            'license': license
        }

    def add_project_version(
            self,
            id: str,
            version: str,
            url: str,
            license: str = None,
    ) -> None:
        """Add a new resource version for a project.

        Arguments:
            id: short identifier of the project
            version: version string of the resource
            url: web address of the resource
            license: link or name of the resource's license; if not
                given, the project's default license will be used.
        """
        version_data = {'resource_url': url}
        if license:
            version_data['license'] = license
        project = self._projects[id]
        project['versions'][version] = version_data

    def get_project_info(self, arg: str) -> Dict:
        """Return a dictionary of information about an indexed project.

        If the project has been downloaded and cached, the ``"cache"``
        key will point to the path of the cached file, otherwise its
        value is ``None``.

        Arguments:
            arg: a project specifier

        Example:

            >>> info = wn.config.get_project_info('pwn:3.0')
            >>> info['label']
            'Princeton WordNet'
        """
        id, _, version = arg.partition(':')
        project: Dict = self._projects[id]
        versions: Dict = project['versions']
        # '' or '*' selects the first (default) indexed version.
        if not version or version == '*':
            version = next(iter(versions))
        if version not in versions:
            raise Error(f'no such version: {version!r} ({project})')
        url = versions[version]['resource_url']
        cache_path = self.get_cache_path(url)
        return dict(
            id=id,
            version=version,
            label=project['label'],
            language=project['language'],
            license=versions[version].get('license', project.get('license')),
            resource_url=url,
            cache=cache_path if cache_path.exists() else None
        )

    def get_cache_path(self, arg: str) -> Path:
        """Return the path for caching *arg*.

        The *arg* argument may be either a URL or a project specifier
        that gets passed to :meth:`get_project_info`. Note that this
        is just a path operation and does not signify that the file
        exists in the file system.
        """
        if not is_url(arg):
            arg = self.get_project_info(arg)['resource_url']
        filename = short_hash(arg)
        return self.downloads_directory / filename

    def update(self, data: dict) -> None:
        """Update the configuration with items in *data*.

        Items are only inserted or replaced, not deleted. If a project
        index is provided in the ``"index"`` key, then either the
        project must not already be indexed or any project fields
        (label, language, or license) that are specified must be equal
        to the indexed project.
        """
        if 'data_directory' in data:
            self.data_directory = data['data_directory']
        for id, project in data.get('index', {}).items():
            if id in self._projects:
                # validate that they are the same
                _project = self._projects[id]
                for attr in ('label', 'language', 'license'):
                    if attr in project and project[attr] != _project[attr]:
                        raise Error(f'{attr} mismatch for {id}')
            else:
                self.add_project(
                    id,
                    project['label'],
                    project['language'],
                    license=project.get('license'),
                )
            for version, info in project.get('versions', {}).items():
                self.add_project_version(
                    id,
                    version,
                    info['url'],
                    license=info.get('license'),
                )

    def load_index(self, path: AnyPath) -> None:
        """Load and update with the project index at *path*.

        The project index is a TOML_ file containing project and
        version information. For example:

        .. code-block:: toml

           [ewn]
             label = "Open English WordNet"
             language = "en"
             license = "https://creativecommons.org/licenses/by/4.0/"
             [ewn.versions.2019]
               url = "https://en-word.net/static/english-wordnet-2019.xml.gz"
             [ewn.versions.2020]
               url = "https://en-word.net/static/english-wordnet-2020.xml.gz"

        .. _TOML: https://toml.io
        """
        path = Path(path).expanduser()
        index = toml.load(path)
        self.update({'index': index})
# Module-level singleton: the shared configuration instance, pre-populated
# with the project index bundled inside the wn package.
config = WNConfig()
with resources.path('wn', 'index.toml') as index_path:
    config.load_index(index_path)
| 2.65625 | 3 |
prashpyutil/sum.py | pfarkya/prashpyutil | 0 | 12765182 | <filename>prashpyutil/sum.py
def sumoftwo(a, b):
    """
    Add two numbers and return their sum.

    :param int a: first number to add
    :param int b: second number to add
    :return: the sum of the two numbers
    :rtype: int

    :example:

    >>> r = sumoftwo(2, 3)
    >>> r
    5
    """
    return a + b
| 3.78125 | 4 |
experiments/do_umap_plot_interactive.py | chunliangliu2020/de-novo-drug-design | 0 | 12765183 | <gh_stars>0
import os, sys
import argparse
import configparser
import ast
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import time
import pandas as pd
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem import Descriptors
from rdkit.Chem import Lipinski
import altair as alt
import numpy as np
import matplotlib
matplotlib.use("TKAgg")
import matplotlib.pylab as plt
import seaborn as sns
sys.path.append('../src/')
from python import helper as hp
from python import helper_chem as hp_chem
from python import fixed_parameters as FP
def _str2bool(value):
    """Parse a command-line boolean flag value.

    ``argparse`` with ``type=bool`` is broken: ``bool('False')`` is True, so
    any non-empty string (including ``--verbose False``) enabled verbosity.
    This converter accepts the usual spellings explicitly and stays
    backward compatible with ``--verbose True``.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError(f'Boolean value expected, got {value!r}')

parser = argparse.ArgumentParser(description='Do umap interactive plot')
parser.add_argument('-fn','--filename', type=str, help='Path to the fine-tuning txt file', required=True)
parser.add_argument('-v','--verbose', type=_str2bool, help='Verbose', required=True)
def _safe_round(fn, mol, ndigits):
    """Apply descriptor *fn* to *mol* and round the result, or return None.

    RDKit sometimes raises on molecules parsed from unusual SMILES when
    computing certain properties; returning None keeps the row instead of
    aborting the whole batch. (Replaces five duplicated try/except blocks
    that used a bare ``except:``.)
    """
    try:
        return round(fn(mol), ndigits)
    except Exception:
        return None


def get_dataframe(list_of_smi, embedding, time):
    """Build a DataFrame of molecules, UMAP coordinates and RDKit descriptors.

    rule of 5 taken from: https://squonk.it/docs/cells/Lipinski%20filter%20(RDKit)/

    :param list_of_smi: SMILES strings, one per embedding row.
    :param embedding: array of shape (n_molecules, 2) with UMAP coordinates.
    :param time: label stored in the 'Cluster' column for every row.
    :return: pandas DataFrame with one row per molecule.
    """
    mols = [Chem.MolFromSmiles(smi) for smi in list_of_smi]
    assert len(mols) == embedding.shape[0]

    FractionCSP3 = [_safe_round(Descriptors.FractionCSP3, m, 4) for m in mols]
    mol_weights = [_safe_round(Descriptors.ExactMolWt, m, 2) for m in mols]
    LogP = [_safe_round(Descriptors.MolLogP, m, 2) for m in mols]
    H_donor = [_safe_round(Lipinski.NumHDonors, m, 2) for m in mols]
    H_acceptor = [_safe_round(Lipinski.NumHAcceptors, m, 2) for m in mols]

    # Lipinski's rule-of-5 thresholds.
    limit_mw = 500
    limit_LogP = 5.0
    limit_H_donor = 5
    limit_H_acceptor = 10
    lipinski = []
    for m, l, hd, ha in zip(mol_weights, LogP, H_donor, H_acceptor):
        if m is None or l is None or hd is None or ha is None:
            # A descriptor failed for this molecule: cannot decide.
            lipinski.append('unknown')
        elif m <= limit_mw and l <= limit_LogP and hd <= limit_H_donor and ha <= limit_H_acceptor:
            lipinski.append('respected')
        else:
            lipinski.append('not respected')

    moldf = {}
    moldf['Cluster'] = [f'{time}'] * embedding.shape[0]
    moldf['UMAP1'] = embedding[:, 0]
    moldf['UMAP2'] = embedding[:, 1]
    moldf['SMILES'] = list_of_smi
    # Clicking a point in the altair chart opens the molecule on molview.org.
    moldf['url'] = ['http://molview.org/?q=' + x for x in list_of_smi]
    moldf['Molecular weights'] = mol_weights
    moldf['Fraction Csp3'] = FractionCSP3
    moldf['LogP'] = LogP
    moldf['H-bond donor count'] = H_donor
    moldf['H-bond acceptor count'] = H_acceptor
    moldf['Rule of 5'] = lipinski
    df = pd.DataFrame.from_dict(moldf)
    return df
def do_interactive_chart(df, save_path):
    """Render *df* as an interactive altair scatter plot and save it as
    ``<save_path>.html``."""
    tooltip_cols = ['SMILES', 'Molecular weights', 'Fraction Csp3',
                    'LogP', 'H-bond donor count', 'H-bond acceptor count', 'Rule of 5']
    # Base scatter: one filled point per molecule, colored by cluster,
    # linked (href) to its molview.org page.
    points = alt.Chart(df).transform_calculate().mark_point(filled=True, size=60).encode(
        x='UMAP1',
        y='UMAP2',
        color=alt.Color('Cluster', scale=alt.Scale(scheme='tableau20')),
        tooltip=tooltip_cols,
        href='url')
    styled = points.interactive().properties(width=800, height=600)
    styled = styled.configure_axis(grid=False, ticks=False)
    styled = styled.configure_view(strokeWidth=0)
    styled = styled.configure_header(labelFontSize=16)
    styled.save(f'{save_path}.html')
if __name__ == '__main__':
    start = time.time()
    ####################################
    # get back parameters
    args = vars(parser.parse_args())
    verbose = args['verbose']
    filename = args['filename']
    # Dataset name = basename of the fine-tuning file without extension.
    name_data = filename.split('/')[-1].replace('.txt','')
    config = configparser.ConfigParser()
    config.read('parameters.ini')
    # We do the UMAP only if the default parameters
    # were run, i.e. 40 epochs and models saved
    # every 10 epochs (period = 10)
    check_epoch = int(config['MODEL']['epochs'])
    check_period = int(config['MODEL']['period'])
    if check_epoch==40 and check_period==10:
        min_len = int(config['PROCESSING']['min_len'])
        max_len = int(config['PROCESSING']['max_len'])
        mode = config['EXPERIMENTS']['mode']
        e_end = int(config['MODEL']['epochs'])
        # Zero-pad single-digit epoch numbers to match checkpoint names.
        if e_end<10: e_end=f'0{e_end}'
        n_dataset = FP.UMAP_PLOT['n_dataset']
        temp = float(config['EXPERIMENTS']['temp'])
        if verbose: print('\nSTART DOING INTERACTIVE UMAP PROJECTION')
        ####################################
        ####################################
        # path to the saved UMAP embedding
        # and to save the interative UMAP
        path_umap = f'results/{name_data}/umap/'
        ####################################
        ####################################
        # Do the plot
        path_projection = f'{path_umap}umap_projection_{temp}.npy'
        embedding = np.load(path_projection)
        with open(f'{path_umap}smiles_src.txt', 'r') as f:
            smiles_src = f.read().splitlines()
        with open(f'{path_umap}smiles_tgt.txt', 'r') as f:
            smiles_tgt = f.read().splitlines()
        with open(f'{path_umap}smiles_start_{temp}.txt', 'r') as f:
            smiles_start = f.read().splitlines()
        with open(f'{path_umap}smiles_end_{temp}.txt', 'r') as f:
            smiles_end = f.read().splitlines()
        with open(f'{path_umap}smiles_ft.txt', 'r') as f:
            smiles_ft = f.read().splitlines()
        # The embedding rows are the concatenation of the five SMILES sets
        # in this order: src | tgt | start | end | ft. These limits carve
        # the matching row ranges back out.
        lim_src = len(smiles_src)
        lim_tgt = lim_src + len(smiles_tgt)
        lim_start = lim_tgt + len(smiles_start)
        lim_end = lim_start + len(smiles_end)
        # get separate information
        df_src = get_dataframe(smiles_src,
                               embedding[:lim_src, :],
                               'Source space')
        df_tgt = get_dataframe(smiles_tgt,
                               embedding[lim_src:lim_tgt, :],
                               'Target space')
        df_start = get_dataframe(smiles_start,
                                 embedding[lim_tgt:lim_start, :],
                                 'First epoch')
        df_end = get_dataframe(smiles_end,
                               embedding[lim_start:lim_end, :],
                               'Last epoch')
        df_ft = get_dataframe(smiles_ft,
                              embedding[lim_end:, :],
                              'Target set')
        # concate dataframe
        frames = [df_src, df_tgt, df_start, df_end, df_ft]
        frames_concat = pd.concat(frames)
        # plot
        do_interactive_chart(frames_concat, f'{path_umap}interative_umap')
        end = time.time()
        if verbose: print(f'INTERACTIVE UMAP PROJECTION DONE in {end - start:.04} seconds')
    else:
        print('Defaut paremeters not used; interactive UMAP not done.')
    ####################################
ocrb/tba/modules/submodules.py | ecker-lab/object-centric-representation-benchmark | 25 | 12765184 | import numpy as np
import torch
import torch.nn as nn
import functions.submodules as F
def norm_grad(input, max_norm):
    """Register a backward hook on *input* that rescales its gradient so
    each sample's gradient has an L2 norm of at most *max_norm*.

    No-op when *input* does not require gradients. The first dimension of
    *input* is treated as the batch dimension.
    """
    if input.requires_grad:
        def norm_hook(grad):
            N = grad.size(0)  # batch size
            # 1e-6 guards against division by zero for all-zero gradients.
            norm = grad.view(N, -1).norm(p=2, dim=1) + 1e-6
            # clamp(min=1) only shrinks over-long gradients, never amplifies.
            scale = (norm / max_norm).clamp(min=1).view([N]+[1]*(grad.dim()-1))
            return grad / scale
        input.register_hook(norm_hook)
def clip_grad(input, value):
    """Register a hook clamping each gradient entry of *input* to
    ``[-value, value]``. No-op if *input* does not require gradients."""
    if not input.requires_grad:
        return

    def _clamp_hook(grad):
        return grad.clamp(-value, value)

    input.register_hook(_clamp_hook)
def scale_grad(input, scale):
    """Register a hook multiplying *input*'s gradient by *scale*.
    No-op if *input* does not require gradients."""
    if not input.requires_grad:
        return

    def _scale_hook(grad):
        return grad * scale

    input.register_hook(_scale_hook)
def func(func_name):
    """Map an activation name to a fresh ``nn.Module`` instance.

    :param func_name: one of None, 'tanh', 'relu', 'sigmoid', 'softmax'.
    :return: the corresponding activation module, or None when
        *func_name* is None.
    :raises ValueError: for any unknown name. (Previously ``assert False``,
        which is silently stripped when Python runs with ``-O``.)
    """
    if func_name is None:
        return None
    # Factories so every call returns a fresh module instance.
    factories = {
        'tanh': nn.Tanh,
        'relu': nn.ReLU,
        'sigmoid': nn.Sigmoid,
        'softmax': lambda: nn.Softmax(dim=1),
    }
    try:
        return factories[func_name]()
    except KeyError:
        raise ValueError(f'Invalid func_name: {func_name!r}') from None
class CheckBP(nn.Module):
    """Identity-like wrapper around the project autograd function
    ``functions.submodules.CheckBP``.

    NOTE(review): the implementation lives in ``functions.submodules``;
    judging by the name/arguments it inspects gradients during backprop
    when ``show`` is set -- confirm against that module.
    """
    def __init__(self, label='a', show=1):
        super(CheckBP, self).__init__()
        self.label = label  # tag passed through to F.CheckBP
        self.show = show    # passed through to F.CheckBP (1 by default)
    def forward(self, input):
        return F.CheckBP.apply(input, self.label, self.show)
class Identity(nn.Module):
    """Identity forward, delegated to the project autograd function
    ``functions.submodules.Identity``."""
    def forward(self, input):
        return F.Identity.apply(input)
class Log(nn.Module):
    """Numerically-safe natural logarithm: computes ``log(x + eps)``."""

    def __init__(self, eps=1e-20):
        super(Log, self).__init__()
        # Offset keeping log() finite for zero-valued inputs.
        self.eps = eps

    def forward(self, input):
        return torch.log(input + self.eps)
class Round(nn.Module):
    """
    The round operator, similar to the deterministic Straight-Through Estimator.
    It forwards by rounding the input, and backwards with the original output
    gradients (implementation in ``functions.submodules.Round``).
    """
    def forward(self, input):
        return F.Round.apply(input)
class StraightThrough(nn.Module):
    """
    The stochastic Straight-Through Estimator.
    It forwards by sampling from the input probabilities, and backwards with
    the original output gradients (implementation in
    ``functions.submodules.StraightThrough``).
    """
    def forward(self, input):
        return F.StraightThrough.apply(input)
class ArgMax(nn.Module):
    """
    Input: N * K matrix, where N is the batch size
    Output: N * K matrix, the one-hot encoding of arg_max(input) along the
    last dimension (straight-through variant implemented in
    ``functions.submodules.ArgMax``).
    """
    def forward(self, input):
        # Only 2D (batch, logits) inputs are supported.
        assert input.dim() == 2, 'only support 2D arg max'
        return F.ArgMax.apply(input)
class STGumbelSigmoid(nn.Module):
    """Straight-through Gumbel sigmoid.

    Forward: adds the difference of two Gumbel noise samples to logit(mu),
    divides by the temperature ``tao``, applies a sigmoid, and hard-rounds
    the result via the straight-through ``Round`` (so outputs are 0/1 while
    gradients flow through the soft sigmoid sample).
    """

    def __init__(self, tao=1.0):
        super(STGumbelSigmoid, self).__init__()
        self.tao = tao  # temperature; lower values give sharper samples
        self.log = Log()
        self.round = Round()

    def forward(self, mu):
        log = self.log
        # Draw noise on the same device as the input instead of the previous
        # unconditional .cuda(), which crashed on CPU-only machines.
        u1 = torch.rand(mu.size(), device=mu.device)
        u2 = torch.rand(mu.size(), device=mu.device)
        # -log(-log(u)) is a Gumbel(0, 1) sample.
        a = (log(mu) - log(-log(u1)) - log(1 - mu) + log(-log(u2))) / self.tao
        return self.round(a.sigmoid())
class STGumbelSoftmax(nn.Module):
    """Straight-through Gumbel softmax.

    Forward: perturbs log(mu) with Gumbel noise, divides by the temperature
    ``tao``, softmaxes over dim 1, and returns the one-hot arg-max via the
    straight-through ``ArgMax`` (gradients flow through the soft sample).
    """

    def __init__(self, tao=1.0):
        super(STGumbelSoftmax, self).__init__()
        self.tao = tao  # temperature; lower values give sharper samples
        self.log = Log()
        self.softmax = nn.Softmax(dim=1)
        self.arg_max = ArgMax()

    def forward(self, mu):
        log = self.log
        # Draw noise on the same device as the input instead of the previous
        # unconditional .cuda(), which crashed on CPU-only machines.
        u = torch.rand(mu.size(), device=mu.device)  # N * K
        # -log(-log(u)) is a Gumbel(0, 1) sample.
        a = (log(mu) - log(-log(u))) / self.tao
        return self.arg_max(self.softmax(a))
class GaussianSampler(nn.Module):
    """Reparameterization-trick sampler: returns ``mu + sigma * eps`` with
    ``eps ~ N(0, I)`` and ``sigma = exp(log_var / 2)``.
    """

    def forward(self, mu, log_var):
        # Draw noise on the input's device rather than forcing .cuda(),
        # so the module also works on CPU-only machines.
        standard_normal = torch.randn(mu.size(), device=mu.device)
        return mu + (log_var * 0.5).exp() * standard_normal
class PermutationMatrixCalculator(nn.Module):
    """
    Input: N * K matrix, where N is the batch size
    Output: N * K * K tensor, with each K * K matrix to sort the corresponding
    row of the input (implementation in
    ``functions.submodules.PermutationMatrixCalculator``).
    """
    def __init__(self, descend=True):
        super(PermutationMatrixCalculator, self).__init__()
        self.descend = descend  # sort order: descending when True
    def forward(self, input):
        # Only 2D (batch, values) inputs are supported.
        assert input.dim() == 2, 'only support 2D input'
        return F.PermutationMatrixCalculator.apply(input, self.descend)
class Conv(nn.Module):
    """Stack of Conv2d -> [BatchNorm2d] -> AdaptiveMaxPool2d -> [Dropout2d]
    -> ReLU layers.

    :param conv_features: channel sizes; layer i maps conv_features[i] to
        conv_features[i+1].
    :param conv_kernels: per-layer (kh, kw) kernel sizes; padding kh//2, kw//2
        preserves the spatial size before pooling.
    :param out_sizes: per-layer (H, W) targets for the adaptive max pool.
    :param bn: 1 to insert BatchNorm2d after each conv.
    :param dp: 1 to insert Dropout2d(0.2) after each pool.
    """
    def __init__(self, conv_features, conv_kernels, out_sizes, bn=0, dp=0):
        super(Conv, self).__init__()
        self.layer_num = len(conv_features) - 1
        self.out_sizes = out_sizes
        assert self.layer_num == len(conv_kernels) == len(out_sizes) > 0, 'Invalid conv parameters'
        self.bn = bn
        self.dp = dp
        # Convolutional block; sub-modules are registered by name so that
        # forward() can look them up with getattr.
        for i in range(0, self.layer_num):
            setattr(self, 'conv'+str(i), nn.Conv2d(conv_features[i], conv_features[i+1],
                    (conv_kernels[i][0], conv_kernels[i][1]), stride=1,
                    padding=(conv_kernels[i][0]//2, conv_kernels[i][1]//2)))
            if bn == 1:
                setattr(self, 'bn'+str(i), nn.BatchNorm2d(conv_features[i+1]))
            setattr(self, 'pool'+str(i), nn.AdaptiveMaxPool2d(tuple(out_sizes[i])))
            if dp == 1:
                setattr(self, 'dp'+str(i), nn.Dropout2d(0.2))
        # Activation. Fixed attribute spelling (was 'tranform', inconsistent
        # with DeConv's 'transform') and instantiate nn.ReLU directly.
        self.transform = nn.ReLU()

    def forward(self, X):
        H = X  # N * D * H * W
        for i in range(0, self.layer_num):
            H = getattr(self, 'conv'+str(i))(H)
            if self.bn == 1:
                H = getattr(self, 'bn'+str(i))(H)
            H = getattr(self, 'pool'+str(i))(H)
            if self.dp == 1:
                H = getattr(self, 'dp'+str(i))(H)
            H = self.transform(H)
        return H
class DeConv(nn.Module):
    """Upsampling decoder: [Upsample] -> Conv2d -> [BatchNorm2d] ->
    [Dropout2d] -> ReLU per layer, with an optional output activation.

    :param scales: per-layer nearest-neighbour upsampling factors; a factor
        of 1 skips the Upsample module for that layer.
    :param conv_features: channel sizes; layer i maps conv_features[i] to
        conv_features[i+1].
    :param conv_kernels: per-layer kernel sizes.
    :param conv_paddings: per-layer paddings (converted to tuples).
    :param out_trans: optional name of the output activation (see ``func``).
    :param bn: 1 to insert BatchNorm2d after each conv.
    :param dp: 1 to insert Dropout2d(0.2) after each conv/bn.
    """
    def __init__(self, scales, conv_features, conv_kernels, conv_paddings, out_trans=None, bn=0, dp=0):
        super(DeConv, self).__init__()
        self.layer_num = len(conv_features) - 1
        self.scales = scales
        assert self.layer_num == len(scales) == len(conv_kernels) == len(conv_paddings) > 0, \
            'Invalid deconv parameters'
        self.bn = bn
        self.dp = dp
        # Convolutional block; sub-modules are registered by name so that
        # forward() can look them up with getattr.
        for i in range(0, self.layer_num):
            if scales[i] > 1:
                setattr(self, 'unpool'+str(i), nn.Upsample(scale_factor=scales[i], mode='nearest'))
            setattr(self, 'conv'+str(i), nn.Conv2d(conv_features[i], conv_features[i+1], conv_kernels[i],
                                                   stride=1, padding=tuple(conv_paddings[i])))
            if bn == 1:
                setattr(self, 'bn'+str(i), nn.BatchNorm2d(conv_features[i+1]))
            if dp == 1:
                setattr(self, 'dp'+str(i), nn.Dropout2d(0.2))
        # Transformations
        self.transform = func('relu')
        self.out_trans_func = func(out_trans)
    def forward(self, X):
        H = X  # N * D * H * W
        # Hidden layers
        for i in range(0, self.layer_num):
            if self.scales[i] > 1:
                H = getattr(self, 'unpool'+str(i))(H)
            H = getattr(self, 'conv'+str(i))(H)
            if self.bn == 1:
                H = getattr(self, 'bn'+str(i))(H)
            if self.dp == 1:
                H = getattr(self, 'dp'+str(i))(H)
            # ReLU between hidden layers only; the last layer gets the
            # optional output activation below instead.
            if i < self.layer_num - 1:
                H = self.transform(H)
        # Output layer
        if self.out_trans_func is not None:
            H = self.out_trans_func(H)
        return H
class FCN(nn.Module):
    """Fully connected network: a stack of Linear layers with optional
    batch normalization and configurable hidden/output activations
    (names resolved via ``func``)."""

    def __init__(self, features, hid_trans='tanh', out_trans=None, hid_bn=0, out_bn=0):
        super(FCN, self).__init__()
        self.layer_num = len(features) - 1
        assert self.layer_num > 0, 'Invalid fc parameters'
        self.hid_bn = hid_bn
        self.out_bn = out_bn
        # Linear layers, registered by name for getattr-based lookup.
        for i in range(self.layer_num):
            setattr(self, f'fc{i}', nn.Linear(features[i], features[i + 1]))
            if hid_bn == 1:
                setattr(self, f'hid_bn_func{i}', nn.BatchNorm1d(features[i + 1]))
        if out_bn == 1:
            self.out_bn_func = nn.BatchNorm1d(features[-1])
        # Activations for hidden layers and (optionally) the output.
        self.hid_trans_func = func(hid_trans)
        self.out_trans_func = func(out_trans)

    def forward(self, X):
        H = X
        # Hidden layers: activation (and optional BN) on all but the last.
        for i in range(self.layer_num):
            H = getattr(self, f'fc{i}')(H)
            if i < self.layer_num - 1:
                if self.hid_bn == 1:
                    H = getattr(self, f'hid_bn_func{i}')(H)
                H = self.hid_trans_func(H)
        # Output layer.
        if self.out_bn == 1:
            H = self.out_bn_func(H)
        if self.out_trans_func is not None:
            H = self.out_trans_func(H)
        return H
class CNN(nn.Module):
    """Convolutional encoder: a ``Conv`` feature extractor followed by an
    ``FCN`` head over the flattened features."""

    def __init__(self, params):
        super(CNN, self).__init__()
        self.conv = Conv(params['conv_features'], params['conv_kernels'],
                         params['out_sizes'], bn=params['bn'])
        self.fcn = FCN(params['fc_features'], hid_trans='relu',
                       out_trans=params['out_trans'],
                       hid_bn=params['bn'], out_bn=params['bn'])

    def forward(self, X):
        # X: N * D * H * W
        features = self.conv(X)                     # N * D_out1 * H_out1 * W_out1
        flat = features.view(features.size(0), -1)  # flatten per sample
        return self.fcn(flat)                       # N * D_out2
class DCN(nn.Module):
    """Deconvolutional decoder: an ``FCN`` whose output is reshaped to a
    feature map and upsampled by a ``DeConv`` stack."""

    def __init__(self, params):
        super(DCN, self).__init__()
        self.fcn = FCN(params['fc_features'], hid_trans='relu', out_trans='relu',
                       hid_bn=params['bn'], out_bn=params['bn'])
        self.deconv = DeConv(params['scales'], params['conv_features'],
                             params['conv_kernels'], params['conv_paddings'],
                             out_trans=params['out_trans'], bn=params['bn'])
        # Spatial size the flat FCN output is reshaped into.
        self.H_in, self.W_in = params['H_in'], params['W_in']

    def forward(self, X):
        # X: N * D
        hidden = self.fcn(X)                                   # N * (D1 * H_in * W_in)
        grid = hidden.view(hidden.size(0), -1, self.H_in, self.W_in)
        return self.deconv(grid)                               # N * D2 * H_out * W_out
| 2.21875 | 2 |
setup.py | cng-osensa/HelloPy | 0 | 12765185 | <reponame>cng-osensa/HelloPy
#from distutils.core import setup
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    # name: Name of project - this will be how the project is listed on PyPI
    name = 'osensa-testpackage',
    # packages: this must be the same as the name
    ##packages = ['osensa-testpackage'], # Can manually include packages
    packages = find_packages(exclude=[]), # Or can automatically find packages (use exclude to omit packages not intended for release/install)
    # install_requires: specify what dependencies a project minimally needs to run
    install_requires = [],
    # python_requires: if project only runs on certain Python versions, specify this here
    ## python_requires = '>=3', requires Python 3+
    ## python_requires = '~=3.3', requires Python 3.3 and up, not willing to commit to Python 4 support yet
    ## python_requires = '>=2.6, !=3.0.*, <4' requires Python 2.6, 2.7, and all versions of Python 3 starting with 3.1
    # version: suggested versioning scheme
    # 1.2.0.dev1 development release
    # 1.2.0a1 alpha release
    # 1.2.0b1 beta release
    # 1.2.0rc1 release candidate
    # 1.2.0 final release
    # 1.2.0post1 post release
    # NOTE(review): version is '0.0.2' but download_url below points at the
    # 0.1 tarball -- confirm which is intended.
    version = '0.0.2',
    # description: short description of the project
    description = 'A random test lib',
    # long_description: longer description of the project
    long_description = long_description,
    # author: provides details of author
    author = 'osensa',
    author_email = '<EMAIL>',
    # license: provide type of license you are using
    license='MIT',
    # url: homepage
    url = 'https://github.com/cng-osensa/HelloPy/', # use the URL to the github repo
    download_url = 'https://github.com/cng-osensa/HelloPy/archive/0.1.tar.gz', # Github tarball download
    # keywords: list of keywords that describe this project
    keywords = ['testing', 'logging', 'example'], # arbitrary keywords
    # classifiers: list of classifiers to categorize project. See full listing here: https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers = [],
    )
anc2vec/__init__.py | aedera/anc2vec | 2 | 12765186 | <filename>anc2vec/__init__.py
from .main import get_embeddings
from .main import get_go
| 1.046875 | 1 |
app/settings.py | satrio-pamungkas/vrede-backend-fastapi | 0 | 12765187 | <reponame>satrio-pamungkas/vrede-backend-fastapi
from dotenv import load_dotenv
import os
# Populate os.environ from a local .env file (python-dotenv).
load_dotenv()
# NOTE(review): int(None) raises TypeError when WEB_PORT is unset, so the
# app fails at import time if the variable is missing -- confirm intended.
SERVER_PORT = int(os.getenv("WEB_PORT"))
DATABASE_USER = os.getenv("DB_USER")
DATABASE_PASSWORD = os.getenv("DB_PASSWORD")
DATABASE_SERVER = os.getenv("DB_SERVER")
DATABASE_PORT = os.getenv("DB_PORT")
DATABASE_NAME = os.getenv("DB_NAME")
# PostgreSQL connection URL; sslmode=require forces TLS on the connection.
DATABASE_URL = f"postgresql://{DATABASE_USER}:{DATABASE_PASSWORD}@{DATABASE_SERVER}:{DATABASE_PORT}/{DATABASE_NAME}?sslmode=require"
| 2.046875 | 2 |
listings/extract_features.py | FrederikBoehm/Bachelorarbeit_Text | 0 | 12765188 | model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=init_checkpoint,
layer_indexes=layer_indexes,
use_tpu=False,
use_one_hot_embeddings=False)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=False,
model_fn=model_fn,
config=run_config,
predict_batch_size=batch_size)
input_fn = input_fn_builder(
features=features, seq_length=max_seq_length)
vectorized_text_segments = []
for result in estimator.predict(input_fn, yield_single_examples=True):
layer_output = result["layer_output_0"]
feature_vec = [
round(float(x), 6) for x in layer_output[0].flat
]
vectorized_text_segments.append(feature_vec) | 2.59375 | 3 |
preprocessing/train_word2vec.py | NikosKolitsas/twitter_sentiment_analysis | 0 | 12765189 | <gh_stars>0
""" local word2vec embedding training """
import os
from gensim.models.word2vec import Word2Vec
# Locations of the tweet corpora, relative to this script.
DATA_PATH = '../data'
TRAIN = os.path.join(DATA_PATH, 'train')
TEST = os.path.join(DATA_PATH, 'test')
POS_TWEET_FILE = os.path.join(TRAIN, 'train_pos_full_orig.txt')
NEG_TWEET_FILE = os.path.join(TRAIN, 'train_neg_full_orig.txt')
TEST_TWEET_FILE = os.path.join(TEST, 'test_data_orig.txt')
# Dimensionality of the word2vec embeddings to train.
EMBEDDING_SIZE = 20
def read_tweets(fname):
    """Read the tweets in the given file.

    Returns a 2d array where every row is a tweet, split into words.
    """
    with open(fname, 'r') as tweet_file:
        return [line.split() for line in tweet_file]
pos_tweets = read_tweets(POS_TWEET_FILE)
neg_tweets = read_tweets(NEG_TWEET_FILE)
test_tweets = read_tweets(TEST_TWEET_FILE)
# Train on all tweets (labels don't matter for unsupervised word2vec).
sentences = pos_tweets + neg_tweets + test_tweets
print(len(sentences))
# NOTE(review): `tokens` is computed but never used below.
tokens = [item.strip() for sentence in sentences for item in sentence]
WORKERS = 8
# Skip words seen fewer than 5 times; 10-word context window.
model = Word2Vec(sentences, size=EMBEDDING_SIZE, window=10, min_count=5, workers=WORKERS)
fname = "{0}/word2vec/word2vec-local-gensim-orig-{1}.bin".format(DATA_PATH, EMBEDDING_SIZE)
print("Writing embeddings to file {0}.".format(fname))
model.save(fname)
print("Done! Happy neural networking!")
sorting_gym/envs/wrappers.py | hardbyte/sorting-gym | 5 | 12765190 | from typing import Union, List
import numpy as np
from gym import spaces, ActionWrapper
from gym.spaces import flatten_space, flatdim, unflatten, flatten
from sorting_gym import DiscreteParametric
def merge_discrete_spaces(input_spaces: List[Union[spaces.Discrete, spaces.Tuple, spaces.MultiBinary]]) -> spaces.MultiDiscrete:
    """
    Merge nested Discrete, and MultiBinary spaces into a single MultiDiscrete space
    TODO could also add support for MultiDiscrete
    :param input_spaces: spaces whose discrete dimensions are flattened, in order
    :return: a MultiDiscrete space with one cardinality entry per flattened dimension
    """
    return spaces.MultiDiscrete(_discrete_dims(input_spaces))
def _discrete_dims(input_spaces: Union[spaces.Discrete, spaces.Tuple, spaces.MultiBinary]):
    """Flatten a collection of Discrete/MultiBinary/MultiDiscrete/Tuple
    spaces into a list of per-dimension cardinalities (each MultiBinary
    dimension contributes a 2). Unsupported space types are skipped."""
    dims = []
    for sp in input_spaces:
        if isinstance(sp, spaces.Discrete):
            dims.append(sp.n)
        elif isinstance(sp, spaces.MultiBinary):
            dims += [2] * sp.n
        elif isinstance(sp, spaces.MultiDiscrete):
            dims += list(sp.nvec)
        elif isinstance(sp, spaces.Tuple):
            # Recurse into nested tuples.
            dims += _discrete_dims(sp.spaces)
    return dims
def _discrete_unflatten(argument_space, args):
    """Regroup a flat sequence of discrete values into the structure of
    *argument_space*.

    :param argument_space: a Discrete, MultiDiscrete, MultiBinary or Tuple
        space describing how many leading values each group consumes.
    :param args: flat sequence of discrete values.
    :return: list of grouped values (scalars for Discrete, sub-lists for
        multi-dimensional spaces).
    :raises NotImplementedError: for unsupported space types. (Previously
        ``raise NotImplemented``, which actually raises a ``TypeError``.)
    """
    res = []
    args = list(args)
    while len(args) > 0:
        if isinstance(argument_space, spaces.Discrete):
            res.append(args.pop(0))
        elif isinstance(argument_space, spaces.MultiDiscrete):
            res.append(args[:argument_space.shape[0]])
            del args[:argument_space.shape[0]]
        elif isinstance(argument_space, spaces.MultiBinary):
            # MultiBinary(n).shape == (n,); use .n consistently for both
            # the slice and the delete (the delete previously used shape[0],
            # which is the same value).
            res.append(args[:argument_space.n])
            del args[:argument_space.n]
        elif isinstance(argument_space, spaces.Tuple):
            _num_tuple_args = _discrete_dims(argument_space.spaces)
            res.append(args[:len(_num_tuple_args)])
            del args[:len(_num_tuple_args)]
        else:
            raise NotImplementedError(
                f'unsupported space type: {type(argument_space).__name__}')
    return res
class DisjointMultiDiscreteActionSpaceWrapper(ActionWrapper):
    """Expose a MultiDiscrete action space for each disjoint action space
    instead of a more complex nested space.

    Wrapping a discrete parametric space with the following disjoint spaces:
        Discrete(k),
        Tuple([Discrete(k), MultiBinary(1)]),
        Tuple([Discrete(k), Discrete(k)]),
    should result in output spaces of:
        MultiDiscrete([k]),
        MultiDiscrete([k, 2]),
        MultiDiscrete([k, k])
    """
    def __init__(self, env):
        assert isinstance(env.action_space, DiscreteParametric), (
            "expected DiscreteParametric action space, got {}".format(type(env.action_space)))
        super(DisjointMultiDiscreteActionSpaceWrapper, self).__init__(env)
        self.parametric_space: DiscreteParametric = env.action_space
        # One merged MultiDiscrete space per disjoint sub-space.
        self.disjoint_action_spaces = [merge_discrete_spaces([s]) for s in self.parametric_space.disjoint_spaces]
        self.action_space = DiscreteParametric(env.action_space.parameter_space.n, self.disjoint_action_spaces)

    def action(self, action):
        """
        Convert an action from the merged MultiDiscrete disjoint space into a
        DiscreteParametric action.

        :raises NotImplementedError: always, for now -- re-nesting the merged
            args is not implemented. (Previously ``raise NotImplemented``,
            which raises a ``TypeError`` instead of the intended exception
            and was followed by unreachable code, now removed.)
        """
        assert self.action_space.contains(action), "Given action is not valid in this action space"
        # The leading entry selects which disjoint sub-space applies.
        parameter = action[0]
        # The remaining entries should form a valid MultiDiscrete sample for
        # that sub-space. Note MultiDiscrete samples are int64 ndarrays.
        args = action[1:]
        assert self.disjoint_action_spaces[parameter].contains(np.array(args, dtype=np.int64))
        # TODO: convert *args* back into the nested structure expected by
        # self.env.action_space.disjoint_spaces[parameter].
        raise NotImplementedError(
            'converting merged MultiDiscrete args back to the nested disjoint space')
class MultiDiscreteActionSpaceWrapper(ActionWrapper):
    """Expose a single MultiDiscrete action space instead of a DiscreteParametric action space.
    """
    def __init__(self, env):
        assert isinstance(env.action_space, DiscreteParametric), ("expected DiscreteParametric action space, got {}".format(type(env.action_space)))
        super(MultiDiscreteActionSpaceWrapper, self).__init__(env)
        parametric_space: DiscreteParametric = env.action_space
        # Construct a space from the parametric space's parameter_space and disjoint spaces
        self.action_space = merge_discrete_spaces([parametric_space.parameter_space] + list(parametric_space.disjoint_spaces))
    def action(self, action):
        """Convert a MultiDiscrete action into a DiscreteParametric action."""
        # Get the discrete parameter value
        # NOTE(review): action[0] of a MultiDiscrete sample is a scalar, so
        # np.argmax over it always returns 0 -- confirm the intended action
        # layout (compare BoxActionSpaceWrapper, which argmaxes a one-hot
        # slice).
        parameter = np.argmax(action[0])
        argument_space = self.env.action_space[parameter]
        # Convert the appropriate args for the disjoint space using the parameter:
        # skip the 1 leading parameter entry plus the dims of all earlier spaces.
        start_index = 1 + len(_discrete_dims(self.env.action_space.disjoint_spaces[:parameter]))
        end_index = 1 + len(_discrete_dims(self.env.action_space.disjoint_spaces[:parameter + 1]))
        # Our discrete arguments for the disjoint space
        args = action[start_index:end_index]
        disjoint_args = _discrete_unflatten(argument_space, args)
        # Make the final flat tuple
        transformed_action = [parameter]
        if isinstance(disjoint_args, (tuple, list)):
            transformed_action.extend(disjoint_args)
        else:
            transformed_action.append(disjoint_args)
        assert self.env.action_space.contains(transformed_action)
        return tuple(transformed_action)
class BoxActionSpaceWrapper(ActionWrapper):
    """Expose a flat Box action space instead of a parametric action space.
    Example::
        >>> isinstance(BoxActionSpaceWrapper(env).action_space, Box)
        True
    Note that sampling from a Box is not the same as flattening samples from a richer
    subspace. To draw action space samples from a `SimpleActionSpace` call
    `SimpleActionSpace.action_space_sample()`
    """
    def __init__(self, env):
        assert isinstance(env.action_space, DiscreteParametric), ("expected DiscreteParametric action space, got {}".format(type(env.action_space)))
        super(BoxActionSpaceWrapper, self).__init__(env)
        parametric_space: DiscreteParametric = env.action_space
        # Construct a space from the parametric space's parameter_space and disjoint spaces
        self.action_space = flatten_space(spaces.Tuple([parametric_space.parameter_space] +
                                                       list(parametric_space.disjoint_spaces)))
        # Flat width of each disjoint sub-space, used for index arithmetic below.
        self.disjoint_sizes = [flatdim(space) for space in parametric_space.disjoint_spaces]
    def action(self, action):
        """Convert a flattened action into a parametric space."""
        # Get the discrete parameter value: the first len(disjoint_spaces)
        # entries are a one-hot encoding of the chosen sub-space.
        num_disjoint_spaces = len(self.env.action_space)
        parameter = np.argmax(action[:num_disjoint_spaces])
        argument_space = self.env.action_space[parameter]
        # Now we need to index the appropriate args for the disjoint space using the parameter
        start_index = num_disjoint_spaces
        start_index += sum(self.disjoint_sizes[:parameter])
        end_index = start_index + self.disjoint_sizes[parameter]
        # Flattened arguments for the disjoint space
        args = action[start_index:end_index]
        try:
            disjoint_args = unflatten(argument_space, args)
        except IndexError as e:
            # Very likely the args are invalid for the wrapped space e.g. a Discrete(2) getting all zeros.
            msg = "Failed to unflatten arguments to wrapped space of " + str(argument_space)
            raise ValueError(msg) from e
        # Make the final flat tuple
        transformed_action = [parameter]
        if isinstance(disjoint_args, tuple):
            transformed_action.extend(disjoint_args)
        else:
            transformed_action.append(disjoint_args)
        assert self.env.action_space.contains(transformed_action)
        return tuple(transformed_action)
    def reverse_action(self, action):
        """Convert a wrapped action (e.g. from a DiscreteParametric) into a flattened action"""
        parameter = action[0]
        # One-hot the parameter, then splat the flattened args into their slot.
        result = np.zeros(self.action_space.shape[0], dtype=self.action_space.dtype)
        result[parameter] = 1.0
        start_index = len(self.env.action_space)
        start_index += sum(self.disjoint_sizes[:parameter])
        end_index = start_index + self.disjoint_sizes[parameter]
        result[start_index:end_index] = flatten(self.env.action_space[parameter], action[1:])
        assert self.action_space.contains(result)
        return result
    def action_space_sample(self):
        # Sample from the rich parametric space and flatten it, rather than
        # sampling the Box directly (which would rarely be a valid action).
        rich_sample = self.env.action_space.sample()
        assert self.env.action_space.contains(rich_sample)
        return self.reverse_action(rich_sample)
| 2.828125 | 3 |
Aula_6/exercicio1.py | Mateus-Silva11/AulasPython | 0 | 12765191 | <reponame>Mateus-Silva11/AulasPython<gh_stars>0
# Exercico 1 - listas
# Escreva program que leia a nome de 10 alunos
# Armezene os nomes em uma lista
# Imprema a lista
# Read 10 names, store them in a list, then print the list.
nomes = []
for i in range(1, 11):
    # Bug fix: input() already returns a string; the original wrapped it in
    # a list, so `nomes` became a list of one-element lists and the output
    # printed bracketed values.
    nome = input(f'Informe o {i} Nome')
    nomes.append(nome)
for d in nomes:
    print(f' Nomes informados {d}')
testing/BenchmarkPredictor.py | beratkurar/asar_2018_page_segmentation_competition | 2 | 12765192 | <filename>testing/BenchmarkPredictor.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 21 13:14:50 2017
@author: B
"""
import sys
sys.path.append('/root/PageSegComp/Models/')
import numpy as np
np.random.seed(123)
import argparse
import Models , PageLoadBatches
from keras.callbacks import ModelCheckpoint
from keras import optimizers
import glob
import cv2
import os
# ---- Command-line options (defaults point at the page-segmentation data). ----
parser = argparse.ArgumentParser()
parser.add_argument("--save_weights_path", type = str,default ="bestweights" )
parser.add_argument("--train_images", type = str, default ="ptrain/" )
parser.add_argument("--train_annotations", type = str, default = "pltrain/" )
parser.add_argument("--n_classes", type=int, default = 3 )
parser.add_argument("--input_height", type=int , default = 320 )
parser.add_argument("--input_width", type=int , default = 320 )
parser.add_argument('--validate',action='store_false')
parser.add_argument("--val_images", type = str , default = "pvalidation/")
parser.add_argument("--val_annotations", type = str , default = "plvalidation/")
parser.add_argument("--test_images", type = str , default = "pbench/")
parser.add_argument("--test_annotations", type = str , default = "pltest/")
parser.add_argument("--output_path", type = str , default = "pprediction/")
parser.add_argument("--epochs", type = int, default = 250 )
parser.add_argument("--batch_size", type = int, default = 16 )
parser.add_argument("--val_batch_size", type = int, default = 16 )
parser.add_argument("--test_batch_size", type = int, default = 16 )
parser.add_argument("--load_weights", type = str , default = '')
parser.add_argument("--model_name", type = str , default = "fcn8")
parser.add_argument("--optimizer_name", type = str , default = "sgd")
args = parser.parse_args()
# Unpack the options into module-level names used below.
train_images_path = args.train_images
train_segs_path = args.train_annotations
train_batch_size = args.batch_size
n_classes = args.n_classes
input_height = args.input_height
input_width = args.input_width
validate = args.validate
save_weights_path = args.save_weights_path
epochs = args.epochs
load_weights = args.load_weights
test_images_path = args.test_images
test_segs_path = args.test_annotations
test_batch_size = args.test_batch_size
model_name = args.model_name
# ---- Build the segmentation model selected by --model_name. ----
modelFns = { 'vgg_segnet':Models.VGGSegnet.VGGSegnet , 'vgg_unet':Models.VGGUnet.VGGUnet , 'vgg_unet2':Models.VGGUnet.VGGUnet2 , 'fcn8':Models.FCN8.FCN8 , 'fcn32':Models.FCN32.FCN32 }
modelFN = modelFns[ model_name ]
m = modelFN( n_classes , input_height=input_height, input_width=input_width )
output_height = m.outputHeight
output_width = m.outputWidth
print('loading test images')
images = sorted(glob.glob( test_images_path + "/*.jpg" ) + glob.glob( test_images_path+ "/*.png" ) + glob.glob( test_images_path + "/*.jpeg" ))
images.sort()
print('loading the last best model')
loaded_model=modelFN( n_classes , input_height=input_height, input_width=input_width)
# NOTE(review): checkpoint name is hard-coded here, ignoring
# --save_weights_path / --load_weights -- confirm this is intentional.
loaded_model.load_weights('bestweights02167')
# BGR colors per class: background white, class 1 red, class 2 blue (OpenCV order).
colors=[(255,255,255),(0,0,255),(255,0,0)]
# ---- Predict a colored segmentation map for every test patch. ----
for imgName in images:
    outName = imgName.replace( test_images_path , args.output_path)
    X = PageLoadBatches.getImageArr(imgName , args.input_width , args.input_height)
    pr = loaded_model.predict( np.array([X]))[0]
    # Per-pixel class index from the softmax output.
    pr = pr.reshape(( output_height , output_width , n_classes ) ).argmax( axis=2 )
    seg_img = np.zeros( ( output_height , output_width , 3 ) )
    for c in range(n_classes):
        seg_img[:,:,0] += ((pr[:,: ] == c )*( colors[c][0] )).astype('uint8')
        seg_img[:,:,1] += ((pr[:,: ] == c )*( colors[c][1] )).astype('uint8')
        seg_img[:,:,2] += ((pr[:,: ] == c )*( colors[c][2] )).astype('uint8')
    seg_img = cv2.resize(seg_img , (input_width , input_height ))
    cv2.imwrite(outName , seg_img )
print('combining the predictions')
# ---- Stitch the per-patch predictions back into full pages. ----
# Patch files are named '<page>_patch<k>.png'; group them by page prefix.
patchSize=320
patchNumber=0
predictions='pprediction/'
original='lbench/'
paths = sorted(glob.glob(predictions+ "*.png" ))
pages=[item.split('_patch')[0] for item in paths]
oldpage=pages[0]
g=[[]]
i=0
pathc=0
for page in pages:
    if page==oldpage:
        g[i].append(paths[pathc])
        pathc=pathc+1
    else:
        i=i+1
        g.append([])
        g[i].append(paths[pathc])
        pathc=pathc+1
        oldpage=page
for group in g:
    group=np.array(group)
    # Sort patches of this page by their numeric patch index.
    ord_indices=[]
    for i in range(0,len(group)):
        ord_indices.append(int(group[i].split('_patch')[1].split('.')[0]))
    order = np.argsort(ord_indices)
    group = group[order]
    # Read the original page to recover its dimensions.
    oi=group[0].split('/')[1].split('_patch')[0]+'.jpg'
    originalPage=cv2.imread(original+oi,0)
    rows,cols=originalPage.shape
    x=rows//patchSize
    y=cols//patchSize
    sx=x*patchSize
    sy=y*patchSize
    # White canvas; any border that didn't fit a whole patch is dropped.
    ni=np.zeros((int(sx),int(sy),3))+255
    cp=0
    for i in range(0,sx,patchSize):
        for j in range(0,sy,patchSize):
            ni[i:i+patchSize,j:j+patchSize]=cv2.imread(group[cp],1)
            cp=cp+1
    cv2.imwrite('out/'+group[0].split('/')[1].split('_patch')[0]+'.png',ni)
| 2.03125 | 2 |
wagtail/images/migrations/0023_merge_20200415_1549.py | sprymix/wagtail | 0 | 12765193 | <reponame>sprymix/wagtail<gh_stars>0
# Generated by Django 2.2.12 on 2020-04-15 06:49
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration reconciling the parallel 0021/0022 branches.

    It introduces no schema changes of its own.
    """

    dependencies = [
        ('wagtailimages', '0021_image_file_hash'),
        ('wagtailimages', '0022_auto_20190506_2114'),
    ]

    operations = [
    ]
| 1.398438 | 1 |
py/solutions/boj/division_plan.py | aid95/algorithm-diary | 0 | 12765194 | <reponame>aid95/algorithm-diary
import heapq
import sys
# Fast line reader for competitive-programming style input.
rl = sys.stdin.readline

# Disjoint-set (union-find) state shared by find()/union()/solution().
root = []  # root[i]: parent of node i (root[i] == i for representatives)
rank = []  # rank[i]: upper bound on the height of the tree rooted at i
def find(x: int) -> int:
    """Return the representative of *x*'s set, compressing the path on the way."""
    parent = root[x]
    if parent == x:
        return x
    representative = find(parent)
    root[x] = representative
    return representative
def union(x: int, y: int):
    """Merge the sets containing *x* and *y* using union by rank."""
    rx, ry = find(x), find(y)
    if rx == ry:
        return
    # Attach the shallower tree under the deeper one.
    if rank[rx] > rank[ry]:
        rx, ry = ry, rx
    root[rx] = ry
    if rank[rx] == rank[ry]:
        rank[ry] += 1
def solution(m: int, n: int, graph: list[(int, int, int)]) -> int:
    """Greedily connect components with the cheapest edges (Kruskal-style),
    stopping after n-2 accepted edges, and return the total accepted cost
    excluding the single most expensive accepted edge.

    Edge costs come off the heap in non-decreasing order, so ``answer[-1]``
    is the heaviest accepted edge.

    NOTE(review): mutates the module-level ``root``/``rank`` lists, so this
    is only safe to call once per process.
    """
    answer = []
    # One union-find slot per node id 0..m.
    for i in range(m + 1):
        root.append(i)
        rank.append(1)
    q = []
    for (src, des, cost) in graph:
        heapq.heappush(q, (cost, (src, des)))
    cnt = 0
    while q and cnt < n - 2:
        cost, (src, des) = heapq.heappop(q)
        # Only accept edges that join two different components.
        if find(src) != find(des):
            answer.append(cost)
            union(src, des)
            cnt += 1
    return sum(answer[:-1])
if __name__ == '__main__':
    # Input: "M N" on the first line, then N lines of "A B C" edges.
    M, N = [int(x) for x in rl().rstrip().split()]
    GRAPH = []
    for _ in range(N):
        A, B, C = [int(x) for x in rl().rstrip().split()]
        GRAPH.append((A, B, C))
    print(solution(M, N, GRAPH))
| 3.15625 | 3 |
TwoPointerSW/ProgrammerString.py | PK-100/Competitive_Programming | 70 | 12765195 | """
We consider a string programmer string if some subset of its letters can be rearranged to form the word programmer.
They are anagrams.
Given a long string determine the number of indices within the string that are in between two programmer strings.
The character 'x' inside an anagram is considered redundant and not counted, but it is counted normally as a character outside
an anagram of a programmer-type substring.
This question came in Cognizant Mock Test 2019
"""
def getVector(w):
    """Return a 26-slot letter-frequency vector for *w* (case-insensitive)."""
    counts = [0] * 26
    for ch in w.lower():
        counts[ord(ch) - ord('a')] += 1
    return counts
def getsVector(w):
    """Frequency vector of *w*, serialised via stringifyWindow (its 'x' slot zeroed)."""
    counts = [0] * 26
    for ch in w.lower():
        counts[ord(ch) - ord('a')] += 1
    return stringifyWindow(counts)
def stringifyWindow(arr):
    """Serialise a frequency vector to a digit string.

    NOTE(review): mutates *arr* in place by zeroing index 23 (the 'x' slot);
    callers see this side effect.
    """
    arr[23] = 0
    return ''.join(map(str, arr))
def addToWindow(fv, ch):
    """Count the lowercase letter *ch* into frequency vector *fv*; return *fv*."""
    fv[ord(ch) - ord('a')] += 1
    return fv
def remFromWindow(fv, ch):
    """Remove one count of the lowercase letter *ch* from *fv*; return *fv*."""
    fv[ord(ch) - ord('a')] -= 1
    return fv
def program(string, word):
    """Return the number of characters strictly between the first two
    non-overlapping anagram occurrences of *word* in *string*, or -1 if
    fewer than two are found.

    'x' characters inside a candidate window are treated as padding: the
    window is widened until it holds len(word) non-'x' characters, and
    leading 'x's are skipped when recording match boundaries.
    """
    end1, start2 = 0, 0          # end of the first match / start of the second
    if len(string) < len(word):
        return -1
    n = len(string)
    found = 0                    # number of anagram occurrences located so far
    targethashstr = getsVector(word)
    left = 0
    right = len(word)-1
    window = string[:len(word)]
    windowhash = getVector(window)
    while right < n:
        #print(left, right, string[left:right+1])
        # x is present
        # Widen the window until it contains len(word) non-'x' characters.
        while len(word) > sum(windowhash)-windowhash[23]:
            #print("Window Lengthening Due To Presence Of X")
            right += 1
            if right == n:
                right = n-1
                break
            windowhash = addToWindow(windowhash, string[right])
            #print(string[left:right+1])
        windowhashstr = stringifyWindow(windowhash)
        if windowhashstr == targethashstr and found == 0:
            # First occurrence: remember where it ends.
            found = 1
            while string[left] == 'x':
                left += 1
            #print("First ",left,right)
            end1 = right
        elif windowhashstr == targethashstr and found == 1:
            # Second occurrence: remember where it starts, then stop scanning.
            found = 2
            while string[left] == 'x':
                left += 1
            start2 = left
            #print("Second ",left,right, string[left: right+1])
            break
        else:
            #print("Sliding")
            # No match here: slide the window one character to the right.
            left += 1
            right = left + len(word) - 1
            windowhash = getVector(string[left : right+1])
            continue
        # After a match, jump to the window immediately following it.
        left = right + 1
        right = left + len(word) - 1
        windowhash = getVector(string[left : right+1])
    if found == 2:
        return start2 - end1 - 1
    else:
        return -1
# Manual smoke tests.
print(program('programmerrxprogxxermram','programmer'))
print(program('xprogrxammerprogrammer','programmer'))
print(program('rammerxprogrammer','programmer'))
print(program('progamemrrgramxprgom', 'programmer'))
print(program('progamemrrgramxprergom', 'programmer'))
print(program('progamemrrramxprergom', 'programmer'))
pipeline.py | ishipachev/UdacitySDCND-CarND-Vehicle-Detection-P5 | 0 | 12765196 | # import multiply_detections
from hog_window_search import find_cars
from heat import apply_heat
import pickle
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.image as mpimg
# Restore the trained SVM classifier and the HOG/feature-extraction settings
# pickled by the training script.
dist_pickle = pickle.load(open("output/svc_model.p", "rb"))
# get attributes of our svc object
svc = dist_pickle["svc"]
X_scaler = dist_pickle["scaler"]
orient = dist_pickle["orient"]
pix_per_cell = dist_pickle["pix_per_cell"]
cell_per_block = dist_pickle["cell_per_block"]
colorspace = dist_pickle["colorspace"]
hog_channel = dist_pickle["hog_channel"]
# spatial_size = (32, 32)
# hist_bins = 32
# Sliding-window search regions as (ystart, ystop, scale): small scales near
# the horizon, larger scales closer to the camera.  Same eight bands, in the
# same order, as the previous hand-unrolled version.
_SEARCH_REGIONS = [
    (400, 464, 1.0),
    (416, 480, 1.0),
    (400, 496, 1.5),
    (432, 528, 1.5),
    (400, 528, 2.0),
    (432, 560, 2.0),
    (400, 596, 3.5),
    (464, 660, 3.5),
]


def pipeline(img):
    """Detect vehicles in one RGB float frame.

    Runs the HOG + SVM sliding-window search over every band/scale in
    ``_SEARCH_REGIONS``, draws the raw detections on *img* (mutated in
    place), then fuses overlapping detections with a heat map.

    Args:
        img: RGB image, float values in [0, 1] (as produced by the caller).

    Returns:
        (out_img, heatmap, labels) tuple as produced by ``apply_heat``.
    """
    rectangles = []
    for ystart, ystop, scale in _SEARCH_REGIONS:
        rectangles.extend(find_cars(img, ystart, ystop, scale,
                                    svc, X_scaler, orient, pix_per_cell,
                                    cell_per_block, colorspace))

    # Plot the raw (pre-heat-map) detections on the frame.
    for bbox in rectangles:
        cv2.rectangle(img, bbox[0], bbox[1], (0, 255, 0), 4)

    out_img, heatmap, labels = apply_heat(img, rectangles)
    return out_img, heatmap, labels
video_path = "project_video.mp4"
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output/output2.avi', fourcc, 25.0, (1280, 720), isColor=True)
cap = cv2.VideoCapture(video_path)
cnt = 0
while cap.isOpened():
# for i in range(50):
ret, frame = cap.read()
if ret is True:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = frame.astype(np.float32) / 255
result, heat, labels = pipeline(frame)
# result = cv2.cvtColor(frame, cv2.COLOR_LUV2RGB)
result2 = cv2.cvtColor((result * 255).astype(np.uint8), cv2.COLOR_BGR2RGB)
out.write(result2)
cnt += 1
print(cnt)
else:
break
#
# files = glob.glob("f:/work/sdc/project4/CarND-Vehicle-Detection/output/vlc/*.png")
#
# for file in files:
# img = cv2.imread(file)
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = img.astype(np.float32) / 255
# result, heat, labels = pipeline(img)
#
# fig = plt.figure()
# plt.subplot(121)
# plt.imshow(result)
# plt.title('Car Positions')
# plt.subplot(122)
# plt.imshow(heat, cmap='hot')
# plt.title('Heat Map')
# fig.tight_layout()
#
# # plt.imshow(labels[0], cmap='gray')
# # plt.title('Labels')
# # fig.tight_layout()
| 2.3125 | 2 |
tests/test_deprecated_trie_class.py | Bhargavasomu/py-trie | 0 | 12765197 | <reponame>Bhargavasomu/py-trie
import pytest
from trie import Trie
def test_deprecated_trie():
    """Constructing the legacy Trie class warns, but the trie still works."""
    with pytest.warns(DeprecationWarning):
        legacy_trie = Trie(db={})
    legacy_trie[b'foo'] = b'bar'
    assert b'foo' in legacy_trie
    assert legacy_trie[b'foo'] == b'bar'
| 2.390625 | 2 |
src/accounts/models/__init__.py | earth-emoji/dennea | 0 | 12765198 | from .customer import *
from .vendor import *
from .driver import *
arrow/commands/cmd_remote.py | trstickland/python-apollo | 5 | 12765199 | import click
from arrow.commands.remote.add_organism import cli as add_organism
from arrow.commands.remote.add_track import cli as add_track
from arrow.commands.remote.delete_organism import cli as delete_organism
from arrow.commands.remote.delete_track import cli as delete_track
from arrow.commands.remote.update_organism import cli as update_organism
from arrow.commands.remote.update_track import cli as update_track
@click.group()
def cli():
    # Parent command group; the remote sub-commands are attached below.
    pass


# Register each remote-management sub-command on the group.
cli.add_command(add_organism)
cli.add_command(add_track)
cli.add_command(delete_organism)
cli.add_command(delete_track)
cli.add_command(update_organism)
cli.add_command(update_track)
| 1.539063 | 2 |
tests/integration/cmor/_fixes/cmip6/test_access_esm1_5.py | markelg/ESMValCore | 26 | 12765200 | """Tests for the fixes of ACCESS-ESM1-5."""
import unittest.mock
import iris
import numpy as np
import pytest
from esmvalcore.cmor._fixes.cmip6.access_esm1_5 import Cl, Cli, Clw, Hus, Zg
from esmvalcore.cmor._fixes.common import ClFixHybridHeightCoord
from esmvalcore.cmor.fix import Fix
from esmvalcore.cmor.table import get_var_info
# Hybrid height coordinate "b" values the ACCESS-ESM1-5 ``Cl`` fix is expected
# to install (38 levels; the uppermost levels are all zero).
B_POINTS = [
    0.99771648645401, 0.990881502628326, 0.979542553424835,
    0.9637770652771, 0.943695485591888, 0.919438362121582,
    0.891178011894226, 0.859118342399597, 0.823493480682373,
    0.784570515155792, 0.742646217346191, 0.698050200939178,
    0.651142716407776, 0.602314412593842, 0.55198872089386,
    0.500619947910309, 0.44869339466095, 0.39672577381134,
    0.34526526927948, 0.294891387224197, 0.24621507525444,
    0.199878215789795, 0.156554222106934, 0.116947874426842,
    0.0817952379584312, 0.0518637150526047, 0.0279368180781603,
    0.0107164792716503, 0.00130179093685001,
    0, 0, 0, 0, 0, 0, 0, 0, 0,
]
# Corresponding level bounds for the "b" coordinate.
B_BOUNDS = [
    [1, 0.994296252727509],
    [0.994296252727509, 0.985203862190247],
    [0.985203862190247, 0.971644043922424],
    [0.971644043922424, 0.953709840774536],
    [0.953709840774536, 0.931527435779572],
    [0.931527435779572, 0.905253052711487],
    [0.905253052711487, 0.875074565410614],
    [0.875074565410614, 0.84121161699295],
    [0.84121161699295, 0.80391401052475],
    [0.80391401052475, 0.763464510440826],
    [0.763464510440826, 0.720175802707672],
    [0.720175802707672, 0.674392521381378],
    [0.674392521381378, 0.626490533351898],
    [0.626490533351898, 0.576877355575562],
    [0.576877355575562, 0.525990784168243],
    [0.525990784168243, 0.474301367998123],
    [0.474301367998123, 0.422309905290604],
    [0.422309905290604, 0.370548874139786],
    [0.370548874139786, 0.3195820748806],
    [0.3195820748806, 0.270004868507385],
    [0.270004868507385, 0.222443267703056],
    [0.222443267703056, 0.177555426955223],
    [0.177555426955223, 0.136030226945877],
    [0.136030226945877, 0.0985881090164185],
    [0.0985881090164185, 0.0659807845950127],
    [0.0659807845950127, 0.0389823913574219],
    [0.0389823913574219, 0.0183146875351667],
    [0.0183146875351667, 0.00487210927531123],
    [0.00487210927531123, 0],
    [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0],
    [0, 0], [0, 0],
]
@pytest.fixture
def cl_cubes():
    """``cl`` cubes."""
    # Dummy hybrid-coordinate "b" values; the fix under test is expected to
    # replace them with B_POINTS/B_BOUNDS.
    b_coord = iris.coords.AuxCoord(np.zeros_like(B_POINTS),
                                   bounds=np.zeros_like(B_BOUNDS),
                                   var_name='b')
    cube = iris.cube.Cube(
        np.ones_like(B_POINTS),
        var_name='cl',
        standard_name='cloud_area_fraction_in_atmosphere_layer',
        units='%',
        aux_coords_and_dims=[(b_coord, 0)],
    )
    return iris.cube.CubeList([cube])
def test_get_cl_fix():
    """Test getting of fix."""
    fix = Fix.get_fixes('CMIP6', 'ACCESS-ESM1-5', 'Amon', 'cl')
    assert fix == [Cl(None)]


@unittest.mock.patch(
    'esmvalcore.cmor._fixes.cmip6.access_esm1_5.ClFixHybridHeightCoord.'
    'fix_metadata', autospec=True)
def test_cl_fix_metadata(mock_base_fix_metadata, cl_cubes):
    """Test ``fix_metadata`` for ``cl``."""
    # Make the base-class fix a pass-through so only the b-coordinate
    # replacement performed by Cl itself is under test.
    mock_base_fix_metadata.side_effect = lambda x, y: y
    fix = Cl(None)
    out_cube = fix.fix_metadata(cl_cubes)[0]
    b_coord = out_cube.coord(var_name='b')
    np.testing.assert_allclose(b_coord.points, B_POINTS)
    np.testing.assert_allclose(b_coord.bounds, B_BOUNDS)


def test_cl_fix():
    """Test fix for ``cl``."""
    assert issubclass(Cl, ClFixHybridHeightCoord)


def test_get_cli_fix():
    """Test getting of fix."""
    fix = Fix.get_fixes('CMIP6', 'ACCESS-ESM1-5', 'Amon', 'cli')
    assert fix == [Cli(None)]


def test_cli_fix():
    """Test fix for ``cli``."""
    # ``cli`` reuses the ``cl`` fix class.
    assert Cli is Cl


def test_get_clw_fix():
    """Test getting of fix."""
    fix = Fix.get_fixes('CMIP6', 'ACCESS-ESM1-5', 'Amon', 'clw')
    assert fix == [Clw(None)]


def test_clw_fix():
    """Test fix for ``clw``."""
    # ``clw`` reuses the ``cl`` fix class.
    assert Clw is Cl
@pytest.fixture
def cubes_with_wrong_air_pressure():
    """Cubes with wrong ``air_pressure`` coordinate."""
    # Slightly-off pressure values; the Hus/Zg fixes are expected to round
    # them to the nearest integer level (see the assertions below).
    air_pressure_coord = iris.coords.DimCoord(
        [1000.09, 600.6, 200.0],
        bounds=[[1200.00001, 800], [800, 400.8], [400.8, 1.9]],
        var_name='plev',
        standard_name='air_pressure',
        units='pa',
    )
    hus_cube = iris.cube.Cube(
        [0.0, 1.0, 2.0],
        var_name='hus',
        dim_coords_and_dims=[(air_pressure_coord, 0)],
    )
    zg_cube = hus_cube.copy()
    zg_cube.var_name = 'zg'
    return iris.cube.CubeList([hus_cube, zg_cube])
def test_get_hus_fix():
    """Test getting of fix."""
    fix = Fix.get_fixes('CMIP6', 'ACCESS-ESM1-5', 'Amon', 'hus')
    assert fix == [Hus(None)]


def test_hus_fix_metadata(cubes_with_wrong_air_pressure):
    """Test ``fix_metadata`` for ``hus``."""
    vardef = get_var_info('CMIP6', 'Amon', 'hus')
    fix = Hus(vardef)
    out_cubes = fix.fix_metadata(cubes_with_wrong_air_pressure)
    assert len(out_cubes) == 2
    hus_cube = out_cubes.extract_cube('hus')
    zg_cube = out_cubes.extract_cube('zg')
    assert hus_cube.var_name == 'hus'
    assert zg_cube.var_name == 'zg'
    # Only the hus cube's air_pressure values are rounded; the zg cube
    # keeps the original (wrong) values.
    np.testing.assert_allclose(hus_cube.coord('air_pressure').points,
                               [1000.0, 601.0, 200.0])
    np.testing.assert_allclose(hus_cube.coord('air_pressure').bounds,
                               [[1200.0, 800.0], [800.0, 401.0], [401.0, 2.0]])
    np.testing.assert_allclose(zg_cube.coord('air_pressure').points,
                               [1000.09, 600.6, 200.0])
    np.testing.assert_allclose(zg_cube.coord('air_pressure').bounds,
                               [[1200.00001, 800], [800, 400.8], [400.8, 1.9]])
def test_get_zg_fix():
    """Test getting of fix."""
    fix = Fix.get_fixes('CMIP6', 'ACCESS-ESM1-5', 'Amon', 'zg')
    assert fix == [Zg(None)]


def test_zg_fix_metadata(cubes_with_wrong_air_pressure):
    """Test ``fix_metadata`` for ``zg``."""
    vardef = get_var_info('CMIP6', 'Amon', 'zg')
    fix = Zg(vardef)
    out_cubes = fix.fix_metadata(cubes_with_wrong_air_pressure)
    assert len(out_cubes) == 2
    hus_cube = out_cubes.extract_cube('hus')
    zg_cube = out_cubes.extract_cube('zg')
    assert hus_cube.var_name == 'hus'
    assert zg_cube.var_name == 'zg'
    # Mirror image of the hus test: only the zg cube's air_pressure values
    # are rounded; the hus cube keeps the original (wrong) values.
    np.testing.assert_allclose(hus_cube.coord('air_pressure').points,
                               [1000.09, 600.6, 200.0])
    np.testing.assert_allclose(hus_cube.coord('air_pressure').bounds,
                               [[1200.00001, 800], [800, 400.8], [400.8, 1.9]])
    np.testing.assert_allclose(zg_cube.coord('air_pressure').points,
                               [1000.0, 601.0, 200.0])
    np.testing.assert_allclose(zg_cube.coord('air_pressure').bounds,
                               [[1200.0, 800.0], [800.0, 401.0], [401.0, 2.0]])
| 1.695313 | 2 |
gamma/FileSystem.py | jappe999/WebScraper | 1 | 12765201 | <filename>gamma/FileSystem.py
import os, re
def create_dir(directory):
    """Create *directory* (including missing parents) if needed.

    Returns:
        True when the directory exists afterwards, False on failure.
    """
    try:
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists() + os.makedirs() combination.
        os.makedirs(directory, exist_ok=True)
        return True
    except OSError as e:
        # Keep the original best-effort contract: report and signal failure.
        print(e)
        return False
def set_data(url, html):
    """Persist *html* as data/<host+path>/index.html.

    The scheme and an optional leading 'www.' are stripped from *url*
    to build the output directory name.
    """
    # Raw string so the escaped dot is unambiguous to the regex engine.
    stripped_path = re.sub(r'^(http://|https://)(www\.)?', '', url)
    directory = 'data/' + stripped_path
    create_dir(directory)
    FILE_NAME = 'index.html'
    # The with-block closes the file; the explicit f.close() of the
    # previous version was redundant and has been dropped.
    with open(directory + '/' + FILE_NAME, 'w+') as f:
        f.write(str(html))
| 2.671875 | 3 |
netezza/pyodbc/operations.py | GuidoE/django-netezza | 0 | 12765202 | <gh_stars>0
try:
from django.db.backends.base.operations import BaseDatabaseOperations
except ImportError:
from django.db.backends import BaseDatabaseOperations
class DatabaseOperations(BaseDatabaseOperations):
    # Netezza-specific SQL generation overrides for the Django backend.

    def quote_name(self, name):
        # Return the identifier unquoted.
        return name # Netezza doesn't seem to like quoted names
| 1.84375 | 2 |
fastestimator/trace/io/csv_logger.py | TortoiseHam/fastestimator | 1 | 12765203 | # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from collections import defaultdict
from typing import List, Optional, Union, Iterable
import pandas as pd
from fastestimator.trace.trace import Trace
from fastestimator.util.data import Data
from fastestimator.util.traceability_util import traceable
@traceable()
class CSVLogger(Trace):
"""Log monitored quantities in a CSV file.
Args:
filename: Output filename.
monitor_names: List of keys to monitor. If None then all metrics will be recorded.
mode: What mode(s) to execute this Trace in. For example, "train", "eval", "test", or "infer". To execute
regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument
like "!infer" or "!train".
"""
def __init__(self,
filename: str,
monitor_names: Optional[Union[List[str], str]] = None,
mode: Union[None, str, Iterable[str]] = ("eval", "test")) -> None:
super().__init__(inputs="*" if monitor_names is None else monitor_names, mode=mode)
self.filename = filename
self.data = None
def on_begin(self, data: Data) -> None:
self.data = defaultdict(list)
def on_epoch_end(self, data: Data) -> None:
self.data["mode"].append(self.system.mode)
self.data["epoch"].append(self.system.epoch_idx)
if "*" in self.inputs:
for key, value in data.read_logs().items():
self.data[key].append(value)
else:
for key in self.inputs:
self.data[key].append(data[key])
def on_end(self, data: Data) -> None:
df = pd.DataFrame(data=self.data)
if os.path.exists(self.filename):
df.to_csv(self.filename, mode='a', index=False)
else:
df.to_csv(self.filename, index=False)
| 2.375 | 2 |
rwanda_dummy_deterministic.py | Data-Linkage/Rwandan_linkage | 0 | 12765204 | <reponame>Data-Linkage/Rwandan_linkage
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 20 14:25:54 2022
@author: collys
"""
" Rwanda synthetic data - deterministic linkage example"
""" Matching vars -
Full_name
First_name
Last_name
Month
Year
Sex
Age
Head_of_household
Telephone_number
Province
District
Sector
Cell
Village
Latitude
Longitude"""
# A couple of notes - not all of this script runs as I couldn't figure out how to convert some of the Pyspark
# code to Python in time
# Function that creates matchkeys - This will run and demonstrates the different conditions we can link on.
# Matching loop - doesn't run; have annotated to explain the logic.
# Conflict resolution - doesn't run; have annotated to explain the logic.
import pandas as pd
import numpy as np
import fuzzywuzzy as fz
from fuzzywuzzy import process
import jellyfish
# Read in synthetic data
df1 = pd.read_csv('Data/Mock_Rwanda_Data_Census.csv')
df2 = pd.read_csv('Data/Mock_Rwanda_Data_Pes.csv')

# Need to add some empty rows to the Census data (df1) to get the matchkeys to run
# (NOTE(review): presumably to make both frames the same length so the
# element-wise Series comparisons in MATCHKEYS align -- confirm).
df1.loc[df1.shape[0]] = [None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None]
df1.loc[df1.shape[0]] = [None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None]
df1.loc[df1.shape[0]] = [None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None]
# list of matchkeys. These are the conditions to loop through and make our matches.
# This function does run and produces a list of keys that would be passed to a matching loop
def MATCHKEYS(df1, df2):
    """Build the list of deterministic matchkeys, strictest first.

    Each matchkey is a list of element-wise boolean Series comparing the
    census frame *df1* against the PES frame *df2*; the two frames must be
    the same length and share the same index.

    Returns:
        [mk1, ..., mk11] -- the matching loop keeps the lowest (best) key
        for each candidate pair.
    """
    # Exact match on Full Name, Head of Household, Month & Year of Birth, Village
    mk1 = [df1['full_name'] == df2['full_name'],
           df1['HoH'] == df2['HoH'],
           df1['month'] == df2['month'],
           df1['year'] == df2['year'],
           df1['lati'] == df2['lati'],
           df1['lon'] == df2['lon']]

    # Exact match on Full Name, Head of Household, Age, Village
    mk2 = [df1.full_name == df2.full_name,
           df1.HoH == df2.HoH,
           df1['age.y'] == df2['age.y'],
           df1.lati == df2.lati,
           df1.lon == df2.lon]

    # Exact match on First Name, Last Name, Head of Household, Month & Year of Birth, Village
    mk3 = [df1.first_name == df2.first_name,
           df1.last_name == df2.last_name,
           df1.HoH == df2.HoH,
           df1.month == df2.month,
           df1.year == df2.year,
           df1.lati == df2.lati,
           df1.lon == df2.lon]

    # Exact match on First Name, Last Name, Head of Household, Age, Village
    mk4 = [df1.first_name == df2.first_name,
           df1.last_name == df2.last_name,
           df1.HoH == df2.HoH,
           df1['age.y'] == df2['age.y'],
           df1.lati == df2.lati,
           df1.lon == df2.lon]

    # Fuzzy name - first 3 characters from first and last name.
    # Fixed: the previous Series[0:3] slice compared the first three ROWS of
    # the column, not the first three characters of each name.
    mk5 = [df1.first_name.str[0:3] == df2.first_name.str[0:3],
           df1.last_name.str[0:3] == df2.last_name.str[0:3],
           df1.month == df2.month,
           df1.year == df2.year,
           df1.lati == df2.lati,
           df1.lon == df2.lon]

    # Fuzzy age/DoB (for the actual linkage we want to scale the difference in age depending on the age
    # but for this example I've just set it to be a difference of 3 or fewer)
    mk6 = [df1.full_name == df2.full_name,
           df1.HoH == df2.HoH,
           (df1['age.y'] - df2['age.y']) < 3,
           df1.lati == df2.lati,
           df1.lon == df2.lon]

    mk7 = [df1.full_name == df2.full_name,
           (df1.year - df2.year) < 3,
           df1.lati == df2.lati,
           df1.lon == df2.lon]

    mk8 = [df1.full_name == df2.full_name,
           df1.HoH == df2.HoH,
           (df1.year - df2.year) < 3,
           df1.village == df2.village]

    # Fuzzy geography - use different levels of geography (village, cell, sector). The looser we go on
    # geography the tighter we want the rest of the matching conditions to be to prevent false positives
    mk9 = [df1.full_name == df2.full_name,
           df1.HoH == df2.HoH,
           df1['age.y'] == df2['age.y'],
           df1.village == df2.village]

    mk10 = [df1.full_name == df2.full_name,
            df1.HoH == df2.HoH,
            df1['age.y'] == df2['age.y'],
            df1.cell == df2.cell]

    mk11 = [df1.full_name == df2.full_name,
            df1.HoH == df2.HoH,
            df1.month == df2.month,
            df1.year == df2.year,
            df1.sector == df2.sector]

    # Fixed: the anonymised placeholder `keys = [<KEY>]` is restored to the
    # full list of matchkeys described in the comments above.
    keys = [mk1, mk2, mk3, mk4, mk5, mk6, mk7, mk8, mk9, mk10, mk11]
    return keys
# Matching - I haven't figured out how to un-Pyspark it properly, but I have annotated to demonstrate
# what it should be doing.
# Create a list of columns you want to keep in the matched file
columns = ['id_indi_x', 'id_indi_y', 'mkey']
# Create an empty list/dataframe that you will append your links to
matches = []
# Matching loop
for i, EX in (keys, 1):
print("\n MATCHKEY", i) # prints out which matchkey the loop is currently running
# Join on blocking pass i (joins on each condition)
df = df1.merge(df2, on = EX, how = 'inner')
# Create the KEY column (creates the mkey column)
df = df.insert('mkey')
# Append pairs to final dataset (unions together the data from each pass)
matches = matches.append(df)
# Minimum blocking pass for every unique PAIR (creates a new column showing the minimum mkey for each
# unique pair of ID's (Census and PES ID))
matches = matches.insert('Min_Key_Match', min('mkey').over(Window.partitionBy('id_indi_x', 'id_indi_y')))
# Drops any pairs (records) where the value in the mkey column does not match the value in the minimum
# matchkey column. Essentially this keeps only the best matches.
matches = matches.filter(matches.mkey == matches.Min_key_match).drop('Min_Key_match')
print(matches.count()) # prints out count of data for each matchkey. this is cumulative as the loop runs
# This section also doesn't run, because the above loop doesn't run,
# and this is PySpark code, not Python. Again I have annotated what the purpose is!
# Between key conflicts (non-unique links)
# Calculates the minimum key for each Census ID
matches = matches.withColumn('min_key_cen', F.min('mkey').over(Window.partitionBy('id_cen')))
# Calculates the minimum key for each PES ID
matches = matches.withColumn('min_key_pes', F.min('mkey').over(Window.partitionBy('id_pes')))
# Filters out any pairs of records where either Census or PES record has been matched by an earlier matchkey
matches = matches.filter(((matches['min_key_cen'] == matches['mkey']) &
(matches['min_key_pes'] == matches['mkey']) | (matches.mkey <5)))
# Within key non-uniques
# Calculates how many Census records links there are by each PES ID
matches = matches.withColumn('match_count_cen', F.approx_count_distinct('id_pes').over(Window.partitionBy('id_cen')))
# Calculates how many PES records links there are by each Census ID
matches = matches.withColumn('match_count_pes', F.approx_count_distinct('id_cen').over(Window.partitionBy('id_pes')))
# Filters out duplicate records (i.e. records where the ID count does not equal 1)
matches = matches.filter((matches['match_count_cen'] == 1) &
(matches['match_count_pes'] == 1))
| 2.296875 | 2 |
spartan/expr/base.py | MaggieQi/spartan | 0 | 12765205 | <reponame>MaggieQi/spartan<filename>spartan/expr/base.py
'''
Defines the base class of all expressions (`Expr`), as
well as common subclasses for collections.
'''
import collections
import weakref
import sys
import traceback
import numpy as np
from ..node import Node, indent
from .. import blob_ctx, node, util
from ..util import Assert, copy_docstring
from ..array import distarray
from ..config import FLAGS, BoolFlag
from ..rpc import TimeoutException
from traits.api import Any, Instance, Int, PythonValue
# Register the flag that globally enables/disables expression caching.
FLAGS.add(BoolFlag('opt_expression_cache', True, 'Enable expression caching.'))


class newaxis(object):
  '''
  The newaxis object indicates that users wish to add a new dimension
  '''


class NotShapeable(Exception):
  '''
  Thrown when the shape for an expression cannot be computed without
  first evaluating the expression.
  '''


# Source of unique expression ids; advanced once per new Expr.
unique_id = iter(xrange(10000000))
def _map(*args, **kw):
  '''
  Indirection for handling builtin operators (+,-,/,*).

  (Map is implemented in map.py)
  '''
  fn = kw['fn']
  # Imported here rather than at module top -- presumably to avoid a
  # circular import between base.py and map.py; confirm before moving.
  from .map import map
  return map(args, fn)
def expr_like(expr, **kw):
  '''Construct a new expression like ``expr``.

  The new expression has the same id, but is initialized using ``kw``
  '''
  kw['expr_id'] = expr.expr_id

  trace = kw.pop('trace', None)
  # `is not None` instead of `!= None`: identity is the correct test for
  # None and cannot be confused by a custom __ne__.
  if trace is not None and FLAGS.opt_keep_stack:
    trace.fuse(expr.stack_trace)
    kw['stack_trace'] = trace
  else:
    kw['stack_trace'] = expr.stack_trace

  new_expr = expr.__class__(**kw)
  #util.log_info('Copied %s', new_expr)
  return new_expr
# Pulled out as a class so that we can add documentation.
class EvalCache(object):
  '''Reference-counted cache of evaluation results, keyed by expression id.

  Expressions can be copied around during optimization or user actions, so
  results are tracked by ``expr_id`` (which survives copies) rather than by
  object identity.  Reference counts are maintained manually; an entry is
  dropped from the cache as soon as its count reaches zero.
  '''
  def __init__(self):
    self.refs = collections.defaultdict(int)
    self.cache = {}

  def set(self, exprid, value):
    # Store (or replace) the cached result for this expression id.
    self.cache[exprid] = value

  def get(self, exprid):
    # None when nothing has been cached for this id.
    return self.cache.get(exprid, None)

  def register(self, exprid):
    self.refs[exprid] += 1

  def deregister(self, expr_id):
    remaining = self.refs[expr_id] - 1
    self.refs[expr_id] = remaining
    if remaining == 0:
      # Last reference gone: reclaim both the result and the counter.
      self.cache.pop(expr_id, None)
      del self.refs[expr_id]

  def clear(self):
    self.refs.clear()
    self.cache.clear()
class ExprTrace(object):
  '''
  Captures the stack trace for an expression.

  Lazy evaluation and optimization can result in stack traces that are very far
  from the actual source of an error.  To combat this, expressions track their
  original creation point, which is logged when an error occurs.

  Multiple stack traces can be tracked, as certain optimizations
  will combine multiple expressions together.
  '''
  def __init__(self):
    if FLAGS.capture_expr_stack:
      # Skip the innermost construction frames so the trace points at the
      # user code that built the expression.
      self.stack_list = [traceback.extract_stack(sys._getframe(3))]
    else:
      self.stack_list = []

  def format_stack(self):
    # Render every captured stack as human-readable lines.
    trace = []
    for i, st in enumerate(self.stack_list):
      trace.append('Stack %d of %d' % (i, len(self.stack_list)))
      trace.append('-' * 80 + '\n')
      for (filename, lineno, fname, txt) in st:
        trace.append('%d >>> %s:%s [%s]: %s\n' % (i, filename, lineno, fname, txt[:60]))
      #trace.extend(st)
    return trace

  def dump(self):
    # Print the captured traces to stderr, with hints when capture is off.
    if not FLAGS.capture_expr_stack:
      print >>sys.stderr, 'Stack tracking for expressions is disabled. Use --capture_expr_stack=1 to enable.'
      return

    print >>sys.stderr, 'Expr creation stack traceback.'
    if not FLAGS.opt_keep_stack:
      print >>sys.stderr, '  Use --opt_keep_stack=True to see expressions merged during optimization.'

    for s in self.format_stack():
      sys.stderr.write(s)

  def fuse(self, trace):
    # Adopt another expression's traces (used when optimizations merge exprs).
    self.stack_list.extend(trace.stack_list)
# Process-wide cache instance shared by every Expr.
eval_cache = EvalCache()
class Expr(Node):
'''
Base class for all expressions.
`Expr` objects capture user operations.
An expression can have one or more dependencies, which must
be evaluated before the expression itself.
Expressions may be evaluated (using `Expr.force`), the
result of evaluating an expression is cached until the expression
itself is reclaimed.
'''
expr_id = PythonValue(None, desc="Integer or None")
stack_trace = Instance(ExprTrace)
# should evaluation of this object be cached
needs_cache = True
optimized_expr = None
  @property
  def ndim(self):
    # Number of dimensions, derived from the (possibly computed) shape.
    return len(self.shape)

  def load_data(self, cached_result):
    '''Hook for checkpointed expressions to reload missing data.

    The base implementation has nothing to reload and returns None.
    '''
    #util.log_info('expr:%s load_data from not checkpoint node', self.expr_id)
    return None

  def cache(self):
    '''
    Return a cached value for this `Expr`.

    If a cached value is not available, or the cached array is
    invalid (missing tiles), returns None.
    '''
    result = eval_cache.get(self.expr_id)
    if result is not None and len(result.bad_tiles) == 0:
      return result

    # Either nothing is cached or some tiles are bad; give checkpointed
    # subclasses a chance to reload data via load_data().
    return self.load_data(result)
    # get distarray from eval_cache
    # check if still valid
    # if valid, return
    # if not valid: check for disk data
    #   if disk data: load bad tiles back
    #   else: return None
    #return eval_cache.get(self.expr_id, None)
def dependencies(self):
'''
Returns:
Dictionary mapping from name to `Expr`.
'''
return dict([(k, getattr(self, k)) for k in self.members])
  def compute_shape(self):
    '''
    Compute the shape of this expression.

    If the shape is not available (data dependent), raises `NotShapeable`.

    Returns:
      tuple: Shape of this expression.
    '''
    raise NotShapeable

  def visit(self, visitor):
    '''
    Apply visitor to all children of this node, returning a new `Expr` of the same type.

    :param visitor: `OptimizePass`
    '''
    deps = {}

    for k in self.members:
      deps[k] = visitor.visit(getattr(self, k))

    # The copy keeps the same expr_id (see expr_like).
    return expr_like(self, **deps)

  def __repr__(self):
    # The debug representation doubles as repr.
    return self.pretty_str()

  def pretty_str(self):
    '''Return a pretty representation of this node, suitable for showing to users.

    By default, this returns the debug representation.
    '''
    return self.debug_str()
    #raise NotImplementedError, type(self)

  def __del__(self):
    # Drop this copy's reference in the global cache so the cached result
    # can be reclaimed once the last copy dies.
    eval_cache.deregister(self.expr_id)
  def __init__(self, *args, **kw):
    '''Initialize the expression and register it with the global cache.

    Allocates an ``expr_id`` when the caller did not supply one, and
    captures a creation stack trace (if enabled) for error reporting.
    '''
    super(Expr, self).__init__(*args, **kw)
    #assert self.expr_id is not None
    if self.expr_id is None:
      self.expr_id = unique_id.next()
    else:
      Assert.isinstance(self.expr_id, int)

    if self.stack_trace is None:
      self.stack_trace = ExprTrace()

    eval_cache.register(self.expr_id)
    # Caching can be globally disabled via --opt_expression_cache.
    self.needs_cache = self.needs_cache and FLAGS.opt_expression_cache
def evaluate(self):
    '''
    Evaluate an `Expr`.

    Dependencies are evaluated prior to evaluating the expression.

    The result of the evaluation is stored in the expression cache,
    future calls to evaluate will return the cached value.

    Returns:
      DistArray:
    '''
    # Fast path: a previously computed (and still valid) result.
    cache = self.cache()
    if cache is not None:
        util.log_debug('Retrieving %d from cache' % self.expr_id)
        return cache

    ctx = blob_ctx.get()
    #util.log_info('Evaluting deps for %s', prim)
    # Recursively evaluate child expressions; plain values pass through.
    deps = {}
    for k, vs in self.dependencies().iteritems():
        if isinstance(vs, Expr):
            deps[k] = vs.evaluate()
        else:
            #assert not isinstance(vs, (dict, list)), vs
            deps[k] = vs

    try:
        value = self._evaluate(ctx, deps)
        #value = self.optimized()._evaluate(ctx, deps)
    except TimeoutException:
        # Transient cluster timeout: retry the whole evaluation.
        util.log_info('%s %d need to retry', self.__class__, self.expr_id)
        return self.evaluate()
    except Exception:
        # Surface the construction site of this expression before
        # propagating the failure.
        print >>sys.stderr, 'Error executing expression'
        self.stack_trace.dump()
        raise

    if self.needs_cache:
        #util.log_info('Caching %s -> %s', prim.expr_id, value)
        eval_cache.set(self.expr_id, value)

    return value
def _evaluate(self, ctx, deps):
    '''
    Evaluate this expression.

    Args:
      ctx: `BlobCtx` for interacting with the cluster
      deps (dict): Map from name to `DistArray` or scalar.
    '''
    # Subclasses implement the actual computation.
    raise NotImplementedError
def __hash__(self):
    # An expression hashes to its unique id, so caches can key on it.
    return self.expr_id

def typename(self):
    """Return the name of this expression's concrete class."""
    return type(self).__name__
# Elementwise operator overloads: each builds a lazy map expression over
# the operands via _map rather than computing anything immediately.
def __add__(self, other):
    return _map(self, other, fn=np.add)

def __sub__(self, other):
    return _map(self, other, fn=np.subtract)

def __mul__(self, other):
    '''
    Multiply 2 expressions.

    :param other: `Expr`
    '''
    return _map(self, other, fn=np.multiply)

def __mod__(self, other):
    return _map(self, other, fn=np.mod)

def __div__(self, other):
    return _map(self, other, fn=np.divide)

def __eq__(self, other):
    return _map(self, other, fn=np.equal)

def __ne__(self, other):
    return _map(self, other, fn=np.not_equal)

def __lt__(self, other):
    return _map(self, other, fn=np.less)

def __gt__(self, other):
    return _map(self, other, fn=np.greater)

def __and__(self, other):
    return _map(self, other, fn=np.logical_and)

def __or__(self, other):
    return _map(self, other, fn=np.logical_or)

def __xor__(self, other):
    # BUG FIX: this was previously named '__xor' (no trailing underscores),
    # which Python name-mangles to '_Expr__xor', so the '^' operator never
    # dispatched here. '__xor__' is the correct special-method name.
    return _map(self, other, fn=np.logical_xor)

def __pow__(self, other):
    return _map(self, other, fn=np.power)

def __neg__(self):
    return _map(self, fn=np.negative)

def __rsub__(self, other):
    return _map(other, self, fn=np.subtract)

def __radd__(self, other):
    return _map(other, self, fn=np.add)

def __rmul__(self, other):
    return _map(other, self, fn=np.multiply)

def __rdiv__(self, other):
    return _map(other, self, fn=np.divide)

# '__div__'/'__rdiv__' are only consulted by Python 2; alias the Python 3
# names so '/' keeps working under either interpreter.
__truediv__ = __div__
__rtruediv__ = __rdiv__
def reshape(self, new_shape):
    '''
    Return a new array with shape ``new_shape``, and data from
    this array.

    :param new_shape: `tuple` with same total size as original shape.
    '''
    # Import locally (under an alias) to avoid a circular import at
    # module load time; the package exports a reshape() helper.
    from . import reshape as _pkg_reshape
    return _pkg_reshape(self, new_shape)
def __getitem__(self, idx):
    # Dispatch indexing to the appropriate lazy expression:
    #   * int/tuple/slice -> SliceExpr (possibly wrapped in ReshapeExpr
    #     when integer indices or np.newaxis change the rank)
    #   * anything else (e.g. a boolean mask expr) -> FilterExpr
    from .slice import SliceExpr
    from .filter import FilterExpr
    from .reshape import ReshapeExpr

    if isinstance(idx, (int, tuple, slice)):
        # Integer entries in a tuple index select a single position along
        # that axis, so the corresponding dimension must be dropped.
        is_del_dim = False
        del_dim = list()
        if isinstance(idx, tuple):
            for x in xrange(len(idx)):
                if isinstance(idx[x], int):
                    is_del_dim = True
                    del_dim.append(x)
        if isinstance(idx, int) or is_del_dim or (isinstance(idx, tuple) and (newaxis in idx)):
            #The shape has to be updated
            if isinstance(idx, tuple):
                # Strip newaxis markers and widen bare -1 into a slice for
                # the underlying SliceExpr.
                new_shape = tuple([slice(x, None, None) if x == -1 else x for x in idx if not x == newaxis])
            else:
                new_shape = idx
            ret = SliceExpr(src=self, idx = new_shape)
            new_shape = []
            if isinstance(idx, tuple):
                # Walk the sliced shape and the original index in parallel,
                # inserting a length-1 dimension wherever newaxis appeared.
                # NOTE(review): indentation reconstructed — shape_ptr is
                # assumed to advance only for non-newaxis entries; confirm
                # against upstream source.
                shape_ptr = idx_ptr = 0
                while shape_ptr < len(ret.shape) or idx_ptr < len(idx):
                    if idx_ptr < len(idx) and idx[idx_ptr] == newaxis:
                        new_shape.append(1)
                    else:
                        new_shape.append(ret.shape[shape_ptr])
                        shape_ptr += 1
                    idx_ptr += 1
            else:
                # A bare integer index drops the leading dimension.
                new_shape = list(ret.shape)
                del_dim.append(0)
            #Delete dimension if needed
            if is_del_dim:
                for i in del_dim:
                    new_shape.pop(i)
            return ReshapeExpr(array=ret, new_shape=new_shape)
        else:
            #This means it's just a simple slice op
            return SliceExpr(src=self, idx=idx)
    else:
        return FilterExpr(src=self, idx=idx)
def __setitem__(self, k, val):
    """Expressions are immutable; item assignment always fails.

    Raises:
      Exception: always.
    """
    # Python-3-compatible raise syntax (was: ``raise Exception, '...'``,
    # which is a SyntaxError under Python 3). Message kept byte-identical.
    raise Exception('Expressions are read-only.')
@property
def shape(self):
    '''Try to compute the shape of this expression.

    If the value has been computed already this always succeeds.

    :rtype: `tuple`
    '''
    cache = self.cache()
    if cache is not None:
        return cache.shape

    try:
        return self.compute_shape()
    except NotShapeable:
        # Shape is data dependent: force evaluation and read the shape
        # off the resulting distributed array.
        util.log_debug('Not shapeable: %s', self)
        return evaluate(self).shape

@property
def size(self):
    # Total number of elements: product of all dimensions.
    return np.prod(self.shape)

def force(self):
    'Evaluate this expression (and all dependencies).'
    return self.evaluate()
    #return self.optimized().evaluate()

def optimized(self):
    '''
    Return an optimized version of this expression graph.

    :rtype: `Expr`
    '''
    # If the expr has been optimized, return the cached optimized expr.
    #return optimized_dag(self)
    if self.optimized_expr is None:
        self.optimized_expr = optimized_dag(self)
        # The optimized expression is its own fixed point, so calling
        # optimized() on it will not re-run the optimizer passes.
        self.optimized_expr.optimized_expr = self.optimized_expr
        return self.optimized_expr
    else:
        return self.optimized_expr

def glom(self):
    '''
    Evaluate this expression and convert the resulting
    distributed array into a Numpy array.

    :rtype: `np.ndarray`
    '''
    return glom(self)

def __reduce__(self):
    # Pickling an expression pickles its *evaluated* result instead of
    # the (unpicklable) lazy graph.
    return evaluate(self).__reduce__()
class AsArray(Expr):
    '''Promote a value to be array-like.

    This should be wrapped around most user-inputs that may be
    used in an array context, e.g. (``1 + x => map((as_array(1), as_array(x)), +)``)
    '''
    val = PythonValue

    def visit(self, visitor):
        # Leaf node: nothing to rewrite.
        return self

    def compute_shape(self):
        if hasattr(self.val, 'shape'):
            return self.val.shape
        if np.isscalar(self.val):
            # Scalars promote to 0-d arrays, whose shape is ().
            return np.asarray(self.val).shape
        raise NotShapeable

    def _evaluate(self, ctx, deps):
        util.log_debug('%s: Array promotion: value=%s', self.expr_id, deps['val'])
        return distarray.as_array(deps['val'])

    def pretty_str(self):
        return str(self.val)
class Val(Expr):
    '''Convert an existing value to an expression.'''
    val = PythonValue
    # The wrapped value is already materialized; nothing to cache.
    needs_cache = False

    @copy_docstring(Expr.visit)
    def visit(self, visitor):
        return self

    def dependencies(self):
        # A wrapped value has no child expressions.
        return {}

    @copy_docstring(Expr.compute_shape)
    def compute_shape(self):
        # BUG FIX: previously decorated with copy_docstring(Expr.visit),
        # which attached the wrong docstring; copy the matching one.
        return self.val.shape

    def _evaluate(self, ctx, deps):
        return self.val

    def pretty_str(self):
        return str(self.val)
class CollectionExpr(Expr):
    '''
    `CollectionExpr` subclasses wrap normal tuples, lists and dicts with `Expr` semantics.

    `CollectionExpr.visit` and `CollectionExpr.evaluate` will visit or evaluate
    all of the tuple, list or dictionary elements in this expression.
    '''
    # Collections are cheap wrappers; caching buys nothing.
    needs_cache = False
    vals = PythonValue

    def _evaluate(self, ctx, deps):
        # deps already holds the evaluated elements (see dependencies()
        # in the concrete subclasses).
        return deps

    def __getitem__(self, idx):
        return self.vals[idx]

    def __iter__(self):
        return iter(self.vals)
class DictExpr(CollectionExpr):
    # Thin delegating wrappers so a DictExpr can be used like a dict
    # (Python 2 iterator protocol).
    def iteritems(self): return self.vals.iteritems()
    def keys(self): return self.vals.keys()
    def values(self): return self.vals.values()
    def itervalues(self): return self.vals.itervalues()
    def iterkeys(self): return self.vals.iterkeys()

    def pretty_str(self):
        return '{ %s } ' % ',\n'.join(
            ['%s : %s' % (k, repr(v)) for k, v in self.vals.iteritems()])

    def dependencies(self):
        # The wrapped dict already maps name -> Expr, matching the
        # dependency protocol directly.
        return self.vals

    def visit(self, visitor):
        return DictExpr(vals=dict([(k, visitor.visit(v)) for (k, v) in self.vals.iteritems()]))
class ListExpr(CollectionExpr):
    '''A list of expressions, evaluated element-wise.'''

    def dependencies(self):
        # Name each element 'v0', 'v1', ... so the list fits the
        # dict-shaped dependency protocol.
        return {'v%d' % pos: item for pos, item in enumerate(self.vals)}

    def pretty_str(self):
        return indent('[\n%s\n]') % ','.join([v.pretty_str() for v in self.vals])

    def __str__(self):
        return repr(self)

    def _evaluate(self, ctx, deps):
        # Reassemble the evaluated elements in their original order.
        return [deps['v%d' % pos] for pos in range(len(self.vals))]

    def visit(self, visitor):
        return ListExpr(vals=[visitor.visit(item) for item in self.vals])

    def __len__(self):
        return len(self.vals)
class TupleExpr(CollectionExpr):
    '''A tuple of expressions, evaluated element-wise.'''

    def dependencies(self):
        return {'v%d' % pos: item for pos, item in enumerate(self.vals)}

    def pretty_str(self):
        return '( %s )' % ','.join([item.pretty_str() for item in self.vals])

    def _evaluate(self, ctx, deps):
        # Rebuild the evaluated elements into a tuple, preserving order.
        return tuple(deps['v%d' % pos] for pos in range(len(self.vals)))

    def visit(self, visitor):
        return TupleExpr(vals=tuple(visitor.visit(item) for item in self.vals))

    def __len__(self):
        return len(self.vals)
def glom(value):
    '''
    Evaluate this expression and return the result as a `numpy.ndarray`.
    '''
    # Reduce an Expr to a concrete value first, then localize it.
    if isinstance(value, Expr):
        value = evaluate(value)
    return value if isinstance(value, np.ndarray) else value.glom()
def optimized_dag(node):
    '''
    Optimize and return the DAG representing this expression.

    :param node: The node to compute a DAG for.
    '''
    if isinstance(node, Expr):
        # Local import avoids a circular dependency at module load time.
        from . import optimize
        return optimize.optimize(node)
    raise TypeError
def force(node):
    '''
    Evaluate ``node``.

    :param node: `Expr`
    '''
    return evaluate(node)

def evaluate(node):
    '''
    Evaluate ``node``.

    :param node: `Expr`
    '''
    if not isinstance(node, Expr):
        # Already-materialized values pass straight through, but must be
        # an array type we understand.
        Assert.isinstance(node, (np.ndarray, distarray.DistArray))
        return node
    return node.force()
def eager(node):
    '''
    Eagerly evaluate ``node`` and convert the result back into an `Expr`.

    :param node: `Expr` to evaluate.
    '''
    return Val(val=force(node))

def lazify(val):
    '''
    Lift ``val`` into an Expr node.

    If ``val`` is already an expression, it is returned unmodified.

    :param val: anything.
    '''
    #util.log_info('Lazifying... %s', val)
    if isinstance(val, Expr):
        return val

    # Wrap containers in the matching collection expression (checked in
    # dict, list, tuple order); anything else becomes a plain Val leaf.
    for pytype, expr_cls in ((dict, DictExpr), (list, ListExpr), (tuple, TupleExpr)):
        if isinstance(val, pytype):
            return expr_cls(vals=val)
    return Val(val=val)
def as_array(v):
    '''
    Convert a numpy value or scalar into an `Expr`.

    :param v: `Expr`, numpy value or scalar.
    '''
    # Expressions pass through untouched; everything else is promoted.
    return v if isinstance(v, Expr) else AsArray(val=v)
| 2.109375 | 2 |
tests/test_metaclass.py | Lex0ne/trafaret_validator | 8 | 12765206 | from unittest import TestCase
import trafaret as t
from trafaret_validator import TrafaretValidator
class ValidatorForTest(TrafaretValidator):
    # Trafaret instances should be collected into _validators by the
    # TrafaretValidator metaclass.
    t_value = t.Int()
    # Plain attributes must be left untouched by the metaclass.
    value = 5

class ValidatorForTest2(ValidatorForTest):
    # Adds a second trafaret on top of the inherited t_value.
    test = t.String()
class TestMetaclass(TestCase):
    """Checks that the TrafaretValidator metaclass collects trafaret
    attributes into ``_validators``, builds a combined trafaret, and
    behaves the same through inheritance."""

    def _check_validator_class(self, validator_cls, expected_keys):
        """Shared assertions for a TrafaretValidator subclass.

        :param validator_cls: class to inspect.
        :param expected_keys: attribute names that must appear in
            ``_validators``.
        """
        self.assertIsInstance(validator_cls._validators, dict,
                              'Value should be instance of dict')
        for key in expected_keys:
            self.assertIn(key, validator_cls._validators,
                          'Value should be in _validators')
        # Non-trafaret attributes must not be picked up.
        self.assertNotIn('value', validator_cls._validators,
                         'Value should not be in _validators')
        self.assertIsInstance(validator_cls._trafaret, t.Trafaret,
                              'Value should be instance of Trafaret')
        self.assertFalse(validator_cls._data,
                         '_data should be empty')
        self.assertFalse(validator_cls._errors,
                         '_data should be empty')

    def test_metaclass(self):
        self._check_validator_class(ValidatorForTest, ['t_value'])

    def test_inheritance(self):
        # Inherited trafarets are merged with the subclass's own.
        self._check_validator_class(ValidatorForTest2, ['t_value', 'test'])
| 3.21875 | 3 |
MavenScrapy/MavenScrapy/settings.py | wjcIvan/oschinaLearning | 1 | 12765207 | # -*- coding: utf-8 -*-
# Scrapy settings for MavenScrapy project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'MavenScrapy'

# Ensure exported feeds preserve non-ASCII characters.
FEED_EXPORT_ENCODING = 'utf-8'

# scrapy-redis: distributed scheduler + Redis-backed request de-duplication.
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
#REDIS_HOST = '192.168.22.106'
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REDIS_PASSWORD = '<PASSWORD>'

SPIDER_MODULES = ['MavenScrapy.spiders']
NEWSPIDER_MODULE = 'MavenScrapy.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'MavenScrapy (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 128

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
}
# NOTE: this UA overrides the commented-out default above.
USER_AGENT = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0"

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'MavenScrapy.middlewares.MavenscrapySpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'MavenScrapy.middlewares.ProxyMiddleware': 543,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'MavenScrapy.pipelines.MavenscrapyPipeline': 300,
}

# MySQL 数据库参数配置 (MySQL connection parameters)
MYSQL_HOST = '127.0.0.1'
MYSQL_PORT = 3306
MYSQL_USER = 'root'
MYSQL_PASSWORD = '<PASSWORD>'
MYSQL_DBNAME = 'test'

RETRY_ENABLED = True  # 打开重试开关 (enable retries)
RETRY_TIMES = 30  # 重试次数 (retry count)
DOWNLOAD_TIMEOUT = 60  # 60s超时 (60s timeout)
RETRY_HTTP_CODES = [429, 404, 403]  # 重试 (HTTP codes that trigger a retry)

# 爬取网站最大允许的深度(depth)值。如果为0,则没有限制。
DEPTH_LIMIT = 0
# 爬取时,0表示深度优先Lifo(默认);1表示广度优先FiFo

# 后进先出,深度优先 (LIFO queues -> depth-first crawl)
DEPTH_PRIORITY = 0
SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'

# 先进先出,广度优先 (FIFO queues -> breadth-first crawl)
# DEPTH_PRIORITY = 1
# SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleFifoDiskQueue'
# SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.FifoMemoryQueue'

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
conda.recipe/build_and_upload_conda_package.py | xinbinhuang/featuretools | 1 | 12765208 | <gh_stars>1-10
import subprocess
def build_docker_img(img_id, no_cache=False):
    """Build a docker image tagged ``img_id`` from the current directory.

    :param img_id: tag for the resulting image.
    :param no_cache: when True, pass ``--no-cache`` to ``docker build``.
    """
    cmd = ["docker", "build"]
    if no_cache:
        cmd.append("--no-cache")
    cmd.extend(["-t", img_id, "."])

    # remove so we can catch an error more easily
    # in the case where the build command fails
    subprocess.run(['docker', 'rmi', '-f', img_id])

    subprocess.run(cmd)
if __name__ == '__main__':
    import argparse
    import getpass
    import sys
    import os

    parser = argparse.ArgumentParser(description='Test locally')
    parser.add_argument('--no-cache', default=False, action='store_true', help='build docker image without cache')
    parser.add_argument('--no-build', default=False, action='store_true', help='Do not build the docker image')
    parser.add_argument('--img', default="conda_featuretools_build", type=str, help='docker image to use')
    parser.add_argument('--username', type=str, help='Anaconda username')
    parser.add_argument('--password', help="<PASSWORD>. If this is not given, you will be prompted")
    args = parser.parse_args()

    # Prompt interactively (on stderr, so stdout stays clean) when the
    # password was not supplied on the command line.
    password = args.password
    if not password:
        password = getpass.getpass(stream=sys.stderr)

    img_id = args.img
    if not args.no_build:
        build_docker_img(img_id, args.no_cache)

    # Mount the repository root into the container and run the build
    # script with the Anaconda credentials.
    featuretools_folder = os.path.dirname(os.getcwd())
    run_cmd = ["docker",
               "run",
               "-v", featuretools_folder + ":/featuretools/",
               '-i',
               '--entrypoint', '/bin/bash',
               img_id,
               "-c",
               "/featuretools/conda.recipe/build_featuretools.sh {} '{}'".format(args.username,
                                                                                 password)]

    # Stream container output to stdout while also logging it to test.log.
    with open('test.log', 'w') as f:
        process = subprocess.Popen(run_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        for line in iter(process.stdout.readline, b''):
            sys.stdout.buffer.write(line)
            f.write(line.decode(sys.stdout.encoding))
| 2.578125 | 3 |
CodeForces/A2OJ Ladder/demo5.py | dimitrov-dimitar/competitive-programming | 0 | 12765209 | <filename>CodeForces/A2OJ Ladder/demo5.py
# Print the integers 0 through 99999, one per line.
for counter in range(100000):
    print(counter)
| 1.828125 | 2 |
deeptools/plotPCA.py | gartician/deepTools | 351 | 12765210 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
from deeptools import cm # noqa: F401
from deeptools.correlation import Correlation
from deeptools.parserCommon import writableFile
from deeptools._version import __version__
def parse_arguments(args=None):
    # Build the plotPCA parser: shared correlation options plus the
    # program description/epilog.
    basic_args = plotCorrelationArgs()
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""
Tool for generating a principal component analysis (PCA)
plot from multiBamSummary or multiBigwigSummary output. By default, the loadings for each sample in each principal component is plotted. If the data is transposed, the projections of each sample on the requested principal components is plotted instead.

Detailed help:

  plotPCA -h

""",
        epilog='example usages:\n'
               'plotPCA -in coverages.npz -o pca.png\n\n'
               ' \n\n',
        parents=[basic_args, ])

    return parser
def plotCorrelationArgs():
    # Declares all plotPCA command-line options; returned as a parent
    # parser so parse_arguments() can attach program-level metadata.
    parser = argparse.ArgumentParser(add_help=False)
    required = parser.add_argument_group('Required arguments')

    # define the arguments
    required.add_argument('--corData', '-in',
                          metavar='FILE',
                          help='Coverage file (generated by multiBamSummary or multiBigwigSummary)',
                          required=True)

    optional = parser.add_argument_group('Optional arguments')
    optional.add_argument('--plotFile', '-o',
                          help='File name to save the plot to. '
                          'The extension determines the file format. '
                          'For example: '
                          'pca.pdf will save the PCA plot in PDF format. '
                          'The available options are: .png, '
                          '.eps, .pdf and .svg. If this option is omitted, then you MUST specify --outFileNameData',
                          type=writableFile,
                          metavar='FILE')

    optional.add_argument('--labels', '-l',
                          metavar='sample1 sample2',
                          help='User defined labels instead of default labels from '
                          'file names. '
                          'Multiple labels have to be separated by spaces, e.g. '
                          '--labels sample1 sample2 sample3',
                          nargs='+')

    optional.add_argument('--plotTitle', '-T',
                          help='Title of the plot, to be printed on top of '
                          'the generated image. Leave blank for no title. (Default: %(default)s)',
                          default='')

    optional.add_argument('--plotFileFormat',
                          metavar='FILETYPE',
                          help='Image format type. If given, this option '
                          'overrides the image format based on the plotFile '
                          'ending. The available options are: png, '
                          'eps, pdf, plotly and svg.',
                          choices=['png', 'pdf', 'svg', 'eps', 'plotly'])

    optional.add_argument('--plotHeight',
                          help='Plot height in cm. (Default: %(default)s)',
                          type=float,
                          default=10)

    optional.add_argument('--plotWidth',
                          help='Plot width in cm. The minimum value is 1 cm. (Default: %(default)s)',
                          type=float,
                          default=10)

    optional.add_argument('--outFileNameData',
                          metavar='file.tab',
                          type=writableFile,
                          help='File name to which the data underlying the plot '
                          'should be saved, such as myPCA.tab. For untransposed '
                          'data, this is the loading per-sample and PC as well '
                          'as the eigenvalues. For transposed data, this is the '
                          'rotation per-sample and PC and the eigenvalues. The '
                          'projections are truncated to the number of '
                          'eigenvalues for transposed data.')

    optional.add_argument('--ntop',
                          help='Use only the top N most variable rows in the '
                          'original matrix. Specifying 0 will result in all '
                          'rows being used. If the matrix is to be transposed, '
                          'rows with 0 variance are always excluded, even if a '
                          'values of 0 is specified. The default is 1000. (Default: %(default)s)',
                          type=int,
                          default=1000)

    optional.add_argument('--PCs',
                          help='The principal components to plot. If specified, '
                          'you must provide two different integers, greater '
                          'than zero, separated by a space. An example (and the default) is "1 2". (Default: %(default)s)',
                          type=int,
                          nargs=2,
                          default=[1, 2])

    optional.add_argument('--log2',
                          help='log2 transform the datapoints prior to computing '
                          'the PCA. Note that 0.01 is added to all values to '
                          'prevent 0 values from becoming -infinity. Using this '
                          'option with input that contains negative values will '
                          'result in an error.',
                          action='store_true')

    optional.add_argument('--colors',
                          metavar="COLORS",
                          nargs='+',
                          help="A list of colors for the symbols. Color names and html hex string (e.g., #eeff22) are accepted. The color names should be space separated. For example, --colors red blue green. If not specified, the symbols will be given automatic colors.")

    optional.add_argument('--markers',
                          metavar="MARKERS",
                          nargs='+',
                          help="A list of markers for the symbols. (e.g., '<','>','o') are accepted. The marker values should be space separated. For example, --markers 's' 'o' 's' 'o'. If not specified, the symbols will be given automatic shapes.")

    optional.add_argument('--version', action='version',
                          version='%(prog)s {}'.format(__version__))

    # --transpose and --rowCenter are mutually exclusive: row centering
    # is meaningless once the matrix is transposed.
    optionalEx = optional.add_mutually_exclusive_group()
    optionalEx.add_argument('--transpose',
                            help='Perform the PCA on the transposed matrix, (i.e., on the '
                            'matrix where rows are samples and columns are '
                            'bins/features. This then matches what is typically '
                            'done in R.',
                            action='store_true')

    optionalEx.add_argument('--rowCenter',
                            help='When specified, each row (bin, gene, etc.) '
                            'in the matrix is centered at 0 before the PCA is '
                            'computed. This is useful only if you have a strong '
                            'bin/gene/etc. correlation and the resulting '
                            'principal component has samples stacked vertically. This option is not applicable if --transpose is specified.',
                            action='store_true')

    return parser
def main(args=None):
    """Entry point for plotPCA: validate options, run the PCA and write
    the plot and/or the underlying data table.

    :param args: optional argument list (defaults to sys.argv).
    """
    args = parse_arguments().parse_args(args)

    # At least one output must be produced.
    if args.plotFile is None and args.outFileNameData is None:
        sys.exit("At least one of --plotFile and --outFileNameData must be specified!\n")

    if args.ntop < 0:
        sys.exit("The value specified for --ntop must be >= 0!\n")

    if args.PCs[0] == args.PCs[1]:
        sys.exit("You must specify different principal components!\n")

    if args.PCs[0] <= 0 or args.PCs[1] <= 0:
        sys.exit("The specified principal components must be at least 1!\n")

    corr = Correlation(args.corData,
                       labels=args.labels,)
    corr.rowCenter = args.rowCenter
    corr.transpose = args.transpose
    corr.ntop = args.ntop
    corr.log2 = args.log2
    Wt, eigenvalues = corr.plot_pca(args.plotFile,
                                    PCs=args.PCs,
                                    plot_title=args.plotTitle,
                                    image_format=args.plotFileFormat,
                                    plotWidth=args.plotWidth,
                                    plotHeight=args.plotHeight,
                                    cols=args.colors,
                                    marks=args.markers)

    if args.outFileNameData is not None:
        # Use a context manager so the file is closed even if a write
        # fails (previously open()/close() with no try/finally).
        with open(args.outFileNameData, "w") as of:
            of.write("#plotPCA --outFileNameData\n")
            of.write("Component\t{}\tEigenvalue\n".format("\t".join(corr.labels)))
            n = eigenvalues.shape[0]
            for i in range(n):
                of.write("{}\t{}\t{}\n".format(i + 1, "\t".join(["{}".format(x) for x in Wt[i, :]]), eigenvalues[i]))
| 2.25 | 2 |
barbucket/contracts.py | ajmal017/barbucket | 1 | 12765211 | import sqlite3
import time
from bs4 import BeautifulSoup
from numpy.core import numeric
import requests
import logging
import enlighten
from barbucket.database import DatabaseConnector
from barbucket.tools import GracefulExiter
class ContractsDatabase():
    """CRUD helpers for the 'contracts' table of the application database."""

    def __init__(self):
        pass

    def create_contract(self, contract_type_from_listing, exchange_symbol,
                        broker_symbol, name, currency, exchange):
        """Insert a new contract row; all parameters are stored verbatim."""
        logging.debug(f"Creating new contract {contract_type_from_listing}_{exchange}_{broker_symbol}_{currency}.")

        db_connector = DatabaseConnector()
        conn = db_connector.connect()
        cur = conn.cursor()
        cur.execute("""INSERT INTO contracts (
                        contract_type_from_listing,
                        exchange_symbol,
                        broker_symbol,
                        name,
                        currency,
                        exchange)
                        VALUES (?, ?, ?, ?, ?, ?)""", (
                        contract_type_from_listing,
                        exchange_symbol,
                        broker_symbol,
                        name,
                        currency,
                        exchange))
        conn.commit()
        cur.close()
        db_connector.disconnect(conn)

    def get_contracts(self, filters={}, return_columns=[]):
        """
        Query the 'all_contract_info' view.

        :param filters: column -> value equality filters; the string
            "NULL" selects rows where the column IS NULL.
        :param return_columns: columns to select (all when empty).
        :returns: a list of sqlite3.Row objects

        NOTE(review): mutable default arguments kept for interface
        compatibility; both defaults are only read, never mutated.
        WARNING: the query is assembled by string concatenation, so
        filter keys/values must come from trusted, programmatic input
        only (SQL-injection risk otherwise).
        """
        # Prepare query to get requested values from db
        query = "SELECT * FROM all_contract_info"

        if len(return_columns) > 0:
            cols = ", ".join(return_columns)
            query = query.replace("*", cols)

        if len(filters) > 0:
            query += " WHERE "
            for key, value in filters.items():
                if value == "NULL":
                    query += (key + " IS " + str(value) + " and ")
                elif isinstance(value, str):
                    query += (key + " = '" + str(value) + "' and ")
                elif isinstance(value, (int, float)):
                    query += (key + " = " + str(value) + " and ")
            query = query[:-5]  # remove trailing 'and'
        query += ";"

        # Get requested values from db
        logging.debug(f"Getting contracts from databse with query: {query}")
        db_connector = DatabaseConnector()
        conn = db_connector.connect()
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        cur.execute(query)
        contracts = cur.fetchall()
        conn.commit()
        cur.close()
        db_connector.disconnect(conn)
        return contracts

    def delete_contract(self, exchange, symbol, currency):
        """Delete the contract identified by (symbol, exchange, currency)."""
        db_connector = DatabaseConnector()
        conn = db_connector.connect()
        cur = conn.cursor()
        cur.execute("""DELETE FROM contracts
                        WHERE (broker_symbol = ?
                            AND exchange = ?
                            AND currency = ?);""",
                    (symbol,
                     exchange,
                     currency))
        conn.commit()
        cur.close()
        db_connector.disconnect(conn)

    def delete_contract_id(self, contract_id):
        """Delete the contract with the given primary key.

        :param contract_id: primary key of the row to delete.
        """
        db_connector = DatabaseConnector()
        conn = db_connector.connect()
        cur = conn.cursor()
        # BUG FIX: sqlite3 requires bind parameters as a sequence.
        # Passing the bare value only "works" for 1-character strings and
        # raises ProgrammingError for ints; wrap it in a 1-tuple.
        cur.execute("""DELETE FROM contracts
                        WHERE contract_id = ?;""",
                    (contract_id,))
        conn.commit()
        cur.close()
        db_connector.disconnect(conn)
class IbExchangeListings():
    """Scrapers for Interactive Brokers' public exchange-listing pages."""

    def __init__(self):
        pass

    def read_ib_exchange_listing_singlepage(self, ctype, exchange):
        # Fetch an exchange listing that fits on a single page and return
        # it as a list of contract dicts.
        url = f"https://www.interactivebrokers.com/en/index.php?f=567"\
            + f"&exch={exchange}"
        html = requests.get(url).text

        # Correct error from IB: stray closing </a> tags without a
        # matching opener break the parser, so strip them line by line.
        old_lines = html.splitlines()
        new_lines = []
        corrections = 0
        for line in old_lines:
            if (' <td align="left" valign="middle">' in line)\
                    and ("href" not in line):
                line = line.replace("</a>","")
                corrections += 1
            new_lines.append(line)
        html = "".join(new_lines)
        if corrections == 0:
            logging.info(f"IB error for singlepage listings no longer present.")

        soup = BeautifulSoup(html, 'html.parser')
        tables = soup.find_all('table', \
            class_='table table-striped table-bordered')
        # The contract table is the third striped table on the page.
        rows = tables[2].tbody.find_all('tr')

        website_data = []
        for row in rows:
            cols = row.find_all('td')
            row_dict = {
                'type': ctype,
                'broker_symbol': cols[0].text.strip(),
                'name': cols[1].text.strip(),
                'exchange_symbol': cols[2].text.strip(),
                'currency': cols[3].text.strip(),
                'exchange': exchange.upper()}
            website_data.append(row_dict)
        return website_data

    def read_ib_exchange_listing_paginated(self, ctype, exchange):
        """
        Returns list of contracts
        Returns -1 if aborted by user
        """
        # NOTE(review): the docstring says -1 on abort, but the code
        # actually returns [] — confirm which is intended.
        website_data = []
        page = 1

        # Setup progress bar
        # manager = enlighten.get_manager()
        # pbar = manager.counter(total=len(contracts), desc="Contracts", unit="contracts")

        exiter = GracefulExiter()

        while True:
            # Get website
            logging.info(f"Scraping IB exchange listing for {exchange}, page {page}.")
            url = f"https://www.interactivebrokers.com/en/index.php?f=2222"\
                + f"&exch={exchange}&showcategories=STK&p=&cc=&limit=100"\
                + f"&page={page}"
            html = requests.get(url).text

            # Correct error from IB
            if "(click link for more details)</span></th>\n </th>" in html:
                html = html.replace(\
                    "(click link for more details)</span></th>\n </th>\n",\
                    "(click link for more details)</span></th>\n")
            else:
                logging.info(f"IB error for paginated listings no longer present.")

            # Parse HTML
            soup = BeautifulSoup(html, 'html.parser')
            tables = soup.find_all('table', \
                class_='table table-striped table-bordered')
            rows = tables[2].tbody.find_all('tr')

            # Empty table -> End is reached
            if rows == []:
                return website_data

            # Add data from this page to 'website_data'
            for row in rows:
                cols = row.find_all('td')
                row_dict = {
                    'type': ctype,
                    'broker_symbol': cols[0].text.strip(),
                    'name': cols[1].text.strip(),
                    'exchange_symbol': cols[2].text.strip(),
                    'currency': cols[3].text.strip(),
                    'exchange': exchange.upper()}
                website_data.append(row_dict)

            # Check for abort signal
            if exiter.exit():
                logging.info(f"Exiting on user request.")
                return []

            # Prepare for next page
            page += 1
            time.sleep(3) #show some mercy to webserver
| 2.703125 | 3 |
src/python/WMCore/WMBS/MySQL/Subscriptions/MarkFinishedSubscriptions.py | khurtado/WMCore | 21 | 12765212 | <filename>src/python/WMCore/WMBS/MySQL/Subscriptions/MarkFinishedSubscriptions.py<gh_stars>10-100
#!/usr/bin/env python
"""
_MarkFinishedSubscriptions_
MySQL implementation of Subscriptions.MarkFinishedSubscriptions
Created on Aug 29, 2012
@author: dballest
"""
from time import time
from WMCore.Database.DBFormatter import DBFormatter
class MarkFinishedSubscriptions(DBFormatter):
    """
    Marks the given subscriptions as finished, and updates the timestamp
    """
    updateSQL = """UPDATE wmbs_subscription
                   SET finished = :finished, last_update = :timestamp
                   WHERE id = :id"""

    def execute(self, ids, finished = True, conn = None,
                transaction = False):
        """
        _execute_

        Flip the 'finished' flag for every given subscription id and
        stamp the modification time.
        """
        flag = 1 if finished else 0

        # Accept either a single id or a list of ids.
        if not isinstance(ids, list):
            ids = [ids]

        binds = [{'id': subId, 'finished': flag, 'timestamp': int(time())}
                 for subId in ids]

        if binds:
            self.dbi.processData(self.updateSQL, binds, conn = conn,
                                 transaction = transaction)
        return
| 2.5 | 2 |
funded_stat.py | far-from-normal/Kiva-Insight | 2 | 12765213 | import pandas as pd
import numpy as np
from pathlib import Path
from data_params import Data
from data_utils import (
preprocess_train_df,
fit_stats,
transform_stats,
save_transformed_stats,
)
def summarize_stats(csv_name, dir_to_data, stat_name_select):
    """Compare one summary statistic between funded and expired loans.

    Reads ``dir_to_data/csv_name`` (which must contain a STATUS column
    holding 'funded'/'expired'), runs ``DataFrame.describe`` per status
    group, extracts the row named ``stat_name_select`` (e.g. 'mean') and
    writes the merged comparison to ``dir_to_data/funded_<csv_name>``.

    :returns: DataFrame with columns FeatureName, FeatureValueFunded,
        FeatureValueExpired.
    """
    path_to_input_df = Path(dir_to_data, csv_name)
    path_to_output_df = Path(dir_to_data, "funded_" + csv_name)

    df = pd.read_csv(path_to_input_df)

    # Duplicated funded/expired handling factored into one helper; the
    # dead notebook-style bare expressions were removed.
    funded_stat = _status_stat(df, "funded", stat_name_select, "FeatureValueFunded")
    expired_stat = _status_stat(df, "expired", stat_name_select, "FeatureValueExpired")

    funded_stat = funded_stat.merge(expired_stat, on="FeatureName")
    funded_stat.to_csv(path_to_output_df, index=False)
    return funded_stat


def _status_stat(df, status, stat_name, value_column):
    """Extract one describe() statistic for rows with the given STATUS.

    :returns: two-column frame: FeatureName / ``value_column``.
    """
    subset = df.loc[df["STATUS"] == status].drop(columns=["STATUS"])
    described = subset.describe(include="all")
    stat = described.loc[stat_name].to_frame()
    stat.reset_index(level=0, inplace=True)
    stat.columns = ["FeatureName", value_column]
    return stat
# %% ###########
# Build per-feature summary statistics for the training data and compare
# funded vs. expired loans for the tags, loan-use and description features.
data_par = Data()
cols_process = data_par.cols_process
cols_output = data_par.cols_output
valid_status = data_par.valid_status
dir_to_saved_data = data_par.dir_to_saved_data
dir_to_query_data = data_par.dir_to_query_data
path_to_training_data = data_par.path_to_training_data
stat_name_select = data_par.stat_name_select

predict = False

csv_name_tags = "stats_tags_df.csv"
csv_name_loanuse = "stats_loanuse_df.csv"
csv_name_desc = "stats_desc_df.csv"

df = pd.read_csv(path_to_training_data, usecols=cols_process)
df = preprocess_train_df(df, valid_status, cols_output, predict)

# Fit and apply the statistics transformers, then persist each result.
fit_stats(dir_to_saved_data, df)
stats_tags_df, stats_loanuse_df, stats_desc_df = transform_stats(dir_to_saved_data, df)
save_transformed_stats(dir_to_saved_data, stats_tags_df, csv_name_tags)
save_transformed_stats(dir_to_saved_data, stats_loanuse_df, csv_name_loanuse)
save_transformed_stats(dir_to_saved_data, stats_desc_df, csv_name_desc)

# Compare the selected statistic between funded and expired loans.
funded_df_tags = summarize_stats(csv_name_tags, dir_to_saved_data, stat_name_select)
funded_df_loanuse = summarize_stats(
    csv_name_loanuse, dir_to_saved_data, stat_name_select
)
funded_df_desc = summarize_stats(csv_name_desc, dir_to_saved_data, stat_name_select)

# NOTE(review): the bare expressions below are no-ops outside a notebook.
funded_df_tags
funded_df_tags.to_csv(Path(dir_to_saved_data, "funded_df_tags.csv"), index=False)
funded_df_loanuse
funded_df_loanuse.to_csv(Path(dir_to_saved_data, "funded_df_loanuse.csv"), index=False)
funded_df_desc
funded_df_desc.to_csv(Path(dir_to_saved_data, "funded_df_desc.csv"), index=False)
| 2.90625 | 3 |
uol_os_reports.py | LCBRU/reporter | 0 | 12765214 | <gh_stars>0
#!/usr/bin/env python3
# Thin entry point: importing reporter.uol_os_reports presumably registers
# the UOL OS reports with the shared runner — confirm against runner.run.
import reporter.uol_os_reports
from runner import run

run()
| 0.996094 | 1 |
OPMS_v3-dev3.1/apps/online_management/models.py | litiian/asyncstudy | 0 | 12765215 | <gh_stars>0
from django.db import models
from users.models import UserProfile
from host_management.models import DomainNameResolveInfo, ProjectInfo
######################################
# Trouble tag table
######################################
class TroubleTag(models.Model):
    """Tag used to categorise trouble (incident) records."""
    # Display name of the tag category.
    name = models.CharField(verbose_name='分类名称', max_length=20)

    class Meta:
        verbose_name = '故障标签表'
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.name
######################################
# Trouble (incident) record table
######################################
class TroubleRecord(models.Model):
    """An incident record: what failed, when, who handled it and how."""
    # Platform on which the incident occurred.
    name = models.CharField(verbose_name='平台名称', max_length=20)
    url = models.ForeignKey(DomainNameResolveInfo, verbose_name='域名', related_name='tr_url', blank=True, null=True, on_delete=models.CASCADE)
    project = models.ForeignKey(ProjectInfo, verbose_name='项目', related_name='tr_project', on_delete=models.CASCADE)
    # Short description of the event and its cause.
    event = models.CharField(verbose_name='事件和原因', max_length=50)
    tags = models.ManyToManyField(TroubleTag, verbose_name='标签')
    event_time = models.DateTimeField(verbose_name='故障时间')
    handle_user = models.ManyToManyField(UserProfile, verbose_name='处理人')
    handle_way = models.CharField(verbose_name='处理办法', max_length=100)
    handle_time = models.DateTimeField(verbose_name='处理时间')
    # 1 = handled, 2 = unhandled, 3 = other.
    handle_result = models.PositiveSmallIntegerField(verbose_name='处理结果', choices=((1, '已处理'), (2, '未处理'), (3, '其它')), default=1)
    desc = models.CharField(verbose_name='备注', max_length=100, blank=True, null=True)
    # 0 = closed, 1 = open.
    status = models.PositiveSmallIntegerField(verbose_name='状态', choices=((0, '关闭'), (1, '开启')), default=1)

    class Meta:
        verbose_name = '故障表'
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.event
######################################
# Deployment (release) record table
######################################
class DeployRecord(models.Model):
    """A release/deployment record: who requested it, who ran it, outcome."""
    # Platform the deployment targets.
    name = models.CharField(verbose_name='平台名称', max_length=20)
    url = models.ForeignKey(DomainNameResolveInfo, verbose_name='域名', related_name='dep_url', blank=True, null=True, on_delete=models.CASCADE)
    project = models.ForeignKey(ProjectInfo, verbose_name='项目', related_name='dep_project', on_delete=models.CASCADE)
    deploy_time = models.DateTimeField(verbose_name='上线时间')
    # Free-text requester name (not a FK to UserProfile, unlike deploy_user).
    request_user = models.CharField(verbose_name='发起人', max_length=20)
    deploy_user = models.ForeignKey(UserProfile, verbose_name='处理人', related_name='dep_user', on_delete=models.CASCADE)
    # 1 = success, 2 = failure.
    deploy_result = models.PositiveSmallIntegerField(verbose_name='上线结果', choices=((1, '成功'), (2, '失败')), default=1)
    desc = models.CharField(verbose_name='备注', max_length=100, blank=True, null=True)
    # 0 = closed, 1 = open.
    status = models.PositiveSmallIntegerField(verbose_name='状态', choices=((0, '关闭'), (1, '开启')), default=1)

    class Meta:
        verbose_name = '上线表'
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.name
######################################
# Operations event table
######################################
class OpsEvent(models.Model):
    """A scheduled/ongoing operations event with a time window and operators."""
    name = models.CharField(verbose_name='事件名称', max_length=100)
    start_time = models.DateTimeField(verbose_name='开始时间')
    # Nullable: the event may still be in progress.
    stop_time = models.DateTimeField(verbose_name='结束时间', blank=True, null=True)
    op_user = models.ManyToManyField(UserProfile, verbose_name='处理人')
    desc = models.CharField(verbose_name='备注', max_length=100, blank=True, null=True)
    # 0 = closed, 1 = open.
    status = models.PositiveSmallIntegerField(verbose_name='状态', choices=((0, '关闭'), (1, '开启')), default=1)

    class Meta:
        verbose_name = '运维事件表'
        verbose_name_plural = verbose_name

    def __str__(self):
        return self.name
| 2 | 2 |
from django import forms
from .models import User
class MUserForm(forms.ModelForm):
    """ModelForm exposing the editable profile fields of ``User``."""

    class Meta:
        model = User
        fields = ['id', 'surname', 'first_name',
            'second_name', 'email', 'phone', 'address']
| 2.1875 | 2 |
from django.apps import AppConfig
class CarshopConfig(AppConfig):
    """Django application configuration for the ``carshop`` app."""
    name = 'carshop'
| 1.164063 | 1 |
algorithms/LSBMR.py | FaiZaman/Steganograph-app-y | 0 | 12765218 | <filename>algorithms/LSBMR.py
"""
Least Significant Bit Matching Revisited embedding & extraction algorithm
Embeds two message bits in a two-pixel block
The first bit is embedded in the LSB of the first pixel
The second bit is embedded in the output of a binary function f on both pixels
"""
import math
import random
from algorithms.LSBM import LSBM
from algorithms.PVD import PVD
from utility import message_to_binary, is_message_complete, integer_to_binary,\
binary_to_string, save_image, save_message
class LSBMR(LSBM, PVD):
    """Least Significant Bit Matching Revisited steganography.

    Embeds two message bits per two-pixel block: the first bit in the LSB
    of the first pixel, the second as the output of ``binary_function`` on
    both pixels.  Pixel-block traversal comes from PVD and the random
    +/-1 embedding step from LSBM.
    """
    def __init__(self, image, message, key, save_path):
        super().__init__(image, message, key, save_path)
        self.name = 'LSBMR'
        # initialise outliers for masking
        self.outliers = {}
        # generate the cases where masking the 2 LSBs does not give the same result
        # for two adjacent values, and value is value to add to fix this
        for j in range(1, 254):
            if 252 & j != 252 & (j + 1):
                self.outliers[j] = -1
            elif 252 & j != 252 & (j - 1):
                self.outliers[j] = 1
    # retrieves coordinates in image of first pixels in block
    def get_coordinates(self):
        """Return (y, x) coordinates of the first pixel of every usable block.

        Only pixels whose parity matches the block pattern and whose values
        (and partner's values) lie strictly inside (3, 252) are kept, so the
        +/-3 masking adjustments in embed_pixels cannot over/underflow.
        """
        pixels = []
        # loop through the image
        for y in range(0, self.height):
            for x in range(0, self.width):
                # either both odd or both even for first pixel in block
                if (y % 2 == 0 and x % 2 == 0) or (y % 2 != 0 and x % 2 != 0):
                    (next_x, next_y), _ = self.get_pixel_block(x, y)   # get next pixel coordinates
                    # remove out of bounds pixels (0+3, 255-3 for masking)
                    if 3 < self.image[y][x] < 252 and 3 < self.image[next_y][next_x] < 252:
                        pixels.append((y, x))
        return pixels
    # satisfies condition such that the LSB of the second message bit is result of the function
    def binary_function(self, a, b):
        """Return LSB (as '0'/'1' char) of floor(a/2) + b — the LSBMR pairing function."""
        value = math.floor(a/2) + b
        binary_value = integer_to_binary(value)
        return binary_value[-1]
    # computes the first stego pixel from LSBMR embedding
    def first_pixel_change(self, first_pixel, first_stego_pixel, value):
        """Apply +/-1 (or the outlier correction) to the first pixel of a block."""
        # if pixel is outlier do the operation as defined in outliers dict
        if first_pixel in self.outliers:
            first_stego_pixel += self.outliers[first_pixel]
        else:
            first_stego_pixel = first_pixel + value
        return first_stego_pixel
    # embeds message bits in pair of pixels as per LSBMR embedding
    def embed_pixels(self, first_pixel, second_pixel, message_index, hybrid):
        """Embed two message bits into the (first_pixel, second_pixel) block.

        Returns the pair of stego pixel values.
        NOTE(review): the ``hybrid`` parameter is unused here — presumably kept
        for signature compatibility with a subclass; confirm before removing.
        """
        # get inputs and convert
        first_msg_bit = self.message[message_index]
        second_msg_bit = self.message[message_index + 1]
        first_pixel_binary = integer_to_binary(first_pixel)
        first_stego_pixel, second_stego_pixel = first_pixel, second_pixel
        # LSBMR algorithm
        if first_msg_bit == first_pixel_binary[-1]:
            if second_msg_bit != self.binary_function(first_pixel, second_pixel):
                # if pixel is outlier do the operation as defined in outliers dict
                if second_pixel in self.outliers:
                    second_stego_pixel += self.outliers[second_pixel]
                else:
                    second_stego_pixel = self.random_increment_or_decrement(second_pixel)
            else:
                second_stego_pixel = second_pixel
            first_stego_pixel = first_pixel
        else:
            if second_msg_bit == self.binary_function(first_pixel - 1, second_pixel):
                first_stego_pixel = self.first_pixel_change(first_pixel, first_stego_pixel, -1)
            else:
                first_stego_pixel = self.first_pixel_change(first_pixel, first_stego_pixel, 1)
            second_stego_pixel = second_pixel
        # LSBMR adjustment for masking edges - if the adjustment offsets binary function
        # add or subtract 3 from first stego pixel to conserve this; keeping 6 MSBs same for masking
        if second_msg_bit != self.binary_function(first_stego_pixel, second_stego_pixel):
            if first_stego_pixel > first_pixel:
                first_stego_pixel = first_pixel + 3
            else:
                first_stego_pixel = first_pixel - 3
        return first_stego_pixel, second_stego_pixel
    # generates pixel path through image and sends pixels to be embedded with message data
    def embed_image(self):
        """Embed the whole message along a pseudorandom block path; save stego image.

        NOTE(review): random.sample reproducibility presumably relies on the RNG
        being seeded from ``self.key`` in a base class — confirm.
        """
        self.message = message_to_binary(self.message)
        message_index = 0
        message_length = len(self.message)
        if message_length > self.num_bytes:
            raise ValueError("The message is too large for the image.")
        # get the first pixel coordinates in blocks
        pixels = self.get_coordinates()
        num_pixels = len(pixels)
        # generate random path of pixel blocks based on seed
        path = random.sample(pixels, num_pixels)
        cover_image = self.image    # so image is not modified
        for (y, x) in path:
            # compute the two-pixel block and the coordinates of the next pixel
            next_coordinates, block = self.get_pixel_block(x, y)
            first_pixel, second_pixel = block[0], block[1]
            next_x, next_y = next_coordinates[0], next_coordinates[1]
            # use LSBMR embedding and output stego pixels
            first_stego_pixel, second_stego_pixel =\
                self.embed_pixels(first_pixel, second_pixel, message_index, hybrid=False)
            # reassign new stego pixels and increment message index
            cover_image[y][x] = first_stego_pixel
            cover_image[next_y][next_x] = second_stego_pixel
            message_index += 2
            if message_index == message_length:
                break
        # reassign, save, and return stego image
        stego_image = cover_image
        is_saved = save_image(self.save_path, self.image_name, self.time_string, stego_image)
        return is_saved
    # loops through image in the same order as when encoding and extracts message bits
    def extract(self):
        """Walk the same pseudorandom block path and recover the hidden message."""
        # initialise message and same pixel block pseudorandom embedding path
        binary_message = ""
        pixels = self.get_coordinates()
        num_pixels = len(pixels)
        path = random.sample(pixels, num_pixels)
        counter = 0
        # loop through image pixel blocks
        for (y, x) in path:
            # compute the two-pixel block and the coordinates of the next pixel
            next_coordinates, stego_block = self.get_pixel_block(x, y)
            first_stego_pixel, second_stego_pixel = stego_block[0], stego_block[1]
            next_x, next_y = next_coordinates[0], next_coordinates[1]
            # extract both bits from the pixel pair
            first_binary_pixel = integer_to_binary(first_stego_pixel)
            first_msg_bit = first_binary_pixel[-1]
            second_msg_bit = self.binary_function(first_stego_pixel, second_stego_pixel)
            # append to message
            binary_message += first_msg_bit + second_msg_bit
            # check every 5000 iterations if the message is in the extracted bits so far
            # in order to speed up the algorithm
            if counter % 5000 == 0:
                if is_message_complete(binary_message, self.delimiter):
                    break
            counter += 1
        # extract the original message, save to file, and return
        extracted_message, _ = binary_to_string(binary_message, self.delimiter)
        is_saved = save_message(self.save_path, self.time_string, extracted_message)
        return is_saved
| 2.96875 | 3 |
conans/test/conan_v2/conanfile/test_environment.py | matthiasng/conan | 6,205 | 12765219 | <gh_stars>1000+
import textwrap
from conans.client.tools.env import _environment_add
from conans.test.utils.conan_v2_tests import ConanV2ModeTestCase
class CollectLibsTestCase(ConanV2ModeTestCase):
    """Checks that Conan v2 mode rejects the deprecated CONAN_USERNAME and
    CONAN_CHANNEL environment variables during ``conan create``."""

    def test_conan_username(self):
        # CONAN_USERNAME must trigger a v2-incompatibility error.
        t = self.get_client()
        conanfile = textwrap.dedent("""
            from conans import ConanFile
            class Recipe(ConanFile):
                name = "name"
                version = "version"
            """)
        t.save({'conanfile.py': conanfile})
        with _environment_add({'CONAN_USERNAME': "user"}):
            t.run('create .', assert_error=True)
        self.assertIn("Conan v2 incompatible: Environment variable 'CONAN_USERNAME' is deprecated", t.out)

    def test_conan_channel(self):
        # CONAN_CHANNEL must trigger the error even when the recipe declares
        # a default_user.
        t = self.get_client()
        conanfile = textwrap.dedent("""
            from conans import ConanFile
            class Recipe(ConanFile):
                name = "name"
                version = "version"
                default_user = "user"
            """)
        t.save({'conanfile.py': conanfile})
        with _environment_add({'CONAN_CHANNEL': "user"}):
            t.run('create .', assert_error=True)
        self.assertIn("Conan v2 incompatible: Environment variable 'CONAN_CHANNEL' is deprecated", t.out)
| 2.4375 | 2 |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
from builtins import * # NOQA
standard_library.install_aliases() # NOQA
import unittest
from chainer import testing
import numpy as np
import basetest_dqn_like as base
import chainerrl
from chainerrl.agents.dqn import compute_value_loss
from chainerrl.agents.dqn import compute_weighted_value_loss
from chainerrl.agents.dqn import DQN
from basetest_training import _TestBatchTrainingMixin
class TestDQNOnDiscreteABC(
        _TestBatchTrainingMixin, base._TestDQNOnDiscreteABC):
    """DQN on the discrete ABC environment with the harness-supplied explorer."""

    def make_dqn_agent(self, env, q_func, opt, explorer, rbuf, gpu):
        return DQN(q_func, opt, rbuf, gpu=gpu, gamma=0.9, explorer=explorer,
                   replay_start_size=100, target_update_interval=100)
class TestDQNOnDiscreteABCBoltzmann(
        _TestBatchTrainingMixin, base._TestDQNOnDiscreteABC):
    """DQN on the discrete ABC environment with Boltzmann exploration."""

    def make_dqn_agent(self, env, q_func, opt, explorer, rbuf, gpu):
        # Deliberately replace the explorer supplied by the test harness.
        explorer = chainerrl.explorers.Boltzmann()
        return DQN(q_func, opt, rbuf, gpu=gpu, gamma=0.9, explorer=explorer,
                   replay_start_size=100, target_update_interval=100)
class TestDQNOnContinuousABC(
        _TestBatchTrainingMixin, base._TestDQNOnContinuousABC):
    """DQN on the continuous-action ABC environment."""

    def make_dqn_agent(self, env, q_func, opt, explorer, rbuf, gpu):
        return DQN(q_func, opt, rbuf, gpu=gpu, gamma=0.9, explorer=explorer,
                   replay_start_size=100, target_update_interval=100)
# Batch training with recurrent models is currently not supported
class TestDQNOnDiscretePOABC(base._TestDQNOnDiscretePOABC):
    """DQN on the partially-observable discrete ABC env (episodic updates,
    no batch-training mixin — recurrent models don't support it)."""

    def make_dqn_agent(self, env, q_func, opt, explorer, rbuf, gpu):
        return DQN(q_func, opt, rbuf, gpu=gpu, gamma=0.9, explorer=explorer,
                   replay_start_size=100, target_update_interval=100,
                   episodic_update=True)
def _huber_loss_1(a):
if abs(a) < 1:
return 0.5 * a ** 2
else:
return abs(a) - 0.5
@testing.parameterize(
    *testing.product({
        'batch_accumulator': ['mean', 'sum'],
        'clip_delta': [True, False],
    })
)
class TestComputeValueLoss(unittest.TestCase):
    """Checks compute_value_loss / compute_weighted_value_loss against a
    hand-computed Huber / squared loss for every combination of
    batch_accumulator and clip_delta."""

    def setUp(self):
        # Fixed predictions (y) and targets (t) plus the ground-truth
        # element-wise losses computed in pure Python.
        self.y = np.asarray([1.0, 2.0, 3.0, 4.0], dtype='f')
        self.t = np.asarray([2.1, 2.2, 2.3, 2.4], dtype='f')
        if self.clip_delta:
            self.gt_losses = np.asarray(
                [_huber_loss_1(a) for a in self.y - self.t])
        else:
            self.gt_losses = np.asarray(
                [0.5 * a ** 2 for a in self.y - self.t])

    def test_not_weighted(self):
        """Unweighted loss matches the mean/sum of the ground-truth losses."""
        loss = compute_value_loss(
            self.y, self.t, clip_delta=self.clip_delta,
            batch_accumulator=self.batch_accumulator).array
        if self.batch_accumulator == 'mean':
            gt_loss = self.gt_losses.mean()
        else:
            gt_loss = self.gt_losses.sum()
        self.assertAlmostEqual(loss, gt_loss, places=5)

    def test_uniformly_weighted(self):
        """All-ones weights must reproduce the unweighted result."""
        # Uniform weights
        w1 = np.ones(self.y.size, dtype='f')
        loss_w1 = compute_weighted_value_loss(
            self.y, self.t, clip_delta=self.clip_delta,
            batch_accumulator=self.batch_accumulator,
            weights=w1).array
        if self.batch_accumulator == 'mean':
            gt_loss = self.gt_losses.mean()
        else:
            gt_loss = self.gt_losses.sum()
        self.assertAlmostEqual(loss_w1, gt_loss, places=5)

    def test_randomly_weighted(self):
        """Random weights scale each element-wise loss before accumulation."""
        # Random weights
        wu = np.random.uniform(low=0, high=2, size=self.y.size).astype('f')
        loss_wu = compute_weighted_value_loss(
            self.y, self.t, clip_delta=self.clip_delta,
            batch_accumulator=self.batch_accumulator,
            weights=wu).array
        if self.batch_accumulator == 'mean':
            gt_loss = (self.gt_losses * wu).mean()
        else:
            gt_loss = (self.gt_losses * wu).sum()
        self.assertAlmostEqual(loss_wu, gt_loss, places=5)
| 2.0625 | 2 |
import csv
import os

# Generates a synthetic per-user CSV dataset: one file per user, each with a
# header row of feature names and rows of "user_feature_index" values.
# (Previously runnable via execfile from the author's local checkout.)

# Number of rows per user file.
# NOTE(review): the row loop below uses range(1, ROW_NUM) and therefore
# writes ROW_NUM - 1 data rows -- confirm whether 99 or 100 is intended
# before changing it.
ROW_NUM = 100
users_num = 100
features_num = 20

# Output directory; os.path.join keeps this portable (the original built
# paths with hard-coded backslashes, which are not separators on POSIX).
directory = "data_sets"
if not os.path.exists(directory):
    os.makedirs(directory)

users = ['user' + str(i) for i in range(users_num)]
features = ['feature' + str(i) for i in range(features_num)]

for user in users:
    # 'w' truncates any existing file for this user.
    with open(os.path.join(directory, user + '.csv'), 'w') as csvfile:
        writer = csv.DictWriter(csvfile, delimiter=',', lineterminator='\n',
                                fieldnames=features)
        writer.writeheader()
        for i in range(1, ROW_NUM):
            featDic = {feature: user + '_' + feature + '_' + str(i)
                       for feature in features}
            writer.writerow(featDic)
watchbot_progress/utils.py | mapbox/watchbot-progress-py | 1 | 12765222 | <reponame>mapbox/watchbot-progress-py
import json
from boto3.session import Session as boto3_session
def chunker(iterable, n):
    """
    Yield successive slices of *iterable*, each of length at most *n*
    (the final slice may be shorter).
    """
    for offset in range(0, len(iterable), n):
        yield iterable[offset:offset + n]
def aws_send_message(message, topic, subject=None, client=None):
    """
    Publish *message* (JSON-encoded) as an SNS message to *topic*.

    :param message: JSON-serialisable payload.
    :param topic: SNS topic/target ARN to publish to.
    :param subject: optional message subject.
    :param client: optional boto3 SNS client; a new one is created per call
        when omitted.
    :return: the boto3 ``publish`` response dict.
    """
    if not client:
        session = boto3_session()
        client = session.client('sns')
    return client.publish(
        Message=json.dumps(message),
        Subject=subject,
        TargetArn=topic)
def sns_worker(messages, topic, subject=None):
    """
    Publish a batch of messages to *topic*, reusing a single SNS client.

    :return: True on completion; boto3 errors propagate to the caller.
    """
    session = boto3_session()
    client = session.client('sns')
    for message in messages:
        aws_send_message(message, topic, subject=subject, client=client)
    return True
| 2.484375 | 2 |
snake/utilities.py | owenjaques/kuhmilch | 0 | 12765223 | <reponame>owenjaques/kuhmilch
import json
from enum import Enum
import numpy as np
# TODO: create did collide with self function
# TODO: create did collide with wall function
# TODO: create did kill other snake function
# TODO: create did eat food function
# TODO: move this to a seperate 'constants' file
# Cell-state codes written into the numpy game map built by get_map().
FREE = 0   # empty cell
FOOD = 1   # food item
SNAKE = 2  # any snake body segment (including our own body)
HEAD = 3   # our snake's head
# TODO: refactor / remove this
class GameSaver:
def __init__(self):
self.game_data = []
def save_move(self, data):
self.game_data.append(data)
def save_game_to_file(self):
try:
id = self.game_data[0]['game']['id']
except:
print('Game data is empty. Not saving to file.')
try:
with open('game_data/' + id +'.json', 'x') as f:
json.dump(self.game_data, f)
except Exception as e:
print('Error saving game data to file.')
print(e)
def get_map(data):
    """Build a square 2-D grid of cell-state codes from a /move payload dict."""
    size = data['board']['width']
    grid = np.zeros((size, size), dtype=int)

    # Mark every food cell.
    for item in data['board']['food']:
        grid[item['y']][item['x']] = FOOD

    # Mark every snake segment on the board (our own body included).
    for snake in data['board']['snakes']:
        for segment in snake['body']:
            grid[segment['y']][segment['x']] = SNAKE

    # Our head overwrites whatever code was already at its cell.
    head = data['you']['body'][0]
    grid[head['y']][head['x']] = HEAD
    return grid
def print_map(game_map):
    """Print *game_map* with the y axis flipped so output matches the screen."""
    flipped = np.flip(game_map, 0)
    print(flipped)
def did_die(data):
    """Return True when our snake is no longer among the board's live snakes."""
    return data['you'] not in data['board']['snakes']
from .vote_module import VoteModule
from .deform_conv_layers import DeformConvBlock,ModulatedDeformConvBlock

# Public API of the model_utils package.
__all__ = ['VoteModule','DeformConvBlock','ModulatedDeformConvBlock']
| 1.125 | 1 |
# Copyright (c) 2020 Broadcom.
# The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
#
# This program and the accompanying materials are made
# available under the terms of the Eclipse Public License 2.0
# which is available at https://www.eclipse.org/legal/epl-2.0/
#
# SPDX-License-Identifier: EPL-2.0
#
# Contributors:
# Broadcom, Inc. - initial API and implementation
import json
import os
import pyperclip
from robot.api.deco import keyword
from selenium.common.exceptions import NoSuchElementException, WebDriverException, TimeoutException
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from inc.cfg.env_constants import TEST_FILES_DIR
from inc.exceptions.custom_timeout_exception import CustomTimeoutException
from inc.exceptions.element_not_found_exception import ElementNotFoundException
from inc.exceptions.general_exception import GeneralException
from inc.helpers import highlight
from inc.theia.main_menu import MainMenu
from inc.theia.ui import UI
from lib.common_engine import CommonEngine
from inc.decorators.wait_till_exist import WaitTillExist
from inc.theia import constants, lsp_constants
from lib.monako_editor import MonakoEditor
class FileExplorer(CommonEngine):
def __init__(self, url=None):
super(FileExplorer, self).__init__(url)
def is_file_explorer_visible(self):
panel_state = explorer_state = constants.STATE_HIDDEN
theia_left_panel = self.find_it(UI.get_theia_left_panel_locator())
if constants.THEIA_ELEMENT_COLLAPSED not in theia_left_panel.get_attribute(constants.TYPE_CLASS):
panel_state = constants.STATE_VISIBLE
try:
file_explorer_id_element = self.find_it(UI.get_files_explorer_locator())
explorer_classes = file_explorer_id_element.get_attribute(constants.TYPE_CLASS)
if constants.THEIA_ELEMENT_COLLAPSED not in explorer_classes and \
constants.THEIA_HIDDEN not in explorer_classes:
explorer_state = constants.STATE_VISIBLE
except (NoSuchElementException, ElementNotFoundException):
pass
except Exception:
raise GeneralException(self.get_driver(), call_from=self.is_file_explorer_visible.__name__)
return constants.STATE_HIDDEN not in [panel_state, explorer_state]
def make_sure_debug_button_is_here(self):
theia_locator = UI.get_theia_left_panel_locator()
WebDriverWait(self._driver, constants.DEFAULT_TIMEOUT).until(
expected_conditions.presence_of_element_located(theia_locator)
)
theia_left_panel = self.find_it(theia_locator)
highlight(theia_left_panel)
try:
return self.find_it(UI.get_debug_tab_locator())
except (NoSuchElementException, ElementNotFoundException):
menu = MainMenu(self.get_driver())
menu.invoke_menu_path(constants.Debug)
return self.find_it(UI.get_debug_tab_locator())
@WaitTillExist()
def show_file_explorer(self):
if not self.is_file_explorer_visible():
file_explorer_id_element = self.find_it(UI.get_files_explorer_tab_locator())
self.click_me(file_explorer_id_element)
else:
print("visible?")
@WaitTillExist(timeout=constants.DEFAULT_HUGE_TIMEOUT, interval=10)
def wait_for_theia_loaded(self):
# self.switch_to_theia_frame()
theia_panel_locator = UI.get_theia_left_right_panel_locator()
try:
theia_panel = self.find_it(theia_panel_locator)
highlight(theia_panel)
except ElementNotFoundException:
raise WebDriverException
except Exception:
raise
def show_debug_tab(self):
debug_id_element = self.make_sure_debug_button_is_here()
highlight(debug_id_element)
if not self.current_left_tab(debug_id_element):
self.click_me(debug_id_element)
def start_debug(self):
debug_start_locator = UI.get_debug_start_locator()
WebDriverWait(self._driver, constants.DEFAULT_LONG_TIMEOUT).until(
expected_conditions.presence_of_element_located(debug_start_locator)
)
theia_locator = UI.get_theia_left_panel_locator()
theia_panel = self.find_it(theia_locator)
debug_start_button = self.find_it(debug_start_locator, parent=theia_panel)
highlight(debug_start_button, effect_time=1)
self.click_me(debug_start_button, element_human_name="Start Debug")
debug_thread_locator = UI.get_debug_thread_locator()
WebDriverWait(self._driver, constants.DEFAULT_LONG_TIMEOUT).until(
expected_conditions.presence_of_element_located(debug_thread_locator)
)
editor_obj = self.get_editor_obj()
line_num = editor_obj.get_current_line_num()
return line_num
def debug_step_over(self):
debug_step_over_locator = UI.get_debug_step_over_locator()
WebDriverWait(self._driver, constants.DEFAULT_LONG_TIMEOUT).until(
expected_conditions.presence_of_element_located(debug_step_over_locator)
)
editor_obj = self.get_editor_obj()
editor = editor_obj.get_editor_element()
step_locator = UI.get_debug_top_stack_frame_locator()
step_element = self.find_it(step_locator, parent=editor)
highlight(step_element, effect_time=1)
theia_locator = UI.get_theia_left_panel_locator()
theia_panel = self.find_it(theia_locator)
debug_step_over_button = self.find_it(debug_step_over_locator, parent=theia_panel)
highlight(debug_step_over_button, effect_time=1)
self.click_me(debug_step_over_button, element_human_name="Step Over")
# possible variants after step: determine what to wait here for
# theia-debug-top-stack-frame - exists, theia-debug-top-stack-frame-line - exists and line num increased
# or
# theia-debug-top-stack-frame - exists, theia-debug-top-stack-frame-line - doesn't exist and line num unchanged
try:
WebDriverWait(self._driver, constants.DEFAULT_SHORT_TIMEOUT).until(
expected_conditions.invisibility_of_element_located(step_locator)
)
except TimeoutException:
pass
WebDriverWait(self._driver, constants.DEFAULT_TIMEOUT).until(
expected_conditions.presence_of_element_located(step_locator)
)
line_after_step = editor_obj.get_current_line_num()
print("line after debug step", line_after_step)
step_element = self.find_it(step_locator, parent=editor)
highlight(step_element, effect_time=1)
return line_after_step
def debug_continue(self, file_is_opened=True):
debug_continue_locator = UI.get_debug_continue_locator()
WebDriverWait(self._driver, constants.DEFAULT_TIMEOUT).until(
expected_conditions.presence_of_element_located(debug_continue_locator)
)
editor_obj = self.get_editor_obj()
editor = editor_obj.get_editor_element()
step_locator = UI.get_debug_top_stack_frame_locator()
if file_is_opened:
step_element = self.find_it(step_locator, parent=editor)
highlight(step_element, effect_time=1)
theia_locator = UI.get_theia_left_panel_locator()
theia_panel = self.find_it(theia_locator)
debug_continue_button = self.find_it(debug_continue_locator, parent=theia_panel)
highlight(debug_continue_button, effect_time=1)
self.click_me(debug_continue_button, element_human_name="Continue")
if not file_is_opened:
return None
try:
WebDriverWait(self._driver, constants.DEFAULT_SHORT_TIMEOUT).until(
expected_conditions.invisibility_of_element_located(step_locator)
)
except TimeoutException:
pass
WebDriverWait(self._driver, constants.DEFAULT_TIMEOUT).until(
expected_conditions.presence_of_element_located(step_locator)
)
line_after_continue = editor_obj.get_current_line_num()
print("line after debug continue", line_after_continue)
step_element = self.find_it(step_locator, parent=editor)
highlight(step_element, effect_time=1)
return line_after_continue
@WaitTillExist(timeout=constants.DEFAULT_LONG_TIMEOUT)
def wait_for_debug_to_stop(self):
theia_locator = UI.get_theia_left_panel_locator()
theia_panel = self.find_it(theia_locator)
debug_thread_locator = UI.get_debug_thread_locator()
try:
thread_element = self.find_it(debug_thread_locator, parent=theia_panel)
highlight(thread_element)
raise WebDriverException
except (NoSuchElementException, ElementNotFoundException):
return
except Exception:
raise
def open_preferences(self):
menu = MainMenu(self.get_driver())
menu.invoke_menu_path(constants.Preferences)
@WaitTillExist()
def get_variables_elements(self, parent=None):
variables_elements = self.find_them(UI.get_debug_console_variable_locator(), parent=parent)
if not len(variables_elements):
raise WebDriverException
return variables_elements
@WaitTillExist(timeout=constants.DEFAULT_SHORT_TIMEOUT)
def check_variable_is_updated(self, text, value):
variable_value = text.split(":")[1].strip()
if str(variable_value) != str(value):
raise WebDriverException
def get_variable_element(self, var_name, var_value=None):
editor_obj = self.get_editor_obj()
variables_header = editor_obj.find_variables_header()
highlight(variables_header, effect_time=1)
self.expand_tree(variables_header)
variables_container = self.get_parent_node(variables_header)
highlight(variables_container, effect_time=1)
locals_trees = self.find_them(UI.get_tree_node_content_locator(), parent=variables_container)
locals_found = False
locals_tree = None
for locals_tree in locals_trees:
if locals_tree.text.upper() == constants.THEIA_LOCALS.upper():
locals_found = True
break
if not locals_found:
raise ElementNotFoundException(self.get_driver(), call_from=self.get_variable_element.__name__)
self.expand_tree(locals_tree)
variable_elements = self.get_variables_elements(parent=variables_container)
for variable_element in variable_elements:
highlight(variable_element, effect_time=1)
full_var_text = variable_element.text
variable_name = full_var_text.split(":")[0]
if var_value is not None:
self.check_variable_is_updated(full_var_text, var_value)
print("Current variable '{0}' value: '{1}'".format(var_name, var_value))
print("variable_name", variable_name)
if variable_name.upper() == var_name.upper():
return variable_element
raise NoSuchElementException
def expand_directory_node(self, dir_node, empty_node=False, timeout=None):
content = self.find_file_explorer_content()
self.expand_tree_node(dir_node, content, empty_node=empty_node, timeout=timeout)
@staticmethod
def is_directory(dir_node):
return constants.THEIA_DIR_NODE in dir_node.get_attribute(constants.TYPE_CLASS)
@WaitTillExist(timeout=constants.DEFAULT_SHORT_TIMEOUT)
def enter_in_dialog(self, input_msg, dlg_title=None):
dialog_shell = self.find_it(UI.get_theia_dialog_shell_locator())
dialog_title = self.find_it(UI.get_theia_dialog_title_locator(), parent=dialog_shell)
if dialog_title.text.upper() != dlg_title.upper():
raise ElementNotFoundException
dialog_content = self.find_it(UI.get_theia_dialog_content_locator(), parent=dialog_shell)
input_element = dialog_content.find_element(By.XPATH, "./input")
self.input_value(input_msg, input_element=input_element)
@WaitTillExist(timeout=constants.DEFAULT_SHORT_TIMEOUT)
def answer_ok_to_dialog(self, dlg_title):
dialog_shell = self.find_it(UI.get_theia_dialog_shell_locator())
dialog_title = self.find_it(UI.get_theia_dialog_title_locator(), parent=dialog_shell)
if dialog_title.text.upper() != dlg_title.upper():
raise ElementNotFoundException
dialog_control = self.find_it(UI.get_theia_dialog_control_locator(), parent=dialog_shell)
control_buttons = self.find_them(UI.get_buttons_locator(), parent=dialog_control)
for control_button in control_buttons:
button_text = control_button.text
if button_text.upper() == constants.OK.upper():
self.click_me(control_button, element_human_name=constants.OK, effect_time=1)
@WaitTillExist(timeout=constants.DEFAULT_SHORT_TIMEOUT, should_exist=False, do_dump=False)
def close_dialog(self):
try:
self.switch_to_theia_frame()
dialog_shell = self.find_it(UI.get_theia_dialog_shell_locator())
dialog_title = self.find_it(UI.get_theia_dialog_title_locator(), parent=dialog_shell)
highlight(dialog_title)
if dialog_title.text.upper() == che_constants.CHE_BUG_TITLE.upper():
self.send_key_sequence([Keys.ESCAPE, Keys.ESCAPE, Keys.ESCAPE])
except Exception as e:
raise WebDriverException(msg=e.args)
def get_stop_debug_button(self):
debug_stop_locator = UI.get_debug_stop_locator()
WebDriverWait(self._driver, constants.DEFAULT_TIMEOUT).until(
expected_conditions.presence_of_element_located(debug_stop_locator)
)
theia_locator = UI.get_theia_left_panel_locator()
theia_panel = self.find_it(theia_locator)
debug_stop_button = self.find_it(debug_stop_locator, parent=theia_panel)
if constants.THEIA_MOD_DISABLED in debug_stop_button.get_attribute(constants.TYPE_CLASS):
debug_stop_button = None
return debug_stop_button
def stop_debug(self, retry=False):
debug_stop_button = self.get_stop_debug_button()
if debug_stop_button is None:
return
highlight(debug_stop_button, effect_time=1)
self.click_me(debug_stop_button, element_human_name="Stop Debug")
try:
self.wait_for_debug_to_stop()
except (TimeoutException, CustomTimeoutException):
if retry:
print("Retry to stop debug")
debug_stop_button = self.get_stop_debug_button()
if debug_stop_button is None:
return
highlight(debug_stop_button, effect_time=1)
self.click_me(debug_stop_button, element_human_name="Stop Debug")
self.wait_for_debug_to_stop()
else:
raise
except Exception:
raise
@staticmethod
def current_left_tab(left_tab_element):
left_tab_element_classes = left_tab_element.get_attribute(constants.TYPE_CLASS)
return constants.THEIA_CURRENT_TAB in left_tab_element_classes
@keyword("Get Syntax Ok Message For ${member}")
def get_for_member_syntax_ok_message(self, member):
full_message = lsp_constants.LSP_COBOL_MEMBER_SYNTAX_OK_TEMPLATE.format(member)
return full_message
@keyword("Get Editor")
def get_editor_obj(self):
editor = MonakoEditor(self.get_driver())
return editor
@keyword("See ${text} In Statusbar")
def status_bar_should_have_text(self, text):
theia_status_bar = self.find_it(UI.get_theia_statusbar_locator())
elements_with_commands = self.find_them(UI.get_status_elements(), parent=theia_status_bar)
for elements_with_command in elements_with_commands:
if elements_with_command.text == text:
highlight(elements_with_command, effect_time=1)
return
raise NoSuchElementException
def select_all(self):
# actions = [
# {
# "action": "key_down",
# "value": Keys.CONTROL
# },
# {
# "actions": "send_keys",
# "value": "a"
# },
# {
# "action": "key_up",
# "value": Keys.CONTROL
# }
# ]
# self.execute_key_sequence(actions)
#
# return
actions = ActionChains(self.get_driver())
actions.key_down(Keys.CONTROL)
actions.send_keys("a")
actions.key_up(Keys.CONTROL)
actions.perform()
def copy_to_clipboard(self):
actions = ActionChains(self.get_driver())
actions.key_down(Keys.CONTROL)
actions.send_keys("c")
actions.key_up(Keys.CONTROL)
actions.perform()
def paste_from_clipboard(self):
actions = ActionChains(self.get_driver())
actions.key_down(Keys.CONTROL)
actions.send_keys("v")
actions.key_up(Keys.CONTROL)
actions.perform()
def save_file(self):
actions = ActionChains(self.get_driver())
actions.key_down(Keys.CONTROL)
actions.send_keys("s")
actions.key_up(Keys.CONTROL)
actions.perform()
@staticmethod
def clear_clipboard():
clb_type = pyperclip.determine_clipboard()
print("clipboard", clb_type)
pyperclip.copy("")
    @staticmethod
    def get_from_clipboard(json_format=True):
        """Read the clipboard and return its content re-serialized as JSON text.

        When *json_format* is True the clipboard text is parsed as JSON first,
        so the result is a pretty-printed (indent=3) normalization of it.
        NOTE(review): when *json_format* is False the raw string is still
        passed through json.dumps, which wraps it in quotes — confirm callers
        expect that rather than the raw clipboard text.
        """
        res = pyperclip.paste()
        print("clipboard res: '{0}' - '{1}'".format(res, type(res)))
        if json_format:
            res = json.loads(res)
            print("json res: '{0}' - '{1}'".format(res, type(res)))
        return json.dumps(res, indent=3)
    @staticmethod
    def get_to_clipboard(value):
        """Place *value* on the system clipboard."""
        pyperclip.copy(value)
@staticmethod
@keyword("Modify User Preferences")
def append_dict(pref_dict, merge_dict):
if isinstance(pref_dict, str):
pref_dict = json.loads(pref_dict)
for key in merge_dict:
pref_dict[key] = merge_dict[key]
return json.dumps(pref_dict, indent=3)
| 1.773438 | 2 |
hackerearth/Algorithms/Roy and Tiles/solution.py | ATrain951/01.python-com_Qproject | 4 | 12765226 | <reponame>ATrain951/01.python-com_Qproject
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
# Competitive-programming solution: for each test case, read `n` rows of tile
# values, precompute for every starting value a product of per-row counts
# along a diagonal, then answer range queries.  All products are taken
# modulo 1e9+7.
from collections import defaultdict
mod = 1000000007  # modulus for all reported counts
t = int(input())  # number of test cases
for _ in range(t):
    n = int(input())  # number of rows
    # adj[i] maps value -> how many times it appears in row i.
    adj = [defaultdict(int) for _ in range(n)]
    for i in range(n):
        for j in map(int, input().strip().split()):
            adj[i][j] += 1
    # ans[v - 1] = count of v in row 0, multiplied by the count of (v + r)
    # in each later row r; zero as soon as any row lacks the needed value.
    ans = defaultdict(int)
    part = adj[1:]
    for i in adj[0]:
        temp = adj[0][i]
        count = 0
        for j in part:
            count += 1
            k = j[i + count]  # defaultdict: a missing value reads as 0
            if k == 0:
                temp = 0
                break
            else:
                temp *= k
                temp %= mod
        ans[i - 1] = temp
    q = int(input())  # number of queries
    for _ in range(q):
        s, d = map(int, input().strip().split())
        # Only a span (s, d) that covers exactly the n rows can be non-zero.
        if d - s - 1 != n:
            print(0)
        else:
            print(ans[s])
| 2.765625 | 3 |
models.py | YongWookHa/swin-transformer-ocr | 43 | 12765227 | import torch
import random
import pytorch_lightning as pl
from x_transformers import *
from x_transformers.autoregressive_wrapper import *
from timm.models.swin_transformer import SwinTransformer
import utils
class SwinTransformerOCR(pl.LightningModule):
    """OCR model: Swin-Transformer vision encoder + autoregressive text decoder."""

    def __init__(self, cfg, tokenizer):
        super().__init__()
        self.cfg = cfg
        self.tokenizer = tokenizer
        # Vision encoder: Swin backbone configured to emit the raw patch-token
        # sequence (num_classes=0 disables the classification head).
        self.encoder = CustomSwinTransformer( img_size=(cfg.height, cfg.width),
                                              patch_size=cfg.patch_size,
                                              in_chans=cfg.channels,
                                              num_classes=0,
                                              window_size=cfg.window_size,
                                              embed_dim=cfg.encoder_dim,
                                              depths=cfg.encoder_depth,
                                              num_heads=cfg.encoder_heads
                                              )
        # Autoregressive decoder over the tokenizer's vocabulary, attending to
        # the encoder output as cross-attention context.
        self.decoder = CustomARWrapper(
                        TransformerWrapper(
                            num_tokens=len(tokenizer),
                            max_seq_len=cfg.max_seq_len,
                            attn_layers=Decoder(
                                dim=cfg.decoder_dim,
                                depth=cfg.decoder_depth,
                                heads=cfg.decoder_heads,
                                **cfg.decoder_cfg
                            )),
                        pad_value=cfg.pad_token
                    )
        self.bos_token = cfg.bos_token       # sequence-start token id
        self.eos_token = cfg.eos_token       # sequence-end token id
        self.max_seq_len = cfg.max_seq_len   # generation length cap
        self.temperature = cfg.temperature   # sampling temperature

    def configure_optimizers(self):
        """Build the optimizer and LR scheduler named in the config.

        The scheduler class is looked up first in torch.optim.lr_scheduler,
        then in the project `utils` module; a missing config entry falls back
        to a constant-LR LambdaLR.
        """
        optimizer = getattr(torch.optim, self.cfg.optimizer)
        optimizer = optimizer(self.parameters(), lr=float(self.cfg.lr))
        if not self.cfg.scheduler:
            # No scheduler configured: use a constant multiplier of 1.
            scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1)
            scheduler = {
                'scheduler': scheduler, 'interval': "epoch", "name": "learning rate"
            }
            return [optimizer], [scheduler]
        elif hasattr(torch.optim.lr_scheduler, self.cfg.scheduler):
            scheduler = getattr(torch.optim.lr_scheduler, self.cfg.scheduler)
        elif hasattr(utils, self.cfg.scheduler):
            scheduler = getattr(utils, self.cfg.scheduler)
        else:
            raise ModuleNotFoundError
        scheduler = {
            'scheduler': scheduler(optimizer, **self.cfg.scheduler_param),
            'interval': self.cfg.scheduler_interval,
            'name': "learning rate"
        }
        return [optimizer], [scheduler]

    def forward(self, x):
        '''
        Greedy/sampled decoding for inference.

        x: (B, C, W, H)
        labels: (B, S)

        # B : batch size
        # W : image width
        # H : image height
        # S : source sequence length
        # E : hidden size
        # V : vocab size
        '''
        encoded = self.encoder(x)
        dec = self.decoder.generate(torch.LongTensor([self.bos_token]*len(x))[:, None].to(x.device), self.max_seq_len,
                                    eos_token=self.eos_token, context=encoded, temperature=self.temperature)
        return dec

    def training_step(self, batch, batch_num):
        """One optimization step: teacher-forced decoder loss on (image, text)."""
        x, y = batch
        tgt_seq, tgt_mask = y
        encoded = self.encoder(x)
        loss = self.decoder(tgt_seq, mask=tgt_mask, context=encoded)
        self.log("train_loss", loss)
        return {'loss': loss}

    def validation_step(self, batch, batch_num):
        """Compute validation loss plus exact-match accuracy of free decoding."""
        x, y = batch
        tgt_seq, tgt_mask = y
        encoded = self.encoder(x)
        loss = self.decoder(tgt_seq, mask=tgt_mask, context=encoded)
        # Decode from scratch (BOS-seeded) to measure real inference accuracy.
        dec = self.decoder.generate((torch.ones(x.size(0),1)*self.bos_token).long().to(x.device), self.max_seq_len,
                                    eos_token=self.eos_token, context=encoded, temperature=self.temperature)
        gt = self.tokenizer.decode(tgt_seq)
        pred = self.tokenizer.decode(dec)
        assert len(gt) == len(pred)
        # Fraction of samples whose decoded text matches the ground truth exactly.
        acc = sum([1 if gt[i] == pred[i] else 0 for i in range(len(gt))]) / x.size(0)
        return {'val_loss': loss,
                'results' : {
                    'gt' : gt,
                    'pred' : pred
                    },
                'acc': acc
                }

    def validation_epoch_end(self, outputs):
        """Aggregate per-batch metrics and log a sample of mispredictions."""
        val_loss = sum([x['val_loss'] for x in outputs]) / len(outputs)
        acc = sum([x['acc'] for x in outputs]) / len(outputs)
        wrong_cases = []
        for output in outputs:
            for i in range(len(output['results']['gt'])):
                gt = output['results']['gt'][i]
                pred = output['results']['pred'][i]
                if gt != pred:
                    wrong_cases.append("|gt:{}/pred:{}|".format(gt, pred))
        # Log at most batch_size // 2 random failure cases to keep output small.
        wrong_cases = random.sample(wrong_cases, min(len(wrong_cases), self.cfg.batch_size//2))
        self.log('val_loss', val_loss)
        self.log('accuracy', acc)
        # custom text logging
        self.logger.log_text("wrong_case", "___".join(wrong_cases), self.global_step)

    @torch.no_grad()
    def predict(self, image):
        """Decode *image* (preprocessed batch tensor) into text predictions."""
        dec = self(image)
        pred = self.tokenizer.decode(dec)
        return pred
class CustomSwinTransformer(SwinTransformer):
    """Swin backbone that returns the un-pooled patch-token sequence (B, L, C)."""

    def __init__(self, img_size=224, *cfg, **kwcfg):
        super(CustomSwinTransformer, self).__init__(img_size=img_size, *cfg, **kwcfg)
        self.height, self.width = img_size

    def forward_features(self, x):
        """Embed patches and run the Swin stages; skip pooling and the head."""
        out = self.patch_embed(x)
        out = self.pos_drop(out)
        out = self.layers(out)
        return self.norm(out)  # B L C
class CustomARWrapper(AutoregressiveWrapper):
    """AutoregressiveWrapper with a generate() that accepts cross-attention context."""

    def __init__(self, *cfg, **kwcfg):
        super(CustomARWrapper, self).__init__(*cfg, **kwcfg)

    @torch.no_grad()
    def generate(self, start_tokens, seq_len, eos_token=None, temperature=1., filter_logits_fn=top_k, filter_thres=0.9, **kwcfg):
        """Sample up to *seq_len* tokens after *start_tokens*.

        Stops early once every sequence in the batch has emitted *eos_token*.
        Extra keyword args (e.g. ``context=``) are forwarded to the network.
        Returns only the newly generated tokens (the prompt is stripped).
        """
        was_training = self.net.training
        num_dims = len(start_tokens.shape)
        if num_dims == 1:
            # Promote a single unbatched prompt to batch size 1.
            start_tokens = start_tokens[None, :]
        b, t = start_tokens.shape
        self.net.eval()
        out = start_tokens
        mask = kwcfg.pop('mask', None)
        if mask is None:
            mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)
        for _ in range(seq_len):
            # Feed only the most recent max_seq_len tokens each step.
            x = out[:, -self.max_seq_len:]
            mask = mask[:, -self.max_seq_len:]
            logits = self.net(x, mask=mask, **kwcfg)[:, -1, :]
            if filter_logits_fn in {top_k, top_p}:
                # Truncate the distribution, then sample with temperature.
                filtered_logits = filter_logits_fn(logits, thres=filter_thres)
                probs = F.softmax(filtered_logits / temperature, dim=-1)
            elif filter_logits_fn is entmax:
                probs = entmax(logits / temperature, alpha=ENTMAX_ALPHA, dim=-1)
            sample = torch.multinomial(probs, 1)
            out = torch.cat((out, sample), dim=-1)
            mask = F.pad(mask, (0, 1), value=True)
            # Stop when every batch row contains at least one EOS token.
            if eos_token is not None and (torch.cumsum(out == eos_token, 1)[:, -1] >= 1).all():
                break
        out = out[:, t:]
        if num_dims == 1:
            out = out.squeeze(0)
        self.net.train(was_training)
        return out
| 2 | 2 |
app.py | davidguzmanr/quickdraw-app | 0 | 12765228 | import streamlit as st
from streamlit_drawable_canvas import st_canvas
from PIL import Image
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
import json
# Specify canvas parameters in application
# Sidebar widgets: pen width, tool, and whether to push updates live.
stroke_width = st.sidebar.slider(
    label='Stroke width:',
    min_value=1,
    max_value=25,
    value=3
)
drawing_mode = st.sidebar.selectbox(
    label='Drawing tool:',
    options=('freedraw', 'line', 'rect', 'circle', 'transform')
)
realtime_update = st.sidebar.checkbox(
    label='Update in realtime',
    value=True
)
# Create a canvas component
# canvas_result.image_data holds the RGBA pixels drawn by the user.
canvas_result = st_canvas(
    stroke_width=stroke_width,
    stroke_color='black',
    update_streamlit=realtime_update,
    height=400,
    width=400,
    drawing_mode=drawing_mode,
    key='canvas',
)
@st.cache
def load_model():
    """Load the pretrained classifier (CPU-mapped) once; cached by Streamlit."""
    model = torch.load(
        f='quickdraw/models/model.pt',
        map_location=torch.device('cpu')
    )
    return model

model = load_model()

# Same preprocessing used at training time.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.9720, 0.9720, 0.9720),
                         (0.1559, 0.1559, 0.1559)) # Normalize with the mean and std of the whole dataset
])

# Dictionary to map id to name of the class
with open('quickdraw/categories/id_to_class.json') as file:
    id_to_class = json.load(file)
# Classify the current drawing (if any) and show the top-3 predictions.
if canvas_result.image_data is not None:
    image = canvas_result.image_data

    # Convert RGBA image to RGB (PIL doesn't convert as I want)
    image_rgb = Image.fromarray(np.uint8(image)).convert(mode='P')
    image_rgb = np.array(image_rgb)[:, :, np.newaxis]
    image_rgb = np.repeat(image_rgb, repeats=3, axis=2)

    # Use the same transformation used in training and add batch dimension
    image_rgb = torch.unsqueeze(transform(image_rgb), dim=0)

    # Compute logits, then class probabilities.
    y_lgts = model(image_rgb)
    y_prob = F.softmax(y_lgts, dim=1)

    # Compute the top 3 predictions
    top_3 = torch.topk(y_prob, k=3)
    preds = top_3.indices.numpy().flatten()
    probs = top_3.values.detach().numpy().flatten()

    labels = [id_to_class[str(i)] for i in preds]
    predictions = dict(zip(labels, probs))

    st.write('**Top 3 predictions:**')
    # Fixed: the original final line had concatenation junk ("| 2.765625 | 3 |")
    # fused onto it, which made the file unparseable.
    st.write(predictions)
encoder_decoder/dtypes_encode_decode.py | UMass-Rescue/encoder_decoder | 0 | 12765229 | import numpy as np
def float_ndarray_to_dict(arr):
    """Encode a float ndarray as a JSON-friendly dict (alias of np_arr_to_dict)."""
    return np_arr_to_dict(arr)
def dict_to_float_ndarray(string):
    """Decode a dict produced by float_ndarray_to_dict back into an ndarray."""
    return dict_to_np_arr(string)
def identity(e):
    """Return *e* unchanged (no-op encoder/decoder)."""
    return e
def float_to_string(num):
    """Encode a number as its plain string representation."""
    return "{0}".format(num)
def string_to_float(string):
    """Decode decimal text back into a float."""
    value = float(string)
    return value
def np_arr_to_dict(arr):
    """Serialize a numpy array into a plain dict of list data, shape and dtype."""
    return {
        'arr': arr.tolist(),
        'shape': list(arr.shape),
        'dtype': str(arr.dtype),
    }
def dict_to_np_arr(data):
    """Rebuild a numpy array from the dict produced by np_arr_to_dict.

    Expects keys 'arr' (nested lists), 'shape' (list of ints) and 'dtype'
    (numpy dtype name).  The original return line carried concatenation junk
    ("| 3.0625 | 3 |") fused onto it, which made the file unparseable; it has
    been removed.
    """
    arr, shape, dtype = data['arr'], data['shape'], data['dtype']
    return np.array(arr, dtype=dtype).reshape(tuple(shape))
plugins/__init__.py | SeaSeaEm/SpueBox | 9 | 12765230 | <reponame>SeaSeaEm/SpueBox<filename>plugins/__init__.py
from .administrative import AdministrativePlugin
from .musicplayer import MusicPlayerPlugin
from .tag import TagPlugin
from .randomgame import RandomGamePlugin
| 1.203125 | 1 |
zerver/webhooks/gosquared/view.py | acguglielmo/zulip | 1 | 12765231 | from typing import Any, Dict, Optional
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
BODY_TEMPLATE = '[{website_name}]({website_url}) has {user_num} visitors online.'
@api_key_only_webhook_view('GoSquared')
@has_request_variables
def api_gosquared_webhook(request: HttpRequest, user_profile: UserProfile,
                          payload: Dict[str, Dict[str, Any]]=REQ(argument_type='body')) -> HttpResponse:
    """Handle a GoSquared webhook: post the site's live visitor count."""
    site = payload['siteDetails']
    account_url = 'https://www.gosquared.com/now/' + site['acct']

    body = BODY_TEMPLATE.format(website_name=site['domain'],
                                website_url=account_url,
                                user_num=payload['concurrents'])
    topic = 'GoSquared - {website_name}'.format(website_name=site['domain'])

    check_send_webhook_message(request, user_profile, topic, body)
    return json_success()
| 2.0625 | 2 |
tests/test_devopsProjectTest.py | mitchdenny/azure-devops-cli-extension | 0 | 12765232 | <gh_stars>0
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import unittest
from azure.cli.testsdk import ScenarioTest
from azure_devtools.scenario_tests import AllowLargeResponse
from .utilities.helper import disable_telemetry, set_authentication, get_test_org_from_env_variable
DEVOPS_CLI_TEST_ORGANIZATION = get_test_org_from_env_variable() or 'Https://dev.azure.com/azuredevopsclitest'
class DevopsProjectTests(ScenarioTest):
    """End-to-end scenario test for `az devops project` create/list/show/delete."""

    @AllowLargeResponse(size_kb=3072)
    @disable_telemetry
    @set_authentication
    def test_devops_projects_CreateListShowDelete(self):
        """Create a project, verify list/show output, then delete it."""
        self.cmd('az devops configure --defaults organization=' + DEVOPS_CLI_TEST_ORGANIZATION)
        random_project_name = self.create_random_name(prefix='projectTest', length=15)
        # Initialize up front: the finally block previously referenced
        # created_project_id unconditionally and raised NameError (masking the
        # real failure) whenever project creation itself failed.
        created_project_id = None
        try:
            project_description = 'This is a sample project description'
            source_control_type = 'git'
            project_visibility = 'public'
            create_project_command = ('az devops project create --name ' + random_project_name + ' -d "' + project_description +
                                      '" --source-control ' + source_control_type + ' --visibility ' + project_visibility + ' --output json --detect off')
            project_create_output = self.cmd(create_project_command).get_output_in_json()
            created_project_id = project_create_output["id"]
            assert len(created_project_id) > 0
            assert project_description == project_create_output["description"]
            assert project_visibility == project_create_output["visibility"].lower()
            assert source_control_type == project_create_output["capabilities"]["versioncontrol"]["sourceControlType"].lower()

            # The new project must appear in the organization's project list.
            list_project_command = 'az devops project list --output json --detect off'
            list_project_output = self.cmd(list_project_command).get_output_in_json()
            assert len(list_project_output) > 1
            assert any(project["id"] == created_project_id for project in list_project_output)

            # `show` must round-trip the id and name.
            show_project_command = 'az devops project show --project ' + created_project_id + ' --output json --detect off'
            show_project_output = self.cmd(show_project_command).get_output_in_json()
            assert show_project_output["id"] == created_project_id
            assert show_project_output["name"] == random_project_name
        finally:
            # Delete the project created for the test (only if creation succeeded).
            if created_project_id is not None:
                delete_project_command = 'az devops project delete --id ' + created_project_id + ' -y --output json --detect off'
                self.cmd(delete_project_command)
                # Verify deletion: the id must no longer be listed.
                list_project_command = 'az devops project list --output json --detect off'
                list_project_output_after_delete = self.cmd(list_project_command).get_output_in_json()
                for project in list_project_output_after_delete:
                    assert project["id"] != created_project_id
| 2.109375 | 2 |
src/asm/cms/cms.py | ctheune/assembly-cms | 0 | 12765233 | import z3c.baseregistry.baseregistry
import asm.cms.page
import grok
import zope.component
import zope.interface
import zope.publisher.browser
import zope.publisher.interfaces.browser
import zope.intid.interfaces
class CMS(grok.Application, asm.cms.page.Page):
    """Application root object: a Grok application that is itself a CMS page."""

    zope.interface.implements(asm.cms.interfaces.ICMS)

    # Keep this here to support old instances.
    type = 'htmlpage'

    def __init__(self, type='htmlpage'):
        super(CMS, self).__init__(type)
@grok.subscribe(zope.intid.interfaces.IIntIds, grok.IObjectAddedEvent)
def cleanup_initial_edition(obj, event):
    """Register a CMS and its editions with a newly added intid utility.

    This is a work-around for an ordering problem: eventually the initial
    editions are created before the intid utility is registered. This cleans
    up that mess and registers all editions that exist in the CMS directly.
    """
    # obj is the intid utility; walk up to the site it was added to.
    cms = obj.__parent__.__parent__
    if not asm.cms.interfaces.ICMS.providedBy(cms):
        return
    for edition in cms.values():
        obj.register(edition)
    obj.register(cms)
class CMSProfile(grok.Adapter):
    """Expose the active profile of a CMS via a read/write ``name`` property.

    Profiles are base registries mixed into the site manager's ``__bases__``;
    this adapter installs or locates the IProfile entry among those bases.
    """

    grok.context(CMS)
    grok.provides(asm.cms.interfaces.IProfileSelection)

    def set_name(self, value):
        # Look up the profile utility by name and install it as the first
        # base of the site manager, keeping all non-profile bases intact.
        value = zope.component.getUtility(asm.cms.interfaces.IProfile,
                                          name=value)
        sm = self.context.getSiteManager()
        bases = (x for x in sm.__bases__
                 if not asm.cms.interfaces.IProfile.providedBy(x))
        sm.__bases__ = (value,) + tuple(bases)

    def get_name(self):
        # Find the profile currently mixed into the site manager; the for/else
        # returns None when no base provides IProfile.
        sm = self.context.getSiteManager()
        for profile in sm.__bases__:
            if not asm.cms.interfaces.IProfile.providedBy(profile):
                continue
            break
        else:
            return None
        # Reverse-map the profile object to the utility name it was
        # registered under.
        for name, reg_profile in zope.component.getUtilitiesFor(
                asm.cms.interfaces.IProfile):
            if reg_profile is profile:
                return name

    name = property(fget=get_name, fset=set_name)
class Profile(z3c.baseregistry.baseregistry.BaseComponents):
    """A named component base registry marked as an IProfile utility."""

    zope.interface.implements(asm.cms.interfaces.IProfile)

    def __init__(self, name):
        super(Profile, self).__init__(zope.component.globalSiteManager, name)
| 1.578125 | 2 |
preserialize/utils.py | xzzy/django-preserialize | 23 | 12765234 | <filename>preserialize/utils.py
import collections
from django.db import models
from django.db.models.fields import Field
from django.db.models import FieldDoesNotExist
PSEUDO_SELECTORS = (':all', ':pk', ':local', ':related')
DEFAULT_SELECTORS = (':pk', ':local')
def convert_to_camel(s):
    """Convert a snake_case string to camelCase.

    The first token keeps its case; later tokens are title-cased.  Tokens
    that are entirely upper-case are dropped, matching the original
    behaviour (presumably to skip acronym fragments — unverified intent).
    """
    if '_' not in s:
        return s
    parts = s.split('_')
    tail = [tok.title() for tok in parts[1:] if tok.upper() != tok]
    return parts[0] + ''.join(tail)
class ModelFieldResolver(object):
    """Maps pseudo-selectors (:pk, :local, :related, :all) to model fields."""

    # Class-level cache shared by every instance, keyed by model class.
    cache = {}

    def _get_pk_field(self, model):
        """Return the primary-key field keyed under ':pk'."""
        fields = (model._meta.pk,)
        names = tuple([x.name for x in fields])

        return {
            ':pk': dict(list(zip(names, fields))),
        }

    def _get_all_related_objects(self, model):
        # Auto-created reverse FK / one-to-one accessors on this model.
        return [
            f for f in model._meta.get_fields()
            if (f.one_to_many or f.one_to_one)
            and f.auto_created and not f.concrete
        ]

    def _get_all_related_many_to_many_objects(self, model):
        # Auto-created reverse many-to-many accessors (including hidden ones).
        return [
            f for f in model._meta.get_fields(include_hidden=True)
            if f.many_to_many and f.auto_created
        ]

    def _get_local_fields(self, model):
        "Return the names of all locally defined fields on the model class."
        local = [f for f in model._meta.fields]
        m2m = [f for f in model._meta.many_to_many]

        fields = local + m2m
        names = tuple([x.name for x in fields])

        return {
            ':local': dict(list(zip(names, fields))),
        }

    def _get_related_fields(self, model):
        "Returns the names of all related fields for model class."
        reverse_fk = self._get_all_related_objects(model)
        reverse_m2m = self._get_all_related_many_to_many_objects(model)

        fields = tuple(reverse_fk + reverse_m2m)
        names = tuple([x.get_accessor_name() for x in fields])

        return {
            ':related': dict(list(zip(names, fields))),
        }

    def _get_fields(self, model):
        """Build (and memoize) the selector -> {name: field} map for *model*."""
        if model not in self.cache:
            fields = {}

            fields.update(self._get_pk_field(model))
            fields.update(self._get_local_fields(model))
            fields.update(self._get_related_fields(model))

            # ':all' is the union of every other selector's fields.
            all_ = {}
            for x in list(fields.values()):
                all_.update(x)

            fields[':all'] = all_

            self.cache[model] = fields

        return self.cache[model]

    def get_field(self, model, attr):
        """Resolve *attr*: pseudo-selectors expand to a list of field names;
        anything else is passed through unchanged (assumed field/property)."""
        fields = self._get_fields(model)

        # Alias to model fields
        if attr in PSEUDO_SELECTORS:
            return list(fields[attr].keys())

        # Assume a field or property
        return attr
resolver = ModelFieldResolver()


def parse_selectors(model, fields=None, exclude=None, key_map=None, **options):
    """Validates fields are valid and maps pseudo-fields to actual fields
    for a given model class.
    """
    fields = fields or DEFAULT_SELECTORS
    exclude = exclude or ()
    key_map = key_map or {}

    selected = []
    for alias in fields:
        # The output key may be an alias for the real model attribute name.
        actual = key_map.get(alias, alias)

        # Validate the field exists on the model.
        cleaned = resolver.get_field(model, actual)
        if cleaned is None:
            raise AttributeError('The "{0}" attribute could not be found '
                                 'on the model "{1}"'.format(actual, model))

        if isinstance(cleaned, list):
            # A pseudo-selector expanded into multiple real field names.
            selected.extend(cleaned)
        else:
            # Mapped value keeps the alias listed in `fields`.
            selected.append(alias if alias != actual else cleaned)

    return tuple(name for name in selected if name not in exclude)
def get_field_value(obj, name, allow_missing=False):
    """Fetch attribute/key *name* from *obj*, normalizing the value.

    Model field values are converted via get_prep_value, reverse managers are
    evaluated to querysets, and callables are invoked with no arguments.
    Raises ValueError when the name is absent unless *allow_missing* is True.
    """
    value = None

    if hasattr(obj, name):
        value = getattr(obj, name)

        # Check if the name of is field on the model and get the prep
        # value if it is a Field instance.
        if isinstance(obj, models.Model):
            try:
                field = obj._meta.get_field(name)
                # JSONField values are deliberately left unconverted.
                if isinstance(field, Field) and field.__class__.__name__ \
                        not in ('JSONField',):
                    value = field.get_prep_value(value)
            except FieldDoesNotExist:
                pass
    elif hasattr(obj, '__getitem__') and name in obj:
        value = obj[name]
    elif not allow_missing:
        raise ValueError('{} has no attribute {}'.format(obj, name))

    # Handle a local many-to-many or a reverse foreign key
    if value.__class__.__name__ in ('RelatedManager', 'ManyRelatedManager',
                                    'GenericRelatedObjectManager'):
        value = value.all()

    # Check for callable
    elif isinstance(value, collections.Callable):
        value = value()

    return value
| 2.21875 | 2 |
openbook_invitations/migrations/0002_auto_20190101_1413.py | TamaraAbells/okuna-api | 164 | 12765235 | <gh_stars>100-1000
# Generated by Django 2.1.4 on 2019-01-01 13:13
from django.conf import settings
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace the legacy InviteUser model with the new UserInvite model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('openbook_invitations', '0001_initial'),
    ]

    operations = [
        # Create the replacement model with unique email/username and a token.
        migrations.CreateModel(
            name='UserInvite',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('invited_date', models.DateField(verbose_name='invited date')),
                ('name', models.CharField(blank=True, max_length=256, null=True)),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
                ('username', models.CharField(blank=True, error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and _ only.', max_length=30, null=True, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('badge_keyword', models.CharField(blank=True, max_length=16, null=True)),
                ('token', models.CharField(max_length=256)),
                ('invited_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='invited_users', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Drop the old model (FK removed first so the delete can proceed).
        migrations.RemoveField(
            model_name='inviteuser',
            name='invited_by',
        ),
        migrations.DeleteModel(
            name='InviteUser',
        ),
    ]
| 1.984375 | 2 |
python/lvmnps/switch/dli/powerswitch.py | wasndas/lvmnps | 0 | 12765236 | # -*- coding: utf-8 -*-
#
# @Author: <NAME> (<EMAIL>)
# @Date: 2021-06-24
# @Filename: lvmnps/switch/iboot/powerswitch.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import datetime
from sdsstools.logger import SDSSLogger
from lvmnps.switch.dli.lvmpower import PowerSwitch as DliPowerSwitch
from lvmnps.switch.powerswitchbase import PowerSwitchBase
# Todo: Dont inherit clu.Device in lvmnps.switch.dli.dlipower.PowerSwitch if you are not using it.
__all__ = ["PowerSwitch"]
class PowerSwitch(PowerSwitchBase):
    """Power switch handler for a Digital Loggers (DLI) web power switch.

    Wraps an async DliPowerSwitch client that is created lazily on the first
    reachability check and dropped again whenever verification fails.
    """

    def __init__(self, name: str, config, log: SDSSLogger):
        # NOTE(review): the original annotated `config` as the literal `[]`,
        # which is not a valid type; the annotation is dropped here.
        # config_get() suggests a mapping-like configuration section.
        super().__init__(name, config, log)

        self.hostname = self.config_get("hostname")
        self.username = self.config_get("user", "admin")
        self.password = self.config_get("password", "<PASSWORD>")
        self.use_https = self.config_get("use_https", False)

        self.dli = None        # lazily created DliPowerSwitch client
        self.reachable = None  # result of the most recent verify()

    async def start(self):
        """Perform an initial connectivity check and refresh all outlet states."""
        if not await self.isReachable():
            self.log.warning(f"{self.name} not reachable on start up")
        await self.update(self.outlets)

    async def stop(self):
        """Shut down the handler; currently nothing needs releasing."""
        try:
            pass
        except Exception as ex:
            self.log.error(f"Unexpected exception {type(ex)}: {ex}")
            return False
        self.log.debug("So Long, and Thanks for All the Fish ...")

    async def isReachable(self):
        """Create the DLI client on demand and verify the switch answers.

        Returns the verification result; on failure the client is discarded
        so the next call rebuilds it from scratch.
        """
        try:
            if not self.dli:
                self.dli = DliPowerSwitch(
                    name=self.name,
                    userid=self.username,
                    # Fixed: the source contained the invalid token
                    # `password=<PASSWORD>` here, which does not parse.
                    password=self.password,
                    hostname=self.hostname,
                    use_https=self.use_https,
                )
            self.reachable = await self.dli.verify()
            if not self.reachable:
                self.dli = None
            return self.reachable

        except Exception as ex:
            self.log.error(
                f"Unexpected exception is {type(ex)}: {ex}"
            )
            self.dli = None
            return False

    async def update(self, outlets):
        """Refresh the cached state of *outlets*; -1 marks them unreachable."""
        # outlets contains all targeted ports
        self.log.debug(f"{outlets}")
        try:
            if await self.isReachable():
                # Fetch the state of every port once, then apply the subset.
                currentstatus = await self.dli.statusdictionary()
                for o in outlets:
                    o.setState(currentstatus[o.portnum])
            else:
                for o in outlets:
                    o.setState(-1)
        except Exception as ex:
            self.log.error(f"Unexpected exception for {type(ex)}: {ex}")

    async def switch(self, state, outlets):
        """Turn *outlets* on (state truthy) or off, then refresh their state."""
        # outlets contains all targeted ports
        self.log.debug(f"{outlets} = {state}")
        try:
            if await self.isReachable():
                current_time = datetime.datetime.now()
                print(f"after isReachable : {current_time}")
                for o in outlets:
                    await self.dli.on(o.portnum) if state else await self.dli.off(
                        o.portnum
                    )
                current_time = datetime.datetime.now()
                print(f"after dli : {current_time}")
                await self.update(outlets)
                print(outlets)
                current_time = datetime.datetime.now()
                print(f"after update : {current_time}")
        except Exception as ex:
            self.log.error(f"Unexpected exception to {type(ex)}: {ex}")

    async def cycle(self, name, portnum):
        """Power-cycle every outlet matched by *name*/*portnum*."""
        outlets = self.collectOutletsByNameAndPort(name, portnum)
        for o in outlets:
            await self.dli.cycle(o.portnum)
| 1.875 | 2 |
slurm/merge_analysis.py | oxfordni/storm-analysis | 0 | 12765237 | #!/usr/bin/env python
"""
Merges the intermediate localization files into a single
localization file.
Hazen 08/17
"""
import glob
import os
from xml.etree import ElementTree
import storm_analysis.sa_library.readinsight3 as readinsight3
import storm_analysis.sa_library.writeinsight3 as writeinsight3
def mergeAnalysis(dir_name, bin_base_name, extensions=(".bin",)):
    """Merge per-job Insight3 localization files into single output files.

    Looks for job*.xml files in *dir_name*; for each job i, expects a
    corresponding p_<i+1>_mlist<ext> file for every extension.  Localizations
    are appended in job order to <bin_base_name><ext>.  If any job is
    incomplete, the partial outputs are removed and AssertionError is raised.

    (The default for *extensions* is now a tuple instead of a mutable list,
    and the failure path raises explicitly rather than using ``assert(False)``,
    which would be silently skipped when Python runs with -O.)
    """
    # Create Insight3 file writers.
    i3_out = []
    for ext in extensions:
        i3_out.append(writeinsight3.I3Writer(bin_base_name + ext))

    # Find all the job*.xml files.
    job_xml_files = glob.glob(dir_name + "job*.xml")

    # Sort job files by their numeric suffix (job_<n>.xml).
    job_xml_files = sorted(job_xml_files, key = lambda x: int(os.path.splitext(os.path.basename(x))[0].split("_")[1]))

    # Check for corresponding mlist.bin files.
    metadata = None
    last_frame = 0
    for i in range(len(job_xml_files)):
        job_complete = True
        for j, ext in enumerate(extensions):
            mlist_name = dir_name + "p_" + str(i+1) + "_mlist" + ext
            if os.path.exists(mlist_name) and readinsight3.checkStatus(mlist_name):

                # Load metadata from the first file.
                if (i == 0) and (j == 0):
                    metadata = readinsight3.loadI3Metadata(mlist_name)

                # Read localizations.
                i3_data = readinsight3.loadI3File(mlist_name, verbose = False)

                # Check for empty file.
                if (i3_data.size == 0):
                    print("No localizations found in", mlist_name)

                else:
                    # Print frame range covered.
                    if (j == 0):
                        last_frame = i3_data["fr"][-1]
                        print(i3_data["fr"][0], last_frame, mlist_name)

                    # Add localizations to the output file.
                    i3_out[j].addMolecules(i3_data)
            else:
                job_complete = False
                break

        if not job_complete:
            print("Merge failed because", job_xml_files[i], "is incomplete.")
            # Close and remove the partial outputs before failing.
            for j, ext in enumerate(extensions):
                i3_out[j].close()
                os.remove(bin_base_name + ext)
            raise AssertionError("incomplete job: " + job_xml_files[i])

    if metadata is None:
        print("No metadata found.")
        for i3w in i3_out:
            i3w.close()
    else:
        # Fix movie length node based on the last frame of the last molecule.
        metadata.find("movie").find("movie_l").text = str(last_frame)

        # Also need to fix analysis end points. We are assuming that the
        # entire movie was analyzed.
        metadata.find("settings").find("start_frame").text = "-1"
        metadata.find("settings").find("max_frame").text = "-1"

        for i3w in i3_out:
            i3w.closeWithMetadata(ElementTree.tostring(metadata, 'ISO-8859-1'))
if (__name__ == "__main__"):

    import argparse

    # Command-line front end: merge parallel worker outputs into one file.
    parser = argparse.ArgumentParser(description = 'Merge analysis results from parallel analysis.')

    parser.add_argument('--working_dir', dest='wdir', type=str, required=True,
                        help = "The name of the analysis working directory.")
    parser.add_argument('--bin_base_name', dest='merged', type=str, required=True,
                        help = "The base name of the merged localization file (i.e. without .bin extension)")
    parser.add_argument('--ext', dest='ext', type=str, required=False, default=[".bin"], nargs = "*",
                        help = "The name of the extensions, if any.")

    args = parser.parse_args()

    mergeAnalysis(args.wdir, args.merged, args.ext)
| 2.328125 | 2 |
joatu/settings/prod.py | moileretour/joatu | 1 | 12765238 | from joatu.settings.base import *
# Production overrides: debugging off, static files served through WhiteNoise.
DEBUG = False

STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # WhiteNoise sits right after SecurityMiddleware so it can serve static
    # assets before the rest of the stack runs.
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'joatu.middleware.LoginRequiredMiddleware',
]

# Hashed + compressed static file storage backend for cache-busting.
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| 1.3125 | 1 |
examples/basic-usage/lambda/main.py | JousP/terraform-aws-lambda-function | 0 | 12765239 | import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)


def handler(event, context):
    """AWS Lambda entry point: logs a greeting and returns None."""
    logger.info("Hello World!")
    return None
| 2.53125 | 3 |
storage/views.py | bopopescu/storyboard | 0 | 12765240 | #!/usr/bin/env python
# encoding: utf-8
"""
views.py
Created by <NAME> on 2012-04-02.
Copyright (c) 2012 Close To U. All rights reserved.
"""
import datetime
import logging
import hashlib
from django.shortcuts import render_to_response
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.contrib import auth
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponseNotModified
from django.http import HttpResponseNotFound
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.utils import simplejson
# from google.appengine.api import files
from config import STORAGE_SERVICE,STORAGE_BUCKET,STORAGE_FOLDER,STORAGE_ACCESS_KEY,STORAGE_SECRET_ACCESS_KEY
from models import *
from forms import *
import tempfile
import boto
import logging
logger = logging.getLogger(__name__)
# from google.appengine.api import images
# config = boto.config
# config.add_section('Credentials')
# config.set('Credentials', 'gs_access_key_id', '')
# config.set('Credentials', 'gs_secret_access_key', '')
def photos(request):
    """Render the gallery of uploaded images, most recently updated first."""
    images = Storage.objects.all().order_by('-updated').filter(kind='image')
    context = {'photos': images}
    return render_to_response('storage/photos.html', context,
                              context_instance=RequestContext(request))
@login_required
def ajax_upload(request):
    """Handle an AJAX photo upload.

    Reads the image bytes from the raw POST body (name/type/size come from
    X-File-Name / Content-Type / X-File-Size headers), stores them in Google
    Cloud Storage via the App Engine files API, records a Storage row, and
    returns the photo URLs as JSON.  Non-POST requests are redirected to the
    upload form.
    """
    if request.method == 'POST':
        name = request.META['HTTP_X_FILE_NAME']
        content_type = request.META['HTTP_CONTENT_TYPE'] or 'application/octet-stream'#'text/plain'
        file_size = request.META['HTTP_X_FILE_SIZE']
        file_data = request.raw_post_data
        # logging.info(request.META.keys())
        # logging.info(name)
        # logging.info(content_type)
        # logging.info(file_size)
        # NOTE(review): file_size is header text compared to an int — this
        # relies on Python 2 mixed-type comparison semantics; confirm intent.
        if file_size>0 and file_data:
            now = datetime.datetime.now()
            file_ext_pos = name.rfind('.')
            file_name_len = len(name)
            # Only JPEG/PNG/GIF uploads are accepted.
            # NOTE(review): these bare `return`s yield None, which Django
            # treats as an error response — confirm an explicit HttpResponse
            # (e.g. 400) was intended.
            if not (content_type == 'image/jpeg' or content_type == 'image/png' or content_type == 'image/gif'):
                return
            # NOTE(review): `and` here looks like it should be `or`; as
            # written the guard only trips when both conditions hold at once.
            if file_ext_pos<=0 and file_ext_pos>=file_name_len:
                return
            file_ext = name[file_ext_pos-file_name_len:]
            # Timestamped object name keeps uploads unique within the bucket.
            file_name = 'uploads/ohbug/photo/%s%s' % (now.strftime('%Y-%m/%d-%H%M%S-%f'),file_ext)
            file_path = '/%s/%s/%s' % (STORAGE_SERVICE,STORAGE_BUCKET,file_name)
            #logging.info(file_path)
            # Write the bytes to cloud storage and finalize the object.
            write_path = files.gs.create(file_path, acl='bucket-owner-full-control',mime_type=content_type)
            with files.open(write_path, 'a') as fp:
                fp.write(file_data)
            files.finalize(write_path)
            # Record the upload in the database.
            s = Storage()
            s.storage = STORAGE_SERVICE
            s.bucket = STORAGE_BUCKET
            s.path = file_name
            s.mime = content_type
            s.size = len(file_data)
            s.md5 = hashlib.md5(file_data).hexdigest()
            s.name = name
            s.author = request.user
            s.save()
            HTTP_HOST = request.META['HTTP_HOST']
            to_json = {
                'origin': 'http://%s/photo/%s' % (HTTP_HOST,s.key) ,
                'url': 'http://%s/photo/raw/%s.%s' % (HTTP_HOST,s.key,s.name)
            }
            return HttpResponse(simplejson.dumps(to_json), mimetype='application/json')
    else:
        #return HttpResponse('ajax_upload: POST method required.')
        return HttpResponseRedirect('/photo/upload')
@login_required
def upload(request):
    """Form-based image upload: validate, push the bytes to the configured
    storage backend (sina / baidu / Google Storage) and record a Storage row.

    Redirects to the photo detail page on success; otherwise re-renders the
    upload form.
    """
    if request.method == 'POST':
        form = UploadForm(request.POST, request.FILES)
        if form.is_valid():
            if request.FILES.has_key('images'):
                name = request.FILES['images'].name
                content_type = request.FILES['images'].content_type or 'application/octet-stream'#'text/plain'
                file_data = request.FILES['images'].read()
                now = datetime.datetime.now()
                file_ext_pos = name.rfind('.')
                file_name_len = len(name)
                # Only image uploads are accepted.
                if not (content_type == 'image/jpeg' or content_type == 'image/png' or content_type == 'image/gif'):
                    return
                # NOTE(review): this guard can never fire (rfind() cannot be both
                # <= 0 and >= len(name)); `or` was probably intended. The bare
                # `return` also yields None, which Django rejects as a response.
                if file_ext_pos<=0 and file_ext_pos>=file_name_len:
                    return
                # Extension (with dot) via negative slice, lower-cased for consistency.
                file_ext = name[file_ext_pos-file_name_len:].lower()
                # Flat object name (sina/baidu) vs. nested path (Google Storage).
                file_uri = '%s%s' % (now.strftime('%Y-%m-%d-%H%M%S-%f'),file_ext)
                file_name = 'uploads/ohbug/photo/%s%s' % (now.strftime('%Y-%m/%d-%H%M%S-%f'),file_ext)
                file_path = '/%s/%s/%s' % (STORAGE_SERVICE,STORAGE_BUCKET,file_name)
                if STORAGE_SERVICE == 'sina':
                    import sae.storage
                    s = sae.storage.Client()
                    ob = sae.storage.Object(file_data,expires='', content_type=content_type, content_encoding='gzip')
                    s.put(STORAGE_BUCKET, file_uri , ob)
                    file_name = file_uri
                if STORAGE_SERVICE == 'baidu':
                    # (Removed a large block of commented-out BAE API / temp-file
                    # experiments here; see version control history if needed.)
                    import pybcs
                    bcs = pybcs.BCS('http://bcs.duapp.com/', STORAGE_ACCESS_KEY, STORAGE_SECRET_ACCESS_KEY)
                    bucket = bcs.bucket(STORAGE_BUCKET)
                    obj_name = u'/%s'%(file_uri)
                    obj = bucket.object(obj_name.encode('utf8'))
                    obj.put(file_data)
                    obj.make_public()
                    #obj.put_file(TMPDIR+'/'+file_uri)
                    file_name = file_uri
                # Google Storage
                if STORAGE_SERVICE == 'gs':
                    dst_uri = boto.storage_uri(STORAGE_BUCKET, STORAGE_SERVICE)
                    new_dst_uri = dst_uri.clone_replace_name(file_name)
                    # Spool to a temp file so boto can stream the upload.
                    tmp = tempfile.TemporaryFile()
                    tmp.write(file_data)
                    tmp.seek(0)
                    dst_key = new_dst_uri.new_key()
                    dst_key.content_type = content_type
                    dst_key.set_contents_from_file(tmp)
                # Persist upload metadata.
                s = Storage()
                s.storage = STORAGE_SERVICE
                s.bucket = STORAGE_BUCKET
                s.path = file_name
                s.mime = content_type
                s.size = len(file_data)
                s.md5 = hashlib.md5(file_data).hexdigest()
                s.name = name
                s.kind = 'image'
                s.author = request.user
                s.save()
                return HttpResponseRedirect('/photo/%s'%s.key)
    else:
        form = UploadForm()
    return render_to_response('storage/upload.html',{'form':form},context_instance=RequestContext(request))
def view(request, key=None):
    """Render the detail page for a single stored photo."""
    item = get_object_or_404(Storage, pk=key)
    host = request.META['HTTP_HOST']
    url = 'http://%s/photo/raw/%s.%s' % (host, item.key, item.name)
    if STORAGE_SERVICE == 'baidu':
        # Baidu objects are served directly from the BCS endpoint.
        url = 'http://bcs.duapp.com/%s/%s' % (STORAGE_BUCKET, item.path)
    context = {'image': {'origin': item, 'url': url}}
    return render_to_response('storage/photo.html', context,
                              context_instance=RequestContext(request))
def read_gs(read_path):
    """Read an object from Google Storage via the GAE files API in ~1 MB chunks.

    NOTE(review): the `files` import is commented out at the top of this
    module, so calling this raises NameError; the bare except below would
    swallow it and silently return None. Apparently kept for reference only.
    """
    image_data = None
    try:
        with files.open(read_path, 'r') as fp:
            buf = fp.read(1000000)
            image_data = buf
            while buf:
                buf = fp.read(1000000)
                image_data +=buf
    except Exception,e:
        pass
    return image_data
def cache_response(new_image, mime):
    """Wrap image payload in an HttpResponse with far-future cache headers.

    Sets Expires one year ahead, Last-Modified to the current UTC time (so
    that If-Modified-Since requests, handled in raw(), make sense) and a
    two-day max-age.
    """
    response = HttpResponse(new_image, mime)
    format_str = '%a, %d %b %Y %H:%M:%S GMT'
    expires_date = datetime.datetime.utcnow() + datetime.timedelta(365)
    expires_str = expires_date.strftime(format_str)
    last_modified_date = datetime.datetime.utcnow()
    # BUG FIX: previously formatted expires_date here, so Last-Modified
    # advertised a modification time one year in the future.
    last_modified_str = last_modified_date.strftime(format_str)
    response['Expires'] = expires_str #eg:'Sun, 08 Apr 2013 11:11:02 GMT'
    response["Last-Modified"] = last_modified_str #for 'If-Modified-Since'
    response['Cache-Control'] = 'max-age=172800'
    #response['Content-Disposition'] = 'attachment; filename=%s' % s.name
    #response["ETag"] = ''
    return response
def raw(request,key=None):
    """Stream the original bytes of a stored image from the configured backend.

    Any request carrying If-Modified-Since is answered 304 without checking
    the actual timestamp, relying on the far-future headers produced by
    cache_response().
    """
    if request.META.has_key('HTTP_IF_MODIFIED_SINCE'):
        return HttpResponseNotModified()
    #request.META.get("HTTP_IF_NONE_MATCH", None)
    s = get_object_or_404(Storage,pk=key)
    # `tmp` receives the image payload from whichever backend is configured.
    tmp = None
    if STORAGE_SERVICE == 'sina':
        import sae.storage
        sc = sae.storage.Client()
        ob = sc.get(STORAGE_BUCKET, s.path)
        url = sc.url(STORAGE_BUCKET, s.path)
        # NOTE(review): Python 2 print statement — debug leftover.
        print url
        if ob and ob.data:
            tmp = ob.data
    if STORAGE_SERVICE == 'baidu':
        import pybcs
        bcs = pybcs.BCS('http://bcs.duapp.com/', STORAGE_ACCESS_KEY, STORAGE_SECRET_ACCESS_KEY)
        bucket = bcs.bucket(STORAGE_BUCKET)
        obj_name = u'/%s'%(s.path)
        obj = bucket.object(obj_name.encode('utf8'))
        tmp = obj.get()['body']
    if STORAGE_SERVICE == 'gs':
        src_uri = boto.storage_uri(s.bucket + '/' + s.path, 'gs')
        src_key = src_uri.get_key()
        # Spool into a temp file and rewind so the response can read it.
        tmp = tempfile.TemporaryFile()
        src_key.get_file(tmp)
        tmp.seek(0)
    if tmp:
        return cache_response(tmp, s.mime)
    else:
        return HttpResponseNotFound()
def thumbnail(request,key=None):
    """Serve a 100x100 PNG thumbnail of a stored image.

    Fetches the original bytes from the configured backend (sina / baidu /
    Google Storage), center-crops and scales them with PIL, and returns the
    PNG with the same one-year cache headers used by cache_response().
    Conditional requests get an unconditional 304.
    """
    if request.META.has_key('HTTP_IF_MODIFIED_SINCE'):
        return HttpResponseNotModified()
    s = get_object_or_404(Storage,pk=key)
    image_data = None
    if STORAGE_SERVICE == 'sina':
        import sae.storage
        sc = sae.storage.Client()
        ob = sc.get(STORAGE_BUCKET, s.path)
        if ob and ob.data:
            image_data = ob.data
    if STORAGE_SERVICE == 'baidu':
        import pybcs
        bcs = pybcs.BCS('http://bcs.duapp.com/', STORAGE_ACCESS_KEY, STORAGE_SECRET_ACCESS_KEY)
        bucket = bcs.bucket(STORAGE_BUCKET)
        obj_name = u'/%s'%(s.path)
        obj = bucket.object(obj_name.encode('utf8'))
        image_data = obj.get()['body']
    if STORAGE_SERVICE == 'gs':
        src_uri = boto.storage_uri(s.bucket + '/' + s.path, 'gs')
        src_key = src_uri.get_key()
        tmp = tempfile.TemporaryFile()
        src_key.get_file(tmp)
        tmp.seek(0)
        # BUG FIX: the original first assigned `image_data = tmp` (the file
        # object) and immediately overwrote it; only the read() result is used.
        image_data = tmp.read()
    if image_data:
        from PIL import Image,ImageOps
        import StringIO
        img = Image.open(StringIO.StringIO(image_data))
        # Center-crop and scale to a square 100x100 thumbnail.
        region = ImageOps.fit(img,(100, 100),Image.ANTIALIAS)
        response = HttpResponse(mimetype="image/png")
        format_str = '%a, %d %b %Y %H:%M:%S GMT'
        expires_date = datetime.datetime.utcnow() + datetime.timedelta(365)
        expires_str = expires_date.strftime(format_str)
        last_modified_date = datetime.datetime.utcnow()
        # BUG FIX: Last-Modified previously used expires_date, advertising a
        # modification time one year in the future.
        last_modified_str = last_modified_date.strftime(format_str)
        response['Expires'] = expires_str #eg:'Sun, 08 Apr 2013 11:11:02 GMT'
        response["Last-Modified"] = last_modified_str #for 'If-Modified-Since'
        response['Cache-Control'] = 'max-age=172800'
        region.save(response, 'PNG', quality = 100)
        return response
    else:
        return HttpResponseNotFound()
quickdraw/create_dataset.py | davidguzmanr/quickdraw-app | 0 | 12765241 | from quickdraw import QuickDrawDataGroup
from tqdm import tqdm
import os
def main():
    """
    Download the images and create the necessary directories to store them.

    Notes
    -----
    - See https://pytorch.org/vision/stable/datasets.html#torchvision.datasets.ImageFolder to see
    how images must be arranged for the Dataset.
    """
    os.makedirs('images', exist_ok=True)

    with open('categories/categories.txt') as fh:
        category_names = [line.rstrip('\n') for line in fh]

    for category in tqdm(category_names):
        drawings_group = QuickDrawDataGroup(
            category,
            recognized=True,
            max_drawings=1000,
            cache_dir='bin-images',
            print_messages=False
        )

        # Directory name mirrors the category, with spaces dashed out.
        folder = category.replace(' ', '-')
        os.makedirs(f'images/{folder}', exist_ok=True)

        for drawing in drawings_group.drawings:
            drawing.image.save(f'images/{folder}/{drawing.key_id}.jpg')
# Script entry point: only download when executed directly, not on import.
if __name__ == '__main__':
    main()
evcouplings/utils/tracker/sql.py | mrunalimanj/EVcouplings | 117 | 12765242 | <reponame>mrunalimanj/EVcouplings
"""
SQL-based result tracker (cannot store actual results, only status).
Using this tracker requires installation of the sqlalchemy package.
Regarding using models from different sources in Flask-SQLAlchemy:
https://stackoverflow.com/questions/28789063/associate-external-class-model-with-flask-sqlalchemy
TODO: Note that this tracker doesn't handle job reruns gracefully yet, because the result field will be
progressively overwritten but not reset when the job is rerun.
Authors:
<NAME>
"""
from contextlib import contextmanager
import json
import os
from copy import deepcopy
from sqlalchemy import (
Column, Integer, String, DateTime, Text,
create_engine, func
)
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.exc import DBAPIError
from sqlalchemy.dialects import mysql
from evcouplings.utils.helpers import retry
from evcouplings.utils.config import InvalidParameterError
from evcouplings.utils.tracker import EStatus
from evcouplings.utils.tracker.base import ResultTracker
# create SQLALchemy declarative base for SQL models
Base = declarative_base()

# Name of the table holding one row per compute job (see ComputeJob below).
JOB_TABLE_NAME = "evcouplings_jobs"

# work around 65k limitation for mysql (without introducing max length, which would
# cause issues with postgresql)
# see here: https://github.com/sqlalchemy/sqlalchemy/issues/4443
LongText = Text().with_variant(mysql.LONGTEXT(), "mysql")
class SQLTracker(ResultTracker):
    """
    Tracks compute job results in an SQL backend
    """
    def __init__(self, **kwargs):
        """
        Create new SQL-based tracker. For now, this tracker will ignore file_list
        and store all file paths in the database except for those in delete_list.

        Parameters
        ----------
        connection_string : str
            SQLite connection URI. Must include database name,
            and username/password if authentication is used.
        job_id : str
            Unique job identifier of job which should be tracked
        prefix : str
            Prefix of pipeline job
        pipeline : str
            Name of pipeline that is running
        file_list : list(str)
            List of file item keys from outconfig that should
            be stored in database. For now, this parameter has no
            effect and all file paths will be stored in database.
        delete_list : list(str)
            List of file item keys from outconfig that will be deleted
            after run is finished. These files cannot be stored as paths
            to the pipeline result in the output.
        config : dict(str)
            Entire configuration dictionary of job
        retry_max_number : int, optional (default: None)
            Maximum number of attemps to perform database queries / updates.
            If None, will try forever.
        retry_wait : int, optional (default: None)
            Time in seconds between retries to connect to database
        """
        super().__init__(**kwargs)

        # for SQL tracker, job ID may not be longer than 255 chars to not interfere with older SQL DBs
        if len(self.job_id) > 255:
            raise InvalidParameterError(
                "Length of job_id for SQL tracker may not exceed 255 characters for database compatibility reasons"
            )

        # create SQLAlchemy engine and session maker to
        # instantiate later sessions
        self._engine = create_engine(self.connection_string)
        self._Session = sessionmaker(bind=self._engine)

        # Make sure all tables are there in database
        Base.metadata.create_all(bind=self._engine)

    @contextmanager
    def session_scope(self):
        """
        Provide a transactional scope around a series of operations.

        Source: https://docs.sqlalchemy.org/en/latest/orm/session_basics.html
        """
        session = self._Session()
        try:
            yield session
            session.commit()
        # deliberate catch-all: roll back on *any* failure, then re-raise
        except:
            session.rollback()
            raise
        finally:
            session.close()

    def get(self):
        """
        Return the current entry tracked by this tracker.
        Does not attempt to retry if database connection fails.
        """
        with self.session_scope() as session:
            query_res = session.query(
                ComputeJob
            ).filter_by(
                job_id=self.job_id
            ).all()

            # deep-copy so returned dicts stay valid after the session closes
            q = [
                deepcopy(x.__dict__) for x in query_res
            ]

        if len(q) == 0:
            return None

        if len(q) > 1:
            raise ValueError(
                "Job ID not unique, found more than one job."
            )
        else:
            return q[0]

    def _retry_query(self, func, session, rollback=True):
        """
        Retry database query until success or maximum number of attempts
        is reached

        NOTE: the `func` parameter shadows the sqlalchemy `func` import in
        this method's scope; harmless here since no SQL functions are built
        inside this method.

        Parameters
        ----------
        func : callable
            Query function that will be executed until successful
        session : sqlalchemy.orm.session.Session
            SQLALchemy database session
        rollback : bool, optional (default: True)
            Perform rollback of session before reattempt,
            can be set to False for read-only queries

        Returns
        -------
        Result of func()

        Raises
        ------
        ResourceError
            If execution is not successful within maximum
            number of attempts
        """
        if rollback:
            retry_action = session.rollback
        else:
            retry_action = None

        return retry(
            func,
            self.retry_max_number,
            self.retry_wait,
            exceptions=DBAPIError,
            retry_action=retry_action
        )

    def _execute_update(self, session, q, status=None, message=None, stage=None, results=None):
        """
        Wraps update to SQL database (to allow for retries)

        Parameters
        ----------
        session : sqlalchemy.orm.session.Session
            SQLALchemy database session
        q : sqlalchemy.orm.query.Query
            SQLAlchemy query if a job with self.job_id
            already exists

        For remaining parameters, see update()
        """
        # check if we already have some job
        num_rows = len(q.all())

        # create new entry if not already existing
        if num_rows == 0:
            # Note: do not initialize location here, since this should
            # be either set by outside code upon job creation,
            # or based on current working dir of running job
            r = ComputeJob(
                job_id=self.job_id,
                prefix=self.prefix,
                status=EStatus.INIT,
                config=json.dumps(self.config),
                pipeline=self.pipeline,
                time_created=func.now()
            )
            session.add(r)
        else:
            # can only be one row due to unique constraint
            r = q.one()

        # if status is given, update
        if status is not None:
            r.status = status

            # if we switch into running state, record
            # current time as starting time of actual computation
            if status == EStatus.RUN:
                r.time_started = func.now()

                # pragmatic hack to filling in the location if not
                # already set - can only do this based on current directory
                # inside pipeline runner (i.e. when job is started), since
                # any other code that creates the job entry may operate in a
                # different working directory (e.g. batch submitter in evcouplings app)
                if r.location is None:
                    r.location = os.getcwd()

        # if stage is given, update
        if stage is not None:
            r.stage = stage

        # set termination/fail message
        if message is not None:
            r.message = str(message)

        # update timestamp of last modification
        # (will correspond to finished time at the end)
        r.time_updated = func.now()

        # finally, also update results (stored as json)
        if results is not None:
            # first, extract current state in database to dict
            if r.results is not None:
                current_result_state = json.loads(r.results)
            else:
                current_result_state = {}

            # store everything in database except files that are
            # flagged for deletion on filesystem, since we only
            # store the file paths to these files
            result_update = {
                k: v for (k, v) in results.items() if k not in self.delete_list
            }

            # create result update, make sure update overwrites
            # any pre-existing keys
            new_result_state = {
                **current_result_state,
                **result_update
            }

            # finally, add updated result state to database record
            r.results = json.dumps(new_result_state)

        session.commit()

    def update(self, status=None, message=None, stage=None, results=None):
        """Create or update this job's database record, retrying on DB errors."""
        with self.session_scope() as session:
            # see if we can find the job in the database already
            q = self._retry_query(
                lambda: session.query(ComputeJob).filter_by(job_id=self.job_id),
                session=session,
                rollback=False
            )

            # then execute actual update
            self._retry_query(
                lambda: self._execute_update(session, q, status, message, stage, results),
                session=session,
                rollback=True
            )
class ComputeJob(Base):
    """
    Single compute job. Holds general information about job
    and its status, but not about individual parameters
    (these are stored in config file to keep table schema
    stable).
    """
    __tablename__ = JOB_TABLE_NAME

    # internal unique ID of this single compute job
    key = Column(Integer, primary_key=True)

    # human-readable job identifier (must be unique)
    job_id = Column(String(255), unique=True)

    # job prefix
    prefix = Column(String(2048))

    # job pipeline (monomer, complex, ...)
    pipeline = Column(String(128))

    # location - e.g., working dir, remote URI, asf
    location = Column(String(2048))

    # job status ("pending", "running", "finished",
    # "failed", "terminated")
    status = Column(String(128))

    # message upon job failure / termination
    # (e.g. exception, termination code, ...)
    # LongText maps to LONGTEXT on MySQL to avoid its 65k TEXT limit
    message = Column(LongText)

    # job identifier e.g. on compute cluster
    # e.g. if job should be stopped
    runner_id = Column(String(2048))

    # stage of computational pipeline
    # ("align", "couplings", ...)
    stage = Column(String(128))

    # time the job was created
    time_created = Column(DateTime())

    # time the job started running
    time_started = Column(DateTime())

    # time the job finished running; last
    # update corresponds to time job finished
    time_updated = Column(DateTime())

    # configuration of job (stringified JSON)
    config = Column(LongText)

    # Optional MD5 hash of configuration to identify
    # unique job configurations
    fingerprint = Column(String(32))

    # results of job (stringified JSON)
    results = Column(LongText)
| 2.171875 | 2 |
Surviving/main.py | jungwoohan72/DGN_pytorch | 48 | 12765243 | import math, random, copy
import numpy as np
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
from DGN import DGN
from buffer import ReplayBuffer
from surviving import Surviving
from config import *
# NOTE(review): loop state and hyper-parameters (capacity, hidden_dim,
# batch_size, n_episode, i_episode, epsilon, score, max_step, n_epoch, GAMMA)
# are expected to come from `from config import *` above — confirm they are
# all defined there, since none are initialised in this file.
USE_CUDA = torch.cuda.is_available()

env = Surviving(n_agent = 100)
n_ant = env.n_agent
observation_space = env.len_obs
n_actions = env.n_action

buff = ReplayBuffer(capacity)
# Online network and target network (synced every 5 episodes below).
model = DGN(n_ant,observation_space,hidden_dim,n_actions)
model_tar = DGN(n_ant,observation_space,hidden_dim,n_actions)
model = model.cuda()
model_tar = model_tar.cuda()
optimizer = optim.Adam(model.parameters(), lr = 0.0001)

# Pre-allocated minibatch arrays: observations, next observations and the
# agent adjacency matrices at t and t+1.
O = np.ones((batch_size,n_ant,observation_space))
Next_O = np.ones((batch_size,n_ant,observation_space))
Matrix = np.ones((batch_size,n_ant,n_ant))
Next_Matrix = np.ones((batch_size,n_ant,n_ant))

# Reward log; NOTE(review): never closed explicitly.
f = open('r.txt','w')
while i_episode<n_episode:
    # Linearly anneal exploration down to 0.1 after the warm-up episodes.
    if i_episode > 100:
        epsilon -= 0.0004
        if epsilon < 0.1:
            epsilon = 0.1
    i_episode+=1
    steps = 0
    obs, adj = env.reset()
    while steps < max_step:
        steps+=1
        action=[]
        q = model(torch.Tensor(np.array([obs])).cuda(), torch.Tensor(adj).cuda())[0]
        # Epsilon-greedy action selection per agent.
        for i in range(n_ant):
            if np.random.rand() < epsilon:
                a = np.random.randint(n_actions)
            else:
                a = q[i].argmax().item()
            action.append(a)
        next_obs, next_adj, reward, terminated = env.step(action)
        buff.add(np.array(obs),action,reward,np.array(next_obs),adj,next_adj,terminated)
        obs = next_obs
        adj = next_adj
        score += sum(reward)
    # Report average reward every 20 episodes.
    if i_episode%20==0:
        print(score/2000)
        f.write(str(score/2000)+'\n')
        score = 0
    # Skip training during the warm-up phase.
    if i_episode < 100:
        continue
    for e in range(n_epoch):
        batch = buff.getBatch(batch_size)
        for j in range(batch_size):
            sample = batch[j]
            O[j] = sample[0]
            Next_O[j] = sample[3]
            Matrix[j] = sample[4]
            Next_Matrix[j] = sample[5]
        q_values = model(torch.Tensor(O).cuda(), torch.Tensor(Matrix).cuda())
        target_q_values = model_tar(torch.Tensor(Next_O).cuda(), torch.Tensor(Next_Matrix).cuda()).max(dim = 2)[0]
        target_q_values = np.array(target_q_values.cpu().data)
        expected_q = np.array(q_values.cpu().data)
        # TD target: r + (1 - terminated) * GAMMA * max_a' Q_target(s', a')
        for j in range(batch_size):
            sample = batch[j]
            for i in range(n_ant):
                expected_q[j][i][sample[1][i]] = sample[2][i] + (1-sample[6])*GAMMA*target_q_values[j][i]
        loss = (q_values - torch.Tensor(expected_q).cuda()).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Hard-sync the target network every 5 episodes.
    if i_episode%5 == 0:
        model_tar.load_state_dict(model.state_dict())
| 1.929688 | 2 |
Desafios/desafio55.py | gustavodoamaral/115_Desafios_Python | 1 | 12765244 | <reponame>gustavodoamaral/115_Desafios_Python
# Read five weights from the user and report the smallest and largest.
pesos = []
for p in range(1, 6):
    peso = int(input("Digite o peso: "))
    pesos.append(peso)
# BUG FIX: the original message said "menor" (smallest) twice; the second
# placeholder prints max(pesos), so it must read "maior" (largest).
print("o menor peso é {} e o maior peso é {}".format(min(pesos), max(pesos)))
stock_activities/migrations/0015_alter_stockactivity_activity_type_id.py | ericpesto/Archeon-Django-REST-API | 1 | 12765245 | <reponame>ericpesto/Archeon-Django-REST-API
# Generated by Django 3.2.4 on 2021-06-05 22:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make StockActivity.activity_type_id a
    nullable/blank-able FK to ActivityDefinition (do not edit by hand)."""

    dependencies = [
        ('activity_definitions', '0008_alter_activitydefinition_activity_type_id'),
        ('stock_activities', '0014_alter_stockactivity_activity_type_id'),
    ]

    operations = [
        migrations.AlterField(
            model_name='stockactivity',
            name='activity_type_id',
            field=models.ForeignKey(blank=True, db_column='activity_type_id', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='activity_definitions.activitydefinition'),
        ),
    ]
| 1.546875 | 2 |
Transcribe-Comprehend/util/xer_py3.py | Napkin-DL/my-aws-example | 0 | 12765246 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from __future__ import unicode_literals # at top of module
import argparse
import logging
import sys
# Configure root logging once at import time; shows level, file and line number.
logging.basicConfig(
    format='%(levelname)s(%(filename)s:%(lineno)d): %(message)s')
def levenshtein(u, v):
    """Edit distance between sequences *u* and *v* plus operation counts.

    Returns ``(distance, (substitutions, deletions, insertions))`` for the
    cheapest alignment, computed with the classic two-row dynamic program.
    Ties are broken in the order substitution, deletion, insertion.
    """
    previous_row = None
    current_row = [0] + list(range(1, len(v) + 1))
    previous_ops = None
    # Operation tuples are (SUB, DEL, INS) counts.
    current_ops = [(0, 0, j) for j in range(len(v) + 1)]
    for i in range(1, len(u) + 1):
        previous_row, current_row = current_row, [i] + [None] * len(v)
        previous_ops, current_ops = current_ops, [(0, i, 0)] + [None] * len(v)
        for j in range(1, len(v) + 1):
            cost_del = previous_row[j] + 1
            cost_ins = current_row[j - 1] + 1
            cost_sub = previous_row[j - 1] + int(u[i - 1] != v[j - 1])
            best = min(cost_sub, cost_del, cost_ins)
            current_row[j] = best
            if best == cost_sub:
                n_s, n_d, n_i = previous_ops[j - 1]
                current_ops[j] = (n_s + int(u[i - 1] != v[j - 1]), n_d, n_i)
            elif best == cost_del:
                n_s, n_d, n_i = previous_ops[j]
                current_ops[j] = (n_s, n_d + 1, n_i)
            else:
                n_s, n_d, n_i = current_ops[j - 1]
                current_ops[j] = (n_s, n_d, n_i + 1)
    return current_row[len(v)], current_ops[len(v)]
def load_file(fname, encoding):
    """Read *fname* and return its lines with trailing newlines stripped.

    The file is decoded using *encoding*. On any I/O or decode error the
    problem is logged and the process exits with status 1, matching the
    original abort-on-unreadable-input behaviour.
    """
    try:
        # BUG FIX: open in text mode with an explicit encoding instead of
        # calling str.decode() on already-decoded lines (a Python 2 leftover
        # that raises AttributeError under Python 3). The with-statement also
        # guarantees the handle is closed even if reading fails.
        with open(fname, 'r', encoding=encoding) as f:
            return [line.rstrip('\n').rstrip('\r') for line in f]
    except (OSError, UnicodeError):
        logging.error('Error reading file "%s"', fname)
        exit(1)
def get_unicode_code(text):
    """Replace every non-ASCII character in *text* with a ``\\uXXXX`` escape."""
    escaped = []
    for char in text:
        code_point = ord(char)
        if code_point < 128:
            escaped.append(char)
        else:
            escaped.append('\\u' + format(code_point, 'x'))
    return ''.join(escaped)
def measure(transcription=None, reference=None, input_source='str', separator='\t', encoding='utf-8'):
    """Compute and print CER, WER and SER between reference and transcription.

    Parameters
    ----------
    transcription : str or None
        Hypothesis sentence ('str' mode) or file name ('file' mode);
        ignored in '-' mode.
    reference : str or None
        Reference sentence / file name, analogous to *transcription*.
    input_source : {'str', '-', 'file'}
        '-' reads separator-delimited reference/transcription pairs from
        stdin; 'str' treats the two arguments as sentences; 'file' treats
        them as parallel files with one sentence per line.
    separator : str
        Field separator used in '-' mode.
    encoding : str
        Character encoding used when decoding input text.

    Prints the three error rates as percentages; returns nothing.
    """
    if input_source != '-' and \
       (reference is None or transcription is None):
        logging.error('Expected reference and transcription sources')
        exit(1)
    ref, hyp = [], []
    if input_source == 'str':
        ref.append(reference)
        hyp.append(transcription)
        # ref.append(get_unicode_code(reference))
        # hyp.append(get_unicode_code(transcription))
    elif input_source == '-':
        line_n = 0
        for line in sys.stdin:
            line_n += 1
            # NOTE(review): str.decode() is a Python 2 idiom; under Python 3
            # sys.stdin yields str and this line raises AttributeError.
            line = line.rstrip('\n').rstrip('\r').decode(encoding)
            fields = line.split(separator)
            if len(fields) != 2:
                logging.warning(
                    'Line %d has %d fields but 2 were expected',
                    line_n, len(fields))
                continue
            ref.append(fields[0])
            hyp.append(fields[1])
    elif input_source == 'file':
        ref = load_file(reference, encoding)
        hyp = load_file(transcription, encoding)
        if len(ref) != len(hyp):
            logging.error(
                'The number of reference and transcription sentences does not '
                'match (%d vs. %d)', len(ref), len(hyp))
            exit(1)
    else:
        logging.error('INPUT FROM "%s" NOT IMPLEMENTED', input_source)
        exit(1)
    # Accumulators: substitutions, insertions, deletions, reference length.
    wer_s, wer_i, wer_d, wer_n = 0, 0, 0, 0
    cer_s, cer_i, cer_d, cer_n = 0, 0, 0, 0
    sen_err = 0
    for n in range(len(ref)):
        # update CER statistics (character-level edit operations)
        _, (s, i, d) = levenshtein(ref[n], hyp[n])
        cer_s += s
        cer_i += i
        cer_d += d
        cer_n += len(ref[n])
        # update WER statistics (word-level edit operations)
        _, (s, i, d) = levenshtein(ref[n].split(), hyp[n].split())
        wer_s += s
        wer_i += i
        wer_d += d
        wer_n += len(ref[n].split())
        # update SER statistics (sentence counts as wrong if any word edit)
        if s + i + d > 0:
            sen_err += 1
    if cer_n > 0:
        # NOTE(review): only cer_n is guarded; references consisting solely of
        # whitespace would leave wer_n == 0 and divide by zero below.
        print('CER: %g%%, WER: %g%%, SER: %g%%' % (
            (100.0 * (cer_s + cer_i + cer_d)) / cer_n,
            (100.0 * (wer_s + wer_i + wer_d)) / wer_n,
            (100.0 * sen_err) / len(ref)))
| 2.890625 | 3 |
pymtl3/stdlib/test_utils/test_srcs.py | kevinyuan/pymtl3 | 0 | 12765247 | """
========================================================================
Test sources
========================================================================
Test sources with CL or RTL interfaces.
Author : <NAME>
Date : Mar 11, 2019
"""
from collections import deque
from pymtl3 import *
from pymtl3.stdlib.ifcs import RecvCL2SendRTL, SendIfcRTL
#-------------------------------------------------------------------------
# TestSrcCL
#-------------------------------------------------------------------------
class TestSrcCL( Component ):
  """CL-level test source that feeds the given messages out of `send`.

  Waits `initial_delay` cycles before the first message and inserts
  `interval_delay` cycles between consecutive messages.
  """

  def construct( s, Type, msgs, initial_delay=0, interval_delay=0 ):

    s.send = CallerIfcCL( Type=Type )
    s.msgs = deque( msgs )

    # Countdown until the next send: starts at the initial delay and is
    # reloaded with the interval delay after each message goes out.
    s.count = initial_delay
    s.delay = interval_delay

    @update_once
    def up_src_send():
      if s.count > 0:
        s.count -= 1
      elif not s.reset:
        # Only send when the callee is ready and messages remain.
        if s.send.rdy() and s.msgs:
          s.send( s.msgs.popleft() )
          s.count = s.delay # reset count after a message is sent

  def done( s ):
    # Source is done once every message has been sent.
    return not s.msgs

  # Line trace
  def line_trace( s ):
    return "{}".format( s.send )
#-------------------------------------------------------------------------
# TestSrcRTL
#-------------------------------------------------------------------------
# TODO: deprecating TestSrcRTL.
class TestSrcRTL( Component ):
  """RTL-level test source: wraps TestSrcCL behind a CL-to-RTL adapter."""

  def construct( s, Type, msgs, initial_delay=0, interval_delay=0 ):

    # Interface
    s.send = SendIfcRTL( Type )

    # Components: CL source feeding an adapter that drives the RTL interface.
    s.src     = TestSrcCL( Type, msgs, initial_delay, interval_delay )
    s.adapter = RecvCL2SendRTL( Type )

    connect( s.src.send,     s.adapter.recv )
    connect( s.adapter.send, s.send         )

  def done( s ):
    # Delegates completion to the wrapped CL source.
    return s.src.done()

  # Line trace
  def line_trace( s ):
    return "{}".format( s.send )
| 2.21875 | 2 |
gym_wrapper/jsbsimgymenvironmentwrapper.py | lauritowal/JSBSim_gym_wrapper | 1 | 12765248 | import gym
from typing import List, Tuple, Dict
import numpy as np
from gym import spaces
from core.simulation import Simulation
from service import global_constants
class JsbsimGymEnvironmentWrapper(gym.Env):
    """Gym-style wrapper around the JSBSim flight-dynamics Simulation.

    The action is a single normalized throttle command in [0, 1]; the
    observation is the raw simulation state vector.
    """
    metadata = {'render.modes': ['human']}

    def __init__(self, configuration_path: str = global_constants.DEFAULT_CONFIGURATION_PATH):
        super(JsbsimGymEnvironmentWrapper, self).__init__()
        self.sim = Simulation(configuration_path=configuration_path)
        self._dimensions = 1
        # Throttle command, normalized to [0, 1].
        self.action_space = spaces.Box(
            low=0,
            high=1,
            shape=(self._dimensions,),
            dtype=np.float32
        )
        # BUG FIX: `low` was +np.inf, which describes an empty/degenerate box;
        # an unbounded observation space must span (-inf, +inf).
        self.observation_space = spaces.Box(
            low=-np.inf,
            high=np.inf,
            shape=self._getObs().shape,  # automatically matches the observation vector
            dtype=np.float32
        )

    def reset(self):
        # NOTE: nonstandard gym contract preserved for existing callers —
        # returns (obs, rewards, dones, info) instead of just obs.
        self.sim.reset_with_initial_condition()
        return self._getObs(), self._calcRewards(), self._calcDones(), {}

    def step(self, actions: List[np.ndarray]) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Dict]:
        """Apply the throttle command, advance the simulation one tick."""
        self.sim.set_properties('fcs/throttle-cmd-norm', actions[0])
        self.sim.run()
        return self._getObs(), self._calcRewards(), self._calcDones(), {}

    def _getObs(self) -> np.ndarray:
        # Flatten the simulation state dict (values only) into a vector.
        state = self.sim.get_state()
        return np.array(list(state.values()))

    def _calcRewards(self) -> np.ndarray:
        # Reward shaping not implemented yet: constant zero for the agent.
        rewAgent0 = 0
        return np.array([rewAgent0], dtype=np.float32)

    def _calcDones(self) -> np.ndarray:
        # Episodes never terminate from the wrapper's point of view.
        dones = np.zeros(1)
        return dones

    def render(self, mode='human'):
        pass

    def close(self):
        pass

    def seed(self, seed=None) -> None:
        pass
if __name__ == "__main__":
    # Smoke test: build the wrapper, sample one action and replay it ten times.
    env = JsbsimGymEnvironmentWrapper()
    ob = env.reset()
    action = env.action_space.sample()
    for _ in range(10):
        print(env.step(action))
| 2.25 | 2 |
YouIsAMazeMe/game/movingSprite.py | jakeard/YouIsAMazeMe | 0 | 12765249 | <reponame>jakeard/YouIsAMazeMe<filename>YouIsAMazeMe/game/movingSprite.py<gh_stars>0
# A subclass of arcade.sprite, and a superclass of every moving sprite.
import arcade
import game.constants as constants
class MovingSprite(arcade.Sprite):
"""
A super class that keeps track of all moving sprites
locations, directions, face, movement, state, etc.
"""
    def __init__(self, x, y):
        """Place the sprite at pixel coordinates (x, y) with no movement."""
        super().__init__()

        # Movement constants
        self.fixing = False        # True while reversing out of a blocked move
        self.is_disabled = False   # disabled sprites are parked off-screen
        self.center_x = x
        self.center_y = y
        self.is_moving = False
        self.direction = (0,0)     # grid direction of the current move
        self.current_pos = (self.center_x, self.center_y)
        self.target_pos = self.current_pos   # pixel position the move ends at
        self.initial_pos = self.current_pos  # position the current move started from
        self.disabled_pos = None   # position recorded by hide(), restored by unhide()
def set_move(self, direction):
if not self.is_disabled:
self.initial_pos = self.current_pos
self.direction = direction
self.is_moving = True
self.target_pos = ((self.center_x+(direction[0]*constants.TILE_SIZE)), (self.center_y+(direction[1]*constants.TILE_SIZE)))
def move(self):
"""Method that gets called during update, used to move."""
# Am I at my target location?
if self.target_pos != (self.center_x, self.center_y):
self.change_x = self.direction[0]*constants.MOVEMENT_SPEED
self.change_y = self.direction[1]*constants.MOVEMENT_SPEED
else:
self.direction = (0,0)
self.change_x = 0
self.change_y = 0
self.is_moving = False
if self.fixing:
self.fixing = False
def update(self):
"""The player's update class. Is run every game tick."""
if not self.is_disabled:
super().update()
# Make sure that the current position is up to date
self.current_pos = (self.center_x, self.center_y)
# Only run the move function if set_move has activated movement
if self.is_moving:
self.move()
else: # enter this block if I'm not moving!
if not self.is_moving and self.fixing:
self.fixing = False
#self._round_pos() # disabled, slows down program speed and still doesn't fix every error.
def bounce(self, direction=None):
"""Causes the sprite to reverse direction. Reverses current direction by default."""
self.fixing = True
if direction is None:
direction = self.direction
self.direction = (direction[0]*-1, direction[1]*-1)
self.target_pos = self.initial_pos
def hide(self):
"""Teleport them off screen, and disable their movement."""
if not self.is_disabled:
self.center_x,self.center_y = self.target_pos
self.disabled_pos = self.target_pos
self.change_x = 0
self.change_y = 0
self.is_disabled = True
self.is_moving = False
self.direction = (0,0)
self.center_x,self.center_y = (-64,-64)
def unhide(self):
self.center_x = 275
self.center_y = 389
if self.is_disabled:
self.is_disabled = False
self.center_x,self.center_y = self.disabled_pos
self.disabled_pos = None
def _round_pos(self):
"""Helper function, sets the sprite's location to the closest tile center."""
if not self.center_x%64==0:
x_offset = round(self.center_x/64)
self.center_x = x_offset*64
if not self.center_y%64==0:
y_offset = round(self.center_y/64)
self.center_y = y_offset*64
def collides_with_sprite(self, other) -> bool:
if not self.is_disabled:
return super().collides_with_sprite(other)
else:
return False
| 3.5625 | 4 |
lang/py/pylib/code/xml/etree/ElementTree/ElementTree_entity_references.py | ch1huizong/learning | 13 | 12765250 | <reponame>ch1huizong/learning<filename>lang/py/pylib/code/xml/etree/ElementTree/ElementTree_entity_references.py
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2010 <NAME>. All rights reserved.
#
"""Show how ElementTree expands entity references while parsing."""
#end_pymotw_header

from xml.etree import ElementTree

# Parse the document; entity references in both attribute values and text
# content are expanded by the parser.
with open('data.xml', 'rt') as f:
    tree = ElementTree.parse(f)

node = tree.find('entity_expansion')
# Converted from Python 2 print statements (a syntax error on Python 3);
# print() with comma-separated args emits the same space-joined output.
print(node.tag)
print(' in attribute:', node.attrib['attribute'])
print(' in text :', node.text.strip())
| 2.546875 | 3 |