| column | type | values / length range |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 3–1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3–972 |
| max_stars_repo_name | string | length 6–130 |
| max_stars_repo_head_hexsha | string | length 40–78 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 | 1–191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24–24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24–24 (nullable) |
| max_issues_repo_path | string | length 3–972 |
| max_issues_repo_name | string | length 6–130 |
| max_issues_repo_head_hexsha | string | length 40–78 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 | 1–116k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24–24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24–24 (nullable) |
| max_forks_repo_path | string | length 3–972 |
| max_forks_repo_name | string | length 6–130 |
| max_forks_repo_head_hexsha | string | length 40–78 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 | 1–105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24–24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24–24 (nullable) |
| content | string | length 3–1.03M |
| avg_line_length | float64 | 1.13–941k |
| max_line_length | int64 | 2–941k |
| alphanum_fraction | float64 | 0–1 |
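A hedged sketch of how rows with this schema could be loaded and inspected with the Hugging Face datasets library; the dataset identifier below is a placeholder, not taken from this page.

from datasets import load_dataset

# Stream the split so the large "content" fields are not all materialized in memory.
ds = load_dataset("org/python-source-dataset", split="train", streaming=True)  # placeholder dataset ID

for row in ds:
    # max_stars_count is nullable, so guard against None before comparing.
    if row["max_stars_count"] and row["max_stars_count"] > 100:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])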
hexsha: 4ccb34c0cea6589f24735d2351aa1b7f5a9d5641 | size: 213 | ext: py | lang: Python
max_stars: path api/handlers/youtube.py | repo LostLuma/repo-import-test | head 45273fc3543d21366ed3cc5007dc5680b1e3e546 | licenses ["MIT"] | count 1 | events 2020-01-27T17:42:30.000Z to 2020-01-27T17:42:30.000Z
max_issues: path api/handlers/youtube.py | repo LostLuma/repo-import-test | head 45273fc3543d21366ed3cc5007dc5680b1e3e546 | licenses ["MIT"] | count 59 | events 2021-11-17T08:21:59.000Z to 2022-03-29T08:29:55.000Z
max_forks: path api/handlers/youtube.py | repo SpoopySite/SpoopySite | head da68e454eee2a242e3df2ae8ef31bf1e50da571b | licenses ["MIT"] | count 3 | events 2020-01-26T23:19:24.000Z to 2021-09-25T07:07:59.000Z
content:
import urllib.parse
from urllib.parse import ParseResult


def youtube(parsed: ParseResult):
    # Redirect-style YouTube links carry the real destination in the "q" query parameter.
    query = urllib.parse.parse_qs(parsed.query)
    if "q" in query:
        return query["q"][0], True
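A hedged usage sketch for the handler above: it takes an already-parsed URL and returns the redirect target plus a flag when a "q" parameter is present (the example URL is illustrative only).

from urllib.parse import urlparse

parsed = urlparse("https://www.youtube.com/redirect?q=https%3A%2F%2Fexample.com%2F")
print(youtube(parsed))  # ('https://example.com/', True); returns None when no "q" parameter exists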
avg_line_length: 26.625 | max_line_length: 68 | alphanum_fraction: 0.732394
hexsha: 42643d698e9bee3626241082712d38f08f52d52f | size: 804 | ext: py | lang: Python
max_stars: path study_python/django/helloworld/helloworld/urls.py | repo AlphaSunny/study | head 4e65127fefa9078b7ae6b9db92369c93e61e4327 | licenses ["MIT"] | count null | events null
max_issues: path study_python/django/helloworld/helloworld/urls.py | repo AlphaSunny/study | head 4e65127fefa9078b7ae6b9db92369c93e61e4327 | licenses ["MIT"] | count null | events null
max_forks: path study_python/django/helloworld/helloworld/urls.py | repo AlphaSunny/study | head 4e65127fefa9078b7ae6b9db92369c93e61e4327 | licenses ["MIT"] | count null | events null
content:
"""helloworld URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('hello/', include('hello.urls')),
path('admin/', admin.site.urls),
]
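The URLconf above includes another URLconf at hello/, as described in the docstring; a minimal sketch of what the included hello/urls.py might contain (the view name is an assumption, not shown in this file):

from django.urls import path

from . import views

urlpatterns = [
    path('', views.index, name='index'),  # served at /hello/
]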
avg_line_length: 34.956522 | max_line_length: 77 | alphanum_fraction: 0.70398
hexsha: 620afd9236f1a042ef1aed36067a075755bbc4f2 | size: 1,147 | ext: py | lang: Python
max_stars: path messaging_service/server.py | repo Kusla75/py-sockets | head cfce1978c37e1ff48a1634cde15c06a1a3b377ab | licenses ["MIT"] | count null | events null
max_issues: path messaging_service/server.py | repo Kusla75/py-sockets | head cfce1978c37e1ff48a1634cde15c06a1a3b377ab | licenses ["MIT"] | count null | events null
max_forks: path messaging_service/server.py | repo Kusla75/py-sockets | head cfce1978c37e1ff48a1634cde15c06a1a3b377ab | licenses ["MIT"] | count null | events null
content:
import socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 420
server_socket.bind((host, port))
server_socket.listen(3)
socket_list = []
names_list = []
messages_history = []
client_socket, address = server_socket.accept()
name = client_socket.recv(1024).decode('utf-8')
names_list.append(name)
socket_list.append(client_socket)
print(f'Connection established with {name} with IP address {address[0]}')
client_socket, address = server_socket.accept()
name = client_socket.recv(1024).decode('utf-8')
names_list.append(name)
socket_list.append(client_socket)
print(f'Connection established with {name} with IP address {address[0]}')
while True:
message = socket_list[0].recv(1024)
if message:
socket_list[1].send(message)
print(f'Message sent from {names_list[0]} to {names_list[1]}')
messages_history.append(message.decode('utf-8'))
message = socket_list[1].recv(1024)
if message:
socket_list[0].send(message)
print(f'Message sent from {names_list[1]} to {names_list[0]}')
messages_history.append(message.decode('utf-8'))
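The server above accepts exactly two clients; each client first sends its display name, after which messages are relayed in a fixed alternating order (client 1 sends, client 2 replies). A hedged client sketch under those assumptions; the name and message text are illustrative, and the host/port must match what the server binds.

import socket

client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect((socket.gethostname(), 420))  # same host and port the server binds

client_socket.send('Alice'.encode('utf-8'))  # first payload is the display name
# In practice a short pause or explicit framing would be needed here, since TCP
# does not preserve message boundaries and the name could merge with the next send.

client_socket.send('Hello!'.encode('utf-8'))       # client 1 sends first
reply = client_socket.recv(1024).decode('utf-8')   # then waits for client 2's reply
print(reply)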
avg_line_length: 31.861111 | max_line_length: 73 | alphanum_fraction: 0.725371
hexsha: 328ec1eebe6e63377251a83f8a3d9fca25aaa36d | size: 334 | ext: py | lang: Python
max_stars: path byceps/permissions/webhook.py | repo GSH-LAN/byceps | head ab8918634e90aaa8574bd1bb85627759cef122fe | licenses ["BSD-3-Clause"] | count null | events null
max_issues: path byceps/permissions/webhook.py | repo GSH-LAN/byceps | head ab8918634e90aaa8574bd1bb85627759cef122fe | licenses ["BSD-3-Clause"] | count null | events null
max_forks: path byceps/permissions/webhook.py | repo GSH-LAN/byceps | head ab8918634e90aaa8574bd1bb85627759cef122fe | licenses ["BSD-3-Clause"] | count null | events null
content:
"""
byceps.permissions.webhook
~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from ..util.authorization import create_permission_enum
WebhookPermission = create_permission_enum(
'webhook',
[
'administrate',
'view',
],
)
avg_line_length: 17.578947 | max_line_length: 55 | alphanum_fraction: 0.634731
hexsha: 35dd9d80b06ede53e12cfb77e32784e1bbfbca74 | size: 1,673 | ext: py | lang: Python
max_stars: path rdr_service/alembic/versions/434fb0f05794_add_ignore_and_dev_note_to_genomics_.py | repo all-of-us/raw-data-repository | head d28ad957557587b03ff9c63d55dd55e0508f91d8 | licenses ["BSD-3-Clause"] | count 39 | events 2017-10-13T19:16:27.000Z to 2021-09-24T16:58:21.000Z
max_issues: path rdr_service/alembic/versions/434fb0f05794_add_ignore_and_dev_note_to_genomics_.py | repo all-of-us/raw-data-repository | head d28ad957557587b03ff9c63d55dd55e0508f91d8 | licenses ["BSD-3-Clause"] | count 312 | events 2017-09-08T15:42:13.000Z to 2022-03-23T18:21:40.000Z
max_forks: path rdr_service/alembic/versions/434fb0f05794_add_ignore_and_dev_note_to_genomics_.py | repo all-of-us/raw-data-repository | head d28ad957557587b03ff9c63d55dd55e0508f91d8 | licenses ["BSD-3-Clause"] | count 19 | events 2017-09-15T13:58:00.000Z to 2022-02-07T18:33:20.000Z
content:
"""add ignore and dev note to genomics models.
Revision ID: 434fb0f05794
Revises: 994dfe6e53ee
Create Date: 2020-09-30 14:39:16.244636
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '434fb0f05794'
down_revision = '994dfe6e53ee'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('genomic_gc_validation_metrics', sa.Column('dev_note', sa.String(length=255), nullable=True))
op.add_column('genomic_gc_validation_metrics', sa.Column('ignore_flag', sa.SmallInteger(), nullable=True))
op.add_column('genomic_set_member', sa.Column('dev_note', sa.String(length=255), nullable=True))
op.add_column('genomic_set_member_history', sa.Column('dev_note', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('genomic_set_member', 'dev_note')
op.drop_column('genomic_set_member_history', 'dev_note')
op.drop_column('genomic_gc_validation_metrics', 'ignore_flag')
op.drop_column('genomic_gc_validation_metrics', 'dev_note')
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
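The upgrade/downgrade entry points above dispatch on the engine name via globals(); a minimal sketch of how that lookup resolves (the multi-database Alembic environment that supplies engine_name is assumed, not shown in this migration):

# Within a migration run for the "rdr" engine, Alembic's env.py effectively invokes:
upgrade("rdr")    # looks up globals()["upgrade_rdr"] and adds the dev_note/ignore_flag columns
downgrade("rdr")  # looks up globals()["downgrade_rdr"] and drops them again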
avg_line_length: 29.350877 | max_line_length: 111 | alphanum_fraction: 0.710102
hexsha: c40550532466d39647c15b5c1544605769b4e808 | size: 7,839 | ext: py | lang: Python
max_stars: path src/five/formlib/metaconfigure.py | repo abstract-open-solutions/five.formlib | head 36fe931f8b0b44b6ff835265999fb509119f0ddf | licenses ["ZPL-2.1"] | count null | events null
max_issues: path src/five/formlib/metaconfigure.py | repo abstract-open-solutions/five.formlib | head 36fe931f8b0b44b6ff835265999fb509119f0ddf | licenses ["ZPL-2.1"] | count null | events null
max_forks: path src/five/formlib/metaconfigure.py | repo abstract-open-solutions/five.formlib | head 36fe931f8b0b44b6ff835265999fb509119f0ddf | licenses ["ZPL-2.1"] | count null | events null
content:
##############################################################################
#
# Copyright (c) 2004, 2005 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from AccessControl.security import protectClass
from AccessControl.class_init import InitializeClass
from ExtensionClass import Base
import zope.component
from zope.interface import Interface
from zope.i18nmessageid import MessageFactory
from zope.app.form.browser.metaconfigure import BaseFormDirective
from zope.browser.interfaces import IAdding
from zope.browsermenu.metaconfigure import menuItemDirective
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
from Products.Five.browser.pagetemplatefile import ZopeTwoPageTemplateFile
from Products.Five.browser.metaconfigure import SimpleViewClass
from five.formlib import EditView, AddView
_ = MessageFactory('zope')
def EditViewFactory(name, schema, label, permission, layer,
template, default_template, bases, for_, fields,
fulledit_path=None, fulledit_label=None, menu=u''):
class_ = SimpleViewClass(template, globals(), used_for=schema,
bases=bases)
class_.schema = schema
class_.label = label
class_.fieldNames = fields
class_.fulledit_path = fulledit_path
if fulledit_path and (fulledit_label is None):
fulledit_label = "Full edit"
class_.fulledit_label = fulledit_label
class_.generated_form = ZopeTwoPageTemplateFile(default_template)
if layer is None:
layer = IDefaultBrowserLayer
s = zope.component.getGlobalSiteManager()
s.registerAdapter(class_, (for_, layer), Interface, name)
# Reminder: the permission we got has already been processed by
# BaseFormDirective, that means that zope.Public has been
# translated to the CheckerPublic object
protectClass(class_, permission)
InitializeClass(class_)
class FiveFormDirective(BaseFormDirective):
def _processWidgets(self):
if self._widgets:
customWidgetsObject = type(
'CustomWidgetsMixin', (Base,), self._widgets)
self.bases = self.bases + (customWidgetsObject,)
class EditFormDirective(FiveFormDirective):
view = EditView
default_template = 'edit.pt'
title = _('Edit')
def _handle_menu(self):
if self.menu:
menuItemDirective(
self._context, self.menu, self.for_ or self.schema,
'@@' + self.name, self.title, permission=self.permission,
layer=self.layer)
def __call__(self):
self._processWidgets()
self._handle_menu()
self._context.action(
discriminator=self._discriminator(),
callable=EditViewFactory,
args=self._args(),
kw={'menu': self.menu},
)
def AddViewFactory(name, schema, label, permission, layer,
template, default_template, bases, for_,
fields, content_factory, arguments,
keyword_arguments, set_before_add, set_after_add,
menu=u''):
class_ = SimpleViewClass(template, globals(), used_for=schema,
bases=bases)
class_.schema = schema
class_.label = label
class_.fieldNames = fields
class_._factory = content_factory
class_._arguments = arguments
class_._keyword_arguments = keyword_arguments
class_._set_before_add = set_before_add
class_._set_after_add = set_after_add
class_.generated_form = ZopeTwoPageTemplateFile(default_template)
if layer is None:
layer = IDefaultBrowserLayer
s = zope.component.getGlobalSiteManager()
s.registerAdapter(class_, (for_, layer), Interface, name)
# Reminder: the permission we got has already been processed by
# BaseFormDirective, that means that zope.Public has been
# translated to the CheckerPublic object
protectClass(class_, permission)
InitializeClass(class_)
class AddFormDirective(FiveFormDirective):
view = AddView
default_template = 'add.pt'
for_ = IAdding
# default add form information
description = None
content_factory = None
arguments = None
keyword_arguments = None
set_before_add = None
set_after_add = None
def _handle_menu(self):
if self.menu or self.title:
if (not self.menu) or (not self.title):
raise ValueError("If either menu or title are specified, "
"they must both be specified")
# Add forms are really for IAdding components, so do not use
# for=self.schema.
menuItemDirective(
self._context, self.menu, self.for_, '@@' + self.name,
self.title, permission=self.permission, layer=self.layer,
description=self.description)
def _handle_arguments(self, leftover=None):
schema = self.schema
fields = self.fields
arguments = self.arguments
keyword_arguments = self.keyword_arguments
set_before_add = self.set_before_add
set_after_add = self.set_after_add
if leftover is None:
leftover = fields
if arguments:
missing = [n for n in arguments if n not in fields]
if missing:
raise ValueError("Some arguments are not included in the form",
missing)
optional = [n for n in arguments if not schema[n].required]
if optional:
raise ValueError("Some arguments are optional, use"
" keyword_arguments for them",
optional)
leftover = [n for n in leftover if n not in arguments]
if keyword_arguments:
missing = [n for n in keyword_arguments if n not in fields]
if missing:
raise ValueError(
"Some keyword_arguments are not included in the form",
missing)
leftover = [n for n in leftover if n not in keyword_arguments]
if set_before_add:
missing = [n for n in set_before_add if n not in fields]
if missing:
raise ValueError(
"Some set_before_add are not included in the form",
missing)
leftover = [n for n in leftover if n not in set_before_add]
if set_after_add:
missing = [n for n in set_after_add if n not in fields]
if missing:
raise ValueError(
"Some set_after_add are not included in the form",
missing)
leftover = [n for n in leftover if n not in set_after_add]
self.set_after_add += leftover
else:
self.set_after_add = leftover
def __call__(self):
self._processWidgets()
self._handle_menu()
self._handle_arguments()
self._context.action(
discriminator=self._discriminator(),
callable=AddViewFactory,
args=self._args() + (self.content_factory, self.arguments,
self.keyword_arguments,
self.set_before_add, self.set_after_add),
kw={'menu': self.menu},
)
avg_line_length: 35.470588 | max_line_length: 79 | alphanum_fraction: 0.626738
hexsha: 1e7a60cf29eda25ef782ad4e5b20fb4ddf5a5f48 | size: 69,753 | ext: py | lang: Python
max_stars: path 500SIGSandGS/Code to Run/Sim500_5_05_25.py | repo alistairjwilson/nrmpInterviews_SimData | head 8c99ff0d9a4e9db70dec5f7ef92b98054bf42a83 | licenses ["MIT"] | count null | events null
max_issues: path 500SIGSandGS/Code to Run/Sim500_5_05_25.py | repo alistairjwilson/nrmpInterviews_SimData | head 8c99ff0d9a4e9db70dec5f7ef92b98054bf42a83 | licenses ["MIT"] | count null | events null
max_forks: path 500SIGSandGS/Code to Run/Sim500_5_05_25.py | repo alistairjwilson/nrmpInterviews_SimData | head 8c99ff0d9a4e9db70dec5f7ef92b98054bf42a83 | licenses ["MIT"] | count null | events null
content:
# -*- coding: utf-8 -*-
"""faster_simulations_sigs_revised.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1_-oQDQXJCYRE3JnPBMlYaLIdoBOpX25f
"""
import numpy as np
import pandas as pd
class Worker(object):
name = ""
p = [] #original preferences, list of strings of names of hospitals ranked, unchanged
p_dict = {} # same as p but as a mapping of hospital name to ranking in p
ranking = [] #list of strings of names of hospitals ranked
matchings = [] #list of hospitals that worker is matched to, t-algorithm
wishes = [] #list of names of hospitals used in Gale-Shapley algorithm as rankings submitted
wishes_dict = {} # same as above but a dictionary mapping name to ranking in p'
current = None #hospital that worker is currently matched to in Gale-Shapley algorithm
interviews = [] #simulated interviews, list of hospitals
choices = [] #list of hospitals that worker wants to interview with, ranked
pref = [] # this is the total utilities that hospitals provide this worker
def __init__(self, cu, name, we,n):
'''
cu is common utility
name is in format WN where N is a number
we is the weight for common utility
n is the number of hospitals
'''
self.name=name
self.generateP(cu, we, n)
self.start()
def start(self):
''' sets starting conditions '''
self.match([])
self.matchings = []
self.wishes = []
self.setWishes()
self.interviews = []
self.current = None
def generateP(self,cu,we,n):
''' generates the preferences p using cu and iu'''
self.pref = [np.random.normal(0, 1) * (1 - we) + cu[x] * we for x in range(n)] # weighted utility for each hospital: common utility weighted by we, idiosyncratic utility by (1 - we)
self.ranking=["H" + str(x + 1) for x in range(n)]
# ranks hospitals based on their weighted utility, highest utility ranked first
self.pref, self.ranking = self.fastersort(self.pref, self.ranking)
self.p = []
self.p_dict = {}
index = 1
for x in self.ranking:
self.p.append(x)
self.p_dict[x] = index
index += 1
def setChoices(self, hospitals):
''' sets choices with ranked list of hospitals '''
self.choices = []
for h in self.p:
self.choices.append(self.getEq(h, hospitals))
def generateP2(self,cu,n):
''' generates the preferences using cu only'''
self.ranking= ["H" + str(x + 1) for x in range(n)]
pref = []
for c in cu:
pref.append(c)
pref, self.ranking = self.fastersort(pref, self.ranking)
self.p_dict = {}
index = 1
for x in self.ranking:
self.p_dict[x] = index
index += 1
def fastersort(self, pref, ranking):
return zip(*sorted(zip(pref, ranking), reverse=True))
def bubblesort(self, pref, ranking):
''' sorts/ranks the hospitals based on the preferences'''
for i in range(len(pref)):
for k in range(len(pref) - 1, i, -1):
if(pref[k] > pref[k - 1]):
self.swap(pref, ranking, k, k - 1)
def swap(self, pref, ranking, x, y):
''' helper function for the bubble sort that swaps two hospitals in ranking '''
temp1 = pref[x]
temp2 = ranking[x]
pref[x] = pref[y]
ranking[x] = ranking[y]
pref[y] = temp1
ranking[y] = temp2
def checkAvailability(self, goal, agent):
'''
agent is a hospital
goal is number of desired matchings in T-algorithm (max number of interviews for hospitals)
returns True if hospital is available, False otherwise
'''
if(not agent.getMatchings() or len(agent.getMatchings())<goal):
return True
if(len(agent.getMatchings())>goal):
x = (agent.getMatchings()[:goal])[-1]
else:
x = agent.getMatchings()[-1]
ranking = agent.getP_dict()
return ranking[self.getName()] <= ranking[x.getName()]
def getOldRank(self, agent, other=None):
index = 1
for a in agent.getRanking():
if other is not None and a == other.getName():
return index
elif other is None and a == self.getName():
return index
index += 1
def proposed(self):
''' simulates a worker proposing'''
if(len(self.wishes) > 0):
self.wishes = self.wishes[1:]
def getEq(self, n, hospitals):
''' returns hospital with certain name (Hn)'''
for h in hospitals:
if(h.getName() == n):
return h
return None
def getOrig(self):
''' return the original (untruncated) preference p '''
return self.p
def getHospitalUtilities(self):
'''Returns the utilities that hospitals provide this worker, sorted in the
Same order as p (so in decreasing total utility)'''
return self.pref
def getTotalUtilityByHospital(self, hospital_name):
for i in range(len(self.p)):
if (self.p[i] == hospital_name):
return self.pref[i]
def setOrig(self, op):
''' set the original preference p'''
self.ranking = []
self.p = []
self.p_dict = {}
index = 1
for o in op:
self.ranking.append(o)
self.p.append(o)
self.p_dict[o] = index
index +=1
def getPref(self, hospitals):
''' returns the top unproposed hospital for this worker '''
h = self.getEq(self.wishes[0], hospitals)
self.proposed()
return h
def getP_dict(self):
return self.p_dict
def match(self, avail):
'''
avail is list of hospitals that are available for this worker
function matches worker to these hospitals
'''
self.matchings = []
for m in avail:
self.matchings.append(m)
def judge(self, h):
'''
Used in Gale-Shapley algorithm when hospitals propose
Returns rejected hospital if any (so if None returned, then matched for now)
'''
if(len(self.wishes) == 1 and self.wishes[0] != h.getName()):
return h
x = None
for i in range(len(self.wishes)):
if(self.wishes[i] == h.getName()):
self.wishes = self.wishes[:i + 1]
if(self.current is not None):
self.current.setMatch(None)
x = self.current
self.current = h
h.setMatch(self)
return x
return h
def interview(self, h):
self.interviews.append(h)
def getTopChoice(self):
''' Returns top choice hospital unproposed to
And removes it from unproposed choices list '''
h = self.choices[0]
self.choices = self.choices[1:]
return h
def setMatch(self, h):
''' this is used in Gale-Shapley to set deferred acceptance match for this worker, h is hospital '''
self.current = h
def setWishes(self):
''' sets rankings (wishes) for Gale-shapley algorithm based on matchings produced by T-algorithm'''
self.wishes = []
self.wishes_dict = {}
index = 1
for r in self.matchings:
self.wishes.append(r.getName())
self.wishes_dict[r.getName()] = index
index += 1
def setWishes2(self):
''' sets rankings(wishes) for Gale-Shapley algorithm based on original preferences '''
self.wishes = []
self.wishes_dict = {}
index = 1
for r in self.ranking:
self.wishes.append(r)
self.wishes_dict[r] = index
index += 1
def setWishes3(self, max_interviews=5):
''' sets rankings(wishes) for Gale-Shapley algorithm based on truncated original preferences (top 5)'''
self.wishes = []
self.wishes_dict = {}
index = 1
for r in self.ranking:
self.wishes.append(r)
self.wishes_dict[r] = index
index += 1
# Note: pass a different value for max_interviews to change the length of the truncated preferences
self.wishes = self.wishes[:max_interviews]
def setWishes4(self):
''' sets rankings (wishes) for gale shapley algorithm based on matchings from simulated interviews'''
self.wishes = []
self.matchings = []
self.wishes_dict = {}
index = 1
for r in self.interviews:
self.wishes.append(r.getName())
self.matchings.append(r)
self.wishes_dict[r.getName()] = index
index += 1
def checkBlocking(self, h2):
''' returns number of blocking pairs contain this worker '''
count = 0
for h in self.p:
if(self.current is not None and h == self.current.getName()):
return count
if(self.getEq(h, h2).block(self)):
count += 1
return count
def checkBlocking2(self,h2):
''' returns number of blocking pairs that contain this worker, but only blocking pairs where both agents are matched'''
count = 0
if(self.current is None):
return 0
for h in self.p:
if(h == self.current.getName()):
return count
if(self.getEq(h, h2).block2(self)):
count += 1
return count
def block(self, h):
''' returns True if this worker with h is a blocking pair '''
if(self.current is None): #since every hospital is ranked in p, then if worker is unmatched it prefers h to being unmatched
return True
for h2 in self.p:
if(h2 == self.current.getName()):
return False
if(h2 == h.getName()):
return True
return False
def getInterviews(self):
return self.interviews
def getChoices(self):
return self.choices
def getName(self):
''' returns string with name in form Wn where n is number '''
return self.name
def getMatchings(self):
''' returns list of hospitals '''
return self.matchings
def getRanking(self):
''' returns list of strings of names of hospitals ranked '''
return self.ranking
def getWishes(self):
''' returns list of strings of names of hospitals that will be ranked '''
return self.wishes
def getCurrent(self):
''' returns current match (hospital) during Gale Shapley algorithm '''
return self.current
# note that the hospital class is identical since roles during T-algorithm and Gale-Shapley can be switched depending on who's proposing
class Hospital(object):
name = ""
p = [] #original preferences, list of strings of names of workers ranked, unchanged
p_dict = {} # same as above but as a dictionary
ranking = [] #list of strings of names of workers ranked
matchings = [] #list of workers that hospital is matched to, t-algorithm
wishes = [] #list of names of workers used in Gale-Shapley algorithm as rankings submitted
wishes_dict = {} # same as above but as dictionary
current = None #worker that hospital is currently matched to in Gale-Shapley algorithm assuming One-to-One matching
interviews = [] #simulated interviews, list of doctors
iu = [] #idiosyncratic utility that workers provide for this hospital
iu_sorted = [] # idiosyncratic utility that workers provide for this hospital, in same order as p
utilities_sorted = [] # combination of idiosyncratic and common utility, in same order as p
cu_sorted = [] # common utility, in same order as p
cu_unsorted = [] # cuw in original order, used to sort other lists in same order
def __init__(self, cu, name,we,n):
'''
Cu is common utility
Name is in format HN where N is number
we is weight for common utility
n is number of doctors
'''
self.name = name
self.iu = []
self.iu_sorted = []
self.utilities_sorted = []
self.cu_sorted = []
self.cu_unsorted = []
self.generateP(cu, we, n)
self.start()
self.interviews = []
def start(self):
''' sets starting conditions '''
self.match([])
self.matchings = []
self.wishes = []
self.setWishes()
self.current = None
def generateP(self,cu,we,n):
''' this generates p using cu and iu (common and idiosyncratic utility weights)'''
# First, let's store this order of common utility
self.cu_unsorted = [c for c in cu]
self.iu = [np.random.normal(0, 1) for x in range(n)]
self.iu_sorted = [x for x in self.iu]
self.utilities_sorted = []
self.cu_sorted = [x for x in cu]
pref = [self.iu[x] * (1 - we) + cu[x] * we for x in range(n)] # weighted utility for each worker: common utility weighted by we, idiosyncratic utility by (1 - we)
for x in pref:
self.utilities_sorted.append(x)
self.ranking = ["W" + str(x + 1) for x in range(n)]
# ranks hospitals based on their weighted utility, highest utility ranked first
pref, self.ranking = self.fastersort(pref, self.ranking)
self.p = []
self.p_dict = {}
index = 1
for x in self.ranking:
self.p.append(x)
self.p_dict[x] = index
index += 1
# now we also sort iu, cu, and total utility that doctors provide in order of decreasing cu (so doctor with highest cu is first)
# the commented out lines below can be used to check that outcome is consistent with previous sorting method
#cu_sorted_copy = [item for item in self.cu_sorted]
#iu_sorted_copy = [item for item in self.iu_sorted]
#utilities_sorted_copy = [item for item in self.utilities_sorted]
#self.bubblesort2(cu_sorted_copy, iu_sorted_copy, utilities_sorted_copy)
self.cu_sorted, self.iu_sorted, self.utilities_sorted = self.fastersort2(self.cu_sorted, self.iu_sorted, self.utilities_sorted)
#for a, b in zip(cu_sorted_copy, self.cu_sorted):
# assert a == b, f"Old {a}, New: {b}"
#for a, b in zip(iu_sorted_copy, self.iu_sorted):
# assert a == b, f"Old {a}, New: {b}"
#for a, b in zip(utilities_sorted_copy, self.utilities_sorted):
# assert a == b, f"Old {a}, New: {b}"
def generateP2(self,cu,n):
''' this generates p using cu only '''
self.ranking = ["W" + str(x + 1) for x in range(n)]
pref = []
for c in cu:
pref.append(c)
self.cu_unsorted = [c for c in cu]
pref, self.ranking = self.fastersort(pref, self.ranking)
self.p = []
self.p_dict = {}
index = 1
for x in self.ranking:
self.p.append(x)
self.p_dict[x] = index
index += 1
def fastersort(self, pref, ranking):
return zip(*sorted(zip(pref, ranking), reverse=True))
def bubblesort(self, pref, ranking):
''' used to get the rankings from the preferences '''
for i in range(len(pref)):
for k in range(len(pref) - 1, i, -1):
if(pref[k] > pref[k - 1]):
self.swap(pref, ranking, k, k - 1)
def bubblesort2(self, cu, iu, u):
''' used to get the rankings from the preferences '''
for i in range(len(cu)):
for k in range(len(cu) - 1, i, -1):
if(cu[k] > cu[k - 1]):
self.swap2(cu, iu, u, k, k - 1)
def fastersort2(self, pref, ranking1, ranking2):
return zip(*sorted(zip(pref, ranking1, ranking2), reverse=True))
def swap(self, pref, ranking, x, y):
''' helper function for bubble sort'''
temp1 = pref[x]
temp2 = ranking[x]
pref[x] = pref[y]
ranking[x] = ranking[y]
pref[y] = temp1
ranking[y] = temp2
def swap2(self, cu, iu, u, x, y):
''' helper function for bubble sort'''
temp1 = cu[x]
temp2 = iu[x]
temp3 = u[x]
cu[x] = cu[y]
iu[x] = iu[y]
u[x] = u[y]
cu[y] = temp1
iu[y] = temp2
u[y] = temp3
def checkAvailability(self, goal, agent):
'''
agent is a worker
goal is number of desired matchings in T-algorithm (max number of interviews for hospitals)
returns True if hospital is available, False otherwise
'''
if(not agent.getMatchings() or len(agent.getMatchings()) < goal):
return True
if(len(agent.getMatchings()) > goal):
x = (agent.getMatchings()[:goal])[-1]
else:
x = agent.getMatchings()[-1]
ranking = agent.getP_dict()
return ranking[self.getName()] <= ranking[x.getName()]
def getOldRank(self, agent, other=None):
index = 1
for a in agent.getRanking():
if other is not None and a == other.getName():
return index
elif other is None and a == self.getName():
return index
index += 1
def proposed(self):
''' simulates a hospital proposing '''
if(len(self.wishes) > 0):
self.wishes = self.wishes[1:]
def interview(self, w):
self.interviews.append(w)
def getEq(self, n, workers):
''' returns worker with certain name '''
for w in workers:
if(w.getName() == n):
return w
return None
def getPref(self,workers):
''' returns the top unproposed worker for this hospital '''
w = self.getEq(self.wishes[0], workers)
self.proposed()
return w
def getP_dict(self):
return self.p_dict
def match(self, avail):
''' avail is list of workers that are available for this hospital
this function matches hospital to those workers'''
self.matchings = []
for m in avail:
self.matchings.append(m)
def judge(self,w):
''' used in Gale-Shapley algorithm when workers propose
returns rejected worker if any '''
if(len(self.wishes) == 1 and self.wishes[0]!= w.getName()):
return w
x = None
for i in range(len(self.wishes)):
if(self.wishes[i] == w.getName()):
self.wishes = self.wishes[:i + 1]
if(self.current is not None):
self.current.setMatch(None)
x = self.current
self.current = w
w.setMatch(self)
return x
return w
def setMatch(self, w):
''' this is used in Gale-Shapley to set deferred acceptance match for this hospital
w is worker'''
self.current = w
def setWishes(self):
''' sets rankings (wishes) for Gale-shapley algorithm based on matchings produced by T-algorithm'''
self.wishes = []
self.wishes_dict = {}
index = 1
for r in self.matchings:
self.wishes.append(r.getName())
self.wishes_dict[r.getName()] = index
index += 1
def setWishes2(self):
''' sets rankings(wishes) for Gale-Shapley algorithm based on original preferences'''
self.wishes = []
self.wishes_dict = {}
index = 1
for r in self.ranking:
self.wishes.append(r)
self.wishes_dict[r] = index
index += 1
def setWishes3(self, max_interviews=5):
''' sets rankings(wishes) for Gale-Shapley algorithm based on truncated original preferences'''
self.wishes = []
self.wishes_dict = {}
index = 1
for r in self.ranking:
self.wishes.append(r)
self.wishes_dict[r] = index
index += 1
# Note: pass a different value for max_interviews to change the length of the truncated preferences
self.wishes = self.wishes[:max_interviews]
def setWishes4(self, cuh,we):
'''
cuh is common utility workers provide
this adds idiosyncratic shock to matches from simulated interviews,
we is weight for common utility
'''
utilities = []
pref = []
for w in self.interviews:
utilities.append(cuh[int(w.getName()[1:]) - 1] * we + self.iu[int(w.getName()[1:]) - 1] * (1 - we))
pref.append(w)
self.bubblesort(utilities, pref)
self.wishes = []
self.wishes_dict = {}
index = 1
for p in pref:
self.wishes.append(p.getName())
self.wishes_dict[p.getName()] = index
index += 1
def block(self, w):
''' returns True if this hospital with w is a blocking pair '''
if(self.current is None): #since every worker is ranked in p, then if hospital is unmatched it prefers w to being unmatched
return True
for h in self.p:
if(h == self.current.getName()):
return False
if(h == w.getName()):
return True
def block2(self,w):
''' returns True if this hospital with w is a blocking pair, but only if this hospital is matched'''
if(self.current is None):
return False
for h in self.p:
if (h == self.current.getName()):
return False
if (h == w.getName()):
return True
def getIU(self):
''' returns list of idiosyncratic utility values'''
return self.iu
def getIU_sorted(self):
'''returns list of idiosyncratic utility values that doctors, ranked in order of p, provide'''
return self.iu_sorted
def getCU_sorted(self):
'''returns list of common utility values that doctors, ranked in order of p, provide'''
return self.cu_sorted
def getUtilities_sorted(self):
'''returns list of utility values that doctors, ranked in order of p, provide'''
return self.utilities_sorted
def setIU(self, i):
''' sets idiosyncratic utility values '''
self.iu = []
for val in i:
self.iu.append(val)
def getInterviews(self):
return self.interviews
def getName(self):
''' returns name (string) in format Hn where n is number'''
return self.name
def getMatchings(self):
''' returns list of workers that hospital is matched to '''
return self.matchings
def getRanking(self):
''' returns list of strings of names of workers ranked '''
return self.ranking
def setRanking(self, another_ranking):
self.ranking = []
self.p = []
self.p_dict = {}
index = 1
for r in another_ranking:
self.ranking.append(r)
self.p.append(r)
self.p_dict[r] = index
index += 1
def getWishes(self):
''' returns list of strings of names of workers that will be ranked '''
return self.wishes
def getCurrent(self):
''' returns current match (worker) during Gale Shapley algorithm '''
return self.current
def a(goal, w, hospitals):
''' this is the part of the T-algorithm where each worker "proposes"
returns top n (or less) hospitals
where n is desired number of matchings'''
available = []
for h in hospitals:
if(w.checkAvailability(goal, h)):
available.append(h)
# available contains all hospitals available to this worker
if len(available) > 0:
temp, available = fastersort(getEquiv(available, w), available)
if(len(available) <= goal):
return available
else:
top = available[:goal]
return top
def b(goal, h, workers):
''' same as above but this time hospital "proposes" '''
available = []
for w in workers:
if(h.checkAvailability(goal, w)):
available.append(w)
if len(available) > 0:
temp, available = fastersort(getEquiv(available, h), available)
if(len(available) <= goal):
return available
else:
top = available[:goal]
return top
def fastersort(pref, ranking):
# note this sorts in ascending order
# previous sorts sort in descending order since the first argument, "pref"
# represents utilities
# here, "pref" represents a ranking, so we want "1" to be first
return zip(*sorted(zip(pref, ranking)))
def bubblesort(pref, ranking):
# note this sorts in ascending order
for i in range(len(pref)):
for k in range(len(pref) - 1, i, -1):
if(pref[k] < pref[k - 1]):
swap(pref,ranking, k, k - 1)
def swap(pref, ranking, x, y):
temp1 = pref[x]
temp2 = ranking[x]
pref[x] = pref[y]
ranking[x] = ranking[y]
pref[y] = temp1
ranking[y] = temp2
def getEquiv(avail, judge):
equiv = []
for a in avail:
equiv.append(getRank(a, judge))
return equiv
def getRank(desired, judge):
ranking = judge.getP_dict()
return ranking[desired.getName()]
def iteration(goal, workers, hospitals):
''' this is one iteration of t-algorithm
this is done until no changes are made'''
todo = []
total = []
for w in workers:
todo.append(a(goal, w, hospitals))
for h in hospitals:
todo.append(b(goal, h, workers))
for w in workers:
w.match(todo.pop(0))
total.append(w.getMatchings())
for h in hospitals:
h.match(todo.pop(0))
total.append(h.getMatchings())
return total
def equate(past, current):
''' checks if matchings remain same in t-algorithm in two consecutive iterations of "proposals" '''
if(len(past) != len(current)):
return False
for i in range(len(past)):
if(len(past[i]) != len(current[i])):
return False
for j in range(len(past[i])):
if(past[i][j] != current[i][j]):
return False
return True
def getPreM(agent):
''' gets prematchings for t-algorithm '''
updated = []
for w in agent.getRanking():
for j in agent.getMatchings():
if(w == j.getName()):
updated.append(j)
continue
return updated
def proposals(workers,hospitals):
''' gale shapley algorithm where workers propose
If order of parameters is switched then hospitals propose '''
while(len(workers)>0):
y = None
for i in range(len(workers)):
w = workers[0]
y = None
if(len(w.getWishes()) > 0 and w.getCurrent() is None):
y = w.getPref(hospitals).judge(w)
workers = workers[1:]
if(y is not None):
workers.append(y)
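# --- Hypothetical smoke test (editor's illustration, not part of the original script) ---
# Builds a tiny market with the classes above and runs plain deferred acceptance
# (doctors proposing) on full, untruncated preferences; the sizes and weights are arbitrary.
def _demo_deferred_acceptance(num_doctors=4, num_hospitals=4, we_doctors=0.6, we_hospitals=0.7):
    np.random.seed(0)
    cuw = [np.random.normal(0, 1) for _ in range(num_hospitals)]  # common utility hospitals provide
    cuh = [np.random.normal(0, 1) for _ in range(num_doctors)]    # common utility doctors provide
    doctors = [Worker(cuw, "W" + str(i), we_doctors, num_hospitals) for i in range(1, num_doctors + 1)]
    hospitals = [Hospital(cuh, "H" + str(i), we_hospitals, num_doctors) for i in range(1, num_hospitals + 1)]
    for w in doctors:
        w.setWishes2()  # report full original preferences
    for h in hospitals:
        h.setWishes2()
    proposals(doctors, hospitals)  # Gale-Shapley with doctors proposing
    for w in doctors:
        match = w.getCurrent()
        print(w.getName(), "->", match.getName() if match else "unmatched")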
def getWorker(name,workers):
for w in workers:
if(w.getName()==name):
return w
def getNameToWorkerDict(workers):
nameToWorker = {}
for w in workers:
nameToWorker[w.getName()] = w
return nameToWorker
def getR(w):
''' gets rank of hospital w is matched to based on p' (t-algorithm results)
Note that p' is reported preferences '''
h = w.getCurrent()
if(h is None):
return 0 # unmatched
for i in range(len(w.getMatchings())):
if(w.getMatchings()[i].getName() == h.getName()):
return i + 1
def getR2(w):
''' gets the rank of hospital w is matched to based on p (original preferences)
(this is based on matchings after both t-algorithm and Gale Shapley are run) '''
h = w.getCurrent()
if(h is None):
return 0 # unmatched
for i in range(len(w.getRanking())):
if(w.getRanking()[i] == h.getName()):
return i + 1
def getR3(w, pprime):
''' gets the rank of w's current match within pprime (the reported preferences p', built from IU and CU);
used for both doctors and hospitals '''
h = w.getCurrent()
if(h is None):
return 0 # unmatched
for i in range(len(pprime)):
if(pprime[i] == h.getName()):
return i + 1
return -1
def fillInUtilities(hospital, hospital_names, worker_index, cu_sorted, iu_sorted, utilities_sorted):
'''
Hospital_names, cu_sorted, iu_sorted, utilities_sorted are all in the same order (each entry is a preference list for a hospital)
Within the cu_sorted, iu_sorted, utilities_sorted lists, the preference lists are ordered with the highest-CU doctor first
worker_index is 0 for the doctor with highest CU, 1 for second highest, etc
hospital is the hospital that the worker matched to (so we can match its name to hospital_name to get its preference)
'''
hospital_index = 0
for h in hospital_names:
if (hospital.getName() == h):
break
hospital_index += 1
return cu_sorted[hospital_index][worker_index], iu_sorted[hospital_index][worker_index], utilities_sorted[hospital_index][worker_index]
def getDoctorsAndHospitals(num_doctors, num_hospitals, we_doctors, we_hospitals):
'''
we_doctors is weight for common utility for doctors
we_hospitals is weight for common utility for hospitals '''
# First, note that we can change the distribution and its parameters for common utility
cuw = [np.random.normal(0, 1) for x in range(num_hospitals)] # common utility hospitals provide
cuh = [np.random.normal(0, 1) for x in range(num_doctors)] # common utility workers provide
# This generates the workers and hospitals for this run (same ones used for all algorithms)
workers = [Worker(cuw, "W" + str(x), we_doctors, num_hospitals) for x in range(1, num_doctors + 1)] #creates array of workers with their own randomized preferences
hospitals = [Hospital(cuh, "H" + str(x), we_hospitals, num_doctors) for x in range(1, num_hospitals + 1)] # creates array of hospitals with their own randomized preferences
workers2 = [Worker(cuw, "W" + str(x), we_doctors, num_hospitals) for x in range(1, num_doctors + 1)] # creates array of workers with their own randomized preferences
hospitals2 = [Hospital(cuh, "H" + str(x), we_hospitals, num_doctors) for x in range(1, num_hospitals + 1)] # creates array of hospitals with their own randomized preferences
workers3 = [Worker(cuw, "W" + str(x), we_doctors, num_hospitals) for x in range(1, num_doctors + 1)] # creates array of workers with their own randomized preferences
return workers, hospitals, workers2, workers3, hospitals2, cuw, cuh
def getDoctorsAndHospitalsExtended(num_doctors, num_hospitals, we_doctors, we_hospitals):
'''
we_doctors is weight for common utility for doctors
we_hospitals is weight for common utility for hospitals '''
# First, note that we can change the distribution and its parameters for common utility
cuw = [np.random.normal(0, 1) for x in range(num_hospitals)] # common utility hospitals provide
cuh = [np.random.normal(0, 1) for x in range(num_doctors)] # common utility workers provide
# This generates the workers and hospitals for this run (same ones used for all algorithms)
workers = [Worker(cuw, "W" + str(x), we_doctors, num_hospitals) for x in range(1, num_doctors + 1)] #creates array of workers with their own randomized preferences
hospitals = [Hospital(cuh, "H" + str(x), we_hospitals, num_doctors) for x in range(1, num_hospitals + 1)] # creates array of hospitals with their own randomized preferences
workers2 = [Worker(cuw, "W" + str(x), we_doctors, num_hospitals) for x in range(1, num_doctors + 1)] # creates array of workers with their own randomized preferences
hospitals2 = [Hospital(cuh, "H" + str(x), we_hospitals, num_doctors) for x in range(1, num_hospitals + 1)] # creates array of hospitals with their own randomized preferences
workers3 = [Worker(cuw, "W" + str(x), we_doctors, num_hospitals) for x in range(1, num_doctors + 1)] # creates array of workers with their own randomized preferences
workers4 = [Worker(cuw, "W" + str(x), we_doctors, num_hospitals) for x in range(1, num_doctors + 1)] # creates array of workers with their own randomized preferences
hospitals4 = [Hospital(cuh, "H" + str(x), we_hospitals, num_doctors) for x in range(1, num_hospitals + 1)] # creates array of hospitals with their own randomized preferences
return workers, hospitals, workers2, workers3, hospitals2, cuw, cuh, workers4, hospitals4
def addDoctorsAndWorkersToLists(num_doctors, num_hospitals, docs, works, types, runs, run_num, cu_ranks):
'''
This just updates these lists (this is done the same way every run)
'''
for i in range(1, num_doctors + 1):
docs.append(i)
types.append("Doctor")
runs.append(run_num)
cu_ranks.append(i)
for i in range(1, num_hospitals + 1):
works.append(i)
types.append("Hospital")
runs.append(run_num)
cu_ranks.append(i)
def recordBlockingPairs(x, y, workers_list, hospitals_list, array_to_record, num_doctors, min_index):
# make a dictionary mapping worker names to workers
name_to_worker = getNameToWorkerDict(workers_list)
# same with hospitals
name_to_hospital = getNameToWorkerDict(hospitals_list)
# now we check for blocking pairs for each individual doctor/hospital
for i in range(len(x)):
d = x[i]
# get the actual worker
w = name_to_worker[d]
for j in range(len(y)):
e = y[j]
# get the actual hospital
h = name_to_hospital[e]
if(h.block(w) and w.block(h)):
array_to_record[i + min_index] += 1
array_to_record[j + num_doctors + min_index] += 1
def doTruncatedGS(workers, hospitals, x, y, max_interviews, doDocsPropose, doctors_p, doctors_pprime, hospitals_p, hospital_pprime, match_gs_truncated_pp_docs, match_gs_truncated_p_docs, array_to_record, num_doctors, min_index, match_name):
w5 = []
h5 = []
for w in workers:
w.setWishes3(max_interviews) # sets wishes according to original preferences, but truncated
w.setMatch(None)
w5.append(w)
for h in hospitals:
h.setWishes2() # sets wishes according to original preferences
h.setMatch(None)
h5.append(h)
print("Starting GS with truncated preferences. Time: " + str(np.datetime64('now')))
if (doDocsPropose):
proposals(w5, h5)
else:
proposals(h5, w5)
w5_nameToWorkerDict = getNameToWorkerDict(w5)
h5_nameToWorkerDict = getNameToWorkerDict(h5)
# Now we iterate through the doctors in the order of highest CU and record their matchings
for i in range(len(x)):
d = x[i]
# get the actual worker
w = w5_nameToWorkerDict[d]
match_gs_truncated_pp_docs.append(getR3(w, doctors_pprime[i]))
match_gs_truncated_p_docs.append(getR3(w, doctors_p[i]))
if (w.getCurrent() is None):
match_name.append("Unmatched")
else:
match_name.append(w.getCurrent().getName())
# Now we do the same thing, but recording the matchings for hospitals
for i in range(len(y)):
d = y[i]
# get the actual hospital
h = h5_nameToWorkerDict[d]
match_gs_truncated_pp_docs.append(getR3(h, hospital_pprime[i]))
match_gs_truncated_p_docs.append(getR3(h, hospitals_p[i])) # For p, the rankings are based on CU only
if (h.getCurrent() is None):
match_name.append("Unmatched")
else:
match_name.append(h.getCurrent().getName())
# now we record blocking pairs
recordBlockingPairs(x, y, w5, h5, array_to_record, num_doctors, min_index)
def doGS(workers, hospitals, x, y, doDocsPropose, doctors_p, doctors_pprime, hospitals_p, hospital_pprime, match_gs_p_docs, match_gs_pp_docs, array_to_record, num_doctors, min_index, match_name):
# Here, we run Gale Shapley with no interview stage
# Thus, we see what happens if there were no interviews and
# doctors reported their true, untruncated preferences
w3gs = []
h3gs = []
for w in workers:
w.setWishes2() #sets wishes according to original preferences
w.setMatch(None)
w3gs.append(w)
for h in hospitals:
h.setWishes2() #sets wishes according to original preferences
h.setMatch(None)
h3gs.append(h)
print("Starting GS with no interview stage and untruncated preferences. Time: " + str(np.datetime64('now')))
if (doDocsPropose):
proposals(w3gs, h3gs) # Actual gale shapley algorithm
else:
proposals(h3gs, w3gs)
# Record results
w3gs_nameToWorkerDict = getNameToWorkerDict(w3gs)
h3gs_nameToWorkerDict = getNameToWorkerDict(h3gs)
for i in range(len(x)):
d = x[i]
# get the actual worker
w = w3gs_nameToWorkerDict[d]
match_gs_pp_docs.append(getR3(w, doctors_pprime[i]))
match_gs_p_docs.append(getR3(w, doctors_p[i]))
if (w.getCurrent() is None):
match_name.append("Unmatched")
else:
match_name.append(w.getCurrent().getName())
# Now we do the same thing, but recording the matchings for hospitals
for i in range(len(y)):
d = y[i]
# get the actual hospital
h = h3gs_nameToWorkerDict[d]
match_gs_pp_docs.append(getR3(h, hospital_pprime[i]))
match_gs_p_docs.append(getR3(h, hospitals_p[i])) # For p, the rankings are based on CU only
if (h.getCurrent() is None):
match_name.append("Unmatched")
else:
match_name.append(h.getCurrent().getName())
# Check stability for final matchings (we compare with original rankings, p)
recordBlockingPairs(x, y, w3gs, h3gs, array_to_record, num_doctors, min_index)
# Gale shapley has been run with original preferences and results recorded for how hospitals were ranked in p
def runSIGSOnly(num_doctors, num_hospitals, we_doctors, we_hospitals, max_interviews, ids, docs, works, types, runs, run_num, match_in_pp_docs, match_in_p_docs, cu_ranks):
workers, hospitals, workers2, workers3, hospitals2, cuw, cuh = getDoctorsAndHospitals(num_doctors, num_hospitals, we_doctors, we_hospitals)
# First, we simply update the docs, works, and runs lists
addDoctorsAndWorkersToLists(num_doctors, num_hospitals, docs, works, types, runs, run_num, cu_ranks)
# This generates the preferences for the hospitals based only on common utility (for SIGS)
for h in hospitals2:
h.generateP2(cuh, num_doctors) #hospital's original preferences,p, only based on cu
x = hospitals2[0].getRanking() #these are the names of workers ranked by cu
# Now we do the same thing, simply to get the list of hospitals ranked by CU
for w in workers3:
w.generateP2(cuw, num_hospitals)
y = workers3[0].getRanking() # these are the names of the hospitals ranked by cu
# We add the names to the ids list
for x_1 in x:
ids.append(x_1)
for y_1 in y:
ids.append(y_1)
workers2_nameToWorkerDict = getNameToWorkerDict(workers2)
hospitals2_nameToWorkerDict = getNameToWorkerDict(hospitals2)
# Now we get the actual doctors and hospitals corresponding to these names
# so we can check their final matches at the end
ranked_workers = []
for n in x:
#ranked_workers.append(getWorker(n, workers2))
ranked_workers.append(workers2_nameToWorkerDict[n])
ranked_hospitals = []
for n in y:
#ranked_hospitals.append(getWorker(n, hospitals2))
ranked_hospitals.append(hospitals2_nameToWorkerDict[n])
# Now we set "workers2" to have the same ranking as "workers" so they are identical
for i in range(len(workers)):
workers2[i].setOrig(workers[i].getOrig())
# Similarly, we want "hospitals2" to have the same idiosyncratic utilities
# as "hospitals" even if the rankings aren't changed (still based only on CU)
# we use these idiosyncratic utilities to update the preferences after the interviews
for i in range(len(hospitals)):
hospitals2[i].setIU(hospitals[i].getIU())
for w in workers2:
w.setChoices(hospitals2)
# Simulated Interviews
for w in ranked_workers:
while(len(w.getInterviews()) < max_interviews and len(w.getChoices()) > 0): # while interviewed at less than the max and has at least one more hospital to apply to
h = w.getTopChoice()
if(len(h.getInterviews()) < max_interviews): # if hospital has interviewed less than the max allowed
h.interview(w)
w.interview(h)
# end of simulated interviews
w3=[]
h3=[]
m4=[] # m4 are matchings from simulated interviews and Gale shapley with p with doctors proposing
# Report preferences for Gale-Shapley/Deferred Acceptance Algorithm
# For SIGS
for w in workers2:
w.setWishes4() # sets wishes according to matches of simulated interviews
w3.append(w)
# For SIGS, hospitals initially only had common utility
# After interviewing candidates, they now have an idiosyncratic utility to their preferences
# We add this to create a new ranking of doctors by each hospital
for h in hospitals2:
h.setWishes4(cuh, we_hospitals) # add idiosyncratic utility and sets wishes according to matches of simulated interviews
h3.append(h)
w3_nameToWorkerDict = getNameToWorkerDict(w3)
h3_nameToWorkerDict = getNameToWorkerDict(h3)
# Since we just added idiosyncratic utility, we can save the p prime rankings
hospital_pprime = []
for d in y:
#h = getWorker(d, h3)
h = h3_nameToWorkerDict[d]
hospital_pprime.append(h.getWishes())
doctors_pprime = []
for d in x:
w = getWorker(d, w3)
w = w3_nameToWorkerDict[d]
doctors_pprime.append(w.getWishes())
# Gale Shapley Algorithm
proposals(w3,h3) # gale shapley after simulated interviews with doctors proposing
w3_nameToWorkerDict = getNameToWorkerDict(w3)
h3_nameToWorkerDict = getNameToWorkerDict(h3)
# Now we iterate through the doctors in the order of highest CU and record their matchings
for i in range(len(x)):
d = x[i]
#w = getWorker(d, w3) # get the actual worker
w = w3_nameToWorkerDict[d]
match_in_pp_docs.append(getR3(w, doctors_pprime[i]))
match_in_p_docs.append(getR2(w))
# Now we do the same thing, but recording the matchings for hospitals
for i in range(len(y)):
d = y[i]
#h = getWorker(d, h3) # get the actual hospital
h = h3_nameToWorkerDict[d]
match_in_pp_docs.append(getR3(h, hospital_pprime[i]))
match_in_p_docs.append(getR2(h)) # For p, the rankings are based on CU only
def runTAGS_and_GS(num_doctors, num_hospitals, we_doctors, we_hospitals, max_interviews, ids, docs, works, types, runs, run_num, cu_ranks, match_gs_p_docs, match_gs_pp_docs, match_gs_p_hosp, match_gs_pp_hosp, match_gs_truncated_p_docs, match_gs_truncated_pp_docs, match_gs_truncated_p_hosp,
match_gs_truncated_pp_hosp, match_tags_p_docs, match_tags_pp_docs, cu_provided_gs, u_provided_gs, iu_provided_gs, cu_provided_gs_truncated, u_provided_gs_truncated, iu_provided_gs_truncated, cu_provided_tags, u_provided_tags, iu_provided_tags, blocking_pair_counts,
bp_TAGS, min_index, match_tags_b_p_docs, match_tags_b_pp_docs, cu_provided_tags_b, u_provided_tags_b, iu_provided_tags_b, bp_TAGS_b, bp_GS_Trunc_h, bp_GS_Trunc_d, bp_GS_d, bp_GS_h, preference_profile, match_name_tags_d, match_name_tags_h, match_name_gs_d, match_name_gs_h, match_name_gs_trunc_d, match_name_gs_trunc_h):
workers, hospitals, workers2, workers3, hospitals2, cuw, cuh, workers4, hospitals4 = getDoctorsAndHospitalsExtended(num_doctors, num_hospitals, we_doctors, we_hospitals)
# note that workers and hospitals have the preferences that we use for all the other workers/hospitals lists
# so we can get these preferences
iu_sorted = []
cu_sorted = []
utilities_sorted = []
hospital_names = [] # this is so we can match hospitals to indices in these lists by name
for h in hospitals:
iu_sorted.append(h.getIU_sorted())
cu_sorted.append(h.getCU_sorted())
utilities_sorted.append(h.getUtilities_sorted())
hospital_names.append(h.getName())
# First, we simply update the docs, works, and runs lists
addDoctorsAndWorkersToLists(num_doctors, num_hospitals, docs, works, types, runs, run_num, cu_ranks)
# This generates the preferences for the hospitals based only on common utility (for SIGS)
for h in hospitals2:
h.generateP2(cuh, num_doctors) #hospital's original preferences,p, only based on cu (note that hospitals2 and hospitals have different IU, but same CU)
x = hospitals2[0].getRanking() #these are the names of workers ranked by cu
# Now we do the same thing, simply to get the list of hospitals ranked by CU
for w in workers3:
w.generateP2(cuw, num_hospitals) # this isn't used for any matching
y = workers3[0].getRanking() # these are the names of the hospitals ranked by cu
# We add the names to the ids list
for x_1 in x:
ids.append(x_1)
for y_1 in y:
ids.append(y_1)
# Now we get the actual doctors and hospitals corresponding to these names
# so we can check their final matches at the end
workers2_nameToWorkerDict = getNameToWorkerDict(workers2)
hospitals2_nameToWorkerDict = getNameToWorkerDict(hospitals2)
ranked_workers = []
for n in x:
ranked_workers.append(workers2_nameToWorkerDict[n])
ranked_hospitals = []
for n in y:
ranked_hospitals.append(hospitals2_nameToWorkerDict[n])
# Now we set "workers2" to have the same ranking as "workers" so they are identical
for i in range(len(workers)):
workers2[i].setOrig(workers[i].getOrig())
workers4[i].setOrig(workers[i].getOrig()) # This sets the preferences of the workers for TAGS to be the exact same as those for SIGS and GS
# Similarly, we want "hospitals2" to have the same idiosyncratic utilities
# as "hospitals" even if the rankings aren't changed (still based only on CU)
# we use these idiosyncratic utilities to update the preferences after the interviews
for i in range(len(hospitals)):
hospitals2[i].setIU(hospitals[i].getIU())
hospitals4[i].setIU(hospitals[i].getIU())
hospitals4[i].setRanking(hospitals[i].getRanking()) # This sets the preferences of the hospitals for TAGS to be the exact same as those for SIGS (after interviews)
for w in workers2:
w.setChoices(hospitals2)
print("Starting T-Algorithm. Time: " + str(np.datetime64('now')))
past = []
current = []
for w in workers4: # prematches all hospitals to each worker for t-algorithm
w.match(hospitals4)
w.match(getPreM(w))
past.append(w.getMatchings())
# T-Algorithm
goal = max_interviews # sets max number of interviews (matches in the T-algorithm for each agent)
current = iteration(goal, workers4, hospitals4) # first pass over the same population that the convergence loop below refines
while(not equate(past,current)): # while they don't converge
past = []
for p in current:
past.append(p)
current = iteration(goal, workers4, hospitals4)
# end of t-algorithm
# Report preferences for Gale-Shapley/Deferred Acceptance Algorithm
# For TAGS
workers4b = []
hospitals4b = [] # in these lists we store hospitals/workers for when doctors propose in GS phase after T-algorithm
for w in workers4:
w.setWishes() # sets wishes (reported preferences) according to matches of t-algorithm
workers4b.append(w)
for h in hospitals4:
h.setWishes() #sets wishes according to matches of t-algorithm
hospitals4b.append(h)
hospital_pprime2 = []
hospitals_p2 = []
workers4_nameToWorkerDict = getNameToWorkerDict(workers4)
hospitals4_nameToWorkerDict = getNameToWorkerDict(hospitals4)
for d in y:
h = hospitals4_nameToWorkerDict[d]
hospital_pprime2.append(h.getWishes())
hospitals_p2.append(h.getRanking())
doctors_pprime2 = []
doctors_p2 = []
for d in x:
w = workers4_nameToWorkerDict[d]
doctors_pprime2.append(w.getWishes())
doctors_p2.append(w.getOrig())
print("Starting DA after T-Algorithm. Time: " + str(np.datetime64('now')))
# Gale Shapley Algorithm
proposals(workers4, hospitals4) # gale-shapley algorithm with doctors proposing after t-algorithm
workers4_nameToWorkerDict = getNameToWorkerDict(workers4)
hospitals4_nameToWorkerDict = getNameToWorkerDict(hospitals4)
# Now we iterate through the doctors in the order of highest CU and record their matchings
for i in range(len(x)):
d = x[i]
# get the actual worker
w = workers4_nameToWorkerDict[d]
match_tags_pp_docs.append(getR3(w, doctors_pprime2[i]))
match_tags_p_docs.append(getR3(w, doctors_p2[i]))
if (w.getCurrent() is None):
match_name_tags_d.append("Unmatched")
else:
match_name_tags_d.append(w.getCurrent().getName())
if (getR3(w, doctors_pprime2[i]) == 0): # unmatched, so no hospital
cu_provided_tags.append(-2)
iu_provided_tags.append(-2)
u_provided_tags.append(-2)
else:
# Get the worker's match
h = workers4_nameToWorkerDict[d].getCurrent()
cu_doc, iu_doc, u_doc = fillInUtilities(h, hospital_names, i, cu_sorted, iu_sorted, utilities_sorted)
cu_provided_tags.append(cu_doc)
iu_provided_tags.append(iu_doc)
u_provided_tags.append(u_doc)
# Now we do the same thing, but recording the matchings for hospitals
for i in range(len(y)):
d = y[i]
# get the actual hospital
h = hospitals4_nameToWorkerDict[d]
match_tags_pp_docs.append(getR3(h, hospital_pprime2[i]))
match_tags_p_docs.append(getR3(h, hospitals_p2[i])) # For p, the rankings are based on CU only
if (h.getCurrent() is None):
match_name_tags_d.append("Unmatched")
else:
match_name_tags_d.append(h.getCurrent().getName())
cu_provided_tags.append(-1)
iu_provided_tags.append(-1)
u_provided_tags.append(-1)
# Check stability for final matchings (we compare with original rankings, p)
bp1 = 0
bp2 = 0
for w in workers4:
bp1 += w.checkBlocking(hospitals4)
bp2 += w.checkBlocking2(hospitals4) # this excludes unmatched doctors/hospitals
blocking_pair_counts[4].append(bp1)
blocking_pair_counts[5].append(bp2)
# now we check for blocking pairs for each individual doctor/hospital
for i in range(len(x)):
d = x[i]
# get the actual worker
w = workers4_nameToWorkerDict[d]
for j in range(len(y)):
e = y[j]
# get the actual hospital
h = hospitals4_nameToWorkerDict[e]
if(h.block(w) and w.block(h)):
bp_TAGS[i + min_index] += 1
bp_TAGS[j + num_doctors + min_index] += 1
print("starting DA for TAGS with hospitals proposing")
# Now we re-run the GS phase of TAGS by re-using the T-algorithm results, but with hospitals proposing
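# Rebuild the reported wishes from the T-algorithm matchings and clear any tentative
# GS match, so only the proposing side differs from the doctor-proposing run above.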
for w in workers4b:
w.setWishes()
w.setMatch(None)
for h in hospitals4b:
h.setWishes()
h.setMatch(None)
workers4b_nameToWorkerDict = getNameToWorkerDict(workers4b)
hospitals4b_nameToWorkerDict = getNameToWorkerDict(hospitals4b)
# Now that we finished the T-Algorithm, we can fill out the relevant rows for the
# preference profile
for i in range(len(x)):
d = x[i]
# get the actual worker
w = workers4b_nameToWorkerDict[d]
for j in range(len(y)):
dh = y[j]
# get the actual hospital
h = hospitals4b_nameToWorkerDict[dh]
# Row:
row = []
# Run ID
row.append(run_num)
# Doctor ID (Common Ranking is (i + 1))
row.append(w.getName())
# Hospital ID (Common Ranking is (j + 1))
row.append(h.getName())
# Doctor (i + 1) total utility from Hospital (j + 1)
row.append(w.getTotalUtilityByHospital(h.getName()))
# Hospital (j + 1) total utility from Doctor (i + 1)
_, _, u_doc = fillInUtilities(h, hospital_names, i, cu_sorted, iu_sorted, utilities_sorted)
row.append(u_doc)
# Did Doctor (i + 1) interview at Hospital (j + 1) in TAGS
if (h.getName() in w.getWishes()):
row.append(1)
else:
row.append(0)
# Now we add the row
preference_profile.append(row)
proposals(hospitals4b, workers4b) # gale-shapley algorithm with hospitals proposing
workers4b_nameToWorkerDict = getNameToWorkerDict(workers4b)
hospitals4b_nameToWorkerDict = getNameToWorkerDict(hospitals4b)
# Now we iterate through the doctors in the order of highest CU and record their matchings
for i in range(len(x)):
d = x[i]
# get the actual worker
w = workers4b_nameToWorkerDict[d]
match_tags_b_pp_docs.append(getR3(w, doctors_pprime2[i]))
match_tags_b_p_docs.append(getR3(w, doctors_p2[i]))
if (w.getCurrent() is None):
match_name_tags_h.append("Unmatched")
else:
match_name_tags_h.append(w.getCurrent().getName())
if (getR3(w, doctors_pprime2[i]) == 0): # unmatched, so no hospital
cu_provided_tags_b.append(-2)
iu_provided_tags_b.append(-2)
u_provided_tags_b.append(-2)
else:
# Get the worker's match
h = workers4b_nameToWorkerDict[d].getCurrent()
cu_doc, iu_doc, u_doc = fillInUtilities(h, hospital_names, i, cu_sorted, iu_sorted, utilities_sorted)
cu_provided_tags_b.append(cu_doc)
iu_provided_tags_b.append(iu_doc)
u_provided_tags_b.append(u_doc)
# Now we do the same thing, but recording the matchings for hospitals
for i in range(len(y)):
d = y[i]
# get the actual hospital
h = hospitals4b_nameToWorkerDict[d]
match_tags_b_pp_docs.append(getR3(h, hospital_pprime2[i]))
match_tags_b_p_docs.append(getR3(h, hospitals_p2[i])) # For p, the rankings are based on CU only
if (h.getCurrent() is None):
match_name_tags_h.append("Unmatched")
else:
match_name_tags_h.append(h.getCurrent().getName())
cu_provided_tags_b.append(-1)
iu_provided_tags_b.append(-1)
u_provided_tags_b.append(-1)
# Check stability for final matchings (we compare with original rankings, p)
bp1 = 0
bp2 = 0
for w in workers4b:
bp1 += w.checkBlocking(hospitals4b)
bp2 += w.checkBlocking2(hospitals4b) # this excludes unmatched doctors/hospitals
blocking_pair_counts[0].append(bp1)
blocking_pair_counts[1].append(bp2)
# now we check for blocking pairs for each individual doctor/hospital
for i in range(len(x)):
d = x[i]
# get the actual worker
w = workers4b_nameToWorkerDict[d]
for j in range(len(y)):
e = y[j]
# get the actual hospital
h = hospitals4b_nameToWorkerDict[e]
if(h.block(w) and w.block(h)):
bp_TAGS_b[i + min_index] += 1
bp_TAGS_b[j + num_doctors + min_index] += 1
# Now we run GS with untruncated preferences, first with doctors proposing
doGS(workers, hospitals, x, y, True, doctors_p2, doctors_pprime2, hospitals_p2, hospital_pprime2, match_gs_p_docs, match_gs_pp_docs, bp_GS_d, num_doctors, min_index, match_name_gs_d)
# And then with hospitals proposing (i.e., doctors not proposing)
doGS(workers, hospitals, x, y, False, doctors_p2, doctors_pprime2, hospitals_p2, hospital_pprime2, match_gs_p_hosp, match_gs_pp_hosp, bp_GS_h, num_doctors, min_index, match_name_gs_h)
# Here we do the same as above, but we only truncate the preferences for the doctors and assume the hospitals rank every doctor
doTruncatedGS(workers, hospitals, x, y, max_interviews, True, doctors_p2, doctors_pprime2, hospitals_p2, hospital_pprime2, match_gs_truncated_pp_docs, match_gs_truncated_p_docs, bp_GS_Trunc_d, num_doctors, min_index, match_name_gs_trunc_d)
# Now we have hospitals propose
doTruncatedGS(workers, hospitals, x, y, max_interviews, False, doctors_p2, doctors_pprime2, hospitals_p2, hospital_pprime2, match_gs_truncated_pp_hosp, match_gs_truncated_p_hosp, bp_GS_Trunc_h, num_doctors, min_index, match_name_gs_trunc_h)
for i in range(len(x) + len(y)):
cu_provided_gs_truncated.append(-1)
iu_provided_gs_truncated.append(-1)
u_provided_gs_truncated.append(-1)
cu_provided_gs.append(-1)
iu_provided_gs.append(-1)
u_provided_gs.append(-1)
def runSIGSandGS(num_doctors, num_hospitals, we_doctors, we_hospitals, max_interviews, ids, docs, works, types, runs, run_num, match_in_pp_docs, match_in_p_docs, cu_ranks, match_gs_p_docs, match_gs_pp_docs):
workers, hospitals, workers2, workers3, hospitals2, cuw, cuh = getDoctorsAndHospitals(num_doctors, num_hospitals, we_doctors, we_hospitals)
# First, we simply update the docs, works, and runs lists
addDoctorsAndWorkersToLists(num_doctors, num_hospitals, docs, works, types, runs, run_num, cu_ranks)
# This generates the preferences for the hospitals based only on common utility (for SIGS)
for h in hospitals2:
h.generateP2(cuh, num_doctors) #hospital's original preferences,p, only based on cu
x = hospitals2[0].getRanking() #these are the names of workers ranked by cu
# Now we do the same thing, simply to get the list of hospitals ranked by CU
for w in workers3:
w.generateP2(cuw, num_hospitals)
y = workers3[0].getRanking() # these are the names of the hospitals ranked by cu
# We add the names to the ids list
for x_1 in x:
ids.append(x_1)
for y_1 in y:
ids.append(y_1)
# Now we get the actual doctors and hospitals corresponding to these names
# so we can check their final matches at the end
ranked_workers = []
for n in x:
ranked_workers.append(getWorker(n, workers2))
ranked_hospitals = []
for n in y:
ranked_hospitals.append(getWorker(n, hospitals2))
# Now we set "workers2" to have the same ranking as "workers" so they are identical
for i in range(len(workers)):
workers2[i].setOrig(workers[i].getOrig())
# Similarly, we want "hospitals2" to have the same idiosyncratic utilities
# as "hospitals"even if the rankings aren't changed (still only based off of CU)
# we use these idiosyncratic utilities to update the preferences after the interviews
for i in range(len(hospitals)):
hospitals2[i].setIU(hospitals[i].getIU())
for w in workers2:
w.setChoices(hospitals2)
# Simulated Interviews
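# Each doctor works down their preference list, requesting interviews until they hold
# max_interviews of them or run out of hospitals; a hospital accepts a request only
# while it has interviewed fewer than max_interviews candidates.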
for w in ranked_workers:
while(len(w.getInterviews()) < max_interviews and len(w.getChoices()) > 0): # while interviewed at less than the max and has at least one more hospital to apply to
h = w.getTopChoice()
if(len(h.getInterviews()) < max_interviews): # if hospital has interviewed less than the max allowed
h.interview(w)
w.interview(h)
# end of simulated interviews
w3=[] # These are for SIGS with Doctors Proposing
h3=[]
# Report preferences for Gale-Shapley/Deferred Acceptance Algorithm
# For SIGS
for w in workers2:
w.setWishes4() # sets wishes according to matches of simulated interviews
w3.append(w)
# For SIGS, hospitals initially only had common utility
# After interviewing candidates, they now have an idiosyncratic utility to their preferences
# We add this to create a new ranking of doctors by each hospital
for h in hospitals2:
h.setWishes4(cuh, we_hospitals) # add idiosyncratic utility and sets wishes according to matches of simulated interviews
h3.append(h)
# Since we just added idiosyncratic utility, we can save the p prime rankings
hospital_pprime = []
hospitals_p = []
for d in y:
h = getWorker(d, h3)
hospital_pprime.append(h.getWishes())
hospitals_p.append(h.getRanking()) # this is the same as x (only based on CU)
doctors_pprime = []
doctors_p = []
for d in x:
w = getWorker(d, w3)
doctors_pprime.append(w.getWishes())
doctors_p.append(w.getOrig())
# Gale Shapley Algorithm
proposals(w3,h3) # gale shapley after simulated interviews with doctors proposing
# Now we iterate through the doctors in the order of highest CU and record their matchings
for i in range(len(x)):
d = x[i]
w = getWorker(d, w3) # get the actual worker
match_in_pp_docs.append(getR3(w, doctors_pprime[i]))
match_in_p_docs.append(getR3(w, doctors_p[i]))
# Now we do the same thing, but recording the matchings for hospitals
for i in range(len(y)):
d = y[i]
h = getWorker(d, h3) # get the actual hospital
match_in_pp_docs.append(getR3(h, hospital_pprime[i]))
match_in_p_docs.append(getR3(h, hospitals_p[i])) # For p, the rankings are based on CU only
# Here, we run Gale Shapley with no interview stage
# Thus, we see what happens if there were no interviews and
# doctors reported their true, untruncated preferences
w3gs = []
h3gs = []
for w in workers:
w.setWishes2() #sets wishes according to original preferences
w.setMatch(None)
w3gs.append(w)
for h in hospitals:
h.setWishes2() #sets wishes according to original preferences
h.setMatch(None)
h3gs.append(h)
proposals(w3gs, h3gs) # Actual gale shapley algorithm
# Record results
for i in range(len(x)):
d = x[i]
w = getWorker(d, w3gs) # get the actual worker
match_gs_pp_docs.append(getR3(w, doctors_pprime[i]))
match_gs_p_docs.append(getR3(w, doctors_p[i]))
# Now we do the same thing, but recording the matchings for hospitals
for i in range(len(y)):
d = y[i]
h = getWorker(d, h3gs) # get the actual hospital
match_gs_pp_docs.append(getR3(h, hospital_pprime[i]))
match_gs_p_docs.append(getR3(h, hospitals_p[i])) # For p, the rankings are based on CU only
# Gale-Shapley has now been run with the original (untruncated) preferences, and the results are recorded against the rankings in p
def simulate(n, num_docs, num_hospitals, cud, cuh, max_interviews, filename='empty'):
ids = []
docs = []
works = []
runs = []
cu_ranks = []
types = []
match_in_pp_docs = []
match_in_p_docs = []
match_gs_p_docs = []
match_gs_pp_docs = []
match_gs_p_hosp = []
match_gs_pp_hosp = []
match_in_pp_hosp = []
match_in_p_hosp = []
match_gs_truncated_p_docs = []
match_gs_truncated_pp_docs = []
match_gs_truncated_p_hosp = []
match_gs_truncated_pp_hosp = []
match_tags_p_docs = []
match_tags_pp_docs = []
cu_provided_sigs = []
iu_provided_sigs = []
u_provided_sigs = []
cu_provided_gs = []
iu_provided_gs = []
u_provided_gs = []
cu_provided_gs_truncated = []
iu_provided_gs_truncated = []
u_provided_gs_truncated = []
cu_provided_tags = []
iu_provided_tags = []
u_provided_tags = []
blocking_pair_counts = []
for i in range(6):
blocking_pair_counts.append([])
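# Six parallel lists of blocking-pair counts; each mechanism appends a pair of counts
# (all blocking pairs, and blocking pairs excluding unmatched agents). In
# runTAGS_and_GS above, indices 4/5 are filled for TAGS with doctors proposing and
# 0/1 for TAGS with hospitals proposing; the remaining slots are presumably filled
# inside doGS/doTruncatedGS.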
bp_SIGS = []
bp_TAGS = []
match_tags_b_p_docs = []
match_tags_b_pp_docs = []
cu_provided_tags_b = []
u_provided_tags_b = []
iu_provided_tags_b = []
bp_TAGS_b = []
bp_GS_Trunc_h = []
bp_GS_Trunc_d = []
bp_GS_d = []
bp_GS_h = []
preference_profile = []
match_name_tags_d = []
match_name_tags_h = []
match_name_gs_d = []
match_name_gs_h = []
match_name_gs_trunc_d = []
match_name_gs_trunc_h = []
run_nums = []
m_i = []
n_d = []
n_h = []
cu_d = []
cu_h = []
min_index = 0 # this is for indexing blocking pairs
max_index = 0
for run_num in range(1, n + 1):
min_index = max_index
print("Simulation #" + str(run_num) + " Time: " + str(np.datetime64('now')))
run_nums.append(run_num)
m_i.append(max_interviews)
n_d.append(num_docs)
n_h.append(num_hospitals)
cu_d.append(cud)
cu_h.append(cuh)
for j in range(num_docs):
bp_TAGS.append(0)
bp_SIGS.append(0)
bp_TAGS_b.append(0)
bp_GS_Trunc_h.append(0)
bp_GS_Trunc_d.append(0)
bp_GS_d.append(0)
bp_GS_h.append(0)
max_index += 1
for j in range(num_hospitals):
bp_TAGS.append(0)
bp_SIGS.append(0)
bp_TAGS_b.append(0)
bp_GS_Trunc_h.append(0)
bp_GS_Trunc_d.append(0)
bp_GS_d.append(0)
bp_GS_h.append(0)
max_index += 1
#runTAGS_and_GS(num_docs, num_hospitals, cud, cuh, max_interviews, ids, docs, works, types, runs, run_num, cu_ranks, match_gs_p_docs, match_gs_pp_docs, match_gs_p_hosp, match_gs_pp_hosp, match_gs_truncated_p_docs, match_gs_truncated_pp_docs, match_gs_truncated_p_hosp,
# match_gs_truncated_pp_hosp, match_tags_p_docs, match_tags_pp_docs, cu_provided_gs, u_provided_gs, iu_provided_gs, cu_provided_gs_truncated, u_provided_gs_truncated, iu_provided_gs_truncated, cu_provided_tags, u_provided_tags, iu_provided_tags, blocking_pair_counts,
# bp_TAGS, min_index, match_tags_b_p_docs, match_tags_b_pp_docs, cu_provided_tags_b, u_provided_tags_b, iu_provided_tags_b, bp_TAGS_b, bp_GS_Trunc_h, bp_GS_Trunc_d, bp_GS_d, bp_GS_h, preference_profile, match_name_tags_d, match_name_tags_h, match_name_gs_d, match_name_gs_h, match_name_gs_trunc_d, match_name_gs_trunc_h)
#runSIGSOnly(num_docs, num_hospitals, cud, cuh, max_interviews, ids, docs, works, types, runs, run_num, match_in_pp_docs, match_in_p_docs, cu_ranks)
runSIGSandGS(num_docs, num_hospitals, cud, cuh, max_interviews, ids, docs, works, types, runs, run_num, match_in_pp_docs, match_in_p_docs, cu_ranks, match_gs_p_docs, match_gs_pp_docs)
#columns = ['id', 'Run','Type', 'Common ranking', 'Match GS D','GS Match, P (D Prop)', 'GS Match, P Prime (D Prop)', 'Match GS H', 'GS Match, P (H Prop)', 'GS Match, P Prime (H Prop)', 'Match GS Trunc D','GS Truncated Match, P (D Prop)', 'GS Truncated Match, P Prime (D Prop)', 'Match GS Trunc H','GS Truncated Match, P (H Prop)', 'GS Truncated Match, P Prime (H Prop)', 'Match TAGS P D','TAGS, P (D Prop)', 'TAGS, P Prime(D Prop)', 'Match TAGS P H',
# 'TAGS, P (H Prop)', 'TAGS, P Prime(H Prop)', 'blocking pairs TAGS (D Prop)', 'blocking pairs TAGS (H Prop)', 'blocking pairs Trunc. GS (H Prop)', 'blocking pairs Trunc. GS (D Prop)', 'blocking pairs GS (D Prop)', 'blocking pairs GS (H Prop)']
#results = pd.DataFrame(list(zip(ids, runs, types, cu_ranks, match_name_gs_d, match_gs_p_docs, match_gs_pp_docs, match_name_gs_h, match_gs_p_hosp, match_gs_pp_hosp, match_name_gs_trunc_d, match_gs_truncated_p_docs, match_gs_truncated_pp_docs, match_name_gs_trunc_h, match_gs_truncated_p_hosp, match_gs_truncated_pp_hosp, match_name_tags_d, match_tags_p_docs, match_tags_pp_docs, match_name_tags_h, match_tags_b_p_docs, match_tags_b_pp_docs, bp_TAGS, bp_TAGS_b, bp_GS_Trunc_h, bp_GS_Trunc_d, bp_GS_d, bp_GS_h)), columns = columns)
# This commented out code is to run just SIGS
#columns = ['id', 'Run','Type', 'Common ranking', 'SIGS Match, P (D Proposing)', 'SIGS Match, P Prime (D Proposing)']
#results = pd.DataFrame(list(zip(ids, runs, types, cu_ranks, match_in_p_docs, match_in_pp_docs)), columns = columns)
# This runs SIGS and GS with doctor proposing
columns = ['id', 'Run','Type', 'Common ranking', 'SIGS Match, P (D Proposing)', 'SIGS Match, P Prime (D Proposing)', 'GS Match, P (D Proposing)', 'GS Match, P Prime (D Proposing)']
results = pd.DataFrame(list(zip(ids, runs, types, cu_ranks, match_in_p_docs, match_in_pp_docs, match_gs_p_docs, match_gs_pp_docs)), columns = columns)
col2 = ['Run', 'Max Interviews', 'Num Doctors', 'Num Hospitals', 'Common utility weight Doctors', 'Common utility weight Hospitals']
results2 = pd.DataFrame(list(zip(run_nums, m_i, n_d, n_h, cu_d, cu_h)), columns = col2)
if (filename != 'empty'):
fn = filename + ".csv"
fn2 = filename + "_key.csv"
fn3 = filename + "_preference_profile.csv"
results.to_csv(fn, index = False)
results2.to_csv(fn2, index = False)
return results, results2
import cProfile
import pstats
def profile(filename, func, args=None):
pr = cProfile.Profile()
pr.enable()
if args is not None:
func(**args)
else:
func()
pr.disable()
f = open(filename, 'w')
ps = pstats.Stats(pr, stream=f)
ps.sort_stats('cumulative', 'tottime')
ps.print_stats()
f.close()
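# Example (hypothetical file name and deliberately small arguments) of profiling a
# run with the helper above:
# profile('profile_stats.txt', simulate,
#         args={'n': 1, 'num_docs': 50, 'num_hospitals': 50,
#               'cud': 0.05, 'cuh': 0.25, 'max_interviews': 5})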
simulate(34,500,500,0.05,0.25,5,'OutSim_SIGS_500_05_25_5X')
| 38.580199
| 533
| 0.638023
|
663963e2045db29d8249b91fd0652fcc511abcef
| 2,144
|
py
|
Python
|
Code/Final/BestModel/AdaBoostBest.py
|
shivachawala/PumpItUp
|
41c8f3be0808009dbd13fda7a6f6f1ebfd916646
|
[
"MIT"
] | null | null | null |
Code/Final/BestModel/AdaBoostBest.py
|
shivachawala/PumpItUp
|
41c8f3be0808009dbd13fda7a6f6f1ebfd916646
|
[
"MIT"
] | null | null | null |
Code/Final/BestModel/AdaBoostBest.py
|
shivachawala/PumpItUp
|
41c8f3be0808009dbd13fda7a6f6f1ebfd916646
|
[
"MIT"
] | 2
|
2018-05-06T04:26:49.000Z
|
2019-01-03T00:09:34.000Z
|
# coding: utf-8
# In[1]:
# AdaBoost Classification
import pandas as pd
import numpy as np
from sklearn import model_selection
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
# Read data set
data_values = pd.read_csv("../../../Datasets/train_values_processed.csv")
data_labels = data_values["status_group"]
data_values.drop(['status_group'], axis=1, inplace=True)
#Train-Test split: 75%-25%
X_train, X_test, y_train, y_test = train_test_split(data_values, data_labels, test_size=0.25, random_state=42)
# In[2]:
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
Adaclf = AdaBoostClassifier(algorithm='SAMME.R',learning_rate=0.01,n_estimators=600,
base_estimator=DecisionTreeClassifier(criterion='entropy', max_depth=40,
min_samples_leaf=1, min_samples_split=2,
splitter='best'))
Adaclf.fit(X_train, y_train)
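# GridSearchCV is imported above but not used; below is a minimal sketch of how the
# AdaBoost hyper-parameters could be tuned. The grid values are illustrative
# assumptions, not the search actually used to pick the settings above.
param_grid = {'n_estimators': [130, 300, 600], 'learning_rate': [0.01, 0.1]}
grid_search = GridSearchCV(
    AdaBoostClassifier(base_estimator=DecisionTreeClassifier(criterion='entropy', max_depth=40)),
    param_grid, cv=3, scoring='accuracy')
# grid_search.fit(X_train, y_train)  # uncomment to actually run the (slow) search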
from sklearn.metrics import confusion_matrix
predClf = Adaclf.predict(X_train)
trainConfusionMtrx = confusion_matrix(y_train, predClf)
trainConfusionMtrx
# In[3]:
AdaClfPred = Adaclf.predict(X_test)
testConfusionMtrx = confusion_matrix(y_test, AdaClfPred)
print("Confusion Matrix: \n",testConfusionMtrx)
# In[4]:
#Classification report
print("Classification Report:\n",classification_report(y_test, AdaClfPred))
# In[5]:
print("Accuracy:",Adaclf.score(X_test, y_test))
# In[6]:
#To avoid overfitting use kfold cross validation
k = 10
kFold = model_selection.KFold(n_splits=k, random_state=7)
Adaclf = AdaBoostClassifier(algorithm='SAMME.R',learning_rate=0.1,n_estimators=130,
base_estimator=DecisionTreeClassifier(class_weight=None, criterion='entropy', max_depth=5,
min_samples_leaf=1, min_samples_split=2,
splitter='best'))
accuracy = model_selection.cross_val_score(Adaclf, data_values, data_labels, cv=kFold)
print("Accuracy with 10fold Cross Valid:",accuracy.mean())
| 28.586667
| 122
| 0.725746
|
2ab1a7b80d098a38db0597a8860b9fffd789290c
| 12,484
|
py
|
Python
|
saas/pagination.py
|
kaiserho/djaodjin-saas
|
2e65d8d4daf51fe88e3434cc82e771dc50e5dc83
|
[
"BSD-2-Clause"
] | 383
|
2015-03-07T06:19:39.000Z
|
2022-03-12T20:53:37.000Z
|
saas/pagination.py
|
kaiserho/djaodjin-saas
|
2e65d8d4daf51fe88e3434cc82e771dc50e5dc83
|
[
"BSD-2-Clause"
] | 146
|
2015-03-25T22:45:44.000Z
|
2022-02-22T08:49:35.000Z
|
saas/pagination.py
|
kaiserho/djaodjin-saas
|
2e65d8d4daf51fe88e3434cc82e771dc50e5dc83
|
[
"BSD-2-Clause"
] | 111
|
2015-02-12T22:13:07.000Z
|
2022-03-11T05:45:53.000Z
|
# Copyright (c) 2021, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import OrderedDict
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from . import settings
from .models import (sum_dest_amount, sum_orig_amount, sum_balance_amount,
Transaction)
class BalancePagination(PageNumberPagination):
"""
Decorate the results of an API call with balance on an account
containing *selector*.
"""
def paginate_queryset(self, queryset, request, view=None):
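# Compute the balance of the full (optionally selector-filtered) queryset before the
# page is sliced, so the totals can be attached to the paginated response.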
self.start_at = view.start_at
self.ends_at = view.ends_at
if view.selector is not None:
dest_totals = sum_dest_amount(queryset.filter(
dest_account__icontains=view.selector))
orig_totals = sum_orig_amount(queryset.filter(
orig_account__icontains=view.selector))
else:
dest_totals = sum_dest_amount(queryset)
orig_totals = sum_orig_amount(queryset)
balance = sum_balance_amount(dest_totals, orig_totals)
self.balance_amount = balance['amount']
self.balance_unit = balance['unit']
return super(BalancePagination, self).paginate_queryset(
queryset, request, view=view)
def get_paginated_response(self, data):
return Response(OrderedDict([
('start_at', self.start_at),
('ends_at', self.ends_at),
('balance_amount', self.balance_amount),
('balance_unit', self.balance_unit),
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_paginated_response_schema(self, schema):
return {
'type': 'object',
'properties': {
'start_at': {
'type': 'string',
'format': 'date',
'description': "Start of the date range for which"\
" the balance was computed"
},
'ends_at': {
'type': 'string',
'format': 'date',
'description': "End of the date range for which"\
" the balance was computed"
},
'balance_amount': {
'type': 'integer',
'description': "balance of all transactions in cents"\
" (i.e. 100ths) of unit"
},
'balance_unit': {
'type': 'string',
'description': "three-letter ISO 4217 code"\
" for currency unit (ex: usd)"
},
'count': {
'type': 'integer',
'description': "The number of records"
},
'next': {
'type': 'string',
'description': "API end point to get the next page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'previous': {
'type': 'string',
'description': "API end point to get the previous page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'results': schema,
},
}
class RoleListPagination(PageNumberPagination):
def get_paginated_response(self, data):
return Response(OrderedDict([
('invited_count', self.request.invited_count),
('requested_count', self.request.requested_count),
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_paginated_response_schema(self, schema):
return {
'type': 'object',
'properties': {
'invited_count': {
'type': 'integer',
'description': "Number of user invited to have a role"
},
'requested_count': {
'type': 'integer',
'description': "Number of user requesting a role"
},
'count': {
'type': 'integer',
'description': "The number of records"
},
'next': {
'type': 'string',
'description': "API end point to get the next page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'previous': {
'type': 'string',
'description': "API end point to get the previous page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'results': schema,
},
}
class StatementBalancePagination(PageNumberPagination):
"""
Decorate the results of an API call with the balance as shown
in an organization statement.
"""
def paginate_queryset(self, queryset, request, view=None):
self.start_at = view.start_at
self.ends_at = view.ends_at
self.balance_amount, self.balance_unit \
= Transaction.objects.get_statement_balance(view.organization)
return super(StatementBalancePagination, self).paginate_queryset(
queryset, request, view=view)
def get_paginated_response(self, data):
return Response(OrderedDict([
('start_at', self.start_at),
('ends_at', self.ends_at),
('balance_amount', self.balance_amount),
('balance_unit', self.balance_unit),
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_paginated_response_schema(self, schema):
return {
'type': 'object',
'properties': {
'start_at': {
'type': 'string',
'format': 'date',
'description': "Start of the date range for which"\
" the balance was computed"
},
'ends_at': {
'type': 'string',
'format': 'date',
'description': "End of the date range for which"\
" the balance was computed"
},
'balance_amount': {
'type': 'integer',
'description': "balance of all transactions in cents"\
" (i.e. 100ths) of unit"
},
'balance_unit': {
'type': 'string',
'description': "three-letter ISO 4217 code"\
" for currency unit (ex: usd)"
},
'count': {
'type': 'integer',
'description': "The number of records"
},
'next': {
'type': 'string',
'description': "API end point to get the next page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'previous': {
'type': 'string',
'description': "API end point to get the previous page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'results': schema,
},
}
class TotalPagination(PageNumberPagination):
def paginate_queryset(self, queryset, request, view=None):
self.start_at = view.start_at
self.ends_at = view.ends_at
self.totals = view.totals
return super(TotalPagination, self).paginate_queryset(
queryset, request, view=view)
def get_paginated_response(self, data):
return Response(OrderedDict([
('start_at', self.start_at),
('ends_at', self.ends_at),
('balance_amount', self.totals['amount']),
('balance_unit', self.totals['unit']),
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_paginated_response_schema(self, schema):
return {
'type': 'object',
'properties': {
'balance_amount': {
'type': 'integer',
'description': "The sum of all record amounts (in unit)"
},
'balance_unit': {
'type': 'string',
'description': "three-letter ISO 4217 code"\
" for currency unit (ex: usd)"
},
'count': {
'type': 'integer',
'description': "The number of records"
},
'next': {
'type': 'string',
'description': "API end point to get the next page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'previous': {
'type': 'string',
'description': "API end point to get the previous page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'results': schema,
},
}
class TypeaheadPagination(PageNumberPagination):
page_size = settings.MAX_TYPEAHEAD_CANDIDATES
def paginate_queryset(self, queryset, request, view=None):
self.count = queryset.count()
if self.count > self.page_size:
# returning an empty set if the number of results is greater than
# MAX_TYPEAHEAD_CANDIDATES
queryset = queryset.none()
self.count = 0
return list(queryset)
def get_paginated_response(self, data):
return Response(OrderedDict([
('count', self.count),
('results', data)
]))
def get_schema_operation_parameters(self, view):
return []
def get_paginated_response_schema(self, schema):
return {
'type': 'object',
'properties': {
'count': {
'type': 'integer',
'description': "The number of records"
},
'results': schema,
},
}
| 37.716012
| 77
| 0.505607
|
87a947edfc022c5669b177f678ae75088a7c112e
| 627
|
py
|
Python
|
proto-build/gui/UcsSdk-0.5/samples/getFaults.py
|
Havate/havate-openstack
|
a0ca519af0da5df7981ccfb86cd9994f8c181644
|
[
"Apache-2.0"
] | 1
|
2015-06-26T23:31:42.000Z
|
2015-06-26T23:31:42.000Z
|
proto-build/gui/UcsSdk-0.5/samples/getFaults.py
|
Havate/havate-openstack
|
a0ca519af0da5df7981ccfb86cd9994f8c181644
|
[
"Apache-2.0"
] | 1
|
2015-03-12T01:03:44.000Z
|
2015-03-12T01:03:44.000Z
|
proto-build/gui/UcsSdk-0.5/samples/getFaults.py
|
Havate/havate-openstack
|
a0ca519af0da5df7981ccfb86cd9994f8c181644
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import sys
import os
from UcsSdk import *
# This script retrieve all the UCS Manager Faults.
# <Update the Credential Parameters below before executing the script>
ucsm_ip = '0.0.0.0'
user = 'username'
password = 'password'
try:
handle = UcsHandle()
handle.Login(ucsm_ip,user, password)
getRsp = handle.GetManagedObject(None, FaultInst.ClassId())
if (getRsp != None):
WriteObject(getRsp)
handle.Logout()
except Exception, err:
handle.Logout()
print "Exception:", str(err)
import traceback, sys
print '-'*60
traceback.print_exc(file=sys.stdout)
print '-'*60
| 19.59375
| 71
| 0.688995
|
b58f3a85b11f93177778fa1fd82ea576269208ed
| 5,646
|
py
|
Python
|
sync.py
|
androidtrackers/android-enterprise-devices
|
98f749219acc445218544c76d72b0ff0efb1f828
|
[
"MIT"
] | 6
|
2020-09-23T08:43:36.000Z
|
2022-03-09T08:15:15.000Z
|
sync.py
|
androidtrackers/android-enterprise-devices
|
98f749219acc445218544c76d72b0ff0efb1f828
|
[
"MIT"
] | null | null | null |
sync.py
|
androidtrackers/android-enterprise-devices
|
98f749219acc445218544c76d72b0ff0efb1f828
|
[
"MIT"
] | 3
|
2019-07-25T13:10:52.000Z
|
2021-11-15T12:09:28.000Z
|
#!/usr/bin/env python3.7
"""Google certified android devices tracker"""
import difflib
from datetime import date
from time import sleep
from itertools import tee
from os import rename, path, system, environ
from requests import get, post
GIT_OAUTH_TOKEN = environ['GIT_OAUTH_TOKEN_XFU']
BOT_TOKEN = environ['BOTTOKEN']
TODAY = str(date.today())
def fetch():
"""
Download latest and convert to markdown
"""
url = "https://androidenterprisepartners.withgoogle.com/_ah/spi/search/v1/devices?" \
"aer=true&size=999&sort=aer:desc,sort_name:asc"
data = get(url).json()['items']
with open('README.md', 'w', encoding="utf-8") as markdown:
markdown.write('# [Google Enterprise Android Devices List]'
'(https://androidenterprisepartners.withgoogle.com/devices/)\n\n')
markdown.write('|Brand|Name|Models|Image|Website|Type|'
'Display|CPU|RAM|Storage|Battery|OS|Telephony|Fingerprint|NFC|\n')
markdown.write('|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n')
for item in data:
brand = item['brand']
name = item['name']
models = item['models']
image = item['imageUrls']['original']
website = item['website']
device_type = item['hardwareFeatures']['formFactor']
display = item['hardwareFeatures']['display']
ram = item['hardwareFeatures']['ram']
flash = item['hardwareFeatures']['flash']
os = item['hardwareFeatures']['os']
processor_speed = item['hardwareFeatures']['processorSpeed']
battery = item['hardwareFeatures']['batteryLife']
telephony = '✓' if item['hardwareFeatures'].get('telephonySupport') else '✗'
fingerprint = '✓' if item['hardwareFeatures'].get('fingerPrintSupport') else '✗'
nfc = '✓' if item['hardwareFeatures'].get('nfcSupport') else '✗'
markdown.write(f'|{brand}|{name}|{models}|[Here]({image})|[Here]({website})|{device_type}'
f'|{display}|{processor_speed}|{ram}|{flash}|{battery}|{os}'
f'|{telephony}|{fingerprint}|{nfc}|\n')
def diff_files():
"""
Diff old.md against the freshly generated README.md and write the newly added device rows to the 'changes' file.
"""
with open('old.md', 'r') as old, open('README.md', 'r') as new:
diff = difflib.unified_diff(old.readlines(), new.readlines(), fromfile='old', tofile='new')
d1, d2 = tee(diff, 2)
changes = [line.split('+')[1] for line in d1 if line.startswith('+')]
deletes = [line.split('-')[1] for line in d2 if line.startswith('-')]
adds = [line for line in changes[1:] if '|'.join(line.split('|')[:3]) not in str(deletes[1:])]
new = ''.join([i for i in changes if '|'.join(i.split('|')[:3]) in str(adds)])
with open('changes', 'w') as out:
out.write(new)
def post_to_tg():
"""
post new devices to telegram channel
"""
# tg
telegram_chat = "@AndroidEnterpriseDevices"
with open('changes', 'r') as f:
changes = f.read()
for line in changes.splitlines():
try:
info = line.split("|")
brand = info[1]
name = info[2]
models = info[3]
image = info[4]
website = info[5]
device_type = info[6]
display = info[7]
processor_speed = info[8]
ram = info[9]
flash = info[10]
battery = info[11]
os = info[12]
telephony = info[13]
fingerprint = info[14]
nfc = info[15]
photo = image.split('(')[1].split(')')[0]
telegram_message = f"*New Android Enterprise Recommended device added!*\n" \
f"Brand: *{brand}*\n" \
f"Name: *{name}*\n" \
f"Type: *{device_type}*\n" \
f"Models: `{models}`\n" \
f"Website: {website}\n" \
f"*Display*: {display}\n" \
f"*CPU*: {processor_speed}\n" \
f"*RAM*: {ram}\n" \
f"*Storage*: {flash}\n" \
f"*Battery*: `{battery}`\n" \
f"*OS*: {os}\n" \
f"*Telephony Support*: {telephony}\n" \
f"*Fingerprint Support*: {fingerprint}\n" \
f"*NFC Support*: {nfc}\n"
telegram_url = f"https://api.telegram.org/bot{BOT_TOKEN}/sendPhoto?" \
f"chat_id={telegram_chat}&caption={telegram_message}&" \
f"parse_mode=Markdown&disable_web_page_preview=yes&" \
f"photo={photo}"
telegram_req = post(telegram_url)
telegram_status = telegram_req.status_code
if telegram_status == 200:
print("{0}: Telegram Message sent".format(name))
else:
print(f"{telegram_req.reason}")
sleep(3)
except IndexError:
continue
def git_commit_push():
"""
git add - git commit - git push
"""
system("git add README.md && git -c \"user.name=XiaomiFirmwareUpdater\" "
"-c \"user.email=xiaomifirmwareupdater@gmail.com\" "
"commit -m \"[skip ci] sync: {0}\" && "" \
""git push -q https://{1}@github.com/androidtrackers/"
"android-enterprise-devices.git HEAD:master"
.format(TODAY, GIT_OAUTH_TOKEN))
def main():
"""
certified-android-devices tracker
"""
if path.exists('README.md'):
rename('README.md', 'old.md')
fetch()
diff_files()
post_to_tg()
git_commit_push()
if __name__ == '__main__':
main()
| 38.148649
| 102
| 0.535069
|
e82a2e52d1d8b36f3ee5086a546d9217955d7b5b
| 2,796
|
py
|
Python
|
lawrencemain.py
|
kinxer/lawrence
|
391d69fcae1c21d1da00b8c628cf383e25579d77
|
[
"Apache-2.0"
] | 2
|
2016-05-27T18:14:53.000Z
|
2017-12-10T01:15:57.000Z
|
lawrencemain.py
|
kinxer/lawrence
|
391d69fcae1c21d1da00b8c628cf383e25579d77
|
[
"Apache-2.0"
] | null | null | null |
lawrencemain.py
|
kinxer/lawrence
|
391d69fcae1c21d1da00b8c628cf383e25579d77
|
[
"Apache-2.0"
] | null | null | null |
#LING380 Final Project
#Chatbot
import random
import pickle
from nltk.corpus import wordnet as wn
from parsing import parser
botDict = {}
#'''
with open('responses.p','rb') as f: # opens (and closes) file to read (r) as bytes (b)
botDict = pickle.load(f)
#'''
def train(key, sent):
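# Parse the reply sentence and store the (raw, parsed) pair under the given keyword,
# so respond() can later pick one of the stored replies for that subject.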
parsedSent = parser(sent)
if key in botDict.keys():
botDict[key].append((sent,parsedSent))
else:
botDict[key] = [(sent,parsedSent)]
def respond(response): # changed to deal with a list of possible responses
possibleResponses = []
for i in response:
if i[0] in botDict.keys():
randNum = random.randint(0, len(botDict[i[0]]) - 1)
possibleResponses.append(botDict[i[0]][randNum])
elif i[2] != '?': # if not, try hypernyms
hyper = lambda s: s.hypernyms() # find all hypernyms
hyperList = list(wn.synset(i[2]).closure(hyper))[0:4]
fewHyper = [j.name().split('.')[0] for j in hyperList]
for hyp in fewHyper:
#print(hyp) #for testing
if hyp in botDict.keys():
randNum = random.randint(0, len(botDict[hyp]) - 1)
possibleResponses.append(botDict[hyp][randNum])
if len(possibleResponses) == 0:
print("I don't really know much about that.")
teachInput = input("What subject did you want me to know about? (enter a word as a subject)\n")
teachInput = teachInput.lower()
teachInput = teachInput.strip("?")
teachResponse = input("What should I say about that? (this is what Lawrence will respond with) \n")
print("Cool, thanks! I'll keep that in mind.")
train(teachInput, teachResponse)
else:
myResponse = possibleResponses[random.randint(0,len(possibleResponses)-1)]
#myResponse = possibleResponses[-1]
print(myResponse[0])
''' # original code for giving responses
#train("name", "My name's Lawrence! How are you today?")
train("name", "Hey I'm Lawrence.")
train("science", "Science is my favorite subject.")
train("science", "I hear the jury's still out on science.")
#'''
userInput = input("Hey! My name's Lawrence. Ask me a few questions, like what I think about science!\n")
userInput = userInput.lower()
parsedInput, sentenceType = parser(userInput)
while "bye" not in userInput:
respond(parsedInput)
userInput = input("Enter response: ")
userInput = userInput.lower()
parsedInput, sentenceType = parser(userInput)
else:
print("Goodbye! See you later.")
with open('responses.p','wb') as f: # opens (and closes) file to write (w) as bytes (b)
pickle.dump(botDict,f)
| 35.392405
| 108
| 0.605866
|
957cdbd7136447c5f54da811fb8ea6908f467fb8
| 1,372
|
py
|
Python
|
Ejercicio 8.py
|
crltsnch/Ejercicios-grupales
|
72e01d6489816ea1b9308af1abd62792e5464c93
|
[
"Apache-2.0"
] | null | null | null |
Ejercicio 8.py
|
crltsnch/Ejercicios-grupales
|
72e01d6489816ea1b9308af1abd62792e5464c93
|
[
"Apache-2.0"
] | null | null | null |
Ejercicio 8.py
|
crltsnch/Ejercicios-grupales
|
72e01d6489816ea1b9308af1abd62792e5464c93
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'countApplesAndOranges' function below.
#
# The function accepts following parameters:
# 1. INTEGER s
# 2. INTEGER t
# 3. INTEGER a
# 4. INTEGER b
# 5. INTEGER_ARRAY apples
# 6. INTEGER_ARRAY oranges
#
def countApplesAndOranges(s, t, a, b, apples, oranges):
# Write your code here
manzanasdentro=0
naranjasdentro=0
for manzana in apples:
if(a+manzana>=s and a+manzana<=t):
manzanasdentro+=1
for naranja in oranges:
if(b+naranja>=s and b+naranja<=t):
naranjasdentro+=1
print("Han caido " + str(manzanasdentro) + " manzanas dentro")
print("Han caido " + str(naranjasdentro) + " naranjas dentro")
if __name__ == '__main__':
first_multiple_input = input().rstrip().split()
s = int(first_multiple_input[0])
t = int(first_multiple_input[1])
second_multiple_input = input().rstrip().split()
a = int(second_multiple_input[0])
b = int(second_multiple_input[1])
third_multiple_input = input().rstrip().split()
m = int(third_multiple_input[0])
n = int(third_multiple_input[1])
apples = list(map(int, input().rstrip().split()))
oranges = list(map(int, input().rstrip().split()))
countApplesAndOranges(s, t, a, b, apples, oranges)
| 31.181818
| 66
| 0.646501
|
427d876689ad5e10c18264ab330489985a8e2b41
| 3,336
|
py
|
Python
|
tests/tests.py
|
techtonik/portalocker
|
519236ac0c5bdfc43e07fcbf6ade354f959a3923
|
[
"PSF-2.0"
] | null | null | null |
tests/tests.py
|
techtonik/portalocker
|
519236ac0c5bdfc43e07fcbf6ade354f959a3923
|
[
"PSF-2.0"
] | null | null | null |
tests/tests.py
|
techtonik/portalocker
|
519236ac0c5bdfc43e07fcbf6ade354f959a3923
|
[
"PSF-2.0"
] | null | null | null |
from __future__ import print_function
from __future__ import with_statement
import pytest
import portalocker
def test_exceptions(tmpfile):
# Open the file 2 times
a = open(tmpfile, 'a')
b = open(tmpfile, 'a')
# Lock exclusive non-blocking
lock_flags = portalocker.LOCK_EX | portalocker.LOCK_NB
# First lock file a
portalocker.lock(a, lock_flags)
# Now see if we can lock file b
with pytest.raises(portalocker.LockException):
portalocker.lock(b, lock_flags)
# Cleanup
a.close()
b.close()
def test_with_timeout(tmpfile):
# Open the file 2 times
with pytest.raises(portalocker.AlreadyLocked):
with portalocker.Lock(tmpfile, timeout=0.1) as fh:
print('writing some stuff to my cache...', file=fh)
with portalocker.Lock(tmpfile, timeout=0.1, mode='wb',
fail_when_locked=True):
pass
print('writing more stuff to my cache...', file=fh)
def test_without_timeout(tmpfile):
# Open the file 2 times
with pytest.raises(portalocker.LockException):
with portalocker.Lock(tmpfile, timeout=None) as fh:
print('writing some stuff to my cache...', file=fh)
with portalocker.Lock(tmpfile, timeout=None, mode='w'):
pass
print('writing more stuff to my cache...', file=fh)
def test_without_fail(tmpfile):
# Open the file 2 times
with pytest.raises(portalocker.LockException):
with portalocker.Lock(tmpfile, timeout=0.1) as fh:
print('writing some stuff to my cache...', file=fh)
lock = portalocker.Lock(tmpfile, timeout=0.1)
lock.acquire(check_interval=0.05, fail_when_locked=False)
def test_simple(tmpfile):
with open(tmpfile, 'w') as fh:
fh.write('spam and eggs')
fh = open(tmpfile, 'r+')
portalocker.lock(fh, portalocker.LOCK_EX)
fh.seek(13)
fh.write('foo')
# Make sure we didn't overwrite the original text
fh.seek(0)
assert fh.read(13) == 'spam and eggs'
portalocker.unlock(fh)
fh.close()
def test_truncate(tmpfile):
with open(tmpfile, 'w') as fh:
fh.write('spam and eggs')
with portalocker.Lock(tmpfile, mode='a+') as fh:
# Make sure we didn't overwrite the original text
fh.seek(0)
assert fh.read(13) == 'spam and eggs'
with portalocker.Lock(tmpfile, mode='w+') as fh:
# Make sure we truncated the file
assert fh.read() == ''
def test_class(tmpfile):
lock = portalocker.Lock(tmpfile)
lock2 = portalocker.Lock(tmpfile, fail_when_locked=False, timeout=0.01)
with lock:
lock.acquire()
with pytest.raises(portalocker.LockException):
with lock2:
pass
with lock2:
pass
def test_acquire_release(tmpfile):
lock = portalocker.Lock(tmpfile)
lock2 = portalocker.Lock(tmpfile, fail_when_locked=False)
lock.acquire() # acquire lock when nobody is using it
with pytest.raises(portalocker.LockException):
# another party should not be able to acquire the lock
lock2.acquire(timeout=0.01)
# re-acquire a held lock is a no-op
lock.acquire()
lock.release() # release the lock
lock.release() # second release does nothing
| 27.8
| 75
| 0.638189
|
c26b9bc6057afd10a0de72197114e3c2a9353ec1
| 1,130
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/vplsEthernet_template.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/vplsEthernet_template.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/vplsEthernet_template.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class VPLS_Ethernet_Frame(Base):
__slots__ = ()
_SDM_NAME = 'vplsEthernet'
_SDM_ATT_MAP = {
'CW Zero': 'vplsEthernet.pweControlWord.zero',
'CW Rsvd': 'vplsEthernet.pweControlWord.reserved',
'CW Sequence Number': 'vplsEthernet.pweControlWord.sequenceNumber',
}
def __init__(self, parent):
super(VPLS_Ethernet_Frame, self).__init__(parent)
@property
def CW_Zero(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CW Zero']))
@property
def CW_Rsvd(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CW Rsvd']))
@property
def CW_Sequence_Number(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CW Sequence Number']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| 33.235294
| 93
| 0.711504
|
a059ccf0e81f8796a1d82b8c78800c914a178211
| 7,183
|
py
|
Python
|
indico/modules/events/static/util.py
|
jgrigera/indico
|
b5538f2755bc38a02313d079bac831ee3dfb44ab
|
[
"MIT"
] | 1
|
2018-11-12T21:29:26.000Z
|
2018-11-12T21:29:26.000Z
|
indico/modules/events/static/util.py
|
jgrigera/indico
|
b5538f2755bc38a02313d079bac831ee3dfb44ab
|
[
"MIT"
] | 9
|
2020-09-08T09:25:57.000Z
|
2022-01-13T02:59:05.000Z
|
indico/modules/events/static/util.py
|
jgrigera/indico
|
b5538f2755bc38a02313d079bac831ee3dfb44ab
|
[
"MIT"
] | 3
|
2020-07-20T09:09:44.000Z
|
2020-10-19T00:29:49.000Z
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import base64
import mimetypes
import re
import urlparse
from contextlib import contextmanager
import requests
from flask import current_app, g, request
from flask_webpackext import current_webpack
from flask_webpackext.manifest import JinjaManifestEntry
from pywebpack import Manifest
from werkzeug.urls import url_parse
from indico.core.config import config
from indico.modules.events.layout.models.images import ImageFile
from indico.web.flask.util import endpoint_for_url
_css_url_pattern = r"""url\((['"]?)({}|https?:)?([^)'"]+)\1\)"""
_url_has_extension_re = re.compile(r'.*\.([^/]+)$')
_plugin_url_pattern = r'(?:{})?/static/plugins/([^/]+)/(.*?)(?:__v[0-9a-f]+)?\.([^.]+)$'
_static_url_pattern = r'(?:{})?/(images|dist|fonts)(.*)/(.+?)(?:__v[0-9a-f]+)?\.([^.]+)$'
_custom_url_pattern = r'(?:{})?/static/custom/(.+)$'
def rewrite_static_url(path):
"""Remove __vxxx prefix from static URLs."""
plugin_pattern = _plugin_url_pattern.format(url_parse(config.BASE_URL).path)
static_pattern = _static_url_pattern.format(url_parse(config.BASE_URL).path)
custom_pattern = _custom_url_pattern.format(url_parse(config.BASE_URL).path)
if re.match(plugin_pattern, path):
return re.sub(plugin_pattern, r'static/plugins/\1/\2.\3', path)
elif re.match(static_pattern, path):
return re.sub(static_pattern, r'static/\1\2/\3.\4', path)
else:
return re.sub(custom_pattern, r'static/custom/\1', path)
def _create_data_uri(url, filename):
"""Create a data url that contains the file in question."""
response = requests.get(url, verify=False)
if response.status_code != 200:
# couldn't access the file
return url
data = base64.b64encode(response.content)
content_type = (mimetypes.guess_type(filename)[0] or
response.headers.get('Content-Type', 'application/octet-stream'))
return 'data:{};base64,{}'.format(content_type, data)
def _rewrite_event_asset_url(event, url):
"""Rewrite URLs of assets such as event images.
Only assets contained within the event will be taken into account
"""
scheme, netloc, path, qs, anchor = urlparse.urlsplit(url)
netloc = netloc or current_app.config['SERVER_NAME']
scheme = scheme or 'https'
# internal URLs (same server)
if netloc == current_app.config['SERVER_NAME']:
# this piece of Flask magic finds the endpoint that corresponds to
# the URL and checks that it points to an image belonging to this event
endpoint_info = endpoint_for_url(path)
if endpoint_info:
endpoint, data = endpoint_info
if endpoint == 'event_images.image_display' and int(data['confId']) == event.id:
image_file = ImageFile.get(data['image_id'])
if image_file and image_file.event == event:
return 'images/{}-{}'.format(image_file.id, image_file.filename), image_file
# if the URL is not internal or just not an image,
# we embed the contents using a data URI
data_uri = _create_data_uri(urlparse.urlunsplit((scheme, netloc, path, qs, '')), urlparse.urlsplit(path)[-1])
return data_uri, None
def _remove_anchor(url):
"""Remove the anchor from a URL."""
scheme, netloc, path, qs, anchor = urlparse.urlsplit(url)
return urlparse.urlunsplit((scheme, netloc, path, qs, ''))
def rewrite_css_urls(event, css):
"""Rewrite CSS in order to handle url(...) properly."""
# keeping track of used URLs
used_urls = set()
used_images = set()
def _replace_url(m):
prefix = m.group(2) or ''
url = m.group(3)
if url.startswith('/event/') or re.match(r'https?:', prefix):
rewritten_url, image_file = _rewrite_event_asset_url(event, prefix + url)
if image_file:
used_images.add(image_file)
return 'url({})'.format(rewritten_url)
else:
rewritten_url = rewrite_static_url(url)
used_urls.add(_remove_anchor(rewritten_url))
if url.startswith('/static/plugins/'):
return "url('../../../../../{}')".format(rewritten_url)
else:
return "url('../../../{}')".format(rewritten_url)
indico_path = url_parse(config.BASE_URL).path
new_css = re.sub(_css_url_pattern.format(indico_path), _replace_url, css.decode('utf-8'), flags=re.MULTILINE)
return new_css.encode('utf-8'), used_urls, used_images
def url_to_static_filename(endpoint, url):
"""Handle special endpoint/URLs so that they link to offline content."""
if re.match(r'(events)?\.display(_overview)?$', endpoint):
return 'index.html'
elif endpoint == 'event_layout.css_display':
return 'custom.css'
elif endpoint == 'event_images.logo_display':
return 'logo.png'
indico_path = url_parse(config.BASE_URL).path
if re.match(_static_url_pattern.format(indico_path), url):
url = rewrite_static_url(url)
else:
# get rid of [/whatever]/event/1234
url = re.sub(r'{}(?:/event/\d+)?/(.*)'.format(indico_path), r'\1', url)
if not url.startswith('assets/'):
# replace all remaining slashes
url = url.rstrip('/').replace('/', '--')
# it's not executed in a webserver, so we do need a .html extension
if not _url_has_extension_re.match(url):
url += '.html'
return url
def _rule_for_endpoint(endpoint):
return next((x for x in current_app.url_map.iter_rules(endpoint) if 'GET' in x.methods), None)
@contextmanager
def override_request_endpoint(endpoint):
rule = _rule_for_endpoint(endpoint)
assert rule is not None
old_rule = request.url_rule
request.url_rule = rule
try:
yield
finally:
request.url_rule = old_rule
class RewrittenManifest(Manifest):
"""A manifest that rewrites its asset paths."""
def __init__(self, manifest):
super(RewrittenManifest, self).__init__()
self._entries = {k: JinjaManifestEntry(entry.name, self._rewrite_paths(entry._paths))
for k, entry in manifest._entries.viewitems()}
self.used_assets = set()
def _rewrite_paths(self, paths):
return [rewrite_static_url(path) for path in paths]
def __getitem__(self, key):
self.used_assets.add(key)
return super(RewrittenManifest, self).__getitem__(key)
@contextmanager
def collect_static_files():
"""Keep track of URLs used by manifest and url_for."""
g.custom_manifests = {None: RewrittenManifest(current_webpack.manifest)}
g.used_url_for_assets = set()
used_assets = set()
yield used_assets
for manifest in g.custom_manifests.viewvalues():
used_assets |= {p for k in manifest.used_assets for p in manifest[k]._paths}
used_assets |= {rewrite_static_url(url) for url in g.used_url_for_assets}
del g.custom_manifests
del g.used_url_for_assets
| 38.207447
| 113
| 0.669219
|
7b58b5599904756e6d1db6907d37d6c351b66a40
| 5,711
|
py
|
Python
|
tools/flash_algo_gen.py
|
f-okuhara/CMSIS-DAP
|
871add645a0681ff62138b181e8ceae374520895
|
[
"Apache-2.0"
] | null | null | null |
tools/flash_algo_gen.py
|
f-okuhara/CMSIS-DAP
|
871add645a0681ff62138b181e8ceae374520895
|
[
"Apache-2.0"
] | null | null | null |
tools/flash_algo_gen.py
|
f-okuhara/CMSIS-DAP
|
871add645a0681ff62138b181e8ceae374520895
|
[
"Apache-2.0"
] | null | null | null |
"""
CMSIS-DAP Interface Firmware
Copyright (c) 2009-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This script takes as input an .axf file containing the flash algorithm to be
loaded in the target RAM and it converts it to a binary array ready to be
included in the CMSIS-DAP Interface Firmware source code.
"""
from struct import unpack
from os.path import join
from utils import run_cmd
from settings import *
from paths import TMP_DIR
# INPUT
ALGO_ELF_PATH = join(TMP_DIR, "flash_algo.axf")
ALGO_OFFSET = 0x20
# OUTPUT
TMP_DIR_W_TERM = TMP_DIR + '/'
DEV_INFO_PATH = join(TMP_DIR, "DevDscr")
ALGO_BIN_PATH = join(TMP_DIR, "PrgCode")
ALGO_TXT_PATH = join(TMP_DIR, "flash_algo.txt")
# Algorithm start addresses for each TARGET (compared with DevName in the
# FlashDevice structure in FlashDev.c
ALGO_START_ADDRESSES = {
'LPC1700': 0x10000000,
'LPC11xx': 0x10000000,
'LPC8xx': 0x10000000,
'LPC11U68': 0x10000000,
'LPC1549': 0x02000000,
'LPC18xx': 0x10000000,
'LPC43xx': 0x10000000,
'LPC4337': 0x10000000,
'MKXX': 0x20000000,
'TZ1000': 0x20000000,
}
class FlashInfo(object):
def __init__(self, path):
with open(path, "rb") as f:
# Read Device Information struct (defined in FlashOS.H, declared in FlashDev.c).
self.version = unpack("H", f.read(2))[0]
self.devName = f.read(128).split(b'\0',1)[0]
self.devType = unpack("H", f.read(2))[0]
self.devAddr = unpack("L", f.read(4))[0]
self.szDev = unpack("L", f.read(4))[0]
self.szPage = unpack("L", f.read(4))[0]
skipped = f.read(4)
self.valEmpty = unpack("B", f.read(1))[0]
skipped = f.read(3)
self.toProg = unpack("L", f.read(4))[0]
self.toErase = unpack("L", f.read(4))[0]
self.sectSize = []
self.sectAddr = []
while 1:
addr = unpack("L", f.read(4))[0]
size = unpack("L", f.read(4))[0]
if addr == 0xffffffff:
break
elif size == 0xffffffff:
break
else:
self.sectSize.append(size)
self.sectAddr.append(addr)
def get_algo_start(self):
# Search the DevName part of the FlashDevice description (FlashDev.c)
# for anything matching the ALGO_START_ADDRESSES dictionary
for target in ALGO_START_ADDRESSES:
if target in self.devName:
print 'Identified target as %s' % (target)
return ALGO_START_ADDRESSES[target]
print 'Found no match in ALGO_START_ADDRESSES for "%s"' % (self.devName)
raise Error()
def printInfo(self):
print "Extracted device information:"
print "----------------------------"
print "Version: 0x%04x" % (self.version)
print "Device Name: %s" % (self.devName)
print "Device Type: %u" % (self.devType)
print "Device Address: 0x%08x" % (self.devAddr)
print "Device Size: 0x%08x" % (self.szDev)
print "Prog Page Size: %u" % (self.szPage)
print "valEmpty: 0x%02x" % (self.valEmpty)
print "Timeout Prog: %u" % (self.toProg)
print "Timeout Erase: %u" % (self.toErase)
for i in range(len(self.sectSize)):
print "Sectors[%d]: { 0x%08x, 0x%08x }" % (i, self.sectSize[i], self.sectAddr[i])
def gen_flash_algo():
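# Convert the flash algorithm binary (extracted from the .axf by fromelf) into a C
# uint32_t array, then append the RAM addresses of its entry points
# (Init/UnInit/EraseChip/EraseSector/ProgramPage).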
run_cmd([FROMELF, '--bin', ALGO_ELF_PATH, '-o', TMP_DIR_W_TERM])
try:
flash_info = FlashInfo(DEV_INFO_PATH)
ALGO_START = flash_info.get_algo_start()
except IOError, e:
print repr(e), e
ALGO_START = 0x20000000
print "ALGO_START = 0x%08x\n" % ALGO_START
#flash_info.printInfo()
with open(ALGO_BIN_PATH, "rb") as f, open(ALGO_TXT_PATH, mode="w+") as res:
# Flash Algorithm
res.write("""
const uint32_t flash_algo_blob[] = {
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
""");
nb_bytes = ALGO_OFFSET
bytes_read = f.read(1024)
while bytes_read:
bytes_read = unpack(str(len(bytes_read)/4) + 'I', bytes_read)
for i in range(len(bytes_read)):
res.write(("0x%08x" % bytes_read[i]) + ", ")
nb_bytes += 4
if (nb_bytes % 0x20) == 0:
res.write("\n ") # % nb_bytes)
bytes_read = f.read(1024)
res.write("\n};\n")
# Address of the functions within the flash algorithm
stdout, _, _ = run_cmd([FROMELF, '-s', ALGO_ELF_PATH])
res.write("""
static const TARGET_FLASH flash = {
""")
for line in stdout.splitlines():
t = line.strip().split()
if len(t) != 8: continue
name, loc = t[1], t[2]
if name in ['Init', 'UnInit', 'EraseChip', 'EraseSector', 'ProgramPage']:
addr = ALGO_START + ALGO_OFFSET + int(loc, 16)
res.write(" 0x%08X, // %s\n" % (addr, name))
if __name__ == '__main__':
gen_flash_algo()
| 35.253086
| 99
| 0.589389
|
74499b6ba2de97e413f132330e7d428a44a84608
| 417
|
py
|
Python
|
django_errors/tests/test_settings.py
|
DLRSP/django-errors
|
1721169b7a094781422a57b1b4e5878dc3a88d77
|
[
"MIT"
] | 3
|
2021-12-06T20:37:40.000Z
|
2021-12-13T23:38:47.000Z
|
django_errors/tests/test_settings.py
|
DLRSP/django-errors
|
1721169b7a094781422a57b1b4e5878dc3a88d77
|
[
"MIT"
] | 51
|
2020-04-19T14:32:56.000Z
|
2022-03-30T10:46:18.000Z
|
django_errors/tests/test_settings.py
|
DLRSP/django-errors
|
1721169b7a094781422a57b1b4e5878dc3a88d77
|
[
"MIT"
] | null | null | null |
SECRET_KEY = 'fake-key'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'tests.db',
'SUPPORTS_TRANSACTIONS': 'false',
}
}
ROOT_URLCONF = 'example.urls'
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles'
]
| 23.166667
| 47
| 0.628297
|
fcc810185cd9144c3eb4b1e25dd268d473af7da0
| 626
|
py
|
Python
|
manage.py
|
OSMChina/openstreetmap-calendar
|
7514d7173ea5237e7637f2b54d873e6f45280e04
|
[
"Apache-2.0"
] | 26
|
2019-10-15T08:59:58.000Z
|
2021-04-06T03:47:43.000Z
|
manage.py
|
OSMChina/openstreetmap-calendar
|
7514d7173ea5237e7637f2b54d873e6f45280e04
|
[
"Apache-2.0"
] | 84
|
2019-10-15T09:31:34.000Z
|
2022-03-10T21:07:46.000Z
|
manage.py
|
OSMChina/openstreetmap-calendar
|
7514d7173ea5237e7637f2b54d873e6f45280e04
|
[
"Apache-2.0"
] | 10
|
2019-10-15T09:19:51.000Z
|
2022-03-10T04:23:09.000Z
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'osmcal.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.454545
| 73
| 0.682109
|
9a97781bffba0af229d5ec75019b5598f9b6f4ca
| 783
|
py
|
Python
|
Techfest 2021/Hash Function.py
|
AkashSCIENTIST/CompetitiveSolutions
|
236db303a21c7195ebf721394a54ce9df70782f5
|
[
"Apache-2.0"
] | null | null | null |
Techfest 2021/Hash Function.py
|
AkashSCIENTIST/CompetitiveSolutions
|
236db303a21c7195ebf721394a54ce9df70782f5
|
[
"Apache-2.0"
] | null | null | null |
Techfest 2021/Hash Function.py
|
AkashSCIENTIST/CompetitiveSolutions
|
236db303a21c7195ebf721394a54ce9df70782f5
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.setrecursionlimit(2**20)
def cross(n):
s = list("abcdefghijklmnopqrstuvwxyz")
if n == 1:
return s
res = [sub1 + sub2 for sub1 in s for sub2 in s]
if n == 2:
return res
else:
for i in range(n-2):
temp = [sub1 + sub2 for sub1 in s for sub2 in res]
res = temp
temp = []
return res
n, k, m = map(int, input().split(" "))
mod_ = 2 ** m
arr = cross(n)
def hash(s:str, m:int) -> int:
if len(s) == 0:
return 0
elif len(s) == 1:
return (ord(s[-1]) - ord('a') + 1)
else:
h = ((hash(s[:-1], m) * 33) ^ (ord(s[-1]) - ord('a') + 1)) % m
return h
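# Note: this is a djb2-style rolling hash, h -> (33*h) ^ code(next char), taken mod m.
# For example hash("ab", 2**20) == ((1*33) ^ 2) % 2**20 == 35.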
total = 0
for i in arr:
if hash(i, mod_) == k:
total += 1
print(total)
| 20.076923
| 70
| 0.464879
|
12d95ec3643d69d69f36deff2d1aa9bba36d8760
| 3,085
|
py
|
Python
|
utils/eval/geometry.py
|
wx-b/patch2pix
|
ad26ef065568eabf9a0bb6dc09f53462e9aeef36
|
[
"MIT"
] | 157
|
2021-03-18T03:43:27.000Z
|
2022-03-30T03:48:26.000Z
|
utils/eval/geometry.py
|
wx-b/patch2pix
|
ad26ef065568eabf9a0bb6dc09f53462e9aeef36
|
[
"MIT"
] | 8
|
2021-04-08T07:54:12.000Z
|
2022-03-29T18:40:38.000Z
|
utils/eval/geometry.py
|
wx-b/patch2pix
|
ad26ef065568eabf9a0bb6dc09f53462e9aeef36
|
[
"MIT"
] | 15
|
2021-04-08T04:18:35.000Z
|
2022-03-28T05:54:57.000Z
|
import numpy as np
from transforms3d.quaternions import quat2mat, mat2quat
# The skew-symmetric matrix of vector
skew = lambda v: np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
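# Note: skew(v) @ w equals the cross product np.cross(v, w), so skew(t) @ R below
# is the usual essential matrix E = [t]_x R.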
# Essential matrix & fundamental matrix
ess2fund = lambda K1, K2, E: np.linalg.inv(K2).T @ E @ np.linalg.inv(K1)
ess2fund_inv = lambda K1_inv, K2_inv, E: K2_inv.T @ E @ K1_inv
fund2ess = lambda F, K2, K1: K2.T @ F @ K1
# Camera relative pose to fundamental matrix
pose2ess = lambda R, t: skew(t.reshape(3,)) @ R
pose2fund = lambda K1, K2, R, t: np.linalg.inv(K2).T @ R @ K1.T @ skew((K1 @ R.T).dot(t.reshape(3,)))
pose2fund_inv = lambda K1, K2_inv, R, t: K2_inv.T @ R @ K1.T @ skew((K1 @ R.T).dot(t))
# Normalize fundamental matrix
normF = lambda F: F / F[-1,-1] # Normalize F by the last value
normalize = lambda A: A / np.linalg.norm(A)
def compose_projection_matrix(R, t):
"""Construct projection matrix
Args:
- R: rotation matrix, size (3,3);
- t: translation vector, size (3,);
Return:
- projection matrix [R|t], size (3,4)
"""
return np.hstack([R, np.expand_dims(t, axis=1)])
def matches2relapose_cv(p1, p2, K1, K2, rthres=1):
import cv2
# Move back to image center based coordinates
    f1, f2 = K1[0, 0], K2[0, 0]
pc1 = np.array([K1[:2, 2]])
pc2 = np.array([K2[:2, 2]])
# Rescale to im2 's focal setting
p1 = (p1 - pc1) * f2 / f1
p2 = (p2 - pc2)
K = np.array([[f2, 0, 0],
[0, f2, 0],
[0, 0, 1]])
E, inls = cv2.findEssentialMat(p1, p2, cameraMatrix=K, method=cv2.FM_RANSAC, threshold=rthres)
inls = np.where(inls > 0)[0]
_, R, t, _ = cv2.recoverPose(E, p1[inls], p2[inls], K)
return E, inls, R, t
def matches2relapose_degensac(p1, p2, K1, K2, rthres=1):
import pydegensac
import cv2
# Move back to image center based coordinates
f1, f2 = K1[0,0], K2[0, 0]
pc1 = np.array([K1[:2, 2]])
pc2 = np.array([K2[:2, 2]])
# Rescale to im2 's focal setting
p1 = (p1 - pc1) * f2 / f1
p2 = (p2 - pc2)
K = np.array([[f2, 0, 0],
[0, f2, 0],
[0, 0, 1]])
K1 = K2 = K
F, inls = pydegensac.findFundamentalMatrix(p1, p2, rthres)
E = fund2ess(F, K1, K2)
inls = np.where(inls > 0)[0]
_, R, t, _ = cv2.recoverPose(E, p1[inls], p2[inls], K)
return E, inls, R, t
def abs2relapose(c1, c2, q1, q2):
"""Calculate relative pose between two cameras
Args:
- c1: absolute position of the first camera
- c2: absolute position of the second camera
- q1: orientation quaternion of the first camera
- q2: orientation quaternion of the second camera
Return:
- (t12, q12): relative pose giving the transformation from the 1st camera to the 2nd camera coordinates,
t12 is translation, q12 is relative rotation quaternion
"""
r1 = quat2mat(q1)
r2 = quat2mat(q2)
r12 = r2.dot(r1.T)
q12 = mat2quat(r12)
t12 = r2.dot(c1 - c2)
return (t12, q12)
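# Illustrative self-check sketch for abs2relapose (not part of the module's API): with
# both cameras sharing the same orientation, the relative rotation is the identity
# quaternion and t12 = R2 @ (c1 - c2).
if __name__ == '__main__':
    c1 = np.array([0.0, 0.0, 0.0])
    c2 = np.array([1.0, 0.0, 0.0])
    q = np.array([1.0, 0.0, 0.0, 0.0])  # identity quaternion, (w, x, y, z) order
    t12, q12 = abs2relapose(c1, c2, q, q)
    assert np.allclose(q12, q)
    assert np.allclose(t12, c1 - c2)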
| 34.277778
| 110
| 0.581199
|
cff1de67279d789ce247d84cd8d0aea91ea9fc51
| 147
|
py
|
Python
|
tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_PolyTrend_NoCycle_NoAR.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_PolyTrend_NoCycle_NoAR.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_PolyTrend_NoCycle_NoAR.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['PolyTrend'] , ['NoCycle'] , ['NoAR'] );
| 36.75
| 74
| 0.734694
|
0c6ac518fc511588e588e6f18f5c069533601f79
| 2,908
|
py
|
Python
|
software/python/potentiostat/examples/run_mux_cyclic_w_plot.py
|
GVRX/potentiostat
|
1bb44639180ad6d81697631d4d5f699e6fb4eef1
|
[
"MIT"
] | 14
|
2020-07-01T13:08:36.000Z
|
2021-09-10T15:02:33.000Z
|
software/python/potentiostat/examples/run_mux_cyclic_w_plot.py
|
GVRX/potentiostat
|
1bb44639180ad6d81697631d4d5f699e6fb4eef1
|
[
"MIT"
] | 3
|
2020-07-21T05:21:52.000Z
|
2021-10-01T05:04:06.000Z
|
software/python/potentiostat/examples/run_mux_cyclic_w_plot.py
|
GVRX/potentiostat
|
1bb44639180ad6d81697631d4d5f699e6fb4eef1
|
[
"MIT"
] | 10
|
2020-08-07T03:25:11.000Z
|
2022-01-27T10:56:07.000Z
|
from potentiostat import Potentiostat
import matplotlib.pyplot as plt
port = '/dev/ttyACM0' # Serial port for potentiostat device
datafile = 'data.txt' # Output file for time, curr, volt data
channel_list = [1,7]
test_name = 'cyclic' # The name of the test to run
curr_range = '100uA' # The name of the current range [-100uA, +100uA]
sample_rate = 100.0 # The number of samples/second to collect
volt_min = -0.2 # The minimum voltage in the waveform (V)
volt_max = 3.0 # The maximum voltage in the waveform (V)
#volt_per_sec = 0.050 # The rate at which to transition from volt_min to volt_max (V/s)
volt_per_sec = 1.00 # The rate at which to transition from volt_min to volt_max (V/s)
num_cycles = 1 # The number of cycle in the waveform
# Convert parameters to amplitude, offset, period, phase shift for triangle waveform
amplitude = (volt_max - volt_min)/2.0 # Waveform peak amplitude (V)
offset = (volt_max + volt_min)/2.0 # Waveform offset (V)
period_ms = int(1000*4*amplitude/volt_per_sec) # Waveform period in (ms)
shift = 0.0 # Waveform phase shift - expressed as [0,1] number
# 0 = no phase shift, 0.5 = 180 deg phase shift, etc.
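# With the settings above this gives amplitude = (3.0 - (-0.2))/2 = 1.6 V,
# offset = (3.0 + (-0.2))/2 = 1.4 V and period_ms = 1000*4*1.6/1.0 = 6400 ms per cycle.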
# Create dictionary of waveform parameters for cyclic voltammetry test
test_param = {
'quietValue' : 0.0,
'quietTime' : 0,
'amplitude' : amplitude,
'offset' : offset,
'period' : period_ms,
'numCycles' : num_cycles,
'shift' : shift,
}
# Create potentiostat object and set current range, sample rate and test parameters
dev = Potentiostat(port)
dev.set_curr_range(curr_range)
dev.set_sample_rate(sample_rate)
dev.set_param(test_name,test_param)
dev.set_mux_enabled(True)
dev.set_enabled_mux_channels(channel_list)
# Run cyclic voltammetry test
#data_dict = dev.run_test(test_name,display='data',filename=datafile)
#data_dict = dev.run_test(test_name,display='data')
data_dict = dev.run_test(test_name,display='pbar',filename='data.txt')
#data_dict = dev.run_test(test_name,display='pbar',filename='data.pkl')
dev.set_mux_enabled(False)
# plot results using matplotlib
for chan, data in data_dict.items():
plt.figure(chan)
plt.subplot(211)
plt.plot(data['t'],data['v'])
plt.ylabel('potential (V)')
plt.grid('on')
plt.title('volt,curr vs time, channel = {0}'.format(chan))
plt.subplot(212)
plt.plot(data['t'],data['i'])
plt.ylabel('current (uA)')
plt.xlabel('time (sec)')
plt.grid('on')
plt.figure(chan + len(channel_list))
plt.plot(data['v'],data['i'])
plt.xlabel('potential (V)')
plt.ylabel('current (uA)')
plt.title('curr vs volt, channel = {0}'.format(chan))
plt.grid('on')
plt.show()
| 37.282051
| 102
| 0.643398
|
775e44c13788c54daa125f2cf48c901c812eb1de
| 6,341
|
py
|
Python
|
redsmall_plots.py
|
mfatihaktas/deep-scheduler
|
ad567465399620ec379cfdaa67fbcd94ded03c75
|
[
"MIT"
] | 11
|
2018-03-28T02:55:12.000Z
|
2021-07-12T15:21:38.000Z
|
redsmall_plots.py
|
mfatihaktas/deep-scheduler
|
ad567465399620ec379cfdaa67fbcd94ded03c75
|
[
"MIT"
] | null | null | null |
redsmall_plots.py
|
mfatihaktas/deep-scheduler
|
ad567465399620ec379cfdaa67fbcd94ded03c75
|
[
"MIT"
] | 5
|
2018-03-16T01:36:46.000Z
|
2019-10-17T03:23:20.000Z
|
from modeling import *
from redsmall_data import *
N, Cap = 20, 10
k = BZipf(1, 10) # BZipf(1, 5)
R = Uniform(1, 1)
b, beta_ = 10, 3 # 2.1
L = Pareto(b, beta_)
a, alpha_ = 1, 3
Sl = Pareto(a, alpha_)
def alpha_gen(ro):
return alpha_
ro0_l = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] # [0.1]
d_l, ro0_scherid_X_l_m = get_d_l__ro0_scherid_X_l_m(beta_, alpha_)
log(INFO, "", alpha_=alpha_, d_l=d_l, ro0_scherid_X_l_m=ro0_scherid_X_l_m)
def plot_ET_wrt_d():
r, red = 2, 'Coding'
log(INFO, "r= {}".format(r) )
dopt_l = []
def plot_(ro0):
log(INFO, "ro0= {}".format(ro0) )
scherid_X_l_m = ro0_scherid_X_l_m[ro0]
sim_ET_l, sim_StdT_l = [], []
ET_wMGc_l, approx_ET_wMGc_l = [], []
d_l_ = []
for d in d_l:
# for d in sorted([float(k[2:]) for k, _ in scherid_X_l_m.items() ] ):
# for key in [k for k, _ in scherid_X_l_m.items() ]:
X_l_m = scherid_X_l_m['d={}'.format(d) ]
# X_l_m = scherid_X_l_m[key]
# d = float(key[2:])
sim_ET = np.mean(X_l_m['ET_l'] )
if sim_ET > 200:
break
d_l_.append(d)
sim_ET_l.append(sim_ET)
sim_StdT_l.append(np.std(X_l_m['ET_l'] ) )
ET_wMGc, EW_wMGc, Prqing_wMGc = redsmall_ET_EW_Prqing_wMGc(ro0, N, Cap, k, r, b, beta_, a, alpha_gen, d, red)
if ET_wMGc is None or ET_wMGc > 100:
ET_wMGc = None
ET_wMGc_l.append(ET_wMGc)
approx_ET_wMGc, approx_EW_wMGc, approx_Prqing_wMGc = redsmall_approx_ET_EW_Prqing_wMGc(ro0, N, Cap, k, r, b, beta_, a, alpha_gen, d, red)
if approx_ET_wMGc is None or approx_ET_wMGc > 100:
approx_ET_wMGc = None
approx_ET_wMGc_l.append(approx_ET_wMGc)
plot.errorbar(d_l_, sim_ET_l, yerr=sim_StdT_l, label='Simulation', c=NICE_RED, marker='d', ls=':', mew=0.5, ms=8)
plot.plot(d_l_, ET_wMGc_l, label='M/G/c', c=NICE_BLUE, marker='o', ls=':')
plot.plot(d_l_, approx_ET_wMGc_l, label='Asymptotic', c=NICE_GREEN, marker='p', ls=':', mew=0.5, ms=8)
d_opt = redsmall_optimal_d(ro0, N, Cap, k, r, b, beta_, a, alpha_gen, red, max_d=max(d_l_) )
dopt_l.append(d_opt)
# d_opt = min(d_opt, max(d_l_) )
# if d_opt <= max(d_l_):
ET_wMGc, EW_wMGc, Prqing_wMGc = redsmall_ET_EW_Prqing_wMGc(ro0, N, Cap, k, r, b, beta_, a, alpha_gen, d_opt, red)
log(INFO, "*** ro0= {}, d_opt= {}, max_d= {}, ET_wMGc= {}".format(ro0, d_opt, max(d_l_), ET_wMGc) )
plot.plot([d_opt], [ET_wMGc], label=r'$\sim$optimal', c='orangered', marker='x', ls=':', mew=3, ms=10)
plot.xscale('log')
plot.xlim(right=max(d_l_)*2)
prettify(plot.gca() )
fontsize = 21
plot.legend(loc='best', framealpha=0.5, fontsize=14, numpoints=1)
plot.xlabel(r'$d$', fontsize=fontsize)
plot.ylabel(r'$E[T]$', fontsize=fontsize)
# plot.title(r'$N= {}$, $Cap= {}$, $\rho_0= {}$, $r= {}$'.format(N, Cap, ro, r) + '\n' \
# + r'$k \sim${}, $L \sim${}, $Sl \sim${}'.format(k.to_latex(), L.to_latex(), Sl.to_latex() ) )
# plot.gca().title.set_position([.5, 1.05] )
plot.title(r'$\rho_0= {}$'.format(ro0), fontsize=fontsize)
fig = plot.gcf()
fig.set_size_inches(4, 4)
plot.savefig('plot_ET_wrt_d_ro{}.png'.format(ro0), bbox_inches='tight')
fig.clear()
for ro0 in ro0_l:
plot_(ro0)
# plot_(ro0=0.1)
log(INFO, "done;", ro0_l=ro0_l, dopt_l=dopt_l)
def plot_ESl_ET_vs_ro__redsmall_vs_drl():
ro0_scherid_X_l_m = get_data_redsmall_vs_drl(alpha_)
def profile(ro, scherid, X, ulim=float('Inf') ):
l = ro0_scherid_X_l_m[ro][scherid][X]
m, s = np.mean(l), np.std(l)
if m > ulim:
m, s = float('NaN'), float('NaN')
return m, s
RLScher_ESl_l, Redsmall_ESl_l = [], []
RLScher_ESl_err_l, Redsmall_ESl_err_l = [], []
RLScher_ET_l, Redsmall_ET_l = [], []
RLScher_ET_err_l, Redsmall_ET_err_l = [], []
for ro in ro0_l:
mean, stdev = profile(ro, 'RLScher', 'ESl_l')
RLScher_ESl_l.append(mean)
RLScher_ESl_err_l.append(stdev)
mean, stdev = profile(ro, 'Redsmall', 'ESl_l')
Redsmall_ESl_l.append(mean)
Redsmall_ESl_err_l.append(stdev)
mean, stdev = profile(ro, 'RLScher', 'ET_l')
RLScher_ET_l.append(mean)
RLScher_ET_err_l.append(stdev)
mean, stdev = profile(ro, 'Redsmall', 'ET_l')
Redsmall_ET_l.append(mean)
Redsmall_ET_err_l.append(stdev)
## ESl
plot.errorbar(ro0_l, RLScher_ESl_l, yerr=RLScher_ESl_err_l, label='Redundant-RL', c=NICE_RED, marker=next(marker_c), linestyle=':', mew=0.5, ms=8)
plot.errorbar(ro0_l, Redsmall_ESl_l, yerr=Redsmall_ESl_err_l, label='Redundant-small', c=NICE_GREEN, marker=next(marker_c), linestyle=':', mew=0.5, ms=8)
fontsize = 18
prettify(plot.gca() )
plot.legend(framealpha=0.5, loc='best', numpoints=1)
plot.xticks(rotation=70)
# plot.yscale('log')
plot.xlabel(r'Baseline offered load $\rho_0$', fontsize=fontsize)
plot.ylabel('Average job slowdown', fontsize=fontsize)
# plot.title(r'$\rho= {}$'.format(ro), fontsize=fontsize)
plot.gcf().set_size_inches(4, 4)
plot.savefig('plot_ESl_vs_ro__redsmall_vs_drl.png', bbox_inches='tight')
plot.gcf().clear()
## ET
plot.errorbar(ro0_l, RLScher_ET_l, yerr=RLScher_ET_err_l, label='Redundant-RL', c=NICE_RED, marker=next(marker_c), linestyle=':', mew=0.5, ms=8)
plot.errorbar(ro0_l, Redsmall_ET_l, yerr=Redsmall_ET_err_l, label='Redundant-small', c=NICE_GREEN, marker=next(marker_c), linestyle=':', mew=0.5, ms=8)
prettify(plot.gca() )
plot.legend(framealpha=0.5, loc='best', numpoints=1)
plot.xticks(rotation=70)
plot.xlabel(r'Baseline offered load $\rho_0$', fontsize=fontsize)
plot.ylabel('Average job completion time', fontsize=fontsize)
plot.gcf().set_size_inches(4, 4)
plot.savefig('plot_ET_vs_ro__redsmall_vs_drl.png', bbox_inches='tight')
plot.gcf().clear()
log(INFO, "done.")
if __name__ == "__main__":
# ar = round(ar_for_ro(ro, N, Cap, k, R, L, S), 2)
  # sinfo_m = {
  #   'njob': 5000*N,
  #   'nworker': N, 'wcap': Cap, 'ar': ar,
  #   'k_rv': k,
  #   'reqed_rv': R,
  #   'lifetime_rv': L,
  #   'straggle_m': {'slowdown': lambda load: S.sample() } }
# mapping_m = {'type': 'spreading'}
# sching_m = {'type': 'expand_if_totaldemand_leq', 'r': r, 'threshold': None}
# u = 40*L.mean()*Sl.mean()
# for d in [0, *np.logspace(math.log10(l), math.log10(u), 20) ]:
plot_ET_wrt_d()
# plot_ESl_ET_vs_ro__redsmall_vs_drl()
| 39.880503
| 182
| 0.638543
|
e9716ba093a61cdd0cb28cae7e0361aab05a7121
| 1,352
|
py
|
Python
|
teste_mock/mock.py
|
gabriel-correia0408/Sala_Green_GabrielCorreia
|
1d22f466d372786c5f8c8eaba7202844b5f03445
|
[
"Apache-2.0"
] | null | null | null |
teste_mock/mock.py
|
gabriel-correia0408/Sala_Green_GabrielCorreia
|
1d22f466d372786c5f8c8eaba7202844b5f03445
|
[
"Apache-2.0"
] | null | null | null |
teste_mock/mock.py
|
gabriel-correia0408/Sala_Green_GabrielCorreia
|
1d22f466d372786c5f8c8eaba7202844b5f03445
|
[
"Apache-2.0"
] | null | null | null |
# unittest.mock is a library for testing in Python. It lets you replace parts
# of your system under test with mock objects and make assertions about how
# they were used.
# unittest.mock provides a core Mock class, removing the need to create a host
# of stubs throughout your test suite.
# After performing an action, you can make assertions about which methods/attributes
# were used and which arguments they were called with. You can also specify return
# values and set the attributes you need in the normal way.
# Additionally, mock provides a patch() decorator that handles patching module- and
# class-level attributes within the scope of a test, along with sentinel for creating
# unique objects. See the quick guide for some examples of how to use Mock, MagicMock
# and patch().
# Mock is very easy to use and is designed for use with unittest. Mock is based on the
# 'action -> assertion' pattern instead of 'record -> replay' used by many mocking
# frameworks.
# There is a backport of unittest.mock for earlier versions of Python, available as
# mock on PyPI.
# The example below should be rewritten to use a class that actually exists and can
# be imported.
from unittest.mock import MagicMock
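# Minimal stand-in class so the snippet below runs as written; the example references
# ProductionClass, which is not defined or imported in this file. In a real test you
# would import your actual production class here instead.
class ProductionClass(object):
    def method(self, *args, **kwargs):
        raise NotImplementedError("replaced by a MagicMock in the example below")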
thing = ProductionClass()
thing.method = MagicMock(return_value=3)
assert thing.method(3, 4, 5, key='value') == 3
thing.method.assert_called_with(3, 4, 5, key='value')
| 56.333333
| 102
| 0.787722
|
ddfb35cdd3e0cdb3e4a0860be3835cfceccc3063
| 66
|
py
|
Python
|
wsgi.py
|
ercancavusoglu/flask-skeleton
|
9c9c0b8f9b7b339837f532ea4ca9698e6a55900c
|
[
"Apache-2.0"
] | null | null | null |
wsgi.py
|
ercancavusoglu/flask-skeleton
|
9c9c0b8f9b7b339837f532ea4ca9698e6a55900c
|
[
"Apache-2.0"
] | null | null | null |
wsgi.py
|
ercancavusoglu/flask-skeleton
|
9c9c0b8f9b7b339837f532ea4ca9698e6a55900c
|
[
"Apache-2.0"
] | null | null | null |
from bootstrap import *
if __name__ == '__main__':
app.run()
| 13.2
| 26
| 0.651515
|
1388f57721c3024f212a6fab147ff8e8dfaf95da
| 39,500
|
py
|
Python
|
qap/cli.py
|
manwithadodla/quality-assessment-protocol
|
9f4d660bd67eb20d4b4a28ae7e837e6d396f0318
|
[
"BSD-3-Clause"
] | 38
|
2015-01-23T20:07:22.000Z
|
2021-11-08T07:08:27.000Z
|
qap/cli.py
|
manwithadodla/quality-assessment-protocol
|
9f4d660bd67eb20d4b4a28ae7e837e6d396f0318
|
[
"BSD-3-Clause"
] | 107
|
2015-01-09T00:34:34.000Z
|
2022-02-28T07:44:10.000Z
|
qap/cli.py
|
manwithadodla/quality-assessment-protocol
|
9f4d660bd67eb20d4b4a28ae7e837e6d396f0318
|
[
"BSD-3-Clause"
] | 24
|
2015-09-14T16:11:12.000Z
|
2021-10-04T08:09:16.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import os.path as op
import argparse
from nipype import config
log_dir=os.path.join("tmp","nipype","logs")
config.update_config({'logging': {'log_directory': log_dir,
'log_to_file': True}})
from nipype import logging
logger = logging.getLogger('workflow')
class QAProtocolCLI:
"""
This class and the associated run_workflow function implement what
the former scripts (qap_anatomical_spatial.py, etc.) contained
"""
def __init__(self, parse_args=True):
if parse_args:
self._parse_args()
else:
self._cloudify = False
self._bundle_idx = None
def _parse_args(self):
parser = argparse.ArgumentParser()
group = parser.add_argument_group(
"Regular Use Inputs (non-cloud runs)")
cloudgroup = parser.add_argument_group(
"AWS Cloud Inputs (only required for AWS Cloud runs)")
req = parser.add_argument_group("Required Inputs")
cloudgroup.add_argument('--bundle_idx', type=int,
help='Bundle index to run')
cloudgroup.add_argument('--log_dir', type=str,
help='Directory for workflow logging')
# Subject list (YAML file)
group.add_argument(
"data_config_file", type=str,
help="filepath to participant list YAML")
req.add_argument(
"pipeline_config_file", type=str,
help="filepath to pipeline configuration YAML")
# Write PDF reports
group.add_argument(
"--with-reports", action='store_true', default=False,
help="Write a summary report in PDF format.")
args = parser.parse_args()
# Load config
from qap.script_utils import read_yml_file
self._config = read_yml_file(args.pipeline_config_file)
self.validate_config_dict()
self._config['pipeline_config_yaml'] = \
os.path.realpath(args.pipeline_config_file)
self._run_name = self._config['pipeline_name']
if args.with_reports:
self._config['write_report'] = True
if "num_sessions_at_once" not in self._config.keys():
self._config["num_sessions_at_once"] = 1
if "num_bundles_at_once" not in self._config.keys():
self._config["num_bundles_at_once"] = 1
self._config["subject_list"] = os.path.realpath(args.data_config_file)
if args.bundle_idx:
self._bundle_idx = args.bundle_idx
else:
self._bundle_idx = None
if args.log_dir:
self._run_log_dir = args.log_dir
else:
self._run_log_dir = self._config["output_directory"]
def submit_cluster_batch_file(self, num_bundles):
"""Write the cluster batch file for the appropriate scheduler.
- Batch file setup code borrowed from dclark87's CPAC cluster setup
code:
- https://github.com/FCP-INDI/C-PAC/blob/0.4.0_development/CPAC/pipeline/cpac_runner.py
- https://github.com/dclark87
- This function will write the batch file appropriate for the
scheduler being used, and then this CLI will be run again on each
node/slot through the run_one_bundle function.
:type num_bundles: int
:param num_bundles: The number of bundles total being run.
"""
import os
import re
import getpass
import commands
from time import strftime
from indi_schedulers import cluster_templates
print "Submitting cluster job to %s.." % self._platform
# Create cluster log dir
cluster_files_dir = \
os.path.join(self._config["output_directory"], "cluster_files")
if not os.path.exists(cluster_files_dir):
os.makedirs(cluster_files_dir)
# Batch file variables
timestamp = str(strftime("%Y_%m_%d_%H_%M_%S"))
shell = commands.getoutput('echo $SHELL')
user_account = getpass.getuser()
# Set up config dictionary
config_dict = {'timestamp': timestamp,
'shell': shell,
'job_name': self._run_name,
'num_tasks': num_bundles,
'queue': "all.q",
'par_env': "mpi_smp",
'cores_per_task': self._config["num_processors"],
'user': user_account,
'work_dir': cluster_files_dir}
# Get string template for job scheduler
if self._platform == "PBS":
env_arr_idx = '$PBS_ARRAYID'
batch_file_contents = cluster_templates.pbs_template
confirm_str = '(?<=Your job-array )\d+'
exec_cmd = 'qsub'
elif self._platform == "SGE":
env_arr_idx = '$SGE_TASK_ID'
batch_file_contents = cluster_templates.sge_template
confirm_str = '(?<=Your job-array )\d+'
exec_cmd = 'qsub'
elif self._platform == "SLURM":
hrs_limit = 8 * num_bundles
time_limit = '%d:00:00' % hrs_limit
config_dict["time_limit"] = time_limit
env_arr_idx = '$SLURM_ARRAY_TASK_ID'
batch_file_contents = cluster_templates.slurm_template
confirm_str = '(?<=Submitted batch job )\d+'
exec_cmd = 'sbatch'
config_dict['env_arr_idx'] = env_arr_idx
config_dict['run_cmd'] = 'echo "Running task: %s"' % env_arr_idx
# Populate string from config dict values
batch_file_contents = batch_file_contents % config_dict
run_str = "qap_measures_pipeline.py --bundle_idx %s --log_dir %s %s "\
"%s" % (env_arr_idx, self._run_log_dir,
self._config["subject_list"],
self._config["pipeline_config_yaml"])
batch_file_contents = "\n".join([batch_file_contents, run_str])
batch_filepath = os.path.join(cluster_files_dir, 'cpac_submit_%s.%s'
% (timestamp, self._platform))
with open(batch_filepath, 'w') as f:
f.write(batch_file_contents)
print "Batch file written to %s.." % batch_filepath
# Get output response from job submission
out = commands.getoutput('%s %s' % (exec_cmd, batch_filepath))
# Check for successful qsub submission
if re.search(confirm_str, out) == None:
err_msg = 'Error submitting QAP pipeline run to %s queue' \
% self._platform
raise Exception(err_msg)
print "Batch job submitted to %s queue." % self._platform
# Get pid and send to pid file
pid = re.search(confirm_str, out).group(0)
pid_file = os.path.join(cluster_files_dir, 'pid.txt')
with open(pid_file, 'w') as f:
f.write(pid)
def validate_config_dict(self):
"""Validate the pipeline configuration dictionary to ensure the
parameters are properly set.
"""
config_options = ["pipeline_name",
"num_processors",
"num_sessions_at_once",
"available_memory",
"cluster_system",
"output_directory",
"working_directory",
"template_head_for_anat",
"exclude_zeros",
"start_idx",
"stop_idx",
"write_report",
"write_graph",
"write_all_outputs",
"upload_to_s3",
"bucket_prefix",
"bucket_out_prefix",
"local_prefix",
"bucket_name",
"creds_path"]
invalid = []
for param in self._config.keys():
if param not in config_options:
invalid.append(param)
if len(invalid) > 0:
err = "\n[!] The following parameters in your configuration " \
"file are not recognized. Double-check the pipeline " \
"configuration template.\n"
err += "\n".join([x for x in invalid])
raise Exception(err)
else:
return 0
def create_session_dict(self, subdict):
"""Collapse the participant resource pools so that each participant-
session combination has its own entry.
- input subdict format:
{'sub_01': {'session_01':
{'anatomical_scan': {'scan_01': <filepath>,
'scan_02': <filepath>},
'site_name': 'Site_1'} },
'sub_02': {..} }
- output dict format:
{ (sub01,session01): {"scan_01": {
"anatomical_scan": <filepath>},
{"scan_02": {
"anatomical_scan": <filepath>} } }
:type subdict: dict
:param subdict: A dictionary containing the filepaths of input files
for each participant, sorted by session and scan.
:rtype: dict
:return: A dictionary of dictionaries where each participant-session
combination has its own entry, and input file filepaths are
defined.
"""
from qap.qap_utils import raise_smart_exception
flat_sub_dict_dict = {}
sites_dict = {}
for subid in subdict.keys():
subid = str(subid)
# sessions
for session in subdict[subid].keys():
# resource files
for resource in subdict[subid][session].keys():
if type(subdict[subid][session][resource]) is dict:
# then this has sub-scans defined
for scan in subdict[subid][session][resource].keys():
filepath = subdict[subid][session][resource][scan]
resource_dict = {}
resource_dict[resource] = filepath
sub_info_tuple = (subid, session)
if sub_info_tuple not in flat_sub_dict_dict.keys():
flat_sub_dict_dict[sub_info_tuple] = {}
if scan not in flat_sub_dict_dict[sub_info_tuple].keys():
flat_sub_dict_dict[sub_info_tuple][scan] = {}
flat_sub_dict_dict[sub_info_tuple][scan].update(
resource_dict)
elif resource == "site_name":
sites_dict[subid] = subdict[subid][session][resource]
else:
filepath = subdict[subid][session][resource]
resource_dict = {}
resource_dict[resource] = filepath
sub_info_tuple = (subid, session)
if sub_info_tuple not in flat_sub_dict_dict.keys():
flat_sub_dict_dict[sub_info_tuple] = {}
flat_sub_dict_dict[sub_info_tuple].update(
resource_dict)
if len(flat_sub_dict_dict) == 0:
# this error message meant more for devs than user
msg = "The participant dictionary is empty."
raise_smart_exception(locals(), msg)
# in case some subjects have site names and others don't
if len(sites_dict.keys()) > 0:
for subid in subdict.keys():
subid = str(subid)
if subid not in sites_dict.keys():
sites_dict[subid] = None
# integrate site information into flat_sub_dict_dict
# it was separate in the first place to circumvent the fact
# that even though site_name doesn't get keyed with scan names
# names, that doesn't necessarily mean scan names haven't been
# specified for that participant
for sub_info_tuple in flat_sub_dict_dict.keys():
site_info = {}
site_info["site_name"] = sites_dict[sub_info_tuple[0]]
flat_sub_dict_dict[sub_info_tuple].update(site_info)
return flat_sub_dict_dict
def load_sublist(self):
"""Load the participant list YAML file into a dictionary and check.
- subdict format:
{'sub_01': {'session_01':
{'anatomical_scan': {'scan_01': <filepath>,
'scan_02': <filepath>},
'site_name': 'Site_1'} },
'sub_02': {..} }
:rtype: dictionary
:return: The participant list in a dictionary.
"""
import yaml
from qap.qap_utils import raise_smart_exception
if "subject_list" in self._config.keys():
with open(self._config["subject_list"], "r") as f:
subdict = yaml.load(f)
else:
msg = "\n\n[!] There is no participant list YML to read.\n\n"
raise_smart_exception(locals(),msg)
if len(subdict) == 0:
msg = "The participant list provided is either empty or could " \
"not be read properly!"
raise_smart_exception(locals(),msg)
return subdict
def create_bundles(self):
"""Create a list of participant "bundles".
:rtype: list
:return: A list of bundles - each bundle being a dictionary that is a
starting resource pool for N sub-session-scan combos with N
being the number of participants per bundle (set by the user)
"""
from qap.qap_utils import raise_smart_exception
i = 0
bundles = []
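        # Pack participant-session entries into bundles of num_sessions_at_once;
        # each bundle is later executed as one workflow run (one cluster task).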
for session_tuple in self._sub_dict.keys():
if i == 0:
new_bundle = {}
sub = session_tuple[0]
ses = session_tuple[1]
site_name = None
if "site_name" in self._sub_dict[session_tuple].keys():
site_name = self._sub_dict[session_tuple]["site_name"]
for scan in self._sub_dict[session_tuple].keys():
if type(self._sub_dict[session_tuple][scan]) is dict:
# to avoid fields in sub_dict[session_tuple] that are
# strings (such as site_name or creds_path)
sub_info_tuple = (sub, ses, scan)
new_bundle[sub_info_tuple] = \
self._sub_dict[session_tuple][scan]
if site_name:
new_bundle[sub_info_tuple].update({"site_name": site_name})
i += 1
if i == self._config["num_sessions_at_once"]:
bundles.append(new_bundle)
i = 0
if i > 0:
bundles.append(new_bundle)
if len(bundles) == 0:
msg = "No bundles created."
raise_smart_exception(locals(),msg)
return bundles
def run_one_bundle(self, bundle_idx, run=True):
"""Execute one bundle's workflow on one node/slot of a cluster/grid.
- Compatible with Amazon AWS cluster runs, and S3 buckets.
:type bundle_idx: int
:param bundle_idx: The bundle ID number - used to calculate which
entries in the participant list to pull into the
current bundle, based on the number of participants
per bundle (participants at once).
:type run: bool
:param run: Run flag, set to False for testing.
:rtype: dictionary
:return: A dictionary with information about the workflow run, its
status, and results.
"""
import os
from qap_utils import write_json
from cloud_utils import download_single_s3_path
self._config["workflow_log_dir"] = self._run_log_dir
bundle_dict = self._bundles_list[bundle_idx-1]
num_bundles = len(self._bundles_list)
# check for s3 paths
for sub in bundle_dict.keys():
# in case we're dealing with string entries in the data dict
try:
bundle_dict[sub].keys()
except AttributeError:
continue
for resource in bundle_dict[sub].keys():
value = bundle_dict[sub][resource]
if "s3://" in value:
bundle_dict[sub][resource] = \
download_single_s3_path(value, self._config)
wfargs = (bundle_dict, bundle_dict.keys(),
self._config, self._run_name, self.runargs,
bundle_idx, num_bundles)
if run:
# let's go!
rt = run_workflow(wfargs)
# write bundle results to JSON file
write_json(rt, os.path.join(rt["bundle_log_dir"],
"workflow_results.json"))
# make not uploading results to S3 bucket the default if not
# specified
if "upload_to_s3" not in self._config.keys():
self._config["upload_to_s3"] = False
# upload results
if self._config["upload_to_s3"]:
from cloud_utils import upl_qap_output
upl_qap_output(self._config)
return rt
else:
return wfargs
def run(self, config_file=None, partic_list=None):
"""Establish where and how we're running the pipeline and set up the
run. (Entry point)
- This is the entry point for pipeline building and connecting.
Depending on the inputs, the appropriate workflow runner will
be selected and executed.
:type config_file: str
:param config_file: Filepath to the pipeline configuration file in
YAML format.
:type partic_list: str
:param partic_list: Filepath to the participant list file in YAML
format.
"""
from time import strftime
from qap_utils import raise_smart_exception, \
check_config_settings
# in case we are overloading
if config_file:
from qap.script_utils import read_yml_file
self._config = read_yml_file(config_file)
self.validate_config_dict()
self._config["pipeline_config_yaml"] = config_file
if not self._config:
raise Exception("config not found!")
if partic_list:
self._config["subject_list"] = partic_list
# Get configurations and settings
check_config_settings(self._config, "num_processors")
check_config_settings(self._config, "num_sessions_at_once")
check_config_settings(self._config, "available_memory")
check_config_settings(self._config, "output_directory")
check_config_settings(self._config, "working_directory")
self._num_bundles_at_once = 1
write_report = self._config.get('write_report', False)
if "cluster_system" in self._config.keys() and not self._bundle_idx:
res_mngr = self._config["cluster_system"]
if (res_mngr == None) or ("None" in res_mngr) or \
("none" in res_mngr):
self._platform = None
else:
platforms = ["SGE", "PBS", "SLURM"]
self._platform = str(res_mngr).upper()
if self._platform not in platforms:
msg = "The resource manager %s provided in the pipeline "\
"configuration file is not one of the valid " \
"choices. It must be one of the following:\n%s" \
% (self._platform, str(platforms))
raise_smart_exception(locals(), msg)
else:
self._platform = None
# Create output directory
try:
os.makedirs(self._config["output_directory"])
except:
if not op.isdir(self._config["output_directory"]):
err = "[!] Output directory unable to be created.\n" \
"Path: %s\n\n" % self._config["output_directory"]
raise Exception(err)
else:
pass
# Create working directory
try:
os.makedirs(self._config["working_directory"])
except:
if not op.isdir(self._config["working_directory"]):
err = "[!] Output directory unable to be created.\n" \
"Path: %s\n\n" % self._config["working_directory"]
raise Exception(err)
else:
pass
results = []
# set up callback logging
import logging
from nipype.pipeline.plugins.callback_log import log_nodes_cb
cb_log_filename = os.path.join(self._config["output_directory"],
"callback.log")
# Add handler to callback log file
cb_logger = logging.getLogger('callback')
cb_logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(cb_log_filename)
cb_logger.addHandler(handler)
# settle run arguments (plugins)
self.runargs = {}
self.runargs['plugin'] = 'MultiProc'
self.runargs['plugin_args'] = \
{'memory_gb': int(self._config["available_memory"]),
'status_callback': log_nodes_cb}
n_procs = {'n_procs': self._config["num_processors"]}
self.runargs['plugin_args'].update(n_procs)
# load the participant list file into dictionary
subdict = self.load_sublist()
# flatten the participant dictionary
self._sub_dict = self.create_session_dict(subdict)
# create the list of bundles
self._bundles_list = self.create_bundles()
num_bundles = len(self._bundles_list)
if not self._bundle_idx:
# want to initialize the run-level log directory (not the bundle-
# level) only the first time we run the script, due to the
# timestamp. if sub-nodes are being kicked off by a batch file on
# a cluster, we don't want a new timestamp for every new node run
self._run_log_dir = op.join(self._config['output_directory'],
'_'.join([self._run_name, "logs"]),
'_'.join([strftime("%Y%m%d_%H_%M_%S"),
"%dbundles" % num_bundles]))
if self._run_log_dir:
if not os.path.isdir(self._run_log_dir):
try:
os.makedirs(self._run_log_dir)
except:
if not op.isdir(self._run_log_dir):
err = "[!] Log directory unable to be created.\n" \
"Path: %s\n\n" % self._run_log_dir
raise Exception(err)
else:
pass
if num_bundles == 1:
self._config["num_sessions_at_once"] = \
len(self._bundles_list[0])
# Start the magic
if not self._platform and not self._bundle_idx:
# not a cluster/grid run
for idx in range(1, num_bundles+1):
results.append(self.run_one_bundle(idx))
elif not self._bundle_idx:
# there is a self._bundle_idx only if the pipeline runner is run
# with bundle_idx as a parameter - only happening either manually,
# or when running on a cluster
self.submit_cluster_batch_file(num_bundles)
else:
# if there is a bundle_idx supplied to the runner
results = self.run_one_bundle(self._bundle_idx)
def starter_node_func(starter):
"""Pass a dummy string through to provide a basic function for the first
Nipype workflow node.
- This is used for a Nipype utility function node to serve as a starting
node to connect to multiple unrelated Nipype workflows. Each of these
workflows runs QAP for one participant in the current bundle being run.
- Connecting the multiple non-interdependent participant workflows as
one workflow allows the Nipype resource scheduler to maximize
performance.
:type starter: str
:param starter: A dummy string.
:rtype: str
:return: The same string.
"""
return starter
def run_workflow(args, run=True):
"""Connect and execute the QAP Nipype workflow for one bundle of data.
- This function will update the resource pool with what is found in the
output directory (if it already exists). If the final expected output
of the pipeline is already found, the pipeline will not run and it
will move onto the next bundle. If the final expected output is not
present, the pipeline begins to build itself backwards.
:type args: tuple
:param args: A 7-element tuple of information comprising of the bundle's
resource pool, a list of participant info, the configuration
options, the pipeline ID run name and miscellaneous run args.
:rtype: dictionary
:return: A dictionary with information about the workflow run, its status,
and results.
"""
import os
import os.path as op
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu
import qap
from qap_utils import read_json
import glob
import time
from time import strftime
from nipype import config as nyconfig
# unpack args
resource_pool_dict, sub_info_list, config, run_name, runargs, \
bundle_idx, num_bundles = args
# Read and apply general settings in config
keep_outputs = config.get('write_all_outputs', False)
# take date+time stamp for run identification purposes
pipeline_start_stamp = strftime("%Y-%m-%d_%H:%M:%S")
pipeline_start_time = time.time()
if "workflow_log_dir" not in config.keys():
config["workflow_log_dir"] = config["output_directory"]
bundle_log_dir = op.join(config["workflow_log_dir"],
'_'.join(["bundle", str(bundle_idx)]))
try:
os.makedirs(bundle_log_dir)
except:
if not op.isdir(bundle_log_dir):
err = "[!] Bundle log directory unable to be created.\n" \
"Path: %s\n\n" % bundle_log_dir
raise Exception(err)
else:
pass
# set up logging
nyconfig.update_config(
{'logging': {'log_directory': bundle_log_dir, 'log_to_file': True}})
logging.update_logging(nyconfig)
logger.info("QAP version %s" % qap.__version__)
logger.info("Pipeline start time: %s" % pipeline_start_stamp)
workflow = pe.Workflow(name=run_name)
workflow.base_dir = op.join(config["working_directory"])
# set up crash directory
workflow.config['execution'] = \
{'crashdump_dir': config["output_directory"]}
# create the one node all participants will start from
starter_node = pe.Node(niu.Function(input_names=['starter'],
output_names=['starter'],
function=starter_node_func),
name='starter_node')
# set a dummy variable
starter_node.inputs.starter = ""
new_outputs = 0
# iterate over each subject in the bundle
logger.info("Starting bundle %s out of %s.." % (str(bundle_idx),
str(num_bundles)))
# results dict
rt = {'status': 'Started', 'bundle_log_dir': bundle_log_dir}
for sub_info in sub_info_list:
resource_pool = resource_pool_dict[sub_info]
# in case we're dealing with string entries in the data dict
try:
resource_pool.keys()
except AttributeError:
continue
# resource pool check
invalid_paths = []
for resource in resource_pool.keys():
try:
if not op.isfile(resource_pool[resource]) and resource != "site_name":
invalid_paths.append((resource, resource_pool[resource]))
except:
err = "\n\n[!]"
raise Exception(err)
if len(invalid_paths) > 0:
err = "\n\n[!] The paths provided in the subject list to the " \
"following resources are not valid:\n"
for path_tuple in invalid_paths:
err = "%s%s: %s\n" % (err, path_tuple[0], path_tuple[1])
err = "%s\n\n" % err
raise Exception(err)
# process subject info
sub_id = str(sub_info[0])
# for nipype
if "-" in sub_id:
sub_id = sub_id.replace("-","_")
if "." in sub_id:
sub_id = sub_id.replace(".","_")
if sub_info[1]:
session_id = str(sub_info[1])
# for nipype
if "-" in session_id:
session_id = session_id.replace("-","_")
if "." in session_id:
session_id = session_id.replace(".","_")
else:
session_id = "session_0"
if sub_info[2]:
scan_id = str(sub_info[2])
# for nipype
if "-" in scan_id:
scan_id = scan_id.replace("-","_")
if "." in scan_id:
scan_id = scan_id.replace(".","_")
else:
scan_id = "scan_0"
name = "_".join(["", sub_id, session_id, scan_id])
rt[name] = {'id': sub_id, 'session': session_id, 'scan': scan_id,
'resource_pool': str(resource_pool)}
logger.info("Participant info: %s" % name)
# set output directory
output_dir = op.join(config["output_directory"], run_name,
sub_id, session_id, scan_id)
try:
os.makedirs(output_dir)
except:
if not op.isdir(output_dir):
err = "[!] Output directory unable to be created.\n" \
"Path: %s\n\n" % output_dir
raise Exception(err)
else:
pass
# for QAP spreadsheet generation only
config.update({"subject_id": sub_id, "session_id": session_id,
"scan_id": scan_id, "run_name": run_name})
if "site_name" in resource_pool:
config.update({"site_name": resource_pool["site_name"]})
logger.info("Configuration settings:\n%s" % str(config))
qap_types = ["anatomical_spatial",
"functional_spatial",
"functional_temporal"]
# update that resource pool with what's already in the output
# directory
for resource in os.listdir(output_dir):
if (op.exists(op.join(output_dir, resource)) and
resource not in resource_pool.keys()):
try:
resource_pool[resource] = \
glob.glob(op.join(output_dir, resource, "*"))[0]
except IndexError:
if ".json" in resource:
# load relevant json info into resource pool
json_file = op.join(output_dir, resource)
json_dict = read_json(json_file)
sub_json_dict = json_dict["%s %s %s" % (sub_id,
session_id,
scan_id)]
if "anatomical_header_info" in sub_json_dict.keys():
resource_pool["anatomical_header_info"] = \
sub_json_dict["anatomical_header_info"]
if "functional_header_info" in sub_json_dict.keys():
resource_pool["functional_header_info"] = \
sub_json_dict["functional_header_info"]
for qap_type in qap_types:
if qap_type in sub_json_dict.keys():
resource_pool["_".join(["qap",qap_type])] = \
sub_json_dict[qap_type]
except:
# a stray file in the sub-sess-scan output directory
pass
# create starter node which links all of the parallel workflows within
# the bundle together as a Nipype pipeline
resource_pool["starter"] = (starter_node, 'starter')
# individual workflow and logger setup
logger.info("Contents of resource pool for this participant:\n%s"
% str(resource_pool))
# start connecting the pipeline
qw = None
for qap_type in qap_types:
if "_".join(["qap", qap_type]) not in resource_pool.keys():
if qw is None:
from qap import qap_workflows as qw
wf_builder = \
getattr(qw, "_".join(["qap", qap_type, "workflow"]))
workflow, resource_pool = wf_builder(workflow, resource_pool,
config, name)
if ("anatomical_scan" in resource_pool.keys()) and \
("anatomical_header_info" not in resource_pool.keys()):
if qw is None:
from qap import qap_workflows as qw
workflow, resource_pool = \
qw.qap_gather_header_info(workflow, resource_pool, config,
name, "anatomical")
if ("functional_scan" in resource_pool.keys()) and \
("functional_header_info" not in resource_pool.keys()):
if qw is None:
from qap import qap_workflows as qw
workflow, resource_pool = \
qw.qap_gather_header_info(workflow, resource_pool, config,
name, "functional")
# set up the datasinks
out_list = []
for output in resource_pool.keys():
for qap_type in qap_types:
if qap_type in output:
out_list.append("_".join(["qap", qap_type]))
# write_all_outputs (writes everything to the output directory, not
# just the final JSON files)
if keep_outputs:
out_list = resource_pool.keys()
logger.info("Outputs we're keeping: %s" % str(out_list))
logger.info('Resource pool keys after workflow connection: '
'{}'.format(str(resource_pool.keys())))
# Save reports to out_dir if necessary
if config.get('write_report', False):
if ("qap_mosaic" in resource_pool.keys()) and \
("qap_mosaic" not in out_list):
out_list += ['qap_mosaic']
# The functional temporal also has an FD plot
if 'qap_functional_temporal' in resource_pool.keys():
if ("qap_fd" in resource_pool.keys()) and \
("qap_fd" not in out_list):
out_list += ['qap_fd']
for output in out_list:
# we use a check for len()==2 here to select those items in the
# resource pool which are tuples of (node, node_output), instead
# of the items which are straight paths to files
# resource pool items which are in the tuple format are the
# outputs that have been created in this workflow because they
# were not present in the subject list YML (the starting resource
# pool) and had to be generated
if (len(resource_pool[output]) == 2) and (output != "starter"):
ds = pe.Node(nio.DataSink(), name='datasink_%s%s'
% (output,name))
ds.inputs.base_directory = output_dir
node, out_file = resource_pool[output]
workflow.connect(node, out_file, ds, output)
new_outputs += 1
elif ".json" in resource_pool[output]:
new_outputs += 1
logger.info("New outputs: %s" % str(new_outputs))
# run the pipeline (if there is anything to do)
if new_outputs > 0:
if config.get('write_graph', False):
workflow.write_graph(
dotfilename=op.join(config["output_directory"],
"".join([run_name, ".dot"])),
simple_form=False)
workflow.write_graph(
graph2use="orig",
dotfilename=op.join(config["output_directory"],
"".join([run_name, ".dot"])),
simple_form=False)
workflow.write_graph(
graph2use="hierarchical",
dotfilename=op.join(config["output_directory"],
"".join([run_name, ".dot"])),
simple_form=False)
if run:
try:
logger.info("Running with plugin %s" % runargs["plugin"])
logger.info("Using plugin args %s" % runargs["plugin_args"])
workflow.run(plugin=runargs["plugin"],
plugin_args=runargs["plugin_args"])
rt['status'] = 'finished'
logger.info("Workflow run finished for bundle %s."
% str(bundle_idx))
except Exception as e: # TODO We should be more specific here ...
errmsg = e
rt.update({'status': 'failed'})
logger.info("Workflow run failed for bundle %s."
% str(bundle_idx))
# ... however this is run inside a pool.map: do not raise
# Exception
else:
return workflow
else:
rt['status'] = 'cached'
logger.info("\nEverything is already done for bundle %s."
% str(bundle_idx))
# Remove working directory when done
if not keep_outputs:
try:
work_dir = op.join(workflow.base_dir, scan_id)
if op.exists(work_dir):
import shutil
shutil.rmtree(work_dir)
except:
logger.warn("Couldn\'t remove the working directory!")
pass
if rt["status"] == "failed":
logger.error(errmsg)
else:
pipeline_end_stamp = strftime("%Y-%m-%d_%H:%M:%S")
pipeline_end_time = time.time()
logger.info("Elapsed time (minutes) since last start: %s"
% ((pipeline_end_time - pipeline_start_time) / 60))
logger.info("Pipeline end time: %s" % pipeline_end_stamp)
return rt
| 39.147671
| 101
| 0.550051
|
7e36a2ea4e2c2fe4fde376f0ba1d24ba3ec3a922
| 10,510
|
py
|
Python
|
installer/bkt_install/reg.py
|
pyro-team/bkt-toolbox
|
bbccba142a81ca0a46056f2bcda75899979158a5
|
[
"MIT"
] | 12
|
2019-05-31T02:57:26.000Z
|
2022-03-26T09:40:50.000Z
|
installer/bkt_install/reg.py
|
mrflory/bkt-toolbox
|
bbccba142a81ca0a46056f2bcda75899979158a5
|
[
"MIT"
] | 27
|
2021-11-27T16:33:19.000Z
|
2022-03-27T17:47:26.000Z
|
installer/bkt_install/reg.py
|
pyro-team/bkt-toolbox
|
bbccba142a81ca0a46056f2bcda75899979158a5
|
[
"MIT"
] | 3
|
2019-06-12T10:59:20.000Z
|
2020-04-21T15:13:50.000Z
|
# -*- coding: utf-8 -*-
'''
Created on 19.02.2017
@author: chschmitt
'''
from __future__ import absolute_import, division, print_function
import os.path
from contextlib import contextmanager
import System
import Microsoft.Win32 as Win32
from System.Reflection import Assembly, AssemblyName
RegistryHive = Win32.RegistryHive
RegistryView = Win32.RegistryView
RegistryKey = Win32.RegistryKey
RegistryValueKind = Win32.RegistryValueKind
class PathString(str):
def __truediv__(self, other):
if not other or ('\\' in other) or ('/' in other):
raise ValueError
return type(self)(self + '\\' + other)
__div__ = __truediv__
__floordiv__ = __truediv__
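    # The overloaded '/' joins path segments with a backslash, e.g.
    # PathString('Software') / 'Classes' / 'CLSID' gives 'Software\Classes\CLSID'.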
@contextmanager
def open_key(base, path, *args, **kwargs):
try:
value = base.OpenSubKey(path, *args, **kwargs)
if value is None:
raise KeyError(str(base) + '\\' + path)
yield value
finally:
if value:
value.Close()
@contextmanager
def open_or_create(base, path, *args, **kwargs):
value = None
try:
value = base.CreateSubKey(path, *args, **kwargs)
if value is None:
raise KeyError(str(base) + '\\' + path)
yield value
finally:
if value:
value.Close()
class Properties(object):
pass
class AssemblyRegService(object):
def __init__(self, prog_id=None, uuid=None, assembly_path=None, wow6432=True):
self.prog_id = prog_id
self.uuid = uuid
self.assembly_path = assembly_path
self.wow6432 = wow6432
def load_assembly_attributes(self):
assembly = Assembly.ReflectionOnlyLoadFrom(self.assembly_path)
assembly_name = AssemblyName(assembly.FullName)
assembly_uri = u'file:///' + self.assembly_path.replace(os.path.sep, u'/')
p = Properties()
p.full_name = assembly.FullName
p.version = str(assembly_name.Version)
p.codebase_uri = assembly_uri
p.runtime_version = assembly.ImageRuntimeVersion
self.assembly_properties = p
def get_hkcu(self, view=RegistryView.Default):
return RegistryKey.OpenBaseKey(RegistryHive.CurrentUser, view)
def get_hkcu_wow(self):
if System.Environment.Is64BitOperatingSystem:
if self.wow6432:
view = RegistryView.Registry32
else:
view = RegistryView.Registry64
else:
view = RegistryView.Default
return self.get_hkcu(view)
def _define_prog_id(self, base, prog_id, uuid):
prog_id_path = PathString('Software') / 'Classes' / prog_id
with open_or_create(base, prog_id_path) as prog_id_key:
prog_id_key.SetValue('', prog_id, RegistryValueKind.String)
with open_or_create(base, prog_id_path / 'CLSID') as clsid:
clsid.SetValue('', uuid, RegistryValueKind.String)
def define_prog_id(self):
with self.get_hkcu() as base:
self._define_prog_id(base, self.prog_id, self.uuid)
def define_wow_uuid_clsid(self):
with self.get_hkcu_wow() as base:
self._define_wow_uuid_clsid(base)
def _define_wow_uuid_clsid(self, base):
uuid_path = PathString('Software') / 'Classes' / 'CLSID' / self.uuid
with open_or_create(base, uuid_path) as uuid:
uuid.SetValue('', self.prog_id, RegistryValueKind.String)
with open_or_create(base, uuid_path / 'ProgId') as uuid:
uuid.SetValue('', self.prog_id, RegistryValueKind.String)
with open_or_create(base, uuid_path / 'Implemented Categories' / '{62C8FE65-4EBB-45E7-B440-6E39B2CDBF29}'):
pass
with open_or_create(base, uuid_path / 'InprocServer32') as serv:
serv.SetValue('', 'mscoree.dll')
serv.SetValue('ThreadingModel', 'Both')
serv.SetValue('Class', self.prog_id)
p = self.assembly_properties
serv.SetValue('Assembly', p.full_name)
serv.SetValue('RuntimeVersion', p.runtime_version)
serv.SetValue('CodeBase', p.codebase_uri)
with open_or_create(base, uuid_path / 'InprocServer32' / self.assembly_properties.version) as version:
version.SetValue('Class', self.prog_id)
p = self.assembly_properties
version.SetValue('Assembly', p.full_name)
version.SetValue('RuntimeVersion', p.runtime_version)
version.SetValue('CodeBase', p.codebase_uri)
def register_assembly(self):
self.load_assembly_attributes()
self.define_prog_id()
self.define_wow_uuid_clsid()
def unregister_assembly(self):
prog_id_path = PathString('Software') / 'Classes' / self.prog_id
with self.get_hkcu() as base:
base.DeleteSubKeyTree(prog_id_path, False)
uuid_path = PathString('Software') / 'Classes' / 'CLSID' / self.uuid
with self.get_hkcu_wow() as base:
base.DeleteSubKeyTree(uuid_path, False)
def office_default_path(app_name):
return PathString('Software') / 'Microsoft' / 'Office' / app_name / 'Addins'
class AddinRegService(object):
def __init__(self, prog_id, friendly_name, description, addins_regpath, load_behavior):
self.prog_id = prog_id
self.friendly_name = friendly_name
self.description = description
self.addins_regpath = addins_regpath
self.load_behavior = load_behavior
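        # LoadBehavior 3 is the usual value for add-ins that should be loaded
        # automatically (connected) at application startup.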
def get_hkcu(self, view=RegistryView.Default):
return RegistryKey.OpenBaseKey(RegistryHive.CurrentUser, view)
def register_addin(self):
with self.get_hkcu() as base:
self._register_addin(base)
def _register_addin(self, base):
prog_id_path = self.addins_regpath / self.prog_id
with open_or_create(base, prog_id_path) as prog_id:
prog_id.SetValue('LoadBehavior', self.load_behavior, RegistryValueKind.DWord)
prog_id.SetValue('FriendlyName', self.friendly_name)
prog_id.SetValue('Description', self.description)
def unregister_addin(self):
prog_id_path = self.addins_regpath / self.prog_id
with self.get_hkcu() as base:
base.DeleteSubKeyTree(prog_id_path, False)
class ResiliencyRegService(object):
def __init__(self, prog_id, app_name):
self.prog_id = prog_id
self.app_name = app_name
def get_hkcu(self, view=RegistryView.Default):
return RegistryKey.OpenBaseKey(RegistryHive.CurrentUser, view)
def add_to_dndlist(self):
path = PathString('Software') / 'Microsoft' / 'Office' / '16.0' / self.app_name / 'Resiliency' / 'DoNotDisableAddinList'
with self.get_hkcu() as base:
with open_or_create(base, path) as dndlist:
dndlist.SetValue(self.prog_id, 1, RegistryValueKind.DWord)
def remove_from_dndlist(self):
path = PathString('Software') / 'Microsoft' / 'Office' / '16.0' / self.app_name / 'Resiliency' / 'DoNotDisableAddinList'
with self.get_hkcu() as base:
try:
with open_key(base, path, True) as dndlist: #writable=True
dndlist.DeleteValue(self.prog_id)
except (KeyError, ValueError):
pass
def clear_disabled_items(self):
app_paths = [
PathString('Software') / 'Microsoft' / 'Office' / 'ClickToRun' / 'REGISTRY' / 'MACHINE' / 'Software' / 'Microsoft' / 'Office' / '16.0' / self.app_name / 'Resiliency' / 'DisabledItems',
PathString('Software') / 'Microsoft' / 'Office' / '16.0' / self.app_name / 'Resiliency' / 'DisabledItems',
PathString('Software') / 'Microsoft' / 'Office' / '15.0' / self.app_name / 'Resiliency' / 'DisabledItems',
PathString('Software') / 'Microsoft' / 'Office' / '14.0' / self.app_name / 'Resiliency' / 'DisabledItems',
]
removed_items = 0
with self.get_hkcu() as base:
for path in app_paths:
# base.DeleteSubKey(path, False)
try:
with open_key(base, path, True) as key: #writable=True
# removed_items += key.ValueCount
for name in key.GetValueNames():
#FIXME: would be better to remove only the BKT entry, not all
key.DeleteValue(name)
removed_items += 1
except KeyError:
continue
return removed_items
class QueryRegService(object):
def get_hklm(self, view=RegistryView.Default):
return RegistryKey.OpenBaseKey(RegistryHive.LocalMachine, view)
# def get_hkcu(self, view=RegistryView.Default):
# return RegistryKey.OpenBaseKey(RegistryHive.CurrentUser, view)
def _get_outlook_bitness_for_base(self, base, app_paths):
with open_key(base, app_paths) as path:
return path.GetValue('Bitness')
def _get_path_for_base(self, base, app_name):
app_paths = PathString('Software') / 'Microsoft' / 'Windows' / 'CurrentVersion' / 'App Paths' / app_name
with open_key(base, app_paths) as path:
return path.GetValue('')
def get_app_path(self, app_name='excel.exe'):
with self.get_hklm() as base:
return self._get_path_for_base(base, app_name)
# NOTE: If office is installed from Microsoft Store the app path exists in HKCU, but
# the path is under Program Files\WindowsApps\... which is not readable, so no need to check this
# with self.get_hkcu() as base:
# try:
# return self._get_path_for_base(base, app_name)
# except KeyError:
# pass
# raise KeyError("no path in registry found for %s" % app_name)
def get_outlook_bitness(self):
paths = [
PathString('Software') / 'Microsoft' / 'Office' / 'ClickToRun' / 'REGISTRY' / 'MACHINE' / 'Software' / 'Microsoft' / 'Office' / '16.0' / 'Outlook',
PathString('Software') / 'Microsoft' / 'Office' / '16.0' / 'Outlook',
PathString('Software') / 'Microsoft' / 'Office' / '15.0' / 'Outlook',
PathString('Software') / 'Microsoft' / 'Office' / '14.0' / 'Outlook',
]
with self.get_hklm() as base:
for path in paths:
try:
return self._get_outlook_bitness_for_base(base, path)
except KeyError:
continue
return None
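# Hypothetical usage sketch, not part of the original module: wiring the
# services above together for an Excel add-in. The ProgID, names, and load
# behavior value below are illustrative assumptions.
def _example_register_excel_addin():
    addin = AddinRegService(
        prog_id='ExampleCo.ExampleAddin',
        friendly_name='Example Add-in',
        description='Illustrative add-in registration',
        addins_regpath=office_default_path('Excel'),
        load_behavior=3,  # 3 = load at application startup
    )
    addin.register_addin()
    resiliency = ResiliencyRegService('ExampleCo.ExampleAddin', 'Excel')
    resiliency.add_to_dndlist()
    # Query where the host application lives and which bitness Outlook reports
    query = QueryRegService()
    return query.get_app_path('excel.exe'), query.get_outlook_bitness()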
| 38.357664
| 196
| 0.629401
|
7be85c7bd097b93b6d435104af04c1b44616007d
| 3,334
|
py
|
Python
|
scrapydemo/scrapydemo/spiders/cna_sections.py
|
jasperaiwizard/scrapper
|
64e8bbec1654df00e57ef8d083ffa5542fe39612
|
[
"MIT"
] | 1
|
2021-12-31T12:50:33.000Z
|
2021-12-31T12:50:33.000Z
|
scrapydemo/scrapydemo/spiders/cna_sections.py
|
coderdoufu/article_scraper
|
79abd2a4dae919de9f8cd30bc7aa7b29eb7f67c1
|
[
"MIT"
] | null | null | null |
scrapydemo/scrapydemo/spiders/cna_sections.py
|
coderdoufu/article_scraper
|
79abd2a4dae919de9f8cd30bc7aa7b29eb7f67c1
|
[
"MIT"
] | null | null | null |
import scrapy
from scrapy.crawler import CrawlerRunner
from scrapy.utils.project import get_project_settings
from scrapydemo.scrapydemo.items import ArticleItem
from twisted.internet import reactor
import sys, os
from urllib.parse import quote_plus
import json
from datetime import datetime as dt
def get_request_payload():
query = {
"maxValuesPerFacet": 40,
"page": 0,
"hitsPerPage": 1,
"highlightPreTag": quote_plus("__ais-highlight__"),
"highlightPostTag": quote_plus("__/ais-highlight__"),
"attributesToRetrieve": [],
"attributesToHighlight": [],
"attributesToSnippet": [],
"facets": "categories",
"facetFilters": quote_plus(str([[f"type:article"]])).replace("%27","%22")
}
query_url = "query=&"+ "&".join("%s=%s" % (k,v) for k,v in query.items())
query_string = {
"requests":
[{
"indexName":"cnarevamp-ezrqv5hx",
"params":query_url
}]
}
query_string_url = json.dumps(query_string)
return query_string_url
class CnaSectionsSpider(scrapy.Spider):
name = 'cna_sections'
allowed_domains = ['channelnewsasia.com']
start_urls = ['https://www.channelnewsasia.com']
custom_settings = {
"ITEM_PIPELINES" : {}
}
def start_requests(self):
url = 'https://kkwfbq38xf-dsn.algolia.net/1/indexes/*/queries?x-algolia-agent=Algolia%20for%20JavaScript%20(3.35.1)%3B%20Browser%20(lite)%3B%20instantsearch.js%20(4.0.0)%3B%20JS%20Helper%20(0.0.0-5a0352a)&x-algolia-application-id=KKWFBQ38XF&x-algolia-api-key=e5eb600a29d13097eef3f8da05bf93c1'
headers = {
"accept": 'application/json',
"Accept-Encoding": 'gzip, deflate, br',
"Accept-Language": 'en-GB,en-US;q=0.9,en;q=0.8,zh-CN;q=0.7,zh-TW;q=0.6,zh;q=0.5',
"Connection": 'keep-alive',
"content-type": 'application/x-www-form-urlencoded',
"Host": 'kkwfbq38xf-dsn.algolia.net',
"Origin": 'https://www.channelnewsasia.com',
"Referer": 'https://www.channelnewsasia.com/',
"Sec-Fetch-Mode": 'cors',
"Sec-Fetch-Site": 'cross-site',
}
yield scrapy.Request(url,
callback=self.parse_api,
method="POST",
body=get_request_payload(),
headers=headers)
def parse(self,response):
pass
def parse_api(self, response):
categories = json.loads(response.body.decode('utf-8'))['results'][0]['facets']['categories']
        with open('scrapydemo/data/cna_sections.txt', 'w') as f:
            f.write(','.join(categories.keys()))
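    # Assumed shape of the Algolia response handled above (inferred from the
    # access pattern, not confirmed against the live API):
    #   {"results": [{"facets": {"categories": {"Asia": 123, "Business": 456, ...}}}]}
    # The keys of "categories" are the section names written to cna_sections.txt.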
if __name__ == "__main__":
# configure_logging({'LOG_FORMAT': '%(levelname)s: %(message)s'})
runner = CrawlerRunner(get_project_settings())
d = runner.crawl(CnaSectionsSpider)
d.addBoth(lambda _: reactor.stop())
reactor.run()
# close reactor after finishing crawling
os.execl(sys.executable, *sys.argv)
| 37.044444
| 300
| 0.563587
|
b88285738a4873c7ad1133b628a7ffe8e00c9cb8
| 2,398
|
py
|
Python
|
tests/test_pseudopos.py
|
slacAdpai/pcdsdevices
|
7d79821f8b5652a7fd457b9a73d22ef37b2043d3
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
tests/test_pseudopos.py
|
slacAdpai/pcdsdevices
|
7d79821f8b5652a7fd457b9a73d22ef37b2043d3
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
tests/test_pseudopos.py
|
slacAdpai/pcdsdevices
|
7d79821f8b5652a7fd457b9a73d22ef37b2043d3
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
import logging
import pytest
from ophyd.device import Component as Cpt
from ophyd.positioner import SoftPositioner
from pcdsdevices.pseudopos import DelayBase, SimDelayStage, SyncAxesBase
logger = logging.getLogger(__name__)
class FiveSyncSoftPositioner(SyncAxesBase):
one = Cpt(SoftPositioner, init_pos=0)
two = Cpt(SoftPositioner, init_pos=0)
three = Cpt(SoftPositioner, init_pos=0)
four = Cpt(SoftPositioner, init_pos=0)
five = Cpt(SoftPositioner, init_pos=0)
class MaxTwoSyncSoftPositioner(SyncAxesBase):
one = Cpt(SoftPositioner, init_pos=1)
two = Cpt(SoftPositioner, init_pos=5)
def calc_combined(self, real_position):
return max(real_position)
@pytest.fixture(scope='function')
def five_axes():
return FiveSyncSoftPositioner(name='sync', egu='five')
@pytest.fixture(scope='function')
def two_axes():
return MaxTwoSyncSoftPositioner(name='sync', egu='two')
def test_sync_passthrough(five_axes):
logger.debug('test_sync_passthrough')
assert five_axes.name == 'sync'
assert five_axes.egu == 'five'
def test_sync_basic(five_axes):
logger.debug('test_sync_basic')
five_axes.move(5)
for i, pos in enumerate(five_axes.real_position):
assert pos == 5
assert five_axes.pseudo.position == 5
def test_sync_offset(five_axes, two_axes):
logger.debug('test_sync_offset')
five_axes.one.move(1)
five_axes.two.move(2)
five_axes.three.move(3)
five_axes.four.move(4)
five_axes.five.move(5)
assert five_axes.pseudo.position == 1
five_axes.move(10)
assert five_axes.one.position == 10
assert five_axes.two.position == 11
assert five_axes.three.position == 12
assert five_axes.four.position == 13
assert five_axes.five.position == 14
assert two_axes.pseudo.position == 5
def test_delay_basic():
stage_s = SimDelayStage('prefix', name='name', egu='s', n_bounces=2)
stage_ns = SimDelayStage('prefix', name='name', egu='ns', n_bounces=2)
approx_c = 3e8
stage_s.move(1e-9)
stage_ns.move(1)
for pos in stage_s.motor.position, stage_ns.motor.position:
assert abs(pos*1e-3 - 1e-9 * approx_c / 2) < 0.01
def test_subcls_warning():
logger.debug('test_subcls_warning')
with pytest.raises(TypeError):
SyncAxesBase('prefix', name='name')
with pytest.raises(TypeError):
DelayBase('prefix', name='name')
| 27.883721
| 74
| 0.712677
|
96b37f021e629fec372ab739422ac7a6e837228d
| 8,531
|
py
|
Python
|
lucid_bot/utils.py
|
viargentum/lucid-bot
|
3c7279221193e09455b93a642507c08c140d3600
|
[
"MIT"
] | 3
|
2021-05-12T02:18:55.000Z
|
2021-07-27T23:44:03.000Z
|
lucid_bot/utils.py
|
viargentum/lucid-bot
|
3c7279221193e09455b93a642507c08c140d3600
|
[
"MIT"
] | 2
|
2021-03-23T18:01:52.000Z
|
2021-03-23T23:58:51.000Z
|
lucid_bot/utils.py
|
viargentum/lucid-bot
|
3c7279221193e09455b93a642507c08c140d3600
|
[
"MIT"
] | 2
|
2021-03-23T14:40:30.000Z
|
2021-03-23T18:04:34.000Z
|
import asyncio
from datetime import datetime
import discord
from discord.ext import commands
from enum import Enum, auto
from lucid_bot import config
from lucid_bot.lucid_embed import lucid_embed
class LucidCommandResult(Enum):
SUCCESS = auto()
FAIL = auto()
class Utils:
def __init__(self, bot):
self.bot = bot
self.config = config.config
@staticmethod
def time() -> str:
time = datetime.now().strftime("%H:%M:%S")
return f"[\033[32m{time[:12]}\033[0m] | "
@staticmethod
async def command_result(
ctx: commands.Context,
*,
result: LucidCommandResult,
message: str = None,
) -> None:
if not message:
if result is LucidCommandResult.SUCCESS:
await ctx.message.add_reaction(
config.config["resultAssets"]["successReact"]
)
else:
await ctx.message.add_reaction(
config.config["resultAssets"]["failReact"]
)
else:
if result is LucidCommandResult.SUCCESS:
embed = lucid_embed(ctx, success=True).set_author(
name=message,
icon_url=config.config["resultAssets"]["successImg"],
)
await ctx.send(embed=embed)
else:
embed = lucid_embed(ctx, fail=True).set_author(
name=message,
icon_url=config.config["resultAssets"]["failImg"],
)
await ctx.send(embed=embed)
async def yes_no_dialogue(
self,
message: discord.Message,
ctx: commands.Context,
timeout: int = 20,
dm: bool = False,
) -> bool:
await message.add_reaction("✅")
await message.add_reaction("❌")
while True:
try:
reaction = await self.bot.wait_for("reaction_add", timeout=timeout)
except asyncio.TimeoutError:
embed = lucid_embed(
title="Timeout -",
description="Sorry, you took too long to react.",
)
if dm:
await ctx.author.send(embed=embed)
else:
await ctx.send(embed=embed)
return None
if reaction[1].id == ctx.author.id:
reaction = reaction[0].emoji
if reaction == "✅" or "❌":
break
else:
return None
if reaction == "✅":
return True
else:
return False
async def announcement_channel(
self, ctx: commands.Context, message: discord.Message
):
# EMBED CHANNEL
while True:
try:
announce_channel_message: discord.Message = await self.bot.wait_for(
"message", timeout=20
)
except asyncio.TimeoutError:
embed = lucid_embed(
title="Timeout",
description="Sorry, you took too long to respond.",
)
await message.edit(embed=embed)
return None
if announce_channel_message.author.id == ctx.author.id:
await announce_channel_message.delete()
channel_tag: str = announce_channel_message.content
try:
announce_channel: discord.abc.GuildChannel = (
announce_channel_message.channel_mentions[0]
)
except IndexError:
embed = lucid_embed(
title="Command Error -",
description="Did you mention a valid channel?",
)
await message.edit(embed=embed)
return None
break
return announce_channel, channel_tag
async def announce_title(
self, ctx: commands.Context, message: discord.Message
) -> str:
# EMBED TITLE
embed = lucid_embed(
title="Bot Announcement -",
description="What should the title of the announcement be?",
)
await message.edit(embed=embed)
while True:
try:
announce_title_message: discord.Message = await self.bot.wait_for(
"message", timeout=60
)
except asyncio.TimeoutError:
embed = lucid_embed(
title="Timeout",
description="Sorry, you took too long to respond.",
)
await message.edit(embed=embed)
return None
if announce_title_message.author.id == ctx.author.id:
await announce_title_message.delete()
break
return announce_title_message.content
async def announcement_description(
self, ctx: commands.Context, message: discord.Message
) -> str:
# EMBED DESCRIPTION
embed = lucid_embed(
title="Bot Announcement -",
description="What should the announcement say?",
)
await message.edit(embed=embed)
while True:
try:
announce_description_message: discord.Message = await self.bot.wait_for(
"message", timeout=180
)
except asyncio.TimeoutError:
embed = lucid_embed(
title="Timeout",
description="Sorry, you took too long to respond.",
)
await message.edit(embed=embed)
return None
if announce_description_message.author.id == ctx.author.id:
await announce_description_message.delete()
break
return announce_description_message.content
async def announce_color(
self, message: discord.Message, ctx: commands.Context
) -> str:
embed = lucid_embed(
title="Bot Announcement -",
description="What should the color of the embed be?\n\n(Wait for all reactions to "
"appear.)",
)
await message.edit(embed=embed)
for value in self.config["reactColors"]:
await message.add_reaction(self.config["reactColors"][value])
while True:
try:
reactColor = await self.bot.wait_for("reaction_add", timeout=20)
except asyncio.TimeoutError:
embed = lucid_embed(
title="Timeout -",
description="Sorry, you took too long to react.",
)
await message.edit(embed=embed)
return None
if reactColor[1].id == ctx.author.id:
reactColor = reactColor[0].emoji
if reactColor in self.config["reactColors"].values():
break
else:
return None
colorHex = self.config["reactColorsHex"][reactColor]
return str(colorHex)
async def announcement_send(
self,
ctx: commands.Context,
announce_channel: discord.abc.GuildChannel,
announce_embed: discord.Embed,
channel_tag: str,
) -> None:
# CONFIRM/DENY SEND
embed = lucid_embed(
title="Bot Announcement -",
description="Do you want to send the announcement " + "as shown above?",
)
message: discord.Message = await ctx.send(embed=embed)
reaction_yes: bool = await self.yes_no_dialogue(message, ctx, 10, False)
if reaction_yes:
await announce_channel.send(embed=announce_embed)
embed = lucid_embed(
title="Bot Announcement -",
description="Announcement successfully sent to " + channel_tag + ".",
)
embed.set_footer(text="bot developed by viargentum#3850")
await message.clear_reactions()
await message.edit(embed=embed)
else:
embed = lucid_embed(
title="Bot Announcement -",
description="Announcement Cancelled.",
)
embed.set_footer(text="bot developed by viargentum#3850")
await message.clear_reactions()
await message.edit(embed=embed)
return
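# Hypothetical usage sketch, not part of the original module: asking the user
# to confirm an action with Utils.yes_no_dialogue. The prompt text and timeout
# are illustrative assumptions.
async def _confirm_example(utils: Utils, ctx: commands.Context) -> bool:
    prompt = await ctx.send("Proceed with the action?")
    confirmed = await utils.yes_no_dialogue(prompt, ctx, timeout=30)
    return bool(confirmed)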
| 29.316151
| 95
| 0.525612
|
73d1f88ca9cd73999aa67a657a470aada64e3a41
| 4,868
|
pyde
|
Python
|
mode/examples/Topics/Cellular Automata/GameOfLife/GameOfLife.pyde
|
timgates42/processing.py
|
78a237922c2a928b83f4ad579dbf8d32c0099890
|
[
"Apache-2.0"
] | 1,224
|
2015-01-01T22:09:23.000Z
|
2022-03-29T19:43:56.000Z
|
mode/examples/Topics/Cellular Automata/GameOfLife/GameOfLife.pyde
|
timgates42/processing.py
|
78a237922c2a928b83f4ad579dbf8d32c0099890
|
[
"Apache-2.0"
] | 253
|
2015-01-14T03:45:51.000Z
|
2022-02-08T01:18:19.000Z
|
mode/examples/Topics/Cellular Automata/GameOfLife/GameOfLife.pyde
|
timgates42/processing.py
|
78a237922c2a928b83f4ad579dbf8d32c0099890
|
[
"Apache-2.0"
] | 225
|
2015-01-13T18:38:33.000Z
|
2022-03-30T20:27:39.000Z
|
"""
A Processing implementation of Game of Life By Joan Soler-Adillon
Press SPACE BAR to pause and change the cell's values with the mouse
On pause, click to activate/deactivate cells
Press R to randomly reset the cells' grid
Press C to clear the cells' grid
The original Game of Life was created by John Conway in 1970.
"""
cellSize = 5 # Size of cells
# How likely for a cell to be alive at start (in percentage)
probabilityOfAliveAtStart = 15
# Variables for timer
interval = 100
lastRecordedTime = 0
# Colors for active/inactive cells
alive = color(0, 200, 0)
dead = color(0)
pause = False # Pause
def setup():
global grid_w, grid_h
global cells # Array of cells
global cellsBuffer # Buffer while changing the others in the interations
size(360, 360)
# Instantiate arrays
grid_w, grid_h = int(width / cellSize), int(height / cellSize)
cells = [[None] * grid_w for _ in range(grid_h)]
cellsBuffer = [[None] * grid_w for _ in range(grid_h)]
# This stroke will draw the background grid
stroke(48)
noSmooth()
# Initialization of cells
for x in range(grid_w):
for y in range(grid_h):
state = random(100)
if state > probabilityOfAliveAtStart:
state = 0
else:
state = 1
cells[x][y] = state # Save state of each cell
    background(0)  # Fill with black in case the cells don't cover the whole window
def draw():
global lastRecordedTime
# Draw grid
for x in range(grid_w):
for y in range(grid_h):
if cells[x][y] == 1:
fill(alive) # If alive
else:
fill(dead) # If dead
rect(x * cellSize, y * cellSize, cellSize, cellSize)
# Iterate if timer ticks
if millis() - lastRecordedTime > interval:
if not pause:
iteration()
lastRecordedTime = millis()
# Create new cells manually on pause
if pause and mousePressed:
        # Map and avoid out-of-bounds errors
xCellOver = int(map(mouseX, 0, width, 0, width / cellSize))
xCellOver = constrain(xCellOver, 0, width / cellSize - 1)
yCellOver = int(map(mouseY, 0, height, 0, height / cellSize))
yCellOver = constrain(yCellOver, 0, height / cellSize - 1)
# Check against cells in buffer
if cellsBuffer[xCellOver][yCellOver] == 1: # Cell is alive
cells[xCellOver][yCellOver] = 0 # Kill
fill(dead) # Fill with kill color
else: # Cell is dead
cells[xCellOver][yCellOver] = 1 # Make alive
fill(alive) # Fill alive color
# And then save to buffer once mouse goes up
elif pause and not mousePressed:
# Save cells to buffer
        # (so we operate on one array while keeping the other intact)
for x in range(grid_w):
for y in range(grid_h):
cellsBuffer[x][y] = cells[x][y]
def iteration(): # When the clock ticks
# Save cells to buffer
    # (so we operate on one array while keeping the other intact)
for x in range(grid_w):
for y in range(grid_h):
cellsBuffer[x][y] = cells[x][y]
# Visit each cell:
for x in range(grid_w):
for y in range(grid_h):
# And visit all the neighbours of each cell
neighbours = 0 # We'll count the neighbours
for xx in range(x - 1, x + 2):
for yy in range(y - 1, y + 2):
# Make sure you are not out of bounds
                    if 0 <= xx < grid_w and 0 <= yy < grid_h:
                        # Make sure not to count the cell itself
if not (xx == x and yy == y):
if cellsBuffer[xx][yy] == 1:
# Check alive neighbours and count them
neighbours = neighbours + 1
if cellsBuffer[x][y] == 1:
if neighbours < 2 or neighbours > 3:
cells[x][y] = 0 # Die unless it has 2 or 3 neighbours
else: # The cell is dead: make it live if necessary
if neighbours == 3:
cells[x][y] = 1 # Only if it has 3 neighbours
def keyPressed():
global pause
if key == 'r' or key == 'R':
# Restart: reinitialization of cells
for x in range(grid_w):
for y in range(grid_h):
state = random(100)
if state > probabilityOfAliveAtStart:
state = 0
else:
state = 1
cells[x][y] = state # Save state of each cell
if key == ' ': # On/off of pause
pause = not pause
if (key == 'c' or key == 'C'): # Clear all
for x in range(grid_w):
for y in range(grid_h):
cells[x][y] = 0 # Save all to zero
| 36.878788
| 77
| 0.560805
|
0605d306223721e4b8a92917777ae6bdcba3d09f
| 625
|
py
|
Python
|
Scripts/bisectionsqrt.py
|
timseymore/py-scripts
|
89cce81a1a62f9aa9aad477919b12fff5b42a694
|
[
"MIT"
] | null | null | null |
Scripts/bisectionsqrt.py
|
timseymore/py-scripts
|
89cce81a1a62f9aa9aad477919b12fff5b42a694
|
[
"MIT"
] | null | null | null |
Scripts/bisectionsqrt.py
|
timseymore/py-scripts
|
89cce81a1a62f9aa9aad477919b12fff5b42a694
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Bisectionsqrt
Use bisection search to approximate square root
Created on Thu Feb 1 21:29:30 2018
@author: tim_s
"""
def sqrt(x):
    x = abs(x)
    epsilon = .01
    numGuesses = 0
    low = 0.0
    high = max(1.0, x)
    ans = (high + low)/2.0
    while abs(ans**2 - x) >= epsilon:
        print('low = ', low, 'high = ', high, 'ans =', ans)
        numGuesses += 1
        if ans**2 < x:
            low = ans
        else:
            high = ans
        ans = (high + low)/2.0
    print('numGuesses = ', numGuesses)
    print(ans, 'is close to the square root of ', x)
    return ans
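# Hypothetical usage example, not part of the original script: sqrt(25) prints
# each bisection step and converges on a value within epsilon of 5.0.
if __name__ == '__main__':
    approx = sqrt(25)
    print('approximation returned:', approx)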
| 20.16129
| 59
| 0.5072
|
175e4f66e7f8be8658a2dda12c1a047adf3ca4b3
| 312
|
py
|
Python
|
newspaper_project/urls.py
|
vaibhavgope/django_newspaper
|
d6f62e153286909ef401dd4aca0e7a9b6e1937f8
|
[
"Apache-2.0"
] | null | null | null |
newspaper_project/urls.py
|
vaibhavgope/django_newspaper
|
d6f62e153286909ef401dd4aca0e7a9b6e1937f8
|
[
"Apache-2.0"
] | null | null | null |
newspaper_project/urls.py
|
vaibhavgope/django_newspaper
|
d6f62e153286909ef401dd4aca0e7a9b6e1937f8
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('users/', include('users.urls')),
path('users/', include('django.contrib.auth.urls')),
path('', include('pages.urls')),
path('articles/', include('articles.urls')),
]
| 31.2
| 56
| 0.657051
|
306eeb10d67f4fafc6bafb36c14702e999cbc0bf
| 3,286
|
py
|
Python
|
tests/test_ml/test_rl.py
|
granularai/polyaxon-schemas
|
017ae74701f21f12f0b25e75379681ea5d8baa9e
|
[
"MIT"
] | null | null | null |
tests/test_ml/test_rl.py
|
granularai/polyaxon-schemas
|
017ae74701f21f12f0b25e75379681ea5d8baa9e
|
[
"MIT"
] | null | null | null |
tests/test_ml/test_rl.py
|
granularai/polyaxon-schemas
|
017ae74701f21f12f0b25e75379681ea5d8baa9e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest import TestCase
from tests.utils import assert_equal_dict
from polyaxon_schemas.ml.rl.environments import GymEnvironmentConfig
from polyaxon_schemas.ml.rl.explorations import (
ConstantExplorationConfig,
DecayExplorationConfig,
GreedyExplorationConfig,
OrnsteinUhlenbeckExplorationConfig,
RandomDecayExplorationConfig,
RandomExplorationConfig
)
from polyaxon_schemas.ml.rl.memories import BatchMemoryConfig
class TestEnvironmentConfigs(TestCase):
    def test_gym_environment_config(self):
config_dict = {
'env_id': 'CartPole-v0',
}
config = GymEnvironmentConfig.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict)
class TestMemoryConfigs(TestCase):
    def test_batch_memory_config(self):
config_dict = {
'size': 500,
'batch_size': 500,
}
config = BatchMemoryConfig.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict)
class TestExplorationConfigs(TestCase):
def test_constant_exploration_config(self):
config_dict = {
'value': 0.8,
'is_continuous': False
}
config = ConstantExplorationConfig.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict)
def test_greedy_exploration_config(self):
config_dict = {
'is_continuous': False
}
config = GreedyExplorationConfig.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict)
def test_random_exploration_config(self):
config_dict = {
'is_continuous': False
}
config = RandomExplorationConfig.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict)
def test_decay_exploration_config(self):
config_dict = {
'is_continuous': False,
'exploration_rate': 0.15,
'decay_type': 'polynomial_decay',
'start_decay_at': 0,
'stop_decay_at': 1e9,
'decay_rate': 0.,
'staircase': False,
'decay_steps': 100000,
'min_exploration_rate': 0
}
config = DecayExplorationConfig.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict)
def test_random_decay_exploration_config(self):
config_dict = {
'is_continuous': False,
'num_actions': None,
'decay_type': 'polynomial_decay',
'start_decay_at': 0,
'stop_decay_at': 1e9,
'decay_rate': 0.,
'staircase': False,
'decay_steps': 10000,
'min_exploration_rate': 0
}
config = RandomDecayExplorationConfig.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict)
def test_orsteinuhlenbeck_exploration_config(self):
config_dict = {
'is_continuous': True,
'num_actions': 4,
'sigma': 0.3,
'mu': 0,
'theta': 0.15
}
config = OrnsteinUhlenbeckExplorationConfig.from_dict(config_dict)
assert_equal_dict(config.to_dict(), config_dict)
| 32.215686
| 74
| 0.643944
|
f3fff7d3e81a040de8cd6591374789c8ea9c6a7a
| 10,554
|
py
|
Python
|
deepctr_torch/inputs.py
|
ParticleMedia/DeepCTR-Torch
|
862a0b4ab59b24797b7852d9d4a3fcb79c7f378c
|
[
"Apache-2.0"
] | null | null | null |
deepctr_torch/inputs.py
|
ParticleMedia/DeepCTR-Torch
|
862a0b4ab59b24797b7852d9d4a3fcb79c7f378c
|
[
"Apache-2.0"
] | null | null | null |
deepctr_torch/inputs.py
|
ParticleMedia/DeepCTR-Torch
|
862a0b4ab59b24797b7852d9d4a3fcb79c7f378c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,weichenswc@163.com
"""
from collections import OrderedDict, namedtuple, defaultdict
from itertools import chain
import torch
import torch.nn as nn
import numpy as np
from .layers.sequence import SequencePoolingLayer
from .layers.utils import concat_fun
from .layers import DNN
DEFAULT_GROUP_NAME = "default_group"
class SparseFeat(namedtuple('SparseFeat',
['name', 'vocabulary_size', 'embedding_dim', 'use_hash', 'dtype', 'embedding_name',
'group_name'])):
__slots__ = ()
def __new__(cls, name, vocabulary_size, embedding_dim=4, use_hash=False, dtype="int32", embedding_name=None,
group_name=DEFAULT_GROUP_NAME):
if embedding_name is None:
embedding_name = name
if embedding_dim == "auto":
embedding_dim = 6 * int(pow(vocabulary_size, 0.25))
if use_hash:
print(
"Notice! Feature Hashing on the fly currently is not supported in torch version,you can use tensorflow version!")
return super(SparseFeat, cls).__new__(cls, name, vocabulary_size, embedding_dim, use_hash, dtype,
embedding_name, group_name)
def __hash__(self):
return self.name.__hash__()
class VarLenSparseFeat(namedtuple('VarLenSparseFeat',
['sparsefeat', 'maxlen', 'combiner', 'length_name'])):
__slots__ = ()
def __new__(cls, sparsefeat, maxlen, combiner="mean", length_name=None):
return super(VarLenSparseFeat, cls).__new__(cls, sparsefeat, maxlen, combiner, length_name)
@property
def name(self):
return self.sparsefeat.name
@property
def vocabulary_size(self):
return self.sparsefeat.vocabulary_size
@property
def embedding_dim(self):
return self.sparsefeat.embedding_dim
@property
def use_hash(self):
return self.sparsefeat.use_hash
@property
def dtype(self):
return self.sparsefeat.dtype
@property
def embedding_name(self):
return self.sparsefeat.embedding_name
@property
def group_name(self):
return self.sparsefeat.group_name
def __hash__(self):
return self.name.__hash__()
class DenseFeat(namedtuple('DenseFeat', ['name', 'dimension', 'dtype'])):
__slots__ = ()
def __new__(cls, name, dimension=1, dtype="float32"):
return super(DenseFeat, cls).__new__(cls, name, dimension, dtype)
def __hash__(self):
return self.name.__hash__()
class DenseEmbeddingFeat(namedtuple('DenseEmbeddingFeat', ['name', 'dimension', 'embedding_dim', 'hidden_units', 'dtype'])):
__slots__ = ()
def __new__(cls, name, dimension=1, embedding_dim=4, hidden_units=[], dtype="float32"):
return super(DenseEmbeddingFeat, cls).__new__(cls, name, dimension, embedding_dim, hidden_units, dtype)
def __hash__(self):
return self.name.__hash__()
def generate_embedding_dnn(self, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, init_std, device):
if self.dimension == self.embedding_dim:
return nn.Identity()
return DNN(self.dimension, self.hidden_units + [self.embedding_dim],
activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn,
init_std=init_std, device=device)
def get_feature_names(feature_columns):
features = build_input_features(feature_columns)
return list(features.keys())
# def get_inputs_list(inputs):
# return list(chain(*list(map(lambda x: x.values(), filter(lambda x: x is not None, inputs)))))
def build_input_features(feature_columns):
# Return OrderedDict: {feature_name:(start, start+dimension)}
features = OrderedDict()
start = 0
for feat in feature_columns:
feat_name = feat.name
if feat_name in features:
continue
if isinstance(feat, SparseFeat):
features[feat_name] = (start, start + 1)
start += 1
elif isinstance(feat, DenseFeat):
features[feat_name] = (start, start + feat.dimension)
start += feat.dimension
elif isinstance(feat, DenseEmbeddingFeat):
features[feat_name] = (start, start + feat.dimension)
start += feat.dimension
elif isinstance(feat, VarLenSparseFeat):
features[feat_name] = (start, start + feat.maxlen)
start += feat.maxlen
if feat.length_name is not None and feat.length_name not in features:
features[feat.length_name] = (start, start + 1)
start += 1
else:
raise TypeError("Invalid feature column type,got", type(feat))
return features
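# Hypothetical usage sketch, not part of the original module: the returned
# OrderedDict maps each feature name to its (start, stop) slice in the flat
# input tensor. The column definitions below are illustrative assumptions.
def _build_input_features_example():
    columns = [
        SparseFeat('user_id', vocabulary_size=100, embedding_dim=4),
        DenseFeat('price', dimension=2),
        VarLenSparseFeat(SparseFeat('hist_item', vocabulary_size=50, embedding_dim=4), maxlen=5),
    ]
    features = build_input_features(columns)
    # OrderedDict([('user_id', (0, 1)), ('price', (1, 3)), ('hist_item', (3, 8))])
    return features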
def combined_dnn_input(sparse_embedding_list, dense_value_list):
if len(sparse_embedding_list) > 0 and len(dense_value_list) > 0:
sparse_dnn_input = torch.flatten(
torch.cat(sparse_embedding_list, dim=-1), start_dim=1)
dense_dnn_input = torch.flatten(
torch.cat(dense_value_list, dim=-1), start_dim=1)
return concat_fun([sparse_dnn_input, dense_dnn_input])
elif len(sparse_embedding_list) > 0:
return torch.flatten(torch.cat(sparse_embedding_list, dim=-1), start_dim=1)
elif len(dense_value_list) > 0:
return torch.flatten(torch.cat(dense_value_list, dim=-1), start_dim=1)
else:
raise NotImplementedError
def get_varlen_pooling_list(embedding_dict, features, feature_index, varlen_sparse_feature_columns, device):
varlen_sparse_embedding_list = []
for feat in varlen_sparse_feature_columns:
seq_emb = embedding_dict[feat.name]
if feat.length_name is None:
seq_mask = features[:, feature_index[feat.name][0]:feature_index[feat.name][1]].long() != 0
emb = SequencePoolingLayer(mode=feat.combiner, supports_masking=True, device=device)(
[seq_emb, seq_mask])
else:
seq_length = features[:, feature_index[feat.length_name][0]:feature_index[feat.length_name][1]].long()
emb = SequencePoolingLayer(mode=feat.combiner, supports_masking=False, device=device)(
[seq_emb, seq_length])
varlen_sparse_embedding_list.append(emb)
return varlen_sparse_embedding_list
def create_embedding_matrix(feature_columns, init_std=0.0001, linear=False, sparse=False, device='cpu'):
# Return nn.ModuleDict: for sparse features, {embedding_name: nn.Embedding}
# for varlen sparse features, {embedding_name: nn.EmbeddingBag}
sparse_feature_columns = list(
filter(lambda x: isinstance(x, SparseFeat), feature_columns)) if len(feature_columns) else []
varlen_sparse_feature_columns = list(
filter(lambda x: isinstance(x, VarLenSparseFeat), feature_columns)) if len(feature_columns) else []
embedding_dict = nn.ModuleDict(
{feat.embedding_name: nn.Embedding(feat.vocabulary_size, feat.embedding_dim if not linear else 1, sparse=sparse)
for feat in
sparse_feature_columns + varlen_sparse_feature_columns}
)
# for feat in varlen_sparse_feature_columns:
# embedding_dict[feat.embedding_name] = nn.EmbeddingBag(
# feat.dimension, embedding_size, sparse=sparse, mode=feat.combiner)
for tensor in embedding_dict.values():
nn.init.normal_(tensor.weight, mean=0, std=init_std)
return embedding_dict.to(device)
def embedding_lookup(X, sparse_embedding_dict, sparse_input_dict, sparse_feature_columns, return_feat_list=(),
mask_feat_list=(), to_list=False):
"""
Args:
X: input Tensor [batch_size x hidden_dim]
sparse_embedding_dict: nn.ModuleDict, {embedding_name: nn.Embedding}
sparse_input_dict: OrderedDict, {feature_name:(start, start+dimension)}
sparse_feature_columns: list, sparse features
        return_feat_list: list, names of features to be returned; default () -> return all features
        mask_feat_list: list, names of features to be masked in hash transform
Return:
group_embedding_dict: defaultdict(list)
"""
group_embedding_dict = defaultdict(list)
for fc in sparse_feature_columns:
feature_name = fc.name
embedding_name = fc.embedding_name
if (len(return_feat_list) == 0 or feature_name in return_feat_list):
# TODO: add hash function
# if fc.use_hash:
# raise NotImplementedError("hash function is not implemented in this version!")
lookup_idx = np.array(sparse_input_dict[feature_name])
input_tensor = X[:, lookup_idx[0]:lookup_idx[1]].long()
emb = sparse_embedding_dict[embedding_name](input_tensor)
group_embedding_dict[fc.group_name].append(emb)
if to_list:
return list(chain.from_iterable(group_embedding_dict.values()))
return group_embedding_dict
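# Hypothetical usage sketch, not part of the original module: looking up the
# embedding of a single SparseFeat column. Feature names and values below are
# illustrative assumptions.
def _embedding_lookup_example():
    feat = SparseFeat('user_id', vocabulary_size=10, embedding_dim=4)
    feature_index = build_input_features([feat])      # {'user_id': (0, 1)}
    embedding_dict = create_embedding_matrix([feat])  # {'user_id': nn.Embedding(10, 4)}
    X = torch.tensor([[3.0], [7.0]])                  # batch of two samples
    groups = embedding_lookup(X, embedding_dict, feature_index, [feat])
    return groups[DEFAULT_GROUP_NAME][0]              # shape: (2, 1, 4)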
def varlen_embedding_lookup(X, embedding_dict, sequence_input_dict, varlen_sparse_feature_columns):
varlen_embedding_vec_dict = {}
for fc in varlen_sparse_feature_columns:
feature_name = fc.name
embedding_name = fc.embedding_name
if fc.use_hash:
# lookup_idx = Hash(fc.vocabulary_size, mask_zero=True)(sequence_input_dict[feature_name])
# TODO: add hash function
lookup_idx = sequence_input_dict[feature_name]
else:
lookup_idx = sequence_input_dict[feature_name]
varlen_embedding_vec_dict[feature_name] = embedding_dict[embedding_name](
X[:, lookup_idx[0]:lookup_idx[1]].long()) # (lookup_idx)
return varlen_embedding_vec_dict
def get_dense_input(X, features, feature_columns):
dense_feature_columns = list(filter(lambda x: isinstance(
x, DenseFeat), feature_columns)) if feature_columns else []
dense_input_list = []
for fc in dense_feature_columns:
lookup_idx = np.array(features[fc.name])
input_tensor = X[:, lookup_idx[0]:lookup_idx[1]].float()
dense_input_list.append(input_tensor)
return dense_input_list
def maxlen_lookup(X, sparse_input_dict, maxlen_column):
if maxlen_column is None or len(maxlen_column)==0:
raise ValueError('please add max length column for VarLenSparseFeat of DIN/DIEN input')
lookup_idx = np.array(sparse_input_dict[maxlen_column[0]])
return X[:, lookup_idx[0]:lookup_idx[1]].long()
| 39.52809
| 129
| 0.676426
|
381d5e745de0ce4308deb4f83fb8bd6efe551f47
| 530
|
py
|
Python
|
flask_app/authorization.py
|
Cs586/Team-Project-Final-Web-Application
|
37a7f704430f87228f4fac90b6242ff41b2ed312
|
[
"Unlicense"
] | null | null | null |
flask_app/authorization.py
|
Cs586/Team-Project-Final-Web-Application
|
37a7f704430f87228f4fac90b6242ff41b2ed312
|
[
"Unlicense"
] | null | null | null |
flask_app/authorization.py
|
Cs586/Team-Project-Final-Web-Application
|
37a7f704430f87228f4fac90b6242ff41b2ed312
|
[
"Unlicense"
] | 2
|
2020-08-05T17:11:52.000Z
|
2021-02-14T03:09:43.000Z
|
from typing import Dict, Optional
from flask_app.calendar_data import CalendarData
class Authorization:
def __init__(self, calendar_data: CalendarData) -> None:
self.calendar_data = calendar_data
    def can_access(self, username: str, data: Optional[Dict] = None, calendar_id: Optional[str] = None) -> bool:
if calendar_id is None:
return username in self.calendar_data.users_list(data=data)
else:
return username in self.calendar_data.users_list(calendar_id=calendar_id)
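# Hypothetical usage sketch, not part of the original module: checking whether
# a user may access a given calendar. The username and calendar id are
# illustrative assumptions.
def _authorization_example(calendar_data: CalendarData) -> bool:
    auth = Authorization(calendar_data)
    return auth.can_access(username='alice', calendar_id='team-calendar')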
| 35.333333
| 113
| 0.718868
|
2d568e8641c5ac50f5cef9f1fa10bc6ea3ef9a60
| 1,380
|
py
|
Python
|
fairseq/data/audio/feature_transforms/samp_fbank.py
|
StatNLP/ada4asr
|
3f40fac990afa471153ff6a8a450dfce9712b962
|
[
"MIT"
] | 4
|
2021-09-06T06:40:41.000Z
|
2022-02-14T09:59:37.000Z
|
fairseq/data/audio/feature_transforms/samp_fbank.py
|
StatNLP/ada4asr
|
3f40fac990afa471153ff6a8a450dfce9712b962
|
[
"MIT"
] | null | null | null |
fairseq/data/audio/feature_transforms/samp_fbank.py
|
StatNLP/ada4asr
|
3f40fac990afa471153ff6a8a450dfce9712b962
|
[
"MIT"
] | 1
|
2021-07-12T12:34:47.000Z
|
2021-07-12T12:34:47.000Z
|
import math
import numbers
from typing import Optional
import numpy as np
from fairseq.data.audio.feature_transforms import (
AudioFeatureTransform,
register_audio_feature_transform,
)
@register_audio_feature_transform("samp_fbank")
class SampFbankTransform(AudioFeatureTransform):
@classmethod
def from_config_dict(cls, config=None):
_config = {} if config is None else config
return SampFbankTransform(
_config.get("max_samp_fbank", 5),
_config.get("num_samp_fbank", -1),
_config.get("sampleFbank_prob", 1.0)
)
def __init__(self, max_samp_fbank, num_samp_fbank, sampleFbank_prob):
assert max_samp_fbank >= 1
self.max_samp_fbank = max_samp_fbank
self.num_samp_fbank = num_samp_fbank
self.sampleFbank_prob = sampleFbank_prob
def __repr__(self):
return (
self.__class__.__name__
+ "("
+ ", ".join(
[
f"max_samp_fbank={self.max_samp_fbank}",
f"num_samp_fbank={self.num_samp_fbank}",
f"sampleFbank_prob={self.sampleFbank_prob}",
]
)
+ ")"
)
def __call__(self, spectrogram):
'''
        To maintain a consistent API with feature_transform
'''
return spectrogram
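# Hypothetical usage sketch, not part of the original module: building the
# transform from a config dict and applying it to a dummy spectrogram. The
# config values and shape below are illustrative assumptions.
def _samp_fbank_example():
    config = {"max_samp_fbank": 5, "num_samp_fbank": -1, "sampleFbank_prob": 1.0}
    transform = SampFbankTransform.from_config_dict(config)
    spectrogram = np.zeros((100, 80), dtype=np.float32)  # (frames, filterbank bins)
    return transform(spectrogram)  # identity pass-through in this version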
| 27.6
| 73
| 0.606522
|
5b0378afb3e350a0b970e91d7577efb6624936f0
| 816
|
py
|
Python
|
bazar/markup.py
|
emencia/emencia-django-bazar
|
a0cf56c00988c84c2288c21fa2a08364fc5033aa
|
[
"MIT"
] | null | null | null |
bazar/markup.py
|
emencia/emencia-django-bazar
|
a0cf56c00988c84c2288c21fa2a08364fc5033aa
|
[
"MIT"
] | 11
|
2015-05-06T14:50:14.000Z
|
2017-12-16T23:46:17.000Z
|
bazar/markup.py
|
emencia/emencia-django-bazar
|
a0cf56c00988c84c2288c21fa2a08364fc5033aa
|
[
"MIT"
] | null | null | null |
"""
Some markup utilities for RST and DjangoCodeMirror usage
TODO: rst/djangocodemirror formatting should not be the default formatting,
because they are not package requirements.
"""
from django.forms import ValidationError
from rstview.parser import SourceReporter, map_parsing_errors
from djangocodemirror.fields import DjangoCodeMirrorField
def get_text_field(form_instance, **kwargs):
"""
Return a DjangoCodeMirrorField field
"""
kwargs.update({
'config_name': 'bazar'
})
return DjangoCodeMirrorField(**kwargs)
def clean_restructuredtext(form_instance, content):
"""
RST syntax validation
"""
if content:
errors = SourceReporter(content)
if errors:
raise ValidationError(map(map_parsing_errors, errors))
return content
| 25.5
| 75
| 0.724265
|
bb880da1a974996faa59cd0fcfeeabf5cd915796
| 1,544
|
py
|
Python
|
jobs/views.py
|
SethWen/JobSpiderDjango
|
5d7160c717c328c4927ff0b5f0af1a43cbcbcb29
|
[
"Apache-2.0"
] | 5
|
2017-07-10T14:57:22.000Z
|
2018-03-12T08:17:45.000Z
|
jobs/views.py
|
SethWen/JobSpiderDjango
|
5d7160c717c328c4927ff0b5f0af1a43cbcbcb29
|
[
"Apache-2.0"
] | null | null | null |
jobs/views.py
|
SethWen/JobSpiderDjango
|
5d7160c717c328c4927ff0b5f0af1a43cbcbcb29
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from models import LagouJobs, ZhilianJobs
from dss import Serializer
# Create your views here.
def get_lagou(request):
"""
    Fetch job listings from Lagou (lagou.com)
:param request:
:return:
"""
response_dict = get_jobs(request, LagouJobs)
return JsonResponse(response_dict, safe=False)
def get_zhilian(request):
"""
    Fetch job listings from Zhilian Zhaopin (zhaopin.com)
:param request:
:return:
"""
response_dict = get_jobs(request, ZhilianJobs)
return JsonResponse(response_dict, safe=False)
def get_jobs(request, mtype):
"""
    Fetch job listings
:param request:
:param mtype:
:return:
"""
offset = request.GET.get('offset', 0)
limit = request.GET.get('limit', 10)
# type=1: python; type=2: android
ptype = int(request.GET.get('type', 1))
offset = int(offset)
limit = int(limit)
if ptype == 1:
jobs = mtype.objects.all().filter(keyWord='python')
elif ptype == 2:
jobs = mtype.objects.all().filter(keyWord='android')
else:
jobs = ZhilianJobs.objects.all()
count = jobs.count()
if count >= limit * (offset + 1):
offset_jobs = jobs[offset * limit:offset * limit + limit]
else:
offset_jobs = jobs[offset * limit:offset * limit + (count % limit)]
    print('jobs queryset type:', type(jobs))
s = Serializer.serializer(offset_jobs)
response_dict = {
'code': 20200,
'msg': 'success',
'data': s
}
print(type(s))
return response_dict
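# Hypothetical request examples, not part of the original module, assuming the
# views are routed at /jobs/lagou/ and /jobs/zhilian/:
#   GET /jobs/lagou/?offset=0&limit=10&type=1    -> first page of python jobs
#   GET /jobs/zhilian/?offset=2&limit=10&type=2  -> third page of android jobs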
| 24.125
| 75
| 0.624352
|
e159ff03c7af51a5dec91c00018999da8ddb9ceb
| 5,224
|
py
|
Python
|
taglets/data/custom_dataset.py
|
BatsResearch/taglets
|
0fa9ebeccc9177069aa09b2da84746b7532e3495
|
[
"Apache-2.0"
] | 13
|
2021-11-10T13:17:10.000Z
|
2022-03-30T22:56:52.000Z
|
taglets/data/custom_dataset.py
|
BatsResearch/taglets
|
0fa9ebeccc9177069aa09b2da84746b7532e3495
|
[
"Apache-2.0"
] | 1
|
2021-11-10T16:01:47.000Z
|
2021-11-10T16:01:47.000Z
|
taglets/data/custom_dataset.py
|
BatsResearch/taglets
|
0fa9ebeccc9177069aa09b2da84746b7532e3495
|
[
"Apache-2.0"
] | 2
|
2022-02-14T22:40:29.000Z
|
2022-02-27T04:27:48.000Z
|
import os
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
import torch
class CustomImageDataset(Dataset):
"""
A custom dataset used to create dataloaders.
"""
def __init__(self, filepaths, labels=None, label_map=None, transform=None):
"""
Create a new CustomImageDataset.
:param filepaths: A list of filepaths.
:param labels: A list of labels
        :param label_map: A dictionary to map string labels to integers
:param transform: A transform to perform on the images
"""
self.filepaths = filepaths
self.labels = labels
self.label_map = label_map
self.transform = transform
def __getitem__(self, index):
while True:
try:
img = Image.open(self.filepaths[index]).convert('RGB')
break
except:
replace_index = np.random.randint(len(self.filepaths))
self.filepaths[index] = self.filepaths[replace_index]
self.labels[index] = self.labels[replace_index]
continue
if self.transform is not None:
img = self.transform(img)
if self.labels is not None:
if self.label_map is not None:
label = torch.tensor(self.label_map[(self.labels[index])])
else:
label = torch.tensor(int(self.labels[index]))
return img, label
else:
return img
def __len__(self):
return len(self.filepaths)
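# Hypothetical usage sketch, not part of the original module: wrapping a few
# image paths in a DataLoader. The file paths and label map are illustrative
# assumptions, and torchvision is assumed to be available for the transform.
def _custom_image_dataset_example():
    from torch.utils.data import DataLoader
    from torchvision import transforms
    dataset = CustomImageDataset(
        filepaths=['img/cat.jpg', 'img/dog.jpg'],
        labels=['cat', 'dog'],
        label_map={'cat': 0, 'dog': 1},
        transform=transforms.Compose([transforms.Resize((224, 224)),
                                      transforms.ToTensor()]),
    )
    return DataLoader(dataset, batch_size=2, shuffle=True)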
class PseudoshotImageDataset(CustomImageDataset):
"""
A custom dataset used to create dataloaders.
"""
def __init__(self, filepaths, labels=None, label_mask=None, label_map=None, transform=None):
super(PseudoshotImageDataset, self).__init__(filepaths, labels, label_map, transform)
self.label_mask = label_mask
def __getitem__(self, index):
img = Image.open(self.filepaths[index]).convert('RGB')
if self.transform is not None:
img = self.transform(img)
if self.labels is not None:
if self.label_map is not None:
label = torch.tensor(self.label_map[(self.labels[index])])
else:
label = torch.tensor(int(self.labels[index]))
if self.label_mask:
return img, label, int(self.label_mask[index])
return img, label
else:
return img
def __len__(self):
return len(self.filepaths)
class CustomVideoDataset(Dataset):
"""
A custom dataset used to create dataloaders.
"""
def __init__(self, filepaths, labels=None, label_map=None, transform=None, clips_dictionary=None):
"""
Create a new CustomVideoDataset.
:param filepaths: A list of filepaths.
:param labels: A list of labels
        :param label_map: A dictionary to map string labels to integers
:param transform: A transform to perform on the frames
        :param clips_dictionary: dictionary (clip id, list of frame image paths) used to get the frames of a clip
"""
self.filepaths = filepaths
self.labels = labels
self.label_map = label_map
self.transform = transform
self.clips_dictionary = clips_dictionary
def __getitem__(self, index):
        clip_id = int(os.path.basename(self.filepaths[index]))  # check which path format you have/want
frames_paths = self.clips_dictionary[clip_id]
# print(f"FRAMES list[:2]: {frames_paths[:2]} and number of frames {len(frames_paths)}")
frames = []
for f in frames_paths[:10]: # get same size clips - random pick for eval
frame = Image.open(f).convert('RGB')
if self.transform is not None: # BE CAREFUL TRANSFORMATION MIGHT NEED TO CHANGE FOR VIDEO EVAL!!!!!
frame = self.transform(frame)
frames.append(frame)
img = torch.stack(frames) # need to be of the same size!
if self.labels is not None:
if self.label_map is not None:
label = torch.tensor(self.label_map[(self.labels[index])])
else:
label = torch.tensor(int(self.labels[index]))
return img, label
else:
return img
def __len__(self):
return len(self.filepaths)
class SoftLabelDataset(Dataset):
"""
A custom dataset used to create dataloaders.
"""
def __init__(self, dataset, labels, remove_old_labels=False):
"""
Create a new SoftLabelDataset.
:param dataset: A PyTorch dataset
:param labels: A list of labels
        :param remove_old_labels: A boolean indicating whether the dataset returns labels that we do not use
"""
self.dataset = dataset
self.labels = labels
self.remove_old_labels = remove_old_labels
def __getitem__(self, index):
data = self.dataset[index]
label = self.labels[index]
if self.remove_old_labels:
data = data[0]
return data, label
def __len__(self):
return len(self.dataset)
| 33.273885
| 112
| 0.602221
|
262093e4dd7e520667e4fbbc7b5aff7a9c64fbd6
| 101,685
|
py
|
Python
|
test_autoarray/unit/dataset/test_imaging.py
|
Sketos/PyAutoArray
|
72dc7e8d1c38786915f82a7e7284239e5ce87624
|
[
"MIT"
] | null | null | null |
test_autoarray/unit/dataset/test_imaging.py
|
Sketos/PyAutoArray
|
72dc7e8d1c38786915f82a7e7284239e5ce87624
|
[
"MIT"
] | null | null | null |
test_autoarray/unit/dataset/test_imaging.py
|
Sketos/PyAutoArray
|
72dc7e8d1c38786915f82a7e7284239e5ce87624
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pytest
import shutil
import autoarray as aa
from autoarray import exc
from autoarray.dataset import imaging
test_data_dir = "{}/../test_files/array/".format(
os.path.dirname(os.path.realpath(__file__))
)
class TestConstructor:
def test__setup_image__correct_attributes(self):
image = aa.array.manual_2d(
array=[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
)
psf = aa.kernel.full(fill_value=3.0, shape_2d=(3, 3))
noise_map = aa.array.manual_2d(array=5.0 * np.ones((3, 3)))
imaging = aa.imaging(
image=image,
noise_map=noise_map,
psf=psf,
background_noise_map=aa.array.full(fill_value=7.0, shape_2d=((3, 3))),
poisson_noise_map=aa.array.full(fill_value=9.0, shape_2d=((3, 3))),
exposure_time_map=aa.array.full(fill_value=11.0, shape_2d=((3, 3))),
)
assert imaging.image.in_2d == pytest.approx(
np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]), 1e-2
)
assert (imaging.psf.in_2d == 3.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 5.0 * np.ones((3, 3))).all()
assert (imaging.background_noise_map.in_2d == 7.0 * np.ones((3, 3))).all()
assert (imaging.poisson_noise_map.in_2d == 9.0 * np.ones((3, 3))).all()
assert (imaging.exposure_time_map.in_2d == 11.0 * np.ones((3, 3))).all()
class TestEstimateNoiseFromImage:
def test__image_and_exposure_time_all_1s__no_background__noise_is_all_1s(self):
# Imaging (eps) = 1.0
# Background (eps) = 0.0
# Exposure times = 1.0 s
# Imaging (counts) = 1.0
# Background (counts) = 0.0
# Noise (counts) = sqrt(1.0 + 0.0**2) = 1.0
# Noise (eps) = 1.0 / 1.0
image = aa.array.ones(shape_2d=(3, 3))
exposure_time = aa.array.ones(shape_2d=(3, 3))
background_noise = aa.array.zeros(shape_2d=(3, 3))
imaging = aa.imaging(
image=image,
noise_map=None,
pixel_scales=1.0,
psf=aa.kernel.ones(shape_2d=(3, 3)),
exposure_time_map=exposure_time,
background_noise_map=background_noise,
)
assert (imaging.estimated_noise_map.in_2d == np.ones((3, 3))).all()
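    # Summary of the relation exercised by the tests in this class (added note,
    # not part of the original file): with exposure time t, image I in eps and
    # background noise B in eps,
    #     noise_eps = sqrt(I * t + (B * t) ** 2) / t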
def test__image_all_4s__exposure_time_all_1s__no_background__noise_is_all_2s(self):
# Imaging (eps) = 4.0
# Background (eps) = 0.0
# Exposure times = 1.0 s
# Imaging (counts) = 4.0
# Background (counts) = 0.0
# Noise (counts) = sqrt(4.0 + 0.0**2) = 2.0
# Noise (eps) = 2.0 / 1.0
image = aa.array.full(fill_value=4.0, shape_2d=(4, 2))
exposure_time = aa.array.ones(shape_2d=(4, 2))
background_noise = aa.array.zeros(shape_2d=(4, 2))
imaging = aa.imaging(
image=image,
noise_map=None,
psf=aa.kernel.ones(shape_2d=(3, 3)),
exposure_time_map=exposure_time,
background_noise_map=background_noise,
)
assert (imaging.estimated_noise_map.in_2d == 2.0 * np.ones((4, 2))).all()
def test__image_all_1s__exposure_time_all_4s__no_background__noise_is_all_2_divided_4_so_halves(
self
):
# Imaging (eps) = 1.0
# Background (eps) = 0.0
# Exposure times = 4.0 s
# Imaging (counts) = 4.0
# Background (counts) = 0.0
# Noise (counts) = sqrt(4.0 + 0.0**2) = 2.0
# Noise (eps) = 2.0 / 4.0 = 0.5
image = aa.array.ones(shape_2d=(1, 5))
exposure_time = aa.array.full(fill_value=4.0, shape_2d=(1, 5))
background_noise = aa.array.zeros(shape_2d=(1, 5))
imaging = aa.imaging(
image=image,
noise_map=None,
psf=aa.kernel.ones(shape_2d=(3, 3)),
exposure_time_map=exposure_time,
background_noise_map=background_noise,
)
assert (imaging.estimated_noise_map.in_2d == 0.5 * np.ones((1, 5))).all()
def test__image_and_exposure_times_range_of_values__no_background__noises_estimates_correct(
self
):
image = aa.array.manual_2d(array=np.array([[5.0, 3.0], [10.0, 20.0]]))
exposure_time = aa.array.manual_2d(np.array([[1.0, 2.0], [3.0, 4.0]]))
background_noise = aa.array.zeros(shape_2d=(2, 2))
imaging = aa.imaging(
image=image,
noise_map=None,
psf=aa.kernel.ones(shape_2d=(3, 3)),
exposure_time_map=exposure_time,
background_noise_map=background_noise,
)
assert (
imaging.estimated_noise_map.in_2d
== np.array(
[
[np.sqrt(5.0), np.sqrt(6.0) / 2.0],
[np.sqrt(30.0) / 3.0, np.sqrt(80.0) / 4.0],
]
)
).all()
def test__image_and_exposure_times_all_1s__background_is_float_sqrt_3__noise_is_all_2s(
self
):
# Imaging (eps) = 1.0
# Background (eps) = sqrt(3.0)
# Exposure times = 1.0 s
# Imaging (counts) = 1.0
# Background (counts) = sqrt(3.0)
# Noise (counts) = sqrt(1.0 + sqrt(3.0)**2) = sqrt(1.0 + 3.0) = 2.0
# Noise (eps) = 2.0 / 1.0 = 2.0
image = aa.array.ones(shape_2d=(3, 3))
exposure_time = aa.array.ones(shape_2d=(3, 3))
background_noise = aa.array.full(fill_value=3.0 ** 0.5, shape_2d=(3, 3))
imaging = aa.imaging(
image=image,
noise_map=None,
psf=aa.kernel.ones(shape_2d=(3, 3)),
exposure_time_map=exposure_time,
background_noise_map=background_noise,
)
assert imaging.estimated_noise_map.in_2d == pytest.approx(
2.0 * np.ones((3, 3)), 1e-2
)
def test__image_and_exposure_times_all_1s__background_is_float_5__noise_all_correct(
self
):
# Imaging (eps) = 1.0
# Background (eps) = 5.0
# Exposure times = 1.0 s
# Imaging (counts) = 1.0
# Background (counts) = 5.0
# Noise (counts) = sqrt(1.0 + 5**2)
# Noise (eps) = sqrt(1.0 + 5**2) / 1.0
image = aa.array.ones(shape_2d=(2, 3))
exposure_time = aa.array.ones(shape_2d=(2, 3))
background_noise = aa.array.full(fill_value=5.0, shape_2d=(2, 3))
imaging = aa.imaging(
image=image,
noise_map=None,
psf=aa.kernel.ones(shape_2d=(3, 3)),
exposure_time_map=exposure_time,
background_noise_map=background_noise,
)
assert imaging.estimated_noise_map.in_2d == pytest.approx(
np.array(
[
[np.sqrt(1.0 + 25.0), np.sqrt(1.0 + 25.0), np.sqrt(1.0 + 25.0)],
[np.sqrt(1.0 + 25.0), np.sqrt(1.0 + 25.0), np.sqrt(1.0 + 25.0)],
]
),
1e-2,
)
def test__image_all_1s__exposure_times_all_2s__background_is_float_5__noise_all_correct(
self
):
# Imaging (eps) = 1.0
# Background (eps) = 5.0
# Exposure times = 2.0 s
# Imaging (counts) = 2.0
# Background (counts) = 10.0
# Noise (counts) = sqrt(2.0 + 10**2) = sqrt(2.0 + 100.0)
# Noise (eps) = sqrt(2.0 + 100.0) / 2.0
image = aa.array.ones(shape_2d=(2, 3))
exposure_time = aa.array.full(fill_value=2.0, shape_2d=(2, 3))
background_noise = aa.array.full(fill_value=5.0, shape_2d=(2, 3))
imaging = aa.imaging(
image=image,
noise_map=None,
psf=aa.kernel.ones(shape_2d=(3, 3)),
exposure_time_map=exposure_time,
background_noise_map=background_noise,
)
assert imaging.estimated_noise_map.in_2d == pytest.approx(
np.array(
[
[
np.sqrt(2.0 + 100.0) / 2.0,
np.sqrt(2.0 + 100.0) / 2.0,
np.sqrt(2.0 + 100.0) / 2.0,
],
[
np.sqrt(2.0 + 100.0) / 2.0,
np.sqrt(2.0 + 100.0) / 2.0,
np.sqrt(2.0 + 100.0) / 2.0,
],
]
),
1e-2,
)
def test__same_as_above_but_different_image_values_in_each_pixel_and_new_background_values(
self
):
# Can use pattern from previous test_autoarray for values
image = aa.array.manual_2d(array=[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
exposure_time = aa.array.ones(shape_2d=(3, 2))
background_noise = aa.array.full(fill_value=12.0, shape_2d=(3, 2))
imaging = aa.imaging(
image=image,
noise_map=None,
psf=aa.kernel.ones(shape_2d=(3, 3)),
exposure_time_map=exposure_time,
background_noise_map=background_noise,
)
assert imaging.estimated_noise_map.in_2d == pytest.approx(
np.array(
[
[np.sqrt(1.0 + 144.0), np.sqrt(2.0 + 144.0)],
[np.sqrt(3.0 + 144.0), np.sqrt(4.0 + 144.0)],
[np.sqrt(5.0 + 144.0), np.sqrt(6.0 + 144.0)],
]
),
1e-2,
)
def test__image_and_exposure_times_range_of_values__background_has_value_9___noise_estimates_correct(
self
):
        # Use same pattern as above, noting that here our background values are now being converted to counts
        # using different exposure times and then squared.
image = aa.array.manual_2d(array=[[5.0, 3.0], [10.0, 20.0]])
exposure_time = aa.array.manual_2d(array=[[1.0, 2.0], [3.0, 4.0]])
background_noise = aa.array.full(fill_value=9.0, shape_2d=((2, 2)))
imaging = aa.imaging(
image=image,
noise_map=None,
psf=aa.kernel.ones(shape_2d=(3, 3)),
exposure_time_map=exposure_time,
background_noise_map=background_noise,
)
assert imaging.estimated_noise_map.in_2d == pytest.approx(
np.array(
[
[np.sqrt(5.0 + 81.0), np.sqrt(6.0 + 18.0 ** 2.0) / 2.0],
[
np.sqrt(30.0 + 27.0 ** 2.0) / 3.0,
np.sqrt(80.0 + 36.0 ** 2.0) / 4.0,
],
]
),
1e-2,
)
def test__image_and_exposure_times_and_background_are_all_ranges_of_values__noise_estimates_correct(
self
):
# Use same pattern as above, noting that we are now also using a model background signal_to_noise_ratio map.
image = aa.array.manual_2d(array=[[5.0, 3.0], [10.0, 20.0]])
exposure_time = aa.array.manual_2d(array=[[1.0, 2.0], [3.0, 4.0]])
background_noise = aa.array.manual_2d(array=[[5.0, 6.0], [7.0, 8.0]])
imaging = aa.imaging(
image=image,
noise_map=None,
psf=aa.kernel.ones(shape_2d=(3, 3)),
exposure_time_map=exposure_time,
background_noise_map=background_noise,
)
assert imaging.estimated_noise_map.in_2d == pytest.approx(
np.array(
[
[np.sqrt(5.0 + 5.0 ** 2.0), np.sqrt(6.0 + 12.0 ** 2.0) / 2.0],
[
np.sqrt(30.0 + 21.0 ** 2.0) / 3.0,
np.sqrt(80.0 + 32.0 ** 2.0) / 4.0,
],
]
),
1e-2,
)
class TestEstimateDataGrid:
def test__via_edges__input_all_ones__sky_bg_level_1(self):
imaging = aa.imaging(
image=aa.array.manual_2d(np.ones((3, 3))),
noise_map=np.ones((3, 3)),
psf=aa.kernel.ones(shape_2d=(3, 3)),
pixel_scales=0.1,
)
sky_noise = imaging.background_noise_from_edges(no_edges=1)
assert sky_noise == 0.0
def test__via_edges__3x3_image_simple_gaussian__answer_ignores_central_pixel(self):
image = aa.array.manual_2d([[1, 1, 1], [1, 100, 1], [1, 1, 1]])
imaging = aa.imaging(
image=image,
noise_map=np.ones((3, 3)),
psf=aa.kernel.ones(shape_2d=(3, 3)),
pixel_scales=0.1,
)
sky_noise = imaging.background_noise_from_edges(no_edges=1)
assert sky_noise == 0.0
def test__via_edges__4x3_image_simple_gaussian__ignores_central_pixels(self):
image = aa.array.manual_2d([[1, 1, 1], [1, 100, 1], [1, 100, 1], [1, 1, 1]])
imaging = aa.imaging(
image=image,
noise_map=np.ones((3, 3)),
psf=aa.kernel.ones(shape_2d=(3, 3)),
pixel_scales=0.1,
)
sky_noise = imaging.background_noise_from_edges(no_edges=1)
assert sky_noise == 0.0
def test__via_edges__4x4_image_simple_gaussian__ignores_central_pixels(self):
image = aa.array.manual_2d(
[[1, 1, 1, 1], [1, 100, 100, 1], [1, 100, 100, 1], [1, 1, 1, 1]]
)
imaging = aa.imaging(
image=image,
noise_map=np.ones((3, 3)),
psf=aa.kernel.ones(shape_2d=(3, 3)),
pixel_scales=0.1,
)
sky_noise = imaging.background_noise_from_edges(no_edges=1)
assert sky_noise == 0.0
def test__via_edges__5x5_image_simple_gaussian_two_edges__ignores_central_pixel(
self
):
image = aa.array.manual_2d(
[
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 100, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
]
)
imaging = aa.imaging(
image=image,
noise_map=np.ones((3, 3)),
psf=aa.kernel.ones(shape_2d=(3, 3)),
pixel_scales=0.1,
)
sky_noise = imaging.background_noise_from_edges(no_edges=2)
assert sky_noise == 0.0
def test__via_edges__6x5_image_two_edges__values(self):
image = aa.array.manual_2d(
[
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 100, 12, 13],
[14, 15, 100, 16, 17],
[18, 19, 20, 21, 22],
[23, 24, 25, 26, 27],
]
)
imaging = aa.imaging(
image=image,
noise_map=np.ones((3, 3)),
psf=aa.kernel.ones(shape_2d=(3, 3)),
pixel_scales=0.1,
)
sky_noise = imaging.background_noise_from_edges(no_edges=2)
assert sky_noise == np.std(np.arange(28))
def test__via_edges__7x7_image_three_edges__values(self):
image = aa.array.manual_2d(
[
[0, 1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12, 13],
[14, 15, 16, 17, 18, 19, 20],
[21, 22, 23, 100, 24, 25, 26],
[27, 28, 29, 30, 31, 32, 33],
[34, 35, 36, 37, 38, 39, 40],
[41, 42, 43, 44, 45, 46, 47],
]
)
imaging = aa.imaging(
image=image,
noise_map=np.ones((3, 3)),
psf=aa.kernel.ones(shape_2d=(3, 3)),
pixel_scales=0.1,
)
sky_noise = imaging.background_noise_from_edges(no_edges=3)
assert sky_noise == np.std(np.arange(48))
class TestNewImagingResized:
def test__all_components_resized__psf_is_not(self):
image = aa.array.manual_2d(array=np.ones((6, 6)), pixel_scales=1.0)
image[21] = 2.0
noise_map_array = aa.array.ones(shape_2d=(6, 6))
noise_map_array[21] = 3.0
background_noise_map_array = aa.array.ones(shape_2d=(6, 6))
background_noise_map_array[21] = 4.0
exposure_time_map_array = aa.array.ones(shape_2d=(6, 6))
exposure_time_map_array[21] = 5.0
background_sky_map_array = aa.array.ones(shape_2d=(6, 6))
background_sky_map_array[21] = 6.0
imaging = aa.imaging(
image=image,
psf=aa.kernel.zeros(shape_2d=(3, 3)),
noise_map=noise_map_array,
background_noise_map=background_noise_map_array,
exposure_time_map=exposure_time_map_array,
background_sky_map=background_sky_map_array,
)
imaging = imaging.resized_from_new_shape(new_shape=(4, 4))
assert (
imaging.image.in_2d
== np.array(
[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 2.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
]
)
).all()
assert (
imaging.noise_map.in_2d
== np.array(
[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 3.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
]
)
).all()
assert (
imaging.background_noise_map.in_2d
== np.array(
[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 4.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
]
)
).all()
assert (
imaging.exposure_time_map.in_2d
== np.array(
[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 5.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
]
)
).all()
assert (
imaging.background_sky_map.in_2d
== np.array(
[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 6.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
]
)
).all()
assert imaging.poisson_noise_map == None
assert imaging.pixel_scales == (1.0, 1.0)
assert (imaging.psf.in_2d == np.zeros((3, 3))).all()
assert imaging.geometry.origin == (0.0, 0.0)
def test__resize_psf(self):
image = aa.array.manual_2d(array=np.ones((6, 6)))
imaging = aa.imaging(
image=image, noise_map=None, psf=aa.kernel.zeros(shape_2d=(3, 3))
)
imaging = imaging.resized_psf_from_new_shape(new_shape=(1, 1))
assert (imaging.image.in_2d == np.ones((6, 6))).all()
assert (imaging.psf.in_2d == np.zeros((1, 1))).all()
class TestNewImagingModifiedImage:
def test__imaging_returns_with_modified_image(self):
image = aa.array.manual_2d(array=np.ones((4, 4)), pixel_scales=1.0)
image[10] = 2.0
noise_map_array = aa.array.ones(shape_2d=(4, 4))
noise_map_array[10] = 3.0
background_noise_map_array = aa.array.ones(shape_2d=(4, 4))
background_noise_map_array[10] = 4.0
exposure_time_map_array = aa.array.ones(shape_2d=(4, 4))
exposure_time_map_array[10] = 5.0
background_sky_map_array = aa.array.ones(shape_2d=(4, 4))
background_sky_map_array[10] = 6.0
imaging = aa.imaging(
image=image,
psf=aa.kernel.zeros(shape_2d=(3, 3)),
noise_map=noise_map_array,
background_noise_map=background_noise_map_array,
exposure_time_map=exposure_time_map_array,
background_sky_map=background_sky_map_array,
)
modified_image = aa.array.ones(shape_2d=(4, 4), pixel_scales=1.0)
modified_image[10] = 10.0
imaging = imaging.modified_image_from_image(image=modified_image)
assert (
imaging.image.in_2d
== np.array(
[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 10.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
]
)
).all()
assert (
imaging.noise_map.in_2d
== np.array(
[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 3.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
]
)
).all()
assert (
imaging.background_noise_map.in_2d
== np.array(
[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 4.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
]
)
).all()
assert (
imaging.exposure_time_map.in_2d
== np.array(
[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 5.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
]
)
).all()
assert (
imaging.background_sky_map.in_2d
== np.array(
[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 6.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
]
)
).all()
        assert imaging.poisson_noise_map is None
assert imaging.pixel_scales == (1.0, 1.0)
assert (imaging.psf.in_2d == np.zeros((3, 3))).all()
assert imaging.geometry.origin == (0.0, 0.0)
class TestNewImagingBinnedUp:
def test__all_components_binned_up_correct(self):
image = aa.array.manual_2d(array=np.ones((6, 6)), pixel_scales=1.0)
image[21] = 2.0
image[27] = 2.0
image[33] = 2.0
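        # The expected arrays below assume the binning conventions exercised by the
        # util calls that follow: mean for the image and background sky map,
        # quadrature for the noise and background noise maps, and sum for the
        # exposure time map, with the PSF rescaled rather than binned.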
binned_image_util = aa.util.binning.bin_array_2d_via_mean(
array_2d=image.in_2d, bin_up_factor=2
)
noise_map_array = aa.array.ones(shape_2d=(6, 6), pixel_scales=1.0)
noise_map_array[21:24] = 3.0
binned_noise_map_util = aa.util.binning.bin_array_2d_via_quadrature(
array_2d=noise_map_array.in_2d, bin_up_factor=2
)
background_noise_map_array = aa.array.ones(shape_2d=(6, 6), pixel_scales=1.0)
background_noise_map_array[21:24] = 4.0
binned_background_noise_map_util = aa.util.binning.bin_array_2d_via_quadrature(
array_2d=background_noise_map_array.in_2d, bin_up_factor=2
)
exposure_time_map_array = aa.array.ones(shape_2d=(6, 6), pixel_scales=1.0)
exposure_time_map_array[21:24] = 5.0
binned_exposure_time_map_util = aa.util.binning.bin_array_2d_via_sum(
array_2d=exposure_time_map_array.in_2d, bin_up_factor=2
)
background_sky_map_array = aa.array.ones(shape_2d=(6, 6), pixel_scales=1.0)
background_sky_map_array[21:24] = 6.0
binned_background_sky_map_util = aa.util.binning.bin_array_2d_via_mean(
array_2d=background_sky_map_array.in_2d, bin_up_factor=2
)
psf = aa.kernel.ones(shape_2d=(3, 5), pixel_scales=1.0)
psf_util = psf.rescaled_with_odd_dimensions_from_rescale_factor(
rescale_factor=0.5, renormalize=False
)
imaging = aa.imaging(
image=image,
psf=psf,
noise_map=noise_map_array,
background_noise_map=background_noise_map_array,
exposure_time_map=exposure_time_map_array,
background_sky_map=background_sky_map_array,
)
imaging = imaging.binned_from_bin_up_factor(bin_up_factor=2)
assert (imaging.image.in_2d == binned_image_util).all()
assert (imaging.psf == psf_util).all()
assert (imaging.noise_map.in_2d == binned_noise_map_util).all()
assert (
imaging.background_noise_map.in_2d == binned_background_noise_map_util
).all()
assert (imaging.exposure_time_map.in_2d == binned_exposure_time_map_util).all()
assert (
imaging.background_sky_map.in_2d == binned_background_sky_map_util
).all()
        assert imaging.poisson_noise_map is None
assert imaging.image.pixel_scales == (2.0, 2.0)
assert imaging.psf.pixel_scales == pytest.approx((1.0, 1.66666666666), 1.0e-4)
assert imaging.noise_map.pixel_scales == (2.0, 2.0)
assert imaging.background_noise_map.pixel_scales == (2.0, 2.0)
assert imaging.exposure_time_map.pixel_scales == (2.0, 2.0)
assert imaging.background_sky_map.pixel_scales == (2.0, 2.0)
assert imaging.image.geometry.origin == (0.0, 0.0)
class TestSNRLimit:
def test__signal_to_noise_limit_above_max_signal_to_noise__signal_to_noise_map_unchanged(
self
):
image = aa.array.full(fill_value=20.0, shape_2d=(2, 2))
image[3] = 5.0
noise_map_array = aa.array.full(fill_value=5.0, shape_2d=(2, 2))
noise_map_array[3] = 2.0
imaging = aa.imaging(
image=image,
psf=aa.kernel.zeros(shape_2d=(3, 3)),
noise_map=noise_map_array,
background_noise_map=aa.array.full(fill_value=1.0, shape_2d=(2, 2)),
exposure_time_map=aa.array.full(fill_value=2.0, shape_2d=(2, 2)),
background_sky_map=aa.array.full(fill_value=3.0, shape_2d=(2, 2)),
)
imaging = imaging.signal_to_noise_limited_from_signal_to_noise_limit(
signal_to_noise_limit=100.0
)
assert (imaging.image.in_2d == np.array([[20.0, 20.0], [20.0, 5.0]])).all()
assert (imaging.noise_map.in_2d == np.array([[5.0, 5.0], [5.0, 2.0]])).all()
assert (
imaging.signal_to_noise_map.in_2d == np.array([[4.0, 4.0], [4.0, 2.5]])
).all()
assert (imaging.psf.in_2d == np.zeros((3, 3))).all()
assert (imaging.background_noise_map.in_2d == np.ones((2, 2))).all()
assert (imaging.exposure_time_map.in_2d == 2.0 * np.ones((2, 2))).all()
assert (imaging.background_sky_map.in_2d == 3.0 * np.ones((2, 2))).all()
def test__signal_to_noise_limit_below_max_signal_to_noise__signal_to_noise_map_capped_to_limit(
self
):
image = aa.array.full(fill_value=20.0, shape_2d=(2, 2))
image[3] = 5.0
noise_map_array = aa.array.full(fill_value=5.0, shape_2d=(2, 2))
noise_map_array[3] = 2.0
imaging = aa.imaging(
image=image,
psf=aa.kernel.zeros(shape_2d=(3, 3)),
noise_map=noise_map_array,
background_noise_map=aa.array.full(fill_value=1.0, shape_2d=(2, 2)),
exposure_time_map=aa.array.full(fill_value=2.0, shape_2d=(2, 2)),
background_sky_map=aa.array.full(fill_value=3.0, shape_2d=(2, 2)),
)
imaging_capped = imaging.signal_to_noise_limited_from_signal_to_noise_limit(
signal_to_noise_limit=2.0
)
assert (
imaging_capped.image.in_2d == np.array([[20.0, 20.0], [20.0, 5.0]])
).all()
assert (
imaging_capped.noise_map.in_2d == np.array([[10.0, 10.0], [10.0, 2.5]])
).all()
assert (
imaging_capped.signal_to_noise_map.in_2d
== np.array([[2.0, 2.0], [2.0, 2.0]])
).all()
assert (imaging_capped.psf.in_2d == np.zeros((3, 3))).all()
assert (imaging_capped.background_noise_map.in_2d == np.ones((2, 2))).all()
assert (imaging_capped.exposure_time_map.in_2d == 2.0 * np.ones((2, 2))).all()
assert (imaging_capped.background_sky_map.in_2d == 3.0 * np.ones((2, 2))).all()
imaging_capped = imaging.signal_to_noise_limited_from_signal_to_noise_limit(
signal_to_noise_limit=3.0
)
assert (
imaging_capped.image.in_2d == np.array([[20.0, 20.0], [20.0, 5.0]])
).all()
assert (
imaging_capped.noise_map.in_2d
== np.array([[(20.0 / 3.0), (20.0 / 3.0)], [(20.0 / 3.0), 2.0]])
).all()
assert (
imaging_capped.signal_to_noise_map.in_2d
== np.array([[3.0, 3.0], [3.0, 2.5]])
).all()
assert (imaging_capped.psf.in_2d == np.zeros((3, 3))).all()
assert (imaging_capped.background_noise_map.in_2d == np.ones((2, 2))).all()
assert (imaging_capped.exposure_time_map.in_2d == 2.0 * np.ones((2, 2))).all()
assert (imaging_capped.background_sky_map.in_2d == 3.0 * np.ones((2, 2))).all()
class TestImageConvertedFrom:
def test__counts__all_arrays_in_units_of_flux_are_converted(self):
image = aa.array.ones(shape_2d=(3, 3))
noise_map_array = aa.array.full(fill_value=2.0, shape_2d=(3, 3))
background_noise_map_array = aa.array.full(fill_value=3.0, shape_2d=(3, 3))
exposure_time_map_array = aa.array.full(fill_value=0.5, shape_2d=(3, 3))
background_sky_map_array = aa.array.full(fill_value=6.0, shape_2d=(3, 3))
imaging = aa.imaging(
image=image,
psf=aa.kernel.zeros(shape_2d=(3, 3)),
noise_map=noise_map_array,
background_noise_map=background_noise_map_array,
poisson_noise_map=None,
exposure_time_map=exposure_time_map_array,
background_sky_map=background_sky_map_array,
)
imaging = imaging.data_in_electrons()
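        # A hedged reading of the conversion, based on the assertions below: with an
        # exposure time map of 0.5 everywhere, data_in_electrons() appears to rescale
        # the flux-like quantities by 1 / 0.5 = 2 (image 1 -> 2, noise 2 -> 4,
        # background noise 3 -> 6, background sky 6 -> 12).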
assert (imaging.image.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 4.0 * np.ones((3, 3))).all()
assert (imaging.background_noise_map.in_2d == 6.0 * np.ones((3, 3))).all()
        assert imaging.poisson_noise_map is None
assert (imaging.background_sky_map.in_2d == 12.0 * np.ones((3, 3))).all()
def test__adus__all_arrays_in_units_of_flux_are_converted(self):
image = aa.array.ones(shape_2d=(3, 3))
noise_map_array = aa.array.full(fill_value=2.0, shape_2d=(3, 3))
background_noise_map_array = aa.array.full(fill_value=3.0, shape_2d=(3, 3))
exposure_time_map_array = aa.array.full(fill_value=0.5, shape_2d=(3, 3))
background_sky_map_array = aa.array.full(fill_value=6.0, shape_2d=(3, 3))
imaging = aa.imaging(
image=image,
psf=aa.kernel.zeros(shape_2d=(3, 3)),
noise_map=noise_map_array,
background_noise_map=background_noise_map_array,
poisson_noise_map=None,
exposure_time_map=exposure_time_map_array,
background_sky_map=background_sky_map_array,
)
imaging = imaging.data_in_adus_from_gain(gain=2.0)
assert (imaging.image.in_2d == 2.0 * 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 2.0 * 4.0 * np.ones((3, 3))).all()
assert (imaging.background_noise_map.in_2d == 2.0 * 6.0 * np.ones((3, 3))).all()
        assert imaging.poisson_noise_map is None
assert (imaging.background_sky_map.in_2d == 2.0 * 12.0 * np.ones((3, 3))).all()
class TestImageWithPoissonNoiseAdded:
    def test__mock_image_all_1s__poisson_noise_is_added_correctly(self):
psf = aa.kernel.manual_2d(
array=np.ones((3, 3)), pixel_scales=3.0, renormalize=False
)
imaging_data = aa.imaging(
image=aa.array.manual_2d(array=np.ones((4, 4))),
pixel_scales=3.0,
psf=psf,
noise_map=aa.array.manual_2d(array=np.ones((4, 4))),
exposure_time_map=aa.array.manual_2d(array=3.0 * np.ones((4, 4))),
background_sky_map=aa.array.manual_2d(array=4.0 * np.ones((4, 4))),
)
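        # The expectation below rebuilds the simulator's behaviour by hand: the
        # background sky is added to the mock image, Poisson noise is drawn with the
        # same seed, and the sky is subtracted again, so the result should match
        # add_poisson_noise_to_data(seed=1).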
mock_image = aa.array.manual_2d(array=np.ones((4, 4)))
mock_image_with_sky = mock_image + 4.0 * np.ones((16,))
mock_image_with_sky_and_noise = (
mock_image_with_sky
+ imaging.generate_poisson_noise(
image=mock_image_with_sky,
exposure_time_map=aa.array.manual_2d(array=3.0 * np.ones((4, 4))),
seed=1,
)
)
mock_image_with_noise = mock_image_with_sky_and_noise - 4.0 * np.ones((16,))
imaging_with_noise = imaging_data.add_poisson_noise_to_data(seed=1)
assert (imaging_with_noise.image == mock_image_with_noise).all()
class TestImagingFromFits:
def test__no_settings_just_pass_fits(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
renormalize_psf=False,
)
assert (imaging.image.in_2d == np.ones((3, 3))).all()
        assert imaging.psf is None
assert (imaging.noise_map.in_2d == 3.0 * np.ones((3, 3))).all()
        assert imaging.background_noise_map is None
        assert imaging.poisson_noise_map is None
        assert imaging.exposure_time_map is None
        assert imaging.background_sky_map is None
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
def test__optional_array_paths_included__loads_optional_array(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
renormalize_psf=False,
)
assert (imaging.image.in_2d == np.ones((3, 3))).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 3.0 * np.ones((3, 3))).all()
assert (imaging.background_noise_map.in_2d == 4.0 * np.ones((3, 3))).all()
assert (imaging.poisson_noise_map.in_2d == 5.0 * np.ones((3, 3))).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__all_files_in_one_fits__load_using_different_hdus(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_multiple_hdu.fits",
image_hdu=0,
psf_path=test_data_dir + "3x3_multiple_hdu.fits",
psf_hdu=1,
noise_map_path=test_data_dir + "3x3_multiple_hdu.fits",
noise_map_hdu=2,
background_noise_map_path=test_data_dir + "3x3_multiple_hdu.fits",
background_noise_map_hdu=3,
poisson_noise_map_path=test_data_dir + "3x3_multiple_hdu.fits",
poisson_noise_map_hdu=4,
exposure_time_map_path=test_data_dir + "3x3_multiple_hdu.fits",
exposure_time_map_hdu=5,
background_sky_map_path=test_data_dir + "3x3_multiple_hdu.fits",
background_sky_map_hdu=6,
renormalize_psf=False,
)
assert (imaging.image.in_2d == np.ones((3, 3))).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 3.0 * np.ones((3, 3))).all()
assert (imaging.background_noise_map.in_2d == 4.0 * np.ones((3, 3))).all()
assert (imaging.poisson_noise_map.in_2d == 5.0 * np.ones((3, 3))).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__exposure_time_included__creates_exposure_time_map_using_exposure_time(
self
):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
noise_map_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_ones.fits",
exposure_time_map_from_single_value=3.0,
renormalize_psf=False,
)
assert (imaging.exposure_time_map.in_2d == 3.0 * np.ones((3, 3))).all()
def test__exposure_time_map_from_inverse_noise_map(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
exposure_time_map_from_single_value=3.0,
exposure_time_map_from_inverse_noise_map=True,
renormalize_psf=False,
)
assert (imaging.exposure_time_map.in_2d == 3.0 * np.ones((3, 3))).all()
imaging = aa.imaging.from_fits(
image_path=test_data_dir + "3x3_ones.fits",
pixel_scales=0.1,
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
exposure_time_map_from_single_value=6.0,
exposure_time_map_from_inverse_noise_map=True,
renormalize_psf=False,
)
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
def test__exposure_time_map_from_inverse_noise_map__background_noise_is_converted_from_inverse_noise_map(
self
):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_ones_central_two.fits",
convert_background_noise_map_from_inverse_noise_map=True,
exposure_time_map_from_single_value=3.0,
exposure_time_map_from_inverse_noise_map=True,
renormalize_psf=False,
)
inverse_noise_map = aa.array.manual_2d(
array=np.array([[1.0, 1.0, 1.0], [1.0, 2.0, 1.0], [1.0, 1.0, 1.0]])
)
background_noise_map_converted = aa.data_converter.noise_map_from_inverse_noise_map(
inverse_noise_map=inverse_noise_map
)
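        # A hedged reading of the expected scaling: the exposure time map appears to
        # be proportional to the inverse noise map, normalised so its maximum equals
        # the supplied single value (3.0), giving 3.0 where the inverse noise is 2
        # and 1.5 where it is 1, as asserted below.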
assert (
imaging.background_noise_map.in_2d
== np.array([[1.0, 1.0, 1.0], [1.0, 0.5, 1.0], [1.0, 1.0, 1.0]])
).all()
assert (
imaging.background_noise_map.in_2d == background_noise_map_converted.in_2d
).all()
assert (
imaging.exposure_time_map.in_2d
== np.array([[1.5, 1.5, 1.5], [1.5, 3.0, 1.5], [1.5, 1.5, 1.5]])
).all()
def test__pad_shape_of_images_and_psf(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
resized_imaging_shape=(5, 5),
resized_psf_shape=(7, 7),
renormalize_psf=False,
)
padded_array = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
)
psf_padded_array = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 2.0, 2.0, 2.0, 0.0, 0.0],
[0.0, 0.0, 2.0, 2.0, 2.0, 0.0, 0.0],
[0.0, 0.0, 2.0, 2.0, 2.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
]
)
assert (imaging.image.in_2d == padded_array).all()
assert (imaging.psf.in_2d == psf_padded_array).all()
assert (imaging.noise_map.in_2d == 3.0 * padded_array).all()
assert (imaging.background_noise_map.in_2d == 4.0 * padded_array).all()
assert (imaging.poisson_noise_map.in_2d == 5.0 * padded_array).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * padded_array).all()
assert (imaging.background_sky_map.in_2d == 7.0 * padded_array).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
imaging = aa.imaging.from_fits(
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
exposure_time_map_from_single_value=3.0,
pixel_scales=0.1,
resized_imaging_shape=(5, 5),
resized_psf_shape=(7, 7),
renormalize_psf=False,
)
exposure_padded_array = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 3.0, 3.0, 3.0, 0.0],
[0.0, 3.0, 3.0, 3.0, 0.0],
[0.0, 3.0, 3.0, 3.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
)
assert (imaging.image.in_2d == padded_array).all()
assert (imaging.exposure_time_map.in_2d == exposure_padded_array).all()
def test__trim_shape_of_images_and_psf(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
resized_imaging_shape=(1, 1),
resized_psf_shape=(1, 1),
renormalize_psf=False,
)
trimmed_array = np.array([[1.0]])
assert (imaging.image.in_2d == trimmed_array).all()
assert (imaging.psf.in_2d == 2.0 * trimmed_array).all()
assert (imaging.noise_map.in_2d == 3.0 * trimmed_array).all()
assert (imaging.background_noise_map.in_2d == 4.0 * trimmed_array).all()
assert (imaging.poisson_noise_map.in_2d == 5.0 * trimmed_array).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * trimmed_array).all()
assert (imaging.background_sky_map.in_2d == 7.0 * trimmed_array).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__convert_noise_map_from_weight_map(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
convert_noise_map_from_weight_map=True,
renormalize_psf=False,
)
weight_map = aa.array.full(fill_value=3.0, shape_2d=(3, 3))
noise_map_converted = aa.data_converter.noise_map_from_weight_map(
weight_map=weight_map
)
assert (imaging.image.in_2d == np.ones((3, 3))).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == noise_map_converted.in_2d).all()
assert (imaging.background_noise_map.in_2d == 4.0 * np.ones((3, 3))).all()
assert (imaging.poisson_noise_map.in_2d == 5.0 * np.ones((3, 3))).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__convert_noise_map_from_inverse_noise_map(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
convert_noise_map_from_inverse_noise_map=True,
renormalize_psf=False,
)
inverse_noise_map = aa.array.manual_2d(array=3.0 * np.ones((3, 3)))
noise_map_converted = aa.data_converter.noise_map_from_inverse_noise_map(
inverse_noise_map=inverse_noise_map
)
assert (imaging.image.in_2d == np.ones((3, 3))).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == noise_map_converted.in_2d).all()
assert (imaging.background_noise_map.in_2d == 4.0 * np.ones((3, 3))).all()
assert (imaging.poisson_noise_map.in_2d == 5.0 * np.ones((3, 3))).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__noise_map_from_image_and_background_noise_map(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_from_image_and_background_noise_map=True,
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
renormalize_psf=False,
)
noise_map_converted = aa.data_converter.noise_map_from_image_and_background_noise_map(
image=imaging.image,
background_noise_map=imaging.background_noise_map,
gain=2.0,
exposure_time_map=imaging.exposure_time_map,
)
assert (imaging.image.in_2d == np.ones((3, 3))).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == noise_map_converted.in_2d).all()
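        # Assumed form of the converted noise map, consistent with the assertion
        # below: in counts, sqrt((background_noise * t)^2 + image * t), divided by
        # t to return to the image units; with background_noise=4, image=1 and t=6
        # this is sqrt(24^2 + 6) / 6.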
assert (
imaging.noise_map.in_2d
== (np.sqrt((24.0) ** 2.0 + (6.0)) / (6.0)) * np.ones((3, 3))
).all()
assert (imaging.background_noise_map.in_2d == 4.0 * np.ones((3, 3))).all()
assert (imaging.poisson_noise_map.in_2d == 5.0 * np.ones((3, 3))).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__noise_map_from_image_and_background_noise_map__include_convert_from_electrons(
self
):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_from_image_and_background_noise_map=True,
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
convert_from_electrons=True,
renormalize_psf=False,
)
image = aa.array.ones(shape_2d=(3, 3))
background_noise_map = aa.array.manual_2d(array=4.0 * np.ones((3, 3)))
noise_map_converted = aa.data_converter.noise_map_from_image_and_background_noise_map(
image=image,
background_noise_map=background_noise_map,
gain=None,
exposure_time_map=imaging.exposure_time_map,
convert_from_electrons=True,
)
noise_map_converted = noise_map_converted / 6.0
assert (imaging.image.in_2d == np.ones((3, 3)) / 6.0).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == noise_map_converted.in_2d).all()
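        # Assumed arithmetic behind the value below: for data already in electrons no
        # exposure-time scaling enters the square root, so the converter gives
        # sqrt(4^2 + 1) = sqrt(17); the extra factor of 1/6 mirrors the manual
        # division of noise_map_converted by the exposure time above.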
assert (imaging.noise_map.in_2d == np.sqrt(17.0) * np.ones((3, 3)) / 6.0).all()
assert (imaging.background_noise_map.in_2d == 4.0 * np.ones((3, 3)) / 6.0).all()
assert (imaging.poisson_noise_map.in_2d == 5.0 * np.ones((3, 3)) / 6.0).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3)) / 6.0).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__noise_map_from_image_and_background_noise_map__include_convert_from_adus(
self
):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_from_image_and_background_noise_map=True,
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
gain=2.0,
convert_from_adus=True,
renormalize_psf=False,
)
image = aa.array.ones(shape_2d=(3, 3))
background_noise_map = aa.array.manual_2d(array=4.0 * np.ones((3, 3)))
noise_map_converted = aa.data_converter.noise_map_from_image_and_background_noise_map(
image=image,
background_noise_map=background_noise_map,
gain=2.0,
exposure_time_map=imaging.exposure_time_map,
convert_from_adus=True,
)
noise_map_converted = 2.0 * noise_map_converted / 6.0
assert (imaging.image.in_2d == 2.0 * np.ones((3, 3)) / 6.0).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == noise_map_converted.in_2d).all()
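        # Assumed arithmetic behind the value below: for data in ADUs the quantities
        # are first scaled by the gain (2) into counts, giving
        # sqrt((2 * 4)^2 + 2 * 1) = sqrt(66), and the final sqrt(66) / 6 reflects the
        # rescaling by gain and exposure time applied to noise_map_converted above.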
assert (imaging.noise_map.in_2d == np.sqrt(66.0) * np.ones((3, 3)) / 6.0).all()
assert (
imaging.background_noise_map.in_2d == 2.0 * 4.0 * np.ones((3, 3)) / 6.0
).all()
assert (
imaging.poisson_noise_map.in_2d == 2.0 * 5.0 * np.ones((3, 3)) / 6.0
).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (
imaging.background_sky_map.in_2d == 2.0 * 7.0 * np.ones((3, 3)) / 6.0
).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__noise_map_non_constant__adds_small_noise_values(self):
np.random.seed(1)
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
noise_map_non_constant=True,
renormalize_psf=False,
)
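        # noise_map_non_constant=True appears to add small random perturbations to the
        # otherwise constant noise map (hence np.random.seed(1) above), so the values
        # are only checked approximately below.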
assert (imaging.image.in_2d == np.ones((3, 3))).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert imaging.noise_map.in_2d == pytest.approx(
np.array(
[
[3.000907, 3.00044, 3.000277],
[3.0005587, 3.001036, 3.00119],
[3.000558, 3.00103668, 3.0011903],
]
),
1.0e-2,
)
assert (imaging.background_noise_map.in_2d == 4.0 * np.ones((3, 3))).all()
assert (imaging.poisson_noise_map.in_2d == 5.0 * np.ones((3, 3))).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__convert_background_noise_map_from_weight_map(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
renormalize_psf=False,
convert_background_noise_map_from_weight_map=True,
)
weight_map = aa.array.manual_2d(array=4.0 * np.ones((3, 3)))
background_noise_map_converted = aa.data_converter.noise_map_from_weight_map(
weight_map=weight_map
)
assert (imaging.image.in_2d == np.ones((3, 3))).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 3.0 * np.ones((3, 3))).all()
assert (
imaging.background_noise_map.in_2d == background_noise_map_converted.in_2d
).all()
assert (imaging.poisson_noise_map.in_2d == 5.0 * np.ones((3, 3))).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__convert_background_noise_map_from_inverse_noise_map(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
renormalize_psf=False,
convert_background_noise_map_from_inverse_noise_map=True,
)
inverse_noise_map = aa.array.manual_2d(array=4.0 * np.ones((3, 3)))
background_noise_map_converted = aa.data_converter.noise_map_from_inverse_noise_map(
inverse_noise_map=inverse_noise_map
)
assert (imaging.image.in_2d == np.ones((3, 3))).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 3.0 * np.ones((3, 3))).all()
assert (
imaging.background_noise_map.in_2d == background_noise_map_converted.in_2d
).all()
assert (imaging.poisson_noise_map.in_2d == 5.0 * np.ones((3, 3))).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__poisson_noise_map_from_image(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
renormalize_psf=False,
poisson_noise_map_from_image=True,
)
image = aa.array.ones(shape_2d=(3, 3))
poisson_noise_map_converted = aa.data_converter.poisson_noise_map_from_image_and_exposure_time_map(
image=image, exposure_time_map=imaging.exposure_time_map, gain=None
)
assert (imaging.image.in_2d == np.ones((3, 3))).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 3.0 * np.ones((3, 3))).all()
assert (imaging.background_noise_map.in_2d == 4.0 * np.ones((3, 3))).all()
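        # Assumed arithmetic behind the value below: the Poisson noise in counts is
        # sqrt(image * t) = sqrt(1 * 6), divided by t=6 to return to electrons per
        # second, i.e. sqrt(6) / 6.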
assert (
imaging.poisson_noise_map.in_2d == (np.sqrt(6.0) / (6.0)) * np.ones((3, 3))
).all()
assert (
imaging.poisson_noise_map.in_2d == poisson_noise_map_converted.in_2d
).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__poisson_noise_map_from_image__include_convert_from_electrons(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
renormalize_psf=False,
poisson_noise_map_from_image=True,
convert_from_electrons=True,
)
image = aa.array.ones(shape_2d=(3, 3))
poisson_noise_map_counts = aa.data_converter.poisson_noise_map_from_image_and_exposure_time_map(
image=image,
exposure_time_map=imaging.exposure_time_map,
gain=None,
convert_from_electrons=True,
)
poisson_noise_map_converted = poisson_noise_map_counts / 6.0
assert (imaging.image.in_2d == np.ones((3, 3)) / 6.0).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 3.0 * np.ones((3, 3)) / 6.0).all()
assert (imaging.background_noise_map.in_2d == 4.0 * np.ones((3, 3)) / 6.0).all()
assert (imaging.poisson_noise_map.in_2d == np.ones((3, 3)) / 6.0).all()
assert (
imaging.poisson_noise_map.in_2d == poisson_noise_map_converted.in_2d
).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3)) / 6.0).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__poisson_noise_map_from_image__include_convert_from_adus(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
renormalize_psf=False,
poisson_noise_map_from_image=True,
gain=2.0,
convert_from_adus=True,
)
image = aa.array.ones(shape_2d=(3, 3))
poisson_noise_map_counts = aa.data_converter.poisson_noise_map_from_image_and_exposure_time_map(
image=image,
exposure_time_map=imaging.exposure_time_map,
gain=2.0,
convert_from_adus=True,
)
poisson_noise_map_converted = 2.0 * poisson_noise_map_counts / 6.0
assert (imaging.image.in_2d == 2.0 * np.ones((3, 3)) / 6.0).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 2.0 * 3.0 * np.ones((3, 3)) / 6.0).all()
assert (
imaging.background_noise_map.in_2d == 2.0 * 4.0 * np.ones((3, 3)) / 6.0
).all()
assert (
imaging.poisson_noise_map.in_2d == np.sqrt(2.0 * np.ones((3, 3))) / 6.0
).all()
assert (
imaging.poisson_noise_map.in_2d == poisson_noise_map_converted.in_2d
).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (
imaging.background_sky_map.in_2d == 2.0 * 7.0 * np.ones((3, 3)) / 6.0
).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__convert_poisson_noise_map_from_weight_map(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
renormalize_psf=False,
convert_poisson_noise_map_from_weight_map=True,
)
weight_map = aa.array.manual_2d(array=5.0 * np.ones((3, 3)))
poisson_noise_map_converted = aa.data_converter.noise_map_from_weight_map(
weight_map=weight_map
)
assert (imaging.image.in_2d == np.ones((3, 3))).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 3.0 * np.ones((3, 3))).all()
assert (imaging.background_noise_map.in_2d == 4.0 * np.ones((3, 3))).all()
assert (
imaging.poisson_noise_map.in_2d == poisson_noise_map_converted.in_2d
).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__convert_poisson_noise_map_from_inverse_noise_map(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
renormalize_psf=False,
convert_poisson_noise_map_from_inverse_noise_map=True,
)
inverse_noise_map = aa.array.manual_2d(array=5.0 * np.ones((3, 3)))
poisson_noise_map_converted = aa.data_converter.noise_map_from_inverse_noise_map(
inverse_noise_map=inverse_noise_map
)
assert (imaging.image.in_2d == np.ones((3, 3))).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 3.0 * np.ones((3, 3))).all()
assert (imaging.background_noise_map.in_2d == 4.0 * np.ones((3, 3))).all()
assert (
imaging.poisson_noise_map.in_2d == poisson_noise_map_converted.in_2d
).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__psf_renormalized_true__renormalized_psf(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
renormalize_psf=True,
)
assert (imaging.image.in_2d == np.ones((3, 3))).all()
assert imaging.psf.in_2d == pytest.approx((1.0 / 9.0) * np.ones((3, 3)), 1e-2)
assert (imaging.noise_map.in_2d == 3.0 * np.ones((3, 3))).all()
assert (imaging.background_noise_map.in_2d == 4.0 * np.ones((3, 3))).all()
assert (imaging.poisson_noise_map.in_2d == 5.0 * np.ones((3, 3))).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__convert_image_from_electrons_using_exposure_time(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
renormalize_psf=False,
convert_from_electrons=True,
)
assert (imaging.image.in_2d == np.ones((3, 3)) / 6.0).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 3.0 * np.ones((3, 3)) / 6.0).all()
assert (imaging.background_noise_map.in_2d == 4.0 * np.ones((3, 3)) / 6.0).all()
assert (imaging.poisson_noise_map.in_2d == 5.0 * np.ones((3, 3)) / 6.0).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3)) / 6.0).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__convert_image_from_adus_using_exposure_time_and_gain(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
renormalize_psf=False,
gain=2.0,
convert_from_adus=True,
)
assert (imaging.image.in_2d == 2.0 * np.ones((3, 3)) / 6.0).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 2.0 * 3.0 * np.ones((3, 3)) / 6.0).all()
assert (
imaging.background_noise_map.in_2d == 2.0 * 4.0 * np.ones((3, 3)) / 6.0
).all()
assert (
imaging.poisson_noise_map.in_2d == 2.0 * 5.0 * np.ones((3, 3)) / 6.0
).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (
imaging.background_sky_map.in_2d == 2.0 * 7.0 * np.ones((3, 3)) / 6.0
).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
def test__no_noise_map_input__raises_imaging_exception(self):
with pytest.raises(exc.DataException):
aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
)
def test__multiple_noise_map_options__raises_imaging_exception(self):
with pytest.raises(exc.DataException):
aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
convert_noise_map_from_inverse_noise_map=True,
convert_noise_map_from_weight_map=True,
)
with pytest.raises(exc.DataException):
aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
convert_noise_map_from_inverse_noise_map=True,
noise_map_from_image_and_background_noise_map=True,
)
with pytest.raises(exc.DataException):
aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
noise_map_from_image_and_background_noise_map=True,
convert_noise_map_from_weight_map=True,
)
    def test__exposure_time_and_exposure_time_map_included__raises_imaging_error(self):
with pytest.raises(exc.DataException):
aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_ones.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
exposure_time_map_path=test_data_dir + "3x3_ones.fits",
exposure_time_map_from_single_value=1.0,
)
def test__noise_map_from_image_and_background_noise_map_exceptions(self):
        # A background noise map is required - raise an error if it is not present.
with pytest.raises(exc.DataException):
aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_ones.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
exposure_time_map_from_single_value=1.0,
noise_map_from_image_and_background_noise_map=True,
)
        # A gain is not needed if the data is in electrons.
aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_ones.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
exposure_time_map_from_single_value=1.0,
noise_map_from_image_and_background_noise_map=True,
convert_from_electrons=True,
)
        # A gain is needed if the data is in ADUs.
with pytest.raises(exc.DataException):
aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_ones.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
noise_map_from_image_and_background_noise_map=True,
convert_from_adus=True,
)
        # No error is raised if the data is in ADUs and a gain is supplied.
aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_ones.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
exposure_time_map_from_single_value=1.0,
noise_map_from_image_and_background_noise_map=True,
gain=1.0,
convert_from_adus=True,
)
def test__poisson_noise_map_from_image_exceptions(self):
        # A gain is not needed if the data is in electrons per second.
aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_ones.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
exposure_time_map_from_single_value=1.0,
poisson_noise_map_from_image=True,
)
        # Without an exposure time map the Poisson noise map cannot be created - raise an error.
with pytest.raises(exc.DataException):
aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_ones.fits",
poisson_noise_map_from_image=True,
convert_from_electrons=True,
)
        # A gain is needed if the data is in ADUs.
with pytest.raises(exc.DataException):
aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_ones.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
exposure_time_map_from_single_value=1.0,
poisson_noise_map_from_image=True,
convert_from_adus=True,
)
def test__output_all_arrays(self):
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=test_data_dir + "3x3_ones.fits",
psf_path=test_data_dir + "3x3_twos.fits",
noise_map_path=test_data_dir + "3x3_threes.fits",
background_noise_map_path=test_data_dir + "3x3_fours.fits",
poisson_noise_map_path=test_data_dir + "3x3_fives.fits",
exposure_time_map_path=test_data_dir + "3x3_sixes.fits",
background_sky_map_path=test_data_dir + "3x3_sevens.fits",
renormalize_psf=False,
)
output_data_dir = "{}/../test_files/array/output_test/".format(
os.path.dirname(os.path.realpath(__file__))
)
if os.path.exists(output_data_dir):
shutil.rmtree(output_data_dir)
os.makedirs(output_data_dir)
imaging.output_to_fits(
image_path=output_data_dir + "image.fits",
psf_path=output_data_dir + "psf.fits",
noise_map_path=output_data_dir + "noise_map.fits",
background_noise_map_path=output_data_dir + "background_noise_map.fits",
poisson_noise_map_path=output_data_dir + "poisson_noise_map.fits",
exposure_time_map_path=output_data_dir + "exposure_time_map.fits",
background_sky_map_path=output_data_dir + "background_sky_map.fits",
)
imaging = aa.imaging.from_fits(
pixel_scales=0.1,
image_path=output_data_dir + "image.fits",
psf_path=output_data_dir + "psf.fits",
noise_map_path=output_data_dir + "noise_map.fits",
background_noise_map_path=output_data_dir + "background_noise_map.fits",
poisson_noise_map_path=output_data_dir + "poisson_noise_map.fits",
exposure_time_map_path=output_data_dir + "exposure_time_map.fits",
background_sky_map_path=output_data_dir + "background_sky_map.fits",
renormalize_psf=False,
)
assert (imaging.image.in_2d == np.ones((3, 3))).all()
assert (imaging.psf.in_2d == 2.0 * np.ones((3, 3))).all()
assert (imaging.noise_map.in_2d == 3.0 * np.ones((3, 3))).all()
assert (imaging.background_noise_map.in_2d == 4.0 * np.ones((3, 3))).all()
assert (imaging.poisson_noise_map.in_2d == 5.0 * np.ones((3, 3))).all()
assert (imaging.exposure_time_map.in_2d == 6.0 * np.ones((3, 3))).all()
assert (imaging.background_sky_map.in_2d == 7.0 * np.ones((3, 3))).all()
assert imaging.pixel_scales == (0.1, 0.1)
assert imaging.psf.mask.pixel_scales == (0.1, 0.1)
assert imaging.noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.poisson_noise_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.exposure_time_map.mask.pixel_scales == (0.1, 0.1)
assert imaging.background_sky_map.mask.pixel_scales == (0.1, 0.1)
class TestSimulateImaging:
def test__setup_with_all_features_off(self):
image = aa.array.manual_2d(
array=np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]),
pixel_scales=0.1,
)
exposure_time_map = aa.array.ones(shape_2d=image.shape_2d)
imaging_simulated = aa.imaging.simulate(
image=image,
exposure_time=1.0,
exposure_time_map=exposure_time_map,
add_noise=False,
)
assert (imaging_simulated.exposure_time_map.in_2d == np.ones((3, 3))).all()
assert (
imaging_simulated.image.in_2d
== np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]])
).all()
assert imaging_simulated.pixel_scales == (0.1, 0.1)
def test__setup_with_background_sky_on__noise_off__no_noise_in_image__noise_map_is_noise_value(
self
):
image = aa.array.manual_2d(
array=np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]])
)
exposure_time_map = aa.array.ones(shape_2d=image.mask.shape)
background_sky_map = aa.array.full(fill_value=16.0, shape_2d=image.mask.shape)
imaging_simulated = aa.imaging.simulate(
image=image,
exposure_time=1.0,
exposure_time_map=exposure_time_map,
background_sky_map=background_sky_map,
add_noise=False,
noise_if_add_noise_false=0.2,
noise_seed=1,
)
assert (
imaging_simulated.exposure_time_map.in_2d == 1.0 * np.ones((3, 3))
).all()
assert (
imaging_simulated.image.in_2d
== np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]])
).all()
assert (imaging_simulated.noise_map.in_2d == 0.2 * np.ones((3, 3))).all()
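        # With a background sky of 16 counts and an exposure time of 1, the background
        # noise estimate is presumably sqrt(16) = 4, matching the assertion below.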
assert (
imaging_simulated.background_noise_map.in_2d == 4.0 * np.ones((3, 3))
).all()
def test__setup_with_background_sky_on__noise_on_so_background_adds_noise_to_image(
self
):
image = aa.array.manual_2d(
array=np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]])
)
exposure_time_map = aa.array.ones(shape_2d=image.mask.shape)
background_sky_map = aa.array.full(fill_value=16.0, shape_2d=image.mask.shape)
imaging_simulated = aa.imaging.simulate(
image=image,
exposure_time=1.0,
exposure_time_map=exposure_time_map,
background_sky_map=background_sky_map,
add_noise=True,
noise_seed=1,
)
assert (
imaging_simulated.exposure_time_map.in_2d == 1.0 * np.ones((3, 3))
).all()
assert (
imaging_simulated.image.in_2d
== np.array([[1.0, 5.0, 4.0], [1.0, 2.0, 1.0], [5.0, 2.0, 7.0]])
).all()
assert (
imaging_simulated.poisson_noise_map.in_2d
== np.array(
[
[np.sqrt(1.0), np.sqrt(5.0), np.sqrt(4.0)],
[np.sqrt(1.0), np.sqrt(2.0), np.sqrt(1.0)],
[np.sqrt(5.0), np.sqrt(2.0), np.sqrt(7.0)],
]
)
).all()
assert (
imaging_simulated.background_noise_map.in_2d == 4.0 * np.ones((3, 3))
).all()
def test__setup_with_psf_blurring_on__blurs_image_and_trims_psf_edge_off(self):
image = aa.array.manual_2d(
array=np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
)
)
psf = aa.kernel.manual_2d(
array=np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 0.0]])
)
exposure_time_map = aa.array.ones(shape_2d=image.mask.shape)
imaging_simulated = aa.imaging.simulate(
image=image,
exposure_time=1.0,
exposure_time_map=exposure_time_map,
psf=psf,
add_noise=False,
)
assert (
imaging_simulated.image.in_2d
== np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 0.0]])
).all()
assert (imaging_simulated.exposure_time_map.in_2d == np.ones((3, 3))).all()
def test__setup_with_background_sky_and_psf_on__psf_does_no_blurring__image_and_sky_both_trimmed(
self
):
image = aa.array.manual_2d(
array=np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
)
)
psf = aa.kernel.no_blur()
exposure_time_map = aa.array.ones(shape_2d=image.mask.shape)
background_sky_map = aa.array.full(fill_value=16.0, shape_2d=image.mask.shape)
imaging_simulated = aa.imaging.simulate(
image=image,
exposure_time=1.0,
exposure_time_map=exposure_time_map,
psf=psf,
background_sky_map=background_sky_map,
add_noise=False,
noise_seed=1,
)
assert (
imaging_simulated.exposure_time_map.in_2d == 1.0 * np.ones((3, 3))
).all()
assert (
imaging_simulated.image.in_2d
== np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]])
).all()
assert (
imaging_simulated.background_noise_map.in_2d == 4.0 * np.ones((3, 3))
).all()
def test__setup_with_noise(self):
image = aa.array.manual_2d(
array=np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]])
)
exposure_time_map = aa.array.full(fill_value=20.0, shape_2d=image.mask.shape)
imaging_simulated = aa.imaging.simulate(
image=image,
exposure_time=20.0,
exposure_time_map=exposure_time_map,
add_noise=True,
noise_seed=1,
)
assert (
imaging_simulated.exposure_time_map.in_2d == 20.0 * np.ones((3, 3))
).all()
assert imaging_simulated.image.in_2d == pytest.approx(
np.array([[0.0, 0.0, 0.0], [0.0, 1.05, 0.0], [0.0, 0.0, 0.0]]), 1e-2
)
        # Because the value is 1.05, the estimated Poisson noise map is:
# sqrt((1.05 * 20))/20 = 0.2291
assert imaging_simulated.poisson_noise_map.in_2d == pytest.approx(
np.array([[0.0, 0.0, 0.0], [0.0, 0.2291, 0.0], [0.0, 0.0, 0.0]]), 1e-2
)
assert imaging_simulated.noise_map.in_2d == pytest.approx(
np.array([[0.0, 0.0, 0.0], [0.0, 0.2291, 0.0], [0.0, 0.0, 0.0]]), 1e-2
)
def test__setup_with__psf_blurring_and_poisson_noise_on__poisson_noise_added_to_blurred_image(
self
):
image = aa.array.manual_2d(
array=np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
)
)
psf = aa.kernel.manual_2d(
array=np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 0.0]])
)
exposure_time_map = aa.array.full(fill_value=20.0, shape_2d=image.mask.shape)
imaging_simulated = aa.imaging.simulate(
image=image,
exposure_time=20.0,
exposure_time_map=exposure_time_map,
psf=psf,
add_noise=True,
noise_seed=1,
)
assert (
imaging_simulated.exposure_time_map.in_2d == 20.0 * np.ones((3, 3))
).all()
assert imaging_simulated.image.in_2d == pytest.approx(
np.array([[0.0, 1.05, 0.0], [1.3, 2.35, 1.05], [0.0, 1.05, 0.0]]), 1e-2
)
# The estimated Poisson noises are:
# sqrt((2.35 * 20))/20 = 0.3427
# sqrt((1.3 * 20))/20 = 0.2549
# sqrt((1.05 * 20))/20 = 0.2291
assert imaging_simulated.poisson_noise_map.in_2d == pytest.approx(
np.array(
[[0.0, 0.2291, 0.0], [0.2549, 0.3427, 0.2291], [0.0, 0.2291, 0.0]]
),
1e-2,
)
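        # Restating the scaling (comment only): an image value s in electrons per second
        # exposed for time t has Poisson sigma sqrt(s * t) in counts, i.e. sqrt(s * t) / t
        # back in electrons per second -- giving 0.3427, 0.2549 and 0.2291 for
        # s = 2.35, 1.3, 1.05 with t = 20, as asserted above.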
def test__simulate_function__turns_exposure_time_and_sky_level_to_arrays(self):
image = aa.array.manual_2d(
array=np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
)
)
psf = aa.kernel.no_blur()
exposure_time_map = aa.array.ones(shape_2d=image.mask.shape)
background_sky_map = aa.array.full(fill_value=16.0, shape_2d=image.mask.shape)
imaging_model = aa.imaging.simulate(
image=image,
exposure_time=1.0,
exposure_time_map=exposure_time_map,
psf=psf,
background_sky_map=background_sky_map,
add_noise=False,
noise_seed=1,
)
image = aa.array.manual_2d(
array=np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
)
)
imaging_simulated = aa.imaging.simulate(
image=image,
exposure_time=1.0,
background_level=16.0,
psf=psf,
add_noise=False,
noise_seed=1,
)
assert (
imaging_model.exposure_time_map.in_2d
== imaging_simulated.exposure_time_map.in_2d
).all()
assert imaging_model.image.in_2d == pytest.approx(
imaging_simulated.image.in_2d, 1e-4
)
assert (
imaging_model.background_noise_map.in_2d
== imaging_simulated.background_noise_map.in_2d
).all()
def test__noise_map_creates_nans_due_to_low_exposure_time__raises_error(self):
image = aa.array.manual_2d(array=np.ones((9, 9)))
psf = aa.kernel.no_blur()
exposure_time_map = aa.array.ones(shape_2d=image.mask.shape)
background_sky_map = aa.array.ones(shape_2d=image.mask.shape)
with pytest.raises(exc.DataException):
aa.imaging.simulate(
image=image,
psf=psf,
exposure_time=1.0,
exposure_time_map=exposure_time_map,
background_sky_map=background_sky_map,
add_noise=True,
noise_seed=1,
)
class TestSimulatePoissonNoise:
def test__input_image_all_0s__exposure_time_all_1s__all_noise_values_are_0s(self):
image = aa.array.zeros(shape_2d=(2, 2))
exposure_time = aa.array.ones(shape_2d=(2, 2))
simulated_poisson_image = image + imaging.generate_poisson_noise(
image, exposure_time, seed=1
)
assert simulated_poisson_image.shape_2d == (2, 2)
assert (simulated_poisson_image.in_2d == np.zeros((2, 2))).all()
def test__input_image_includes_10s__exposure_time_is_1s__gives_noise_values_near_1_to_5(
self
):
image = aa.array.manual_2d([[10.0, 0.0], [0.0, 10.0]])
exposure_time = aa.array.ones(shape_2d=(2, 2))
poisson_noise_map = imaging.generate_poisson_noise(image, exposure_time, seed=1)
simulated_poisson_image = image + poisson_noise_map
assert simulated_poisson_image.shape_2d == (2, 2)
        # Use the known noise map for the given seed.
assert (
poisson_noise_map.in_2d == np.array([[(10.0 - 9.0), 0], [0, (10.0 - 6.0)]])
).all()
assert (simulated_poisson_image.in_2d == np.array([[11, 0], [0, 14]])).all()
assert (simulated_poisson_image - poisson_noise_map == image).all()
def test__input_image_is_all_10s__exposure_time_is_1s__gives_noise_values_near_1_to_5(
self
):
image = aa.array.full(fill_value=10.0, shape_2d=(2, 2))
exposure_time = aa.array.ones(shape_2d=(2, 2))
poisson_noise_map = imaging.generate_poisson_noise(image, exposure_time, seed=1)
simulated_poisson_image = image + poisson_noise_map
assert simulated_poisson_image.shape_2d == (2, 2)
        # Use the known noise map for the given seed.
assert (poisson_noise_map.in_2d == np.array([[1, 4], [3, 1]])).all()
assert (simulated_poisson_image.in_2d == np.array([[11, 14], [13, 11]])).all()
assert (simulated_poisson_image - poisson_noise_map == image).all()
def test__input_image_has_1000000s__exposure_times_is_1s__these_give_positive_noise_values_near_1000(
self
):
image = aa.array.manual_2d([[10000000.0, 0.0], [0.0, 10000000.0]])
exposure_time_map = aa.array.ones(shape_2d=(2, 2))
poisson_noise_map = imaging.generate_poisson_noise(
image=image, exposure_time_map=exposure_time_map, seed=2
)
simulated_poisson_image = image + poisson_noise_map
assert simulated_poisson_image.shape_2d == (2, 2)
        # Use the known noise map for the given seed.
assert (poisson_noise_map.in_2d == np.array([[571, 0], [0, -441]])).all()
assert (
simulated_poisson_image.in_2d
== np.array([[10000000.0 + 571, 0.0], [0.0, 10000000.0 - 441]])
).all()
assert (simulated_poisson_image - poisson_noise_map == image).all()
def test__two_images_same_in_counts_but_different_in_electrons_per_sec__noise_related_by_exposure_times(
self
):
image_0 = aa.array.manual_2d([[10.0, 0.0], [0.0, 10.0]])
exposure_time_0 = aa.array.ones(shape_2d=(2, 2))
image_1 = aa.array.manual_2d([[5.0, 0.0], [0.0, 5.0]])
exposure_time_1 = 2.0 * aa.array.ones(shape_2d=(2, 2))
simulated_poisson_image_0 = image_0 + imaging.generate_poisson_noise(
image_0, exposure_time_0, seed=1
)
simulated_poisson_image_1 = image_1 + imaging.generate_poisson_noise(
image_1, exposure_time_1, seed=1
)
assert (simulated_poisson_image_0 / 2.0 == simulated_poisson_image_1).all()
def test__same_as_above_but_range_of_image_values_and_exposure_times(self):
image_0 = aa.array.manual_2d([[10.0, 20.0], [30.0, 40.0]])
exposure_time_0 = aa.array.manual_2d([[2.0, 2.0], [3.0, 4.0]])
image_1 = aa.array.manual_2d([[20.0, 20.0], [45.0, 20.0]])
exposure_time_1 = aa.array.manual_2d([[1.0, 2.0], [2.0, 8.0]])
simulated_poisson_image_0 = image_0 + imaging.generate_poisson_noise(
image_0, exposure_time_0, seed=1
)
simulated_poisson_image_1 = image_1 + imaging.generate_poisson_noise(
image_1, exposure_time_1, seed=1
)
assert (
simulated_poisson_image_0[0] == simulated_poisson_image_1[0] / 2.0
).all()
assert simulated_poisson_image_0[1] == simulated_poisson_image_1[1]
assert (
simulated_poisson_image_0[2] * 1.5
== pytest.approx(simulated_poisson_image_1[2], 1e-2)
).all()
assert (
simulated_poisson_image_0[3] / 2.0 == simulated_poisson_image_1[3]
).all()
| 40.592814
| 116
| 0.584403
|
3f48abd1a43216e3c56372d0d5ef8e6f21e11962
| 3,605
|
py
|
Python
|
torchmetrics/functional/regression/pearson.py
|
CSautier/metrics
|
32389fbc03b1bbbf0b15b05c56dc1db7a90bff97
|
[
"Apache-2.0"
] | null | null | null |
torchmetrics/functional/regression/pearson.py
|
CSautier/metrics
|
32389fbc03b1bbbf0b15b05c56dc1db7a90bff97
|
[
"Apache-2.0"
] | null | null | null |
torchmetrics/functional/regression/pearson.py
|
CSautier/metrics
|
32389fbc03b1bbbf0b15b05c56dc1db7a90bff97
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
def _pearson_corrcoef_update(
preds: Tensor,
target: Tensor,
mean_x: Tensor,
mean_y: Tensor,
var_x: Tensor,
var_y: Tensor,
corr_xy: Tensor,
n_prior: Tensor,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
"""
Updates and returns variables required to compute Pearson Correlation Coefficient.
Checks for same shape of input tensors.
    Args:
        preds: estimated scores
        target: ground truth scores
        mean_x: current mean estimate of x tensor
        mean_y: current mean estimate of y tensor
        var_x: current variance estimate of x tensor
        var_y: current variance estimate of y tensor
        corr_xy: current covariance estimate between x and y tensor
        n_prior: number of observations accumulated so far
"""
# Data checking
_check_same_shape(preds, target)
preds = preds.squeeze()
target = target.squeeze()
if preds.ndim > 1 or target.ndim > 1:
raise ValueError('Expected both predictions and target to be 1 dimensional tensors.')
n_obs = preds.numel()
mx_new = (n_prior * mean_x + preds.mean() * n_obs) / (n_prior + n_obs)
my_new = (n_prior * mean_y + target.mean() * n_obs) / (n_prior + n_obs)
n_prior += n_obs
var_x += ((preds - mx_new) * (preds - mean_x)).sum()
var_y += ((target - my_new) * (target - mean_y)).sum()
corr_xy += ((preds - mx_new) * (target - mean_y)).sum()
mean_x = mx_new
mean_y = my_new
return mean_x, mean_y, var_x, var_y, corr_xy, n_prior
def _pearson_corrcoef_compute(
var_x: Tensor,
var_y: Tensor,
corr_xy: Tensor,
nb: Tensor,
) -> Tensor:
"""
Computes the final pearson correlation based on accumulated statistics
Args:
var_x: variance estimate of x tensor
var_y: variance estimate of y tensor
corr_xy: covariance estimate between x and y tensor
nb: number of observations
"""
var_x /= (nb - 1)
var_y /= (nb - 1)
corr_xy /= (nb - 1)
corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()
return torch.clamp(corrcoef, -1.0, 1.0)
def pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor:
"""
Computes pearson correlation coefficient.
Args:
preds: estimated scores
target: ground truth scores
Example:
>>> from torchmetrics.functional import pearson_corrcoef
>>> target = torch.tensor([3, -0.5, 2, 7])
>>> preds = torch.tensor([2.5, 0.0, 2, 8])
>>> pearson_corrcoef(preds, target)
tensor(0.9849)
"""
_temp = torch.zeros(1, dtype=preds.dtype, device=preds.device)
mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()
var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()
_, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb)
return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)
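# A minimal cross-check sketch (illustrative addition, not part of the original module):
# the streaming update above should agree with a direct computation of Pearson's r, done
# here with numpy.corrcoef on the toy data from the docstring example.
if __name__ == "__main__":
    import numpy as np
    _preds = torch.tensor([2.5, 0.0, 2.0, 8.0])
    _target = torch.tensor([3.0, -0.5, 2.0, 7.0])
    _reference = torch.tensor(
        np.corrcoef(_preds.numpy(), _target.numpy())[0, 1], dtype=_preds.dtype
    )
    # both paths should give roughly 0.9849 for this data
    assert torch.isclose(pearson_corrcoef(_preds, _target), _reference, atol=1e-4)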
| 33.37963
| 120
| 0.669348
|
b2a7b862f2683223f18b9a96052c68935b34c9a0
| 499
|
py
|
Python
|
rrs/migrations/0009_rmh_layerbranch.py
|
ebrent8/clear-linux-dissector-web
|
45f1f9b5a5753ab8b14ed3c99f1c9e68bb97a47c
|
[
"MIT"
] | 3
|
2019-05-12T21:11:53.000Z
|
2019-09-15T18:11:21.000Z
|
rrs/migrations/0009_rmh_layerbranch.py
|
ebrent8/clear-linux-dissector-web
|
45f1f9b5a5753ab8b14ed3c99f1c9e68bb97a47c
|
[
"MIT"
] | 21
|
2019-06-26T05:01:01.000Z
|
2022-03-11T23:47:21.000Z
|
rrs/migrations/0009_rmh_layerbranch.py
|
ebrent8/clear-linux-dissector-web
|
45f1f9b5a5753ab8b14ed3c99f1c9e68bb97a47c
|
[
"MIT"
] | 8
|
2019-06-13T08:51:12.000Z
|
2021-02-17T11:14:46.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('layerindex', '0010_add_dependencies'),
('rrs', '0008_upgrade_info'),
]
operations = [
migrations.AddField(
model_name='recipemaintainerhistory',
name='layerbranch',
field=models.ForeignKey(blank=True, null=True, to='layerindex.LayerBranch'),
),
]
| 23.761905
| 88
| 0.627255
|
da9cf95fc3988cdd4c3e83c7d1a246ced08097ec
| 1,128
|
py
|
Python
|
server/djangoapp/urls.py
|
snicer1/agfzb-CloudAppDevelopment_Capstone
|
d6c346fac5851dbf5f136ca9a7ea8606adf553b8
|
[
"Apache-2.0"
] | null | null | null |
server/djangoapp/urls.py
|
snicer1/agfzb-CloudAppDevelopment_Capstone
|
d6c346fac5851dbf5f136ca9a7ea8606adf553b8
|
[
"Apache-2.0"
] | null | null | null |
server/djangoapp/urls.py
|
snicer1/agfzb-CloudAppDevelopment_Capstone
|
d6c346fac5851dbf5f136ca9a7ea8606adf553b8
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from django.conf.urls.static import static
from django.conf import settings
from . import views
app_name = 'djangoapp'
urlpatterns = [
    # route is a string that contains a URL pattern
# view refers to the view function
# name the URL
# path for about view
# path for contact us view
# path for registration
# path for login
# path for logout
path(route='', view=views.get_dealerships, name='index'),
path(route='about', view=views.about_us, name='about'),
path(route='contact', view=views.contact, name='contact'),
path(route='logout', view=views.logout_request, name='logout'),
path(route='login', view=views.login_request, name='login'),
path(route='registration', view=views.registration_request, name='registration'),
path(route='dealer/<int:dealer_id>', view=views.get_dealer_details, name='dealer_details'),
path(route='add_review/<int:dealer_id>', view=views.add_review, name='add_review'),
# path for dealer reviews view
# path for add a review view
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 32.228571
| 95
| 0.70922
|
ce41aba32720f6ea9d94528de27dd679c70e0924
| 167
|
py
|
Python
|
tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_MovingMedian_Seasonal_WeekOfYear_SVR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_MovingMedian_Seasonal_WeekOfYear_SVR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_MovingMedian_Seasonal_WeekOfYear_SVR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Fisher'] , ['MovingMedian'] , ['Seasonal_WeekOfYear'] , ['SVR'] );
| 41.75
| 89
| 0.760479
|
0dd31086241c061970a46210a61a851121f4a1c4
| 8,931
|
py
|
Python
|
rollo/executor.py
|
khurrumsaleem/rollo
|
ad7bc0589c5195fefcde8e9e833340fa6d73e59b
|
[
"BSD-3-Clause"
] | null | null | null |
rollo/executor.py
|
khurrumsaleem/rollo
|
ad7bc0589c5195fefcde8e9e833340fa6d73e59b
|
[
"BSD-3-Clause"
] | null | null | null |
rollo/executor.py
|
khurrumsaleem/rollo
|
ad7bc0589c5195fefcde8e9e833340fa6d73e59b
|
[
"BSD-3-Clause"
] | null | null | null |
import rollo
from rollo.input_validation import InputValidation
from rollo.algorithm import Algorithm
from rollo.constraints import Constraints
from rollo.toolbox_generator import ToolboxGenerator
import json
import time
from collections import OrderedDict
import logging
import sys
class Executor(object):
"""Executes rollo simulation from start to finish.
Instances of this class can be used to perform a rollo run.
The Executor class drives the ROLLO code execution with the following
steps in the execute method:
1) User input file validation with InputValidation
2) Evaluation function generation with Evaluation class
3) DEAP toolbox initialization with ToolboxGenerator class
4) Constraint initialization with Constraints class
5) Genetic algorithm execution with Algorithm class
Parameters
----------
input_file : str
Name of input file
checkpoint_file : str, optional
Name of checkpoint file
Attributes
----------
input_file : str
Name of input file
checkpoint_file : str
Name of checkpoint file
    verbose : bool
        If True, INFO-level log messages are written to stdout.
    """
def __init__(self, input_file, checkpoint_file=None, verbose=False):
self.input_file = input_file
self.checkpoint_file = checkpoint_file
if verbose:
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def execute(self):
"""Executes rollo simulation to generate reactor designs. \n
1) Read and validate input file
2) Initialize evaluator
3) Initialize DEAP toolbox
4) Initialize constraints
5) Run genetic algorithm
"""
t0 = time.time()
input_dict = self.read_input_file()
iv = InputValidation(input_dict)
iv.add_all_defaults()
iv.validate()
complete_input_dict = iv.input
# organize control variables and output dict
control_dict, output_dict = self.organize_input_output(
complete_input_dict)
# generate evaluator function
evaluator_fn = self.load_evaluator(
control_dict, output_dict, complete_input_dict
)
# DEAP toolbox set up
toolbox, creator = self.load_toolbox(
evaluator_fn,
complete_input_dict["algorithm"],
complete_input_dict["control_variables"],
control_dict,
)
# load constraints if they exist
constraints = self.load_constraints(
output_dict, complete_input_dict["constraints"], toolbox
)
alg = Algorithm(
deap_toolbox=toolbox,
constraint_obj=constraints,
checkpoint_file=self.checkpoint_file,
deap_creator=creator,
control_dict=control_dict,
output_dict=output_dict,
input_dict=complete_input_dict,
start_time=t0,
parallel_method=complete_input_dict["algorithm"]["parallel"],
)
alg.generate()
t1 = time.time()
print("Total time in simulation " +
str(round(t1 - t0, 2)) + " seconds")
return
def read_input_file(self):
"""Reads a json input file and returns a dictionary
Returns
-------
data : dict
json input file converted into a dict
"""
with open(self.input_file) as json_file:
data = json.load(json_file)
return data
def organize_input_output(self, input_dict):
"""Labels the control variables and output variables with numbers
to keep consistency between evaluation, constraints, and algorithm
classes
Parameters
----------
input_dict : dict
input file dict
Returns
-------
control_vars : OrderedDict
Ordered dict of control variables as keys and a list of their
solver and count of variables as each value
output_vars : OrderedDict
Ordered dict of output variables as keys and solvers as values
"""
input_ctrl_vars = input_dict["control_variables"]
input_evaluators = input_dict["evaluators"]
input_algorithm = input_dict["algorithm"]
# define control variables dict
control_vars = OrderedDict()
for solver in input_evaluators:
for var in input_evaluators[solver]["inputs"]:
control_vars[var] = [solver, 1]
# define output variables dict
output_vars = OrderedDict()
optimized_variable = input_algorithm["optimized_variable"]
# find optimized variable
var_to_solver = {}
for solver in input_evaluators:
for var in input_evaluators[solver]["outputs"]:
var_to_solver[var] = solver
for opt_var in optimized_variable:
output_vars[opt_var] = var_to_solver[opt_var]
# put in the rest of the output variables
for solver in input_evaluators:
for var in input_evaluators[solver]["outputs"]:
if var not in optimized_variable:
output_vars[var] = solver
return control_vars, output_vars
def load_evaluator(self, control_dict, output_dict, input_dict):
"""Creates an Evaluation function object
Parameters
----------
control_dict : OrderedDict
Ordered dict of control variables as keys and a list of their
solver and number of variables as each value
output_dict : OrderedDict
Ordered dict of output variables as keys and solvers as values
input_dict : dict
input file dict with default values filled
Returns
-------
evaluator_fn : function
function that runs the evaluation software and returns output values
output by the software
"""
input_evaluators = input_dict["evaluators"]
evaluator = rollo.Evaluation()
for solver in input_evaluators:
solver_dict = input_evaluators[solver]
try:
output_script = solver_dict["output_script"]
except BaseException:
output_script = None
logging.warning(" No output script defined for " + solver)
evaluator.add_evaluator(
solver_name=solver,
input_script=solver_dict["input_script"],
output_script=output_script,
)
parallel_type = input_dict["algorithm"]["parallel"]
gens = input_dict["algorithm"]["generations"]
evaluator_fn = evaluator.eval_fn_generator(
control_dict, output_dict, input_dict["evaluators"],
gens, parallel_type)
return evaluator_fn
def load_toolbox(
self, evaluator_fn, input_algorithm, input_ctrl_vars, control_dict
):
"""Creates a DEAP toolbox object based on user-defined
parameters.
Parameters
----------
evaluator_fn : function
function that runs the evaluation software and returns output values
output by the software
input_algorithm : dict
algorithm sub-dictionary from input file
input_ctrl_vars : dict
control variables sub-dictionary from input file
control_dict : OrderedDict
Ordered dict of control variables as keys and a list of their
solver and number of variables as each value
Returns
-------
toolbox : deap.base.Toolbox object
DEAP toolbox populated with user-defined genetic algorithm parameters
creator : deap.creator object
DEAP meta-factory allowing to create classes that will fulfill the
needs of the evolutionary algorithms
"""
toolbox_generator = ToolboxGenerator()
toolbox, creator = toolbox_generator.setup(
evaluator_fn, input_algorithm, input_ctrl_vars, control_dict
)
return toolbox, creator
def load_constraints(self, output_dict, input_constraints, toolbox):
"""Creates a Constraints object loaded with user-defined
constraints information.
Parameters
----------
output_dict : OrderedDict
Ordered dict of output variables as keys and solvers as values
input_constraints : dict
constraints sub-dictionary from input file
toolbox : deap.base.Toolbox object
DEAP toolbox populated with genetic algorithm parameters for this
creator
Returns
-------
constraints_obj : rollo.Constraints object
Constraints object loaded with constraint information from the
input file
"""
constraint_obj = Constraints(output_dict, input_constraints, toolbox)
return constraint_obj
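# Illustrative usage sketch (comment only; the input file name below is hypothetical and
# ROLLO's real command-line entry point may differ):
#
#   executor = Executor(input_file="rollo_input.json", verbose=True)
#   executor.execute()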
| 34.35
| 81
| 0.63218
|
c52a0f3a178beaf3c4b6eadbb328c425cbd4fb54
| 2,794
|
py
|
Python
|
src/main/scheduler/model/Caregiver.py
|
hu-bingwen/vaccine-scheduler-python
|
364939c1dfb1a9009a4147148e680a7bf07688a4
|
[
"MIT"
] | null | null | null |
src/main/scheduler/model/Caregiver.py
|
hu-bingwen/vaccine-scheduler-python
|
364939c1dfb1a9009a4147148e680a7bf07688a4
|
[
"MIT"
] | null | null | null |
src/main/scheduler/model/Caregiver.py
|
hu-bingwen/vaccine-scheduler-python
|
364939c1dfb1a9009a4147148e680a7bf07688a4
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("../util/*")
sys.path.append("../db/*")
from util.Util import Util
from db.ConnectionManager import ConnectionManager
import pymssql
class Caregiver:
def __init__(self, username, password=None, salt=None, hash=None):
self.username = username
self.password = password
self.salt = salt
self.hash = hash
# getters
def get(self):
cm = ConnectionManager()
conn = cm.create_connection()
cursor = conn.cursor(as_dict=True)
get_caregiver_details = "SELECT Salt, Hash FROM Caregivers WHERE Username = %s"
try:
cursor.execute(get_caregiver_details, self.username)
for row in cursor:
curr_salt = row['Salt']
curr_hash = row['Hash']
calculated_hash = Util.generate_hash(self.password, curr_salt)
if not curr_hash == calculated_hash:
cm.close_connection()
return None
else:
self.salt = curr_salt
self.hash = calculated_hash
return self
except pymssql.Error:
print("Error occurred when getting Caregivers")
cm.close_connection()
cm.close_connection()
return None
def get_username(self):
return self.username
def get_salt(self):
return self.salt
def get_hash(self):
return self.hash
def save_to_db(self):
cm = ConnectionManager()
conn = cm.create_connection()
cursor = conn.cursor()
add_caregivers = "INSERT INTO Caregivers VALUES (%s, %s, %s)"
try:
cursor.execute(add_caregivers, (self.username, self.salt, self.hash))
# you must call commit() to persist your data if you don't set autocommit to True
conn.commit()
except pymssql.Error as db_err:
print("Error occurred when creating Caregivers")
sqlrc = str(db_err.args[0])
print("Exception code: " + str(sqlrc))
cm.close_connection()
raise
cm.close_connection()
# Insert availability with parameter date d
def upload_availability(self, d):
cm = ConnectionManager()
conn = cm.create_connection()
cursor = conn.cursor()
add_availability = "INSERT INTO Availabilities VALUES (%s , %s)"
try:
cursor.execute(add_availability, (d, self.username))
# you must call commit() to persist your data if you don't set autocommit to True
conn.commit()
except pymssql.Error:
print("Error occurred when updating caregiver availability")
cm.close_connection()
cm.close_connection()
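# Illustrative flow for this class (comment only; the username, password and salt below are
# hypothetical, and only Util.generate_hash is referenced in this file -- how the salt is
# produced lives outside this module):
#
#   salt = ...  # a random salt generated elsewhere
#   hashed = Util.generate_hash("s3cret", salt)
#   Caregiver("alice", salt=salt, hash=hashed).save_to_db()
#   caregiver = Caregiver("alice", password="s3cret").get()  # None if the password is wrong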
| 32.114943
| 93
| 0.589835
|
052a58b83d7d2afcd09e87d9890680f4990de7c4
| 545
|
py
|
Python
|
nanoql/__main__.py
|
viehwegerlib/nanoql
|
152c511afcff96c869d8eff261eebd7e82bc4337
|
[
"BSD-3-Clause"
] | 1
|
2019-04-24T21:38:26.000Z
|
2019-04-24T21:38:26.000Z
|
nanoql/__main__.py
|
phiweger/nanoql
|
152c511afcff96c869d8eff261eebd7e82bc4337
|
[
"BSD-3-Clause"
] | null | null | null |
nanoql/__main__.py
|
phiweger/nanoql
|
152c511afcff96c869d8eff261eebd7e82bc4337
|
[
"BSD-3-Clause"
] | null | null | null |
'''
nanohq command line.
'''
#
import click
# from nanohq.utils import emit # ,spike, simulate
# from nanohq.message import message
# from nanohq.publish import publish
# from nanohq.subscribe import subscribe
# from nanohq.monitor import accrue, watch, gather
# from nanohq.sift import sift
#
#
@click.group()
def cli():
pass
#
#
# cli.add_command(emit)
# cli.add_command(message)
# cli.add_command(publish)
# cli.add_command(subscribe)
# cli.add_command(accrue)
# cli.add_command(watch)
# cli.add_command(gather)
# cli.add_command(sift)
| 19.464286
| 51
| 0.741284
|
afdcc36667026924b44807f7b39ef757dc16941f
| 2,309
|
py
|
Python
|
pymatgen/io/cp2k/tests/test_sets.py
|
mjwen/pymatgen
|
051a13897dbac7b76438f5705d0c21befd58942c
|
[
"MIT"
] | null | null | null |
pymatgen/io/cp2k/tests/test_sets.py
|
mjwen/pymatgen
|
051a13897dbac7b76438f5705d0c21befd58942c
|
[
"MIT"
] | null | null | null |
pymatgen/io/cp2k/tests/test_sets.py
|
mjwen/pymatgen
|
051a13897dbac7b76438f5705d0c21befd58942c
|
[
"MIT"
] | null | null | null |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
from pymatgen.core.structure import Molecule, Species, Structure
from pymatgen.io.cp2k.sets import (
CellOptSet,
Cp2kInputSet,
DftSet,
HybridCellOptSet,
HybridRelaxSet,
HybridStaticSet,
RelaxSet,
StaticSet,
)
from pymatgen.util.testing import PymatgenTest
Si_structure = Structure(
lattice=[[0, 2.734364, 2.734364], [2.734364, 0, 2.734364], [2.734364, 2.734364, 0]],
species=["Si", "Si"],
coords=[[0, 0, 0], [0.25, 0.25, 0.25]],
)
nonsense_Structure = Structure(
lattice=[[-1, -10, -100], [0.1, 0.01, 0.001], [7, 11, 21]], species=["X"], coords=[[-1, -1, -1]]
)
molecule = Molecule(species=["C", "H"], coords=[[0, 0, 0], [1, 1, 1]])
property_structure = Structure(
lattice=[[10, 0, 0], [0, 10, 0], [0, 0, 10]],
species=[
Species("Ni", oxidation_state=4, properties={"spin": 0}),
Species("O", oxidation_state=-2, properties={"spin": 0}),
"Ni",
"O",
],
coords=[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [1, 1, 1]],
)
# TODO More comprehensive testing
class SetTest(PymatgenTest):
def setUp(self):
pass
def test_all_sets(self):
for s in [Si_structure, molecule]:
cis = Cp2kInputSet(s)
self.assertMSONable(cis)
cis = Cp2kInputSet.from_dict(cis.as_dict())
Cp2kInputSet.from_string(cis.get_string())
DftSet(s)
StaticSet(s)
HybridStaticSet(s)
RelaxSet(s)
HybridRelaxSet(s)
CellOptSet(s)
HybridCellOptSet(s)
def test_aux_basis(self):
Si_aux_bases = ["FIT", "cFIT", "pFIT", "cpFIT"]
for s in Si_aux_bases:
HybridStaticSet(Si_structure, aux_basis={"Si": s})
def test_prints(self):
cis = RelaxSet(Si_structure, print_ldos=False, print_pdos=False)
self.assertFalse(cis.check("FORCE_EVAL/DFT/PRINT/PRINT/PDOS"))
cis = RelaxSet(Si_structure, print_ldos=True, print_hartree_potential=True)
self.assertTrue(cis.check("FORCE_EVAL/DFT/PRINT/PDOS/LDOS 1"))
self.assertTrue(cis.check("FORCE_EVAL/DFT/PRINT/V_HARTREE_CUBE"))
if __name__ == "__main__":
unittest.main()
| 29.227848
| 100
| 0.607189
|
7e81e03871017309281db662b0f7d987d465e31e
| 1,128
|
py
|
Python
|
events/migrations/0024_auto_20200114_1950.py
|
McCarthyCode/Market-to-Market-Chicago
|
15d491f6f45c0899864ae9256f2808e46e0e140b
|
[
"MIT"
] | null | null | null |
events/migrations/0024_auto_20200114_1950.py
|
McCarthyCode/Market-to-Market-Chicago
|
15d491f6f45c0899864ae9256f2808e46e0e140b
|
[
"MIT"
] | 1
|
2020-06-09T11:15:17.000Z
|
2020-06-09T11:15:17.000Z
|
events/migrations/0024_auto_20200114_1950.py
|
mattmc318/Market-to-Market-Chicago
|
15d491f6f45c0899864ae9256f2808e46e0e140b
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.6 on 2020-01-15 01:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('events', '0023_auto_20200114_1843'),
]
operations = [
migrations.CreateModel(
name='Weekday',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_updated', models.DateTimeField(auto_now=True)),
('weekday', models.PositiveSmallIntegerField(choices=[(0, 'Monday'), (1, 'Tuesday'), (2, 'Wednesday'), (3, 'Thursday'), (4, 'Friday'), (5, 'Saturday'), (6, 'Sunday')])),
('info', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='events.RepeatInfo')),
],
options={
'abstract': False,
},
),
migrations.DeleteModel(
name='RecurringEventWeekday',
),
]
| 36.387097
| 185
| 0.582447
|
69207207afc89b26714b0d98938c06501ddb4f81
| 1,778
|
py
|
Python
|
tests/test_time_indicators.py
|
TimEconometrics/pyjuque
|
619f506bdcc163e689212337ec74c40e220e0eb6
|
[
"MIT"
] | null | null | null |
tests/test_time_indicators.py
|
TimEconometrics/pyjuque
|
619f506bdcc163e689212337ec74c40e220e0eb6
|
[
"MIT"
] | null | null | null |
tests/test_time_indicators.py
|
TimEconometrics/pyjuque
|
619f506bdcc163e689212337ec74c40e220e0eb6
|
[
"MIT"
] | null | null | null |
# tests/test_time_indicators.py
import os
import unittest
import sys
curr_path = os.path.abspath(__file__)
root_path = os.path.abspath(
os.path.join(curr_path, os.path.pardir, os.path.pardir))
sys.path.append(root_path)
# Import all Created exchanges here
from bot.Exchanges.Binance import Binance
from bot.Indicators import AddIndicator, INDICATOR_DICT
from tests.utils import timeit
from pandas import DataFrame
import pandas
import json
class PytiIndicatorsTests(unittest.TestCase):
############################
#### setup and teardown ####
############################
# executed prior to each test
def setUp(self):
self.df_1k = pandas.read_csv('tests/data/BTCUSD_1m_1k.csv')
self.df_10k = pandas.read_csv('tests/data/BTCUSD_1m_10k.csv')
# executed after each test
def tearDown(self):
pass
###############
#### tests ####
###############
def _AddCredentials(self):
for key in INDICATOR_DICT.keys():
ret, time_5 = timeit(AddIndicator, False, self.df_1k, key, key, 5)
ret, time_50 = timeit(AddIndicator, False, self.df_1k, key, key, 50)
ret, time_500 = timeit(AddIndicator, False, self.df_1k, key, key, 500)
print("Times for calculating "+key+" \n on a dataframe with 1k rows:")
print("Period 5: ", round(time_5, 4), "| Period 50: ", round(time_50, 4), \
"| Period 500: ", round(time_500, 4))
ret, time_5 = timeit(AddIndicator, False, self.df_10k, key, key, 5)
ret, time_50 = timeit(AddIndicator, False, self.df_10k, key, key, 50)
ret, time_500 = timeit(AddIndicator, False, self.df_10k, key, key, 500)
print(" and on a dataframe with 10k rows:")
print("Period 5: ", round(time_5, 4), "| Period 50: ", round(time_50, 4), \
"| Period 500: ", round(time_500, 4), "\n")
if __name__ == "__main__":
unittest.main()
| 31.75
| 78
| 0.667042
|
b2ce903693086bbc0a5bc6d729f17f9aeb1686ee
| 2,872
|
py
|
Python
|
WebKit/Examples/Colors.py
|
PeaceWorksTechnologySolutions/w4py
|
74f5a03a63f1a93563502b908474aefaae2abda2
|
[
"MIT"
] | 18
|
2016-08-01T20:15:59.000Z
|
2019-12-24T16:00:03.000Z
|
WebKit/Examples/Colors.py
|
WebwareForPython/w4py
|
bba08f5974d49f5da7e88abe3eeda1037d0824a3
|
[
"MIT"
] | 6
|
2016-09-13T05:48:45.000Z
|
2020-01-09T18:29:12.000Z
|
WebKit/Examples/Colors.py
|
WebwareForPython/w4py
|
bba08f5974d49f5da7e88abe3eeda1037d0824a3
|
[
"MIT"
] | 6
|
2016-09-16T14:32:29.000Z
|
2020-01-03T18:52:16.000Z
|
import os
from ExamplePage import ExamplePage
# Helper functions
gamma = 2.2 # an approximation for today's CRTs
def brightness(r, g, b):
"""Calculate brightness of RGB color."""
r, g, b = map(lambda x: x/255.0, (r, g, b))
return (0.3*r**gamma + 0.6*g**gamma + 0.1*b**gamma)**(1/gamma)
def textcolor(r, g, b):
"""Determine a good text font color for high contrast."""
return 'white' if brightness(r, g, b) < 0.5 else 'black'
def RGBToHTMLColor(r, g, b):
"""Convert r, g, b to #RRGGBB."""
return '#%02X%02X%02X' % (r, g, b)
def HTMLColorToRGB(h):
"""Convert #RRGGBB to r, g, b."""
h = h.strip()
if h.startswith('#'):
h = h[1:]
h = h[:2], h[2:4], h[4:]
return map(lambda x: int(x, 16), h)
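# Quick illustrative checks of the helpers above (comment only; the exact brightness values
# depend on the gamma approximation chosen):
#   RGBToHTMLColor(255, 153, 0)   -> '#FF9900'
#   HTMLColorToRGB('#FF9900')     -> [255, 153, 0]
#   textcolor(0, 0, 0)            -> 'white'  (dark background gets light text)
#   textcolor(255, 255, 255)      -> 'black'  (light background gets dark text)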
# Prepare HTML for color table
numSteps = 6 # this gives the "web-safe" color palette
steps = map(lambda x: 255.0*x/(numSteps-1), range(numSteps))
colorTable = [
'<p>Click on one of the colors below to set the background color.</p>',
'<table style="margin-left:auto;margin-right:auto">']
for r in steps:
for g in steps:
colorTable.append('<tr>\n')
for b in steps:
color = RGBToHTMLColor(r, g, b)
colorTable.append('<td style="background-color:%s;color:%s"'
' onclick="document.forms[0].elements[0].value=\'%s\';'
'document.forms[0].submit()">%s</td>\n'
% (color, textcolor(r, g, b), color, color))
colorTable.append('</tr>\n')
colorTable.append('</table>')
colorTable = ''.join(colorTable)
class Colors(ExamplePage):
"""Colors demo.
This class is a good example of caching. The color table that
this servlet creates never changes, so the servlet caches this
in the global colorTable variable. The original version of this
example did no caching and was 12 times slower.
"""
def htBodyArgs(self):
"""Write the attributes of the body element.
Overridden in order to throw in the custom background color
that the user can specify in our form.
"""
self._bgcolor = self.request().field('bgcolor', '#FFFFFF')
try:
r, g, b = HTMLColorToRGB(self._bgcolor)
self._color = textcolor(r, g, b)
except Exception:
self._color = 'black'
return 'style="color:black;background-color:%s"' % (self._bgcolor,)
def writeContent(self):
"""Write the actual content of the page."""
self.write('''
<div style="text-align:center;color:%s">
<h3>Color Table Demo</h3>
<form action="Colors" method="post">
Background color: <input type="text" name="bgcolor" value="%s">
<input type="submit" value="Go">
</form>
%s
</div>
''' % (self._color, self._bgcolor, colorTable))
| 33.011494
| 79
| 0.583217
|
b5830d63e8d2d399a5132201e3a90c8ca1847820
| 1,498
|
py
|
Python
|
compiler/tests/14_replica_bitline_test.py
|
xinjie0831/OpenRAM
|
76e2ab88fe4097ffa51e0387ba72165bcda49e68
|
[
"BSD-3-Clause"
] | null | null | null |
compiler/tests/14_replica_bitline_test.py
|
xinjie0831/OpenRAM
|
76e2ab88fe4097ffa51e0387ba72165bcda49e68
|
[
"BSD-3-Clause"
] | null | null | null |
compiler/tests/14_replica_bitline_test.py
|
xinjie0831/OpenRAM
|
76e2ab88fe4097ffa51e0387ba72165bcda49e68
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# See LICENSE for licensing information.
#
#Copyright (c) 2016-2019 Regents of the University of California and The Board
#of Regents for the Oklahoma Agricultural and Mechanical College
#(acting for and on behalf of Oklahoma State University)
#All rights reserved.
#
"""
Run a test on a replica bitline
"""
import unittest
from testutils import header,openram_test
import sys,os
sys.path.append(os.path.join(sys.path[0],".."))
import globals
from globals import OPTS
from sram_factory import factory
import debug
class replica_bitline_test(openram_test):
def runTest(self):
globals.init_openram("config_{0}".format(OPTS.tech_name))
# check replica bitline in single port
stages=4
fanout=4
rows=13
debug.info(2, "Testing RBL with {0} FO4 stages, {1} rows".format(stages,rows))
a = factory.create(module_type="replica_bitline", delay_fanout_list=stages*[fanout], bitcell_loads=rows)
self.local_check(a)
stages=8
rows=100
debug.info(2, "Testing RBL with {0} FO4 stages, {1} rows".format(stages,rows))
a = factory.create(module_type="replica_bitline", delay_fanout_list=stages*[fanout], bitcell_loads=rows)
self.local_check(a)
globals.end_openram()
# run the test from the command line
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main()
| 30.571429
| 112
| 0.691589
|
aa311834660765b1aefa55ccba5bd3a0178813b2
| 9,074
|
py
|
Python
|
main_2.py
|
tuanad121/postfilt_gan
|
7efefc45d6b6cc6e099afb94bf0c40e4e7924ef2
|
[
"MIT"
] | null | null | null |
main_2.py
|
tuanad121/postfilt_gan
|
7efefc45d6b6cc6e099afb94bf0c40e4e7924ef2
|
[
"MIT"
] | null | null | null |
main_2.py
|
tuanad121/postfilt_gan
|
7efefc45d6b6cc6e099afb94bf0c40e4e7924ef2
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import argparse
import os
import random
#import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
from data_loader import get_loader, prepare_normalizer
from utils import plot_feats, read_binary_file
from models import define_netD, define_netG
# variant that does not crop the input before passing it to the discriminator
def train(netD, netG, data_loader, opt):
label = torch.FloatTensor(1)
label = Variable(label, requires_grad=False)
real_label = 1.0
fake_label = 0.0
# cost criterion
# criterion = nn.BCELoss() # normal gan
criterion = nn.MSELoss() # lsgan
if opt.cuda:
netD.cuda()
netG.cuda()
criterion.cuda()
# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=0.0001, betas=(0.5, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=0.0002, betas=(0.5, 0.999))
print('batch size =', opt.batchSize)
for epoch in range(opt.niter):
# store mini-batch data in list
for i, (real_data, pred_data) in enumerate(data_loader):
# print(real_data.shape, pred_data.shape)
#################################
# (1) Updata D network: maximize log(D(x)) + log(1 - D(G(z)))
#################################
# clear the gradient buffers
netD.zero_grad()
# crop the tensor to fixed size
# label = torch.full((real_data.size(0),), real_label)
noise = torch.FloatTensor(real_data.size()).normal_(0,1)
if opt.cuda:
pred_data = pred_data.cuda()
# label = label.cuda()
real_data = real_data.cuda()
noise = noise.cuda()
pred_data = Variable(pred_data)
real_data = Variable(real_data)
# train with real
output = netD(real_data)
# errD_real = criterion(output, label)
errD_real = torch.mean((real_label - output) ** 2)
errD_real.backward()
D_x = output.data.mean()
# train with fake
fake = netG(noise, pred_data)
# add the residual to the tts predicted data
fake = fake + pred_data
# crop the tensor to fixed size
output = netD(fake.detach())
# errD_fake = criterion(output, label)
errD_fake = torch.mean((fake_label - output) ** 2)
errD_fake.backward()
D_G_z1 = output.data.mean()
errD = (errD_real.item() + errD_fake.item())
# update the discriminator on mini batch
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
############################
netG.zero_grad()
output = netD(fake)
# errG = criterion(output, label)
errG = torch.mean((real_label - output) ** 2)
if 0:
errRes = nn.MSELoss()(fake, real_data)
g_loss = errRes + errG
else:
g_loss = errG
g_loss.backward()
D_G_z2 = output.data.mean()
optimizerG.step()
print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
%(epoch, opt.niter, i, len(data_loader),
errD, errG.item(), D_x, D_G_z1, D_G_z2))
if (epoch % 20 == 0) and (epoch != 0):
fake = netG(noise, pred_data)
fake = fake + pred_data
fake = fake.data.cpu().numpy()
fake = fake.reshape(opt.mgcDim, -1)
pred = pred_data.data.cpu().numpy()
pred = pred.reshape(opt.mgcDim, -1)
real = real_data.cpu().numpy()
real = real.reshape(opt.mgcDim, -1)
plot_feats(real, pred, fake, epoch, i, opt.outf)
del errD_fake, errD_real, errG, real_data, pred_data,
del noise, fake, output, errD
torch.cuda.empty_cache()
        # do checkpointing
if (epoch % 40 == 0) and (epoch != 0):
torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %(opt.outf, epoch))
torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %(opt.outf, epoch))
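# Note (illustrative comment, not part of the original training script): the manual
# least-squares terms inside train() implement the LSGAN objective and are equivalent to
# the unused `criterion = nn.MSELoss()` evaluated against constant-label targets, e.g.
#   torch.mean((1.0 - output) ** 2) == nn.MSELoss()(output, torch.ones_like(output))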
def test(netG, opt):
assert opt.netG != ''
test_dir = opt.testdata_dir
for f in os.listdir(test_dir):
fname, ext = os.path.splitext(f)
if ext == '.cmp':
print(fname)
cmp_file = os.path.join(test_dir, f)
ac_data = read_binary_file(cmp_file, dim=47)
ac_data = torch.FloatTensor(ac_data)
            noise = torch.FloatTensor(ac_data.size(0), opt.nz)
if opt.cuda:
ac_data, noise = ac_data.cuda(), noise.cuda()
ac_data = Variable(ac_data)
noise = Variable(noise)
noise.data.normal_(0, 1)
generated_pulses = netG(noise, ac_data)
generated_pulses = generated_pulses.data.cpu().numpy()
generated_pulses = generated_pulses.reshape(ac_data.size(0), -1)
out_file = os.path.join(test_dir, fname + '.pls')
with open(out_file, 'wb') as fid:
generated_pulses.tofile(fid)
if __name__ == "__main__":
# parse the arguments
parser = argparse.ArgumentParser()
parser.add_argument('--voiceName', required=True, help='nick | jenny ')
parser.add_argument('--mode', required=True, type=str, help='train | test')
parser.add_argument('--xFilesList', required=True, help='path to input files list')
parser.add_argument('--yFilesList', required=True, help='path to output files list')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
parser.add_argument('--mgcDim', type=int, default=40, help='mel-cepstrum dimension')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--niter', type=int, default=2000, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0001, help='learning rate, default=0.0001')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.9')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('--testdata_dir', type=str, help='path to test data')
opt = parser.parse_args()
print(opt)
print(torch.__version__)
device = torch.device("cuda:0" if opt.cuda else "cpu")
# prepare the data loader
x_files_list_file = opt.xFilesList
y_files_list_file = opt.yFilesList
in_dim = opt.mgcDim
out_dim = opt.mgcDim
with open(x_files_list_file, 'r') as fid:
x_files_list = [l.strip() for l in fid.readlines()]
with open(y_files_list_file, 'r') as fid:
y_files_list = [l.strip() for l in fid.readlines()]
x_normalizer = prepare_normalizer(x_files_list, in_dim)
y_normalizer = prepare_normalizer(y_files_list, out_dim)
data_loader = get_loader(x_files_list, y_files_list,
in_dim, out_dim, opt.batchSize, False, 10, x_normalizer, y_normalizer)
# prepare the output directories
try:
os.makedirs(opt.outf)
os.makedirs(os.path.join(opt.outf, 'figures'))
except OSError:
pass
# if manual seed is not provide then pick one randomly
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print('Random Seed: ', opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
if opt.cuda:
torch.cuda.manual_seed_all(opt.manualSeed)
cudnn.enabled = False
cudnn.benchmark = False
# define the generator
netG = define_netG(in_ch=2, device=device)
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
# define the discriminator
netD = define_netD(device=device)
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
if opt.mode == 'train':
train(netD, netG, data_loader, opt)
elif opt.mode == 'test':
test(netG, opt)
else:
print('Mode must be either train or test only')
| 38.777778
| 100
| 0.58618
|
f946750ed95ffe7beab1651721ddeb11c0417d36
| 290
|
py
|
Python
|
codeforces/1331d.py
|
btjanaka/competitive-programming-solutions
|
e3df47c18451802b8521ebe61ca71ee348e5ced7
|
[
"MIT"
] | 3
|
2020-06-25T21:04:02.000Z
|
2021-05-12T03:33:19.000Z
|
codeforces/1331d.py
|
btjanaka/competitive-programming-solutions
|
e3df47c18451802b8521ebe61ca71ee348e5ced7
|
[
"MIT"
] | null | null | null |
codeforces/1331d.py
|
btjanaka/competitive-programming-solutions
|
e3df47c18451802b8521ebe61ca71ee348e5ced7
|
[
"MIT"
] | 1
|
2020-06-25T21:04:06.000Z
|
2020-06-25T21:04:06.000Z
|
# Author: btjanaka (Bryon Tjanaka)
# Problem: (CodeForces) 1331d
# Title: Again?
# Link: https://codeforces.com/contest/1331/problem/D
# Idea: Check if the number is odd or even :p
# Difficulty: easy
# Tags: april-fools
import sys
for line in sys.stdin:
print(int(line, base=16) % 2)
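# Illustrative aside: the parity of a hexadecimal number is decided by its last digit alone,
# e.g. int("1A3F", 16) % 2 == 1 == int("F", 16) % 2, so converting the whole line as above
# is simply the most direct of several equivalent approaches.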
| 22.307692
| 53
| 0.703448
|
59ed6583308ab34b301f9a5c8b63d99cf5e59ed8
| 8,882
|
py
|
Python
|
elections/migrations/0049_auto_20190510_0736.py
|
zinaukarenku/zkr-platform
|
8daf7d1206c482f1f8e0bcd54d4fde783e568774
|
[
"Apache-2.0"
] | 2
|
2018-11-16T21:45:17.000Z
|
2019-02-03T19:55:46.000Z
|
elections/migrations/0049_auto_20190510_0736.py
|
zinaukarenku/zkr-platform
|
8daf7d1206c482f1f8e0bcd54d4fde783e568774
|
[
"Apache-2.0"
] | 13
|
2018-08-17T19:12:11.000Z
|
2022-03-11T23:27:41.000Z
|
elections/migrations/0049_auto_20190510_0736.py
|
zinaukarenku/zkr-platform
|
8daf7d1206c482f1f8e0bcd54d4fde783e568774
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-05-10 07:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('elections', '0048_presidentcandidatebiography'),
]
operations = [
migrations.CreateModel(
name='EuroParliamentCandidateBiography',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio_period', models.CharField(blank=True, max_length=15, verbose_name='Periodas')),
('bio_text', models.TextField(blank=True, verbose_name='Biografijos įrašas')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Sukurta')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Atnaujinta')),
],
options={
'verbose_name': 'Biografijos įrašas',
'verbose_name_plural': 'Biografijos įrašai',
'ordering': ['-created_at'],
},
),
migrations.CreateModel(
name='EuroParliamentCandidatePoliticalExperience',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.CharField(blank=True, max_length=100, verbose_name='Pareigos')),
('office', models.CharField(blank=True, max_length=100, verbose_name='Institucija')),
('start', models.DateField(blank=True, verbose_name='Pereigų pradžia')),
('end', models.DateField(blank=True, verbose_name='Pereigų pabaiga')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Sukurta')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Atnaujinta')),
],
options={
'verbose_name': 'Politinė patirties įrašas',
'verbose_name_plural': 'Politinės patirties įrašai',
'ordering': ['created_at'],
},
),
migrations.CreateModel(
name='EuroParliamentCandidateWorkExperience',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.CharField(blank=True, max_length=100, verbose_name='Pareigos')),
('office', models.CharField(blank=True, max_length=100, verbose_name='Darbovietė')),
('start', models.DateField(blank=True, verbose_name='Pereigų pradžia')),
('end', models.DateField(blank=True, verbose_name='Pereigų pabaiga')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Sukurta')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Atnaujinta')),
],
options={
'verbose_name': 'Darbo patirties įrašas',
'verbose_name_plural': 'Darbo patirties įrašai',
'ordering': ['created_at'],
},
),
migrations.CreateModel(
name='PresidentCandidatePoliticalExperience',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.CharField(blank=True, max_length=100, verbose_name='Pareigos')),
('office', models.CharField(blank=True, max_length=100, verbose_name='Institucija')),
('start', models.DateField(blank=True, null=True, verbose_name='Pereigų pradžia')),
('end', models.DateField(blank=True, null=True, verbose_name='Pereigų pabaiga')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Sukurta')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Atnaujinta')),
],
options={
'verbose_name': 'Politinė patirties įrašas',
'verbose_name_plural': 'Politinės patirties įrašai',
'ordering': ['created_at'],
},
),
migrations.CreateModel(
name='PresidentCandidateWorkExperience',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.CharField(blank=True, max_length=100, verbose_name='Pareigos')),
('office', models.CharField(blank=True, max_length=100, verbose_name='Darbovietė')),
('start', models.DateField(blank=True, null=True, verbose_name='Pereigų pradžia')),
('end', models.DateField(blank=True, null=True, verbose_name='Pereigų pabaiga')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Sukurta')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Atnaujinta')),
],
options={
'verbose_name': 'Darbo patirties įrašas',
'verbose_name_plural': 'Darbo patirties įrašai',
'ordering': ['created_at'],
},
),
migrations.AddField(
model_name='europarliamentcandidate',
name='birth_date',
field=models.DateField(blank=True, null=True, verbose_name='Gimimo data'),
),
migrations.AddField(
model_name='europarliamentcandidate',
name='birth_place',
field=models.CharField(blank=True, max_length=100, verbose_name='Gimimo vieta'),
),
migrations.AddField(
model_name='europarliamentcandidate',
name='hobbies',
field=models.CharField(blank=True, max_length=500, verbose_name='Pomėgiai'),
),
migrations.AddField(
model_name='europarliamentcandidate',
name='languages',
field=models.CharField(blank=True, max_length=300, verbose_name='Užsienio kalbos'),
),
migrations.AddField(
model_name='presidentcandidate',
name='birth_date',
field=models.DateField(blank=True, null=True, verbose_name='Gimimo data'),
),
migrations.AddField(
model_name='presidentcandidate',
name='birth_place',
field=models.CharField(blank=True, max_length=100, verbose_name='Gimimo vieta'),
),
migrations.AddField(
model_name='presidentcandidate',
name='email',
field=models.EmailField(blank=True, max_length=254, null=True, verbose_name='Kandidato el. paštas'),
),
migrations.AddField(
model_name='presidentcandidate',
name='hobbies',
field=models.CharField(blank=True, max_length=500, verbose_name='Pomėgiai'),
),
migrations.AddField(
model_name='presidentcandidate',
name='languages',
field=models.CharField(blank=True, max_length=300, verbose_name='Užsienio kalbos'),
),
migrations.AlterField(
model_name='presidentcandidate',
name='party',
field=models.CharField(blank=True, help_text='Jeigu kandidatas - be partijos, nurodykite, kad savarankiškas', max_length=280, verbose_name='Partija'),
),
migrations.AddField(
model_name='presidentcandidateworkexperience',
name='candidate',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='work_experience', to='elections.PresidentCandidate'),
),
migrations.AddField(
model_name='presidentcandidatepoliticalexperience',
name='candidate',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='political_experience', to='elections.PresidentCandidate'),
),
migrations.AddField(
model_name='europarliamentcandidateworkexperience',
name='candidate',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='work_experience', to='elections.EuroParliamentCandidate'),
),
migrations.AddField(
model_name='europarliamentcandidatepoliticalexperience',
name='candidate',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='political_experience', to='elections.EuroParliamentCandidate'),
),
migrations.AddField(
model_name='europarliamentcandidatebiography',
name='candidate',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='biographies', to='elections.EuroParliamentCandidate'),
),
]
| 51.34104
| 162
| 0.612362
|
3d8e146724c283fbcc618cd8d95dba7d8fb9c334
| 3,491
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/legionellaworsleiensis.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/legionellaworsleiensis.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/legionellaworsleiensis.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Legionella worsleiensis.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def LegionellaWorsleiensis(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Legionella worsleiensis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
    cache_path: str = "graphs/string"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Legionella worsleiensis graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="LegionellaWorsleiensis",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
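# Illustrative usage sketch (assumption, not part of the upstream module): how
# the retrieval function above might be invoked; the chosen version string is
# one of the documented options and the print is only a sanity check.
if __name__ == "__main__":
    graph = LegionellaWorsleiensis(directed=False, version="links.v11.5")
    print(graph)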
| 33.247619
| 223
| 0.679461
|
0b908d3be355909f4a1cc907c58124ca0156f389
| 926
|
py
|
Python
|
app/user/views.py
|
CTKogstrom/drf_app_test
|
78afab79bcbc6b1c80821ea1997241907ed79267
|
[
"MIT"
] | null | null | null |
app/user/views.py
|
CTKogstrom/drf_app_test
|
78afab79bcbc6b1c80821ea1997241907ed79267
|
[
"MIT"
] | null | null | null |
app/user/views.py
|
CTKogstrom/drf_app_test
|
78afab79bcbc6b1c80821ea1997241907ed79267
|
[
"MIT"
] | null | null | null |
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from user.serializers import UserSerializer, AuthTokenSerializer
from rest_framework.settings import api_settings
class CreateUserView(generics.CreateAPIView):
"""Create a new user in the system"""
serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
"""Create a new auth token for user"""
serializer_class = AuthTokenSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
"""Manage the authenticated user"""
serializer_class = UserSerializer
authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get_object(self):
"""Retrieve and return authentication user"""
return self.request.user
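# Illustrative wiring sketch (assumption, not part of the upstream app): these
# views are normally exposed through URL patterns similar to the ones below;
# the route paths and names are hypothetical.
from django.urls import path  # assumed available alongside rest_framework
example_urlpatterns = [
    path('create/', CreateUserView.as_view(), name='create'),
    path('token/', CreateTokenView.as_view(), name='token'),
    path('me/', ManageUserView.as_view(), name='me'),
]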
| 34.296296
| 66
| 0.789417
|
1d9da75133fe227f683b04f8a381f4a0dc849691
| 1,399
|
py
|
Python
|
vsts/vsts/notification/v4_0/models/notification_event_publisher.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
vsts/vsts/notification/v4_0/models/notification_event_publisher.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
vsts/vsts/notification/v4_0/models/notification_event_publisher.py
|
kenkuo/azure-devops-python-api
|
9e920bd25e938fa89ff7f60153e5b9e113ca839d
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class NotificationEventPublisher(Model):
"""NotificationEventPublisher.
:param id:
:type id: str
:param subscription_management_info:
:type subscription_management_info: :class:`SubscriptionManagement <notification.v4_0.models.SubscriptionManagement>`
:param url:
:type url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'subscription_management_info': {'key': 'subscriptionManagementInfo', 'type': 'SubscriptionManagement'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, id=None, subscription_management_info=None, url=None):
super(NotificationEventPublisher, self).__init__()
self.id = id
self.subscription_management_info = subscription_management_info
self.url = url
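if __name__ == "__main__":
    # Illustrative usage sketch (assumption, values are hypothetical): construct
    # the generated model and read its plain attributes back.
    publisher = NotificationEventPublisher(id="example-publisher",
                                           url="https://example.test/publisher")
    print(publisher.id, publisher.url)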
| 41.147059
| 121
| 0.56183
|
c6944cc60fa7aeec91bebea7bbfd55e66c34421f
| 1,641
|
py
|
Python
|
corded/http/route.py
|
an-dyy/Corded
|
ff34faca51ac2ca039adfc2fadd7b3f274ee7353
|
[
"MIT"
] | null | null | null |
corded/http/route.py
|
an-dyy/Corded
|
ff34faca51ac2ca039adfc2fadd7b3f274ee7353
|
[
"MIT"
] | null | null | null |
corded/http/route.py
|
an-dyy/Corded
|
ff34faca51ac2ca039adfc2fadd7b3f274ee7353
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2021 vcokltfre
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
class Route:
def __init__(self, path: str, **params):
"""Represents a Discord API route, used for ratelimit handling.
Args:
path (str): The API path to use.
params: The parameters to format the path with.
"""
self.route = path.format(**params)
# Key ratelimit handling parameters
guild_id = params.get("guild_id", 0)
channel_id = params.get("channel_id", 0)
webhook_id = params.get("webhook_id", 0)
self.bucket = f"{guild_id}-{channel_id}-{webhook_id}::{path}"
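if __name__ == "__main__":
    # Illustrative usage sketch (assumption, the path and id are hypothetical):
    # format a channel-scoped route and inspect the ratelimit bucket it produces.
    route = Route("/channels/{channel_id}/messages", channel_id=1234)
    print(route.route)   # /channels/1234/messages
    print(route.bucket)  # 0-1234-0::/channels/{channel_id}/messages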
| 38.162791
| 78
| 0.728215
|
dbc015332a74e7e710965ff6b3cfcae5a8206cb2
| 6,025
|
py
|
Python
|
countries/views.py
|
baikov/dj-cc-dep
|
5ae66b1a638c040e6497d56c2a9299fac335d608
|
[
"MIT"
] | null | null | null |
countries/views.py
|
baikov/dj-cc-dep
|
5ae66b1a638c040e6497d56c2a9299fac335d608
|
[
"MIT"
] | 3
|
2021-11-02T21:08:31.000Z
|
2022-02-04T21:07:13.000Z
|
countries/views.py
|
baikov/dj-cc-dep
|
5ae66b1a638c040e6497d56c2a9299fac335d608
|
[
"MIT"
] | null | null | null |
# from django.http import HttpResponseRedirect
# from django.shortcuts import get_object_or_404
# from re import template
from django.shortcuts import render
from django.http import Http404
# from django.urls import reverse
from django.views.generic import ListView, DetailView
# from django.views.generic.edit import UpdateView
from django.db.models import Count
from .models import Color, Country, Flag, HistoricalFlag, BorderCountry
def index(request):
homepage_countries = Country.objects.order_by('-name')[:5]
homepage_flags = Flag.objects.order_by('-title')[:5]
context = {'countries': homepage_countries, 'flags': homepage_flags}
return render(request, 'countries/index.html', context)
class CountryListView(ListView):
model = Country
template_name = 'countries/countries-list.html'
context_object_name = 'countries'
paginate_by = 20
def get_queryset(self):
countries = Country.objects.all()
# limit = 40
if not self.request.user.is_superuser:
countries = countries.published()
return countries.order_by('-name') # [:limit]
class FlagListView(ListView):
model = Flag
template_name = 'countries/flags-list.html'
context_object_name = 'flags'
paginate_by = 20
def get_queryset(self):
flags = Flag.objects.all()
if not self.request.user.is_superuser:
flags = flags.published()
return flags.order_by('country__name')
class CountryDetailView(DetailView):
model = Country
template_name = 'countries/country-detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['author'] = 'Sart'
context['flags'] = self.object.flags.all()
if not self.request.user.is_superuser and not self.object.is_published:
raise Http404
return context
class FlagDetailView(DetailView):
model = Flag
template_name = 'countries/flag-detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
border_countries = []
same_colors = []
# Get all historical flags
context['historical'] = HistoricalFlag.objects.filter(
country__iso_code_a2=self.object.country.iso_code_a2).order_by('from_year')
# Get all border countries
neighbours = BorderCountry.objects.filter(country=self.object.country)
for row in neighbours:
border_countries.append(row.border_country)
context['neighbours'] = neighbours
# Get all flag colors
colors = Color.objects.filter(flags=self.object.id)
context['colors'] = colors
# Get flags with colors from same color groups
if colors:
for row in colors:
same_colors.append(row.color_group)
same_color_flags = Flag.objects.filter(colors__color_group=same_colors[0]).exclude(id=self.object.id)
for i in range(1, len(colors)):
same_color_flags = same_color_flags.filter(colors__color_group=same_colors[i])
context['same_flags'] = same_color_flags
# Get flags of border countries
border_flags = Flag.objects.filter(country__in=border_countries)
context['border_flags'] = border_flags
# Set width and height for Download img block
height, width = self.object.proportion.split(':')
context['widths'] = {
# 'w20': {'width': 20, 'height': int(20/int(width)*int(height))},
'w40': {'width': 40, 'height': int(40/int(width)*int(height))},
'w80': {'width': 80, 'height': int(80/int(width)*int(height))},
'w160': {'width': 160, 'height': int(160/int(width)*int(height))},
'w320': {'width': 320, 'height': int(320/int(width)*int(height))},
'w640': {'width': 640, 'height': int(640/int(width)*int(height))},
'w1280': {'width': 1280, 'height': int(1280/int(width)*int(height))},
'w2560': {'width': 2560, 'height': int(2560/int(width)*int(height))},
}
context['heights'] = {
'h20': {'width': int(20/int(height)*int(width)), 'height': 20},
'h24': {'width': int(24/int(height)*int(width)), 'height': 24},
'h40': {'width': int(40/int(height)*int(width)), 'height': 40},
'h60': {'width': int(60/int(height)*int(width)), 'height': 60},
'h80': {'width': int(80/int(height)*int(width)), 'height': 80},
'h120': {'width': int(120/int(height)*int(width)), 'height': 120},
'h240': {'width': int(240/int(height)*int(width)), 'height': 240},
}
context['country'] = Country.objects.get(id__exact=self.object.country.id)
return context
class ColorsListView(ListView):
model = Color
template_name = 'countries/colors-list.html'
context_object_name = 'colors'
def get_queryset(self):
colors = Color.objects.all().order_by('color_group')
return colors
def colors_group(request, color_group):
template_name = 'countries/colors-group.html'
# flags = Flag.objects.filter(colors__color_group=color_group)
url_color = color_group.split('-')
# flags = Flag.objects.filter(colors__color_group__in=url_color).distinct()
flags = Flag.objects.all()
for color in url_color:
flags = flags.filter(colors__color_group=color)
colors = Color.objects.filter(color_group__in=url_color).distinct('color_group')
context = {'flags': flags, 'colors': colors}
if flags:
return render(request, template_name, context)
else:
raise Http404
def colors_count(request, color_count):
template_name = 'countries/colors-count.html'
flags = Flag.objects.annotate(num_colors=Count('colors')).filter(num_colors=color_count)
context = {'flags': flags, 'color_count': color_count}
if flags:
return render(request, template_name, context)
else:
raise Http404
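# Illustrative routing sketch (assumption, not part of the upstream app): one
# plausible urls.py wiring for the views above; the paths, names and pk-based
# detail lookups are hypothetical.
from django.urls import path  # assumed available in this Django project
example_urlpatterns = [
    path('', index, name='index'),
    path('countries/', CountryListView.as_view(), name='countries-list'),
    path('countries/<int:pk>/', CountryDetailView.as_view(), name='country-detail'),
    path('flags/', FlagListView.as_view(), name='flags-list'),
    path('flags/<int:pk>/', FlagDetailView.as_view(), name='flag-detail'),
    path('colors/', ColorsListView.as_view(), name='colors-list'),
    path('colors/group/<str:color_group>/', colors_group, name='colors-group'),
    path('colors/count/<int:color_count>/', colors_count, name='colors-count'),
]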
| 37.893082
| 113
| 0.646805
|
fa57ded8f784fbfdbc9caea938ff4a7fdb3b9e93
| 7,555
|
py
|
Python
|
cogs/stats.py
|
fennr/Samuro-HotsBot
|
81e7a65a08d50602442094e52d6d2e405c98ac1a
|
[
"Apache-2.0"
] | 1
|
2022-03-26T11:28:00.000Z
|
2022-03-26T11:28:00.000Z
|
cogs/stats.py
|
fennr/Samuro-HotsBot
|
81e7a65a08d50602442094e52d6d2e405c98ac1a
|
[
"Apache-2.0"
] | null | null | null |
cogs/stats.py
|
fennr/Samuro-HotsBot
|
81e7a65a08d50602442094e52d6d2e405c98ac1a
|
[
"Apache-2.0"
] | null | null | null |
""""
Samuro Bot
Автор: *fennr*
github: https://github.com/fennr/Samuro-HotsBot
Бот для сообществ по игре Heroes of the Storm
"""
import os
from discord import Embed, Member, File
from discord.ext import commands
from utils import check
from enum import Enum
import openpyxl
from openpyxl.styles import Font
from openpyxl.worksheet.dimensions import ColumnDimension, DimensionHolder
from openpyxl.utils import get_column_letter
from utils.classes import Const
from utils import exceptions, library
from utils.classes.Const import config
class League(Enum):
Bronze = "Бронза"
Silver = "Серебро"
Gold = "Золото"
Platinum = "Платина"
Diamond = "Алмаз"
Master = "Мастер"
Grandmaster = "Грандмастер"
class Stats(commands.Cog, name="Stats"):
"""
— Просмотр таблиц лидеров
"""
def __init__(self, bot):
self.bot = bot
@commands.group(name="top")
async def top(self, ctx):
if ctx.invoked_subcommand is None:
pass
@top.command(name="excel")
@check.is_samuro_dev()
async def top_excel(self, ctx):
headings = ['id', 'guild_id', 'Победы', 'Поражения', 'Очки', 'Батлтег']
filepath = 'UserStats.xlsx'
con, cur = library.get.con_cur()
guild_id = library.get.guild_id(ctx)
select = Const.selects.US
cur.execute(select, (guild_id, ))
data = cur.fetchall()
cur.close()
        # Create the workbook and worksheet
wb = openpyxl.Workbook()
ws = wb.active
# Spreadsheet row and column indexes start at 1
# so we use "start = 1" in enumerate so
# we don't need to add 1 to the indexes.
for colno, heading in enumerate(headings, start=1):
c = ws.cell(row=1, column=colno)
c.font = Font(bold=True)
c.value = heading
# This time we use "start = 2" to skip the heading row.
for rowno, row in enumerate(data, start=2):
for colno, cell_value in enumerate(row, start=1):
ws.cell(row=rowno, column=colno).value = cell_value
        # Align the column widths
dim_holder = DimensionHolder(worksheet=ws)
for col in range(ws.min_column, ws.max_column + 1):
dim_holder[get_column_letter(col)] = ColumnDimension(ws, min=col, max=col, width=20)
ws.column_dimensions = dim_holder
        # save, send, and delete the file
wb.save(filepath)
await ctx.author.send(file=File(filepath))
os.remove(filepath)
await ctx.send("Файл отправлен личным сообщением")
@top.command(name="mmr")
async def top_mmr(self, ctx, league_type="Грандмастер", count=5):
"""
        - MMR leaders
"""
test = library.get.league(league_type.capitalize())
league = League(test)
print(league, league.name, league.value)
con, cur = library.get.con_cur()
guild_id = library.get.guild_id(ctx)
select = Const.selects.PlayersLeague
cur.execute(select, (league.name, count))
records = cur.fetchall()
embed = Embed(
title=f"Таблица лиги {league.value}",
color=config.info
)
value = ""
for i, record in enumerate(records):
value += f"{i + 1}. {library.get.mention(record.id)} (mmr: {record.mmr})\n"
if len(value) > 0:
embed.add_field(
name=f"Топ {count} игроков",
value=value
)
await ctx.send(embed=embed)
else:
await ctx.send("Нет игроков выбранной лиги")
@top.command(name="wins")
async def top_wins(self, ctx, count=10):
"""
        - Leaders by number of wins
"""
con, cur = library.get.con_cur()
guild_id = library.get.guild_id(ctx)
select = Const.selects.USWins
cur.execute(select, (guild_id, count))
records = cur.fetchall()
embed = Embed(
title=f"Таблица лидеров",
color=config.info
)
value = ""
for i, record in enumerate(records):
value += f"{i + 1}. {library.get.mention(record.id)} — {record.win}\n"
embed.add_field(
name=f"Топ {count} игроков по числу побед",
value=value
)
await ctx.send(embed=embed)
@top.command(name="points")
async def top_points(self, ctx, count=10):
"""
        - Leaders by points earned
"""
con, cur = library.get.con_cur()
guild_id = library.get.guild_id(ctx)
select = Const.selects.USPoints
cur.execute(select, (guild_id, count, ))
records = cur.fetchall()
embed = Embed(
title=f"Таблица лидеров",
color=config.info
)
value = ""
        # max_indent = len(max([self.bot.get_user(x.id).name for x in records])) + 1 # get the max name length
for i, record in enumerate(records):
value += f"{i+1}. {library.get.mention(record.id)} — {record.points}\n"
embed.add_field(
name=f"Топ {count} игроков по числу баллов",
value=value
)
await ctx.send(embed=embed)
@top.command(name="remove")
    @commands.check_any(commands.has_role(703884580041785344), # Creator
                        commands.has_role(703884637755408466), # Admin
                        commands.has_role(711230509967212564), # Senior moderator
                        commands.has_role(711230315540250624), # Moderator
commands.has_role(959144584720580618), # Samuro_dev
commands.has_role(880865537058545686), # test
)
async def points_remove(self, ctx, user, count=0):
con, cur = library.get.con_cur()
guild_id = library.get.guild_id(ctx)
user_id = library.get.user_id(user)
select = Const.selects.USIdGuild
cur.execute(select, (user_id, guild_id))
record = cur.fetchone()
stats = library.get.stats(record)
if stats.points < count:
await ctx.send(f"Недостаточно баллов\n"
f"Баллов у {library.get.mention(stats.id)}: {stats.points}")
else:
stats.points -= count
update = Const.updates.USPoints
cur.execute(update, (stats.points, stats.id, stats.guild_id))
library.commit(con)
await ctx.send(f"Баллы успешно сняты\n"
f"Осталось баллов: {stats.points}")
@top.command(name="end_season")
@check.is_owner()
async def end_season(self, ctx):
con, cur = library.get.con_cur()
guild_id = library.get.guild_id(ctx)
update = Const.updates.USGuildRemoveStats
cur.execute(update, (guild_id, ))
library.commit(con)
row_count = cur.rowcount
if row_count > 0:
await ctx.send(f"Статистика профилей удалена\nОбновлено профилей: {row_count}")
else:
await ctx.send(f"Не обновлено ни одного профиля")
@points_remove.error
@top_mmr.error
async def points_handler(self, ctx, error):
        error = getattr(error, 'original', error) # unwrap custom errors
print(error)
if isinstance(error, commands.errors.MissingRole):
await ctx.send("Недостаточно прав для выполнения команды")
elif isinstance(error, exceptions.WrongLeague):
await ctx.send("Выберите корректную лигу")
def setup(bot):
bot.add_cog(Stats(bot))
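# Illustrative standalone sketch (assumption): the same openpyxl pattern used in
# top_excel above (bold headings, data rows, fixed column widths) without the
# Discord and database dependencies; the file name and sample rows are hypothetical.
def _example_workbook(path="example_stats.xlsx"):
    wb = openpyxl.Workbook()
    ws = wb.active
    for colno, heading in enumerate(["id", "wins", "points"], start=1):
        cell = ws.cell(row=1, column=colno)
        cell.font = Font(bold=True)
        cell.value = heading
    for rowno, row in enumerate([(1, 10, 100), (2, 7, 70)], start=2):
        for colno, value in enumerate(row, start=1):
            ws.cell(row=rowno, column=colno).value = value
    dims = DimensionHolder(worksheet=ws)
    for col in range(ws.min_column, ws.max_column + 1):
        dims[get_column_letter(col)] = ColumnDimension(ws, min=col, max=col, width=20)
    ws.column_dimensions = dims
    wb.save(path)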
| 35.469484
| 113
| 0.594705
|
f74f5aefcc5817037ddcd5c4e1f68206420349a7
| 292
|
py
|
Python
|
paper_spider/paper_spider/pipelines.py
|
peterwilliams97/ToneRanger
|
61ab8b5a96bbf3f82b8e6a07e470831189afff8c
|
[
"MIT"
] | null | null | null |
paper_spider/paper_spider/pipelines.py
|
peterwilliams97/ToneRanger
|
61ab8b5a96bbf3f82b8e6a07e470831189afff8c
|
[
"MIT"
] | null | null | null |
paper_spider/paper_spider/pipelines.py
|
peterwilliams97/ToneRanger
|
61ab8b5a96bbf3f82b8e6a07e470831189afff8c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class PaperSpiderPipeline(object):
def process_item(self, item, spider):
return item
| 24.333333
| 65
| 0.715753
|
394de8f0de6b99f8f25966bc8c4f0e6f7c15ddd9
| 927
|
py
|
Python
|
setup.py
|
ronikobrosly/surgo_bayesian_network
|
66bd48a09d4b771264f75cfc443f6527a99eb446
|
[
"MIT"
] | null | null | null |
setup.py
|
ronikobrosly/surgo_bayesian_network
|
66bd48a09d4b771264f75cfc443f6527a99eb446
|
[
"MIT"
] | null | null | null |
setup.py
|
ronikobrosly/surgo_bayesian_network
|
66bd48a09d4b771264f75cfc443f6527a99eb446
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="surgo-bayesian-network",
version="0.0.1",
author="Roni Kobrosly",
author_email="roni.kobrosly@gmail.com",
description="Case study package for Surgo Foundation",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ronikobrosly/surgo_bayesian_network",
packages=setuptools.find_packages(include=['surgo_bayesian_network']),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7.6',
install_requires=[
'matplotlib',
'networkx',
'numpy',
'pandas',
'pgmpy',
'pomegranate',
'pygraphviz',
'pytest',
'scipy'
]
)
| 27.264706
| 74
| 0.625674
|
04de03d2cc7be80e4ca37506ee1e6182fe825551
| 6,865
|
py
|
Python
|
src/lappy/services/hor_well_maker.py
|
erythrocyte/qtlappy
|
f8ceda45cb5c26e2be8107ddfbafe2d5e3b76627
|
[
"MIT"
] | null | null | null |
src/lappy/services/hor_well_maker.py
|
erythrocyte/qtlappy
|
f8ceda45cb5c26e2be8107ddfbafe2d5e3b76627
|
[
"MIT"
] | 13
|
2020-12-15T19:38:19.000Z
|
2021-02-20T13:52:05.000Z
|
src/lappy/services/hor_well_maker.py
|
erythrocyte/qtlappy
|
f8ceda45cb5c26e2be8107ddfbafe2d5e3b76627
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
from src.lappy.models.well import Well
from src.lappy.models.point import Point
from src.lappy.models.pointPair import PointPair
from src.lappy.models.vector import Vector
from src.lappy.services import geom_oper, vect_oper, geom_numpy
from src.lappy.services import well_track_service
import numpy as np
class HorWellMaker(object):
"""
"""
class PointPairs(object):
def __init__(self):
self.pairs = []
def make_thin(self, well, nw):
"""
args:
nw - segment points count
"""
if well is None or well.track is None or nw is None:
return None, None
pts = np.empty((0, 2))
seg = np.empty((0, 2))
# forward
for k in range(len(well.track)-1):
res = geom_numpy.line(well.track[k], well.track[k+1],
nw, use_last_pt=False)
if res[0] is not None:
pts = np.vstack([pts, res[0]])
# backward
for k in range(len(well.track)-1, 0, -1):
res = geom_numpy.line(well.track[k], well.track[k-1],
nw, use_last_pt=False)
if res[0] is not None:
pts = np.vstack([pts, res[0]])
N = len(pts)
i = np.arange(N)
seg = np.stack([i, i + 1], axis=1) % N
return [[well.track[0].x, well.track[0].y], pts, seg]
def make_real(self, well: Well, nw: int, hnw: int):
"""
"""
# check well track suitable
if not well_track_service.well_track_suits(well.track):
print('well track is not suitable: sharp angles')
return None, None
tp = self.__get_line_points(well)
pts = np.empty((0, 2))
sf = geom_numpy.sector(tp[0].pl, well.track[0],
nw, well.track[1], np.pi, use_last_pt=False)
if sf[0] is not None:
pts = np.vstack([pts, sf[0]])
ltp = len(tp)
for i in range(ltp-1):
lnn = geom_numpy.line(tp[i].pr, tp[i+1].pr, hnw, use_last_pt=False)
if lnn[0] is not None:
pts = np.vstack([pts, lnn[0]])
sf = geom_numpy.sector(tp[ltp-1].pr, well.track[ltp-1],
nw, well.track[ltp-2], np.pi, use_last_pt=False)
if sf[0] is not None:
pts = np.vstack([pts, sf[0]])
for i in range(ltp-1):
lnn = geom_numpy.line(tp[ltp-1 - i].pl, tp[ltp-1 - (i+1)].pl, hnw,
use_last_pt=False)
if lnn[0] is not None:
pts = np.vstack([pts, lnn[0]])
N = len(pts)
i = np.arange(N)
seg = np.stack([i, i + 1], axis=1) % N
return [[well.track[0].x, well.track[0].y], pts, seg]
def __get_line_points(self, well: Well):
"""
"""
rw = well.radius
track = well.track
prs = [self.PointPairs() for i in range(len(well.track))]
for i in range(len(well.track)-1):
p0, p1 = track[i], track[i + 1]
pr1 = self.__get_bound_points(p0, p1, rw)
pr2 = self.__get_bound_points(p1, p0, rw)
prs[i].pairs.append(pr1)
prs[i+1].pairs.append(pr2)
# swap left and right
self.__order_left_right_points(prs, well.track)
result = self.__merge_points(prs, well.track, well.radius)
return result
def __order_left_right_points(self, pts, track):
"""
Args:
pts : list[PointPairs]
track : well track
"""
def check_swap(p1, q1, p2, q2):
res, p = geom_oper.is_segments_intersect(p1, q1, p2, q2)
return True if res else False
def do_swap(pts, k, j):
pts[k].pairs[j].pl, pts[k].pairs[j].pr = \
pts[k].pairs[j].pr, pts[k].pairs[j].pl
def intersect_track(p1, q1, track):
for k in range(len(track)-1):
p2 = track[k]
q2 = track[k+1]
res = check_swap(p1, q1, p2, q2)
if res:
return True
return False
for k, p in enumerate(pts):
for j in range(1, len(p.pairs)+1):
if k == len(pts)-1 and j == len(p.pairs):
continue
p1 = p.pairs[j-1].pr
q1 = pts[k+1].pairs[0].pr if j == len(p.pairs) \
else p.pairs[j].pr
if intersect_track(p1, q1, track):
if j == len(p.pairs):
a, b = k+1, 0
else:
a, b = k, j
do_swap(pts, a, b)
def __merge_points(self, prs, track, r):
result = []
for i, pr in enumerate(prs):
while (len(pr.pairs) != 1):
prs[i].pairs = self.__merge_inner_points(pr.pairs, track[i], r)
result.append(PointPair(pr.pairs[0].pl, pr.pairs[0].pr))
return result
def __merge_inner_points(self, prs, tp, r):
"""
"""
if len(prs) == 1:
return prs[0]
result = []
for i in range(1, len(prs)):
pl1, pr1 = prs[i-1].pl, prs[i-1].pr
pl2, pr2 = prs[i].pl, prs[i].pr
pl = self.__get_merged_inner_pair(pl1, pl2, tp, r)
pr = self.__get_merged_inner_pair(pr1, pr2, tp, r)
pp = PointPair(pl, pr)
result.append(pp)
return result
def __get_merged_inner_pair(self, p1, p2, tp, r):
"""
"""
e = Point((p1.x + p2.x) / 2.0, (p1.y + p2.y) / 2.0, -1)
ux, uy = vect_oper.normalize(e, tp)
x, y = tp.x - r * ux, tp.y - r * uy
return Point(x, y, -1)
def __get_bound_points(self, pt_main, pt2, rw):
"""
returns:
PointPair with "left" and "right" points
"""
[x0, y0] = [pt_main.x, pt_main.y]
[x1, y1] = [pt2.x, pt2.y]
[asg, bsg] = geom_oper.get_line_cf(x0, y0, x1, y1)
if asg is None: # x=const
xp0 = x0 + rw
yp0 = y0
xp1 = x0 - rw
yp1 = y0
elif abs(asg - 0.0) < 1e-6: # y = const
xp0 = x0
yp0 = y0 + rw
xp1 = x0
yp1 = y0 - rw
else:
[ap, bp] = geom_oper.ortho_line_cf(asg, bsg, x0, y0)
x2 = x0 + 1.0
y2 = ap * x2 + bp
vx, vy = x2 - x0, y2 - y0
ux, uy = vect_oper.normalize(vx, vy)
xp0 = x0 + rw * ux
yp0 = y0 + rw * uy
xp1 = x0 - rw * ux
yp1 = y0 - rw * uy
p0 = Point(xp0, yp0, -1)
p1 = Point(xp1, yp1, -1)
result = PointPair(p0, p1)
return result
| 29.212766
| 79
| 0.475747
|
6f959fc33bf493c84e3503f265b2ae417f648da8
| 1,275
|
py
|
Python
|
sage/cli/grpc_server.py
|
JulienDavat/sage-engine
|
87fb7075a07395a527da660d5efc056b0f49758c
|
[
"MIT"
] | 25
|
2018-09-07T14:43:51.000Z
|
2021-10-31T22:41:48.000Z
|
sage/cli/grpc_server.py
|
JulienDavat/sage-engine
|
87fb7075a07395a527da660d5efc056b0f49758c
|
[
"MIT"
] | 4
|
2018-10-28T15:32:08.000Z
|
2022-01-26T12:47:36.000Z
|
sage/cli/grpc_server.py
|
JulienDavat/sage-engine
|
87fb7075a07395a527da660d5efc056b0f49758c
|
[
"MIT"
] | 16
|
2018-12-04T17:50:12.000Z
|
2022-03-26T22:55:47.000Z
|
# grpc_server.py
# Author: Thomas MINIER - MIT License 2017-2020
import signal
from asyncio import set_event_loop_policy
from os.path import isfile
from time import time
import click
import uvloop
from sage.grpc.grpc_server import get_server
def stop_server(server, grace=None):
"""Stop server on a CTRL-C event"""
def __fn__(signum, frame):
server.stop(grace)
return __fn__
@click.command()
@click.argument("config")
@click.option("-p", "--port", type=int, default=8000, show_default=True, help="The port to bind")
@click.option("-w", "--workers", type=int, default=4, show_default=True, help="he number of server workers")
@click.option("--log-level", type=click.Choice(["debug", "info", "warning", "error"]), default="info", show_default=True, help="The granularity of log outputs")
def start_grpc_server(config: str, port: int, workers: int, log_level: str) -> None:
"""Launch the Sage gRPC server using the CONFIG configuration file"""
# Enable uvloop
set_event_loop_policy(uvloop.EventLoopPolicy())
server = get_server(config, port=port, workers=workers)
# Stop the server on a CTRL-C event
signal.signal(signal.SIGINT, stop_server(server))
# Start the server, and wait until it completes
server.start()
server.wait_for_termination()
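if __name__ == "__main__":
    # Illustrative entry point (assumption): click parses sys.argv when the
    # command is invoked directly, e.g.
    #   python grpc_server.py config.yaml --port 8000 --workers 4 --log-level info
    # The config file name above is hypothetical.
    start_grpc_server()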
| 34.459459
| 160
| 0.738824
|
90764419b1d6b332906b928b15a6d6cb822ad017
| 3,356
|
py
|
Python
|
python/days/d05/__init__.py
|
tamaroth/advent-of-code-2018
|
83af50c7c128d268a8518ba88432dec6e15d149a
|
[
"MIT"
] | 1
|
2018-12-02T10:08:05.000Z
|
2018-12-02T10:08:05.000Z
|
python/days/d05/__init__.py
|
tamaroth/advent-of-code-2018
|
83af50c7c128d268a8518ba88432dec6e15d149a
|
[
"MIT"
] | null | null | null |
python/days/d05/__init__.py
|
tamaroth/advent-of-code-2018
|
83af50c7c128d268a8518ba88432dec6e15d149a
|
[
"MIT"
] | null | null | null |
"""
Day 5: Alchemical Reduction
"""
from days import Day
from utils.file import read_lines_of_datafile
def get_day_05():
return Day05(
read_lines_of_datafile('day_05_data.txt')[0]
)
class Day05(Day):
"""A solution to Day 5: Alchemical Reduction."""
def __init__(self, data):
super().__init__('Day 5: Alchemical Reduction')
self._polymer = data
def solve_part_one(self):
"""Solves the first part of the task.
It reacts the whole polymer and returns its length.
"""
return len(self._react_polymer(self._polymer))
def solve_part_two(self):
"""Solves the second part of the task.
Finds all possible units of the polymer, then for each unit it completely
        removes that unit from the polymer and finally reacts it. The solution is the shortest
of all possible reduced and reacted polymers.
"""
units = self._get_possible_units(self._polymer)
shortest = len(self._polymer)
for unit in units:
polymer = self._reduce_polymer(self._polymer, unit)
shortest = min(shortest, len(self._react_polymer(polymer)))
return shortest
def _react_polymer(self, polymer):
"""Reacts the given polymer.
Process of polymer reaction tries to find two adjacent units of the same
type but opposite polarity. When they are found, they are removed and
the process of polymer reaction starts again from the last visited unit.
"""
reactive = True
index = 0
while reactive:
# For each pass compute the length of the polymer.
end = len(polymer)
while index + 1 < end:
this = polymer[index]
next = polymer[index + 1]
if self._are_identical_same_types(this, next) and self._are_opposite_polarity(this, next):
polymer = polymer[:index] + polymer[index+2:]
# If the current index is not at the beginning of the polymer,
# we need to react it again.
if index != 0:
index -= 1
break
index += 1
if index + 1 >= len(polymer):
reactive = False
return polymer
def _reduce_polymer(self, polymer, unit):
"""Reduces the given polymer by the given unit (irrespective of its
polarity).
"""
return polymer.translate({
ord(unit.lower()): '',
ord(unit.upper()): '',
})
def _get_possible_units(self, polymer):
"""Get all possible units from the given polymer.
The returned dictionary consists only of lowercase units.
"""
possibilities = set()
for c in polymer:
if c.lower() not in possibilities:
possibilities.add(c.lower())
return possibilities
def _are_identical_same_types(self, unit_a, unit_b):
"""Checks whether the two given units have the same type."""
return unit_a.lower() == unit_b.lower()
def _are_opposite_polarity(self, unit_a, unit_b):
"""Checks whether the two given units have opposite polarity."""
return ((unit_a.islower() and unit_b.isupper()) or (unit_a.isupper() and unit_b.islower()))
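if __name__ == "__main__":
    # Illustrative self-check (assumption: the Day base class only stores the
    # name passed to it). "dabAcCaCBAcCcaDA" is the sample polymer from the
    # puzzle statement, which reacts down to length 10 and reduces to 4.
    sample = Day05("dabAcCaCBAcCcaDA")
    print(sample.solve_part_one())
    print(sample.solve_part_two())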
| 33.89899
| 106
| 0.597735
|
30a1dd7c9dfa567a6854564000ee0392f344f7f7
| 1,247
|
py
|
Python
|
problems/orf.py
|
viadanna/rosalind-python
|
6709c683b04c2e069d73613a2844533e752030bb
|
[
"MIT"
] | null | null | null |
problems/orf.py
|
viadanna/rosalind-python
|
6709c683b04c2e069d73613a2844533e752030bb
|
[
"MIT"
] | null | null | null |
problems/orf.py
|
viadanna/rosalind-python
|
6709c683b04c2e069d73613a2844533e752030bb
|
[
"MIT"
] | null | null | null |
'''
Open Reading Frames
http://rosalind.info/problems/orf/
Problem
Either strand of a DNA double helix can serve as the coding strand for RNA
transcription. Hence, a given DNA string implies six total reading frames, or
ways in which the same region of DNA can be translated into amino acids: three
reading frames result from reading the string itself, whereas three more result
from reading its reverse complement.
An open reading frame (ORF) is one which starts from the start codon and ends
by stop codon, without any other stop codons in between. Thus, a candidate
protein string is derived by translating an open reading frame into amino acids
until a stop codon is reached.
Given: A DNA string s of length at most 1 kbp in FASTA format.
Return: Every distinct candidate protein string that can be translated from
ORFs of s. Strings can be returned in any order.
Sample Dataset
>Rosalind_99
AGCCATGTAGCTAACTCAGGTTACATGGGGATGACCCCGCGACTTGGATTAGAGTCTCTTTTGGAATAAGCCTGAATGATCCGAGTAGCATCTCAG
Sample Output
MLLGSFRLIPKETLIQVAGSSPCNLS
M
MGMTPRLGLESLLE
MTPRLGLESLLE
'''
from lib.fasta import read_fasta
def run_orf(data):
return '\n'.join(set(
p.sequence
for dna in read_fasta(data)
for p in dna.to_proteins()))
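if __name__ == "__main__":
    # Illustrative usage sketch (assumption: lib.fasta.read_fasta accepts the
    # raw FASTA text of the dataset; this is a guess about a project-internal
    # helper, not a documented interface).
    sample = (">Rosalind_99\n"
              "AGCCATGTAGCTAACTCAGGTTACATGGGGATGACCCCGCGACTTGGATTAGAGTCTCTTTTGGAATAAGCCTGAATGATCCGAGTAGCATCTCAG")
    print(run_orf(sample))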
| 30.414634
| 96
| 0.790698
|
f069f9d1cee849ed9c3393e4a9a99971567e2900
| 7,989
|
py
|
Python
|
behavioral.py
|
gaosanyong/algorithms
|
09987f7b58648bfeeff3315f35b51e6e6fa5d072
|
[
"MIT"
] | 3
|
2021-03-27T07:54:16.000Z
|
2021-11-13T13:21:44.000Z
|
behavioral.py
|
gaosanyong/algorithms
|
09987f7b58648bfeeff3315f35b51e6e6fa5d072
|
[
"MIT"
] | null | null | null |
behavioral.py
|
gaosanyong/algorithms
|
09987f7b58648bfeeff3315f35b51e6e6fa5d072
|
[
"MIT"
] | 2
|
2021-05-31T18:20:36.000Z
|
2021-10-31T04:22:49.000Z
|
"""BEHAVIORAL DESIGN PATTERN
- observer
- visitor
- iterator
- strategy
- chain of responsibility
"""
"""OBSERVER DESIGN PATTERN
PROBLEM
* subjects to be monitored
* observers to be notified
SCENARIO
* core temperatures of reactors at a power plant
* registered observers to be notified
SOLUTION
* subject -- abstract class (attach/detach/notify)
* concrete subjects
RELATED
* singleton
"""
class Subject(object): #Represents what is being 'observed'
def __init__(self):
        self._observers = [] #This is where references to all the observers are being kept
        #Note that this is a one-to-many relationship: there will be one subject observed by multiple observers
def attach(self, observer):
if observer not in self._observers: #If the observer is not already in the observers list
self._observers.append(observer) #append the observer to the list
def detach(self, observer): #Simply remove the observer
try:
self._observers.remove(observer)
except ValueError:
pass
def notify(self, modifier=None):
for observer in self._observers: #For all the observers in the list
if modifier != observer: #Don't notify the observer who is actually updating the temperature
observer.update(self) #Alert the observers!
class Core(Subject): #Inherits from the Subject class
def __init__(self, name=""):
super(Core, self).__init__()
self._name = name #Set the name of the core
self._temp = 0 #Initialize the temperature of the core
@property #Getter that gets the core temperature
def temp(self):
return self._temp
@temp.setter #Setter that sets the core temperature
def temp(self, temp):
self._temp = temp
#Notify the observers when ever somebody changes the core temperature
self.notify()
class TempViewer:
def update(self, subject): #Alert method that is invoked when the notify() method in a concrete subject is invoked
print("Temperature Viewer: {} has Temperature {}".format(subject._name, subject._temp))
# #Let's create our subjects
# c1 = Core("Core 1")
# c2 = Core("Core 2")
# #Let's create our observers
# v1 = TempViewer()
# v2 = TempViewer()
# #Let's attach our observers to the first core
# c1.attach(v1)
# c1.attach(v2)
# #Let's change the temperature of our first core
# c1.temp = 80
# c1.temp = 90
"""VISITOR DESIGN PATTERN
PROBLEM
* new operations
* existing classes
* all dynamically done
SCENARIO
* house class
* HVAC specialist - visitor type 1
* electrician -- visitor type 2
SOLUTION
* new operations
* various elements of an existing object structure
"""
class House(object):
def accept(self, visitor):
"""Interface to accept a visitor"""
#Triggers the visiting operation!
visitor.visit(self)
def work_on_hvac(self, hvac_specialist):
print(self, "worked on by", hvac_specialist) #Note that we now have a reference to the HVAC specialist object in the house object!
def work_on_electricity(self, electrician):
print(self, "worked on by", electrician) #Note that we now have a reference to the electrician object in the house object!
def __str__(self):
"""Simply return the class name when the House object is printed"""
return self.__class__.__name__
class Visitor(object):
"""Abstract visitor"""
def __str__(self):
"""Simply return the class name when the Visitor object is printed"""
return self.__class__.__name__
class HvacSpecialist(Visitor):
"""Concrete visitor: HVAC specialist"""
def visit(self, house):
house.work_on_hvac(self) #Note that the visitor now has a reference to the house object
class Electrician(Visitor):
"""Concrete visitor: electrician"""
def visit(self, house):
house.work_on_electricity(self) #Note that the visitor now has a reference to the house object
# #Create an HVAC specialist
# hv = HvacSpecialist()
# #Create an electrician
# e = Electrician()
# #Create a house
# home = House()
# #Let the house accept the HVAC specialist and work on the house by invoking the visit() method
# home.accept(hv)
# #Let the house accept the electrician and work on the house by invoking the visit() method
# home.accept(e)
"""ITERATOR PATTERN
PROBLEM
* the traversal interfaces of an aggregate object -- getting crowded
SCENARIO
* our custom iterator based on a built-in python iterator: zip()
* German counting words
* only up to a certain point
SOLUTION
* isolation
* interface
* tracking
* recommendation
RELATED PATTERNS
* composite
"""
def count_to(count):
"""Our iterator implementation"""
#Our list
numbers_in_german = ["eins", "zwei", "drei", "vier", "funf"]
#Our built-in iterator
#Creates a tuple such as (1, "eins")
iterator = zip(range(count), numbers_in_german)
#Iterate through our iterable list
#Extract the German numbers
#Put them in a generator called number
for position, number in iterator:
#Returns a 'generator' containing numbers in German
yield number
#Let's test the generator returned by our iterator
# for num in count_to(3):
# print(f'{num}')
"""STRATEGY PATTERN
PROBLEM
* need to dynamically change the behavior of an object
SCENARIO
* abstract strategy class with a default set of behaviors
* concrete strategy class with new behaviors
SOLUTION
* the types module in Python
"""
import types #Import the types module
class Strategy:
"""The Stragegy Pattern class"""
def __init__(self, function=None):
self.name = "Default Strategy"
        #If a reference to a function is provided, replace the execute() method with the given function
        if function:
            self.execute = types.MethodType(function, self)
def execute(self): #This gets replaced by another version if another strategy is provided
"""The default method that prints the name of the strategy being used"""
print("{} is used".format(self.name))
#Replacement method 1
def strategy_one(self):
print("{} is used to execute method 1".format(self.name))
#Replacement method 2
def strategy_two(self):
print("{} is used to execute method 2".format(self.name))
# #Let's create our default strategy
# s0 = Strategy()
# #Let's execute our default strategy
# s0.execute()
# #Let's create the first variation of our default strategy by providing a new behavior
# s1 = Strategy(strategy_one)
# #Let's set its name
# s1.name = "Strategy One"
# #Let's execute the strategy
# s1.execute()
# s2 = Strategy(strategy_two)
# s2.name = "Strategy Two"
# s2.execute()
"""CHAIN OF RESPONSIBILITY
PROBLEM
* one request -- various types of processing
SCENARIO
* integer value
* handler -- find out range
SOLUTION
* abstract handler -- successor
* concrete handler -- checks if it can handle the request
RELATED PATTERNS
* composite
"""
class Handler: #Abstract handler
"""Abstract handler"""
def __init__(self, successor):
        self._successor = successor #Define who is the next handler
def handle(self, request):
handled = self._handle(request) #If handled, stop here
#Otherwise, keep going
if not handled:
self._successor.handle(request)
def _handle(self, request):
raise NotImplementedError("Must provide implementation in subclass!")
class ConcreteHandler(Handler): #Inherits from the abstract handler
"""Concrete handler 1"""
def _handle(self, request):
if 0 < request <= 10: #Provide a condition for handling
print("Request {} handled in handler 1".format(request))
return True #Indicates that the request has been handled
class DefaultHandler(Handler):
"""Default handler"""
def _handle(self, request):
"""If there is no handler available"""
#No condition checking since this is a default Handler
print("End of chain, no handler for {}".format(request))
return True #Indicates that the request has been handled
class Client: #Using handlers
def __init__(self):
self.handler = ConcreteHandler(DefaultHandler(None)) #Create handlers and use them in a sequence you want
#Note that the default handler has no successor
def delegate(self, requests): #Send your requests one at a time for handlers to handle
for request in requests:
self.handler.handle(request)
#Create a client
c = Client()
#Create request
requests = [2, 5, 30]
#Send the requests
c.delegate(requests)
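#Illustrative extension (assumption): a second concrete handler for a different
#range, chained in front of the default handler; the range and the sample
#requests are hypothetical.
class ConcreteHandler2(Handler):
    """Concrete handler 2 (illustrative)"""
    def _handle(self, request):
        if 10 < request <= 20: #Handle a different range than handler 1
            print("Request {} handled in handler 2".format(request))
            return True
#Chain the handlers: handler 1 -> handler 2 -> default
chained = ConcreteHandler(ConcreteHandler2(DefaultHandler(None)))
#Send the same style of requests through the longer chain
for request in [2, 15, 30]:
    chained.handle(request)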
| 28.031579
| 132
| 0.740643
|
dc47b742f36af6df4d5159eaf9f460877ea10d69
| 6,321
|
py
|
Python
|
RobotFramework_Testsuites/Keywords/CSetup.py
|
test-fullautomation/robotframework-testsuitesmanagement
|
4a39bcc7fa81d3761ee570c1bed560faf4b673ff
|
[
"Apache-2.0"
] | 3
|
2021-12-20T02:53:20.000Z
|
2022-02-16T03:04:42.000Z
|
RobotFramework_Testsuites/Keywords/CSetup.py
|
test-fullautomation/robotframework-testsuitesmanagement
|
4a39bcc7fa81d3761ee570c1bed560faf4b673ff
|
[
"Apache-2.0"
] | 5
|
2022-02-18T12:37:02.000Z
|
2022-03-23T10:14:07.000Z
|
RobotFramework_Testsuites/Keywords/CSetup.py
|
test-fullautomation/robotframework-testsuitesmanagement
|
4a39bcc7fa81d3761ee570c1bed560faf4b673ff
|
[
"Apache-2.0"
] | 4
|
2022-01-06T08:25:31.000Z
|
2022-03-08T02:23:03.000Z
|
# Copyright 2020-2022 Robert Bosch Car Multimedia GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import RobotFramework_Testsuites
from robot.api.deco import keyword
from robot.api import logger
from RobotFramework_Testsuites.Config import CConfig
from lxml import etree
from robot.libraries.BuiltIn import BuiltIn
class CSetupKeywords(object):
'''
    Definition of setup keywords
'''
@keyword
def testsuite_setup(self, sTestsuiteCfgFile=''):
if not RobotFramework_Testsuites.CTestsuitesCfg.oConfig.bLoadedCfg:
BuiltIn().unknown("Loading of %s" %(CConfig.sLoadedCfgError))
return
else:
if not RobotFramework_Testsuites.CTestsuitesCfg.oConfig.rConfigFiles.sLevel1:
if sTestsuiteCfgFile != '':
RobotFramework_Testsuites.CTestsuitesCfg.oConfig.rConfigFiles.sLevel2 = True
RobotFramework_Testsuites.CTestsuitesCfg.oConfig.rConfigFiles.sLevel4 = False
RobotFramework_Testsuites.CTestsuitesCfg.oConfig.sTestSuiteCfg = sTestsuiteCfgFile
try:
RobotFramework_Testsuites.CTestsuitesCfg.oConfig.loadCfg(RobotFramework_Testsuites.CTestsuitesCfg.oConfig)
except Exception as error:
BuiltIn().unknown("Loading of %s" %(CConfig.sLoadedCfgError))
else:
RobotFramework_Testsuites.CTestsuitesCfg.oConfig.rConfigFiles.sLevel3 = True
RobotFramework_Testsuites.CTestsuitesCfg.oConfig.rConfigFiles.sLevel4 = False
try:
RobotFramework_Testsuites.CTestsuitesCfg.oConfig.loadCfg(RobotFramework_Testsuites.CTestsuitesCfg.oConfig)
except Exception as error:
BuiltIn().unknown("Loading of %s" %(CConfig.sLoadedCfgError))
else:
                logger.warn('The configuration level 1 is set for this Robot run! \nThe configuration \"%s\" is used as the highest priority' \
%(RobotFramework_Testsuites.CTestsuitesCfg.oConfig.sTestCfgFile))
if RobotFramework_Testsuites.CTestsuitesCfg.oConfig.rConfigFiles.sLevel1:
logger.info('Running with configuration level: 1')
elif RobotFramework_Testsuites.CTestsuitesCfg.oConfig.rConfigFiles.sLevel2:
logger.info('Running with configuration level: 2')
if RobotFramework_Testsuites.CTestsuitesCfg.oConfig.bConfigLoaded:
logger.info("The parameters in \"%s\" will be added into configuration object" \
%(RobotFramework_Testsuites.CTestsuitesCfg.oConfig.sTestCfgFile))
elif RobotFramework_Testsuites.CTestsuitesCfg.oConfig.rConfigFiles.sLevel3:
logger.info('Running with configuration level: 3')
if RobotFramework_Testsuites.CTestsuitesCfg.oConfig.bConfigLoaded:
logger.info("The parameters in \"%s\" will be added into configuration object" \
%(RobotFramework_Testsuites.CTestsuitesCfg.oConfig.sTestCfgFile))
else:
logger.info('Running with configuration level: 4')
RobotFramework_Testsuites.CTestsuitesCfg.oConfig.verifyRbfwVersion()
logger.info('Suite Path: %s' %(RobotFramework_Testsuites.CTestsuitesCfg.oConfig.sTestcasePath))
logger.info('CfgFile Path: %s' %(RobotFramework_Testsuites.CTestsuitesCfg.oConfig.sTestCfgFile))
logger.info('Suite Count: %s' %(RobotFramework_Testsuites.CTestsuitesCfg.oConfig.iSuiteCount))
logger.info('Total testcases in TestSuite "%s" is: %s' %( \
RobotFramework_Testsuites.CTestsuitesCfg.oConfig.sRootSuiteName, \
RobotFramework_Testsuites.CTestsuitesCfg.oConfig.iTotalTestcases))
@keyword
def testsuite_teardown(self):
logger.info('testsuite_teardown: Will be implemented later')
@keyword
def testcase_setup(self):
logger.info('Test Count: %s' %(RobotFramework_Testsuites.CTestsuitesCfg.oConfig.iTestCount))
@keyword
def testcase_teardown(self):
logger.info('testcase_teardown: Will be implemented later')
@keyword
def update_config(self, sCfgFile):
CConfig.updateCfg(sCfgFile)
class CGeneralKeywords(object):
'''
    Definition of general keywords
'''
@keyword
def get_config(self):
'''
        oConfigParams: the dictionary consisting of the configuration params which
        are returned to the user from the get_config_params keyword
'''
return copy.deepcopy(RobotFramework_Testsuites.CTestsuitesCfg.oConfig.oConfigParams)
@keyword
def load_json(self, jsonfile, level=1, variant='default'):
'''
        This keyword is used to load a json file and then return a json object.
        - level = 1 -> loads the content of jsonfile.
        - level != 1 -> loads the json file which is set with variant (like loading
          config level 2)
'''
import os
from os.path import abspath, dirname
from JsonPreprocessor import CJsonPreprocessor
jsonFileDir = dirname(abspath(jsonfile))
oJsonPreprocessor = CJsonPreprocessor()
if level == 1:
oJsonData = oJsonPreprocessor.jsonLoad(jsonfile)
return oJsonData
else:
oJsonFristLevel = oJsonPreprocessor.jsonLoad(jsonfile)
if variant not in oJsonFristLevel:
logger.error('The variant: %s is not correct' % variant)
return {}
jsonFileLoaded = jsonFileDir + oJsonFristLevel[variant]['path'] + '/' + oJsonFristLevel[variant]['name']
oJsonData = oJsonPreprocessor.jsonLoad(jsonFileLoaded)
return oJsonData
| 46.138686
| 140
| 0.674577
|
e8cd4be42e26d99e825c8498b3ee0fa6d3ea2d2e
| 2,248
|
py
|
Python
|
QCPU_Setup/solver/solveBQM.py
|
cogrpar/qcpuWARE
|
9b8233e830f8cfacbef787781b2279e42f26fec5
|
[
"Apache-2.0"
] | 1
|
2022-02-01T14:40:05.000Z
|
2022-02-01T14:40:05.000Z
|
QCPU_Setup/solver/solveBQM.py
|
cogrpar/qcpuWARE
|
9b8233e830f8cfacbef787781b2279e42f26fec5
|
[
"Apache-2.0"
] | null | null | null |
QCPU_Setup/solver/solveBQM.py
|
cogrpar/qcpuWARE
|
9b8233e830f8cfacbef787781b2279e42f26fec5
|
[
"Apache-2.0"
] | 1
|
2022-02-01T14:40:31.000Z
|
2022-02-01T14:40:31.000Z
|
import dimod
import hybrid
import dwavebinarycsp
#function to solve for the minimum energy possibility:
def SolveExtreme(bqm):
# Define the workflow
iteration = hybrid.RacingBranches(
hybrid.InterruptableTabuSampler(),
hybrid.EnergyImpactDecomposer(size=2)
| hybrid.QPUSubproblemAutoEmbeddingSampler()
| hybrid.SplatComposer()
) | hybrid.ArgMin()
workflow = hybrid.LoopUntilNoImprovement(iteration, convergence=3)
# Solve the problem
init_state = hybrid.State.from_problem(bqm)
final_state = workflow.run(init_state).result()
# Print results
result = ("Solution: sample={.samples.first}".format(final_state))
resultSplit = result.split("{") #remove extra info
resultSplit = resultSplit[1].split("}")
result = resultSplit[0]
results = result.split(", ") #separate the vars
#now extract the numerical results:
refLen = 0
for i in range(len(results)):
if (not("*" in results[i])):
refLen += 1 #make sure that refined is the right length
refined = [0] * refLen
for i in range(len(results)):
if (not("*" in results[i])): #if this is a useful result that only contains one term
num = results[i].split(": ")
trash, position = num[0].split("v")
position = position.replace("'", "")
position = int(position)
refined[position] = (float(num[1]))
return(refined)
#function to solve BQM formatted from a CSP
def SolveCSP(bqm):
# Define the workflow
iteration = hybrid.RacingBranches(
hybrid.InterruptableTabuSampler(),
hybrid.EnergyImpactDecomposer(size=2)
| hybrid.QPUSubproblemAutoEmbeddingSampler()
| hybrid.SplatComposer()
) | hybrid.ArgMin()
workflow = hybrid.LoopUntilNoImprovement(iteration, convergence=3)
# Solve the problem
init_state = hybrid.State.from_problem(bqm)
final_state = workflow.run(init_state).result()
# Print results
result = ("Solution: sample={.samples.first}".format(final_state))
result = result.replace("Solution: sample=Sample(sample={", "")
result, trash = result.split("}")
results = result.split(", ")
#now extract the numerical results:
for i in range(len(results)):
trash, term = results[i].split(": ")
results[i] = float(term)
return(results)
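def _example_bqm():
    """Illustrative sketch (assumption): a tiny two-variable BQM whose variable
    names follow the "v<index>" convention that SolveExtreme parses. Running
    SolveExtreme(_example_bqm()) additionally requires D-Wave QPU access."""
    return dimod.BinaryQuadraticModel({'v0': -1.0, 'v1': -1.0}, {('v0', 'v1'): 2.0}, 0.0, dimod.BINARY)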
| 31.661972
| 88
| 0.691726
|
e9e47f159f628e23f49547e3008480578937d6a8
| 30
|
py
|
Python
|
adv/srVAE/src/modules/priors/realnvp/model/__init__.py
|
eliphatfs/adversarial
|
62c51e22734fd6bc2f674d0a96fddd177e06c507
|
[
"Apache-2.0"
] | 60
|
2020-06-11T11:06:15.000Z
|
2022-03-31T14:35:19.000Z
|
src/modules/priors/realnvp/model/__init__.py
|
ioangatop/srVAE
|
dfee765c53f11f4653e7c6e7118a339832656867
|
[
"MIT"
] | 9
|
2020-06-28T09:45:28.000Z
|
2020-12-30T15:20:19.000Z
|
src/modules/priors/realnvp/model/__init__.py
|
ioangatop/srVAE
|
dfee765c53f11f4653e7c6e7118a339832656867
|
[
"MIT"
] | 9
|
2020-07-28T12:03:32.000Z
|
2022-03-31T14:34:08.000Z
|
from .real_nvp import RealNVP
| 15
| 29
| 0.833333
|
c790d50523ba39dbdf858f1888b21249657777a9
| 8,433
|
py
|
Python
|
tools/docs/generate_version_histories.py
|
sergiitk/envoy
|
fa99ae02afda16ecdfe993d57d57e733e6d63d06
|
[
"Apache-2.0"
] | null | null | null |
tools/docs/generate_version_histories.py
|
sergiitk/envoy
|
fa99ae02afda16ecdfe993d57d57e733e6d63d06
|
[
"Apache-2.0"
] | 10
|
2021-12-06T21:37:15.000Z
|
2021-12-23T09:46:20.000Z
|
tools/docs/generate_version_histories.py
|
sergiitk/envoy
|
fa99ae02afda16ecdfe993d57d57e733e6d63d06
|
[
"Apache-2.0"
] | 1
|
2019-07-09T21:09:57.000Z
|
2019-07-09T21:09:57.000Z
|
import pathlib
import re
import sys
import tarfile
from functools import cached_property
from frozendict import frozendict
import jinja2
from packaging import version
from aio.run import runner
from envoy.base import utils
from envoy.base.utils import IProject, Project
# TODO(phlax): Move all of this to pytooling
REFLINK_RE = re.compile(r":ref:[^>]*>`")
VERSION_HISTORY_INDEX_TPL = """
.. _version_history:
Version history
---------------
{% if dev_version %}
.. toctree::
:titlesonly:
:maxdepth: 2
:caption: Current development version
v{{ dev_version.major }}.{{ dev_version.minor }}/v{{ dev_version }}
{% endif %}
Stable versions
===============
{{ stable_message }}
.. toctree::
:titlesonly:
:maxdepth: 2
:caption: Changelog
{% for version in stable_versions %}
v{{ version.base_version }}: {{ changelogs[minor_versions[version][0]].version }} ({{ changelogs[minor_versions[version][0]].release_date }}) <v{{ version.base_version }}/v{{ version.base_version }}>
{%- endfor %}
Archived versions
=================
{{ archived_message }}
.. toctree::
:titlesonly:
:maxdepth: 1
{% for version in archived_versions %}
v{{ version.base_version }}: {{ changelogs[minor_versions[version][0]].version }} ({{ changelogs[minor_versions[version][0]].release_date }}) <v{{ version.base_version }}/v{{ version.base_version }}>
{%- endfor %}
.. _deprecated:
Deprecation Policy
==================
As of release 1.3.0, Envoy will follow a
`Breaking Change Policy <https://github.com/envoyproxy/envoy/blob/main//CONTRIBUTING.md#breaking-change-policy>`_.
Features in the deprecated list for each version have been DEPRECATED
and will be removed in the specified release cycle. A logged warning
is expected for each deprecated item that is in deprecation window.
"""
VERSION_HISTORY_MINOR_INDEX_TPL = """
.. _version_history_{{ minor_version }}:
{{ minor_version }}
{{ "-" * minor_version|length }}
Latest release:
`{{ current_release }} <https://github.com/envoyproxy/envoy/releases/tag/v{{ current_release }}>`_ ({{ release_date }})
{% if current_release != original_release.version %}
Initial release date:
{{ original_release.release_date }}
{% endif %}
.. toctree::
:titlesonly:
:maxdepth: 2
:caption: Changelog
{% for version in patch_versions %}
v{{ version.base_version }}
{%- endfor %}
"""
VERSION_HISTORY_TPL = """
.. _version_history_{{ changelog.base_version }}:
{{ changelog.base_version }} ({{ changelog.release_date }})
{{ "=" * (changelog.base_version|length + changelog.release_date|length + 4) }}
{% for name, section in sections.items() %}
{% if changelog.data[name] %}
{{ section.title }}
{{ "-" * section.title|length }}
{% if section.description %}
{{ section.description | versionize(mapped_version) }}
{% endif %}
{% for item in changelog.entries(name) -%}
* **{{ item.area }}**: {{ item.change | versionize(mapped_version) | indent(width=2, first=false) }}
{%- endfor %}
{% endif %}
{%- endfor %}
"""
def versionize_filter(text, mapped_version):
"""Replace refinks with versioned reflinks."""
if not mapped_version:
return text
version_prefix = f"v{mapped_version.base_version}:"
matches = set(REFLINK_RE.findall(text))
replacements = []
for matched in matches:
i = matched.find("<") + 1
remaining = matched[i:]
if ":" in remaining:
continue
replacements.append((matched, f"{matched[:i]}{version_prefix}{matched[i:]}"))
for sub, replacement in replacements:
text = text.replace(sub, replacement)
return text
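def _versionize_example():
    """Illustrative self-check (assumption, not part of the build tooling):
    unqualified :ref: targets gain the version prefix, while targets that
    already carry a ':'-qualified prefix are left untouched."""
    assert versionize_filter(":ref:`foo <bar>`", version.parse("1.20")) == ":ref:`foo <v1.20:bar>`"
    assert versionize_filter(":ref:`foo <v1.19:bar>`", version.parse("1.20")) == ":ref:`foo <v1.19:bar>`"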
class VersionHistories(runner.Runner):
@cached_property
def jinja_env(self) -> jinja2.Environment:
env = jinja2.Environment()
env.filters["versionize"] = versionize_filter
return env
@cached_property
def project(self) -> IProject:
return Project()
@cached_property
def sections(self) -> frozendict:
return self.project.changelogs.sections
@cached_property
def tpath(self) -> pathlib.Path:
return pathlib.Path(self.tempdir.name)
@cached_property
def version_history_index_tpl(self):
return self.jinja_env.from_string(VERSION_HISTORY_INDEX_TPL)
@cached_property
def version_history_minor_index_tpl(self):
return self.jinja_env.from_string(VERSION_HISTORY_MINOR_INDEX_TPL)
@cached_property
def version_history_tpl(self):
return self.jinja_env.from_string(VERSION_HISTORY_TPL)
def add_arguments(self, parser) -> None:
super().add_arguments(parser)
parser.add_argument("output_file")
def minor_index_path(self, minor_version) -> pathlib.Path:
return self.tpath.joinpath(f"v{minor_version.base_version}").joinpath(
f"v{minor_version.base_version}.rst")
@runner.cleansup
async def run(self) -> None:
self.write_version_history_index()
self.write_version_histories()
self.write_version_history_minor_indeces()
self.write_tarball()
def write_tarball(self) -> None:
with tarfile.open(self.args.output_file, "w") as tarball:
tarball.add(self.tpath, arcname="./")
def write_version_histories(self) -> None:
for changelog_version in self.project.changelogs:
self.write_version_history(changelog_version)
def write_version_history(self, changelog_version: version.Version) -> None:
minor_version = utils.minor_version_for(changelog_version)
root_path = self.tpath.joinpath(f"v{minor_version.base_version}")
root_path.mkdir(parents=True, exist_ok=True)
map_version = (
minor_version < self.project.minor_version
and (minor_version in self.project.inventories.versions))
version_history = self.version_history_tpl.render(
mapped_version=(minor_version if map_version else None),
sections=self.sections,
changelog=self.project.changelogs[changelog_version])
version_path = root_path.joinpath(f"v{changelog_version}.rst")
version_path.write_text(f"{version_history.strip()}\n\n")
def write_version_history_index(self) -> None:
stable_message = (
"Versions that are currently supported." if self.project.is_main_dev else
"Versions that were supported when this branch was initially released.")
archived_message = (
"Versions that are no longer supported." if self.project.is_main_dev else
"Versions that were no longer supported when this branch was initially released.")
version_history_rst = self.version_history_index_tpl.render(
archived_message=archived_message,
stable_message=stable_message,
dev_version=self.project.dev_version,
changelogs=self.project.changelogs,
minor_versions=self.project.minor_versions,
stable_versions=self.project.stable_versions,
archived_versions=self.project.archived_versions)
self.tpath.joinpath("version_history.rst").write_text(f"{version_history_rst.strip()}\n\n")
    def write_version_history_minor_indices(self) -> None:
for i, (minor_version, patches) in enumerate(self.project.minor_versions.items()):
if self.project.is_main_dev and i == 0:
continue
self.write_version_history_minor_index(minor_version, patches)
def write_version_history_minor_index(
self, minor_version: version.Version, patch_versions) -> None:
skip_first = (self.project.is_dev and self.project.is_current(patch_versions[0]))
if skip_first:
patch_versions = patch_versions[1:]
current_release = patch_versions[0]
original_release = self.project.changelogs[patch_versions[-1]]
version_history_minor_index = self.version_history_minor_index_tpl.render(
minor_version=f"v{minor_version.base_version}",
current_release=current_release.base_version,
original_release=original_release,
release_date=self.project.changelogs[current_release].release_date,
patch_versions=patch_versions)
self.minor_index_path(minor_version).write_text(
f"{version_history_minor_index.strip()}\n\n")
def main(*args):
return VersionHistories(*args)()
if __name__ == "__main__":
sys.exit(main(*sys.argv[1:]))
| 33.070588
| 201
| 0.688367
|
bcd1f908f41d5b748d8ffb098b56233d577df8c6
| 25
|
py
|
Python
|
mdx_wavedrom/__init__.py
|
chiggs/mdx_wavedrom
|
c8f27da68f873fe15bc6d6616e3e454507a6b2a0
|
[
"MIT"
] | 3
|
2015-07-29T04:23:39.000Z
|
2021-02-06T19:33:39.000Z
|
mdx_qrcode/__init__.py
|
airtonix/python-markdown-qrcode
|
c61efee77c9d5b5dc8179a89cbe4d870388fe02b
|
[
"MIT"
] | null | null | null |
mdx_qrcode/__init__.py
|
airtonix/python-markdown-qrcode
|
c61efee77c9d5b5dc8179a89cbe4d870388fe02b
|
[
"MIT"
] | 2
|
2018-05-26T14:46:40.000Z
|
2020-09-25T16:06:59.000Z
|
from extension import *
| 8.333333
| 23
| 0.76
|
74c8b67ccfa9d961c5afd5546a2308350c421704
| 2,163
|
py
|
Python
|
votebot/models.py
|
takwas/pyung-slack-votebot
|
a8bb4048b52e5b303dd31af302b95b98d2ef2317
|
[
"MIT"
] | null | null | null |
votebot/models.py
|
takwas/pyung-slack-votebot
|
a8bb4048b52e5b303dd31af302b95b98d2ef2317
|
[
"MIT"
] | null | null | null |
votebot/models.py
|
takwas/pyung-slack-votebot
|
a8bb4048b52e5b303dd31af302b95b98d2ef2317
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column, ForeignKey, Integer, String, Boolean, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
Base = declarative_base()
session = None
class Profile(Base):
__tablename__ = 'profiles'
id = Column(Integer, primary_key=True)
userid = Column(String(12), nullable=False, unique=True)
username = Column(String(250), nullable=False, unique=True)
title = Column(String, nullable=True, default='')
class Office(Base):
__tablename__ = 'offices'
id = Column(Integer, primary_key=True)
name = Column(String, nullable=False, unique=True)
channel_name = Column(String, nullable=True, default='')
channel = Column(String(12), nullable=False, unique=True)
topic = Column(String(12), nullable=True, default='')
purpose = Column(String, nullable=True, default='')
log_ts = Column(String, nullable=True, default='')
live_ts = Column(String, nullable=True, default='')
election_status_ts = Column(String, nullable=True, default='')
election_status = Column(Boolean, nullable=True, default=False)
class Candidacy(Base):
__tablename__ = 'candidacies'
id = Column(Integer, primary_key=True)
office_id = Column(Integer, ForeignKey('offices.id'), nullable=False)
office = relationship('Office', backref='candidacies')
candidate_id = Column(Integer, ForeignKey('profiles.id'), nullable=False)
candidate = relationship('Profile', backref='candidacies')
post_ts = Column(String, nullable=True, default='')
class Vote(Base):
__tablename__ = 'votes'
id = Column(Integer, primary_key=True)
voter_id = Column(Integer, ForeignKey('profiles.id'), nullable=False)
candidacy_id = Column(Integer, ForeignKey('candidacies.id'), nullable=False)
voter = relationship('Profile', backref='votes_cast')
candidacy = relationship('Candidacy', backref='votes')
def initdb(db_url):
global session
engine = create_engine(db_url)
Base.metadata.create_all(engine)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
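# Illustrative sketch (not part of the original module): wiring the models above to an
# in-memory SQLite database. The database URL and sample values are assumptions made
# only for this example.
def _example_usage():
    initdb('sqlite:///:memory:')
    profile = Profile(userid='U00000001', username='example-user', title='Member')
    session.add(profile)
    session.commit()
    return session.query(Profile).count()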
| 35.459016
| 82
| 0.71706
|
61f36c1ea83c34f2f09ce36cd4c560da29ec82c7
| 3,431
|
py
|
Python
|
auto_pose/eval/latex_report.py
|
HsiaoTsan/AugmentedAutoencoder
|
c60a96c4c29cf867a797d98bce34ee4dea5f2149
|
[
"MIT"
] | 1
|
2020-12-06T03:43:53.000Z
|
2020-12-06T03:43:53.000Z
|
auto_pose/eval/latex_report.py
|
HsiaoTsan/AugmentedAutoencoder
|
c60a96c4c29cf867a797d98bce34ee4dea5f2149
|
[
"MIT"
] | null | null | null |
auto_pose/eval/latex_report.py
|
HsiaoTsan/AugmentedAutoencoder
|
c60a96c4c29cf867a797d98bce34ee4dea5f2149
|
[
"MIT"
] | null | null | null |
import os
import glob
import time
prolog =\
r'''
\documentclass[a4paper,table]{article}
\usepackage{graphicx}
\usepackage{float}
\usepackage{tikz}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{fancyhdr}
\usepackage[a4paper]{geometry}
\usepackage{hyperref}
\usepackage{pdflscape}
\usepackage{nameref}
\usepackage{xcolor}
\usepackage{adjustbox}
\usepackage{gensymb}
\usepackage[parfill]{parskip}
\usepackage[utf8]{inputenc}
\usepackage{pgfplots}
\usepackage{pifont}
\hypersetup{
colorlinks=true,
linktoc=all,
linkcolor=blue,
}
\pagestyle{fancy}
\newcommand*\rot{\rotatebox{90}}
\newcommand*\OK{\ding{51}}
\begin{document}
\begin{center}
{\Huge Experimental Protocol}\\
\textbf{%s}\\
\end{center}
''' % time.ctime()
epilog=\
r'''
\end{document}
'''
class Report(object):
def __init__(self, eval_dir, log_dir):
self.latex = []
self.eval_dir = eval_dir
self.log_dir = log_dir
def write_configuration(self, train_cfg_file_path, eval_cfg_file_path):
names = self.eval_dir.replace('_','\_').split('/')
self.latex.append(
r'''
\begin{table}[H]
\centering
\begin{adjustbox}{max width=\textwidth}
\begin{tabular}{c|c}
\textbf{experiment group:}& %s\\
\textbf{experiment name} & %s \\
\textbf{evaluation name} & %s \\
\textbf{test dataset} & %s \\
\end{tabular}
\end{adjustbox}
\end{table}
''' % (names[-5],names[-4],names[-2],names[-1])
)
workspace_path = os.environ.get('AE_WORKSPACE_PATH')
with open(train_cfg_file_path,'r') as f:
with open(eval_cfg_file_path,'r') as g:
train = f.read().replace('_','\_').replace('#','%')
evalu = g.read().replace('_','\_').replace('#','%')
self.latex.append(
r'''
\section{\Large Train Config}
%s
\section{\Large Evaluation Config}
%s
''' % (train, evalu))
def merge_all_tex_files(self):
tex_files = glob.glob(os.path.join(self.eval_dir,'latex','*.tex'))
for file in tex_files:
if 'report' not in file:
with open(file,'r') as f:
self.latex.append('\\begin{center}\n')
self.latex.append('\input{%s}' % file)
self.latex.append('\\end{center}\n')
def include_all_figures(self):
pdf_files = glob.glob(os.path.join(self.eval_dir,'figures','*.pdf'))
png_files_eval = glob.glob(os.path.join(self.eval_dir,'figures','*.png'))
png_files = glob.glob(os.path.join(self.log_dir,'train_figures','*29999.png'))
for file in pdf_files+png_files+png_files_eval:
self.latex.append(
r'''
\begin{figure}
\centering
\includegraphics[width=1.\textwidth,height=0.45\textheight,keepaspectratio]{%s}
\end{figure}
''' % file)
    def save(self, pdf=False, filename='report.tex', open_pdf=True):
        data = ''.join(self.latex)
        full_filename = os.path.join(self.eval_dir, 'latex', filename)
with open(full_filename, 'w+') as f:
f.write(prolog)
f.write(data)
f.write(epilog)
if pdf:
from subprocess import check_output, Popen
check_output(['pdflatex', filename], cwd=os.path.dirname(full_filename))
if open_pdf:
Popen(['okular', filename.split('.')[0] + '.pdf'], cwd=os.path.dirname(full_filename))
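# Illustrative sketch (not part of the original module): a typical way to assemble a
# report. The directory paths are placeholders (assumed to already contain the
# ``latex`` and ``figures`` layout), and pdf=False keeps the example from needing a
# local pdflatex installation.
def _example_report(eval_dir='/tmp/eval', log_dir='/tmp/log'):
    report = Report(eval_dir, log_dir)
    report.merge_all_tex_files()     # pull in any per-metric .tex fragments
    report.include_all_figures()     # embed evaluation and training figures
    report.save(pdf=False)           # writes <eval_dir>/latex/report.tex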
| 26.392308
| 102
| 0.611192
|
1638ce1a77801eeef8f7931e7b266157a3dcbc70
| 911
|
py
|
Python
|
examples/npred_demo.py
|
joleroi/gammapy
|
c4e0c4bd74c79d30e0837559d18b7a1a269f70d9
|
[
"BSD-3-Clause"
] | null | null | null |
examples/npred_demo.py
|
joleroi/gammapy
|
c4e0c4bd74c79d30e0837559d18b7a1a269f70d9
|
[
"BSD-3-Clause"
] | null | null | null |
examples/npred_demo.py
|
joleroi/gammapy
|
c4e0c4bd74c79d30e0837559d18b7a1a269f70d9
|
[
"BSD-3-Clause"
] | null | null | null |
"""Test npred model image computation.
"""
from astropy.units import Quantity
from astropy.coordinates import Angle
from gammapy.datasets import FermiGalacticCenter
from gammapy.irf import EnergyDependentTablePSF
from gammapy.data import (SpectralCube,
compute_npred_cube,
convolve_cube)
filenames = FermiGalacticCenter.filenames()
spectral_cube = SpectralCube.read(filenames['diffuse_model'])
exposure_cube = SpectralCube.read(filenames['exposure_cube'])
psf = EnergyDependentTablePSF.read(filenames['psf'])
spectral_cube = spectral_cube.reproject_to(exposure_cube)
energy_bounds = Quantity([10, 30, 100, 500], 'GeV')
npred_cube = compute_npred_cube(spectral_cube,
exposure_cube,
energy_bounds)
offset_max = Angle(1, 'deg')
npred_cube_convolved = convolve_cube(npred_cube, psf, offset_max)
| 36.44
| 65
| 0.715697
|
3cb98b826371f4dfda09a39ed9c09c8f6ab7451b
| 847
|
py
|
Python
|
LaureatsBackEnd-master/laureats/migrations/0011_auto_20200111_1525.py
|
SanaaCHAOU/laureat_management_ENSAT
|
d769714f9f8cb9ebf90e02577547ec348c011461
|
[
"MIT"
] | null | null | null |
LaureatsBackEnd-master/laureats/migrations/0011_auto_20200111_1525.py
|
SanaaCHAOU/laureat_management_ENSAT
|
d769714f9f8cb9ebf90e02577547ec348c011461
|
[
"MIT"
] | null | null | null |
LaureatsBackEnd-master/laureats/migrations/0011_auto_20200111_1525.py
|
SanaaCHAOU/laureat_management_ENSAT
|
d769714f9f8cb9ebf90e02577547ec348c011461
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-01-11 14:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('laureats', '0010_auto_20200111_1458'),
]
operations = [
migrations.CreateModel(
name='Profession',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('libelle', models.CharField(default='', max_length=255)),
],
options={
'ordering': ['libelle'],
},
),
migrations.AlterField(
model_name='employe',
name='profession',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='employes', to='laureats.Profession'),
),
]
| 28.233333
| 132
| 0.570248
|
8bbf62fc2d1cdafa15fb2ddfbf38a9b559d67667
| 1,137
|
py
|
Python
|
study_management/migrations/0008_participantauthtoken.py
|
jdkizer9/ls2_app
|
8b4c37b44a673d1919a0e52b72f529b7e1abd2e3
|
[
"Apache-2.0"
] | null | null | null |
study_management/migrations/0008_participantauthtoken.py
|
jdkizer9/ls2_app
|
8b4c37b44a673d1919a0e52b72f529b7e1abd2e3
|
[
"Apache-2.0"
] | 7
|
2020-02-05T04:57:01.000Z
|
2022-02-10T06:51:23.000Z
|
study_management/migrations/0008_participantauthtoken.py
|
jdkizer9/ls2_app
|
8b4c37b44a673d1919a0e52b72f529b7e1abd2e3
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.1 on 2018-10-01 01:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('study_management', '0007_auto_20180929_2340'),
]
operations = [
migrations.CreateModel(
name='ParticipantAuthToken',
fields=[
('key', models.CharField(max_length=64, primary_key=True, serialize=False, verbose_name='Key')),
('last_used', models.DateTimeField(auto_now_add=True, verbose_name='Last Used')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='participant_auth_token', to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'Participant Auth Token',
'verbose_name_plural': 'Participant Auth Token',
},
),
]
| 37.9
| 178
| 0.64292
|
f6d41fb4ecb590149cc41c82fcc6f586d7057055
| 4,721
|
py
|
Python
|
lib/python2.7/site-packages/pyami/msc.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | null | null | null |
lib/python2.7/site-packages/pyami/msc.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | null | null | null |
lib/python2.7/site-packages/pyami/msc.py
|
leschzinerlab/myami-3.2-freeHand
|
974b8a48245222de0d9cfb0f433533487ecce60d
|
[
"MIT"
] | 1
|
2019-09-05T20:58:37.000Z
|
2019-09-05T20:58:37.000Z
|
#!/usr/bin/env python
import numpy
import quietscipy
from scipy import fftpack
import scipy.ndimage
import correlator
import numextension
import mrc
import imagefun
import peakfinder
debug = True
def pad(im, shape):
im2 = numpy.zeros(shape, im.dtype)
r0 = shape[0]/2 - im.shape[0]/2
c0 = shape[1]/2 - im.shape[1]/2
im2[r0:r0+im.shape[0], c0:c0+im.shape[1]] = im
return im2
def mrc_write(im, filename):
if debug:
mrc.write(im, filename)
def findBestTransformValue(image, reference, start, end, increment, transfunc, *args, **kwargs):
bestvalue = None
bestshift = None
bestsnr = 6.0
bestnewimage = None
for i, value in enumerate(numpy.arange(start, end, increment)):
newimage = transfunc(image, value, *args, **kwargs)
newimage = pad(newimage, reference.shape)
cor = correlator.phase_correlate(reference, newimage, zero=False)
mrc_write(cor, 'cor-%s-%03d.mrc' % (transfunc.__name__, i))
results = peakfinder.findSubpixelPeak(cor, lpf=1.2)
shift = correlator.wrap_coord(results['subpixel peak'], cor.shape)
snr = results['snr']
if debug:
print i, value, snr, shift
if snr > bestsnr:
bestsnr = snr
bestvalue = value
bestshift = shift
bestnewimage = newimage
return bestvalue, shift, cor, bestnewimage
def findBestRotation(image, reference, start, end, increment):
return findBestTransformValue(image, reference, start, end, increment, scipy.ndimage.rotate, reshape=False)
def findBestScale(image, reference, start, end, increment):
return findBestTransformValue(image, reference, start, end, increment, scipy.ndimage.zoom)
def findShift(image, reference, scale, angle):
image = scipy.ndimage.zoom(image, scale)
image = scipy.ndimage.rotate(image, angle)
image = pad(image, reference.shape)
cor = correlator.phase_correlate(reference, image, zero=False)
results = peakfinder.findSubpixelPeak(cor, lpf=1.2)
shift = correlator.wrap_coord(results['subpixel peak'], cor.shape)
mrc_write(cor, 'finalcor.mrc')
mrc_write(image, 'finalimage.mrc')
mrc_write(reference, 'reference.mrc')
return shift
def findRotationScale(image, reference, anglestart, angleend, angleinc, scalestart, scaleend, scaleinc):
## scale image to initial guess
scaleguess = (float(scalestart) + scaleend) / 2
print 'SCALEGUESS', scaleguess
image2 = scipy.ndimage.zoom(image, scaleguess)
result = findBestRotation(image2, reference, anglestart, angleend, angleinc)
angle = result[0]
print 'BEST ANGLE', angle
image2 = scipy.ndimage.rotate(image, angle)
result = findBestScale(image2, reference, scalestart, scaleend, scaleinc)
scale = result[0]
print 'BEST SCALE', scale
return angle, scale
def findRotationScaleShift(image, reference, anglestart, angleend, angleinc, scalestart, scaleend, scaleinc, prebin):
im = imagefun.bin(image, prebin)
ref = imagefun.bin(reference, prebin)
angle, scale = findRotationScale(im, ref, anglestart, angleend, angleinc, scalestart, scaleend, scaleinc)
if None in (angle, scale):
return None
shift = findShift(image, reference, scale, angle)
if shift is None:
return None
return angle, scale, shift
def main():
import mrc
im1 = mrc.read('/ami/data00/leginon/07sep26cal/rawdata/07sep26cal_00006m.mrc')
mag1 = 14500
im2 = mrc.read('/ami/data00/leginon/07sep26cal/rawdata/07sep26cal_00007m.mrc')
mag2 = 11500
'''
im1 = mrc.read('/ami/data00/leginon/07sep26cal/rawdata/07sep26cal_00002m.mrc')
mag1 = 50000
im2 = mrc.read('/ami/data00/leginon/07sep26cal/rawdata/07sep26cal_00003m.mrc')
mag2 = 29000
'''
'''
im1 = mrc.read('/ami/data00/leginon/07sep25jim/rawdata/07sep25jim_00005hl3.mrc')
mag1 = 7800
im2 = mrc.read('/ami/data00/leginon/07sep25jim/rawdata/07sep25jim_00005hl2.mrc')
mag2 = 6500
'''
'''
im1 = mrc.read('/ami/data00/leginon/07sep25jim/rawdata/07sep25jim_00004fa.mrc')
mag1 = 50000
im2 = mrc.read('/ami/data00/leginon/07sep25jim/rawdata/07sep25jim_00004fa2.mrc')
mag2 = 29000
'''
'''
im1 = mrc.read('../images/07apr03a/07apr03a_00003gr_00005sq_v01_00006hl.mrc')
mag1 = 5000
im2 = mrc.read('../images/07apr03a/07apr03a_00003gr_00005sq_v01.mrc')
mag2 = 800
'''
#im1 = mrc.read('../images/07sep17jim4/07sep17jim4_00001a.mrc')
#im2 = mrc.read('../images/07sep17jim4/07sep17jim4_00003a.mrc')
#im1 = mrc.read('im00.mrc')
#im2 = mrc.read('im01.mrc')
scale = float(mag2)/mag1
scalestart = scale - 0.02
scaleend = scale + 0.02
scaleinc = 0.005
anglestart = -3
angleend = 3
angleinc = 0.25
bin = 4
binim1 = imagefun.bin(im1, bin)
binim2 = imagefun.bin(im2, bin)
rotation, scale = findRotationScale(binim1, binim2, anglestart, angleend, angleinc, scalestart, scaleend, scaleinc)
shift = findShift(im1, im2, scale, rotation)
print 'BEST SHIFT', shift
if __name__ == '__main__':
main()
| 31.898649
| 117
| 0.73692
|
f0b49ff9b29f0c1c3691e69038d26096e236261c
| 3,568
|
py
|
Python
|
mongoadmin/contenttypes/views.py
|
carlware/django-mongoadmin
|
3ef5d86ff2658ee1833cb431ecb43dfb98afb8b2
|
[
"BSD-3-Clause"
] | 50
|
2015-01-12T15:46:53.000Z
|
2021-11-19T15:24:24.000Z
|
mongoadmin/contenttypes/views.py
|
carlware/django-mongoadmin
|
3ef5d86ff2658ee1833cb431ecb43dfb98afb8b2
|
[
"BSD-3-Clause"
] | 8
|
2015-01-13T23:35:09.000Z
|
2021-01-05T03:01:19.000Z
|
mongoadmin/contenttypes/views.py
|
carlware/django-mongoadmin
|
3ef5d86ff2658ee1833cb431ecb43dfb98afb8b2
|
[
"BSD-3-Clause"
] | 26
|
2015-03-31T07:03:02.000Z
|
2019-06-18T03:21:31.000Z
|
from __future__ import unicode_literals
from django import http
from django.contrib.sites.models import Site, get_current_site
from django.utils.translation import ugettext as _
from mongoadmin.contenttypes.models import ContentType
def shortcut(request, content_type_id, object_id):
"""
Redirect to an object's page based on a content-type ID and an object ID.
"""
# Look up the object, making sure it's got a get_absolute_url() function.
try:
content_type = ContentType.objects.get(pk=content_type_id)
except (ContentType.DoesNotExist, ValueError):
raise http.Http404(_("Content type %(ct_id)s object %(obj_id)s doesn't exist") %
{'ct_id': content_type_id, 'obj_id': object_id})
if not content_type.model_class():
raise http.Http404(_("Content type %(ct_id)s object has no associated model") %
{'ct_id': content_type_id})
try:
obj = content_type.get_object_for_this_type(pk=object_id)
except (content_type.model_class().DoesNotExist, ValueError):
raise http.Http404(_("Content type %(ct_id)s object %(obj_id)s doesn't exist") %
{'ct_id': content_type_id, 'obj_id': object_id})
try:
get_absolute_url = obj.get_absolute_url
except AttributeError:
raise http.Http404(_("%(ct_name)s objects don't have a get_absolute_url() method") %
{'ct_name': content_type.name})
absurl = get_absolute_url()
# Try to figure out the object's domain, so we can do a cross-site redirect
# if necessary.
# If the object actually defines a domain, we're done.
if absurl.startswith('http://') or absurl.startswith('https://'):
return http.HttpResponseRedirect(absurl)
# Otherwise, we need to introspect the object's relationships for a
# relation to the Site object
object_domain = None
if Site._meta.installed:
opts = obj._meta
        # First, look for a many-to-many relationship to Site.
for field in opts.many_to_many:
if field.rel.to is Site:
try:
# Caveat: In the case of multiple related Sites, this just
# selects the *first* one, which is arbitrary.
object_domain = getattr(obj, field.name).all()[0].domain
except IndexError:
pass
if object_domain is not None:
break
# Next, look for a many-to-one relationship to Site.
if object_domain is None:
for field in obj._meta.fields:
if field.rel and field.rel.to is Site:
try:
object_domain = getattr(obj, field.name).domain
except Site.DoesNotExist:
pass
if object_domain is not None:
break
# Fall back to the current site (if possible).
if object_domain is None:
try:
object_domain = get_current_site(request).domain
except Site.DoesNotExist:
pass
# If all that malarkey found an object domain, use it. Otherwise, fall back
# to whatever get_absolute_url() returned.
if object_domain is not None:
protocol = 'https' if request.is_secure() else 'http'
return http.HttpResponseRedirect('%s://%s%s'
% (protocol, object_domain, absurl))
else:
return http.HttpResponseRedirect(absurl)
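# Illustrative sketch (not part of the original module): how this view is usually wired
# into a URLconf. The route below mirrors the stock django.contrib.contenttypes
# shortcut pattern and is an assumption, not something this app defines.
def _example_urlpatterns():
    from django.conf.urls import url
    return [
        url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', shortcut),
    ]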
| 41.011494
| 92
| 0.610426
|
273c93fcf9f421296aab5ca3269eedf24dd190dd
| 31,273
|
py
|
Python
|
examples/ezusb.py
|
karpierz/libusb
|
d9cbd2dd7b21f88b5533b7a750f85008402f305a
|
[
"Zlib"
] | 25
|
2017-09-04T04:02:31.000Z
|
2022-03-04T20:28:56.000Z
|
examples/ezusb.py
|
karpierz/libusb
|
d9cbd2dd7b21f88b5533b7a750f85008402f305a
|
[
"Zlib"
] | 7
|
2019-05-21T14:23:02.000Z
|
2022-03-21T20:50:17.000Z
|
examples/ezusb.py
|
karpierz/libusb
|
d9cbd2dd7b21f88b5533b7a750f85008402f305a
|
[
"Zlib"
] | 13
|
2019-06-26T01:05:59.000Z
|
2022-03-21T07:27:11.000Z
|
# Copyright (c) 2016-2021 Adam Karpierz
# Licensed under the zlib/libpng License
# https://opensource.org/licenses/Zlib
# Copyright © 2001 Stephen Williams (steve@icarus.com)
# Copyright © 2001-2002 David Brownell (dbrownell@users.sourceforge.net)
# Copyright © 2008 Roger Williams (rawqux@users.sourceforge.net)
# Copyright © 2012 Pete Batard (pete@akeo.ie)
# Copyright © 2013 Federico Manzan (f.manzan@gmail.com)
#
# This source code is free software; you can redistribute it
# and/or modify it in source code form under the terms of the GNU
# General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
import sys
import errno
import ctypes as ct
import libusb as usb
FX_TYPE_UNDEFINED = -1
FX_TYPE_AN21 = 0 # Original AnchorChips parts
FX_TYPE_FX1 = 1 # Updated Cypress versions
FX_TYPE_FX2 = 2 # USB 2.0 versions
FX_TYPE_FX2LP = 3 # Updated FX2
FX_TYPE_FX3 = 4 # USB 3.0 versions
FX_TYPE_MAX = 5
FX_TYPE_NAMES = ("an21", "fx", "fx2", "fx2lp", "fx3")
IMG_TYPE_UNDEFINED = -1
IMG_TYPE_HEX = 0 # Intel HEX
IMG_TYPE_IIC = 1 # Cypress 8051 IIC
IMG_TYPE_BIX = 2 # Cypress 8051 BIX
IMG_TYPE_IMG = 3 # Cypress IMG format
IMG_TYPE_MAX = 4
IMG_TYPE_NAMES = ("Intel HEX", "Cypress 8051 IIC", "Cypress 8051 BIX", "Cypress IMG format")
# Automatically identified devices (VID, PID, type, designation).
# TODO: Could use some validation. Also where's the FX2?
#
class fx_known_device(ct.Structure):
_fields_ = [
("vid", ct.c_uint16),
("pid", ct.c_uint16),
("type", ct.c_int),
("designation", ct.c_char_p), # const char*
]
FX_KNOWN_DEVICES = (
fx_known_device(0x0547, 0x2122, FX_TYPE_AN21, b"Cypress EZ-USB (2122S)"),
fx_known_device(0x0547, 0x2125, FX_TYPE_AN21, b"Cypress EZ-USB (2121S/2125S)"),
fx_known_device(0x0547, 0x2126, FX_TYPE_AN21, b"Cypress EZ-USB (2126S)"),
fx_known_device(0x0547, 0x2131, FX_TYPE_AN21, b"Cypress EZ-USB (2131Q/2131S/2135S)"),
fx_known_device(0x0547, 0x2136, FX_TYPE_AN21, b"Cypress EZ-USB (2136S)"),
fx_known_device(0x0547, 0x2225, FX_TYPE_AN21, b"Cypress EZ-USB (2225)"),
fx_known_device(0x0547, 0x2226, FX_TYPE_AN21, b"Cypress EZ-USB (2226)"),
fx_known_device(0x0547, 0x2235, FX_TYPE_AN21, b"Cypress EZ-USB (2235)"),
fx_known_device(0x0547, 0x2236, FX_TYPE_AN21, b"Cypress EZ-USB (2236)"),
fx_known_device(0x04b4, 0x6473, FX_TYPE_FX1, b"Cypress EZ-USB FX1"),
fx_known_device(0x04b4, 0x8613, FX_TYPE_FX2LP, b"Cypress EZ-USB FX2LP (68013A/68014A/68015A/68016A)"),
fx_known_device(0x04b4, 0x00f3, FX_TYPE_FX3, b"Cypress FX3"),
)
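# Illustrative sketch (not part of the original example): resolving a VID/PID pair
# against the table above. The helper itself is an assumption; the constants are the
# ones defined in this module.
def _lookup_known_device(vid, pid):
    for known in FX_KNOWN_DEVICES:
        if known.vid == vid and known.pid == pid:
            return known.type, known.designation
    return FX_TYPE_UNDEFINED, None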
# This file contains functions for uploading firmware into Cypress
# EZ-USB microcontrollers. These chips use control endpoint 0 and vendor
# specific commands to support writing into the on-chip SRAM. They also
# support writing into the CPUCS register, which is how we reset the
# processor after loading firmware (including the reset vector).
#
# These Cypress devices are 8-bit 8051 based microcontrollers with
# special support for USB I/O. They come in several packages, and
# some can be set up with external memory when device costs allow.
# Note that the design was originally by AnchorChips, so you may find
# references to that vendor (which was later merged into Cypress).
# The Cypress FX parts are largely compatible with the AnchorChips ones.
# Verbosity level (default 1).
# Can be increased or decreased with options v/q
verbose = 1 # int
# return True if [addr,addr+size] includes external RAM
# for Anchorchips EZ-USB or Cypress EZ-USB FX
#static
#@annotate(addr=ct.c_uint32, size=ct.c_size_t)
def fx_is_external(addr, size) -> bool:
# with 8KB RAM, 0x0000-0x1b3f can be written
# we can't tell if it's a 4KB device here
if addr <= 0x1b3f:
return (addr + size) > 0x1b40
# there may be more RAM; unclear if we can write it.
# some bulk buffers may be unused, 0x1b3f-0x1f3f
# firmware can set ISODISAB for 2KB at 0x2000-0x27ff
return True
# return True if [addr,addr+size] includes external RAM
# for Cypress EZ-USB FX2
#static
#@annotate(addr=ct.c_uint32, size=ct.c_size_t)
def fx2_is_external(addr, size) -> bool:
# 1st 8KB for data/code, 0x0000-0x1fff
if addr <= 0x1fff:
return (addr + size) > 0x2000
# and 512 for data, 0xe000-0xe1ff
elif addr >= 0xe000 and addr <= 0xe1ff:
return (addr + size) > 0xe200
# otherwise, it's certainly external
else:
return True
# return True if [addr,addr+size] includes external RAM
# for Cypress EZ-USB FX2LP
#static
#@annotate(addr=ct.c_uint32, size=ct.c_size_t)
def fx2lp_is_external(addr, size) -> bool:
# 1st 16KB for data/code, 0x0000-0x3fff
if addr <= 0x3fff:
return (addr + size) > 0x4000
# and 512 for data, 0xe000-0xe1ff
elif addr >= 0xe000 and addr <= 0xe1ff:
return (addr + size) > 0xe200
# otherwise, it's certainly external
else:
return True
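# Illustrative sanity check (not part of the original example) of the three memory-map
# predicates above; the addresses are assumptions chosen to sit on either side of each
# chip family's internal/external boundary.
def _memory_map_examples():
    assert fx_is_external(0x2000, 16)              # beyond AN21/FX on-chip RAM
    assert not fx2_is_external(0x1f00, 0x100)      # last on-chip page of the FX2
    assert fx2lp_is_external(0x3f00, 0x200)        # crosses the 16KB FX2LP boundary
    assert not fx2lp_is_external(0xe000, 0x200)    # scratch RAM at 0xe000-0xe1ff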
#*****************************************************************************#
# These are the requests (bRequest) that the bootstrap loader is expected
# to recognize. The codes are reserved by Cypress, and these values match
# what EZ-USB hardware, or "Vend_Ax" firmware (2nd stage loader) uses.
# Cypress' "a3load" is nice because it supports both FX and FX2, although
# it doesn't have the EEPROM support (subset of "Vend_Ax").
RW_INTERNAL = 0xA0 # hardware implements this one
RW_MEMORY = 0xA3
# Issues the specified vendor-specific write request.
#static
#@annotate(device=ct.POINTER(usb.device_handle), label=const char*)
# ct.c_uint8 opcode, ct.c_uint32 addr,
# const ct.POINTER(ct.c_ubyte) data, ct.c_size_t size)
def ezusb_write(device, label, opcode, addr, data, size) -> int:
from fxload import logerror
global verbose
if verbose > 1:
logerror("{}, addr {:#010x} len %4u ({:#06x})\n",
label, addr, size, size)
status = usb.control_transfer(device,
usb.LIBUSB_ENDPOINT_OUT |
usb.LIBUSB_REQUEST_TYPE_VENDOR |
usb.LIBUSB_RECIPIENT_DEVICE,
opcode,
addr & 0xFFFF, addr >> 16,
ct.cast(data, ct.POINTER(ct.c_ubyte)),
ct.c_uint16(size),
1000)
if status != ct.c_int(size).value:
if status < 0:
logerror("{}: {}\n", label, usb.error_name(status))
else:
logerror("{} ==> {}\n", label, status)
return 0 if status >= 0 else -errno.EIO
# Issues the specified vendor-specific read request.
#static
#@annotate(device=ct.POINTER(usb.device_handle), label=const char*)
# ct.c_uint8 opcode, ct.c_uint32 addr,
# const ct.POINTER(ct.c_ubyte) data, ct.c_size_t size)
def ezusb_read(device, label, opcode, addr, data, size) -> int:
from fxload import logerror
global verbose
if verbose > 1:
logerror("{}, addr {:#010x} len %4u ({:#06x})\n",
label, addr, size, size)
status = usb.control_transfer(device,
usb.LIBUSB_ENDPOINT_IN |
usb.LIBUSB_REQUEST_TYPE_VENDOR |
usb.LIBUSB_RECIPIENT_DEVICE,
opcode,
addr & 0xFFFF, addr >> 16,
ct.cast(data, ct.POINTER(ct.c_ubyte)),
ct.c_uint16(size),
1000)
if status != ct.c_int(size).value:
if status < 0:
logerror("{}: {}\n", label, usb.error_name(status))
else:
logerror("{} ==> {}\n", label, status)
return 0 if status >= 0 else -errno.EIO
# Modifies the CPUCS register to stop or reset the CPU.
# Returns False on error.
#static
#@annotate(device=ct.POINTER(usb.device_handle), addr=ct.c_uint32)
def ezusb_cpucs(device, addr, do_run: bool) -> bool:
from fxload import logerror
global verbose
data = ct.c_uint8(0x00 if do_run else 0x01)
if verbose:
logerror("{}\n", "stop CPU" if data else "reset CPU")
status = usb.control_transfer(device,
usb.LIBUSB_ENDPOINT_OUT |
usb.LIBUSB_REQUEST_TYPE_VENDOR |
usb.LIBUSB_RECIPIENT_DEVICE,
RW_INTERNAL,
addr & 0xFFFF, addr >> 16,
ct.byref(data), 1,
                                  1000)
if (status != 1 and
# We may get an I/O error from libusb as the device disappears
(not do_run or status != usb.LIBUSB_ERROR_IO)):
mesg = "can't modify CPUCS"
if status < 0:
logerror("{}: {}\n", mesg, usb.error_name(status))
else:
logerror("{}\n", mesg)
return False
else:
return True
# Send an FX3 jump-to-address command
# Returns False on error.
#static
#@annotate(device=ct.POINTER(usb.device_handle), addr=ct.c_uint32)
def ezusb_fx3_jump(device, addr) -> bool:
from fxload import logerror
global verbose
if verbose:
logerror("transfer execution to Program Entry at {:#010x}\n", addr)
status = usb.control_transfer(device,
usb.LIBUSB_ENDPOINT_OUT |
usb.LIBUSB_REQUEST_TYPE_VENDOR |
usb.LIBUSB_RECIPIENT_DEVICE,
RW_INTERNAL,
addr & 0xFFFF, addr >> 16,
                                  None, 0,
1000)
# We may get an I/O error from libusb as the device disappears
if status != 0 and status != usb.LIBUSB_ERROR_IO:
mesg = "failed to send jump command"
if status < 0:
logerror("{}: {}\n", mesg, usb.error_name(status))
else:
logerror("{}\n", mesg)
return False
else:
return True
#*****************************************************************************#
# Parse an Intel HEX image file and invoke the poke() function on the
# various segments to implement policies such as writing to RAM (with
# a one or two stage loader setup, depending on the firmware) or to
# EEPROM (two stages required).
#
# image - the hex image file
# context - for use by poke()
# is_external - if non-null, used to check which segments go into
# external memory (writable only by software loader)
# poke - called with each memory segment; errors indicated
# by returning negative values.
#
# Caller is responsible for halting CPU as needed, such as when
# overwriting a second stage loader.
#static
#@annotate(image=FILE*, context=void*,
# is_external=bool (*)(ct.c_uint32 addr, ct.c_size_t len),
# poke=int (*)(void* context, ct.c_uint32 addr, bool external,
# const ct.POINTER(ct.c_ubyte) data, ct.c_size_t len))
def parse_ihex(image, context, is_external, poke) -> int:
from fxload import logerror
global verbose
data = (ct.c_ubyte * 1023)()
# Read the input file as an IHEX file, and report the memory segments
# as we go. Each line holds a max of 16 bytes, but uploading is
# faster (and EEPROM space smaller) if we merge those lines into larger
# chunks. Most hex files keep memory segments together, which makes
# such merging all but free. (But it may still be worth sorting the
# hex files to make up for undesirable behavior from tools.)
#
# Note that EEPROM segments max out at 1023 bytes; the upload protocol
# allows segments of up to 64 KBytes (more than a loader could handle).
data_len = 0 # ct.c_size_t
data_addr = ct.c_uint32(0)
external = False # bool
first_line = True
    while True:
        line = image.readline()
        if not line:
            logerror("EOF without EOF record!\n")
            break
        if isinstance(line, bytes):
            line = line.decode("ascii", "replace")
        buf = line.strip()
        if not buf:
            continue
        # EXTENSION: "# comment-till-end-of-line", for copyrights etc
        if buf[0] == '#':
            continue
        if buf[0] != ':':
            logerror("not an ihex record: {}", buf)
            return -2
        if verbose >= 3:
            logerror("** LINE: {}\n", buf)
        # Read the length field (up to 16 bytes)
        size = int(buf[1:3], 16)
        # Read the target offset (address up to 64KB)
        off = int(buf[3:7], 16)
        # Initialize data_addr
        if first_line:
            data_addr = off
            first_line = False
        # Read the record type
        rec_type = int(buf[7:9], 16)
        # If this is an EOF record, then make it so.
        if rec_type == 1:
            if verbose >= 2:
                logerror("EOF on hexfile\n")
            break
        if rec_type != 0:
            logerror("unsupported record type: {}\n", rec_type)
            return -3
        if size * 2 + 11 > len(buf):
            logerror("record too short?\n")
            return -4
        # FIXME check for _physically_ contiguous not just virtually
        # e.g. on FX2 0x1f00-0x2100 includes both on-chip and external
        # memory so it's not really contiguous
        # flush the saved data if it's not contiguous,
        # or when we've buffered as much as we can.
        if (data_len != 0 and (off != (data_addr + data_len) or
                               (data_len + size) > ct.sizeof(data))):
            if is_external: external = is_external(data_addr, data_len)
            rc = poke(context, data_addr, external, data, data_len)
            if rc < 0:
                return -1
            data_addr = off
            data_len = 0
        # append to saved data, flush later
        for idx in range(size):
            data[data_len + idx] = int(buf[9 + 2 * idx:11 + 2 * idx], 16)
        data_len += size
# flush any data remaining
if data_len != 0:
if is_external: external = is_external(data_addr, data_len)
rc = poke(context, data_addr, external, data, data_len)
if rc < 0:
return -1
return 0
# Parse a binary image file and write it as is to the target.
# Applies to Cypress BIX images for RAM or Cypress IIC images
# for EEPROM.
#
# image - the BIX image file
# context - for use by poke()
# is_external - if non-null, used to check which segments go into
# external memory (writable only by software loader)
# poke - called with each memory segment; errors indicated
# by returning negative values.
#
# Caller is responsible for halting CPU as needed, such as when
# overwriting a second stage loader.
#static
#@annotate(image=FILE*, context=void*,
# is_external=bool (*)(ct.c_uint32 addr, ct.c_size_t len),
# poke=int (*)(void* context, ct.c_uint32 addr, bool external,
# const ct.POINTER(ct.c_ubyte) data, ct.c_size_t len))
def parse_bin(image, context, is_external, poke) -> int:
data = (ct.c_ubyte * 4096)()
data_len = 0 # ct.c_size_t
data_addr = ct.c_uint32(0)
external = False # bool
while True:
data_len = image.readinto(data)
if data_len == 0:
break
if is_external: external = is_external(data_addr, data_len)
rc = poke(context, data_addr, external, data, data_len)
if rc < 0:
return -1
data_addr += data_len
    return 0  # readinto() only returns 0 at EOF, so the whole file was consumed
# Parse a Cypress IIC image file and invoke the poke() function on the
# various segments for writing to RAM
#
# image - the IIC image file
# context - for use by poke()
# is_external - if non-null, used to check which segments go into
# external memory (writable only by software loader)
# poke - called with each memory segment; errors indicated
# by returning negative values.
#
# Caller is responsible for halting CPU as needed, such as when
# overwriting a second stage loader.
#static
#@annotate(image=FILE*, context=void*,
# is_external=bool (*)(ct.c_uint32 addr, ct.c_size_t len),
# poke=int (*)(void* context, ct.c_uint32 addr, bool external,
# const ct.POINTER(ct.c_ubyte) data, ct.c_size_t len))
def parse_iic(image, context, is_external, poke) -> int:
from fxload import logerror
data = (ct.c_ubyte * 4096)()
block_header = (ct.c_uint8 * 4)()
data_len = 0 # ct.c_size_t
data_addr = ct.c_uint32(0)
external = False # bool
    initial_pos = image.tell()
    image.seek(0, 2)                  # 2 == SEEK_END: find the file size
    file_size = image.tell()
    image.seek(initial_pos, 0)        # 0 == SEEK_SET: rewind to where we started
    while True:
        # Ignore the trailing reset IIC data (5 bytes)
        if image.tell() >= (file_size - 5):
            break
if image.readinto(block_header) != ct.sizeof(block_header):
logerror("unable to read IIC block header\n")
return -1
data_len = (block_header[0] << 8) + block_header[1]
data_addr = (block_header[2] << 8) + block_header[3]
if data_len > ct.sizeof(data):
# If this is ever reported as an error, switch to using malloc/realloc
logerror("IIC data block too small - please report this error to libusb.info\n")
return -1
        chunk = image.read(data_len)
        if len(chunk) != data_len:
            logerror("read error\n")
            return -1
        ct.memmove(data, chunk, data_len)
if is_external: external = is_external(data_addr, data_len)
rc = poke(context, data_addr, external, data, data_len)
if rc < 0:
return -1
return 0
# the parse call will be selected according to the image type
#static
_parse = [
parse_ihex,
parse_iic,
parse_bin,
None
]
#*****************************************************************************#
# For writing to RAM using a first (hardware) or second (software)
# stage loader and 0xA0 or 0xA3 vendor requests
ram_mode = ct.c_int
(
_undef,
internal_only, # hardware first-stage loader
skip_internal, # first phase, second-stage loader
skip_external, # second phase, second-stage loader
) = (0, 1, 2, 3)
class ram_poke_context(ct.Structure):
_fields_ = [
("device", ct.POINTER(usb.device_handle)),
("mode", ram_mode),
("total", ct.c_size_t),
("count", ct.c_size_t),
]
RETRY_LIMIT = 5
#static
#@annotate(context=void*, addr=ct.c_uint32, external=bool,
# data=const ct.POINTER(ct.c_ubyte), size=ct.c_size_t)
def ram_poke(context, addr, external, data, size) -> int:
from fxload import logerror
ctx = ct.cast(context, ct.POINTER(ram_poke_context))[0]
ctx_mode = ctx.mode
if ctx_mode == internal_only: # CPU should be stopped
if external:
logerror("can't write {} bytes external memory at {:#010x}\n", size, addr)
return -errno.EINVAL
elif ctx_mode == skip_internal: # CPU must be running
if not external:
if verbose >= 2:
logerror("SKIP on-chip RAM, {} bytes at {:#010x}\n", size, addr)
return 0
elif ctx_mode == skip_external: # CPU should be stopped
if external:
if verbose >= 2:
logerror("SKIP external RAM, {} bytes at {:#010x}\n", size, addr)
return 0
elif ctx_mode == _undef:
logerror("bug\n")
return -errno.EDOM
else:
logerror("bug\n")
return -errno.EDOM
ctx.total += size
ctx.count += 1
# Retry this till we get a real error. Control messages are not
    # NAKed (just dropped), so a timeout means a real problem.
retry = 0
while True:
rc = ezusb_write(ctx.device,
"write external" if external else "write on-chip",
RW_MEMORY if external else RW_INTERNAL,
addr, data, size)
if rc >= 0 or retry >= RETRY_LIMIT:
break
if rc != usb.LIBUSB_ERROR_TIMEOUT:
break
retry += 1
return rc
# Load a Cypress Image file into target RAM.
# See http://www.cypress.com/?docID=41351 (AN76405 PDF) for more info.
#static
#@annotate(device=ct.POINTER(usb.device_handle), path=str)
def fx3_load_ram(device, path) -> int:
from fxload import logerror
global verbose
bBuf = ct.POINTER(ct.c_ubyte)
hBuf = (ct.c_ubyte * 4)()
blBuf = (ct.c_ubyte * 4)()
rBuf = (ct.c_ubyte * 4096)()
try:
image = open(path, "rb")
except:
logerror("unable to open '{}' for input\n", path)
return -2
if verbose:
logerror("open firmware image {} for RAM upload\n", path)
with image:
# Read header
if image.readinto(hBuf) != ct.sizeof(hBuf):
logerror("could not read image header")
return -3
# check "CY" signature byte and format
        if hBuf[0] != ord('C') or hBuf[1] != ord('Y'):
logerror("image doesn't have a CYpress signature\n")
return -3
# Check bImageType
bImageType = hBuf[3]
if bImageType == 0xB0:
if verbose:
logerror("normal FW binary {} image with checksum\n", "data" if hBuf[2] & 0x01 else "executable")
elif bImageType == 0xB1:
logerror("security binary image is not currently supported\n")
return -3
elif bImageType == 0xB2:
logerror("VID:PID image is not currently supported\n")
return -3
else:
logerror("invalid image type {:#04X}\n", hBuf[3])
return -3
# Read the bootloader version
if verbose:
if ezusb_read(device, "read bootloader version", RW_INTERNAL, 0xFFFF0020, blBuf, 4) < 0:
logerror("Could not read bootloader version\n")
return -8
logerror("FX3 bootloader version: {:#04X}{:02X}{:02X}{:02X}\n",
blBuf[3], blBuf[2], blBuf[1], blBuf[0])
if verbose:
logerror("writing image...\n")
dLength = ct.c_uint32()
dAddress = ct.c_uint32()
dCheckSum = 0 # ct.c_uint32
while True:
if ((image.fread(ct.byref(dLength), ct.sizeof(ct.c_uint32)) != ct.sizeof(ct.c_uint32)) or # read dLength
(image.fread(ct.byref(dAddress), ct.sizeof(ct.c_uint32)) != ct.sizeof(ct.c_uint32))): # read dAddress
logerror("could not read image")
return -3
if dLength == 0:
break # done
# coverity[tainted_data]
try:
dImageBuf = (ct.c_uint32 * dLength)()
ct.memset(dImageBuf, b"\0", dLength * ct.sizeof(ct.c_uint32))
except:
logerror("could not allocate buffer for image chunk\n")
return -4
# read sections
if image.fread(dImageBuf, ct.sizeof(ct.c_uint32) * dLength) != ct.sizeof(ct.c_uint32) * dLength:
logerror("could not read image")
return -3
for i in range(dLength):
dCheckSum += dImageBuf[i]
dLength <<= 2 # convert to Byte length
bBuf = ct.cast(dImageBuf, ct.POINTER(ct.c_ubyte))
while dLength > 0:
#dLen # ct.c_uint32
dLen = min(dLength, 4096) # 4K max
if (ezusb_write(device, "write firmware", RW_INTERNAL, dAddress, bBuf, dLen) < 0 or
ezusb_read (device, "read firmware", RW_INTERNAL, dAddress, rBuf, dLen) < 0):
logerror("R/W error\n")
return -5
# Verify data: rBuf with bBuf
for i in range(dLen):
if rBuf[i] != bBuf[i]:
logerror("verify error")
return -6
dLength -= dLen
bBuf += dLen
dAddress += dLen
# read pre-computed checksum data
dExpectedCheckSum = ct.c_uint32()
if (image.fread(ct.byref(dExpectedCheckSum), ct.sizeof(ct.c_uint32)) != ct.sizeof(ct.c_uint32) or
dCheckSum != dExpectedCheckSum):
logerror("checksum error\n")
return -7
# transfer execution to Program Entry
if not ezusb_fx3_jump(device, dAddress):
return -6
return 0
# This function uploads the firmware from the given file into RAM.
# Stage == 0 means this is a single stage load (or the first of
# two stages). Otherwise it's the second of two stages; the
# caller having preloaded the second stage loader.
#
# The target processor is reset at the end of this upload.
#@annotate(device=ct.POINTER(usb.device_handle), path=str, fx_type=ct.c_int, img_type=ct.c_int, stage=ct.c_int)
def ezusb_load_ram(device, path, fx_type, img_type, stage) -> int:
# Load a firmware file into target RAM. device is the open libusb
# device, and the path is the name of the source file. Open the file,
# parse the bytes, and write them in one or two phases.
#
# If stage == 0, this uses the first stage loader, built into EZ-USB
# hardware but limited to writing on-chip memory or CPUCS. Everything
# is written during one stage, unless there's an error such as the image
# holding data that needs to be written to external memory.
#
# Otherwise, things are written in two stages. First the external
# memory is written, expecting a second stage loader to have already
# been loaded. Then file is re-parsed and on-chip memory is written.
from fxload import logerror
global verbose
if fx_type == FX_TYPE_FX3:
return fx3_load_ram(device, path)
try:
image = open(path, "rb")
except:
logerror("{}: unable to open for input.\n", path)
return -2
if verbose > 1:
logerror("open firmware image {} for RAM upload\n", path)
with image:
if img_type == IMG_TYPE_IIC:
iic_header = (ct.c_uint8 * 8)()
if (image.readinto(iic_header) != ct.sizeof(iic_header) or
((fx_type == FX_TYPE_FX2LP or
fx_type == FX_TYPE_FX2) and iic_header[0] != 0xC2) or
(fx_type == FX_TYPE_AN21 and iic_header[0] != 0xB2) or
(fx_type == FX_TYPE_FX1 and iic_header[0] != 0xB6)):
logerror("IIC image does not contain executable code - cannot load to RAM.\n")
return -1
cpucs_addr = None
is_external = None # bool (*)(ct.c_uint32 off, ct.c_size_t len)
# EZ-USB original/FX and FX2 devices differ, apart from the 8051 core
if fx_type == FX_TYPE_FX2LP:
cpucs_addr = 0xe600
is_external = fx2lp_is_external
elif fx_type == FX_TYPE_FX2:
cpucs_addr = 0xe600
is_external = fx2_is_external
else:
cpucs_addr = 0x7f92
is_external = fx_is_external
ctx = ram_poke_context()
# use only first stage loader?
if stage == 0:
ctx.mode = internal_only
# if required, halt the CPU while we overwrite its code/data
if cpucs_addr is not None and not ezusb_cpucs(device, cpucs_addr, False):
return -1
# 2nd stage, first part? loader was already uploaded
else:
ctx.mode = skip_internal
# let CPU run; overwrite the 2nd stage loader later
if verbose:
logerror("2nd stage: write external memory\n")
# scan the image, first (maybe only) time
ctx.device = device
ctx.total = 0
ctx.count = 0
status = _parse[img_type](image, ct.byref(ctx), is_external, ram_poke)
if status < 0:
logerror("unable to upload {}\n", path)
return status
# second part of 2nd stage: rescan
# TODO: what should we do for non HEX images there?
if stage:
ctx.mode = skip_external
# if needed, halt the CPU while we overwrite the 1st stage loader
if cpucs_addr is not None and not ezusb_cpucs(device, cpucs_addr, False):
return -1
# at least write the interrupt vectors (at 0x0000) for reset!
            image.seek(0)  # rewind and re-parse from the start
if verbose:
logerror("2nd stage: write on-chip memory\n")
status = parse_ihex(image, ct.byref(ctx), is_external, ram_poke)
if status < 0:
logerror("unable to completely upload {}\n", path)
return status
if verbose and ctx.count != 0:
logerror("... WROTE: {} bytes, {} segments, avg {}\n",
ctx.total, ctx.count, ctx.total // ctx.count)
# if required, reset the CPU so it runs what we just uploaded
if cpucs_addr is not None and not ezusb_cpucs(device, cpucs_addr, True):
return -1
return 0
# This function uploads the firmware from the given file into EEPROM.
# This uses the right CPUCS address to terminate the EEPROM load with
# a reset command where FX parts behave differently than FX2 ones.
# The configuration byte is as provided here (zero for an21xx parts)
# and the EEPROM type is set so that the microcontroller will boot
# from it.
#
# The caller must have preloaded a second stage loader that knows
# how to respond to the EEPROM write request.
#@annotate(device=ct.POINTER(usb.device_handle), path=str, fx_type=ct.c_int, img_type=ct.c_int, config=ct.c_int)
def ezusb_load_eeprom(device, path, fx_type, img_type, config) -> int:
raise NotImplementedError()
| 35.416761
| 119
| 0.593004
|
7d36bd67e156faa216406eb5de2f71c6ae519ce4
| 3,485
|
py
|
Python
|
gelato/gelato/settings.py
|
forfrt/Gelato
|
fde9cde624658d7168ce56e3606ee9749ad84ac2
|
[
"Apache-2.0"
] | null | null | null |
gelato/gelato/settings.py
|
forfrt/Gelato
|
fde9cde624658d7168ce56e3606ee9749ad84ac2
|
[
"Apache-2.0"
] | null | null | null |
gelato/gelato/settings.py
|
forfrt/Gelato
|
fde9cde624658d7168ce56e3606ee9749ad84ac2
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for gelato project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sdh5(m!pm73((617im-6))8q0o*ar)r*re#vukl8ngeen$o#pc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["178.62.33.38", "140.82.42.69"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gelato',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gelato.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gelato.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'gelato',
'USER': 'root', # TODO: add a gelato account
'PASSWORD': 'gelato',
'HOST': '178.62.33.38',
'PORT': '3306',
},
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'rtfeng12@gmail.com'
EMAIL_HOST_PASSWORD = 'feng1223'
EMAIL_USE_TLS = True
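# Illustrative sketch (not part of the original settings): credentials such as
# SECRET_KEY and EMAIL_HOST_PASSWORD are normally read from the environment rather
# than hard-coded; the environment variable names below are assumptions, and the
# hard-coded values above remain the fallback.
SECRET_KEY = os.environ.get('GELATO_SECRET_KEY', SECRET_KEY)
EMAIL_HOST_PASSWORD = os.environ.get('GELATO_EMAIL_PASSWORD', EMAIL_HOST_PASSWORD)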
| 25.625
| 91
| 0.688953
|
37b20b404f5014f19f19e898b6f77ef45e157b01
| 4,452
|
py
|
Python
|
LeetCode-All-Solution/Python3/LC-0094-Binary-Tree-Inorder-Traversal.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
LeetCode-All-Solution/Python3/LC-0094-Binary-Tree-Inorder-Traversal.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
LeetCode-All-Solution/Python3/LC-0094-Binary-Tree-Inorder-Traversal.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0094-Binary-Tree-Inorder-Traversal.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-03-06
=================================================================="""
import sys
import time
from typing import List, Optional
# import collections
"""
LeetCode - 0094 - (Easy) - Binary Tree Inorder Traversal
https://leetcode.com/problems/binary-tree-inorder-traversal/
Description & Requirement:
Given the root of a binary tree,
return the inorder traversal of its nodes' values.
Example 1:
Input: root = [1,null,2,null,null,3]
Output: [1,3,2]
Example 2:
Input: root = []
Output: []
Example 3:
Input: root = [1]
Output: [1]
Constraints:
The number of nodes in the tree is in the range [0, 100].
-100 <= Node.val <= 100
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right # the left and right of leaf_node are both None
@staticmethod
def build_binary_tree_layer(val_list: List[int]):
if not isinstance(val_list, list) or len(val_list) <= 0:
return None
node_list = []
for v in val_list:
if v is None:
node_list.append(None)
else:
node_list.append(TreeNode(val=v))
len_node_list = len(node_list)
for idx, cur_node in enumerate(node_list):
if cur_node is not None:
cur_node_right_index = (idx + 1) << 1
cur_node_left_index = cur_node_right_index - 1
if cur_node_left_index < len_node_list:
cur_node.left = node_list[cur_node_left_index]
if cur_node_right_index < len_node_list:
cur_node.right = node_list[cur_node_right_index]
return node_list[0] # return root_node
@staticmethod
def show_binary_tree_pre_order(root_node) -> List[int]:
val_list = []
def __dfs(cur_node):
if isinstance(cur_node, TreeNode):
val_list.append(cur_node.val)
__dfs(cur_node.left)
__dfs(cur_node.right)
__dfs(root_node)
return val_list
@staticmethod
def show_binary_tree_mid_order(root_node) -> List[int]:
val_list = []
def __dfs(cur_node):
if isinstance(cur_node, TreeNode):
__dfs(cur_node.left)
val_list.append(cur_node.val)
__dfs(cur_node.right)
__dfs(root_node)
return val_list
@staticmethod
def show_binary_tree_post_order(root_node) -> List[int]:
val_list = []
def __dfs(cur_node):
if isinstance(cur_node, TreeNode):
__dfs(cur_node.left)
__dfs(cur_node.right)
val_list.append(cur_node.val)
__dfs(root_node)
return val_list
class Solution:
def inorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
# exception case
if not isinstance(root, TreeNode):
return [] # no tree, just null
# main method: (DFS inorder Traversal)
return self._inorderTraversal(root)
def _inorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
assert isinstance(root, TreeNode)
res = []
def __dfs(cur_node):
if isinstance(cur_node, TreeNode):
__dfs(cur_node.left)
res.append(cur_node.val)
__dfs(cur_node.right)
__dfs(root)
return res
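# Illustrative alternative (not part of the original solution): the same inorder
# traversal written without recursion, using an explicit stack.
def inorder_traversal_iterative(root: Optional[TreeNode]) -> List[int]:
    res, stack, node = [], [], root
    while node is not None or stack:
        while node is not None:        # walk as far left as possible
            stack.append(node)
            node = node.left
        node = stack.pop()             # visit the leftmost unvisited node
        res.append(node.val)
        node = node.right              # then continue in its right subtree
    return res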
def main():
    # Example 1: Output: [1,3,2]
root = [1, None, 2, None, None, 3]
# Example 2: Output: []
# root = []
# Example 3: Output: [1]
# root = [1]
root_node = TreeNode.build_binary_tree_layer(root)
# init instance
solution = Solution()
# run & time
start = time.process_time()
ans = solution.inorderTraversal(root_node)
end = time.process_time()
# show answer
print('\nAnswer:')
print(ans)
# print(TreeNode.show_binary_tree_mid_order(ans))
# show time consumption
print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
sys.exit(main())
| 27.652174
| 75
| 0.580189
|
deb8f1bb3a017d018cf4531c2595a9adab7967ca
| 17,506
|
py
|
Python
|
pba/model.py
|
chopardda/LDAS-NLP
|
c3737a71378267d847be9759e9bd468aa3da23d1
|
[
"Apache-2.0"
] | null | null | null |
pba/model.py
|
chopardda/LDAS-NLP
|
c3737a71378267d847be9759e9bd468aa3da23d1
|
[
"Apache-2.0"
] | null | null | null |
pba/model.py
|
chopardda/LDAS-NLP
|
c3737a71378267d847be9759e9bd468aa3da23d1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""PBA & AutoAugment Train/Eval module.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import time
import numpy as np
import tensorflow as tf
import pba.data_utils as data_utils
import pba.helper_utils as helper_utils
from pba.bert_model import build_bert_model
from pba.bert_optimization import create_optimizer
from pba.augmentation_utils import ContextNeighborStorage
import six
import json
import re
import collections
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=32,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
      hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
      initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.current_learning_rate = None
def from_dict(json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
def config_from_json_file(json_file, model_dropout):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r") as reader:
text = reader.read()
config = from_dict(json.loads(text))
if model_dropout != -1:
config.hidden_dropout_prob = model_dropout
config.attention_probs_dropout_prob = model_dropout
return config
def build_model(input_ids, input_mask, token_type_ids, num_classes, is_training, hparams, noise_vector):
"""Constructs the vision model being trained/evaled.
Args:
inputs: input features being fed to the model build built.
num_classes: number of output classes being predicted.
is_training: is the model training or not.
hparams: additional hyperparameters associated with the model.
Returns:
Returns:
The logits of the model.
"""
if hparams.model_name == 'bert':
bert_config_file = os.path.join(hparams.data_path + 'pretrained_models/bert_base/bert_config.json')
bert_config = config_from_json_file(bert_config_file,-1)
logits, embd_output = build_bert_model(input_ids, input_mask, token_type_ids, num_classes, is_training, bert_config, noise_vector)
return logits, embd_output
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name_to_variable[name]
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
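# Minimal usage sketch (hypothetical checkpoint path) showing how the assignment map
# above is typically consumed when warm-starting from a pretrained BERT checkpoint,
# mirroring what Model._build_graph does further below:
#
#   tvars = tf.trainable_variables("bert")
#   assignment_map, _ = get_assignment_map_from_checkpoint(tvars, "/path/to/bert_model.ckpt")
#   tf.train.init_from_checkpoint("/path/to/bert_model.ckpt", assignment_map)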
class Model(object):
"""Builds an model."""
def __init__(self, hparams, num_classes, text_size):
self.hparams = hparams
self.num_classes = num_classes
self.text_size = text_size
def build(self, mode):
"""Construct the model."""
assert mode in ['train', 'eval']
self.mode = mode
self._setup_misc(mode)
self._setup_texts_and_labels(self.hparams.dataset) # --- create placeholders
self._build_graph(self.input_ids, self.input_mask, self.token_type_ids, self.labels, mode, self.noise_vector)
def _setup_misc(self, mode):
"""Sets up miscellaneous in the model constructor."""
self.lr_rate_ph = self.hparams.lr
self.current_learning_rate = self.lr_rate_ph
self.batch_size = self.hparams.batch_size
self.dataset = self.hparams.dataset
self.max_seq_length = self.hparams.max_seq_length
self.epoch_accuracy = []
self.matthews_corr = []
self.loss_history = []
if mode == 'eval':
self.batch_size = self.hparams.test_batch_size
def _setup_texts_and_labels(self, dataset):
"""Sets up text and label placeholders for the model."""
self.input_ids = tf.placeholder(tf.int32, [None, self.text_size])
self.input_mask = tf.placeholder(tf.int32,[None, self.text_size])
self.token_type_ids = tf.placeholder(tf.int32, [None, self.text_size])
if self.num_classes < 100: # --- classification
self.labels = tf.placeholder(tf.int32, [None, self.num_classes])
else: # --- regression
self.labels = tf.placeholder(tf.float32, [None, 1])
self.noise_vector = tf.placeholder(tf.float32, [None, None, 768])
def assign_epoch(self, session, epoch_value):
session.run(
self._epoch_update, feed_dict={self._new_epoch: epoch_value})
def _build_graph(self, input_ids, input_mask, token_type_ids, labels, mode, noise_vector):
"""Constructs the TF graph for the model.
Args:
texts: A 2-D text Tensor
labels: A 2-D labels Tensor.
mode: string indicating training mode ( e.g., 'train', 'valid', 'test').
"""
is_training = 'train' in mode
if is_training:
self.global_step = tf.train.get_or_create_global_step()
# texts is placeholder set in _setup_texts_and_labels(data set)
logits, embd_output = build_model(input_ids, input_mask, token_type_ids, self.num_classes, is_training,
self.hparams, noise_vector)
self.embedding_output = embd_output
if self.dataset == 'stsb':
self.predictions = logits
self.cost = tf.reduce_mean(tf.square(logits - labels))
else:
self.predictions, self.cost = helper_utils.setup_loss(logits, labels)
self._calc_num_trainable_params()
if is_training:
self._build_train_op()
with tf.device('/cpu:0'):
self.saver = tf.train.Saver(max_to_keep=10)
init_checkpoint = os.path.join(self.hparams.data_path,'pretrained_models', 'bert_base', 'bert_model.ckpt')
tvars = tf.trainable_variables("bert")
(assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint)
self.assignment_map = assignment_map
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
self.init = tf.global_variables_initializer()
def _calc_num_trainable_params(self):
self.num_trainable_params = np.sum([
np.prod(var.get_shape().as_list())
for var in tf.trainable_variables()
])
tf.logging.info('number of trainable params: {}'.format(
self.num_trainable_params))
def _build_train_op(self):
"""Builds the train op for the model."""
hparams = self.hparams
clip_norm = hparams.gradient_clipping_by_global_norm
num_train_data = hparams.train_size
batch_size = hparams.batch_size
num_epochs = hparams.num_epochs
num_train_steps = int(np.floor(num_train_data/batch_size) * num_epochs * 0.9)
num_warmup_steps = int(np.floor(num_train_data/batch_size) * num_epochs * 0.1)
self.train_op, self.curr_learning_rate_tensor = create_optimizer(self.cost, self.lr_rate_ph, num_train_steps, num_warmup_steps, False, clip_norm, self.global_step)
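    # Worked example of the step arithmetic above (hypothetical sizes): with
    # train_size=10000, batch_size=32 and num_epochs=3, there are
    # floor(10000/32)*3 = 936 steps in total, so num_train_steps = int(936*0.9) = 842
    # and num_warmup_steps = int(936*0.1) = 93.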
class ModelTrainer(object):
"""Trains an instance of the Model class."""
def __init__(self, hparams):
self._session = None
self.hparams = hparams
np.random.seed(0) # --- Set the random seed to be sure the same validation set is used for each model
self.data_loader = data_utils.DataSet(hparams)
np.random.seed() # --- Put the random seed back to random
self.data_loader.reset()
# extra stuff for ray
self._build_models()
self._new_session()
self._session.__enter__()
self.create_nn_database(self.m, self.session)
def save_model(self, checkpoint_dir, step=None):
"""Dumps model into the backup_dir.
Args:
step: If provided, creates a checkpoint with the given step
number, instead of overwriting the existing checkpoints.
"""
model_save_name = os.path.join(checkpoint_dir,'model.ckpt') + '-' + str(step)
save_path = self.saver.save(self.session, model_save_name)
tf.logging.info('Saved child model')
return model_save_name
def extract_model_spec(self, checkpoint_path):
"""Loads a checkpoint with the architecture structure stored in the name."""
self.saver.restore(self.session, checkpoint_path)
tf.logging.warning(
'Loaded child model checkpoint from {}'.format(checkpoint_path))
def eval_child_model(self, model, data_loader, mode):
"""Evaluate the child model.
Args:
          model: the child model that will be evaluated.
          data_loader: dataset object to extract eval data from.
          mode: will the model be evaled on train, val or test.
        Returns:
          Accuracy and correlation metrics of the model on the specified dataset.
"""
tf.logging.info('Evaluating child model in mode {}'.format(mode))
while True:
try:
accuracy, matthews_corrcoef, f1_score, pearson, spearman = helper_utils.eval_child_model(
self.session, model, data_loader, mode)
tf.logging.info(
'Eval child model accuracy: {}'.format(accuracy))
break
except (tf.errors.AbortedError, tf.errors.UnavailableError) as e:
tf.logging.info(
'Retryable error caught: {}. Retrying.'.format(e))
return accuracy, matthews_corrcoef, f1_score, pearson, spearman
@contextlib.contextmanager
def _new_session(self):
"""Creates a new session for model m. initialize variables, and save / restore from checkpoint."""
sess_cfg = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False)
sess_cfg.gpu_options.allow_growth = True
self._session = tf.Session('', config=sess_cfg)
self._session.run(self.m.init)
return self._session
def _build_models(self):
"""Builds the text models for train and eval."""
m = Model(self.hparams, self.data_loader.num_classes, self.data_loader.text_size)
m.build('train')
self._num_trainable_params = m.num_trainable_params
self._saver = m.saver
self.m = m
self.meval = m
def create_nn_database(self, model, session):
"""Create search index for nearest neighbour augmentation from all samples in the train data"""
if type(self.data_loader.train_texts[0]) == str:
self.nn_database = ContextNeighborStorage(sentences=self.data_loader.train_texts, n_labels=self.data_loader.train_labels.shape[1], model=model, session=session)
elif type(self.data_loader.train_texts[0]) == tuple:
all_sentences = [list(sent_pair) for sent_pair in self.data_loader.train_texts]
all_sentences_flat = [item for sublist in all_sentences for item in sublist]
self.nn_database = ContextNeighborStorage(sentences=all_sentences_flat, n_labels=self.data_loader.train_labels.shape[1], model=model, session=session)
self.nn_database.process_sentences()
self.nn_database.build_search_index()
def _run_training_loop(self, curr_epoch):
"""Trains the model `m` for one epoch."""
start_time = time.time()
while True:
try:
train_accuracy, train_matthews, train_f1_score, train_pearson, train_spearman = helper_utils.run_epoch_training(self.session, self.m, self.data_loader, self.nn_database, curr_epoch)
break
except (tf.errors.AbortedError, tf.errors.UnavailableError) as e:
tf.logging.info(
'Retryable error caught: {}. Retrying.'.format(e))
tf.logging.info('Finished epoch: {}'.format(curr_epoch))
tf.logging.info('Epoch time(min): {}'.format(
(time.time() - start_time) / 60.0))
return train_accuracy, train_matthews, train_f1_score, train_pearson, train_spearman
def _compute_final_accuracies(self, iteration):
"""Run once training is finished to compute final test accuracy."""
if (iteration >= self.hparams.num_epochs - 1):
test_accuracy, test_matthews_corrcoef, test_f1_score, test_pearson, test_spearman = self.eval_child_model(self.m, self.data_loader, 'test')
else:
test_accuracy = 0
test_matthews_corrcoef = 0
test_f1_score = 0
test_pearson = 0
test_spearman = 0
tf.logging.info('Test Accuracy: {}'.format(test_accuracy))
        tf.logging.info('Test Matthew\'s Corr: {}'.format(test_matthews_corrcoef))
tf.logging.info('Test F1 Score: {}'.format(test_f1_score))
tf.logging.info('Test Pearson: {}'.format(test_pearson))
tf.logging.info('Test Spearman: {}'.format(test_spearman))
return test_accuracy, test_matthews_corrcoef, test_f1_score, test_pearson, test_spearman
def run_model(self, epoch):
"""Trains and evalutes the image model."""
valid_accuracy = 0.
valid_matthews = 0.
valid_f1_score = 0.
valid_pearson = 0.
valid_spearman = 0.
training_accuracy, training_matthews, training_f1_score, training_pearson, training_spearman = self._run_training_loop(epoch)
if self.hparams.validation_size > 0:
valid_accuracy, valid_matthews, valid_f1_score, valid_pearson, valid_spearman = self.eval_child_model(self.m,
self.data_loader, 'val')
tf.logging.info('Train Acc: {}, Valid Acc: {}'.format(
training_accuracy, valid_accuracy))
return training_accuracy, training_matthews, training_f1_score, training_pearson, training_spearman, valid_accuracy, valid_matthews, valid_f1_score, valid_pearson, valid_spearman
def reset_config(self, new_hparams):
self.hparams = new_hparams
self.data_loader.reset_policy(new_hparams)
return
@property
def saver(self):
return self._saver
@property
def session(self):
return self._session
@property
def num_trainable_params(self):
return self._num_trainable_params
| 40.617169
| 197
| 0.673026
|
f0e18cf2b502ee8a6a8d1a0ee15d92085fc5d647
| 109
|
py
|
Python
|
pysts/notebook/report/__init__.py
|
sdswart/pysts
|
f140072e064b59a7d8732e73d71fd812b6d292c5
|
[
"MIT"
] | null | null | null |
pysts/notebook/report/__init__.py
|
sdswart/pysts
|
f140072e064b59a7d8732e73d71fd812b6d292c5
|
[
"MIT"
] | null | null | null |
pysts/notebook/report/__init__.py
|
sdswart/pysts
|
f140072e064b59a7d8732e73d71fd812b6d292c5
|
[
"MIT"
] | null | null | null |
from .app import create_app, debug_view
from .widget_utils import get_output_html, get_output_figure_widgets
| 36.333333
| 68
| 0.87156
|
5bb047c18236acf17d00a10e158287408cdfcf36
| 9,388
|
py
|
Python
|
pandora/model.py
|
mikekestemont/pandora
|
ecae769c8dac5cce563da114be923d22eec6656d
|
[
"MIT"
] | 2
|
2016-02-19T10:23:17.000Z
|
2016-09-28T16:14:41.000Z
|
pandora/model.py
|
mikekestemont/pandora
|
ecae769c8dac5cce563da114be923d22eec6656d
|
[
"MIT"
] | 6
|
2016-06-22T12:40:57.000Z
|
2018-04-16T08:39:52.000Z
|
pandora/model.py
|
mikekestemont/pandora
|
ecae769c8dac5cce563da114be923d22eec6656d
|
[
"MIT"
] | 3
|
2016-01-10T10:24:53.000Z
|
2017-02-06T13:47:20.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from keras.models import Model
from keras.layers.recurrent import LSTM
from keras.layers.convolutional import Convolution1D
from keras.layers import *
from keras.layers.embeddings import Embedding
from keras.optimizers import Adam, RMSprop
from keras.objectives import categorical_crossentropy
def build_model(token_len, token_char_vector_dict,
nb_encoding_layers, nb_dense_dims,
lemma_len, lemma_char_vector_dict,
nb_tags, nb_morph_cats,
nb_lemmas, nb_train_tokens,
nb_context_tokens,
nb_embedding_dims,
pretrained_embeddings=None,
include_token=True,
include_context=True,
include_lemma=True,
include_pos=True,
include_morph=True,
nb_filters = 100,
filter_length = 3,
focus_repr = 'recurrent',
dropout_level = .15,
):
inputs, outputs = [], []
subnets = []
if include_token:
# add input layer:
token_input = Input(shape=(token_len, len(token_char_vector_dict)),
name='focus_in')
inputs.append(token_input)
if focus_repr == 'recurrent':
# add recurrent layers to model focus token:
for i in range(nb_encoding_layers):
if i == 0:
curr_input = token_input
else:
curr_input = curr_enc_out
if i == (nb_encoding_layers - 1):
token_subnet = Bidirectional(LSTM(output_dim=nb_dense_dims,
return_sequences=False,
activation='tanh',
name='final_focus_encoder'),
merge_mode='sum')(curr_input)
else:
curr_enc_out = Bidirectional(LSTM(output_dim=nb_dense_dims,
return_sequences=True,
activation='tanh',
name='encoder_'+str(i+1)),
merge_mode='sum')(curr_input)
elif focus_repr == 'convolutions':
token_subnet = Convolution1D(input_shape=(token_len, len(token_char_vector_dict)),
nb_filter=nb_filters,
filter_length=filter_length,
activation='relu',
border_mode='valid',
subsample_length=1,
init='glorot_uniform',
name='focus_conv')(token_input)
token_subnet = Flatten(name='focus_flat')(token_subnet)
token_subnet = Dropout(dropout_level, name='focus_dropout1')(token_subnet)
token_subnet = Dense(nb_dense_dims, name='focus_dense')(token_subnet)
token_subnet = Dropout(dropout_level, name='focus_dropout2')(token_subnet)
token_subnet = Activation('relu', name='final_focus_encoder')(token_subnet)
else:
raise ValueError('Parameter `focus_repr` not understood: use "recurrent" or "convolutions".')
subnets.append(token_subnet)
if include_context:
context_input = Input(shape=(nb_context_tokens,), dtype='int32', name='context_in')
inputs.append(context_input)
context_subnet = Embedding(input_dim=nb_train_tokens,
output_dim=nb_embedding_dims,
weights=pretrained_embeddings,
input_length=nb_context_tokens,
name='context_embedding')(context_input)
context_subnet = Flatten(name='context_flatten')(context_subnet)
context_subnet = Dropout(dropout_level, name='context_dropout')(context_subnet)
context_subnet = Activation('relu', name='context_relu')(context_subnet)
context_subnet = Dense(nb_dense_dims, name='context_dense1')(context_subnet)
context_subnet = Dropout(dropout_level, name='context_dropout2')(context_subnet)
context_subnet = Activation('relu', name='context_out')(context_subnet)
subnets.append(context_subnet)
# combine subnets:
if len(subnets) > 1:
joined = merge(subnets, mode='concat', name='joined')
else:
joined = Activation('linear', name='joined')(subnets[0])
if include_lemma:
if include_lemma == 'generate':
repeat = RepeatVector(lemma_len, name='encoder_repeat')(joined)
for i in range(nb_encoding_layers):
if i == 0:
curr_input = repeat
else:
curr_input = curr_out
if i == (nb_encoding_layers - 1):
output_name = 'final_focus_decoder'
else:
output_name = 'decoder_'+str(i + 1)
curr_out = Bidirectional(LSTM(output_dim=nb_dense_dims,
return_sequences=True,
activation='tanh',
name=output_name),
merge_mode='sum')(curr_input)
# add lemma decoder
lemma_label = TimeDistributed(Dense(len(lemma_char_vector_dict)),
name='lemma_dense')(curr_out)
lemma_label = Activation('softmax', name='lemma_out')(lemma_label)
elif include_lemma == 'label':
lemma_label = Dense(nb_lemmas,
name='lemma_dense1')(joined)
lemma_label = Dropout(dropout_level,
name='lemma_dense_dropout1')(lemma_label)
lemma_label = Activation('softmax',
name='lemma_out')(lemma_label)
outputs.append(lemma_label)
if include_pos:
pos_label = Dense(nb_tags,
name='pos_dense1')(joined)
pos_label = Dropout(dropout_level,
name='pos_dense_dropout1')(pos_label)
pos_label = Activation('softmax',
name='pos_out')(pos_label)
outputs.append(pos_label)
if include_morph:
if include_morph == 'label':
morph_label = Dense(nb_dense_dims,
activation='relu',
name='morph_dense1')(joined)
morph_label = Dropout(dropout_level,
name='morph_dense_dropout1')(morph_label)
morph_label = Dense(nb_dense_dims,
activation='relu',
name='morph_dense2')(morph_label)
morph_label = Dropout(dropout_level,
name='morph_dense_dropout2')(morph_label)
morph_label = Dense(nb_morph_cats,
activation='relu',
name='morph_dense3')(morph_label)
morph_label = Dropout(dropout_level,
name='morph_dense_dropout3')(morph_label)
morph_label = Activation('softmax',
name='morph_out')(morph_label)
elif include_morph == 'multilabel':
morph_label = Dense(nb_dense_dims,
activation='relu',
name='morph_dense1')(joined)
morph_label = Dropout(dropout_level,
name='morph_dense_dropout1')(morph_label)
morph_label = Dense(nb_dense_dims,
activation='relu',
name='morph_dense2')(morph_label)
morph_label = Dropout(dropout_level,
name='morph_dense_dropout2')(morph_label)
morph_label = Dense(nb_morph_cats,
activation='relu',
name='morph_dense3')(morph_label)
morph_label = Dropout(dropout_level,
name='morph_dense_dropout3')(morph_label)
morph_label = Activation('tanh',
name='morph_out')(morph_label)
outputs.append(morph_label)
loss_dict = {}
if include_lemma:
loss_dict['lemma_out'] = 'categorical_crossentropy'
if include_pos:
loss_dict['pos_out'] = 'categorical_crossentropy'
if include_morph:
if include_morph == 'label':
loss_dict['morph_out'] = 'categorical_crossentropy'
elif include_morph == 'multilabel':
loss_dict['morph_out'] = 'binary_crossentropy'
model = Model(input=inputs, output=outputs)
if focus_repr == 'convolutions':
model.compile(optimizer='SGD', loss=loss_dict)
else:
model.compile(optimizer='RMSprop', loss=loss_dict)
return model
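# Minimal usage sketch (hypothetical dimensions, not from the original training
# pipeline): builds a small tagger with the recurrent focus encoder, assuming the
# Keras 1.x-style API imported above is available.
if __name__ == '__main__':
    dummy_chars = {c: i for i, c in enumerate('abcdefghij')}
    model = build_model(token_len=15, token_char_vector_dict=dummy_chars,
                        nb_encoding_layers=1, nb_dense_dims=32,
                        lemma_len=15, lemma_char_vector_dict=dummy_chars,
                        nb_tags=10, nb_morph_cats=5,
                        nb_lemmas=100, nb_train_tokens=500,
                        nb_context_tokens=3, nb_embedding_dims=16,
                        include_lemma='label', include_morph=False)
    model.summary()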
| 45.352657
| 105
| 0.524073
|
3e9699a94995597885e45f9f0c49ae30a206bd13
| 8,216
|
py
|
Python
|
keras_retinanet/utils/anchors.py
|
TimeLCJ/smaller-keras-retinanet
|
87e869c1e7e72e5f323c6392a914a552f6f7b529
|
[
"Apache-2.0"
] | null | null | null |
keras_retinanet/utils/anchors.py
|
TimeLCJ/smaller-keras-retinanet
|
87e869c1e7e72e5f323c6392a914a552f6f7b529
|
[
"Apache-2.0"
] | null | null | null |
keras_retinanet/utils/anchors.py
|
TimeLCJ/smaller-keras-retinanet
|
87e869c1e7e72e5f323c6392a914a552f6f7b529
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
def anchor_targets_bbox(
image_shape,
annotations,
num_classes,
mask_shape=None,
negative_overlap=0.4,
positive_overlap=0.5,
**kwargs
):
anchors = anchors_for_shape(image_shape, **kwargs)
    # label: 1 is positive, 0 is negative, -1 is don't care
labels = np.ones((anchors.shape[0], num_classes)) * -1
if annotations.shape[0]:
# obtain indices of gt annotations with the greatest overlap
overlaps = compute_overlap(anchors, annotations)
argmax_overlaps_inds = np.argmax(overlaps, axis=1)
max_overlaps = overlaps[np.arange(overlaps.shape[0]), argmax_overlaps_inds]
# assign bg labels first so that positive labels can clobber them
labels[max_overlaps < negative_overlap, :] = 0
# compute box regression targets
annotations = annotations[argmax_overlaps_inds]
# fg label: above threshold IOU
positive_indices = max_overlaps >= positive_overlap
labels[positive_indices, :] = 0
labels[positive_indices, annotations[positive_indices, 4].astype(int)] = 1
else:
# no annotations? then everything is background
labels[:] = 0
annotations = np.zeros((anchors.shape[0], annotations.shape[1]))
# ignore annotations outside of image
mask_shape = image_shape if mask_shape is None else mask_shape
anchors_centers = np.vstack([(anchors[:, 0] + anchors[:, 2]) / 2, (anchors[:, 1] + anchors[:, 3]) / 2]).T
indices = np.logical_or(anchors_centers[:, 0] >= mask_shape[1], anchors_centers[:, 1] >= mask_shape[0])
labels[indices, :] = -1
return labels, annotations, anchors
def layer_shapes(image_shape, model):
"""Compute layer shapes given input image shape and the model.
:param image_shape:
:param model:
:return:
"""
shape = {
model.layers[0].name: (None,) + image_shape,
}
for layer in model.layers[1:]:
nodes = layer._inbound_nodes
for node in nodes:
inputs = [shape[lr.name] for lr in node.inbound_layers]
if not inputs:
continue
shape[layer.name] = layer.compute_output_shape(inputs[0] if len(inputs) == 1 else inputs)
return shape
def make_shapes_callback(model):
def get_shapes(image_shape, pyramid_levels):
shape = layer_shapes(image_shape, model)
image_shapes = [shape["P{}".format(level)][1:3] for level in pyramid_levels]
return image_shapes
return get_shapes
def guess_shapes(image_shape, pyramid_levels):
"""Guess shapes based on pyramid levels.
:param image_shape:
:param pyramid_levels:
:return:
"""
image_shape = np.array(image_shape[:2])
image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in pyramid_levels]
return image_shapes
def anchors_for_shape(
image_shape,
pyramid_levels=None,
ratios=None,
scales=None,
strides=None,
sizes=None,
shapes_callback=None,
):
if pyramid_levels is None:
pyramid_levels = [3, 4, 5, 6, 7]
if strides is None:
strides = [2 ** x for x in pyramid_levels]
if sizes is None:
sizes = [2 ** (x + 2) for x in pyramid_levels]
if ratios is None:
ratios = np.array([0.5, 1, 2])
if scales is None:
scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])
if shapes_callback is None:
shapes_callback = guess_shapes
image_shapes = shapes_callback(image_shape, pyramid_levels)
# compute anchors over all pyramid levels
all_anchors = np.zeros((0, 4))
for idx, p in enumerate(pyramid_levels):
anchors = generate_anchors(base_size=sizes[idx], ratios=ratios, scales=scales)
shifted_anchors = shift(image_shapes[idx], strides[idx], anchors)
all_anchors = np.append(all_anchors, shifted_anchors, axis=0)
return all_anchors
def shift(shape, stride, anchors):
shift_x = (np.arange(0, shape[1]) + 0.5) * stride
shift_y = (np.arange(0, shape[0]) + 0.5) * stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((
shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel()
)).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = anchors.shape[0]
K = shifts.shape[0]
all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
return all_anchors
def generate_anchors(base_size=16, ratios=None, scales=None):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales w.r.t. a reference window.
"""
if ratios is None:
ratios = np.array([0.5, 1, 2])
if scales is None:
scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])
num_anchors = len(ratios) * len(scales)
# initialize output anchors
anchors = np.zeros((num_anchors, 4))
# scale base_size
anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T
# compute areas of anchors
areas = anchors[:, 2] * anchors[:, 3]
# correct for ratios
anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales)))
anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales))
# transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)
anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T
return anchors
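# Worked example of the shape arithmetic above: for base_size=16, ratio=0.5 and
# scale=1, the anchor area is (16*1)**2 = 256, so width = sqrt(256/0.5) ~ 22.6 and
# height = 22.6*0.5 ~ 11.3, which is then re-centered into (x1, y1, x2, y2) form.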
def bbox_transform(anchors, gt_boxes, mean=None, std=None):
"""Compute bounding-box regression targets for an image."""
if mean is None:
mean = np.array([0, 0, 0, 0])
if std is None:
std = np.array([0.2, 0.2, 0.2, 0.2])
if isinstance(mean, (list, tuple)):
mean = np.array(mean)
elif not isinstance(mean, np.ndarray):
raise ValueError('Expected mean to be a np.ndarray, list or tuple. Received: {}'.format(type(mean)))
if isinstance(std, (list, tuple)):
std = np.array(std)
elif not isinstance(std, np.ndarray):
raise ValueError('Expected std to be a np.ndarray, list or tuple. Received: {}'.format(type(std)))
anchor_widths = anchors[:, 2] - anchors[:, 0]
anchor_heights = anchors[:, 3] - anchors[:, 1]
targets_dx1 = (gt_boxes[:, 0] - anchors[:, 0]) / anchor_widths
targets_dy1 = (gt_boxes[:, 1] - anchors[:, 1]) / anchor_heights
targets_dx2 = (gt_boxes[:, 2] - anchors[:, 2]) / anchor_widths
targets_dy2 = (gt_boxes[:, 3] - anchors[:, 3]) / anchor_heights
targets = np.stack((targets_dx1, targets_dy1, targets_dx2, targets_dy2))
targets = targets.T
targets = (targets - mean) / std
return targets
def compute_overlap(a, b):
"""
Parameters
----------
a: (N, 4) ndarray of float
b: (K, 5) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0])
ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1])
iw = np.maximum(iw, 0)
ih = np.maximum(ih, 0)
ua = np.expand_dims((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1) + area - iw * ih
ua = np.maximum(ua, np.finfo(float).eps)
intersection = iw * ih
return intersection / ua
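# Minimal usage sketch (hypothetical input size): generate the default anchor grid
# for a 256x256x3 image over pyramid levels 3-7; with 9 anchors per location this
# yields (32*32 + 16*16 + 8*8 + 4*4 + 2*2) * 9 = 12276 anchors.
if __name__ == '__main__':
    anchors = anchors_for_shape((256, 256, 3))
    print(anchors.shape)  # expected: (12276, 4)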
| 31.968872
| 118
| 0.622201
|
5c199c25fbb82bbc8f8802880ab1d13782854282
| 2,383
|
py
|
Python
|
app/auth/views.py
|
Denniskamau/Cysect
|
88065a3086216efbf2f9e17678834afb28dcf7ff
|
[
"Apache-2.0"
] | 1
|
2017-04-27T09:42:47.000Z
|
2017-04-27T09:42:47.000Z
|
app/auth/views.py
|
Denniskamau/Cysect
|
88065a3086216efbf2f9e17678834afb28dcf7ff
|
[
"Apache-2.0"
] | null | null | null |
app/auth/views.py
|
Denniskamau/Cysect
|
88065a3086216efbf2f9e17678834afb28dcf7ff
|
[
"Apache-2.0"
] | null | null | null |
from flask import flash, redirect, render_template, url_for
from flask_login import login_required, login_user, logout_user
from . import auth
from .forms import LoginForm, RegistrationForm
from .. import db
from ..models import Employee
@auth.route('/register', methods=['GET', 'POST'])
def register():
"""
Handle requests to the /register route
Add an employee to the database through the registration form
"""
form = RegistrationForm()
if form.validate_on_submit():
employee = Employee(email=form.email.data,
username=form.username.data,
first_name=form.first_name.data,
last_name=form.last_name.data,
password=form.password.data)
# add employee to the database
db.session.add(employee)
db.session.commit()
flash('You have successfully registered! You may now login.')
# redirect to the login page
return redirect(url_for('auth.login'))
# load registration template
return render_template('auth/register.html', form=form, title='Register')
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
# check whether employee exists in the database and whether
# the password entered matches the password in the database
employee = Employee.query.filter_by(email=form.email.data).first()
if employee is not None and employee.verify_password(
form.password.data):
# log employee in
login_user(employee)
# redirect to the appropriate dashboard page
if employee.is_admin:
return redirect(url_for('home.admin_dashboard'))
else:
return redirect(url_for('home.dashboard'))
# when login details are incorrect
else:
flash('Invalid email or password.')
# load login template
return render_template('auth/login.html', form=form, title='Login')
@auth.route('/logout')
@login_required
def logout():
"""
Handle requests to the /logout route
Log an employee out through the logout link
"""
logout_user()
flash('You have successfully been logged out.')
# redirect to the login page
return redirect(url_for('auth.login'))
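# Minimal wiring sketch (hypothetical app factory, not part of this module): the
# `auth` blueprint used above is typically registered on the Flask app, e.g.
#
#   from .auth import auth as auth_blueprint
#   app.register_blueprint(auth_blueprint)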
| 33.56338
| 77
| 0.63953
|
160feb1d8a11b4b30d42b028908e6d8f1fa5b5b7
| 95,193
|
py
|
Python
|
wandb/sdk/wandb_run.py
|
KristianSpurling/client
|
c1f6e4b1eb73c496707c756d6f9235b3009b9e25
|
[
"MIT"
] | 1
|
2021-05-29T10:45:46.000Z
|
2021-05-29T10:45:46.000Z
|
wandb/sdk/wandb_run.py
|
KristianSpurling/client
|
c1f6e4b1eb73c496707c756d6f9235b3009b9e25
|
[
"MIT"
] | null | null | null |
wandb/sdk/wandb_run.py
|
KristianSpurling/client
|
c1f6e4b1eb73c496707c756d6f9235b3009b9e25
|
[
"MIT"
] | null | null | null |
#
# -*- coding: utf-8 -*-
from __future__ import print_function
import atexit
from datetime import timedelta
import glob
import json
import logging
import numbers
import os
import platform
import sys
import threading
import time
import traceback
import click
from six import iteritems, string_types
from six.moves import _thread as thread
from six.moves.collections_abc import Mapping
from six.moves.urllib.parse import quote as url_quote
from six.moves.urllib.parse import urlencode
import wandb
from wandb import errors
from wandb import trigger
from wandb._globals import _datatypes_set_callback
from wandb.apis import internal, public
from wandb.errors import Error
from wandb.util import add_import_hook, sentry_set_scope, to_forward_slash_path
from wandb.viz import (
create_custom_chart,
custom_chart_panel_config,
CustomChart,
Visualize,
)
from . import wandb_artifacts
from . import wandb_config
from . import wandb_history
from . import wandb_metric
from . import wandb_summary
from .interface.artifacts import Artifact as ArtifactInterface
from .lib import (
apikey,
config_util,
filenames,
filesystem,
ipython,
module,
proto_util,
redirect,
sparkline,
telemetry,
)
if wandb.TYPE_CHECKING: # type: ignore
from typing import (
Any,
Dict,
List,
Optional,
Sequence,
TextIO,
Tuple,
Union,
Type,
Callable,
)
from types import TracebackType
from .wandb_settings import Settings, SettingsConsole
from .interface.summary_record import SummaryRecord
from .interface.interface import BackendSender
from .lib.reporting import Reporter
from wandb.proto.wandb_internal_pb2 import (
RunRecord,
FilePusherStats,
PollExitResponse,
MetricRecord,
)
from .wandb_setup import _WandbSetup
from wandb.apis.public import Api as PublicApi
from .wandb_artifacts import Artifact
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import NoReturn
from .data_types import WBValue
from .interface.artifacts import (
ArtifactEntry,
ArtifactManifest,
)
logger = logging.getLogger("wandb")
EXIT_TIMEOUT = 60
RUN_NAME_COLOR = "#cdcd00"
class ExitHooks(object):
exception: Optional[BaseException] = None
def __init__(self) -> None:
self.exit_code = 0
self.exception = None
def hook(self) -> None:
self._orig_exit = sys.exit
sys.exit = self.exit
self._orig_excepthook = (
sys.excepthook
if sys.excepthook
!= sys.__excepthook__ # respect hooks by other libraries like pdb
else None
)
sys.excepthook = self.exc_handler
def exit(self, code: object = 0) -> "NoReturn":
orig_code = code
if code is None:
code = 0
elif not isinstance(code, int):
code = 1
self.exit_code = code
self._orig_exit(orig_code)
def was_ctrl_c(self) -> bool:
return isinstance(self.exception, KeyboardInterrupt)
def exc_handler(
self, exc_type: Type[BaseException], exc: BaseException, tb: TracebackType
) -> None:
self.exit_code = 1
self.exception = exc
if issubclass(exc_type, Error):
wandb.termerror(str(exc))
if self.was_ctrl_c():
self.exit_code = 255
traceback.print_exception(exc_type, exc, tb)
if self._orig_excepthook:
self._orig_excepthook(exc_type, exc, tb)
class RunStatusChecker(object):
"""Periodically polls the background process for relevant updates.
For now, we just use this to figure out if the user has requested a stop.
"""
def __init__(
self,
interface: BackendSender,
stop_polling_interval: int = 15,
retry_polling_interval: int = 5,
) -> None:
self._interface = interface
self._stop_polling_interval = stop_polling_interval
self._retry_polling_interval = retry_polling_interval
self._join_event = threading.Event()
self._stop_thread = threading.Thread(target=self.check_status)
self._stop_thread.daemon = True
self._stop_thread.start()
self._retry_thread = threading.Thread(target=self.check_network_status)
self._retry_thread.daemon = True
self._retry_thread.start()
def check_network_status(self) -> None:
join_requested = False
while not join_requested:
status_response = self._interface.communicate_network_status()
if status_response and status_response.network_responses:
for hr in status_response.network_responses:
if (
hr.http_status_code == 200 or hr.http_status_code == 0
): # we use 0 for non-http errors (eg wandb errors)
wandb.termlog("{}".format(hr.http_response_text))
else:
wandb.termlog(
"{} encountered ({}), retrying request".format(
hr.http_status_code, hr.http_response_text.rstrip()
)
)
join_requested = self._join_event.wait(self._retry_polling_interval)
def check_status(self) -> None:
join_requested = False
while not join_requested:
status_response = self._interface.communicate_stop_status()
if status_response and status_response.run_should_stop:
# TODO(frz): This check is required
# until WB-3606 is resolved on server side.
if not wandb.agents.pyagent.is_running():
thread.interrupt_main()
return
join_requested = self._join_event.wait(self._stop_polling_interval)
def stop(self) -> None:
self._join_event.set()
def join(self) -> None:
self.stop()
self._stop_thread.join()
self._retry_thread.join()
class Run(object):
"""
A unit of computation logged by wandb. Typically this is an ML experiment.
Create a run with `wandb.init()`.
In distributed training, use `wandb.init()` to create a run for
each process, and set the group argument to organize runs into a larger experiment.
Currently there is a parallel Run object in the wandb.Api. Eventually these
two objects will be merged.
Attributes:
history: (History) Time series values, created with `wandb.log()`.
History can contain scalar values, rich media, or even custom plots
across multiple steps.
summary: (Summary) Single values set for each `wandb.log()` key. By
default, summary is set to the last value logged. You can manually
set summary to the best value, like max accuracy, instead of the
final value.
"""
_telemetry_obj: telemetry.TelemetryRecord
_teardown_hooks: List[Callable[[], None]]
_tags: Optional[Tuple[Any, ...]]
_entity: Optional[str]
_project: Optional[str]
_group: Optional[str]
_job_type: Optional[str]
_name: Optional[str]
_notes: Optional[str]
_run_obj: Optional[RunRecord]
_run_obj_offline: Optional[RunRecord]
    # Use string literal annotation because of type reference loop
_backend: Optional["wandb.sdk.backend.backend.Backend"]
_wl: Optional[_WandbSetup]
_upgraded_version_message: Optional[str]
_deleted_version_message: Optional[str]
_yanked_version_message: Optional[str]
_out_redir: Optional[redirect.RedirectBase]
_err_redir: Optional[redirect.RedirectBase]
_redirect_cb: Optional[Callable[[str, str], None]]
_output_writer: Optional["filesystem.CRDedupedFile"]
_atexit_cleanup_called: bool
_hooks: Optional[ExitHooks]
_exit_code: Optional[int]
_run_status_checker: Optional[RunStatusChecker]
_poll_exit_response: Optional[PollExitResponse]
_sampled_history: Optional[Dict[str, Union[List[int], List[float]]]]
_use_redirect: bool
_stdout_slave_fd: Optional[int]
_stderr_slave_fd: Optional[int]
_pid: int
def __init__(
self,
settings: Settings,
config: Optional[Dict[str, Any]] = None,
sweep_config: Optional[Dict[str, Any]] = None,
) -> None:
self._config = wandb_config.Config()
self._config._set_callback(self._config_callback)
self._config._set_settings(settings)
self._backend = None
self.summary = wandb_summary.Summary(
self._summary_get_current_summary_callback,
)
self.summary._set_update_callback(self._summary_update_callback)
self.history = wandb_history.History(self)
self.history._set_callback(self._history_callback)
_datatypes_set_callback(self._datatypes_callback)
self._settings = settings
self._wl = None
self._reporter: Optional[Reporter] = None
self._entity = None
self._project = None
self._group = None
self._job_type = None
self._run_id = settings.run_id
self._start_time = time.time()
self._starting_step = 0
self._name = None
self._notes = None
self._tags = None
self._hooks = None
self._teardown_hooks = []
self._redirect_cb = None
self._out_redir = None
self._err_redir = None
self.stdout_redirector = None
self.stderr_redirector = None
self._save_stdout = None
self._save_stderr = None
self._stdout_slave_fd = None
self._stderr_slave_fd = None
self._exit_code = None
self._exit_result = None
self._final_summary = None
self._sampled_history = None
self._jupyter_progress = None
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
self._jupyter_progress = ipython.jupyter_progress_bar()
self._output_writer = None
self._upgraded_version_message = None
self._deleted_version_message = None
self._yanked_version_message = None
# Pull info from settings
self._init_from_settings(settings)
# Initial scope setup for sentry. This might get changed when the
# actual run comes back.
sentry_set_scope(
"user",
entity=self._entity,
project=self._project,
email=self._settings.email,
)
# Returned from backend request_run(), set from wandb_init?
self._run_obj = None
self._run_obj_offline = None
# Created when the run "starts".
self._run_status_checker = None
self._poll_exit_response = None
# Initialize telemetry object
self._telemetry_obj = telemetry.TelemetryRecord()
# Populate config
config = config or dict()
wandb_key = "_wandb"
config.setdefault(wandb_key, dict())
if settings.save_code and settings.program_relpath:
config[wandb_key]["code_path"] = to_forward_slash_path(
os.path.join("code", settings.program_relpath)
)
if sweep_config:
self._config.update_locked(
sweep_config, user="sweep", _allow_val_change=True
)
self._config._update(config, ignore_locked=True)
self._atexit_cleanup_called = False
self._use_redirect = True
self._progress_step = 0
self._pid = os.getpid()
def _telemetry_callback(self, telem_obj: telemetry.TelemetryRecord) -> None:
self._telemetry_obj.MergeFrom(telem_obj)
def _freeze(self) -> None:
self._frozen = True
def __setattr__(self, attr: str, value: object) -> None:
if getattr(self, "_frozen", None) and not hasattr(self, attr):
raise Exception("Attribute {} is not supported on Run object.".format(attr))
super(Run, self).__setattr__(attr, value)
def _telemetry_imports(self, imp: telemetry.TelemetryImports) -> None:
mods = sys.modules
if mods.get("torch"):
imp.torch = True
if mods.get("keras"):
imp.keras = True
if mods.get("tensorflow"):
imp.tensorflow = True
if mods.get("sklearn"):
imp.sklearn = True
if mods.get("fastai"):
imp.fastai = True
if mods.get("xgboost"):
imp.xgboost = True
if mods.get("catboost"):
imp.catboost = True
if mods.get("lightgbm"):
imp.lightgbm = True
if mods.get("pytorch_lightning"):
imp.pytorch_lightning = True
if mods.get("ignite"):
imp.pytorch_ignite = True
if mods.get("transformers"):
imp.transformers_huggingface = True
def _init_from_settings(self, settings: Settings) -> None:
if settings.entity is not None:
self._entity = settings.entity
if settings.project is not None:
self._project = settings.project
if settings.run_group is not None:
self._group = settings.run_group
if settings.run_job_type is not None:
self._job_type = settings.run_job_type
if settings.run_name is not None:
self._name = settings.run_name
if settings.run_notes is not None:
self._notes = settings.run_notes
if settings.run_tags is not None:
self._tags = settings.run_tags
def _make_proto_run(self, run: RunRecord) -> None:
"""Populate protocol buffer RunData for interface/interface."""
if self._entity is not None:
run.entity = self._entity
if self._project is not None:
run.project = self._project
if self._group is not None:
run.run_group = self._group
if self._job_type is not None:
run.job_type = self._job_type
if self._run_id is not None:
run.run_id = self._run_id
if self._name is not None:
run.display_name = self._name
if self._notes is not None:
run.notes = self._notes
if self._tags is not None:
for tag in self._tags:
run.tags.append(tag)
if self._start_time is not None:
run.start_time.FromSeconds(int(self._start_time))
# Note: run.config is set in interface/interface:_make_run()
def __getstate__(self) -> None:
pass
def __setstate__(self, state: Any) -> None:
pass
@property
def dir(self) -> str:
"""
Returns:
(str): The directory where all of the files associated with the run are
placed.
"""
return self._settings.files_dir
@property
def config(self) -> wandb_config.Config:
"""
Returns:
(Config): A config object (similar to a nested dict) of key
value pairs associated with the hyperparameters of the run.
"""
return self._config
@property
def config_static(self) -> wandb_config.ConfigStatic:
return wandb_config.ConfigStatic(self._config)
@property
def name(self) -> Optional[str]:
"""
Returns:
(str): the display name of the run. It does not need to be unique
and ideally is descriptive.
"""
if self._name:
return self._name
if not self._run_obj:
return None
return self._run_obj.display_name
@name.setter
def name(self, name: str) -> None:
self._name = name
if self._backend:
self._backend.interface.publish_run(self)
@property
def notes(self) -> Optional[str]:
r"""
Returns:
            (str): notes associated with the run. Notes can be a multiline string
                and can also use markdown and latex equations inside $$, like $$x^2$$."""
if self._notes:
return self._notes
if not self._run_obj:
return None
return self._run_obj.notes
@notes.setter
def notes(self, notes: str) -> None:
self._notes = notes
if self._backend:
self._backend.interface.publish_run(self)
@property
def tags(self) -> Optional[Tuple]:
"""
Returns:
(Tuple[str]): tags associated with the run
"""
if self._tags:
return self._tags
run_obj = self._run_obj or self._run_obj_offline
if run_obj:
return tuple(run_obj.tags)
return None
@tags.setter
def tags(self, tags: Sequence) -> None:
self._tags = tuple(tags)
if self._backend:
self._backend.interface.publish_run(self)
@property
def id(self) -> str:
"""id property.
Returns:
(str): the run_id associated with the run
"""
if wandb.TYPE_CHECKING and TYPE_CHECKING:
assert self._run_id is not None
return self._run_id
@property
def sweep_id(self) -> Optional[str]:
"""
Returns:
(str, optional): the sweep id associated with the run or None
"""
if not self._run_obj:
return None
return self._run_obj.sweep_id or None
@property
def path(self) -> str:
"""
Returns:
(str): the path to the run `[entity]/[project]/[run_id]`
"""
parts = []
for e in [self._entity, self._project, self._run_id]:
if e is not None:
parts.append(e)
return "/".join(parts)
@property
def start_time(self) -> float:
"""
Returns:
(int): the unix time stamp in seconds when the run started
"""
if not self._run_obj:
return self._start_time
else:
return self._run_obj.start_time.ToSeconds()
@property
def starting_step(self) -> int:
"""
Returns:
(int): the first step of the run
"""
if not self._run_obj:
return self._starting_step
else:
return self._run_obj.starting_step
@property
def resumed(self) -> bool:
"""
Returns:
(bool): whether or not the run was resumed
"""
if self._run_obj:
return self._run_obj.resumed
return False
@property
def step(self) -> int:
"""
Every time you call wandb.log() it will by default increment the step
counter.
Returns:
(int): step counter
"""
return self.history._step
def project_name(self) -> str:
run_obj = self._run_obj or self._run_obj_offline
return run_obj.project if run_obj else ""
@property
def mode(self) -> str:
"""For compatibility with `0.9.x` and earlier, deprecate eventually."""
return "dryrun" if self._settings._offline else "run"
@property
def offline(self) -> bool:
return self._settings._offline
@property
def disabled(self) -> bool:
return self._settings._noop
@property
def group(self) -> str:
"""
Setting a group helps the W&B UI organize runs in a sensible way.
If you are doing a distributed training you should give all of the
runs in the training the same group.
If you are doing crossvalidation you should give all the crossvalidation
folds the same group.
Returns:
(str): name of W&B group associated with run.
"""
run_obj = self._run_obj or self._run_obj_offline
return run_obj.run_group if run_obj else ""
@property
def job_type(self) -> str:
run_obj = self._run_obj or self._run_obj_offline
return run_obj.job_type if run_obj else ""
@property
def project(self) -> str:
"""
Returns:
(str): name of W&B project associated with run.
"""
return self.project_name()
def log_code(
self,
root: str = ".",
name: str = None,
include_fn: Callable[[str], bool] = lambda path: path.endswith(".py"),
exclude_fn: Callable[[str], bool] = lambda path: os.sep + "wandb" + os.sep
in path,
) -> Optional[Artifact]:
"""
log_code() saves the current state of your code to a W&B artifact. By
default it walks the current directory and logs all files that end with ".py".
Arguments:
root (str, optional): The relative (to os.getcwd()) or absolute path to
recursively find code from.
name (str, optional): The name of our code artifact. By default we'll name
the artifact "source-$RUN_ID". There may be scenarios where you want
many runs to share the same artifact. Specifying name allows you to achieve that.
include_fn (callable, optional): A callable that accepts a file path and
returns True when it should be included and False otherwise. This
defaults to: `lambda path: path.endswith(".py")`
            exclude_fn (callable, optional): A callable that accepts a file path and
                returns True when it should be excluded and False otherwise. This
                defaults to excluding any path that contains a `wandb` directory.
Examples:
Basic usage
```python
run.log_code()
```
Advanced usage
```python
run.log_code("../", include_fn=lambda path: path.endswith(".py") or path.endswith(".ipynb"))
```
Returns:
An `Artifact` object if code was logged
"""
name = name or "{}-{}".format("source", self.id)
art = wandb.Artifact(name, "code")
files_added = False
if root is not None:
root = os.path.abspath(root)
for file_path in filenames.filtered_dir(root, include_fn, exclude_fn):
files_added = True
save_name = os.path.relpath(file_path, root)
art.add_file(file_path, name=save_name)
# Add any manually staged files such is ipynb notebooks
for dirpath, _, files in os.walk(self._settings._tmp_code_dir):
for fname in files:
file_path = os.path.join(dirpath, fname)
save_name = os.path.relpath(file_path, self._settings._tmp_code_dir)
files_added = True
art.add_file(file_path, name=save_name)
if not files_added:
return None
return self.log_artifact(art)
def get_url(self) -> Optional[str]:
"""
Returns:
A url (str, optional) for the W&B run or None if the run
is offline
"""
if not self._run_obj:
wandb.termwarn("URL not available in offline run")
return None
return self._get_run_url()
def get_project_url(self) -> Optional[str]:
"""
Returns:
A url (str, optional) for the W&B project associated with
the run or None if the run is offline
"""
if not self._run_obj:
wandb.termwarn("URL not available in offline run")
return None
return self._get_project_url()
def get_sweep_url(self) -> Optional[str]:
"""
Returns:
A url (str, optional) for the sweep associated with the run
or None if there is no associated sweep or the run is offline.
"""
if not self._run_obj:
wandb.termwarn("URL not available in offline run")
return None
return self._get_sweep_url()
@property
def url(self) -> Optional[str]:
"""
Returns:
(str): name of W&B url associated with run.
"""
return self.get_url()
@property
def entity(self) -> str:
"""
Returns:
(str): name of W&B entity associated with run. Entity is either
a user name or an organization name.
"""
return self._entity or ""
def _repr_mimebundle_(
self, include: Any = None, exclude: Any = None
) -> Dict[str, str]:
url = self._get_run_url()
style = "border:none;width:100%;height:400px"
s = '<h1>Run({})</h1><iframe src="{}" style="{}"></iframe>'.format(
self._run_id, url, style
)
return {"text/html": s}
def _config_callback(
self,
key: Union[Tuple[str, ...], str] = None,
val: Any = None,
data: Dict[str, object] = None,
) -> None:
logger.info("config_cb %s %s %s", key, val, data)
if not self._backend or not self._backend.interface:
return
self._backend.interface.publish_config(key=key, val=val, data=data)
def _set_config_wandb(self, key: str, val: Any) -> None:
self._config_callback(key=("_wandb", key), val=val)
def _summary_update_callback(self, summary_record: SummaryRecord) -> None:
if self._backend:
self._backend.interface.publish_summary(summary_record)
def _summary_get_current_summary_callback(self) -> Dict[str, Any]:
if not self._backend:
return {}
ret = self._backend.interface.communicate_summary()
return proto_util.dict_from_proto_list(ret.item)
def _metric_callback(self, metric_record: MetricRecord) -> None:
if self._backend:
self._backend.interface._publish_metric(metric_record)
def _datatypes_callback(self, fname: str) -> None:
if not self._backend:
return
files = dict(files=[(fname, "now")])
self._backend.interface.publish_files(files)
# TODO(jhr): codemod add: PEP 3102 -- Keyword-Only Arguments
def _history_callback(self, row: Dict[str, Any], step: int) -> None:
# TODO(jhr): move visualize hack somewhere else
custom_charts = {}
for k in row:
if isinstance(row[k], Visualize):
config = {
"id": row[k].viz_id,
"historyFieldSettings": {"key": k, "x-axis": "_step"},
}
row[k] = row[k].value
self._config_callback(val=config, key=("_wandb", "viz", k))
elif isinstance(row[k], CustomChart):
custom_charts[k] = row[k]
custom_chart = row[k]
for k, custom_chart in custom_charts.items():
# remove the chart key from the row
# TODO: is this really the right move? what if the user logs
# a non-custom chart to this key?
row.pop(k)
# add the table under a different key
table_key = k + "_table"
row[table_key] = custom_chart.table
# add the panel
panel_config = custom_chart_panel_config(custom_chart, k, table_key)
self._add_panel(k, "Vega2", panel_config)
if self._backend:
not_using_tensorboard = len(wandb.patched["tensorboard"]) == 0
self._backend.interface.publish_history(
row, step, publish_step=not_using_tensorboard
)
def _console_callback(self, name: str, data: str) -> None:
# logger.info("console callback: %s, %s", name, data)
if self._backend:
self._backend.interface.publish_output(name, data)
def _tensorboard_callback(
self, logdir: str, save: bool = None, root_logdir: str = None
) -> None:
logger.info("tensorboard callback: %s, %s", logdir, save)
save = True if save is None else save
if self._backend:
self._backend.interface.publish_tbdata(logdir, save, root_logdir)
def _set_library(self, library: _WandbSetup) -> None:
self._wl = library
def _set_backend(self, backend: "wandb.sdk.backend.backend.Backend") -> None:
self._backend = backend
def _set_reporter(self, reporter: Reporter) -> None:
self._reporter = reporter
def _set_teardown_hooks(self, hooks: List[Callable[[], None]]) -> None:
self._teardown_hooks = hooks
def _set_run_obj(self, run_obj: RunRecord) -> None:
self._run_obj = run_obj
self._entity = run_obj.entity
self._project = run_obj.project
# Grab the config from resuming
if run_obj.config:
c_dict = config_util.dict_no_value_from_proto_list(run_obj.config.update)
# TODO: Windows throws a wild error when this is set...
if "_wandb" in c_dict:
del c_dict["_wandb"]
# We update the config object here without triggering the callback
self.config._update(c_dict, allow_val_change=True, ignore_locked=True)
# Update the summary, this will trigger an un-needed graphql request :(
if run_obj.summary:
summary_dict = {}
for orig in run_obj.summary.update:
summary_dict[orig.key] = json.loads(orig.value_json)
self.summary.update(summary_dict)
self.history._update_step()
# TODO: It feels weird to call this twice..
sentry_set_scope(
"user",
entity=run_obj.entity,
project=run_obj.project,
email=self._settings.email,
url=self._get_run_url(),
)
def _set_run_obj_offline(self, run_obj: RunRecord) -> None:
self._run_obj_offline = run_obj
def _add_singleton(
self, data_type: str, key: str, value: Dict[Union[int, str], str]
) -> None:
"""Stores a singleton item to wandb config.
A singleton in this context is a piece of data that is continually
logged with the same value in each history step, but represented
as a single item in the config.
        We do this to avoid filling up history with a lot of repeated unnecessary data.
Add singleton can be called many times in one run and it will only be
updated when the value changes. The last value logged will be the one
persisted to the server"""
value_extra = {"type": data_type, "key": key, "value": value}
if data_type not in self.config["_wandb"]:
self.config["_wandb"][data_type] = {}
if data_type in self.config["_wandb"][data_type]:
old_value = self.config["_wandb"][data_type][key]
else:
old_value = None
if value_extra != old_value:
self.config["_wandb"][data_type][key] = value_extra
self.config.persist()
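    # Hypothetical illustration of the singleton layout described above:
    # _add_singleton("python", "version", {"value": "3.8.2"}) would store
    # config["_wandb"]["python"]["version"] = {"type": "python", "key": "version",
    # "value": {"value": "3.8.2"}} once, rather than logging it at every step.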
def log(
self,
data: Dict[str, Any],
step: int = None,
commit: bool = None,
sync: bool = None,
) -> None:
"""Log a dict to the global run's history.
Use `wandb.log` to log data from runs, such as scalars, images, video,
histograms, and matplotlib plots.
The most basic usage is `wandb.log({'train-loss': 0.5, 'accuracy': 0.9})`.
This will save a history row associated with the run with `train-loss=0.5`
and `accuracy=0.9`. Visualize logged data in the workspace at wandb.ai,
or locally on a self-hosted instance of the W&B app:
https://docs.wandb.ai/self-hosted
Export data to explore in a Jupyter notebook, for example, with the API:
https://docs.wandb.ai/ref/public-api
Each time you call wandb.log(), this adds a new row to history and updates
the summary values for each key logged. In the UI, summary values show
up in the run table to compare single values across runs. You might want
to update summary manually to set the *best* value instead of the *last*
value for a given metric. After you finish logging, you can set summary:
`wandb.run.summary["accuracy"] = 0.9`.
Logged values don't have to be scalars. Logging any wandb object is supported.
For example `wandb.log({"example": wandb.Image("myimage.jpg")})` will log an
example image which will be displayed nicely in the wandb UI. See
https://docs.wandb.com/library/reference/data_types for all of the different
supported types.
Logging nested metrics is encouraged and is supported in the wandb API, so
you could log multiple accuracy values with `wandb.log({'dataset-1':
{'acc': 0.9, 'loss': 0.3} ,'dataset-2': {'acc': 0.8, 'loss': 0.2}})`
and the metrics will be organized in the wandb UI.
W&B keeps track of a global step so logging related metrics together is
encouraged, so by default each time wandb.log is called a global step
is incremented. If it's inconvenient to log related metrics together
calling `wandb.log({'train-loss': 0.5}, commit=False)` and then
`wandb.log({'accuracy': 0.9})` is equivalent to calling
`wandb.log({'train-loss': 0.5, 'accuracy': 0.9})`
wandb.log is not intended to be called more than a few times per second.
If you want to log more frequently than that it's better to aggregate
the data on the client side or you may get degraded performance.
Arguments:
data: (dict, optional) A dict of serializable python objects i.e. `str`,
`ints`, `floats`, `Tensors`, `dicts`, or `wandb.data_types`.
commit: (boolean, optional) Save the metrics dict to the wandb server
and increment the step. If false `wandb.log` just updates the current
metrics dict with the data argument and metrics won't be saved until
`wandb.log` is called with `commit=True`.
step: (integer, optional) The global step in processing. This persists
any non-committed earlier steps but defaults to not committing the
specified step.
sync: (boolean, True) This argument is deprecated and currently doesn't
change the behaviour of `wandb.log`.
Examples:
Basic usage
```python
wandb.log({'accuracy': 0.9, 'epoch': 5})
```
Incremental logging
```python
wandb.log({'loss': 0.2}, commit=False)
# Somewhere else when I'm ready to report this step:
wandb.log({'accuracy': 0.8})
```
Histogram
```python
wandb.log({"gradients": wandb.Histogram(numpy_array_or_sequence)})
```
Image
```python
wandb.log({"examples": [wandb.Image(numpy_array_or_pil, caption="Label")]})
```
Video
```python
wandb.log({"video": wandb.Video(numpy_array_or_video_path, fps=4,
format="gif")})
```
Matplotlib Plot
```python
wandb.log({"chart": plt})
```
PR Curve
```python
wandb.log({'pr': wandb.plots.precision_recall(y_test, y_probas, labels)})
```
3D Object
```python
wandb.log({"generated_samples":
[wandb.Object3D(open("sample.obj")),
wandb.Object3D(open("sample.gltf")),
wandb.Object3D(open("sample.glb"))]})
```
For more examples, see https://docs.wandb.com/library/log
Raises:
wandb.Error: if called before `wandb.init`
ValueError: if invalid data is passed
"""
current_pid = os.getpid()
if current_pid != self._pid:
message = "log() ignored (called from pid={}, init called from pid={}). See: https://docs.wandb.ai/library/init#multiprocess".format(
current_pid, self._pid
)
if self._settings._strict:
wandb.termerror(message, repeat=False)
raise errors.LogMultiprocessError(
"log() does not support multiprocessing"
)
wandb.termwarn(message, repeat=False)
return
if not isinstance(data, Mapping):
raise ValueError("wandb.log must be passed a dictionary")
if any(not isinstance(key, string_types) for key in data.keys()):
raise ValueError("Key values passed to `wandb.log` must be strings.")
if step is not None:
# if step is passed in when tensorboard_sync is used we honor the step passed
# to make decisions about how to close out the history record, but will strip
# this history later on in publish_history()
using_tensorboard = len(wandb.patched["tensorboard"]) > 0
if using_tensorboard:
wandb.termwarn(
"Step cannot be set when using syncing with tensorboard. Please log your step values as a metric such as 'global_step'",
repeat=False,
)
if self.history._step > step:
wandb.termwarn(
(
"Step must only increase in log calls. "
"Step {} < {}; dropping {}.".format(
step, self.history._step, data
)
)
)
return
elif step > self.history._step:
self.history._flush()
self.history._step = step
elif commit is None:
commit = True
if commit:
self.history._row_add(data)
else:
self.history._row_update(data)
def save(
self,
glob_str: Optional[str] = None,
base_path: Optional[str] = None,
policy: str = "live",
) -> Union[bool, List[str]]:
""" Ensure all files matching `glob_str` are synced to wandb with the policy specified.
Arguments:
glob_str: (string) a relative or absolute path to a unix glob or regular
path. If this isn't specified the method is a noop.
base_path: (string) the base path to run the glob relative to
policy: (string) one of `live`, `now`, or `end`
- live: upload the file as it changes, overwriting the previous version
- now: upload the file once now
- end: only upload file when the run ends
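Examples:
A minimal illustrative sketch (the file names below are placeholders, not
taken from the original docstring):
```python
wandb.save("model.h5")
wandb.save("checkpoints/*.ckpt", policy="end")
wandb.save("/mnt/folder/file.h5", base_path="/mnt")
```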
"""
if glob_str is None:
# noop for historical reasons, run.save() may be called in legacy code
wandb.termwarn(
(
"Calling run.save without any arguments is deprecated."
"Changes to attributes are automatically persisted."
)
)
return True
if policy not in ("live", "end", "now"):
raise ValueError(
'Only "live" "end" and "now" policies are currently supported.'
)
if isinstance(glob_str, bytes):
glob_str = glob_str.decode("utf-8")
if not isinstance(glob_str, string_types):
raise ValueError("Must call wandb.save(glob_str) with glob_str a str")
if base_path is None:
if os.path.isabs(glob_str):
base_path = os.path.dirname(glob_str)
wandb.termwarn(
(
"Saving files without folders. If you want to preserve "
"sub directories pass base_path to wandb.save, i.e. "
'wandb.save("/mnt/folder/file.h5", base_path="/mnt")'
)
)
else:
base_path = "."
wandb_glob_str = os.path.relpath(glob_str, base_path)
if ".." + os.sep in wandb_glob_str:
raise ValueError("globs can't walk above base_path")
with telemetry.context(run=self) as tel:
tel.feature.save = True
if glob_str.startswith("gs://") or glob_str.startswith("s3://"):
wandb.termlog(
"%s is a cloud storage url, can't save file to wandb." % glob_str
)
return []
files = glob.glob(os.path.join(self.dir, wandb_glob_str))
warn = False
if len(files) == 0 and "*" in wandb_glob_str:
warn = True
for path in glob.glob(glob_str):
file_name = os.path.relpath(path, base_path)
abs_path = os.path.abspath(path)
wandb_path = os.path.join(self.dir, file_name)
wandb.util.mkdir_exists_ok(os.path.dirname(wandb_path))
# We overwrite symlinks because namespaces can change in Tensorboard
if os.path.islink(wandb_path) and abs_path != os.readlink(wandb_path):
os.remove(wandb_path)
os.symlink(abs_path, wandb_path)
elif not os.path.exists(wandb_path):
os.symlink(abs_path, wandb_path)
files.append(wandb_path)
if warn:
file_str = "%i file" % len(files)
if len(files) > 1:
file_str += "s"
wandb.termwarn(
(
"Symlinked %s into the W&B run directory, "
"call wandb.save again to sync new files."
)
% file_str
)
files_dict = dict(files=[(wandb_glob_str, policy)])
if self._backend:
self._backend.interface.publish_files(files_dict)
return files
def restore(
self,
name: str,
run_path: Optional[str] = None,
replace: bool = False,
root: Optional[str] = None,
) -> Union[None, TextIO]:
return restore(name, run_path or self.path, replace, root or self.dir)
def finish(self, exit_code: int = None) -> None:
"""Marks a run as finished, and finishes uploading all data. This is
used when creating multiple runs in the same process. We automatically
call this method when your script exits.
"""
with telemetry.context(run=self) as tel:
tel.feature.finish = True
# detach logger, other setup cleanup
logger.info("finishing run %s", self.path)
for hook in self._teardown_hooks:
hook()
self._teardown_hooks = []
self._atexit_cleanup(exit_code=exit_code)
if self._wl and len(self._wl._global_run_stack) > 0:
self._wl._global_run_stack.pop()
module.unset_globals()
def join(self, exit_code: int = None) -> None:
"""Deprecated alias for `finish()` - please use finish"""
self.finish(exit_code=exit_code)
# TODO(jhr): annotate this
def plot_table(self, vega_spec_name, data_table, fields, string_fields=None): # type: ignore
"""Creates a custom plot on a table.
Arguments:
vega_spec_name: the name of the spec for the plot
data_table: a wandb.Table object containing the data to
be used on the visualization
fields: a dict mapping from table keys to fields that the custom
visualization needs
string_fields: a dict that provides values for any string constants
the custom visualization needs
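Example:
A hedged sketch of typical usage (the spec name, column names, and title
below are placeholder values; `run` is assumed to be the object returned
by `wandb.init()`):
```python
table = wandb.Table(data=[[1, 10], [2, 20]], columns=["step", "height"])
chart = run.plot_table(
    "my-team/my-spec",
    table,
    {"x": "step", "y": "height"},
    {"title": "Height over time"},
)
run.log({"custom_chart": chart})
```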
"""
visualization = create_custom_chart(
vega_spec_name, data_table, fields, string_fields or {}
)
return visualization
def _set_upgraded_version_message(self, msg: str) -> None:
self._upgraded_version_message = msg
def _set_deleted_version_message(self, msg: str) -> None:
self._deleted_version_message = msg
def _set_yanked_version_message(self, msg: str) -> None:
self._yanked_version_message = msg
def _add_panel(
self, visualize_key: str, panel_type: str, panel_config: dict
) -> None:
config = {
"panel_type": panel_type,
"panel_config": panel_config,
}
self._config_callback(val=config, key=("_wandb", "visualize", visualize_key))
def _get_url_query_string(self) -> str:
s = self._settings
# TODO(jhr): migrate to new settings, but for now this is safer
api = internal.Api()
if api.settings().get("anonymous") != "true":
return ""
api_key = apikey.api_key(settings=s)
return "?" + urlencode({"apiKey": api_key})
def _get_project_url(self) -> str:
s = self._settings
r = self._run_obj
if not r:
return ""
app_url = wandb.util.app_url(s.base_url)
qs = self._get_url_query_string()
url = "{}/{}/{}{}".format(
app_url, url_quote(r.entity), url_quote(r.project), qs
)
return url
def _get_run_url(self) -> str:
s = self._settings
r = self._run_obj
if not r:
return ""
app_url = wandb.util.app_url(s.base_url)
qs = self._get_url_query_string()
url = "{}/{}/{}/runs/{}{}".format(
app_url, url_quote(r.entity), url_quote(r.project), url_quote(r.run_id), qs
)
return url
def _get_sweep_url(self) -> str:
"""Generate a url for a sweep.
Returns:
(str): url if the run is part of a sweep
(None): if the run is not part of the sweep
"""
r = self._run_obj
if not r:
return ""
sweep_id = r.sweep_id
if not sweep_id:
return ""
app_url = wandb.util.app_url(self._settings.base_url)
qs = self._get_url_query_string()
return "{base}/{entity}/{project}/sweeps/{sweepid}{qs}".format(
base=app_url,
entity=url_quote(r.entity),
project=url_quote(r.project),
sweepid=url_quote(sweep_id),
qs=qs,
)
def _get_run_name(self) -> str:
r = self._run_obj
if not r:
return ""
return r.display_name
def _display_run(self) -> None:
project_url = self._get_project_url()
run_url = self._get_run_url()
sweep_url = self._get_sweep_url()
version_str = "Tracking run with wandb version {}".format(wandb.__version__)
if self.resumed:
run_state_str = "Resuming run"
else:
run_state_str = "Syncing run"
run_name = self._get_run_name()
app_url = wandb.util.app_url(self._settings.base_url)
sync_dir = self._settings._sync_dir
if self._settings._jupyter:
sync_dir = "<code>{}</code>".format(sync_dir)
dir_str = "Run data is saved locally in {}".format(sync_dir)
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
sweep_line = (
'Sweep page: <a href="{}" target="_blank">{}</a><br/>\n'.format(
sweep_url, sweep_url
)
if sweep_url
else ""
)
docs_html = '<a href="https://docs.wandb.com/integrations/jupyter.html" target="_blank">(Documentation)</a>' # noqa: E501
ipython.display_html(
"""
{}<br/>
{} <strong style="color:{}">{}</strong> to <a href="{}" target="_blank">Weights & Biases</a> {}.<br/>
Project page: <a href="{}" target="_blank">{}</a><br/>
{}Run page: <a href="{}" target="_blank">{}</a><br/>
{}<br/><br/>
""".format( # noqa: E501
version_str,
run_state_str,
RUN_NAME_COLOR,
run_name,
app_url,
docs_html,
project_url,
project_url,
sweep_line,
run_url,
run_url,
dir_str,
)
)
else:
wandb.termlog(version_str)
wandb.termlog(
"{} {}".format(run_state_str, click.style(run_name, fg="yellow"))
)
emojis = dict(star="", broom="", rocket="")
if platform.system() != "Windows" and sys.stdout.encoding == "UTF-8":
emojis = dict(star="⭐️", broom="🧹", rocket="🚀")
wandb.termlog(
"{} View project at {}".format(
emojis.get("star", ""),
click.style(project_url, underline=True, fg="blue"),
)
)
if sweep_url:
wandb.termlog(
"{} View sweep at {}".format(
emojis.get("broom", ""),
click.style(sweep_url, underline=True, fg="blue"),
)
)
wandb.termlog(
"{} View run at {}".format(
emojis.get("rocket", ""),
click.style(run_url, underline=True, fg="blue"),
)
)
wandb.termlog(dir_str)
if not self._settings._offline:
wandb.termlog("Run `wandb offline` to turn off syncing.")
print("")
def _redirect(
self,
stdout_slave_fd: Optional[int],
stderr_slave_fd: Optional[int],
console: SettingsConsole = None,
) -> None:
if console is None:
console = self._settings._console
logger.info("redirect: %s", console)
out_redir: redirect.RedirectBase
err_redir: redirect.RedirectBase
if console == self._settings.Console.REDIRECT:
logger.info("Redirecting console.")
out_redir = redirect.Redirect(
src="stdout",
cbs=[
lambda data: self._redirect_cb("stdout", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
err_redir = redirect.Redirect(
src="stderr",
cbs=[
lambda data: self._redirect_cb("stderr", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
if os.name == "nt":
def wrap_fallback() -> None:
if self._out_redir:
self._out_redir.uninstall()
if self._err_redir:
self._err_redir.uninstall()
msg = (
"Tensorflow detected. Stream redirection is not supported "
"on Windows when tensorflow is imported. Falling back to "
"wrapping stdout/err."
)
wandb.termlog(msg)
self._redirect(None, None, console=self._settings.Console.WRAP)
add_import_hook("tensorflow", wrap_fallback)
elif console == self._settings.Console.WRAP:
logger.info("Wrapping output streams.")
out_redir = redirect.StreamWrapper(
src="stdout",
cbs=[
lambda data: self._redirect_cb("stdout", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
err_redir = redirect.StreamWrapper(
src="stderr",
cbs=[
lambda data: self._redirect_cb("stderr", data), # type: ignore
self._output_writer.write, # type: ignore
],
)
elif console == self._settings.Console.OFF:
return
else:
raise ValueError("unhandled console")
try:
out_redir.install()
err_redir.install()
self._out_redir = out_redir
self._err_redir = err_redir
logger.info("Redirects installed.")
except Exception as e:
print(e)
logger.error("Failed to redirect.", exc_info=e)
return
def _restore(self) -> None:
logger.info("restore")
# TODO(jhr): drain and shutdown all threads
if self._use_redirect:
if self._out_redir:
self._out_redir.uninstall()
if self._err_redir:
self._err_redir.uninstall()
return
if self.stdout_redirector:
self.stdout_redirector.restore()
if self.stderr_redirector:
self.stderr_redirector.restore()
if self._save_stdout:
sys.stdout = self._save_stdout
if self._save_stderr:
sys.stderr = self._save_stderr
logger.info("restore done")
def _atexit_cleanup(self, exit_code: int = None) -> None:
if self._backend is None:
logger.warning("process exited without backend configured")
return
if self._atexit_cleanup_called:
return
self._atexit_cleanup_called = True
exit_code = exit_code or self._hooks.exit_code if self._hooks else 0
logger.info("got exitcode: %d", exit_code)
if exit_code == 0:
# Cleanup our resume file on a clean exit
if os.path.exists(self._settings.resume_fname):
os.remove(self._settings.resume_fname)
self._exit_code = exit_code
try:
self._on_finish()
except KeyboardInterrupt as ki:
if wandb.wandb_agent._is_running():
raise ki
wandb.termerror("Control-C detected -- Run data was not synced")
if ipython._get_python_type() == "python":
os._exit(-1)
except Exception as e:
self._console_stop()
self._backend.cleanup()
logger.error("Problem finishing run", exc_info=e)
wandb.termerror("Problem finishing run")
traceback.print_exception(*sys.exc_info())
if ipython._get_python_type() == "python":
os._exit(-1)
else:
# if silent, skip this as it is used to output stuff
if self._settings._silent:
return
self._on_final()
def _console_start(self) -> None:
logger.info("atexit reg")
self._hooks = ExitHooks()
self._hooks.hook()
atexit.register(lambda: self._atexit_cleanup())
if self._use_redirect:
# setup fake callback
self._redirect_cb = self._console_callback
output_log_path = os.path.join(self.dir, filenames.OUTPUT_FNAME)
self._output_writer = filesystem.CRDedupedFile(open(output_log_path, "wb"))
self._redirect(self._stdout_slave_fd, self._stderr_slave_fd)
def _console_stop(self) -> None:
self._restore()
if self._output_writer:
self._output_writer.close()
self._output_writer = None
def _on_init(self) -> None:
self._show_version_info()
def _on_start(self) -> None:
# TODO: make offline mode in jupyter use HTML
if self._settings._offline:
wandb.termlog(
(
"W&B syncing is set to `offline` in this directory. "
"Run `wandb online` or set WANDB_MODE=online to enable cloud syncing."
)
)
if self._settings.save_code and self._settings.code_dir is not None:
self.log_code(self._settings.code_dir)
if self._run_obj and not self._settings._silent:
self._display_run()
if self._backend and not self._settings._offline:
self._run_status_checker = RunStatusChecker(self._backend.interface)
self._console_start()
def _pusher_print_status(
self,
progress: FilePusherStats,
prefix: bool = True,
done: Optional[bool] = False,
) -> None:
if self._settings._offline:
return
line = " %.2fMB of %.2fMB uploaded (%.2fMB deduped)\r" % (
progress.uploaded_bytes / 1048576.0,
progress.total_bytes / 1048576.0,
progress.deduped_bytes / 1048576.0,
)
if self._jupyter_progress:
percent_done: float
if progress.total_bytes == 0:
percent_done = 1
else:
percent_done = progress.uploaded_bytes / progress.total_bytes
self._jupyter_progress.update(percent_done, line)
if done:
self._jupyter_progress.close()
elif not self._settings._jupyter:
spinner_states = ["-", "\\", "|", "/"]
line = spinner_states[self._progress_step % 4] + line
self._progress_step += 1
wandb.termlog(line, newline=False, prefix=prefix)
if done:
dedupe_fraction = (
progress.deduped_bytes / float(progress.total_bytes)
if progress.total_bytes > 0
else 0
)
if dedupe_fraction > 0.01:
wandb.termlog(
"W&B sync reduced upload amount by %.1f%% "
% (dedupe_fraction * 100),
prefix=prefix,
)
# clear progress line.
wandb.termlog(" " * 79, prefix=prefix)
def _on_finish_progress(self, progress: FilePusherStats, done: bool = None) -> None:
self._pusher_print_status(progress, done=done)
def _wait_for_finish(self) -> PollExitResponse:
while True:
if self._backend:
poll_exit_resp = self._backend.interface.communicate_poll_exit()
logger.info("got exit ret: %s", poll_exit_resp)
if poll_exit_resp:
done = poll_exit_resp.done
pusher_stats = poll_exit_resp.pusher_stats
if pusher_stats:
self._on_finish_progress(pusher_stats, done)
if done:
return poll_exit_resp
time.sleep(0.1)
def _on_finish(self) -> None:
trigger.call("on_finished")
# populate final import telemetry
with telemetry.context(run=self) as tel:
self._telemetry_imports(tel.imports_finish)
if self._run_status_checker:
self._run_status_checker.stop()
# make sure all uncommitted history is flushed
self.history._flush()
self._console_stop() # TODO: there's a race here with jupyter console logging
if not self._settings._silent:
if self._backend:
pid = self._backend._internal_pid
status_str = "Waiting for W&B process to finish, PID {}".format(pid)
if not self._exit_code:
status_str += "\nProgram ended successfully."
else:
status_str += "\nProgram failed with code {}. ".format(self._exit_code)
if not self._settings._offline:
status_str += " Press ctrl-c to abort syncing."
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
ipython.display_html("<br/>" + status_str.replace("\n", "<br/>"))
else:
print("")
wandb.termlog(status_str)
# telemetry could have changed, publish final data
if self._backend:
self._backend.interface.publish_telemetry(self._telemetry_obj)
# TODO: we need to handle catastrophic failure better
# some tests were timing out on sending exit for reasons not clear to me
if self._backend:
self._backend.interface.publish_exit(self._exit_code)
# Wait for data to be synced
self._poll_exit_response = self._wait_for_finish()
if self._backend:
ret = self._backend.interface.communicate_summary()
self._final_summary = proto_util.dict_from_proto_list(ret.item)
if self._backend:
ret = self._backend.interface.communicate_sampled_history()
d = {item.key: item.values_float or item.values_int for item in ret.item}
self._sampled_history = d
if self._backend:
self._backend.cleanup()
if self._run_status_checker:
self._run_status_checker.join()
def _on_final(self) -> None:
# check for warnings and errors, show log file locations
if self._reporter:
# TODO: handle warnings and errors nicely in jupyter
warning_lines = self._reporter.warning_lines
if warning_lines:
wandb.termlog("Warnings:")
for line in warning_lines:
wandb.termlog(line)
if len(warning_lines) < self._reporter.warning_count:
wandb.termlog("More warnings")
error_lines = self._reporter.error_lines
if error_lines:
wandb.termlog("Errors:")
for line in error_lines:
wandb.termlog(line)
if len(error_lines) < self._reporter.error_count:
wandb.termlog("More errors")
if self._settings.log_user:
log_user = self._settings.log_user
if self._settings._jupyter:
log_user = "<code>{}</code>".format(log_user)
log_str = "Find user logs for this run at: {}".format(log_user)
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
ipython.display_html(log_str)
else:
wandb.termlog(log_str)
if self._settings.log_internal:
log_internal = self._settings.log_internal
if self._settings._jupyter:
log_internal = "<code>{}</code>".format(log_internal)
log_str = "Find internal logs for this run at: {}".format(log_internal)
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
ipython.display_html(log_str)
else:
wandb.termlog(log_str)
self._show_summary()
self._show_history()
self._show_files()
if self._run_obj:
run_url = self._get_run_url()
run_name = self._get_run_name()
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
ipython.display_html(
"""
<br/>Synced <strong style="color:{}">{}</strong>: <a href="{}" target="_blank">{}</a><br/>
""".format(
RUN_NAME_COLOR, run_name, run_url, run_url
)
)
else:
wandb.termlog(
"\nSynced {}: {}".format(
click.style(run_name, fg="yellow"),
click.style(run_url, fg="blue"),
)
)
if self._settings._offline:
# TODO: handle jupyter offline messages
wandb.termlog("You can sync this run to the cloud by running:")
wandb.termlog(
click.style(
"wandb sync {}".format(self._settings._sync_dir), fg="yellow"
)
)
self._show_version_info(footer=True)
def _show_version_info(self, footer: bool = None) -> None:
package_problem = False
if self._deleted_version_message:
wandb.termerror(self._deleted_version_message)
package_problem = True
elif self._yanked_version_message:
wandb.termwarn(self._yanked_version_message)
package_problem = True
# only display upgrade message if packages are bad or in header
if not footer or package_problem:
if self._upgraded_version_message:
wandb.termlog(self._upgraded_version_message)
def _show_summary(self) -> None:
if self._final_summary:
logger.info("rendering summary")
max_len = max([len(k) for k in self._final_summary.keys()])
format_str = " {:>%s} {}" % max_len
summary_rows = []
for k, v in iteritems(self._final_summary):
# arrays etc. might be too large. for now we just don't print them
if isinstance(v, string_types):
if len(v) >= 20:
v = v[:20] + "..."
summary_rows.append((k, v))
elif isinstance(v, numbers.Number):
if isinstance(v, float):
v = round(v, 5)
summary_rows.append((k, v))
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
summary_table = ipython.STYLED_TABLE_HTML
for row in summary_rows:
summary_table += "<tr><td>{}</td><td>{}</td></tr>".format(*row)
summary_table += "</table>"
ipython.display_html("<h3>Run summary:</h3><br/>" + summary_table)
else:
summary_lines = "\n".join(
[format_str.format(k, v) for k, v in summary_rows]
)
wandb.termlog("Run summary:")
wandb.termlog(summary_lines)
def _show_history(self) -> None:
if not self._sampled_history:
return
# Only print sparklines if the terminal is utf-8
# In some python 2.7 tests sys.stdout is a 'cStringIO.StringO' object
# which doesn't have the attribute 'encoding'
encoding = getattr(sys.stdout, "encoding", None)
if not encoding or encoding.upper() not in ("UTF_8", "UTF-8",):
return
logger.info("rendering history")
max_len = max([len(k) for k in self._sampled_history])
history_rows = []
for key in self._sampled_history:
vals = wandb.util.downsample(self._sampled_history[key], 40)
if any((not isinstance(v, numbers.Number) for v in vals)):
continue
line = sparkline.sparkify(vals)
history_rows.append((key, line))
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
history_table = ipython.STYLED_TABLE_HTML
for row in history_rows:
history_table += "<tr><td>{}</td><td>{}</td></tr>".format(*row)
history_table += "</table>"
ipython.display_html("<h3>Run history:</h3><br/>" + history_table + "<br/>")
else:
wandb.termlog("Run history:")
history_lines = ""
format_str = " {:>%s} {}\n" % max_len
for row in history_rows:
history_lines += format_str.format(*row)
wandb.termlog(history_lines)
def _show_files(self) -> None:
if not self._poll_exit_response or not self._poll_exit_response.file_counts:
return
if self._settings._offline:
return
logger.info("logging synced files")
if self._settings._silent:
return
file_str = "Synced {} W&B file(s), {} media file(s), {} artifact file(s) and {} other file(s)".format( # noqa:E501
self._poll_exit_response.file_counts.wandb_count,
self._poll_exit_response.file_counts.media_count,
self._poll_exit_response.file_counts.artifact_count,
self._poll_exit_response.file_counts.other_count,
)
if self._settings._jupyter and ipython._get_python_type() == "jupyter":
ipython.display_html(file_str)
else:
wandb.termlog(file_str)
def _save_job_spec(self) -> None:
envdict = dict(python="python3.6", requirements=[],)
varsdict = {"WANDB_DISABLE_CODE": "True"}
source = dict(
git="git@github.com:wandb/examples.git", branch="master", commit="bbd8d23",
)
execdict = dict(
program="train.py",
directory="keras-cnn-fashion",
envvars=varsdict,
args=[],
)
configdict = (dict(self._config),)
artifactsdict = dict(dataset="v1",)
inputdict = dict(config=configdict, artifacts=artifactsdict,)
job_spec = {
"kind": "WandbJob",
"version": "v0",
"environment": envdict,
"source": source,
"exec": execdict,
"input": inputdict,
}
s = json.dumps(job_spec, indent=4)
spec_filename = filenames.JOBSPEC_FNAME
with open(spec_filename, "w") as f:
print(s, file=f)
self.save(spec_filename)
def define_metric(
self,
name: str,
step_metric: Union[str, wandb_metric.Metric, None] = None,
step_sync: bool = None,
hidden: bool = None,
summary: str = None,
goal: str = None,
overwrite: bool = None,
**kwargs: Any
) -> wandb_metric.Metric:
"""Define metric properties which will later be logged with `wandb.log()`.
Arguments:
name: Name of the metric.
step_metric: Independent variable associated with the metric.
step_sync: Automatically add `step_metric` to history if needed.
Defaults to True if step_metric is specified.
hidden: Hide this metric from automatic plots.
summary: Specify aggregate metrics added to summary.
Supported aggregations: "min,max,mean,best,last,copy,none"
Default aggregation is `copy`
Aggregation `best` defaults to `goal`==`minimize`
goal: Specify direction for optimizing the metric.
Supported directions: "minimize,maximize"
Returns:
A metric object is returned that can be further specified.
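Example:
A minimal sketch (the metric names are placeholders); `run` is assumed to be
the object returned by `wandb.init()`:
```python
run.define_metric("epoch")
run.define_metric("val/loss", step_metric="epoch", summary="min", goal="minimize")
run.log({"epoch": 1, "val/loss": 0.42})
```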
"""
if not name:
raise wandb.Error("define_metric() requires non-empty name argument")
for k in kwargs:
wandb.termwarn("Unhandled define_metric() arg: {}".format(k))
if isinstance(step_metric, wandb_metric.Metric):
step_metric = step_metric.name
for arg_name, arg_val, exp_type in (
("name", name, string_types),
("step_metric", step_metric, string_types),
("step_sync", step_sync, bool),
("hidden", hidden, bool),
("summary", summary, string_types),
("goal", goal, string_types),
("overwrite", overwrite, bool),
):
# NOTE: type checking is broken for isinstance and string_types
if arg_val is not None and not isinstance(arg_val, exp_type): # type: ignore
arg_type = type(arg_val).__name__
raise wandb.Error(
"Unhandled define_metric() arg: {} type: {}".format(
arg_name, arg_type
)
)
stripped = name[:-1] if name.endswith("*") else name
if "*" in stripped:
raise wandb.Error(
"Unhandled define_metric() arg: name (glob suffixes only): {}".format(
name
)
)
summary_ops: Optional[Sequence[str]] = None
if summary:
summary_items = [s.lower() for s in summary.split(",")]
summary_ops = []
valid = {"min", "max", "mean", "best", "last", "copy", "none"}
for i in summary_items:
if i not in valid:
raise wandb.Error(
"Unhandled define_metric() arg: summary op: {}".format(i)
)
summary_ops.append(i)
goal_cleaned: Optional[str] = None
if goal is not None:
goal_cleaned = goal[:3].lower()
valid_goal = {"min", "max"}
if goal_cleaned not in valid_goal:
raise wandb.Error(
"Unhandled define_metric() arg: goal: {}".format(goal)
)
m = wandb_metric.Metric(
name=name,
step_metric=step_metric,
step_sync=step_sync,
summary=summary_ops,
hidden=hidden,
goal=goal_cleaned,
overwrite=overwrite,
)
m._set_callback(self._metric_callback)
m._commit()
with telemetry.context(run=self) as tel:
tel.feature.metric = True
return m
# TODO(jhr): annotate this
def watch(self, models, criterion=None, log="gradients", log_freq=100, idx=None) -> None: # type: ignore
wandb.watch(models, criterion, log, log_freq, idx)
# TODO(jhr): annotate this
def use_artifact(self, artifact_or_name, type=None, aliases=None): # type: ignore
""" Declare an artifact as an input to a run, call `download` or `file` on
the returned object to get the contents locally.
Arguments:
artifact_or_name: (str or Artifact) An artifact name.
May be prefixed with entity/project. Valid names
can be in the following forms:
- name:version
- name:alias
- digest
You can also pass an Artifact object created by calling `wandb.Artifact`
type: (str, optional) The type of artifact to use.
aliases: (list, optional) Aliases to apply to this artifact
Returns:
An `Artifact` object.
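Example:
Illustrative only (the artifact name and type are placeholders); `run` is
assumed to be the object returned by `wandb.init()`:
```python
artifact = run.use_artifact("pedestrian-dataset:v1", type="dataset")
data_dir = artifact.download()
```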
"""
r = self._run_obj
api = internal.Api(default_settings={"entity": r.entity, "project": r.project})
api.set_current_run_id(self.id)
if isinstance(artifact_or_name, str):
name = artifact_or_name
public_api = self._public_api()
artifact = public_api.artifact(type=type, name=name)
if type is not None and type != artifact.type:
raise ValueError(
"Supplied type {} does not match type {} of artifact {}".format(
type, artifact.type, artifact.name
)
)
api.use_artifact(artifact.id)
return artifact
else:
artifact = artifact_or_name
if aliases is None:
aliases = []
elif isinstance(aliases, str):
aliases = [aliases]
if isinstance(artifact_or_name, wandb.Artifact):
self._log_artifact(
artifact, aliases, is_user_created=True, use_after_commit=True
)
return artifact
elif isinstance(artifact, public.Artifact):
api.use_artifact(artifact.id)
return artifact
else:
raise ValueError(
'You must pass an artifact name (e.g. "pedestrian-dataset:v1"), an instance of wandb.Artifact, or wandb.Api().artifact() to use_artifact' # noqa: E501
)
def log_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
) -> wandb_artifacts.Artifact:
""" Declare an artifact as output of a run.
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
Returns:
An `Artifact` object.
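Example:
Illustrative only (the artifact name, type, and file path are placeholders);
`run` is assumed to be the object returned by `wandb.init()`:
```python
artifact = wandb.Artifact("my-model", type="model")
artifact.add_file("model.h5")
run.log_artifact(artifact, aliases=["latest", "best"])
```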
"""
return self._log_artifact(artifact_or_path, name, type, aliases)
def upsert_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
distributed_id: Optional[str] = None,
) -> wandb_artifacts.Artifact:
""" Declare (or append tp) a non-finalized artifact as output of a run. Note that you must call
run.finish_artifact() to finalize the artifact. This is useful when distributed jobs
need to all contribute to the same artifact.
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
distributed_id: (string, optional) Unique string that all distributed jobs share. If None,
defaults to the run's group name.
Returns:
An `Artifact` object.
"""
if self.group == "" and distributed_id is None:
raise TypeError(
"Cannot upsert artifact unless run is in a group or distributed_id is provided"
)
if distributed_id is None:
distributed_id = self.group
return self._log_artifact(
artifact_or_path,
name,
type,
aliases,
distributed_id=distributed_id,
finalize=False,
)
def finish_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
distributed_id: Optional[str] = None,
) -> wandb_artifacts.Artifact:
""" Finish a non-finalized artifact as output of a run. Subsequent "upserts" with
the same distributed ID will result in a new version
Arguments:
artifact_or_path: (str or Artifact) A path to the contents of this artifact,
can be in the following forms:
- `/local/directory`
- `/local/directory/file.txt`
- `s3://bucket/path`
You can also pass an Artifact object created by calling
`wandb.Artifact`.
name: (str, optional) An artifact name. May be prefixed with entity/project.
Valid names can be in the following forms:
- name:version
- name:alias
- digest
This will default to the basename of the path prepended with the current
run id if not specified.
type: (str) The type of artifact to log, examples include `dataset`, `model`
aliases: (list, optional) Aliases to apply to this artifact,
defaults to `["latest"]`
distributed_id: (string, optional) Unique string that all distributed jobs share. If None,
defaults to the run's group name.
Returns:
An `Artifact` object.
"""
if self.group == "" and distributed_id is None:
raise TypeError(
"Cannot finish artifact unless run is in a group or distributed_id is provided"
)
if distributed_id is None:
distributed_id = self.group
return self._log_artifact(
artifact_or_path,
name,
type,
aliases,
distributed_id=distributed_id,
finalize=True,
)
def _log_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
distributed_id: Optional[str] = None,
finalize: bool = True,
is_user_created: bool = False,
use_after_commit: bool = False,
) -> wandb_artifacts.Artifact:
if not finalize and distributed_id is None:
raise TypeError("Must provide distributed_id if artifact is not finalize")
artifact, aliases = self._prepare_artifact(
artifact_or_path, name, type, aliases
)
artifact.distributed_id = distributed_id
self._assert_can_log_artifact(artifact)
if self._backend:
if not self._settings._offline:
future = self._backend.interface.communicate_artifact(
self,
artifact,
aliases,
finalize=finalize,
is_user_created=is_user_created,
use_after_commit=use_after_commit,
)
artifact._logged_artifact = _LazyArtifact(self._public_api(), future)
else:
self._backend.interface.publish_artifact(
self,
artifact,
aliases,
finalize=finalize,
is_user_created=is_user_created,
use_after_commit=use_after_commit,
)
return artifact
def _public_api(self) -> PublicApi:
overrides = {"run": self.id}
run_obj = self._run_obj
if run_obj is not None:
overrides["entity"] = run_obj.entity
overrides["project"] = run_obj.project
return public.Api(overrides)
# TODO(jhr): annotate this
def _assert_can_log_artifact(self, artifact) -> None: # type: ignore
if not self._settings._offline:
public_api = self._public_api()
expected_type = public.Artifact.expected_type(
public_api.client,
artifact.name,
public_api.settings["entity"],
public_api.settings["project"],
)
if expected_type is not None and artifact.type != expected_type:
raise ValueError(
"Expected artifact type {}, got {}".format(
expected_type, artifact.type
)
)
def _prepare_artifact(
self,
artifact_or_path: Union[wandb_artifacts.Artifact, str],
name: Optional[str] = None,
type: Optional[str] = None,
aliases: Optional[List[str]] = None,
) -> Tuple[wandb_artifacts.Artifact, List[str]]:
aliases = aliases or ["latest"]
if isinstance(artifact_or_path, str):
if name is None:
name = "run-%s-%s" % (self.id, os.path.basename(artifact_or_path))
artifact = wandb.Artifact(name, type)
if os.path.isfile(artifact_or_path):
artifact.add_file(artifact_or_path)
elif os.path.isdir(artifact_or_path):
artifact.add_dir(artifact_or_path)
elif "://" in artifact_or_path:
artifact.add_reference(artifact_or_path)
else:
raise ValueError(
"path must be a file, directory or external"
"reference like s3://bucket/path"
)
else:
artifact = artifact_or_path
if not isinstance(artifact, wandb.Artifact):
raise ValueError(
"You must pass an instance of wandb.Artifact or a "
"valid file path to log_artifact"
)
if isinstance(aliases, str):
aliases = [aliases]
artifact.finalize()
return artifact, aliases
def alert(
self,
title: str,
text: str,
level: Union[str, None] = None,
wait_duration: Union[int, float, timedelta, None] = None,
) -> None:
"""Launch an alert with the given title and text.
Arguments:
title: (str) The title of the alert, must be less than 64 characters long.
text: (str) The text body of the alert.
level: (str or wandb.AlertLevel, optional) The alert level to use, either: `INFO`, `WARN`, or `ERROR`.
wait_duration: (int, float, or timedelta, optional) The time to wait (in seconds) before sending another
alert with this title.
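Example:
A minimal sketch (the title and text are placeholders); `run` is assumed to
be the object returned by `wandb.init()`:
```python
run.alert(
    title="Low accuracy",
    text="Accuracy dipped below the expected threshold",
    level=wandb.AlertLevel.WARN,
    wait_duration=300,
)
```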
"""
level = level or wandb.AlertLevel.INFO
if isinstance(level, wandb.AlertLevel):
level = level.value
if level not in (
wandb.AlertLevel.INFO.value,
wandb.AlertLevel.WARN.value,
wandb.AlertLevel.ERROR.value,
):
raise ValueError("level must be one of 'INFO', 'WARN', or 'ERROR'")
wait_duration = wait_duration or timedelta(minutes=1)
if isinstance(wait_duration, int) or isinstance(wait_duration, float):
wait_duration = timedelta(seconds=wait_duration)
elif not callable(getattr(wait_duration, "total_seconds", None)):
raise ValueError(
"wait_duration must be an int, float, or datetime.timedelta"
)
wait_duration = int(wait_duration.total_seconds() * 1000)
if self._backend:
self._backend.interface.publish_alert(title, text, level, wait_duration)
def __enter__(self) -> "Run":
return self
def __exit__(
self,
exc_type: Type[BaseException],
exc_val: BaseException,
exc_tb: TracebackType,
) -> bool:
exit_code = 0 if exc_type is None else 1
self.finish(exit_code)
return exc_type is None
def mark_preempting(self) -> None:
"""Mark this run as preempting and tell the internal process
to immediately report this to the server."""
if self._backend:
self._backend.interface.publish_preempting()
# We define this outside of the run context to support restoring before init
def restore(
name: str,
run_path: Optional[str] = None,
replace: bool = False,
root: Optional[str] = None,
) -> Union[None, TextIO]:
""" Downloads the specified file from cloud storage into the current directory
or run directory. By default this will only download the file if it doesn't
already exist.
Arguments:
name: the name of the file
run_path: optional path to a run to pull files from, i.e. `username/project_name/run_id`
if wandb.init has not been called, this is required.
replace: whether to download the file even if it already exists locally
root: the directory to download the file to. Defaults to the current
directory or the run directory if wandb.init was called.
Returns:
None if it can't find the file, otherwise a file object open for reading
Raises:
wandb.CommError: if we can't connect to the wandb backend
ValueError: if the file is not found or can't find run_path
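Examples:
Illustrative only (the file name is a placeholder; the run_path format
follows the Arguments description above):
```python
weights_file = wandb.restore("model-best.h5", run_path="username/project_name/run_id")
```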
"""
is_disabled = wandb.run is not None and wandb.run.disabled
run = None if is_disabled else wandb.run
if run_path is None:
if run is not None:
run_path = run.path
else:
raise ValueError(
"run_path required when calling wandb.restore before wandb.init"
)
if root is None:
if run is not None:
root = run.dir
api = public.Api()
api_run = api.run(run_path)
if root is None:
root = os.getcwd()
path = os.path.join(root, name)
if os.path.exists(path) and replace is False:
return open(path, "r")
if is_disabled:
return None
files = api_run.files([name])
if len(files) == 0:
return None
# if the file does not exist, the file has an md5 of 0
if files[0].md5 == "0":
raise ValueError("File {} not found in {}.".format(name, run_path or root))
return files[0].download(root=root, replace=True)
# propagate our doc string to the runs restore method
try:
Run.restore.__doc__ = restore.__doc__
# py2 doesn't let us set a doc string, just pass
except AttributeError:
pass
def finish(exit_code: int = None) -> None:
"""
Marks a run as finished, and finishes uploading all data.
This is used when creating multiple runs in the same process.
We automatically call this method when your script exits.
"""
if wandb.run:
wandb.run.finish(exit_code=exit_code)
# propagate our doc string to the runs restore method
try:
Run.restore.__doc__ = restore.__doc__
# py2 doesn't let us set a doc string, just pass
except AttributeError:
pass
class _LazyArtifact(ArtifactInterface):
_api: PublicApi
_instance: Optional[ArtifactInterface] = None
_future: Any
def __init__(self, api: PublicApi, future: Any):
self._api = api
self._future = future
def _assert_instance(self) -> ArtifactInterface:
if not self._instance:
raise ValueError(
"Must call wait() before accessing logged artifact properties"
)
return self._instance
def __getattr__(self, item: str) -> Any:
self._assert_instance()
return getattr(self._instance, item)
def wait(self) -> ArtifactInterface:
if not self._instance:
resp = self._future.get().response.log_artifact_response
if resp.error_message:
raise ValueError(resp.error_message)
self._instance = public.Artifact.from_id(resp.artifact_id, self._api.client)
assert isinstance(
self._instance, ArtifactInterface
), "Insufficient permissions to fetch Artifact with id {} from {}".format(
resp.artifact_id, self._api.client.app_url()
)
return self._instance
@property
def id(self) -> Optional[str]:
return self._assert_instance().id
@property
def version(self) -> str:
return self._assert_instance().version
@property
def name(self) -> str:
return self._assert_instance().name
@property
def type(self) -> str:
return self._assert_instance().type
@property
def entity(self) -> str:
return self._assert_instance().entity
@property
def project(self) -> str:
return self._assert_instance().project
@property
def manifest(self) -> "ArtifactManifest":
return self._assert_instance().manifest
@property
def digest(self) -> str:
return self._assert_instance().digest
@property
def state(self) -> str:
return self._assert_instance().state
@property
def size(self) -> int:
return self._assert_instance().size
@property
def commit_hash(self) -> str:
return self._assert_instance().commit_hash
@property
def description(self) -> Optional[str]:
return self._assert_instance().description
@description.setter
def description(self, desc: Optional[str]) -> None:
self._assert_instance().description = desc
@property
def metadata(self) -> dict:
return self._assert_instance().metadata
@metadata.setter
def metadata(self, metadata: dict) -> None:
self._assert_instance().metadata = metadata
@property
def aliases(self) -> List[str]:
return self._assert_instance().aliases
@aliases.setter
def aliases(self, aliases: List[str]) -> None:
self._assert_instance().aliases = aliases
def used_by(self) -> List["wandb.apis.public.Run"]:
return self._assert_instance().used_by()
def logged_by(self) -> "wandb.apis.public.Run":
return self._assert_instance().logged_by()
# Commenting this block out since this code is unreachable since LocalArtifact
# overrides them and therefore untestable.
# Leaving behind as we may want to support these in the future.
# def new_file(self, name: str, mode: str = "w") -> Any: # TODO: Refine Type
# return self._assert_instance().new_file(name, mode)
# def add_file(
# self,
# local_path: str,
# name: Optional[str] = None,
# is_tmp: Optional[bool] = False,
# ) -> Any: # TODO: Refine Type
# return self._assert_instance().add_file(local_path, name, is_tmp)
# def add_dir(self, local_path: str, name: Optional[str] = None) -> None:
# return self._assert_instance().add_dir(local_path, name)
# def add_reference(
# self,
# uri: Union["ArtifactEntry", str],
# name: Optional[str] = None,
# checksum: bool = True,
# max_objects: Optional[int] = None,
# ) -> Any: # TODO: Refine Type
# return self._assert_instance().add_reference(uri, name, checksum, max_objects)
# def add(self, obj: "WBValue", name: str) -> Any: # TODO: Refine Type
# return self._assert_instance().add(obj, name)
def get_path(self, name: str) -> "ArtifactEntry":
return self._assert_instance().get_path(name)
def get(self, name: str) -> "WBValue":
return self._assert_instance().get(name)
def download(self, root: Optional[str] = None, recursive: bool = False) -> str:
return self._assert_instance().download(root, recursive)
def checkout(self, root: Optional[str] = None) -> str:
return self._assert_instance().checkout(root)
def verify(self, root: Optional[str] = None) -> Any:
return self._assert_instance().verify(root)
def save(self) -> None:
return self._assert_instance().save()
def delete(self) -> None:
return self._assert_instance().delete()
| 36.839396 | 171 | 0.576292 |
787b7bf7b2df3425e5a4ed89f51185f9b1cf2f55 | 1,819 | py | Python | Server/app/models/account.py | DSM-Grape/Grape-Backend | 3a7e3f2f15e3872ccbf6ef59775b50001c198a9d | ["Apache-2.0"] | 3 | 2018-04-11T13:24:35.000Z | 2018-09-18T13:32:02.000Z | Server/app/models/account.py | DSM-Grape/Grape-Backend | 3a7e3f2f15e3872ccbf6ef59775b50001c198a9d | ["Apache-2.0"] | null | null | null | Server/app/models/account.py | DSM-Grape/Grape-Backend | 3a7e3f2f15e3872ccbf6ef59775b50001c198a9d | ["Apache-2.0"] | null | null | null |
from uuid import uuid4
from flask_jwt_extended import create_access_token, create_refresh_token
from mongoengine import *
class AccountModel(Document):
meta = {
'collection': 'account'
}
id = StringField(
primary_key=True
)
# For accounts created directly with the service, this is the email address
# For linked accounts, this is the ID issued by the external service
email_certified = BooleanField(
default=False
)
# Whether the email address has been verified
pw = StringField()
# required=False
plan = IntField(
default=1
)
# 1: Free
# 2: Business
# 3: First
nickname = StringField()
# required=False
class TokenModel(Document):
meta = {
'abstract': True,
'allow_inheritance': True
}
class Key(EmbeddedDocument):
owner = ReferenceField(
document_type=AccountModel,
required=True
)
user_agent = StringField(
required=True
)
key = EmbeddedDocumentField(
document_type=Key,
primary_key=True
)
# Combines multiple fields into a single primary key
identity = UUIDField(
unique=True,
default=uuid4
)
@classmethod
def _create_token(cls, account, user_agent):
return cls(
key=cls.Key(owner=account, user_agent=user_agent)
).save().identity
@classmethod
def create_access_token(cls, account, user_agent):
return create_access_token(
str(cls._create_token(account, user_agent))
)
@classmethod
def create_refresh_token(cls, account, user_agent):
return create_refresh_token(
str(cls._create_token(account, user_agent))
)
class AccessTokenModel(TokenModel):
meta = {
'collection': 'access_token'
}
class RefreshTokenModel(TokenModel):
meta = {
'collection': 'refresh_token'
}
| 19.351064 | 72 | 0.60033 |
2a723eb84da3769f728ff6563402b326ca4b1ec7 | 10,709 | py | Python | qd_ssd.py | amsword/ssd.pytorch | 3740d7e51c519f95bb3b14da2544c4d7317cfea7 | ["MIT"] | null | null | null | qd_ssd.py | amsword/ssd.pytorch | 3740d7e51c519f95bb3b14da2544c4d7317cfea7 | ["MIT"] | null | null | null | qd_ssd.py | amsword/ssd.pytorch | 3740d7e51c519f95bb3b14da2544c4d7317cfea7 | ["MIT"] | null | null | null |
#from data import *
from data_util.voc0712 import TSVDetection
from data_util import VOCDetection, BaseTransform
from utils.augmentations import SSDAugmentation
from layers.modules import MultiBoxLoss
from ssd import build_ssd
import os
import os.path as op
import sys
import time
import argparse
import logging
import pickle
import numpy as np
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.init as init
import torch.backends.cudnn as cudnn
import torch.utils.data as data
from torch.autograd import Variable
from pprint import pformat
from tsv_io import TSVDataset, tsv_reader, tsv_writer
from qd_pytorch import torch_save
from tqdm import tqdm
def adjust_learning_rate(optimizer, base_lr, gamma, step):
"""Sets the learning rate to the initial LR decayed by 10 at every
specified step
# Adapted from PyTorch Imagenet example:
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
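For example, with base_lr=1e-3 and gamma=0.1: step 0 keeps lr=1e-3,
step 1 gives lr=1e-4, and step 2 gives lr=1e-5.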
"""
lr = base_lr * (gamma ** (step))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def xavier(param):
init.xavier_uniform(param)
def weights_init(m):
if isinstance(m, nn.Conv2d):
xavier(m.weight.data)
m.bias.data.zero_()
def test_net(pred_file, net, cuda, dataset, labelmap, thresh=0.05):
num_images = len(dataset)
def gen_rows():
for i in tqdm(range(num_images)):
key, im, gt, h, w = dataset.pull_item(i)
x = Variable(im.unsqueeze(0))
if cuda:
x = x.cuda()
detections = net(x).data
rects = []
# skip j = 0, because it's the background class
for j in range(1, detections.size(1)):
dets = detections[0, j, :]
mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
dets = torch.masked_select(dets, mask).view(-1, 5)
if dets.dim() == 0:
continue
boxes = dets[:, 1:]
boxes[:, 0] *= w
boxes[:, 2] *= w
boxes[:, 1] *= h
boxes[:, 3] *= h
scores = dets[:, 0].cpu().numpy()
cls_dets = np.hstack((boxes.cpu().numpy(),
scores[:, np.newaxis])).astype(np.float32,
copy=False)
for r in cls_dets:
if r[-1] < thresh:
continue
rect = {}
rect['rect'] = list(map(float, r[:4]))
rect['conf'] = float(r[-1])
rect['class'] = labelmap[j - 1]
rects.append(rect)
from qd_common import json_dump
yield key, json_dump(rects)
tsv_writer(gen_rows(), pred_file)
def ssd_pipeline(**kwargs):
kwargs['data'] = 'voc20'
kwargs['data'] = 'brand1048'
kwargs['net'] = 'vgg'
kwargs['expid'] = 'test'
kwargs['force_evaluate'] = True
kwargs['ovthresh'] = [0.5]
kwargs['cuda'] = True
kwargs['gamma'] = 0.1
kwargs['weight_decay'] = 5e-4
kwargs['momentum'] = 0.9
kwargs['save_folder'] = 'weights/'
kwargs['lr'] = 1e-3
kwargs['start_iter'] = 0
kwargs['resume'] = True
kwargs['batch_size'] = 32
kwargs['num_workers'] = 32
kwargs['max_iter'] = 120000
kwargs['confidence_threshold'] = 0.01
kwargs['force_train'] = True
kwargs['cuda'] = True
t = SSDTrain(**kwargs)
t.ensure_train()
pred_file = t.ensure_predict()
t.ensure_evaluate(pred_file)
def detection_collate(batch):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list of tensors) annotations for a given image are stacked on
0 dim
"""
targets = []
imgs = []
for sample in batch:
imgs.append(sample[0])
targets.append(torch.FloatTensor(sample[1]))
return torch.stack(imgs, 0), targets
from qd_pytorch import TorchTrain
class SSDTrain(TorchTrain):
def __init__(self, **kwargs):
super(SSDTrain, self).__init__(**kwargs)
def train(self):
torch.set_default_tensor_type('torch.cuda.FloatTensor')
kwargs = self.kwargs
from process_tsv import TSVDataset
tsv_dataset = TSVDataset(self.data)
tsv_file = tsv_dataset.get_data('train')
labelmap = tsv_dataset.get_labelmap_file()
cfg = kwargs
from qd_common import load_list_file
voc = {
'num_classes': len(load_list_file(labelmap)) + 1,
'lr_steps': (80000, 100000, 120000),
'max_iter': 120000,
'feature_maps': [38, 19, 10, 5, 3, 1],
'min_dim': 300,
'steps': [8, 16, 32, 64, 100, 300],
'min_sizes': [30, 60, 111, 162, 213, 264],
'max_sizes': [60, 111, 162, 213, 264, 315],
'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
'variance': [0.1, 0.2],
'clip': True,
'name': 'VOC',
}
for k in voc:
if k in cfg:
continue
cfg[k] = voc[k]
MEANS = (104, 117, 123)
dataset = TSVDetection(tsv_file=tsv_file,
labelmap=labelmap,
transform=SSDAugmentation(cfg['min_dim'], MEANS))
ssd_net = build_ssd('train', cfg['min_dim'], cfg['num_classes'])
net = ssd_net
if cfg['cuda']:
net = torch.nn.DataParallel(ssd_net)
cudnn.benchmark = True
vgg_weights = torch.load(cfg['save_folder'] + 'vgg16_reducedfc.pth')
print('Loading base network...')
ssd_net.vgg.load_state_dict(vgg_weights)
if cfg['cuda']:
net = net.cuda()
if cfg['resume']:
print('Initializing weights...')
# initialize newly added layers' weights with xavier method
ssd_net.extras.apply(weights_init)
ssd_net.loc.apply(weights_init)
ssd_net.conf.apply(weights_init)
optimizer = optim.SGD(net.parameters(), lr=cfg['lr'],
momentum=cfg['momentum'],
weight_decay=cfg['weight_decay'])
criterion = MultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,
False, cfg['cuda'])
net.train()
# loss counters
loc_loss = 0
conf_loss = 0
epoch = 0
logging.info('Loading the dataset...')
epoch_size = len(dataset) // cfg['batch_size']
step_index = 0
data_loader = data.DataLoader(dataset, cfg['batch_size'],
num_workers=cfg['num_workers'],
shuffle=True, collate_fn=detection_collate,
pin_memory=True)
# create batch iterator
batch_iterator = iter(data_loader)
for iteration in range(cfg['start_iter'], cfg['max_iter']):
if iteration in cfg['lr_steps']:
step_index += 1
adjust_learning_rate(optimizer, cfg['lr'], cfg['gamma'], step_index)
t0 = time.time()
# load train data
try:
images, targets = next(batch_iterator)
except StopIteration:
batch_iterator = iter(data_loader)
images, targets = next(batch_iterator)
if cfg['cuda']:
images = Variable(images.cuda())
targets = [Variable(ann.cuda(), volatile=True) for ann in targets]
else:
images = Variable(images)
targets = [Variable(ann, volatile=True) for ann in targets]
data_loading_time = time.time() - t0
t0 = time.time()
# forward
out = net(images)
# backprop
optimizer.zero_grad()
loss_l, loss_c = criterion(out, targets)
loss = loss_l + loss_c
loss.backward()
optimizer.step()
loc_loss += loss_l.data[0]
conf_loss += loss_c.data[0]
if iteration % 10 == 0:
logging.info('data loading time {}'.format(data_loading_time))
logging.info('timer: %.4f sec.' % (time.time() - t0))
logging.info('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss.data[0]))
if iteration != 0 and iteration % 500 == 0:
logging.info('Saving state, iter: {}'.format(iteration))
model_file = op.join(self.output_folder, 'snapshot',
'model_iter_{}.pth.tar'.format(iteration + 1))
torch_save(ssd_net.state_dict(), model_file)
model_file = op.join(self.output_folder, 'snapshot',
'model_iter_{}.pth.tar'.format(iteration + 1))
torch_save(ssd_net.state_dict(), model_file)
def predict(self, model_file, pred_file):
train_dataset = TSVDataset(self.data)
labelmap = train_dataset.load_labelmap()
num_classes = len(labelmap) + 1 # +1 for background
net = build_ssd('test', 300, num_classes) # initialize SSD
net.load_state_dict(torch.load(model_file))
net.eval()
test_dataset = TSVDataset(self.test_data)
dataset_mean = (104, 117, 123)
dataset = TSVDetection(test_dataset.get_data('test'),
train_dataset.get_labelmap_file(),
BaseTransform(300, dataset_mean))
if self.kwargs['cuda']:
net = net.cuda()
cudnn.benchmark = True
test_net(pred_file, net, self.kwargs['cuda'], dataset,
labelmap, thresh=self.kwargs['confidence_threshold'])
return pred_file
if __name__ == '__main__':
from qd_common import init_logging, parse_general_args
init_logging()
kwargs = parse_general_args()
logging.info('param:\n{}'.format(pformat(kwargs)))
function_name = kwargs['type']
del kwargs['type']
locals()[function_name](**kwargs)
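The training and prediction code above relies on adjust_learning_rate, weights_init, detection_collate and test_net, which live elsewhere in this repository and are not shown in this fragment. Purely for orientation, the conventional ssd.pytorch-style versions of the two small helpers look roughly like the sketch below; the names match the calls above, but the exact recipe is an assumption, not the repository's verified code.

import torch.nn as nn
import torch.nn.init as init

def adjust_learning_rate(optimizer, base_lr, gamma, step):
    # step-decay schedule assumed here: lr = base_lr * gamma ** step
    lr = base_lr * (gamma ** step)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

def weights_init(m):
    # xavier-initialize the conv layers of the newly added SSD heads
    if isinstance(m, nn.Conv2d):
        init.xavier_uniform_(m.weight)
        if m.bias is not None:
            m.bias.data.zero_()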
| 34.323718
| 94
| 0.56093
|
badaa4f46a66deb3b7be0cceaa6268feb7118b9b
| 3,860
|
py
|
Python
|
electrum_spectrumcash/tests/test_x509.py
|
Spectrumcash/Spectrum-ElectromX-Client
|
1008c9844eb99e70372850f9bb81d6ded3e59396
|
[
"MIT"
] | null | null | null |
electrum_spectrumcash/tests/test_x509.py
|
Spectrumcash/Spectrum-ElectromX-Client
|
1008c9844eb99e70372850f9bb81d6ded3e59396
|
[
"MIT"
] | 2
|
2021-06-01T22:49:23.000Z
|
2021-11-15T17:47:55.000Z
|
electrum_spectrumcash/tests/test_x509.py
|
Spectrumcash/Spectrum-ElectrumX-Client
|
1008c9844eb99e70372850f9bb81d6ded3e59396
|
[
"MIT"
] | null | null | null |
import unittest
from electrum_spectrumcash.x509 import X509
class TestX509(unittest.TestCase):
def test_generalizedtime(self):
full = X509(b'0\x82\x05F0\x82\x03.\x02\t\x00\xfeV\xd6\xb5?\xb1j\xe40\r\x06\t*\x86H\x86\xf7\r\x01\x01\x0b\x05\x000d1\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x130\x11\x06\x03U\x04\x08\x0c\nCalifornia1!0\x1f\x06\x03U\x04\n\x0c\x18Internet Widgits Pty Ltd1\x1d0\x1b\x06\x03U\x04\x03\x0c\x14testnet.qtornado.com0 \x17\r180206010225Z\x18\x0f21180113010225Z0d1\x0b0\t\x06\x03U\x04\x06\x13\x02US1\x130\x11\x06\x03U\x04\x08\x0c\nCalifornia1!0\x1f\x06\x03U\x04\n\x0c\x18Internet Widgits Pty Ltd1\x1d0\x1b\x06\x03U\x04\x03\x0c\x14testnet.qtornado.com0\x82\x02"0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x01\x05\x00\x03\x82\x02\x0f\x000\x82\x02\n\x02\x82\x02\x01\x00\xc2B\xe0\xa8\xd9$M\xbc)Wx\x0cv\x00\xc0\xfa2Ew:\xce\xa7\xcb\xc8\r?\xea\xc5R(\xc7\xc3Y\xe7zq=\xcd\x8d\xe3\x86\x9ecSI\xc7\x84\xf2~\x91\xd4\x19\xc2;\x97\xe81e\xf2\xeb\xf1\xadw\xa3p\x88A*-\r\xb6Yt\x98R\xe8\x8a\xf9\xb5>"F\xac\x19%\xc8~\x1d\xac\x93A\xffk\xce\xdb\xfc9\x05\xa0\xad\xf9V\x0f0\xa2b\xd0@\xe4\xf1\xb1\xe8\xb1\x10[&\xa1\xff\x13\xcfQ\xb7\x805\xef\xe7tL\xe5|\x08W\x8c\xd72\x9d\'\xeb\x92)3N\x01M\x06\xa9\xdc\xe4\'\x13\x90x\xd8\x830\x97\xa8\xcc2d \xfa\x91\x04\xd0\x1b\xe7\xaa t\x87\xba]\xb5w\x05(\xba\x07\xc2X$~?L\xc5\x03\xb2\xdeQ\xf3\xf3\xdab\xd9\x92\xd9\x86^:\x93\xc9\x86~\xd1\x94\xd4\x80\x9c\xff0\xc6m\xf4\xf0\xd6\x18\x96l\x1d\x0c\xe8\x15 \x8c\x89\xcb\xa4*\xd9\xefg\x844\x81\xb3\xce\xa1\x8a|\xf9h\xc3\xe1!\xfeZ`\xb71\x97Kj\x0b"\xd3\x98T\r\xd9\xbb<r\x0c\xd5Q\xd0L\x02\xcb\x19\x19\xd6\xdf$\xcej\xa8l\xbd\x81\x803\x95\x0e\x907&\x81J\x88\xaf\xa23\xb4q\x96\x08\xa9]}\xb8Rs\x89{\x04\x88/\xc1m\x8c\xe8\\X\x95 \x1cj\xf2(t\xd7\xef\x10-r\xb6\x17L\xce_\x1bf\xc0c\x18\x83\x99\xdf\xd5\xad\x88\xcd \xae\x07 \xed\xb6\xfc[\x9a/f\x92\xce^\x9c\xd9\x064\xb4\xcc\x1d,d\x99\xee\x9a4\xbe\xde0\x92\x8f/keq\x94\x9frf1\xda\xadM_\x11C\x19\x01\xf0\xe0I\x84W\xf9\xaa\xd3\x12ex\x89"\xbfQ\x1f\xbdU\xa0\x92\xa3\x9d\xdb?\x86\x82\x0b\x1e\xe0\x8aSq\xce%\xea4\xfb\x82\x92\x0f\xcf\xaa\xe2\r\xedd\xba\xff\x85\xa2+\xb0x9\xba\'\xd3\xf5\xd6\xfa\xb43\x0b\xd4\xf4\xca\xa5\xb1\xe4[\xe7\xf7\xc3\xd3\xdd\x85)\xac5E\x17\xae\x03fCC(\x06\x1cU\xedM\x90r\xe87\x8d}\xf1i\xfdO\x83\x05\x83\x83y\xd9f,\xe1\xba\xf0\\y\x8d\x08`\xb1\x02\x03\x01\x00\x010\r\x06\t*\x86H\x86\xf7\r\x01\x01\x0b\x05\x00\x03\x82\x02\x01\x00,.\x12jC3\x9fdF\x15\x16\xea*1\x0b[\xfa-\xcf\x80\x17\xf0\xfa\xf4\x96C\xff\xf9\xe9\xa2N\xda\xf1&6\x9ecV~\xea[\x07\xc1R\x03\x95\xd4\x84B\xe2r\x92\xad<mp\xf1\xcb\xb3\x8b\xbf \x08\x12\x1e6\xe3\xad\xbd1\x81\xbe\xaex\x002\xb6\xf9\xa0\xf6\xb7E^"\r\xa0w\x08\x14\xe7\x84\x03q2\x9c\xac\xce>\xc6\x0b\x81\x81k\x0e\xd01\x16\x91\xe4A\x8c\x1a\xe9W\xd4=<\xd4m_\xd4m\xa4H\x14\xc0\xae\x12\xab\x808\xf1\xf9_\xbb\xfb\xd0U\x0e\\\xd3.?\xa36\xe1hstU"\x17P\xcb>\x83\x9c\xaa\x9b\xb7\xe5\xb4\xb5W\xdc\xc1\xee\x91K\x12\xc2\xe1U\xaf\xf7I`\x83\x91\x0c\xc0\xcb\x15\x13!V\xa9\xc1\xca\x1b\x80\xff\xd8\x1f\xd8_+\x83\xcd\xcb%\xd6\xb7\xdc\x8a2\xa8Q\x1f\xbb.\xdf\x05\xb7hD\xab\xea\xe9\xfb.\xdd\x93\xd1\xf0\xb8r\xb9t.\xab\xf6]\xac\xc9U9\x87\x9e\xe36 \x87\xe7eo\x98\xac\xf4\x87\x8e\xf4\xa86\xd3\xcapy\xee\xa0]\xdbA\xb9\x00\xe9_R\xc8\xf7\xca\x13\xc6\xb1Z|c\xe8v\xa24\xac?k\xf1\xc4\x97\x18\x07\xbaU\xc9\xf5? 
\x95\x8f\x11\xa7\xc9\x8eY\x9c\xdfnx?\x88\xba\x90\xef\x94WU\xb5\xcf\x0b"\xe8\xfe\xa6.\x0cr-\xaf3\x8a\xe6v\xf9\xb91\x87\x91\xc6\xb1\xe9\xb9UP\xf5\x14\xb7\x99\x80\xc0\xc5}\x9a~\x7f\x06\x1e\xb8\x05\xd5\xa2LXO\\73i\x82\xcd\xc6#\xb7\xa4q\xd7\xd4y\xb1d\xaf\xa8\t\x9e1K\xd94\xaf7\x08\x8c);\xd2\xed\x91\xc6\xed\x83\x90\r\xef\x85\xf0\xfeJi\x02;\xf0\x0b\x03\xe7\xc1\x84\xd45\xaeP\xc2Lp\x1akb\xcaP\xe9\xfc\xc1\xc8VPQu\x85\x92l\x12\xb99{\x91\xd0\xa6d\n\xde\xf85\x93e\xfa\\\xf9cKx8\x84"s\xb8\xe52~\x97\x05\xc3\xf6\x1c\xca\x0b\xda\x8b\x90\xfeu5,\x94,\x99\xf9\x9a\xf3T\x8dAZ\xc7\xe9\x95-\x98\xf2\xbaL\x89\xc0?\xba1\xb5\\t|RY_\xc6\xabr\xe8')
full.check_date()
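For reference, this regression test exercises GeneralizedTime handling: the certificate's notAfter value visible in the blob ('21180113010225Z') falls in the year 2118, beyond the 1950-2049 range that UTCTime can express. Assuming the package is importable, the test can presumably be run with the standard unittest runner:

python -m unittest electrum_spectrumcash.tests.test_x509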
| 551.428571
| 3,702
| 0.743782
|
1265f83803dbcd797b3ff5a1da3ac60236a913b1
| 173
|
py
|
Python
|
cla_frontend/apps/core/session_security/urls.py
|
farrepa/cla_frontend
|
a789ad96cf91daf755784e3a5ed11350a85bddf6
|
[
"MIT"
] | 3
|
2018-02-19T09:50:06.000Z
|
2022-01-13T10:17:05.000Z
|
cla_frontend/apps/core/session_security/urls.py
|
farrepa/cla_frontend
|
a789ad96cf91daf755784e3a5ed11350a85bddf6
|
[
"MIT"
] | 171
|
2015-01-02T09:27:37.000Z
|
2022-03-24T08:56:57.000Z
|
cla_frontend/apps/core/session_security/urls.py
|
farrepa/cla_frontend
|
a789ad96cf91daf755784e3a5ed11350a85bddf6
|
[
"MIT"
] | 3
|
2019-03-14T08:31:47.000Z
|
2021-05-04T16:01:42.000Z
|
from django.conf.urls import patterns, url
from .views import JsonPingView
urlpatterns = patterns("", url("ping/$", JsonPingView.as_view(), name="session_security_ping"))
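Note that django.conf.urls.patterns() was deprecated in Django 1.8 and removed in Django 1.10, so this module only works on older releases. Should it ever need to run on Django 2.0 or later, the equivalent plain-list form would look roughly like this sketch (not part of the repository):

from django.urls import re_path

from .views import JsonPingView

urlpatterns = [
    re_path(r"ping/$", JsonPingView.as_view(), name="session_security_ping"),
]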
| 28.833333
| 95
| 0.763006
|
47b3756e39741b65cb1f2f776f3bda3d73f3e645
| 629
|
py
|
Python
|
setup.py
|
JackKuo-tw/TGmeetup
|
5422564ab9feb52ac4b46b13425dfc1e16f68ee6
|
[
"MIT"
] | 146
|
2018-01-28T05:34:42.000Z
|
2022-03-28T08:38:42.000Z
|
setup.py
|
JackKuo-tw/TGmeetup
|
5422564ab9feb52ac4b46b13425dfc1e16f68ee6
|
[
"MIT"
] | 76
|
2018-01-28T05:02:40.000Z
|
2020-02-02T05:02:55.000Z
|
setup.py
|
JackKuo-tw/TGmeetup
|
5422564ab9feb52ac4b46b13425dfc1e16f68ee6
|
[
"MIT"
] | 42
|
2018-01-28T05:25:15.000Z
|
2021-08-11T07:14:23.000Z
|
import setuptools
from distutils.core import setup
entry_points = {
'console_scripts': [
'tgmeetup = TGmeetup.tgmeetup:main'
]
}
setup(
name='tgmeetup',
packages=['TGmeetup', 'TGmeetup.libs', 'TGmeetup.libs.RegistrationAPI'],
version='1.0.0',
description='A collection set of technical groups information',
author='Samina Fu',
author_email='sufuf3@gmail.com',
url='https://github.com/sufuf3/TGmeetup',
keywords=['TechGroup', 'TGmeetup', 'meetup', 'community'],
install_requires=["requests", "ConfigParser", "pathlib", "terminaltables", "termcolor", "geocoder"],
entry_points=entry_points,
)
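Because of the entry_points declaration, installing this package registers a tgmeetup console command that dispatches to TGmeetup.tgmeetup:main. A typical, hypothetical workflow from the repository root would be:

    pip install .
    tgmeetup

Any command-line flags the tool accepts are defined in TGmeetup/tgmeetup.py and are not shown here.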
| 28.590909
| 102
| 0.704293
|
ce5a7bddee547fb3c1fa261fa905fa389557e425
| 8,578
|
py
|
Python
|
cnn/train_search_perNodePruning.py
|
SivanDoveh/darts
|
56604a823f5358f5ebb3573b59bc63cf329ea99b
|
[
"Apache-2.0"
] | null | null | null |
cnn/train_search_perNodePruning.py
|
SivanDoveh/darts
|
56604a823f5358f5ebb3573b59bc63cf329ea99b
|
[
"Apache-2.0"
] | null | null | null |
cnn/train_search_perNodePruning.py
|
SivanDoveh/darts
|
56604a823f5358f5ebb3573b59bc63cf329ea99b
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model_search import Network
from architect import Architect
from perNodePruning import Prune
import torchvision.transforms as transforms
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='cifar10', help='cifar10,fashion-mnist,SVHN')
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=50, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=8, help='total number of layers')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
parser.add_argument('--num_to_zero', type=int, default=2, help='number of alphas to prune')
parser.add_argument('--epochs_pre_prune', type=int, default=19, help='number of epochs to train before alpha pruning starts')
parser.add_argument('--sparse', type=str, default='', help='do sparse pruning from prune or not')
parser.add_argument('--s_f', type=float, default=0.91, help='number of alphas to prune')
args = parser.parse_args()
args.save = 'train_s_per_node' + '-' + str(args.epochs_pre_prune) + '-search-{}-{}'.format(args.save,
time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# CIFAR_CLASSES = 10
def main():
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
np.random.seed(args.seed)
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled = True
torch.cuda.manual_seed(args.seed)
logging.info('gpu device = %d' % args.gpu)
logging.info("args = %s", args)
in_channels, num_classes, dataset_in_torch = utils.dataset_fields(args) # new
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
model = Network(args.init_channels, in_channels, num_classes, args.layers, criterion)
model = model.cuda()
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
optimizer = torch.optim.SGD( # SGD for weights
model.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay)
train_data = utils.dataset_split_and_transform(dataset_in_torch, args) # new
num_train = len(train_data)
indices = list(range(num_train))
split = int(np.floor(args.train_portion * num_train))
train_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
pin_memory=True, num_workers=2)
valid_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
pin_memory=True, num_workers=2)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(args.epochs), eta_min=args.learning_rate_min)
architect = Architect(model, args)
prune = Prune()
for epoch in range(args.epochs):
scheduler.step()
lr = scheduler.get_lr()[0]
logging.info('epoch %d lr %e', epoch, lr)
# pruning
if epoch > args.epochs_pre_prune:
prune.prune_alphas_step(model._arch_parameters, epoch, args)
# training
train_acc, train_obj = train(train_queue, valid_queue, model, architect, criterion, optimizer, lr)
logging.info('train_acc %f', train_acc)
# validation
#valid_acc, valid_obj = infer(valid_queue, model, criterion)
#logging.info('valid_acc %f', valid_acc)
genotype = model.genotype()
logging.info('genotype = %s', genotype)
logging.info(F.softmax(model.alphas_normal, dim=-1))
logging.info(F.softmax(model.alphas_reduce, dim=-1))
utils.save(model, os.path.join(args.save, 'weights.pt'))
def train(train_queue, valid_queue, model, architect, criterion, optimizer, lr):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
for step, (input, target) in enumerate(train_queue): # input,target is for w step
#model.train()
n = input.size(0)
input = Variable(input, requires_grad=False).cuda()
target = Variable(target, requires_grad=False).cuda(async=True)
# get a random minibatch from the search queue with replacement
input_search, target_search = next(iter(valid_queue)) # input_search,target_search is for alpha step
input_search = Variable(input_search, requires_grad=False).cuda()
target_search = Variable(target_search, requires_grad=False).cuda(async=True)
architect.step(input, target, input_search, target_search, lr, optimizer,
unrolled=args.unrolled) # update alpha
        # during architect.step(), the optimization step for alpha happens
optimizer.zero_grad()
logits = model(input)
loss = criterion(logits, target)
loss.backward()
nn.utils.clip_grad_norm(model.parameters(), args.grad_clip)
optimizer.step()
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
objs.update(loss.data[0], n)
top1.update(prec1.data[0], n)
top5.update(prec5.data[0], n)
if step % args.report_freq == 0:
logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
model.eval()
for step, (input, target) in enumerate(valid_queue):
input = Variable(input, volatile=True).cuda()
target = Variable(target, volatile=True).cuda(async=True)
logits = model(input)
loss = criterion(logits, target)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.data[0], n)
top1.update(prec1.data[0], n)
top5.update(prec5.data[0], n)
if step % args.report_freq == 0:
logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
return top1.avg, objs.avg
if __name__ == '__main__':
main()
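A plausible launch command for this search script, built only from the argparse flags defined above (the values shown are simply the script's own defaults, so this is illustrative rather than a tuned configuration):

    python train_search_perNodePruning.py --dataset cifar10 --batch_size 64 --epochs 50 --epochs_pre_prune 19 --num_to_zero 2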
| 40.847619
| 116
| 0.688622
|
6e1a8f516b22615e2f8a84522f423e650648a2bb
| 6,187
|
py
|
Python
|
tianshou/policy/modelfree/sac.py
|
MA-JIE/tianshou
|
6237cc0d52ecc892baa98d49dfe923e2c484721f
|
[
"MIT"
] | 2
|
2020-05-26T17:04:57.000Z
|
2020-05-31T15:33:24.000Z
|
tianshou/policy/modelfree/sac.py
|
MA-JIE/tianshou
|
6237cc0d52ecc892baa98d49dfe923e2c484721f
|
[
"MIT"
] | null | null | null |
tianshou/policy/modelfree/sac.py
|
MA-JIE/tianshou
|
6237cc0d52ecc892baa98d49dfe923e2c484721f
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
from copy import deepcopy
import torch.nn.functional as F
from typing import Dict, Tuple, Union, Optional
from tianshou.data import Batch
from tianshou.policy import DDPGPolicy
from tianshou.policy.utils import DiagGaussian
class SACPolicy(DDPGPolicy):
"""Implementation of Soft Actor-Critic. arXiv:1812.05905
:param torch.nn.Module actor: the actor network following the rules in
:class:`~tianshou.policy.BasePolicy`. (s -> logits)
:param torch.optim.Optimizer actor_optim: the optimizer for actor network.
:param torch.nn.Module critic1: the first critic network. (s, a -> Q(s,
a))
:param torch.optim.Optimizer critic1_optim: the optimizer for the first
critic network.
:param torch.nn.Module critic2: the second critic network. (s, a -> Q(s,
a))
:param torch.optim.Optimizer critic2_optim: the optimizer for the second
critic network.
:param float tau: param for soft update of the target network, defaults to
0.005.
:param float gamma: discount factor, in [0, 1], defaults to 0.99.
    :param float alpha: entropy regularization coefficient, defaults to 0.2.
:param action_range: the action range (minimum, maximum).
:type action_range: (float, float)
:param bool reward_normalization: normalize the reward to Normal(0, 1),
defaults to ``False``.
:param bool ignore_done: ignore the done flag while training the policy,
defaults to ``False``.
.. seealso::
Please refer to :class:`~tianshou.policy.BasePolicy` for more detailed
explanation.
"""
def __init__(self,
actor: torch.nn.Module,
actor_optim: torch.optim.Optimizer,
critic1: torch.nn.Module,
critic1_optim: torch.optim.Optimizer,
critic2: torch.nn.Module,
critic2_optim: torch.optim.Optimizer,
tau: float = 0.005,
gamma: float = 0.99,
alpha: float = 0.2,
action_range: Optional[Tuple[float, float]] = None,
reward_normalization: bool = False,
ignore_done: bool = False,
**kwargs) -> None:
super().__init__(None, None, None, None, tau, gamma, 0,
action_range, reward_normalization, ignore_done,
**kwargs)
self.actor, self.actor_optim = actor, actor_optim
self.critic1, self.critic1_old = critic1, deepcopy(critic1)
self.critic1_old.eval()
self.critic1_optim = critic1_optim
self.critic2, self.critic2_old = critic2, deepcopy(critic2)
self.critic2_old.eval()
self.critic2_optim = critic2_optim
self._alpha = alpha
self.__eps = np.finfo(np.float32).eps.item()
def train(self) -> None:
self.training = True
self.actor.train()
self.critic1.train()
self.critic2.train()
def eval(self) -> None:
self.training = False
self.actor.eval()
self.critic1.eval()
self.critic2.eval()
def sync_weight(self) -> None:
for o, n in zip(
self.critic1_old.parameters(), self.critic1.parameters()):
o.data.copy_(o.data * (1 - self._tau) + n.data * self._tau)
for o, n in zip(
self.critic2_old.parameters(), self.critic2.parameters()):
o.data.copy_(o.data * (1 - self._tau) + n.data * self._tau)
def forward(self, batch: Batch,
state: Optional[Union[dict, Batch, np.ndarray]] = None,
input: str = 'obs', **kwargs) -> Batch:
obs = getattr(batch, input)
logits, h = self.actor(obs, state=state, info=batch.info)
assert isinstance(logits, tuple)
dist = DiagGaussian(*logits)
x = dist.rsample()
y = torch.tanh(x)
act = y * self._action_scale + self._action_bias
log_prob = dist.log_prob(x) - torch.log(
self._action_scale * (1 - y.pow(2)) + self.__eps)
act = act.clamp(self._range[0], self._range[1])
return Batch(
logits=logits, act=act, state=h, dist=dist, log_prob=log_prob)
def learn(self, batch: Batch, **kwargs) -> Dict[str, float]:
with torch.no_grad():
obs_next_result = self(batch, input='obs_next')
a_ = obs_next_result.act
dev = a_.device
batch.act = torch.tensor(batch.act, dtype=torch.float, device=dev)
target_q = torch.min(
self.critic1_old(batch.obs_next, a_),
self.critic2_old(batch.obs_next, a_),
) - self._alpha * obs_next_result.log_prob
rew = torch.tensor(batch.rew,
dtype=torch.float, device=dev)[:, None]
done = torch.tensor(batch.done,
dtype=torch.float, device=dev)[:, None]
target_q = (rew + (1. - done) * self._gamma * target_q)
# critic 1
current_q1 = self.critic1(batch.obs, batch.act)
critic1_loss = F.mse_loss(current_q1, target_q)
self.critic1_optim.zero_grad()
critic1_loss.backward()
self.critic1_optim.step()
# critic 2
current_q2 = self.critic2(batch.obs, batch.act)
critic2_loss = F.mse_loss(current_q2, target_q)
self.critic2_optim.zero_grad()
critic2_loss.backward()
self.critic2_optim.step()
# actor
obs_result = self(batch)
a = obs_result.act
current_q1a = self.critic1(batch.obs, a)
current_q2a = self.critic2(batch.obs, a)
actor_loss = (self._alpha * obs_result.log_prob - torch.min(
current_q1a, current_q2a)).mean()
self.actor_optim.zero_grad()
actor_loss.backward()
self.actor_optim.step()
self.sync_weight()
return {
'loss/actor': actor_loss.item(),
'loss/critic1': critic1_loss.item(),
'loss/critic2': critic2_loss.item(),
}
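A minimal construction sketch for this policy, assuming toy Gaussian actor and Q-critic modules; the network names and shapes below are illustrative assumptions rather than tianshou's bundled networks. The only contracts the sketch honours are the ones forward() and learn() above rely on: the actor returns a (mu, sigma) tuple plus a hidden state, and each critic maps (obs, act) to a scalar Q-value.

import torch
import torch.nn as nn

class ToyActor(nn.Module):
    # returns ((mu, sigma), state) as SACPolicy.forward expects
    def __init__(self, obs_dim, act_dim):
        super().__init__()
        self.mu = nn.Linear(obs_dim, act_dim)
        self.log_sigma = nn.Parameter(torch.zeros(act_dim))

    def forward(self, obs, state=None, info={}):
        obs = torch.as_tensor(obs, dtype=torch.float)
        mu = self.mu(obs)
        sigma = self.log_sigma.exp().expand_as(mu)
        return (mu, sigma), state

class ToyCritic(nn.Module):
    # maps (obs, act) -> Q(s, a)
    def __init__(self, obs_dim, act_dim):
        super().__init__()
        self.q = nn.Linear(obs_dim + act_dim, 1)

    def forward(self, obs, act):
        obs = torch.as_tensor(obs, dtype=torch.float)
        act = torch.as_tensor(act, dtype=torch.float)
        return self.q(torch.cat([obs, act], dim=-1))

obs_dim, act_dim = 4, 2
actor = ToyActor(obs_dim, act_dim)
critic1, critic2 = ToyCritic(obs_dim, act_dim), ToyCritic(obs_dim, act_dim)
policy = SACPolicy(
    actor, torch.optim.Adam(actor.parameters(), lr=3e-4),
    critic1, torch.optim.Adam(critic1.parameters(), lr=1e-3),
    critic2, torch.optim.Adam(critic2.parameters(), lr=1e-3),
    tau=0.005, gamma=0.99, alpha=0.2, action_range=(-1.0, 1.0))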
| 40.97351
| 78
| 0.599483
|
723aa239017011a9d4fbc1811262d71464674a18
| 602
|
py
|
Python
|
web_chat/migrations/0007_auto_20211125_1732.py
|
renmarin/Web-Chat
|
343424eaa20e16bd41077b525dbeb1d60cf86d7b
|
[
"MIT"
] | null | null | null |
web_chat/migrations/0007_auto_20211125_1732.py
|
renmarin/Web-Chat
|
343424eaa20e16bd41077b525dbeb1d60cf86d7b
|
[
"MIT"
] | null | null | null |
web_chat/migrations/0007_auto_20211125_1732.py
|
renmarin/Web-Chat
|
343424eaa20e16bd41077b525dbeb1d60cf86d7b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-11-25 15:32
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web_chat', '0006_auto_20211125_1454'),
]
operations = [
migrations.AlterField(
model_name='chat',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 11, 25, 17, 32, 57, 43790)),
),
migrations.AlterField(
model_name='chat',
name='message',
field=models.CharField(max_length=1500),
),
]
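The DateTimeField default frozen into this migration is simply the wall-clock time at which makemigrations ran (2021-11-25 17:32), so any row created later without an explicit date falls back to that fixed instant. If the intent was "time of insertion", the usual model-level alternative is a callable default, sketched here (the Chat model itself is not part of this migration file):

from django.db import models
from django.utils import timezone

date = models.DateTimeField(default=timezone.now)  # evaluated per row instead of frozen at migration time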
| 24.08
| 99
| 0.589701
|
a95cd3c1a209ba959d857a4ac51f6fc516338897
| 1,609
|
py
|
Python
|
ucb_cs61A/lab/lab09/tests/composed.py
|
tavaresdong/courses-notes
|
7fb89103bca679f5ef9b14cbc777152daac1402e
|
[
"MIT"
] | null | null | null |
ucb_cs61A/lab/lab09/tests/composed.py
|
tavaresdong/courses-notes
|
7fb89103bca679f5ef9b14cbc777152daac1402e
|
[
"MIT"
] | 1
|
2017-07-31T08:15:26.000Z
|
2017-07-31T08:15:26.000Z
|
ucb_cs61A/lab/lab09/tests/composed.py
|
tavaresdong/courses-notes
|
7fb89103bca679f5ef9b14cbc777152daac1402e
|
[
"MIT"
] | 1
|
2019-10-06T16:52:31.000Z
|
2019-10-06T16:52:31.000Z
|
test = {
'name': 'composed',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
scm> ((composed add-one add-one) 2)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
scm> ((composed multiply-by-two multiply-by-two) 2)
8
""",
'hidden': False,
'locked': False
},
{
'code': r"""
scm> ((composed add-one multiply-by-two) 2)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
scm> ((composed multiply-by-two add-one) 2)
6
""",
'hidden': False,
'locked': False
},
{
'code': r"""
scm> ((composed (composed add-one add-one) add-one) 2)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
scm> ((composed (composed add-one add-one) multiply-by-two) 2)
6
""",
'hidden': False,
'locked': False
},
{
'code': r"""
scm> ((composed multiply-by-two (composed add-one add-one)) 2)
8
""",
'hidden': False,
'locked': False
}
],
'scored': False,
'setup': r"""
scm> (load 'lab09)
scm> (define (add-one a) (+ a 1))
scm> (define (multiply-by-two a) (* a 2))
""",
'teardown': '',
'type': 'scheme'
}
]
}
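The behaviour pinned down by these cases is ordinary function composition: (composed f g) applies g first, then f. A Python analogue of the Scheme procedure the lab asks for, shown only as orientation (the lab itself expects a Scheme definition in lab09):

def composed(f, g):
    # composed(f, g)(x) == f(g(x)); e.g. composed(add_one, multiply_by_two)(2) == 5
    return lambda x: f(g(x))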
| 21.743243
| 72
| 0.3555
|