max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
dolweb/urls.py | YggDrazil/DolphinWeb | 0 | 12773751 | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Monkey patching ftw...
import dolweb.utils.monkey
# Populate the admin registry from each installed app's admin module.
admin.autodiscover()
# URL routes for the Dolphin website.
# NOTE(review): patterns() and string-dotted view references were deprecated
# in Django 1.8 and removed in 1.10 — this file targets an older Django;
# confirm the project's pinned Django version before modernizing.
urlpatterns = patterns('',
    # Homepage
    url(r'^$', 'dolweb.homepage.views.home', name='home'),
    # Media (image gallery, link to videos)
    url(r'^media/', include('dolweb.media.urls')),
    # Documentation (FAQ and guides)
    url(r'^docs/', include('dolweb.docs.urls')),
    # Downloads
    url(r'^download/', include('dolweb.downloads.urls')),
    # Blog
    url(r'^blog/', include('dolweb.blog.urls')),
    # Compatibility list
    url(r'^compat/', include('dolweb.compat.urls')),
    # Django administration
    url(r'^admin/', include(admin.site.urls)),
    # Management interface; cmd is forwarded to the run_command view
    url(r'^mgmt/(?P<cmd>.+)$', 'dolweb.management.views.run_command', name='mgmt_run_command'),
)
# Serve collected static files (only active when DEBUG is on).
urlpatterns += staticfiles_urlpatterns()
# Serve user-uploaded media from MEDIA_ROOT at MEDIA_URL (DEBUG only).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 1.921875 | 2 |
alipay/aop/api/response/AlipayMarketingRecruitPlanQueryResponse.py | antopen/alipay-sdk-python-all | 213 | 12773752 | <reponame>antopen/alipay-sdk-python-all<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.RecruitEnrollRule import RecruitEnrollRule
class AlipayMarketingRecruitPlanQueryResponse(AlipayResponse):
    """Response model for the alipay.marketing.recruit.plan.query gateway call.

    Each response field is exposed as a property backed by a private
    attribute; ``enroll_rules`` additionally converts raw dicts into
    :class:`RecruitEnrollRule` instances on assignment.
    """

    # All response keys; 'enroll_rules' is special-cased by its setter.
    _FIELDS = ('description', 'enroll_end_time', 'enroll_rules',
               'enroll_start_time', 'logo', 'plan_id', 'plan_name', 'status')

    def __init__(self):
        super(AlipayMarketingRecruitPlanQueryResponse, self).__init__()
        # Initialize every backing attribute to None.
        for field_name in self._FIELDS:
            setattr(self, '_' + field_name, None)

    @property
    def description(self):
        return self._description

    @description.setter
    def description(self, value):
        self._description = value

    @property
    def enroll_end_time(self):
        return self._enroll_end_time

    @enroll_end_time.setter
    def enroll_end_time(self, value):
        self._enroll_end_time = value

    @property
    def enroll_rules(self):
        return self._enroll_rules

    @enroll_rules.setter
    def enroll_rules(self, value):
        # Non-list values are silently ignored (SDK convention); dict items
        # are promoted to RecruitEnrollRule instances.
        if isinstance(value, list):
            self._enroll_rules = [
                item if isinstance(item, RecruitEnrollRule)
                else RecruitEnrollRule.from_alipay_dict(item)
                for item in value
            ]

    @property
    def enroll_start_time(self):
        return self._enroll_start_time

    @enroll_start_time.setter
    def enroll_start_time(self, value):
        self._enroll_start_time = value

    @property
    def logo(self):
        return self._logo

    @logo.setter
    def logo(self, value):
        self._logo = value

    @property
    def plan_id(self):
        return self._plan_id

    @plan_id.setter
    def plan_id(self, value):
        self._plan_id = value

    @property
    def plan_name(self):
        return self._plan_name

    @plan_name.setter
    def plan_name(self, value):
        self._plan_name = value

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    def parse_response_content(self, response_content):
        """Populate this model from the raw gateway response content."""
        response = super(AlipayMarketingRecruitPlanQueryResponse, self).parse_response_content(response_content)
        # Assign through the properties so enroll_rules gets its conversion.
        for field_name in self._FIELDS:
            if field_name in response:
                setattr(self, field_name, response[field_name])
| 1.828125 | 2 |
main.py | ferizoozoo/Simple-brute-force-password-hacker | 0 | 12773753 | <reponame>ferizoozoo/Simple-brute-force-password-hacker
import sys
from password_checker import PasswordChecker
from client import Client
def main():
    """Connect to the target host and brute-force its password.

    Usage: main.py <ip_address> <port>
    """
    # Fail with a usage hint instead of an opaque unpacking ValueError
    # when the two positional arguments are missing.
    if len(sys.argv) < 3:
        sys.exit("usage: {} <ip_address> <port>".format(sys.argv[0]))
    ip_address, port = sys.argv[1:3]
    client = Client(ip_address, int(port))
    password_checker = PasswordChecker(client)
    password = password_checker.crack_password()
    print(password)
    client.close_connection()


if __name__ == '__main__':
    main()
cogs/commands/useful.py | scottwedge/Villager-Bot | 0 | 12773754 | <gh_stars>0
from discord.ext import commands
import discord
import arrow
from googlesearch import search
from random import choice
class Useful(commands.Cog):
    """Informational/utility commands: help menus, bot info, links, web search."""

    def __init__(self, bot):
        self.bot = bot
        # Shared-state cog registered elsewhere (provides startTime for uptime).
        self.g = self.bot.get_cog("Global")
        # Random footer tips shown at the bottom of embeds.
        self.tips = ["Made by Iapetus11#6821 & TrustedMercury#1953", "You can get emeralds by voting for the bot!",
                     "Hey, check out the support server! discord.gg/39DwwUV", "Did you know you can buy emeralds?",
                     f"Wanna invite the bot? Try the !!invite command!"]

    @commands.group(name="help")
    async def help(self, ctx):
        """Top-level help menu; lists the per-category help subcommands."""
        if ctx.invoked_subcommand is None:
            help_embed = discord.Embed(color=discord.Color.green())
            help_embed.set_author(
                name="Villager Bot Commands",
                icon_url="http://olimone.ddns.net/images/villagerbotsplash1.png")
            help_embed.add_field(name="Minecraft", value=f"``{ctx.prefix}help mc``", inline=True)
            help_embed.add_field(name="Fun", value=f"``{ctx.prefix}help fun``", inline=True)
            help_embed.add_field(name="\uFEFF", value=f"\uFEFF", inline=True)
            help_embed.add_field(name="Useful", value=f"``{ctx.prefix}help useful``", inline=True)
            help_embed.add_field(name="Admin", value=f"``{ctx.prefix}help admin``", inline=True)
            help_embed.add_field(name="\uFEFF", value=f"\uFEFF", inline=True)
            help_embed.add_field(name="\uFEFF", value="""Need more help? Check out the Villager Bot [Support Server](https://discord.gg/39DwwUV)
            Enjoying the bot? Vote for us on [top.gg](https://top.gg/bot/639498607632056321/vote)""", inline=False)
            help_embed.set_footer(text=choice(self.tips))
            await ctx.send(embed=help_embed)

    @help.command(name='fun')
    async def help_fun(self, ctx):
        """Help page for the fun/economy commands."""
        help_embed = discord.Embed(color=discord.Color.green())
        help_embed.set_author(
            name="Villager Bot Commands",
            icon_url="http://olimone.ddns.net/images/villagerbotsplash1.png")
        help_embed.add_field(
            name="**Text Commands**",
            value=f'**{ctx.prefix}villagerspeak** ***text*** *turns English text into villager sounds*\n'
                  f'**{ctx.prefix}enchant** ***text*** *turns english text into the Minecraft enchantment table language, a.k.a. the Standard Galactic Alphabet.*\n'
                  f'**{ctx.prefix}unenchant** ***text*** *turns the enchanting table language back into English*\n'
                  f'**{ctx.prefix}sarcastic** ***text*** *makes text sarcastic*\n'
                  f'**{ctx.prefix}say** ***text*** *bot will repeat what you tell it to*\n',
            inline=False)
        help_embed.add_field(
            name="**Economy Commands**",
            value=f'**{ctx.prefix}mine** *go mining with the bot for emeralds*\n'
                  f'**{ctx.prefix}balance** *the bot will tell you how many emeralds you have*\n'
                  f'**{ctx.prefix}vault** *shows you how many emerald blocks you have in the emerald vault*\n'
                  f'**{ctx.prefix}deposit** ***amount in emerald blocks*** *deposit emerald blocks into the emerald vault*\n'
                  f'**{ctx.prefix}withdraw** ***amount in emerald blocks*** *withdraw emerald blocks from the emerald vault*\n'
                  f'**{ctx.prefix}inventory** *see what you have in your inventory*\n'
                  f'**{ctx.prefix}give** ***@user amount*** *give mentioned user emeralds*\n'
                  f'**{ctx.prefix}giveitem** ***@user amount item*** *give mentioned a user specified amount of an item*\n'
                  f'**{ctx.prefix}gamble** ***amount*** *gamble with Villager Bot*\n'
                  f'**{ctx.prefix}pillage** ***@user*** *attempt to steal emeralds from another person*\n'
                  f'**{ctx.prefix}shop** *go shopping with emeralds*\n'
                  f'**{ctx.prefix}sell** ***amount item*** *sell a certain amount of an item*\n'
                  f'**{ctx.prefix}leaderboard** *shows the emerald leaderboard*\n'
                  f'**{ctx.prefix}chug** ***potion*** *uses the mentioned potion.*\n',
            inline=False)
        help_embed.add_field(
            name="**Other Fun Commands**",
            value=f'**{ctx.prefix}cursed** *the bot will upload a cursed Minecraft image*\n'
                  f'**{ctx.prefix}battle** ***user*** *allows you to battle your friends!*\n',
            inline=False)
        help_embed.set_footer(text=choice(self.tips))
        await ctx.send(embed=help_embed)

    @help.command(name='useful')
    async def help_useful(self, ctx):
        """Help page for the useful/informative commands."""
        help_embed = discord.Embed(color=discord.Color.green())
        help_embed.set_author(
            name="Villager Bot Commands",
            icon_url="http://olimone.ddns.net/images/villagerbotsplash1.png")
        help_embed.add_field(
            name="**Useful/Informative**",
            value=f'**{ctx.prefix}help** *displays this help message*\n'
                  f'**{ctx.prefix}info** *displays information about the bot*\n'
                  f"**{ctx.prefix}ping** *to see the bot's latency between itself and the Discord API*'\n"
                  f'**{ctx.prefix}uptime** *to check how long the bot has been online*\n'
                  f'**{ctx.prefix}votelink** *to get the link to vote for and support the bot!*\n'
                  f'**{ctx.prefix}invite** *to get the link to add Villager Bot to your own server!*\n'
                  f'**{ctx.prefix}google** ***query*** *bot will search on google for your query*\n'
                  f'**{ctx.prefix}youtube** ***query*** *bot will search on youtube for your query*\n'
                  f'**{ctx.prefix}reddit** ***query*** *bot will search on reddit for your query*\n'
                  f'**{ctx.prefix}news** *shows what\'s new with the bot*\n',
            inline=True)
        help_embed.set_footer(text=choice(self.tips))
        await ctx.send(embed=help_embed)

    @help.command(name='admin')
    async def help_admin(self, ctx):
        """Help page for the admin-only commands."""
        help_embed = discord.Embed(color=discord.Color.green())
        help_embed.set_author(
            name="Villager Bot Commands",
            icon_url="http://olimone.ddns.net/images/villagerbotsplash1.png")
        help_embed.add_field(
            name="**Admin Only**",
            value=f'**{ctx.prefix}config** *change the settings of the bot for your server*\n'
                  f"**{ctx.prefix}purge** ***number of messages*** *deletes n number of messages where it's used*\n"
                  f'**{ctx.prefix}kick** ***@user*** *kicks the mentioned user*\n'
                  f'**{ctx.prefix}ban** ***@user*** *bans the mentioned user*\n'
                  f'**{ctx.prefix}pardon** ***@user*** *unbans the mentioned user*\n',
            inline=True)
        help_embed.set_footer(text=choice(self.tips))
        await ctx.send(embed=help_embed)

    @help.command(name="mc")
    async def help_mc(self, ctx):
        """Help page for the Minecraft commands."""
        help_embed = discord.Embed(color=discord.Color.green())
        help_embed.set_author(
            name="Villager Bot Commands",
            icon_url="http://olimone.ddns.net/images/villagerbotsplash1.png")
        help_embed.add_field(
            name="**Minecraft Commands**",
            value=f'**{ctx.prefix}mcping** ***ip:port*** *to check the status of a Java Edition Minecraft server*\n'
                  f'**{ctx.prefix}mcpeping** ***ip*** *to check the status of a Bedrock Edition Minecraft server*\n'
                  f'**{ctx.prefix}stealskin** ***gamertag*** *steal another player\'s Minecraft skin*\n'
                  f'**{ctx.prefix}nametouuid** ***gamertag*** *gets the Minecraft uuid of the given player*\n'
                  f'**{ctx.prefix}uuidtoname** ***uuid*** *gets the gamertag from the given Minecraft uuid*\n'
                  f'**{ctx.prefix}randommc** *sends a random Minecraft server*\n'
                  f'**{ctx.prefix}buildidea** *sends a random idea on what you could build*\n'
                  f'**{ctx.prefix}colorcodes** *sends a Minecraft color code cheat-sheet your way.*\n',
            inline=True)
        help_embed.set_footer(text=choice(self.tips))
        await ctx.send(embed=help_embed)

    @commands.command(name="info", aliases=["information"])
    async def information(self, ctx):
        """Show general statistics and links for the bot."""
        infoMsg = discord.Embed(color=discord.Color.green())
        infoMsg.add_field(name="Creators", value="Iapetus11#6821 &\n TrustedMercury#1953", inline=True)
        infoMsg.add_field(name="Bot Library", value="Discord.py", inline=True)
        infoMsg.add_field(name="Command Prefix", value=ctx.prefix, inline=True)
        infoMsg.add_field(name="Total Servers", value=str(len(self.bot.guilds)), inline=True)
        infoMsg.add_field(name="Shards", value=str(self.bot.shard_count), inline=True)
        infoMsg.add_field(name="Total Users", value=str(len(self.bot.users)), inline=True)
        infoMsg.add_field(name="Bot Page", value="[Click Here](https://top.gg/bot/639498607632056321)", inline=True)
        infoMsg.add_field(name="\uFEFF", value="\uFEFF", inline=True)
        infoMsg.add_field(name="Discord", value="[Click Here](https://discord.gg/39DwwUV)", inline=True)
        infoMsg.set_author(name="Villager Bot Information", url=discord.Embed.Empty, icon_url="http://olimone.ddns.net/images/villagerbotsplash1.png")
        await ctx.send(embed=infoMsg)

    @commands.command(name="new", aliases=["newstuff", "update", "recent", "events", "news"])
    async def whats_new(self, ctx):
        """Show the changelog embed."""
        emb = discord.Embed(color=discord.Color.green())
        emb.set_author(name="What's new with Villager Bot?", url=discord.Embed.Empty, icon_url="http://olimone.ddns.net/images/villagerbotsplash1.png")
        emb.add_field(name="Bot Updates", value="- Voting now has a chance to give you items rather than just emeralds.\n"
                                                "- Vault slots are now easier to get.\n"
                                                "- Bot has been made far more stable, crash protection has been added.\n"
                                                f"- New {ctx.prefix}news command!\n"
                                                f"- Now, if you get pillaged, you receive a dm from the bot saying how much got stolen.\n", inline=False)
        emb.set_footer(text=choice(self.tips))
        await ctx.send(embed=emb)

    @commands.command(name="ping", aliases=["pong", "ding", "dong"])  # Checks latency between Discord API and the bot
    async def ping(self, ctx):
        """Reply with a matching onomatopoeia and the websocket latency."""
        c = ctx.message.content.lower()
        if "pong" in c:
            pp = "Ping"
        elif "ping" in c:
            pp = "Pong"
        elif "ding" in c:
            pp = "Dong"
        elif "dong" in c:
            pp = "Ding"
        else:
            # Defensive default: previously pp was left unbound (NameError)
            # if none of the alias words appeared in the message content.
            pp = "Pong"
        await ctx.send(embed=discord.Embed(color=discord.Color.green(), description=f"<a:ping:692401875001278494> {pp}! \uFEFF ``{round(self.bot.latency*1000, 2)} ms``"))

    @commands.command(name="uptime")
    async def getuptime(self, ctx):
        """Report how long the bot has been online, based on Global.startTime."""
        p = arrow.utcnow()
        diff = (p - self.g.startTime)
        days = diff.days
        hours = int(diff.seconds / 3600)
        minutes = int(diff.seconds / 60) % 60
        # Pick singular/plural labels for each unit.
        if days == 1:
            dd = "day"
        else:
            dd = "days"
        if hours == 1:
            hh = "hour"
        else:
            hh = "hours"
        if minutes == 1:
            mm = "minute"
        else:
            mm = "minutes"
        await ctx.send(embed=discord.Embed(color=discord.Color.green(), description="Bot has been online for "+str(days)+" "+dd+", "+str(hours)+" "+hh+", and "+str(minutes)+" "+mm+"!"))

    @commands.command(name="vote", aliases=["votelink"])
    async def votelink(self, ctx):
        """Send the top.gg voting link."""
        voteL = discord.Embed(title="Vote for Villager Bot", description="[Click Here!](https://top.gg/bot/639498607632056321/vote)", color=discord.Color.green())
        voteL.set_thumbnail(url="http://olimone.ddns.net/images/villagerbotsplash1.png")
        await ctx.send(embed=voteL)

    @commands.command(name="invite", aliases=["invitelink"])
    async def inviteLink(self, ctx):
        """Send the bot invite link."""
        invL = discord.Embed(title="Add Villager Bot to your server", description="[Click Here!](https://bit.ly/2tQfOhW)", color=discord.Color.green())
        invL.set_thumbnail(url="http://olimone.ddns.net/images/villagerbotsplash1.png")
        await ctx.send(embed=invL)

    async def _search_and_send(self, ctx, query, **search_kwargs):
        """Run a web search and send the first hit, or a 'no results' embed.

        Shared by the google/youtube/reddit commands, which previously
        triplicated this exact logic.
        """
        await ctx.trigger_typing()
        rs = []
        for result in search(query, tld="co.in", num=1, stop=1, pause=0, **search_kwargs):
            rs.append(result)
        if len(rs) > 0:
            await ctx.send(rs[0])
        else:
            await ctx.send(embed=discord.Embed(color=discord.Color.green(), description="No results found for query \""+query+"\""))

    @commands.command(name="google")
    @commands.cooldown(1, 2, commands.BucketType.user)
    async def googleSearch(self, ctx, *, query: str):
        """Send the top Google result for the query."""
        await self._search_and_send(ctx, query)

    @commands.command(name="youtube")
    @commands.cooldown(1, 2, commands.BucketType.user)
    async def ytSearch(self, ctx, *, query: str):
        """Send the top YouTube result for the query."""
        await self._search_and_send(ctx, query, domains=["youtube.com"])

    @commands.command(name="reddit")
    @commands.cooldown(1, 2, commands.BucketType.user)
    async def redditSearch(self, ctx, *, query: str):
        """Send the top Reddit result for the query."""
        await self._search_and_send(ctx, query, domains=["reddit.com"])
def setup(bot):
    # discord.py extension entry point: register the Useful cog on the bot.
    bot.add_cog(Useful(bot))
| 2.765625 | 3 |
src/python/pants/backend/python/pex_util.py | AllClearID/pants | 0 | 12773755 | <filename>src/python/pants/backend/python/pex_util.py
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pex.interpreter import PythonInterpreter
from pex.platforms import Platform
def create_bare_interpreter(binary_path):
  """Creates an interpreter for python binary at the given path.

  The interpreter is bare in that it has no extras associated with it.

  :returns: A bare python interpreter with no extras.
  :rtype: :class:`pex.interpreter.PythonInterpreter`
  """
  # TODO: Replace with a more direct PythonInterpreter construction API call when
  # https://github.com/pantsbuild/pex/issues/510 is fixed.
  identity = PythonInterpreter.from_binary(binary_path).identity
  return PythonInterpreter(binary_path, identity, extras=None)
def get_local_platform():
  """Returns the name of the local platform; eg: 'linux_x86_64' or 'macosx_10_8_x86_64'.

  :returns: The local platform name.
  :rtype: str
  """
  # TODO: Kill some or all usages when https://github.com/pantsbuild/pex/issues/511
  # is fixed.
  return Platform.current().platform
| 2.453125 | 2 |
localite/api.py | stim-devices/dev-localite | 0 | 12773756 | <filename>localite/api.py<gh_stars>0
from localite.flow.mitm import start, kill
from localite.coil import Coil
| 1.21875 | 1 |
python/matrix/matrix.py | ropable/exercism | 0 | 12773757 | <filename>python/matrix/matrix.py
class Matrix:
    """A rectangular grid of integers parsed from a whitespace-delimited string."""

    def __init__(self, matrix_string: str):
        """Parse newline-separated rows of space-separated integers."""
        rows = []
        for line in matrix_string.splitlines():
            rows.append([int(token) for token in line.split()])
        self.matrix = rows

    def row(self, index: int) -> list[int]:
        """Return the 1-indexed row as a list of ints."""
        return self.matrix[index - 1]

    def column(self, index: int) -> list[int]:
        """Return the 1-indexed column as a list of ints."""
        return [row[index - 1] for row in self.matrix]
| 3.59375 | 4 |
setup.py | goyalankit/cgtop | 0 | 12773758 | from setuptools import setup
#from cgtop import __version__
setup(
name='cgtop',
version=0.0,
description='A cgroup resource viewer',
long_description=open('README.md').read(),
keywords='top for cgroups',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/goyalankit/cgtop',
license='MIT',
download_url= 'https://github.com/goyalankit/cgtop/archive/master.zip',
packages=['cgtop'],
install_requires=[
"brownie>=0.5.1"
],
entry_points={
'console_scripts': ['cgtop = cgtop.main:top']
},
)
| 1.359375 | 1 |
security.py | alexhendra/Learn-Flask-RestFul | 0 | 12773759 | <gh_stars>0
from werkzeug.security import safe_str_cmp
from models.user import UserModel
# users = [
# User(1, 'bob', 'asdf')
# ]
# username_mapping = {
# 'bob': {
# 'id': 1,
# 'username': 'bob',
# 'password': '<PASSWORD>'
# }
# }
# userid_mapping = {
# 1: {
# 'id': 1,
# 'username': 'bob',
# 'password': '<PASSWORD>'
# }
# }
# username_mapping = {u.username: u for u in users}
# userid_mapping = {u.id: u for u in users}
#
#
# def authenticate(username, password):
# user = username_mapping.get(username, None)
# if user and safe_str_cmp(user.password, password):
# return user
#
#
# def identity(payload):
# user_id = payload['identity']
# return userid_mapping.get(user_id, None)
def authenticate(username, password):
    """Flask-JWT authentication handler.

    Returns the user when the username exists and the password matches,
    otherwise returns None implicitly. The comparison is constant-time to
    avoid timing attacks.
    """
    # hmac.compare_digest is the stdlib replacement for werkzeug's
    # safe_str_cmp, which was removed in Werkzeug 2.1.
    import hmac

    user = UserModel.find_by_username(username)
    # NOTE(review): assumes user.password is a str (as it was when compared
    # with safe_str_cmp) — confirm it is not pre-hashed bytes.
    if user and hmac.compare_digest(
            user.password.encode("utf-8"), password.encode("utf-8")):
        return user
def identity(payload):
    """Flask-JWT identity handler: resolve a decoded token payload to its user."""
    return UserModel.find_by_id(payload['identity'])
| 2.609375 | 3 |
src/smach_based_introspection_framework/online_part/smach_modifier/modify_user_sm.py | birlrobotics/smach_based_introspection_framework | 7 | 12773760 | import copy
import types
from smach_based_introspection_framework.online_part.framework_core.states import (
RollBackRecovery,
)
import introspection_execute
from smach_based_introspection_framework.online_part.robot_screen_visualization.setter import(
show_anomaly_detected,
show_everyhing_is_good,
)
def run(sm):
    """Augment a user-supplied SMACH state machine with introspection hooks.

    Every user state gets its execute() wrapped by the introspection version
    and a new "Revert" outcome routed to an auto-generated RollBackRecovery
    state, which can either fail the task or re-enter any user state.

    Returns the modified state machine.
    """
    import smach
    with sm:
        # A snapshot of the state *names* is all that is needed here; the
        # previous copy.deepcopy duplicated every state object only to
        # iterate over the mapping's keys.
        user_state_names = list(sm._states)

        # Redirect each user state through the introspection wrapper and
        # route its new "Revert" outcome to the recovery state.
        for state_name in user_state_names:
            obj = sm._states[state_name]
            obj.execute = types.MethodType(introspection_execute.execute, obj)
            obj._outcomes.add("Revert")
            sm._transitions[state_name]["Revert"] = RollBackRecovery.__name__

        # Build the Recovery state's outcomes/transitions automatically:
        # it may fail outright or re-enter any of the user states.
        recovery_outcomes = ['RecoveryFailed']
        recovery_state_transitions = {
            'RecoveryFailed': 'TaskFailed'
        }
        for state_name in user_state_names:
            recovery_outcomes.append('Reenter_'+state_name)
            recovery_state_transitions['Reenter_'+state_name] = state_name

        smach.StateMachine.add(
            RollBackRecovery.__name__,
            RollBackRecovery(outcomes=recovery_outcomes),
            transitions=recovery_state_transitions
        )
    return sm
| 2.15625 | 2 |
mandarinizer.py | JackBDu/image-mandarinizer | 2 | 12773761 | <reponame>JackBDu/image-mandarinizer
#!/usr/bin/env python
# encoding=utf-8
# you need to install opencv before running the script import cv2
import sys # exit
import time # sleep
import argparse # argparse
try:
import cv2
except ImportError as error:
print "ImportError:", error.args[0]
sys.exit(1)
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2017, <NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__email__ = "<EMAIL>"
# parsing arguments
parser = argparse.ArgumentParser(description="Mandarinize an image file, a video file or a webcam stream.")
parser.add_argument('--version', action='version', version='%(prog)s 0.1')
parser.add_argument('-inv', "--invert", action='store_true', help="invert the color of the frame")
parser.add_argument('-p', "--preview", action='store_false', help="toggle preview")
parser.add_argument('-f', "--flip", action='store_true', help="toggle the vertical flip of the frame")
parser.add_argument('-s', "--space", action='count', help="add a space between every two characters")
parser.add_argument('-w', "--width", type=int, default=64, help="specify the width of the output")
parser.add_argument('-fps', "--framerate", type=int, default=12, help="specify the frames per second")
parser.add_argument('-d', "--depth", type=int, default=16, choices=[2,4,8,16], help="specify the color depth")
parser.add_argument('-c', "--character", nargs='+', help="specify a list of characters by the order of indensity")
parser.add_argument('-v', "--video", help="path to the video file")
parser.add_argument('-i', "--image", nargs='+', help="paths to the image files")
parser.add_argument('-o', "--out", default="out", help="path to the ouput file")
args = parser.parse_args()
if args.character:
char_list = args.character
else:
if args.depth == 2:
char_list = ["龘","一"] # 2-bit char list
elif args.depth == 4:
char_list = ["龘","淼","从","人"] # 4-bit char list
elif args.depth == 8:
char_list = ["龘","驫","羴","淼","壵","从","人","一"] # 8-bit char list
else:
char_list = ["龘","驫","羴","掱","蟲","淼","品","壵","尛","太","大","木","乂","人","丿","丶"] # 16-bit char list
if args.image:
for i in range(len(args.image)):
filename = args.out+'_'+str(i+1)+'.txt';
# open the file to write
print "creating the txt file: " + filename
file = open(filename, 'w')
# read image file as grayscale
print "loading the image file: " + args.image[i]
img = cv2.imread(args.image[i], cv2.IMREAD_GRAYSCALE)
# get the height and width of the image
height, width = img.shape
# resize the image
img = cv2.resize(img,(args.width, height*args.width/width), interpolation = cv2.INTER_CUBIC)
# get the new height and width of the image
height, width = img.shape
# loop through each row of pixels
print "mandarinizing..."
contentToWrite = ""
for i in range(height):
# loop through each pixel in the i-th row
for j in range(width):
# write corresponding chinese characters based on the color of the pixel
char_length = len(char_list)
for k in range(char_length):
if img[i, j] < 256/char_length*(k+1) and img[i, j] >= 256/char_length*k:
if args.invert:
contentToWrite += char_list[char_length-k-1]
else:
contentToWrite += char_list[k]
if args.space:
for l in range(args.space):
contentToWrite += ' '
break
# write a new line
contentToWrite += "\n"
if args.preview:
print contentToWrite
print "saving txt file: " + filename
file.write(contentToWrite)
# close file
print "closing txt file: " + filename
file.close()
print "mandarinized!"
sys.exit()
# turn on webcam when video file is not specified
if args.video:
videoSource = args.video
else:
videoSource = 0
# initialize video capture from builtin webcam
cap = cv2.VideoCapture(videoSource)
try:
# open the file to write
file = open(args.out+".manvid", "w")
# write file type in the first line of output file
file.write("manvid,")
# read frame from web cam
ret, frame = cap.read()
if not ret:
# release the video capture
print "video capture failed!"
file.close()
cap.release()
sys.exit()
# convert frame to grayscale image
img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# get the height and width of the image
height, width = img.shape
# calculate the image height based on the image width
image_height = height*args.width/width
# write meta data in the first line of output file
file.write(str(args.width)+",")
file.write(str(image_height)+",")
file.write(str(args.framerate)+",")
file.write(str(args.depth)+"\n")
print "mandarinizing..."
while True:
# read frame from web cam
ret, frame = cap.read()
if not ret:
# release the video capture
print "mandarinized!"
file.close()
cap.release()
sys.exit()
# preparing the frame for mandarinizing
img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
img = cv2.equalizeHist(img)
# resize the image
img = cv2.resize(img,(args.width, image_height), interpolation = cv2.INTER_CUBIC)
# flip the image vertically
if (args.flip):
img = cv2.flip(img, 1)
# get the new height and width of the image
height, width = img.shape
# initialize an empty string to store the entire frame of text
frameToPrint = ""
# loop through each row of pixels of the image
for i in range(height):
# loop through each pixel in the i-th row of the image
for j in range(width):
# write corresponding chinese characters based on the color of the pixel
char_length = len(char_list)
for k in range(char_length):
if img[i, j] < (k+1)*256/char_length and img[i, j] >= k*256/char_length:
if args.invert:
frameToPrint += char_list[char_length-k-1]
else:
frameToPrint += char_list[k]
if args.space:
for i in range(args.space):
frameToPrint += ' '
break
# write a new line
frameToPrint += "\n"
if args.preview:
# clear the terminal window
print(chr(27) + "[2J")
# print out the frame
print frameToPrint
file.write(frameToPrint)
# when preview on or webcam mode, that is, no delay if converting video with preview off
if args.preview or args.video:
# define the period of time each frame's gonna last
time.sleep(1.0/args.framerate)
# handle KeyboardInterrupt, typically Ctrl + C
except KeyboardInterrupt:
# release the video capture
file.close()
cap.release()
sys.exit()
| 2.875 | 3 |
rest_rpc/training/core/hypertuners/tune_interface.py | aimakerspace/synergos_rest | 0 | 12773762 | <gh_stars>0
#!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
import inspect
import os
from typing import Dict, List, Tuple, Union, Callable, Any
# Libs
import ray
from ray import tune
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.suggest import ConcurrencyLimiter
from ray.tune.suggest.basic_variant import BasicVariantGenerator
# Custom
from rest_rpc import app
from rest_rpc.training.core.utils import TuneParser
from rest_rpc.training.core.hypertuners import BaseTuner
from rest_rpc.training.core.hypertuners.tune_driver_script import tune_proc
from synarchive.connection import RunRecords
from synmanager.train_operations import TrainProducerOperator
##################
# Configurations #
##################
# Absolute path of this module (used for log attribution elsewhere, presumably).
SOURCE_FILE = os.path.abspath(__file__)
# Deployment topology flags pulled from the Flask app's configuration.
is_master = app.config['IS_MASTER']
is_cluster = app.config['IS_CLUSTER']
# Archive database location and local hardware budget.
db_path = app.config['DB_PATH']
cores_used = app.config['CORES_USED']
gpu_count = app.config['GPU_COUNT']
# Translates user-facing tuning config into Ray Tune objects.
tune_parser = TuneParser()
# Synergos structured logger shared across this module.
logging = app.config['NODE_LOGGER'].synlog
logging.debug("training/optimizations.py logged", Description="No Changes")
##################################################
# Hyperparameter Tuning Interface - RayTuneTuner #
##################################################
class RayTuneTuner(BaseTuner):
"""
Interfacing class for performing hyperparameter tuning on Ray.Tune. Due to
job parallelization, it is contingent that a queue be enforced, and thus, a
producer from Synergos Manager is necessary to facilitate this procedure.
Attributes:
platform (str): What hyperparameter tuning service to use
log_dir (str): Directory to export cached log files
"""
    def __init__(self, host: str, port: int, log_dir: str = None):
        """Bind this Tune-backed tuner to a queue endpoint.

        Args:
            host (str): Host of the Synergos Manager queue
            port (int): Port of the Synergos Manager queue
            log_dir (str): Directory to export cached log files
        """
        super().__init__(platform="tune", log_dir=log_dir)
        # Endpoint of the Synergos Manager producer used to queue tuning jobs.
        self.host = host
        self.port = port
############
# Checkers #
############
def is_running(self) -> bool:
""" Checks if the execution of current tunable is still in progress
Returns:
State (bool)
"""
has_pending = self.__executor.in_staging_grace_period()
has_active = len(self.__executor.get_running_trials())
return has_pending or has_active
###########
# Helpers #
###########
@staticmethod
def _generate_cycle_name(keys: Dict[str, Any]) -> str:
""" Generates a unique name for the current optimization process
Args:
filters (list(str)): Composite IDs of a federated combination
Returns:
Cycle name (str)
"""
collab_id = keys['collab_id']
project_id = keys['project_id']
expt_id = keys['expt_id']
optim_cycle_name = f"{collab_id}-{project_id}-{expt_id}-optim"
return optim_cycle_name
@staticmethod
def _retrieve_args(callable: Callable, **kwargs) -> List[str]:
""" Retrieves all argument keys accepted by a specified callable object
from a pool of miscellaneous potential arguments
Args:
callable (callable): Callable object to be analysed
kwargs (dict): Any miscellaneous arguments
Returns:
Argument keys (list(str))
"""
input_params = list(inspect.signature(callable).parameters)
arguments = {}
for param in input_params:
param_value = getattr(kwargs, param, None)
if param_value:
arguments[param] = param_value
return arguments
@staticmethod
def _count_args(callable: Callable) -> List[str]:
""" Counts no. of parameters ingestible by a specified callable object.
Args:
callable (callable): Callable object to be analysed
Returns:
Argument keys (list(str))
"""
input_params = list(inspect.signature(callable).parameters)
param_count = len(input_params)
return param_count
def _parse_max_duration(self, duration: str = "1h") -> int:
""" Takes a user specified max duration string and converts it to
number of seconds.
Args:
duration (str): Max duration string
Returns:
Duration in seconds (int)
"""
SECS_PER_UNIT = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
convert_to_seconds = lambda s: int(s[:-1]) * SECS_PER_UNIT[s[-1]]
try:
duration_tokens = duration.split(' ')
total_seconds = sum([convert_to_seconds(d) for d in duration_tokens])
return total_seconds
except Exception:
logging.warn(f"Invalid duration '{duration}' declared! Defaulted to None.")
return None
def _calculate_resources(
self,
max_concurrent: int
) -> Dict[str, Union[float, int]]:
""" Distributes no. of system cores to hyperparameter set generation
Args:
max_concurrent (int): No. of concurrent Tune jobs to run
Returns:
Resource kwargs (dict)
"""
###########################
# Implementation Footnote #
###########################
# [Causes]
# Tune is responsible for generating hyperparameter sets. However,
# Synergos handles all federated training within their own grids.
# [Problems]
# Tune auto-detects the amount of resources available for use. This is
# not an issue if only one Synergos component is deployed onto 1 VM.
# However, in the event that multiple components are deployed to the
# same VM, then these Tune jobs would auto-scale & over-allocate itself
# resources, when in fact it is performing a low-compute task of set
# generation as compared to other components
# [Solutions]
# Only allocate cores available to the system to generate
# hyperparameter sets, since all jobs will be handled out of Tune,
# in seperate grids, that may or may not be in the same machine
# consuming the same resources.
resources_per_trial={
'cpu': cores_used/max_concurrent,
'gpu': 0
}
return resources_per_trial
def _initialize_trial_scheduler(self, scheduler_str: str, **kwargs):
""" Parses user inputs and initializes a Tune trial scheduler to manage
the optimization process
Args:
scheduler_str (str): Name of scheduler module as a string
kwargs: Any parameters as required by aforementioned scheduler
Returns:
Scheduler (Tune object)
"""
parsed_scheduler = tune_parser.parse_scheduler(scheduler_str=scheduler_str)
scheduler_args = self._retrieve_args(parsed_scheduler, **kwargs)
initialized_scheduler = parsed_scheduler(**scheduler_args)
return initialized_scheduler
    def _initialize_trial_searcher(self, searcher_str: str, **kwargs):
        """ Parses user inputs and initializes a Tune trial searcher to manage
            the optimization process

        Note:
            Axsearch has conflicting dependencies. Dragonfly-opt is not
            supported.
        Args:
            searcher_str (str): Name of searcher module as a string
            kwargs: Any parameters as required by aforementioned searcher
        Returns:
            Searcher (Tune object)
        """
        parsed_searcher = tune_parser.parse_searcher(searcher_str=searcher_str)
        # Only the kwargs accepted by the searcher's constructor are forwarded.
        searcher_args = self._retrieve_args(parsed_searcher, **kwargs)
        initialized_searcher = parsed_searcher(**searcher_args)

        ###########################
        # Implementation Footnote #
        ###########################

        # [Causes]
        # BasicVariantGenerator in Tune is fundamental, so it itself is a
        # searcher, and is the primary wrapper around other searchers

        # [Problems]
        # This is asymmetric since it is unable to wrap around itself

        # [Solutions]
        # Detect if intialized searcher is of BasicVariantGenerator, and manage
        # it accordingly

        if isinstance(initialized_searcher, BasicVariantGenerator):
            search_algo = initialized_searcher
        else:
            search_algo = ConcurrencyLimiter(
                searcher=initialized_searcher,
                # NOTE(review): hard-coded to 1; the commented-out alternative
                # was kwargs.get('max_concurrent', 1) -- confirm the cap is
                # intentionally fixed rather than user-configurable.
                max_concurrent=1,
                batch=False
            )

        return search_algo
    def _initialize_trial_executor(self):
        """ Initializes a trial executor for subsequent use.

        The executor is stored on a name-mangled private attribute
        (`__executor`) so subclasses cannot accidentally shadow it; it is
        queried elsewhere for the set of currently running trials.

        Returns:
            The initialized RayTrialExecutor
        """
        # queue_trials=False: do not queue trials beyond available resources.
        self.__executor = RayTrialExecutor(queue_trials=False)
        return self.__executor
    def _initialize_tuning_params(
        self,
        optimize_mode: str,
        trial_concurrency: int = 1,
        max_exec_duration: str = "1h",
        max_trial_num: int = 10,
        verbose: bool = False,
        **kwargs
    ) -> Dict[str, Union[float, int]]:
        """ Parses user inputs and generates a set of tuning kwargs to
            initialize other required parameter objects

        Args:
            optimize_mode (str): Direction to optimize metric (i.e. "max"/"min")
            trial_concurrency (int): No. of trials to run concurrently. This is
                in context of Tune, and is independent to the no. of jobs that
                can be run concurrently across Synergos grids. Eg. Tune is
                supposed to create 10 trials, at 5 concurrently, but
                collaborators have only deployed 2 usable grids. This way, Tune
                will still generate 5 trials, but will have to wait until all
                5 trials have been completed across 2 Synergos grids first
                before proceeding on to the next batch of 5 trials.
                NOTE(review): accepted here but not forwarded into the
                returned kwargs -- confirm whether this is intentional.
            max_exec_duration (str): Duration string capping each trial's runtime
            max_trial_num (int): Max number of trials to run before giving up
            verbose (bool): Toggles verbosity of outputs
            kwargs: Miscellaneous params that may or may not be used
        Returns:
            Tuning parameters (dict) -- suitable for expansion into tune.run()
        """
        # Convert the human-readable duration cap into seconds (None if bad).
        parsed_duration = self._parse_max_duration(max_exec_duration)
        configured_executor = self._initialize_trial_executor()
        local_dir = self.generate_output_directory()
        return {
            'mode': optimize_mode,
            'num_samples': max_trial_num,
            'time_budget_s': parsed_duration,
            'trial_executor': configured_executor,
            'local_dir': local_dir,
            'checkpoint_at_end': False,
            # Tune verbosity level: 3 = detailed trial output, 1 = minimal.
            'verbose': 3 if verbose else 1,
            'log_to_file': True
        }
def _initialize_search_space(self, search_space: dict) -> dict:
""" Mapping custom search space config into Tune config
Args:
search_space (dict): Parameter space to search upon
Returns
Tune search configurations (dict)
"""
configured_search_space = {}
for hyperparameter_key in search_space.keys():
hyperparameter_type = search_space[hyperparameter_key]['_type']
hyperparameter_value = search_space[hyperparameter_key]['_value']
try:
parsed_type = tune_parser.parse_type(hyperparameter_type)
param_count = self._count_args(parsed_type)
tune_config_value = (
parsed_type(*hyperparameter_value)
if param_count > 1
else parsed_type(hyperparameter_value)
)
configured_search_space[hyperparameter_key] = tune_config_value
except:
raise RuntimeError(f"Specified hyperparmeter type '{hyperparameter_type}' is unsupported!")
return configured_search_space
##################
# Core Functions #
##################
    def tune(
        self,
        keys: Dict[str, str],
        grids: List[Dict[str, Any]],
        action: str,
        experiment: Dict[str, Any],
        search_space: Dict[str, Dict[str, Union[str, bool, int, float]]],
        metric: str,
        optimize_mode: str,
        scheduler: str = "AsyncHyperBandScheduler",
        searcher: str = "BasicVariantGenerator",
        trial_concurrency: int = 1,
        max_exec_duration: str = "1h",
        max_trial_num: int = 10,
        auto_align: bool = True,
        dockerised: bool = True,
        verbose: bool = True,
        log_msgs: bool = True,
        **kwargs
    ):
        """ Triggers parallelized hyperparameter optimzation

        Args:
            keys (dict): Unique IDs that form a composite key identifying the
                current federated cycle
            grids (list(dict)): All availble registered Synergos grids
            action (str): ML operation to perform
            experiment (dict): Experiment record documenting model architecture
            search_space (dict): Parameter space to search upon
            metric (str): Metric to optimize on
            optimize_mode (str): Direction to optimize metric (i.e. "max"/"min")
            scheduler (str): Name of scheduler module as a string
            searcher (str): Name of searcher module as a string
            trial_concurrency (int): No. of Tune trials to generate concurrently
            max_exec_duration (str): Duration string capping each trial's runtime
            max_trial_num (int): Max number of trials to run before giving up
            auto_align (bool): Toggles if model should be auto-aligned
            dockerized (bool): Toggles if deployed system is dockerized
            verbose (bool): Toggles verbosity of outputs
            log_msgs (bool): Toggles logging
            kwargs: Miscellaneous parameters
        Returns:
            Results
        """
        ###########################
        # Implementation Footnote #
        ###########################

        # [Cause]
        # In SynCluster mode, all processes are inducted as jobs. All jobs are sent
        # to Synergos MQ to be linearized for parallel distributed computing.

        # [Problems]
        # Ray logs need to be aligned across distributed setting

        # [Solution]
        # Start director as a ray head node, with all other TTPs as child nodes
        # connecting to it. Tuning parameters will be reported directly to the head
        # node, bypassing the queue

        # NOTE(review): `cores_used`, `is_cluster` and `tune_proc` are
        # module-level globals defined elsewhere in this file.
        ray.init(num_cpus=cores_used, num_gpus=0)
        assert ray.is_initialized() == True

        try:
            optim_cycle_name = self._generate_cycle_name(keys)
            configured_search_space = self._initialize_search_space(search_space)

            # Trial config: fixed federated-cycle context merged with the
            # sampled hyperparameter search space.
            config = {
                'is_cluster': is_cluster,
                'host': self.host,
                'port': self.port,
                'keys': keys,
                'grids': grids,
                'action': action,
                'experiment': experiment,
                'metric': metric,
                'auto_align': auto_align,
                'dockerised': dockerised,
                'verbose': verbose,
                'log_msgs': log_msgs,
                **configured_search_space
            }

            # NOTE(review): `_calculate_resources` documents its parameter as
            # the no. of *concurrent* jobs, yet `max_trial_num` is passed here
            # instead of `trial_concurrency` -- confirm which is intended.
            configured_resources = self._calculate_resources(max_trial_num)

            tuning_params = self._initialize_tuning_params(
                optimize_mode=optimize_mode,
                trial_concurrency=trial_concurrency,
                max_exec_duration=max_exec_duration,
                max_trial_num=max_trial_num,
                verbose=verbose,
                **kwargs
            )

            configured_scheduler = self._initialize_trial_scheduler(
                scheduler_str=scheduler,
                **{**kwargs, **tuning_params}
            )

            search_algorithm = self._initialize_trial_searcher(
                searcher_str=searcher,
                **{**kwargs, **tuning_params}
            )

            results = tune.run(
                tune_proc,
                name=optim_cycle_name,
                config=config,
                resources_per_trial=configured_resources,
                scheduler=configured_scheduler,
                search_alg=search_algorithm,
                **tuning_params
            )

        finally:
            # Stop Ray instance
            ray.shutdown()
            assert ray.is_initialized() == False

        return results
| 1.796875 | 2 |
examples/NameTableGeneration.py | SavagePencil/RetroGraphicsToolkit | 8 | 12773763 | <gh_stars>1-10
import os
from PIL import Image
from rgtk.BitSet import BitSet
from rgtk.ColorEntry import ColorEntry
from rgtk.ColorRemap import ColorRemap
from rgtk.ColorRemapsIntoStagingPalettesEvaluator import ColorRemapsIntoStagingPalettesEvaluator
from rgtk.constraint_solver import ConstraintSolver
from rgtk.IndexedColorArray import IndexedColorArray
from rgtk.Interval import Interval
from rgtk.IntervalsToBitSetsEvaluator import IntervalsToBitSetsEvaluator
from rgtk.NameTableEntry import NameTableEntry
from rgtk.Pattern import Pattern
from rgtk.PatternsIntoPatternHashMapsEvaluator import PatternsIntoPatternHashMapsEvaluator
from rgtk.PixelArray import PixelArray
from rgtk.StagingPalette import StagingPalette
##############################################################################
# PIXEL ARRAY
# Track the path relative to this script.
our_dir = os.path.dirname(__file__)

font_image = Image.open(os.path.join(our_dir, "assets/font.png")).convert("RGB")
font_pixel_array = PixelArray(font_image, 0, 0, font_image.width, font_image.height)
# Quantize channel depth (8,8,8) -> (2,2,2); presumably matching the target
# hardware's 2-bit-per-channel palette -- TODO confirm.
font_pixel_array.quantize((8,8,8), (2,2,2))

flags_image = Image.open(os.path.join(our_dir, "assets/flags.png")).convert("RGB")
flags_pixel_array = PixelArray(flags_image, 0, 0, flags_image.width, flags_image.height)
flags_pixel_array.quantize((8,8,8), (2,2,2))

src_pixel_arrays = [font_pixel_array, flags_pixel_array]

##############################################################################
# COLOR REMAP
# Solve the color remap problem *FIRST*.  This will give us a set of indexed
# color arrays that we can use to find unique patterns.

# FONT
# Extract all unique colors
font_unique_pixel_values_list = font_pixel_array.generate_deterministic_unique_pixel_list()

# Font comes in as green.  Remap to white.
white_entry = ColorEntry()
white_entry.intentions.attempt_set_intention(ColorEntry.INTENTION_COLOR, (255,255,255))
font_special_color_remap = {(0,255,0): white_entry}

font_color_remap = ColorRemap(initial_intentions_map={}, unique_pixel_values_list=font_unique_pixel_values_list, color_remap=font_special_color_remap)

# FLAGS
# Extract all unique colors
flags_unique_pixel_values_list = flags_pixel_array.generate_deterministic_unique_pixel_list()

flags_color_remap = ColorRemap(initial_intentions_map={}, unique_pixel_values_list=flags_unique_pixel_values_list, color_remap={})

# COLOR REMAPS
color_remaps = [font_color_remap, flags_color_remap]

##############################################################################
# STAGING PALETTES
# Two 16-entry palettes: one usable by sprites, one background-only.
staging_palette_sprites = StagingPalette(16)
staging_palette_bg_only = StagingPalette(16)

staging_palettes = [staging_palette_sprites, staging_palette_bg_only]
##############################################################################
# SOLUTION FOR COLOR REMAPS -> STAGING PALETTES
# Run the constraint solver until it either finds a solution or exhausts the
# search space.
remap_to_staging_solver = ConstraintSolver(color_remaps, staging_palettes, ColorRemapsIntoStagingPalettesEvaluator, None)
while (len(remap_to_staging_solver.solutions) == 0) and (remap_to_staging_solver.is_exhausted() == False):
    remap_to_staging_solver.update()

# TODO find the best one.
remap_to_staging_solution = remap_to_staging_solver.solutions[0]

for move in remap_to_staging_solution:
    # Let the corresponding color remap process these moves.
    source_remap = color_remaps[move.source_index]
    source_remap.remap_to_staging_palette(move, staging_palettes)

# Now apply the solution to the staging palettes.
remap_to_staging_solver.apply_solution(remap_to_staging_solution)

##############################################################################
# SOURCE PATTERN CREATION
# We'll create a unique pattern for each entry in the image.  Later we'll
# merge those that we want to dupe-strip (or can be flipped to be dupes).
src_pattern_sets = []

# All patterns in this example may be matched via horizontal flips.
pattern_intention_map_flips = {
    Pattern.INTENTION_FLIPS_ALLOWED : Pattern.Flip.HORIZ
}

# Go through each large image and dice it up into smaller Patterns.
for image_array_idx in range(len(src_pixel_arrays)):
    src_pixel_array = src_pixel_arrays[image_array_idx]
    color_remap = color_remaps[image_array_idx]

    src_patterns = []
    pattern_width = 8
    pattern_height = 8
    for start_y in range(0, src_pixel_array.height, pattern_height):
        for start_x in range(0, src_pixel_array.width, pattern_width):
            # Convert each section of pixels into the *staging* palette indices.
            # This may seem backwards.  Why not just rez up a pixel array, and then
            # get the indexed color array?
            # Here's why we do it this way:
            # We want a consistent color mapping for the WHOLE image.  This will
            # let us load the whole pattern data with one color remap.  Let's say
            # we have a pattern in our image that is totally black, and another
            # pattern that is totally white.  If we create an IndexedColorArray
            # for each of these patterns, they will be identical, because they
            # have only one color (they'll both be all zeroes).
            # But we've already mapped the colors for the image as a whole,
            # so those two will get unique values when remapped against them.
            remapped_indices = []
            for y in range(start_y, start_y + pattern_height):
                for x in range(start_x, start_x + pattern_width):
                    pixel = src_pixel_array.get_pixel_value(x, y)
                    remapped_index = color_remap.convert_pixel_value_to_staging_index(pixel)
                    remapped_indices.append(remapped_index)

            indexed_array = IndexedColorArray(width=pattern_width, height=pattern_height, indexed_array=remapped_indices)
            pattern = Pattern(index_array=indexed_array, initial_intentions_map=pattern_intention_map_flips)
            src_patterns.append(pattern)

    # Add all source patterns.
    src_pattern_sets.append(src_patterns)
##############################################################################
# UNIQUE PATTERN SOLVING
# All pattern sets share one destination hash map, so duplicates are stripped
# across images as well as within them.
dest_map = {}
dest_maps = [dest_map]

unique_patterns_lists = []
src_idx_to_dest_pattern_flip_lists = []

# Execute a solver for each pattern set, but we'll merge all into the same destination.
for pattern_set in src_pattern_sets:
    solver = ConstraintSolver(sources=pattern_set, destinations=dest_maps, evaluator_class=PatternsIntoPatternHashMapsEvaluator, debugging=None)

    while((len(solver.solutions) == 0) and (solver.is_exhausted() == False)):
        solver.update()

    solution = solver.solutions[0]
    solver.apply_solution(solution)

    # Go through the solution and find the ones that were *added*, as these will be
    # considered our "unique" patterns.  All those that *matched* will point to them.
    # Example:
    #   'b' is index 1, and 'c' is index 2, and 'd' is index 3.
    #   'b' and 'd' are horizontal flips, while 'c' is its own thing.
    #
    #   'b' points to 'b', since it was the original.
    #   'c' points to 'c', for the same reason.
    #   'd' points to 'b', with a horizontal flip.
    #
    #   So we have 2 unique patterns ('b' and 'c'), and 'd' points to 'b' with a flip.
    src_idx_to_unique_flip_list = [None] * len(pattern_set)
    for move in solution:
        change_list = move.change_list
        dest_pattern = None
        if change_list.matching_pattern_object_ref is None:
            # Add this one to the unique list.
            dest_pattern = pattern_set[move.source_index]
        else:
            # Get the pattern we matched out of the change list.
            # (The ref is a weakref; calling it dereferences the pattern.)
            dest_pattern = change_list.matching_pattern_object_ref()

        # Now add the src -> dest + flip.
        src_idx_to_unique_flip_list[move.source_index] = (dest_pattern, change_list.flips_to_match)

    # Arrange the uniques in corresponding order to the source indices.
    # We have to do this as a separate pass because the order of moves
    # (the loop above) is non-deterministic, and we want to retain the
    # original order of the source indices.
    unique_patterns_list = []
    for source_idx in range(len(src_idx_to_unique_flip_list)):
        source_pattern = pattern_set[source_idx]

        # Is this one unique?
        dest_flip_tuple = src_idx_to_unique_flip_list[source_idx]
        dest_pattern = dest_flip_tuple[0]
        if source_pattern == dest_pattern:
            # If source matches dest, we aren't remapped (i.e., we're unique.)
            unique_patterns_list.append(dest_pattern)

    unique_patterns_lists.append(unique_patterns_list)
    src_idx_to_dest_pattern_flip_lists.append(src_idx_to_unique_flip_list)
##############################################################################
# VRAM POSITIONING
# Create intervals for each of the unique tiles.
intervals = []

# The font must begin at a specific location.
# (Fixed start at tile index 20 -- presumably a hardware/layout requirement
# of this example; TODO confirm.)
font_VRAM_interval = Interval.create_fixed_length_at_start_point(20, len(unique_patterns_lists[0]))
intervals.append(font_VRAM_interval)

# Can go anywhere.  We'll treat them as contiguous, but we could just as easily split them up into multiples.
flag_VRAM_interval = Interval(begin=0, end=448, length=len(unique_patterns_lists[1]))
intervals.append(flag_VRAM_interval)

# Find a home for them.
bitsets = []
VRAMPositions = BitSet(448)
bitsets.append(VRAMPositions)

interval_to_VRAM_solver = ConstraintSolver(sources=intervals, destinations=bitsets, evaluator_class=IntervalsToBitSetsEvaluator, debugging=None)

while (len(interval_to_VRAM_solver.solutions) == 0) and (interval_to_VRAM_solver.is_exhausted() == False):
    interval_to_VRAM_solver.update()

# How'd the solution go?
solution = interval_to_VRAM_solver.solutions[0]

# Track where each pattern interval will go.
VRAM_dests = [None] * len(intervals)

for move in solution:
    # The "source" will be one of our intervals, and since we're only doing one BitSet, our "destination" will always be the VRAMPositions array.
    # Dig into the change list to figure out which slot was actually chosen.
    source_interval = intervals[move.source_index]
    dest_interval = move.change_list.chosen_interval
    if dest_interval.begin == dest_interval.end:
        print(f"Interval {move.source_index}: ({source_interval.begin}, {source_interval.end}) with length {source_interval.length} will occupy location {dest_interval.begin}.")
    else:
        print(f"Interval {move.source_index}: ({source_interval.begin}, {source_interval.end}) with length {source_interval.length} will occupy locations {dest_interval.begin} thru {dest_interval.end}")

    VRAM_dests[move.source_index] = dest_interval.begin

pattern_to_VRAM_loc_map = {}

# Map each unique pattern to its VRAM loc.
for unique_list_idx in range(len(unique_patterns_lists)):
    unique_pattern_list = unique_patterns_lists[unique_list_idx]
    VRAM_dest = VRAM_dests[unique_list_idx]

    for unique_idx in range(len(unique_pattern_list)):
        unique_pattern = unique_pattern_list[unique_idx]
        VRAM_pos = VRAM_dest + unique_idx
        pattern_to_VRAM_loc_map[unique_pattern] = VRAM_pos

##############################################################################
# NAMETABLE CREATION
# Tie it all together to create an array of patterns, flips, palettes, and
# VRAM locations.
nametables = []

for nametable_idx in range(len(src_pattern_sets)):
    nametable = []

    # Let's find our palette index.
    # NOTE(review): if no staging palette matches, palette_idx silently falls
    # through as the last index -- confirm a match is always guaranteed here.
    color_remap = color_remaps[nametable_idx]
    our_palette = color_remap.staging_palette
    for palette_idx in range(len(staging_palettes)):
        staging_palette = staging_palettes[palette_idx]
        if staging_palette is our_palette:
            break

    # Iterate through all the source patterns.
    src_pattern_set = src_pattern_sets[nametable_idx]
    for src_pattern_idx in range(len(src_pattern_set)):
        # Find its unique correspondence, and any flips.
        src_idx_to_dest_pattern_flip_list = src_idx_to_dest_pattern_flip_lists[nametable_idx]
        unique_flip_tuple = src_idx_to_dest_pattern_flip_list[src_pattern_idx]
        unique_pattern = unique_flip_tuple[0]
        flips = unique_flip_tuple[1]

        # Find the VRAM loc.
        VRAM_pos = pattern_to_VRAM_loc_map[unique_pattern]

        # We have everything we need to build the nametable entry.
        nametable_entry = NameTableEntry(VRAM_loc=VRAM_pos, palette_index=palette_idx, flips=flips)
        nametable.append(nametable_entry)

    nametables.append(nametable)

print("Done!")
convertme/fimi/fimi_writer.py | mikulatomas/convertme | 2 | 12773764 | from convertme import WriterInterface, Dataset
class FimiWriter(WriterInterface):
    """Writes a dataset in FIMI format: one transaction per line, each line
    listing the (0-based) indices of the attributes present in that row,
    separated by single spaces.
    """

    def write(self, dataset, output):
        """Serialize *dataset* to *output* in FIMI format.

        Args:
            dataset: Object exposing `.bools`, an iterable of boolean rows.
            output: Writable text stream.
        """
        for row in dataset.bools:
            # IDIOM FIX: use the enumerate value directly instead of ignoring
            # it and re-indexing the row.
            attributes = [str(attribute)
                          for attribute, present in enumerate(row)
                          if present]
            # ' '.join() adds no trailing separator and is the fastest way to
            # concatenate strings.
            output.write(' '.join(attributes) + '\n')
| 2.84375 | 3 |
twisted/websocket/test_ws_client.py | eigenphi/gcommon | 3 | 12773765 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# created: 2014-12-02
"""Demo and test program to verify WS server."""
import sys
from autobahn.twisted.websocket import WebSocketClientFactory
from autobahn.twisted.websocket import WebSocketClientProtocol
from twisted.internet import reactor
from twisted.python import log
class MyClientProtocol(WebSocketClientProtocol):
    """Minimal Autobahn WebSocket client used to smoke-test the WS server."""

    def onOpen(self):
        # Handshake complete: greet the server with a UTF-8 text frame.
        print ('connected to server')
        self.sendMessage(u"Hello, world!".encode('utf8'))

    def onMessage(self, payload, isBinary):
        # Print whatever the server sends, distinguishing binary from text.
        if isBinary:
            print("Binary message received: {0} bytes".format(len(payload)))
        else:
            print("Text message received: {0}".format(payload.decode('utf8')))

    def onClose(self, was_clean, code, reason):
        # Report why the connection closed, then stop the reactor so the
        # script's main() can return.
        print("connection closed: ", was_clean, code, reason)
        reactor.stop()  # @UndefinedVariable
def main():
    """Connect a demo WebSocket client to the local test server and run the
    Twisted reactor until the connection closes."""
    log.startLogging(sys.stdout)

    client_factory = WebSocketClientFactory()
    client_factory.protocol = MyClientProtocol

    reactor.connectTCP("127.0.0.1", 10086, client_factory)  # @UndefinedVariable
    reactor.run()  # @UndefinedVariable
# Script entry point: run the demo client against the local server.
if __name__ == "__main__":
    main()
    # Only reached after the reactor has been stopped (see onClose).
    print('Done')
| 2.578125 | 3 |
cookbook/migrations/0007_auto_20191226_0852.py | mhoellmann/recipes | 0 | 12773766 | <reponame>mhoellmann/recipes
# Generated by Django 3.0.1 on 2019-12-26 07:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Renames Recipe.time to working_time and adds a waiting_time column."""

    dependencies = [
        ('cookbook', '0006_recipe_image'),
    ]

    operations = [
        # The existing 'time' column now explicitly means hands-on time.
        migrations.RenameField(
            model_name='recipe',
            old_name='time',
            new_name='working_time',
        ),
        # New passive/waiting time, defaulting to 0 for existing rows.
        migrations.AddField(
            model_name='recipe',
            name='waiting_time',
            field=models.IntegerField(default=0),
        ),
    ]
| 1.84375 | 2 |
koku/masu/apps.py | Vasyka/koku | 2 | 12773767 | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Masu application configuration module."""
from django.apps import AppConfig
class MasuConfig(AppConfig):
    """Masu application configuration."""

    # Dotted-path label Django uses to register this app.
    name = "masu"

    def ready(self):
        """Django startup hook; invoked once the app registry is loaded."""
| 1.703125 | 2 |
electrumsv/wallet_database/cache.py | Breavyn/electrumsv | 0 | 12773768 | <filename>electrumsv/wallet_database/cache.py<gh_stars>0
"""
Due to database latency and concurrency problems that will result in race
conditions, all access needs to be authoritatively cached above the database.
This also reduces the locking overhead, as reads and writes served from the
cache no longer need to contend directly on the database.
"""
import threading
import time
from typing import cast, Dict, Iterable, List, Optional, Sequence, Tuple
from bitcoinx import double_sha256, hash_to_hex_str
from ..constants import TxFlags, MAXIMUM_TXDATA_CACHE_SIZE_MB
from ..logs import logs
from ..transaction import Transaction
from .tables import (byte_repr, CompletionCallbackType, InvalidDataError, MAGIC_UNTOUCHED_BYTEDATA,
MissingRowError, TransactionRow, TransactionTable, TxData, TxProof)
from ..util.cache import LRUCache
class TransactionCacheEntry:
    """In-memory record pairing a transaction's metadata with its flag bits.

    `time_loaded` records when the entry entered the cache; it defaults to
    the current time if not supplied.
    """
    def __init__(self, metadata: TxData, flags: TxFlags, time_loaded: Optional[float]=None) -> None:
        if time_loaded is None:
            time_loaded = time.time()
        self.metadata = metadata
        self.flags = flags
        self.time_loaded = time_loaded

    def __repr__(self):
        return f"TransactionCacheEntry({self.metadata}, {TxFlags.to_repr(self.flags)})"
class TransactionCache:
    def __init__(self, store: TransactionTable, txdata_cache_size: Optional[int]=None) -> None:
        # Default the bytedata cache budget from the app constant (MB -> bytes).
        if txdata_cache_size is None:
            txdata_cache_size = MAXIMUM_TXDATA_CACHE_SIZE_MB * (1024 * 1024)
        self._logger = logs.get_logger("cache-tx")
        # Authoritative metadata cache: tx_hash -> TransactionCacheEntry.
        self._cache: Dict[bytes, TransactionCacheEntry] = {}
        # Size-bounded LRU of raw transaction bytedata, keyed by tx_hash.
        self._bytedata_cache = LRUCache(max_size=txdata_cache_size)
        self._store = store
        self._lock = threading.RLock()
        # Eagerly load *all* metadata rows; the cache is authoritative above
        # the database (see module docstring).
        self._logger.debug("caching all metadata records")
        self.get_metadatas()
        self._logger.debug("cached %d metadata records", len(self._cache))
        if txdata_cache_size > 0:
            # How many of these can actually be cached is limited by the cache size.
            # Pre-warm only unsettled transactions (HasByteData set, StateSettled clear).
            self._logger.debug("attempting to cache unsettled transaction bytedata")
            rows = self._store.read(TxFlags.HasByteData, TxFlags.HasByteData|TxFlags.StateSettled)
            for row in rows:
                self._bytedata_cache.set(row[0], row[1])
            self._logger.debug("matched/cached %d unsettled transactions", len(rows))
    def set_store(self, store: TransactionTable) -> None:
        """Replace the backing transaction table (e.g. after a DB reopen)."""
        self._store = store
    def set_maximum_cache_size_for_bytedata(self, maximum_size: int,
            force_resize: bool=False) -> None:
        """Adjust the bytedata LRU budget; `force_resize` evicts immediately."""
        self._bytedata_cache.set_maximum_size(maximum_size, force_resize)
def _validate_transaction_bytes(self, tx_hash: bytes, bytedata: Optional[bytes]) -> bool:
if bytedata is None:
return True
return tx_hash == double_sha256(bytedata)
    def _entry_visible(self, entry_flags: int, flags: Optional[TxFlags]=None,
            mask: Optional[TxFlags]=None) -> bool:
        """
        Filter an entry based on it's flag bits compared to an optional comparison flag and flag
        mask value.
        - No flag and no mask: keep.
        - No flag and mask: keep if any masked bits are set.
        - Flag and no mask: keep if any of the flag bits are set.
        - Flag and mask: keep if the masked bits are the flags.
        """
        if flags is None:
            if mask is None:
                return True
            return (entry_flags & mask) != 0
        if mask is None:
            return (entry_flags & flags) != 0
        return (entry_flags & mask) == flags
    @staticmethod
    def _adjust_metadata_flags(data: TxData, flags: TxFlags) -> TxFlags:
        """Recompute the metadata-field flag bits from which TxData fields
        are present, preserving all non-metadata bits of `flags`."""
        # Clear all metadata-field bits, then set each according to presence.
        flags &= ~TxFlags.METADATA_FIELD_MASK
        flags |= TxFlags.HasFee if data.fee is not None else 0
        flags |= TxFlags.HasHeight if data.height is not None else 0
        flags |= TxFlags.HasPosition if data.position is not None else 0
        return flags
    @staticmethod
    def _validate_new_flags(tx_hash: bytes, flags: TxFlags) -> None:
        """Raise InvalidDataError if a state flag is set without bytedata."""
        # All current states are expected to have bytedata.
        if (flags & TxFlags.STATE_MASK) == 0 or (flags & TxFlags.HasByteData) != 0:
            return
        tx_id = hash_to_hex_str(tx_hash)
        raise InvalidDataError("setting uncleared state without bytedata "
            f"{tx_id} {TxFlags.to_repr(flags)}")
    def add_transaction(self, tx: Transaction, flags: TxFlags=TxFlags.Unset,
            completion_callback: Optional[CompletionCallbackType]=None) -> None:
        """Insert or update a parsed Transaction, always storing its bytedata.

        Dispatches to update() when the hash is already cached, otherwise to
        add(); in both cases HasByteData is forced on.
        """
        tx_hash = tx.hash()
        tx_hex = str(tx)
        bytedata = bytes.fromhex(tx_hex)
        date_updated = self._store._get_current_timestamp()
        if tx_hash in self._cache:
            self.update([ (tx_hash, TxData(date_added=date_updated, date_updated=date_updated),
                bytedata, flags | TxFlags.HasByteData) ], completion_callback=completion_callback)
        else:
            self.add([
                TransactionRow(tx_hash, TxData(date_added=date_updated, date_updated=date_updated),
                    bytedata, flags | TxFlags.HasByteData, None) ],
                completion_callback=completion_callback)
    def add(self, inserts: List[TransactionRow],
            completion_callback: Optional[CompletionCallbackType]=None) -> None:
        """Lock-protected wrapper around _add() for new transaction rows."""
        with self._lock:
            return self._add(inserts, completion_callback=completion_callback)
    def _add(self, inserts: List[TransactionRow],
            completion_callback: Optional[CompletionCallbackType]=None) -> None:
        """
        This infers the bytedata flag from the bytedata value for a given input row, and
        alters the flags to reflect that inference. This differs from update, which uses
        the input row's flag to indicate whether to retain the existing bytedata value/flag or
        overwrite them.

        Note: `inserts` is mutated in place -- each row is rewritten with the
        normalized metadata/flags before being written to the store.
        """
        date_added = self._store._get_current_timestamp()
        for i, (tx_hash, metadata, bytedata, add_flags, description) in enumerate(inserts):
            assert tx_hash not in self._cache, \
                f"Tx {hash_to_hex_str(tx_hash)} found in cache unexpectedly"
            # Derive the metadata-field bits from the actual field presence.
            flags = self._adjust_metadata_flags(metadata, add_flags)
            if bytedata is not None:
                flags |= TxFlags.HasByteData
            # If the caller supplied metadata bits, they must agree with what
            # was derived above.
            assert ((add_flags & TxFlags.METADATA_FIELD_MASK) == 0 or flags == add_flags), \
                f"{TxFlags.to_repr(flags)} != {TxFlags.to_repr(add_flags)}"
            self._validate_new_flags(tx_hash, flags)
            # Stamp both date fields with the insertion time.
            metadata = TxData(metadata.height, metadata.position, metadata.fee, date_added,
                date_added)
            self._cache[tx_hash] = TransactionCacheEntry(metadata, flags)
            if bytedata is not None:
                self._bytedata_cache.set(tx_hash, bytedata)
            inserts[i] = TransactionRow(tx_hash, metadata, bytedata, flags, description)
        self._store.create(inserts, completion_callback=completion_callback)
    def update(self, updates: List[Tuple[bytes, TxData, Optional[bytes], TxFlags]],
            completion_callback: Optional[CompletionCallbackType]=None) -> None:
        """Lock-protected wrapper around _update() for existing rows."""
        with self._lock:
            self._update(updates, completion_callback=completion_callback)
    def _update(self, updates: List[Tuple[bytes, TxData, Optional[bytes], TxFlags]],
            update_all: bool=True,
            completion_callback: Optional[CompletionCallbackType]=None) -> None:
        """
        The flagged changes are applied to the existing entry, leaving the unflagged aspects
        as they were. An example of this is bytedata, the bytedata in the existing entry should
        remain the same (and it's flag) if the update row's bytedata flag is clear. If the update
        row's bytedata flag is set, then the entry will get the update row's bytedata value and
        the appropriate flag to indicate whether it is None or not (overwriting the existing
        entry's bytedata/bytedata flag). This differs from add, which sets the flag based on
        the bytedata.

        Args:
            updates: (tx_hash, metadata, bytedata, flags) tuples; the flags
                select which aspects of each entry to overwrite.
            update_all: If True, every update hash must exist in the cache.
        """
        # For any given update entry there are some nuances to how the update is applied w/ flags.
        update_map = { t[0]: t for t in updates }
        desired_update_hashes = set(update_map)

        updated_entries: List[Tuple[bytes, TxData, Optional[bytes], TxFlags]] = []
        date_updated = self._store._get_current_timestamp()
        for tx_hash, entry in self._get_entries(tx_hashes=desired_update_hashes,
                require_all=update_all):
            _tx_hash, incoming_metadata, incoming_bytedata, incoming_flags = update_map[tx_hash]

            # Apply metadata changes: each field is taken from the incoming
            # metadata only if its corresponding Has* flag is set.
            fee = incoming_metadata.fee if incoming_flags & TxFlags.HasFee else entry.metadata.fee
            height = incoming_metadata.height if incoming_flags & TxFlags.HasHeight \
                else entry.metadata.height
            position = incoming_metadata.position if incoming_flags & TxFlags.HasPosition \
                else entry.metadata.position
            new_metadata = TxData(height, position, fee, entry.metadata.date_added, date_updated)
            flags = self._adjust_metadata_flags(new_metadata, entry.flags & ~TxFlags.STATE_MASK)
            # incoming_flags & STATE_MASK declares if the state flags are touched by the update.
            if incoming_flags & TxFlags.STATE_MASK != 0:
                flags |= incoming_flags & TxFlags.STATE_MASK
            else:
                flags |= entry.flags & TxFlags.STATE_MASK
            # incoming_flags & HasByteData declares if the bytedata is touched by the update.
            flags &= ~TxFlags.HasByteData
            if incoming_flags & TxFlags.HasByteData:
                flags |= TxFlags.HasByteData if incoming_bytedata is not None else TxFlags.Unset
            else:
                flags |= entry.flags & TxFlags.HasByteData

            # Skip no-op updates entirely.
            if entry.metadata == new_metadata and entry.flags == flags:
                continue

            self._validate_new_flags(tx_hash, flags)
            new_entry = TransactionCacheEntry(new_metadata, flags, entry.time_loaded)
            self._logger.debug("_update: %s %r %s %s %r %r", hash_to_hex_str(tx_hash),
                incoming_metadata, TxFlags.to_repr(incoming_flags),
                byte_repr(incoming_bytedata), entry, new_entry)
            self._cache[tx_hash] = new_entry
            if incoming_flags & TxFlags.HasByteData:
                self._bytedata_cache.set(tx_hash, incoming_bytedata)
            elif flags & TxFlags.HasByteData:
                # Indicate the user is not changing the bytedata, it's a metadata/flags update.
                incoming_bytedata = MAGIC_UNTOUCHED_BYTEDATA
            updated_entries.append((tx_hash, new_metadata, incoming_bytedata, flags))

        # The reason we don't dispatch metadata and entry updates as separate calls
        # is that there's no way of reusing a completion context for more than one thing.
        if len(updated_entries):
            self._store.update(updated_entries, completion_callback=completion_callback)
    def update_flags(self, tx_hash: bytes, flags: TxFlags, mask: Optional[TxFlags]=None,
            completion_callback: Optional[CompletionCallbackType]=None) -> TxFlags:
        """Modify the flags of an existing cached transaction, in cache and store.

        The metadata field flags (`METADATA_FIELD_MASK`) of the existing entry are
        always retained (forced into `mask`), while the remaining (state) flags are
        replaced by the non-metadata bits of `flags`. Returns the resulting flags.

        Raises AssertionError if the transaction is not present in the cache/store.
        """
        # This is an odd function. It logical ors metadata flags, but replaces the other
        # flags losing their values.
        if mask is None:
            mask = TxFlags.METADATA_FIELD_MASK
        else:
            mask |= TxFlags.METADATA_FIELD_MASK
        with self._lock:
            date_updated = self._store._get_current_timestamp()
            entry = self._get_entry(tx_hash)
            # The transaction must already exist for its flags to be updated.
            assert entry is not None
            # Keep the masked-in bits of the old flags; take only the non-metadata
            # bits from the incoming `flags`.
            entry.flags = (entry.flags & mask) | (flags & ~TxFlags.METADATA_FIELD_MASK)
            self._validate_new_flags(tx_hash, entry.flags)
            # Update the cached metadata for the new modification date.
            metadata = entry.metadata
            entry.metadata = TxData(metadata.height, metadata.position, metadata.fee,
                metadata.date_added, date_updated)
            # NOTE(review): the raw `flags`/`mask` pair is handed to the store, which
            # presumably applies the same combination formula as above -- confirm
            # against the store's update_flags implementation.
            self._store.update_flags([ (tx_hash, flags, mask, date_updated) ],
                completion_callback=completion_callback)
        return entry.flags
def update_proof(self, tx_hash: bytes, proof: TxProof,
completion_callback: Optional[CompletionCallbackType]=None) -> None:
with self._lock:
date_updated = self._store._get_current_timestamp()
entry = self._get_entry(tx_hash)
assert entry is not None
metadata = entry.metadata
entry.metadata = TxData(metadata.height, metadata.position, metadata.fee,
metadata.date_added, date_updated)
self._store.update_proof([ (tx_hash, proof, date_updated) ],
completion_callback=completion_callback)
def delete(self, tx_hash: bytes,
completion_callback: Optional[CompletionCallbackType]=None) -> None:
with self._lock:
self._logger.debug("cache_deletion: %s", hash_to_hex_str(tx_hash))
del self._cache[tx_hash]
self._bytedata_cache.set(tx_hash, None)
self._store.delete([ tx_hash ], completion_callback=completion_callback)
def get_flags(self, tx_hash: bytes) -> Optional[TxFlags]:
# We cache all metadata, so this can avoid touching the database.
entry = self._cache.get(tx_hash)
if entry is not None:
return entry.flags
return None
# NOTE: Only used by unit tests at this time.
def is_cached(self, tx_hash: bytes) -> bool:
return tx_hash in self._cache
# This should not be used to get
def get_entry(self, tx_hash: bytes, flags: Optional[TxFlags]=None,
mask: Optional[TxFlags]=None) -> Optional[TransactionCacheEntry]:
with self._lock:
return self._get_entry(tx_hash, flags, mask)
    def _get_entry(self, tx_hash: bytes, flags: Optional[TxFlags]=None,
            mask: Optional[TxFlags]=None,
            force_store_fetch: bool=False) -> Optional[TransactionCacheEntry]:
        """Look up a cache entry, falling back to the database when bytedata is
        wanted but not cached (or when `force_store_fetch` is set).

        Returns None when the transaction is unknown or fails the flags/mask
        filter. Raises InvalidDataError if bytedata read from the store fails
        hash validation.
        """
        # We want to hit the cache, but only if we can give them what they want. Generally if
        # something is cached, then all we may lack is the bytedata.
        if not force_store_fetch and tx_hash in self._cache:
            entry = self._cache[tx_hash]
            # If they filter the entry they request, we only give them a matched result.
            if not self._entry_visible(entry.flags, flags, mask):
                return None
            # If they don't want bytedata give them the entry.
            if mask is not None and (mask & TxFlags.HasByteData) == 0:
                return entry
            # If they do, and we have it cached, then give them the entry.
            bytedata = self._bytedata_cache.get(tx_hash)
            if bytedata is not None:
                return entry
            # Metadata is cached but the bytedata is not: fall through to a store read.
            force_store_fetch = True
        if not force_store_fetch:
            return None
        matches = self._store.read(flags, mask, tx_hashes=[tx_hash])
        if len(matches):
            tx_hash_, bytedata, flags_get, metadata = matches[0]
            if bytedata is None or self._validate_transaction_bytes(tx_hash, bytedata):
                # Overwrite any existing entry for this transaction. Due to the lock, and lack of
                # flushing we can assume that we will not be clobbering any fresh changes.
                entry = TransactionCacheEntry(metadata, flags_get)
                self._cache.update({ tx_hash: entry })
                if bytedata is not None:
                    self._bytedata_cache.set(tx_hash, bytedata)
                self._logger.debug("get_entry/cache_change: %r", (hash_to_hex_str(tx_hash),
                    entry, TxFlags.to_repr(flags), TxFlags.to_repr(mask)))
                # If they filter the entry they request, we only give them a matched result.
                if self._entry_visible(entry.flags, flags, mask):
                    return entry
                return None
            # The stored bytedata did not hash to `tx_hash`.
            raise InvalidDataError(tx_hash)
        # TODO: If something is requested that does not exist, it will miss the cache and wait
        # on the store access every time. It should be possible to cache misses and also maintain/
        # update them on other accesses. A complication is the flag/mask filtering, which will
        # not indicate presence of entries for the tx_hash.
        return None
def get_metadata(self, tx_hash: bytes, flags: Optional[TxFlags]=None,
mask: Optional[TxFlags]=None) -> Optional[TxData]:
with self._lock:
return self._get_metadata(tx_hash, flags, mask)
def _get_metadata(self, tx_hash: bytes, flags: Optional[TxFlags]=None,
mask: Optional[TxFlags]=None) -> Optional[TxData]:
if tx_hash in self._cache:
entry = self._cache[tx_hash]
return entry.metadata if self._entry_visible(entry.flags, flags, mask) else None
return None
def have_transaction_data(self, tx_hash: bytes) -> bool:
entry = self._cache.get(tx_hash)
return entry is not None and (entry.flags & TxFlags.HasByteData) != 0
def have_transaction_data_cached(self, tx_hash: bytes) -> bool:
return tx_hash in self._bytedata_cache
def get_transaction(self, tx_hash: bytes, flags: Optional[TxFlags]=None,
mask: Optional[TxFlags]=None) -> Optional[Transaction]:
assert mask is None or (mask & TxFlags.HasByteData) != 0, "filter excludes transaction"
results = self.get_transactions(flags, mask, [ tx_hash ])
if len(results):
return results[0][1]
return None
def get_transactions(self, flags: Optional[TxFlags]=None, mask: Optional[TxFlags]=None,
tx_hashes: Optional[Iterable[bytes]]=None) -> List[Tuple[bytes, Transaction]]:
with self._lock:
results = []
for tx_hash, bytedata in self.get_transaction_datas(flags, mask, tx_hashes):
results.append((tx_hash, Transaction.from_bytes(bytedata)))
return results
def get_transaction_data(self, tx_hash: bytes, flags: Optional[TxFlags]=None,
mask: Optional[TxFlags]=None) -> Optional[bytes]:
assert mask is None or (mask & TxFlags.HasByteData) != 0, "filter excludes transaction"
results = self.get_transaction_datas(flags, mask, [ tx_hash ])
if len(results):
return results[0][1]
return None
def get_transaction_datas(self, flags: Optional[TxFlags]=None, mask: Optional[TxFlags]=None,
tx_hashes: Optional[Iterable[bytes]]=None) -> List[Tuple[bytes, bytes]]:
with self._lock:
results = []
missing_tx_hashes = []
for tx_hash, entry in self._get_entries(flags, mask, tx_hashes):
if entry.flags & TxFlags.HasByteData == 0:
continue
bytedata = self._bytedata_cache.get(tx_hash)
if bytedata is not None:
results.append((tx_hash, bytedata))
else:
missing_tx_hashes.append(tx_hash)
if len(missing_tx_hashes):
for row in self._store.read(flags, mask, missing_tx_hashes):
if row[2] & TxFlags.HasByteData != 0:
results.append((row[0], cast(bytes, row[1])))
return results
def get_entries(self, flags: Optional[TxFlags]=None, mask: Optional[TxFlags]=None,
tx_hashes: Optional[Iterable[bytes]]=None,
require_all: bool=True) -> List[Tuple[bytes, TransactionCacheEntry]]:
"Get the metadata and flags for the matched transactions."
with self._lock:
return self._get_entries(flags, mask, tx_hashes, require_all)
def _get_entries(self, flags: Optional[TxFlags]=None, mask: Optional[TxFlags]=None,
tx_hashes: Optional[Iterable[bytes]]=None,
require_all: bool=True) -> List[Tuple[bytes, TransactionCacheEntry]]:
# Raises MissingRowError if any transaction id in `tx_hashes` is not in the cache afterward,
# if `require_all` is set.
require_all = require_all and tx_hashes is not None
results = []
if tx_hashes is not None:
for tx_hash in tx_hashes:
entry = self._cache.get(tx_hash)
if entry is not None and self._entry_visible(entry.flags, flags, mask):
results.append((tx_hash, entry))
if require_all:
wanted_hashes = set(tx_hashes)
have_hashes = set(t[0] for t in results)
if wanted_hashes != have_hashes:
raise MissingRowError(wanted_hashes - have_hashes)
else:
for tx_hash, entry in self._cache.items():
if self._entry_visible(entry.flags, flags, mask):
results.append((tx_hash, entry))
return results
def get_metadatas(self, flags: Optional[TxFlags]=None, mask: Optional[TxFlags]=None,
tx_hashes: Optional[Sequence[bytes]]=None,
require_all: bool=True) -> List[Tuple[bytes, TxData]]:
with self._lock:
return self._get_metadatas(flags=flags, mask=mask, tx_hashes=tx_hashes,
require_all=require_all)
    def _get_metadatas(self, flags: Optional[TxFlags]=None, mask: Optional[TxFlags]=None,
            tx_hashes: Optional[Sequence[bytes]]=None,
            require_all: bool=True) -> List[Tuple[bytes, TxData]]:
        """Return (tx_hash, metadata) pairs matching the flags/mask filter.

        When the in-memory cache is populated it is used exclusively; otherwise
        metadata is read from the store and cached. With `require_all` set and
        `tx_hashes` given, raises MissingRowError for any hash that cannot be
        resolved via the store path.
        """
        # NOTE(review): a non-empty cache is treated as "all metadata is cached".
        # On this path a requested hash missing from the cache raises KeyError
        # rather than MissingRowError -- confirm the all-cached invariant holds.
        if self._cache:
            if tx_hashes is not None:
                matches = []
                for tx_hash in tx_hashes:
                    entry = self._cache[tx_hash]
                    if self._entry_visible(entry.flags, flags, mask):
                        matches.append((tx_hash, entry.metadata))
                return matches
            return [ (t[0], t[1].metadata) for t in self._cache.items()
                if self._entry_visible(t[1].flags, flags, mask) ]
        # Store path: only fetch hashes that are not already cached.
        store_tx_hashes: Optional[Sequence[bytes]] = None
        if tx_hashes is not None:
            store_tx_hashes = [ tx_hash for tx_hash in tx_hashes if tx_hash not in self._cache ]
        cache_additions = {}
        new_matches = []
        existing_matches = []
        # tx_hashes will be None and store_tx_hashes will be None.
        # tx_hashes will be a list, and store_tx_hashes will be a list.
        if tx_hashes is None or len(cast(Sequence[bytes], store_tx_hashes)):
            for tx_hash, flags_get, metadata in self._store.read_metadata(
                    flags, mask, store_tx_hashes):
                # We have no way of knowing if the match already exists, and if it does we should
                # take the possibly full/complete with bytedata cached version, rather than
                # corrupt the cache with the limited metadata version.
                if tx_hash in self._cache:
                    existing_matches.append((tx_hash, self._cache[tx_hash].metadata))
                else:
                    new_matches.append((tx_hash, metadata))
                    cache_additions[tx_hash] = TransactionCacheEntry(metadata, flags_get)
        if len(cache_additions) > 0 or len(existing_matches) > 0:
            self._logger.debug("get_metadatas/cache_additions: adds=%d haves=%d %r...",
                len(cache_additions),
                len(existing_matches), existing_matches[:5])
        self._cache.update(cache_additions)
        results = []
        if store_tx_hashes is not None and len(store_tx_hashes):
            # Re-resolve every requested hash through the (now updated) cache so
            # that already-cached entries are also considered.
            assert tx_hashes is not None
            for tx_hash in tx_hashes:
                entry2 = self._cache.get(tx_hash)
                if entry2 is None:
                    if require_all:
                        raise MissingRowError(tx_hash)
                elif self._entry_visible(entry2.flags, flags, mask):
                    results.append((tx_hash, entry2.metadata))
        else:
            results = new_matches + existing_matches
        return results
def get_height(self, tx_hash: bytes) -> Optional[int]:
entry = self._cache.get(tx_hash)
if entry is not None and entry.flags & (TxFlags.StateSettled|TxFlags.StateCleared):
return entry.metadata.height
return None
def get_unsynced_hashes(self) -> List[bytes]:
entries = self.get_metadatas(flags=TxFlags.Unset, mask=TxFlags.HasByteData)
return [ t[0] for t in entries ]
def get_unverified_entries(self, watermark_height: int) \
-> List[Tuple[bytes, TransactionCacheEntry]]:
results = self.get_metadatas(
flags=TxFlags.HasByteData | TxFlags.HasHeight,
mask=TxFlags.HasByteData | TxFlags.HasPosition | TxFlags.HasHeight)
return [ (tx_hash, self._cache[tx_hash]) for (tx_hash, metadata) in results
if 0 < cast(int, metadata.height) <= watermark_height ]
def apply_reorg(self, reorg_height: int,
completion_callback: Optional[CompletionCallbackType]=None) -> None:
fetch_flags = TxFlags.StateSettled
fetch_mask = TxFlags.StateSettled
unverify_mask = ~(TxFlags.HasHeight | TxFlags.HasPosition | TxFlags.HasProofData |
TxFlags.STATE_MASK)
with self._lock:
date_updated = self._store._get_current_timestamp()
# This does not request bytedata so if all metadata is cached, will not hit the
# database.
store_updates = []
for (tx_hash, metadata) in self.get_metadatas(fetch_flags, fetch_mask):
if cast(int, metadata.height) > reorg_height:
# Update the cached version to match the changes we are going to apply.
entry = self._cache[tx_hash]
entry.flags = (entry.flags & unverify_mask) | TxFlags.StateCleared
# TODO(rt12) BACKLOG the real unconfirmed height may be -1 unconf parent
entry.metadata = TxData(height=0, fee=metadata.fee,
date_added=metadata.date_added, date_updated=date_updated)
store_updates.append((tx_hash, entry.metadata, entry.flags))
if len(store_updates):
self._store.update_metadata(store_updates,
completion_callback=completion_callback)
| 2.28125 | 2 |
8ball.py | Alex-Bydder/8-ball_Project | 1 | 12773769 | <filename>8ball.py<gh_stars>1-10
import random
# Responses to said usr_question
random_responses = ['Certainly', 'Likely', 'Possibly', 'Unlikely', 'Not going to happen', 'Most likely']
ran_num_response = random.randint(0, len(random_responses))
# What user asks
usr_question = (input('Ask a question: '))
# Ensuring that it is actually a question
if ("?" not in usr_question):
usr_question = (input('(No question Mark) Ask a question: '))
if (usr_question != ""):
print (random_responses[ran_num_response])
| 3.609375 | 4 |
src/dataset.py | sulaimanvesal/vertebraeSegementation | 6 | 12773770 | <filename>src/dataset.py<gh_stars>1-10
"""
@Author: <NAME>
Date: Tuesday, 04, 2020
"""
import numpy as np
import cv2
import pandas as pd
from skimage.exposure import match_histograms
from matplotlib import pyplot as plt
from albumentations import (
PadIfNeeded,
HorizontalFlip,
VerticalFlip,
CenterCrop,
Crop,
Compose,
Transpose,
RandomRotate90,
ElasticTransform,
GridDistortion,
OpticalDistortion,
RandomSizedCrop,
OneOf,
CLAHE,
RandomBrightnessContrast,
RandomGamma,
GaussNoise
)
from keras.utils import to_categorical
class ImageProcessor:
@staticmethod
def augmentation(image, mask, noise=False, transform=False, clahe=True, r_bright=True, r_gamma=True):
aug_list = [
VerticalFlip(p=0.5),
HorizontalFlip(p=0.5),
RandomRotate90(p=0.5),
]
if r_bright:
aug_list += [RandomBrightnessContrast(p=.5)]
if r_gamma:
aug_list += [RandomGamma(p=.5)]
if clahe:
aug_list += [CLAHE(p=1., always_apply=True)]
if noise:
aug_list += [GaussNoise(p=.5, var_limit=1.)]
if transform:
aug_list += [ElasticTransform(p=.5, sigma=1., alpha_affine=20, border_mode=0)]
aug = Compose(aug_list)
augmented = aug(image=image, mask=mask)
image_heavy = augmented['image']
mask_heavy = augmented['mask']
return image_heavy, mask_heavy
@staticmethod
def split_data(img_path):
"""
Load train csv file and split the data into train and validation!
:return:
"""
df_train = pd.read_csv(img_path)
ids_train = df_train['img']
return ids_train
@staticmethod
def crop_volume(vol, crop_size=112):
"""
:param vol:
:return:
"""
return np.array(vol[:,
int(vol.shape[1] / 2) - crop_size: int(vol.shape[1] / 2) + crop_size,
int(vol.shape[2] / 2) - crop_size: int(vol.shape[2] / 2) + crop_size, ])
class DataGenerator:
def __init__(self, df,
channel="channel_first",
apply_noise=False,
apply_transform=False,
phase="train",
apply_online_aug=True,
batch_size=16,
height=256,
width=256,
crop_size=0,
n_samples=-1,
offline_aug=False,
toprint=False):
assert phase == "train" or phase == "valid", r"phase has to be either'train' or 'valid'"
assert isinstance(apply_noise, bool), "apply_noise has to be bool"
assert isinstance(apply_online_aug, bool), "apply_online_aug has to be bool"
self._data = df
self._len = len(df)
self._shuffle_indices = np.arange(len(df))
self._shuffle_indices = np.random.permutation(self._shuffle_indices)
self._height, self._width = height, width
self._apply_aug = apply_online_aug
self._apply_noise = apply_noise
self._apply_tranform = apply_transform
self._crop_size = crop_size
self._phase = phase
self._channel = channel
self._batch_size = batch_size
self._index = 0
self._totalcount = 0
if n_samples == -1:
self._n_samples = len(df)
else:
self._n_samples = n_samples
self._offline_aug = offline_aug
self._toprint = toprint
def __len__(self):
return self._len
@property
def apply_aug(self):
return self._apply_aug
@apply_aug.setter
def apply_aug(self, aug):
assert isinstance(aug, bool), "apply_aug has to be bool"
self._apply_aug = aug
def get_image_paths(self, id):
if self._phase == "train":
img_path = './input/images/{}.png'.format(id)
mask_path = './input/masks/{}.npy'.format(id)
else:
img_path = './input/images/{}.png'.format(id)
mask_path = './input/masks/{}.npy'.format(id)
return img_path, mask_path
def get_images_masks(self, img_path, mask_path):
img = cv2.imread(img_path)
# img = cv2.resize(img, (self._width, self._height), interpolation=cv2.INTER_AREA)
mask = np.load(mask_path)
#plt.imshow(img, cmap='gray'), plt.imshow(mask, cmap='jet', alpha=0.5);
#plt.show()
# mask = cv2.resize(mask, (self._width, self._height), interpolation=cv2.INTER_AREA)
return img, mask
def __iter__(self):
# self._index = 0
self._totalcount = 0
return self
def __next__(self):
# while True:
# shuffle image names
x_batch = []
y_batch = []
indices = []
if self._totalcount >= self._n_samples:
# self._index = 0
self._totalcount = 0
# self._shuffle_indices = np.random.permutation(self._shuffle_indices)
raise StopIteration
for i in range(self._batch_size):
indices.append(self._index)
self._index += 1
self._totalcount += 1
self._index = self._index % self._len
if self._totalcount >= self._n_samples:
break
# if self._toprint:
# print(indices)
ids_train_batch = self._data.iloc[self._shuffle_indices[indices]]
for _id in ids_train_batch.values:
img_path, mask_path = self.get_image_paths(id=_id)
img, mask = self.get_images_masks(img_path=img_path, mask_path=mask_path)
if self._apply_aug:
img, mask = ImageProcessor.augmentation(img, mask, noise=self._apply_noise, transform=self._apply_tranform)
else:
aug = Compose([CLAHE(always_apply=True)])
augmented = aug(image=img, mask=mask)
img, mask = augmented["image"], augmented["mask"]
mask = np.expand_dims(mask, axis=-1)
assert mask.ndim == 3
x_batch.append(img)
y_batch.append(mask)
# min-max batch normalisation
x_batch = np.array(x_batch, np.float32) / 255.
if self._crop_size:
x_batch = ImageProcessor.crop_volume(x_batch, crop_size=self._crop_size // 2)
y_batch = ImageProcessor.crop_volume(np.array(y_batch), crop_size=self._crop_size // 2)
if self._channel == "channel_first":
x_batch = np.moveaxis(x_batch, -1, 1)
y_batch = to_categorical(np.array(y_batch), num_classes=3)
y_batch = np.moveaxis(y_batch, source=3, destination=1)
return x_batch, y_batch
if __name__ == "__main__":
ids_train = ImageProcessor.split_data("./input/trainA.csv")
ids_valid = ImageProcessor.split_data("./input/validA.csv")
bs = 16
num_samples = 1000
trainA_generator = DataGenerator(df=ids_valid,
channel="channel_first",
apply_noise=False,
phase="valid",
apply_online_aug=False,
batch_size=5,
n_samples=-1)
img, mask = trainA_generator.__next__()
print(np.mean(img), np.std(img), img.shape, mask.shape)
temp = np.argmax(mask, axis=1)
temp2 = np.moveaxis(img, source=1, destination=3)
f = plt.figure()
f.add_subplot(1, 2, 1)
plt.imshow(temp2[4], cmap='gray'),
plt.title('Spine MR Image')
f.add_subplot(1, 2, 2)
plt.imshow(temp2[4], cmap='gray'),
plt.imshow(temp[4], cmap='jet', alpha=0.5);
plt.title('Ground Truth Mask')
plt.show(block=True) | 2.25 | 2 |
deep500/frameworks/reference/custom_operators/python/sum_op.py | khoaideptrai/deep500 | 90 | 12773771 | <reponame>khoaideptrai/deep500
import numpy as np
from deep500.lv0.operators.operator_interface import CustomPythonOp
class SumOp(CustomPythonOp):
def __init__(self, input_descriptors, output_descriptors):
super(SumOp, self).__init__(input_descriptors, output_descriptors)
self._input_desc = input_descriptors
self._output_desc = output_descriptors
def forward(self, *inputs):
temp = np.copy(inputs[0])
for i in range(1, len(inputs)):
temp = temp + inputs[i]
return temp
def backward(self, grads, fwd_inputs, fwd_outputs):
num_inputs = len(fwd_inputs)
grad_fwd_inputs = [grads[0].copy()] * num_inputs
for i in range(num_inputs):
if fwd_inputs[i].shape != grads[0].shape:
temp_shape = list(fwd_inputs[i].shape)
temp_extend = [1] * (len(grads[0].shape)-len(fwd_inputs[i].shape))
temp_shape = temp_extend + temp_shape
for j in range(len(temp_shape)):
if temp_shape[j] == 1:
grad_fwd_inputs[i] = np.sum(grad_fwd_inputs[i], j, keepdims=True)
grad_fwd_inputs[i] = np.reshape(grad_fwd_inputs[i], fwd_inputs[i].shape)
return grad_fwd_inputs | 2.375 | 2 |
utils_tests.py | DucAnhPhi/LinguisticAnalysis | 1 | 12773772 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 3 23:36:17 2017
Unit tests for utils.py
@author: duc
"""
import unittest
import utils as ut
from nltk.corpus import cmudict
def to_string(tokenized):
return " ".join(sum(tokenized, []))
class UtilTests(unittest.TestCase):
#------ test boolean functions ------------------------------------------------
def test_is_link(self):
s = "http://t.co/rlqo5xfbul"
self.assertFalse(ut.is_not_link(s))
def test_is_not_link(self):
s = "fake.website"
self.assertTrue(ut.is_not_link(s))
def test_is_contraction(self):
s = "couldn't"
self.assertFalse(ut.is_not_contraction(s))
def test_is_not_contraction(self):
s = "peoples'"
self.assertTrue(ut.is_not_contraction(s))
def test_is_compound(self):
s = "word-compound"
self.assertFalse(ut.is_not_compound(s))
def test_is_not_compound(self):
s = "wordcompound"
self.assertTrue(ut.is_not_compound(s))
def test_is_emoticon(self):
s = "xd"
self.assertFalse(ut.is_not_emoticon(s))
def test_is_not_emoticon(self):
s = "exd"
self.assertTrue(ut.is_not_emoticon(s))
#------------------------------------------------------------------------------
#------ test ut.remove_special_characters function ----------------------------
def test_no_punctuation(self):
s = "He said: 'Hey, my name is... Tim!' - Tim."
self.assertEqual(
to_string(ut.remove_special_characters(s)),
"he said hey my name is tim tim"
)
def test_no_emojis(self):
s = "💪🔥"
self.assertEqual(to_string(ut.remove_special_characters(s)), "")
def test_no_twitter_signs(self):
s = "#scandal @elonmusk #innovation @here"
self.assertEqual(
to_string(ut.remove_special_characters(s)),
"scandal elonmusk innovation here"
)
def test_numbers(self):
s = "1,2 1.2 1,000"
self.assertEqual(
to_string(ut.remove_special_characters(s)),
"12 12 1000"
)
def test_no_emoticons_without_letters_or_numbers(self):
s = (
"Here are some emoticons without letters or numbers in them"
" >:( :) :-)"
)
self.assertEqual(
to_string(ut.remove_special_characters(s)),
"here are some emoticons without letters or numbers in them"
)
#------------------------------------------------------------------------------
#------ test remove functions -------------------------------------------------
def test_no_emoticons_with_letters_or_numbers(self):
s = (
"here are some emoticons containing letters or numbers"
" :D :d :P :p :'D xd :o which the tokenizer may not know"
" :-3 :3 8) 8-) <3 </3"
)
self.assertEqual(
ut.remove_emoticons(s),
(
"here are some emoticons containing letters or numbers"
" which the tokenizer may not know"
)
)
def test_no_links(self):
s = (
"some links http://t.co/rlqo5xfbul www.google.com"
" bplaced.homepage.net/article/2221 g.com g.co"
)
self.assertEqual(ut.remove_links(s), "some links")
def test_no_stopwords(self):
s = [["i", "couldn","t", "wouldn", "t", "to", "do", "this"]]
self.assertEqual(ut.remove_stopwords(s), [[]])
def test_no_retweets(self):
s = [
"RT @realDonaldTrump: This is really sad! Fake news.",
"Some random tweet",
"RT @test: test"
]
self.assertEqual(ut.remove_retweets(s), ["Some random tweet"])
#------------------------------------------------------------------------------
#------ test split functions --------------------------------------------------
def test_split_compounds(self):
s = (
"e-mail enterprise-level level-14"
" three-level-building best-in-class"
)
self.assertEqual(
ut.split_compounds(s),
(
"e mail enterprise level level 14"
" three level building best in class"
)
)
def test_split_contractions(self):
s = r"I'm won't we'll can't he's that's there's"
self.assertEqual(
ut.split_contractions(s),
"i m won t we ll can t he s that s there s"
)
#------------------------------------------------------------------------------
#------ test count functions --------------------------------------------------
def test_count_word_syllables(self):
pronouncingDict = cmudict.dict()
strings = {
"123456789": 0,
"supercalifragilisticexpialidocious": 14,
"demagogue": 3,
"anathema": 4,
"payday": 2,
"Syrian": 3,
"crepuscular": 4,
"preservative": 4,
"significantly": 5,
"embezzlement": 4
}
for string in strings:
sylCount = ut.get_word_syllables(string, pronouncingDict)
self.assertEqual(sylCount, strings[string])
def test_count_word_syllables_offline(self):
# fails at supercalifragilistic..., asserts 13 instead of 14 syllables
# print accuracy of function instead
pronouncingDict = cmudict.dict()
strings = {
"123456789": 0,
"supercalifragilisticexpialidocious": 14,
"demagogue": 3,
"anathema": 4,
"payday": 2,
"Syrian": 3,
"crepuscular": 4,
"preservative": 4,
"significantly": 5,
"embezzlement": 4
}
accuracy = 0
for string in strings:
sylCount = ut.get_word_syllables_offline(string, pronouncingDict)
if sylCount == strings[string]:
accuracy += 10
print("\nsyllable counter offline accuracy: " + str(accuracy) + "%")
#------------------------------------------------------------------------------
def test_preprocessing(self):
s = (
"💪🔥 >:( xd <3 :'D http://t.co/rlqo5xfbul www.google.com e-mail"
" three-level-building I'm wouldn't @trump #bad"
" 1.2 Hi, my name is: Jon!? Next sentence."
)
self.assertEqual(
to_string(ut.preprocess(s)),
(
"e mail three level building i m wouldn t"
" trump bad 12 hi my name is jon next sentence"
)
)
if __name__ == '__main__':
unittest.main() | 3.359375 | 3 |
test/test_ex5.py | hemincong/MachineLearningExercise | 1 | 12773773 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import numpy as np
import scipy.io
# Exercise 5 | Regularized Linear Regression and Bias-Variance
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# exercise. You will need to complete the following functions:
#
# linearRegCostFunction.m
# learningCurve.m
# validationCurve.m
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
#
class test_ex5_regularized_linear_regressionand_bias_vs_variance(unittest.TestCase):
@classmethod
def setUp(cls):
# Load Training Data
print('Loading and Visualizing Data ...')
data_file = "resource/ex5data1.mat"
# Load
# You will have X, y, Xval, yval, Xtest, ytest in your environment
mat = scipy.io.loadmat(data_file)
cls.X = mat["X"]
cls.y = mat["y"]
cls.Xval = mat["Xval"]
cls.yval = mat["yval"]
cls.Xtest = mat["Xtest"]
cls.ytest = mat["ytest"]
cls.m = np.shape(cls.X)[0]
# =========== Part 1: Loading and Visualizing Data =============
# We start the exercise by first loading and visualizing the dataset.
# The following code will load the dataset into your environment and plot
# the data.
#
def test_load_and_visualzing_data(self):
import matplotlib.pyplot as plt
# print("point_end_y: {max_y}".format(max_y = point_end_y))
plt.figure(1)
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
plt.scatter(self.X, self.y, marker='o', color='k', s=10)
plt.show()
# Plot training data
print('Program paused. Press enter to continue.')
# =========== Part 2: Regularized Linear Regression Cost =============
# You should now implement the cost function for regularized linear
# regression.
def test_regularized_linear_regression_cost_and_grad(self):
# m = Number of examples
theta = np.array([[1], [1]])
X_padded = np.column_stack((np.ones((self.m, 1)), self.X))
from ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction import linearRegCostFunction
J, grad = linearRegCostFunction(X_padded, self.y, theta, 1)
self.assertAlmostEqual(J, 303.993, delta=0.001)
print('Cost at theta = [1 ; 1]: {cost} \n'
'(this value should be about 303.993192)'.format(cost=J))
# =========== Part 3: Regularized Linear Regression Gradient =============
# You should now implement the gradient for regularized linear
# regression.
self.assertAlmostEqual(grad[0], -15.303016, delta=0.0001)
self.assertAlmostEqual(grad[1], 598.250744, delta=0.0001)
print('Gradient at theta = [1 ; 1]: [{grad_0}; {grad_1}] \n'
'(this value should be about [-15.303016; 598.250744])\n'.format(grad_0=grad[0], grad_1=grad[1]))
# =========== Part 4: Train Linear Regression =============
# Once you have implemented the cost and gradient correctly, the
# trainLinearReg function will use your cost function to train
# regularized linear regression.
#
# Write Up Note: The data is non - linear, so this will not give a great
# fit.
#
def test_train_linear_reg(self):
from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg
# Train linear regression with lambda = 0
_lambda = 0
x_with_bias = np.column_stack((np.ones(self.m), self.X))
cost, theta = trainLinearReg(x_with_bias, self.y, _lambda)
ret = x_with_bias.dot(theta)
import matplotlib.pyplot as plt
plt.figure(1)
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
plt.scatter(self.X, self.y, marker='x', c='r', s=30, linewidth=2)
plt.plot(self.X, ret, linewidth=2)
plt.show()
# =========== Part 5: Learning Curve for Linear Regression =============
# Next, you should implement the learningCurve function.
#
# Write Up Note: Since the model is underfitting the data, we expect to
# see a graph with "high bias" -- slide 8 in ML-advice.pdf
#
def test_learning_curve_for_linear_regression(self):
_lambda = 0
from ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve import learningCurve
x_with_bias = np.column_stack((np.ones(self.m), self.X))
x_val_with_bias = np.column_stack((np.ones(np.shape(self.Xval)[0]), self.Xval))
error_train, error_val = learningCurve(x_with_bias, self.y, x_val_with_bias, self.yval, 0)
print('# Training Examples\tTrain Error\tCross Validation Error')
for i in range(self.m):
print(' \t{index}\t\t{error_train}\t{error_val}\n'.format(index=i,
error_train=error_train[i],
error_val=error_val[i]))
import matplotlib.pyplot as plt
temp = np.array([x for x in range(1, self.m + 1)])
# plt.plot(1:m, error_train, 1:m, error_val);
plt.title('Learning curve for linear regression')
plt.xlabel('Number of training examples')
plt.ylabel('Error')
plt.plot(temp, np.array(error_train), color='b', linewidth=2, label='Train')
plt.plot(temp, np.array(error_val), color='y', linewidth=2, label='Cross Validation')
plt.legend()
plt.show(block=True)
# =========== Part 6: Feature Mapping for Polynomial Regression =============
# One solution to this is to use polynomial regression.You should now
# complete polyFeatures to map each example into its powers
#
def test_feature_mapping_for_polynomial_regression(self):
p = 8
# Map X onto Polynomial Features and Normalize
from ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures import polyFeatures
X_poly = polyFeatures(self.X, p)
X_poly_m, X_poly_n = np.shape(X_poly)
self.assertEqual(X_poly_m, self.m)
self.assertEqual(X_poly_n, p)
from ex5_regularized_linear_regressionand_bias_vs_variance.featureNormalize import featureNormalize
X_poly, mu, sigma = featureNormalize(X_poly)
X_poly = np.column_stack((np.ones((self.m, 1)), X_poly))
X_poly_test = polyFeatures(self.Xtest, p)
X_poly_test_m, X_poly_test_n = np.shape(X_poly_test)
self.assertEqual(X_poly_test_m, np.shape(self.Xtest)[0])
self.assertEqual(X_poly_test_n, p)
X_poly_test = X_poly_test - mu
X_poly_test = X_poly_test / sigma
X_poly_test = np.column_stack((np.ones((X_poly_test.shape[0], 1)), X_poly_test))
X_poly_val = polyFeatures(self.Xval, p)
X_poly_val_m, X_poly_val_n = np.shape(X_poly_val)
self.assertEqual(X_poly_val_m, np.shape(self.Xval)[0])
self.assertEqual(X_poly_val_n, p)
X_poly_val = X_poly_val - mu
X_poly_val = X_poly_val / sigma
X_poly_val = np.column_stack((np.ones((X_poly_val.shape[0], 1)), X_poly_val))
print('Normalized Training Example 1:\n'
' {X_poly} '.format(X_poly=X_poly))
# =========== Part 7: Learning Curve for Polynomial Regression =============
# Now, you will get to experiment with polynomial regression with multiple
# values of lambda .The code below runs polynomial regression with
# lambda = 0. You should try running the code with different values of
# lambda to see how the fit and learning curve change.
#
_lambda = 0
from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg
cost, theta = trainLinearReg(X_poly, self.y, _lambda)
self.assertIsNotNone(cost)
self.assertIsNotNone(theta)
import matplotlib.pyplot as plt
plt.figure(1)
plt.scatter(self.X, self.y, marker='x', c='r', s=30, linewidth=2)
plt.xlim([-80, 80])
plt.ylim([-20, 60])
plt.xlabel('Change in water level(x)')
plt.ylabel('Water flowing out of the dam(y)')
plt.title('Polynomial Regression Fit (lambda = {:f})'.format(_lambda))
# plt.plot(self.X, self.y, 'rx', markersize=10, linewidth=1.5)
from ex5_regularized_linear_regressionand_bias_vs_variance.plotFit import plotFit
plotFit(min(self.X), max(self.X), mu, sigma, theta, p)
plt.show(block=False)
plt.figure(2)
from ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve import learningCurve
error_train, error_val = learningCurve(X_poly, self.y, X_poly_val, self.yval, 0)
p1, p2 = plt.plot(range(1, self.m + 1), error_train, range(1, self.m + 1), error_val)
plt.legend((p1, p2), ('Train', 'Cross Validation'))
plt.show(block=False)
print('Polynomial Regression (lambda =%{_lambda})'.format(_lambda=_lambda))
print('# Training Examples\tTrain Error\tCross Validation Error')
for i in range(0, self.m):
print('\t{i}\t\t{error_train}\t{error_val}'.format(i=i, error_train=error_train[i], error_val=error_val[i]))
# =========== Part 8: Validation for Selecting Lambda =============
# You will now implement validationCurve to test various values of
# lambda on a validation set. You will then use this to select the
# "best" lambda value.
#
from ex5_regularized_linear_regressionand_bias_vs_variance.validationCurve import validationCurve
lambda_vec, error_train, error_val = validationCurve(X_poly, self.y, X_poly_val, self.yval)
self.assertEqual(len(error_train), len(lambda_vec))
self.assertEqual(len(error_val), len(lambda_vec))
plt.close('all')
p1, p2, = plt.plot(lambda_vec, error_train, lambda_vec, error_val)
plt.legend((p1, p2), ('Train', 'Cross Validation'))
plt.xlabel('lambda')
plt.ylabel('Error')
plt.show(block=False)
print('lambda\t\tTrain Error\tValidation Error')
for i in range(len(lambda_vec)):
print(
'{lambda_vec}\t{error_train}\t{error_val}'.format(lambda_vec=lambda_vec[i], error_train=error_train[i],
error_val=error_val[i]))
# =========== Part 9: Computing test set error and Plotting learning curves with randomly selected examples
# ============= best lambda value from previous step
lambda_val = 3
# note that we're using X_poly - polynomial linear regression with polynomial features
from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg
_, theta = trainLinearReg(X_poly, self.y, lambda_val)
# because we're using X_poly, we also have to use X_poly_test with polynomial features
from ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction import linearRegCostFunction
error_test, _ = linearRegCostFunction(X_poly_test, self.ytest, theta, 0)
print('Test set error: {error_test}'.format(error_test=error_test)) # expected 3.859
# why? something wrong
# self.assertAlmostEqual(error_test, 3.859, delta=0.01)
# =========== Part 10: Plot learning curves with randomly selected examples =============
#
# lambda_val value for this step
lambda_val = 0.01
times = 50
error_train_rand = np.zeros((self.m, times))
error_val_rand = np.zeros((self.m, times))
for i in range(self.m):
for k in range(times):
rand_sample_train = np.random.permutation(X_poly.shape[0])
rand_sample_train = rand_sample_train[:i + 1]
rand_sample_val = np.random.permutation(X_poly_val.shape[0])
rand_sample_val = rand_sample_val[:i + 1]
X_poly_train_rand = X_poly[rand_sample_train, :]
y_train_rand = self.y[rand_sample_train]
X_poly_val_rand = X_poly_val[rand_sample_val, :]
y_val_rand = self.yval[rand_sample_val]
_, theta = trainLinearReg(X_poly_train_rand, y_train_rand, lambda_val)
cost, _ = linearRegCostFunction(X_poly_train_rand, y_train_rand, np.asarray(theta), 0)
error_train_rand[i, k] = cost
cost, _ = linearRegCostFunction(X_poly_val_rand, y_val_rand, theta, 0)
error_val_rand[i, k] = cost
error_train = np.mean(error_train_rand, axis=1)
error_val = np.mean(error_val_rand, axis=1)
p1, p2 = plt.plot(range(self.m), error_train, range(self.m), error_val)
plt.title('Polynomial Regression Learning Curve (lambda = {:f})'.format(lambda_val))
plt.legend((p1, p2), ('Train', 'Cross Validation'))
plt.xlabel('Number of training examples')
plt.ylabel('Error')
plt.axis([0, 13, 0, 150])
plt.show(block=False)
# Allow the test module to be executed directly (python this_file.py)
# instead of only through a test runner.
if __name__ == '__main__':
    unittest.main()
| 3.75 | 4 |
rgb.py | AlexandrShcherbakov/python_2019 | 2 | 12773774 | <gh_stars>1-10
# X11/Tk color-name table: maps every named color (both the lowercase and
# CamelCase spellings, the numbered shade variants like 'snow1'..'snow4',
# and the 'gray0'/'grey0'..'gray100'/'grey100' ramp) to its RGB components.
# NOTE: components are stored as decimal STRINGS, not ints — callers are
# expected to convert if they need numeric values.
Colors = {'snow': ('255', '250', '250'),
          'ghostwhite': ('248', '248', '255'),
          'GhostWhite': ('248', '248', '255'),
          'whitesmoke': ('245', '245', '245'),
          'WhiteSmoke': ('245', '245', '245'),
          'gainsboro': ('220', '220', '220'),
          'floralwhite': ('255', '250', '240'),
          'FloralWhite': ('255', '250', '240'),
          'oldlace': ('253', '245', '230'),
          'OldLace': ('253', '245', '230'),
          'linen': ('250', '240', '230'),
          'antiquewhite': ('250', '235', '215'),
          'AntiqueWhite': ('250', '235', '215'),
          'papayawhip': ('255', '239', '213'),
          'PapayaWhip': ('255', '239', '213'),
          'blanchedalmond': ('255', '235', '205'),
          'BlanchedAlmond': ('255', '235', '205'),
          'bisque': ('255', '228', '196'),
          'peachpuff': ('255', '218', '185'),
          'PeachPuff': ('255', '218', '185'),
          'navajowhite': ('255', '222', '173'),
          'NavajoWhite': ('255', '222', '173'),
          'moccasin': ('255', '228', '181'),
          'cornsilk': ('255', '248', '220'),
          'ivory': ('255', '255', '240'),
          'lemonchiffon': ('255', '250', '205'),
          'LemonChiffon': ('255', '250', '205'),
          'seashell': ('255', '245', '238'),
          'honeydew': ('240', '255', '240'),
          'mintcream': ('245', '255', '250'),
          'MintCream': ('245', '255', '250'),
          'azure': ('240', '255', '255'),
          'aliceblue': ('240', '248', '255'),
          'AliceBlue': ('240', '248', '255'),
          'lavender': ('230', '230', '250'),
          'lavenderblush': ('255', '240', '245'),
          'LavenderBlush': ('255', '240', '245'),
          'mistyrose': ('255', '228', '225'),
          'MistyRose': ('255', '228', '225'),
          'white': ('255', '255', '255'),
          'black': ('0', '0', '0'),
          'darkslategray': ('47', '79', '79'),
          'DarkSlateGray': ('47', '79', '79'),
          'darkslategrey': ('47', '79', '79'),
          'DarkSlateGrey': ('47', '79', '79'),
          'dimgray': ('105', '105', '105'),
          'DimGray': ('105', '105', '105'),
          'dimgrey': ('105', '105', '105'),
          'DimGrey': ('105', '105', '105'),
          'slategray': ('112', '128', '144'),
          'SlateGray': ('112', '128', '144'),
          'slategrey': ('112', '128', '144'),
          'SlateGrey': ('112', '128', '144'),
          'lightslategray': ('119', '136', '153'),
          'LightSlateGray': ('119', '136', '153'),
          'lightslategrey': ('119', '136', '153'),
          'LightSlateGrey': ('119', '136', '153'),
          'gray': ('190', '190', '190'),
          'grey': ('190', '190', '190'),
          'lightgrey': ('211', '211', '211'),
          'LightGrey': ('211', '211', '211'),
          'lightgray': ('211', '211', '211'),
          'LightGray': ('211', '211', '211'),
          'midnightblue': ('25', '25', '112'),
          'MidnightBlue': ('25', '25', '112'),
          'navy': ('0', '0', '128'),
          'navyblue': ('0', '0', '128'),
          'NavyBlue': ('0', '0', '128'),
          'cornflowerblue': ('100', '149', '237'),
          'CornflowerBlue': ('100', '149', '237'),
          'darkslateblue': ('72', '61', '139'),
          'DarkSlateBlue': ('72', '61', '139'),
          'slateblue': ('106', '90', '205'),
          'SlateBlue': ('106', '90', '205'),
          'mediumslateblue': ('123', '104', '238'),
          'MediumSlateBlue': ('123', '104', '238'),
          'lightslateblue': ('132', '112', '255'),
          'LightSlateBlue': ('132', '112', '255'),
          'mediumblue': ('0', '0', '205'),
          'MediumBlue': ('0', '0', '205'),
          'royalblue': ('65', '105', '225'),
          'RoyalBlue': ('65', '105', '225'),
          'blue': ('0', '0', '255'),
          'dodgerblue': ('30', '144', '255'),
          'DodgerBlue': ('30', '144', '255'),
          'deepskyblue': ('0', '191', '255'),
          'DeepSkyBlue': ('0', '191', '255'),
          'skyblue': ('135', '206', '235'),
          'SkyBlue': ('135', '206', '235'),
          'lightskyblue': ('135', '206', '250'),
          'LightSkyBlue': ('135', '206', '250'),
          'steelblue': ('70', '130', '180'),
          'SteelBlue': ('70', '130', '180'),
          'lightsteelblue': ('176', '196', '222'),
          'LightSteelBlue': ('176', '196', '222'),
          'lightblue': ('173', '216', '230'),
          'LightBlue': ('173', '216', '230'),
          'powderblue': ('176', '224', '230'),
          'PowderBlue': ('176', '224', '230'),
          'paleturquoise': ('175', '238', '238'),
          'PaleTurquoise': ('175', '238', '238'),
          'darkturquoise': ('0', '206', '209'),
          'DarkTurquoise': ('0', '206', '209'),
          'mediumturquoise': ('72', '209', '204'),
          'MediumTurquoise': ('72', '209', '204'),
          'turquoise': ('64', '224', '208'),
          'cyan': ('0', '255', '255'),
          'lightcyan': ('224', '255', '255'),
          'LightCyan': ('224', '255', '255'),
          'cadetblue': ('95', '158', '160'),
          'CadetBlue': ('95', '158', '160'),
          'mediumaquamarine': ('102', '205', '170'),
          'MediumAquamarine': ('102', '205', '170'),
          'aquamarine': ('127', '255', '212'),
          'darkgreen': ('0', '100', '0'),
          'DarkGreen': ('0', '100', '0'),
          'darkolivegreen': ('85', '107', '47'),
          'DarkOliveGreen': ('85', '107', '47'),
          'darkseagreen': ('143', '188', '143'),
          'DarkSeaGreen': ('143', '188', '143'),
          'seagreen': ('46', '139', '87'),
          'SeaGreen': ('46', '139', '87'),
          'mediumseagreen': ('60', '179', '113'),
          'MediumSeaGreen': ('60', '179', '113'),
          'lightseagreen': ('32', '178', '170'),
          'LightSeaGreen': ('32', '178', '170'),
          'palegreen': ('152', '251', '152'),
          'PaleGreen': ('152', '251', '152'),
          'springgreen': ('0', '255', '127'),
          'SpringGreen': ('0', '255', '127'),
          'lawngreen': ('124', '252', '0'),
          'LawnGreen': ('124', '252', '0'),
          'green': ('0', '255', '0'),
          'chartreuse': ('127', '255', '0'),
          'mediumspringgreen': ('0', '250', '154'),
          'MediumSpringGreen': ('0', '250', '154'),
          'greenyellow': ('173', '255', '47'),
          'GreenYellow': ('173', '255', '47'),
          'limegreen': ('50', '205', '50'),
          'LimeGreen': ('50', '205', '50'),
          'yellowgreen': ('154', '205', '50'),
          'YellowGreen': ('154', '205', '50'),
          'forestgreen': ('34', '139', '34'),
          'ForestGreen': ('34', '139', '34'),
          'olivedrab': ('107', '142', '35'),
          'OliveDrab': ('107', '142', '35'),
          'darkkhaki': ('189', '183', '107'),
          'DarkKhaki': ('189', '183', '107'),
          'khaki': ('240', '230', '140'),
          'palegoldenrod': ('238', '232', '170'),
          'PaleGoldenrod': ('238', '232', '170'),
          'lightgoldenrodyellow': ('250', '250', '210'),
          'LightGoldenrodYellow': ('250', '250', '210'),
          'lightyellow': ('255', '255', '224'),
          'LightYellow': ('255', '255', '224'),
          'yellow': ('255', '255', '0'),
          'gold': ('255', '215', '0'),
          'lightgoldenrod': ('238', '221', '130'),
          'LightGoldenrod': ('238', '221', '130'),
          'goldenrod': ('218', '165', '32'),
          'darkgoldenrod': ('184', '134', '11'),
          'DarkGoldenrod': ('184', '134', '11'),
          'rosybrown': ('188', '143', '143'),
          'RosyBrown': ('188', '143', '143'),
          'indianred': ('205', '92', '92'),
          'IndianRed': ('205', '92', '92'),
          'saddlebrown': ('139', '69', '19'),
          'SaddleBrown': ('139', '69', '19'),
          'sienna': ('160', '82', '45'),
          'peru': ('205', '133', '63'),
          'burlywood': ('222', '184', '135'),
          'beige': ('245', '245', '220'),
          'wheat': ('245', '222', '179'),
          'sandybrown': ('244', '164', '96'),
          'SandyBrown': ('244', '164', '96'),
          'tan': ('210', '180', '140'),
          'chocolate': ('210', '105', '30'),
          'firebrick': ('178', '34', '34'),
          'brown': ('165', '42', '42'),
          'darksalmon': ('233', '150', '122'),
          'DarkSalmon': ('233', '150', '122'),
          'salmon': ('250', '128', '114'),
          'lightsalmon': ('255', '160', '122'),
          'LightSalmon': ('255', '160', '122'),
          'orange': ('255', '165', '0'),
          'darkorange': ('255', '140', '0'),
          'DarkOrange': ('255', '140', '0'),
          'coral': ('255', '127', '80'),
          'lightcoral': ('240', '128', '128'),
          'LightCoral': ('240', '128', '128'),
          'tomato': ('255', '99', '71'),
          'orangered': ('255', '69', '0'),
          'OrangeRed': ('255', '69', '0'),
          'red': ('255', '0', '0'),
          'hotpink': ('255', '105', '180'),
          'HotPink': ('255', '105', '180'),
          'deeppink': ('255', '20', '147'),
          'DeepPink': ('255', '20', '147'),
          'pink': ('255', '192', '203'),
          'lightpink': ('255', '182', '193'),
          'LightPink': ('255', '182', '193'),
          'palevioletred': ('219', '112', '147'),
          'PaleVioletRed': ('219', '112', '147'),
          'maroon': ('176', '48', '96'),
          'mediumvioletred': ('199', '21', '133'),
          'MediumVioletRed': ('199', '21', '133'),
          'violetred': ('208', '32', '144'),
          'VioletRed': ('208', '32', '144'),
          'magenta': ('255', '0', '255'),
          'violet': ('238', '130', '238'),
          'plum': ('221', '160', '221'),
          'orchid': ('218', '112', '214'),
          'mediumorchid': ('186', '85', '211'),
          'MediumOrchid': ('186', '85', '211'),
          'darkorchid': ('153', '50', '204'),
          'DarkOrchid': ('153', '50', '204'),
          'darkviolet': ('148', '0', '211'),
          'DarkViolet': ('148', '0', '211'),
          'blueviolet': ('138', '43', '226'),
          'BlueViolet': ('138', '43', '226'),
          'purple': ('160', '32', '240'),
          'mediumpurple': ('147', '112', '219'),
          'MediumPurple': ('147', '112', '219'),
          'thistle': ('216', '191', '216'),
          'snow1': ('255', '250', '250'),
          'snow2': ('238', '233', '233'),
          'snow3': ('205', '201', '201'),
          'snow4': ('139', '137', '137'),
          'seashell1': ('255', '245', '238'),
          'seashell2': ('238', '229', '222'),
          'seashell3': ('205', '197', '191'),
          'seashell4': ('139', '134', '130'),
          'AntiqueWhite1': ('255', '239', '219'),
          'AntiqueWhite2': ('238', '223', '204'),
          'AntiqueWhite3': ('205', '192', '176'),
          'AntiqueWhite4': ('139', '131', '120'),
          'bisque1': ('255', '228', '196'),
          'bisque2': ('238', '213', '183'),
          'bisque3': ('205', '183', '158'),
          'bisque4': ('139', '125', '107'),
          'PeachPuff1': ('255', '218', '185'),
          'PeachPuff2': ('238', '203', '173'),
          'PeachPuff3': ('205', '175', '149'),
          'PeachPuff4': ('139', '119', '101'),
          'NavajoWhite1': ('255', '222', '173'),
          'NavajoWhite2': ('238', '207', '161'),
          'NavajoWhite3': ('205', '179', '139'),
          'NavajoWhite4': ('139', '121', '94'),
          'LemonChiffon1': ('255', '250', '205'),
          'LemonChiffon2': ('238', '233', '191'),
          'LemonChiffon3': ('205', '201', '165'),
          'LemonChiffon4': ('139', '137', '112'),
          'cornsilk1': ('255', '248', '220'),
          'cornsilk2': ('238', '232', '205'),
          'cornsilk3': ('205', '200', '177'),
          'cornsilk4': ('139', '136', '120'),
          'ivory1': ('255', '255', '240'),
          'ivory2': ('238', '238', '224'),
          'ivory3': ('205', '205', '193'),
          'ivory4': ('139', '139', '131'),
          'honeydew1': ('240', '255', '240'),
          'honeydew2': ('224', '238', '224'),
          'honeydew3': ('193', '205', '193'),
          'honeydew4': ('131', '139', '131'),
          'LavenderBlush1': ('255', '240', '245'),
          'LavenderBlush2': ('238', '224', '229'),
          'LavenderBlush3': ('205', '193', '197'),
          'LavenderBlush4': ('139', '131', '134'),
          'MistyRose1': ('255', '228', '225'),
          'MistyRose2': ('238', '213', '210'),
          'MistyRose3': ('205', '183', '181'),
          'MistyRose4': ('139', '125', '123'),
          'azure1': ('240', '255', '255'),
          'azure2': ('224', '238', '238'),
          'azure3': ('193', '205', '205'),
          'azure4': ('131', '139', '139'),
          'SlateBlue1': ('131', '111', '255'),
          'SlateBlue2': ('122', '103', '238'),
          'SlateBlue3': ('105', '89', '205'),
          'SlateBlue4': ('71', '60', '139'),
          'RoyalBlue1': ('72', '118', '255'),
          'RoyalBlue2': ('67', '110', '238'),
          'RoyalBlue3': ('58', '95', '205'),
          'RoyalBlue4': ('39', '64', '139'),
          'blue1': ('0', '0', '255'),
          'blue2': ('0', '0', '238'),
          'blue3': ('0', '0', '205'),
          'blue4': ('0', '0', '139'),
          'DodgerBlue1': ('30', '144', '255'),
          'DodgerBlue2': ('28', '134', '238'),
          'DodgerBlue3': ('24', '116', '205'),
          'DodgerBlue4': ('16', '78', '139'),
          'SteelBlue1': ('99', '184', '255'),
          'SteelBlue2': ('92', '172', '238'),
          'SteelBlue3': ('79', '148', '205'),
          'SteelBlue4': ('54', '100', '139'),
          'DeepSkyBlue1': ('0', '191', '255'),
          'DeepSkyBlue2': ('0', '178', '238'),
          'DeepSkyBlue3': ('0', '154', '205'),
          'DeepSkyBlue4': ('0', '104', '139'),
          'SkyBlue1': ('135', '206', '255'),
          'SkyBlue2': ('126', '192', '238'),
          'SkyBlue3': ('108', '166', '205'),
          'SkyBlue4': ('74', '112', '139'),
          'LightSkyBlue1': ('176', '226', '255'),
          'LightSkyBlue2': ('164', '211', '238'),
          'LightSkyBlue3': ('141', '182', '205'),
          'LightSkyBlue4': ('96', '123', '139'),
          'SlateGray1': ('198', '226', '255'),
          'SlateGray2': ('185', '211', '238'),
          'SlateGray3': ('159', '182', '205'),
          'SlateGray4': ('108', '123', '139'),
          'LightSteelBlue1': ('202', '225', '255'),
          'LightSteelBlue2': ('188', '210', '238'),
          'LightSteelBlue3': ('162', '181', '205'),
          'LightSteelBlue4': ('110', '123', '139'),
          'LightBlue1': ('191', '239', '255'),
          'LightBlue2': ('178', '223', '238'),
          'LightBlue3': ('154', '192', '205'),
          'LightBlue4': ('104', '131', '139'),
          'LightCyan1': ('224', '255', '255'),
          'LightCyan2': ('209', '238', '238'),
          'LightCyan3': ('180', '205', '205'),
          'LightCyan4': ('122', '139', '139'),
          'PaleTurquoise1': ('187', '255', '255'),
          'PaleTurquoise2': ('174', '238', '238'),
          'PaleTurquoise3': ('150', '205', '205'),
          'PaleTurquoise4': ('102', '139', '139'),
          'CadetBlue1': ('152', '245', '255'),
          'CadetBlue2': ('142', '229', '238'),
          'CadetBlue3': ('122', '197', '205'),
          'CadetBlue4': ('83', '134', '139'),
          'turquoise1': ('0', '245', '255'),
          'turquoise2': ('0', '229', '238'),
          'turquoise3': ('0', '197', '205'),
          'turquoise4': ('0', '134', '139'),
          'cyan1': ('0', '255', '255'),
          'cyan2': ('0', '238', '238'),
          'cyan3': ('0', '205', '205'),
          'cyan4': ('0', '139', '139'),
          'DarkSlateGray1': ('151', '255', '255'),
          'DarkSlateGray2': ('141', '238', '238'),
          'DarkSlateGray3': ('121', '205', '205'),
          'DarkSlateGray4': ('82', '139', '139'),
          'aquamarine1': ('127', '255', '212'),
          'aquamarine2': ('118', '238', '198'),
          'aquamarine3': ('102', '205', '170'),
          'aquamarine4': ('69', '139', '116'),
          'DarkSeaGreen1': ('193', '255', '193'),
          'DarkSeaGreen2': ('180', '238', '180'),
          'DarkSeaGreen3': ('155', '205', '155'),
          'DarkSeaGreen4': ('105', '139', '105'),
          'SeaGreen1': ('84', '255', '159'),
          'SeaGreen2': ('78', '238', '148'),
          'SeaGreen3': ('67', '205', '128'),
          'SeaGreen4': ('46', '139', '87'),
          'PaleGreen1': ('154', '255', '154'),
          'PaleGreen2': ('144', '238', '144'),
          'PaleGreen3': ('124', '205', '124'),
          'PaleGreen4': ('84', '139', '84'),
          'SpringGreen1': ('0', '255', '127'),
          'SpringGreen2': ('0', '238', '118'),
          'SpringGreen3': ('0', '205', '102'),
          'SpringGreen4': ('0', '139', '69'),
          'green1': ('0', '255', '0'),
          'green2': ('0', '238', '0'),
          'green3': ('0', '205', '0'),
          'green4': ('0', '139', '0'),
          'chartreuse1': ('127', '255', '0'),
          'chartreuse2': ('118', '238', '0'),
          'chartreuse3': ('102', '205', '0'),
          'chartreuse4': ('69', '139', '0'),
          'OliveDrab1': ('192', '255', '62'),
          'OliveDrab2': ('179', '238', '58'),
          'OliveDrab3': ('154', '205', '50'),
          'OliveDrab4': ('105', '139', '34'),
          'DarkOliveGreen1': ('202', '255', '112'),
          'DarkOliveGreen2': ('188', '238', '104'),
          'DarkOliveGreen3': ('162', '205', '90'),
          'DarkOliveGreen4': ('110', '139', '61'),
          'khaki1': ('255', '246', '143'),
          'khaki2': ('238', '230', '133'),
          'khaki3': ('205', '198', '115'),
          'khaki4': ('139', '134', '78'),
          'LightGoldenrod1': ('255', '236', '139'),
          'LightGoldenrod2': ('238', '220', '130'),
          'LightGoldenrod3': ('205', '190', '112'),
          'LightGoldenrod4': ('139', '129', '76'),
          'LightYellow1': ('255', '255', '224'),
          'LightYellow2': ('238', '238', '209'),
          'LightYellow3': ('205', '205', '180'),
          'LightYellow4': ('139', '139', '122'),
          'yellow1': ('255', '255', '0'),
          'yellow2': ('238', '238', '0'),
          'yellow3': ('205', '205', '0'),
          'yellow4': ('139', '139', '0'),
          'gold1': ('255', '215', '0'),
          'gold2': ('238', '201', '0'),
          'gold3': ('205', '173', '0'),
          'gold4': ('139', '117', '0'),
          'goldenrod1': ('255', '193', '37'),
          'goldenrod2': ('238', '180', '34'),
          'goldenrod3': ('205', '155', '29'),
          'goldenrod4': ('139', '105', '20'),
          'DarkGoldenrod1': ('255', '185', '15'),
          'DarkGoldenrod2': ('238', '173', '14'),
          'DarkGoldenrod3': ('205', '149', '12'),
          'DarkGoldenrod4': ('139', '101', '8'),
          'RosyBrown1': ('255', '193', '193'),
          'RosyBrown2': ('238', '180', '180'),
          'RosyBrown3': ('205', '155', '155'),
          'RosyBrown4': ('139', '105', '105'),
          'IndianRed1': ('255', '106', '106'),
          'IndianRed2': ('238', '99', '99'),
          'IndianRed3': ('205', '85', '85'),
          'IndianRed4': ('139', '58', '58'),
          'sienna1': ('255', '130', '71'),
          'sienna2': ('238', '121', '66'),
          'sienna3': ('205', '104', '57'),
          'sienna4': ('139', '71', '38'),
          'burlywood1': ('255', '211', '155'),
          'burlywood2': ('238', '197', '145'),
          'burlywood3': ('205', '170', '125'),
          'burlywood4': ('139', '115', '85'),
          'wheat1': ('255', '231', '186'),
          'wheat2': ('238', '216', '174'),
          'wheat3': ('205', '186', '150'),
          'wheat4': ('139', '126', '102'),
          'tan1': ('255', '165', '79'),
          'tan2': ('238', '154', '73'),
          'tan3': ('205', '133', '63'),
          'tan4': ('139', '90', '43'),
          'chocolate1': ('255', '127', '36'),
          'chocolate2': ('238', '118', '33'),
          'chocolate3': ('205', '102', '29'),
          'chocolate4': ('139', '69', '19'),
          'firebrick1': ('255', '48', '48'),
          'firebrick2': ('238', '44', '44'),
          'firebrick3': ('205', '38', '38'),
          'firebrick4': ('139', '26', '26'),
          'brown1': ('255', '64', '64'),
          'brown2': ('238', '59', '59'),
          'brown3': ('205', '51', '51'),
          'brown4': ('139', '35', '35'),
          'salmon1': ('255', '140', '105'),
          'salmon2': ('238', '130', '98'),
          'salmon3': ('205', '112', '84'),
          'salmon4': ('139', '76', '57'),
          'LightSalmon1': ('255', '160', '122'),
          'LightSalmon2': ('238', '149', '114'),
          'LightSalmon3': ('205', '129', '98'),
          'LightSalmon4': ('139', '87', '66'),
          'orange1': ('255', '165', '0'),
          'orange2': ('238', '154', '0'),
          'orange3': ('205', '133', '0'),
          'orange4': ('139', '90', '0'),
          'DarkOrange1': ('255', '127', '0'),
          'DarkOrange2': ('238', '118', '0'),
          'DarkOrange3': ('205', '102', '0'),
          'DarkOrange4': ('139', '69', '0'),
          'coral1': ('255', '114', '86'),
          'coral2': ('238', '106', '80'),
          'coral3': ('205', '91', '69'),
          'coral4': ('139', '62', '47'),
          'tomato1': ('255', '99', '71'),
          'tomato2': ('238', '92', '66'),
          'tomato3': ('205', '79', '57'),
          'tomato4': ('139', '54', '38'),
          'OrangeRed1': ('255', '69', '0'),
          'OrangeRed2': ('238', '64', '0'),
          'OrangeRed3': ('205', '55', '0'),
          'OrangeRed4': ('139', '37', '0'),
          'red1': ('255', '0', '0'),
          'red2': ('238', '0', '0'),
          'red3': ('205', '0', '0'),
          'red4': ('139', '0', '0'),
          'DeepPink1': ('255', '20', '147'),
          'DeepPink2': ('238', '18', '137'),
          'DeepPink3': ('205', '16', '118'),
          'DeepPink4': ('139', '10', '80'),
          'HotPink1': ('255', '110', '180'),
          'HotPink2': ('238', '106', '167'),
          'HotPink3': ('205', '96', '144'),
          'HotPink4': ('139', '58', '98'),
          'pink1': ('255', '181', '197'),
          'pink2': ('238', '169', '184'),
          'pink3': ('205', '145', '158'),
          'pink4': ('139', '99', '108'),
          'LightPink1': ('255', '174', '185'),
          'LightPink2': ('238', '162', '173'),
          'LightPink3': ('205', '140', '149'),
          'LightPink4': ('139', '95', '101'),
          'PaleVioletRed1': ('255', '130', '171'),
          'PaleVioletRed2': ('238', '121', '159'),
          'PaleVioletRed3': ('205', '104', '137'),
          'PaleVioletRed4': ('139', '71', '93'),
          'maroon1': ('255', '52', '179'),
          'maroon2': ('238', '48', '167'),
          'maroon3': ('205', '41', '144'),
          'maroon4': ('139', '28', '98'),
          'VioletRed1': ('255', '62', '150'),
          'VioletRed2': ('238', '58', '140'),
          'VioletRed3': ('205', '50', '120'),
          'VioletRed4': ('139', '34', '82'),
          'magenta1': ('255', '0', '255'),
          'magenta2': ('238', '0', '238'),
          'magenta3': ('205', '0', '205'),
          'magenta4': ('139', '0', '139'),
          'orchid1': ('255', '131', '250'),
          'orchid2': ('238', '122', '233'),
          'orchid3': ('205', '105', '201'),
          'orchid4': ('139', '71', '137'),
          'plum1': ('255', '187', '255'),
          'plum2': ('238', '174', '238'),
          'plum3': ('205', '150', '205'),
          'plum4': ('139', '102', '139'),
          'MediumOrchid1': ('224', '102', '255'),
          'MediumOrchid2': ('209', '95', '238'),
          'MediumOrchid3': ('180', '82', '205'),
          'MediumOrchid4': ('122', '55', '139'),
          'DarkOrchid1': ('191', '62', '255'),
          'DarkOrchid2': ('178', '58', '238'),
          'DarkOrchid3': ('154', '50', '205'),
          'DarkOrchid4': ('104', '34', '139'),
          'purple1': ('155', '48', '255'),
          'purple2': ('145', '44', '238'),
          'purple3': ('125', '38', '205'),
          'purple4': ('85', '26', '139'),
          'MediumPurple1': ('171', '130', '255'),
          'MediumPurple2': ('159', '121', '238'),
          'MediumPurple3': ('137', '104', '205'),
          'MediumPurple4': ('93', '71', '139'),
          'thistle1': ('255', '225', '255'),
          'thistle2': ('238', '210', '238'),
          'thistle3': ('205', '181', '205'),
          'thistle4': ('139', '123', '139'),
          'gray0': ('0', '0', '0'),
          'grey0': ('0', '0', '0'),
          'gray1': ('3', '3', '3'),
          'grey1': ('3', '3', '3'),
          'gray2': ('5', '5', '5'),
          'grey2': ('5', '5', '5'),
          'gray3': ('8', '8', '8'),
          'grey3': ('8', '8', '8'),
          'gray4': ('10', '10', '10'),
          'grey4': ('10', '10', '10'),
          'gray5': ('13', '13', '13'),
          'grey5': ('13', '13', '13'),
          'gray6': ('15', '15', '15'),
          'grey6': ('15', '15', '15'),
          'gray7': ('18', '18', '18'),
          'grey7': ('18', '18', '18'),
          'gray8': ('20', '20', '20'),
          'grey8': ('20', '20', '20'),
          'gray9': ('23', '23', '23'),
          'grey9': ('23', '23', '23'),
          'gray10': ('26', '26', '26'),
          'grey10': ('26', '26', '26'),
          'gray11': ('28', '28', '28'),
          'grey11': ('28', '28', '28'),
          'gray12': ('31', '31', '31'),
          'grey12': ('31', '31', '31'),
          'gray13': ('33', '33', '33'),
          'grey13': ('33', '33', '33'),
          'gray14': ('36', '36', '36'),
          'grey14': ('36', '36', '36'),
          'gray15': ('38', '38', '38'),
          'grey15': ('38', '38', '38'),
          'gray16': ('41', '41', '41'),
          'grey16': ('41', '41', '41'),
          'gray17': ('43', '43', '43'),
          'grey17': ('43', '43', '43'),
          'gray18': ('46', '46', '46'),
          'grey18': ('46', '46', '46'),
          'gray19': ('48', '48', '48'),
          'grey19': ('48', '48', '48'),
          'gray20': ('51', '51', '51'),
          'grey20': ('51', '51', '51'),
          'gray21': ('54', '54', '54'),
          'grey21': ('54', '54', '54'),
          'gray22': ('56', '56', '56'),
          'grey22': ('56', '56', '56'),
          'gray23': ('59', '59', '59'),
          'grey23': ('59', '59', '59'),
          'gray24': ('61', '61', '61'),
          'grey24': ('61', '61', '61'),
          'gray25': ('64', '64', '64'),
          'grey25': ('64', '64', '64'),
          'gray26': ('66', '66', '66'),
          'grey26': ('66', '66', '66'),
          'gray27': ('69', '69', '69'),
          'grey27': ('69', '69', '69'),
          'gray28': ('71', '71', '71'),
          'grey28': ('71', '71', '71'),
          'gray29': ('74', '74', '74'),
          'grey29': ('74', '74', '74'),
          'gray30': ('77', '77', '77'),
          'grey30': ('77', '77', '77'),
          'gray31': ('79', '79', '79'),
          'grey31': ('79', '79', '79'),
          'gray32': ('82', '82', '82'),
          'grey32': ('82', '82', '82'),
          'gray33': ('84', '84', '84'),
          'grey33': ('84', '84', '84'),
          'gray34': ('87', '87', '87'),
          'grey34': ('87', '87', '87'),
          'gray35': ('89', '89', '89'),
          'grey35': ('89', '89', '89'),
          'gray36': ('92', '92', '92'),
          'grey36': ('92', '92', '92'),
          'gray37': ('94', '94', '94'),
          'grey37': ('94', '94', '94'),
          'gray38': ('97', '97', '97'),
          'grey38': ('97', '97', '97'),
          'gray39': ('99', '99', '99'),
          'grey39': ('99', '99', '99'),
          'gray40': ('102', '102', '102'),
          'grey40': ('102', '102', '102'),
          'gray41': ('105', '105', '105'),
          'grey41': ('105', '105', '105'),
          'gray42': ('107', '107', '107'),
          'grey42': ('107', '107', '107'),
          'gray43': ('110', '110', '110'),
          'grey43': ('110', '110', '110'),
          'gray44': ('112', '112', '112'),
          'grey44': ('112', '112', '112'),
          'gray45': ('115', '115', '115'),
          'grey45': ('115', '115', '115'),
          'gray46': ('117', '117', '117'),
          'grey46': ('117', '117', '117'),
          'gray47': ('120', '120', '120'),
          'grey47': ('120', '120', '120'),
          'gray48': ('122', '122', '122'),
          'grey48': ('122', '122', '122'),
          'gray49': ('125', '125', '125'),
          'grey49': ('125', '125', '125'),
          'gray50': ('127', '127', '127'),
          'grey50': ('127', '127', '127'),
          'gray51': ('130', '130', '130'),
          'grey51': ('130', '130', '130'),
          'gray52': ('133', '133', '133'),
          'grey52': ('133', '133', '133'),
          'gray53': ('135', '135', '135'),
          'grey53': ('135', '135', '135'),
          'gray54': ('138', '138', '138'),
          'grey54': ('138', '138', '138'),
          'gray55': ('140', '140', '140'),
          'grey55': ('140', '140', '140'),
          'gray56': ('143', '143', '143'),
          'grey56': ('143', '143', '143'),
          'gray57': ('145', '145', '145'),
          'grey57': ('145', '145', '145'),
          'gray58': ('148', '148', '148'),
          'grey58': ('148', '148', '148'),
          'gray59': ('150', '150', '150'),
          'grey59': ('150', '150', '150'),
          'gray60': ('153', '153', '153'),
          'grey60': ('153', '153', '153'),
          'gray61': ('156', '156', '156'),
          'grey61': ('156', '156', '156'),
          'gray62': ('158', '158', '158'),
          'grey62': ('158', '158', '158'),
          'gray63': ('161', '161', '161'),
          'grey63': ('161', '161', '161'),
          'gray64': ('163', '163', '163'),
          'grey64': ('163', '163', '163'),
          'gray65': ('166', '166', '166'),
          'grey65': ('166', '166', '166'),
          'gray66': ('168', '168', '168'),
          'grey66': ('168', '168', '168'),
          'gray67': ('171', '171', '171'),
          'grey67': ('171', '171', '171'),
          'gray68': ('173', '173', '173'),
          'grey68': ('173', '173', '173'),
          'gray69': ('176', '176', '176'),
          'grey69': ('176', '176', '176'),
          'gray70': ('179', '179', '179'),
          'grey70': ('179', '179', '179'),
          'gray71': ('181', '181', '181'),
          'grey71': ('181', '181', '181'),
          'gray72': ('184', '184', '184'),
          'grey72': ('184', '184', '184'),
          'gray73': ('186', '186', '186'),
          'grey73': ('186', '186', '186'),
          'gray74': ('189', '189', '189'),
          'grey74': ('189', '189', '189'),
          'gray75': ('191', '191', '191'),
          'grey75': ('191', '191', '191'),
          'gray76': ('194', '194', '194'),
          'grey76': ('194', '194', '194'),
          'gray77': ('196', '196', '196'),
          'grey77': ('196', '196', '196'),
          'gray78': ('199', '199', '199'),
          'grey78': ('199', '199', '199'),
          'gray79': ('201', '201', '201'),
          'grey79': ('201', '201', '201'),
          'gray80': ('204', '204', '204'),
          'grey80': ('204', '204', '204'),
          'gray81': ('207', '207', '207'),
          'grey81': ('207', '207', '207'),
          'gray82': ('209', '209', '209'),
          'grey82': ('209', '209', '209'),
          'gray83': ('212', '212', '212'),
          'grey83': ('212', '212', '212'),
          'gray84': ('214', '214', '214'),
          'grey84': ('214', '214', '214'),
          'gray85': ('217', '217', '217'),
          'grey85': ('217', '217', '217'),
          'gray86': ('219', '219', '219'),
          'grey86': ('219', '219', '219'),
          'gray87': ('222', '222', '222'),
          'grey87': ('222', '222', '222'),
          'gray88': ('224', '224', '224'),
          'grey88': ('224', '224', '224'),
          'gray89': ('227', '227', '227'),
          'grey89': ('227', '227', '227'),
          'gray90': ('229', '229', '229'),
          'grey90': ('229', '229', '229'),
          'gray91': ('232', '232', '232'),
          'grey91': ('232', '232', '232'),
          'gray92': ('235', '235', '235'),
          'grey92': ('235', '235', '235'),
          'gray93': ('237', '237', '237'),
          'grey93': ('237', '237', '237'),
          'gray94': ('240', '240', '240'),
          'grey94': ('240', '240', '240'),
          'gray95': ('242', '242', '242'),
          'grey95': ('242', '242', '242'),
          'gray96': ('245', '245', '245'),
          'grey96': ('245', '245', '245'),
          'gray97': ('247', '247', '247'),
          'grey97': ('247', '247', '247'),
          'gray98': ('250', '250', '250'),
          'grey98': ('250', '250', '250'),
          'gray99': ('252', '252', '252'),
          'grey99': ('252', '252', '252'),
          'gray100': ('255', '255', '255'),
          'grey100': ('255', '255', '255'),
          'darkgrey': ('169', '169', '169'),
          'DarkGrey': ('169', '169', '169'),
          'darkgray': ('169', '169', '169'),
          'DarkGray': ('169', '169', '169'),
          'darkblue': ('0', '0', '139'),
          'DarkBlue': ('0', '0', '139'),
          'darkcyan': ('0', '139', '139'),
          'DarkCyan': ('0', '139', '139'),
          'darkmagenta': ('139', '0', '139'),
          'DarkMagenta': ('139', '0', '139'),
          'darkred': ('139', '0', '0'),
          'DarkRed': ('139', '0', '0'),
          'lightgreen': ('144', '238', '144'),
          'LightGreen': ('144', '238', '144')}
| 1.171875 | 1 |
script/bitbucket-buildstatus-notifier.py | ydemetriades/codefresh-bitbucket-buildstatus | 0 | 12773775 | <gh_stars>0
#!/usr/bin/env python
"""Push a Codefresh build status to the Bitbucket commit-status API.

Reads the repository/commit context from Codefresh CF_* environment
variables and Bitbucket credentials from BB_BSN_* variables, then POSTs
a build-status object for the commit. Exit codes: 0 on a 2xx response,
1 on an HTTP error response, 2 when credentials are missing.
"""
import os

import requests

owner = os.getenv('CF_REPO_OWNER')
slug = os.getenv('CF_REPO_NAME')
auth_user = os.getenv('BB_BSN_REPO_AUTH_USER')
auth_password = os.getenv('BB_BSN_REPO_AUTH_PASSWORD')

# Credentials are mandatory; bail out early with a distinct exit code.
if auth_user is None:
    print("Authentication User Environment Variable [BB_BSN_REPO_AUTH_USER] is not defined.")
    exit(2)

if auth_password is None:
    print("Authentication Password Environment Variable [BB_BSN_REPO_AUTH_PASSWORD] is not defined.")
    exit(2)

base_url = os.getenv('BB_BSN_URL', 'https://api.bitbucket.org')

build_id = os.getenv('CF_BUILD_ID')
# Bitbucket accepts: SUCCESSFUL, FAILED, INPROGRESS, STOPPED
status = os.getenv('CF_BUILD_STATUS', 'STOPPED')
commit = os.getenv('CF_REVISION')
build_url = os.getenv('CF_BUILD_URL')

print(f'Will Attempt to update build status of commit [{commit}] to [{status}] ')

# Payload shape per the Bitbucket 2.0 build-status endpoint.
payload = {
    'key': commit,
    'state': status,
    'name': f'Build [{build_id}]',
    'url': build_url,
    'description': f'Build [{build_id}] {status}'
}

# Construct the commit-status endpoint URL.
api_url = f'{base_url}/2.0/repositories/{owner}/{slug}/commit/{commit}/statuses/build'

print('Sending request to:')
print(api_url)
print('with body')
print(payload)

# Post build status to Bitbucket with HTTP basic auth.
response = requests.post(api_url, auth=(auth_user, auth_password), json=payload)

print('Response:')
print(response)
print(response.text)

# requests.Response is falsy for 4xx/5xx status codes.
if response:
    exit(0)
else:
    exit(1)
rubin_sim/maf/mafContrib/StarCounts/StarCounts/__init__.py | RileyWClarke/flarubin | 0 | 12773776 | <gh_stars>0
# Public API of the StarCounts package: the submodule names re-exported
# by star-imports from this package.
__all__ = ["abs_mag", "coords", "spec_type", "starcount", "starcount_bymass", "stellardensity"]
| 1.046875 | 1 |
advbench/lib/logging.py | constrainedlearning/advbench | 0 | 12773777 | from selectors import EpollSelector
from scipy import ma
import torch
from torch.cuda.amp import autocast
from advbench import attacks
from einops import repeat, rearrange
import torch.nn.functional as F
from tqdm import tqdm
from advbench.datasets import FFCV_AVAILABLE
class PerturbationEval():
    """Evaluate a classifier's loss/accuracy over a grid of perturbations.

    Subclasses supply the grid through ``get_grid``.  Assumes ``loader``
    yields ``(imgs, labels)`` batches and that ``algorithm`` exposes
    ``classifier``, ``hparams``, ``device``, ``export``/``unexport`` and an
    ``attack`` with a ``perturbation`` object -- TODO confirm against the
    advbench Algorithm interface.
    """
    def __init__(self, algorithm, loader, max_perturbations=None, batched=True):
        self.algorithm = algorithm
        self.classifier = self.algorithm.classifier
        self.hparams = self.algorithm.hparams
        self.device = self.algorithm.device
        self.loader = loader
        self.max_perturbations = max_perturbations
        self.perturbation = self.algorithm.attack.perturbation
        self.dim = self.perturbation.dim
        # When True, all grid points for a batch are evaluated in a single
        # forward pass; otherwise one forward pass per grid point.
        self.batched = batched

    def eval_perturbed(self, single_img=False, batches=1):
        """Return ``(grid, losses, accuracies)`` for the perturbation grid.

        When ``single_img`` is True only the first dataset sample is used;
        otherwise up to ``batches`` loader batches are averaged.
        """
        self.grid = self.get_grid()
        self.grid_size = self.grid.shape[0]
        self.algorithm.classifier.eval()
        self.algorithm.export()
        adv_losses = []
        adv_accs = []
        with torch.no_grad():
            if single_img:
                imgs, labels = self.loader.dataset[0]
                imgs, labels = imgs.unsqueeze(0).to(self.device), torch.tensor([labels]).to(self.device)
                # FFCV pipelines run under mixed precision.
                if FFCV_AVAILABLE:
                    with autocast():
                        adv_losses, adv_accs = self.step(imgs, labels)
                else:
                    adv_losses, adv_accs = self.step(imgs, labels)
            else:
                for idx, batch in tqdm(enumerate(self.loader)):
                    if idx >= batches:
                        break
                    imgs, labels = batch
                    imgs, labels = imgs.to(self.device), labels.to(self.device)
                    if FFCV_AVAILABLE:
                        with autocast():
                            adv_loss, adv_acc = self.step(imgs, labels)
                    else:
                        adv_loss, adv_acc = self.step(imgs, labels)
                    adv_losses.append(adv_loss)
                    adv_accs.append(adv_acc)
        self.algorithm.unexport()
        self.algorithm.classifier.train()
        # NOTE(review): mutating loader state here looks side-effecty;
        # confirm re-enabling shuffling after evaluation is intended.
        self.loader.shuffle = True
        if batches > 1 or not single_img:
            # Collapse the list of (batch, grid) tensors to per-grid means.
            adv_losses = torch.concat(adv_losses, dim=0).mean(dim=0)
            adv_accs = torch.concat(adv_accs, dim=0).mean(dim=0)
        return self.grid, adv_losses, adv_accs

    def step(self, imgs, labels):
        """Evaluate every grid point for one batch.

        Returns per-sample, per-grid-point loss and accuracy tensors of
        shape ``(batch, grid_size)``.
        """
        batch_size = imgs.shape[0]
        if self.batched:
            # Tile the batch so every image is paired with every grid point.
            adv_imgs = self.perturbation.perturb_img(
                repeat(imgs, 'B W H C -> (B S) W H C', B=batch_size, S=self.grid_size),
                repeat(self.grid, 'S D -> (B S) D', B=batch_size, D=self.dim, S=self.grid_size))
            pred = self.classifier(adv_imgs)
            adv_loss = F.cross_entropy(pred, repeat(labels, 'B -> (B S)', S=self.grid_size), reduction="none")
            # Bug fix: compare predicted class indices (argmax of logits) to
            # the labels, matching the non-batched branch below; previously
            # raw logits were compared directly to label indices.
            adv_acc = torch.eq(pred.argmax(dim=1), repeat(labels, 'B -> (B S)', S=self.grid_size))
            adv_loss = rearrange(adv_loss, '(B S) -> B S', B=batch_size, S=self.grid_size)
            adv_acc = rearrange(adv_acc, '(B S) -> B S', B=batch_size, S=self.grid_size)
        else:
            adv_loss = torch.empty((batch_size, self.grid_size), device=imgs.device)
            adv_acc = torch.empty((batch_size, self.grid_size), device=imgs.device)
            # One forward pass per grid point (lower memory, slower).
            for s in range(self.grid_size):
                grid = repeat(self.grid[s], 'D -> B D', B=batch_size, D=self.dim)
                adv_imgs = self.perturbation.perturb_img(imgs, grid)
                pred = self.classifier(adv_imgs)
                angle_loss = F.cross_entropy(pred, labels, reduction="none")
                adv_loss[:, s] = angle_loss
                adv_acc[:, s] = torch.eq(pred.argmax(dim=1), labels)
        return adv_loss, adv_acc

    def get_grid(self):
        """Subclasses return a ``(grid_size, dim)`` tensor of parameters."""
        pass
class GridEval(PerturbationEval):
    """Evaluate over the grid computed by a grid-search attack."""

    def __init__(self, algorithm, loader, max_perturbations=None):
        super(GridEval, self).__init__(algorithm, loader, max_perturbations=max_perturbations)
        self.attack = attacks.Grid_Search(
            algorithm.classifier,
            algorithm.hparams,
            algorithm.device,
            perturbation=algorithm.perturbation_name,
            grid_size=max_perturbations,
        )

    def get_grid(self):
        # Reuse the grid that the grid-search attack already built.
        return self.attack.grid
class AngleGrid(PerturbationEval):
    """Evaluate over a rotation-angle grid at a fixed translation (tx, ty)."""

    def __init__(self, algorithm, loader, tx=0, ty=0, max_perturbations=None, batched=False):
        # Bug fix: forward the caller's ``batched`` choice instead of
        # hard-coding batched=False in the super() call (the parameter was
        # previously accepted but silently ignored; default is unchanged).
        super(AngleGrid, self).__init__(algorithm, loader, max_perturbations=max_perturbations, batched=batched)
        self.attack = attacks.Grid_Search(algorithm.classifier, algorithm.hparams, algorithm.device, perturbation="Rotation", grid_size=max_perturbations)
        self.tx = tx
        self.ty = ty

    def get_grid(self):
        """Return (grid_size, 3) rows of [angle, tx, ty]."""
        angle_grid = self.attack.grid
        ones = torch.ones_like(angle_grid)
        grid = torch.column_stack([angle_grid, self.tx * ones, self.ty * ones])
        return grid
bioindex/lib/index.py | massung/bioindex | 6 | 12773778 | <gh_stars>1-10
import concurrent.futures
import csv
import datetime
import functools
import logging
import orjson
import os
import os.path
import rich.progress
import sqlalchemy
import tempfile
import time
from .aws import invoke_lambda
from .s3 import list_objects, read_object, relative_key
from .schema import Schema
from .utils import cap_case_str
class Index:
    """
    An index definition that can be built or queried.

    An index maps structured keys extracted from JSON-lines objects in S3
    to byte ranges within those objects, stored in a MySQL table so the
    original records can be fetched with ranged S3 reads.
    """

    def __init__(self, name, table_name, s3_prefix, schema_string, built_date):
        """
        Initialize the index with everything needed to build keys and query.
        """
        self.schema = Schema(schema_string)
        self.table = self.schema.table_def(table_name, sqlalchemy.MetaData())

        self.name = name
        self.built = built_date
        self.s3_prefix = s3_prefix

    @staticmethod
    def create(engine, name, s3_prefix, schema):
        """
        Create a new record in the __Index table and return True if
        successful. Will overwrite any existing index with the same
        name.
        """
        assert s3_prefix.endswith('/'), "S3 prefix must be a common prefix ending with '/'"

        # add the new index to the table
        # NOTE: ON DUPLICATE KEY resets `built` to 0, so an overwritten
        # index is treated as not-yet-built until rebuilt.
        sql = (
            'INSERT INTO `__Indexes` (`name`, `table`, `prefix`, `schema`) '
            'VALUES (%s, %s, %s, %s) '
            'ON DUPLICATE KEY UPDATE '
            '  `table` = VALUES(`table`), '
            '  `prefix` = VALUES(`prefix`), '
            '  `schema` = VALUES(`schema`), '
            '  `built` = 0 '
        )

        # add to the database
        row = engine.execute(sql, name, cap_case_str(name), s3_prefix, schema)

        return row and row.lastrowid is not None

    @staticmethod
    def list_indexes(engine, filter_built=True):
        """
        Return an iterator of all the indexes.
        """
        sql = 'SELECT `name`, `table`, `prefix`, `schema`, `built` FROM `__Indexes`'

        # convert all rows to an index definition
        indexes = map(lambda r: Index(*r), engine.execute(sql))

        # remove indexes not built?
        if filter_built:
            indexes = filter(lambda i: i.built, indexes)

        return indexes

    @staticmethod
    def lookup(engine, name):
        """
        Lookup an index in the database, return its table name, s3 prefix,
        schema, etc.

        Raises KeyError when no index with that name exists.
        """
        sql = (
            'SELECT `name`, `table`, `prefix`, `schema`, `built` '
            'FROM `__Indexes` '
            'WHERE `name` = %s '
        )

        # lookup the index
        row = engine.execute(sql, name).fetchone()
        if row is None:
            raise KeyError(f'No such index: {name}')

        return Index(*row)

    def prepare(self, engine, rebuild=False):
        """
        Ensure the records table exists for the index.

        When rebuild is True, all key bookkeeping and the records table
        itself are dropped first.
        """
        self.set_built_flag(engine, flag=False)

        if rebuild:
            self.delete_keys(engine)
            self.table.drop(engine, checkfirst=True)

        logging.info('Creating %s table...', self.table.name)
        self.table.create(engine, checkfirst=True)

    def build(self, config, engine, use_lambda=False, workers=3, console=None):
        """
        Builds the index table for objects in S3.

        Only objects that are new or changed (by ETag) since the last
        build are indexed; the table's SQL index is dropped during the
        bulk load and recreated afterwards.
        """
        logging.info('Finding keys in %s...', self.s3_prefix)
        s3_objects = list(list_objects(config.s3_bucket, self.s3_prefix, exclude='_SUCCESS'))

        # delete all stale keys; get the list of objects left to index
        objects = self.delete_stale_keys(engine, s3_objects, console=console)

        # calculate the total size of all the objects
        total_size = functools.reduce(lambda a, b: a + b['Size'], objects, 0)

        # progress format
        p_fmt = [
            "[progress.description]{task.description}",
            rich.progress.BarColumn(),
            rich.progress.FileSizeColumn(),
            "[progress.percentage]{task.percentage:>3.0f}%"
        ]

        if objects:
            self.schema.drop_index(engine, self.table)

            # as each job finishes...
            with rich.progress.Progress(*p_fmt, console=console) as progress:
                overall = progress.add_task('[green]Indexing keys...[/]', total=total_size)

                # read several files in parallel
                pool = concurrent.futures.ThreadPoolExecutor(max_workers=workers)

                # index the objects remotely using lambda or locally
                if use_lambda:
                    self.index_objects_remote(
                        config,
                        engine,
                        pool,
                        objects,
                        progress,
                        overall,
                    )
                else:
                    self.index_objects_local(
                        config,
                        engine,
                        pool,
                        objects,
                        progress,
                        overall,
                    )

            # finally, build the index after all inserts are done
            logging.info('Building table index...')

            # each table knows how to build its own index
            self.schema.create_index(engine, self.table)

        # set the built flag for the index
        self.set_built_flag(engine, True)

        # done indexing
        logging.info('Index is up to date')

    def delete_stale_keys(self, engine, objects, console=None):
        """
        Deletes all records indexed where the key...

         - no longer exists
         - has the wrong version
         - hasn't been fully indexed
        """
        logging.info('Finding stale keys...')
        keys = self.lookup_keys(engine)

        # all keys are considered stale initially
        stale_ids = set(map(lambda k: k['id'], keys.values()))
        indexed_keys = set()

        # loop over all the valid objects to be indexed
        for obj in objects:
            key, version = obj['Key'], obj['ETag'].strip('"')
            k = keys.get(key)

            # is this key already built and match versions?
            if k and k['version'] == version:
                stale_ids.remove(k['id'])
                indexed_keys.add(key)

        # delete stale keys
        if stale_ids:

            # if all the keys are stale, just drop the table
            if not indexed_keys:
                logging.info(f'Deleting table...')
                self.prepare(engine, rebuild=True)
            else:
                with rich.progress.Progress(console=console) as progress:
                    task = progress.add_task('[red]Deleting...[/]', total=len(stale_ids))
                    n = 0

                    # delete all the keys from the table
                    for kid in stale_ids:
                        sql = f'DELETE FROM {self.table.name} WHERE `key` = %s'
                        n += engine.execute(sql, kid).rowcount

                        # remove the key from the __Keys table
                        # NOTE(review): `key` here is whatever value was left
                        # in the loop variable above, not the key that owns
                        # `kid` -- this looks like a bug; confirm.
                        self.delete_key(engine, key)
                        progress.advance(task)

                    # show what was done
                    logging.info(f'Deleted {n:,} records')
        else:
            logging.info('No stale keys; delete skipped')

        # filter the objects that still need to be indexed
        return [o for o in objects if o['Key'] not in indexed_keys]

    def index_objects_remote(self, config, engine, pool, objects, progress=None, overall=None):
        """
        Index the objects using a lambda function.

        The lambda performs the read and the insert; only the per-key
        built flag is updated here once each invocation completes.
        """
        def run_function(obj):
            logging.info(f'Processing {relative_key(obj["Key"], self.s3_prefix)}...')

            # lambda function event data
            payload = {
                'index': self.name,
                'rds_secret': config.rds_secret,
                'rds_schema': config.bio_schema,
                's3_bucket': config.s3_bucket,
                's3_obj': obj,
            }

            # run the lambda asynchronously
            return invoke_lambda(config.lambda_function, payload)

        # create a job per object
        jobs = [pool.submit(run_function, obj) for obj in objects]

        # as each job finishes, set the built flag for that key
        for job in concurrent.futures.as_completed(jobs):
            if job.exception() is not None:
                raise job.exception()

            result = job.result()
            key = result['key']
            record_count = result['records']
            size = result['size']

            # the insert was done remotely, simply set the built flag now
            self.set_key_built_flag(engine, key)

            # output number of records
            logging.info(f'Wrote {record_count:,} records')

            # update the overall bar
            if progress:
                progress.advance(overall, advance=size)

    def index_objects_local(self, config, engine, pool, objects, progress=None, overall=None):
        """
        Index S3 objects locally.

        Reads run in parallel in the thread pool; inserts are performed
        serially here so jobs don't contend for table locks.
        """
        jobs = [pool.submit(self.index_object, engine, config.s3_bucket, obj, progress, overall) for obj in objects]

        # as each job finishes, insert the records into the table
        for job in concurrent.futures.as_completed(jobs):
            if job.exception() is not None:
                raise job.exception()

            # get the key and the record iterator returned
            key, records = job.result()

            # perform the insert serially, so jobs don't block each other
            self.insert_records(engine, list(records))

            # after inserting, set the key as being built
            self.set_key_built_flag(engine, key)

    def index_object(self, engine, bucket, obj, progress=None, overall=None):
        """
        Read a file in S3, index it, and insert records into the table.

        Each JSON line's byte range [start_offset, end_offset) is recorded
        per key tuple; consecutive lines sharing a key tuple are merged
        into a single, growing range.
        """
        key, version, size = obj['Key'], obj['ETag'].strip('"'), obj['Size']
        key_id = self.insert_key(engine, key, version)

        # read the file from s3
        content = read_object(bucket, key)
        start_offset = 0
        records = {}

        # per-file progress bar
        rel_key = relative_key(key, self.s3_prefix)
        file_progress = progress and progress.add_task(f'[yellow]{rel_key}[/]', total=size)

        # process each line (record)
        for line_num, line in enumerate(content.iter_lines()):
            row = orjson.loads(line)
            end_offset = start_offset + len(line) + 1  # newline

            try:
                for key_tuple in self.schema.index_builder(row):
                    if key_tuple in records:
                        records[key_tuple]['end_offset'] = end_offset
                    else:
                        records[key_tuple] = {
                            'key': key_id,
                            'start_offset': start_offset,
                            'end_offset': end_offset,
                        }
            except (KeyError, ValueError) as e:
                logging.warning('%s; skipping...', e)

            # update progress
            if progress:
                progress.update(file_progress, completed=end_offset)

            # track current file offset
            start_offset = end_offset

        # done with this object; tick the overall progress
        if progress:
            progress.remove_task(file_progress)
            progress.advance(overall, advance=size)

        # NOTE: Because this is called as a job, be sure and return a iterator
        #       and not the records as this is memory that is kept around for
        #       the entire duration of indexing.
        return key, ({**self.schema.column_values(k), **r} for k, r in records.items())

    def insert_records(self, engine, records):
        """
        Insert all the records into the index table. It does this as fast as
        possible by writing the file to a CSV and then loading it directly
        into the table.
        """
        if len(records) == 0:
            return

        # get the field names from the first record
        fieldnames = list(records[0].keys())
        quoted_fieldnames = [f'`{field}`' for field in fieldnames]

        # create a temporary file to write the CSV to
        tmp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)

        try:
            w = csv.DictWriter(tmp, fieldnames)

            # write the header and the rows
            w.writeheader()
            w.writerows(records)
        finally:
            tmp.close()

        try:
            infile = tmp.name.replace('\\', '/')
            fail_ex = None

            sql = (
                f"LOAD DATA LOCAL INFILE '{infile}' "
                f"INTO TABLE `{self.table.name}` "
                f"FIELDS TERMINATED BY ',' "
                f"LINES TERMINATED BY '\\n' "
                f"IGNORE 1 ROWS "
                f"({','.join(quoted_fieldnames)}) "
            )

            # attempt to bulk load into the database
            # retry a few times to ride out transient deadlocks when
            # several loads run back-to-back
            for _ in range(5):
                try:
                    engine.execute(sql)
                    break
                except sqlalchemy.exc.OperationalError as ex:
                    fail_ex = ex
                    # NOTE(review): the MySQL error number usually lives on
                    # ex.orig.args[0]; confirm ex.code carries 1213 here.
                    if ex.code == 1213:  # deadlock; wait and try again
                        time.sleep(1)
            else:
                # failed to insert the rows, die
                raise fail_ex

            # output number of records
            logging.info(f'Wrote {len(records):,} records')
        finally:
            os.remove(tmp.name)

    def insert_records_batched(self, engine, records, batch_size=5000):
        """
        Insert all the records, but in batches. This way if multiple files are
        being indexed in parallel, they won't block each others' inserts by
        locking the table.
        """
        for i in range(0, len(records), batch_size):
            self.insert_records(engine, records[i:i+batch_size])

    def insert_key(self, engine, key, version):
        """
        Adds a key to the __Keys table for an index by name. If the key
        already exists and the versions match, just return the ID for it.
        If the versions don't match, delete the existing record and create
        a new one with a new ID.
        """
        sql = 'SELECT `id`, `version` FROM `__Keys` WHERE `index` = %s and `key` = %s'
        row = engine.execute(sql, self.name, key).fetchone()

        if row is not None:
            if row[1] == version:
                return row[0]

            # delete the existing key entry
            engine.execute('DELETE FROM `__Keys` WHERE `id` = %s', row[0])

        # add a new entry
        sql = 'INSERT INTO `__Keys` (`index`, `key`, `version`) VALUES (%s, %s, %s)'
        row = engine.execute(sql, self.name, key, version)

        return row.lastrowid

    def delete_key(self, engine, key):
        """
        Removes all records from the index and the key from the __Keys
        table for a paritcular index/key pair.
        """
        sql = 'DELETE FROM `__Keys` WHERE `index` = %s and `key` = %s'
        engine.execute(sql, self.name, key)

    def delete_keys(self, engine):
        """
        Removes all records from the __Keys table for a paritcular index
        by name.
        """
        engine.execute('DELETE FROM `__Keys` WHERE `index` = %s', self.name)

    def lookup_keys(self, engine):
        """
        Look up all the keys and versions for this index. Returns a dictionary
        key -> {id, version}. The version will be None if the key hasn't been
        completely indexed.
        """
        sql = 'SELECT `id`, `key`, `version`, `built` FROM `__Keys` WHERE `index` = %s '
        rows = engine.execute(sql, self.name).fetchall()

        return {key: {'id': id, 'version': built and ver} for id, key, ver, built in rows}

    def set_key_built_flag(self, engine, key):
        """
        Update the keys table to indicate the key has been built.
        """
        sql = 'UPDATE `__Keys` SET `built` = %s WHERE `index` = %s AND `key` = %s'
        engine.execute(sql, datetime.datetime.utcnow(), self.name, key)

    def set_built_flag(self, engine, flag=True):
        """
        Update the __Index table to indicate this index has been built.
        """
        now = datetime.datetime.utcnow()

        if flag:
            engine.execute('UPDATE `__Indexes` SET `built` = %s WHERE `name` = %s', now, self.name)
        else:
            engine.execute('UPDATE `__Indexes` SET `built` = NULL WHERE `name` = %s', self.name)
| 2.265625 | 2 |
activitysim/examples/example_marin/scripts/marin_fix.py | mxndrwgrdnr/activitysim | 85 | 12773779 | # remove some of the asim-style columns added by marin_work_tour_mode_choice.py
# so data input files look 'realistic' - and that work is done instaed by 'import_tours' annotation expression files
import os
import pandas as pd
import openmatrix as omx
# Source directory for the original inputs and destination for the fixed
# copies (kept separate so the originals are never overwritten in place).
input_dir = './data_3_marin'
output_dir = './data_3_marin/fix'  # don't overwrite - these files should replace the originals


def input_path(filenane):
    # Path of an original data file inside input_dir.
    return os.path.join(input_dir, filenane)


def output_path(filenane):
    # Path of the corresponding fixed file inside output_dir.
    return os.path.join(output_dir, filenane)
# 0 - get county zones
# Strip the asim-style columns from the MAZ data and re-save it.
mazs = pd.read_csv(input_path("maz_data_asim.csv"))
del mazs['zone_id']
del mazs['county_id']
mazs.to_csv(output_path("maz_data_asim.csv"), index=False)

# Sanity check: TAZ ids must be contiguous and 1-based.
tazs = mazs["TAZ"].unique()
tazs.sort()
assert ((tazs - 1) == range(len(tazs))).all()

# MAZ,TAZ
# NOTE(review): this variable is named `taps` but holds the maz_taz
# mapping (and is overwritten by tap_data just below); confirm intended.
taps = pd.read_csv(input_path("maz_taz.csv"))
# nothing
taps.to_csv(output_path("maz_taz.csv"), index=False)

taps = pd.read_csv(input_path("tap_data.csv"))
# nothing
taps.to_csv(output_path("tap_data.csv"), index=False)

# 2 - nearby skims need headers
# Drop the duplicated trailing id columns from the walk/bike skim files.
maz_tap_walk = pd.read_csv(input_path("maz_tap_walk.csv"))
maz_maz_walk = pd.read_csv(input_path("maz_maz_walk.csv"))
maz_maz_bike = pd.read_csv(input_path("maz_maz_bike.csv"))

del maz_tap_walk['TAP.1']
del maz_maz_walk['DMAZ.1']
del maz_maz_bike['DMAZ.1']

maz_tap_walk.to_csv(output_path("maz_tap_walk.csv"), index=False)
maz_maz_walk.to_csv(output_path("maz_maz_walk.csv"), index=False)
maz_maz_bike.to_csv(output_path("maz_maz_bike.csv"), index=False)

# 3 - accessibility data
access = pd.read_csv(input_path("access.csv"))
del access['zone_id']
access.to_csv(output_path("access.csv"), index=False)

# 4 - maz to tap drive data (passed through unchanged)
taz_tap_drive = pd.read_csv(input_path("maz_taz_tap_drive.csv"))
taz_tap_drive.to_csv(output_path("maz_taz_tap_drive.csv"), index=False)

# 5 - households: drop the asim identifier columns
households = pd.read_csv(input_path("households_asim.csv"))
del households['home_zone_id']
del households['household_id']
households.to_csv(output_path("households_asim.csv"), index=False)

# 6 - persons: drop the asim identifier and derived columns
persons = pd.read_csv(input_path("persons_asim.csv"))
del persons['person_id']
del persons['household_id']
del persons['is_university']
persons.to_csv(output_path("persons_asim.csv"), index=False)

# 7 - tours file: drop columns that 'import_tours' annotations recreate
work_tours = pd.read_csv(input_path("work_tours.csv"))
del work_tours["household_id"]
del work_tours["destination"]
del work_tours["start"]
del work_tours["end"]
del work_tours["tour_type"]
work_tours.to_csv(output_path("work_tours.csv"), index=False)
| 2.375 | 2 |
Python/Listas/Q1.py | Flavio-Varejao/Exercicios | 0 | 12773780 | '''
Faça um Programa que leia um vetor de 5 números inteiros e mostre-os.
'''
lista=[]
for numero in range(1,6):
lista.append(int(input("Digite um número: ")))
print(lista)
| 4 | 4 |
python3/forwardfinancing.py | ForwardFinancing/partner_api_client_samples | 0 | 12773781 | <filename>python3/forwardfinancing.py
import requests
import os
import json
import base64

# Sample client for the Forward Financing partner API: creates a lead,
# then uploads an attachment both raw and base64-encoded.

# Get api_key from your environment variables
api_key = os.environ["FORWARD_FINANCING_API_KEY"]

# sample payload
body = {
  "lead": {
    "contacts_attributes": [
      {
        "first_name": "string",
        "last_name": "string",
        "email": "<EMAIL>",
        "title": "string",
        "born_on": "2015-01-01",
        "home_phone": "6176781000",
        "cell_phone": "6176781000",
        "ssn": "234345566",
        "ownership_date": "2015-01-01",
        "current_address_attributes": {
          "street1": "string",
          "street2": "string",
          "city": "string",
          "state": "AK",
          "zip": "00112"
        }
      }
    ],
    "account_attributes": {
      "entity_type": "Sole Proprietor",
      "name": "string",
      "started_on": "2015-01-01",
      "legal_name": "string",
      "phone": "6176781000",
      "email": "<EMAIL>",
      "website": "string",
      "fein": "string",
      "monthly_revenue": "Less than $5,000",
      "industry_name": "Laundry and dry cleaning services",
      "current_address_attributes": {
        "street1": "string",
        "street2": "string",
        "city": "string",
        "state": "AK",
        "zip": "00112"
      }
    },
    "loan_attributes": {
      "company_name": "string",
      "daily_payment_amount": 0,
      "balance": 0
    },
    "application_attributes": {
      "has_current_loan": True,
      "applicant_is_owner": True,
      "loan_use": "Debt Refinancing",
      "capital_needed": "10000",
      "owner_1_percent_ownership": 0,
      "owner_2_percent_ownership": 0,
      "reference_id": "unique_string_here"
    }
  }
}

# Send your api_key in in the header
# Make sure to specify the Content-Type for the /lead endpoint!
headers = {
  "api_key": api_key,
  "Content-Type": "application/json"
}

print(json.dumps(body, indent=4))

response = requests.post("https://api-staging.forwardfinancing.com/v1/lead", headers=headers, data=json.dumps(body))

# If the response was 201, it worked
if response.status_code == 201:
    # Parse the body once and reuse it.
    lead = response.json()
    print(lead)
    lead_id = lead["id"]
    print("Lead id: ", lead_id)
else:
    # If the response was not 201, something went wrong
    # The response body might have some info about what wrong in addition to the
    # status code
    print(response.status_code, " - Error")
    # Bug fix: without a lead id the attachment uploads below cannot run;
    # previously execution fell through and crashed with a NameError on
    # lead_id. Abort with a non-zero exit code instead.
    raise SystemExit(1)

# Send an attachment (this script uploads itself as a sample file)
with open("forwardfinancing.py", "rb") as file_binary:
    response = requests.post("https://api-staging.forwardfinancing.com/v1/attachment?lead_id={0}&filename=test.txt".format(lead_id),
        headers={"api_key": api_key},
        data=file_binary
    )

print(response.json())

if response.status_code == 202:
    print("It worked!")

with open("forwardfinancing.py", "rb") as file_binary:
    # Send an attachment base64 encoded
    file_string = base64.b64encode(file_binary.read())

# Send the same request, only with the encoded=true url param and the encoded
# body
response = requests.post("https://api-staging.forwardfinancing.com/v1/attachment?lead_id={0}&filename=test.txt&encoded=true".format(lead_id),
    headers={"api_key": api_key},
    data=file_string
)

print(response.json())

if response.status_code == 202:
    print("It worked!")
| 2.296875 | 2 |
Raspberry-Pi/PWM colorful LED/PWM-colorful-LED.py | 5j54d93/NTOU-CS-Course-Works | 1 | 12773782 | import RPi.GPIO as GPIO
import time
import random
GPIO.setmode(GPIO.BOARD)
BTN_PIN = 11
BTN_PIN1 = 10
LED_PIN = 12
WAIT_TIME = 200
status = GPIO.LOW
GPIO.setup(BTN_PIN, GPIO.IN, pull_up_down = GPIO.PUD_UP)
# GPIO.setup(LED_PIN, GPIO.OUT, initial = status)
R_pin = 33 # R:33 號腳位(第 33 根 pin)
G_pin = 35 # G:35 號腳位(第 35 根 pin)
B_pin = 37 # B:37 號腳位(第 37 根 pin)
GPIO.setup(R_pin, GPIO.OUT)
GPIO.setup(G_pin, GPIO.OUT)
GPIO.setup(B_pin, GPIO.OUT)
R_pwm = GPIO.PWM(R_pin, 100)
G_pwm = GPIO.PWM(G_pin, 100)
B_pwm = GPIO.PWM(B_pin, 100)
# use python RPi.GPIO, square wave is 70k Hz
# use python wiringpi2 or bindings, square wave is 28k Hz
# use C wiringPi, square wave is 4.1-4.6M Hz
def check_RGB_range(R, G, B):
    """Clamp each RGB component into the valid 0..255 range."""
    R = min(max(R, 0), 255)
    G = min(max(G, 0), 255)
    B = min(max(B, 0), 255)
    return R, G, B
# Start the colour cycle at white; `state` selects which channels fade on
# each step and `interval` is the per-step change (32 evenly divides 256).
R,G,B = 255,255,255
state = 0
interval = 32

# Start all channels with 0% duty cycle (LED off).
R_pwm.start(0)
G_pwm.start(0)
B_pwm.start(0)
def mycallback(channel):
    """Button callback: fade through a fixed colour cycle forever.

    Each `state` fades one or two channels by `interval` per step; once
    R,G,B reach a primary/secondary colour the next state is selected.
    NOTE(review): this loops forever inside a GPIO event callback and
    never returns; confirm that is intended.
    """
    print("Button pressed @", time.ctime())
    global R,G,B,state
    while(1):
        # Apply the fade step for the current state.
        if (state == 0):
            R = R
            G = G - interval
            B = B - interval
        if (state == 1):
            R = R - interval
            G = G + interval
            B = B
        if (state == 2):
            R = R
            G = G - interval
            B = B + interval
        if (state == 3):
            R = R + interval
            G = G
            B = B
        if (state == 4):
            R = R - interval
            G = G + interval
            B = B
        if (state == 5):
            R = R + interval
            G = G
            B = B - interval
        if (state == 6):
            R = R
            G = G - interval
            B = B
        R,G,B = check_RGB_range(R,G,B)
        # Advance to the next state when a target colour is reached.
        if(R == 255 and G == 255 and B == 255): state = 0 # WHITE
        if(R == 255 and G == 0 and B == 0): state = 1 # RED
        if(R == 0 and G == 255 and B == 0): state = 2 # GREEN
        if(R == 0 and G == 0 and B == 255): state = 3 # BLUE
        if(R == 255 and G == 0 and B == 255): state = 4 # Magenta
        if(R == 0 and G == 255 and B == 255): state = 5 # cyan-blue
        if(R == 255 and G == 255 and B == 0): state = 6 # YELLOW
        # mapping: scale 0..255 component down to the 0..100 duty-cycle range
        R_mapping = int (R / 255 * 100)
        G_mapping = int (G / 255 * 100)
        B_mapping = int (B / 255 * 100)
        R_pwm.ChangeDutyCycle(R_mapping)
        G_pwm.ChangeDutyCycle(G_mapping)
        B_pwm.ChangeDutyCycle(B_mapping)
        print("R,G,B:\t",R,G,B,"\t mapping: ",R_mapping,G_mapping,B_mapping)
        time.sleep(0.3)
    #GPIO.output(LED_PIN, status)
def mycallback1(channel):
    """Button callback: jump to a random state in the colour cycle, then
    fade through the same RGB state machine as mycallback forever.

    NOTE(review): like mycallback, this loops forever inside a GPIO event
    callback and never returns; confirm that is intended.
    """
    print("Button pressed @", time.ctime())
    global R,G,B,state
    # Bug fix: random.random() takes no arguments (it returns a float in
    # [0, 1)); picking a random state index requires random.randint.
    state = random.randint(0, 6)
    while(1):
        # Apply the fade step for the current state (states are mutually
        # exclusive, so elif is equivalent to the original if-chain).
        if (state == 0):
            G = G - interval
            B = B - interval
        elif (state == 1):
            R = R - interval
            G = G + interval
        elif (state == 2):
            G = G - interval
            B = B + interval
        elif (state == 3):
            R = R + interval
        elif (state == 4):
            R = R - interval
            G = G + interval
        elif (state == 5):
            R = R + interval
            B = B - interval
        elif (state == 6):
            G = G - interval
        R,G,B = check_RGB_range(R,G,B)
        # Advance to the next state when a target colour is reached.
        if(R == 255 and G == 255 and B == 255): state = 0 # WHITE
        if(R == 255 and G == 0 and B == 0): state = 1 # RED
        if(R == 0 and G == 255 and B == 0): state = 2 # GREEN
        if(R == 0 and G == 0 and B == 255): state = 3 # BLUE
        if(R == 255 and G == 0 and B == 255): state = 4 # Magenta
        if(R == 0 and G == 255 and B == 255): state = 5 # cyan-blue
        if(R == 255 and G == 255 and B == 0): state = 6 # YELLOW
        # mapping: scale 0..255 component down to the 0..100 duty-cycle range
        R_mapping = int (R / 255 * 100)
        G_mapping = int (G / 255 * 100)
        B_mapping = int (B / 255 * 100)
        R_pwm.ChangeDutyCycle(R_mapping)
        G_pwm.ChangeDutyCycle(G_mapping)
        B_pwm.ChangeDutyCycle(B_mapping)
        print("R,G,B:\t",R,G,B,"\t mapping: ",R_mapping,G_mapping,B_mapping)
        time.sleep(0.3)
try:
    # Bug fix: pass the callback functions themselves.  The original code
    # used callback=mycallback(11), which *calls* the callback immediately
    # (blocking forever in its infinite loop) instead of registering it.
    GPIO.add_event_detect(BTN_PIN, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
    GPIO.add_event_detect(BTN_PIN1, GPIO.FALLING, callback=mycallback1, bouncetime=WAIT_TIME)
    # Idle loop: all work happens in the edge-detect callbacks.
    while True:
        time.sleep(10)
except KeyboardInterrupt:
    print("Exception: KeyboardInterrupt")
finally:
    # Always release the GPIO pins on exit.
    GPIO.cleanup()
| 3.140625 | 3 |
pyelong/api/hotel/__init__.py | DeanThompson/pyelong | 1 | 12773783 | # -*- coding: utf-8 -*-
from ..base import ApiBase
from .order import Order
from .data import Data
from .incr import Incr
__all__ = 'Hotel'
class Hotel(ApiBase):
    """Entry point for the Elong hotel APIs; sub-APIs are exposed as
    properties (order, data, incr, id, inv)."""

    _category = ''

    def list(self, **kwargs):
        """Hotel list search, method name: hotel.list

        Docs
        ~~~~

        - http://open.elong.com/wiki/Hotel.list
        """
        return self._request('list', **kwargs)

    def detail(self, **kwargs):
        """Hotel detail search, method name: hotel.detail

        Docs
        ~~~~

        - http://open.elong.com/wiki/Hotel.detail
        """
        return self._request('detail', **kwargs)

    @property
    def order(self):
        # Hotel order sub-API.
        return Order(self._client)

    @property
    def data(self):
        # Hotel static data sub-API.
        return Data(self._client)

    @property
    def incr(self):
        # Incremental update sub-API.
        return Incr(self._client)

    @property
    def id(self):
        # Hotel id list sub-API.
        return ID(self._client)

    @property
    def inv(self):
        # Inventory sub-API.
        return Inv(self._client)
class ID(ApiBase):
    """Hotel id sub-API (hotel.id.*)."""

    _category = 'hotel'

    def list(self, **kwargs):
        """Hotel id list search, method name: hotel.id.list

        Docs
        ~~~~

        - http://open.elong.com/wiki/Hotel.id.list
        """
        return self._request('list', **kwargs)
class Inv(ApiBase):
    """Hotel inventory sub-API (hotel.inv.*)."""

    _category = 'hotel'

    def validate(self, **kwargs):
        """Inventory validation, method name: hotel.inv.validate

        Docs
        ~~~~

        - http://open.elong.com/wiki/Hotel.inv.validate
        """
        return self._request('validate', **kwargs)
| 2.59375 | 3 |
scripts/calibrate_dark_current.py | mjerrar/duo3d_ros | 5 | 12773784 | <filename>scripts/calibrate_dark_current.py
#!/usr/bin/python
# Copyright (c) 2016 AIT, ETH Zurich. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name AIT nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# File: calibrate_dark_current.py
# Created on: 09.03.16
# Author: <NAME>
from __future__ import print_function, division
import os
import rospy
from ait_ros_messages.msg import VioSensorMsg
from std_msgs.msg import String
import rospkg
from cv_bridge import CvBridge
import cv2
import numpy as np
def vio_sensor_cb(data):
    """Accumulate stereo frames; after num_samples frames, average them.

    Sums the left/right images into the global `imgs` dict and, once
    `num_samples` frames have been collected, divides by the count to get
    the mean dark frame and clears `active` so the main loop can proceed.
    """
    global cnt, active, imgs

    num_samples = 100  # number of image samples to take

    if not active:
        # Bug fix: averaging already finished.  Previously the callback
        # kept running after `active` was cleared and, because the
        # `cnt == num_samples` check no longer matched, continued
        # accumulating frames into the already-averaged images.
        return

    if cnt == num_samples:
        imgs['l'] /= num_samples
        imgs['r'] /= num_samples

        active = 0
        return

    left = np.float32(CvBridge().imgmsg_to_cv2(data.left_image, "mono8"))
    right = np.float32(CvBridge().imgmsg_to_cv2(data.right_image, "mono8"))

    if cnt == 0:
        imgs['l'] = left
        imgs['r'] = right
    else:
        cv2.accumulate(left, imgs['l'])
        cv2.accumulate(right, imgs['r'])

    cnt += 1
def device_serial_nr_cb(data):
    # Store the device serial number published by the driver; it selects
    # the per-device calibration directory in the main block below.
    global device_serial_nr
    device_serial_nr = data.data
if __name__ == "__main__":
rospy.init_node('calibrate_dark_current')
device_serial_nr = None
cnt = 0
active = 1
imgs = {'l': [], 'r': []}
print('To calibrate the dark current, make sure the lenses are completely covered.')
raw_input('Press enter to start calibration...')
rospy.Subscriber("/vio_sensor", VioSensorMsg, vio_sensor_cb, queue_size=1)
rospy.Subscriber("/vio_sensor/device_serial_nr", String, device_serial_nr_cb, queue_size=1)
rate = rospy.Rate(10)
while active and not rospy.is_shutdown():
rate.sleep()
if not rospy.is_shutdown():
alpha = 0.2 # why is this necessary? I don't know but it makes the result better
left = cv2.convertScaleAbs(imgs['l'], alpha=alpha)
right = cv2.convertScaleAbs(imgs['r'], alpha=alpha)
duo_path = rospkg.RosPack().get_path('duo3d_ros')
# select lens
lenses = os.listdir(os.path.join(duo_path, 'calib', device_serial_nr))
lenses = [lens for lens in lenses if os.path.isdir(os.path.join(duo_path, 'calib', device_serial_nr, lens))]
if len(lenses) == 1:
print('Found one lens: {}. Using that one.'.format(lenses[0]))
lens = lenses[0]
else:
print('Found several lenses:')
for i, lens in enumerate(lenses):
print('{}: {}'.format(i+1, lens))
selection = int(raw_input('Select the lens you want by providing the appropriate number: '))
if selection < 1 or selection > len(lenses):
raise Exception('The provided number {} is not in the valid range [{}:{}]'.format(selection, 1, len(lenses)))
lens = lenses[selection-1]
resolution = '{}x{}'.format(left.shape[0], left.shape[1])
cv2.imwrite(os.path.join(duo_path, 'calib', device_serial_nr, lens, resolution, 'dark_current_l.bmp'), left)
cv2.imwrite(os.path.join(duo_path, 'calib', device_serial_nr, lens, resolution, 'dark_current_r.bmp'), right)
| 1.257813 | 1 |
eyes_detect.py | kmranrg/FaceID | 2 | 12773785 | import cv2
eyeCascade = cv2.CascadeClassifier('haarcascade_eye.xml')
video_capture = cv2.VideoCapture(0)
while True:
ret,frame = video_capture.read()
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
eyes = eyeCascade.detectMultiScale(gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(30,30))
for(x,y,w,h) in eyes:
cv2.rectangle(frame,(x,y),(x+w, y+h),(0,0,255),2)
cv2.imshow("Video",frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.DestroyAllWindows()
| 2.828125 | 3 |
ph5/utilities/validation.py | PIC-IRIS/PH5 | 21 | 12773786 | <reponame>PIC-IRIS/PH5
"""
common functions for validation
"""
import tables
import re
# %-format templates for the per-field mismatch fragments used when building
# response-file validation error messages. Each is filled from the per-station
# `info` dict (keys: array, smodel, dmodel, spr, sprm, gain).
ERRORS = {'smodel': "Array_t_%(array)s:sensor_model=%(smodel)s",
          'dmodel': "Array_t_%(array)s:das_model=%(dmodel)s",
          'spr': "Array_t_%(array)s:sr=%(spr)s",
          'sprm': "Array_t_%(array)s:srm=%(sprm)s",
          'gain': "Array_t_%(array)s:gain=%(gain)s"}
def combine_errors(check_fail, incomplete_errmsg, info):
    """Combine the 'incomplete' message with the per-field mismatch message.

    :param check_fail: set of ERRORS keys for the name parts that mismatched
    :param incomplete_errmsg: '' or 'incomplete'
    :param info: station info dict used to fill the ERRORS templates
    :return: the non-empty parts joined with ' or ' ('' if both are empty)
    """
    parts_errmsg = ''
    if check_fail != set():
        parts_errmsg = ("inconsistent with "
                        + ' '.join([ERRORS[k] for k in check_fail])) % info
    # Fix: the original joined both parts unconditionally and then called
    # .strip(" or "), which strips the *character set* {' ', 'o', 'r'} and
    # could eat trailing letters of a model name (e.g. '...sensor' -> '...senso').
    # Joining only the non-empty parts gives the intended result directly.
    return ' or '.join(p for p in (incomplete_errmsg, parts_errmsg) if p)
def addLog(errmsg, unique_errors, logger=None, logType='error'):
    """Record (errmsg, logType) in *unique_errors* and optionally log it."""
    unique_errors.add((errmsg, logType))
    if logger is None:
        return
    if logType == 'error':
        logger.error(errmsg)
    elif logType == 'warning':
        logger.warning(errmsg)
def check_resp_data(ph5table, path, header, checked_data_files, n_i):
    """
    Check if response data is loaded for the response filename.

    :param ph5table: table ph5
    :param path: path filled in the response file name of response_t (str)
    :param header: string of array-station-channel to help users identify
        where the problem belong to (str)
    :param checked_data_files: cache of already-checked response filenames
        mapped to '' (ok) or the error message raised for them
    :param n_i: response index
    :return: None; raises Exception if there is no response data
    """
    name = path.split('/')[-1]
    # Serve cached verdicts so each filename is inspected only once.
    cached = checked_data_files.get(name)
    if cached is not None:
        if cached != '':
            raise Exception(cached)
        return
    checked_data_files[name] = ''
    try:
        ph5table.get_node(ph5table.root.Experiment_g.Responses_g, name)
    except tables.NoSuchNodeError:
        errmsg = ("%sResponse_t[%s]:No response data loaded for %s."
                  % (header, n_i, name))
        checked_data_files[name] = errmsg
        raise Exception(errmsg)
    return
def check_metadatatoph5_format(Response_t, info, header, errors, logger):
    """
    Check response_file_das_a in response_t matches with info from
    station entry
    :param Response_t: response entry according to info[n_i]
    :param info: info needed from each station:
        dict {n_i, array, sta, cha_id, cha_code, dmodel, smodel, spr, sprm}
    :para header: string of array-station-channel to help user identify where
        the problem belong to (str)
    :para errors: list of errors
    :param logger: logger of the caller
    :return:
        if there are more than 3 parts return False
        if all (3) parts pass checks return True
        if 2 parts pass checks, decide that this is created from metadatatoph5
            return True and log as error
        if less than 2 parts pass checks return incomplete_errmsg, m_check_fail
            to be included if check for resp_load format also failed.
    """
    if Response_t['response_file_das_a'] == '':
        # blank response_file_das_a return False in check_response_info
        # to throw error
        return True
    # metadatatoph5-style names look like [das_model]_[sensor_model]_[sr][cha]
    response_fname = Response_t['response_file_das_a'].split('/')[-1]
    parts = response_fname.split('_')
    if len(parts) > 3:
        return False
    incomplete_errmsg = ''
    if len(parts) < 3:
        incomplete_errmsg = "incomplete"
    # Count how many of the three name parts agree with the station info;
    # record the keys of the mismatching parts for the error message.
    count_corr_parts = 0
    m_check_fail = set()
    try:
        if parts[0] == info['dmodel_no_special_char']:
            count_corr_parts += 1
        else:
            m_check_fail.add('dmodel')
        if parts[1] == info['smodel_no_special_char']:
            count_corr_parts += 1
        else:
            m_check_fail.add('smodel')
        # Split off the leading digits of the [sr][cha] part; the trailing
        # channel code is deliberately not checked.
        sr = re.split(r'(\d+)', parts[2])[1]
        if sr == str(int(info['spr'])):
            count_corr_parts += 1
        else:
            m_check_fail.add('spr')
    except IndexError:
        pass
    if count_corr_parts >= 2:
        # at least 2 parts correct, decide this is created from metadatatoph5
        # (the original comment said "resp_load", which was a typo)
        if m_check_fail != set([]) or incomplete_errmsg != '':
            errmsg = combine_errors(m_check_fail, incomplete_errmsg, info)
            errmsg = ("{0}Response_t[{1}]:response_file_das_a '{2}' is {3}. "
                      "Please check with metadatatoph5 format "
                      "[das_model]_[sensor_model]_[sr][cha] "
                      "(check doesn't include [cha])."
                      ).format(header,
                               info['n_i'],
                               response_fname,
                               errmsg)
            addLog(errmsg, errors, logger, logType='error')
        return True
    else:
        # if less than 2 parts correct, return checks to be included if check
        # for resp_load format also failed.
        return incomplete_errmsg, m_check_fail
def check_das_resp_load_format(Response_t, info, header, errors, logger,
                               m_check_ret):
    """
    Check response_file_das_a in response_t matches with info from
    station entry
    :param Response_t: response entry according to info[n_i]
    :param info: info needed from each station:
        dict {n_i, array, sta, cha_id, cha_code, dmodel, smodel, spr, sprm}
    :param header: string of array-station-channel to help user identify where
        the problem belong to (str)
    :param errors: list of errors
    :param logger: logger of the caller
    :param m_check_ret: if incomplete_errmsg, parts_errmsg are return
        from check_metadatatoph5_format, they will be included if this check
        is also failed
    :log as error and return for the following cases:
        + more than 4 parts
        + if 3 parts corrects, decide this is created from resp_load so
            errmsg only includes resp_load's checks and format
        + if less than 3 parts corrects, cannot decide this is created from
            resp_load or metadatatoph5, so errmsg includes resp_load's
            checks and formats and metadatatoph5's if m_check_ret!=True
    """
    if Response_t['response_file_das_a'] == '':
        # blank response_file_das_a return False in check_response_info
        # to throw error
        return True
    info['gain'] = Response_t['gain/value_i']
    # resp_load-style names look like [das_model]_[sr]_[srm]_[gain]
    response_fname = Response_t['response_file_das_a'].split('/')[-1]
    r_format = "resp_load format [das_model]_[sr]_[srm]_[gain]"
    m_format = ("metadatatoph5 format [das_model]_[sensor_model]_[sr][cha] "
                "(check doesn't include [cha])")
    parts = response_fname.split('_')
    if len(parts) > 4:
        errmsg = ("%sResponse_t[%s]:response_file_das_a '%s' has too many "
                  "parts. Please check with format %s or %s"
                  % (header, info['n_i'], response_fname, m_format, r_format))
        addLog(errmsg, errors, logger, logType='error')
        return
    incomplete_errmsg = ''
    if len(parts) < 4:
        incomplete_errmsg = "incomplete"
    # Count how many of the four name parts agree with the station info;
    # record the keys of the mismatching parts for the error message.
    count_corr_parts = 0
    r_check_fail = set()
    try:
        if parts[0] == info['dmodel_no_special_char']:
            count_corr_parts += 1
        else:
            r_check_fail.add('dmodel')
        if parts[1] == str(int(info['spr'])):
            count_corr_parts += 1
        else:
            r_check_fail.add('spr')
        if parts[2] == str(int(info['sprm'])):
            count_corr_parts += 1
        else:
            r_check_fail.add('sprm')
        if parts[3] == str(int(info['gain'])):
            count_corr_parts += 1
        else:
            r_check_fail.add('gain')
    except IndexError:
        pass
    # Nothing is logged when every present part matched and none were missing.
    if r_check_fail != set([]) or incomplete_errmsg != '':
        errmsg = combine_errors(r_check_fail, incomplete_errmsg, info)
        if count_corr_parts >= 3 or m_check_ret is False:
            # at least 3 parts correct, decide this is created from resp_load
            errmsg = ("{0}Response_t[{1}]:response_file_das_a '{2}' is {3}. "
                      "Please check with {4}."
                      ).format(header,
                               info['n_i'],
                               response_fname,
                               errmsg.strip(),
                               r_format)
        else:
            # if less than 3 parts correct, include the check and format
            # of checking metadatatoph5 format to error message
            if incomplete_errmsg != 'incomplete':
                incomplete_errmsg = m_check_ret[0]
            for c in m_check_ret[1]:
                r_check_fail.add(c)
            errmsg = combine_errors(r_check_fail, incomplete_errmsg, info)
            errmsg = ("{0}Response_t[{1}]:response_file_das_a {2} is {3}. "
                      "Please check with {4} or {5}."
                      ).format(header,
                               info['n_i'],
                               response_fname,
                               errmsg.strip(),
                               r_format,
                               m_format)
        addLog(errmsg, errors, logger, logType='error')
def check_sensor(Response_t, info, header, errors, logger):
    """
    Check that response_file_sensor_a in the response entry matches the
    sensor model from the station entry.

    :param Response_t: response entry according to info[n_i]
    :param info: per-station info dict
        {n_i, array, sta, cha_id, cha_code, dmodel, smodel, spr, sprm}
    :param header: array-station-channel string that locates the problem
    :param errors: set of unique errors
    :param logger: logger of the caller
    :Logs an error in two cases:
        response_file_sensor_a is blank while a sensor model exists;
        response_file_sensor_a does not match the sensor model.
    """
    sensor_fname = Response_t['response_file_sensor_a'].split('/')[-1]
    if sensor_fname == '' and info['smodel'] != '':
        errmsg = ("%sResponse_t[%s]:response_file_sensor_a is blank while "
                  "sensor model exists." % (header, info['n_i']))
        addLog(errmsg, errors, logger, logType='error')
    elif info['smodel_no_special_char'] != sensor_fname:
        errmsg = ("{0}Response_t[{1}]:response_file_sensor_a '{2}' is "
                  "inconsistent with {3}."
                  ).format(header,
                           info['n_i'],
                           sensor_fname,
                           ERRORS['smodel'] % info)
        addLog(errmsg, errors, logger, logType='error')
    return
def check_response_info(info, ph5, checked_data_files, errors, logger):
    """
    Check in response info for each station entry if the response filenames are
    correct (das filename created by metadata or das/sensor filename
    created by resp_load) and the response data are loaded.
    :param info: info needed from each station:
        dict {n_i, sta, cha_id, cha_code, dmodel, smodel, spr, sprm}
    :param ph5: ph5 object
    :param checked_data_files: set of resp filenames that check_response_info()
        has run for it
    :param errors: list of errors from caller
    :param logger: logger of the caller
    :return:
        False, list of error messages if no response data loaded
        (d_path, s_path) in which d_path and s_path are response paths for
            das or sensor if response data are loaded for the file name stated
            in response table
    """
    Response_t = ph5.get_response_t_by_n_i(info['n_i'])
    header = ("array {0} station {1}, channel {2}: ").format(info['array'],
                                                             info['sta'],
                                                             info['cha_id'])
    if Response_t is None:
        errmsg = ("%sResponse_t has no entry for n_i=%s"
                  % (header, info['n_i']))
        return False, [errmsg]
    if info['n_i'] == -1:
        # metadata no response signal
        errmsg = ("%sResponse_t[-1]:Metadata response with n_i=-1 has no "
                  "response data." % header)
        return False, [errmsg]
    # Strip separators so the models can be compared against filename parts.
    # NOTE(review): str.translate(None, chars) is Python 2 syntax; under
    # Python 3 this raises TypeError -- confirm this module targets Python 2.
    info['dmodel_no_special_char'] = info['dmodel'].translate(None, ' ,/-=._')
    info['smodel_no_special_char'] = info['smodel'].translate(None, ' ,/-=._')
    # m_check_ret is True when the name is metadatatoph5-style; otherwise it
    # carries the partial results for the resp_load-format check to reuse.
    m_check_ret = check_metadatatoph5_format(
        Response_t, info, header, errors, logger)
    if m_check_ret is not True:
        check_sensor(
            Response_t, info, header, errors, logger)
        check_das_resp_load_format(
            Response_t, info, header, errors, logger, m_check_ret)
    das_resp_path = Response_t['response_file_das_a']
    sens_resp_path = Response_t['response_file_sensor_a']
    data_errors = []
    if das_resp_path == '':
        errmsg = "%sresponse_file_das_a is blank." % header
        data_errors.append(errmsg)
    else:
        try:
            check_resp_data(ph5.ph5, das_resp_path, header,
                            checked_data_files, info['n_i'])
        except Exception as e:
            data_errors.append(str(e))
    if sens_resp_path != '':
        try:
            check_resp_data(ph5.ph5, sens_resp_path, header,
                            checked_data_files, info['n_i'])
        except Exception as e:
            data_errors.append(str(e))
    if data_errors != []:
        return False, data_errors
    return das_resp_path, sens_resp_path
def check_resp_unique_n_i(ph5, errors, logger=None):
    """Report duplicated n_i values in the response table.

    Returns True when every n_i is unique, otherwise logs and returns the
    error message.
    """
    all_n_i = [entry['n_i'] for entry in ph5.Response_t['rows']]
    duplicates = set(n_i for n_i in all_n_i if all_n_i.count(n_i) > 1)
    if duplicates:
        errmsg = ("Response_t n_i(s) duplicated: %s. "
                  "Try to rerun resp_load to see if it fix the problem."
                  % ','.join(map(str, duplicates)))
        addLog(errmsg, errors, logger)
        return errmsg
    return True
def check_has_response_filename(Response_t, errors, logger):
    """Return True if any row of the response table names a das response file;
    otherwise log and return an explanatory error message."""
    if any(row['response_file_das_a'] != '' for row in Response_t['rows']):
        return True
    errmsg = ("Response table does not contain any response file names. "
              "Check if resp_load has been run or if metadatatoph5 input "
              "contained response information.")
    addLog(errmsg, errors, logger)
    return errmsg
def check_lat_lon_elev(station):
    """Validate a station's longitude/latitude/elevation values and units.

    Returns (errors, warnings): out-of-range coordinates go to errors;
    zero values and missing unit strings go to warnings.
    """
    errors = []
    warnings = []
    # (axis key, human label, inclusive (low, high) range or None)
    checks = (('X', 'longitude', (-180, 180)),
              ('Y', 'latitude', (-90, 90)),
              ('Z', 'elevation', None))
    for axis, label, bounds in checks:
        value = station['location/%s/value_d' % axis]
        if value == 0:
            warnings.append(
                "Channel %s seems to be 0. Is this correct???" % label)
        if bounds is not None and not bounds[0] <= float(value) <= bounds[1]:
            errors.append("Channel %s %s not in range [%s,%s]"
                          % (label, value, bounds[0], bounds[1]))
        if station['location/%s/units_s' % axis] in [None, '']:
            warnings.append(
                "No Station location/%s/units_s value found." % axis)
    return errors, warnings
| 2.3125 | 2 |
tests/mlir/polybench/python/adi.py | chhzh123/heterocl | 0 | 12773787 | ###################################################################
# This implementation is based on the following paper:
#
# <NAME> and <NAME>. Automatic data and computation
# decomposition on distributed-memory parallel computers.
# ACM Transactions on Programming Languages and Systems,
# 24(1):1–50, Jan. 2002.
#
# Algorithm of Figure 5
#
###################################################################
import heterocl as hcl
import numpy as np
def top_adi(Nx=20, Ny=20, NT=20, Dx=0.1, Dy=0.1, DT=0.1, B1=0.1, B2=0.1,
            mu1=0.1, mu2=0.1, a=0.1, b=0.1, c=0.1, d=0.1, e=0.1, f=0.1,
            dtype=hcl.Int(), target=None):
    """Build and compile the HeteroCL ADI kernel.

    The kernel runs NT sweeps over the Nx-by-Ny grids u, v, p, q, each sweep
    doing a column pass (writes v) followed by a row pass (writes u), with
    p and q holding the Thomas-algorithm coefficients.

    Returns the compiled function taking (u, v, p, q) numpy arrays.
    """
    hcl.init(dtype)
    # Placeholders declare the four in/out grids for the HeteroCL schedule.
    u = hcl.placeholder((Nx, Ny), "u")
    v = hcl.placeholder((Nx, Ny), "v")
    p = hcl.placeholder((Nx, Ny), "p")
    q = hcl.placeholder((Nx, Ny), "q")

    def kernel_adi(u, v, p, q):
        def sweep(u, v, p, q):
            # Column sweep: forward elimination (L2) then back substitution (L3).
            with hcl.for_(1, Ny - 1, name="L1") as i:
                v[0][i] = hcl.scalar(1.0)
                p[i][0] = hcl.scalar(0.0)
                q[i][0] = v[0][i]
                with hcl.for_(1, Nx - 1, name="L2") as j:
                    p[i][j] = -1.0 * c / (a * p[i][j - 1] + b)
                    q[i][j] = (-1.0 * d * u[j][i - 1] + (1.0 + 2.0 * d) * u[j][i] - f * u[j][i + 1] - a * q[i][j - 1])/(a * p[i][j - 1] + b)
                v[Nx - 1][i] = hcl.scalar(1.0)
                with hcl.for_(Nx - 2, 0, -1, name="L3") as j:
                    v[j][i] = p[i][j] * v[j + 1][i] + q[i][j]
            # Row sweep: forward elimination (L5) then back substitution (L6).
            with hcl.for_(1, Nx - 1, name="L4") as i:
                u[i][0] = hcl.scalar(1.0)
                p[i][0] = hcl.scalar(0.0)
                q[i][0] = u[i][0]
                with hcl.for_(1, Ny - 1, name="L5") as j:
                    p[i][j] = -1.0 * f / (d * p[i][j - 1] + e)
                    q[i][j] = (-1.0 * a * v[i - 1][j] + (1.0 + 2 * a) * v[i][j] - c * v[i + 1][j] - d * q[i][j - 1])/(d * p[i][j - 1] + e)
                u[i][Ny - 1] = hcl.scalar(1.0)
                with hcl.for_(Ny - 2, 0, -1, name="L6") as j:
                    u[i][j] = p[i][j] * u[i][j + 1] + q[i][j]
        # Repeat the sweep NT times, mutating the grids in place.
        hcl.mutate((NT,), lambda m: sweep(u, v, p, q), "main_loop")

    s = hcl.create_schedule([u, v, p, q], kernel_adi)

    #### Apply customizations ####

    main_loop = kernel_adi.main_loop

    # Optional pipelining of the two outer sweep loops (left disabled).
    #s[main_loop].pipeline(main_loop.L1)
    #s[main_loop].pipeline(main_loop.L4)

    #### Apply customizations ####

    return hcl.build(s, target=target)
def adi_golden(N, TSTEPS, Dx, Dy, DT, B1, B2, mu1, mu2, a, b, c, d, e, f, u, v, p, q):
    """Reference (pure-Python) ADI solver.

    Alternates a column sweep (writes v) and a row sweep (writes u) for
    TSTEPS time steps, using p and q as Thomas-algorithm scratch arrays.
    All four N-by-N grids are updated in place.
    """
    for step in range(TSTEPS):
        # Column sweep: forward elimination followed by back substitution.
        for col in range(1, N - 1):
            v[0][col] = 1.0
            p[col][0] = 0.0
            q[col][0] = v[0][col]
            for k in range(1, N - 1):
                p[col][k] = -1.0 * c / (a * p[col][k - 1] + b)
                q[col][k] = (-1.0 * d * u[k][col - 1] + (1.0 + 2.0 * d) * u[k][col] - f * u[k][col + 1] - a * q[col][k - 1])/(a * p[col][k - 1] + b)
            v[N - 1][col] = 1.0
            # Back substitution, bottom-up.
            for k in reversed(range(1, N - 1)):
                v[k][col] = p[col][k] * v[k + 1][col] + q[col][k]
        # Row sweep: forward elimination followed by back substitution.
        for row in range(1, N - 1):
            u[row][0] = 1.0
            p[row][0] = 0.0
            q[row][0] = u[row][0]
            for k in range(1, N - 1):
                p[row][k] = -1.0 * f / (d * p[row][k - 1] + e)
                q[row][k] = (-1.0 * a * v[row - 1][k] + (1.0 + 2.0 * a) *
                             v[row][k] - c * v[row + 1][k] - d * q[row][k - 1])/(d * p[row][k - 1] + e)
            u[row][N - 1] = 1.0
            for k in reversed(range(1, N - 1)):
                u[row][k] = p[row][k] * u[row][k + 1] + q[row][k]
def main(Nx=20, Ny=20, NT=20, Dx=0.1, Dy=0.1, DT=0.1, B1=0.1, B2=0.1,
         mu1=0.1, mu2=0.1, a=0.1, b=0.1, c=0.1, d=0.1, e=0.1, f=0.1,
         dtype=hcl.Float(32), target=None):
    """Build the ADI kernel and run it once on random float32 grids."""
    u = np.random.randint(10, size=(Nx, Ny)).astype(np.float32)
    v = np.random.randint(10, size=(Nx, Ny)).astype(np.float32)
    p = np.random.randint(10, size=(Nx, Ny)).astype(np.float32)
    q = np.random.randint(10, size=(Nx, Ny)).astype(np.float32)
    # Fix: the compiled kernel used to be bound to the name `f`, shadowing
    # the coefficient parameter `f` that was just passed into top_adi.
    kernel = top_adi(Nx, Ny, NT, Dx, Dy, DT, B1, B2, mu1, mu2,
                     a, b, c, d, e, f, dtype, target)
    kernel(u, v, p, q)


if __name__ == "__main__":
    main()
Crawl/Code/com.vitan.test/maoyan_pool_csv.py | ivitan/LearnPython | 1 | 12773788 | <filename>Crawl/Code/com.vitan.test/maoyan_pool_csv.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18-10-28 下午12:07
# @Author : Vitan
# @File : maoyan_pool_csv.py
import requests
import re
import pandas
from multiprocessing import Pool
from requests.exceptions import RequestException
def get_one_page(url):
    """Fetch one Maoyan board page; return its HTML, or None on any failure."""
    ua = ('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
          '(KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36')
    try:
        response = requests.get(url, headers={'user-agent': ua})
        # Only a 200 response counts as a successful fetch.
        return response.text if response.status_code == 200 else None
    except RequestException:
        return None
def parse_one_page(html):
    """Extract the ranked movie entries from one page of board HTML.

    Returns a list of dicts keyed by the original Chinese field names.
    """
    pattern = re.compile('<dd>.*?board-index.*?>(\d+)</i>'
                         + '.*?<p.*?title="(.*?)".*?</p>.*?star">(.*?)</p>'
                         + '.*?releasetime">(.*?)</p>.*?integer">(.*?)'
                         + '<.*?fraction">(.*?)</i>', re.S)
    # Field order from the regex: rank, title, stars, release time,
    # score integer part, score fraction part.
    return [{
        '排名': rank,
        '电影名': title,
        '主演': stars.strip()[3:],
        '上映时间': release[5:],
        '评分': integer + fraction
    } for rank, title, stars, release, integer, fraction
        in re.findall(pattern, html)]
def write_to_csv(pageary):
    """Persist the scraped movie records to movies.csv in the working dir."""
    pandas.DataFrame(list(pageary)).to_csv('movies.csv')
def main(offset):
    """Scrape one page of the Maoyan top-100 board and write it to CSV.

    :param offset: board paging offset (0, 10, 20, ...)
    """
    url = 'https://maoyan.com/board/4?offset=' + str(offset)
    # Fix: the original passed the URL string straight into parse_one_page,
    # never fetching the page, so the regex matched nothing and the CSV was
    # always empty.
    html = get_one_page(url)
    if html is None:
        return
    # NOTE(review): every worker writes the same 'movies.csv', so concurrent
    # pages overwrite each other -- confirm whether per-page files are wanted.
    write_to_csv(parse_one_page(html))
if __name__ == '__main__':
    # Fan the ten board pages (offset 0, 10, ..., 90) out to a worker pool.
    # NOTE(review): each worker writes the same 'movies.csv', so the pages
    # clobber one another and only one page survives -- confirm intent.
    pool = Pool()
    pool.map(main, [i*10 for i in range(10)])
    pool.close()
    pool.join()
class Student:
    """A student identified by an id string and a display name."""

    def __init__(self, id: str, name: str) -> None:
        self.id = id
        self.name = name


class Course:
    """A course with credit hours and a per-student grade map."""

    def __init__(self, id: str, name: str, hours: int, grades = None) -> None:
        self.id = id
        self.name = name
        self.hours = hours
        # A falsy `grades` argument (None or an empty dict) starts a fresh map.
        self.grades = grades if grades else {}

    def add_grade(self, student: Student, grade: str):
        """Record *grade* for *student*, keyed by the student's id."""
        self.grades[student.id] = grade

    @staticmethod
    def convert_grade_to_points(grade: str) -> float:
        """Map a letter grade to GPA points; unknown grades map to 0."""
        scale = {
            "A+": 4.0, "A": 4.0, "A-": 3.7,
            "B+": 3.5, "B": 3.3, "B-": 3.0,
            "C+": 2.7, "C": 2.5, "C-": 2.3,
            "D": 2.0, "F": 0.0,
        }
        return scale.get(grade, 0)
API/UtilsNGSIv1.py | IoTCrawler/PEP-Proxy | 0 | 12773790 | #
#Copyright Odin Solutions S.L. All Rights Reserved.
#
#SPDX-License-Identifier: Apache-2.0
#
import json
import uuid
#import os
from subprocess import Popen, PIPE
def processUri(uri):
    """Prefix *uri* with /v1 unless it already starts with it
    (case-insensitively); on any error the input is returned unchanged."""
    try:
        if not str(uri).upper().startswith("/V1"):
            uri = "/v1" + uri
        return uri
    except:  # deliberately broad: any failure leaves the input untouched
        return uri
def validateMethodPath(method, path):
    """Placeholder validation hook: currently accepts every method/path pair."""
    return True
def processBody(method, uri, body, sPAE, rPAE, noEncryptedKeys):
    """Run the cipher pipeline on *body* for the given method/uri.

    Returns (body, state); on any unexpected failure the original body is
    returned together with False.
    """
    original_body = body
    try:
        # TODO: dispatch on method/uri once more request shapes are handled.
        body, state = processCypher(body, sPAE, rPAE, noEncryptedKeys)
        return body, state
    except:  # deliberately broad: never propagate, fall back to the input
        return original_body, False
#This process consider ONLY a JSON format.
def processCypher(body, sPAE, rPAE, noEncryptedKeys):
    """Encrypt the relevant attributes of a JSON *body*.

    Returns (body, True) on success, or (original body, False) if either the
    attribute selection or the encryption step fails.
    """
    original_body = body
    try:
        encryptAttributes, ok = obtainAttributesToCipher(
            body, sPAE, rPAE, noEncryptedKeys)
        if not ok:
            return original_body, False
        body, ok = cipherBodyAttributes(body, encryptAttributes)
        if not ok:
            return original_body, False
        return body, True
    except:  # deliberately broad: never propagate, fall back to the input
        return original_body, False
def obtainAttributesToCipher(body, sPAE, rPAE, noEncryptedKeys):
    """Select which body attributes must be encrypted.

    Currently a stub (TODO): always returns ([], True); returns ([], False)
    on failure.
    """
    attributes = []
    try:
        # TODO: populate `attributes` from body/sPAE/rPAE/noEncryptedKeys.
        return attributes, True
    except:
        return attributes, False
def getstatusoutput(command):
    """Run *command* (an argv list) and return (returncode, stdout_bytes).

    stderr is captured and discarded.
    """
    proc = Popen(command, stdout=PIPE, stderr=PIPE)
    stdout, _stderr = proc.communicate()
    return (proc.returncode, stdout)
#This process consider ONLY a JSON format.
def cipherBodyAttributes(body, encryptAttributes):
    """Encrypt the body attributes listed in *encryptAttributes*.

    Encryption itself is still a stub (TODO). Returns (body, True) on
    success, or (original body, False) when an attribute fails.
    """
    bodyBackUp = body
    try:
        for attribute in encryptAttributes:
            try:
                # TODO: encrypt the attribute's value inside *body*.
                print(attribute)
            except Exception as e:
                print(e)
                # Fix: the original evaluated the bare tuple `bodyBackUp, False`
                # (a no-op) instead of returning it, silently swallowing the
                # per-attribute failure.
                return bodyBackUp, False
        return body, True
    except:  # deliberately broad: never propagate, fall back to the input
        return bodyBackUp, False
def errorHeaders(method=None, message=None):
    """Build the common error-response headers.

    Returns (headers, chunked) where chunked is always False because the API
    never sends a Transfer-Encoding: chunked response.
    """
    # NOTE: per-method header variations (GET/POST/PATCH/PUT) were sketched
    # in the original source but never implemented; every method currently
    # shares the same header set.
    headers = {
        'Access-Control-Allow-Origin': '*',
        'Content-Type': 'application/json',
        'Fiware-Correlator': uuid.uuid4(),
    }
    return headers, False
def errorBody(method, code, title, details):
    """Build the JSON error payload returned to the client."""
    return {"code": code, "error": title, "details": details}
#def errorCode(method):
#
# return 400 | 2.5 | 2 |
# NFL team names used by the exercises.
# Fix: the original list was missing the opening quote on 'NY Giants',
# which made this module fail to parse (SyntaxError).
team_names = ['LA Chargers', 'LA Rams', 'NE Patriots', 'NY Giants', 'Chicago Bears']
| 1.242188 | 1 |
tests/scripts/script_test_gpu_1.py | mpelchat04/hypertrainer | 1 | 12773792 | import os
import time
# Assert that exactly one GPU is exposed through CUDA_VISIBLE_DEVICES
# (raises KeyError if the variable is unset), then idle briefly.
visible = os.environ['CUDA_VISIBLE_DEVICES']
print(f'gpu_id={visible}')
if visible == '' or len(visible.split(',')) != 1:
    raise Exception('There must be one visible gpu.')
time.sleep(0.5)
| 2.921875 | 3 |
run.py | ShiChristineChen/CustomerClassification_RFM_python | 0 | 12773793 |
"""This program will focus on developing a program package for the application of RFM (Recency, Frequency, Monetary Value)
model and output the customer classification results into a new document.
Name: <NAME>
Date:28/05/2020
"""
from app.saver import Saver
if __name__ == '__main__':
    # Rebuild the RFM outputs from scratch on every run.
    rfm_saver = Saver()
    rfm_saver.reset()
    rfm_saver.customer_to_csv()
    rfm_saver.customer_by_cate_chart()
| 2.609375 | 3 |
src/eAsisitent_scraper/scraper.py | PingWasFun/eAsistent-scraper | 0 | 12773794 | <gh_stars>0
import bs4.element
import datetime
import re
import requests
import time
from bs4 import BeautifulSoup
def request_schedule(
        school_id: str,
        class_id=0,
        professor=0,
        classroom=0,
        interest_activity=0,
        school_week=0,
        student_id=0,
        soup=False,
):
    """
    Request a schedule page from easistent.com.

    :param school_id: The ID of the school you want to get data for
    :type school_id: str
    :param class_id: class ID to filter by; 0 means all classes (optional)
    :param professor: professor ID to filter by; 0 means all (optional)
    :param classroom: classroom to filter by; 0 means all (optional)
    :param interest_activity: activity to filter by; 0 means all (optional)
    :param school_week: school week to fetch; 0 means the current week (optional)
    :param student_id: student ID to filter by; 0 means all students (optional)
    :param soup: when True, return a parsed BeautifulSoup object instead (optional)
    :raises ValueError: when the school id is rejected by the site
    :return: a requests.models.Response object (or BeautifulSoup when soup=True)
    """
    url = ("https://www.easistent.com/urniki/izpis/"
           "{}/{}/{}/{}/{}/{}/{}".format(school_id, class_id, professor,
                                         classroom, interest_activity,
                                         school_week, student_id))
    response = requests.get(url)
    # The site answers with one of these plain-text messages for bad schools.
    if response.text in ("Šola ni veljavna!", "Šola ni izbrana!"):
        raise ValueError("This school does not exist. school_id is invalid")
    if soup:
        return BeautifulSoup(response.text, "html5lib")
    return response
# Captured once at import time; code reading `today` will go stale if the
# process keeps running across midnight.
today = datetime.date.today()
def get_schedule_data(
        school_id: str,
        class_id=0,
        professor=0,
        classroom=0,
        interest_activity=0,
        school_week=0,
        student_id=0,
):
    """
    Date format is: YYYY-MM-DD
    If school id is invalid ValueError is raised
    :param school_id: The ID of the school you want to get data for
    :type school_id: str
    :param class_id: The ID of the class you want to get data for, 0 is all classes, defaults to 0 (optional)
    :param professor: The ID of the professor you want to get data for, 0 is all professors, defaults to 0 (optional)
    :param classroom: The classroom you want to get data for, 0 is all classrooms, defaults to 0 (optional)
    :param interest_activity: The activity you want to get data for, 0 is all interest activities, defaults to 0 (optional)
    :param school_week: school week that you want to get the data for, 0 is the current week, defaults to 0 (optional)
    :param student_id: The ID of the student you want to get the schedule for,0 is all students, defaults to 0 (optional)
    :return: A dictionary with the data.
    """
    # TODO: reduce complexity of the function,
    # better naming of variables,
    response = request_schedule(
        school_id=school_id,
        class_id=class_id,
        professor=professor,
        classroom=classroom,
        interest_activity=interest_activity,
        school_week=school_week,
        student_id=student_id,
    )
    request_time = int(time.time())
    soup = BeautifulSoup(response.text, "html5lib")
    table_rows = soup.select("body > table > tbody > tr")
    # count == -1 marks the header row; count2 walks the cells of a row.
    count: int = -1
    dates: list = []
    dates_formatted: list = []
    hour_times: list = []
    scraped_data: dict = {}
    # Week number: the digits pulled out of the first "body > div > span" text.
    current_week = int(
        "".join(
            re.findall(
                "[0-9]",
                [item.text.split(",")[0] for item in
                 soup.select("body > div > span")][
                    0
                ],
            )
        )
    )
    current_class = str(
        [item.text.strip() for item in soup.select("body > div > strong")][0]
    )
    for table_row in table_rows:
        if count == -1:
            # Header row: its cells hold the day/date labels for the week.
            for days in table_row:
                if type(days) == bs4.element.Tag:
                    day = days.select("div")
                    if day[0].text != "Ura":
                        temp_date = re.findall(r"[^A-z,. ]+", day[1].text)
                        # The page omits the year, so the current year is
                        # assumed -- NOTE(review): wrong around New Year.
                        temp_datetime = datetime.datetime(
                            day=int(temp_date[0]),
                            month=int(temp_date[1]),
                            year=today.year,
                        )
                        dates_formatted.append(
                            str(temp_datetime.strftime("%Y-%m-%d")))
                        dates.append(temp_datetime)
        if count >= 0:
            # Data rows: one row per school hour, one cell per day.
            row = table_row.find_all("td",
                                     class_="ednevnik-seznam_ur_teden-td")
            hour_name = str(row[0].find(class_="text14").text)
            hour_time = row[0].find(class_="text10").text.replace(" ", "")
            hour_times.append(hour_time)
            count2: int = 0
            for row_part in row:
                if count2 != 0:
                    """Pass the first collum that contains hour times"""
                    date = dates[count2 - 1]
                    day_num = str(date.weekday())
                    date_formatted = str(date.strftime("%Y-%m-%d"))
                    if day_num not in scraped_data.keys():
                        scraped_data.update({str(day_num): {}})
                    scraped_data[day_num].update({str(hour_name): {}})
                    if "style" not in row_part.attrs:
                        # Cells without a style attribute carry no lesson;
                        # emit an empty placeholder entry.
                        data_out = {
                            "subject": None,
                            "teacher": None,
                            "classroom": None,
                            "group": None,
                            "event": None,
                            "hour": hour_name,
                            "week_day": int(day_num),
                            "hour_in_block": 0,
                            "date": date_formatted,
                        }
                        scraped_data[day_num][hour_name]["0"] = data_out
                    else:
                        classes_in_hour = 0
                        for section in row_part:
                            if type(section) == bs4.element.Tag:
                                event = None
                                subject = None
                                group_raw = None
                                group = []
                                teacher = None
                                classroom = None
                                teacher_classroom = None
                                # Event icons (img titles) map to event codes.
                                for img in section.select("img"):
                                    events_list = {
                                        "Odpadla ura": "cancelled",
                                        "Dogodek": "event",
                                        "Nadomeščanje": "substitute",
                                        "Polovična ura": "half_hour",
                                        "Videokonferenca": "video_call",
                                        "Interesna dejavnost": "activity",
                                        "Zaposlitev": "occupation",
                                        "Neopravljena ura": "unfinished_hour",
                                        "Govorilne ure": "office_hours",
                                        "Izpiti": "exams",
                                    }
                                    try:
                                        event = events_list[img.attrs["title"]]
                                    except KeyError:
                                        event = "unknown_event"
                                try:
                                    subject = (
                                        section.find(class_="text14")
                                        .text.replace("\n", "")
                                        .replace("\t", "")
                                    )
                                    group_raw = section.find_all(
                                        class_="text11 gray bold"
                                    )
                                    teacher_classroom = (
                                        section.find(class_="text11")
                                        .text.replace("\n", "")
                                        .replace("\t", "")
                                        .replace("\r", "")
                                        .split(", ")
                                    )
                                    teacher = teacher_classroom[0]
                                    classroom = teacher_classroom[1]
                                except IndexError:
                                    pass  # Makes it so empty strings don't
                                    # crash the program
                                except AttributeError:
                                    pass  # Makes it so empty strings don't
                                    # crash the program
                                if group_raw:
                                    for gr in group_raw:
                                        group.append(gr.text)
                                if ("id" in section.attrs) and bool(
                                        re.match(
                                            r"ednevnik-seznam_ur_teden-blok"
                                            r"-\d\d\d\d\d\d-\d\d\d\d-\d\d-\d\d",
                                            section.attrs["id"],
                                        )
                                ):
                                    # Check for blocks
                                    # (a "blok" cell nests several lessons in
                                    # one school hour; parse each child tag)
                                    for block in section:
                                        if type(block) == bs4.element.Tag:
                                            event = None
                                            subject = None
                                            group_raw = None
                                            group = []
                                            teacher = None
                                            classroom = None
                                            teacher_classroom = None
                                            for img in block.select("img"):
                                                # NOTE(review): this copy maps
                                                # "Govorilne ure" to
                                                # "office hours" (with a space)
                                                # while the copy above uses
                                                # "office_hours" -- likely a
                                                # typo; confirm before relying
                                                # on the event codes.
                                                events_list = {
                                                    "Odpadla ura": "cancelled",
                                                    "Dogodek": "event",
                                                    "Nadomeščanje": "substitute",
                                                    "Polovična ura": "half_hour",
                                                    "Videokonferenca": "video_call",
                                                    "Interesna dejavnost": "activity",
                                                    "Zaposlitev": "occupation",
                                                    "Neopravljena ura": "unfinished_hour",
                                                    "Govorilne ure": "office hours",
                                                    "Izpiti": "exams",
                                                }
                                                try:
                                                    event = events_list[
                                                        img.attrs["title"]
                                                    ]
                                                except KeyError:
                                                    event = "unknown_event"
                                            try:
                                                subject = (
                                                    block.find(class_="text14")
                                                    .text.replace("\n", "")
                                                    .replace("\t", "")
                                                )
                                                group_raw = block.find_all(
                                                    class_="text11 gray bold"
                                                )
                                                teacher_classroom = (
                                                    block.find(class_="text11")
                                                    .text.replace("\n", "")
                                                    .replace("\t", "")
                                                    .replace("\r", "")
                                                    .split(", ")
                                                )
                                                teacher = teacher_classroom[0]
                                                classroom = teacher_classroom[
                                                    1]
                                            except IndexError:
                                                pass
                                            except AttributeError:
                                                pass  # Makes it so empty
                                                # strings don't crash the
                                                # program
                                            if group_raw:
                                                for gr in group_raw:
                                                    group.append(gr.text)
                                            data_out = {
                                                "subject": subject,
                                                "teacher": teacher,
                                                "classroom": classroom,
                                                "group": group,
                                                "event": event,
                                                "hour": hour_name,
                                                "week_day": int(day_num),
                                                "hour_in_block": int(
                                                    classes_in_hour),
                                                "date": date_formatted,
                                            }
                                            scraped_data[day_num][hour_name][
                                                str(classes_in_hour)
                                            ] = data_out
                                            classes_in_hour += 1
                                else:
                                    data_out = {
                                        "subject": subject,
                                        "teacher": teacher,
                                        "classroom": classroom,
                                        "group": group,
                                        "event": event,
                                        "hour": hour_name,
                                        "week_day": int(day_num),
                                        "hour_in_block": int(classes_in_hour),
                                        "date": date_formatted,
                                    }
                                    scraped_data[day_num][hour_name][
                                        str(classes_in_hour)
                                    ] = data_out
                                    classes_in_hour += 1
                count2 += 1
        count += 1
    # Attach the request metadata alongside the per-day schedule entries.
    scraped_data["request_data"] = {}
    scraped_data["request_data"]["hour_times"] = hour_times
    scraped_data["request_data"]["dates"] = dates_formatted
    scraped_data["request_data"]["class"] = current_class
    scraped_data["request_data"]["request_week"] = current_week
    scraped_data["request_data"]["request_epoch"] = request_time
    scraped_data["request_data"]["used_data"] = \
        {
            "school_id": school_id,
            "class_id": class_id,
            "professor": professor,
            "classroom": classroom,
            "interest_activity": interest_activity,
            "school_week": school_week,
            "student_id": student_id
        }
    return scraped_data
| 3.515625 | 4 |
src/test.py | myscience/ltts | 5 | 12773795 | <gh_stars>1-10
'''
This is the Learning Through Target Spikes (LTTS) repository for code associated to
the paper: <NAME>, <NAME>, <NAME> (2020)
"Target spike patterns enable efficient and biologically plausible learning for
complex temporal tasks*" (currently *under review*).
Please give credit to this paper if you use or modify the code in a derivative work.
This work is licensed under the Creative Commons Attribution 4.0 International License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/4.0/
or send a letter to Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
'''
import numpy as np
import utils as ut
from ltts import CuLTTS
# Here we define the parameters of our model
# (times are expressed in units of the simulation step dt = 1/T)
N = 500;
T = 1000;
dt = 1 / T;
tau_m = 8. * dt;
tau_s = 2. * dt;
tau_ro = 20. * dt;
# Exponential decay factors for the synaptic and readout filters.
# (beta_s is computed here but not referenced again in this script.)
beta_s = np.exp (-dt / tau_s);
beta_ro = np.exp (-dt / tau_ro);
sigma_teach = 2.;
sigma_clock = 4.;
offT = 20;
dv = 1 / 20.;
alpha = 0.01;
alpha_rout = 0.02;
Vo = -4;
h = -4;
s_inh = 20;

# Here we build the dictionary of the simulation parameters
par = {'tau_m' : tau_m, 'tau_s' : tau_s, 'tau_ro' : tau_ro, 'beta_ro' : beta_ro,
       'dv' : dv, 'alpha' : alpha, 'alpha_rout' : alpha_rout, 'Vo' : Vo, 'h' : h, 's_inh' : s_inh,
       'N' : N, 'T' : T, 'dt' : dt, 'offT' : offT};

ltts = CuLTTS ((N, T), par);

# Here we set up the 3-Trajectory Task
# P: 3 target trajectories; C: 5-channel clock signal driving the network.
P = ut.kTrajectory (T, K = 3, offT = offT, norm = True);
C = ut.kClock (T, K = 5);

# Random projection matrices turn trajectories/clock into input currents.
Jteach = np.random.normal (0., sigma_teach, size = (N, 3));
Jclock = np.random.normal (0., sigma_clock, size = (N, 5));

Iteach = np.matmul(Jteach, P);
Iclock = np.matmul(Jclock, C);

# Target spike pattern: the network's response to teacher + clock input.
S_init = np.zeros (N);
S_targ = ltts.compute (Iteach + Iclock, init = S_init);

# Here we train our model
J_rout, track = ltts.train ([S_targ], [Iclock], out = [P], epochs = 1000, track = True);

# Here we save the results of our training
np.save ("Trained Model.npy", np.array ([P, C, Iteach, Iclock, J_rout, S_targ,
                                         ltts.J, par, track], dtype = np.object));
| 2.703125 | 3 |
frappe-bench/env/lib/python2.7/site-packages/gocardless_pro/services/base_service.py | Semicheche/foa_frappe_docker | 0 | 12773796 | <filename>frappe-bench/env/lib/python2.7/site-packages/gocardless_pro/services/base_service.py
# WARNING: Do not edit by hand, this file was generated by Crank:
#
# https://github.com/gocardless/crank
#
import re
import time
from requests import Timeout, ConnectionError
from uuid import uuid4
from .. import list_response
from ..api_response import ApiResponse
from ..errors import MalformedResponseError
class BaseService(object):
    """Shared plumbing for the generated API service classes.

    Provides request dispatch, idempotency keys for POSTs, bounded retry of
    transient network failures, and unwrapping of enveloped API responses.
    """

    def __init__(self, api_client, max_network_retries=3, retry_delay_in_seconds=0.5):
        self._api_client = api_client
        self.max_network_retries = max_network_retries
        self.retry_delay_in_seconds = retry_delay_in_seconds

    def _perform_request(self, method, path, params, headers=None, retry_failures=False):
        """Issue a request, optionally retrying transient network errors."""
        if method == 'POST':
            # Idempotency keys make retried POSTs safe server-side.
            headers = self._inject_idempotency_key(headers)

        if not retry_failures:
            return self._attempt_request(method, path, params, headers)

        for retries_left in range(self.max_network_retries - 1, -1, -1):
            try:
                return self._attempt_request(method, path, params, headers)
            except (Timeout, ConnectionError, MalformedResponseError) as err:
                if retries_left <= 0:
                    raise err
                time.sleep(self.retry_delay_in_seconds)

    def _attempt_request(self, method, path, params, headers):
        """Dispatch a single HTTP request to the underlying API client."""
        dispatch = {
            'GET': lambda: self._api_client.get(path, params=params, headers=headers),
            'POST': lambda: self._api_client.post(path, body=params, headers=headers),
            'PUT': lambda: self._api_client.put(path, body=params, headers=headers),
        }
        if method not in dispatch:
            raise ValueError('Invalid method "{}"'.format(method))
        return dispatch[method]()

    def _inject_idempotency_key(self, headers):
        """Ensure *headers* carry an Idempotency-Key (mutates the given dict)."""
        headers = headers or {}
        headers.setdefault('Idempotency-Key', str(uuid4()))
        return headers

    def _envelope_key(self):
        """Name of the JSON envelope key wrapping this service's resource."""
        return type(self).RESOURCE_NAME

    def _resource_for(self, response):
        """Unwrap *response* into one resource or a ListResponse of resources."""
        api_response = ApiResponse(response)
        payload = api_response.body[self._envelope_key()]
        resource_cls = type(self).RESOURCE_CLASS
        if isinstance(payload, dict):
            return resource_cls(payload, api_response)
        wrapped = [resource_cls(entry, api_response) for entry in payload]
        return list_response.ListResponse(wrapped, api_response)

    def _sub_url_params(self, url, params):
        """Substitute ':name' placeholders in *url* with values from *params*."""
        return re.sub(r':(\w+)', lambda match: params[match.group(1)], url)
| 1.882813 | 2 |
multienv/web_servers/web_server_definitions.py | sfelix-martins/laradock-up-env | 2 | 12773797 | <filename>multienv/web_servers/web_server_definitions.py
class WebServerDefinitions:
    """Value object describing one web-server/virtual-host entry."""

    # Class-level defaults; each is overwritten per instance in __init__.
    name = None
    root = None
    template = None
    https = False
    default_template = 'laravel'

    def __init__(self, name, root, template=None, https=False):
        self.name = name
        self.root = root
        self.https = https
        # Fall back to the default template when none (or empty) is given.
        self.template = template if template else self.default_template
| 1.945313 | 2 |
refinery/units/blockwise/pack.py | bronxc/refinery | 0 | 12773798 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from typing import Iterable
from refinery.units.blockwise import Arg, BlockTransformationBase
from refinery.units.encoding.base import base as BaseUnit
from refinery.lib.argformats import number
from refinery.lib.patterns import formats
class pack(BlockTransformationBase):
    """
    Scans the input data for numeric constants and packs them into a binary
    format. This is useful to convert the textual representation of an array of
    numbers into its binary form. For example, `123,34,256,12,1,234` would be
    transformed into the byte sequence `7B22000C01EA`, where `256` was wrapped
    and packed as a null byte because the default block size is one byte. If
    the above sequence would be packed with options -EB2, the result would be
    equal to `007B00220100000C000100EA` in hexadecimal.
    """
    def __init__(self,
        base: Arg(type=number[2:36], help=(
            'Find only numbers in given base. Default of 0 means that '
            'common expressions for hexadecimal, octal and binary are '
            'accepted.')) = 0,
        prefix: Arg.Switch('-r', help='Add numeric prefixes like 0x, 0b, and 0o in reverse mode.') = False,
        strict: Arg.Switch('-s', help='Only parse integers that fit in one block of the given block size.') = False,
        width : Arg.Number('-w', help='Pad numbers with the specified amount of leading zeros.') = 0,
        bigendian=False, blocksize=1
    ):
        super().__init__(
            base=base,
            prefix=prefix,
            strict=strict,
            width=width,
            bigendian=bigendian,
            blocksize=blocksize
        )
    @property
    def bytestream(self):
        # never allow bytes to be left unchunked
        return False
    def reverse(self, data):
        """Unpack blocks of *data* back into their textual representation in ``base``."""
        base = self.args.base or 10
        width = self.args.width
        prefix = B''
        self.log_debug(F'using base {base:d}')
        if self.args.prefix:
            # Only bases 2, 8 and 16 have a conventional prefix.
            prefix = {
                0x02: b'0b',
                0x08: b'0o',
                0x10: b'0x'
            }.get(base, B'')
        converter = BaseUnit(base, not self.args.bigendian)
        for n in self.chunk(data, raw=True):
            converted = converter.reverse(n)
            if width:
                converted = converted.rjust(width, B'0')
            if prefix:
                converted = prefix + converted
            yield converted
    def process(self, data):
        """Find numeric literals in *data* and emit them as packed binary blocks."""
        base: int = self.args.base
        strict: bool = self.args.strict
        def intb(literals: Iterable[bytes]):
            # Parse each literal; with base auto-detection (base == 0), a
            # leading zero followed by digits is treated as octal.
            for literal in literals:
                if base == 0 and literal[0] == 0x30 and literal[1:].isdigit():
                    literal = B'0o%s' % literal
                N = int(literal, base)
                # Wrap the value into one block; in strict mode, values that
                # do not fit are silently skipped instead.
                M = N & self.fmask
                if strict and M != N:
                    continue
                yield M
        if base == 0:
            pattern = formats.integer
        elif base <= 10:
            pattern = re.compile(B'[-+]?[0-%d]{1,64}' % (base - 1))
        else:
            pattern = re.compile(B'[-+]?[0-9a-%c]{1,20}' % (0x57 + base), re.IGNORECASE)
        it = (m[0] for m in pattern.finditer(data))
        return self.unchunk(intb(it))
| 2.90625 | 3 |
dafipost/functional_test.py | UncleGoogle/dafipost | 0 | 12773799 | <filename>dafipost/functional_test.py
"""
User stories as a bunch of functional tests in a real webdriver.
"""
from selenium import webdriver
import pytest
@pytest.fixture
def browser():
    """Yield a fresh Firefox WebDriver, quitting it on fixture teardown."""
    driver = webdriver.Firefox()
    yield driver
    driver.quit()
@pytest.fixture
def host():
    """Base URL of the application under test."""
    base_url = 'http://localhost:8000'
    return base_url
@pytest.mark.functional
def test_index_non_authorized(browser, host):
    """
    Evo is here for the first time and wants to give a try to our application.
    """
    # NOTE(review): find_element_by_xpath was removed in Selenium 4; this test
    # assumes a Selenium 3 driver -- confirm the pinned selenium version.
    # Evo goes to home page
    browser.get(host)
    # He see the blackdog app title
    assert 'dafipost' in browser.title.lower()
    # He see 2 buttons to login and create new account
    assert browser.find_element_by_xpath("//*[contains(text(), 'Sign up')]")
| 2.46875 | 2 |
file/delfiles.py | stormabq/python-examples | 1 | 12773800 | # Shows how to delete files
import glob
import os
from os import listdir
from os.path import isfile, join
def get_and_delete_symbol_files(path, symbol):
    """Delete every ``<symbol>-*.csv`` file directly under *path*.

    Args:
        path: Directory to search (not recursed into).
        symbol: Filename prefix (e.g. a ticker symbol) of the files to remove.
    """
    pattern = os.path.join(path, symbol + "-*.csv")
    for filename in glob.glob(pattern):
        # A file can vanish between glob() and remove(); ignore that race
        # (the original guarded with os.path.exists, which has the same race).
        try:
            os.remove(filename)
        except FileNotFoundError:
            pass
def deletefile(filename):
    """Remove *filename* if it exists; silently do nothing otherwise."""
    if not os.path.exists(filename):
        return
    os.remove(filename)
def getsymbolfiles(mypath, symbol):
    """Return the paths of all ``<symbol>-*.csv`` files directly under *mypath*."""
    pattern = mypath + "/" + symbol + "-*.csv"
    return list(glob.glob(pattern))
def deletesymbolfiles(filenames):
    """Delete every file in *filenames* via deletefile()."""
    for name in filenames:
        deletefile(name)
if __name__ == "__main__":
path = os.environ["BMTOP"]
# path = path + "/bluemesa/tmp/fun/in/test"
path = "/tmp/fun"
#
# This is the first way
#
# symbolfiles = getsymbolfiles(path, "ui")
# print(symbolfiles)
# for filename in symbolfiles:
# print(filename)
# deletefile(filename)
get_and_delete_symbol_files(path, "rdfn")
| 3.15625 | 3 |
userbot/modules/multimemes.py | JoanLindo/BaianoBot-backup-sexo | 1 | 12773801 | # Copyright (C) 2020 MoveAngel and MinaProject
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Multifunction memes
#
# Based code + improve from AdekMaulana and aidilaryanto
import asyncio
import os
import random
import re
import textwrap
import time
from asyncio.exceptions import TimeoutError
from glitch_this import ImageGlitcher
from PIL import Image, ImageDraw, ImageFont
from telethon import events, functions, types
from telethon.errors.rpcerrorlist import YouBlockedUserError
from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot
from userbot.events import register
from userbot.utils import check_media, progress
# Output path of the animated GIF produced by the .glitch command.
Glitched = TEMP_DOWNLOAD_DIRECTORY + "glitch.gif"
# Character class covering (most) emoji codepoint ranges; used by deEmojify()
# to strip emoji before sending text to inline bots.
EMOJI_PATTERN = re.compile(
    "["
    "\U0001F1E0-\U0001F1FF"  # flags (iOS)
    "\U0001F300-\U0001F5FF"  # symbols & pictographs
    "\U0001F600-\U0001F64F"  # emoticons
    "\U0001F680-\U0001F6FF"  # transport & map symbols
    "\U0001F700-\U0001F77F"  # alchemical symbols
    "\U0001F780-\U0001F7FF"  # Geometric Shapes Extended
    "\U0001F800-\U0001F8FF"  # Supplemental Arrows-C
    "\U0001F900-\U0001F9FF"  # Supplemental Symbols and Pictographs
    "\U0001FA00-\U0001FA6F"  # Chess Symbols
    "\U0001FA70-\U0001FAFF"  # Symbols and Pictographs Extended-A
    "\U00002702-\U000027B0"  # Dingbats
    "]+"
)
@register(outgoing=True, pattern=r"^\.glitch(?: |$)(.*)")
async def glitch(event):
    """Apply a glitch effect to the replied-to image/sticker and send it as a GIF.

    Usage: reply ``.glitch [1-8]``; the optional number is the glitch
    intensity (defaults to 2 when missing or out of range).
    """
    if not event.reply_to_msg_id:
        await event.edit("`Não vou falhar um fantasma!`")
        return
    reply_message = await event.get_reply_message()
    if not reply_message.media:
        await event.edit("`responda a uma imagem/sticker`")
        return
    await bot.download_file(reply_message.media)
    await event.edit("`Baixando mídia..`")
    if event.is_reply:
        # check_media returns a bool for unsupported media kinds.
        data = await check_media(reply_message)
        if isinstance(data, bool):
            await event.edit("`Arquivos não suportados...`")
            return
    else:
        await event.edit("`Responda a qualquer mídia`")
        return
    # Parse the intensity; anything missing, non-numeric or > 8 falls back to 2.
    try:
        value = int(event.pattern_match.group(1))
        if value > 8:
            raise ValueError
    except ValueError:
        value = 2
    await event.edit("```Falhando essa mídia```")
    await asyncio.sleep(2)
    file_name = "glitch.png"
    to_download_directory = TEMP_DOWNLOAD_DIRECTORY
    downloaded_file_name = os.path.join(to_download_directory, file_name)
    downloaded_file_name = await bot.download_media(
        reply_message,
        downloaded_file_name,
    )
    glitch_file = downloaded_file_name
    glitcher = ImageGlitcher()
    img = Image.open(glitch_file)
    # gif=True yields a list of frames; assemble them into an animated GIF.
    glitch_img = glitcher.glitch_image(img, value, color_offset=True, gif=True)
    DURATION = 200  # milliseconds per frame
    LOOP = 0  # 0 = loop forever
    glitch_img[0].save(
        Glitched,
        format="GIF",
        append_images=glitch_img[1:],
        save_all=True,
        duration=DURATION,
        loop=LOOP,
    )
    await event.edit("`Enviando mídia falhada...`")
    c_time = time.time()
    nosave = await event.client.send_file(
        event.chat_id,
        Glitched,
        force_document=False,
        reply_to=event.reply_to_msg_id,
        progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
            progress(d, t, event, c_time, "[UPLOAD]")
        ),
    )
    await event.delete()
    os.remove(Glitched)
    # unsave=True keeps the sent GIF out of the user's saved-GIFs list.
    await bot(
        functions.messages.SaveGifRequest(
            id=types.InputDocument(
                id=nosave.media.document.id,
                access_hash=nosave.media.document.access_hash,
                file_reference=nosave.media.document.file_reference,
            ),
            unsave=True,
        )
    )
    os.remove(glitch_file)
@register(outgoing=True, pattern=r"^\.mmf(?: |$)(.*)")
async def mim(event):
    """Caption the replied-to image/sticker with classic meme text.

    Usage: reply ``.mmf upper text ; lower text``.
    """
    if not event.reply_to_msg_id:
        await event.edit(
            "`Syntax: responda a uma imagem com .mmf` 'texto de cima' ; 'texto de baixo' "
        )
        return
    reply_message = await event.get_reply_message()
    if not reply_message.media:
        await event.edit("```responda a uma imagem/sticker/gif```")
        return
    # NOTE(review): this downloads the media into memory and discards the
    # result; it is downloaded again to disk below -- confirm it is needed.
    await bot.download_file(reply_message.media)
    if event.is_reply:
        data = await check_media(reply_message)
        if isinstance(data, bool):
            await event.edit("`Arquivos não suportados...`")
            return
    await event.edit(
        "```Hora da Transfiguração! Mwahaha Memificando essa imagem! (」゚ロ゚)」 ```"
    )
    await asyncio.sleep(5)
    text = event.pattern_match.group(1)
    if event.reply_to_msg_id:
        file_name = "meme.jpg"
        to_download_directory = TEMP_DOWNLOAD_DIRECTORY
        downloaded_file_name = os.path.join(to_download_directory, file_name)
        downloaded_file_name = await bot.download_media(
            reply_message,
            downloaded_file_name,
        )
        dls_loc = downloaded_file_name
        webp_file = await draw_meme_text(dls_loc, text)
        await event.client.send_file(
            event.chat_id, webp_file, reply_to=event.reply_to_msg_id
        )
        await event.delete()
        # Clean up both the rendered sticker and the downloaded source image.
        os.remove(webp_file)
        os.remove(dls_loc)
async def draw_meme_text(image_path, text):
    """Render classic top/bottom meme captions onto the image at *image_path*.

    ``text`` is split on the first ';' into an upper and a lower caption
    (no ';' means everything is the upper caption).  Returns the path of
    the rendered WebP file.

    The input file is left in place: cleaning it up is the caller's job.
    (Previously this function removed *image_path* itself, so the caller's
    own ``os.remove(dls_loc)`` raised ``FileNotFoundError``.)
    """
    img = Image.open(image_path)
    i_width, i_height = img.size
    # Font size scales linearly with image width (70px at a 640px-wide image).
    m_font = ImageFont.truetype(
        "resources/MutantAcademyStyle.ttf", int((70 / 640) * i_width)
    )
    # partition() tolerates extra ';' characters where split(";") would raise.
    upper_text, _, lower_text = text.partition(";")
    draw = ImageDraw.Draw(img)
    current_h, pad = 10, 5

    def _outlined(x, y, line):
        # 1px black outline in the four cardinal directions, white fill on top.
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            draw.text(xy=(x + dx, y + dy), text=line, font=m_font, fill=(0, 0, 0))
        draw.text(xy=(x, y), text=line, font=m_font, fill=(255, 255, 255))

    for u_text in textwrap.wrap(upper_text, width=15):
        # NOTE: draw.textsize is deprecated in recent Pillow releases; it
        # works on the Pillow versions this bot targets.
        u_width, u_height = draw.textsize(u_text, font=m_font)
        _outlined((i_width - u_width) / 2, int((current_h / 640) * i_width), u_text)
        current_h += u_height + pad
    for l_text in textwrap.wrap(lower_text, width=15):
        u_width, u_height = draw.textsize(l_text, font=m_font)
        _outlined(
            (i_width - u_width) / 2,
            i_height - u_height - int((20 / 640) * i_width),
            l_text,
        )
        current_h += u_height + pad
    webp_file = os.path.join(TEMP_DOWNLOAD_DIRECTORY, "memify.webp")
    img.save(webp_file, "WebP")
    return webp_file
@register(outgoing=True, pattern=r"^\.q")
async def quotess(qotli):
    """Turn the replied-to text message into a sticker via @QuotLyBot.

    NOTE: the ".q" pattern has no word boundary, so any command starting
    with ".q" also triggers this handler; kept for backward compatibility.
    """
    if qotli.fwd_from:
        return
    if not qotli.reply_to_msg_id:
        return await qotli.edit("```Responda a qualquer mensagem do usuário.```")
    reply_message = await qotli.get_reply_message()
    if not reply_message.text:
        return await qotli.edit("```Responda a uma mensagem de texto```")
    chat = "@QuotLyBot"
    if reply_message.sender.bot:
        return await qotli.edit("```Responda a uma mensagem de usuários reais.```")
    await qotli.edit("```Fazendo uma citação```")
    try:
        async with bot.conversation(chat) as conv:
            try:
                # Register the listener before forwarding, so the bot's reply
                # (from QuotLyBot's fixed user id) cannot be missed.
                response = conv.wait_event(
                    events.NewMessage(incoming=True, from_users=1031952739)
                )
                msg = await bot.forward_messages(chat, reply_message)
                response = await response
                # don't spam notif
                await bot.send_read_acknowledge(conv.chat_id)
            except YouBlockedUserError:
                return await qotli.reply(
                    "```Desbloqueie @QuotLyBot e tente novamente```"
                )
            if response.text.startswith("Hi!"):
                await qotli.edit(
                    "```Você pode gentilmente desativar suas configurações de privacidade de encaminhamento?```"
                )
            else:
                await qotli.delete()
                await bot.forward_messages(qotli.chat_id, response.message)
                await bot.send_read_acknowledge(qotli.chat_id)
                # cleanup chat after completed
                await qotli.client.delete_messages(conv.chat_id, [msg.id, response.id])
    except TimeoutError:
        # Bug fix: the original called qotli.edit() with no arguments, which
        # itself raises; report the timeout to the user instead.
        await qotli.edit("```@QuotLyBot não respondeu a tempo.```")
# NOTE(review): the pattern looks like it was meant to be r"^\.hz(?: |$)(.*)" --
# the dot is unescaped and "(:? |$)" is likely a typo for "(?: |$)"; confirm
# before changing, since group(2) indexing depends on it.
@register(outgoing=True, pattern=r"^.hz(:? |$)(.*)?")
async def hazz(hazmat):
    """Dress the replied-to image/sticker in a hazmat suit via @hazmat_suit_bot."""
    await hazmat.edit("`Enviando informação...`")
    level = hazmat.pattern_match.group(2)
    if hazmat.fwd_from:
        return
    if not hazmat.reply_to_msg_id:
        await hazmat.edit("`WoWoWo Capt!, não vamos vestir um fantasma!...`")
        return
    reply_message = await hazmat.get_reply_message()
    if not reply_message.media:
        await hazmat.edit("`Palavras podem destruir qualquer coisa Capt!...`")
        return
    if reply_message.sender.bot:
        await hazmat.edit("`Responda a um usuário real...`")
        return
    chat = "@hazmat_suit_bot"
    await hazmat.edit("```Se vista Capt!, Vamos exterminar alguns vírus...```")
    message_id_to_reply = hazmat.message.reply_to_msg_id
    msg_reply = None
    async with hazmat.client.conversation(chat) as conv:
        try:
            msg = await conv.send_message(reply_message)
            # With an explicit level (or for GIFs) the bot needs a /hazmat
            # command message and sends an extra acknowledgement first.
            if level:
                m = f"/hazmat {level}"
                msg_reply = await conv.send_message(m, reply_to=msg.id)
                r = await conv.get_response()
                response = await conv.get_response()
            elif reply_message.gif:
                m = f"/hazmat"
                msg_reply = await conv.send_message(m, reply_to=msg.id)
                r = await conv.get_response()
                response = await conv.get_response()
            else:
                response = await conv.get_response()
            """ - don't spam notif - """
            await bot.send_read_acknowledge(conv.chat_id)
        except YouBlockedUserError:
            await hazmat.reply("`Desbloqueie` @hazmat_suit_bot`...`")
            return
        if response.text.startswith("I can't"):
            await hazmat.edit("`GIF não suportado...`")
            await hazmat.client.delete_messages(
                conv.chat_id, [msg.id, response.id, r.id, msg_reply.id]
            )
            return
        else:
            downloaded_file_name = await hazmat.client.download_media(
                response.media, TEMP_DOWNLOAD_DIRECTORY
            )
            await hazmat.client.send_file(
                hazmat.chat_id,
                downloaded_file_name,
                force_document=False,
                reply_to=message_id_to_reply,
            )
    """ - cleanup chat after completed - """
    if msg_reply is not None:
        await hazmat.client.delete_messages(
            conv.chat_id, [msg.id, msg_reply.id, r.id, response.id]
        )
    else:
        await hazmat.client.delete_messages(conv.chat_id, [msg.id, response.id])
    await hazmat.delete()
    return os.remove(downloaded_file_name)
# NOTE(review): same pattern typo as .hz -- likely meant r"^\.df(?: |$)([1-8])?".
@register(outgoing=True, pattern=r"^.df(:? |$)([1-8])?")
async def fryerrr(fry):
    """Deep-fry the replied-to image via @image_deepfrybot (optional level 1-8)."""
    await fry.edit("`Enviando informação...`")
    level = fry.pattern_match.group(2)
    if fry.fwd_from:
        return
    if not fry.reply_to_msg_id:
        await fry.edit("`Responda a qualquer foto de mensagem do usuário...`")
        return
    reply_message = await fry.get_reply_message()
    if not reply_message.media:
        await fry.edit("`Nenhuma imagem encontrada para fritar...`")
        return
    if reply_message.sender.bot:
        await fry.edit("`Responda a um usuário real...`")
        return
    chat = "@image_deepfrybot"
    message_id_to_reply = fry.message.reply_to_msg_id
    async with fry.client.conversation(chat) as conv:
        try:
            msg = await conv.send_message(reply_message)
            if level:
                m = f"/deepfry {level}"
                msg_level = await conv.send_message(m, reply_to=msg.id)
                r = await conv.get_response()
                response = await conv.get_response()
            else:
                response = await conv.get_response()
            """ - don't spam notif - """
            await bot.send_read_acknowledge(conv.chat_id)
        except YouBlockedUserError:
            await fry.reply("`Desbloqueie` @image_deepfrybot`...`")
            return
        # NOTE(review): on the "Forward" branch downloaded_file_name is never
        # assigned, so the os.remove() at the end raises NameError -- confirm.
        if response.text.startswith("Forward"):
            await fry.edit("`Desative sua configuração de privacidade de encaminhamento...`")
        else:
            downloaded_file_name = await fry.client.download_media(
                response.media, TEMP_DOWNLOAD_DIRECTORY
            )
            await fry.client.send_file(
                fry.chat_id,
                downloaded_file_name,
                force_document=False,
                reply_to=message_id_to_reply,
            )
    """ - cleanup chat after completed - """
    # Probe whether the /deepfry command path ran (msg_level only exists then).
    try:
        msg_level
    except NameError:
        await fry.client.delete_messages(conv.chat_id, [msg.id, response.id])
    else:
        await fry.client.delete_messages(
            conv.chat_id, [msg.id, response.id, r.id, msg_level.id]
        )
    await fry.delete()
    return os.remove(downloaded_file_name)
@register(outgoing=True, pattern="^.sg(?: |$)(.*)")
async def lastname(steal):
if steal.fwd_from:
return
if not steal.reply_to_msg_id:
await steal.edit("```Responda a qualquer mensagem do usuário.```")
return
message = await steal.get_reply_message()
chat = "@SangMataInfo_bot"
user_id = message.sender.id
id = f"/search_id {user_id}"
if message.sender.bot:
await steal.edit("```Responda a mensagem de usuários reais.```")
return
await steal.edit("```Espere enquanto eu roubo alguns dados da NASA```")
async with bot.conversation(chat) as conv:
try:
msg = await conv.send_message(id)
r = await conv.get_response()
response = await conv.get_response()
except YouBlockedUserError:
await steal.reply("```Desbloqueie @sangmatainfo_bot e tente novamente```")
return
if response.text.startswith("No records"):
await steal.edit("```Nenhum registro encontrado para este usuário```")
await steal.client.delete_messages(
conv.chat_id, [msg.id, r.id, response.id]
)
return
else:
respond = await conv.get_response()
await steal.edit(f"{response.message}")
await steal.client.delete_messages(
conv.chat_id, [msg.id, r.id, response.id, respond.id]
)
@register(outgoing=True, pattern="^.waifu(?: |$)(.*)")
async def waifu(animu):
text = animu.pattern_match.group(1)
if not text:
if animu.is_reply:
text = (await animu.get_reply_message()).message
else:
await animu.answer("`Nenhum texto fornecido, por isso a waifu fugiu.`")
return
animus = [20, 32, 33, 40, 41, 42, 58]
sticcers = await bot.inline_query(
"stickerizerbot", f"#{random.choice(animus)}{(deEmojify(text))}"
)
await sticcers[0].click(
animu.chat_id,
reply_to=animu.reply_to_msg_id,
silent=True if animu.is_reply else False,
hide_via=True,
)
await animu.delete()
def deEmojify(inputString: str) -> str:
    """Return *inputString* with all EMOJI_PATTERN matches stripped out."""
    return EMOJI_PATTERN.sub("", inputString)
# Register each command's help text with the userbot's .help index.
CMD_HELP.update(
    {
        "glitch": ".glitch <1-8>\
\nUso: Responda a um sticker/imagem e envia com cmd.\
\no valor varia de 1 a 8 se não, ele usará o valor padrão que é 2"
    }
)
CMD_HELP.update(
    {
        "memify": ".mmf textodecima ; textodebaixo\
\nUso: Responda a um sticker/imagem/gif e envia com cmd."
    }
)
CMD_HELP.update(
    {
        "quotly": ".q \
\nUso: Transforma um texto em sticker."
    }
)
CMD_HELP.update(
    {
        "hazmat": ".hz or .hz [flip, x2, rotate (graus), background (numero), black]"
        "\nUso: Responda a uma imagem/sticker para se vestir!"
        "\n@hazmat_suit_bot"
    }
)
CMD_HELP.update(
    {
        "deepfry": ".df ou .df [level(1-8)]"
        "\nUso: Frita a imagem/sticker da resposta."
        "\n@image_deepfrybot"
    }
)
CMD_HELP.update(
    {
        "sangmata": ".sg \
\nUso: Descobre nomes passados do usuário."
    }
)
CMD_HELP.update(
    {
        "waifu": ".waifu \
\nUso: Melhore seu texto com belos modelos de anime girl. \
\n@StickerizerBot"
    }
)
| 1.789063 | 2 |
GraphRepr/GraphReprBase.py | olokshyn/Parallel-Regulations | 1 | 12773802 | <reponame>olokshyn/Parallel-Regulations
class GraphReprBase(object):
    """Abstract base class for graph representations.

    Concrete subclasses must override :meth:`read_from_file`.
    """

    @staticmethod
    def read_from_file(input_file):
        """Build a graph representation from *input_file*.

        Raises:
            NotImplementedError: always; subclasses must provide this.
        """
        # Bug fix: the original did ``raise NotImplemented()`` -- calling the
        # NotImplemented singleton raises TypeError, not the intended
        # NotImplementedError.
        raise NotImplementedError()
| 1.796875 | 2 |
dpython/__init__.py | Ajoo/dpy | 0 | 12773803 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 4 17:31:30 2016
@author: Ajoo
"""
from .autodiff import *
from .dfloat import *
# NOTE(review): importing this package prints to stdout -- consider logging
# (or removing) this side effect.
print('Initializing dpy...')
# NOTE(review): relies on the names "autodiff" and "dfloat" being bound here;
# a star-import does not bind the submodule name itself -- confirm this works.
__all__ = autodiff.__all__ + dfloat.__all__
# Package metadata.
__version__ = '0.0.0'
__author__ = u'<NAME> <<EMAIL>>'
if __name__ == "__main__":
    pass
# terrascript/data/mailgun.py
import terrascript
# The mailgun provider currently exposes no data-source classes.
__all__ = []
| 1 | 1 |
piecutter/engines/jinja.py | diecutter/piecutter | 2 | 12773805 | # -*- coding: utf-8 -*-
"""Jinja2 template engine."""
import os
import re
from jinja2 import Environment
from jinja2.exceptions import UndefinedError, TemplateSyntaxError
from piecutter.engines import Engine
from piecutter.exceptions import TemplateError
def path_join(*args, **kwargs):
    """Return ``args`` joined as file paths like with os.path.join().

    >>> from piecutter.engines.jinja import path_join
    >>> path_join('foo', 'bar')
    'foo/bar'

    Paths are normalized.

    >>> path_join('foo', '..', 'bar')
    'bar'

    You can pass an extra keyword argument 'target_os': a value in os.name
    capabilities.

    >>> path_join('foo', 'bar', target_os='posix')
    'foo/bar'

    Currently, this is using os.path, i.e. the separator and rules for the
    computer running Jinja2 engine. A NotImplementedError exception will be
    raised if 'target_os' differs from ``os.name``.
    """
    target_os = kwargs.get('target_os', None)
    # Bug fix: compare with equality, not identity -- ``is not`` on strings
    # only worked by accident of CPython string interning.
    if target_os and target_os != os.name:
        raise NotImplementedError('Cannot join path with "{target}" style. '
                                  'Host OS is "{host}".'.format(
                                      target=target_os,
                                      host=os.name))
    return path_normalize(os.path.join(*args), target_os)


def path_normalize(path, target_os=None):
    """Normalize path (like os.path.normpath) for given os.

    >>> from piecutter.engines.jinja import path_normalize
    >>> path_normalize('foo/bar')
    'foo/bar'
    >>> path_normalize('foo/toto/../bar')
    'foo/bar'

    A NotImplementedError exception is raised if ``target_os`` is given and
    differs from ``os.name`` (joining for a foreign OS is unsupported).
    """
    # Bug fix: equality comparison instead of identity (see path_join).
    if target_os and target_os != os.name:
        raise NotImplementedError('Cannot join path with "{target}" style. '
                                  'Host OS is "{host}".'.format(
                                      target=target_os,
                                      host=os.name))
    return os.path.normpath(path)
class Jinja2Engine(Engine):
    """Jinja2 template engine."""

    def __init__(self, environment=None):
        # Default to a vanilla Jinja2 Environment when none is supplied.
        if environment is None:
            environment = Environment()
        self.environment = environment
        self.register_environment_functions()

    def register_environment_functions(self):
        """Populate self.environment.globals with some global functions."""
        self.environment.globals['path_join'] = path_join
        self.environment.globals['path_normalize'] = path_normalize

    def render(self, template, context):
        """Return the rendered template against context.

        Raises:
            TemplateError: wrapping Jinja2 syntax errors or undefined
                variables encountered during rendering.
        """
        try:
            template = self.environment.from_string(template)
        except TemplateSyntaxError as e:
            raise TemplateError(e)
        try:
            return template.render(**context)
        except (UndefinedError, TypeError) as e:
            raise TemplateError(e)

    def match(self, template, context):
        """Return a ratio showing whether template looks like using engine.

        >>> engine = Jinja2Engine()
        >>> engine.match('', {})
        0.0
        >>> engine.match('{# Jinja2 #}', {})
        1.0
        >>> engine.match('Not shebang {# Jinja2 #}', {})
        0.0
        >>> engine.match('{{ key }}', {})
        0.9

        """
        # Try to locate a root variable in template.
        if template.startswith('{# Jinja2 #}'):
            return 1.0
        if re.search(r'{{ .+ }}', template):
            return 0.9
        return 0.0
| 2.640625 | 3 |
cognite/extractorutils/metrics.py | thomafred/python-extractor-utils | 0 | 12773806 | # Copyright 2020 Cognite AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing tools for pushers for metric reporting.
The classes in this module scrape the default Prometheus registry and uploads it periodically to either a Prometheus
push gateway, or to CDF as time series.
The ``BaseMetrics`` class forms the basis for a metrics collection for an extractor, containing some general metrics
that all extractors should report. To create your own set of metrics, subclass this class and populate it with
extractor-specific metrics, as such:
.. code-block:: python
class MyMetrics(BaseMetrics):
def __init__(self):
super().__init__(extractor_name="my_extractor", extractor_version=__version__)
self.a_counter = Counter("my_extractor_example_counter", "An example counter")
...
"""
import logging
import os
import threading
from abc import ABC, abstractmethod
from time import sleep
from typing import Any, Callable, Dict, List, Optional, T, Tuple, Type, Union
import arrow
import psutil
from prometheus_client import Gauge, Info, Metric
from prometheus_client.core import REGISTRY
from prometheus_client.exposition import basic_auth_handler, delete_from_gateway, pushadd_to_gateway
from cognite.client import CogniteClient
from cognite.client.data_classes import Asset, TimeSeries
from cognite.client.exceptions import CogniteDuplicatedError
from .util import ensure_time_series
from typing import TypeVar

# Local type variable for safe_get. ``from typing import T`` (used at the top
# of this module) relies on a non-public typing internal that modern Python
# versions no longer expose, so declare our own here.
T = TypeVar("T")

# Cache of singleton metrics-collection instances, keyed by class.
_metrics_singularities = {}


def safe_get(cls: Type[T]) -> T:
    """
    A factory for instances of metrics collections.

    Since Prometheus doesn't allow multiple metrics with the same name, any subclass of BaseMetrics must never be
    created more than once. This function creates an instance of the given class on the first call and stores it, any
    subsequent calls with the same class as argument will return the same instance.

    .. code-block:: python

        >>> a = safe_get(MyMetrics)  # This will create a new instance of MyMetrics
        >>> b = safe_get(MyMetrics)  # This will return the same instance
        >>> a is b
        True

    Args:
        cls: Metrics class to either create or get a cached version of

    Returns:
        An instance of given class
    """
    # Item assignment on the module-level dict needs no ``global`` statement.
    if cls not in _metrics_singularities:
        _metrics_singularities[cls] = cls()
    return _metrics_singularities[cls]
class BaseMetrics:
    """
    Base collection of extractor metrics. The class also spawns a collector thread on init that regularly fetches
    process information and update the ``process_*`` gauges.

    To create a set of metrics for an extractor, create a subclass of this class.

    **Note that only one instance of this class (or any subclass) can exist simultaneously**

    The collection includes the following metrics:
     * startup:                     Startup time (unix epoch)
     * finish:                      Finish time (unix epoch)
     * process_num_threads          Number of active threads. Set automatically.
     * process_memory_bytes         Memory usage of extractor. Set automatically.
     * process_cpu_percent          CPU usage of extractor. Set automatically.

    Args:
        extractor_name: Name of extractor, used to prefix metric names
        process_scrape_interval: Interval (in seconds) between each fetch of data for the ``process_*`` gauges
    """

    def __init__(self, extractor_name: str, extractor_version: str, process_scrape_interval: float = 15):
        # Prometheus metric names cannot contain spaces.
        extractor_name = extractor_name.strip().replace(" ", "_")
        self.startup = Gauge(f"{extractor_name}_start_time", "Timestamp (seconds) of when the extractor last started")
        # NOTE(review): "of then" in this description string is a typo for
        # "of when"; the runtime string is left untouched here.
        self.finish = Gauge(
            f"{extractor_name}_finish_time", "Timestamp (seconds) of then the extractor last finished cleanly"
        )
        self._process = psutil.Process(os.getpid())
        self.process_num_threads = Gauge(f"{extractor_name}_num_threads", "Number of threads")
        self.process_memory_bytes = Gauge(f"{extractor_name}_memory_bytes", "Memory usage in bytes")
        self.process_cpu_percent = Gauge(f"{extractor_name}_cpu_percent", "CPU usage percent")
        self.info = Info(f"{extractor_name}_info", "Information about running extractor")
        self.info.info({"extractor_version": extractor_version, "extractor_type": extractor_name})
        self.process_scrape_interval = process_scrape_interval
        self._start_proc_collector()
        self.startup.set_to_current_time()

    def _proc_collect(self) -> None:
        """
        Collect values for process metrics, forever (runs in a daemon thread).
        """
        while True:
            self.process_num_threads.set(self._process.num_threads())
            self.process_memory_bytes.set(self._process.memory_info().rss)
            self.process_cpu_percent.set(self._process.cpu_percent())
            sleep(self.process_scrape_interval)

    def _start_proc_collector(self) -> None:
        """
        Start a thread that collects process metrics at a regular interval
        """
        # Daemon thread: must not keep the process alive on shutdown.
        thread = threading.Thread(target=self._proc_collect, name="ProcessMetricsCollector", daemon=True)
        thread.start()
class AbstractMetricsPusher(ABC):
    """
    Base class for metric pushers. Metric pushers spawns a thread that routinely pushes metrics to a configured
    destination.

    Contains all the logic for starting and running threads.

    Args:
        push_interval: Seconds between each upload call
        thread_name: Name of thread to start. If omitted, a standard name such as Thread-4 will be generated.
    """

    def __init__(self, push_interval: Optional[int] = None, thread_name: Optional[str] = None):
        self.push_interval = push_interval
        # Fix: thread_name was redundantly assigned twice in the original.
        self.thread_name = thread_name

        self.thread: Optional[threading.Thread] = None
        self.stopping = threading.Event()

        self.logger = logging.getLogger(__name__)

    @abstractmethod
    def _push_to_server(self) -> None:
        """
        Push metrics to a remote server, to be overridden in subclasses.
        """
        pass

    def _run(self) -> None:
        """
        Run push loop: push once, then wait ``push_interval`` seconds (or until
        stopped) between iterations.
        """
        while not self.stopping.is_set():
            self._push_to_server()
            self.stopping.wait(self.push_interval)

    def start(self) -> None:
        """
        Starts a thread that pushes the default registry to the configured gateway at certain intervals.
        """
        self.stopping.clear()
        # Daemon thread so a hung push never blocks interpreter shutdown.
        self.thread = threading.Thread(target=self._run, daemon=True, name=self.thread_name)
        self.thread.start()

    def stop(self) -> None:
        """
        Stop the push loop.
        """
        # Make sure everything is pushed
        self._push_to_server()
        self.stopping.set()

    def __enter__(self) -> "AbstractMetricsPusher":
        """
        Wraps around start method, for use as context manager

        Returns:
            self
        """
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """
        Wraps around stop method, for use as context manager

        Args:
            exc_type: Exception type
            exc_val:  Exception value
            exc_tb:   Traceback
        """
        self.stop()
class PrometheusPusher(AbstractMetricsPusher):
    """
    Pusher to a Prometheus push gateway.

    Args:
        job_name: Prometheus job name
        url: URL (with port number) of the push gateway
        push_interval: Seconds between each upload call
        username: Push gateway credentials
        password: Push gateway credentials
        thread_name: Name of thread to start. If omitted, a standard name such
            as Thread-4 will be generated.
    """

    def __init__(
        self,
        job_name: str,
        url: str,
        push_interval: int,
        username: Optional[str] = None,
        password: Optional[str] = None,
        thread_name: Optional[str] = None,
    ):
        super(PrometheusPusher, self).__init__(push_interval, thread_name)

        self.username = username
        self.job_name = job_name
        self.password = password

        self.url = url

    def _auth_handler(self, url: str, method: str, timeout: int, headers: Dict[str, str], data: Any) -> Callable:
        """
        Returns an authentication handler against the Prometheus Pushgateway to
        use in the pushadd_to_gateway method.

        Args:
            url: Push gateway
            method: HTTP method
            timeout: Request timeout (seconds)
            headers: HTTP headers
            data: Data to send

        Returns:
            prometheus_client.exposition.basic_auth_handler: An authentication
                handler based on this client.
        """
        return basic_auth_handler(url, method, timeout, headers, data, self.username, self.password)

    def _push_to_server(self) -> None:
        """
        Push the default metrics registry to the configured Prometheus
        Pushgateway.

        Connection problems are logged as warnings, any other failure with a
        full traceback; neither aborts the push loop.
        """
        if not self.url or not self.job_name:
            return

        try:
            pushadd_to_gateway(self.url, job=self.job_name, registry=REGISTRY, handler=self._auth_handler)
        except OSError as exp:
            self.logger.warning("Failed to push metrics to %s: %s", self.url, str(exp))
        except Exception:
            # BUG FIX: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            self.logger.exception("Failed to push metrics to %s", self.url)
        else:
            # BUG FIX: previously logged unconditionally, even when the push
            # had just failed.
            self.logger.debug("Pushed metrics to %s", self.url)

    def clear_gateway(self) -> None:
        """
        Delete metrics stored at the gateway (reset gateway).
        """
        delete_from_gateway(self.url, job=self.job_name, handler=self._auth_handler)
        self.logger.debug("Deleted metrics from push gateway %s", self.url)
class CognitePusher(AbstractMetricsPusher):
    """
    Pusher to CDF. Creates time series in CDF for all Gauges and Counters in
    the default Prometheus registry.

    Optional contextualization with an Asset to make the time series observable
    in Asset Data Insight. The given asset will be created at root level in the
    tenant if it doesn't already exist.

    Args:
        cdf_client: The CDF tenant to upload time series to
        external_id_prefix: Unique external ID prefix for this pusher.
        push_interval: Seconds between each upload call
        asset: Optional contextualization.
        thread_name: Name of thread to start. If omitted, a standard name such
            as Thread-4 will be generated.
    """

    def __init__(
        self,
        cdf_client: CogniteClient,
        external_id_prefix: str,
        push_interval: int,
        asset: Optional[Asset] = None,
        thread_name: Optional[str] = None,
    ):
        super(CognitePusher, self).__init__(push_interval, thread_name)

        self.cdf_client = cdf_client
        self.asset = asset
        self.external_id_prefix = external_id_prefix

        # Create asset/time series up front so that pushes can assume they exist.
        self._init_cdf()
        self._cdf_project = cdf_client.login.status().project

    def _init_cdf(self) -> None:
        """
        Initialize the CDF tenant with the necessary time series and asset.

        One time series is ensured per gauge/counter in the default Prometheus
        registry, with external ID ``external_id_prefix + metric name``.
        """
        time_series: List[TimeSeries] = []

        if self.asset is not None:
            # Ensure that the asset exists, and retrieve its internal ID
            try:
                asset = self.cdf_client.assets.create(self.asset)
            except CogniteDuplicatedError:
                asset = self.cdf_client.assets.retrieve(external_id=self.asset.external_id)

            asset_id = asset.id if asset is not None else None
        else:
            asset_id = None

        for metric in REGISTRY.collect():
            # isinstance instead of an exact type() comparison: idiomatic, and
            # also accepts Metric subclasses.
            if isinstance(metric, Metric) and metric.type in ["gauge", "counter"]:
                external_id = self.external_id_prefix + metric.name

                time_series.append(
                    TimeSeries(
                        external_id=external_id,
                        name=metric.name,
                        legacy_name=external_id,
                        description=metric.documentation,
                        asset_id=asset_id,
                    )
                )

        ensure_time_series(self.cdf_client, time_series)

    def _push_to_server(self) -> None:
        """
        Create datapoints and push them to their respective time series.
        """
        timestamp = int(arrow.get().float_timestamp * 1000)

        datapoints: List[Dict[str, Union[str, List[Tuple[float, float]]]]] = []

        for metric in REGISTRY.collect():
            if isinstance(metric, Metric) and metric.type in ["gauge", "counter"]:
                if len(metric.samples) == 0:
                    continue

                external_id = self.external_id_prefix + metric.name
                datapoints.append({"externalId": external_id, "datapoints": [(timestamp, metric.samples[0].value)]})

        # Skip the API call entirely when no metric produced a sample.
        if datapoints:
            self.cdf_client.datapoints.insert_multiple(datapoints)

        self.logger.debug("Pushed metrics to CDF tenant '%s'", self._cdf_project)
| 2.140625 | 2 |
examples/dectree.py | tresoldi/multitiers | 0 | 12773807 | <gh_stars>0
# Example: decision-tree "studies" over aligned wordlist data with multitiers.
# Each numbered study builds a Classifier over different tier models
# ("cv" and "sca" sound-class models, with optional left/right context tiers),
# trains it on an X/y tier specification, and either renders the fitted tree
# to graphviz or inspects its predictions.
from pathlib import Path
import multitiers
# Read data
# Germanic wordlist shipped in the repository (tab-separated, hence comma=False).
source = Path(__file__).parent.parent / "resources" / "germanic.tsv"
data = multitiers.read_wordlist_data(source.as_posix(), comma=False)
# Build classifier
print("============ STUDY 1")
# Study 1: tier specifications passed as plain dictionaries.
clf = multitiers.Classifier(data, models=["cv"], left=1, right=1)
# Study
X_tiers = {
"index": {"include": [1]}, # first position in word...
"segment_Proto-Germanic": {"include": ["s"]}, # when PG has /s/
"cv_Proto-Germanic_R1": {}, # any following class
}
y_tiers = {"segment_German": {"exclude": ["r"]}} # and G doesn't have /r/
clf.train(X_tiers, y_tiers)
clf.to_graphviz("docs/germanic")
#############
print("============ STUDY 2")
# Build classifier
# Study 2: same kind of question, but the tiers are written in the textual
# study mini-language and parsed with multitiers.utils.parse_study; the
# tree depth is capped at 3.
clf2 = multitiers.Classifier(data, models=["sca"], left=1, right=1)
study = """
X_tier index INCLUDE 1
X_tier segment_Proto-Germanic INCLUDE s
X_tier sca_Proto-Germanic_L1
X_tier sca_Proto-Germanic_R1
y_tier segment_German EXCLUDE r
y_tier segment_English
"""
X_tiers2, y_tiers2 = multitiers.utils.parse_study(study)
clf2.train(X_tiers2, y_tiers2, max_depth=3)
clf2.to_graphviz("docs/germanic2")
###########
print("============ STUDY 3")
# Build classifier
# Study 3: predict Dutch segments (restricted to the V class) from the
# German and English segments, pruning weak splits via min_impurity_decrease.
clf3 = multitiers.Classifier(data, models=["cv"])
study3 = """
X_tier segment_German
X_tier segment_English
X_tier cv_Dutch INCLUDE V
y_tier segment_Dutch
"""
X_tiers3, y_tiers3 = multitiers.utils.parse_study(study3)
clf3.train(X_tiers3, y_tiers3, min_impurity_decrease=0.0333)
clf3.to_graphviz("docs/dutch_pred")
###########
print("============ STUDY 4")
# Build classifier
# Study 4: richer feature set (segments plus sca classes with context) for
# Dutch C-class segments; inspect predictions instead of plotting the tree.
clf4 = multitiers.Classifier(data, models=["cv", "sca"], left=1, right=1)
study4 = """
X_tier segment_German
X_tier segment_English
X_tier sca_German
X_tier sca_German_L1
X_tier sca_German_R1
X_tier sca_English
X_tier sca_English_L1
X_tier sca_English_R1
X_tier cv_Dutch INCLUDE C
y_tier segment_Dutch
"""
X_tiers4, y_tiers4 = multitiers.utils.parse_study(study4)
clf4.train(X_tiers4, y_tiers4, max_depth=15)
clf4.show_pred(max_lines=10)
clf4.show_pred_prob(max_lines=10)
##############################
print("============ STUDY 5")
# Read data
# Study 5: a second dataset (Latin -> Spanish): what happens to Latin /t/
# depending on its sca context, plus feature-importance extraction.
source = Path(__file__).parent.parent / "resources" / "latin2spanish.tsv"
data = multitiers.read_wordlist_data(source.as_posix(), comma=False)
# Build classifier
clf = multitiers.Classifier(data, models=["sca"], left=1, right=1)
study = """
X_tier segment_Latin INCLUDE t
X_tier sca_Latin_L1
X_tier sca_Latin_R1
y_tier segment_Spanish
"""
X_tiers, y_tiers = multitiers.utils.parse_study(study)
clf.train(X_tiers, y_tiers)
clf.to_graphviz("docs/latin_t")
print(clf.feature_extraction("tree", num_feats=5))
print(clf.feature_extraction("lsvc"))
| 2.46875 | 2 |
setup.py | Parnassius/construct-typing | 16 | 12773808 | #!/usr/bin/env python
from pathlib import Path

from setuptools import setup

# Fallback value; version.py below is expected to redefine `version_string`.
version_string = "?.?.?"
# Read the version without importing the package. Path.read_text() closes the
# file -- the original exec(open(...).read()) leaked the file handle.
exec(Path("./construct_typed/version.py").read_text())

setup(
    name="construct-typing",
    version=version_string,
    # "construct-stubs" holds the .pyi stub files for `construct` itself;
    # "construct_typed" is the runtime helper package.
    packages=["construct-stubs", "construct_typed"],
    package_data={
        "construct-stubs": ["*.pyi", "lib/*.pyi"],
        "construct_typed": ["py.typed"],
    },
    license="MIT",
    license_files=("LICENSE",),
    description="Extension for the python package 'construct' that adds typing features",
    long_description=Path("README.md").read_text(),
    long_description_content_type="text/markdown",
    platforms=["POSIX", "Windows"],
    url="https://github.com/timrid/construct-typing",
    author="<NAME>",
    python_requires=">=3.7",
    install_requires=["construct==2.10.67"],
    keywords=[
        "construct",
        "kaitai",
        "declarative",
        "data structure",
        "struct",
        "binary",
        "symmetric",
        "parser",
        "builder",
        "parsing",
        "building",
        "pack",
        "unpack",
        "packer",
        "unpacker",
        "bitstring",
        "bytestring",
        "annotation",
        "type hint",
        "typing",
        "typed",
        "bitstruct",
        "PEP 561",
    ],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: MIT License",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Software Development :: Build Tools",
        "Topic :: Software Development :: Code Generators",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: Implementation :: CPython",
        "Typing :: Typed",
    ],
)
| 1.757813 | 2 |
src/skim/modeling/skim_attention/__init__.py | recitalAI/skim-attention | 4 | 12773809 | <reponame>recitalAI/skim-attention<filename>src/skim/modeling/skim_attention/__init__.py<gh_stars>1-10
from .configuration_skim import (
SkimformerConfig,
BertWithSkimEmbedConfig,
SkimmingMaskConfig,
)
from .configuration_longskim import (
LongSkimformerConfig
)
from .modeling_skim import (
SkimformerForMaskedLM,
SkimformerForTokenClassification,
BertWithSkimEmbedForTokenClassification,
SkimmingMaskForTokenClassification,
)
from .modeling_longskim import (
LongSkimformerForMaskedLM,
LongSkimformerForTokenClassification,
) | 1.140625 | 1 |
tests/model_options/test_default_related_name.py | benjaoming/django | 2 | 12773810 | from django.test import TestCase
from .models.default_related_name import Author, Editor, Book
class DefaultRelatedNameTests(TestCase):
    """Tests for the Meta.default_related_name option and its interaction
    with an explicit related_name and with model inheritance."""

    def setUp(self):
        self.author = Author.objects.create(first_name="Dave", last_name="Loper")
        self.editor = Editor.objects.create(name="Test Editions",
                                            bestselling_author=self.author)
        self.book = Book.objects.create(title="Test Book", editor=self.editor)
        self.book.authors.add(self.author)
        self.book.save()

    def assert_relation(self, instance, attribute, message):
        # Accessing a missing reverse relation raises AttributeError; turn
        # that into an explicit test failure with a readable message.
        try:
            getattr(instance, attribute)
        except AttributeError:
            self.fail(message)

    def test_no_default_related_name(self):
        self.assert_relation(self.author, "editor_set",
                             "Author should have an editor_set relation.")

    def test_default_related_name(self):
        self.assert_relation(self.author, "books",
                             "Author should have a books relation.")

    def test_related_name_overrides_default_related_name(self):
        self.assert_relation(self.editor, "edited_books",
                             "Editor should have a edited_books relation.")

    def test_inheritance(self):
        # Here model_options corresponds to the name of the application used
        # in this test
        self.assert_relation(self.book, "model_options_bookstores",
                             "Book should have a model_options_bookstores relation.")

    def test_inheritance_with_overrided_default_related_name(self):
        self.assert_relation(self.book, "editor_stores",
                             "Book should have a editor_stores relation.")
| 2.6875 | 3 |
src/modularitydensity/metrics.py | ckmanalytix/modularity-density | 4 | 12773811 | # -*- coding: utf-8 -*-
# Copyright (c) CKM Analytix Corp. All rights reserved.
# Authors: <NAME> (<EMAIL>), <NAME> (<EMAIL>)
"""
Metrics for determining quality of community structure
"""
import numpy as np
from scipy.sparse import identity
__all__ = ['modularity_r', 'modularity_density', 'mula_modularity_density']
def cluster_total_weight(adj_r, c, cluster_num, dict_bool):
    """Twice the total edge weight inside one community.

    Parameters
    ----------
    adj_r : SciPy sparse matrix (csr or csc)
        N x N (rescaled) adjacency matrix of the graph.
    c : Integer array
        Community label per node, ordered as in the adjacency matrix.
    cluster_num : Integer
        Label of the community of interest.
    dict_bool : dictionary
        Maps each cluster label to its boolean membership array (c == label).

    Returns
    -------
    float
        Twice the total weight of the edges whose endpoints both lie in
        community `cluster_num`.
    """
    members = dict_bool[cluster_num]
    # 0/1 indicator of community membership.
    indicator = np.where(members, 1, 0)
    rows = adj_r[members]
    return rows.dot(indicator).sum()
def cluster_total_volume(adj, c, cluster_num, dict_bool):
    """Volume (total degree/weight) of one community.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        N x N (rescaled) adjacency matrix of the graph.
    c : Integer array
        Community label per node, ordered as in the adjacency matrix.
    cluster_num : Integer
        Label of the community of interest.
    dict_bool : dictionary
        Maps each cluster label to its boolean membership array (c == label).

    Returns
    -------
    float
        Sum over all adjacency rows belonging to community `cluster_num`.
    """
    member_rows = adj[dict_bool[cluster_num]]
    return member_rows.sum()
def modularity_r(adj, c, cluster_labels, r=0, dict_bool=None):
    r"""Total modularity, on the topology rescaled by `r`, of the communities
    listed in `cluster_labels`.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        N x N adjacency matrix of the graph of interest.
    c : Integer array
        Community label per node, ordered as in the adjacency matrix.
    cluster_labels : Integer array or list
        Unique community labels for which modularity is computed.
    r : float
        Resolution of the topology: smaller `r` favors larger communities,
        larger `r` favors smaller communities.
    dict_bool : dictionary, optional
        Maps each cluster label to its boolean membership array (c == label).

    Returns
    -------
    float
        Sum over `cluster_labels` of
        ``in_weight/2|E| - (volume/2|E|)**2``, both terms computed on the
        rescaled adjacency ``adj + r*I`` (Newman-Girvan modularity when r=0).
    """
    # Rescale the topology: add r to every node's self-loop.
    adj = adj + identity(n=adj.shape[0]) * r

    if dict_bool is None:
        # Track the nodes in each community.
        dict_bool = {label: c == label for label in np.unique(cluster_labels)}

    # Twice the total edge weight of the rescaled graph.
    two_m = (adj.dot(np.ones(adj.shape[0], dtype=int))).sum()

    def single_cluster_modularity(label):
        # Fraction of weight inside the cluster, minus squared volume fraction.
        in_frac = cluster_total_weight(adj, c, label, dict_bool) / two_m
        vol_frac = cluster_total_volume(adj, c, label, dict_bool) / two_m
        return in_frac - vol_frac ** 2

    return np.sum([single_cluster_modularity(label) for label in cluster_labels])
def split_penalty(adj, c, ci, conn_clusters, total_weight, dict_bool):
    """Total split-penalty density for the edges running from community `ci`
    to the communities listed in `conn_clusters`.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        N x N adjacency matrix of the graph of interest.
    c : Integer array
        Community label per node, ordered as in the adjacency matrix.
    ci : Integer
        Label of the community of interest.
    conn_clusters : Integer array
        Unique labels of communities that may be connected to `ci`.
    total_weight : float
        Twice the total weight of all nodes in the adjacency matrix.
    dict_bool : dictionary
        Maps each cluster label to its boolean membership array (c == label).

    Returns
    -------
    float
        Penalty density; 0 when `conn_clusters` holds no community other
        than `ci` itself.
    """
    members = dict_bool[ci]
    # A community is never penalized against itself.
    others = conn_clusters[conn_clusters != ci]

    if others.size == 0:
        return 0

    raw_penalty = sum_penalty(adj[members], c, others, dict_bool)
    return raw_penalty / (np.count_nonzero(members) * total_weight)
def individual_penalty(adj_ci, c, cj, dict_bool):
    """Partial split-penalty term for the edges between one community and `cj`.

    Parameters
    ----------
    adj_ci : SciPy sparse matrix (csr or csc)
        The row slice adj[c == ci] of the full adjacency matrix.
    c : Integer array
        Community label per node, ordered as in the adjacency matrix.
    cj : Integer
        Label of a community connected to community `ci`.
    dict_bool : dictionary
        Maps each cluster label to its boolean membership array (c == label).

    Returns
    -------
    float
        Squared total cross-weight between `ci` and `cj`, divided by the
        number of nodes in `cj`.
    """
    members_j = dict_bool[cj]
    # 0/1 indicator of membership in cj.
    indicator = members_j.astype(int)
    cross_weight = adj_ci.dot(indicator).sum()
    return cross_weight ** 2 / np.count_nonzero(members_j)
def sum_penalty(adj_ci, c, conn_clusters, dict_bool):
    """Sum of `individual_penalty` over every community in `conn_clusters`.

    Parameters
    ----------
    adj_ci : SciPy sparse matrix (csr or csc)
        The row slice adj[c == ci] of the full adjacency matrix.
    c : Integer array
        Community label per node, ordered as in the adjacency matrix.
    conn_clusters : Integer array
        Unique labels of communities that may be connected to community `ci`.
    dict_bool : dictionary
        Maps each cluster label to its boolean membership array (c == label).

    Returns
    -------
    float
        Accumulated (unnormalized) split-penalty contribution of `ci`
        against every community in `conn_clusters`.
    """
    penalties = [individual_penalty(adj_ci, c, cj, dict_bool)
                 for cj in conn_clusters]
    return np.sum(penalties)
def density_based_modularity(adj, c, ci, total_weight, dict_bool):
    """Density-weighted modularity contribution of a single community.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        N x N adjacency matrix of the graph of interest.
    c : Integer array
        Community label per node, ordered as in the adjacency matrix.
    ci : Integer
        Label of the community of interest.
    total_weight : float
        Twice the total weight of all nodes in the adjacency matrix.
    dict_bool : dictionary
        Maps each cluster label to its boolean membership array (c == label).

    Returns
    -------
    float
        ``(in_weight * d) / 2|E| - ((volume * d) / 2|E|)**2`` where `d` is
        the internal density of community `ci`.
    """
    density = community_density(adj, c, ci, dict_bool)

    weight_term = cluster_total_weight(adj, c, ci, dict_bool) * density / total_weight
    volume_term = (cluster_total_volume(adj, c, ci, dict_bool) * density / total_weight) ** 2

    return weight_term - volume_term
def community_density(adj, c, ci, dict_bool):
    """Internal density of one community.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        N x N adjacency matrix of the graph of interest.
    c : Integer array
        Community label per node, ordered as in the adjacency matrix.
    ci : Integer
        Label of the community of interest.
    dict_bool : dictionary
        Maps each cluster label to its boolean membership array (c == label).

    Returns
    -------
    float
        Twice the internal edge weight divided by size*(size - 1); 0 for
        communities with at most one node.
    """
    members = dict_bool[ci]
    # Number of nodes in the community.
    size = np.count_nonzero(members)

    # A singleton (or empty) community has no internal density.
    if size <= 1:
        return 0

    indicator = members.astype(int)
    # Twice the weight of the edges internal to ci.
    internal_weight = adj[members].dot(indicator).sum()
    return internal_weight / (size * (size - 1))
def compute_modularity_density(adj, c, conn_clusters, cluster_labels,
                               total_weight, dict_bool):
    """Modularity density of a set of communities.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        N x N adjacency matrix of the graph of interest.
    c : Integer array
        Community label per node, ordered as in the adjacency matrix.
    conn_clusters : Integer array
        Unique labels of communities that may be connected to the
        communities in `cluster_labels`.
    cluster_labels : Integer array or list
        Unique labels of the communities of interest.
    total_weight : float
        Twice the total weight of all nodes in the adjacency matrix.
    dict_bool : dictionary
        Maps each cluster label to its boolean membership array (c == label).

    Returns
    -------
    float
        Sum over `cluster_labels` of the density-based modularity minus the
        split penalty against `conn_clusters`.
    """
    contributions = [
        density_based_modularity(adj, c, ci, total_weight, dict_bool)
        - split_penalty(adj, c, ci, conn_clusters, total_weight, dict_bool)
        for ci in cluster_labels
    ]
    return np.sum(contributions)
def modularity_density(adj, c, cluster_labels,
                       dict_bool=None, conn_clusters=None):
    r"""Modularity density of a set of communities (Chen, Kuzmin, Szymanski).

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        N x N adjacency matrix of the graph of interest.
    c : Integer array
        Community label per node, ordered as in the adjacency matrix.
    cluster_labels : Integer array or list
        Unique labels of the communities of interest.
    dict_bool : dictionary, optional
        Maps each cluster label to its boolean membership array (c == label).
        Computed from `c` when omitted.
    conn_clusters : Integer array, optional
        Unique labels of communities that may be connected to the communities
        in `cluster_labels`. Passing this narrows the split-penalty search
        and can reduce computation time; defaults to all communities.

    Returns
    -------
    float
        Modularity density of the communities in `cluster_labels`: for each
        community, its internal-density-weighted modularity minus the
        pair-wise split penalties against `conn_clusters`.

    References
    ----------
    .. [1] Community detection via maximization of modularity and its
       variants. IEEE Transactions on Computational Social Systems.
       1(1), 46-65, 2014
    """
    # Twice the total edge weight of the graph.
    total_weight = (adj.dot(np.ones(adj.shape[0], dtype=int))).sum()

    all_labels = np.unique(c)

    if dict_bool is None:
        # Track the nodes in each community.
        dict_bool = {label: c == label for label in all_labels}

    if conn_clusters is None:
        # By default every community may be connected to every other.
        conn_clusters = all_labels

    return compute_modularity_density(adj, c, conn_clusters, cluster_labels,
                                      total_weight, dict_bool)
def dotdot(adj, vec1, vec2):
    """Bilinear form: dot the matrix with `vec1`, then with `vec2`.

    Parameters
    ----------
    adj : Numpy matrix or SciPy sparse matrix (csr or csc)
        N x N adjacency matrix of the graph of interest.
    vec1 : first Numpy array
    vec2 : second Numpy array

    Returns
    -------
    scalar (float, int, boolean, etc.)
        The value of ``(adj @ vec1) @ vec2``.
    """
    partial = adj.dot(vec1)
    return partial.dot(vec2)
def norm_vector(vec):
    """Normalize an indicator vector for the modularity-density calculation.

    Divides by the square root of the number of nonzero entries, which for a
    0/1 membership vector equals its Euclidean norm.

    Parameters
    ----------
    vec : Numpy array to be normalized

    Returns
    -------
    Numpy array
        The rescaled vector; the input array is left untouched.
    """
    norm = np.count_nonzero(vec) ** 0.5
    return vec / norm
def mula_modularity_density(adj, c, dict_vec=None):
    r"""Modularity density computed with a bias-free, faster metric.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        N x N adjacency matrix of the graph of interest.
    c : Integer array
        Community label per node, ordered as in the adjacency matrix.
    dict_vec : dictionary, optional
        Maps each community label to its normalized 0/1 indicator vector
        (as produced by `norm_vector`). Computed from `c` when omitted.

    Returns
    -------
    float
        Twice the sum of per-community quadratic forms v_c' A v_c, minus a
        global penalty term (sum_c v_c)' A (sum_c v_c).

    References
    ----------
    .. [1] A new measure of modularity density for community detection.
       arXiv:1908.08452, 2019.
    """
    cluster_labels = np.unique(c)

    # BUG FIX: `collect_dict_vec` used to be assigned only inside the
    # `dict_vec is None` branch, so calling this function with a precomputed
    # `dict_vec` raised a NameError in the loop below.
    collect_dict_vec = dict_vec is None
    if collect_dict_vec:
        dict_vec = {}

    # Sum of the normalized indicator vectors of all communities.
    Nsum = 0
    for label in cluster_labels:
        if collect_dict_vec:
            dict_vec[label] = norm_vector((c == label) * 1)
        Nsum = Nsum + dict_vec[label]

    # Penalty term: couples every pair of communities through adj.
    penalty = dotdot(adj, Nsum, Nsum)

    modularize = np.vectorize(lambda label: dotdot(adj, dict_vec[label],
                                                   dict_vec[label]))

    # Reduced modularity density over all communities.
    metric = 2 * np.sum(modularize(cluster_labels)) - penalty
    return metric
| 2.78125 | 3 |
EBOLA/Scripts/HMMSEIR_expanded.py | LorenzoRimella/Multinomial-Approximations-for-compartmental-models | 3 | 12773812 | import numpy as np
from scipy.stats import gamma as RVgamma
# the gamma distribution consider a varying shape parameter and a scale parameter equal to 1
class HMM_approxSEIR_expanded:
    """Multinomial/HMM approximation of a stochastic SEIR-type model with an
    expanded (previous-state, current-state) representation.

    The four compartments are indexed 0..3; given the class name they are
    presumably S, E, I, R -- confirm against the calling scripts.

    Stored parameters:
        N        -- population size (observation counts y are divided by N)
        beta     -- infection rate (decays after t_star, see filtering)
        rho      -- rate behind the per-step probability pC = 1 - exp(-rho)
        gamma    -- rate behind the per-step probability pR = 1 - exp(-gamma)
        q        -- reporting probability (filtered mass is thinned by 1 - q)
        eta_zero -- initial distribution over the four compartments
        q_r      -- exponential decay rate applied to beta from t_star onwards
        t_star   -- time step at which the beta restriction kicks in
    """

    def __init__( self, N, beta, rho, gamma, q, eta_zero, q_r, t_star ):
        # Parameters are stored verbatim; no validation is performed.
        self.N = N
        self.beta = beta
        self.rho = rho
        self.gamma = gamma
        self.q = q
        self.eta_zero = eta_zero
        self.q_r = q_r
        self.t_star = t_star

    def eta_computation(self, T):
        """Propagate eta_zero through T steps of the mean-field dynamics
        (no observations, no beta restriction).

        Returns a 4 x T array whose column t is the compartment distribution
        at time t."""
        eta = np.zeros((4, T))
        eta[:, 0] = self.eta_zero
        # Per-step transition probabilities implied by the rates rho and gamma.
        pC = 1 - np.exp(-self.rho)
        pR = 1 - np.exp(-self.gamma)
        for t in range(1, T):
            # Row-stochastic transition kernel at time t-1; the infection
            # probability 1 - exp(-beta * eta[2, t-1]) depends on the current
            # mass in compartment 2.
            Kappa_eta_prev = np.array([[ np.exp(-self.beta*eta[2,t-1]), 1 - np.exp(-self.beta*eta[2,t-1]), 0, 0 ], [ 0, 1 - pC, pC, 0 ], [ 0, 0, 1 - pR, pR ], [ 0, 0, 0, 1 ]])
            eta[:, t] = eta[:, t-1] @ Kappa_eta_prev
        return eta

    def filtering(self, y):
        """Approximate filtering pass given observed counts.

        Args:
            y: 4 x 4 x T array of counts; y[i, j, t] is presumably the number
               of reported i -> j transitions at time t (it is divided by N
               below) -- confirm against the data-generation scripts.

        Returns:
            pitt: 4 x T filtered marginal distribution per time step.
            Kappa: 4 x 4 x (T-1) transition kernels actually used.
            pitt_expanded: 4 x 4 x T filtered joint (previous, current) mass.
            pitt_prev_expanded: 4 x 4 x T one-step predictive joint mass.
        """
        T = np.size(y[0, 0, :])
        # Per-step transition probabilities implied by the rates rho and gamma.
        pC = 1 - np.exp(-self.rho)
        pR = 1 - np.exp(-self.gamma)
        pitt = np.zeros([4,T])
        pitt[:,0]= self.eta_zero
        # Expanded representation: entry (i, j) holds the probability mass of
        # being in state j now, having been in state i one step earlier. At
        # t=0 all mass is placed on the first row.
        pitt_expanded = np.zeros((4, 4, T))
        pitt_expanded[0, :, 0] = pitt[:,0]
        pitt_prev_expanded = np.zeros((4, 4, T))
        pitt_prev_expanded[0, :, 0] = pitt[:,0]
        Kappa = np.zeros([4,4,T-1])
        pitt_expanded_q = np.zeros([4,4,T])
        for t in range(1, T):
            # After t_star the contact rate decays exponentially at rate q_r
            # (models an intervention/restriction on transmission).
            beta_restr = self.beta*(t< self.t_star) + self.beta*(np.exp(-self.q_r*(t-self.t_star)))*(t>= self.t_star)
            Kappa_eta_prev = np.array([[ np.exp(-beta_restr*pitt[2,t-1]), 1 - np.exp(-beta_restr*pitt[2,t-1]), 0, 0 ], [ 0, 1 - pC, pC, 0 ], [ 0, 0, 1 - pR, pR ], [ 0, 0, 0, 1 ]])
            Kappa[:,:,t-1] = Kappa_eta_prev
            # Prediction step: push the previous marginal through the kernel,
            # keeping the (previous state, current state) factorization.
            pitt_prev_expanded[:,:, t] = Kappa_eta_prev*( np.sum(pitt_expanded[:, :, t-1], 0) ).reshape(4,1)
            #rho_vec = pitt_prev_expanded[:,:, t]*(1-self.q)
            #rho_vec = rho_vec/np.sum(rho_vec)
            # Thin the predictive mass by the non-reporting probability (1-q)
            # and renormalize it to a distribution.
            pitt_expanded_q[:,:,t] = pitt_prev_expanded[:,:, t]*(1-self.q)
            pitt_expanded_q[:,:,t] = pitt_expanded_q[:,:,t]/np.sum(pitt_expanded_q[:,:,t])
            # Update step: observed fractions y/N enter directly; the
            # remaining (unobserved) mass follows the renormalized prediction.
            pitt_expanded[:,:, t] = y[:,:, t]/self.N + ( 1 - (np.sum( y[:,:, t] ))/(self.N) )*pitt_expanded_q[:,:,t]
            # Marginalize out the previous state to get the filtered marginal.
            pitt[:,t] = np.sum( pitt_expanded[:,:, t], 0 )
        return pitt, Kappa, pitt_expanded, pitt_prev_expanded

    def smoothing(self,pitt_expanded, pitt):
        """Backward smoothing pass over the filtered expanded distributions.

        Args:
            pitt_expanded: 4 x 4 x T filtered joint masses from filtering().
            pitt: 4 x T filtered marginals from filtering().

        Returns:
            pist: 4 x T smoothed marginal distribution.
            pist_expanded: 4 x 4 x T smoothed joint masses.
        """
        T = np.size(pitt_expanded[1,1,:])
        pist = np.zeros((4, T))
        # Initialize the backward recursion at the final filtered estimate.
        pist[:,T-1] = np.sum(pitt_expanded[:,:,T-1],0)
        L = np.zeros((4,4))
        pist_expanded = np.zeros((4, 4, T))
        pist_expanded[:,:,T-1] = pitt_expanded[:,:,T-1]
        for t in range(T-1,1,-1):
            pist[:,t-1] = np.sum(pist_expanded[:,:,t],1)
            # Backward kernel L: filtered joint divided by the filtered
            # marginal; the boolean mask guards against division by zero
            # where the marginal carries no mass (those entries keep their
            # previous value in L -- NOTE(review): L is not reset between
            # iterations; confirm this carry-over is intended).
            L[np.outer(pitt[:,t-1],np.ones(4))!=0] = np.transpose(pitt_expanded[:,:,t-1])[np.outer(pitt[:,t-1],np.ones(4))!=0] / np.outer(pitt[:,t-1],np.ones(4))[np.outer(pitt[:,t-1],np.ones(4))!=0]
            pist_expanded[:,:,t-1] = np.outer(np.ones(4),pist[:,t-1]) * np.transpose(L)
        pist[:,0] = np.sum(pist_expanded[:,:,1],1)
        pist_expanded[0, :, 0] = pist[:,0]
        return pist, pist_expanded
fluent_pages/pagetypes/textfile/migrations/0002_add_translation_model.py | django-fluent/django-fluent-pages | 59 | 12773813 | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("textfile", "0001_initial")]
operations = [
migrations.CreateModel(
name="TextFileTranslation",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"language_code",
models.CharField(max_length=15, verbose_name="Language", db_index=True),
),
("content", models.TextField(verbose_name="File contents")),
(
"master",
models.ForeignKey(
related_name="text_translations",
editable=False,
to="textfile.TextFile",
on_delete=models.CASCADE,
null=True,
),
),
],
options={
"managed": True,
"db_table": "textfile_textfile_translation",
"db_tablespace": "",
"default_permissions": (),
"verbose_name": "Plain text file Translation",
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name="textfiletranslation",
unique_together={("language_code", "master")},
),
]
| 1.984375 | 2 |
magnets/reduction/mod_reduction.py | meghnathomas/MAGNets | 2 | 12773814 | <gh_stars>1-10
import wntr
from magnets.reduction import build_g_matrix
from magnets.reduction import reinitialize
from magnets.utils.characteristics import *
from magnets.utils.call_on_functions import *
import warnings
def mod_reduction(wn, new_link_list, junc_dict, pipe_dict, unremovable_nodes, relations, nodes_to_be_removed, new_pipe_len, alpha, inp_file, max_nodal_degree, op_pt):
if max_nodal_degree == None:
max_nodal_degree = 100
if (max_nodal_degree < 1 or (isinstance(max_nodal_degree, int) == False and max_nodal_degree != None)):
warnings.warn('Invalid maximum nodal degree provided by user. \ Running simulation with maximum nodal degree = 100.')
max_nodal_degree = 100
count = 1
while (len(nodes_to_be_removed)!=0):
junc_names = wn.junction_name_list
num_junc = len(junc_names)
pipe_names = wn.pipe_name_list
num_pipes = len(pipe_names)
#identify node with minimum number of connections
num_connections = []
for i in range(len(nodes_to_be_removed)):
num_connections.append(len(junc_dict[nodes_to_be_removed[i]]['Connected nodes']))
removal_node = nodes_to_be_removed[num_connections.index(min(num_connections))]
if min(num_connections) > max_nodal_degree:
break
neighbor_nodes = junc_dict[removal_node]['Connected nodes']
if len(neighbor_nodes)==1:
nb_node = neighbor_nodes[0]
for j in range(num_pipes):
if (pipe_dict[pipe_names[j]]['Start node name'] == nb_node and pipe_dict[pipe_names[j]]['End node name'] == removal_node) or (pipe_dict[pipe_names[j]]['Start node name'] == removal_node and pipe_dict[pipe_names[j]]['End node name'] == nb_node):
nb_link = pipe_names[j]
#update diagonal g~
junc_dict[nb_node]['Diagonal g'] = junc_dict[nb_node]['Diagonal g'] - junc_dict[removal_node]['Diagonal g']
#update demand
junc_dict[nb_node]['Demand'] = junc_dict[nb_node]['Demand'] + junc_dict[removal_node]['Demand']
wn.get_node(nb_node).demand_timeseries_list[0].base_value = wn.get_node(nb_node).demand_timeseries_list[0].base_value + wn.get_node(removal_node).demand_timeseries_list[0].base_value
#delete link and node and update dictionaries
wn.remove_link(nb_link)
wn.remove_node(removal_node)
nodes_to_be_removed.remove(removal_node)
del relations[removal_node]
del junc_dict[removal_node]
del pipe_dict[nb_link]
junc_names = wn.junction_name_list
num_junc = len(junc_names)
for value in relations.values():
if removal_node in value:
value.remove(removal_node)
for k in range(num_junc):
if removal_node in junc_dict[junc_names[k]]['Connected nodes']:
junc_dict[junc_names[k]]['Connected nodes'].remove(removal_node)
else:
removal_links = []
pipe_names = wn.pipe_name_list
for d in range(len(neighbor_nodes)):
node_x = neighbor_nodes[d]
for e in range(len(pipe_names)):
if (pipe_dict[pipe_names[e]]['Start node name'] == node_x and pipe_dict[pipe_names[e]]['End node name'] == removal_node) or (pipe_dict[pipe_names[e]]['Start node name'] == removal_node and pipe_dict[pipe_names[e]]['End node name'] == node_x):
link_x = pipe_names[e]
#update demand
junc_dict[node_x]['Demand'] = junc_dict[node_x]['Demand'] + abs(pipe_dict[link_x]['Linear g']*junc_dict[removal_node]['Demand']/junc_dict[removal_node]['Diagonal g'])
wn.get_node(node_x).demand_timeseries_list[0].base_value = wn.get_node(node_x).demand_timeseries_list[0].base_value + abs(pipe_dict[link_x]['Linear g']*wn.get_node(removal_node).demand_timeseries_list[0].base_value/junc_dict[removal_node]['Diagonal g'])
#update diagonal g
junc_dict[node_x]['Diagonal g'] = junc_dict[node_x]['Diagonal g'] - abs((pipe_dict[link_x]['Linear g']**2)/junc_dict[removal_node]['Diagonal g'])
removal_links.append(link_x)
nb_pairs = [(neighbor_nodes[a],neighbor_nodes[b]) for a in range(len(neighbor_nodes)) for b in range(a+1, len(neighbor_nodes))]
for node1, node2 in nb_pairs:
link_1_2 = -1
pipe_names = wn.pipe_name_list
for l in range(len(pipe_names)):
if (pipe_dict[pipe_names[l]]['Start node name'] == node1 and pipe_dict[pipe_names[l]]['End node name'] == node2) or (pipe_dict[pipe_names[l]]['Start node name'] == node2 and pipe_dict[pipe_names[l]]['End node name'] == node1):
link_1_2 = pipe_names[l]
if (pipe_dict[pipe_names[l]]['Start node name'] == node1 and pipe_dict[pipe_names[l]]['End node name'] == removal_node) or (pipe_dict[pipe_names[l]]['Start node name'] == removal_node and pipe_dict[pipe_names[l]]['End node name'] == node1):
link_1_rem = pipe_names[l]
if (pipe_dict[pipe_names[l]]['Start node name'] == node2 and pipe_dict[pipe_names[l]]['End node name'] == removal_node) or (pipe_dict[pipe_names[l]]['Start node name'] == removal_node and pipe_dict[pipe_names[l]]['End node name'] == node2):
link_2_rem = pipe_names[l]
#update non-diagonal g
#Existing link between nodes 1 and 2
if link_1_2 != -1:
pipe_dict[link_1_2]['Linear g'] = pipe_dict[link_1_2]['Linear g'] - abs(pipe_dict[link_1_rem]['Linear g']*pipe_dict[link_2_rem]['Linear g']/junc_dict[removal_node]['Diagonal g'])
pipe_dict[link_1_2]['Diameter'] = calculate_new_D(lin_g_to_nonlin_g(pipe_dict[link_1_2]['Linear g'],junc_dict[node1]['Head at op pt'], junc_dict[node2]['Head at op pt']),pipe_dict[link_1_2]['Length'],alpha,100)
pipe = wn.get_link(link_1_2)
pipe.diameter = pipe_dict[link_1_2]['Diameter']
pipe.roughness = 100
#No previously existing link between nodes 1 and 2
else:
new_g = - abs(pipe_dict[link_1_rem]['Linear g']*pipe_dict[link_2_rem]['Linear g']/junc_dict[removal_node]['Diagonal g'])
wn.add_pipe('new-pipe-{}'.format(count), start_node_name = node1, end_node_name = node2, length = new_pipe_len, diameter = calculate_new_D(lin_g_to_nonlin_g(new_g,junc_dict[node1]['Head at op pt'], junc_dict[node2]['Head at op pt']), new_pipe_len, alpha, 100), roughness = 100, minor_loss = 0)
pipe_dict['new-pipe-{}'.format(count)] = {'Start node name': node1, 'End node name': node2, 'Length': new_pipe_len, 'Diameter':calculate_new_D(lin_g_to_nonlin_g(new_g,junc_dict[node1]['Head at op pt'], junc_dict[node2]['Head at op pt']), new_pipe_len, alpha, 100), 'Roughness':100, 'Linear g': new_g}
junc_dict[node1]['Connected nodes'].append(node2)
junc_dict[node2]['Connected nodes'].append(node1)
relations[node1].append(node2)
relations[node2].append(node1)
count = count + 1
for m in range(len(removal_links)):
wn.remove_link(removal_links[m])
del pipe_dict[removal_links[m]]
wn.remove_node(removal_node)
nodes_to_be_removed.remove(removal_node)
del relations[removal_node]
del junc_dict[removal_node]
junc_names = wn.junction_name_list
num_junc = len(junc_names)
for value in relations.values():
if removal_node in value:
value.remove(removal_node)
for k in range(num_junc):
if removal_node in junc_dict[junc_names[k]]['Connected nodes']:
junc_dict[junc_names[k]]['Connected nodes'].remove(removal_node)
if '/' in inp_file:
index = inp_file.rindex('/') + 1
new_name = inp_file[:index] + 'reduced ' + inp_file[index:] + '_' + str(op_pt)
wn.write_inpfile(new_name)
else:
wn.write_inpfile('reduced {} {}'.format(inp_file, op_pt))
return 1
| 2.296875 | 2 |
molsysmt/element/molecule/get_molecule_index_from_atom.py | uibcdf/MolModMTs | 0 | 12773815 | <gh_stars>0
from molsysmt._private.exceptions import *
from molsysmt._private.digestion import *
import numpy as np
def get_molecule_index_from_atom(molecular_system, indices='all', check=True):
if check:
digest_single_molecular_system(molecular_system)
indices = digest_indices(indices)
from molsysmt.basic import get
output = get(molecular_system, element='atom', indices=indices, component_index=True)
return output
| 2.421875 | 2 |
cookiedjango/twitter/models.py | shanenater/shanecookie | 0 | 12773816 | from django.db import models
from cookiedjango.core.models import TimeStampedModel
class TwitterPost(TimeStampedModel):
tag = models.CharField(max_length=140)
text = models.TextField()
| 2.3125 | 2 |
src/model/GCN.py | Zeyuzhao/DeepKidney | 1 | 12773817 | import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import BatchNorm1d
from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv
from torch_geometric.utils import add_remaining_self_loops, add_self_loops
class BasicNet(torch.nn.Module):
def __init__(self):
super(BasicNet, self).__init__()
self.conv1 = GCNConv(1, 16)
self.conv2 = GCNConv(16, 1)
def forward(self, data):
weight = data["x"].view(-1, 1)
edge_index = data["edge_index"]
x = self.conv1(weight, edge_index)
x = F.relu(x)
x = self.conv2(x, edge_index)
x = torch.sigmoid(x)
return x
class MultiNet(torch.nn.Module):
def __init__(self):
super(MultiNet, self).__init__()
self.conv_in = GCNConv(1, 32)
self.conv_mid1 = GCNConv(32, 32)
self.conv_mid2 = GCNConv(32, 32)
self.conv_mid3 = GCNConv(32, 32)
self.conv_out = GCNConv(32, 1)
def forward(self, data):
weight = data["x"].view(-1, 1)
edge_index = data["edge_index"] # add_self_loops(data["edge_index"])
x = self.conv_in(weight, edge_index)
x = F.relu(x)
x = self.conv_mid1(x, edge_index)
x = F.relu(x)
x = self.conv_mid2(x, edge_index)
x = F.relu(x)
x = self.conv_mid3(x, edge_index)
x = F.relu(x)
x = self.conv_out(x, edge_index)
x = torch.sigmoid(x)
return x.reshape(-1)
class ConvNet(torch.nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
self.conv_in = GraphConv(1, 32, aggr="add")
self.conv_max1 = GraphConv(32, 64, aggr="max")
self.conv_mean1 = GraphConv(64, 32, aggr="mean")
self.conv_max2 = GraphConv(32, 64, aggr="max")
self.conv_mean2 = GraphConv(64, 32, aggr="mean")
self.conv_max3 = GraphConv(32, 64, aggr="max")
self.conv_mean3 = GraphConv(64, 32, aggr="mean")
self.conv_out = GraphConv(32, 1, aggr="max")
def forward(self, data):
weight = data["x"].view(-1, 1)
edge_index = data["edge_index"]
x = self.conv_in(weight, edge_index)
x = F.relu(x)
x = self.conv_max1(x, edge_index)
x = F.relu(x)
x = self.conv_mean1(x, edge_index)
x = F.relu(x)
x = self.conv_max2(x, edge_index)
x = F.relu(x)
x = self.conv_mean2(x, edge_index)
x = F.relu(x)
x = self.conv_max3(x, edge_index)
x = F.relu(x)
x = self.conv_mean3(x, edge_index)
x = F.relu(x)
x = self.conv_out(x, edge_index)
x = torch.sigmoid(x)
return x
class ConvNet2(torch.nn.Module):
def __init__(self):
super(ConvNet2, self).__init__()
self.conv_in = GraphConv(1, 32, aggr="add")
self.conv_mean1 = GraphConv(32, 32, aggr="mean")
self.conv_mean2 = GraphConv(32, 32, aggr="add")
self.conv_mean3 = GraphConv(32, 32, aggr="mean")
self.conv_mean4 = GraphConv(32, 32, aggr="add")
self.conv_mean5 = GraphConv(32, 32, aggr="mean")
self.conv_max1 = GraphConv(32, 32, aggr="max")
self.conv_out = GraphConv(32, 1, aggr="mean")
def forward(self, data):
weight = data["x"].view(-1, 1)
edge_index = data["edge_index"]
x = self.conv_in(weight, edge_index)
x = F.relu(x)
x = self.conv_mean1(x, edge_index)
x = F.relu(x)
x = self.conv_mean2(x, edge_index)
x = F.relu(x)
x = self.conv_mean3(x, edge_index)
x = F.relu(x)
x = self.conv_mean4(x, edge_index)
x = F.relu(x)
x = self.conv_mean5(x, edge_index)
x = F.relu(x)
x = self.conv_max1(x, edge_index)
x = F.relu(x)
x = self.conv_out(x, edge_index)
x = torch.sigmoid(x).reshape(-1)
return x
class ConvNet3(torch.nn.Module):
def __init__(self):
super(ConvNet3, self).__init__()
self.conv_in = GraphConv(1, 32, aggr="add")
self.conv_mean1 = GraphConv(32, 32, aggr="mean")
self.conv_max1 = GraphConv(32, 32, aggr="max")
self.conv_mean2 = GraphConv(32, 32, aggr="add")
self.conv_max2 = GraphConv(32, 32, aggr="max")
self.conv_mean3 = GraphConv(32, 32, aggr="mean")
self.conv_max3 = GraphConv(32, 32, aggr="max")
self.conv_mean4 = GraphConv(32, 32, aggr="mean")
self.conv_max4 = GraphConv(32, 32, aggr="max")
self.conv_mean5 = GraphConv(32, 32, aggr="mean")
self.conv_max5 = GraphConv(32, 32, aggr="max")
self.conv_out = GraphConv(32, 1, aggr="max")
def forward(self, data):
weight = data["x"].view(-1, 1)
edge_index, _ = add_self_loops(data["edge_index"], num_nodes=len(data["x"]))
#edge_index = data["edge_index"]
x = self.conv_in(weight, edge_index)
x = F.relu(x)
x = self.conv_mean1(x, edge_index)
x = F.relu(x)
x = self.conv_max1(x, edge_index)
x = F.relu(x)
x = self.conv_mean2(x, edge_index)
x = F.relu(x)
x = self.conv_max2(x, edge_index)
x = F.relu(x)
x = self.conv_mean3(x, edge_index)
x = F.relu(x)
x = self.conv_max3(x, edge_index)
x = F.relu(x)
x = self.conv_mean4(x, edge_index)
x = F.relu(x)
x = self.conv_max4(x, edge_index)
x = F.relu(x)
x = self.conv_mean5(x, edge_index)
x = F.relu(x)
x = self.conv_max5(x, edge_index)
x = F.relu(x)
x = self.conv_out(x, edge_index)
x = torch.sigmoid(x).reshape(-1)
return x
| 2.71875 | 3 |
examples/gui/ReFlowClient.py | whitews/ReFlowRESTClient | 0 | 12773818 | <reponame>whitews/ReFlowRESTClient
import Tkinter
import ttk
import tkMessageBox
import tkFileDialog
import tkFont
from PIL import Image, ImageTk
import json
import re
import sys
import os
import calendar
from threading import Thread
from exceptions import TypeError
import reflowrestclient.utils as rest
import flowio
VERSION = '0.13'
# Locate bundled resources: PyInstaller 2.0 extracts them to sys._MEIPASS,
# otherwise fall back to the in-repo resources directory for development.
if hasattr(sys, '_MEIPASS'):
    # for PyInstaller 2.0
    RESOURCE_DIR = sys._MEIPASS
else:
    # for development
    RESOURCE_DIR = '../../resources'
LOGO_PATH = os.path.join(RESOURCE_DIR, 'reflow_text.gif')
# Pick the platform-appropriate window icon; abort on unknown platforms.
if sys.platform == 'win32':
    ICON_PATH = os.path.join(RESOURCE_DIR, 'reflow2.ico')
elif sys.platform == 'darwin':
    ICON_PATH = os.path.join(RESOURCE_DIR, 'reflow.icns')
elif sys.platform == 'linux2':
    ICON_PATH = None  # haven't figured out icons on linux yet : (
else:
    sys.exit("Your operating system is not supported.")
# UI color palette (hex strings passed to Tkinter widgets)
BACKGROUND_COLOR = '#ededed'
INACTIVE_BACKGROUND_COLOR = '#e2e2e2'
INACTIVE_FOREGROUND_COLOR = '#767676'
BORDER_COLOR = '#bebebe'
HIGHLIGHT_COLOR = '#5489b9'
ROW_ALT_COLOR = '#f3f6fa'
SUCCESS_FOREGROUND_COLOR = '#00cc00'
ERROR_FOREGROUND_COLOR = '#ff0000'
# Window geometry and padding constants (pixels)
WINDOW_WIDTH = 1200
WINDOW_HEIGHT = 776
PAD_SMALL = 2
PAD_MEDIUM = 4
PAD_LARGE = 8
PAD_EXTRA_LARGE = 14
LABEL_WIDTH = 16
# Headers for the upload queue tree view
QUEUE_HEADERS = [
    'File',
    'Project',
    'Subject',
    'Visit',
    'Specimen',
    'Pre-treatment',
    'Storage',
    'Stimulation',
    'Site Panel',
    'Cytometer',
    'Acquisition Date',
    'Status'
]
class ChosenFile(object):
    """A selected FCS file plus the user-chosen metadata (project, subject,
    visit, etc.) and its upload status.

    Raises TypeError from the constructor if the given file is not a
    valid FCS file.
    """

    def __init__(self, f, checkbox):
        # f is an open file object; only its path/name is retained
        self.file_path = f.name
        self.file_name = os.path.basename(f.name)
        # test if file is an FCS file, raise TypeError if not
        try:
            flow_data = flowio.FlowData(f.name)
            self.flow_metadata = flow_data.text
            del flow_data
        except:
            raise TypeError("File %s is not an FCS file." % self.file_name)
        self.checkbox = checkbox
        # initialize status and all user-chosen metadata fields
        self._reset_metadata()
        del f

    def _reset_metadata(self):
        # Clear the upload status and every metadata choice. Shared by
        # __init__ and reinitialize so the two can never drift apart.
        self.status = 'Pending'  # other values are 'Error' and 'Complete'
        self.error_msg = ""
        self.project = None
        self.project_pk = None
        self.subject = None
        self.subject_pk = None
        self.visit = None
        self.visit_pk = None
        self.specimen = None
        self.specimen_pk = None
        self.stimulation = None
        self.stimulation_pk = None
        self.pretreatment = None
        self.storage = None
        self.site_panel = None
        self.site_panel_pk = None
        self.cytometer = None
        self.cytometer_pk = None
        self.acq_date = None

    def reinitialize(self):
        """Reset ALL metadata choices and the upload status, and re-enable
        the file's checkbox so the file can be re-categorized.

        Fix: previously pretreatment, storage, cytometer, cytometer_pk and
        acq_date were not cleared here (and error_msg was set to None
        instead of ""), leaving stale values after a reset.
        """
        self._reset_metadata()
        # re-activate the checkbox
        self.checkbox.config(state=Tkinter.ACTIVE)
        self.checkbox.mark_unchecked()

    def mark_as_not_matching(self):
        # highlight the checkbox red to flag a metadata mismatch
        self.checkbox.config(background='#FFAAAA')

    def mark_as_matching(self):
        # restore the normal (white) checkbox background
        self.checkbox.config(background='white')
class MyCheckbutton(Tkinter.Checkbutton):
    """Checkbutton that remembers the full path of the FCS file it
    represents and exposes its check state through convenience methods."""

    def __init__(self, *args, **kwargs):
        # The full path is not a Checkbutton option, so pull it out before
        # delegating to the parent; it is used to populate the tree later.
        self.file_path = kwargs.pop('file_path')
        # Checkboxes are created dynamically, so keep our own handle on the
        # control variable (creating one if the caller didn't supply it).
        self.var = kwargs.setdefault('variable', Tkinter.IntVar())
        Tkinter.Checkbutton.__init__(self, *args, **kwargs)

    def is_checked(self):
        """Return the current value of the control variable (1 or 0)."""
        return self.var.get()

    def mark_checked(self):
        """Set the control variable to checked (1)."""
        self.var.set(1)

    def mark_unchecked(self):
        """Set the control variable to unchecked (0)."""
        self.var.set(0)
def get_calendar(locale, fwday):
    """Return a text calendar starting the week on *fwday*.

    A locale-aware LocaleTextCalendar is returned when *locale* is given,
    otherwise a plain TextCalendar.
    """
    if locale is not None:
        return calendar.LocaleTextCalendar(fwday, locale)
    return calendar.TextCalendar(fwday)
class Calendar(ttk.Frame):
    """
    Simple calendar using ttk Treeview together with calendar and datetime
    classes.
    Graciously borrowed from:
    http://svn.python.org/projects/sandbox/trunk/ttk-gsoc/samples/ttkcalendar.py
    """
    # short aliases used throughout the class
    datetime = calendar.datetime.datetime
    timedelta = calendar.datetime.timedelta
    def __init__(self, master=None, variable=None, **kw):
        """
        WIDGET-SPECIFIC OPTIONS
            locale, firstweekday, year, month, selectbackground,
            selectforeground

        If *variable* is given (a Tkinter StringVar), it receives the
        selected date as "YYYY-M-D" whenever the user picks a day.
        """
        # remove custom options from kw before initializing ttk.Frame
        fwday = kw.pop('firstweekday', calendar.MONDAY)
        year = kw.pop('year', self.datetime.now().year)
        month = kw.pop('month', self.datetime.now().month)
        locale = kw.pop('locale', None)
        sel_bg = kw.pop('selectbackground', '#ecffc4')
        sel_fg = kw.pop('selectforeground', '#05640e')
        self._date = self.datetime(year, month, 1)
        self._selection = None # no date selected
        ttk.Frame.__init__(self, master, **kw)
        self._variable = variable
        self._cal = get_calendar(locale, fwday)
        self.__setup_styles() # creates custom styles
        self.__place_widgets() # pack/grid used widgets
        self.__config_calendar() # adjust calendar columns and setup tags
        # configure a canvas, and proper bindings, for selecting dates
        self._font = tkFont.Font()
        self._canvas = Tkinter.Canvas(
            self._calendar,
            background=sel_bg,
            borderwidth=0,
            highlightthickness=1,
            highlightbackground="black")
        self._canvas.text = self._canvas.create_text(
            0,
            0,
            fill=sel_fg,
            anchor='w')
        self.__setup_selection()
        # store items ids, used for insertion later
        self._items = [self._calendar.insert(
            '', 'end', values='') for _ in range(6)]
        # insert dates in the currently empty calendar
        self._build_calendar()
        # set the minimal size for the widget
        self._calendar.bind('<Map>', self.__minsize)
    def __setitem__(self, item, value):
        # intercept the custom select* options; delegate the rest to ttk.Frame
        if item in ('year', 'month'):
            raise AttributeError("attribute '%s' is not writeable" % item)
        elif item == 'selectbackground':
            self._canvas['background'] = value
        elif item == 'selectforeground':
            self._canvas.itemconfigure(self._canvas.text, item=value)
        else:
            ttk.Frame.__setitem__(self, item, value)
    def __getitem__(self, item):
        # expose year/month (read-only) and the custom select* options
        if item in ('year', 'month'):
            return getattr(self._date, item)
        elif item == 'selectbackground':
            return self._canvas['background']
        elif item == 'selectforeground':
            return self._canvas.itemcget(self._canvas.text, 'fill')
        else:
            r = ttk.tclobjs_to_py({item: ttk.Frame.__getitem__(self, item)})
            return r[item]
    def __setup_styles(self):
        # custom ttk styles: arrow-only layouts for the prev/next buttons
        style = ttk.Style(self.master)
        arrow_layout = lambda direction: (
            [
                (
                    'Button.focus',
                    {
                        'children': [('Button.%sarrow' % direction, None)]
                    }
                )
            ]
        )
        style.layout('L.TButton', arrow_layout('left'))
        style.layout('R.TButton', arrow_layout('right'))
    def __place_widgets(self):
        # header frame and its widgets
        hframe = ttk.Frame(self)
        lbtn = ttk.Button(hframe, style='L.TButton', command=self._prev_month)
        rbtn = ttk.Button(hframe, style='R.TButton', command=self._next_month)
        self._header = ttk.Label(hframe, width=15, anchor='center')
        # the calendar
        self._calendar = ttk.Treeview(show='', selectmode='none', height=7)
        # pack the widgets
        hframe.pack(in_=self, side='top', pady=4, anchor='center')
        lbtn.grid(in_=hframe)
        self._header.grid(in_=hframe, column=1, row=0, padx=12)
        rbtn.grid(in_=hframe, column=2, row=0)
        self._calendar.pack(in_=self, expand=1, fill='both', side='bottom')
    def __config_calendar(self):
        # the first treeview row holds the weekday abbreviations
        cols = self._cal.formatweekheader(3).split()
        self._calendar['columns'] = cols
        self._calendar.tag_configure('header', background='grey90')
        self._calendar.insert('', 'end', values=cols, tag='header')
        # adjust its columns width
        font = tkFont.Font()
        max_width = max(font.measure(col) for col in cols)
        for col in cols:
            self._calendar.column(
                col,
                width=max_width,
                minwidth=max_width,
                anchor='e')
    def __setup_selection(self):
        # hide the selection canvas on outside clicks or resize;
        # day clicks are handled by _pressed
        self._canvas.bind(
            '<ButtonPress-1>',
            lambda evt: self._canvas.place_forget())
        self._calendar.bind(
            '<Configure>',
            lambda evt: self._canvas.place_forget())
        self._calendar.bind('<ButtonPress-1>', self._pressed)
    def __minsize(self, evt):
        # lock the toplevel's minimum size to the calendar's mapped size
        width, height = self._calendar.master.geometry().split('x')
        height = height[:height.index('+')]
        self._calendar.master.minsize(width, height)
    def _build_calendar(self):
        """Fill the treeview rows with the days of the current month."""
        year, month = self._date.year, self._date.month
        # update header text (Month, YEAR)
        header = self._cal.formatmonthname(year, month, 0)
        self._header['text'] = header.title()
        # update calendar shown dates
        cal = self._cal.monthdayscalendar(year, month)
        for indx, item in enumerate(self._items):
            week = cal[indx] if indx < len(cal) else []
            fmt_week = [('%02d' % day) if day else '' for day in week]
            self._calendar.item(item, values=fmt_week)
    def _show_selection(self, text, bbox):
        """Configure canvas for a new selection."""
        x, y, width, height = bbox
        text_width = self._font.measure(text)
        canvas = self._canvas
        canvas.configure(width=width, height=height-1)
        canvas.coords(canvas.text, width - text_width - 2, height / 2)
        canvas.itemconfigure(canvas.text, text=text)
        canvas.place(in_=self._calendar, x=x-1, y=y-1)
    def clear_selection(self):
        """Clear current selection."""
        self._selection = None
        self._canvas.place_forget()
    # Callbacks
    def _pressed(self, evt):
        """Clicked somewhere in the calendar."""
        x, y, widget = evt.x, evt.y, evt.widget
        item = widget.identify_row(y)
        column = widget.identify_column(x)
        if not column or not item in self._items:
            # clicked in the weekdays row or just outside the columns
            return
        item_values = widget.item(item)['values']
        if not len(item_values): # row is empty for this month
            return
        text = item_values[int(column[1]) - 1]
        if not text: # date is empty
            return
        bbox = widget.bbox(item, column)
        if not bbox: # calendar not visible yet
            return
        # update and then show selection
        text = '%02d' % text
        self._selection = (text, item, column)
        self._show_selection(text, bbox)
        # push the chosen date into the bound variable, if any
        if self._variable:
            self._variable.set(
                "%d-%d-%d" % (
                    self._date.year,
                    self._date.month,
                    int(self._selection[0])))
    def _prev_month(self):
        """Updated calendar to show the previous month."""
        self._canvas.place_forget()
        # stepping back one day from the 1st lands in the previous month
        self._date = self._date - self.timedelta(days=1)
        self._date = self.datetime(self._date.year, self._date.month, 1)
        self._build_calendar() # reconstruct calendar
    def _next_month(self):
        """Update calendar to show the next month."""
        self._canvas.place_forget()
        year, month = self._date.year, self._date.month
        # jumping a full month's worth of days past the 1st always lands
        # in the next month; then snap back to its 1st
        self._date = self._date + self.timedelta(
            days=calendar.monthrange(year, month)[1] + 1)
        self._date = self.datetime(self._date.year, self._date.month, 1)
        self._build_calendar() # reconstruct calendar
    # Properties
    @property
    def selection(self):
        """Return a datetime representing the current selected date."""
        if not self._selection:
            return None
        year, month = self._date.year, self._date.month
        return self.datetime(year, month, int(self._selection[0]))
class Application(Tkinter.Frame):
    def __init__(self, master):
        """Set up client state, Tkinter variables/traces and show the
        login screen. REST credentials are filled in after login."""
        style = ttk.Style()
        style.configure(
            'Treeview',
            borderwidth=1,
            font=('TkDefaultFont', 12, 'normal'))
        # REST connection details, populated by a successful login
        self.host = None
        self.username = None
        self.token = None
        # Using the names (project, site, etc.) as the key, pk as the value
        # for the choice dictionaries below.
        # The names need to be unique (and they should be) and
        # it's more convenient to lookup by key using the name.
        self.project_dict = dict()
        self.site_dict = dict()
        self.subject_dict = dict()
        self.visit_dict = dict()
        self.site_panel_dict = dict()
        self.cytometer_dict = dict()
        self.specimen_dict = dict()
        self.stimulation_dict = dict()
        # dict of ChosenFile objects, key is file path, value is ChosenFile
        self.file_dict = dict()
        # start the metadata menus; each dropdown is backed by a StringVar
        # whose write-trace refreshes dependent menus / button state
        self.project_menu = None
        self.project_selection = Tkinter.StringVar()
        self.project_selection.trace("w", self.update_metadata)
        self.site_menu = None
        self.site_selection = Tkinter.StringVar()
        self.site_selection.trace("w", self.update_site_metadata)
        self.subject_menu = None
        self.subject_selection = Tkinter.StringVar()
        self.subject_selection.trace(
            "w",
            self.update_add_to_queue_button_state)
        self.visit_menu = None
        self.visit_selection = Tkinter.StringVar()
        self.visit_selection.trace("w", self.update_add_to_queue_button_state)
        self.specimen_menu = None
        self.specimen_selection = Tkinter.StringVar()
        self.specimen_selection.trace(
            "w",
            self.update_add_to_queue_button_state)
        self.pretreatment_menu = None
        self.pretreatment_selection = Tkinter.StringVar()
        self.pretreatment_selection.trace(
            "w",
            self.update_add_to_queue_button_state)
        self.storage_menu = None
        self.storage_selection = Tkinter.StringVar()
        self.storage_selection.trace(
            "w",
            self.update_add_to_queue_button_state)
        self.stimulation_menu = None
        self.stimulation_selection = Tkinter.StringVar()
        self.stimulation_selection.trace(
            "w",
            self.update_add_to_queue_button_state)
        self.site_panel_menu = None
        self.site_panel_selection = Tkinter.StringVar()
        self.site_panel_selection.trace(
            "w",
            self.site_selection_changed)
        self.cytometer_menu = None
        self.cytometer_selection = Tkinter.StringVar()
        self.cytometer_selection.trace(
            "w",
            self.update_add_to_queue_button_state)
        self.acquisition_cal = None
        self.acquisition_date_selection = Tkinter.StringVar()
        self.acquisition_date_selection.trace(
            "w",
            self.update_add_to_queue_button_state)
        # can't call super on old-style class, call parent init directly
        Tkinter.Frame.__init__(self, master)
        # window icon (no icon support worked out for linux yet)
        if sys.platform == 'linux2':
            pass
        else:
            self.master.iconbitmap(ICON_PATH)
        self.master.title('ReFlow Client - ' + VERSION)
        self.master.minsize(width=WINDOW_WIDTH, height=WINDOW_HEIGHT)
        self.master.config(bg=BACKGROUND_COLOR)
        self.menu_bar = Tkinter.Menu(master)
        self.master.config(menu=self.menu_bar)
        # widgets created later by load_main_frame
        self.upload_button = None
        self.clear_selected_queue_button = None
        self.queue_tree = None
        self.upload_progress_bar = None
        self.view_metadata_button = None
        self.add_to_queue_button = None
        self.file_list_canvas = None
        self.s = ttk.Style()
        self.s.map(
            'Inactive.TButton',
            foreground=[('disabled', INACTIVE_FOREGROUND_COLOR)])
        self.pack()
        self.login_frame = Tkinter.Frame(bg=BACKGROUND_COLOR)
        self.logo_image = ImageTk.PhotoImage(Image.open(LOGO_PATH))
        self.load_login_frame()
        #self.load_main_frame()
def load_login_frame(self):
def login(*args):
host_text = host_entry.get()
self.username = user_entry.get()
password = password_entry.get()
# remove 'http://' or trailing slash from host text if present
matches = re.search('^(https://)?([^/]+)(/)*', host_text)
try:
self.host = matches.groups()[1]
self.token = rest.get_token(self.host, self.username, password)
except Exception, e:
print e
if not self.token:
tkMessageBox.showwarning(
'Login Failed',
'Are the hostname, username, and password are correct?')
return
self.login_frame.destroy()
self.master.unbind('<Return>')
self.load_main_frame()
self.master.bind('<Return>', login)
logo_label = Tkinter.Label(self.login_frame, image=self.logo_image)
logo_label.config(bg=BACKGROUND_COLOR)
logo_label.pack(side='top', pady=PAD_EXTRA_LARGE)
host_entry_frame = Tkinter.Frame(self.login_frame, bg=BACKGROUND_COLOR)
host_label = Tkinter.Label(
host_entry_frame,
text='Hostname',
bg=BACKGROUND_COLOR,
width=8,
anchor='e')
host_label.pack(side='left')
host_entry = Tkinter.Entry(
host_entry_frame,
highlightbackground=BACKGROUND_COLOR,
width=24)
host_entry.pack(padx=PAD_SMALL)
host_entry_frame.pack(pady=PAD_SMALL)
user_entry_frame = Tkinter.Frame(self.login_frame, bg=BACKGROUND_COLOR)
user_label = Tkinter.Label(
user_entry_frame,
text='Username',
bg=BACKGROUND_COLOR,
width=8,
anchor='e')
user_label.pack(side='left')
user_entry = Tkinter.Entry(
user_entry_frame,
highlightbackground=BACKGROUND_COLOR,
width=24)
user_entry.pack(padx=PAD_SMALL)
user_entry_frame.pack(pady=PAD_SMALL)
password_entry_frame = Tkinter.Frame(
self.login_frame,
bg=BACKGROUND_COLOR)
password_label = Tkinter.Label(
password_entry_frame,
text='Password',
bg=BACKGROUND_COLOR,
width=8,
anchor='e')
password_label.pack(side='left')
password_entry = Tkinter.Entry(
password_entry_frame,
show='*',
highlightbackground=BACKGROUND_COLOR,
width=24)
password_entry.pack(padx=PAD_SMALL)
password_entry_frame.pack(pady=PAD_SMALL)
login_button_frame = Tkinter.Frame(
self.login_frame,
bg=BACKGROUND_COLOR)
login_button_label = Tkinter.Label(
login_button_frame,
bg=BACKGROUND_COLOR)
login_button = ttk.Button(
login_button_label,
text='Login',
command=login)
login_button.pack()
login_button_label.pack(side='right')
login_button_frame.pack(fill='x')
self.login_frame.place(in_=self.master, anchor='c', relx=.5, rely=.5)
def load_main_frame(self):
    """Build the main application UI.

    Lays out the metadata chooser column, the file chooser, the upload
    queue table and the progress bar.  The ten nearly identical
    label+OptionMenu rows are built by _build_chooser_row; widget
    creation and pack order match the original hand-rolled layout.
    """
    main_frame = Tkinter.Frame(self.master, bg=BACKGROUND_COLOR)
    main_frame.pack(
        fill='both',
        expand=True,
        anchor='n',
        padx=PAD_MEDIUM,
        pady=PAD_MEDIUM)
    top_frame = Tkinter.LabelFrame(
        main_frame,
        bg=BACKGROUND_COLOR)
    top_frame.pack(
        fill='both',
        expand=True,
        anchor='n',
        padx=PAD_MEDIUM,
        pady=PAD_MEDIUM)
    top_frame.config(text="Choose & Categorize Files")
    bottom_frame = Tkinter.Frame(main_frame, bg=BACKGROUND_COLOR)
    bottom_frame.pack(
        fill='both',
        expand=True,
        anchor='n',
        padx=0,
        pady=0)
    # Metadata frame - for choosing project/subject/site etc.
    metadata_frame = Tkinter.Frame(
        top_frame,
        bg=BACKGROUND_COLOR)
    # all chooser rows share one layout, see _build_chooser_row;
    # only the project menu carries an explicit width
    self.project_menu = self._build_chooser_row(
        metadata_frame, 'Project:', self.project_selection, menu_width=36)
    self.site_menu = self._build_chooser_row(
        metadata_frame, 'Site:', self.site_selection)
    self.subject_menu = self._build_chooser_row(
        metadata_frame, 'Subject:', self.subject_selection)
    self.visit_menu = self._build_chooser_row(
        metadata_frame, 'Visit:', self.visit_selection)
    self.specimen_menu = self._build_chooser_row(
        metadata_frame, 'Specimen:', self.specimen_selection)
    self.pretreatment_menu = self._build_chooser_row(
        metadata_frame, 'Pre-treatment:', self.pretreatment_selection)
    self.storage_menu = self._build_chooser_row(
        metadata_frame, 'Storage:', self.storage_selection)
    self.stimulation_menu = self._build_chooser_row(
        metadata_frame, 'Stimulation:', self.stimulation_selection)
    self.site_panel_menu = self._build_chooser_row(
        metadata_frame, 'Site Panel:', self.site_panel_selection)
    self.cytometer_menu = self._build_chooser_row(
        metadata_frame, 'Cytometer:', self.cytometer_selection)
    self._build_acquisition_date_row(metadata_frame)
    # populate the choosers that don't depend on a project selection
    self.load_user_projects()
    self.load_specimens()
    self.load_pretreatment()
    self.load_storage()
    metadata_frame.pack(
        fill='x',
        expand=False,
        anchor='n',
        side='left',
        padx=PAD_MEDIUM,
        pady=PAD_MEDIUM)
    self._build_file_chooser(top_frame)
    self._build_upload_queue(bottom_frame)
    # Progress bar
    progress_frame = Tkinter.Frame(bottom_frame, bg=BACKGROUND_COLOR)
    self.upload_progress_bar = ttk.Progressbar(progress_frame)
    self.upload_progress_bar.pack(side='bottom', fill='x', expand=True)
    progress_frame.pack(
        fill='x',
        expand=False,
        anchor='s',
        padx=PAD_MEDIUM,
        pady=PAD_SMALL)

def _build_chooser_row(self, parent, label_text, variable, menu_width=None):
    """Build one labelled chooser row (label left, OptionMenu right).

    Returns the OptionMenu widget so callers can keep a reference for
    later repopulation.  menu_width, when given, is forwarded to the
    menu's config() call.
    """
    row_frame = Tkinter.Frame(parent, bg=BACKGROUND_COLOR)
    label_frame = Tkinter.Frame(row_frame, bg=BACKGROUND_COLOR)
    label = Tkinter.Label(
        label_frame,
        text=label_text,
        bg=BACKGROUND_COLOR,
        width=LABEL_WIDTH,
        anchor=Tkinter.E)
    label.pack(side='left')
    label_frame.pack(side='left', fill='x')
    chooser_frame = Tkinter.Frame(row_frame, bg=BACKGROUND_COLOR)
    menu = Tkinter.OptionMenu(chooser_frame, variable, '')
    if menu_width is None:
        menu.config(bg=BACKGROUND_COLOR)
    else:
        menu.config(bg=BACKGROUND_COLOR, width=menu_width)
    menu.pack(fill='x', expand=True)
    chooser_frame.pack(fill='x', expand=True)
    row_frame.pack(side='top', fill='x', expand=True)
    return menu

def _build_acquisition_date_row(self, parent):
    """Build the acquisition date row with its Calendar widget.

    Stores the Calendar on self.acquisition_cal; it writes the chosen
    date into self.acquisition_date_selection.
    """
    acquisition_date_frame = Tkinter.Frame(parent, bg=BACKGROUND_COLOR)
    label_frame = Tkinter.Frame(
        acquisition_date_frame,
        bg=BACKGROUND_COLOR)
    label = Tkinter.Label(
        label_frame,
        text='Acquisition Date:',
        bg=BACKGROUND_COLOR,
        width=LABEL_WIDTH,
        anchor=Tkinter.E)
    # anchored north so the label tops-aligns with the calendar
    label.pack(
        side='top',
        fill=Tkinter.BOTH,
        anchor=Tkinter.N)
    label_frame.pack(
        side='left',
        fill=Tkinter.BOTH,
        anchor=Tkinter.N)
    chooser_frame = Tkinter.Frame(
        acquisition_date_frame,
        bg=BACKGROUND_COLOR)
    self.acquisition_cal = Calendar(
        master=chooser_frame,
        variable=self.acquisition_date_selection,
        firstweekday=calendar.SUNDAY,
    )
    self.acquisition_cal.pack(expand=1, fill='both')
    chooser_frame.pack(fill='x', expand=True)
    acquisition_date_frame.pack(side='top', fill='x', expand=True)

def _build_file_chooser(self, parent):
    """Build the file chooser: action buttons plus the scrollable
    canvas that hosts one MyCheckbutton per chosen file."""
    file_chooser_frame = Tkinter.Frame(parent, bg=BACKGROUND_COLOR)
    file_chooser_button_frame = Tkinter.Frame(
        file_chooser_frame,
        bg=BACKGROUND_COLOR)
    file_chooser_button = ttk.Button(
        file_chooser_button_frame,
        text='Choose Files...',
        command=self.choose_files)
    file_clear_selection_button = ttk.Button(
        file_chooser_button_frame,
        text='Remove Selected Files',
        command=self.clear_selected_files)
    file_clear_all_button = ttk.Button(
        file_chooser_button_frame,
        text='Select All',
        command=self.select_all_files)
    self.view_metadata_button = ttk.Button(
        file_chooser_button_frame,
        text='View Metadata',
        command=self.view_metadata)
    # starts disabled; enabled once all metadata choices are made
    self.add_to_queue_button = ttk.Button(
        file_chooser_button_frame,
        text='Add to Queue',
        state='disabled',
        style='Inactive.TButton',
        command=self.add_to_upload_queue)
    file_chooser_button.pack(side='left')
    file_clear_selection_button.pack(side='left')
    file_clear_all_button.pack(side='left')
    self.add_to_queue_button.pack(side='right')
    self.view_metadata_button.pack(side='right')
    file_chooser_button_frame.pack(
        anchor='n',
        fill='x',
        expand=False,
    )
    file_list_frame = Tkinter.Frame(
        file_chooser_frame,
        bg=BACKGROUND_COLOR,
        highlightcolor=HIGHLIGHT_COLOR,
        highlightbackground=BORDER_COLOR,
        highlightthickness=1)
    file_scroll_bar = Tkinter.Scrollbar(
        file_list_frame,
        orient='vertical')
    self.file_list_canvas = Tkinter.Canvas(
        file_list_frame,
        yscrollcommand=file_scroll_bar.set,
        relief='flat',
        borderwidth=0)
    # keep wheel scrolling working while the pointer is over the canvas
    self.file_list_canvas.bind('<MouseWheel>', self._on_mousewheel)
    file_scroll_bar.config(command=self.file_list_canvas.yview)
    file_scroll_bar.pack(side='right', fill='y')
    self.file_list_canvas.pack(
        fill='both',
        expand=True
    )
    file_list_frame.pack(
        fill='both',
        expand=True)
    file_chooser_frame.pack(
        fill='both',
        expand=True,
        anchor='n',
        side='right',
        padx=PAD_MEDIUM,
        pady=PAD_MEDIUM)

def _build_upload_queue(self, parent):
    """Build the upload queue: control buttons plus a ttk.Treeview used
    as a table (one row per queued file)."""
    upload_queue_frame = Tkinter.LabelFrame(
        parent,
        bg=BACKGROUND_COLOR,
        text='Upload Queue',
        padx=PAD_MEDIUM,
        pady=PAD_MEDIUM)
    upload_queue_button_frame = Tkinter.Frame(
        upload_queue_frame,
        bg=BACKGROUND_COLOR)
    self.upload_button = ttk.Button(
        upload_queue_button_frame,
        text='Upload',
        style='Inactive.TButton',
        command=self.upload_files)
    self.upload_button.pack(side='left', expand=False)
    self.clear_selected_queue_button = ttk.Button(
        upload_queue_button_frame,
        text='Clear Selected',
        style='Inactive.TButton',
        command=self.clear_selected_queue)
    self.clear_selected_queue_button.pack(side='left', expand=False)
    display_error_button = ttk.Button(
        upload_queue_button_frame,
        text='View Errors',
        command=self.display_error)
    display_error_button.pack(side='left', expand=False)
    upload_queue_button_frame.pack(
        fill='x',
        expand=False,
        anchor='n',
        padx=0,
        pady=PAD_SMALL)
    # using a Treeview to mimic a table, no table in Tkinter/ttk
    self.queue_tree = ttk.Treeview(
        upload_queue_frame,
        columns=QUEUE_HEADERS,
        show="headings")
    queue_vertical_scroll_bar = ttk.Scrollbar(
        upload_queue_frame,
        orient="vertical",
        command=self.queue_tree.yview)
    self.queue_tree.config(
        yscrollcommand=queue_vertical_scroll_bar.set)
    self.queue_tree.pack(
        side='left',
        fill='both',
        expand=True,
        anchor='w')
    queue_vertical_scroll_bar.pack(
        side='right',
        fill='y')
    for header in QUEUE_HEADERS:
        self.queue_tree.heading(header, text=header.title())
        self.queue_tree.column(
            header,
            minwidth=0,
            width=25,
            stretch=Tkinter.TRUE)
    # setup Treeview tag styles, it's the only way to change colors/fonts
    # Note: it changes the entire row, individual cells cannot be
    # formatted
    self.queue_tree.tag_configure(
        tagname='pending',
        font=('TkDefaultFont', 11, 'bold'))
    self.queue_tree.tag_configure(
        tagname='error',
        font=('TkDefaultFont', 11, 'bold'),
        foreground='red')
    self.queue_tree.tag_configure(
        tagname='complete',
        font=('TkDefaultFont', 11, 'italic'),
        foreground='#555555')
    upload_queue_frame.pack(
        fill='both',
        expand=True,
        padx=PAD_MEDIUM,
        pady=0)
def _on_mousewheel(self, event):
    """Scroll the file list canvas in response to mouse-wheel motion."""
    wheel_delta = event.delta
    self.file_list_canvas.yview_scroll(-wheel_delta, "units")
def clear_selected_files(self):
    """Remove every checked (and still enabled) file from the list,
    then re-pack the remaining checkboxes without gaps."""
    checked = [
        child for child in self.file_list_canvas.children.values()
        if isinstance(child, MyCheckbutton)
        and child.is_checked()
        and child.cget('state') != Tkinter.DISABLED
    ]
    if not checked:
        self.update_add_to_queue_button_state()
        return
    for child in checked:
        # forget the file and destroy its widget
        del(self.file_dict[child.file_path])
        child.destroy()
    # re-order remaining checkboxes (sorted by label) so no blank
    # rows are left on the canvas
    remaining = sorted(
        (c for c in self.file_list_canvas.children.values()
         if isinstance(c, MyCheckbutton)),
        key=lambda c: c.cget('text'))
    for position, child in enumerate(remaining):
        self.file_list_canvas.create_window(
            10,
            (24 * position),
            anchor='nw',
            window=child
        )
    self.update_add_to_queue_button_state()
def select_all_files(self):
    """Mark every checkbox in the file list as checked."""
    for child in self.file_list_canvas.children.values():
        if isinstance(child, MyCheckbutton):
            child.mark_checked()
    self.update_add_to_queue_button_state()
def choose_files(self):
    """Prompt the user for FCS files and (re)populate the file list.

    Clears the subject and acquisition date selections afterwards and
    re-checks which files match the selected site panel.
    """
    # Some Tkinter bug in Windows where askopenfiles throws an IOError
    # Looks like this will be fixed in the next Python release 2.7.7 ???
    # See http://bugs.python.org/issue5712
    if sys.platform == 'win32':
        selected_files = []
        selected_file_paths = tkFileDialog.askopenfilenames()
        f = Tkinter.Frame()
        selected_file_paths = f.tk.splitlist(selected_file_paths)
        for path in selected_file_paths:
            f = open(path)
            selected_files.append(f)
    else:
        selected_files = tkFileDialog.askopenfiles('r')
    if len(selected_files) < 1:
        return
    # clear the canvas and the relevant file_dict keys
    self.file_list_canvas.delete(Tkinter.ALL)
    for k in self.file_list_canvas.children.keys():
        file_path = self.file_list_canvas.children[k].file_path
        del(self.file_list_canvas.children[k])
        # if file is not in queue, delete it from the app
        if not self.queue_tree.exists(file_path):
            del(self.file_dict[file_path])
    for i, f in enumerate(selected_files):
        cb = MyCheckbutton(
            self.file_list_canvas,
            text=os.path.basename(f.name),
            file_path=f.name
        )
        try:
            chosen_file = ChosenFile(f, cb)
        except TypeError:
            del cb
            continue
        finally:
            # always release the file handle; it was previously leaked
            # whenever ChosenFile raised TypeError
            f.close()
        # bind to our canvas mouse function
        # to keep scrolling working when the mouse is over a checkbox
        cb.bind('<MouseWheel>', self._on_mousewheel)
        self.file_list_canvas.create_window(
            10,
            (24 * i),
            anchor='nw',
            window=cb
        )
        self.file_dict[chosen_file.file_path] = chosen_file
        del chosen_file
    # update scroll region
    self.file_list_canvas.config(
        scrollregion=(0, 0, 1000, len(selected_files)*20))
    # clear the acquisition date and subject
    self.subject_selection.set('')
    self.acquisition_date_selection.set('')
    self.acquisition_cal.clear_selection()
    del selected_files
    self.mark_site_panel_matches()
    self.update_add_to_queue_button_state()
def load_user_projects(self):
try:
response = rest.get_projects(self.host, self.token)
except Exception, e:
print e
return
if not 'data' in response:
return
self.project_menu['menu'].delete(0, 'end')
self.site_menu['menu'].delete(0, 'end')
self.subject_menu['menu'].delete(0, 'end')
self.visit_menu['menu'].delete(0, 'end')
self.stimulation_menu['menu'].delete(0, 'end')
self.site_panel_menu['menu'].delete(0, 'end')
self.cytometer_menu['menu'].delete(0, 'end')
for result in response['data']:
self.project_dict[result['project_name']] = result['id']
for project_name in sorted(self.project_dict.keys()):
self.project_menu['menu'].add_command(
label=project_name,
command=lambda value=project_name:
self.project_selection.set(value))
def load_project_sites(self, project_id):
self.site_menu['menu'].delete(0, 'end')
self.site_selection.set('')
self.site_dict.clear()
response = None
try:
response = rest.get_sites(
self.host,
self.token,
project_pk=project_id)
except Exception, e:
print e
if not 'data' in response:
return
for result in response['data']:
self.site_dict[result['site_name']] = result['id']
for site_name in sorted(self.site_dict.keys()):
self.site_menu['menu'].add_command(
label=site_name,
command=lambda value=site_name:
self.site_selection.set(value))
def load_project_subjects(self, project_id):
self.subject_menu['menu'].delete(0, 'end')
self.subject_selection.set('')
self.subject_dict.clear()
response = None
try:
response = rest.get_subjects(
self.host,
self.token,
project_pk=project_id)
except Exception, e:
print e
if not 'data' in response:
return
for result in response['data']:
self.subject_dict[result['subject_code']] = result['id']
for subject_code in sorted(self.subject_dict.keys()):
self.subject_menu['menu'].add_command(
label=subject_code,
command=lambda value=subject_code:
self.subject_selection.set(value))
def load_project_visits(self, project_id):
self.visit_menu['menu'].delete(0, 'end')
self.visit_selection.set('')
self.visit_dict.clear()
response = None
try:
response = rest.get_visit_types(
self.host,
self.token,
project_pk=project_id)
except Exception, e:
print e
if not 'data' in response:
return
for result in response['data']:
self.visit_dict[result['visit_type_name']] = result['id']
for visit_type_name in sorted(self.visit_dict.keys()):
self.visit_menu['menu'].add_command(
label=visit_type_name,
command=lambda value=visit_type_name:
self.visit_selection.set(value))
def load_specimens(self):
try:
response = rest.get_specimens(self.host, self.token)
except Exception, e:
print e
return
if not 'data' in response:
return
for result in response['data']:
self.specimen_dict[result['specimen_description']] = result['id']
for specimen in sorted(self.specimen_dict.keys()):
self.specimen_menu['menu'].add_command(
label=specimen,
command=lambda value=specimen:
self.specimen_selection.set(value))
def load_pretreatment(self):
    """Fill the pre-treatment chooser with its fixed set of options."""
    self.pretreatment_menu['menu'].delete(0, 'end')
    for choice in ('In vitro', 'Ex vivo'):
        self.pretreatment_menu['menu'].add_command(
            label=choice,
            command=lambda value=choice:
            self.pretreatment_selection.set(value))
def load_storage(self):
    """Fill the storage chooser with its fixed set of options."""
    self.storage_menu['menu'].delete(0, 'end')
    for choice in ('Fresh', 'Cryopreserved'):
        self.storage_menu['menu'].add_command(
            label=choice,
            command=lambda value=choice:
            self.storage_selection.set(value))
def load_stimulations(self, project_id):
self.stimulation_menu['menu'].delete(0, 'end')
self.stimulation_selection.set('')
self.stimulation_dict.clear()
try:
response = rest.get_stimulations(
self.host,
self.token,
project_pk=project_id)
except Exception, e:
print e
return
if not 'data' in response:
return
for result in response['data']:
self.stimulation_dict[result['stimulation_name']] = result['id']
for stimulation in sorted(self.stimulation_dict.keys()):
self.stimulation_menu['menu'].add_command(
label=stimulation,
command=lambda value=stimulation:
self.stimulation_selection.set(value))
def update_site_metadata(self, *args, **kwargs):
self.site_panel_menu['menu'].delete(0, 'end')
self.site_panel_selection.set('')
self.site_panel_dict.clear()
self.cytometer_menu['menu'].delete(0, 'end')
self.cytometer_selection.set('')
self.cytometer_dict.clear()
if not self.site_selection.get():
return
site_pk = self.site_dict[self.site_selection.get()]
rest_args = [self.host, self.token]
rest_kwargs = {'site_pk': site_pk}
try:
response = rest.get_site_panels(*rest_args, **rest_kwargs)
except Exception, e:
print e
return
if not 'data' in response:
return
for result in response['data']:
self.site_panel_dict[result['name']] = result['id']
for panel_name in sorted(self.site_panel_dict.keys()):
self.site_panel_menu['menu'].add_command(
label=panel_name,
command=lambda value=panel_name:
self.site_panel_selection.set(value))
try:
response = rest.get_cytometers(*rest_args, **rest_kwargs)
except Exception, e:
print e
return
if not 'data' in response:
return
for result in response['data']:
self.cytometer_dict[result['cytometer_name']] = result['id']
for panel_name in sorted(self.cytometer_dict.keys()):
self.cytometer_menu['menu'].add_command(
label=panel_name,
command=lambda value=panel_name:
self.cytometer_selection.set(value))
def update_metadata(self, *args):
    """Trace callback for the project chooser: reload all
    project-dependent choosers and refresh the queue button.

    Normalized from ``def update_metadata(*args): self = args[0]`` to a
    conventional method signature; behavior is identical for both bound
    and unbound calls.  Extra positional args (Tkinter trace details)
    are ignored.
    """
    project_name = self.project_selection.get()
    if project_name in self.project_dict:
        project_pk = self.project_dict[project_name]
        self.load_project_sites(project_pk)
        self.load_project_subjects(project_pk)
        self.load_project_visits(project_pk)
        self.load_stimulations(project_pk)
    self.update_add_to_queue_button_state()
def update_add_to_queue_button_state(self, *args, **kwargs):
    """Enable the 'Add to Queue' button only when every metadata field
    has a value and at least one file is listed."""
    required_selections = (
        self.site_selection,
        self.subject_selection,
        self.visit_selection,
        self.specimen_selection,
        self.pretreatment_selection,
        self.storage_selection,
        self.stimulation_selection,
        self.site_panel_selection,
        self.cytometer_selection,
        self.acquisition_date_selection)
    active = all(var.get() for var in required_selections)
    if len(self.file_list_canvas.children) == 0:
        active = False
    if active:
        self.add_to_queue_button.config(state='active')
    else:
        self.add_to_queue_button.config(state='disabled')
def mark_site_panel_matches(self):
    """
    Change text color of FCS files' in file chooser frame based
    on whether the file matches the selected site panel
    """
    site_panel_selection = self.site_panel_selection.get()
    if not site_panel_selection:
        return
    site_panel_pk = self.site_panel_dict[site_panel_selection]
    for fcs_file in self.file_dict:
        param_dict = {}
        metadata = self.file_dict[fcs_file].flow_metadata
        for key in metadata:
            # match FCS parameter keywords like 'P3N' / 'P3S';
            # the class was previously '[N,S]', which also (wrongly)
            # matched a literal comma
            matches = re.search(
                r'^P(\d+)([NS])$', key, flags=re.IGNORECASE)
            if matches:
                channel_number = int(matches.group(1))
                n_or_s = str.lower(matches.group(2))
                # 'not in' replaces the Python-2-only has_key()
                if channel_number not in param_dict:
                    param_dict[channel_number] = {}
                param_dict[channel_number][n_or_s] = metadata[key]
        is_match = rest.is_site_panel_match(
            self.host,
            self.token,
            site_panel_pk,
            param_dict)
        if is_match:
            self.file_dict[fcs_file].mark_as_matching()
        else:
            self.file_dict[fcs_file].mark_as_not_matching()
def site_selection_changed(self, *args, **kwargs):
    """Trace callback for the site chooser: re-evaluate which files
    match the (now reset) site panel and refresh the queue button."""
    self.mark_site_panel_matches()
    self.update_add_to_queue_button_state(*args, **kwargs)
def view_metadata(self):
    """Show the FCS metadata of all checked files in a popup window.

    Each file gets a bold heading followed by its metadata key/value
    pairs with alternating row shading.
    """
    message_win = Tkinter.Toplevel()
    meta_frame = Tkinter.Frame(message_win)
    meta_scroll_bar = Tkinter.Scrollbar(
        meta_frame,
        orient='vertical')
    metadata_text = Tkinter.Text(
        meta_frame,
        bg=BACKGROUND_COLOR,
        yscrollcommand=meta_scroll_bar.set)
    # text tags: alternate-row shading and a large bold file heading
    metadata_text.tag_configure('alt-row', background=ROW_ALT_COLOR)
    metadata_text.tag_configure('file-name', font=('TkDefaultFont', 18, 'bold'))
    for k, v in self.file_list_canvas.children.items():
        if isinstance(v, MyCheckbutton):
            if v.is_checked() and v.cget('state') != Tkinter.DISABLED:
                # get metadata for only the selected checkboxes
                chosen_file = self.file_dict[v.file_path]
                metadata_text.insert(Tkinter.END, chosen_file.file_name, ("file-name"))
                metadata_text.insert(Tkinter.END, '\n')
                metadata_dict = chosen_file.flow_metadata
                for i, key in enumerate(sorted(metadata_dict)):
                    line_start = metadata_text.index(Tkinter.INSERT)
                    metadata_text.insert(Tkinter.END, key + ": ")
                    # metadata values may contain bytes that aren't
                    # valid text; replace them rather than crash
                    metadata_text.insert(
                        Tkinter.END,
                        unicode(metadata_dict[key], errors='replace'))
                    metadata_text.insert(Tkinter.END, '\n')
                    # shade every other key/value line
                    if i % 2:
                        metadata_text.tag_add("alt-row", line_start, "end")
                    else:
                        metadata_text.tag_remove("alt-row", line_start, "end")
    meta_scroll_bar.config(command=metadata_text.yview)
    meta_scroll_bar.pack(side='right', fill='y')
    # disable editing once the text is populated
    metadata_text.config(
        state=Tkinter.DISABLED,
        background="white",
        highlightthickness=1,
        highlightbackground=BORDER_COLOR)
    meta_frame.pack(
        fill=Tkinter.BOTH,
        expand=Tkinter.TRUE,
        padx=PAD_MEDIUM,
        pady=PAD_MEDIUM)
    message_win.title('Metadata')
    message_win.minsize(width=480, height=320)
    message_win.config(bg=BACKGROUND_COLOR)
    metadata_text.pack(
        anchor='nw',
        fill=Tkinter.BOTH,
        expand=Tkinter.TRUE,
        padx=0,
        pady=0
    )
    # make sure there's a way to destroy the dialog
    message_button = ttk.Button(
        message_win,
        text='OK',
        command=message_win.destroy)
    message_button.pack(
        anchor=Tkinter.E,
        padx=PAD_MEDIUM,
        pady=PAD_MEDIUM)
def add_to_upload_queue(self):
    """Copy the current metadata selections onto every checked file and
    insert the files into the upload queue tree, then disable their
    checkboxes while they sit in the queue."""
    for child in self.file_list_canvas.children.values():
        if not isinstance(child, MyCheckbutton):
            continue
        if not child.is_checked() or \
                child.cget('state') == Tkinter.DISABLED:
            continue
        # populate the ChosenFile attributes from the choosers
        c_file = self.file_dict[child.file_path]
        c_file.project = self.project_selection.get()
        c_file.project_pk = self.project_dict[c_file.project]
        c_file.subject = self.subject_selection.get()
        c_file.subject_pk = self.subject_dict[c_file.subject]
        c_file.visit = self.visit_selection.get()
        c_file.visit_pk = self.visit_dict[c_file.visit]
        c_file.specimen = self.specimen_selection.get()
        c_file.specimen_pk = self.specimen_dict[c_file.specimen]
        c_file.pretreatment = self.pretreatment_selection.get()
        c_file.storage = self.storage_selection.get()
        c_file.stimulation = self.stimulation_selection.get()
        c_file.stimulation_pk = \
            self.stimulation_dict[c_file.stimulation]
        c_file.site_panel = self.site_panel_selection.get()
        c_file.site_panel_pk = \
            self.site_panel_dict[c_file.site_panel]
        c_file.cytometer = self.cytometer_selection.get()
        c_file.cytometer_pk = \
            self.cytometer_dict[c_file.cytometer]
        c_file.acq_date = self.acquisition_date_selection.get()
        # build the tree row values in column order
        row = [getattr(c_file, attr) for attr in (
            'file_name', 'project', 'subject', 'visit', 'specimen',
            'pretreatment', 'storage', 'stimulation', 'site_panel',
            'cytometer', 'acq_date', 'status')]
        # replace any existing queue entry for this file
        if self.queue_tree.exists(c_file.file_path):
            self.queue_tree.delete(c_file.file_path)
        # the row id is the file's full path so identically named
        # files stay distinguishable
        self.queue_tree.insert(
            '',
            'end',
            values=row,
            tags='pending',
            iid=c_file.file_path)
        # finally, disable the checkbox
        child.config(state=Tkinter.DISABLED)
def clear_selected_queue(self):
    """Remove the selected rows from the upload queue tree.

    A row's checkbox (if still shown) is re-enabled; otherwise the file
    is forgotten entirely.
    """
    # selection() returns the selected row ids (the files' full paths)
    for item in self.queue_tree.selection():
        try:
            chosen_file = self.file_dict[item]
        except Exception:
            # unknown row: just drop it from the tree
            self.queue_tree.delete(item)
            continue
        # user may have cleared the checkbox by now,
        # if so, we'll delete this file from our list,
        # if not, re-initialize the checkbox
        if chosen_file.checkbox in self.file_list_canvas.children.values():
            chosen_file.reinitialize()
        else:
            del(self.file_dict[item])
        self.queue_tree.delete(item)
def display_error(self):
# get_children returns a tuple of item IDs from the tree
tree_items = self.queue_tree.selection()
message_list = []
# the items are the tree rows
for item in tree_items:
# the items are the row IDs, which we set as the file's full path
try:
chosen_file = self.file_dict[item]
except Exception, e:
print e
break
if chosen_file.error_msg:
message_list.append(
"%s:\n\t%s" % (chosen_file.file_name, chosen_file.error_msg)
)
if len(tree_items) == 0:
message_list.append("No items selected")
elif len(message_list) == 0:
message_list.append("No errors")
message_win = Tkinter.Toplevel()
message_win.title('Errors')
message_win.minsize(width=480, height=320)
message_win.config(bg=BACKGROUND_COLOR)
message_label = Tkinter.Label(
message_win,
text="\n\n".join(message_list),
bg=BACKGROUND_COLOR,
justify=Tkinter.LEFT)
message_label.pack(
anchor='nw',
padx=PAD_MEDIUM,
pady=PAD_MEDIUM
)
# make sure there's a way to destroy the dialog
message_button = ttk.Button(
message_win,
text='OK',
command=message_win.destroy)
message_button.pack()
def upload_files(self):
    """Run the upload in a background thread to keep the UI responsive."""
    worker = Thread(target=self._upload_files)
    worker.start()
def _upload_files(self):
# get_children returns a tuple of item IDs from the tree
tree_items = self.queue_tree.get_children()
# use the item IDs as keys, file names as values
# we'll check the row's status value to upload only 'Pending' files
upload_list = []
# the items are the tree rows
for item in tree_items:
# the row's values are in the order we created them in
# the status is the last column
# only upload files with status=="Pending"
if self.queue_tree.item(item)['values'][-1] == 'Pending':
# the file path is the item
upload_list.append(item)
self.upload_progress_bar.config(maximum=len(upload_list))
self.add_to_queue_button.config(state='disabled')
self.upload_button.config(state='disabled')
self.clear_selected_queue_button.config(state='disabled')
for item in upload_list:
try:
chosen_file = self.file_dict[item]
except Exception, e:
print e
continue
if not chosen_file.project or \
not chosen_file.subject_pk or \
not chosen_file.site_panel_pk or \
not chosen_file.cytometer_pk or \
not chosen_file.acq_date or \
not chosen_file.specimen_pk or \
not chosen_file.pretreatment or \
not chosen_file.storage or \
not chosen_file.file_path or \
not chosen_file.stimulation_pk or \
not chosen_file.visit_pk:
break
rest_args = [
self.host,
self.token,
chosen_file.file_path
]
rest_kwargs = {
'subject_pk': str(chosen_file.subject_pk),
'site_panel_pk': str(chosen_file.site_panel_pk),
'cytometer_pk': str(chosen_file.cytometer_pk),
'visit_type_pk': str(chosen_file.visit_pk),
'specimen_pk': str(chosen_file.specimen_pk),
'pretreatment': str(chosen_file.pretreatment),
'storage': str(chosen_file.storage),
'stimulation_pk': str(chosen_file.stimulation_pk),
'acquisition_date': str(chosen_file.acq_date)
}
try:
response_dict = rest.post_sample(
*rest_args,
**rest_kwargs
)
except Exception, e:
print e
if response_dict['status'] == 201:
status = 'Complete'
elif response_dict['status'] == 400:
chosen_file.error_msg = "\n".join(
json.loads(response_dict['data']).values()[0])
status = 'Error'
else:
status = 'Error'
self.queue_tree.item(item, tags=status.lower())
# cannot set the values directly, must get them and reset
values = list(self.queue_tree.item(item, 'values'))
values[-1] = status
# re-populate the values
self.queue_tree.item(item, values=values)
# update our ChosenFile object
chosen_file.status = status
self.upload_progress_bar.step()
self.upload_progress_bar.update()
self.update_add_to_queue_button_state()
self.upload_button.config(state='active')
self.clear_selected_queue_button.config(state='active')
# Application entry point: create the Tk root window, build the app
# around it and hand control to the Tk event loop.
root = Tkinter.Tk()
app = Application(root)
app.mainloop()
test-integration/test_integration/fixtures/complex-output-project/predict.py | dashstander/cog | 0 | 12773819 | <reponame>dashstander/cog
from pydantic import BaseModel
from cog import BasePredictor
class Output(BaseModel):
    """Structured prediction result: a greeting and a farewell string."""
    hello: str
    goodbye: str
class Predictor(BasePredictor):
    """Cog predictor that greets and bids farewell to the given name."""

    def predict(self, name: str) -> Output:
        """Build the two-field Output for *name*."""
        greeting = "hello " + name
        farewell = "goodbye " + name
        return Output(hello=greeting, goodbye=farewell)
| 2.8125 | 3 |
api/kiln_share/__init__.py | jim8786453/kiln_share | 1 | 12773820 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import json
import os
from copy import deepcopy
from eve import Eve
from eve.auth import TokenAuth
from eve_swagger import swagger, add_documentation
from flask import current_app as app, abort, request
from kiln_share import settings
from kiln_share.storage import GridFSImageStorage
# Absolute directory of this package, and the Eve settings module beside it.
_MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
_SETTINGS_PATH = os.path.join(_MODULE_PATH, 'settings.py')
class Auth(TokenAuth):
    """Checks the HTTP 'Authorization' header has been set with an Auth0
    identifier. This will have been added by an upstream proxy.
    """
    def authorized(self, allowed_roles, resource, method):
        """Validates that the current request is allowed to pass through.
        """
        auth = None
        if 'X-Kiln-Share-Id' in request.headers:
            auth = request.headers.get('X-Kiln-Share-Id')
        if auth:
            # Record the auth value so Eve can filter documents by owner
            # (used e.g. by on_pre_GET_conversations lookups).
            self.set_request_auth_value(auth)
        return auth and self.check_auth(auth, allowed_roles,
                                        resource, method)
    def check_auth(self, token, allowed_roles, resource, method):
        """Look up the user for *token*, auto-creating it on first sight."""
        users = app.data.driver.db['users']
        user = users.find_one({'auth0_id': token})
        if user:
            return user
        settings.logger.info('Creating user %s' % token)
        # NOTE(review): pymongo's insert() returns the new _id, not the
        # document — confirm callers only rely on the truthiness here.
        user = users.insert({'auth0_id': token})
        return user
def on_insert_conversations(items):
    """Combine the posted contents of participants with the current
    authorised user so all users can see the conversation.
    """
    payload = json.loads(request.data)
    if not isinstance(payload, list):
        payload = [payload]
    for posted, item in zip(payload, items):
        # `item['participants']` was set to the auth value by Eve; merge it
        # into the participants the client posted.
        current_user = deepcopy(item['participants'])
        posted['participants'].append(current_user)
        item['participants'] = posted['participants']
def on_pre_GET_conversations(request, lookup):
    """Restrict conversation lookups to those involving the caller."""
    current_user = request.headers['X-Kiln-Share-Id']
    lookup['participants'] = current_user
def on_insert_messages(items):
    """Reject message inserts unless the sender participates in every
    referenced conversation.

    Aborts the request with HTTP 403 when the sender is not listed in a
    conversation's participants.
    """
    collection = app.data.driver.db['conversations']
    conversation_ids = [item['conversation'] for item in items]
    # Bug fix: the original queried with the loop variable `item`, which is
    # undefined here under Python 3 (and at best referenced only the last
    # item under Python 2); fetch all referenced conversations at once.
    conversations = collection.find({'_id': {'$in': conversation_ids}})
    # The `from` value is identical across a single bulk insert.
    sender = items[0]['from']
    for conversation in conversations:
        if sender not in conversation['participants']:
            abort(403)
def on_fetched_item_conversations(response):
    """Embed the conversation's messages into the fetched document."""
    collection = app.data.driver.db['messages']
    cursor = collection.find({'conversation': response['_id']})
    response['messages'] = list(cursor)
def create_app():
    """Build and configure the Eve application.

    Wires token auth, GridFS media storage, the domain event hooks and
    the Swagger documentation blueprint, then returns the ready app.
    """
    # Create the Eve app.
    app = Eve(auth=Auth, settings=_SETTINGS_PATH,
              media=GridFSImageStorage)
    # Events
    app.on_insert_conversations += on_insert_conversations
    app.on_pre_GET_conversations += on_pre_GET_conversations
    app.on_insert_messages += on_insert_messages
    app.on_fetched_item_conversations += on_fetched_item_conversations
    # Register Swagger extension.
    app.register_blueprint(swagger, url_prefix='/auth')
    app.config['SWAGGER_INFO'] = {
        'title': 'Kiln Share',
        'version': '0.0.1',
        'description': 'Backend Api for kilnshare.co.uk',
        'contact': {
            'name': '<EMAIL>',
        },
        'schemes': ['https'],
    }
    return app
# Module-level WSGI application object (imported by the WSGI server).
app = create_app()
| 2.265625 | 2 |
contentcuration/contentcuration/migrations/0120_auto_20210128_1646.py | d0sadata/studio | 60 | 12773821 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-01-28 16:46
from __future__ import unicode_literals
import contentcuration.models
from django.db import migrations, models
import django.db.models.functions.text
class Migration(migrations.Migration):
    # Adds a partial unique index on lower(email), enforced only for rows
    # with is_active=True (case-insensitive uniqueness for active users).
    dependencies = [
        ('contentcuration', '0119_task_channel_id'),
    ]
    operations = [
        migrations.AddIndex(
            model_name='user',
            index=contentcuration.models.UniqueActiveUserIndex(django.db.models.functions.text.Lower('email'), condition=models.Q(('is_active', True)), name='contentcura_email_d4d492_idx'),
        ),
    ]
| 1.890625 | 2 |
examples/gto/00-input_mole.py | robert-anderson/pyscf | 2 | 12773822 | #!/usr/bin/env python
'''
Initialize a molecular system.
There are many methods to define/initialize a molecule. This example presents
three methods to create/initialize the molecular object. Mole object is a
Python object. You can initialize the Mole object using any methods supported
by Python.
See also
pyscf/examples/pbc/06-load_mol_from_chkfile.py to initialize mol from chkfile
pyscf/examples/pbc/00-input_cell.py for initialization of crystal
'''
from pyscf import gto
#
# First method is to assign the geometry, basis etc. to Mole object, then
# call build() function to initialize the molecule
#
mol = gto.Mole()
mol.atom = '''O 0 0 0; H 0 1 0; H 0 0 1'''
mol.basis = 'sto-3g'
mol.build()
#
# Shortcuts for initialization.
# Use the keyword arguments of mol.build() to initialize a molecule
#
mol = gto.Mole()
mol.build(
    atom = '''O 0 0 0; H 0 1 0; H 0 0 1''',
    basis = 'sto-3g',
)
#
# Use shortcut function gto.M to initialize a molecule
#
mol = gto.M(
    atom = '''O 0 0 0; H 0 1 0; H 0 0 1''',
    basis = 'sto-3g',
)
# All three forms above produce an equivalent, fully initialized Mole object.
#
# Other parameters
# ================
#
mol.charge = 0
mol.spin = 0 # 2j == nelec_alpha - nelec_beta
mol.symmetry = 1 # Allow the program to apply point group symmetry if possible
# .unit can be 'bohr', 'ang' to indicate the coordinates unit of the input mol.atom
# If a number is assigned to unit, this number will be used as the length of
# 1 Bohr (in Angstrom). Eg you can double the bond length of a system by
# setting mol.unit = 0.529*.5.
mol.unit = 'Ang' # (New in version 1.1)
# Output
# ------
# To write output on disk, assign a filename to Mole.output
mol.output = 'path/to/my_out.txt'
# if Mole.output is not given, the default output would be stdout
# Print level
# -----------
# Mole.verbose is used to control print level. The print level can be 0 (quiet,
# no output) to 9 (very noise). The default level is 1, which only outputs the
# error message, it works almost the same as level 0. Level 4 (info), or 5 (debug)
# are recommended value if some calculation detials are needed.
mol.verbose = 4
# level 4 hides some details such as CPU timings, the orbital energies during
# the SCF iterations.
# max memory to use
# -----------------
mol.max_memory = 1000 # in MB
# or use environment variable PYSCF_MAX_MEMORY to control the memory usage
# (New in PySCF-1.3) eg
# export PYSCF_MAX_MEMORY=10000 # 10 GB
# python 00-input_mole.py
| 4.375 | 4 |
post.py | botarsiv/arsiv | 0 | 12773823 | import uuid
from aiogram.utils.callback_data import CallbackData
# Seed post store, keyed by a freshly generated UUID (a new key every run).
POSTS = {
    str(uuid.uuid4()): {
        # NOTE: the f-string has no placeholders; title is runtime text
        # ("Group Management" in Turkish) and is kept verbatim.
        'title': f'Grup Yönetimi\n',
        'body': '\n@Combot \n@MissRose_bot \n@baymax_en_bot',
    }
}
# aiogram callback-data factory: prefix 'post' with fields 'id' and 'action'.
posts_cb = CallbackData('post','id', 'action')
| 2.15625 | 2 |
async_gsm_modem/quectel_ec25/constants.py | Sinusoidal36/async-gsm-modem | 4 | 12773824 |
STATUS_MAP = {
'RECEIVED_UNREAD': b'0',
'RECEIVED_READ': b'1',
'STORED_UNSENT': b'2',
'STORED_SENT': b'3',
'ALL': b'4'
}
STATUS_MAP_R = {
b'0': 'RECEIVED_UNREAD',
b'1': 'RECEIVED_READ',
b'2': 'STORED_UNSENT',
b'3': 'STORED_SENT',
b'4': 'ALL'
}
DELETE_FLAG = {
'ALL_READ': b'1',
'READ_AND_SENT': b'2',
'READ_AND_UNSENT': b'3',
'ALL': b'4'
}
ERROR_CODES = [
b'+CMS ERROR',
b'+CME ERROR'
]
# EC25 URC codes with the corresponding chunk count
UNSOLICITED_RESULT_CODES = [
(b'+CREG', 1),
(b'+CGREG', 1),
(b'+CTZV', 1),
(b'+CTZE', 1),
(b'+CMTI', 1),
(b'+CMT', 2),
(b'^HCMT', 2),
(b'+CBM', 2),
(b'+CDS', 1),
(b'+CDSI', 1),
(b'^HCDS', 2),
(b'+COLP', 1),
(b'+CLIP', 1),
(b'+CRING', 1),
(b'+CCWA', 1),
(b'+CSSI', 1),
(b'+CSSU', 1),
(b'+CUSD', 1),
(b'RDY', 1),
(b'+CFUN', 1),
(b'+CPIN', 1),
(b'+QIND', 1),
(b'POWERED DOWN', 1),
(b'+CGEV', 1),
(b'NO CARRIER', 1)
] | 1.445313 | 1 |
responsibly/tests/test_we.py | staeiou/responsibly | 48 | 12773825 | <gh_stars>10-100
"""Unit test module for responsibly.we"""
# pylint: disable=redefined-outer-name,unused-variable,expression-not-assigned,singleton-comparison,protected-access
import copy
from math import isclose
import numpy as np
import pytest
from responsibly.consts import RANDOM_STATE
from responsibly.tests.data import TOLGA_GENDER_ANALOGIES
from responsibly.tests.utils import assert_deep_almost_equal
from responsibly.we import (
GenderBiasWE, calc_all_weat, calc_weat_pleasant_unpleasant_attribute,
)
from responsibly.we.data import WEAT_DATA, load_w2v_small
from responsibly.we.utils import (
most_similar, normalize, project_params, project_reject_vector,
project_vector,
)
ATOL = 1e-6
N_RANDOM_NEUTRAL_WORDS_DEBIAS_TO_TEST = 1000
@pytest.fixture
def w2v_small():
    """Small word2vec sample model shared by the tests."""
    return load_w2v_small()
@pytest.fixture
def gender_biased_w2v_small():
    """GenderBiasWE wrapper around the small word2vec sample (no debias applied)."""
    model = load_w2v_small()
    return GenderBiasWE(model, only_lower=True, verbose=True)
def test_identify_direction_single(w2v_small):
gb = GenderBiasWE(w2v_small, only_lower=True, verbose=True,
identify_direction='single')
direction = normalize(w2v_small['she'] - w2v_small['he'])
np.testing.assert_allclose(gb.direction, direction)
# TODO: only check that there is no exception,
# should b change to a better test cas
def test_identify_direction_sum(w2v_small):
gb = GenderBiasWE(w2v_small, only_lower=True, verbose=True,
identify_direction='sum')
def test_assert_gensim_keyed_vectors():
with pytest.raises(TypeError):
GenderBiasWE(['one', 'two'], only_lower=True, verbose=True)
def test_project_params():
    """project_params and project_reject_vector must agree on the same pair."""
    # pylint: disable=arguments-out-of-order
    vec = np.array([1, 2, 3])
    direction = np.array([-4, 5, -6])
    _, proj_a, rej_a = project_params(vec, direction)
    proj_b, rej_b = project_reject_vector(direction, vec)
    np.testing.assert_allclose(proj_a, proj_b)
    np.testing.assert_allclose(rej_a, rej_b)
def test_words_embbeding_loading(gender_biased_w2v_small):
assert len(gender_biased_w2v_small.model.vocab) == 26423
def test_contains(gender_biased_w2v_small):
assert 'home' in gender_biased_w2v_small
assert 'HOME' not in gender_biased_w2v_small
def test_data_is_sorted_list(gender_biased_w2v_small):
# otherwise 'specific_full_with_definitional_equalize' is not sorted
assert gender_biased_w2v_small.only_lower
for key in gender_biased_w2v_small._data['word_group_keys']:
word_list = gender_biased_w2v_small._data[key]
assert isinstance(word_list, list)
assert all(word_list[i] <= word_list[i + 1]
for i in range(len(word_list) - 1))
def test_calc_direct_bias(gender_biased_w2v_small):
"""
Test calc_direct_bias method in GenderBiasWE.
Based on section 5.2
"""
# TODO: it seemse that in the article it was checked on
# all the professions names including gender specific ones
# (e.g. businesswomen)
assert isclose(gender_biased_w2v_small.calc_direct_bias(),
0.07, abs_tol=1e-2)
assert isclose(gender_biased_w2v_small.calc_direct_bias(gender_biased_w2v_small # pylint: disable=C0301
._data['profession_names']), # pylint: disable=C0301
0.08, abs_tol=1e-2)
# TODO: iterate over a dictionary
def test_calc_indirect_bias(gender_biased_w2v_small, all_zero=False):
"""
Test calc_direct_bias method in GenderBiasWE.
Based on figure 3 & section 3.5
"""
assert isclose(gender_biased_w2v_small.calc_indirect_bias('softball',
'pitcher'),
0 if all_zero else -0.01, abs_tol=1e-2)
assert isclose(gender_biased_w2v_small.calc_indirect_bias('softball',
'bookkeeper'),
0 if all_zero else 0.20, abs_tol=1e-2)
assert isclose(gender_biased_w2v_small.calc_indirect_bias('softball',
'receptionist'),
0 if all_zero else 0.67, abs_tol=1e-2)
# these words have legit gender direction projection
if not all_zero:
assert isclose(gender_biased_w2v_small.calc_indirect_bias('softball',
'registered_nurse'), # pylint: disable=C0301
0 if all_zero else 0.29, abs_tol=1e-2)
# TODO: in the article it is 0.35 - why?
assert isclose(gender_biased_w2v_small.calc_indirect_bias('softball',
'waitress'),
0 if all_zero else 0.31, abs_tol=1e-2)
assert isclose(gender_biased_w2v_small.calc_indirect_bias('softball',
'homemaker'),
0 if all_zero else 0.38, abs_tol=1e-2)
assert isclose(gender_biased_w2v_small.calc_indirect_bias('football',
'footballer'),
0 if all_zero else 0.02, abs_tol=1e-2)
# this word have legit gender direction projection
if not all_zero:
# TODO in the article it is 0.31 - why?
assert isclose(gender_biased_w2v_small.calc_indirect_bias('football',
'businessman'), # pylint: disable=C0301
0 if all_zero else 0.17, abs_tol=1e-2)
assert isclose(gender_biased_w2v_small.calc_indirect_bias('football',
'pundit'),
0 if all_zero else 0.10, abs_tol=1e-2)
assert isclose(gender_biased_w2v_small.calc_indirect_bias('football',
'maestro'),
0 if all_zero else 0.41, abs_tol=1e-2)
assert isclose(gender_biased_w2v_small.calc_indirect_bias('football',
'cleric'),
0 if all_zero else 0.02, abs_tol=1e-2)
def test_generate_closest_words_indirect_bias(gender_biased_w2v_small):
"""Test generate_closest_words_indirect_bias in GenderBiasWE."""
result = {'indirect_bias': {('football', 'businessman'): 0.17,
('football', 'cleric'): 0.02,
('football', 'footballer'): 0.02,
('football', 'maestro'): 0.42,
('football', 'pundit'): 0.1,
('softball', 'bookkeeper'): 0.2,
('softball', 'paralegal'): 0.37,
('softball', 'receptionist'): 0.67,
('softball', 'registered_nurse'): 0.29,
('softball', 'waitress'): 0.32},
'projection': {('football', 'businessman'): -0.2,
('football', 'cleric'): -0.17,
('football', 'footballer'): -0.34,
('football', 'maestro'): -0.18,
('football', 'pundit'): -0.19,
('softball', 'bookkeeper'): 0.18,
('softball', 'paralegal'): 0.14,
('softball', 'receptionist'): 0.16,
('softball', 'registered_nurse'): 0.16,
('softball', 'waitress'): 0.15}}
indirect_bias_df = (gender_biased_w2v_small
.generate_closest_words_indirect_bias('softball',
'football'))
assert (indirect_bias_df
.round(2)
.to_dict()) == result
def check_all_vectors_unit_length(bias_we):
    """Assert every embedding vector in *bias_we* has (squared) unit norm."""
    for word in bias_we.model.vocab:
        squared_norm = (bias_we[word] ** 2).sum()
        np.testing.assert_allclose(squared_norm, 1, atol=ATOL)
def test_neutralize(gender_biased_w2v_small, is_preforming=True):
"""Test _neutralize method in GenderBiasWE."""
neutral_words = gender_biased_w2v_small._data['neutral_words']
if is_preforming:
gender_biased_w2v_small._neutralize(neutral_words)
direction_projections = [project_vector(gender_biased_w2v_small[word],
gender_biased_w2v_small.direction)
for word in neutral_words]
np.testing.assert_allclose(direction_projections, 0, atol=ATOL)
np.testing.assert_allclose(gender_biased_w2v_small.calc_direct_bias(), 0,
atol=ATOL)
check_all_vectors_unit_length(gender_biased_w2v_small)
test_calc_indirect_bias(gender_biased_w2v_small, all_zero=True)
def test_equalize(gender_biased_w2v_small, is_preforming=True):
"""Test _equalize method in GenderBiasWE."""
# pylint: disable=line-too-long
equality_sets = {tuple(w) for w in gender_biased_w2v_small._data['equalize_pairs']}
equality_sets |= {tuple(w) for w in gender_biased_w2v_small._data['definitional_pairs']}
equality_sets = gender_biased_w2v_small._generate_pair_candidates(equality_sets)
if is_preforming:
gender_biased_w2v_small._equalize(equality_sets)
for equality_set in equality_sets:
projection_vectors = []
rejection_vectors = []
for equality_word in equality_set:
vector = gender_biased_w2v_small[equality_word]
np.testing.assert_allclose(np.linalg.norm(vector), 1, atol=ATOL)
(projection_vector,
rejection_vector) = project_reject_vector(vector,
gender_biased_w2v_small.direction)
projection_vectors.append(projection_vector)
rejection_vectors.append(rejection_vector)
# <e1, d> == -<e2, d>
# assuming equality sets of size 2
assert len(projection_vectors) == 2
np.testing.assert_allclose(projection_vectors[0] @ gender_biased_w2v_small.direction,
-projection_vectors[1] @ gender_biased_w2v_small.direction,
atol=ATOL)
# all rejection part is equal for all the vectors
for rejection_vector in rejection_vectors[1:]:
np.testing.assert_allclose(rejection_vectors[0],
rejection_vector,
atol=ATOL)
check_all_vectors_unit_length(gender_biased_w2v_small)
def test_hard_debias_inplace(gender_biased_w2v_small, is_preforming=True):
"""Test hard_debias method in GenderBiasWE."""
# pylint: disable=C0301
if is_preforming:
test_calc_direct_bias(gender_biased_w2v_small)
gender_biased_w2v_small.debias(method='hard')
test_neutralize(gender_biased_w2v_small, is_preforming=False)
test_equalize(gender_biased_w2v_small, is_preforming=False)
equality_sets = gender_biased_w2v_small._data['definitional_pairs']
np.random.seed(RANDOM_STATE)
neutral_words = np.random.choice(gender_biased_w2v_small._data['neutral_words'],
N_RANDOM_NEUTRAL_WORDS_DEBIAS_TO_TEST,
replace=False)
# for every neutal word w: <e1, w> == <e2, w> AND ||e1 - w|| == ||e2 - w||
for neutral_word in neutral_words:
for equality_word1, equality_word2 in equality_sets:
we1 = gender_biased_w2v_small[neutral_word] @ gender_biased_w2v_small[equality_word1]
we2 = gender_biased_w2v_small[neutral_word] @ gender_biased_w2v_small[equality_word2]
np.testing.assert_allclose(we1, we2, atol=ATOL)
we1_distance = np.linalg.norm(gender_biased_w2v_small[neutral_word]
- gender_biased_w2v_small[equality_word1])
we2_distance = np.linalg.norm(gender_biased_w2v_small[neutral_word]
- gender_biased_w2v_small[equality_word2])
np.testing.assert_allclose(we1_distance, we2_distance, atol=ATOL)
def test_hard_debias_not_inplace(gender_biased_w2v_small):
test_calc_direct_bias(gender_biased_w2v_small)
gender_debiased_we = gender_biased_w2v_small.debias(method='hard',
inplace=False)
test_calc_direct_bias(gender_biased_w2v_small)
test_hard_debias_inplace(gender_debiased_we, is_preforming=False)
def test_copy(gender_biased_w2v_small):
gender_biased_w2v_small_copy = copy.copy(gender_biased_w2v_small)
assert (gender_biased_w2v_small.direction
is not gender_biased_w2v_small_copy.direction)
assert gender_biased_w2v_small.model is gender_biased_w2v_small_copy.model
def test_deepcopy(gender_biased_w2v_small):
gender_biased_w2v_small_copy = copy.deepcopy(gender_biased_w2v_small)
assert (gender_biased_w2v_small.direction
is not gender_biased_w2v_small_copy.direction)
assert (gender_biased_w2v_small.model
is not gender_biased_w2v_small_copy.model)
def test_evaluate_word_embedding(gender_biased_w2v_small):
"""Test evaluate_word_embedding method in GenderBiasWE."""
# pylint: disable=C0301
(word_pairs_evaluation,
word_analogies_evaluation) = gender_biased_w2v_small.evaluate_word_embedding()
assert (word_pairs_evaluation.to_dict()
== {'pearson_r': {'WS353': 0.645, 'RG65': 0.576, 'RW': 0.611, 'Mturk': 0.65, 'MEN': 0.766, 'SimLex999': 0.456, 'TR9856': 0.666},
'pearson_pvalue': {'WS353': 0.0, 'RG65': 0.232, 'RW': 0.0, 'Mturk': 0.0, 'MEN': 0.0, 'SimLex999': 0.0, 'TR9856': 0.0},
'spearman_r': {'WS353': 0.688, 'RG65': 0.493, 'RW': 0.655, 'Mturk': 0.674, 'MEN': 0.782, 'SimLex999': 0.444, 'TR9856': 0.676},
'spearman_pvalue': {'WS353': 0.0, 'RG65': 0.321, 'RW': 0.0, 'Mturk': 0.0, 'MEN': 0.0, 'SimLex999': 0.0, 'TR9856': 0.0},
'ratio_unkonwn_words': {'WS353': 9.915, 'RG65': 14.286, 'RW': 77.384, 'Mturk': 1.558, 'MEN': 15.148, 'SimLex999': 1.702, 'TR9856': 89.722}})
assert (word_analogies_evaluation.to_dict()
== {'score': {'MSR-syntax': 0.75, 'Google': 0.729}})
def test_generate_analogies(gender_biased_w2v_small):
"""Test generate_analogies method in GenderBiasWE.
Based on:
https://github.com/tolga-b/debiaswe/blob/master/tutorial_example1.ipynb
"""
analogies_df = (gender_biased_w2v_small
.generate_analogies(500, unrestricted=False))
analogies_df = analogies_df[['she', 'he']]
assert_deep_almost_equal(analogies_df.values, TOLGA_GENDER_ANALOGIES)
# TODO deeper testing, this is barely checking it runs
# TODO not all full_specific_words are lower case - why? maybe just names?
# TODO maybe it was trained on the whole w2v?
def test_learn_full_specific_words(gender_biased_w2v_small):
(full_specific_words,
clf, X, y) = gender_biased_w2v_small.learn_full_specific_words(debug=True)
full_specific_words.sort()
assert (set(gender_biased_w2v_small._data['specific_seed'])
.issubset(full_specific_words))
def test_calc_all_weat(w2v_small):
calc_all_weat(w2v_small, filter_by='model', with_original_finding=True,
with_pvalue=True, pvalue_kwargs={'method': 'approximate'})
def test_calc_all_weat_index(w2v_small):
all_weat = calc_all_weat(w2v_small, filter_by='model',
with_original_finding=True,
with_pvalue=True,
pvalue_kwargs={'method': 'approximate'})
for index in range(len(WEAT_DATA)):
single_weat = calc_all_weat(w2v_small, weat_data=index,
filter_by='model',
with_original_finding=True,
with_pvalue=True,
pvalue_kwargs={'method':
'approximate'})
assert_deep_almost_equal(single_weat.iloc[0].to_dict(),
all_weat.iloc[index].to_dict(),
atol=0.01)
def test_calc_all_weat_indices(w2v_small):
all_weat = calc_all_weat(w2v_small, filter_by='model',
with_original_finding=True,
with_pvalue=True,
pvalue_kwargs={'method': 'approximate'})
for index in range(1, len(WEAT_DATA)):
indices = tuple(range(index))
singles_weat = calc_all_weat(w2v_small, weat_data=indices,
filter_by='model',
with_original_finding=True,
with_pvalue=True,
pvalue_kwargs={'method':
'approximate'})
assert_deep_almost_equal(singles_weat.to_dict(),
all_weat.iloc[:index].to_dict(),
atol=0.01)
def test_calc_all_weat_defaults(w2v_small):
weat_5_results_default = (calc_all_weat(w2v_small, (5,))
.iloc[0].to_dict())
weat_5_results = (calc_all_weat(w2v_small, (5,),
filter_by='model',
with_pvalue=True,
pvalue_kwargs={'method': 'exact'})
.iloc[0].to_dict())
assert_deep_almost_equal(weat_5_results_default, weat_5_results)
def test_calc_weat_pleasant_attribute(w2v_small):
# pylint: disable=line-too-long
pvalue_kwargs = {'method': 'approximate'}
result_v1 = calc_weat_pleasant_unpleasant_attribute(w2v_small,
WEAT_DATA[1]['first_target'],
WEAT_DATA[1]['second_target'],
pvalue_kwargs=pvalue_kwargs)
result_v1['p'] = round(result_v1['p'], 4)
result_v1['d'] = round(result_v1['d'], 4)
result_v1['s'] = round(result_v1['s'], 4)
result_v2 = (calc_all_weat(w2v_small, (1,),
pvalue_kwargs=pvalue_kwargs)
.iloc[0]
.to_dict())
assert_deep_almost_equal(result_v1, result_v2, atol=0.01)
def test_most_similar(w2v_small):
POSITIVE, NEGATIVE = ('doctor', 'she'), ('he',)
responsibly_results = most_similar(w2v_small, POSITIVE, NEGATIVE,
topn=10)
gensim_results = w2v_small.most_similar(POSITIVE, NEGATIVE,
topn=9)
assert responsibly_results[0][0] == 'doctor'
assert_deep_almost_equal(responsibly_results[1:], gensim_results,
atol=0.01)
def test_compute_association(gender_biased_w2v_small):
"""
Test compute_association method in GenderBiasWE.
"""
(r, pvalue), _ = gender_biased_w2v_small.compute_factual_association()
assert isclose(r, 0.7070401592764508, abs_tol=ATOL)
assert isclose(pvalue, 1.4324502214459908e-06, abs_tol=ATOL)
| 2.5 | 2 |
pycopula/estimation.py | merz9b/pycopula | 71 | 12773826 | <gh_stars>10-100
import numpy as np
from scipy.optimize import minimize, minimize_scalar
def cmle(log_lh, theta_start=0, theta_bounds=None, optimize_method='Nelder-Mead', bounded_optimize_method='SLSQP', is_scalar=False):
    """
    Computes the CMLE on a specified log-likelihood function.

    Parameters
    ----------
    log_lh : Function
        The objective to minimise (typically the negative log-likelihood).
    theta_start : float
        Initial value of theta in the optimisation (ignored when
        ``is_scalar`` is True and bounds are given).
    theta_bounds : couple
        Allowed values of theta; ``None`` means unconstrained.
    optimize_method : str
        SciPy method used when no bounds are given.
    bounded_optimize_method : str
        SciPy method used for the bounded vector case.
    is_scalar : bool
        Use scalar minimisation (``minimize_scalar``) instead of the
        vector routine.

    Returns
    -------
    OptimizeResult
        The optimization result returned from SciPy.
    """
    if is_scalar:
        if theta_bounds is None:
            # NOTE(review): minimize_scalar only accepts 'brent', 'bounded'
            # or 'golden' — the default 'Nelder-Mead' would raise here.
            # Kept as-is to preserve the public contract; confirm callers
            # always pass a scalar-compatible optimize_method.
            return minimize_scalar(log_lh, method=optimize_method)
        return minimize_scalar(log_lh, bounds=theta_bounds, method='bounded',
                               options={'maxiter': 200})
    if theta_bounds is None:
        return minimize(log_lh, theta_start, method=optimize_method)
    return minimize(log_lh, theta_start, method=bounded_optimize_method,
                    bounds=[theta_bounds], options={'maxiter': 200})
def mle(copula, X, marginals, hyper_param, hyper_param_start=None, hyper_param_bounds=None, theta_start=(0,), theta_bounds=None, optimize_method='Nelder-Mead', bounded_optimize_method='SLSQP'):
    """
    Computes the MLE on specified data.

    Parameters
    ----------
    copula : Copula
        The copula (must expose ``pdf_param(u, theta)``).
    X : numpy array (of size n * copula dimension)
        The data to fit.
    marginals : numpy array
        The marginal distributions (scipy.stats-like, with ``pdf``/``cdf``).
    hyper_param : list of dict
        Hyper-parameters for each marginal. Use None for unknown values
        that must be estimated.
    hyper_param_start : list of dict
        Start values for the unknown hyper-parameters (same shape as
        ``hyper_param``).
    hyper_param_bounds : numpy array
        Allowed values for each hyper-parameter.
    theta_start : sequence
        Initial value(s) of theta (tuple default avoids a mutable default).
    theta_bounds : couple
        Allowed values of theta.
    optimize_method : str
        SciPy method used when no bounds are given.
    bounded_optimize_method : str
        SciPy method used for constrained optimisation.

    Returns
    -------
    optimizeResult : OptimizeResult
        The optimization result returned from SciPy.
    estimatedHyperParams : list of dict
        The estimated hyper-parameters (the input dicts are NOT mutated).
    """
    n, d = X.shape
    theta_offset = len(theta_start)

    # Work on copies: the original implementation wrote the fitted values
    # back into the caller's dictionaries.
    hyper_params = [dic.copy() for dic in hyper_param]
    hyper_optimize_params = [dic.copy() for dic in hyper_params]

    # Initialisation vector: theta components first, then one slot per
    # marginal hyper-parameter. A float array is required -- the original
    # integer array silently truncated fractional start values.
    start_vector = np.zeros(d + theta_offset)
    start_vector[:theta_offset] = theta_start
    if hyper_param_start is None:
        start_vector[theta_offset:] = 1.0
    else:
        idx = theta_offset
        for k in range(len(hyper_params)):
            for key in hyper_params[k]:
                # Start values apply to the *unknown* (None) parameters,
                # which are the ones actually optimised (the original
                # condition was inverted).
                if hyper_params[k][key] is None:
                    start_vector[idx] = hyper_param_start[k][key]
                    idx += 1

    def log_likelihood(x):
        # Inject the current optimisation values into the unknown
        # hyper-parameter slots; x[0:theta_offset] holds theta.
        idx = theta_offset
        for k in range(len(hyper_params)):
            for key in hyper_params[k]:
                if hyper_params[k][key] is None:
                    hyper_optimize_params[k][key] = x[idx]
                    idx += 1
        margin_cdf = np.transpose(
            [marginals[j].cdf(np.transpose(X)[j], **hyper_optimize_params[j])
             for j in range(d)])
        # First term: log-density of the copula on the pseudo-observations.
        if theta_offset == 1:
            lh = sum(np.log(copula.pdf_param(margin_cdf[i], x[0]))
                     for i in range(n))
        else:
            lh = sum(np.log(copula.pdf_param(margin_cdf[i],
                                             x[0:theta_offset]))
                     for i in range(n))
        # Second term: sum of the marginal log-densities.
        lh += sum(sum(np.log(marginals[j].pdf(np.transpose(X)[j],
                                              **hyper_optimize_params[j])))
                  for j in range(d))
        return lh

    if theta_bounds is None and hyper_param_bounds is None:
        optimize_result = minimize(lambda x: -log_likelihood(x),
                                   start_vector, method=optimize_method)
    else:
        # Stack the theta bounds row on top of the hyper-parameter bounds
        # (the original discarded hyper_param_bounds when theta_bounds
        # was None).
        theta_block = (np.array([[None, None]]) if theta_bounds is None
                       else np.array([theta_bounds]))
        hyper_block = (np.tile(np.array([None, None]), [d, 1])
                       if hyper_param_bounds is None
                       else np.asarray(hyper_param_bounds))
        opti_bounds = np.vstack((theta_block, hyper_block))
        optimize_result = minimize(lambda x: -log_likelihood(x),
                                   start_vector,
                                   method=bounded_optimize_method,
                                   bounds=opti_bounds)

    # Fill the fitted values into fresh copies of the input dictionaries.
    estimated_hyper_params = [dic.copy() for dic in hyper_param]
    idx = theta_offset
    for k in range(len(estimated_hyper_params)):
        for key in estimated_hyper_params[k]:
            if estimated_hyper_params[k][key] is None:
                estimated_hyper_params[k][key] = optimize_result['x'][idx]
                idx += 1

    return optimize_result, estimated_hyper_params
def ifm(copula, X, marginals, hyper_param, hyper_param_start=None, hyper_param_bounds=None, theta_start=0, theta_bounds=None, optimize_method='Nelder-Mead', bounded_optimize_method='SLSQP'):
    """
    Computes the IFM (inference for margins) estimation on specified data.

    Each marginal's unknown hyper-parameters are fitted first by
    univariate MLE; the copula parameter theta is then estimated on the
    resulting pseudo-observations.

    Parameters
    ----------
    copula : Copula
        The copula (must expose ``pdf_param(u, theta)``).
    X : numpy array (of size n * copula dimension)
        The data to fit.
    marginals : numpy array
        The marginal distributions (scipy.stats-like, with ``pdf``/``cdf``).
    hyper_param : list of dict
        Hyper-parameters for each marginal. Use None for unknown values
        that must be estimated.
    hyper_param_start : list of dict
        Start values for the unknown hyper-parameters.
    hyper_param_bounds : numpy array
        Allowed values for each hyper-parameter (may be None).
    theta_start : float
        Initial value of theta in the optimisation.
    theta_bounds : couple
        Allowed values of theta.
    optimize_method : str
        SciPy method used when no bounds are given.
    bounded_optimize_method : str
        SciPy method used for constrained optimisation.

    Returns
    -------
    optimizeResult : OptimizeResult
        The optimization result returned from SciPy.
    estimatedHyperParams : list of dict
        The estimated hyper-parameters (the input dicts are NOT mutated).
    """
    n, d = X.shape

    # Copies so the caller's dictionaries are never mutated: np.copy on an
    # object array is shallow, so the original wrote into the input dicts.
    hyper_params = [dic.copy() for dic in hyper_param]
    hyper_optimize_params = [dic.copy() for dic in hyper_params]
    hyper_estimated = [dic.copy() for dic in hyper_params]

    pobs = np.zeros((d, n))  # pseudo-observations, one row per marginal

    # Step 1: univariate MLE of each marginal's unknown hyper-parameters.
    for j in range(d):
        # Initialisation vector for this marginal's unknown parameters.
        if hyper_param_start is None:
            start = [1.0]
        else:
            start = [hyper_param_start[j][key]
                     for key in hyper_params[j]
                     if hyper_params[j][key] is None]

        def uni_log_likelihood(x, j=j):
            # Bind the current optimisation values to the unknown slots.
            idx = 0
            for key in hyper_params[j]:
                if hyper_params[j][key] is None:
                    hyper_optimize_params[j][key] = x[idx]
                    idx += 1
            return sum(np.log(marginals[j].pdf(np.transpose(X)[j],
                                               **hyper_optimize_params[j])))

        # Guard: the original indexed hyper_param_bounds[j] even when the
        # argument was left at its default of None, raising a TypeError.
        bounds_j = (None if hyper_param_bounds is None
                    else hyper_param_bounds[j])
        if bounds_j is None:
            opti_res = minimize(lambda x: -uni_log_likelihood(x), start,
                                method=optimize_method)['x']
        else:
            opti_res = minimize(lambda x: -uni_log_likelihood(x), start,
                                method=bounded_optimize_method,
                                bounds=[bounds_j])['x']

        # Replace the None placeholders with the fitted values.
        idx = 0
        for key in hyper_estimated[j]:
            if hyper_estimated[j][key] is None:
                hyper_estimated[j][key] = opti_res[idx]
                idx += 1

        pobs[j] = marginals[j].cdf(np.transpose(X)[j], **hyper_estimated[j])

    pobs = np.transpose(pobs)

    # Step 2: MLE of the copula parameter on the pseudo-observations.
    def log_likelihood(x):
        return sum(np.log(copula.pdf_param(pobs[i], x)) for i in range(n))

    if theta_bounds is None:
        optimize_result = minimize(lambda x: -log_likelihood(x), theta_start,
                                   method=optimize_method)
    else:
        optimize_result = minimize(lambda x: -log_likelihood(x), theta_start,
                                   method=bounded_optimize_method,
                                   bounds=[theta_bounds])

    return optimize_result, hyper_estimated
| 2.921875 | 3 |
src/rbvfit/rb_setline.py | manoranjan-s/rbvfit | 2 | 12773827 | from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
from astropy.io import ascii
from pkg_resources import resource_filename
'''
Read atomic line information for a given rest-frame wavelength,
either for an exact match or for the line with the closest wavelength.

Input:
    lambda_rest :- rest-frame wavelength (in \AA) of the line to match
    method      :- 'closest' -> match the closest line
                   'Exact'   -> match the exact wavelength

Output:
    dic :- dictionary with fval, wavelength and species name.

Example: str = rb_setline(2796.3, 'closest')

Written By: <NAME> Jan 2018, Python 2.7
Edit: <NAME> Sep 2018, deprecated kwargs to be compatible with Python 3
'''
def rb_setline(lambda_rest, method, linelist='atom'):
    """Look up atomic transition data for a rest-frame wavelength.

    Parameters
    ----------
    lambda_rest : float
        Rest-frame wavelength (Angstrom) of the line to match.
    method : str
        'Exact'   : match all lines within 1e-3 Angstrom of lambda_rest.
        'closest' : match the single closest line.
    linelist : str, optional
        Line list to search: 'atom' (default), 'LLS', 'LLS Small' or 'DLA'.

    Returns
    -------
    dict
        Keys 'wave', 'fval', 'name' (plus 'gamma' for the 'atom' list).
        'Exact' yields arrays (possibly empty); 'closest' yields scalars.

    Raises
    ------
    NameError
        If *method* is neither 'Exact' nor 'closest'.
    """
    line_str = read_line_list(linelist)
    nlines = len(line_str)
    wavelist = np.zeros(nlines)
    name = np.empty(nlines, dtype='object')
    fval = np.zeros(nlines)
    if linelist == 'atom':
        gamma = np.zeros(nlines)
    for i in range(nlines):
        # The np.double/np.float/np.str aliases used previously were
        # deprecated in NumPy 1.20 and removed in 1.24; the builtins are
        # exact equivalents here.
        wavelist[i] = float(line_str[i]['wrest'])
        fval[i] = float(line_str[i]['fval'])
        name[i] = str(line_str[i]['ion'])
        if linelist == 'atom':
            gamma[i] = float(line_str[i]['gamma'])
    if method == 'Exact':
        q = np.where(np.abs(lambda_rest - wavelist) < 1e-3)
        if linelist == 'atom':
            outstr = {'wave': wavelist[q], 'fval': fval[q], 'name': name[q], 'gamma': gamma[q]}
        else:
            outstr = {'wave': wavelist[q], 'fval': fval[q], 'name': name[q]}
    elif method == 'closest':
        idx = (np.abs(lambda_rest - wavelist)).argmin()
        if linelist == 'atom':
            outstr = {'wave': wavelist[idx], 'fval': fval[idx], 'name': name[idx], 'gamma': gamma[idx]}
        else:
            outstr = {'wave': wavelist[idx], 'fval': fval[idx], 'name': name[idx]}
    else:
        raise NameError('Specify the matching method, closest or Exact')
    return outstr
def read_line_list(label):
    """Read one of the packaged line lists and return it as a list of dicts.

    Parameters
    ----------
    label : str
        'atom', 'LLS', 'LLS Small' or 'DLA'.

    Returns
    -------
    list of dict
        Each dict has keys 'wrest', 'ion', 'fval' (plus 'gamma' for 'atom').

    Raises
    ------
    ValueError
        For an unknown *label*. (Previously this only printed a message and
        then crashed with an UnboundLocalError on *filename*.)
    """
    if label == 'atom':
        filename = resource_filename('rbvfit', 'lines/atom_full.dat')
    elif label == 'LLS':
        filename = resource_filename('rbvfit', 'lines/lls.lst')
    elif label == 'LLS Small':
        filename = resource_filename('rbvfit', 'lines/lls_sub.lst')
    elif label == 'DLA':
        filename = resource_filename('rbvfit', 'lines/dla.lst')
    else:
        raise ValueError(
            "Unknown line list %r; use 'atom', 'LLS', 'LLS Small' or 'DLA'" % label)
    data = []
    if label == 'atom':
        # The atom list is a whitespace table readable by astropy.io.ascii.
        s = ascii.read(filename)
        for line in range(len(s['col1'])):
            source = {}
            source['wrest'] = float(s['col2'][line])
            # e.g. 'MgII' + ' ' + '2796' -> 'MgII 2796'
            # (str/int builtins replace the np.str/np.int aliases removed
            # in NumPy 1.24).
            source['ion'] = s['col1'][line] + ' ' + str(int(s['col2'][line]))
            source['fval'] = float(s['col3'][line])
            source['gamma'] = float(s['col4'][line])
            data.append(source)
    else:
        # Plain-text lists: one header row, then whitespace-separated columns.
        # `with` guarantees the file handle is closed (it previously leaked).
        with open(filename, 'r') as f:
            f.readline()  # skip header
            for line in f:
                columns = line.strip().split()
                source = {}
                source['wrest'] = float(columns[0])
                source['ion'] = columns[1] + ' ' + columns[2]
                source['fval'] = float(columns[3])
                data.append(source)
    return data
| 3.03125 | 3 |
alfworld/utils.py | zhaozj89/alfworld_meta_dqn | 42 | 12773828 | <reponame>zhaozj89/alfworld_meta_dqn
import os
def mkdirs(dirpath: str) -> str:
    """ Create a directory and all its parents.
    If the folder already exists, its path is returned without raising any exceptions.
    Arguments:
        dirpath: Path where a folder need to be created.
    Returns:
        Path to the (created) folder.
    """
    # exist_ok=True makes makedirs a no-op for an existing directory,
    # replacing the manual try/except FileExistsError. Unlike the old
    # version it still raises if dirpath exists but is NOT a directory,
    # instead of silently returning an unusable path.
    os.makedirs(dirpath, exist_ok=True)
    return dirpath
| 3.625 | 4 |
src/shape_constraint_detectors/TargetNodeTargetDetector.py | IDLabResearch/lovstats | 1 | 12773829 | <reponame>IDLabResearch/lovstats
from utils.RestrictionTypeDetector import RestrictionTypeDetector
from utils.RestrictionTypeDetector import TYPE_INT
from utils.RestrictionTypeDetector import MEASURE_OCCURRENCE
class TargetNodeTargetDetector(RestrictionTypeDetector):
    """
    Detector counting shapes that use a targetNode target.

    This class serves as interface for all statistics of shapes applying on individuals.
    Subclasses of this class, should implement the compute method in which they should perform
    their computation and call the set* methods of this class here.
    """
    def __init__(self):
        super(TargetNodeTargetDetector, self).__init__()
        # Seed the occurrence measure with zero so a result row always exists.
        self.addResult(MEASURE_OCCURRENCE, 0, TYPE_INT)
    def getRestrictionType(self):
        # Identifier used to label this restriction type in the output.
        return "targetNodeTarget"
    def setNumberTargetNodeTarget(self, number):
        # Record the observed number of targetNode targets.
        self.addResult(MEASURE_OCCURRENCE, number, TYPE_INT)
| 2.640625 | 3 |
src/correr_modelo.py | osim-microgrid-tool/osim_islanded_microgrids_sizing | 2 | 12773830 | <reponame>osim-microgrid-tool/osim_islanded_microgrids_sizing
import gurobipy
import warnings
import pyomo.environ as pyo
from pyomo.opt import *
import sys
warnings.filterwarnings("ignore")
from time import time
import os
def correr_modelo(model=None, gap=None, time_limit=None, experimento=None):
    """Solve the Pyomo *model* with Gurobi and return the results.

    Parameters:
        model: Pyomo model to optimize.
        gap: relative MIP gap tolerance ('mipgap') handed to Gurobi.
        time_limit: optional wall-clock limit in seconds for the solver.
        experimento: experiment name used to label the solver log file.

    Returns:
        (results, term_cond): the Pyomo results object and its
        termination condition.
    """
    solver = solvers.SolverFactory("gurobi", solver_io="python")
    solver.options['mipgap'] = gap
    # `is not None` instead of `!= None`: identity is the correct (PEP 8)
    # sentinel test.
    if time_limit is not None:
        solver.options['TimeLimit'] = time_limit
    # Build the log path with os.path.join; the previous concatenation
    # embedded a literal backslash ("src\log_opt"), producing a wrong
    # directory name on non-Windows systems.
    logfile = os.path.join(os.getcwd(), "src", "log_opt", "%s.txt" % experimento)
    results = solver.solve(model, tee=True, keepfiles=True, logfile=logfile)
    term_cond = results.solver.termination_condition
    print(f"El programa '{model.name}' es: ", term_cond)
    return results, term_cond
scripts/plot_vorticity.py | rohitsupekar/active_matter_spheres | 1 | 12773831 | <gh_stars>1-10
# Render vorticity snapshots on an orthographic map projection, one PNG per
# saved output file. Frames are distributed across MPI ranks round-robin.
print('starting')
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.basemap import Basemap, shiftgrid
from matplotlib import cm, colors
import sphere as sph
import equations as eq
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.rank
size = comm.size
# change if you change your resolution
L_max = 255
grid = sph.grid(L_max)
print('made grid')
figure, ax = plt.subplots(1,1)
figure.set_size_inches(6,6)
# Longitude/latitude mesh matching the spherical-harmonic grid resolution.
lon = np.linspace(0, 2*np.pi, 2*(L_max+1))
lat = grid - np.pi/2
meshed_grid = np.meshgrid(lon, lat)
lat_grid = meshed_grid[1]
lon_grid = meshed_grid[0]
mp = Basemap(projection='ortho', lat_0=33, lon_0=0, ax=ax)
mp.drawmapboundary()
mp.drawmeridians(np.arange(0, 360, 30),dashes = (None,None), linewidth=0.5)
mp.drawparallels(np.arange(-90, 90, 30),dashes = (None,None), linewidth=0.5)
# Project degree coordinates into map (x, y) space once, outside the loop.
x, y = mp(np.degrees(lon_grid), np.degrees(lat_grid))
# NOTE(review): the upper bound 3 limits this run to outputs 1..2 — presumably
# a leftover from testing; confirm before a full render.
for i in range(1+rank,3,size):
    print(i)
    output = np.load('output_files/output_%i.npz' %i)
    im = mp.pcolor(x, y, np.transpose(output['om']), cmap='RdBu_r')
    im.set_clim([-1000,1000])
    # Create the colorbar/title only on the first frame of this rank, then
    # reuse them so artists are not duplicated on every iteration.
    if i == 1+rank:
        plt.colorbar(im)
        title = figure.suptitle('t = %.4f' %output['t'][0])
    else:
        title.set_text('t = %.4f' %output['t'][0])
    plt.savefig('images/vorticity_%05i.png' %i,dpi=300)
POP3Drop.py | andrewksimon/POP3Drop | 0 | 12773832 | <filename>POP3Drop.py
# This script had been written by <NAME>.
# This script was written on 09/06/2018
# The purpose of this script is to delete messages on a POP3 server (especially if they are in large quantities).
# WARNING: running this permanently deletes every message in the mailbox.
# Import statements
import poplib # POP3 Manipulation library
# Defines environment variables
user = 'user' # Inbox username - <EMAIL>
passw = 'password' # Inbox user's password
pop3hst = 'domain.com' # POP3 hostname/resolver
# NOTE(review): the port is a string; getaddrinfo accepts numeric service
# strings so this works, but an int would be clearer.
port = '110' # POP3 Port number... ~ NON SSL
# Sets up the connection
Mailbox = poplib.POP3(pop3hst, port) # Define the instance "Mailbox" with the connection
Mailbox.user(user) # Defines the mailbox's connection username.
Mailbox.pass_(passw) # Defines the mailbox's connection password.
numMessages = len(Mailbox.list()[1]) # Grabs the total number of emails that are sitting on user's account.
print(Mailbox.getwelcome()) # Gets welcome message - usual Dovecot server greeting
print("Total messages: " + str(numMessages) + "\n") # Print out total number of messages on the server.
# Iterate through from message #1 to total number of messages that are on the server.
# IMPORTANT: i must start at 1... 0 is not a message index.
for i in range(1, numMessages+1):
    print("Deleting Message #" + str(i) + "...") # Verbose message.
    Mailbox.dele(i) # POP3 delete command: marks the message for deletion.
    print("DELETED Message #" + str(i)) # Verbose message.
    print("There are now " + str(len(Mailbox.list()[1])) + " messages left. \n") # Verbose message... reprint # of msgs.
print("Committing changes to server... this may take a while...")
newMsgLst = str(len(Mailbox.list()[1]))
# QUIT is what actually commits the deletions on the server.
Mailbox.quit() # Close the connection to the machine.
print("CHANGES COMITTED! There is " + newMsgLst + " message(s) left on the server.")
print("Connection to " + user + "@" + pop3hst + " has now been close. Have a good day!")
accounting_integrations/allauth/providers/fylein/views.py | fylein/fyle-accounting-integrations | 1 | 12773833 | from fylesdk import FyleSDK
from django.conf import settings
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from accounting_integrations.allauth.providers.fylein.provider import FyleProvider
class FyleOAuth2Adapter(OAuth2Adapter):
    """django-allauth OAuth2 adapter for the Fyle provider.

    Endpoint URLs are derived from settings.FYLE_BASE_URL at class-creation
    time.
    """
    provider_id = FyleProvider.id
    # Strip any trailing slash so the URL templates below don't double it.
    base_url = settings.FYLE_BASE_URL.rstrip('/')
    access_token_url = '{0}/api/oauth/token'.format(base_url)
    authorize_url = '{0}/app/main/#/oauth/authorize'.format(base_url)
    profile_url = '{0}/user'.format(base_url)
    def complete_login(self, request, app, token, **kwargs):
        """Fetch the user's Fyle profile and build a SocialLogin from it."""
        # Setup the fyle API
        # NOTE(review): FyleSDK is constructed with the OAuth token's
        # token_secret as its refresh token — confirm this matches the
        # fylesdk client's expectations.
        fyle_api = FyleSDK(
            client_id=settings.FYLE_CLIENT_ID,
            client_secret=settings.FYLE_CLIENT_SECRET,
            refresh_token=token.token_secret
        )
        profile = fyle_api.Employees.get_my_profile()
        # Minimal identity payload allauth needs to match/create the account.
        extra_data = {
            'id': profile['data']['id'],
            'email': profile['data']['employee_email'],
            'name': profile['data']['full_name']
        }
        return self.get_provider().sociallogin_from_response(
            request, extra_data
        )
# allauth view entry points wired to this adapter for login and callback URLs.
oauth2_login = OAuth2LoginView.adapter_view(FyleOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(FyleOAuth2Adapter)
| 1.96875 | 2 |
2017/round_1c/ample_syrup.py | laichunpongben/CodeJam | 0 | 12773834 | #!/usr/bin/env python
# Google Code Jam
# Google Code Jam 2017
# Round 1C 2017
# Problem A. <NAME>
# Solved small test set
from __future__ import print_function, division
import math
import itertools
def get_k_highest_pancakes(n_pancakes, k):
    """Return the k pancakes with the greatest height (h of each (r, h))."""
    by_height = sorted(n_pancakes, key=lambda pancake: pancake[1], reverse=True)
    return by_height[:k]
def get_k_largest_pancakes(n_pancakes, k):
    """Return the k pancakes with the greatest radius (r of each (r, h))."""
    by_radius = sorted(n_pancakes, key=lambda pancake: pancake[0], reverse=True)
    return by_radius[:k]
def get_k_pancakes(n_pancakes, k):
    """Return the k pancakes ranked by r^2 + 2*r*h (proportional to exposed area)."""
    def area_score(pancake):
        r, h = pancake
        return r ** 2 + 2 * r * h
    ranked = sorted(n_pancakes, key=area_score, reverse=True)
    return ranked[:k]
def get_exposed_area(pancakes):
    """Total exposed area of a stack: the top disc of the widest pancake plus
    the side wall of every pancake in the stack."""
    # Sort widest-first so the horizontal (top) disc comes from the largest
    # radius; the side-wall sum itself is order-independent.
    stack = sorted(pancakes, key=lambda pancake: -pancake[0])
    top_radius = stack[0][0]
    horizontal_area = math.pi * top_radius ** 2
    side_total = 0
    for radius, height in stack:
        side_total += height * 2 * math.pi * radius
    return side_total + horizontal_area
def solve(n, k, pancakes):
    """Return the best exposed area obtainable by stacking k of the n pancakes.

    Heuristic: evaluate the k tallest and the k widest pancakes and keep the
    better stack. An exhaustive itertools.combinations search previously sat
    *after* the return statement and was unreachable dead code; it has been
    removed. (n is kept in the signature for interface compatibility with the
    input parser.)
    """
    pancakes_high = get_k_highest_pancakes(pancakes, k)
    area_high = get_exposed_area(pancakes_high)
    pancakes_large = get_k_largest_pancakes(pancakes, k)
    area_large = get_exposed_area(pancakes_large)
    return max(area_large, area_high)
if __name__ == '__main__':
    import os
    # Sanity-check samples: (n, k, [(radius, height), ...]).
    samples = [
        (2, 1, [(100, 20), (200, 10)]),
        (2, 2, [(100, 20), (200, 10)]),
        (3, 2, [(100, 10), (100, 10), (100, 10)]),
        (4, 2, [(9, 3), (7, 1), (10, 1), (8, 4)]),
    ]
    for sample in samples:
        print(solve(*sample))
    data_files = ['A-large']
    for f in data_files:
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '{0}.in'.format(f)), 'r') as input_file:
            lines = input_file.readlines()
        # First line is the number of test cases; the rest are case bodies.
        input_count = int(lines[0].replace('\n' ,''))
        inputs = [line.replace('\n', '') for line in lines[1:]]
        test_cases = []
        j = 0
        # Each case: one "n k" line followed by n "radius height" lines.
        for _ in range(input_count):
            pancakes = []
            n, k = tuple([int(_) for _ in inputs[j].split(' ')])
            j += 1
            for _ in range(n):
                row = tuple([int(_) for _ in inputs[j].split(' ')])
                pancakes.append(row)
                j += 1
            test_cases.append((n, k, pancakes))
        i = 1
        # Write answers in Code Jam's "Case #i: value" format.
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '{0}.out'.format(f)), 'w') as output_file:
            for test_case in test_cases:
                area = solve(*test_case)
                output_file.write('Case #{0}: {1:0.6f}\n'.format(i, area))
                i += 1
| 3.375 | 3 |
utilities/file.py | IOTECH-Donegal/NMEA | 0 | 12773835 | <reponame>IOTECH-Donegal/NMEA
from datetime import datetime
import sys, csv
def path_name():
    """Return the platform-specific directory where log files are written.

    Supported platforms: Windows ('./logfiles/') and Linux
    ('/home/pi/logfiles/'). Any other platform aborts the program.
    """
    # Operating system dependent stuff
    this_os = sys.platform
    if this_os == 'win32':
        return './logfiles/'
    elif this_os == 'linux':
        return '/home/pi/logfiles/'
    else:
        print(f'Unsupported OS: {this_os}')
        # Exit non-zero: the old exit(0) reported success to the caller
        # even though no log directory is available.
        sys.exit(1)
def log_file_name(extension):
    """
    Create a file name in the logfiles directory, based on current date and time
    (YYYYMMDD-HHMMSS + extension).
    Requires the computer to have an RTC or synched clock.
    """
    stamp = datetime.now().strftime('%Y%m%d-%H%M%S')
    return stamp + extension
| 2.734375 | 3 |
tests/hq_mock.py | SrihariThalla/seesaw-kit | 49 | 12773836 | '''Mocks Warrior HQ.'''
import tornado.web
class RegisterHandler(tornado.web.RequestHandler):
    """Mock of the HQ register endpoint: always hands out the same warrior id."""
    def initialize(self, **kwargs):
        # Accept (and ignore) any keyword arguments from the URL spec.
        pass
    def post(self):
        self.write({
            'warrior_id': 'test-warrior-id',
        })
class UpdateHandler(tornado.web.RequestHandler):
    """Mock of the HQ update endpoint: returns a fixed update payload.

    Fix: this previously subclassed tornado.web.RedirectHandler (whose
    initialize expects a redirect ``url`` and whose GET issues a redirect);
    a plain RequestHandler is the intended base, matching RegisterHandler.
    """
    def initialize(self, **kwargs):
        # Accept (and ignore) any keyword arguments from the URL spec.
        pass
    def post(self):
        self.write({
            "warrior": {
                "seesaw_version": "0.0.15"
            },
            "broadcast_message": "<i>Hello world</i>",
            "auto_project": "testproject",
            "projects": [
                {
                    "name": "testproject",
                    "title": "A test project",
                    "description": "Testing a project",
                    "repository":
                        "https://github.com/ArchiveTeam/example-seesaw-project",
                    "logo":
                        "https://raw.github.com/ArchiveTeam/warrior-preseed/master"
                        "/splash/Archive_team-white.png",
                    "marker_html": "hi",
                    "lat_lng": [
                        0.0,
                        0.0
                    ],
                    "leaderboard": "http://example.com/"
                },
                {
                    "name": "localproject",
                    "title": "A local project",
                    "description":
                        "A project loaded from /tmp/mywarriorproject "
                        "Useful for testing auto update project",
                    "repository":
                        "/tmp/mywarriorproject",
                    "logo":
                        "",
                    "marker_html": "hi",
                    "lat_lng": [
                        0.0,
                        0.0
                    ],
                    "leaderboard": "http://example.com/"
                },
            ]
        })
if __name__ == '__main__':
    # Route table for the two mocked HQ API endpoints.
    handlers = [
        (r'/api/register.json', RegisterHandler),
        (r'/api/update.json', UpdateHandler),
    ]
    app = tornado.web.Application(handlers=handlers)
    # Bind to localhost only: this mock exists for local test runs.
    app.listen(8681, 'localhost')
    tornado.ioloop.IOLoop.instance().start()
| 2.359375 | 2 |
archive/archive/circular_buffer.py | walchko/the-collector | 0 | 12773837 | <reponame>walchko/the-collector
##############################################
# The MIT License (MIT)
# Copyright (c) 2017 <NAME>
# see LICENSE for full details
##############################################
#
# class CircularBuffer(object):
# def __init__(self, size, mmax=100.0, mmin=0.0):
# """initialization"""
# self.index = 1
# self.size = size
# self._data = [0.0]*size
# self.sum = 0.0
# self.min = 1E900
# self.max = -1E900
# # self.min = mmin
# # self.max = mmax
#
# def push(self, value):
# """append an element"""
# self.sum += value
# self.max = value if value > self.max else self.max
# self.min = value if value < self.min else self.min
#
# # if len(self._data) == self.size:
# self._data[self.index] = value
# # else:
# # self._data.append(value)
# self.index = (self.index + 1) % self.size
#
# def __getitem__(self, key):
# """get element by index like a regular array"""
# i = self.index + key
# return(self._data[i])
#
# def __repr__(self):
# """return string representation"""
# return self._data.__repr__() + ' (' + str(len(self._data))+' items)'
#
# def get_all(self):
# """return a list of all the elements"""
# ret = []
# if self.index > 0:
# ret = self._data[self.index:self.size] + self._data[0:self.index]
# else:
# ret = self._data
# return ret
#
# def get_last(self):
# return self._data[self.index-1]
#
# def get_first(self):
# return self._data[self.index]
| 2.875 | 3 |
iceworm/engine/connectors/base.py | wrmsr0/iceworm | 0 | 12773838 | <reponame>wrmsr0/iceworm
"""
TODO:
- virtual vs physical tables
- physical tables requiring refresh
- incremental vs total physical tables
- materialized vs unmaterialized virtuals
- ** dataclass interop ** - dc->tbl, query
- just return object refs? jsonize?
- support snowflake json garbage on objects
- piecewise conf? csv mounts? ...
- *no*, but could have a csv_mount rule before ctor phase that rewrites the sole ctor cfg ele
- ctors/conns as ctxmgrs?
- HeapConnector - writable
- simpler dumber connector? where does sf query jit live?
- conns that support sql pushdown vs not..
- 'union'? 'overlay'? wrap one by heap/pg to give txns?
Def conns:
- sql - snow + pg (+ incl internal state storage pg, 'self')
- kafka
- dynamo
- system - conns, nodes, running ops, etc
- mongo
- redis
Alt conns:
- salesforce
- pagerduty
- jira
- gsheets
- slack
- github
- pandas? :/
"""
import abc
import typing as ta
from omnibus import check
from omnibus import configs as cfgs
from omnibus import dataclasses as dc
from omnibus import defs
from omnibus import lang
from omnibus.serde import mapping as sm
from .. import elements as els
from ... import metadata as md
from ...types import QualifiedName
ConnectorT = ta.TypeVar('ConnectorT', bound='Connector')
ConnectorConfigT = ta.TypeVar('ConnectorConfigT', bound='Connector.Config')
Row = ta.Mapping[str, ta.Any]
Rows = ta.Iterable[Row]
class RowSource(lang.Abstract):
    """Abstract producer of rows (mappings of column name to value)."""
    @abc.abstractmethod
    def produce_rows(self) -> Rows:
        """Yield the rows of this source."""
        raise NotImplementedError
class RowSink(lang.Abstract):
    """Abstract consumer of rows (mappings of column name to value)."""
    @abc.abstractmethod
    def consume_rows(self, rows: ta.Iterable[Row]) -> None:
        """Consume all of the given rows."""
        raise NotImplementedError
class ListRowSource(RowSource):
    """RowSource backed by an in-memory list; the input iterable is copied."""
    def __init__(self, rows: ta.Iterable[Row]) -> None:
        super().__init__()
        self._rows = list(rows)
    @property
    def rows(self) -> ta.List[Row]:
        # The backing list itself (not a copy).
        return self._rows
    def produce_rows(self) -> Rows:
        yield from self._rows
class ListRowSink(RowSink):
    """RowSink that appends consumed rows to an in-memory list.

    If *rows* is given, consumed rows are appended to that very list.
    """
    def __init__(self, rows: ta.Optional[ta.List[Row]] = None) -> None:
        super().__init__()
        self._rows = rows if rows is not None else []
    @property
    def rows(self) -> ta.List[Row]:
        # The backing list itself (not a copy).
        return self._rows
    def __iter__(self) -> ta.Iterator[Row]:
        return iter(self._rows)
    def consume_rows(self, rows: ta.Iterable[Row]) -> None:
        self._rows.extend(rows)
class Connector(ta.Generic[ConnectorT, ConnectorConfigT], cfgs.Configurable[ConnectorConfigT], lang.Abstract):
    """Abstract, configurable factory for Connections to an external system."""
    class Config(els.Element, cfgs.Config, abstract=True):
        """Base config element for connectors; concrete connectors subclass this."""
        dc.metadata({
            # Connector configs may no longer change once the CONNECTORS
            # phase has been reached.
            els.PhaseFrozen: els.PhaseFrozen(els.Phases.CONNECTORS),
            # Serde name is derived from the implementation class name.
            sm.Name: lambda cls: lang.decamelize(cfgs.get_impl(cls).__name__),
        })
        # Non-empty connector id, validated by the field check.
        id: els.Id = dc.field(check=lambda s: isinstance(s, els.Id) and s)
    def __init__(self, config: ConnectorConfigT) -> None:
        super().__init__(config)
    defs.repr('id')
    @property
    def config(self) -> ConnectorConfigT:
        return self._config
    @property
    def id(self) -> els.Id:
        return self._config.id
    def close(self) -> None:
        """Release any resources held by the connector; default is a no-op."""
        pass
    @abc.abstractmethod
    def connect(self) -> 'Connection[ConnectorT]':
        """Open and return a new Connection to this connector's system."""
        raise NotImplementedError
    @classmethod
    def of(cls, obj: ta.Union['Connector', Config]) -> 'Connector':
        """Coerce a Connector instance or a Config into a Connector."""
        if isinstance(obj, Connector):
            return obj
        elif isinstance(obj, Connector.Config):
            # Instantiate the implementation class registered for this config,
            # checking it is a subclass of cls.
            return check.isinstance(check.issubclass(cfgs.get_impl(obj), cls)(obj), Connector)
        else:
            raise TypeError(obj)
class Connection(lang.Abstract, ta.Generic[ConnectorT]):
    """Abstract live connection handed out by a Connector."""
    def __init__(self, ctor: ConnectorT) -> None:
        super().__init__()
        self._ctor: ConnectorT = check.isinstance(ctor, Connector)
        # Per-connection reflection cache: name -> metadata object. The value
        # type is Optional to allow negative caching, though _reflect results
        # currently only ever populate hits.
        self._reflect_cache: ta.MutableMapping[QualifiedName, ta.Optional[md.Object]] = {}
    defs.repr('ctor')
    @property
    def ctor(self) -> ConnectorT:
        return self._ctor
    def close(self) -> None:
        """Release the connection; default is a no-op."""
        pass
    @abc.abstractmethod
    def create_row_source(self, query: str) -> RowSource:
        """Return a RowSource producing the rows of *query*."""
        raise NotImplementedError
    @abc.abstractmethod
    def create_row_sink(self, table: QualifiedName) -> RowSink:
        """Return a RowSink that writes rows into *table*."""
        raise NotImplementedError
    def reflect(self, names: ta.Optional[ta.Iterable[QualifiedName]] = None) -> ta.Mapping[QualifiedName, md.Object]:
        """Return metadata for *names*, consulting the reflection cache.

        Only names missing from the cache are forwarded to _reflect.
        Reflecting the entire namespace (names=None) is not supported.
        """
        if names is not None:
            # Guard against a single name being passed where an iterable of
            # names is expected.
            check.not_isinstance(names, (str, QualifiedName))
            ret = {}
            missing = set()
            for name in names:
                check.isinstance(name, QualifiedName)
                try:
                    obj = self._reflect_cache[name]
                except KeyError:
                    missing.add(name)
                else:
                    # A cached None means "known absent": omit it silently.
                    if obj is not None:
                        ret[name] = obj
            if missing:
                new = self._reflect(missing)
                for name, obj in new.items():
                    check.not_in(name, ret)
                    check.not_in(name, self._reflect_cache)
                    ret[name] = self._reflect_cache[name] = obj
            return ret
        else:
            raise NotImplementedError
    @abc.abstractmethod
    def _reflect(self, names: ta.Optional[ta.Iterable[QualifiedName]] = None) -> ta.Mapping[QualifiedName, md.Object]:
        """Fetch metadata for *names* from the underlying system (uncached)."""
        raise NotImplementedError
| 1.945313 | 2 |
tests/test_ops/test_correlation.py | BIGWangYuDong/mmcv | 1 | 12773839 | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.ops import Correlation
# Fixtures: two 1x1x3x3 inputs plus the expected correlation output shape and
# values. Note gt_input1_grad equals _input2 (and, per the test, input1's
# gradient equals input2) for max_displacement=0.
_input1 = [[[[1., 2., 3.], [0., 1., 2.], [3., 5., 2.]]]]
_input2 = [[[[1., 2., 3.], [3., 1., 2.], [8., 5., 2.]]]]
gt_out_shape = (1, 1, 1, 3, 3)
_gt_out = [[[[[1., 4., 9.], [0., 1., 4.], [24., 25., 4.]]]]]
gt_input1_grad = [[[[1., 2., 3.], [3., 1., 2.], [8., 5., 2.]]]]
def assert_equal_tensor(tensor_a, tensor_b):
    """Assert that every element of *tensor_a* equals *tensor_b*."""
    elementwise_equal = tensor_a.eq(tensor_b)
    assert elementwise_equal.all()
class TestCorrelation:
    """CUDA tests for mmcv.ops.Correlation with zero displacement."""
    def _test_correlation(self, dtype=torch.float):
        """Check forward values and backward gradients for one dtype."""
        layer = Correlation(max_displacement=0)
        input1 = torch.tensor(_input1, dtype=dtype).cuda()
        input2 = torch.tensor(_input2, dtype=dtype).cuda()
        input1.requires_grad = True
        input2.requires_grad = True
        out = layer(input1, input2)
        out.backward(torch.ones_like(out))
        # `eq_cpu` is not implemented for 'Half' in torch1.5.0,
        # so we need to make a comparison for cuda tensor
        # rather than cpu tensor
        gt_out = torch.tensor(_gt_out, dtype=dtype).cuda()
        assert_equal_tensor(out, gt_out)
        # With max_displacement=0 each input's gradient is the other input.
        assert_equal_tensor(input1.grad.detach(), input2)
        assert_equal_tensor(input2.grad.detach(), input1)
    @pytest.mark.skipif(
        not torch.cuda.is_available(), reason='requires CUDA support')
    def test_correlation(self):
        # Exercise all three floating dtypes on the GPU.
        self._test_correlation(torch.float)
        self._test_correlation(torch.double)
        self._test_correlation(torch.half)
| 2.171875 | 2 |
Project/Final-Code/Graph Downloader.py | Connectomics-Classes/pseudocoelomates | 0 | 12773840 | import os
from urllib2 import urlopen, URLError, HTTPError
import lxml.html
from graphml2mat import graphml2mat
"""
Returns a list of all links at a given url string.
"""
def link_crawler(url):
    """Return absolute URLs for every <a href> found at *url*.

    NOTE(review): each href is naively prepended with *url*, which assumes
    the page uses relative links (as in an Apache-style directory listing).
    Python 2 code (urllib2).
    """
    connection = urlopen(url)
    domain = lxml.html.fromstring(connection.read())
    url_list = []
    for link in domain.xpath('//a/@href'): # select the url in href for all a tags(links)
        url_list.append(url+link)
    return url_list
"""
Function to download a url in a web directory.
"""
def dlfile(directory, url):
    """Download *url* into the current directory and convert it to .mat.

    After the .graphml file is saved (named by its basename), the directory
    prefix is stripped and graphml2mat converts it to a same-named .mat file.
    Python 2 code (print statements, `except E, e` syntax).
    """
    # Open the url
    try:
        f = urlopen(url)
        print "downloading " + url
        # Open our local file for writing
        with open(os.path.basename(url), "wb") as local_file:
            local_file.write(f.read())
        # NOTE(review): f.close() is skipped if write fails; a finally (or
        # contextlib.closing) would be safer.
        f.close()
        # Strip the web-directory prefix to get the local file name.
        url = url.replace(directory,"")
        url2 = url.replace("graphml","mat")
        graphml2mat(url, url2)
    #handle errors
    except HTTPError, e:
        print "HTTP Error:", e.code, url
    except URLError, e:
        print "URL Error:", e.reason, url
"""
Download all urls in a url_list from a web directory.
"""
def dlfiles(directory, url_list):
    """Download every URL in *url_list* from the given web directory."""
    for target_url in url_list:
        dlfile(directory, target_url)
def main():
    """Crawl the open-connectome directory listing and download every graph."""
    directory = "http://openconnecto.me/data/public/MR/m2g_v1_1_2/SWU4/sg/desikan/"
    urls = link_crawler(directory)
    # Drop the first link — presumably the parent-directory entry of the
    # listing; confirm against the actual page.
    urls.remove(urls[0])
    dlfiles(directory, urls)
if __name__ == '__main__':
    main()
| 3.421875 | 3 |
flask_maple/lazy.py | honmaple/flask-maple | 9 | 12773841 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************************************************
# Copyright © 2016 jianglin
# File Name: lazy.py
# Author: jianglin
# Email: <EMAIL>
# Created: 2016-11-08 23:02:24 (CST)
# Last Update: Wednesday 2018-09-26 10:52:51 (CST)
# By:
# Description:
# **************************************************************************
from werkzeug import import_string, cached_property
class LazyView(object):
    """Registers a URL rule whose view is imported lazily by dotted path."""
    def __init__(self, app=None, url=None, name=None, **options):
        self.app = app
        self.url = url
        self.name = name
        self.options = options
        if app is not None:
            self.init_app(app)
    def init_app(self, app):
        # Accessing self.view triggers (and caches) the import below.
        app.add_url_rule(self.url, view_func=self.view, **self.options)
    @cached_property
    def view(self):
        """Import the view by dotted path; computed once, then cached."""
        view = import_string(self.name)
        # NOTE(review): isinstance(view, (object,)) is always True, so this
        # branch runs unconditionally — it looks like it was meant to detect
        # class-based views only. Confirm intent.
        if isinstance(view, (object, )):
            assert self.options.get('endpoint') is not None
            endpoint = self.options.pop('endpoint')
            view = view.as_view(endpoint)
        return view
class LazyBlueprint(object):
    """Registers one or several blueprints on a Flask app, importing each
    blueprint object lazily by dotted path (``module + name``)."""
    def __init__(self, app=None, blueprint=None, module='app.', **options):
        self.app = app
        self.module = module
        self.blueprint = blueprint
        self.options = options
        if app is not None:
            self.init_app(app)
    def init_app(self, app):
        """Register the configured blueprint(s) on *app*."""
        if isinstance(self.blueprint, (list, tuple)):
            self._multi(app)
        else:
            self._single(app)
    def _single(self, app):
        # Import the blueprint object by dotted path and register it.
        blueprint = import_string(self.module + self.blueprint)
        app.register_blueprint(blueprint, **self.options)
    def _multi(self, app):
        # dict.fromkeys de-duplicates while preserving declaration order;
        # list(set(...)) registered blueprints in arbitrary order, making URL
        # registration order nondeterministic between runs.
        for name in dict.fromkeys(self.blueprint):
            blueprint = import_string(self.module + name)
            app.register_blueprint(blueprint, **self.options)
class LazyExtension(object):
    """Initializes one or several Flask extensions on an app, importing each
    extension instance lazily by dotted path (``module + name``)."""
    def __init__(self, app=None, extension=None, module='app.extensions.'):
        self.app = app
        self.module = module
        self.extension = extension
        if app is not None:
            self.init_app(app)
    def init_app(self, app):
        """Run ``init_app`` for the configured extension(s)."""
        if isinstance(self.extension, (list, tuple)):
            self._multi(app)
        else:
            self._single(app)
    def _single(self, app):
        # Import the extension instance by dotted path and initialize it.
        extension = import_string(self.module + self.extension)
        extension.init_app(app)
    def _multi(self, app):
        # dict.fromkeys de-duplicates while preserving declaration order;
        # list(set(...)) initialized extensions in arbitrary order.
        for name in dict.fromkeys(self.extension):
            extension = import_string(self.module + name)
            extension.init_app(app)
| 2.203125 | 2 |
api/serializers.py | themontem/EverPoll-api | 0 | 12773842 | import time
from datetime import datetime
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers, exceptions
from .models import *
def time_ago_in_words(time=False):
    """
    Get a datetime object or a int() Epoch timestamp and return a
    pretty string like 'an hour ago', 'Yesterday', '3 months ago',
    'just now', etc
    """
    now = timezone.now()
    if type(time) is int:
        # Convert the epoch with the same timezone-awareness as `now`:
        # timezone.now() is aware when USE_TZ is on, and subtracting a
        # naive datetime from an aware one raises TypeError. With tz=None
        # (USE_TZ off) this is identical to the old bare fromtimestamp call.
        diff = now - datetime.fromtimestamp(time, tz=now.tzinfo)
    elif isinstance(time, datetime):
        diff = now - time
    elif not time:
        # Falsy input (default False, None, 0-ish) means "now".
        diff = now - now
    # NOTE(review): a truthy value of any other type leaves `diff` unbound
    # and raises UnboundLocalError below — confirm callers never do that.
    second_diff = diff.seconds
    day_diff = diff.days
    if day_diff < 0:
        # Future timestamps produce no description.
        return ''
    if day_diff == 0:
        if second_diff < 10:
            return "just now"
        if second_diff < 60:
            return str(second_diff) + " seconds ago"
        if second_diff < 120:
            return "a minute ago"
        if second_diff < 3600:
            return str(int(second_diff / 60)) + " minutes ago"
        if second_diff < 7200:
            return "an hour ago"
        if second_diff < 86400:
            return str(int(second_diff / 3600)) + " hours ago"
    if day_diff == 1:
        return "Yesterday"
    if day_diff < 7:
        return str(day_diff) + " days ago"
    if day_diff < 31:
        return str(int(day_diff / 7)) + " weeks ago"
    if day_diff < 365:
        return str(int(day_diff / 30)) + " months ago"
    return str(int(day_diff / 365)) + " years ago"
class ChoiceSerializers(serializers.ModelSerializer):
    """Serializer for poll choices; at most four choices per question."""
    class Meta:
        model = Choice
        fields = ('id', 'choice_text', 'votes', 'question')
        # Vote counts are only ever changed server-side.
        read_only_fields = ('votes', )
        extra_kwargs = {
            'question': {'required': False},
        }
    def validate(self, attrs):
        """Enforce the per-question choice limit and question ownership.

        KeyError is swallowed because 'question' is optional (nested
        creation supplies it later).
        """
        try:
            if Choice.objects.filter(question=attrs['question']).count() >= 4:
                msg = _('Can\'t add more than four choice')
                raise serializers.ValidationError(msg)
            elif self.context['request'].user != attrs['question'].set.owner:
                msg = _('Not Authorised to add choice to this question')
                raise exceptions.NotAuthenticated(msg)
        except Choice.DoesNotExist:
            # NOTE(review): filter().count() never raises DoesNotExist, so
            # this handler looks unreachable — confirm before removing.
            pass
        except KeyError:
            pass
        return attrs
class QuestionSerializers(serializers.ModelSerializer):
    """Serializer for questions with nested choices; at most ten per set."""
    choice = ChoiceSerializers(many=True)
    class Meta:
        model = Question
        fields = ('id', 'question_text', 'set', 'choice')
        extra_kwargs = {
            'set': {'required': False},
        }
    def validate(self, attrs):
        """Enforce the per-set question limit and set ownership.

        KeyError is swallowed because 'set' is optional (nested creation
        supplies it later).
        """
        try:
            if Question.objects.filter(set=attrs['set']).count() >= 10:
                msg = _('Can\'t add more than ten question')
                raise serializers.ValidationError(msg)
            elif self.context['request'].user != attrs['set'].owner:
                msg = _('Not Authorised to add question to this set')
                raise exceptions.NotAuthenticated(msg)
        except Question.DoesNotExist:
            # NOTE(review): filter().count() never raises DoesNotExist; this
            # handler looks unreachable — confirm before removing.
            pass
        except KeyError:
            pass
        return attrs
    def create(self, validated_data):
        """Create the question and all of its nested choices."""
        choices_data = validated_data.pop('choice')
        question = Question.objects.create(**validated_data)
        for choice_data in choices_data:
            Choice.objects.create(question=question, **choice_data)
        return question
class QuestionSetListSerializers(serializers.ModelSerializer):
    """List/create serializer for question sets with nested questions/choices."""
    owner = serializers.ReadOnlyField(source='owner.username')
    question = QuestionSerializers(many=True)
    class Meta:
        model = QuestionSet
        fields = ('id', 'name', 'owner', 'question')
        extra_kwargs = {
            'question': {'write_only': True},
        }
    def create(self, validated_data):
        """Create the set plus all nested questions and their choices."""
        questions_data = validated_data.pop('question')
        question_set = QuestionSet.objects.create(**validated_data)
        for question_data in questions_data:
            choices_data = question_data.pop('choice')
            question = Question.objects.create(set=question_set, **question_data)
            for choice_data in choices_data:
                Choice.objects.create(question=question, **choice_data)
        return question_set
    def validate(self, attrs):
        # Duplicate-name enforcement was deliberately disabled: the leftover
        # QuestionSet.objects.get(...) lookup and its debug print(q.id) never
        # affected the outcome (and .get() could raise an uncaught
        # MultipleObjectsReturned), so they have been removed.
        return attrs
class QuestionSetDetailSerializers(QuestionSetListSerializers):
    """Detail variant: questions are nested read-only instead of write-only."""
    question = QuestionSerializers(many=True, read_only=True)
    class Meta(QuestionSetListSerializers.Meta):
        fields = ('id', 'name', 'owner', 'question')
class RoomSerializers(serializers.ModelSerializer):
    """Serializer for rooms, adding response count and human-readable age."""
    owner = serializers.ReadOnlyField(source='owner.username')
    days_ago = serializers.SerializerMethodField()
    response = serializers.SerializerMethodField()
    class Meta:
        model = Room
        fields = (
            'id', 'name', 'description', 'owner', 'question_set', 'destroyed', 'public', 'response', 'days_ago'
        )
    def validate(self, attrs):
        """Reject a room name the requesting user has already used.

        DoesNotExist means the name is free; KeyError covers a missing
        'name' key (e.g. partial updates).
        """
        try:
            Room.objects.get(name=attrs['name'], owner=self.context['request'].user)
        except Room.DoesNotExist:
            pass
        except KeyError:
            pass
        else:
            msg = _('A Room with that user already exists')
            raise serializers.ValidationError(msg)
        return attrs
    @staticmethod
    def get_days_ago(obj):
        # Human-readable age of the room, e.g. "3 days ago".
        return time_ago_in_words(obj.created)
    @staticmethod
    def get_response(obj):
        # Number of users attached to the room.
        return obj.users.count()
class RoomDetailSerializers(RoomSerializers):
    """Detail variant of RoomSerializers that nests the full question set."""
    question_set_detail = QuestionSetDetailSerializers(source='question_set')
    class Meta(RoomSerializers.Meta):
        fields = (
            'id', 'name', 'description', 'owner', 'destroyed', 'public', 'response', 'days_ago', 'question_set_detail',
        )
class UserSerializer(serializers.ModelSerializer):
    """Registration serializer: generates a unique username and hashes the
    password. (The corrupted 'password' extra_kwargs entry — a leftover
    anonymization artifact that was a syntax error — has been restored to
    the conventional write_only option.)"""
    class Meta:
        model = User
        fields = ('id', 'password', 'email', 'first_name', 'last_name')
        extra_kwargs = {
            # write_only keeps the password out of serialized responses.
            'password': {'write_only': True},
            'first_name': {'required': True},
            'last_name': {'required': True},
            'email': {'required': True},
        }
        read_only_fields = ('id',)
    def create(self, validated_data):
        """Create the user with a generated username and a hashed password."""
        user = User.objects.create(
            username=self.get_unique_username(),
            email=validated_data['email'],
            first_name=validated_data['first_name'],
            last_name=validated_data['last_name']
        )
        # set_password hashes; never assign the raw password directly.
        user.set_password(validated_data['password'])
        user.save()
        return user
    def get_unique_username(self):
        """Derive a username from the full name plus a millisecond timestamp,
        retrying until the result is unused."""
        username = (self.validated_data['first_name'] + self.validated_data['last_name']).replace(' ', '').lower()
        while True:
            username += str(int(time.time() * 1000))
            if User.objects.filter(username=username).exists():
                username += str(int(time.time() * 1000))
            else:
                break
        return username
    @staticmethod
    def validate_email(value):
        # .exists() issues a single EXISTS query instead of pulling every
        # email in the table into memory via values_list.
        if User.objects.filter(email=value).exists():
            raise serializers.ValidationError("A user with that email already exists.")
        return value
class AuthTokenSerializer(serializers.Serializer):
    """Validates login credentials; the 'email' field also accepts a username."""
    email = serializers.CharField(label=_("Email"))
    password = serializers.CharField(label=_("Password"), style={'input_type': 'password'})
    def validate(self, attrs):
        """Authenticate the user and attach it as attrs['user']."""
        email_or_username = attrs.get('email')
        password = attrs.get('password')
        if email_or_username and password:
            # If the value parses as an email, resolve it to the matching
            # username first, since authenticate() works on usernames.
            if self.validate_email_bool(email_or_username):
                try:
                    user_request = User.objects.get(email=email_or_username)
                except User.DoesNotExist:
                    msg = _('Unable to log in with provided credentials.')
                    raise serializers.ValidationError(msg)
                email_or_username = user_request.username
            user = authenticate(username=email_or_username, password=password)
            if user:
                if not user.is_active:
                    msg = _('User account is disabled.')
                    raise serializers.ValidationError(msg)
            else:
                msg = _('Unable to log in with provided credentials.')
                raise serializers.ValidationError(msg)
        else:
            msg = _('Must include "email" and "password".')
            raise serializers.ValidationError(msg)
        attrs['user'] = user
        return attrs
    @staticmethod
    def validate_email_bool(email):
        """Return True if *email* is a syntactically valid email address."""
        from django.core.validators import validate_email
        from django.core.exceptions import ValidationError
        try:
            validate_email(email)
            return True
        except ValidationError:
            return False
| 2.765625 | 3 |
gbf_bot/battle_result.py | mythnc/gbf-bot | 4 | 12773843 | <reponame>mythnc/gbf-bot
import logging
from os.path import join
import random
import sys
import time
import pyautogui
from .constants import images_dir, battle_result_config as config
from .components import Button, AppWindow
# Module-level logger plus the shared Button instances used throughout the
# post-battle flow. Positions come from the battle-result config section.
logger = logging.getLogger(__name__)

# "OK" button on the battle-result screen.
result_ok = Button("ok1.png", config["result ok"])
# Button that leaves the result screen; when the quest supports repeating,
# it is replaced below by the "play again" button.
to_next = Button("to_quest.png", config["to next"])
if config["has again"] == "yes":
    to_next = Button("again.png", config["again"])
# "Cancel" button of the friend-request dialog that may pop up afterwards.
friend_cancel = Button("cancel.png", config["friend cancel"])
def activate():
    """
    Drive the whole post-battle sequence: wait for the result screen, click
    through it, dismiss optional dialogs and return to the next quest.

    Blocks until the result "OK" button becomes visible, so call it only when
    a battle is expected to finish.
    """
    pyautogui.PAUSE = 1
    # wait before battle end
    logger.debug("wait before battle end")
    while True:
        time.sleep(0.5)
        # Search only the region where the OK button appears
        # (full width, lower-middle quarter of the window).
        found = AppWindow.locate_on(result_ok.path, (0, 1 / 2, 1, 1 / 4))
        if found is not None:
            break
    logger.info("click result ok")
    result_ok.click()
    # Small randomized delay to look less bot-like.
    time.sleep(0.8 + random.random() * 0.25)
    chips_dialog()
    guild_wars_dialog()
    # wait before next step
    count = 0
    logger.debug("wait before next step")
    while True:
        # if characters' LB level up, an extra click dismisses the overlay;
        # repeated every 10th poll.
        if count % 10 == 0:
            pyautogui.click()
        time.sleep(0.5)
        found = AppWindow.locate_on(to_next.path, (0, 2 / 5, 1, 1 / 5))
        if found is not None:
            break
        count += 1
    logger.info("click to next")
    to_next.click()
    time.sleep(0.8)
    # friend request cancel if any
    found = AppWindow.locate_on(friend_cancel.path, (0, 3 / 5, 1, 1 / 9))
    if found is not None:
        logger.info("click friend request cancel")
        friend_cancel.click()
    time.sleep(0.75 + random.random() * 0.35)
    halo_dialog()
def chips_dialog():
    """
    Dismiss the casino-chips reward dialog when it appears.

    With De La Fille (Earth) in the party there is a chance that a chips
    dialog pops up after the results screen; disabled via the
    "get chips" config flag.
    """
    if config["get chips"] == "no":
        return
    confirm_button = Button("ok1.png", config["chips ok"])
    medal_image = join(images_dir, "medal.png")
    # Look for the medal icon in the left half of the middle of the window.
    if AppWindow.locate_on(medal_image, (0, 2 / 5, 1 / 2, 1 / 5)) is not None:
        logger.info("chip dialog found")
        confirm_button.click()
def halo_dialog():
    """
    Detect a Dimension Halo raid popup and stop the bot if present.

    Handling the raid is deliberately left to the user, so the process exits
    instead of clicking anything.
    """
    if config["dimension halo"] == "no":
        return
    close_button = Button("close.png", config["dimension close"])
    match = AppWindow.locate_on(close_button.path, (0, 2 / 3, 1, 1 / 7))
    if match is None:
        return
    logger.info("dimension dialog found")
    logger.info("gbf robt finished")
    sys.exit(0)
def guild_wars_dialog():
    """
    Poll for the Guild-Wars result dialog and confirm it.

    Every 10th poll the "to next" button is clicked again, since the dialog
    sometimes only shows after leaving the result screen. Blocks until the
    dialog is found, so it must only run when config["guild wars"] is "yes"
    for quests that actually produce this dialog.
    """
    if config["guild wars"] == "no":
        return
    count = 0
    ok = Button("ok1.png", config["guild wars ok"])
    while True:
        count += 1
        # Periodic re-click of "to next" (first re-click after 10 polls).
        if count % 10 == 0:
            to_next.click()
        time.sleep(0.5)
        found = AppWindow.locate_on(ok.path, (0, 2 / 3, 1, 1 / 6))
        if found is not None:
            logger.info("guild wars result dialog found")
            ok.click()
            return
| 2.203125 | 2 |
CODES/webserver.py | PacktPublishing/Raspberry-Pi-Essentials-Learn-More-in-Less-Time | 3 | 12773844 | from flask import Flask
#import json
import RPi.GPIO as GPIO
GPIO.setwarnings(False) # Ignore warning for now
GPIO.setmode(GPIO.BCM) # Use physical pin numbering
GPIO.setup(21, GPIO.OUT, initial=GPIO.LOW)
app = Flask(__name__)
@app.route("/")
def hello():
    """Index route: simple liveness/greeting message."""
    return "Lets Have a Party"
@app.route("/<key>")
def led(key):
    """
    Switch the LED on GPIO 21 according to the URL key:
    "1" turns it on, "0" turns it off, anything else is rejected.
    """
    actions = {
        "1": (GPIO.HIGH, "LED ON \n"),
        "0": (GPIO.LOW, "LED OFF \n"),
    }
    if key not in actions:
        return "Command Not Found \n"
    level, message = actions[key]
    GPIO.output(21, level)
    return message
if __name__ == "__main__":
    # Listen on all interfaces so the LED can be driven from other hosts.
    # NOTE(review): debug=True enables the Werkzeug debugger; do not expose
    # this server on an untrusted network.
    app.run(host='0.0.0.0', port=5000, debug=True)
| 3.046875 | 3 |
DVDR_Client-master/client/alcohol_sensor.py | unimelb-networkedsociety/ser-pi | 0 | 12773845 | import time
import grovepi
#grovepi.pinMode(heaterSelPin,"OUTPUT")
def read_sensor():
    """
    Sample the Grove alcohol sensor and return [RS_gas, RS_gas / R0].

    The heater-select pin is pulled low while sampling and set high again
    afterwards. Returns None when the Grove I2C transfer fails.
    """
    try:
        analog_pin = 0
        heater_sel_pin = 15
        # Enable the sensor heater and let the reading settle.
        grovepi.digitalWrite(heater_sel_pin, 0)
        time.sleep(0.1)
        # Average 100 raw ADC readings to reduce noise.
        # (The original loop used range(1, 100) -- 99 samples -- while still
        # dividing by 100, biasing the average low.)
        total = 0
        for _ in range(100):
            total += grovepi.analogRead(analog_pin)
        sensor_value = total / 100.0
        # Convert the 10-bit ADC reading to volts (5 V reference).
        sensor_volt = sensor_value / 1024 * 5.0
        RS_gas = sensor_volt / (5.0 - sensor_volt)
        # R0 = 90.0222 was measured in clean air (see the sensor's
        # "First Test" demo).
        ratio = RS_gas / 90.0222  # ratio = RS/R0
        print("sensor_volt = ")
        print(sensor_volt)
        print("V")
        print("RS_ratio = ")
        print(RS_gas)
        print("Rs/R0 = ")
        print(ratio)
        # Disable the heater again.
        grovepi.digitalWrite(heater_sel_pin, 1)
        return [RS_gas, ratio]
    except IOError:
        # The original used Python-2 `print "Error"`, which is a SyntaxError
        # under Python 3.
        print("Error")
        return None
demo-led/led-gateway/repo/web_viewer/viewer.py | isandlaTech/cohorte-demos | 1 | 12773846 | <reponame>isandlaTech/cohorte-demos
#!/usr/bin/python
from pelix.ipopo.decorators import ComponentFactory, Provides, Requires, Property, \
BindField, UnbindField
import pelix.remote
import os
import json
import time
import uuid
import logging
# Module-wide logger for the LED/camera viewer servlet.
_logger = logging.getLogger("viewer.viewer")
@ComponentFactory("led_viewer_factory")
@Provides('pelix.http.servlet')
@Requires("_leds", "java:/led.services.LedService", optional=True, aggregate=True)
@Requires("_cams", "java:/led.services.CameraService", optional=True, aggregate=True)
@Property('_path', 'pelix.http.path', "/")
@Property('_reject', pelix.remote.PROP_EXPORT_REJECT, ['pelix.http.servlet'])
class Viewer(object):
    """
    iPOPO HTTP servlet exposing a small REST API plus a static web UI to list
    and drive remote LED and camera services discovered via Pelix remote
    services (the Java-side services are injected through @Requires).
    """

    def __init__(self):
        # Servlet mount path (injected via the 'pelix.http.path' property).
        self._path = None
        # Injected LED services plus a name -> {"svc", "svc_ref"} lookup map.
        self._leds = []
        self._leds_map = {}
        # Timestamp of the last bind/unbind; clients poll it for changes.
        self._leds_list_lastupdate = time.time()
        # Injected camera services and their lookup map.
        self._cams = []
        self._cams_map = {}
        # Session token of the client currently holding "priority" control,
        # and the time it was last seen.
        self._uuid = None
        self._time_uuid = 0

    def get_lastupdate(self):
        """Return the timestamp of the last LED/camera bind or unbind."""
        result = {"lastupdate" : self._leds_list_lastupdate}
        return result

    def get_leds(self):
        """Return the name and current state of every bound LED service."""
        #_logger.critical("get_leds")
        result = {"leds": []}
        for led in self._leds_map:
            state = self._leds_map[led]["svc"].get_state()
            result["leds"].append({"name": led, "state": state})
        return result

    def get_led(self, led):
        """Return one LED's state, or an 'unknown' placeholder when unbound."""
        #_logger.critical("get_led %s", led)
        result = {}
        if led in self._leds_map:
            result["name"] = led
            state = self._leds_map[led]["svc"].get_state()
            result["state"] = state
            return result
        else:
            return {"name": "unknown", "state": "unknown"}

    def get_cams(self):
        """Return the name and current state of every bound camera service."""
        #_logger.critical("get_cams")
        result = {"cams": []}
        for cam in self._cams_map:
            state = self._cams_map[cam]["svc"].get_state()
            result["cams"].append({"name": cam, "state": state})
        return result

    def get_cam(self, cam):
        """Return one camera's state, or an 'unknown' placeholder when unbound."""
        #_logger.critical("get_cam %s", cam)
        result = {}
        if cam in self._cams_map:
            result["name"] = cam
            state = self._cams_map[cam]["svc"].get_state()
            result["state"] = state
            return result
        else:
            return {"name": "unknown", "state": "unknown"}

    def send_action(self, led, action):
        """
        Switch a LED "on" or "off" through its remote service.

        NOTE(review): the "%d" placeholder below is wrong for a string LED
        name (should be %s, makes the log call fail to format), and
        self._leds_map[led] raises KeyError for an unknown LED instead of
        returning an empty result -- confirm intended behavior.
        """
        _logger.critical("send_action %s to led: %d", action, led)
        result = {}
        _led = self._leds_map[led]
        if _led:
            result["name"] = led
            if action == "on":
                result["state"] = self._leds_map[led]["svc"].on()
            elif action == "off":
                result["state"] = self._leds_map[led]["svc"].off()
        return result

    def send_action_cam(self, cam, action):
        """
        Trigger a camera action ("picture" takes a snapshot).

        NOTE(review): same %d-with-string-name and KeyError concerns as
        send_action().
        """
        _logger.critical("send_action_cam %s to led: %d", action, cam)
        result = {}
        _cam = self._cams_map[cam]
        if _cam:
            result["name"] = cam
            if action == "picture":
                result["state"] = "not busy"
                result["res"] = self._cams_map[cam]["svc"].takePicture()
                _logger.critical("RES : %s", result["res"])
        return result

    @BindField('_leds')
    def on_bind_led(self, field, svc, svc_ref):
        """Register a newly injected LED service under its 'led.name' property."""
        #_logger.critical("binding a new led...")
        props = svc_ref.get_properties()
        led_name = props.get("led.name")
        led_name = str(led_name).lower()
        self._leds_map[led_name] = {}
        self._leds_map[led_name]["svc_ref"] = svc_ref
        self._leds_map[led_name]["svc"] = svc
        self._leds_list_lastupdate = time.time()
        _logger.critical("name: %s", led_name)

    @UnbindField('_leds')
    def on_unbind_led(self, field, svc, svc_ref):
        """Remove a departing LED service from the lookup map."""
        #_logger.critical("unbinding a led...")
        props = svc_ref.get_properties()
        led_name = props.get("led.name")
        led_name = str(led_name).lower()
        del self._leds_map[led_name]
        self._leds_list_lastupdate = time.time()
        #_logger.critical("name: %s", led_name)

    @BindField('_cams')
    def on_bind_cam(self, field, svc, svc_ref):
        """Register a newly injected camera service under its 'cam.name' property."""
        _logger.critical("binding a new cam...")
        props = svc_ref.get_properties()
        cam_name = props.get("cam.name")
        cam_name = str(cam_name).lower()
        self._cams_map[cam_name] = {}
        self._cams_map[cam_name]["svc_ref"] = svc_ref
        self._cams_map[cam_name]["svc"] = svc
        self._leds_list_lastupdate = time.time()
        #_logger.critical("name: %s", led_name)

    @UnbindField('_cams')
    def on_unbind_cam(self, field, svc, svc_ref):
        """Remove a departing camera service from the lookup map."""
        _logger.critical("unbinding a cam...")
        props = svc_ref.get_properties()
        cam_name = props.get("cam.name")
        cam_name = str(cam_name).lower()
        del self._cams_map[cam_name]
        self._leds_list_lastupdate = time.time()
        #_logger.critical("name: %s", led_name)

    """
    Resources -----------------------------------------------------
    """
    def root_dir(self):
        """Return the directory containing this module (static resource root)."""
        return os.path.abspath(os.path.dirname(__file__))

    def get_file(self, filename):
        """Read a file below the resource root; returns the error text on failure."""
        try:
            src = os.path.join(self.root_dir(), filename)
            with open(src, 'rb') as fp:
                return fp.read()
        except IOError as exc:
            return str(exc)

    def load_resource(self, path, request, response):
        """Serve a static file with a MIME type guessed from its extension."""
        mimetypes = {
            ".css": "text/css",
            ".html": "text/html",
            ".js": "application/javascript",
            ".jpeg": "image/jpeg",
            ".jpg": "image/jpeg",
            ".png": "image/png",
            ".gif": "image/gif"
        }
        complete_path = os.path.join(self.root_dir(), path)
        ext = os.path.splitext(path)[1]
        # Unknown extensions fall back to text/html.
        mimetype = mimetypes.get(ext, "text/html")
        content = self.get_file(complete_path)
        return response.send_content(200, content, mimetype)

    def show_main_page(self, request, response):
        """Redirect the browser (meta refresh) to the static single-page UI."""
        rel_path = self._path
        while len(rel_path) > 0 and rel_path[0] == '/':
            rel_path = rel_path[1:]
        if not rel_path:
            rel_path = ''
        content = "<html><head><meta http-equiv='refresh' content='0; URL=" #+ self._path
        content += rel_path + "static/web/index.html'/></head><body></body></html>"
        response.send_content(200, content)

    def show_error_page(self, request, response):
        """Send a minimal 404 page."""
        content = """<html>
<head><title>Cohorte Robots</title><head><body><h3>404 This is not the web page you are looking for!</h3></body></html>"""
        response.send_content(404, content)

    def sendJson(self, data, response):
        """Serialize *data* as pretty-printed JSON and send it uncached."""
        result = json.dumps(data, sort_keys=False,
                            indent=4, separators=(',', ': '))
        print(result)
        response.set_header("cache-control", "no-cache")
        response.send_content(200, result, "application/json")

    """
    Get -----------------------------------------------------------
    """
    def do_GET(self, request, response):
        """
        (1) /leds/
        (2) /leds/static
        (3) /leds/api/lastupdate
        (4) /leds/api/leds
        (5) /leds/api/leds/ARDUINO_YUN_LED
        (6) /leds/api/leds/ARDUINO_YUN_LED/on
        (7) /leds/api/leds/ARDUINO_YUN_LED/off
        (8) /leds/api/cams
        (9) /leds/api/cams/CAMERA1
        (10) /leds/api/cams/CAMERA1/picture
        """
        # --- Session / priority handling ---------------------------------
        # A client keeps "priority" while it keeps polling; the priority
        # token expires after 3 seconds of silence.
        if((time.time() - self._time_uuid) > 3) :
            self._uuid = None
        cookie = request.get_header("Cookie")
        uuid_var = str(uuid.uuid4())
        if(cookie != None) :
            # Reuse the session token from the cookie, assumed to be
            # "sessionToken=<uuid>".
            uuid_var = str(cookie.split('=')[1])
        if(uuid_var == self._uuid) :
            self._time_uuid = time.time()
        # Refresh the session cookie (15-minute lifetime).
        # NOTE(review): both branches below are identical -- the condition
        # appears to have no effect; confirm intent.
        if((time.time() - self._time_uuid) > 800) :
            response.set_header("Set-Cookie", "sessionToken="+ uuid_var +"; Max-Age=900; path=/")
        else:
            response.set_header("Set-Cookie", "sessionToken="+ uuid_var +"; Max-Age=900; path=/")
        if(self._uuid == None) :
            # No priority holder: this client becomes the priority holder.
            self._time_uuid = time.time()
            self._uuid = uuid_var
        query = request.get_path()
        # prepare query path: remove first and last '/' if exists
        while len(query) > 0 and query[0] == '/':
            query = query[1:]
        while len(query) > 0 and query[-1] == '/':
            query = query[:-1]
        # get parts of the url
        if len(query) == 0:
            self.show_main_page(request, response)
        else:
            #parts = str(query).split('?')[0].split('/')
            parts = str(query).split('/')
            if len(parts) == 0:
                # show main page
                self.show_main_page(request, response)
                #self.show_error_page(request, response)
            elif len(parts) > 0:
                if str(parts[0]) == "static":
                    if len(parts) > 1:
                        self.load_resource('/'.join(parts[1:]), request, response)
                    else:
                        self.show_error_page(request, response)
                elif str(parts[0]) == "api":
                    if len(parts) == 2:
                        # Collection endpoints: /api/leds, /api/lastupdate,
                        # /api/cams. Each response is tagged with whether this
                        # client holds priority.
                        if str(parts[1]).lower() == "leds":
                            t = self.get_leds()
                            if(self._uuid == uuid_var) :
                                t["prioritaire"]="yes"
                            else:
                                t["prioritaire"]="no"
                            self.sendJson(t, response)
                        elif str(parts[1]).lower() == "lastupdate":
                            t = self.get_lastupdate()
                            if(self._uuid == uuid_var) :
                                t["prioritaire"]="yes"
                            else:
                                t["prioritaire"]="no"
                            self.sendJson(t, response)
                        elif str(parts[1]).lower() == "cams":
                            t = self.get_cams()
                            if(self._uuid == uuid_var) :
                                t["prioritaire"]="yes"
                            else:
                                t["prioritaire"]="no"
                            self.sendJson(t, response)
                    elif len(parts) == 3:
                        # Item endpoints: /api/leds/<name>, /api/cams/<name>,
                        # plus /api/connexion/<password> to claim priority.
                        if str(parts[1]).lower() == "leds":
                            led = str(parts[2]).lower()
                            t = self.get_led(led)
                            if(self._uuid == uuid_var) :
                                t["prioritaire"]="yes"
                            else:
                                t["prioritaire"]="no"
                            self.sendJson(t, response)
                        elif str(parts[1]).lower() == "cams":
                            cam = str(parts[2]).lower()
                            t = self.get_cam(cam)
                            if(self._uuid == uuid_var) :
                                t["prioritaire"]="yes"
                            else:
                                t["prioritaire"]="no"
                            self.sendJson(t, response)
                        elif str(parts[1]).lower() == "connexion":
                            # NOTE(review): hard-coded password transmitted in
                            # the URL -- a security concern for any real
                            # deployment.
                            mdp = str(parts[2])
                            t = {}
                            if(mdp=="isandla$38TECH"):
                                self._uuid = uuid_var
                            if(self._uuid == uuid_var) :
                                t["prioritaire"]="yes"
                            else:
                                t["prioritaire"]="no"
                            self.sendJson(t, response)
                    elif len(parts) == 4:
                        # Action endpoints: /api/leds/<name>/<on|off> and
                        # /api/cams/<name>/picture.
                        if str(parts[1]).lower() == "leds":
                            led = str(parts[2]).lower()
                            action = str(parts[3]).lower()
                            t = self.send_action(led, action)
                            if(self._uuid == uuid_var) :
                                t["prioritaire"]="yes"
                            else:
                                t["prioritaire"]="no"
                            self.sendJson(t, response)
                        elif str(parts[1]).lower() == "cams":
                            cam = str(parts[2]).lower()
                            action = str(parts[3]).lower()
                            t = self.send_action_cam(cam, action)
                            if(self._uuid == uuid_var) :
                                t["prioritaire"]="yes"
                            else:
                                t["prioritaire"]="no"
                            self.sendJson(t, response)
| 2.015625 | 2 |
pydemic/models/model.py | GCES-Pydemic/pydemic | 0 | 12773847 | import datetime
import warnings
from copy import copy
from types import MappingProxyType
from typing import Sequence, Callable, Mapping, Union, TypeVar, TYPE_CHECKING
import numpy as np
import pandas as pd
import sidekick as sk
from .clinical_acessor import Clinical
from .metaclass import ModelMeta
from .. import fitting as fit
from .. import formulas
from ..diseases import Disease, DiseaseParams, disease as get_disease
from ..logging import log
from ..mixins import (
Meta,
WithParamsMixin,
WithDataModelMixin,
WithInfoMixin,
WithResultsMixin,
WithRegionDemography,
)
from ..packages import plt
from ..utils import today, not_implemented, extract_keys, param_property
# Generic type variable used for fluent "returns self" annotations.
T = TypeVar("T")

# Time anchors evaluated once at import time (so "today" is fixed per process).
NOW = datetime.datetime.now()
TODAY = datetime.date(NOW.year, NOW.month, NOW.day)
DAY = datetime.timedelta(days=1)

# Lazily-imported plotting helpers (avoids a hard dependency at import time).
pplt = sk.import_later("..plot", package=__package__)

if TYPE_CHECKING:
    # Imported only for static type checking to avoid circular imports.
    from ..model_group import ModelGroup
    from pydemic_ui.model import UIProperty
class Model(
    WithDataModelMixin,
    WithInfoMixin,
    WithResultsMixin,
    WithParamsMixin,
    WithRegionDemography,
    metaclass=ModelMeta,
):
    """
    Base class for all models.

    Concrete epidemic models subclass this and provide the actual dynamics
    via _initial_state(), run_to_fill() and the _initial_cases/_initial_infected
    hooks. Simulation results accumulate in ``self.data`` (one row per day).
    """

    meta: Meta

    class Meta:
        # Name used by the formulas/fitting helpers to identify the model.
        model_name = "Model"
        # Mapping of user-facing column names to internal variable names.
        data_aliases = {}

    # Initial values
    state: np.ndarray = None
    initial_cases: float = sk.lazy(lambda self: self._initial_cases())
    initial_infected: float = sk.lazy(lambda self: self._initial_infected())

    # Initial time
    date: datetime.date = None
    time: float = 0.0
    iter: int = sk.property(lambda m: len(m.data))
    dates: pd.DatetimeIndex = sk.property(lambda m: m.to_dates(m.times))
    times: pd.Index = sk.property(lambda m: m.data.index)

    # Common epidemiological parameters
    R0: float = param_property("R0", default=2.0)
    K = sk.property(not_implemented)  # growth rate; defined by subclasses
    duplication_time = property(lambda self: np.log(2) / self.K)

    # Special accessors
    clinical: Clinical = property(lambda self: Clinical(self))
    clinical_model: type = None
    clinical_params: Mapping = MappingProxyType({})
    disease: Disease = None
    disease_params: DiseaseParams = None

    @property
    def ui(self) -> "UIProperty":
        """
        Accessor for the optional pydemic-ui integration; raises a helpful
        RuntimeError when pydemic-ui is not installed.
        """
        try:
            from pydemic_ui.model import UIProperty
        except ImportError as ex:
            log.warn(f"Could not import pydemic_ui.model: {ex}")
            msg = (
                "must have pydemic-ui installed to access the model.ui attribute.\n"
                "Please 'pip install pydemic-ui' before proceeding'"
            )
            raise RuntimeError(msg)

        return UIProperty(self)

    def __init__(
        self, params=None, *, run=None, name=None, date=None, clinical=None, disease=None, **kwargs
    ):
        self.name = name or f"{type(self).__name__} model"
        self.date = pd.to_datetime(date or today())
        self.disease = get_disease(disease)
        self._initialized = False

        # Fix demography
        demography_opts = WithRegionDemography._init_from_dict(self, kwargs)
        self.disease_params = self.disease.params(**demography_opts)

        # Init other mixins
        WithParamsMixin.__init__(self, params, keywords=kwargs)
        WithInfoMixin.__init__(self)
        WithResultsMixin.__init__(self)
        WithDataModelMixin.__init__(self)

        if clinical:
            clinical = dict(clinical)
            self.clinical_model = clinical.pop("model", None)
            self.clinical_params = clinical

        # Remaining keywords are treated as attribute overrides.
        for k, v in kwargs.items():
            if hasattr(self, k):
                try:
                    setattr(self, k, v)
                except AttributeError:
                    name = type(self).__name__
                    msg = f"cannot set '{k}' attribute in '{name}' model"
                    raise AttributeError(msg)
            else:
                raise TypeError(f"invalid arguments: {k}")

        if run is not None:
            self.run(run)

    def __str__(self):
        # Models are displayed by their human-readable name.
        return self.name

    def _initial_cases(self):
        # Hook: number of cases at t=0; concrete models must provide it.
        raise NotImplementedError("must be implemented in subclass")

    def _initial_infected(self):
        # Hook: number of infected at t=0; concrete models must provide it.
        raise NotImplementedError("must be implemented in subclass")

    def epidemic_model_name(self):
        """
        Return the epidemic model name.
        """
        return self.meta.model_name

    #
    # Pickling and copying
    #
    # noinspection PyUnresolvedReferences
    def copy(self, **kwargs):
        """
        Copy instance possibly setting new values for attributes.

        Keyword Args:
            All keyword arguments are used to reset attributes in the copy.

        Examples:
            >>> m.copy(R0=1.0, name="Stable")
            <SIR(name="Stable")>
        """
        cls = type(self)
        data = self.__dict__.copy()
        params = data.pop("_params")
        # The results cache is never shared between copies.
        data.pop("_results_cache")

        new = object.__new__(cls)
        for k in list(kwargs):
            if k in data:
                data[k] = kwargs.pop(k)

        new._params = copy(params)
        new._results_cache = {}
        new.__dict__.update(copy(data))

        for k, v in kwargs.items():
            setattr(new, k, v)

        return new

    def split(self, n=None, **kwargs) -> "ModelGroup":
        """
        Create n copies of model, each one may override a different set of
        parameters and return a ModelGroup.

        Args:
            n:
                Number of copies in the resulting list. It can also be a sequence
                of dictionaries with arguments to pass to the .copy() constructor.

        Keyword Args:
            Keyword arguments are passed to the `.copy()` method of the model. If
            the keyword is a sequence, it applies the n-th component of the sequence
            to the corresponding n-th model.
        """
        from ..model_group import ModelGroup

        if n is None:
            # Infer the group size from the first sequence-valued keyword.
            for k, v in kwargs.items():
                if not isinstance(v, str) and isinstance(v, Sequence):
                    n = len(v)
                    break
            else:
                raise TypeError("cannot determine the group size from arguments")

        if isinstance(n, int):
            options = [{} for _ in range(n)]
        else:
            options = [dict(d) for d in n]
        n: int = len(options)

        # Merge option dicts
        for k, v in kwargs.items():
            if not isinstance(v, str) and isinstance(v, Sequence):
                xs = v
                m = len(xs)
                if m != n:
                    raise ValueError(
                        f"sizes do not match: "
                        f"{k} should be a sequence of {n} "
                        f"items, got {m}"
                    )
                for opt, x in zip(options, xs):
                    opt.setdefault(k, x)
            else:
                for opt in options:
                    opt.setdefault(k, v)

        # Fix name
        for opt in options:
            try:
                name = opt["name"]
            except KeyError:
                pass
            else:
                # Names may be templates, e.g. "{region} ({n})".
                opt["name"] = name.format(n=n, **opt)

        return ModelGroup(self.copy(**opt) for opt in options)

    def split_children(self, options=MappingProxyType({}), **kwargs) -> "ModelGroup":
        """
        Similar to split, but split into the children of the given class.

        Args:
            options:
                A mapping between region or region id
        """
        from ..model_group import ModelGroup

        if self.region is None:
            raise ValueError("model is not bound to a region")

        # Propagate the current parameter values unless overridden.
        for k in self._params:
            if k not in kwargs:
                kwargs[k] = self.get_param(k)

        for attr in ("disease",):
            kwargs.setdefault(attr, getattr(self, attr))

        return ModelGroup.from_children(self.region, type(self), options, **kwargs)

    def reset(self, date: Union[datetime.date, float] = None, **kwargs):
        """
        Return a copy of the model setting the state to the final state. If a
        positional "date" argument is given, reset to the state to the one in the
        specified date.

        Args:
            date (float or date):
                An optional float or datetime selecting the desired date.

        Keyword Args:
            Additional keyword arguments are handled the same way as the
            :method:`copy` method.
        """
        if date is None:
            date = self.date
            time = self.time
        elif isinstance(date, (float, int)):
            time = float(date)
            date = self.to_date(date)
        else:
            time: float = self.to_time(date)

        kwargs["data"] = self.data.loc[[time]]
        kwargs["date"] = date
        kwargs["state"] = kwargs["data"].iloc[0].values
        # NOTE(review): time is reset to 1 rather than 0 here -- confirm this
        # off-by-one is intentional.
        kwargs["time"] = 1
        return self.copy(**kwargs)

    def trim_dates(self, start=0, end=None):
        """
        Trim data in model to the given interval specified by start and end
        dates or times.

        Args:
            start (int or date):
                Starting date. If not given, start at zero.
            end (int or date):
                End date. If not given, select up to the final date.
        """
        # NOTE(review): despite the docstring, date arguments are coerced with
        # int() here, so only integer times actually work -- confirm.
        start = int(start or 0)
        end = int(end or self.time)
        new = self.copy(
            date=self.to_date(start),
            data=self.data.iloc[start:end].reset_index(drop=True),
            time=end - start,
            state=self.data.iloc[end].values,
        )
        return new

    #
    # Initial conditions
    #
    def set_ic(self, state=None, **kwargs):
        """
        Set initial conditions.
        """
        if self.state is None:
            if state is None:
                state = self.initial_state(**kwargs)
            self.state = np.array(state, dtype=float)

        # Translate user-facing aliases to internal variable names.
        alias = self.meta.data_aliases
        for k, v in list(kwargs.items()):
            if k in alias:
                del kwargs[k]
                kwargs[alias[k]] = v
        components = extract_keys(self.meta.variables, kwargs)

        for k, v in components.items():
            idx = self.meta.get_variable_index(k)
            self.state[idx] = v
        return self

    def set_data(self, data):
        """
        Force a dataframe into simulation state.
        """
        data = data.copy()
        data.columns = [self.meta.data_aliases.get(c, c) for c in data.columns]

        self.set_ic(state=data.iloc[0])
        self.data = data.reset_index(drop=True)
        self.time = len(data) - 1
        self.date = data.index[-1]
        self.state[:] = data.iloc[-1]
        self.info["observed.dates"] = data.index[[0, -1]]
        self._initialized = True
        return self

    def set_cases_from_region(self: T) -> T:
        """
        Set the number of cases from region.
        """
        self.set_cases()
        return self

    def set_cases(self: T, curves=None, adjust_R0=False, save_observed=False) -> T:
        """
        Initialize model from a dataframe with the deaths and cases curve.

        This curve is usually the output of disease.epidemic_curve(region), and is
        automatically retrieved if not passed explicitly and the region of the model
        is set.

        Args:
            curves:
                Dataframe with cumulative ["cases", "deaths"] columns. If not given,
                or None, fetches from disease.epidemic_curves(info)
            adjust_R0:
                If true, adjust R0 from the observed cases.
            save_observed:
                If true, save the cases curves into the model.info["observed.cases"] key.
        """
        if curves is None:
            warnings.warn("omitting curves from set_cases will be deprecated.")
            if self.region is None or self.disease is None:
                msg = 'must provide both "region" and "disease" or an explicit cases ' "curve."
                raise ValueError(msg)
            curves = self.region.pydemic.epidemic_curve(self.disease)

        if adjust_R0:
            warnings.warn("adjust_R0 argument is deprecated")
            method = "RollingOLS" if adjust_R0 is True else adjust_R0
            Re, _ = value = fit.estimate_R0(self, curves, Re=True, method=method)
            assert np.isfinite(Re), f"invalid value for R0: {value}"
            self.R0 = Re

        # Save notification it in the info dictionary for reference
        if "cases_observed" in curves:
            tf = curves.index[-1]
            rate = curves.loc[tf, "cases_observed"] / curves.loc[tf, "cases"]
        else:
            rate = 1.0
        self.info["observed.notification_rate"] = rate

        # Save simulation state from data
        model = self.epidemic_model_name()
        curve = fit.cases(curves)
        data = fit.epidemic_curve(model, curve, self)
        self.set_data(data)
        self.initial_cases = curve.iloc[0]

        if adjust_R0:
            # Compensate for susceptible depletion in the estimated R0.
            self.R0 /= self["susceptible:final"] / self.population
            self.info["observed.R0"] = self.R0

        # Optionally save cases curves into the info dictionary
        if save_observed:
            key = "observed.curves" if save_observed is True else save_observed
            df = curves.rename(columns={"cases": "cases_raw"})
            df["cases"] = curve
            self.info[key] = df

        return self

    def adjust_R0(self, method="RollingOLS"):
        """Re-estimate R0 from the model's own cases curve and store it."""
        curves = self["cases"]
        self.R0, _ = fit.estimate_R0(self, curves, method=method)
        self.info["observed.R0"] = self.R0

    def initial_state(self, cases=None, **kwargs):
        """
        Create the default initial vector for model.
        """
        if cases is not None:
            kwargs.setdefault("population", self.population)
            return formulas.initial_state(self.epidemic_model_name(), cases, self, **kwargs)
        return self._initial_state()

    def infect(self, n=1, column="infectious"):
        """
        Convert 'n' susceptible individuals to infectious.
        """
        last = self.data.index[-1]
        # Never convert more people than remain susceptible.
        n = min(n, self.data.loc[last, "susceptible"])
        self.data.loc[last, column] += n
        self.data.loc[last, "susceptible"] -= n
        return self

    def _initial_state(self):
        # Hook: default initial state vector; concrete models must provide it.
        raise NotImplementedError

    def initialize(self):
        """
        Force initialization.
        """
        if not self._initialized:
            self.set_ic()
            self.data = make_dataframe(self)
        self._initialized = True

    #
    # Running simulation
    #
    def run(self: T, time) -> T:
        """
        Runs the model for the given duration.
        """
        steps = int(time)
        self.initialize()

        # NOTE(review): returns None here although annotated to return T --
        # callers chaining .run(0) would break; confirm.
        if time == 0:
            return

        _, *shape = self.data.shape

        ts = self.time + 1.0 + np.arange(steps)
        data = np.zeros((steps, *shape))
        date = self.date

        if self.info.get("event.simulation_start") is None:
            self.info.save_event("simulation_start")

        self.run_to_fill(data, ts)
        extra = pd.DataFrame(data, columns=self.data.columns, index=ts)

        self.data = pd.concat([self.data, extra])
        self.date = date + time * DAY
        self.time = ts[-1]
        self.state = data[-1]
        return self

    def run_to_fill(self: T, data, times) -> T:
        """
        Run simulation to fill pre-allocated array of data.
        """
        raise NotImplementedError

    def run_until(self, condition: Callable[["Model"], bool]):
        """
        Run until stop condition is satisfied.

        Args:
            condition:
                A function that receives a model and return True if stop
                criteria is satisfied.
        """
        raise NotImplementedError

    #
    # Utility methods
    #
    def to_dates(self, times: Sequence[int], start_date=None) -> pd.DatetimeIndex:
        """
        Convert an array of numerical times to dates.

        Args:
            times:
                Sequence of times.
            start_date:
                Starting date. If not given, uses the starting date for
                simulation.
        """
        dates: pd.DatetimeIndex

        if isinstance(times, pd.DatetimeIndex):
            return times
        if start_date is None:
            start_date = self.date - self.time * DAY

        # noinspection PyTypeChecker
        return pd.to_datetime(times, unit="D", origin=start_date)

    def to_date(self, time: Union[float, int]) -> datetime.date:
        """
        Convert a single instant to the corresponding datetime
        """
        return pd.to_datetime(time - self.time, unit="D", origin=self.date)

    def to_times(self, dates: Sequence, start_date=None) -> np.ndarray:
        """
        Convert an array of numerical times to dates.

        Args:
            dates:
                Sequence of dates.
            start_date:
                Starting date. If not given, uses the starting date for
                simulation.
        """
        if start_date is None:
            start_date = self.date - self.time * DAY

        data = [(date - start_date).days for date in dates]
        return np.array(data) if data else np.array([], dtype=int)

    def to_time(self, date, start_date=None) -> float:
        """
        Convert date to time.
        """
        if start_date is None:
            return self.to_time(date, self.date) - self.time
        return float((date - start_date).days)

    def get_times(self, idx=None):
        """
        Get times possibly sliced by an index.
        """
        if idx is None:
            return self.times
        else:
            return self.times[idx]

    def get_data_time(self, idx):
        """Data accessor: the time axis as a Series indexed by itself."""
        times = self.get_times(idx)
        return pd.Series(times, index=times)

    def get_data_date(self, idx):
        """Data accessor: calendar dates as a Series indexed by time."""
        times = self.get_times(idx)
        dates = self.to_dates(times)
        return pd.Series(dates, index=times)

    def get_data_cases(self, idx):
        # Hook: cumulative cases curve; concrete models must provide it.
        raise NotImplementedError

    #
    # Plotting and showing information
    #
    def plot(
        self,
        components=None,
        *,
        ax=None,
        logy=False,
        show=False,
        dates=False,
        legend=True,
        grid=True,
    ):
        """
        Plot the result of simulation.
        """
        ax = ax or plt.gca()
        kwargs = {"logy": logy, "ax": ax, "grid": grid, "legend": legend}

        def get_column(col):
            # The ":dates" suffix switches the x-axis to calendar dates.
            if dates:
                col += ":dates"
            data = self[col]
            return data

        components = self.meta.variables if components is None else components
        for col in components:
            data = get_column(col)
            data.plot(**kwargs)
        if show:
            plt.show()
def make_dataframe(model: Model):
    """
    Create the initial dataframe for the given model.

    The frame holds a single row: the model's current state vector, with one
    column per model variable, indexed by the current simulation time.
    """
    columns = model.meta.variables
    initial_row = [model.state]
    return pd.DataFrame(initial_row, columns=columns, index=[model.time])
| 1.90625 | 2 |
DeepCode.py | nexfreak07/Mask-Detector | 1 | 12773848 | <reponame>nexfreak07/Mask-Detector
# Data Preprocessing
# --------------------- Importing the Libraries -----------------------------
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import os

# Dataset root: one sub-directory per class.
DIC = r"/content/drive/MyDrive/Mask_Detect/data"
# The two categories / class sub-directories.
CAT = ["with_mask", "without_mask"]

print("Loading the images...")
# data holds the preprocessed image arrays, labels the matching category names.
data = []
labels = []
# Loop over each category directory and load every image at 224x224
# (the input size MobileNetV2 expects).
for category in CAT:
    path = os.path.join(DIC, category)
    for img in os.listdir(path):
        img_path = os.path.join(path, img)
        image = load_img(img_path, target_size=(224, 224))
        image = img_to_array(image)      # deep learning works on arrays
        image = preprocess_input(image)  # MobileNetV2-specific scaling
        data.append(image)
        labels.append(category)

# Converting data and labels to numpy arrays
data = np.array(data, dtype='float32')
labels = np.array(labels)

# --- One-hot encode the labels BEFORE splitting/training --------------------
# BUG FIX: in the original script this encoding block sat at the very end of
# the file, AFTER train_test_split and model.fit, so the network was handed
# raw string labels (which fails with binary_crossentropy). It must run here.
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)

# Splitting the images to train and test sets (stratified on the labels).
(trainx, testx, trainy, testy) = train_test_split(
    data, labels, test_size=0.20, stratify=labels, random_state=42)

# Data augmentation: generate extra training variations on the fly.
aug = ImageDataGenerator(
    rotation_range=20,
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode='nearest')

# Hyperparameters: learning rate, epochs, batch size.
Lr = 1e-4
epochs = 20
batch = 32

# Transfer learning: frozen MobileNetV2 base ("bottom") + a small trainable
# classification head ("top"), built with the Functional API.
bottomModel = MobileNetV2(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3)))

topModel = bottomModel.output
topModel = AveragePooling2D(pool_size=(7, 7))(topModel)
topModel = Flatten()(topModel)
topModel = Dense(128, activation='relu')(topModel)
topModel = Dropout(0.5)(topModel)  # To avoid overfitting
topModel = Dense(2, activation='softmax')(topModel)

# Connecting both top and bottom model
model = Model(inputs=bottomModel.input, outputs=topModel)

# Freeze the base so only the new head is trained.
for layer in bottomModel.layers:
    layer.trainable = False

# Compile the model
print("Compiling...")
opt = Adam(lr=Lr, decay=Lr / epochs)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])

# Train the head on the augmented data stream.
print("Training the Head...")
history = model.fit(aug.flow(trainx, trainy, batch_size=batch),
                    steps_per_epoch=len(trainx) // batch,
                    validation_data=(testx, testy),
                    validation_steps=len(testx) // batch,
                    epochs=epochs)

# Prediction on the testing set.
print("Evaluating Network...")
predIdxs = model.predict(testx, batch_size=batch)
# argmax gives the index of the label with the largest predicted probability.
predIdxs = np.argmax(predIdxs, axis=1)

# Saving the model
print("Saving...")
model.save('maskDetector', save_format='h5')

# Plotting the training loss and accuracy.
N = epochs
plt.style.use('ggplot')
plt.figure()
plt.plot(np.arange(0, N), history.history['loss'], label='train_loss')
plt.plot(np.arange(0, N), history.history['val_loss'], label='val_loss')
plt.plot(np.arange(0, N), history.history['accuracy'], label='train_acc')
plt.plot(np.arange(0, N), history.history['val_accuracy'], label='val_acc')
plt.title('Training Loass and Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Loss-Accuracy')
plt.legend(loc='lower left')
plt.savefig('plot.png')
| 2.953125 | 3 |
stemdl/runtime.py | nlaanait/stemdl | 0 | 12773849 | <filename>stemdl/runtime.py<gh_stars>0
"""
Created on 10/9/17.
@author: <NAME>, <NAME>
email: <EMAIL>, <EMAIL>
"""
import time
from datetime import datetime
import os
import sys
import re
import numpy as np
import math
from itertools import chain
from multiprocessing import cpu_count
from copy import deepcopy
#TF
import tensorflow as tf
from collections import OrderedDict
import horovod.tensorflow as hvd
from tensorflow.python.client import timeline
#from tensorflow.contrib.compiler import xla
# stemdl
from . import network
from . import inputs
from . import optimizers
from . import lr_policies
from . import losses
tf.logging.set_verbosity(tf.logging.ERROR)
def tensorflow_version_tuple():
    """Return the running TensorFlow version as a (major, minor, patch) tuple.

    ``major`` and ``minor`` are ints; ``patch`` is kept as a string since it
    may carry suffixes (e.g. '0-rc1').
    """
    major, minor, patch = tf.__version__.split('.')
    return int(major), int(minor), patch
def float32_variable_storage_getter(getter, name, shape=None, dtype=None,
                                    initializer=None, regularizer=None,
                                    trainable=True,
                                    *args, **kwargs):
    """Custom variable getter for mixed-precision training.

    Trainable variables are always created/stored in float32 (the "master"
    copy); when a lower-precision dtype is requested, a cast of that master
    copy is returned instead. Non-trainable variables are created in the
    requested dtype unchanged.
    """
    if not trainable:
        # No master copy needed for non-trainable variables.
        return getter(name, shape, dtype=dtype,
                      initializer=initializer, regularizer=regularizer,
                      trainable=trainable,
                      *args, **kwargs)
    master = getter(name, shape, dtype=tf.float32,
                    initializer=initializer, regularizer=regularizer,
                    trainable=trainable,
                    *args, **kwargs)
    # Hand back a cast view when the caller asked for e.g. float16.
    return tf.cast(master, dtype) if dtype != tf.float32 else master
class TrainHelper:
    """Bookkeeping helper for the distributed training loop.

    Tracks the global step and elapsed epochs, measures per-step wall time,
    logs loss/throughput/FLOPS statistics, writes TF summaries (rank 0 only)
    and dumps chrome-tracing GPU traces.
    """
    def __init__(self, params, saver, writer, net_ops, last_step=0, log_freq=1):
        # params: run configuration dict (batch_size, checkpt_dir, start_time, ...)
        # saver/writer: tf.train.Saver and summary-writer handles (kept for
        #   callers; write_summaries below opens its own FileWriter)
        # net_ops: estimated operations per example, used only for FLOPS reporting
        # last_step: global step to resume counting from (0 for a fresh run)
        # log_freq: steps between log_stats calls; used to average step time
        self.params = params
        self.last_step = last_step
        self.net_ops = net_ops
        self.start_time = time.time()
        self.cumm_time = time.time()
        self.saver = saver
        self.writer = writer
        # Epochs completed across all horovod ranks so far.
        self.elapsed_epochs = self.last_step * self.params['batch_size'] * 1.0 * hvd.size() / \
                              self.params['NUM_EXAMPLES_PER_EPOCH']
        self.log_freq = log_freq
    def before_run(self):
        """Advance the step counter, restart the per-step timer and refresh
        the elapsed-epochs estimate. Call once at the top of every iteration."""
        self.last_step +=1
        self.start_time = time.time()
        self.elapsed_epochs = self.last_step * self.params['batch_size'] * 1.0 * hvd.size() / \
                              self.params['NUM_EXAMPLES_PER_EPOCH']
    # call to hvd forces global namespace into class on purpose.
    def write_summaries(self, summary):
        """Append a serialized summary protobuf at the current step (rank 0 only)."""
        if hvd.rank() == 0:
            with tf.summary.FileWriter(self.params['checkpt_dir']) as summary_writer:
                summary_writer.add_summary(summary, global_step=self.last_step)
        print_rank('Saved Summaries.')
    def save_checkpoint(self):
        # Placeholder: checkpointing is performed directly in the training loop.
        pass
    def run_summary(self) :
        """Print a one-time banner describing the run configuration (rank 0 only)."""
        tfversion = tensorflow_version_tuple()
        print_rank( 'TensorFlow ... %i.%i.%s' % tfversion )
        # LSB_* variables are set by the LSF batch scheduler (e.g. on Summit).
        if 'LSB_JOBNAME' in os.environ :
            print_rank( 'job name ... %s' % os.environ[ 'LSB_JOBNAME' ] )
        if 'LSB_JOBID' in os.environ :
            print_rank( 'job number ... %s' % os.environ[ 'LSB_JOBID' ] )
        if 'LSB_OUTPUTFILE' in os.environ :
            print_rank( 'job output ... %s' % os.environ[ 'LSB_OUTPUTFILE' ] )
        print_rank( 'number of ranks ... %d' % hvd.size( ) )
        print_rank( 'network_config ... %s' % self.params[ 'network_config' ] )
        print_rank( 'batch_size ... %d' % self.params[ 'batch_size' ] )
        print_rank( ' ... %d total' % ( self.params[ 'batch_size' ] * hvd.size( ) ) )
        print_rank( 'data type ... %s' % ( 'fp16' if self.params[ 'IMAGE_FP16' ] else 'fp32' ) )
        print_rank( 'data_dir ... %s' % self.params[ 'data_dir' ] )
        print_rank( 'input_flags ... %s' % self.params[ 'input_flags' ] )
        print_rank( 'hyper_params ... %s' % self.params[ 'hyper_params' ] )
        print_rank( 'checkpt_dir ... %s' % self.params[ 'checkpt_dir' ] )
        print_rank( '' )
        print_rank( 'command line ... %s' % self.params[ 'cmdline' ] )
        print_rank( '' )
    @staticmethod
    def save_trace(run_metadata, trace_dir, trace_step):
        # Writing trace to json file. open with chrome://tracing
        trace = timeline.Timeline(step_stats=run_metadata.step_stats)
        with open( trace_dir + '/timeline_' + str( trace_step ) + '.ctf.' + str(hvd.rank()) + '.json', 'w') as f:
            f.write(trace.generate_chrome_trace_format( show_memory = True, show_dataflow = True ))
        print_rank('Run & Saved GPU Trace.')
    def log_stats(self, loss_value, learning_rate):
        """Log loss, lr, step time, examples/sec and estimated FLOPS for this step."""
        self.nanloss(loss_value)
        t = time.time( )
        duration = t - self.start_time
        examples_per_sec = self.params['batch_size'] * hvd.size() / duration
        # cumm_time does double duty: here it becomes the mean wall time per
        # step since the previous log call; at the end of this method it is
        # reset to the current timestamp for the next interval.
        self.cumm_time = (time.time() - self.cumm_time)/self.log_freq
        flops = self.net_ops * examples_per_sec
        avg_flops = self.net_ops * self.params['batch_size'] * hvd.size() / self.cumm_time
        format_str = (
        'time= %.1f, step= %d, epoch= %2.2e, loss= %.3e, lr= %.2e, step_time= %2.2f sec, ranks= %d, examples/sec= %.1f, flops = %3.2e, average_time= %2.2f, average_flops= %3.3e')
        print_rank(format_str % ( t - self.params[ 'start_time' ], self.last_step, self.elapsed_epochs,
                    loss_value, learning_rate, duration, hvd.size(), examples_per_sec, flops, self.cumm_time, avg_flops) )
        self.cumm_time = time.time()
    @staticmethod
    def nanloss(loss_value):
        # Warn (but deliberately do not abort) when the loss has diverged to NaN;
        # the training loop itself checks np.isnan and breaks out.
        if np.isnan(loss_value):
            print_rank('loss is nan...')
            # sys.exit(0)
class TrainHelper_YNet(TrainHelper):
    """TrainHelper variant for the YNet model: log_stats additionally reports
    the individual branch losses (inverter, decoder RE/IM, regularizer)."""
    def log_stats(self, loss_value, aux_losses, learning_rate):
        """Log total and per-branch losses plus throughput.

        :param aux_losses: sequence unpacked as
            (loss_inv, loss_dec_re, loss_dec_im, loss_reg)
        """
        t = time.time( )
        duration = t - self.start_time
        examples_per_sec = self.params['batch_size'] * hvd.size() / duration
        # Mean wall time per step since the previous log call (see base class).
        self.cumm_time = (time.time() - self.cumm_time)/self.log_freq
        flops = self.net_ops * examples_per_sec
        avg_flops = self.net_ops * self.params['batch_size'] * hvd.size() / self.cumm_time
        loss_inv, loss_dec_re, loss_dec_im, loss_reg = aux_losses
        self.nanloss(loss_value)
        format_str = (
        'time= %.1f, step= %2.2e, epoch= %2.2e, lr= %.2e, loss=%.3e, loss_inv= %.2e, loss_dec_im=%.2e, loss_dec_re=%.2e, loss_reg=%.2e, step_time= %2.2f sec, ranks= %d, examples/sec= %.1f')
        print_rank(format_str % ( t - self.params[ 'start_time' ], self.last_step, self.elapsed_epochs,
                    learning_rate, loss_value, loss_inv, loss_dec_im, loss_dec_re, loss_reg, duration, hvd.size(), examples_per_sec))
        self.cumm_time = time.time()
def print_rank(*args, **kwargs):
    """Print only on the root horovod process (rank 0), so multi-rank runs
    do not emit duplicated log lines."""
    if hvd.rank() != 0:
        return
    print(*args, **kwargs)
def train(network_config, hyper_params, params, gpu_id=None):
    """
    Train the network for a number of steps using horovod and asynchronous I/O staging ops.
    :param network_config: OrderedDict, network configuration
    :param hyper_params: OrderedDict, hyper_parameters
    :param params: dict
    :param gpu_id: int or None; GPU index to pin this rank to. Defaults to hvd.local_rank().
    :return: (val_results, loss_results) — lists of (step, value) tuples collected
             during validation and logging, respectively.
    """
    #########################
    # Start Session #
    #########################
    # Config file for tf.Session()
    config = tf.ConfigProto(allow_soft_placement=params['allow_soft_placement'],
                            log_device_placement=params['log_device_placement'],
                            )
    config.gpu_options.allow_growth = True
    if gpu_id is None:
        gpu_id = hvd.local_rank()
    # One visible GPU per horovod rank.
    config.gpu_options.visible_device_list = str(gpu_id)
    config.gpu_options.force_gpu_compatible = True
    config.intra_op_parallelism_threads = 6
    config.inter_op_parallelism_threads = max(1, cpu_count()//6)
    #config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
    #jit_scope = tf.contrib.compiler.jit.experimental_jit_scope
    # JIT causes gcc errors on dgx-dl and is built without on Summit.
    sess = tf.Session(config=config)
    ############################
    # Setting up Checkpointing #
    ###########################
    last_step = 0
    if params[ 'restart' ] :
        # Check if training is a restart from checkpoint
        ckpt = tf.train.get_checkpoint_state(params[ 'checkpt_dir' ] )
        if ckpt is None :
            print_rank( '<ERROR> Could not restart from checkpoint %s' % params[ 'checkpt_dir' ])
        else :
            # Checkpoint paths end in '-<step>', e.g. model.ckpt-1000.
            last_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print_rank("Restoring from previous checkpoint @ step=%d" %last_step)
    global_step = tf.Variable(last_step, name='global_step',trainable=False)
    ############################################
    # Setup Graph, Input pipeline and optimizer#
    ############################################
    # Start building the graph
    # Setup data stream
    with tf.device(params['CPU_ID']):
        with tf.name_scope('Input') as _:
            if params['filetype'] == 'tfrecord':
                dset = inputs.DatasetTFRecords(params, dataset=params['dataset'], debug=False)
            elif params['filetype'] == 'lmdb':
                dset = inputs.DatasetLMDB(params, dataset=params['dataset'], debug=params['debug'])
            images, labels = dset.minibatch()
            # Staging images on host
            staging_op, (images, labels) = dset.stage([images, labels])
    with tf.device('/gpu:%d' % hvd.local_rank()):
        # Copy images from host to device
        gpucopy_op, (images, labels) = dset.stage([images, labels])
        IO_ops = [staging_op, gpucopy_op]
    ##################
    # Building Model#
    ##################
    # Build model, forward propagate, and calculate loss
    scope = 'model'
    summary = False
    if params['debug']:
        summary = True
    print_rank('Starting up queue of images+labels: %s, %s ' % (format(images.get_shape()),
                                                                format(labels.get_shape())))
    with tf.variable_scope(scope,
                           # Force all variables to be stored as float32
                           custom_getter=float32_variable_storage_getter) as _:
        # Setup Neural Net
        if params['network_class'] == 'resnet':
            n_net = network.ResNet(scope, params, hyper_params, network_config, images, labels,
                                   operation='train', summary=summary, verbose=False)
        if params['network_class'] == 'cnn':
            n_net = network.ConvNet(scope, params, hyper_params, network_config, images, labels,
                                    operation='train', summary=summary, verbose=True)
        if params['network_class'] == 'fcdensenet':
            n_net = network.FCDenseNet(scope, params, hyper_params, network_config, images, labels,
                                       operation='train', summary=summary, verbose=True)
        if params['network_class'] == 'fcnet':
            n_net = network.FCNet(scope, params, hyper_params, network_config, images, labels,
                                  operation='train', summary=summary, verbose=True)
        if params['network_class'] == 'YNet':
            n_net = network.YNet(scope, params, hyper_params, network_config, images, labels,
                                 operation='train', summary=summary, verbose=True)
        ###### XLA compilation #########
        #if params['network_class'] == 'fcdensenet':
        #    def wrap_n_net(*args):
        #        images, labels = args
        #        n_net = network.FCDenseNet(scope, params, hyper_params, network_config, images, labels,
        #                            operation='train', summary=False, verbose=True)
        #        n_net.build_model()
        #        return n_net.model_output
        #
        #    n_net.model_output = xla.compile(wrap_n_net, inputs=[images, labels])
        ##############################
        # Build it and propagate images through it.
        n_net.build_model()
        # calculate the total loss
        # NOTE(review): loss_averages_op is never used below — confirm whether it
        # was meant to be included in the train-op dependencies.
        total_loss, loss_averages_op = losses.calc_loss(n_net, scope, hyper_params, params, labels, step=global_step, images=images, summary=summary)
        #get summaries, except for the one produced by string_input_producer
        if summary: summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
        # print_rank([scope.name for scope in n_net.scopes])
    #######################################
    # Apply Gradients and setup train op #
    #######################################
    # get learning policy
    def learning_policy_func(step):
        # Warmup + decay schedule; returns the lr tensor for this step.
        return lr_policies.decay_warmup(params, hyper_params, step)
    ## TODO: implement other policies in lr_policies
    # Gradient accumulation: when iter_size > 0, updates are skipped on steps
    # where global_step % iter_size != 0 (gradients are accumulated instead).
    # NOTE(review): with the default iter_size = 0, tf.floormod divides by
    # zero — presumably accumulate_step is always set > 0 in practice; confirm.
    iter_size = params.get('accumulate_step', 0)
    skip_update_cond = tf.cast(tf.floormod(global_step, tf.constant(iter_size, dtype=tf.int32)), tf.bool)
    if params['IMAGE_FP16']:
        opt_type='mixed'
    else:
        opt_type=tf.float32
    # setup optimizer
    opt_dict = hyper_params['optimization']['params']
    train_opt, learning_rate = optimizers.optimize_loss(total_loss, hyper_params['optimization']['name'],
                                                        opt_dict, learning_policy_func, run_params=params, hyper_params=hyper_params, iter_size=iter_size, dtype=opt_type,
                                                        loss_scaling=hyper_params.get('loss_scaling',1.0),
                                                        skip_update_cond=skip_update_cond,
                                                        on_horovod=True, model_scopes=n_net.scopes)
    # Gather all training related ops into a single one.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    increment_op = tf.assign_add(global_step, 1)
    ema = tf.train.ExponentialMovingAverage(decay=0.9, num_updates=global_step)
    all_ops = tf.group(*([train_opt] + update_ops + IO_ops + [increment_op]))
    # train_op applies the EMA shadow-variable update; via the control
    # dependency it also forces the optimizer step, batch-norm updates,
    # I/O staging and the global-step increment to run first.
    with tf.control_dependencies([all_ops]):
        train_op = ema.apply(tf.trainable_variables())
        # train_op = tf.no_op(name='train')
    ########################
    # Setting up Summaries #
    ########################
    # Stats and summaries
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    # if hvd.rank() == 0:
    summary_writer = tf.summary.FileWriter(os.path.join(params['checkpt_dir'], str(hvd.rank())), sess.graph)
    # Add Summary histograms for trainable variables and their gradients
    if params['debug']:
        if hyper_params['network_type'] == 'inverter':
            # Transpose NCHW -> NHWC for tf.summary.image.
            predic = tf.transpose(n_net.model_output, perm=[0,2,3,1])
            tf.summary.image("outputs", predic, max_outputs=4)
            tf.summary.image("targets", tf.transpose(labels, perm=[0,2,3,1]), max_outputs=4)
            tf.summary.image("inputs", tf.transpose(tf.reduce_mean(images, axis=1, keepdims=True), perm=[0,2,3,1]), max_outputs=4)
        elif hyper_params['network_type'] == 'YNet':
            predic_inverter = tf.transpose(n_net.model_output['inverter'], perm=[0,2,3,1])
            tf.summary.image("output_inverter", predic_inverter, max_outputs=2)
            predic_decoder_RE = tf.transpose(n_net.model_output['decoder_RE'], perm=[0,2,3,1])
            predic_decoder_IM = tf.transpose(n_net.model_output['decoder_IM'], perm=[0,2,3,1])
            tf.summary.image("output_decoder_RE", predic_decoder_RE, max_outputs=2)
            tf.summary.image("output_decoder_IM", predic_decoder_IM, max_outputs=2)
            new_labels = tf.unstack(labels, axis=1)
            for label, tag in zip(new_labels, ['potential', 'probe_RE', 'probe_IM']):
                label = tf.expand_dims(label, axis=-1)
                # label = tf.transpose(label, perm=[0,2,3,1])
                tf.summary.image(tag, label, max_outputs=2)
            tf.summary.image("inputs", tf.transpose(tf.reduce_mean(images, axis=1, keepdims=True), perm=[0,2,3,1]), max_outputs=4)
        summary_merged = tf.summary.merge_all()
    ###############################
    # Setting up training session #
    ###############################
    #Initialize variables
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    # Sync
    print_rank('Syncing horovod ranks...')
    sync_op = hvd.broadcast_global_variables(0)
    sess.run(sync_op)
    # prefill pipeline first
    print_rank('Prefilling I/O pipeline...')
    for i in range(len(IO_ops)):
        sess.run(IO_ops[:i + 1])
    # Saver and Checkpoint restore
    checkpoint_file = os.path.join(params[ 'checkpt_dir' ], 'model.ckpt')
    saver = tf.train.Saver(max_to_keep=None, save_relative_paths=True)
    # Check if training is a restart from checkpoint
    if params['restart'] and ckpt is not None:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print_rank("Restoring from previous checkpoint @ step=%d" % last_step)
    # Train
    train_elf = TrainHelper(params, saver, summary_writer, n_net.get_ops(), last_step=last_step, log_freq=params['log_frequency'])
    saveStep = params['save_step']
    validateStep = params['validate_step']
    summaryStep = params['summary_step']
    train_elf.run_summary()
    maxSteps = params[ 'max_steps' ]
    logFreq = params[ 'log_frequency' ]
    traceStep = params[ 'trace_step' ]
    maxTime = params.get('max_time', 1e12)
    val_results = []
    loss_results = []
    loss_value = 1e10
    val = 1e10
    while train_elf.last_step < maxSteps :
        train_elf.before_run()
        # Flags deciding what extra work (logging/saving/summaries/tracing/
        # validation) to do on this step; fetches are combined so each branch
        # still runs exactly one session step.
        doLog = bool(train_elf.last_step % logFreq == 0)
        doSave = bool(train_elf.last_step % saveStep == 0)
        doSumm = bool(train_elf.last_step % summaryStep == 0 and params['debug'])
        doTrace = bool(train_elf.last_step == traceStep and params['gpu_trace'])
        doValidate = bool(train_elf.last_step % validateStep == 0)
        # Wall-clock budget exceeded -> finish gracefully below.
        doFinish = bool(train_elf.start_time - params['start_time'] > maxTime)
        if train_elf.last_step == 1 and params['debug']:
            summary = sess.run([train_op, summary_merged])[-1]
            train_elf.write_summaries( summary )
        elif not doLog and not doSave and not doTrace and not doSumm:
            sess.run(train_op)
        elif doLog and not doSave and not doSumm:
            _, loss_value, lr = sess.run( [ train_op, total_loss, learning_rate ] )
            loss_results.append((train_elf.last_step, loss_value))
            train_elf.log_stats( loss_value, lr )
        elif doLog and doSumm and doSave :
            _, summary, loss_value, lr = sess.run( [ train_op, summary_merged, total_loss, learning_rate ])
            loss_results.append((train_elf.last_step, loss_value))
            train_elf.log_stats( loss_value, lr )
            train_elf.write_summaries( summary )
            if hvd.rank( ) == 0 :
                saver.save(sess, checkpoint_file, global_step=train_elf.last_step)
            print_rank('Saved Checkpoint.')
        elif doLog and doSumm :
            _, summary, loss_value, lr = sess.run( [ train_op, summary_merged, total_loss, learning_rate ])
            loss_results.append((train_elf.last_step, loss_value))
            train_elf.log_stats( loss_value, lr )
            train_elf.write_summaries( summary )
        elif doSumm:
            summary = sess.run([train_op, summary_merged])[-1]
            train_elf.write_summaries( summary )
        elif doSave :
            if hvd.rank( ) == 0 :
                saver.save(sess, checkpoint_file, global_step=train_elf.last_step)
            print_rank('Saved Checkpoint.')
        elif doTrace :
            sess.run(train_op, options=run_options, run_metadata=run_metadata)
            train_elf.save_trace(run_metadata, params[ 'trace_dir' ], params[ 'trace_step' ] )
            train_elf.before_run()
        # Here we do validation:
        if doValidate:
            val = validate(network_config, hyper_params, params, sess, dset, num_batches=50)
            val_results.append((train_elf.last_step,val))
        if doFinish:
            # Early exit: tear down the graph/session and return what we have.
            #val = validate(network_config, hyper_params, params, sess, dset, num_batches=50)
            #val_results.append((train_elf.last_step, val))
            tf.reset_default_graph()
            tf.keras.backend.clear_session()
            sess.close()
            return val_results, loss_results
        # Diverged loss -> abandon training (see TrainHelper.nanloss warning).
        if np.isnan(loss_value):
            break
    val_results.append((train_elf.last_step,val))
    tf.reset_default_graph()
    tf.keras.backend.clear_session()
    sess.close()
    return val_results, loss_results
def train_YNet(network_config, hyper_params, params, gpu_id=None):
    """
    Train the network for a number of steps using horovod and asynchronous I/O staging ops.
    YNet variant: in addition to the supervised step it periodically runs an
    unsupervised constraint-refinement step (reg_op) on buffered input batches.
    :param network_config: OrderedDict, network configuration
    :param hyper_params: OrderedDict, hyper_parameters
    :param params: dict
    :param gpu_id: int or None; GPU index to pin this rank to. Defaults to hvd.local_rank().
    :return: (val_results, loss_results) — lists of (step, value) tuples.
    """
    #########################
    # Start Session #
    #########################
    # Config file for tf.Session()
    config = tf.ConfigProto(allow_soft_placement=params['allow_soft_placement'],
                            log_device_placement=params['log_device_placement'],
                            )
    config.gpu_options.allow_growth = True
    if gpu_id is None:
        gpu_id = hvd.local_rank()
    # One visible GPU per horovod rank.
    config.gpu_options.visible_device_list = str(gpu_id)
    config.gpu_options.force_gpu_compatible = True
    config.intra_op_parallelism_threads = 6
    config.inter_op_parallelism_threads = max(1, cpu_count()//6)
    #config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
    #jit_scope = tf.contrib.compiler.jit.experimental_jit_scope
    # JIT causes gcc errors on dgx-dl and is built without on Summit.
    sess = tf.Session(config=config)
    ############################
    # Setting up Checkpointing #
    ###########################
    last_step = 0
    if params[ 'restart' ] :
        # Check if training is a restart from checkpoint
        ckpt = tf.train.get_checkpoint_state(params[ 'checkpt_dir' ] )
        if ckpt is None :
            print_rank( '<ERROR> Could not restart from checkpoint %s' % params[ 'checkpt_dir' ])
        else :
            # Checkpoint paths end in '-<step>', e.g. model.ckpt-1000.
            last_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print_rank("Restoring from previous checkpoint @ step=%d" %last_step)
    global_step = tf.Variable(last_step, name='global_step',trainable=False)
    ############################################
    # Setup Graph, Input pipeline and optimizer#
    ############################################
    # Start building the graph
    # Setup data stream
    with tf.device(params['CPU_ID']):
        with tf.name_scope('Input') as _:
            if params['filetype'] == 'tfrecord':
                dset = inputs.DatasetTFRecords(params, dataset=params['dataset'], debug=False)
            elif params['filetype'] == 'lmdb':
                dset = inputs.DatasetLMDB(params, dataset=params['dataset'], debug=params['debug'])
            images, labels = dset.minibatch()
            # Staging images on host
            staging_op, (images, labels) = dset.stage([images, labels])
    with tf.device('/gpu:%d' % hvd.local_rank()):
        # Copy images from host to device
        gpucopy_op, (images, labels) = dset.stage([images, labels])
        IO_ops = [staging_op, gpucopy_op]
    ##################
    # Building Model#
    ##################
    # Build model, forward propagate, and calculate loss
    scope = 'model'
    summary = False
    if params['debug']:
        summary = True
    print_rank('Starting up queue of images+labels: %s, %s ' % (format(images.get_shape()),
                                                                format(labels.get_shape())))
    with tf.variable_scope(scope,
                           # Force all variables to be stored as float32
                           custom_getter=float32_variable_storage_getter) as _:
        # Setup Neural Net
        n_net = network.YNet(scope, params, hyper_params, network_config, images, labels,
                             operation='train', summary=summary, verbose=True)
        ###### XLA compilation #########
        #if params['network_class'] == 'fcdensenet':
        #    def wrap_n_net(*args):
        #        images, labels = args
        #        n_net = network.FCDenseNet(scope, params, hyper_params, network_config, images, labels,
        #                            operation='train', summary=False, verbose=True)
        #        n_net.build_model()
        #        return n_net.model_output
        #
        #    n_net.model_output = xla.compile(wrap_n_net, inputs=[images, labels])
        ##############################
        # Build it and propagate images through it.
        n_net.build_model()
        # # Stop gradients
        # stop_op = tf.stop_gradient(n_net.model_output['encoder'])
        # calculate the total loss
        # psi_out_true aliases the staged images tensor; later it is used as a
        # feed_dict key so buffered batches can be re-fed for the constraint step.
        psi_out_true = images
        constr_loss = losses.get_YNet_constraint(n_net, hyper_params, params, images, weight=10)
        total_loss, _, indv_losses = losses.calc_loss(n_net, scope, hyper_params, params, labels, step=global_step, images=images, summary=summary)
        #get summaries, except for the one produced by string_input_producer
        if summary: summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
        # print_rank([scope.name for scope in n_net.scopes])
    #######################################
    # Apply Gradients and setup train op #
    #######################################
    # optimizer for unsupervised step
    # Only the CVAE (autoencoder) variables are refined by the constraint step.
    var_list = [itm for itm in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if 'CVAE' in str(itm.name)]
    reg_hyper = deepcopy(hyper_params)
    reg_hyper['initial_learning_rate'] = 1e-1
    def learning_policy_func_reg(step):
        # Schedule for the unsupervised (constraint) optimizer only.
        return lr_policies.decay_warmup(params, reg_hyper, step)
    # NOTE(review): with the default iter_size = 0, tf.floormod divides by
    # zero — presumably accumulate_step is always set > 0 in practice; confirm.
    iter_size = params.get('accumulate_step', 0)
    skip_update_cond = tf.cast(tf.floormod(global_step, tf.constant(iter_size, dtype=tf.int32)), tf.bool)
    if params['IMAGE_FP16']:
        opt_type='mixed'
    else:
        opt_type=tf.float32
    reg_opt, learning_rate = optimizers.optimize_loss(constr_loss, 'Momentum',
                                                      {'momentum': 0.9}, learning_policy_func_reg, var_list=var_list, run_params=params, hyper_params=reg_hyper, iter_size=iter_size, dtype=opt_type,
                                                      loss_scaling=1.0,
                                                      skip_update_cond=skip_update_cond,
                                                      on_horovod=True, model_scopes=None)
    # optimizer for supervised step
    def learning_policy_func(step):
        return lr_policies.decay_warmup(params, hyper_params, step)
    ## TODO: implement other policies in lr_policies
    # NOTE(review): learning_rate from the reg optimizer above is overwritten
    # here; only the supervised schedule is ever logged — confirm intended.
    opt_dict = hyper_params['optimization']['params']
    train_opt, learning_rate = optimizers.optimize_loss(total_loss, hyper_params['optimization']['name'],
                                                        opt_dict, learning_policy_func, run_params=params, hyper_params=hyper_params, iter_size=iter_size, dtype=opt_type,
                                                        loss_scaling=hyper_params.get('loss_scaling',1.0),
                                                        skip_update_cond=skip_update_cond,
                                                        on_horovod=True, model_scopes=n_net.scopes)
    # Gather unsupervised training ops
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    ema = tf.train.ExponentialMovingAverage(decay=0.9, num_updates=global_step)
    increment_op = tf.assign_add(global_step, 1)
    # NOTE(review): tf.group(*[reg_opt, update_ops]) passes the update_ops
    # *list* as a single argument; probably meant
    # tf.group(*([reg_opt] + update_ops)) — confirm against the TF1 API.
    with tf.control_dependencies([tf.group(*[reg_opt, update_ops])]):
        reg_op = ema.apply(var_list=var_list)
    # Gather supervised training related ops into a single one.
    # NOTE(review): increment_op is re-created here, shadowing the assignment
    # a few lines above (the first one is unused).
    increment_op = tf.assign_add(global_step, 1)
    all_ops = tf.group(*([train_opt] + update_ops + IO_ops + [increment_op]))
    with tf.control_dependencies([all_ops]):
        train_op = ema.apply(tf.trainable_variables())
    ########################
    # Setting up Summaries #
    ########################
    # Stats and summaries
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    # if hvd.rank() == 0:
    summary_writer = tf.summary.FileWriter(os.path.join(params['checkpt_dir'], str(hvd.rank())), sess.graph)
    # Add Summary histograms for trainable variables and their gradients
    if params['debug']:
        # Transpose NCHW -> NHWC for tf.summary.image.
        predic_inverter = tf.transpose(n_net.model_output['inverter'], perm=[0,2,3,1])
        tf.summary.image("output_inverter", predic_inverter, max_outputs=2)
        predic_decoder_RE = tf.transpose(n_net.model_output['decoder_RE'], perm=[0,2,3,1])
        predic_decoder_IM = tf.transpose(n_net.model_output['decoder_IM'], perm=[0,2,3,1])
        tf.summary.image("output_decoder_RE", predic_decoder_RE, max_outputs=2)
        tf.summary.image("output_decoder_IM", predic_decoder_IM, max_outputs=2)
        new_labels = tf.unstack(labels, axis=1)
        for label, tag in zip(new_labels, ['potential', 'probe_RE', 'probe_IM']):
            label = tf.expand_dims(label, axis=-1)
            # label = tf.transpose(label, perm=[0,2,3,1])
            tf.summary.image(tag, label, max_outputs=2)
        tf.summary.image("inputs", tf.transpose(tf.reduce_mean(images, axis=1, keepdims=True), perm=[0,2,3,1]), max_outputs=4)
        summary_merged = tf.summary.merge_all()
    ###############################
    # Setting up training session #
    ###############################
    #Initialize variables
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    # Sync
    print_rank('Syncing horovod ranks...')
    sync_op = hvd.broadcast_global_variables(0)
    sess.run(sync_op)
    # prefill pipeline first
    print_rank('Prefilling I/O pipeline...')
    for i in range(len(IO_ops)):
        sess.run(IO_ops[:i + 1])
    # Saver and Checkpoint restore
    checkpoint_file = os.path.join(params[ 'checkpt_dir' ], 'model.ckpt')
    saver = tf.train.Saver(max_to_keep=None, save_relative_paths=True)
    # Check if training is a restart from checkpoint
    if params['restart'] and ckpt is not None:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print_rank("Restoring from previous checkpoint @ step=%d" % last_step)
    # Train
    train_elf = TrainHelper_YNet(params, saver, summary_writer, n_net.get_ops(), last_step=last_step, log_freq=params['log_frequency'])
    saveStep = params['save_step']
    validateStep = params['validate_step']
    summaryStep = params['summary_step']
    train_elf.run_summary()
    maxSteps = params[ 'max_steps' ]
    logFreq = params[ 'log_frequency' ]
    traceStep = params[ 'trace_step' ]
    maxTime = params.get('max_time', 1e12)
    # Every inner_loop supervised steps, replay buffered batches through the
    # unsupervised reg_op (only when inner_loop < 100, see bottom of the loop).
    inner_loop = hyper_params.get('inner_iter', 1e12)
    val_results = []
    loss_results = []
    loss_value = 1e10
    val = 1e10
    # current_batch holds the most recently consumed input batch so it can be
    # re-fed to the constraint optimizer via feed_dict.
    current_batch = np.zeros(images.shape.as_list(), dtype=np.float32)
    batch_buffer = []
    while train_elf.last_step < maxSteps :
        # batch_buffer.append(images.eval(session=sess))
        train_elf.before_run()
        # Flags deciding what extra work to do on this step; fetches are
        # combined so each branch still runs exactly one session step.
        doLog = bool(train_elf.last_step % logFreq == 0)
        doSave = bool(train_elf.last_step % saveStep == 0)
        doSumm = bool(train_elf.last_step % summaryStep == 0 and params['debug'])
        doTrace = bool(train_elf.last_step == traceStep and params['gpu_trace'])
        doValidate = bool(train_elf.last_step % validateStep == 0)
        # Wall-clock budget exceeded -> finish gracefully below.
        doFinish = bool(train_elf.start_time - params['start_time'] > maxTime)
        if train_elf.last_step == 1 and params['debug']:
            _, summary, current_batch = sess.run([train_op, summary_merged, images])
            train_elf.write_summaries( summary )
        elif not doLog and not doSave and not doTrace and not doSumm:
            _, current_batch = sess.run([train_op, images])
        elif doLog and not doSave and not doSumm:
            _, lr, loss_value, aux_losses, current_batch = sess.run( [ train_op, learning_rate, total_loss, indv_losses, images])
            loss_results.append((train_elf.last_step, loss_value))
            train_elf.log_stats( loss_value, aux_losses, lr)
        elif doLog and doSumm and doSave :
            _, summary, loss_value, aux_losses, lr, current_batch = sess.run( [ train_op, summary_merged, total_loss, indv_losses,
                                                                                learning_rate, images ])
            loss_results.append((train_elf.last_step, loss_value))
            train_elf.log_stats( loss_value, aux_losses, lr )
            train_elf.write_summaries( summary )
            if hvd.rank( ) == 0 :
                saver.save(sess, checkpoint_file, global_step=train_elf.last_step)
            print_rank('Saved Checkpoint.')
        elif doLog and doSumm :
            _, summary, loss_value, aux_losses, lr, current_batch = sess.run( [ train_op, summary_merged, total_loss, indv_losses, learning_rate, images ])
            loss_results.append((train_elf.last_step, loss_value))
            train_elf.log_stats( loss_value, aux_losses, lr )
            train_elf.write_summaries( summary )
        elif doSumm:
            _, summary, current_batch = sess.run([train_op, summary_merged, images])
            train_elf.write_summaries( summary )
        elif doSave :
            if hvd.rank( ) == 0 :
                saver.save(sess, checkpoint_file, global_step=train_elf.last_step)
            print_rank('Saved Checkpoint.')
        elif doTrace :
            sess.run(train_op, options=run_options, run_metadata=run_metadata)
            train_elf.save_trace(run_metadata, params[ 'trace_dir' ], params[ 'trace_step' ] )
            train_elf.before_run()
        # Here we do validation:
        if doValidate:
            val = validate(network_config, hyper_params, params, sess, dset, num_batches=50)
            val_results.append((train_elf.last_step,val))
        if doFinish:
            # Early exit: tear down the graph/session and return what we have.
            #val = validate(network_config, hyper_params, params, sess, dset, num_batches=50)
            #val_results.append((train_elf.last_step, val))
            tf.reset_default_graph()
            tf.keras.backend.clear_session()
            sess.close()
            return val_results, loss_results
        # Diverged loss -> abandon training (see TrainHelper.nanloss warning).
        if np.isnan(loss_value):
            break
        # Unsupervised constraint refinement: buffer each consumed batch and,
        # every inner_loop steps, replay the buffer through reg_op by feeding
        # the stored batches back in place of the live input tensor.
        if inner_loop < 100:
            batch_buffer.append(current_batch)
            if bool(train_elf.last_step % inner_loop == 0 and train_elf.last_step >= 10):
                for itr, current_batch in enumerate(batch_buffer):
                    _, constr_val = sess.run([reg_op, constr_loss], feed_dict={psi_out_true:current_batch})
                    if doLog:
                        print_rank('\t\tstep={}, reg iter={}, constr_loss={:2.3e}'.format(train_elf.last_step, itr, constr_val))
                del batch_buffer
                batch_buffer = []
    val_results.append((train_elf.last_step,val))
    tf.reset_default_graph()
    tf.keras.backend.clear_session()
    sess.close()
    return val_results, loss_results
def validate(network_config, hyper_params, params, sess, dset, num_batches=10):
"""
Runs validation with current weights
:param params:
:param hyper_params:
:param network_config:
:param sess:
:param num_batches: default 100.
:return:
"""
print_rank("Running Validation ..." )
with tf.device(params['CPU_ID']):
# Get Test data
dset.set_mode(mode='eval')
images, labels = dset.minibatch()
# Staging images on host
staging_op, (images, labels) = dset.stage([images, labels])
with tf.device('/gpu:%d' % hvd.local_rank()):
# Copy images from host to device
gpucopy_op, (images, labels) = dset.stage([images, labels])
IO_ops = [staging_op, gpucopy_op]
scope = 'model'
summary = False
# prefill pipeline first
print_rank('Prefilling I/O pipeline...')
for i in range(len(IO_ops)):
sess.run(IO_ops[:i + 1])
with tf.variable_scope(scope, reuse=True) as _:
# Setup Neural Net
params['IMAGE_FP16'] = False
if images.dtype != tf.float32:
images = tf.cast(images, tf.float32)
# Setup Neural Net
if params['network_class'] == 'resnet':
n_net = network.ResNet(scope, params, hyper_params, network_config, images, labels,
operation='eval', summary=False, verbose=False)
if params['network_class'] == 'cnn':
n_net = network.ConvNet(scope, params, hyper_params, network_config, images, labels,
operation='eval', summary=False, verbose=False)
if params['network_class'] == 'fcdensenet':
n_net = network.FCDenseNet(scope, params, hyper_params, network_config, images, labels,
operation='eval', summary=False, verbose=False)
if params['network_class'] == 'fcnet':
n_net = network.FCNet(scope, params, hyper_params, network_config, images, labels,
operation='eval', summary=summary, verbose=True)
if params['network_class'] == 'YNet':
n_net = network.YNet(scope, params, hyper_params, network_config, images, labels,
operation='eval', summary=summary, verbose=True)
# Build it and propagate images through it.
n_net.build_model()
# Calculate predictions
if hyper_params['network_type'] == 'regressor' or hyper_params['network_type'] == 'classifier':
labels_shape = labels.get_shape().as_list()
layer_params={'bias':labels_shape[-1], 'weights':labels_shape[-1],'regularize':False}
logits = losses.fully_connected(n_net, layer_params, params['batch_size'],
name='linear',reuse=None)
else:
pass
#TODO: implement prediction layer for hybrid network
# Do evaluation
result = None
if hyper_params['network_type'] == 'regressor':
validation_error = tf.losses.mean_squared_error(labels, predictions=logits, reduction=tf.losses.Reduction.NONE)
# Average validation error over the batches
errors = np.array([sess.run(validation_error) for _ in range(num_batches)])
errors = errors.reshape(-1, params['NUM_CLASSES'])
avg_errors = errors.mean(0)
result = avg_errors
print_rank('Validation MSE: %s' % format(avg_errors))
elif hyper_params['network_type'] == 'classifier':
labels = tf.argmax(labels, axis=1)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
in_top_1_op = tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32)
in_top_5_op = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
eval_ops = [in_top_1_op, in_top_5_op, cross_entropy]
output = np.array([sess.run(eval_ops) for _ in range(num_batches)])
accuracy = output[:,:2]
val_loss = output[:,-1]
accuracy = accuracy.sum(axis=(0,-1))/(num_batches*params['batch_size'])*100
val_loss = val_loss.sum()/(num_batches*params['batch_size'])
result = accuracy
print_rank('Validation Accuracy (.pct), Top-1: %2.2f , Top-5: %2.2f, Loss: %2.2f' %(accuracy[0], accuracy[1], val_loss))
elif hyper_params['network_type'] == 'hybrid':
#TODO: implement evaluation call for hybrid network
print('not implemented')
elif hyper_params['network_type'] == 'YNet':
loss_params = hyper_params['loss_function']
#model_output = tf.concat([n_net.model_output[subnet] for subnet in ['inverter', 'decoder_RE', 'decoder_IM']], axis=1)
model_output = [n_net.model_output[subnet] for subnet in ['inverter', 'decoder_RE', 'decoder_IM']]
labels = [tf.expand_dims(itm, axis=1) for itm in tf.unstack(labels, axis=1)]
if loss_params['type'] == 'MSE_PAIR':
errors = [tf.losses.mean_pairwise_squared_error(tf.cast(label, tf.float32), out)
for label, out in zip(labels, model_output)]
errors = tf.stack(errors)
loss_label= loss_params['type']
elif loss_params['type'] == 'ABS_DIFF':
loss_label= 'ABS_DIFF'
errors = tf.losses.absolute_difference(tf.cast(labels, tf.float32), tf.cast(model_output, tf.float32), reduction=tf.losses.Reduction.SUM)
elif loss_params['type'] == 'MSE':
errors = tf.losses.mean_squared_error(tf.cast(labels, tf.float32), tf.cast(model_output, tf.float32), reduction=tf.losses.Reduction.SUM)
loss_label= loss_params['type']
errors = tf.expand_dims(errors,axis=0)
error_averaging = hvd.allreduce(errors)
if num_batches is not None:
num_samples = num_batches
elif num_batches > dset.num_samples:
num_samples = dset.num_samples
errors = np.array([sess.run([IO_ops,error_averaging])[-1] for i in range(num_samples//params['batch_size'])])
result = errors.mean(0)
print_rank('Validation Reconstruction Error %s: '% loss_label, result)
elif hyper_params['network_type'] == 'inverter':
loss_params = hyper_params['loss_function']
if labels.shape.as_list()[1] > 1:
labels, _, _ = [tf.expand_dims(itm, axis=1) for itm in tf.unstack(labels, axis=1)]
if loss_params['type'] == 'MSE_PAIR':
errors = tf.losses.mean_pairwise_squared_error(tf.cast(labels, tf.float32), tf.cast(n_net.model_output, tf.float32))
loss_label= loss_params['type']
elif loss_params['type'] == 'rMSE':
labels = tf.cast(labels, tf.float32)
l2_true = tf.sqrt(tf.reduce_sum(labels ** 2, axis=[1,2,3]))
l2_output = tf.sqrt(tf.reduce_sum(n_net.model_output **2, axis = [1,2,3]))
errors = tf.reduce_mean(tf.abs(l2_true - l2_output)/l2_true)
errors *= 100
loss_label= loss_params['type']
else:
loss_label= 'ABS_DIFF'
errors = tf.losses.absolute_difference(tf.cast(labels, tf.float32), tf.cast(n_net.model_output, tf.float32), reduction=tf.losses.Reduction.MEAN)
errors = tf.expand_dims(errors,axis=0)
error_averaging = hvd.allreduce(errors, average=True)
if num_batches is not None:
num_samples = num_batches
else:
num_samples = dset.num_samples
errors = np.array([sess.run([IO_ops,error_averaging])[-1] for i in range(num_samples//params['batch_size'])])
result = errors.mean()
print_rank('Validation Reconstruction Error %s: %3.3e' % (loss_label, result))
tf.summary.scalar("Validation_loss_label_%s" % loss_label, tf.constant(errors.mean()))
return result
def validate_ckpt(network_config, hyper_params, params, num_batches=None,
                  last_model= False, sleep=-1):
    """
    Evaluate saved checkpoints from params['checkpt_dir'] using the
    exponential-moving-average shadow weights restored from disk.

    For each checkpoint found (or only the newest one when ``last_model`` is
    True), restores the EMA variables and runs the evaluation branch that
    matches ``hyper_params['network_type']``:

    * ``regressor``  — prints per-class mean-squared error.
    * ``classifier`` — prints Top-1 / Top-5 accuracy.
    * ``inverter`` / ``YNet`` — prints a reconstruction error, or, when
      ``params['output']`` is set, dumps raw outputs/labels to ``.npy`` files.
    * ``hybrid``     — not implemented (no-op).

    :param network_config: architecture description forwarded to the model class.
    :param hyper_params: dict with at least 'network_type' and, for
        reconstruction networks, 'loss_function'.
    :param params: runtime parameter dict (batch_size, checkpt_dir, filetype,
        CPU_ID, output, ...).
    :param num_batches: number of minibatches to evaluate; if None, falls back
        to dset.num_samples in the reconstruction branches.
    :param last_model: if True, evaluate only the most recent checkpoint.
    :param sleep: seconds to sleep between checkpoints; for a single eval pass
        use sleep < 0 (the loop breaks after the first checkpoint).
    :return: None. Results are printed (and optionally saved as .npy files).
    """
    #########################
    # Start Session         #
    #########################
    # Session config: pin this Horovod rank to its local GPU and keep intra-op
    # parallelism at 1 so multiple ranks do not oversubscribe host cores.
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False,
                            )
    config.gpu_options.visible_device_list = str(hvd.local_rank())
    config.intra_op_parallelism_threads = 1
    # config.inter_op_parallelism_threads = 12
    sess = tf.Session(config=config)
    # Build the input pipeline on the host CPU.
    with tf.device(params['CPU_ID']):
        with tf.name_scope('Input') as _:
            if params['filetype'] == 'tfrecord':
                dset = inputs.DatasetTFRecords(params, dataset=params['dataset'], debug=False)
            elif params['filetype'] == 'lmdb':
                dset = inputs.DatasetLMDB(params, dataset=params['dataset'], debug=params['debug'])
            images, labels = dset.minibatch()
            # Staging images on host
            staging_op, (images, labels) = dset.stage([images, labels])
    with tf.device('/gpu:%d' % hvd.local_rank()):
        # Copy images from host to device
        gpucopy_op, (images, labels) = dset.stage([images, labels])
        IO_ops = [staging_op, gpucopy_op]
        scope='model'
        with tf.variable_scope(
                scope,
                # Force all variables to be stored as float32
                custom_getter=float32_variable_storage_getter) as _:
            # Setup Neural Net
            if params['network_class'] == 'resnet':
                n_net = network.ResNet(scope, params, hyper_params, network_config, tf.cast(images, tf.float32), labels,
                                       operation='eval_ckpt', summary=False, verbose=False)
            if params['network_class'] == 'cnn':
                n_net = network.ConvNet(scope, params, hyper_params, network_config, tf.cast(images, tf.float32), labels,
                                        operation='eval_ckpt', summary=False, verbose=False)
            if params['network_class'] == 'fcdensenet':
                n_net = network.FCDenseNet(scope, params, hyper_params, network_config, tf.cast(images, tf.float32),
                                           labels, operation='eval_ckpt', summary=False, verbose=True)
            if params['network_class'] == 'fcnet':
                n_net = network.FCNet(scope, params, hyper_params, network_config, images, labels,
                                      operation='eval_ckpt', summary=False, verbose=True)
            if params['network_class'] == 'YNet':
                n_net = network.YNet(scope, params, hyper_params, network_config, images, labels,
                                     operation='eval_ckpt', summary=False, verbose=True)
            # Build it and propagate images through it.
            n_net.build_model()
            # Calculate predictions.
            # BUGFIX: the regressor/classifier branches below reference `logits`,
            # which was previously never defined (the prediction head had been
            # commented out), causing a NameError at runtime. Rebuild the linear
            # head here, mirroring the sibling eval path. NOTE(review): this
            # assumes the checkpoint contains the 'linear' layer variables, i.e.
            # the same head was built during training — TODO confirm.
            if hyper_params['network_type'] == 'regressor' or hyper_params['network_type'] == 'classifier':
                labels_shape = labels.get_shape().as_list()
                layer_params={'bias':labels_shape[-1], 'weights':labels_shape[-1],'regularize':False}
                logits = losses.fully_connected(n_net, layer_params, params['batch_size'],
                                                name='linear',reuse=None)
            else:
                pass
    # Initialize variables
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    # Sync: broadcast rank-0 variables so all ranks start identically.
    sync_op = hvd.broadcast_global_variables(0)
    sess.run(sync_op)
    # prefill pipeline first
    print_rank('Prefilling I/O pipeline...')
    for i in range(len(IO_ops)):
        sess.run(IO_ops[:i + 1])
    # Restore from the exponential-moving-average shadow variables: map EMA
    # names back to the trainable variables for the Saver.
    ema = tf.train.ExponentialMovingAverage(0.9999)
    vars_to_restore = ema.variables_to_restore()
    saver = tf.train.Saver(var_list=vars_to_restore)
    # saver = tf.train.Saver()
    # Find models in checkpoint directory: use the .meta files to recover the
    # global-step numbers, then sort so checkpoints are evaluated in order.
    dirs = np.array(os.listdir(params['checkpt_dir']))
    pattern = re.compile("meta")
    steps = np.array([bool(re.search(pattern,itm)) for itm in dirs])
    saved_steps = dirs[steps]
    model_steps = np.array([int(itm.split('.')[1].split('-')[-1]) for itm in saved_steps])
    model_steps = np.sort(model_steps)
    ckpt_paths = [os.path.join(params['checkpt_dir'], "model.ckpt-%s" % step) for step in model_steps]
    if last_model:
        ckpt_paths = [ckpt_paths[-1]]
        model_steps = [model_steps[-1]]
    if params['output']:
        # Directory for .npy dumps, named after the checkpoint directory.
        output_dir = os.path.join(os.getcwd(), 'outputs_%s' % params['checkpt_dir'].split('/')[-1])
        if not os.path.exists(output_dir):
            tf.gfile.MakeDirs(output_dir)
    # Validate Models
    # NOTE(review): the branches below add new graph ops on every loop
    # iteration, so the TF graph grows with the number of checkpoints evaluated.
    for ckpt, last_step in zip(ckpt_paths, model_steps):
        #
        saver.restore(sess, os.path.join(params['checkpt_dir'],"model.ckpt-%s" %format(last_step)))
        print_rank("Restoring from previous checkpoint @ step=%d" % last_step)
        # Validate model
        # TODO: add hybrid validation and check that it works correctly for previous
        if hyper_params['network_type'] == 'regressor':
            validation_error = tf.losses.mean_squared_error(labels, predictions=logits, reduction=tf.losses.Reduction.NONE)
            # Average validation error over batches
            errors = np.array([sess.run([IO_ops, validation_error])[-1] for _ in range(num_batches)])
            errors = errors.reshape(-1, params['NUM_CLASSES'])
            avg_errors = errors.mean(0)
            print_rank('Validation MSE: %s' % format(avg_errors))
        elif hyper_params['network_type'] == 'classifier':
            # Average validation accuracies over batches
            label = tf.argmax(labels, axis=1)
            in_top_1_op = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32)
            in_top_5_op = tf.cast(tf.nn.in_top_k(logits, label, 5), tf.float32)
            eval_ops = [in_top_1_op,in_top_5_op]
            output = np.array([sess.run([IO_ops,eval_ops])[-1] for _ in range(num_batches)])
            accuracy = output.sum(axis=(0,-1))/(num_batches*params['batch_size'])*100
            print_rank('Validation Accuracy (.pct), Top-1: %2.2f , Top-5: %2.2f' %(accuracy[0], accuracy[1]))
        elif hyper_params['network_type'] == 'hybrid':
            pass
        elif hyper_params['network_type'] == 'inverter':
            # Multi-channel labels: keep only the first channel as the target.
            if labels.shape.as_list()[1] > 1:
                labels, _, _ = [tf.expand_dims(itm, axis=1) for itm in tf.unstack(labels, axis=1)]
            loss_params = hyper_params['loss_function']
            if params['output']:
                # Dump raw outputs/labels to .npy files for offline analysis.
                output = tf.cast(n_net.model_output, tf.float32)
                print('output shape',output.get_shape().as_list())
                if num_batches is not None:
                    num_samples = num_batches
                else:
                    num_samples = dset.num_samples
                for idx in range(num_samples):
                    output_arr, label_arr = sess.run([IO_ops, n_net.model_output, labels])[-2:]
                    #label_arr = sess.run([IO_ops, labels])[-1]
                    np.save(os.path.join(output_dir,'label_%d_%d_%s.npy' % (idx, hvd.rank(), format(last_step))), label_arr)
                    np.save(os.path.join(output_dir,'output_%d_%d_%s.npy' % (idx, hvd.rank(), format(last_step))), output_arr)
            else:
                if loss_params['type'] == 'MSE_PAIR':
                    errors = tf.losses.mean_pairwise_squared_error(tf.cast(labels, tf.float32), tf.cast(n_net.model_output, tf.float32))
                    loss_label= loss_params['type']
                else:
                    loss_label= 'ABS_DIFF'
                    errors = tf.losses.absolute_difference(tf.cast(labels, tf.float32), tf.cast(n_net.model_output, tf.float32), reduction=tf.losses.Reduction.MEAN)
                # Average the scalar error across Horovod ranks.
                errors = tf.expand_dims(errors,axis=0)
                error_averaging = hvd.allreduce(errors)
                if num_batches is not None:
                    num_samples = num_batches
                else:
                    num_samples = dset.num_samples
                error = np.array([sess.run([IO_ops,error_averaging])[-1] for i in range(num_samples)])
                print_rank('Validation Reconstruction Error %s: %3.3e' % (loss_label, error.mean()))
        elif hyper_params['network_type'] == 'YNet':
            loss_params = hyper_params['loss_function']
            # Concatenate the three sub-network outputs along the channel axis.
            model_output = tf.concat([n_net.model_output[subnet] for subnet in ['inverter', 'decoder_RE', 'decoder_IM']], axis=1)
            if params['output']:
                # Dump raw outputs/labels to .npy files for offline analysis.
                output = tf.cast(model_output, tf.float32)
                print('output shape',output.get_shape().as_list())
                if num_batches is not None:
                    num_samples = num_batches
                else:
                    num_samples = dset.num_samples
                for idx in range(num_samples):
                    output_arr, label_arr = sess.run([IO_ops, model_output, labels])[-2:]
                    #label_arr = sess.run([IO_ops, labels])[-1]
                    np.save(os.path.join(output_dir,'label_%d_%d_%s.npy' % (idx, hvd.rank(), format(last_step))), label_arr)
                    np.save(os.path.join(output_dir,'output_%d_%d_%s.npy' % (idx, hvd.rank(), format(last_step))), output_arr)
            else:
                if loss_params['type'] == 'MSE_PAIR':
                    errors = tf.losses.mean_pairwise_squared_error(tf.cast(labels, tf.float32), tf.cast(model_output, tf.float32))
                    loss_label= loss_params['type']
                else:
                    loss_label= 'ABS_DIFF'
                    errors = tf.losses.absolute_difference(tf.cast(labels, tf.float32), tf.cast(model_output, tf.float32), reduction=tf.losses.Reduction.MEAN)
                #errors = tf.expand_dims(errors,axis=0)
                #error_averaging = hvd.allreduce(errors)
                # Per-rank error only: the cross-rank allreduce is disabled here.
                error_averaging = errors
                if num_batches is not None:
                    num_samples = num_batches
                else:
                    num_samples = dset.num_samples
                #error = np.array([sess.run([IO_ops,error_averaging])[-1] for i in range(4)])
                error = np.array([sess.run([IO_ops,error_averaging])[-1] for i in range(num_samples)])
                print('Rank=%d, Validation Reconstruction Error %s: %3.3e' % (hvd.rank(),loss_label, error.mean()))
                #print_rank('Validation Reconstruction Error %s: %3.3e' % (loss_label, error.mean()))
        # sleep < 0 means a single evaluation pass; otherwise pause between
        # checkpoints so new checkpoints can appear while training continues.
        if sleep < 0:
            break
        else:
            print_rank('sleeping for %d s ...' % sleep)
            time.sleep(sleep)
| 1.945313 | 2 |
tests/factories.py | lisac-usds/dsnap_rules | 4 | 12773850 | <reponame>lisac-usds/dsnap_rules<gh_stars>1-10
import factory
from dsnap_rules import models
class DisasterFactory(factory.django.DjangoModelFactory):
    """Factory producing ``dsnap_rules.Disaster`` rows with randomized fields.

    Field names mirror the model's columns; values are regenerated per build.
    """
    class Meta:
        model = 'dsnap_rules.Disaster'
    # Faker('pystr') yields a random ASCII string for free-text identifiers.
    disaster_request_no = factory.Faker('pystr')
    title = factory.Faker('pystr')
    # Cycles through State rows already in the test database; assumes State is
    # pre-populated (e.g. by a migration or fixture) — TODO confirm.
    state = factory.Iterator(models.State.objects.all())
    # NOTE(review): begin/end dates are drawn independently, so begin may fall
    # after end; tests needing a coherent benefit window should pass explicit
    # dates instead of relying on these defaults.
    benefit_begin_date = factory.Faker('date')
    benefit_end_date = factory.Faker('date')
    residency_required = factory.Faker('boolean')
    uses_DSED = factory.Faker('boolean')
    allows_food_loss_alone = factory.Faker('boolean')
class ApplicationPeriodFactory(factory.django.DjangoModelFactory):
    """Factory producing ``dsnap_rules.ApplicationPeriod`` rows with random dates."""
    class Meta:
        model = 'dsnap_rules.ApplicationPeriod'
    # NOTE(review): all four dates are independent random values, so ordering
    # constraints (begin <= end) are not guaranteed — pass explicit dates when
    # a test depends on a valid application/registration window.
    begin_date = factory.Faker('date')
    end_date = factory.Faker('date')
    registration_begin_date = factory.Faker('date')
    registration_end_date = factory.Faker('date')
| 2.09375 | 2 |