hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
68272da9ef5f1c688a86d4dfd33eef8b07b524c3 | 2,447 | py | Python | laceworksdk/api/run_reports.py | kiddinn/python-sdk | 23a33313f97337fddea155bcb19c8d5270fc8013 | [
"MIT"
] | 10 | 2021-03-20T18:12:16.000Z | 2022-02-14T21:33:23.000Z | laceworksdk/api/run_reports.py | kiddinn/python-sdk | 23a33313f97337fddea155bcb19c8d5270fc8013 | [
"MIT"
] | 10 | 2021-02-22T23:31:32.000Z | 2022-03-25T14:11:27.000Z | laceworksdk/api/run_reports.py | kiddinn/python-sdk | 23a33313f97337fddea155bcb19c8d5270fc8013 | [
"MIT"
] | 7 | 2021-06-18T18:17:12.000Z | 2022-03-25T13:52:14.000Z | """
Lacework Run Reports API wrapper.
"""
import logging
logger = logging.getLogger(__name__)
class RunReportsAPI(object):
    """Wrapper around the Lacework Run Reports API endpoint."""

    def __init__(self, session):
        """Build a RunReportsAPI bound to an HTTP session.

        :param session: An instance of the HttpSession class

        :return RunReportsAPI object.
        """
        super(RunReportsAPI, self).__init__()
        self._session = session

    def run_report(self, type, account_id):
        """Kick off a compliance assessment of the given type.

        :param type: A string representing the type of compliance assessment to initiate.
        :param account_id: A string representing the account identifier for which to initiate a compliance assessment.

        :return response json
        """
        logger.info(f"Initiating '{type}' compliance assessment in Lacework...")

        # The Run Report endpoint is keyed on assessment type + account id.
        api_uri = f"/api/v1/external/runReport/{type}/{account_id}"

        response = self._session.post(api_uri)

        return response.json()

    def aws(self, aws_account_id):
        """Start a compliance assessment for an AWS account.

        :param aws_account_id: A string representing which AWS account to assess.

        :return response json
        """
        return self.run_report("aws", aws_account_id)

    def azure(self, azure_tenant_id):
        """Start a compliance assessment for an Azure tenant.

        :param azure_tenant_id: A string representing which Azure tenant to assess.

        :return response json
        """
        return self.run_report("azure", azure_tenant_id)

    def gcp(self, gcp_project_id):
        """Start a compliance assessment for a GCP project.

        :param gcp_project_id: A string representing which GCP project to assess.

        :return response json
        """
        return self.run_report("gcp", gcp_project_id)

    def integration(self, integration_guid):
        """Start a compliance assessment for a Lacework integration GUID.

        :param integration_guid: A string representing the Lacework integration ID to query.

        :return response json
        """
        return self.run_report("integration", integration_guid)
| 25.489583 | 118 | 0.624029 | 2,348 | 0.959542 | 0 | 0 | 0 | 0 | 0 | 0 | 1,555 | 0.635472 |
682755414252899eae0fb1b519e05847d5ff143c | 56 | py | Python | djangoautoconf/keys_default/admin_account_template.py | weijia/djangoautoconf | 590acfdcc6a3e051a2048ba1dbf980f908a7af91 | [
"BSD-3-Clause"
] | null | null | null | djangoautoconf/keys_default/admin_account_template.py | weijia/djangoautoconf | 590acfdcc6a3e051a2048ba1dbf980f908a7af91 | [
"BSD-3-Clause"
] | null | null | null | djangoautoconf/keys_default/admin_account_template.py | weijia/djangoautoconf | 590acfdcc6a3e051a2048ba1dbf980f908a7af91 | [
"BSD-3-Clause"
] | null | null | null | admin_username = "richard"
admin_password = "richard666" | 28 | 29 | 0.803571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.375 |
68276c114e2c9758ed125fc1c18ac3b0599cfe2a | 888 | py | Python | open_box/conf/__init__.py | PeterPZhang/open-box | 987f91edf0f502d678459f7cc50070cae34accd9 | [
"MIT"
] | 2 | 2021-02-20T04:28:48.000Z | 2021-02-23T09:51:02.000Z | open_box/conf/__init__.py | PeterPZhang/open-box | 987f91edf0f502d678459f7cc50070cae34accd9 | [
"MIT"
] | null | null | null | open_box/conf/__init__.py | PeterPZhang/open-box | 987f91edf0f502d678459f7cc50070cae34accd9 | [
"MIT"
] | 1 | 2021-02-22T06:52:48.000Z | 2021-02-22T06:52:48.000Z | import json
class ConfigObject(dict):
    """A dict subclass exposing its keys as attributes.

    Nested dicts (including dicts inside lists) are recursively wrapped in
    ConfigObject, so ``cfg.db.host`` works as well as ``cfg['db']['host']``.
    Missing attributes deliberately resolve to None instead of raising
    AttributeError.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Re-walk the raw values so nested dicts become ConfigObject instances.
        self.load_attributes(kwargs)

    def load_attributes(self, d):
        """Recursively wrap dict values (and dicts inside lists) from *d*."""
        for k, v in d.items():
            if isinstance(v, dict):
                self[k] = self.__class__(**v)
            elif isinstance(v, list):
                vs = []
                for i in v:
                    if isinstance(i, dict):
                        vs.append(self.__class__(**i))
                    else:
                        vs.append(i)
                self[k] = vs

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails; fall back to the
        # dict entry, or None for unknown names (lenient by design).
        try:
            return self[name]
        except KeyError:  # fix: narrowed from a blanket `except Exception`
            return None

    def __setattr__(self, key, value):
        self[key] = value

    def __str__(self):
        return json.dumps(self, indent=4, ensure_ascii=False)
| 24 | 61 | 0.480856 | 872 | 0.981982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
682899166e4668f5ec87c91ad26dfdd76110e5e1 | 2,523 | py | Python | discordbot.py | reowoon/discordpy-startup | 9922c48569277b18d74af9bc889aea6b1c54e424 | [
"MIT"
] | null | null | null | discordbot.py | reowoon/discordpy-startup | 9922c48569277b18d74af9bc889aea6b1c54e424 | [
"MIT"
] | null | null | null | discordbot.py | reowoon/discordpy-startup | 9922c48569277b18d74af9bc889aea6b1c54e424 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import os
from discord.ext import tasks
from datetime import datetime
# Bot with '&' command prefix; the token comes from the environment.
bot = commands.Bot(command_prefix='&')
token = os.environ['DISCORD_BOT_TOKEN']
# NOTE(review): get_guild() is evaluated at import time, before bot.run()
# has logged in and filled the cache, so this module-level `guild` is
# presumably None -- verify, and prefer ctx.guild inside commands.
guild = bot.get_guild(417245684656373766)
# Status (presence) setup
@bot.event
async def on_ready():
    """Set the streaming presence and post a greeting once the bot is up."""
    greeting_channel = bot.get_channel(417245684656373768)
    streaming = discord.Activity(
        name='&help | Pornhub', type=discord.ActivityType.streaming)
    await bot.change_presence(activity=streaming)
    await greeting_channel.send('おはようございます!')
@bot.command()
async def ping(ctx):
    """Reply with 'pong' as a liveness check."""
    reply = 'pong'
    await ctx.send(reply)
# Speak: relay a message through the bot
@bot.command()
async def rrr(ctx, arg1, arg2):
    """Make the bot say *arg2* in the channel whose id is *arg1* (owner only)."""
    owner_id = 540536805099831299
    if ctx.author.id != owner_id:
        # Anyone other than the owner gets turned away.
        await ctx.send('君には使えないよ!')
        return
    target = bot.get_channel(int(arg1))
    await target.send(arg2)
# Out: give a member the banned role
@bot.command()
async def out(ctx, arg: discord.Member):
    """Give *arg* the ban role (owner only).

    :param arg: the member to lock out.
    """
    if ctx.author.id != 540536805099831299:
        await ctx.send('君には使えないよ!')
        return
    # Fix: the module-level `guild` is fetched before login and is None,
    # which made this command crash; use the guild the command ran in.
    role = ctx.guild.get_role(714777128079720478)
    await arg.add_roles(role)
    await ctx.send(arg.mention+'を出禁にしました!')
# Change the bot's status text
@bot.command()
async def stats(ctx, arg):
    """Set the bot's streaming status text to *arg*.

    For one specific author the text is first "censored" through character
    substitutions before being applied.
    """
    if ctx.author.id == 275574408372944897:
        arg1 = arg.replace('影','禿').replace('か','は').replace('カ','ハ').replace('k','h').replace('K','H').replace('K','H').replace('k','h')
        activity = discord.Activity(name='&help | '+arg1,type=discord.ActivityType.streaming)
        await bot.change_presence(activity=activity)
        await ctx.send('ステータスを '+arg1+'を配信中 にしました!')
        # Fix: stop here -- previously control fell through and immediately
        # overwrote the censored status with the raw text below.
        return
    activity = discord.Activity(name='&help | '+arg,type=discord.ActivityType.streaming)
    await bot.change_presence(activity=activity)
    await ctx.send('ステータスを '+arg+'を配信中 にしました!')
# Rename the premium (pinned) voice channel
@bot.command()
@commands.has_role('⚜️上級ロメダ民')
async def lcname(ctx, arg):
    """Rename the pinned channel to the (censored) given name."""
    # Apply the same character-substitution "censor" used elsewhere.
    arg = arg.replace('影','禿').replace('か','は').replace('カ','ハ').replace('k','h').replace('K','H').replace('K','H').replace('k','h')
    pinned_channel = bot.get_channel(801398685828382751)
    await pinned_channel.edit(name='📍'+arg)
    await ctx.send('固定チャンネル名を📍'+arg+'にしました!')
# Rename the server
@bot.command()
@commands.has_role('⚜️上級ロメダ民')
async def sname(ctx, arg):
    """Rename the server to '&ROMEDA-<arg>' (premium role only)."""
    # Fix: `guild = guild` raised UnboundLocalError unconditionally (the
    # assignment makes `guild` local before it is read). Use the guild this
    # command was invoked in instead.
    guild = ctx.guild
    await guild.edit(name='&ROMEDA-'+arg)
    await ctx.send('サーバー名を&ROMEDA-'+arg+'にしました!')
# Change a member's nickname
@bot.command()
@commands.has_role('👑KING OF ROMEDA')
async def nick(ctx, arg1: discord.Member, arg2):
    """Append *arg2* to *arg1*'s username and set it as their nickname."""
    protected_id = 714776261410553907
    if arg1.id == protected_id:
        # This particular member is off limits.
        await ctx.send('やめてください!')
        return
    await arg1.edit(nick=arg1.name + arg2)
# Start the bot (blocks until shutdown).
bot.run(token)
| 29.682353 | 137 | 0.671027 | 0 | 0 | 0 | 0 | 2,470 | 0.857639 | 2,219 | 0.770486 | 742 | 0.257639 |
682938278246a7af352a4330807844b209118da3 | 7,225 | py | Python | mplStyle/types/lib.py | khanfarhan10/mplStyle | f657f54c6c101811b8bf0c44f4b16d4f4926685d | [
"BSD-3-Clause"
] | 39 | 2015-03-08T23:05:01.000Z | 2022-02-07T16:03:35.000Z | mplStyle/types/lib.py | khanfarhan10/mplStyle | f657f54c6c101811b8bf0c44f4b16d4f4926685d | [
"BSD-3-Clause"
] | null | null | null | mplStyle/types/lib.py | khanfarhan10/mplStyle | f657f54c6c101811b8bf0c44f4b16d4f4926685d | [
"BSD-3-Clause"
] | 23 | 2015-03-08T19:56:59.000Z | 2021-07-15T15:16:26.000Z | #===========================================================================
#
# Copyright (c) 2014, California Institute of Technology.
# U.S. Government Sponsorship under NASA Contract NAS7-03001 is
# acknowledged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#===========================================================================
"""A library of top-level style functions.
"""
__version__ = "$Revision: #1 $"
#===========================================================================
import os
import os.path as path
from .SubStyle import SubStyle
#===========================================================================
__all__ = [
'cleanupFilename',
'mergeDicts',
'resolveDefaults',
'stylePath',
]
#===========================================================================
# Single translation table: each problematic character maps to its safe
# replacement; '&' expands to the multi-character '_and_'.  None of the
# replacement strings contain mapped characters, so one translate() pass
# is equivalent to the old chain of eleven str.replace() calls.
_CLEANUP_TABLE = str.maketrans( {
   ' ' : '_',
   '/' : '_',
   '\\' : '_',
   '!' : '_',
   '*' : '_',
   '`' : '_',
   "'" : '_',
   '"' : '_',
   '{' : '(',
   '}' : ')',
   '&' : '_and_',
} )

def cleanupFilename( fname ):
   """: Make the filename usable.

   = INPUT VARIABLES
   - fname    Given a filename, clean-it up to make sure we can use it with
              the file system.

   = RETURN VALUE
   - Returns a cleaned up form of the input file name.
   """
   # One C-level pass instead of eleven chained replace() scans.
   return fname.translate( _CLEANUP_TABLE )
#===========================================================================
# For internal use only.
def mergeDicts( d1, d2 ):
   """: Recursively merge two dictionary data structures.

   Entries from d2 win; entries whose values are themselves dictionaries
   (or SubStyle wrappers) are merged recursively rather than replaced
   wholesale.  Neither input is modified.

   = INPUT VARIABLES
   - d1    The base dictionary.
   - d2    The dictionary whose entries are layered on top of d1.

   = RETURN VALUE
   - Returns a new dictionary with the union of d1 and d2.
   """
   merged = dict( d1 )

   for key, value in d2.items():
      if key in d1:
         # Unwrap SubStyle containers so they merge like plain dicts.
         if isinstance( value, SubStyle ):
            value = value.kwargs()

         if isinstance( value, dict ):
            base = d1[ key ]
            if isinstance( base, SubStyle ):
               base = base.kwargs()
            value = mergeDicts( base, value )

      merged[ key ] = value

   return merged
#===========================================================================
# For internal use only.
def resolveDefaults( defaults, subNames = None, **kwargs ):
   """: Resolve a new set of defaults.

   Starting from a copy of 'defaults', each non-None keyword argument is
   layered on top (dictionary values are merged via mergeDicts rather than
   replaced), and then each named sub-property in 'subNames' -- in order --
   is merged in the same way.  The input 'defaults' is never modified.

   = INPUT VARIABLES
   - defaults   The current set of default values to resolve with.
   - subNames   A list of names of sub-properties to resolve (in the order
                to resolve them in).
   - kwargs     Optional keyword arguments to also resolve.

   = RETURN VALUE
   - Return a new dictionary of default values.
   """
   # Fix: the default for 'subNames' used to be a mutable list literal ([]),
   # which Python shares across all calls; use the None sentinel instead.
   if subNames is None:
      subNames = []

   # First duplicate the given defaults
   subDefaults = {}
   subDefaults.update( defaults )

   # Next add in any keyword arguments
   for key in kwargs:
      value = kwargs[ key ]

      # A keyword explicitly set to None means "leave the default alone".
      if value is None:
         continue

      # Unwrap SubStyle containers so they merge like plain dictionaries.
      if isinstance( value, SubStyle ):
         value = value.kwargs()

      if isinstance( value, dict ) and ( key in subDefaults ):
         other = subDefaults[ key ]
         if isinstance( other, SubStyle ):
            other = other.kwargs()
         value = mergeDicts( other, value )

      # Store the value
      subDefaults[ key ] = value

   # Finally fold in each named sub-property, in the order given.
   for name in subNames:
      if name in defaults:
         tmp = defaults[ name ]

         if tmp is None:
            continue

         if isinstance( tmp, SubStyle ):
            tmp = tmp.kwargs()

         if isinstance( tmp, dict ):
            subDefaults = mergeDicts( subDefaults, tmp )
         else:
            subDefaults[ name ] = tmp

   return subDefaults
#===========================================================================
def stylePath( envvar = 'STYLEPATH' ):
   """: Get the value of the STYLEPATH environment variable

   = INPUT VARIABLE
   - envvar   The name of the environment variable to use for the style path.

   = RETURN VALUE
   - Return a list of paths as defined by the STYLEPATH environment variable.
   """
   # A single leading '$' on the variable name is tolerated and dropped.
   name = envvar[ 1: ] if envvar.startswith( '$' ) else envvar

   entries = os.getenv( name, "" ).split( ':' )

   # Expand env vars and '~', normalize, and drop blank entries.
   return [ path.normpath( path.expanduser( path.expandvars( entry ) ) )
            for entry in entries if entry.strip() ]
#===========================================================================
| 33.142202 | 79 | 0.593356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,873 | 0.674464 |
68294eda3a4048aa6022b5b8002d5929dc0d41b6 | 12,285 | py | Python | plot.py | osamayasserr/vaccum-cleaner | d6b8ea2eedadaca001ae14b66ff55c42ff3facb8 | [
"MIT"
] | 2 | 2021-01-11T22:54:38.000Z | 2021-01-28T16:15:13.000Z | plot.py | osamayasserr/vaccum-cleaner | d6b8ea2eedadaca001ae14b66ff55c42ff3facb8 | [
"MIT"
] | null | null | null | plot.py | osamayasserr/vaccum-cleaner | d6b8ea2eedadaca001ae14b66ff55c42ff3facb8 | [
"MIT"
] | 1 | 2021-01-13T09:59:14.000Z | 2021-01-13T09:59:14.000Z | import math
import pylab
from app import StandardRobot, LeastDistanceRobot, RandomWalkRobot, runSimulation
def timeNumberPlot(title, x_label, y_label, dim_length):
    """Plot mean cleaning time vs. robot count, one curve per robot type.

    Sweeps 1-10 robots in a square room of side dim_length (100 trials per
    point, full coverage required).
    """
    robot_counts = range(1, 11)
    robot_classes = [StandardRobot, LeastDistanceRobot, RandomWalkRobot]
    for robot_class in robot_classes:
        mean_times = [
            runSimulation(count, 1.0, dim_length, dim_length, 1, 100,
                          robot_class)[0]
            for count in robot_counts
        ]
        pylab.plot(robot_counts, mean_times)
    pylab.title(title+f"\n for room size of {dim_length}x{dim_length}")
    pylab.legend(('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot'))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
def timeAspectRatioPlot(title, x_label, y_label, area, num_robots):
    """Plot mean cleaning time vs. room aspect ratio per robot type.

    Rooms keep (roughly) the same area: the width scales from 1x to 10x
    the square side, the height shrinks to compensate.
    """
    robot_classes = [StandardRobot, LeastDistanceRobot, RandomWalkRobot]
    times_per_class = [[], [], []]
    aspect_ratios = []
    side = math.sqrt(area)
    widths = [side * factor for factor in range(1, 11)]
    for width in widths:
        height = area / width
        # Label "1 : N" where N is the integer width-to-height ratio.
        aspect_ratios.append("1 : {0}".format(int(width/height)))
        for idx, robot_class in enumerate(robot_classes):
            result = runSimulation(
                num_robots, 1.0, int(width), int(height), 1, 100, robot_class)
            times_per_class[idx].append(result[0])
    for series in times_per_class:
        pylab.plot(aspect_ratios, series)
    pylab.title(
        title+f"\n for {num_robots} Robots & Area of {area}")
    pylab.legend(('StandardRobot', 'LeastDistanceRobot', "RandomWalkRobot"))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
def wasteAspectRatioPlot(title, x_label, y_label, num_robots, area):
    """Plot waste fraction (wasted time / total time) vs. room aspect ratio.

    Same room sweep as timeAspectRatioPlot, but each plotted value is
    result[1]/result[0] -- the per-run waste ratio.
    """
    robot_classes = [StandardRobot, LeastDistanceRobot, RandomWalkRobot]
    waste_per_class = [[], [], []]
    aspect_ratios = []
    side = math.sqrt(area)
    widths = [side * factor for factor in range(1, 11)]
    for width in widths:
        height = area / width
        aspect_ratios.append("1 : {0}".format(int(width/height)))
        for idx, robot_class in enumerate(robot_classes):
            result = runSimulation(
                num_robots, 1.0, int(width), int(height), 1, 100, robot_class)
            waste_per_class[idx].append(result[1]/result[0])
    for series in waste_per_class:
        pylab.plot(aspect_ratios, series)
    pylab.title(
        title+"\n for {0} Robots & Area of {1}".format(num_robots, area))
    pylab.legend(('StandardRobot', 'LeastDistanceRobot', "RandomWalkRobot"))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
def timeAreaPlot(title, x_label, y_label, num_robots):
    """Plot mean cleaning time vs. square-room side length per robot type.

    Sweeps square rooms with side 5-30 in steps of 5 (100 trials, full
    coverage).
    """
    side_lengths = range(5, 31, 5)
    robot_classes = [StandardRobot, LeastDistanceRobot, RandomWalkRobot]
    for robot_class in robot_classes:
        mean_times = [
            runSimulation(num_robots, 1.0, side, side, 1, 100, robot_class)[0]
            for side in side_lengths
        ]
        pylab.plot(side_lengths, mean_times)
    pylab.title(title+"\n for {0} Robots".format(num_robots))
    pylab.legend(('StandardRobot', 'LeastDistanceRobot', "RandomWalkRobot"))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
def consistencyPlot(title, x_label, y_label, dim_length, num_robots):
    """Plot per-trial cleaning times to show run-to-run consistency.

    Runs 16 independent single-trial simulations per robot type in the
    same dim_length x dim_length room and plots every outcome.
    """
    trial_numbers = range(16)
    robot_classes = [StandardRobot, LeastDistanceRobot, RandomWalkRobot]
    for robot_class in robot_classes:
        trial_times = [
            runSimulation(num_robots, 1.0, dim_length, dim_length, 1, 1,
                          robot_class)[0]
            for _ in trial_numbers
        ]
        pylab.plot(trial_numbers, trial_times)
    pylab.title(
        title+"\n for size of {0}x{0} & {1} Robots".format(dim_length, num_robots))
    pylab.legend(('StandardRobot', 'LeastDistanceRobot', "RandomWalkRobot"))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
def timeAreaPortionPlot(title, x_label, y_label, num_robots, robot_type):
    """Plot mean time vs. room size, one curve per coverage fraction.

    Coverage fractions sweep 0.70-1.00 in steps of 0.05; rooms are square
    with side 5-30 in steps of 5 (100 trials per point).
    """
    side_lengths = range(5, 31, 5)
    coverages = [pct / 100 for pct in range(70, 105, 5)]
    times_by_coverage = [[] for _ in coverages]
    for side in side_lengths:
        for series, coverage in zip(times_by_coverage, coverages):
            result = runSimulation(
                num_robots, 1.0, side, side, coverage, 100, robot_type)
            series.append(result[0])
    for series in times_by_coverage:
        pylab.plot(side_lengths, series)
    pylab.title(
        title+f"\n for {num_robots} bots of {robot_type.__name__} type")
    pylab.legend(("0.7", "0.75", "0.8", "0.85", "0.9", "0.95", "1.0"))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
# Not sure about it
def CoverageWasteRatio(title, x_label, y_label, num_robots, robot_type):
    """Plot waste ratio (wasted time / total time) vs. room size, one curve
    per coverage fraction -- a rough efficiency view of robot_type.
    """
    side_lengths = range(5, 31, 5)
    coverages = [pct / 100 for pct in range(70, 105, 5)]
    waste_by_coverage = [[] for _ in coverages]
    for side in side_lengths:
        for series, coverage in zip(waste_by_coverage, coverages):
            result = runSimulation(
                num_robots, 1.0, side, side, coverage, 100, robot_type)
            series.append(result[1]/result[0])
    for series in waste_by_coverage:
        pylab.plot(side_lengths, series)
    pylab.title(
        title+f"\n for {num_robots} bots of {robot_type.__name__} type")
    pylab.legend(("0.7", "0.75", "0.8", "0.85", "0.9", "0.95", "1.0"))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
def timeNumberPortionPlot(title, x_label, y_label, dim_length, robot_type):
    """
    Plots the relation between the number of robots and the average time
    taken to clean a certain portion of the room,
    (each portion is plotted as its own curve, 0.70-1.00 coverage)
    """
    num_robot_range = range(1, 11)
    coverage_percent_range = range(70, 105, 5)
    # Convert percentages (70..100) to fractions (0.70..1.00).
    coverage_percent_range = [i/100 for i in coverage_percent_range]
    # One result series per coverage fraction.
    alist, blist, clist, dlist, elist, flist, glist = ([] for i in range(7))
    coverage_percent_list = [alist, blist, clist, dlist, elist, flist, glist]
    for num_robots in num_robot_range:
        for i in range(len(coverage_percent_range)):
            result = runSimulation(
                num_robots, 1.0, dim_length, dim_length,
                coverage_percent_range[i], 100, robot_type)
            # result[0] is the mean time over the 100 trials.
            coverage_percent_list[i].append(result[0])
    for percentlist in coverage_percent_list:
        pylab.plot(num_robot_range, percentlist)
    # NOTE(review): `num_robots` here is the leaked loop variable, so the
    # title always reports the last swept value (10) regardless of input --
    # the title probably ought to mention dim_length instead; confirm intent.
    pylab.title(
        title+f"\n for {num_robots} bots of {robot_type.__name__} type")
    pylab.legend(("0.7", "0.75", "0.8", "0.85", "0.9", "0.95", "1.0"))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
def wasteAreaNumberPlot(title, x_label, y_label):
    """Plot waste ratio vs. room size for several robot-team sizes.

    One LeastDistanceRobot curve per team size (5, 10, 15, 20, 25 robots),
    swept over square rooms with side 10-50 (100 trials per point).
    """
    dim_length_range = range(10, 51, 5)
    num_robots_range = range(5, 26, 5)
    # One waste-ratio series per team size.  (Fix: the original also
    # accumulated result[0] into a `time_robots_list` that was never used;
    # that dead work has been removed.)
    waste_by_team = [[] for _ in num_robots_range]
    for dim_length in dim_length_range:
        for series, num_robots in zip(waste_by_team, num_robots_range):
            results = runSimulation(
                num_robots, 1.0, dim_length,
                dim_length, 1, 100, LeastDistanceRobot)
            # Waste ratio = wasted time / total time for this configuration.
            series.append(results[1]/results[0])
    for series in waste_by_team:
        pylab.plot(dim_length_range, series)
    pylab.title(title)
    pylab.legend(('5', '10', "15", "20", "25"))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
# Not sure about it
def NumOfBotSizeWasteRatio(title, x_label, y_label, dim_length):
    """
    Scatter average waste ratio against average cleaning time for team
    sizes of 5, 10, 15, 20 and 25 LeastDistanceRobots in one
    dim_length x dim_length room (50 trials each) -- one point per team
    size, to look for a "sweet spot" for this room.
    """
    # One single-element series per team size (waste ratio and time).
    alist, blist, clist, dlist, elist = ([] for i in range(5))
    num_robots_list = [alist, blist, clist, dlist, elist]
    t1list, t2list, t3list, t4list, t5list = ([] for i in range(5))
    time_robots_list = [t1list, t2list, t3list, t4list, t5list]
    num_robots_range = range(5, 26, 5)
    for i in range(len(num_robots_list)):
        results = runSimulation(
            num_robots_range[i], 1.0, dim_length, dim_length, 1, 50, LeastDistanceRobot)
        # results[1]/results[0] is the waste ratio; results[0] the mean time.
        num_robots_list[i].append(results[1]/results[0])
        time_robots_list[i].append(results[0])
    for i in range(len(num_robots_list)):
        # Each scatter call draws a single (time, waste-ratio) point.
        pylab.scatter(time_robots_list[i], num_robots_list[i], s=100)
    pylab.title(title)
    pylab.legend(('5', '10', "15", "20", "25"))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
# Plots
# timeNumberPlot('Number of robots & Time relation',
# 'Number of robots', 'Time (Tick)', 20)
# timeAspectRatioPlot('Aspect Ratio & Time relation',
# 'Room Aspect Ratio', 'Time', 100, 1)
# wasteAspectRatioPlot("AspectRatio & WasteRatio relation",
# "Room Aspect Ratio", "Waste Percentage", 1, 100)
# timeAreaPlot("Time & Room Area relation",
# "Length (squared)", "Time", 1)
# consistencyPlot("Consistency", "Try number", "Time", 20, 1)
# timeAreaPortionPlot('Room Portion & Time relation',
# 'Length (squared)', 'Time', 1, RandomWalkRobot)
# # CoverageWasteRatio('Coverage Percent & Size Relation','Length (squared)',
# # 'Waste Percentage', 5, LeastDistanceRobot)
# timeNumberPortionPlot("CostQualityTime", "Number of robots",
# "Time", 10, LeastDistanceRobot)
# wasteAreaNumberPlot('Waste & Size Relation', 'Length (squared)',
# 'Waste Percentage')
# # NumOfBotSizeWasteRatio('Waste & Size & Num of bots Relation\n LeastDistanceRobot',
# # 'Time', ' Waste Percentage', 20)
| 39 | 88 | 0.654945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,541 | 0.288238 |
6829fc0412ea7a5055c0ef733f3314f215ac41ec | 795 | py | Python | kf_lib_data_ingest/templates/my_ingest_package/ingest_package_config.py | kids-first/kf-lib-data-ingest | 92889efef082c64744a00a9c110d778da7383959 | [
"Apache-2.0"
] | 3 | 2018-10-30T17:56:44.000Z | 2020-05-27T16:18:05.000Z | kf_lib_data_ingest/templates/my_ingest_package/ingest_package_config.py | kids-first/kf-lib-data-ingest | 92889efef082c64744a00a9c110d778da7383959 | [
"Apache-2.0"
] | 344 | 2018-11-01T16:47:56.000Z | 2022-02-23T20:36:21.000Z | kf_lib_data_ingest/templates/my_ingest_package/ingest_package_config.py | kids-first/kf-lib-data-ingest | 92889efef082c64744a00a9c110d778da7383959 | [
"Apache-2.0"
] | 1 | 2020-08-19T21:25:25.000Z | 2020-08-19T21:25:25.000Z | """ Ingest Package Config """
# The list of entities that will be loaded into the target service. These
# should be class_name values of your target API config's target entity
# classes.
target_service_entities = [
"family",
"participant",
"diagnosis",
"phenotype",
"outcome",
"biospecimen",
"read_group",
"sequencing_experiment",
"genomic_file",
"biospecimen_genomic_file",
"sequencing_experiment_genomic_file",
"read_group_genomic_file",
]
# All paths are relative to the directory this file is in
extract_config_dir = "extract_configs"
transform_function_path = "transform_module.py"
# TODO - Replace this with your own unique identifier for the project. This
# will become CONCEPT.PROJECT.ID during the Load stage.
project = "SD_ME0WME0W"
| 27.413793 | 75 | 0.732075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 622 | 0.78239 |
682a6e3c9903c978ec62e69b0a956020a4566963 | 1,877 | py | Python | src/dbmanage/database_from_csv.py | resolutedreamer/NESLDashboard | b87c65db8bbb2e4999073f08bfd374a3eb769b3d | [
"MIT"
] | null | null | null | src/dbmanage/database_from_csv.py | resolutedreamer/NESLDashboard | b87c65db8bbb2e4999073f08bfd374a3eb769b3d | [
"MIT"
] | null | null | null | src/dbmanage/database_from_csv.py | resolutedreamer/NESLDashboard | b87c65db8bbb2e4999073f08bfd374a3eb769b3d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Written by Anthony Nguyen 2015/06/20
"""
import sqlite3
import sys
#import smap_analytics as smap_analytics
# Input CSV path: first command-line argument, else a default config file.
open_this_file = None
if len(sys.argv) == 2:
    open_this_file = sys.argv[1]
else:
    open_this_file = "config/smap_2015.csv"

# NOTE(review): Python 2 script (bare `print` statements).  The sqlite3
# connection context manager commits/rolls back but does not close the handle.
with sqlite3.connect('dashboard.db') as conn:
    c = conn.cursor()

    # Remove any existing house_layout table
    command = '''DROP TABLE IF EXISTS house_layout'''
    print command
    c.execute(command)

    # Create a new house_layout table
    command = '''CREATE TABLE house_layout
    (path VARCHAR(100) PRIMARY KEY, uuid VARCHAR(36) NOT NULL UNIQUE,
    heat_map_enable BOOLEAN, x_coord INT NOT NULL, y_coord INT NOT NULL,
    room VARCHAR(16), description VARCHAR(255), tab_type VARCHAR(16),
    channel_units VARCHAR(16))'''
    print command
    c.execute(command)

    #Parse the file of UUIDs and add each as a row
    with open(open_this_file) as f:
        for line in f.readlines():
            # Expected column order: uuid, path, heat_map_enable, x, y,
            # room, description, tab_type, channel_units -- presumably the
            # layout of config/smap_2015.csv; verify against that file.
            row = line.split(",")
            #print row
            uuid = row[0]
            path = row[1]
            heat_map_enable = row[2]
            x_coord = int(row[3])
            y_coord = int(row[4])
            room = row[5]
            description = row[6]
            tab_type = row[7]
            channel_units = row[8]

            # Insert a row of data
            # NOTE(review): SQL assembled with % string formatting.
            # Tolerable for a trusted local CSV, but parameterized queries
            # (?) would be safer and would also survive quotes in fields.
            command = '''INSERT INTO house_layout VALUES ('%s','%s','%s','%d','%d','%s','%s','%s',
            '%s')'''%(path, uuid, heat_map_enable, x_coord, y_coord, room, description, tab_type, channel_units)
            print command
            c.execute(command)

    # Print the whole table at the end to make sure it works
    c.execute("SELECT * FROM house_layout")
    print c.fetchall()

    # Save (commit) the changes
    conn.commit()
682b4349f15570d914af6d4986f32343c6454f8e | 3,746 | py | Python | RevCompLibrary.py | sayloren/fennel | b85a08b36900c23931ef2cd988798ae9e671cf8b | [
"Apache-2.0"
] | null | null | null | RevCompLibrary.py | sayloren/fennel | b85a08b36900c23931ef2cd988798ae9e671cf8b | [
"Apache-2.0"
] | null | null | null | RevCompLibrary.py | sayloren/fennel | b85a08b36900c23931ef2cd988798ae9e671cf8b | [
"Apache-2.0"
] | null | null | null | """
Script to perform RC sorting
Wren Saylor
July 5 2017
Copyright 2017 Harvard University, Wu Lab
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import pandas as pd
from FangsLibrary import run_sliding_window_for_each_nucleotide_string
from ElementLibrary import get_bedtools_features
from MethylationLibrary import collect_methylation_data_by_element
import GlobalVariables
# Methylation RCsorting
def sort_methylation_by_directionality(negStr,posStr):
    """Reorient '-' strand methylation calls and merge them with '+' strand data.

    '-' strand positions are mirrored across the element and cytosine/context
    bases are complemented, then both strands are concatenated and the
    frequency column is recomputed over the combined table.
    """
    plus_meth = collect_methylation_data_by_element(posStr)
    minus_meth = collect_methylation_data_by_element(negStr)
    # Map each position to its mirror so '-' strand locations are flipped.
    positions = range(0, GlobalVariables.num)
    flip_position = dict(zip(positions, positions[::-1]))
    # Reverse-complement lookup for single bases.
    complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N'}
    minus_meth['methLocNew'] = minus_meth.methLoc.map(flip_position)
    minus_meth['CytosineNew'] = minus_meth.Cytosine.map(complement)
    minus_meth['ContextNew'] = minus_meth.Context.map(complement)
    renamed = minus_meth[['id','methLocNew','methPer','methCov','methFreq','CytosineNew','ContextNew','tissue']]
    renamed.columns = ['id','methLoc','methPer','methCov','methFreq','Cytosine','Context','tissue']
    combined = pd.concat([plus_meth, renamed])
    # Recount frequency now that both strands share one coordinate system.
    combined['methFreqNew'] = combined.groupby(['methLoc','tissue','Cytosine'])['methLoc'].transform('count')
    result = combined[['id','methLoc','methPer','methCov','methFreqNew','Cytosine','Context','tissue']]
    result.columns = ['id','methLoc','methPer','methCov','methFreq','Cytosine','Context','tissue']
    return result
# Sliding window RCsorting
def sort_sliding_window_by_directionality(negStr,posStr):
    """Run the sliding window on both strands and sum the aligned results.

    '-' strand elements are processed via their reverse complement so that
    both strands share an orientation before the per-window sums are added.
    """
    minus_windows, minus_names = run_sliding_window_for_each_nucleotide_string(negStr['reverseComplement'], negStr['id'])
    plus_windows, _plus_names = run_sliding_window_for_each_nucleotide_string(posStr['combineString'], posStr['id'])
    combined = []
    for minus_df, plus_df in zip(minus_windows, plus_windows):
        # Concatenate side by side, then collapse identical columns by summing.
        paired = pd.concat([minus_df, plus_df], axis=1)
        combined.append(paired.groupby(paired.columns, axis=1).sum())
    return combined, minus_names
# Separate on plus and minus orientation, RCsort and return methylation and sliding window computations
def sort_elements_by_directionality(directionFeatures):
    """Split elements by strand and run the RC-sorted window (and, when any
    methylation-style graph is requested, methylation) computations."""
    strand = directionFeatures['compareBoundaries']
    minus_elements = directionFeatures[strand == '-']
    plus_elements = directionFeatures[strand == '+']
    compWindow, compNames = sort_sliding_window_by_directionality(minus_elements, plus_elements)
    # Methylation sorting is only needed for the graph types that use it.
    needs_meth = any(key in GlobalVariables.graphs for key in ('methylation', 'cluster', 'methextend'))
    groupMeth = sort_methylation_by_directionality(minus_elements, plus_elements) if needs_meth else None
    return groupMeth, compWindow, compNames
def main(directionFeatures):
    """Run reverse-complement sorting over the supplied element table.

    Returns the (groupMeth, compWindow, compNames) triple produced by
    sort_elements_by_directionality().
    """
    groupMeth, compWindow, compNames = sort_elements_by_directionality(directionFeatures)
    # print() with a single argument works under both Python 2 and 3; the
    # original used the Python-2-only print statement.
    print('Completed reverse complement sorting for {0} items, with {1} bin sorting'.format(
        len(directionFeatures.index), GlobalVariables.binDir))
    return groupMeth, compWindow, compNames
if __name__ == "__main__":
    # NOTE(review): main() requires a directionFeatures table, so running this
    # module directly raises TypeError here — confirm the intended entry point.
    main()
| 41.622222 | 141 | 0.780566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,657 | 0.442338 |
682b439a68c9aa5528add307c36e60310cd9655f | 580 | py | Python | exos/temperatures.py | afafe78/python-im | c831eb92c163084bb3c9098e40d28e7ba8600319 | [
"MIT"
] | 7 | 2016-09-29T07:20:25.000Z | 2021-03-15T15:00:06.000Z | exos/temperatures.py | afafe78/python-im | c831eb92c163084bb3c9098e40d28e7ba8600319 | [
"MIT"
] | 1 | 2018-10-08T10:52:48.000Z | 2018-10-08T10:52:48.000Z | exos/temperatures.py | afafe78/python-im | c831eb92c163084bb3c9098e40d28e7ba8600319 | [
"MIT"
] | 11 | 2017-01-12T18:50:49.000Z | 2021-02-28T22:44:29.000Z | import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
n = int(input())  # the number of temperatures to analyse
temps = input()   # the n temperatures expressed as integers ranging from -273 to 5526
temps = temps.split()
if n == 0:
    # No readings: the expected answer is 0.
    print(0)
else:
    # Track the reading closest to zero; on a tie, prefer the positive one.
    temp_ref = int(temps[0])
    for raw_value in temps:
        candidate = int(raw_value)
        closer_to_zero = abs(candidate) < abs(temp_ref)
        tie_prefers_positive = abs(candidate) == abs(temp_ref) and candidate > 0
        if closer_to_zero or tie_prefers_positive:
            temp_ref = candidate
    print(temp_ref)
| 26.363636 | 85 | 0.641379 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 216 | 0.372414 |
682c0922524d4921d715e9972404209c73d7dea6 | 694 | py | Python | omep-python-test.py | yas-sim/openvino-model-experiment-package | cb96331908b55c5ab15c7bc8c042fb53cd4a2b35 | [
"Apache-2.0"
] | 3 | 2020-07-04T02:16:42.000Z | 2020-07-05T21:21:22.000Z | omep-python-test.py | yas-sim/openvino-model-experiment-package | cb96331908b55c5ab15c7bc8c042fb53cd4a2b35 | [
"Apache-2.0"
] | null | null | null | omep-python-test.py | yas-sim/openvino-model-experiment-package | cb96331908b55c5ab15c7bc8c042fb53cd4a2b35 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import cv2
import matplotlib.pyplot as plt
from openvino.inference_engine import IECore
import openvino_model_experiment_package as omep
# Load an IR model
# (FP16 human-pose-estimation model; load_IR_model returns the IE core, the
# network, the compiled network, and the blob name/shape metadata.)
model = 'intel/human-pose-estimation-0001/FP16/human-pose-estimation-0001'
ie, net, exenet, inblobs, outblobs, inshapes, outshapes = omep.load_IR_model(model)
# Load an image and run inference
# (OpenCV reads BGR; inference is fed the raw BGR image via the first input blob.)
img_orig = cv2.imread('people.jpg')
res = omep.infer_ocv_image(exenet, img_orig, inblobs[0])
# Convert to RGB for matplotlib display (matplotlib expects RGB, not BGR).
img = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)
plt.imshow(img)
plt.show()
# Visualize the two output heatmap blobs; L2 is overlaid on the original image.
omep.display_heatmap(res['Mconv7_stage2_L2'], overlay_img=img_orig, statistics=False)
omep.display_heatmap(res['Mconv7_stage2_L1'], statistics=False)
| 27.76 | 85 | 0.795389 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.237752 |
682c4faf90aa04f1fdb70d9f1262a3c8fcfba960 | 657 | py | Python | molecule/tiff/tests/request_tile_in_container.py | girder/ansible-role-large-image | bb2fe69373bf38a2e0afd0a8ae011dabd9f1caf9 | [
"Apache-2.0"
] | 1 | 2018-09-17T17:38:41.000Z | 2018-09-17T17:38:41.000Z | molecule/tiff/tests/request_tile_in_container.py | girder/ansible-role-large-image | bb2fe69373bf38a2e0afd0a8ae011dabd9f1caf9 | [
"Apache-2.0"
] | 1 | 2020-03-05T03:18:07.000Z | 2020-03-05T03:18:07.000Z | molecule/tiff/tests/request_tile_in_container.py | girder/ansible-role-large-image | bb2fe69373bf38a2e0afd0a8ae011dabd9f1caf9 | [
"Apache-2.0"
] | null | null | null | import large_image
import urllib
import pytest
@pytest.mark.parametrize("item, output", [
    ('590346ff8d777f16d01e054c', '/tmp/Huron.Image2_JPEG2K.tif')
])
def test_tiff_tile_source(item, output):
    """Check whether large_image can return a tile with tiff sources.

    Downloads the fixture image from data.kitware.com, opens it as a tile
    source, and fetches tile (0, 0, 0).
    """
    test_url = 'https://data.kitware.com/api/v1/item/{}/download'.format(item)
    # NOTE(review): urllib.urlretrieve is Python-2-only (Python 3 moved it to
    # urllib.request.urlretrieve) — confirm the targeted interpreter.
    urllib.urlretrieve(test_url, output)
    image = large_image.getTileSource(output)
    # Make sure it is the tiff tile source
    assert isinstance(image, large_image.tilesource.TiffFileTileSource)
    # Make sure we can get a tile without an exception.
    # isinstance is the idiomatic type check (type(...) == str rejects subclasses).
    assert isinstance(image.getTile(0, 0, 0), str)
| 36.5 | 78 | 0.733638 | 0 | 0 | 0 | 0 | 607 | 0.923896 | 0 | 0 | 276 | 0.420091 |
682c8eecc709a3adc399c98e9632ec8b019beeda | 3,359 | py | Python | XSTAF/core/tool_manage.py | xcgspring/XSTAF | 68a5bec4312173f931b9024e851cefaf778e3bfd | [
"Apache-2.0"
] | 2 | 2016-03-21T01:02:57.000Z | 2018-03-16T02:25:04.000Z | XSTAF/core/tool_manage.py | xcgspring/XSTAF | 68a5bec4312173f931b9024e851cefaf778e3bfd | [
"Apache-2.0"
] | null | null | null | XSTAF/core/tool_manage.py | xcgspring/XSTAF | 68a5bec4312173f931b9024e851cefaf778e3bfd | [
"Apache-2.0"
] | null | null | null |
import os
import sys
import pickle
import traceback
from XSTAF.core.logger import LOGGER
class ToolManager(object):
    """Discovers, loads and persists the set of XSTAF tool plugins.

    Tools live as packages under the ``ToolsLocation`` directory; the names of
    known tools are persisted in a pickle file (``ToolsConfigureFile``) inside
    that directory.
    """

    def __init__(self):
        # Default settings; apply_settings() may override known keys only.
        self.settings = {"ToolsLocation" : r"tools",
                         "ToolsConfigureFile" : "config.pickle"}
        self.tool_name_list = []
        self.abs_tools_location = ""
        self.pickle_config_file = ""

    def apply_settings(self, **kwargs):
        """Override known settings; unknown keyword arguments are ignored."""
        for key, value in kwargs.items():
            if key in self.settings:
                self.settings[key] = value

    def config(self):
        """Resolve the absolute tools location and load the persisted tool list."""
        tools_location = self.settings["ToolsLocation"]
        if not os.path.isabs(tools_location):
            # Relative locations are resolved against the XSTAF package root.
            XSTAF_path = os.path.dirname(os.path.abspath(__file__))
            self.abs_tools_location = os.path.join(XSTAF_path, "..", tools_location)
        else:
            self.abs_tools_location = tools_location
        # Try to get the tool name list from the pickle file.
        self.pickle_config_file = os.path.join(self.abs_tools_location, self.settings["ToolsConfigureFile"])
        self.load_config()

    def get_tool(self, tool_module_name):
        """Import (or re-import) the named tool package and return its Tool class.

        Returns None when the tools location is missing or the module cannot
        be imported / lacks a ``Tool`` attribute.
        """
        if not os.path.isdir(self.abs_tools_location):
            LOGGER.warning("Can not find tools location: %s", self.abs_tools_location)
            return None
        # Make the tools location importable. Bug fix: guard against appending
        # a duplicate entry on every call (sys.path previously grew unboundedly).
        if self.abs_tools_location not in sys.path:
            sys.path.append(self.abs_tools_location)
        # reload() is a builtin on Python 2 only; fall back to importlib on Python 3.
        try:
            reload_module = reload
        except NameError:
            from importlib import reload as reload_module
        try:
            tool_module = __import__(tool_module_name)
            # Re-import so an updated tool on disk takes effect immediately.
            tool_module = reload_module(tool_module)
            tool = tool_module.Tool
        except (ImportError, AttributeError):
            LOGGER.info("Can not import tool: %s" % tool_module_name)
            LOGGER.debug(traceback.format_exc())
            return None
        else:
            return tool

    def load_config(self):
        """Load the persisted tool name list from the pickle config file, if any."""
        if not os.path.isfile(self.pickle_config_file):
            LOGGER.warning("Can not find config file: %s", self.pickle_config_file)
            return
        # NOTE(review): text-mode ('r') pickle I/O is a Python-2 convention;
        # Python 3 requires binary mode — confirm the targeted interpreter.
        if os.path.isfile(self.pickle_config_file):
            with open(self.pickle_config_file, 'r') as f:
                self.tool_name_list = pickle.load(f)

    def save_config(self):
        """Persist the current tool name list to the pickle config file."""
        if not os.path.isdir(self.abs_tools_location):
            LOGGER.warning("Can not find tools location: %s", self.abs_tools_location)
            return None
        with open(self.pickle_config_file, 'w') as f:
            pickle.dump(self.tool_name_list, f)

    @property
    def available_tool_name_list(self):
        """Yield directory names under the tools location that are importable
        tools and not already in the known tool list."""
        if not os.path.isdir(self.abs_tools_location):
            LOGGER.warning("Can not find tools location: %s", self.abs_tools_location)
            # Nothing to yield; the function stays a generator because of the
            # yield below, so callers always receive an iterable.
            return
        # Check all packages under abs_tools_location (directories only).
        for name in os.listdir(self.abs_tools_location):
            abs_name = os.path.join(self.abs_tools_location, name)
            if os.path.isdir(abs_name):
                if not(name in self.tool_name_list) and not(self.get_tool(name) is None):
                    yield name
yield name | 39.05814 | 108 | 0.608514 | 3,267 | 0.972611 | 638 | 0.189937 | 652 | 0.194105 | 0 | 0 | 598 | 0.178029 |
682d4d61b5d8bc567488186baf7596163fa2d5a9 | 1,200 | py | Python | foodies/forms.py | sharonandisi/foodgram | 489794bf23734a80961fd9a790ac17c694335d27 | [
"PostgreSQL"
] | null | null | null | foodies/forms.py | sharonandisi/foodgram | 489794bf23734a80961fd9a790ac17c694335d27 | [
"PostgreSQL"
] | null | null | null | foodies/forms.py | sharonandisi/foodgram | 489794bf23734a80961fd9a790ac17c694335d27 | [
"PostgreSQL"
] | null | null | null | from django import forms
from .models import Image, Profile, Comments
class NewsLetterForm(forms.Form):
    # Simple newsletter signup form (plain Form, not backed by a model).
    your_name = forms.CharField(label='First Name', max_length=30)
    email = forms.EmailField(label='Email')
class NewImageForm(forms.ModelForm):
    # ModelForm for creating Image rows; tags render as checkboxes.
    class Meta:
        model = Image
        # profile and pub_date are filled in by the view, not by the user.
        exclude = ['profile', 'pub_date']
        widgets = {
            'tags':forms.CheckboxSelectMultiple(),
        }
class ProfileForm(forms.ModelForm):
    # ModelForm for editing a Profile; the owning user is set by the view.
    class Meta:
        model = Profile
        exclude = ['user']
class ImageUpload(forms.ModelForm):
    # ModelForm for uploading an Image; timestamps/ownership are set server-side.
    class Meta:
        model = Image
        exclude = ['pub_date', 'profile']
class CommentForm(forms.ModelForm):
    # ModelForm exposing only the comment text field.
    class Meta:
        model = Comments
        # NOTE(review): both exclude and fields are declared; with
        # fields = ['comments'] the exclude list is redundant — confirm intent.
        exclude = ['pub_date', 'image', 'profile']
        fields = ['comments']
        widgets = {
            'comments':forms.TextInput(attrs={
                'class': u'comments-input form-control', 'placeholder': u'Insert Comment'})
        }
class profileEdit(forms.Form):
    """Free-standing profile edit form.

    Bug fix: ``Bio`` was declared as ``forms.Textarea()`` — a widget, not a
    field — so Django's form metaclass never registered it and it neither
    rendered nor validated. It is now a real CharField using the Textarea
    widget; ``required=False`` keeps previously-valid submissions valid.
    """
    name = forms.CharField(max_length=20)
    username = forms.CharField(max_length=20)
    Bio = forms.CharField(widget=forms.Textarea, required=False)
    Email = forms.EmailField()
    phone_number = forms.CharField(max_length=12)
| 28.571429 | 91 | 0.626667 | 1,110 | 0.925 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.151667 |
682eb91e753309f5167c5c3ce7b313c8b3f7ea9a | 120 | py | Python | Oefeningen/standalone/list_comprehension_4.py | Seviran/Python_3 | e30ead250129d25bbc0a7ee2f6298775b2f4529a | [
"MIT"
] | null | null | null | Oefeningen/standalone/list_comprehension_4.py | Seviran/Python_3 | e30ead250129d25bbc0a7ee2f6298775b2f4529a | [
"MIT"
] | null | null | null | Oefeningen/standalone/list_comprehension_4.py | Seviran/Python_3 | e30ead250129d25bbc0a7ee2f6298775b2f4529a | [
"MIT"
] | null | null | null |
string_input = "amazing"
vowels = "aeiou"
# Keep only the consonants of string_input, preserving their order.
answer = [letter for letter in string_input if letter not in vowels]
print(answer)
| 20 | 62 | 0.741667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.133333 |
682f048ab5bbb8cf7aa88b049ef30b187c3d0139 | 3,404 | py | Python | pyingest/tests/test_serializers.py | golnazads/adsabs-pyingest | 37b37dd9e0d8a6e5cc34c59d30acd14e3381b48e | [
"MIT"
] | null | null | null | pyingest/tests/test_serializers.py | golnazads/adsabs-pyingest | 37b37dd9e0d8a6e5cc34c59d30acd14e3381b48e | [
"MIT"
] | null | null | null | pyingest/tests/test_serializers.py | golnazads/adsabs-pyingest | 37b37dd9e0d8a6e5cc34c59d30acd14e3381b48e | [
"MIT"
] | null | null | null | """
Test serializer
"""
import unittest
import glob
import json
try:
    # Python 2: prefer the fast C implementation of StringIO.
    from cStringIO import StringIO
except ImportError:
    # Python 3: cStringIO is gone; io.StringIO is the replacement.
    from io import StringIO
from pyingest.parsers.iop import IOPJATSParser
from pyingest.serializers.classic import Tagged
from pyingest.serializers.refwriter import *
class TestClassic(unittest.TestCase):
    """Round-trip test: serialize parsed stub JSON docs with the classic
    Tagged serializer and compare against the stored .tag fixtures."""
    # NOTE(review): this class uses os and sys, which appear to come in via
    # the wildcard import from pyingest.serializers.refwriter — confirm.
    def setUp(self):
        # Collect every parsed JSON fixture; serialized fixtures live alongside.
        stubdata_dir = os.path.join(os.path.dirname(__file__), 'data/stubdata')
        self.inputdocs = glob.glob(os.path.join(stubdata_dir, 'parsed/*.json'))
        self.outputdir = os.path.join(stubdata_dir, 'serialized')
        # sys.stderr.write("test cases are: {}\n".format(self.inputdocs))
    def test_classic_tagged(self):
        serializer = Tagged()
        for file in self.inputdocs:
            # this will raise exceptions if something is wrong
            document = ''
            with open(file, 'r') as fp:
                document = json.load(fp)
            self.assertIsNotNone(document, "%s: error reading doc" % file)
            # Serialize into an in-memory buffer.
            outputfp = StringIO()
            serializer.write(document, outputfp)
            output = outputfp.getvalue()
            outputfp.close()
            self.assertNotEqual(output, '')
            basefile, _ = os.path.splitext(os.path.basename(file))
            target = os.path.join(self.outputdir, basefile + '.tag')
            # save temporary copy
            target_saved = target + '.parsed'
            with open(target_saved, 'w') as fp:
                fp.write(output)
            ok = False
            # Python 3 orders the properties dictionary differently
            if sys.version_info > (3,) and os.path.exists(os.path.join(self.outputdir, 'python3', basefile + '.tag')):
                target = os.path.join(self.outputdir, 'python3', basefile + '.tag')
            if os.path.exists(target):
                with open(target, 'r') as fp:
                    shouldbe = fp.read()
                self.assertEqual(shouldbe, output, "results differ from %s" % target)
                ok = True
            else:
                sys.stderr.write("could not find shouldbe file %s\n" % target)
            # Keep the produced output on disk only when the comparison failed
            # (or no fixture existed), to aid debugging.
            if ok:
                os.remove(target_saved)
            else:
                sys.stderr.write("parsed output saved to %s\n" % target_saved)
class TestReferenceWriter(unittest.TestCase):
    """Exercise ReferenceWriter against a parsed IOP JATS fixture."""
    # NOTE(review): lowercase setup() is never called by unittest (it expects
    # setUp); kept as-is since it is a no-op, but consider renaming/removing.
    def setup(self):
        pass
    def test_write_refhandler_data(self):
        paperdata = IOPJATSParser()
        inputdoc = 'pyingest/tests/data/stubdata/input/iop_apj.xml'
        with open(inputdoc, 'r') as fm:
            pdat = paperdata.parse(fm)
        if 'refhandler_list' in pdat:
            refwriter = ReferenceWriter()
            refwriter.topdir = 'pyingest/tests/data/output/'
            refwriter.refsource = '.jats.iopft.xml'
            # Success criterion is simply that writeref() does not raise.
            refwriter.writeref(pdat)
        else:
            # Replaces the original assertEqual('a', 'b') forced failure with
            # an explicit failure message explaining what went wrong.
            self.fail("parsed document has no 'refhandler_list' entry")
# I changed these to let go with a pass instead of raising an exception
#
# def test_no_refdata(self):
# refwriter = ReferenceWriter()
# with self.assertRaises(NoReferencesException):
# refwriter.writeref({})
# def test_no_metadata(self):
# refwriter = ReferenceWriter()
# bogus_data = {'refhandler_list': ['fnord']}
# with self.assertRaises(WriteErrorException):
# refwriter.writeref(bogus_data)
| 36.212766 | 118 | 0.596063 | 3,105 | 0.912162 | 0 | 0 | 0 | 0 | 0 | 0 | 980 | 0.287897 |
682fac4fc964dc03647c016ce6c38daafb6bb051 | 1,473 | py | Python | Chapter10-debugging/maxsubarray_v4.py | showa-yojyo/Software-Architecture-with-Python | 46ba48911065292c9d391f66d7213c01699fca80 | [
"MIT"
] | null | null | null | Chapter10-debugging/maxsubarray_v4.py | showa-yojyo/Software-Architecture-with-Python | 46ba48911065292c9d391f66d7213c01699fca80 | [
"MIT"
] | null | null | null | Chapter10-debugging/maxsubarray_v4.py | showa-yojyo/Software-Architecture-with-Python | 46ba48911065292c9d391f66d7213c01699fca80 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Code Listing #4
"""
Maximum subarray problem - final version
"""
from contextlib import contextmanager
import random
import time
# Review note (original comment was "復習", Japanese for "review").
@contextmanager
def timer():
    """Context manager that prints the wall-clock time spent in its block."""
    started = time.time()
    try:
        yield
    finally:
        # Report even if the timed block raised.
        elapsed_ms = (time.time() - started) * 1000
        print('time taken=> %.2f ms' % elapsed_ms)
def num_array(size):
    """Return a list of ``size`` random integers drawn from [-25, 30).

    Uses the module-level ``random`` state, so seeding ``random`` makes the
    output reproducible.
    """
    # Comprehension replaces the manual append loop; the randrange call order
    # (and therefore the generated sequence) is unchanged.
    return [random.randrange(-25, 30) for _ in range(size)]
def max_subarray1(sequence):
    """Brute-force maximum-subarray search, kept to cross-check max_subarray().

    Returns (max_sum, max_sub): the best subarray sum and the subarray itself,
    or (0, []) when no subarray has a positive sum (matching max_subarray()'s
    empty-subarray convention).
    """
    # Bug fix: the inner loop previously started at i+1, so length-1 subarrays
    # were never considered (e.g. [10, -100] wrongly returned (0, [])), which
    # disagreed with max_subarray(). The running sum also removes the
    # accidental O(n^3) behaviour of re-summing each slice.
    max_sum, max_sub = 0, []
    for i in range(len(sequence)):
        running = 0
        for j in range(i, len(sequence)):
            running += sequence[j]  # running == sum(sequence[i:j+1])
            if running > max_sum:
                max_sum, max_sub = running, sequence[i:j + 1]
    return max_sum, max_sub
def max_subarray(sequence):
    """Kadane's algorithm: best sum over all (possibly empty) subarrays."""
    best = current = 0
    for value in sequence:
        # Extend the running subarray, or restart at zero when it goes negative.
        current = max(0, current + value)
        best = max(best, current)
    return best
if __name__ == "__main__":
    # Benchmark left for reference — uncomment to time the brute-force
    # version on a large random input:
    #with timer():
    #    max_subarray1(num_array(10000))
    print(max_subarray([-5, 20, -10, 30, 15]))  # 55
| 22.318182 | 71 | 0.618466 | 0 | 0 | 221 | 0.149628 | 237 | 0.16046 | 0 | 0 | 477 | 0.322952 |
68301f03cab4108ed375425743ca10036cf4ec40 | 4,012 | py | Python | scripts/frontpage_sqlite_migration.py | r-anime/modbot | 52e8f251273435e0146bd8d6633ff22549e138aa | [
"MIT"
] | 3 | 2020-07-06T08:26:12.000Z | 2021-04-20T05:31:38.000Z | scripts/frontpage_sqlite_migration.py | r-anime/modbot | 52e8f251273435e0146bd8d6633ff22549e138aa | [
"MIT"
] | 8 | 2021-06-01T03:49:28.000Z | 2022-03-18T02:27:43.000Z | scripts/frontpage_sqlite_migration.py | r-anime/modbot | 52e8f251273435e0146bd8d6633ff22549e138aa | [
"MIT"
] | 1 | 2021-04-20T05:30:46.000Z | 2021-04-20T05:30:46.000Z | """
One-time migration script from sqlalchemy models and sqlite database to custom ORM & PostgreSQL.
Not designed to work as part of the regular alembic system, merely placed here for archive purposes.
Should never need to run this again.
2021-05-03
"""
from datetime import datetime, timedelta
import sqlite3
from data.post_data import PostData, PostModel
from data.snapshot_data import SnapshotData, SnapshotModel, SnapshotFrontpageModel
from data.user_data import UserData
from services import post_service
from utils.logger import logger
from utils.reddit import base36decode
_post_data = PostData()
_snapshot_data = SnapshotData()
_user_data = UserData()
DB_FILE = "src/database.db"
def migrate_posts(offset=0):
    """Grabs posts in batches of 1000 at a time and migrates them to the new database.
    Returns number of processed rows. If less than 1000, at end of the table."""
    conn = sqlite3.connect(DB_FILE)
    conn.row_factory = sqlite3.Row
    rows = conn.execute("SELECT * FROM posts LIMIT 1000 OFFSET ?;", (offset,)).fetchall()
    conn.close()
    # row deliberately leaks out of the loop below so the last migrated row
    # can be logged; None means the batch was empty.
    row = None
    for row in rows:
        # If the post already exists in the database we don't need to do anything.
        post_id36 = row["id"]
        post = post_service.get_post_by_id(post_id36)
        if post:
            continue
        # OH RIGHT NO USER DATA IS SAVED IN THE OLD DATABASE.
        # username = row["name"]
        # if not user_service.get_user(username):
        #     user = UserModel()
        #     user.username = username
        #     _user_data.insert(user, error_on_conflict=False)
        post = PostModel()
        post.set_id(post_id36)
        # post.author = username
        post.title = row["title"]
        post.created_time = row["created_time"]
        post.flair_text = row["flair"]  # will add flair id in later mass update/backfill.. and user info
        _post_data.insert(post, error_on_conflict=False)
    if not row:
        logger.warning("No rows processed!")
    else:
        logger.info(f"Most recent migrated row: psk={row['psk']}, id={row['id']}")
    return len(rows)
def migrate_snapshots(date, hour):
    """Migrate the snapshot recorded at (date, hour), plus its frontpage rows,
    from the old sqlite database into the new one.

    Silently returns when no snapshot exists for that hour (past the last
    recorded snapshot).
    """
    conn = sqlite3.connect(DB_FILE)
    conn.row_factory = sqlite3.Row
    try:
        row = conn.execute("SELECT * FROM snapshots WHERE date=? and hour=?;", (date, hour)).fetchone()
        # No data, past the last recorded snapshot?
        if not row:
            return
        old_snapshot_psk = row["psk"]
        snapshot = SnapshotModel()
        snapshot.created_time = row["datetime"]
        snapshot.date = date
        snapshot.hour = hour
        snapshot.subscribers = row["subscribers"]
        new_snapshot = _snapshot_data.insert(snapshot)
        rows = conn.execute(
            "SELECT sf.*, p.id FROM snapshot_frontpage sf JOIN posts p on sf.post_psk = p.psk WHERE snapshot_psk=?;",
            (old_snapshot_psk,),
        ).fetchall()
    finally:
        # Bug fix: the connection previously leaked when the early return above
        # was taken; try/finally guarantees it is closed on every path.
        conn.close()
    for row in rows:
        sfp_model = SnapshotFrontpageModel()
        sfp_model.post_id = base36decode(row["id"])
        sfp_model.snapshot_id = new_snapshot.id
        sfp_model.rank = row["rank"]
        sfp_model.score = row["score"]
        _snapshot_data.insert(sfp_model)
def main():
    """Migrate all posts in 1000-row batches, then replay every hourly
    snapshot from the first recorded one up to now."""
    current_offset = 0
    while True:
        processed_posts = migrate_posts(current_offset)
        current_offset += processed_posts
        # A short batch means we reached the end of the posts table.
        if processed_posts < 1000:
            break
        if current_offset % 1000 == 0:
            logger.info(f"Migrated {current_offset} posts total")
    # First snapshot in the old database; walk forward one hour at a time.
    current_datetime = datetime.fromisoformat("2020-05-12 04:00:00.000")
    now = datetime.utcnow()
    while current_datetime <= now:
        try:
            migrate_snapshots(current_datetime.date(), current_datetime.hour)
        except Exception:
            # Log and keep going — one bad hour should not stop the migration.
            logger.exception(f"Failed to migrate {current_datetime.date()} - {current_datetime.hour}")
        current_datetime += timedelta(hours=1)
        if current_datetime.hour == 0:
            logger.info(f"Finished migrating {current_datetime.date()}")
if __name__ == "__main__":
    main()
| 31.100775 | 113 | 0.667248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,415 | 0.352692 |
68306374b036197a8b5db541b5852c5594665fb4 | 291 | py | Python | coursera/a_8_4_romeo.py | polde-live/learnpython | ff8ec96db1951d99797205d0bd491e542152a36f | [
"Unlicense"
] | null | null | null | coursera/a_8_4_romeo.py | polde-live/learnpython | ff8ec96db1951d99797205d0bd491e542152a36f | [
"Unlicense"
] | null | null | null | coursera/a_8_4_romeo.py | polde-live/learnpython | ff8ec96db1951d99797205d0bd491e542152a36f | [
"Unlicense"
] | null | null | null | # fname = raw_input("Enter file name: ")
fname = "romeo.txt"
fh = open(fname)
lst = list()
def add_to_list(words):
for word in words:
if word not in lst:
lst.append(word)
for line in fh:
words = line.split()
add_to_list(words)
lst.sort()
print lst | 17.117647 | 40 | 0.597938 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.175258 |
68311b9713af61f80d3b503c3c629b2c60cf2d3b | 1,376 | py | Python | Code/datasets/yahoo.py | jiahuanluo/label-inference-attacks | ac944418a207943f92f3b45b5c741aa41984a786 | [
"MIT"
] | 3 | 2022-01-14T10:15:41.000Z | 2022-03-14T12:53:11.000Z | Code/datasets/yahoo.py | jiahuanluo/label-inference-attacks | ac944418a207943f92f3b45b5c741aa41984a786 | [
"MIT"
] | 1 | 2022-03-04T06:28:35.000Z | 2022-03-04T06:28:35.000Z | Code/datasets/yahoo.py | jiahuanluo/label-inference-attacks | ac944418a207943f92f3b45b5c741aa41984a786 | [
"MIT"
] | 4 | 2021-11-20T09:11:19.000Z | 2022-03-03T08:16:14.000Z | from datasets.dataset_setup import DatasetSetup
from models import read_data_text
class YahooSetup(DatasetSetup):
    """Dataset setup for the Yahoo! Answers 10-class text dataset."""
    def __init__(self):
        super().__init__()
        # Yahoo! Answers has 10 topic classes; bottom model outputs match.
        self.num_classes = 10
        self.size_bottom_out = 10
    def set_datasets_for_ssl(self, file_path, n_labeled, party_num):
        """Build labeled/unlabeled/test splits for semi-supervised learning.

        n_labeled is the total labeled count; get_data takes a per-class
        count, hence the division by the 10 classes.
        """
        train_labeled_dataset, train_unlabeled_dataset, val_dataset, test_dataset, n_labels = \
            read_data_text.get_data(file_path, int(n_labeled / 10))
        # A second pass with 5000 per class yields the fully-labeled train set.
        train_complete_labeled_dataset, _, _, _, _ = \
            read_data_text.get_data(file_path, 5000)
        print("#Labeled:", len(train_labeled_dataset), "#Unlabeled:", len(train_unlabeled_dataset))
        return train_labeled_dataset, train_unlabeled_dataset, test_dataset, train_complete_labeled_dataset
    def get_transformed_dataset(self, file_path, party_num=None, train=True):
        """Return the full labeled training split (train=True) or the test split."""
        if train:
            train_complete_labeled_dataset, _, _, _, _ = \
            read_data_text.get_data(file_path, 5000)
            return train_complete_labeled_dataset
        else:
            _, _, _, test_dataset, _ = \
            read_data_text.get_data(file_path, 10)
            return test_dataset
if __name__ == '__main__':
    # Smoke test: build the full labeled training split from a local copy of
    # the Yahoo! Answers CSV dataset (Windows path — adjust for your machine).
    dataset_setup = YahooSetup()
    train_dataset = dataset_setup.get_transformed_dataset(file_path='D:/Datasets/yahoo_answers_csv/',train=True)
    print("s")
| 40.470588 | 112 | 0.694767 | 1,101 | 0.800145 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.050145 |
6831695451c0f9a55c30a633eac4c6bb030724d8 | 744 | py | Python | scripts/cloud_function.py | sansbacon/mfl_playoff_leagues | 33fd070be6e9e4d900acea2fbc9cf47068c655e0 | [
"MIT"
] | null | null | null | scripts/cloud_function.py | sansbacon/mfl_playoff_leagues | 33fd070be6e9e4d900acea2fbc9cf47068c655e0 | [
"MIT"
] | 2 | 2022-01-24T14:06:11.000Z | 2022-01-24T14:48:02.000Z | scripts/cloud_function.py | sansbacon/mfl_playoff_leagues | 33fd070be6e9e4d900acea2fbc9cf47068c655e0 | [
"MIT"
] | null | null | null | from mfl_playoff_leagues import MFL
def run(request):
    """Cloud Function entry point: render MFL playoff-league pages.

    Args:
        request (flask.Request): HTTP request. Requires 'year' and 'league'
            query parameters; 'live_scoring' (or 'league') selects the page.

    Returns:
        An HTML string for the requested page, or an empty dict when neither
        page is requested.
    """
    # The original parsed the JSON body into an unused local
    # (request.get_json); only the query string is actually used.
    args = request.args
    # NOTE(review): a missing 'year'/'league' parameter raises KeyError here
    # (surfacing as a 500) — confirm whether a friendlier error is wanted.
    m = MFL(year=args['year'], league=args['league'])
    # Membership tests on an empty args mapping are simply False, so the
    # extra "request.args and" truthiness checks were redundant.
    if 'live_scoring' in args:
        return m.live_scoring_html()
    if 'league' in args:
        return m.league_html()
    return {}
| 26.571429 | 89 | 0.653226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 392 | 0.526882 |
6831702b54dbaea7d1289e2c3040f25037a1de8b | 262 | py | Python | third_party/pdfium/build/gyp_pdfium.py | satorumpen/node-pdfium-native | 90e5bf8bc69c80620f9f4231ebf8e39ef1178b8c | [
"BSD-2-Clause"
] | 303 | 2015-03-13T08:31:24.000Z | 2022-03-21T10:06:45.000Z | third_party/pdfium/build/gyp_pdfium.py | satorumpen/node-pdfium-native | 90e5bf8bc69c80620f9f4231ebf8e39ef1178b8c | [
"BSD-2-Clause"
] | 15 | 2015-04-03T02:33:53.000Z | 2020-01-28T10:42:29.000Z | third_party/pdfium/build/gyp_pdfium.py | satorumpen/node-pdfium-native | 90e5bf8bc69c80620f9f4231ebf8e39ef1178b8c | [
"BSD-2-Clause"
] | 100 | 2015-03-13T08:28:56.000Z | 2022-02-18T03:19:39.000Z | # Copyright 2014 PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
# Directory containing this script; __file__ may be relative, so normalize it.
path = os.path.abspath(os.path.split(__file__)[0])
# NOTE(review): execfile() is Python-2-only; this build shim predates Python 3.
execfile(os.path.join(path, 'gyp_pdfium'))
| 29.111111 | 72 | 0.751908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.629771 |
6831bfd553ffd66dddccc11e3bad3d1009993596 | 1,508 | py | Python | docs/source/examples/complex.py | dev10110/pyObjective | 6d831accaf5c67a8252b2dc744001dd9e5db5f4d | [
"MIT"
] | null | null | null | docs/source/examples/complex.py | dev10110/pyObjective | 6d831accaf5c67a8252b2dc744001dd9e5db5f4d | [
"MIT"
] | null | null | null | docs/source/examples/complex.py | dev10110/pyObjective | 6d831accaf5c67a8252b2dc744001dd9e5db5f4d | [
"MIT"
] | null | null | null | from pyObjective import Variable, Model
import numpy as np
"""This example script is written to demonstrate the use of classes, and how more complicated models can be built,
and still passed to the solver. As a rudimentary example, it has two cubes and a sphere, and we are trying to find
the dimensions such that the cube1 - cube2 + sphere volume is minimized, subject to the bounds. """
# define a new class
class Cube:
    """Axis-aligned box whose three edge lengths are optimization variables."""
    def __init__(self, model):
        self.x = Variable('x', 1, (0.5, 2), "cube length x")
        self.y = Variable('y', 1, (0.5, 2))
        self.z = Variable('z', 1, (0.5, 2))
        # Register every edge length with the optimization model.
        for edge in (self.x, self.y, self.z):
            model.add_var(edge)
    def volume(self):
        """Current volume, evaluated from the variables' current values."""
        length, width, height = self.x(), self.y(), self.z()
        return length * width * height
# define a sphere, but keep the variable definition on the outside. For fun
class Sphere:
    """Sphere whose radius is supplied as a callable (e.g. a model Variable)."""
    def __init__(self, radius):
        self.r = radius
    def volume(self):
        """Sphere volume (4/3)*pi*r**3; self.r must be called to get its value."""
        radius_value = self.r()  # brackets needed here, not on self.r itself
        return (4 / 3) * np.pi * radius_value ** 3
# define simulation model
m = Model()
# create cube
# (each Cube registers its own x/y/z variables with the model)
c1 = Cube(m)
c2 = Cube(m)
# define the sphere radius
r = Variable("r", 1, (0.5, 2), "sphere radius")
m.add_var(r) # try commenting this line, and you will see that it was removed from the optimization
s = Sphere(r)
# define objective function (to be minimized)
# (closes over c1, c2 and s; evaluated by the solver at each iterate)
def cost():
    return c1.volume() - c2.volume() + s.volume()
m.objective = cost
# solve
m.solve()
# display results
m.display_results()
| 24.322581 | 115 | 0.653183 | 550 | 0.364721 | 0 | 0 | 0 | 0 | 0 | 0 | 743 | 0.492706 |
6833573a8789e8696ad0b078fc32733ac35d4da7 | 3,005 | py | Python | tests/test_inlinequeryresultgame.py | ehsanbarkhordar/botcup | 4e45c3df2dceb8afe3833c0e89813fa9493295ed | [
"MIT"
] | 1 | 2019-10-22T03:46:17.000Z | 2019-10-22T03:46:17.000Z | python-telegram-bot/tests/test_inlinequeryresultgame.py | shyguy-ry/paddingCheckBot | d0a60cc2f397b9b8e4d60bdea699a94beaff2ea1 | [
"Apache-2.0"
] | null | null | null | python-telegram-bot/tests/test_inlinequeryresultgame.py | shyguy-ry/paddingCheckBot | d0a60cc2f397b9b8e4d60bdea699a94beaff2ea1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram import (InlineKeyboardButton, InlineQueryResultGame,
InlineQueryResultVoice, InlineKeyboardMarkup)
@pytest.fixture(scope='class')
def inline_query_result_game():
    # Shared InlineQueryResultGame built from the test class's constants;
    # class scope so all tests in TestInlineQueryResultGame reuse one instance.
    return InlineQueryResultGame(TestInlineQueryResultGame.id,
                                 TestInlineQueryResultGame.game_short_name,
                                 reply_markup=TestInlineQueryResultGame.reply_markup)
class TestInlineQueryResultGame(object):
    # Expected attribute values; the fixture above builds the object from these.
    id = 'id'
    type = 'game'
    game_short_name = 'game short name'
    reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton('reply_markup')]])
    def test_expected_values(self, inline_query_result_game):
        # The constructed object must expose exactly the values it was built with.
        assert inline_query_result_game.type == self.type
        assert inline_query_result_game.id == self.id
        assert inline_query_result_game.game_short_name == self.game_short_name
        assert (inline_query_result_game.reply_markup.to_dict()
                == self.reply_markup.to_dict())
    def test_to_dict(self, inline_query_result_game):
        # to_dict() must round-trip every attribute into plain dict entries.
        inline_query_result_game_dict = inline_query_result_game.to_dict()
        assert isinstance(inline_query_result_game_dict, dict)
        assert inline_query_result_game_dict['type'] == inline_query_result_game.type
        assert inline_query_result_game_dict['id'] == inline_query_result_game.id
        assert (inline_query_result_game_dict['game_short_name']
                == inline_query_result_game.game_short_name)
        assert (inline_query_result_game_dict['reply_markup']
                == inline_query_result_game.reply_markup.to_dict())
    def test_equality(self):
        # Per these assertions equality/hash depend on id (and result type)
        # only: a == c even though their game_short_name differs.
        a = InlineQueryResultGame(self.id, self.game_short_name)
        b = InlineQueryResultGame(self.id, self.game_short_name)
        c = InlineQueryResultGame(self.id, '')
        d = InlineQueryResultGame('', self.game_short_name)
        e = InlineQueryResultVoice(self.id, '', '')
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        assert a == c
        assert hash(a) == hash(c)
        assert a != d
        assert hash(a) != hash(d)
        assert a != e
        assert hash(a) != hash(e)
| 39.539474 | 85 | 0.702163 | 1,752 | 0.583028 | 0 | 0 | 287 | 0.095507 | 0 | 0 | 888 | 0.295507 |
6833d75d22c19c184314891bfedfda10a657b443 | 835 | py | Python | project/ui/sheet.py | surister/code-jam-3 | 9d13c4e3f99dd34b3ced699964fd1298c2c8df81 | [
"MIT"
] | 2 | 2018-10-30T13:03:43.000Z | 2019-03-08T10:59:01.000Z | project/ui/sheet.py | skilldeliver/code-jam-3 | 9d13c4e3f99dd34b3ced699964fd1298c2c8df81 | [
"MIT"
] | null | null | null | project/ui/sheet.py | skilldeliver/code-jam-3 | 9d13c4e3f99dd34b3ced699964fd1298c2c8df81 | [
"MIT"
] | 2 | 2019-07-21T15:32:41.000Z | 2020-10-01T17:49:57.000Z | import pygame as pg
class Sheet:
    """Helper for slicing individual sprites out of a sprite sheet image."""

    def __init__(self, sheet_path):
        """Load the sprite sheet at ``sheet_path`` with per-pixel alpha."""
        self.spritesheet = pg.image.load(sheet_path).convert_alpha()

    def get_image(self, x, y, width, height, alpha=False):
        """Return the ``width`` x ``height`` sprite whose top-left corner is (x, y).

        With ``alpha=True`` the surface is converted keeping per-pixel alpha,
        otherwise it is converted to the plain display format.
        """
        sprite = pg.Surface((width, height))
        sprite.blit(self.spritesheet, (0, 0), (x, y, width, height))
        sprite.set_colorkey((0, 0, 0))  # treat pure black as transparent
        sprite.set_alpha(255)
        return sprite.convert_alpha() if alpha else sprite.convert()
| 30.925926 | 87 | 0.602395 | 812 | 0.972455 | 0 | 0 | 0 | 0 | 0 | 0 | 343 | 0.410778 |
6837584b088def649cb22875398b8143f9388db8 | 5,398 | py | Python | sandbox/bendpy/discont/dg_sine.py | MiroK/lega | ceb684ad639521e4a6e679188761984e2caa5611 | [
"MIT"
] | 3 | 2015-07-14T01:19:17.000Z | 2020-12-10T14:00:45.000Z | sandbox/bendpy/discont/dg_sine.py | MiroK/lega | ceb684ad639521e4a6e679188761984e2caa5611 | [
"MIT"
] | null | null | null | sandbox/bendpy/discont/dg_sine.py | MiroK/lega | ceb684ad639521e4a6e679188761984e2caa5611 | [
"MIT"
] | null | null | null | # Analytical solutions for problems that can be solved with the two sine basis.
###
# Problem one is
#
# -u`` = f in [0, pi] with u(0) = u(pi) = 0 for f which is
# g on [0, pi/2) and h on [pi/2, pi]
#
###
# Problem two is
#
# u```` = f in [0, pi] with u(0) = u(pi) = 0, u`(0) = u`(pi) = 0 for f which is
# g on [0, pi/2) and h on [pi/2, pi]
from __future__ import division, print_function

from math import pi

import numpy as np
from sympy import Symbol, integrate
def solve_poisson(g, h):
    """Solve -u'' = f on [0, pi] with u(0) = u(pi) = 0 for piecewise f.

    The right-hand side is ``g`` on [0, pi/2) and ``h`` on [pi/2, pi].
    Returns the pair ``(u0, u1)`` of sympy expressions valid on the left
    and right subintervals respectively.
    """
    x = Symbol('x')
    # Antiderivatives of -g: GG is a particular solution of -u'' = g.
    G = integrate(-g, x)
    GG = integrate(G, x)
    # Antiderivatives of -h for the right subinterval.
    H = integrate(-h, x)
    HH = integrate(H, x)
    # The solution is GG + a0*x + b0 on [0, pi/2] and HH + a1*x + b1 on
    # [pi/2, pi].  The 4x4 system encodes the two boundary conditions and
    # continuity of u and u' at pi/2.
    A = np.array([[0, 1., 0., 0.],
                  [0., 0., pi, 1.],
                  [pi/2., 1., -pi/2, -1.],
                  [1., 0., -1., 0.]])
    b = np.array([-GG.subs(x, 0),
                  -HH.subs(x, pi),
                  HH.subs(x, pi/2) - GG.subs(x, pi/2),
                  H.subs(x, pi/2) - G.subs(x, pi/2)])
    [a0, b0, a1, b1] = np.linalg.solve(A, b)

    u0 = GG + a0*x + b0
    u1 = HH + a1*x + b1

    # Sanity checks: boundary values, C^1 continuity at pi/2, and the PDE
    # residual on both halves should all be (numerically) zero.
    bcl = u0.subs(x, 0)
    bcr = u1.subs(x, pi)
    u_cont = u0.subs(x, pi/2) - u1.subs(x, pi/2)
    du_cont = u0.diff(x, 1).subs(x, pi/2) - u1.diff(x, 1).subs(x, pi/2)
    u0_lap = integrate((u0.diff(x, 2) + g)**2, (x, 0, pi/2))
    u1_lap = integrate((u1.diff(x, 2) + h)**2, (x, pi/2, pi))

    conds = [bcl, bcr, u_cont, du_cont, u0_lap, u1_lap]
    for i, c in enumerate(conds):
        # print() works on both Python 2 (via __future__.print_function)
        # and Python 3; the original used the Python-2-only print statement.
        print(i, c, abs(c) < 1E-13)

    return u0, u1
def solve_biharmonic(g, h):
    """Solve the biharmonic problem u'''' = f with f defined by g, h.

    NOTE(review): despite the file-header comments mentioning [0, pi] and
    u'(0) = u'(pi) = 0, the conditions below are imposed at x = -1 and
    x = 1 and (see the checks) enforce u = u'' = 0 at both ends.  So this
    actually solves on [-1, 1] with f equal to ``g`` on [-1, 0) and ``h``
    on [0, 1]; the ``__main__`` block rescales via x -> 2/pi*x - 1.
    Returns the pair ``(u0, u1)`` of sympy expressions for the two halves.
    """
    x = Symbol('x')
    # Primitive functions of g (four antiderivatives for the 4th-order ODE)
    G = integrate(g, x)
    GG = integrate(G, x)
    GGG = integrate(GG, x)
    GGGG = integrate(GGG, x)
    # Primitive functions of h
    H = integrate(h, x)
    HH = integrate(H, x)
    HHH = integrate(HH, x)
    HHHH = integrate(HHH, x)
    # 8 unknowns: cubic coefficients (a0..a3) on [-1, 0] and (b0..b3) on
    # [0, 1].  Rows: two boundary rows per end, then C^0..C^3 continuity
    # of the solution at x = 0.
    A = np.array([[-1./6, 1./2, -1., 1., 0., 0., 0., 0.],
                  [0, 0, 0, 0, 1/6., 1/2., 1., 1.],
                  [-1., 1, 0, 0, 0, 0, 0, 0.],
                  [0, 0, 0, 0, 1., 1., 0, 0],
                  [0, 0, 0, 1, 0, 0, 0, -1],
                  [0, 0, 1, 0, 0, 0, -1, 0],
                  [0, 1, 0, 0, 0, -1, 0, 0],
                  [1, 0, 0, 0, -1, 0, 0, 0]])
    b = np.array([-GGGG.subs(x, -1),
                  -HHHH.subs(x, 1),
                  -GG.subs(x, -1),
                  -HH.subs(x, 1),
                  HHHH.subs(x, 0) - GGGG.subs(x, 0),
                  HHH.subs(x, 0) - GGG.subs(x, 0),
                  HH.subs(x, 0) - GG.subs(x, 0),
                  H.subs(x, 0) - G.subs(x, 0)])
    [a0, a1, a2, a3, b0, b1, b2, b3] = np.linalg.solve(A, b)

    u0 = GGGG + a0*x**3/6 + a1*x**2/2 + a2*x + a3
    u1 = HHHH + b0*x**3/6 + b1*x**2/2 + b2*x + b3

    # Run the sanity checks
    checks = []
    # Boundary conditions: u and u'' vanish at both endpoints
    checks.append(u0.subs(x, -1))
    checks.append(u1.subs(x, 1))
    checks.append(u0.diff(x, 2).subs(x, -1))
    checks.append(u1.diff(x, 2).subs(x, 1))
    # Continuity of the solution and its first three derivatives at 0
    checks.append(u0.subs(x, 0) - u1.subs(x, 0))
    checks.append(u0.diff(x, 1).subs(x, 0) - u1.diff(x, 1).subs(x, 0))
    checks.append(u0.diff(x, 2).subs(x, 0) - u1.diff(x, 2).subs(x, 0))
    checks.append(u0.diff(x, 3).subs(x, 0) - u1.diff(x, 3).subs(x, 0))
    # The residual of the biharmonic equation on both halves
    checks.append(integrate((u0.diff(x, 4) - g)**2, (x, -1, 0)))
    checks.append(integrate((u1.diff(x, 4) - h)**2, (x, 0, 1)))
    assert all(map(lambda v: abs(v) < 1E-13, checks))

    return u0, u1
# -----------------------------------------------------------------------------

if __name__ == '__main__':
    from sympy import S
    from sympy.plotting import plot

    x = Symbol('x')
    # Piecewise forcing: constant 1 on the left half, x on the right half.
    g, h = S(1), x

    # Select which demo to run: 'poisson' or 'biharmonic'.
    problem = 'biharmonic'

    if problem == 'poisson':
        u0, u1 = solve_poisson(g, h)
        # Solution pieces in red; the commented-out plots below show the
        # forcing terms and first derivatives for debugging.
        p0 = plot(u0, (x, 0, pi/2), show=False)
        p1 = plot(u1, (x, pi/2, pi), show=False)
        p2 = plot(g, (x, 0, pi/2), show=False)
        p3 = plot(h, (x, pi/2, pi), show=False)
        p4 = plot(u0.diff(x, 1), (x, 0, pi/2), show=False)
        p5 = plot(u1.diff(x, 1), (x, pi/2, pi), show=False)
        p0[0].line_color='red'
        p1[0].line_color='red'
        # p2[0].line_color='blue'
        # p3[0].line_color='blue'
        # p4[0].line_color='green'
        # p5[0].line_color='green'
        p0.append(p1[0])
        # p0.append(p2[0])
        # p0.append(p3[0])
        # p0.append(p4[0])
        # p0.append(p5[0])

    if problem == 'biharmonic':
        u0, u1 = solve_biharmonic(g, h)
        # NOTE(review): the result of this expression is discarded --
        # presumably it was meant to rebind u0, u1 to the rescaled
        # [0, pi] variables; confirm intent.
        u0.subs(x, 2/pi*x-1), u1.subs(x, 2/pi*x-1)
        # Plot the k-th derivative of the solution pieces
        k = 3
        p0 = plot(u0.diff(x, k), (x, 0, pi/2), show=False)
        p1 = plot(u1.diff(x, k), (x, pi/2, pi), show=False)
        p0[0].line_color='red'
        p1[0].line_color='red'
        p0.append(p1[0])

    p0.show()
| 31.022989 | 79 | 0.483142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,361 | 0.25213 |
6837ab42c2f65e2a2df59f39ed16d067247c7e29 | 7,642 | py | Python | pyunity/values/quaternion.py | rayzchen/PyUnity | 8ed436eca7a84f05190c1fa275c58da5c6059926 | [
"MIT"
] | null | null | null | pyunity/values/quaternion.py | rayzchen/PyUnity | 8ed436eca7a84f05190c1fa275c58da5c6059926 | [
"MIT"
] | null | null | null | pyunity/values/quaternion.py | rayzchen/PyUnity | 8ed436eca7a84f05190c1fa275c58da5c6059926 | [
"MIT"
] | null | null | null | # Copyright (c) 2020-2022 The PyUnity Team
# This file is licensed under the MIT License.
# See https://docs.pyunity.x10.bz/en/latest/license.html
"""Class to represent a rotation in 3D space."""
__all__ = ["Quaternion", "QuaternionDiff"]
from . import Mathf
from .vector import Vector3, conv
from .other import LockedLiteral
class Quaternion(LockedLiteral):
    """
    Class to represent a unit quaternion, also known as a versor.

    Components iterate and index in (w, x, y, z) order.

    Parameters
    ----------
    w : float
        Real value of Quaternion
    x : float
        x coordinate of Quaternion
    y : float
        y coordinate of Quaternion
    z : float
        z coordinate of Quaternion

    """

    def __init__(self, w, x, y, z):
        self.w = w
        self.x = x
        self.y = y
        self.z = z
        self._lock()

    def __repr__(self):
        return f"Quaternion({', '.join(map(conv, self))})"

    def __str__(self):
        return f"Quaternion({', '.join(map(conv, self))})"

    def __getitem__(self, i):
        # Index order matches __iter__: 0=w, 1=x, 2=y, 3=z.
        if i == 0:
            return self.w
        elif i == 1:
            return self.x
        elif i == 2:
            return self.y
        elif i == 3:
            return self.z
        raise IndexError()

    def __iter__(self):
        yield self.w
        yield self.x
        yield self.y
        yield self.z

    def __list__(self):
        return [self.w, self.x, self.y, self.z]

    def __len__(self):
        return 4

    def __hash__(self):
        return hash((self.w, self.x, self.y, self.z))

    def __eq__(self, other):
        # Any length-4 indexable with matching components compares equal.
        if hasattr(other, "__getitem__") and len(other) == 4:
            return self.w == other[0] and self.x == other[1] and self.y == other[2] and self.z == other[3]
        else:
            return False

    def __ne__(self, other):
        if hasattr(other, "__getitem__") and len(other) == 4:
            return self.w != other[0] or self.x != other[1] or self.y != other[2] or self.z != other[3]
        else:
            return True

    def __mul__(self, other):
        """Hamilton product with another Quaternion, or scalar angle scaling."""
        if isinstance(other, Quaternion):
            w = self.w * other.w - self.x * other.x - self.y * other.y - self.z * other.z
            x = self.w * other.x + self.x * other.w + self.y * other.z - self.z * other.y
            y = self.w * other.y - self.x * other.z + self.y * other.w + self.z * other.x
            z = self.w * other.z + self.x * other.y - self.y * other.x + self.z * other.w
            return Quaternion(w, x, y, z)
        elif isinstance(other, (int, float)):
            # Scaling a rotation multiplies its angle about the same axis.
            angle, axis = self.angleAxisPair
            return Quaternion.FromAxis((angle * other) % 360, axis)
        return NotImplemented

    def __truediv__(self, other):
        """Divide the rotation angle by a scalar, keeping the axis."""
        if isinstance(other, (int, float)):
            angle, axis = self.angleAxisPair
            return Quaternion.FromAxis((angle / other) % 360, axis)
        return NotImplemented

    def __sub__(self, other):
        """Return the QuaternionDiff between two quaternions."""
        if isinstance(other, Quaternion):
            diff = (self * other.conjugate).normalized()
            return QuaternionDiff(*diff)
        # Bugfix: previously fell through and returned None for
        # non-Quaternion operands; return NotImplemented so Python can try
        # the reflected operation, consistent with __mul__/__truediv__.
        return NotImplemented

    def absDiff(self, other):
        """Absolute angular difference (see QuaternionDiff.__abs__)."""
        return abs(other - self)

    def copy(self):
        """
        Deep copy of the Quaternion.

        Returns
        -------
        Quaternion
            A deep copy

        """
        return Quaternion(self.w, self.x, self.y, self.z)

    def normalized(self):
        """
        A normalized Quaternion, for rotations.
        If the length is 0, then the identity
        quaternion is returned.

        Returns
        -------
        Quaternion
            A unit quaternion

        """
        length = Mathf.Sqrt(self.w ** 2 + self.x ** 2 +
                            self.y ** 2 + self.z ** 2)
        if length:
            return Quaternion(self.w / length, self.x / length, self.y / length, self.z / length)
        else:
            return Quaternion.identity()

    @property
    def conjugate(self):
        """The conjugate of a unit quaternion"""
        return Quaternion(self.w, -self.x, -self.y, -self.z)

    def RotateVector(self, vector):
        """Rotate a vector by the quaternion"""
        # q * (0, v) * q' yields the rotated vector in the imaginary part.
        other = Quaternion(0, *vector)
        return Vector3(self * other * self.conjugate)

    @staticmethod
    def FromAxis(angle, a):
        """
        Create a quaternion from an angle and an axis.

        Parameters
        ----------
        angle : float
            Angle to rotate, in degrees
        a : Vector3
            Axis to rotate about

        """
        axis = a.normalized()
        cos = Mathf.Cos(angle / 2 * Mathf.DEG_TO_RAD)
        sin = Mathf.Sin(angle / 2 * Mathf.DEG_TO_RAD)
        return Quaternion(cos, axis.x * sin, axis.y * sin, axis.z * sin)

    @staticmethod
    def Between(v1, v2):
        """Quaternion rotating ``v1`` towards ``v2``.

        Returns identity for equal or zero vectors, and a 180-degree turn
        about the up axis for exactly opposite vectors.
        """
        a = v1.cross(v2)
        if a.dot(a) == 0:
            # Parallel (or degenerate) vectors: no unique rotation axis.
            if v1 == v2 or v1.dot(v1) == 0 or v2.dot(v2) == 0:
                return Quaternion.identity()
            else:
                return Quaternion.FromAxis(180, Vector3.up())
        # NOTE(review): the Sqrt around v1.length * v2.length, and the
        # extra DEG_TO_RAD factor passed to FromAxis (which itself expects
        # degrees), look unit-inconsistent -- verify against the Mathf and
        # Vector3.length semantics before relying on this method.
        angle = Mathf.Acos(v1.dot(v2) / (Mathf.Sqrt(v1.length * v2.length)))
        q = Quaternion.FromAxis(angle * Mathf.DEG_TO_RAD, a)
        return q.normalized()

    @staticmethod
    def FromDir(v):
        """Quaternion looking along direction ``v`` (yaw about up, then pitch)."""
        a = Quaternion.FromAxis(
            Mathf.Atan2(v.x, v.z) * Mathf.RAD_TO_DEG,
            Vector3.up())
        b = Quaternion.FromAxis(
            Mathf.Atan2(-v.y, Mathf.Sqrt(v.z ** 2 + v.x ** 2)) * Mathf.RAD_TO_DEG,
            Vector3.right())
        return a * b

    @property
    def angleAxisPair(self):
        """
        Gets the angle and axis pair. Tuple of form (angle, axis),
        with the angle in degrees.
        """
        angle = 2 * Mathf.Acos(self.w) * Mathf.RAD_TO_DEG
        if angle == 0:
            # Zero rotation has no meaningful axis; pick up by convention.
            return (0, Vector3.up())
        return (angle, Vector3(self).normalized())

    @staticmethod
    def Euler(vector):
        """
        Create a quaternion using Euler rotations.

        Parameters
        ----------
        vector : Vector3
            Euler rotations

        Returns
        -------
        Quaternion
            Generated quaternion

        """
        a = Quaternion.FromAxis(vector.x, Vector3.right())
        b = Quaternion.FromAxis(vector.y, Vector3.up())
        c = Quaternion.FromAxis(vector.z, Vector3.forward())
        return b * a * c

    @property
    def eulerAngles(self):
        """Gets the Euler angles of the quaternion, in degrees in (-180, 180]."""
        s = self.w ** 2 + self.x ** 2 + self.y ** 2 + self.z ** 2
        r23 = 2 * (self.w * self.x - self.y * self.z)
        # Handle the r23 ~ +-s extremes separately so Asin stays in domain.
        # NOTE(review): unlike the other matrix terms, r23 is passed to
        # Asin without dividing by s -- verify for non-unit quaternions.
        if r23 > 0.999999 * s:
            x = Mathf.PI / 2
            y = 2 * Mathf.Atan2(self.y, self.x)
            z = 0
        elif r23 < -0.999999 * s:
            x = -Mathf.PI / 2
            y = -2 * Mathf.Atan2(self.y, self.x)
            z = 0
        else:
            x = Mathf.Asin(r23)
            r13 = 2 * (self.w * self.y + self.z * self.x) / s
            r33 = 1 - 2 * (self.x ** 2 + self.y ** 2) / s
            r21 = 2 * (self.w * self.z + self.x * self.y) / s
            r22 = 1 - 2 * (self.x ** 2 + self.z ** 2) / s
            y = Mathf.Atan2(r13, r33)
            z = Mathf.Atan2(r21, r22)

        # Convert to degrees and wrap into (-180, 180].
        euler = [x, y, z]
        for i in range(3):
            euler[i] = euler[i] * Mathf.RAD_TO_DEG % 360
            if euler[i] > 180:
                euler[i] -= 360
        return Vector3(euler)

    @staticmethod
    def identity():
        """Identity quaternion representing no rotation"""
        return Quaternion(1, 0, 0, 0)
class QuaternionDiff:
    """Difference between two quaternions, as produced by Quaternion.__sub__.

    Stores the raw (w, x, y, z) components of the relative rotation.
    """

    def __init__(self, w, x, y, z):
        self.w = w
        self.x = x
        self.y = y
        self.z = z

    def __abs__(self):
        # Angle of the relative rotation (angle = 2*acos(w)).
        # NOTE(review): this multiplies by DEG_TO_RAD while
        # Quaternion.angleAxisPair uses RAD_TO_DEG for the same formula --
        # confirm which unit is intended.
        return abs(2 * Mathf.Acos(self.w) * Mathf.DEG_TO_RAD)
| 29.392308 | 106 | 0.522245 | 7,309 | 0.956425 | 103 | 0.013478 | 3,327 | 0.435357 | 0 | 0 | 1,701 | 0.222586 |
6837cc30692c462a7a0c489ba23cd6a7b34c84cf | 402 | py | Python | db/db_connector.py | waynshang/stock_institution | 72b0374ed148f1bdb4e276422a095411ab564455 | [
"MIT"
] | 1 | 2021-05-29T11:08:09.000Z | 2021-05-29T11:08:09.000Z | db/db_connector.py | waynshang/stock_institution | 72b0374ed148f1bdb4e276422a095411ab564455 | [
"MIT"
] | 1 | 2021-08-03T12:36:46.000Z | 2021-08-03T12:36:46.000Z | db/db_connector.py | waynshang/stock_institution | 72b0374ed148f1bdb4e276422a095411ab564455 | [
"MIT"
] | null | null | null | from config import secret
from utils import getLogger
DEBUG = getLogger()
class MysqlConnection:
    """Holds MySQL connection settings for one server, read from ``secret``."""

    def __init__(self, database, server_name):
        """Look up the server's mysql credentials and remember the database."""
        mysql_conf = secret[server_name]['mysql']
        self.host = mysql_conf['host']
        self.port = mysql_conf['port']
        self.username = mysql_conf['username']
        self.password = mysql_conf['password']
        self.database = database
683999d61c2d040fef9e9f17e0c36c60ebd1671e | 5,147 | py | Python | tests/docker/testhost/ui_test.py | huskywhale/safeplaces-frontend | bb46580837692a8ec6e02af3fa9d415242439b25 | [
"MIT"
] | null | null | null | tests/docker/testhost/ui_test.py | huskywhale/safeplaces-frontend | bb46580837692a8ec6e02af3fa9d415242439b25 | [
"MIT"
] | null | null | null | tests/docker/testhost/ui_test.py | huskywhale/safeplaces-frontend | bb46580837692a8ec6e02af3fa9d415242439b25 | [
"MIT"
] | null | null | null | #from selenium.webdriver.remote import webdriver
from selenium import webdriver
#from selenium.webdriver.chrome import options
from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools
import unittest
import os
class TestRedaction(unittest.TestCase):
    """Selenium UI smoke tests for the SafePlaces frontend.

    ``setUp`` builds a Chrome driver (remote against a selenium hub by
    default; local when ``local_mode`` is True) and resolves paths/URLs
    from environment variables.  The scenario methods are deliberately not
    prefixed ``test_`` (see the inline comments) so the runner skips them
    until the system under test works.
    """

    def setUp(self):
        #Change this to TRUE if you don't want to use a dockerised stack
        self.local_mode = False

        #setup environment based on environment variables
        if 'HOME_DIR' in os.environ.copy():
            self.home_dir = os.environ['HOME_DIR']
        else:
            self.home_dir = os.getcwd()
        if 'DATA_DIR' in os.environ.copy():
            self.data_dir = os.environ['DATA_DIR']
        else:
            self.data_dir = 'tests/data/'
        if 'TMP_DIR' in os.environ.copy():
            self.tmp_dir = os.environ['TMP_DIR']
        else:
            self.tmp_dir = '/tmp/'
        if 'BASE_TEST_URL' in os.environ.copy():
            self.base_url = os.environ['BASE_TEST_URL']
        else:
            self.base_url = 'https://react.safeplaces.extremesolution.com/'
        if 'SELENIUM_URL' in os.environ.copy():
            self.sel_url = os.environ['SELENIUM_URL']
        else:
            self.sel_url = 'http://172.17.0.2:4444/wd/hub'

        # Downloads go to /tmp so redaction output can be inspected later.
        chrome_options = webdriver.ChromeOptions()
        prefs = {'download.default_directory': '/tmp'}
        chrome_options.add_experimental_option('prefs', prefs)
        if self.local_mode:
            self.driver = webdriver.Chrome(chrome_options=chrome_options)
        else:
            self.driver = webdriver.Remote(command_executor=self.sel_url, options=chrome_options)

    def app_loads(self):
        """Smoke check: the entry page opens."""
        tools = Tools()
        entry_page = EntryPage(self.driver,base_url=self.base_url)
        entry_page.open_page()

    def contact_trace(self):
        """Walk through creating a record and starting a data point."""
        tools = Tools()
        entry_page = EntryPage(self.driver,base_url=self.base_url)
        entry_page.open_page()
        login_page = LoginPage(self.driver)
        login_page.login_if_required()
        entry_page.open_trace()
        contact_trace_page = ContactTracePage(self.driver)
        contact_trace_page.add_new_record()
        add_record_page = AddNewRecordPage(self.driver)
        add_record_page.create_manually()
        contact_trace_page.more()
        contact_trace_page.add_data_point()
        # start to add a point and cancel editing the point
        # if the test works this far, we can expand it later
        point_editor_page = AddDataToRecordPage(self.driver)
        point_editor_page.enter_location('-122.19732036472264, 37.718665250290684')
        point_editor_page.enter_date('06/08/2020 07:00')
        point_editor_page.close()
        entry_page.open_settings()
        settings_page = SettingsPage(self.driver)
        # NOTE(review): no "()" -- if logout is a method it is never
        # invoked here; verify whether it is a property on SettingsPage.
        settings_page.logout

    # leaving test_ out of the method name until the SUT works
    def settings(self):
        """Fill in the settings form and save."""
        login_page = LoginPage(self.driver)
        login_page.login_if_required()
        entry_page = EntryPage(self.driver,base_url=self.base_url)
        entry_page.open_page()
        entry_page.open_settings()
        settings_page = SettingsPage(self.driver)
        settings_page.set_health_authority_name('Test Health Authority')
        settings_page.set_information_website_URL('https://cdc.gov')
        settings_page.set_reference_website_URL('https://cdc.gov')
        settings_page.set_api_endpoint('https://s3.aws.com/bucket_name/safepaths.json')
        settings_page.set_privacy_policy_URL('https://www.cdc.gov/other/privacy.html')
        # set retention policy slider to 50% of the way across, which would be 15 days
        # commented out until we find how to get ActionChains working
        # settings_page.set_retention_policy('50')
        # NOTE(review): missing "()" on the next two lines unless they are
        # properties -- verify against the page objects.
        settings_page.reset_gps_coordinates
        settings_page.save_and_continue

    #def test_redaction(self): <--- removed test_ from the method name until the SUT works!
    def redaction(self):
        """Load a synthetic redacted file and verify the displayed summary."""
        tools = Tools()
        entry_page = EntryPage(self.driver,base_url=self.base_url)
        entry_page.open_page()
        entry_page.setup_case()
        entry_page.open_redactor()
        login_page = LoginPage(self.driver)
        login_page.login_if_required()
        redaction_page = RedactionPage(self.driver)
        redaction_page.load_file(self.data_dir +'/privkit31A-synthetic-REDACTED.json')
        redaction_page.check_start_date_is('1-Mar-2020 1:00pm GMT')
        redaction_page.check_end_date_is('19-Mar-2020 10:00pm GMT')
        redaction_page.check_duration_is('18 days 9 hrs')
        redaction_page.save_file()
        #TODO: this next step fails because it was designed for backend=OFF. To test this, we need to load the publisher screen and see what data is there when we hit load
        #tools.compare_files(self.tmp_dir + '/privkit31A-synthetic-REDACTED-REDACTED.json', self.home_dir + '/' + self.data_dir + '/expected_results/privkit31A-synthetic-REDACTED-REDACTED.json')

    def tearDown(self):
        self.driver.close()
| 45.149123 | 194 | 0.682922 | 4,812 | 0.934914 | 0 | 0 | 0 | 0 | 0 | 0 | 1,544 | 0.299981 |
683a3d763a7c968c779e9e79c5590b8d66a1acc0 | 2,532 | py | Python | kenning/outputcollectors/name_printer.py | antmicro/edge-ai-tester | 6b145145ed1cec206ae0229c846fb33d272f3ffa | [
"Apache-2.0"
] | 20 | 2021-06-24T13:37:21.000Z | 2022-03-25T10:50:26.000Z | kenning/outputcollectors/name_printer.py | antmicro/edge-ai-tester | 6b145145ed1cec206ae0229c846fb33d272f3ffa | [
"Apache-2.0"
] | null | null | null | kenning/outputcollectors/name_printer.py | antmicro/edge-ai-tester | 6b145145ed1cec206ae0229c846fb33d272f3ffa | [
"Apache-2.0"
] | 1 | 2021-11-09T17:23:04.000Z | 2021-11-09T17:23:04.000Z | """
A small, very basic OutputCollector-derived class used to test
handling of multiple OutputCollectors in inference_runner scenario
"""
from kenning.core.outputcollector import OutputCollector
from kenning.datasets.open_images_dataset import DectObject
from typing import Any, Union
import numpy as np
from pathlib import Path
class NamePrinter(OutputCollector):
    """A small, very basic OutputCollector that prints model outputs.

    Used to test handling of multiple OutputCollectors in the
    inference_runner scenario.  For detectors it prints the detected class
    names per frame; for classificators it prints the top-5 scored labels.
    """

    def __init__(self, print_type: str = "detector", file_path: Path = None):
        """Create the printer.

        Parameters
        ----------
        print_type : str
            Either 'detector' or 'classificator'; selects the output format.
        file_path : Path
            Optional file with one class name per line, used to label
            classification scores.
        """
        self.frame_counter = 0
        self.print_type = print_type
        self.classnames = []
        if file_path:
            with open(file_path, 'r') as f:
                for line in f:
                    self.classnames.append(line.strip())
        super().__init__()

    @classmethod
    def form_argparse(cls):
        """Extend the base argparse group with NamePrinter options."""
        parser, group = super().form_argparse()
        group.add_argument(
            '--print-type',
            help='What is the type of model that will input data to the NamePrinter',  # noqa: E501
            choices=['detector', 'classificator'],
            default='detector'
        )
        group.add_argument(
            '--classification-class-names',
            help='File with class names used to identify the output from classification models',  # noqa: E501
            type=Path
        )
        return parser, group

    @classmethod
    def from_argparse(cls, args):
        """Build a NamePrinter from parsed command-line arguments."""
        return cls(args.print_type, args.classification_class_names)

    def detach_from_output(self):
        # Nothing to clean up: output goes to stdout.
        pass

    def should_close(self):
        # Printing never requests the runner to stop.
        return False

    def process_output(self, i: Any, o: Union[DectObject, np.array]):
        """Print a one-line summary of output ``o``; input ``i`` is unused.

        Only the first element of the (batched) output is reported.
        """
        print("Frame", self.frame_counter, end=": ")
        o = o[0]
        if self.print_type == 'detector':
            # Renamed loop variables: the original reused `i`, shadowing
            # the input parameter.
            for detection in o:
                print(detection.clsname, end=" ")
            print()
        elif self.print_type == 'classificator':
            if self.classnames:
                scored = list(zip(o, self.classnames))
            else:
                # No class-name file: fall back to generic labels.
                scored = [(score, "object {}".format(idx))
                          for idx, score in enumerate(o)]
            scored.sort(key=lambda pair: pair[0], reverse=True)
            # Print the five highest-scoring entries as "label: score".
            for score, label in scored[:min(5, len(scored))]:
                print(
                    "{}: {:.2f}".format(
                        label,
                        float(score)
                    ),
                    end=', '
                )
            print()
        self.frame_counter += 1
| 32.883117 | 110 | 0.540679 | 2,200 | 0.868878 | 0 | 0 | 695 | 0.274487 | 0 | 0 | 464 | 0.183254 |
683a460d29953d3626084a99677147d11506c15d | 291 | py | Python | plugins/bilibili_activity/contents/unknown.py | su226/IdhagnBot | a5db1b6ab69fdf67fd6e53a63b34c6bc863d6609 | [
"MIT"
] | 2 | 2022-02-14T06:37:05.000Z | 2022-03-30T12:18:15.000Z | plugins/bilibili_activity/contents/unknown.py | su226/IdhagnBot | a5db1b6ab69fdf67fd6e53a63b34c6bc863d6609 | [
"MIT"
] | null | null | null | plugins/bilibili_activity/contents/unknown.py | su226/IdhagnBot | a5db1b6ab69fdf67fd6e53a63b34c6bc863d6609 | [
"MIT"
] | null | null | null | from typing import Any
from .. import util
# Fallback message template for activity types the bot cannot parse yet.
FORMAT = '''\
🤔 {username} 发布了……一些东西
https://t.bilibili.com/{id}
目前机器人还不能理解这个qwq'''


def handle(content: Any) -> str:
    """Render the fallback notification for an unrecognized activity payload."""
    desc = content["desc"]
    return FORMAT.format(
        username=desc["user_profile"]["info"]["uname"],
        id=desc["dynamic_id_str"])
| 22.384615 | 62 | 0.670103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.517857 |
683d706676e142f86e414d75b32aa04fdb20a6fb | 4,159 | py | Python | georiviere/observations/tests/test_views.py | georiviere/Georiviere-admin | 4ac532f84a7a8fef3e01384fad63e8e288d397c0 | [
"BSD-2-Clause"
] | 7 | 2021-11-05T14:52:25.000Z | 2022-03-24T21:18:02.000Z | georiviere/observations/tests/test_views.py | georiviere/Georiviere-admin | 4ac532f84a7a8fef3e01384fad63e8e288d397c0 | [
"BSD-2-Clause"
] | 57 | 2021-11-02T10:27:34.000Z | 2022-03-31T14:08:32.000Z | georiviere/observations/tests/test_views.py | georiviere/Georiviere-admin | 4ac532f84a7a8fef3e01384fad63e8e288d397c0 | [
"BSD-2-Clause"
] | 1 | 2021-12-05T14:55:42.000Z | 2021-12-05T14:55:42.000Z | from collections import OrderedDict
from geotrek.authent.tests.factories import StructureFactory
from georiviere.tests import CommonRiverTest
from georiviere.observations.models import Station, ParameterTracking
from .factories import (
StationFactory, StationProfileFactory, ParameterFactory
)
class StationViewTestCase(CommonRiverTest):
    """View tests for the observations Station model: expected detail JSON,
    create-form validation payloads and query-count regressions."""
    model = Station
    modelfactory = StationFactory

    def get_expected_json_attrs(self):
        """Expected serialized attributes of a Station detail JSON view."""
        return {
            'id': self.obj.pk,
            'code': self.obj.code,
            'date_insert': '2020-03-17T00:00:00Z',
            'date_update': '2020-03-17T00:00:00Z',
            'description': self.obj.description,
            'geom': self.obj.geom.ewkt,
            'hardness': self.obj.hardness,
            'in_service': self.obj.in_service,
            'label': self.obj.label,
            'local_influence': self.obj.local_influence,
            'purpose_code': self.obj.purpose_code,
            'site_code': self.obj.site_code,
            'station_profiles': [],
            'structure': self.obj.structure.pk,
            'station_uri': self.obj.station_uri,
        }

    def get_bad_data(self):
        """Invalid form payload (missing label) and the expected error message."""
        return OrderedDict([
            ('label', ''),
            ('parametertracking_set-TOTAL_FORMS', '0'),
            ('parametertracking_set-INITIAL_FORMS', '1'),
            ('parametertracking_set-MAX_NUM_FORMS', '0'),
        ]), 'This field is required.'

    def get_good_data(self):
        """Valid creation payload with two inline parameter-tracking forms."""
        structure = StructureFactory.create()
        station_profile = StationProfileFactory.create()
        temp_data = self.modelfactory.build(
            structure=structure,
            station_profiles=[station_profile]
        )
        return {
            'structure': structure.pk,
            # geometry is posted as a JSON wrapper in WGS84 (EPSG:4326)
            'geom': '{"geom": "%s", "snap": [%s]}' % (temp_data.geom.transform(4326, clone=True).ewkt,
                                                      ','.join(['null'])),
            'code': '1234',
            'label': 'test',
            'station_profiles': [station_profile.pk],
            'local_influence': Station.LocalInfluenceChoices.UNKNOWN,

            # management-form fields for the inline formset
            'parametertracking_set-TOTAL_FORMS': '2',
            'parametertracking_set-INITIAL_FORMS': '0',
            'parametertracking_set-MAX_NUM_FORMS': '',

            # first inline: fully filled parameter tracking
            'parametertracking_set-0-label': 'Paramètre suivi Ted',
            'parametertracking_set-0-parameter': ParameterFactory.create().pk,
            'parametertracking_set-0-measure_frequency': 'par jour',
            'parametertracking_set-0-transmission_frequency': 'par semaine',
            'parametertracking_set-0-data_availability': ParameterTracking.DataAvailabilityChoice.ONDEMAND,
            'parametertracking_set-0-measure_start_date': '2019-06-06',
            'parametertracking_set-0-measure_end_date': '2020-06-06',
            'parametertracking_set-0-id': '',
            'parametertracking_set-0-DELETE': '',

            # second inline: only the required fields
            'parametertracking_set-1-label': 'Paramètre suivi Bob',
            'parametertracking_set-1-parameter': ParameterFactory.create().pk,
            'parametertracking_set-1-measure_frequency': '',
            'parametertracking_set-1-transmission_frequency': '',
            'parametertracking_set-1-data_availability': ParameterTracking.DataAvailabilityChoice.ONDEMAND,
            'parametertracking_set-1-measure_start_date': '',
            'parametertracking_set-1-measure_end_date': '',
            'parametertracking_set-1-id': '',
            'parametertracking_set-1-DELETE': '',
        }

    def test_listing_number_queries(self):
        """Test number queries when get list object"""
        self.login()
        self.modelfactory.create_batch(100)

        # query counts are pinned to catch N+1 regressions in the list views
        with self.assertNumQueries(7):
            self.client.get(self.model.get_jsonlist_url())

        with self.assertNumQueries(6):
            self.client.get(self.model.get_format_list_url())

    def test_detail_number_queries(self):
        """Test number queries when get detail object"""
        self.login()
        station = self.modelfactory.create()

        with self.assertNumQueries(49):
            self.client.get(station.get_detail_url())
| 40.77451 | 107 | 0.622265 | 3,857 | 0.926941 | 0 | 0 | 0 | 0 | 0 | 0 | 1,490 | 0.358087 |
683ea3da0b0b99dd76dd80bf6093b1dfbb0b984c | 8,145 | py | Python | lowendspirit/cloudflareAPI.py | boxcontrol/lowendspirit | d6ca41d14c1a974794c483d3baf885f1f3a40baf | [
"MIT"
] | 3 | 2017-04-25T20:00:35.000Z | 2021-12-03T01:28:38.000Z | lowendspirit/cloudflareAPI.py | boxcontrol/lowendspirit | d6ca41d14c1a974794c483d3baf885f1f3a40baf | [
"MIT"
] | null | null | null | lowendspirit/cloudflareAPI.py | boxcontrol/lowendspirit | d6ca41d14c1a974794c483d3baf885f1f3a40baf | [
"MIT"
] | 2 | 2020-05-14T23:07:47.000Z | 2022-01-08T16:49:05.000Z | import requests
from requests.auth import HTTPBasicAuth
import json
import pprint
class Cloudflare_Enduser_API:
    """Thin wrapper around the CloudFlare v4 end-user REST API.

    Requests are authenticated with the X-Auth-Key / X-Auth-Email headers.
    The API base URL is taken from the module-level ``api_endpoint`` name,
    which is expected to be defined elsewhere in the module.
    """

    def __init__(self, cf_token, cf_email):
        self.cf_token = cf_token
        self.cf_email = cf_email
        self.headers = {
            'Content-Type': 'application/json',
            'X-Auth-Key': self.cf_token,
            'X-Auth-Email': self.cf_email
        }

    def cfQuery(self, domain_id='', endpoint='', data={}):
        """
        Main GET query function
        :param domain_id: ID of domain (zone) to query
        :param endpoint: user, zone, etc...
        :param data: accepted for symmetry but unused for GET requests
        :return: result cloudflare response
        """
        if not domain_id and not endpoint:
            # no arguments: list all zones
            return requests.get(api_endpoint + '/zones' + domain_id + endpoint, headers=self.headers)
        elif domain_id and endpoint:
            return requests.get(api_endpoint + '/zones/' + domain_id + '/' + endpoint, headers=self.headers)
        elif endpoint:
            return requests.get(api_endpoint + '/' + endpoint, headers=self.headers)
        # Bugfix: a zone id without an endpoint (e.g. get_zone_details)
        # previously fell through every branch and returned None, so
        # callers chaining .json() crashed with AttributeError.
        return requests.get(api_endpoint + '/zones/' + domain_id, headers=self.headers)

    def cfQuery_patch(self, data, endpoint):
        """
        Main PATCH query function
        :param endpoint: user, zone, etc...
        :param data: data to be patched
        :return: result cloudflare response
        """
        auth = HTTPBasicAuth(self.cf_email, self.cf_token)
        return requests.patch(api_endpoint + endpoint, data=json.dumps(data), auth=auth,
                              headers=self.headers)

    def cfQuery_post(self, data, endpoint):
        """
        Main POST query function
        :param data: data to be posted
        :return: cloudflare response
        """
        return requests.post(api_endpoint + endpoint, data=json.dumps(data), headers=self.headers)

    def cfQuery_delete(self, id, endpoint):
        """Main DELETE query function; ``id`` is appended to ``endpoint``."""
        return requests.delete(api_endpoint + endpoint + id, headers=self.headers)

    def cfQuery_put(self, data, endpoint):
        """
        Main PUT query function
        :param data: data to be posted
        :return: cloudflare response
        """
        return requests.put(api_endpoint + endpoint, data=json.dumps(data), headers=self.headers)

    def pp_print_of(self, some_input):
        """
        Pretty print the output
        :param some_input: anything, it can be a function too
        :return: the pprint return value (note: pprint prints and returns None)
        """
        pp = pprint.PrettyPrinter(indent=4)
        return pp.pprint(some_input)

    def list_all(self):
        """
        Get all data associated with cloudflare user
        :return: all data for the user account (pretty-printed to stdout)
        """
        r = self.cfQuery().json()
        pp = pprint.PrettyPrinter(indent=4)
        return pp.pprint(r)

    def list_domain_ids(self):
        """
        lists domain ID/'s
        :return: dict mapping domain name -> zone id for the account
        """
        r = self.cfQuery().json()
        records = []
        for domain in r.get('result'):
            domain_name = domain.get('name')
            domain_id = domain.get('id')
            records.append((domain_name, domain_id))
        return dict(records)

    def get_record(self, id):
        """
        get dns records for given domain id
        :param id: the domain id, obtainable via list_domain_ids
        :return: content of the first dns record, or an error string when
                 the id is unknown
        """
        domains = self.list_domain_ids()
        endpoint = 'dns_records'  # endpoint of the query for CloudFlare API v4
        if str(id) in domains.values():
            r = self.cfQuery(domain_id=id, endpoint=endpoint)
            return r.json().get('result')[0].get('content')
        elif str(id) not in domains.values():
            # Bugfix: corrected the "list od" typo in the error message.
            return ''.join('There is no domain with id: ' + str(id) +
                           "\nlist of available domain id's is:\n" +
                           json.dumps(domains))

    def get_user(self):
        """
        Get user info
        :return: return user info
        """
        return self.cfQuery(endpoint='user').json()

    def patch_user(self, data):
        """
        Update your user profile
        :param data: dictionary of data to be changed
        :return: PATCH response (not decoded to JSON)
        """
        return self.cfQuery_patch(endpoint='/user', data=data)

    def get_user_billing_profile(self, x):
        """
        User Billing Profile
        :param x: cloudflare app endpoint (profile, history)
        :return: current billing profile
        """
        return self.cfQuery(endpoint='user/billing/' + x).json()

    def get_app_subscriptions(self, x=None):
        """
        List all of your app subscriptions
        :param x: optional app id to show info for a single app
        :return: list
        """
        if x:
            return self.cfQuery(endpoint='user/billing/subscriptions/apps' + '/' + x).json()
        return self.cfQuery(endpoint='user/billing/subscriptions/apps').json()

    ######## FOR NOW WILL LEAVE OUT REST OF PAYED API CALLES

    def get_firewall_rules(self):
        """
        List firewall access rules
        :return: list
        """
        return self.cfQuery(endpoint='/user/firewall/access_rules/rules').json()

    def set_access_rule(self, data):
        """Create a user-level firewall access rule."""
        return self.cfQuery_post(data, endpoint='/user/firewall/access_rules/rules').json()

    def update_access_rule(self, data):
        """Update a user-level firewall access rule."""
        return self.cfQuery_patch(data, endpoint='/user/firewall/access_rules/rules').json()

    def del_access_rule(self, id):
        """Delete the user-level firewall access rule with the given id."""
        return self.cfQuery_delete(id, endpoint='/user/firewall/access_rules/rules/').json()

    ################################################################
    ########## Domain related

    def add_new_zone(self, data):
        # Not implemented yet.
        # return self.cfQuery_post(data, endpoint='/zones')
        pass

    def get_zone_details(self, id):
        """Return the details of a single zone (requires the cfQuery fix above)."""
        return self.cfQuery(domain_id=id).json()

    ################################################################
    ############## DNS Record

    def create_dns_record(self, id, data):
        """Create a DNS record inside zone ``id``."""
        return self.cfQuery_post(data, endpoint='/zones/' + id + '/dns_records').json()

    def get_dns_records(self, id):
        """List all DNS records of zone ``id``."""
        endpoint = '/dns_records'
        return self.cfQuery(domain_id=id, endpoint=endpoint).json()

    def get_dns_record_details(self, id, record_id):
        """Return the details of one DNS record inside zone ``id``."""
        endpoint = '/dns_records/' + record_id
        return self.cfQuery(domain_id=id, endpoint=endpoint).json()

    def update_dns_record(self, data):
        """Replace a DNS record; ``data`` must carry 'zone_id' and 'id'."""
        endpoint = '/zones/' + data.get('zone_id') + '/dns_records/' + data.get('id')
        return self.cfQuery_put(data, endpoint=endpoint).json()

    def del_dns_record(self, zone_id, domain_id):
        """Delete DNS record ``domain_id`` from zone ``zone_id``."""
        endpoint = '/zones/' + zone_id + '/dns_records/'
        return self.cfQuery_delete(id=domain_id, endpoint=endpoint).json()

    #################################################################
    ################## Analytics

    def get_zone_analytics(self, zone_id):
        """Return the analytics dashboard data for a zone."""
        endpoint = 'analytics/dashboard'
        return self.cfQuery(domain_id=zone_id, endpoint=endpoint).json()

    ##################################################################
    ################## Zone Firewall rules

    def get_zone_firewall_rules(self, zone_id):
        """List firewall access rules scoped to one zone."""
        endpoint = 'firewall/access_rules/rules'
        return self.cfQuery(domain_id=zone_id, endpoint=endpoint).json()

    def create_zone_firewall_rule(self, zone_id, data):
        """Create a firewall access rule scoped to one zone."""
        endpoint = '/zones/' + zone_id + '/firewall/access_rules/rules'
        return self.cfQuery_post(data=data, endpoint=endpoint).json()

    def update_zone_firewall_rule(self, zone_id, data):
        """Update a zone firewall rule; returns the raw response object."""
        endpoint = '/zones/' + zone_id + '/firewall/access_rules/rules'
        return self.cfQuery_patch(data=data, endpoint=endpoint)

    def del_zone_firewall_rule(self, zone_id, data):
        endpoint = '/zones/' + zone_id + '/firewall/access_rules/rules'
        # Not implemented yet.
        # return self.cfQuery_delete(id=data.get(id), endpoint=endpoint)
        pass
| 35.881057 | 108 | 0.59558 | 8,056 | 0.989073 | 0 | 0 | 0 | 0 | 0 | 0 | 3,276 | 0.40221 |
683f08f0ca695b37d41ba51ec1f2b90d9c25e9ea | 1,085 | py | Python | tests/unit_tests/test_features.py | constantinpape/mc_luigi | c8dac84ace7d422f7ec25d722204b25d625c84e1 | [
"MIT"
] | null | null | null | tests/unit_tests/test_features.py | constantinpape/mc_luigi | c8dac84ace7d422f7ec25d722204b25d625c84e1 | [
"MIT"
] | null | null | null | tests/unit_tests/test_features.py | constantinpape/mc_luigi | c8dac84ace7d422f7ec25d722204b25d625c84e1 | [
"MIT"
] | null | null | null | import unittest
import os
from subprocess import call
import z5py
import vigra
from test_class import McLuigiTestCase
class TestDataTasks(McLuigiTestCase):
    """Unit tests for mc_luigi feature computation executables."""

    @classmethod
    def setUpClass(cls):
        super(TestDataTasks, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TestDataTasks, cls).tearDownClass()

    def check_features(self, feature_path):
        """Assert the features at *feature_path* match the cached region
        adjacency graph: one row per RAG edge and no all-zero column."""
        rag_path = './cache/StackedRegionAdjacencyGraph_sampleA_watershed.h5'
        self.assertTrue(os.path.exists(rag_path))
        n_edges = vigra.readHDF5(rag_path, 'numberOfEdges')
        self.assertTrue(os.path.exists(feature_path))
        features = z5py.File(feature_path, use_zarr_format=False)['data'][:]
        self.assertEqual(n_edges, len(features))
        # Every feature column must contain at least one non-zero value.
        for feat_id in range(features.shape[1]):
            self.assertFalse((features[:, feat_id] == 0).all())

    def test_region_features(self):
        # Run the feature executable, then validate its output.
        call(['python', './executables/features.py', 'region'])
        # NOTE(review): feat_path is empty, so check_features will fail on
        # os.path.exists('') — the real output path looks missing here.
        feat_path = ''
        self.check_features(feat_path)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 27.125 | 77 | 0.684793 | 913 | 0.841475 | 0 | 0 | 174 | 0.160369 | 0 | 0 | 134 | 0.123502 |
683fdfc38c6be9c97c19d59c1851c1bf1394efb9 | 1,912 | py | Python | src/images/migrations/0004_auto_20200713_1943.py | thesus/bokstaever | e0a5e2614dd222ccd56a8945aba4fd28de85dd31 | [
"MIT"
] | null | null | null | src/images/migrations/0004_auto_20200713_1943.py | thesus/bokstaever | e0a5e2614dd222ccd56a8945aba4fd28de85dd31 | [
"MIT"
] | 7 | 2019-11-18T16:11:01.000Z | 2019-11-18T16:11:04.000Z | src/images/migrations/0004_auto_20200713_1943.py | thesus/bokstaever | e0a5e2614dd222ccd56a8945aba4fd28de85dd31 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-07-13 19:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.8: adds Image.feed and re-declares
    # existing image/imagefile fields (mostly attaching verbose names).
    # Avoid hand-editing; keep in sync with the database state.

    dependencies = [
        ("images", "0003_image_creation_date"),
    ]

    operations = [
        # New boolean flag controlling whether an image shows in the feed.
        migrations.AddField(
            model_name="image",
            name="feed",
            field=models.BooleanField(default=False, verbose_name="In feed"),
        ),
        migrations.AlterField(
            model_name="image",
            name="creation_date",
            field=models.DateTimeField(auto_now_add=True, verbose_name="Creation date"),
        ),
        migrations.AlterField(
            model_name="image",
            name="files",
            field=models.ManyToManyField(to="images.ImageFile", verbose_name="Files"),
        ),
        migrations.AlterField(
            model_name="image",
            name="thumbnail",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="container",
                to="images.ImageFile",
                verbose_name="Thumbnail",
            ),
        ),
        migrations.AlterField(
            model_name="image",
            name="title",
            field=models.CharField(max_length=200, verbose_name="Title"),
        ),
        migrations.AlterField(
            model_name="imagefile",
            name="height",
            field=models.IntegerField(verbose_name="Height"),
        ),
        migrations.AlterField(
            model_name="imagefile",
            name="image_file",
            field=models.FileField(upload_to="", verbose_name="Image file"),
        ),
        migrations.AlterField(
            model_name="imagefile",
            name="width",
            field=models.IntegerField(verbose_name="Width"),
        ),
    ]
| 30.83871 | 88 | 0.550732 | 1,786 | 0.9341 | 0 | 0 | 0 | 0 | 0 | 0 | 347 | 0.181485 |
684030ef33682d87ebeb550a206ba38d9db656d9 | 4,733 | py | Python | grblas/backends/python/matrix.py | jim22k/grblas-dev | 5574894c6317c61fa747ac17b4112f2c6b3c4a6c | [
"Apache-2.0"
] | null | null | null | grblas/backends/python/matrix.py | jim22k/grblas-dev | 5574894c6317c61fa747ac17b4112f2c6b3c4a6c | [
"Apache-2.0"
] | null | null | null | grblas/backends/python/matrix.py | jim22k/grblas-dev | 5574894c6317c61fa747ac17b4112f2c6b3c4a6c | [
"Apache-2.0"
] | null | null | null | import numba
import numpy as np
from scipy.sparse import csr_matrix
from .base import BasePointer, GraphBlasContainer
from .context import handle_panic, return_error
from .exceptions import GrB_Info
class MatrixPtr(BasePointer):
    """Out-parameter standing in for a C-style GrB_Matrix* handle."""

    def set_matrix(self, matrix):
        # Bind the freshly created Matrix to this pointer.
        self.instance = matrix
class Matrix(GraphBlasContainer):
    """GraphBLAS matrix backed by a scipy.sparse CSR matrix."""

    def __init__(self, matrix):
        # Only CSR storage is supported by this backend.
        assert isinstance(matrix, csr_matrix)
        self.matrix = matrix

    @classmethod
    def new_from_dtype(cls, dtype, nrows, ncols):
        """Build an empty nrows x ncols matrix of the given dtype."""
        matrix = csr_matrix((nrows, ncols), dtype=dtype)
        return cls(matrix)

    @classmethod
    def new_from_existing(cls, other):
        """Build a copy of *other* (anything csr_matrix() accepts)."""
        matrix = csr_matrix(other)
        return cls(matrix)

    @classmethod
    def get_pointer(cls):
        # Fresh out-parameter for the *_new / *_dup style API calls.
        return MatrixPtr()
@handle_panic
def Matrix_new(A: MatrixPtr, dtype: type, nrows: int, ncols: int):
    """GrB_Matrix_new: allocate an empty matrix into out-pointer *A*.

    Dimensions must be strictly positive; otherwise GrB_INVALID_VALUE
    is reported through return_error.
    """
    if nrows <= 0:
        return_error(GrB_Info.GrB_INVALID_VALUE, "nrows must be > 0")
    if ncols <= 0:
        return_error(GrB_Info.GrB_INVALID_VALUE, "ncols must be > 0")
    matrix = Matrix.new_from_dtype(dtype, nrows, ncols)
    A.set_matrix(matrix)
    return GrB_Info.GrB_SUCCESS
@handle_panic
def Matrix_dup(C: MatrixPtr, A: Matrix):
    """GrB_Matrix_dup: place a fresh copy of matrix *A* into out-pointer *C*."""
    C.set_matrix(Matrix.new_from_existing(A))
    return GrB_Info.GrB_SUCCESS
@handle_panic
def Matrix_resize(C: Matrix, nrows: int, ncols: int):
    """GrB_Matrix_resize: resize *C* in place to nrows x ncols.

    Dimensions must be strictly positive; otherwise GrB_INVALID_VALUE
    is reported through return_error.
    """
    if nrows <= 0:
        return_error(GrB_Info.GrB_INVALID_VALUE, "nrows must be > 0")
    if ncols <= 0:
        return_error(GrB_Info.GrB_INVALID_VALUE, "ncols must be > 0")
    C.matrix.resize((nrows, ncols))
    return GrB_Info.GrB_SUCCESS
# TODO: this is just the essential code; it needs to handle descriptors, masks, accumulators, etc
@handle_panic
def mxm(C, A, B, semiring):
    """GrB_mxm core: C = A (+.x) B over *semiring*.

    C, A and B are scipy.sparse CSR matrices; the result is written into
    C's data/indices/indptr in place.

    Bug fix: the inner-dimension mismatch message previously read
    "A.nrows != B.ncols", but the check compares A's column count (ac)
    against B's row count (br); the message now matches the check.
    """
    cr, cc = C.shape
    ar, ac = A.shape
    br, bc = B.shape
    if cr != ar:
        return_error(GrB_Info.GrB_DIMENSION_MISMATCH, "C.nrows != A.nrows")
    if cc != bc:
        return_error(GrB_Info.GrB_DIMENSION_MISMATCH, "C.ncols != B.ncols")
    if ac != br:
        return_error(GrB_Info.GrB_DIMENSION_MISMATCH, "A.ncols != B.nrows")
    # CSC view of B gives cheap sorted access to each column for the
    # merge-based inner products in _sparse_matmul.
    b = B.tocsc()
    d, i, ip = _sparse_matmul(
        A.data,
        A.indices,
        A.indptr,
        b.data,
        b.indices,
        b.indptr,
        semiring.plus.op,
        semiring.times,
        semiring.plus.identity,
        C.dtype,
    )
    C.data = d
    C.indices = i
    C.indptr = ip
    return GrB_Info.GrB_SUCCESS
@numba.njit
def _sparse_matmul(
    a_data,
    a_indices,
    a_indptr,
    b_data,
    b_indices,
    b_indptr,
    plus,
    times,
    identity,
    dtype,
):
    """Semiring product of CSR(A) with CSC(B).

    Returns (data, indices, indptr) forming the CSR representation of
    the result. *plus*/*times*/*identity* define the semiring; *dtype*
    is the output value type. Compiled with numba.njit, so only
    nopython-compatible operations are used.
    """
    # Final array size is unknown, so we give ourselves room and then adjust on the fly
    tmp_output_size = a_data.size * 2
    data = np.empty((tmp_output_size,), dtype=dtype)
    indices = np.empty((tmp_output_size,), dtype=a_indices.dtype)
    indptr = np.empty((a_indptr.size,), dtype=a_indptr.dtype)
    output_counter = 0
    for iptr in range(a_indptr.size - 1):  # one output row per row of A
        indptr[iptr] = output_counter
        for jptr in range(b_indptr.size - 1):  # one candidate per column of B
            a_counter = a_indptr[iptr]
            a_stop = a_indptr[iptr + 1]
            b_counter = b_indptr[jptr]
            b_stop = b_indptr[jptr + 1]
            val = identity
            nonempty = False
            # Merge the sorted index lists of A's row and B's column,
            # combining matches with times() and folding with plus().
            while a_counter < a_stop and b_counter < b_stop:
                a_k = a_indices[a_counter]
                b_k = b_indices[b_counter]
                if a_k == b_k:
                    val = plus(val, times(a_data[a_counter], b_data[b_counter]))
                    nonempty = True
                    a_counter += 1
                    b_counter += 1
                elif a_k < b_k:
                    a_counter += 1
                else:
                    b_counter += 1
            if nonempty:
                if output_counter >= tmp_output_size:
                    # We filled up the allocated space; copy existing data to a larger array
                    tmp_output_size *= 2
                    new_data = np.empty((tmp_output_size,), dtype=data.dtype)
                    new_indices = np.empty((tmp_output_size,), dtype=indices.dtype)
                    new_data[:output_counter] = data[:output_counter]
                    new_indices[:output_counter] = indices[:output_counter]
                    data = new_data
                    indices = new_indices
                data[output_counter] = val
                indices[output_counter] = jptr
                output_counter += 1
    # Add final entry to indptr (should indicate nnz in the output)
    nnz = output_counter
    indptr[iptr + 1] = nnz
    # Trim output arrays
    data = data[:nnz]
    indices = indices[:nnz]
    return (data, indices, indptr)
| 30.733766 | 97 | 0.603 | 576 | 0.121699 | 0 | 0 | 4,163 | 0.879569 | 0 | 0 | 471 | 0.099514 |
684188842440f0302697bdfa81cff956f82483fa | 266 | py | Python | server/medication_icons/admin.py | nickdotreid/opioid-mat-decision-aid | bbc2a0d8931d59cd6ab64b0b845e88c8dc1af5d1 | [
"MIT"
] | null | null | null | server/medication_icons/admin.py | nickdotreid/opioid-mat-decision-aid | bbc2a0d8931d59cd6ab64b0b845e88c8dc1af5d1 | [
"MIT"
] | 27 | 2018-09-30T07:59:21.000Z | 2020-11-05T19:25:41.000Z | server/medication_icons/admin.py | nickdotreid/opioid-mat-decision-aid | bbc2a0d8931d59cd6ab64b0b845e88c8dc1af5d1 | [
"MIT"
] | null | null | null | from django.contrib import admin
from imagekit.admin import AdminThumbnail
from .models import Icon
@admin.register(Icon)
class IconAdmin(admin.ModelAdmin):
    """Django-admin configuration for Icon: name plus inline thumbnail."""
    list_display = ('name', 'icon_thumbnail')
    # Renders Icon.thumbnail as an image in the change-list column.
    icon_thumbnail = AdminThumbnail(image_field='thumbnail')
| 26.6 | 60 | 0.785714 | 141 | 0.530075 | 0 | 0 | 163 | 0.612782 | 0 | 0 | 33 | 0.12406 |
6842dd797c24725a8fb09ac24488ca4ad6bea4a3 | 245 | py | Python | dlutils/models/sklearn/classification.py | chelseajohn/dlapplication | d2eaba9077320f5a33e122b99691577fe899e1d6 | [
"Apache-2.0"
] | 2 | 2020-05-07T05:08:54.000Z | 2020-05-13T10:14:53.000Z | dlutils/models/sklearn/classification.py | chelseajohn/dlapplication | d2eaba9077320f5a33e122b99691577fe899e1d6 | [
"Apache-2.0"
] | null | null | null | dlutils/models/sklearn/classification.py | chelseajohn/dlapplication | d2eaba9077320f5a33e122b99691577fe899e1d6 | [
"Apache-2.0"
def getSKLearnLogisticRegression(self, regParam, dim=1):
    """Factory: build a DLplatform LogisticRegression learner.

    NOTE(review): defined at module level yet takes *self*, which is never
    used — presumably meant to be bound onto a class; confirm.

    :param regParam: regularization parameter forwarded to the learner
    :param dim: input dimensionality (default 1)
    :return: configured LogisticRegression instance
    """
    # Deferred import: DLplatform is only loaded when the factory is called.
    from DLplatform.learning.batch.sklearnClassifiers import LogisticRegression
    learner = LogisticRegression(regParam = regParam, dim = dim)
    return learner
6842eadf7eb0da603518d7dfbb7c45e88afc4efa | 2,933 | py | Python | tests/test_storage.py | tefra/pytubefm | a7c5d6252584dc0abee946e707f496cecaebf1bb | [
"MIT"
] | null | null | null | tests/test_storage.py | tefra/pytubefm | a7c5d6252584dc0abee946e707f496cecaebf1bb | [
"MIT"
] | 2 | 2019-01-06T00:16:23.000Z | 2019-01-12T19:17:07.000Z | tests/test_storage.py | tefra/pytubefm | a7c5d6252584dc0abee946e707f496cecaebf1bb | [
"MIT"
] | null | null | null | import json
import os
import shutil
import tempfile
from datetime import timedelta
from unittest import mock
from unittest import TestCase
from pytuber.storage import Registry
class RegistryTests(TestCase):
    """Tests for the pytuber Registry singleton key/value store."""

    def tearDown(self):
        # Reset persisted data and the shared singleton dict so each
        # test starts from a clean state.
        Registry.clear()
        Registry._obj = {}

    def test_singleton(self):
        # Two constructions yield the very same object.
        a = Registry()
        b = Registry()
        self.assertIs(a, b)
        a[1] = 2
        self.assertEqual({1: 2}, b)

    def test_set(self):
        # set() builds nested dicts along the key path; the final
        # argument is the stored value.
        Registry.set(1, 2, 3, 4, 5)
        self.assertEqual({1: {2: {3: {4: 5}}}}, Registry())
        Registry.set(1, 3, 5)
        self.assertEqual({1: {2: {3: {4: 5}}, 3: 5}}, Registry())

    def test_get(self):
        Registry.set(1, 2, 3, 4, 5)
        self.assertEqual({4: 5}, Registry.get(1, 2, 3))
        # Missing keys raise rather than return a default.
        with self.assertRaises(KeyError):
            Registry.get(2)

    def test_clear(self):
        Registry.set(1, 2, 3, 4, 5)
        self.assertEqual({4: 5}, Registry.get(1, 2, 3))
        Registry.clear()
        self.assertEqual({}, Registry())

    def test_from_file(self):
        # from_file() loads into an empty registry, but an existing
        # in-memory value is not overwritten by a second load.
        try:
            tmp = tempfile.mkdtemp()
            file_path = os.path.join(tmp, "foo.json")
            with open(file_path, "w") as fp:
                json.dump({"a": True}, fp)
            Registry.from_file(file_path)
            self.assertEqual({"a": True}, Registry())
            Registry.set("a", False)
            self.assertFalse(Registry.get("a"))
            Registry.from_file(file_path)
            self.assertFalse(Registry.get("a"))
        finally:
            shutil.rmtree(tmp)

    def test_persist(self):
        # persist() snapshots current state; later mutations are not
        # saved. The JSON round-trip stringifies the integer keys.
        try:
            Registry.set(1, 2, 3, 4)
            tmp = tempfile.mkdtemp()
            file_path = os.path.join(tmp, "foo.json")
            Registry.persist(file_path)
            Registry.set(1, 2, 3, 5)
            Registry._obj = {}
            Registry.from_file(file_path)
            self.assertEqual({"1": {"2": {"3": 4}}}, Registry())
        finally:
            shutil.rmtree(tmp)

    @mock.patch("pytuber.storage.time.time")
    def test_cache(self, time):
        # Scripted clock: the five time() calls made below consume
        # these values in order.
        time.side_effect = [10, 20.1, 20.1, 20.5, 20.8]

        def callme(ttl, value, refresh=False):
            return Registry.cache(
                key="foo",
                ttl=timedelta(seconds=ttl),
                func=lambda: value,
                refresh=refresh,
            )

        # First call computes and stores (value, expiry timestamp).
        self.assertEqual("first", callme(10, "first"))
        self.assertEqual(("first", 20.0), Registry.get("foo"))
        # Expired entry is recomputed.
        self.assertEqual("second", callme(1, "second"))
        self.assertEqual(("second", 21.1), Registry.get("foo"))
        # Fresh entry is served from cache; new value ignored.
        self.assertEqual("second", callme(1, "third"))
        self.assertEqual(("second", 21.1), Registry.get("foo"))
        # refresh=True forces recomputation even when fresh.
        self.assertEqual("third", callme(100, "third", refresh=True))
        self.assertEqual(("third", 120.8), Registry.get("foo"))
        self.assertEqual(5, time.call_count)
| 27.157407 | 69 | 0.545176 | 2,753 | 0.938629 | 0 | 0 | 906 | 0.308899 | 0 | 0 | 188 | 0.064098 |
6842faa221c3f8cd72b5a952bcb528ae995ea413 | 2,636 | py | Python | Solutions/187.py | ruppysuppy/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
] | 70 | 2021-03-18T05:22:40.000Z | 2022-03-30T05:36:50.000Z | Solutions/187.py | ungaro/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
] | null | null | null | Solutions/187.py | ungaro/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
] | 30 | 2021-03-18T05:22:43.000Z | 2022-03-17T10:25:18.000Z | """
Problem:
You are given a list of rectangles represented by min and max x- and
y-coordinates. Compute whether or not a pair of rectangles overlap each other. If one
rectangle completely covers another, it is considered overlapping.
For example, given the following rectangles:
{
"top_left": (1, 4),
"dimensions": (3, 3) # width, height
},
{
"top_left": (-1, 3),
"dimensions": (2, 1)
},
{
"top_left": (0, 5),
"dimensions": (4, 3)
}
return true as the first and third rectangle overlap each other.
"""
from typing import Dict, List, Tuple
Rectangle = Dict[str, Tuple[int, int]]
def get_intersection_area(rect1: "Rectangle", rect2: "Rectangle") -> int:
    """Return the area of the overlap between two rectangles.

    A rectangle is a dict with ``top_left`` (x, y) and ``dimensions``
    (width, height); y grows upward, so it spans x in [x, x + width]
    and y in [y - height, y].

    Bug fix: the previous version measured each span from one
    rectangle's far edge to the other's near edge without clamping to
    the narrower rectangle, so a rectangle strictly inside a wider or
    taller one produced an inflated "intersection" area (which broke
    the containment check in check_rectangles_intersection). Both axes
    are now clamped to the true shared extent. The bogus
    List[Rectangle] parameter annotations were also corrected.
    """
    x1, y1 = rect1["top_left"]
    w1, h1 = rect1["dimensions"]
    x2, y2 = rect2["top_left"]
    w2, h2 = rect2["dimensions"]

    # Shared extent on each axis; non-positive means no overlap.
    span_x = min(x1 + w1, x2 + w2) - max(x1, x2)
    span_y = min(y1, y2) - max(y1 - h1, y2 - h2)
    if span_x <= 0 or span_y <= 0:
        return 0
    return span_x * span_y
def get_covered_area(rect: "Rectangle") -> int:
    """Return the full area (width * height) of *rect*."""
    dims = rect["dimensions"]
    return dims[0] * dims[1]
def check_rectangles_intersection(rectangles: "List[Rectangle]") -> bool:
    """Return True when some rectangle in *rectangles* completely covers
    another (the intersection area equals one rectangle's full area)."""
    # Compare every unordered pair exactly once.
    for idx, first in enumerate(rectangles):
        for second in rectangles[idx + 1:]:
            overlap = get_intersection_area(first, second)
            if overlap in (get_covered_area(first), get_covered_area(second)):
                return True
    return False
if __name__ == "__main__":
    # NOTE: THE QUESTION STATEMENT IS WRONG THE RECTANGLES 1 & 3 DOES NOT OVERLAP BUT
    # ONLY INTERSECT (SMALL MODIFICATION DONE TO MAKE THEM OVERLAP)
    rectangles = [
        {"top_left": (1, 4), "dimensions": (3, 3)},
        {"top_left": (-1, 3), "dimensions": (2, 1)},
        {"top_left": (0, 5), "dimensions": (4, 4)},  # MODIFICATION
    ]
    # Third rectangle fully covers the first -> True.
    print(check_rectangles_intersection(rectangles))
    # After removing it, no rectangle covers another -> False.
    rectangles.pop()
    print(check_rectangles_intersection(rectangles))
"""
SPECS:
TIME COMPLEXITY: O(n ^ 2)
SPACE COMPLEXITY: O(1)
"""
| 27.747368 | 85 | 0.621396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,075 | 0.407815 |
6845278d22fbe126696231718367291253a5efff | 1,819 | py | Python | benchmark/bench_asynckafka.py | askuratovsky/asynckafka | 16be6d89b0e5c18899047c1c5abd899edec1204b | [
"MIT"
] | 31 | 2018-02-28T09:43:01.000Z | 2022-01-06T14:51:03.000Z | benchmark/bench_asynckafka.py | askuratovsky/asynckafka | 16be6d89b0e5c18899047c1c5abd899edec1204b | [
"MIT"
] | 7 | 2018-11-07T20:21:57.000Z | 2022-01-06T14:50:58.000Z | benchmark/bench_asynckafka.py | askuratovsky/asynckafka | 16be6d89b0e5c18899047c1c5abd899edec1204b | [
"MIT"
] | 6 | 2019-05-17T06:25:07.000Z | 2022-01-15T13:05:29.000Z | import asyncio
from asynckafka import Producer, Consumer
import config
import utils
loop = asyncio.get_event_loop()
async def fill_topic_with_messages():
    """Benchmark setup: produce MESSAGE_NUMBER fixed-size messages to the
    benchmark topic and report producer-side timing statistics."""
    producer = Producer(
        brokers=config.KAFKA_URL,
        rdk_producer_config=config.RDK_PRODUCER_CONFIG,
        rdk_topic_config=config.RDK_TOPIC_CONFIG,
    )
    producer.start()
    messages_consumed = 0
    print(f"Preparing benchmark. Filling topic {config.TOPIC} with "
          f"{config.MESSAGE_NUMBER} messages of {config.MESSAGE_BYTES} bytes "
          f"each one.")
    # Give the producer a moment to settle before timing starts.
    await asyncio.sleep(0.1)
    with utils.Timer() as timer:
        for _ in range(config.MESSAGE_NUMBER):
            messages_consumed += 1
            await producer.produce(config.TOPIC, config.MESSAGE)
    # NOTE(review): stop() sits outside the timed block here; if it
    # flushes pending messages its cost is not measured — confirm.
    producer.stop()
    print(f"The producer time to send the messages is {timer.interval} "
          f"seconds.")
    utils.print_statistics(timer.interval)
async def consume_the_messages_stream_consumer():
    """Benchmark: consume MESSAGE_NUMBER messages from the topic and
    report consumer-side timing statistics."""
    stream_consumer = Consumer(
        brokers=config.KAFKA_URL,
        topics=[config.TOPIC],
        rdk_consumer_config=config.RDK_CONSUMER_CONFIG,
        rdk_topic_config=config.RDK_TOPIC_CONFIG
    )
    stream_consumer.start()
    messages_consumed = 0
    print("Starting to consume the messages.")
    with utils.Timer() as timer:
        async for message in stream_consumer:
            messages_consumed += 1
            # Stop once the expected number of messages has arrived,
            # which ends the async iteration.
            if messages_consumed == config.MESSAGE_NUMBER:
                stream_consumer.stop()
    print(f"The time used to consume the messages is {timer.interval} "
          f"seconds.")
    utils.print_statistics(timer.interval)
async def main_coro():
    """Benchmark driver: fill the topic, then time consuming it back."""
    await fill_topic_with_messages()
    await consume_the_messages_stream_consumer()


if __name__ == "__main__":
    # Runs on the module-level event loop created at import time.
    loop.run_until_complete(main_coro())
| 27.984615 | 78 | 0.68829 | 0 | 0 | 0 | 0 | 0 | 0 | 1,621 | 0.891149 | 329 | 0.180869 |
6849219d3c1b5f5dd246b210ec443de592738989 | 744 | py | Python | lcd/interrupts/interrupt3.py | BornToDebug/homeStruction | 354e03c05cb363d8397d0e2d7afeb78a029266f9 | [
"Apache-2.0"
] | 6 | 2016-08-31T16:46:54.000Z | 2017-09-15T19:34:30.000Z | lcd/interrupts/interrupt3.py | BornToDebug/homeStruction | 354e03c05cb363d8397d0e2d7afeb78a029266f9 | [
"Apache-2.0"
] | 4 | 2016-09-02T09:18:41.000Z | 2016-09-02T09:24:08.000Z | lcd/interrupts/interrupt3.py | BornToDebug/homeStruction | 354e03c05cb363d8397d0e2d7afeb78a029266f9 | [
"Apache-2.0"
] | null | null | null | import RPi.GPIO as GPIO
# Pin numbering follows the Broadcom (BCM) scheme.
GPIO.setmode(GPIO.BCM)
# Inputs 23 and 18 idle high (internal pull-ups); 24 idles low (pull-down).
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(24, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# Python 2 script: print-statement syntax is kept as-is.
def my_callback(channel):
    # Edge-detection handler for GPIO 18.
    print "falling edge detected on 18"

def my_callback2(channel):
    # Edge-detection handler for GPIO 23.
    print "falling edge detected on 23"
# Wait for the user before arming the interrupts (Python 2 raw_input).
raw_input("Press Enter when ready\n>")
# Register falling-edge callbacks with 300 ms software debounce.
GPIO.add_event_detect(18, GPIO.FALLING, callback=my_callback, bouncetime=300)
GPIO.add_event_detect(23, GPIO.FALLING, callback=my_callback2, bouncetime=300)
try:
    print "Waiting for rising edge on port 24"
    # Blocks until GPIO 24 goes high (or Ctrl-C interrupts).
    GPIO.wait_for_edge(24, GPIO.RISING)
    print "Rising edge detected on port 24. Here endeth the third lesson."
except KeyboardInterrupt:
    GPIO.cleanup()
GPIO.cleanup()
| 27.555556 | 78 | 0.77957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.248656 |
6849e5b2237ba2dd6b7a8100fe52a7772987a103 | 2,465 | py | Python | tests/module/module_topology_distmat_test.py | MD-Studio/MDInteract | 55bb51d27ec8d4095118837a997eea7467c2ac53 | [
"Apache-2.0"
] | 4 | 2019-06-24T12:56:28.000Z | 2021-03-27T17:32:17.000Z | tests/module/module_topology_distmat_test.py | MD-Studio/MDInteract | 55bb51d27ec8d4095118837a997eea7467c2ac53 | [
"Apache-2.0"
] | 1 | 2020-09-25T12:15:27.000Z | 2020-10-11T10:28:30.000Z | tests/module/module_topology_distmat_test.py | MD-Studio/MDInteract | 55bb51d27ec8d4095118837a997eea7467c2ac53 | [
"Apache-2.0"
] | 5 | 2019-09-12T02:35:04.000Z | 2021-07-30T18:05:28.000Z | # -*- coding: utf-8 -*-
"""
file: module_topology_distmat_test.py
Unit tests for distance matrix computations
"""
import os
from pandas import DataFrame
from interact.md_system import System
from tests.module.unittest_baseclass import UnittestPythonCompatibility
class DistanceMatrixTests(UnittestPythonCompatibility):
    """Distance-matrix tests against the 1ACJ structure fixture."""

    # Fixture files shipped with the test suite.
    currpath = os.path.dirname(__file__)
    pdb_file = os.path.abspath(os.path.join(currpath, '../files/1acj.pdb'))
    mol_file = os.path.abspath(os.path.join(currpath, '../files/1acj.mol2'))

    def setUp(self):
        """
        Prepare TopologyDataFrame once for every test
        """
        self.top = System(self.pdb_file, mol2file=self.mol_file).topology

    def test_distmat_overflow_exception(self):
        """
        Test OverflowError exception for (too) large distance matrix
        """
        # Unable to compute distance matrix > max_distmat_size
        self.assertRaises(OverflowError, self.top.distances, max_distmat_size=10000)

    def test_distmat_attribute_exception(self):
        """
        Test AttributeError on missing or incomplete coordinates
        """
        # No coordinates
        self.top._coordinates = None
        self.assertRaises(AttributeError, self.top.distances)

    def test_distmat_square(self):
        """
        Test computation of default square matrix
        """
        distmat = self.top.distances()
        self.assertIsInstance(distmat, DataFrame)
        # Square matrix with identical row/column labels.
        self.assertEqual(distmat.shape[0], distmat.shape[1])
        self.assertEqual(list(distmat.columns), list(distmat.index))

    def test_distmat_target(self):
        """
        Test computation of matrix with custom source and target selection
        """
        source = self.top[self.top['resSeq'] == 999]
        target = self.top[self.top['resName'] == 'HIS']
        distmat = source.distances(target=target)
        self.assertIsInstance(distmat, DataFrame)
        # (17, 138) is specific to these selections in the 1ACJ fixture.
        self.assertEqual(distmat.shape, (17, 138))
        self.assertEqual(len(distmat.columns), len(target))
        self.assertEqual(len(distmat.index), len(source))

    def test_distmat_empty_selection(self):
        """
        Test computation of matrix when (one of) the input selections is empty
        """
        # resSeq 9999 matches nothing, giving an empty source selection.
        source = self.top[self.top['resSeq'] == 9999]
        target = self.top[self.top['resName'] == 'HIS']
        self.assertTrue(source.distances().empty)
        self.assertTrue(source.distances(target=target).empty)
684a7ad5dfe8d2dab224d7cde08220c071cef36c | 1,010 | py | Python | src/kgmk/ruijianime/scrape/comic_ids/all.py | kagemeka/python | 486ce39d97360b61029527bacf00a87fdbcf552c | [
"MIT"
] | null | null | null | src/kgmk/ruijianime/scrape/comic_ids/all.py | kagemeka/python | 486ce39d97360b61029527bacf00a87fdbcf552c | [
"MIT"
] | null | null | null | src/kgmk/ruijianime/scrape/comic_ids/all.py | kagemeka/python | 486ce39d97360b61029527bacf00a87fdbcf552c | [
"MIT"
] | null | null | null | import typing
import bs4
import requests
class ScrapeAllComicIds():
    """Scrape ruijianime.com's all-titles index and collect every comic id
    (the trailing numeric query parameter of each title link)."""

    def __call__(
        self,
    ) -> typing.List[int]:
        """Run the scrape and return the collected ids."""
        self.__find()
        return self.__ids

    def __find(
        self,
    ) -> typing.NoReturn:
        # One index page per kana row; visit them all.
        self.__ids = []
        for q in self.__query:
            self.__find_per_page(q)

    def __find_per_page(
        self,
        query: str,
    ) -> typing.NoReturn:
        # Fetch one index page and pull the id out of every title link's
        # trailing '...=<id>' query parameter.
        response = requests.get(
            f'{self.__base_url}'
            f'{query}',
        )
        soup = bs4.BeautifulSoup(
            response.content,
            'html.parser',
        )
        elms = soup.find(
            id='all_title',
        ).find('ul').find_all('li')
        for elm in elms:
            url = elm.find(
                'a',
            ).get('href')
            self.__ids.append(
                int(url.split('=')[-1])
            )

    def __init__(
        self,
    ) -> typing.NoReturn:
        self.__base_url = (
            'http://ruijianime.com/'
            'comic/title/all_title'
            '.php?q='
        )
        # Kana rows of the site's title index (a through wa).
        self.__query = (
            'a', 'ka', 'sa', 'ta',
            'na', 'ha', 'ma', 'ya',
            'ra', 'wa',
        )
684aa04b39502e680a61c0b2aa16db3b3d2dae83 | 6,241 | py | Python | pymdownx/pathconverter.py | willstott101/pymdown-extensions | 11796a749d781b12db43c3967af66e901c1dc6d8 | [
"MIT"
] | null | null | null | pymdownx/pathconverter.py | willstott101/pymdown-extensions | 11796a749d781b12db43c3967af66e901c1dc6d8 | [
"MIT"
] | 2 | 2019-12-10T23:12:38.000Z | 2020-02-20T23:27:26.000Z | pymdownx/pathconverter.py | Richienb/pymdown-extensions | 2385c38bf6988c86c553557e248c4d8c3140dfc1 | [
"MIT"
] | null | null | null | """
Path Converter.
pymdownx.pathconverter
An extension for Python Markdown.
An extension to convert tag paths to relative or absolute:
Given an absolute base and a target relative path, this extension searches for file
references that are relative and converts them to a path relative
to the base path.
-or-
Given an absolute base path, this extension searches for file
references that are relative and converts them to absolute paths.
MIT license.
Copyright (c) 2014 - 2017 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
from markdown import Extension
from markdown.postprocessors import Postprocessor
from . import util
import os
import re
from urllib.parse import urlunparse
RE_TAG_HTML = r'''(?xus)
(?:
(?P<comments>(\r?\n?\s*)<!--[\s\S]*?-->(\s*)(?=\r?\n)|<!--[\s\S]*?-->)|
(?P<open><(?P<tag>(?:%s)))
(?P<attr>(?:\s+[\w\-:]+(?:\s*=\s*(?:"[^"]*"|'[^']*'))?)*)
(?P<close>\s*(?:\/?)>)
)
'''
RE_TAG_LINK_ATTR = re.compile(
r'''(?xus)
(?P<attr>
(?:
(?P<name>\s+(?:href|src)\s*=\s*)
(?P<path>"[^"]*"|'[^']*')
)
)
'''
)
def repl_relative(m, base_path, relative_path):
    """Replace a matched href/src path with a path relative to *relative_path*.

    External URLs and unparseable values are left untouched (the original
    attribute text is returned unchanged).
    """
    link = m.group(0)
    try:
        scheme, netloc, path, params, query, fragment, is_url, is_absolute = util.parse_url(m.group('path')[1:-1])

        if not is_url:
            # Get the absolute path of the file or return
            # if we can't resolve the path
            path = util.url2path(path)
            if (not is_absolute):
                # Convert current relative path to absolute
                path = os.path.relpath(
                    os.path.normpath(os.path.join(base_path, path)),
                    os.path.normpath(relative_path)
                )

            # Convert the path, URL encode it, and format it as a link
            path = util.path2url(path)
            link = '%s"%s"' % (
                m.group('name'),
                urlunparse((scheme, netloc, path, params, query, fragment))
            )
    except Exception:  # pragma: no cover
        # Parsing crashed and burned; no need to continue.
        pass

    return link
def repl_absolute(m, base_path):
    """Replace a matched href/src path with an absolute path under *base_path*.

    External URLs and already-absolute paths are left untouched.
    """
    link = m.group(0)
    try:
        scheme, netloc, path, params, query, fragment, is_url, is_absolute = util.parse_url(m.group('path')[1:-1])

        if (not is_absolute and not is_url):
            path = util.url2path(path)
            path = os.path.normpath(os.path.join(base_path, path))
            path = util.path2url(path)
            # Ensure exactly one leading slash on the rebuilt URL.
            start = '/' if not path.startswith('/') else ''
            link = '%s"%s%s"' % (
                m.group('name'),
                start,
                urlunparse((scheme, netloc, path, params, query, fragment))
            )
    except Exception:  # pragma: no cover
        # Parsing crashed and burned; no need to continue.
        pass

    return link
def repl(m, base_path, rel_path=None):
    """Rewrite one matched tag, converting its href/src attributes.

    When *rel_path* is None the paths are made absolute, otherwise they
    are made relative to *rel_path*. HTML comments pass through as-is.
    """
    if m.group('comments'):
        tag = m.group('comments')
    else:
        tag = m.group('open')
        if rel_path is None:
            tag += RE_TAG_LINK_ATTR.sub(lambda m2: repl_absolute(m2, base_path), m.group('attr'))
        else:
            tag += RE_TAG_LINK_ATTR.sub(lambda m2: repl_relative(m2, base_path, rel_path), m.group('attr'))
        tag += m.group('close')
    return tag
class PathConverterPostprocessor(Postprocessor):
    """Post process to find tag links to convert."""

    def run(self, text):
        """Find and convert paths."""
        basepath = self.config['base_path']
        relativepath = self.config['relative_path']
        absolute = bool(self.config['absolute'])
        # Build the tag regex from the configured whitespace-separated names.
        tags = re.compile(RE_TAG_HTML % '|'.join(self.config['tags'].split()))
        # Relative conversion needs both paths; absolute needs only the base.
        if not absolute and basepath and relativepath:
            text = tags.sub(lambda m: repl(m, basepath, relativepath), text)
        elif absolute and basepath:
            text = tags.sub(lambda m: repl(m, basepath), text)
        return text
class PathConverterExtension(Extension):
    """PathConverter extension."""

    def __init__(self, *args, **kwargs):
        """Initialize the extension's user-facing configuration."""
        self.config = {
            'base_path': ["", "Base path used to find files - Default: \"\""],
            'relative_path': ["", "Path that files will be relative to (not needed if using absolute) - Default: \"\""],
            'absolute': [False, "Paths are absolute by default; disable for relative - Default: False"],
            'tags': ["img script a link", "tags to convert src and/or href in - Default: 'img scripts a link'"]
        }

        super(PathConverterExtension, self).__init__(*args, **kwargs)

    def extendMarkdown(self, md):
        """Add post processor to Markdown instance."""
        rel_path = PathConverterPostprocessor(md)
        rel_path.config = self.getConfigs()
        # Priority 2: runs near the end of the postprocessor chain.
        md.postprocessors.register(rel_path, "path-converter", 2)
        md.registerExtension(self)
def makeExtension(*args, **kwargs):
    """Return extension (entry point used by markdown's extension loader)."""
    return PathConverterExtension(*args, **kwargs)
| 34.865922 | 120 | 0.619452 | 1,593 | 0.255248 | 0 | 0 | 0 | 0 | 0 | 0 | 3,013 | 0.482775 |
684b4c2dccb681c7ad6626bfed2957b27acae3cb | 879 | py | Python | backdoors/shell/__pupy/pupy/packages/src/VideoCapture/src/fixhtml.py | mehrdad-shokri/backdoorme | f9755ca6cec600335e681752e7a1c5c617bb5a39 | [
"MIT"
] | 796 | 2015-10-09T20:30:04.000Z | 2022-03-24T19:07:32.000Z | backdoors/shell/__pupy/pupy/packages/src/VideoCapture/src/fixhtml.py | mehrdad-shokri/backdoorme | f9755ca6cec600335e681752e7a1c5c617bb5a39 | [
"MIT"
] | 169 | 2015-11-26T16:14:02.000Z | 2020-08-04T21:51:58.000Z | backdoors/shell/__pupy/pupy/packages/src/VideoCapture/src/fixhtml.py | mehrdad-shokri/backdoorme | f9755ca6cec600335e681752e7a1c5c617bb5a39 | [
"MIT"
] | 168 | 2015-11-27T23:21:04.000Z | 2022-01-23T06:14:33.000Z | import os, string
# HTML <style>/<body> fragments: the two 'old' variants (Windows and Linux
# differ only in font-size) are replaced by 'new' in fixhtmlfile.
# NOTE(review): these literals must match the generated HTML byte-for-byte
# for the replacement to fire; leading whitespace inside them may have been
# lost in transit — verify against the actual files.
oldWin = '''span {
font-family: Verdana;
background: #e0e0d0;
font-size: 10pt;
}
</style>
</head>
<body bgcolor="#e0e0d0">
'''
oldLinux = '''span {
font-family: Verdana;
background: #e0e0d0;
font-size: 13pt;
}
</style>
</head>
<body bgcolor="#e0e0d0">
'''
new = '''span {
font-family: Verdana;
}
</style>
</head>
<body bgcolor="#f0f0f8">
'''
def fixhtmlfile(file):
    # Rewrite one .html file in place: normalize CRLF to LF and swap the
    # old Windows/Linux style fragments for the new one.
    # (Python 2 code: print statement and the deprecated 'string' module.)
    if os.path.isfile(file) and file[-5:] == '.html':
        print file
        fp = open(file, 'rt')
        cont = fp.read()
        fp.close()
        cont = string.replace(cont, '\r\n', '\n')
        cont = string.replace(cont, oldWin, new)
        cont = string.replace(cont, oldLinux, new)
        fp = open(file, 'wt')
        fp.write(cont)
        fp.close()
def fixhtmlfiles(dir):
    """Run fixhtmlfile on every entry directly inside *dir*."""
    for entry in os.listdir(dir):
        fixhtmlfile(dir + os.sep + entry)
| 18.3125 | 53 | 0.571104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 345 | 0.392491 |
684d4d1dfa257a989173c87dfaa7f66ed6d73ef4 | 1,191 | py | Python | pyaww/static_header.py | ammarsys/pyanywhere-wrapper | d8cde2d29900c25fc7ab3cd8103923f727b5dade | [
"MIT"
] | 5 | 2021-06-25T14:34:52.000Z | 2021-07-04T14:15:13.000Z | pyaww/static_header.py | ammarsys/pyanywhere-wrapper | d8cde2d29900c25fc7ab3cd8103923f727b5dade | [
"MIT"
] | 1 | 2021-12-12T00:47:25.000Z | 2022-01-24T17:19:43.000Z | pyaww/static_header.py | ammarsys/pyanywhere-wrapper | d8cde2d29900c25fc7ab3cd8103923f727b5dade | [
"MIT"
] | 1 | 2021-12-14T15:44:52.000Z | 2021-12-14T15:44:52.000Z | from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .webapp import WebApp
class StaticHeader:
    """A single static header attached to a webapp, mirroring the API response."""

    id: int
    url: str
    name: str
    value: dict

    def __init__(self, resp: dict, webapp: 'WebApp') -> None:
        """Keep a reference to the owning webapp and copy the API response
        fields (id, url, name, value, ...) onto this instance.

        :param dict resp: json dictionary returned by the API
        :param webapp: WebApp instance this header belongs to
        """
        self._webapp = webapp
        self.__dict__.update(resp)

    def delete(self) -> None:
        """Delete this static header via the API."""
        endpoint = (
            f'/api/v0/user/{self._webapp.user}/webapps/'
            f'{self._webapp.domain_name}/static_headers/{self.id}/'
        )
        self._webapp.userclass.request('DELETE', endpoint)

    def update(self, **kwargs) -> None:
        """Patch this static header via the API and mirror the change locally.

        Sample usage -> StaticHeader.update(...)

        :param kwargs: takes url, name, value
        """
        endpoint = (
            f'/api/v0/user/{self._webapp.user}/webapps/'
            f'{self._webapp.domain_name}/static_headers/{self.id}/'
        )
        self._webapp.userclass.request('PATCH', endpoint, data=kwargs)
        self.__dict__.update(kwargs)
| 26.466667 | 109 | 0.581864 | 1,105 | 0.927792 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.50042 |
684d711535d79d99779edb8a3462b424e50c2b1d | 1,654 | py | Python | Clase6/pila.py | JoseCordobaEAN/EstructurasDeDatosUE4P | 86a5c426d83d9d9ae86656c3c78324a1c07f608d | [
"MIT"
] | 2 | 2019-08-17T21:15:47.000Z | 2019-09-21T12:15:19.000Z | Clase6/pila.py | JoseCordobaEAN/EstructurasDeDatosUE4P | 86a5c426d83d9d9ae86656c3c78324a1c07f608d | [
"MIT"
] | null | null | null | Clase6/pila.py | JoseCordobaEAN/EstructurasDeDatosUE4P | 86a5c426d83d9d9ae86656c3c78324a1c07f608d | [
"MIT"
] | null | null | null | class Nodo:
elemento = None
Siguiente = None
def __init__(self, elemento, siguiente):
self.elemento = elemento
self.Siguiente = siguiente
class Pila:
tamano = 0
top = None
def apilar(self, elemento):
"""
Agrega un elemento al tope de la pila
:param elemento: Cualquier elemento
:return: None
"""
nuevo_nodo = Nodo(elemento, self.top)
self.top = nuevo_nodo
self.tamano += 1
def desapilar(self):
"""
Retorna el elemento del Tope de la pila y lo elimina
:return: El elemento del tope de la pila
"""
if self.tamano > 0:
elemento_auxiliar = self.top.elemento
self.top = self.top.Siguiente
self.tamano -= 1
return elemento_auxiliar
raise IndexError('La pila esta vacía')
def mirar(self):
"""
Ve el elemento del tope de la pila sin eliminarlo
:return: El elemento del tope de la pila
"""
return self.top.elemento
def es_vacia(self):
return self.tamano == 0
def invertir(self):
auxiliar = Pila()
nodo_auxiliar = self.top
for i in range(self.tamano):
auxiliar.apilar(nodo_auxiliar.elemento)
nodo_auxiliar = nodo_auxiliar.Siguiente
return auxiliar
def copiar(self):
return self.invertir().invertir()
def __repr__(self):
resultado = []
auxiliar = self
while not auxiliar.es_vacia():
resultado.append(auxiliar.desapilar())
resultado.reverse()
return str(resultado)
| 24.686567 | 60 | 0.576179 | 1,652 | 0.998187 | 0 | 0 | 0 | 0 | 0 | 0 | 396 | 0.239275 |
684e278a6a7cc07b68dfd75e5943c572c4fc026f | 3,354 | py | Python | mmdet/core/utils/my_hook.py | ydiller/NoMoreNMS | 1c1557357e5312c287f0971c840060deb1bcd039 | [
"Apache-2.0"
] | null | null | null | mmdet/core/utils/my_hook.py | ydiller/NoMoreNMS | 1c1557357e5312c287f0971c840060deb1bcd039 | [
"Apache-2.0"
] | null | null | null | mmdet/core/utils/my_hook.py | ydiller/NoMoreNMS | 1c1557357e5312c287f0971c840060deb1bcd039 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import logging
from collections import defaultdict
from itertools import chain
from torch.nn.utils import clip_grad
from mmcv.utils import TORCH_VERSION, _BatchNorm, digit_version
# from ..dist_utils import allreduce_grads
# from ..fp16_utils import LossScaler, wrap_fp16_model
from mmcv.runner.hooks import HOOKS, Hook
try:
# If PyTorch version >= 1.6.0, torch.cuda.amp.GradScaler would be imported
# and used; otherwise, auto fp16 will adopt mmcv's implementation.
from torch.cuda.amp import GradScaler
except ImportError:
pass
@HOOKS.register_module()
class MyHook(Hook):
    """A hook contains custom operations for the optimizer.

    Appears adapted from mmcv's OptimizerHook, with the addition that a
    ``None`` loss skips backward/clip/step for that iteration.

    Args:
        grad_clip (dict, optional): A config dict to control the clip_grad.
            Default: None.
        detect_anomalous_params (bool): This option is only used for
            debugging which will slow down the training speed.
            Detect anomalous parameters that are not included in
            the computational graph with `loss` as the root.
            There are two cases

                - Parameters were not used during
                  forward pass.
                - Parameters were not used to produce
                  loss.
            Default: False.
    """

    def __init__(self, grad_clip=None, detect_anomalous_params=False):
        self.grad_clip = grad_clip
        self.detect_anomalous_params = detect_anomalous_params

    def clip_grads(self, params):
        """Clip gradients of `params` in place using self.grad_clip as the
        clip_grad_norm_ kwargs; returns the total norm, or None when no
        parameter has a gradient."""
        params = list(
            filter(lambda p: p.requires_grad and p.grad is not None, params))
        if len(params) > 0:
            return clip_grad.clip_grad_norm_(params, **self.grad_clip)

    def after_train_iter(self, runner):
        """Backward pass, optional gradient clipping, and optimizer step."""
        runner.optimizer.zero_grad()
        if self.detect_anomalous_params:
            # NOTE(review): runs before the None check below; if loss can be
            # None while this flag is on, loss.grad_fn will fail -- confirm.
            self.detect_anomalous_parameters(runner.outputs['loss'], runner)
        if runner.outputs['loss'] is not None:
            runner.outputs['loss'].backward()
            if self.grad_clip is not None:
                grad_norm = self.clip_grads(runner.model.parameters())
                if grad_norm is not None:
                    # Add grad norm to the logger
                    runner.log_buffer.update({'grad_norm': float(grad_norm)},
                                             runner.outputs['num_samples'])
            runner.optimizer.step()

    def detect_anomalous_parameters(self, loss, runner):
        """Log (at ERROR level) every model parameter that does not appear in
        the autograd graph rooted at `loss`."""
        logger = runner.logger
        parameters_in_graph = set()
        visited = set()

        def traverse(grad_fn):
            # Depth-first walk of the autograd graph, collecting the leaf
            # variables (parameters) reachable from loss.grad_fn.
            if grad_fn is None:
                return
            if grad_fn not in visited:
                visited.add(grad_fn)
                if hasattr(grad_fn, 'variable'):
                    parameters_in_graph.add(grad_fn.variable)
                parents = grad_fn.next_functions
                if parents is not None:
                    for parent in parents:
                        grad_fn = parent[0]
                        traverse(grad_fn)

        traverse(loss.grad_fn)
        for n, p in runner.model.named_parameters():
            if p not in parameters_in_graph and p.requires_grad:
                logger.log(
                    level=logging.ERROR,
                    msg=f'{n} with shape {p.size()} is not '
                    f'in the computational graph \n')
| 36.857143 | 78 | 0.613298 | 2,717 | 0.810078 | 0 | 0 | 2,742 | 0.817531 | 0 | 0 | 1,088 | 0.324389 |
684f7a6494a0ed2b61957330a13887c3aaff66e6 | 436 | py | Python | tests/fixtures/reporters/feed.py | kuc2477/news | 215f87e6ce1a7fc99175596e6fd5b4b50a3179c6 | [
"MIT"
] | 2 | 2016-01-21T04:16:57.000Z | 2016-04-27T04:46:13.000Z | tests/fixtures/reporters/feed.py | kuc2477/news | 215f87e6ce1a7fc99175596e6fd5b4b50a3179c6 | [
"MIT"
] | null | null | null | tests/fixtures/reporters/feed.py | kuc2477/news | 215f87e6ce1a7fc99175596e6fd5b4b50a3179c6 | [
"MIT"
] | null | null | null | import pytest
from news.reporters import ReporterMeta
from news.reporters.feed import AtomReporter, RSSReporter
@pytest.fixture
def rss_reporter(sa_schedule, sa_backend):
    """RSS reporter wired to the sqlalchemy schedule/backend fixtures."""
    return RSSReporter(meta=ReporterMeta(schedule=sa_schedule),
                       backend=sa_backend)
@pytest.fixture
def atom_reporter(sa_schedule, sa_backend):
    """Atom reporter wired to the sqlalchemy schedule/backend fixtures."""
    return AtomReporter(meta=ReporterMeta(schedule=sa_schedule),
                        backend=sa_backend)
| 27.25 | 57 | 0.805046 | 0 | 0 | 0 | 0 | 318 | 0.729358 | 0 | 0 | 0 | 0 |
6850a777d683c3f59834728ab8d549add89917c0 | 4,945 | py | Python | deployer/main.py | sportsy/deployer | e5dff46707773992aad7bcf47539b1d59ac3ee2c | [
"MIT"
] | null | null | null | deployer/main.py | sportsy/deployer | e5dff46707773992aad7bcf47539b1d59ac3ee2c | [
"MIT"
] | null | null | null | deployer/main.py | sportsy/deployer | e5dff46707773992aad7bcf47539b1d59ac3ee2c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import os
import ConfigParser
import json
import time
import threading
from distutils.core import setup
import pika
import boto
import pusherclient
from boto.sqs.message import RawMessage
# set defaults
channel = None
global pusher
config = ConfigParser.ConfigParser()
class MQServer(threading.Thread):
    """
    Message Queue Thread Listener (RabbitMQ, ZeroMQ, other MQs)

    Blocks forever consuming the configured queue; each received message
    triggers the shell command from the [MQSERVER] config section.
    """
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        # get the items in the config
        endpoint = config.get('MQSERVER', 'endpoint')
        queue = config.get('MQSERVER', 'queue')
        # start the connection to the MQ server
        connection = pika.BlockingConnection(pika.ConnectionParameters(host=endpoint))
        channel = connection.channel()
        # no_ack=True: messages are not re-delivered if the command fails
        channel.basic_consume(self.callback, queue=queue, no_ack=True)
        print '[*] Waiting for queue messages'
        # start the consumer for the channel
        while True:
            channel.start_consuming()

    def callback(self, ch, method, properties, body):
        # Fired once per message; the message body itself is only logged,
        # the action taken is always the configured command.
        print "[x] Received %r" % (body,)
        os.system(config.get('MQSERVER', 'command'))
class AmazonSQS(threading.Thread):
    """
    Amazon SQS Thread Listener

    Polls the configured SQS queue every 30 seconds and runs the configured
    shell command for messages, rate-limited to at most once per minute.

    NOTE(review): messages are never deleted from the queue, so the same
    message can be processed repeatedly -- confirm this is intentional.
    """
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        # start the connection to the SQS endpoint
        sqs = boto.sqs.connect_to_region(config.get('AWSSQS', 'region'), aws_access_key_id=config.get('AWSSQS', 'key'), aws_secret_access_key=config.get('AWSSQS', 'secret'))
        q = sqs.lookup(config.get('AWSSQS', 'queue'))
        q.set_message_class(RawMessage)
        results = sqs.receive_message(q, number_messages=1)
        print '[*] Waiting for Amazon SQS messages'
        last_received = time.time()
        # loop and look for messages
        while True:
            # loop through the results
            for result in results:
                # Check to see the last time and the last message
                offset = int(time.time() - last_received)
                if offset > 60:
                    # You could set a command to do certain things based upon the config
                    # NOTE(review): 'somvalue' looks like a placeholder -- confirm.
                    if str(result.get_body()) == 'somvalue':
                        #DO something
                        print str(result.get_body())
                    # execute your command from the config
                    os.system(config.get('AWSSQS', 'command'))
                    last_received = time.time()
            # re-get the messages
            results = sqs.receive_message(q, number_messages=1)
            time.sleep(30) # at 30 seconds, it's guaranteed to get it at least once
class PusherWebSocket(threading.Thread):
"""
Pusher websocket Thread Listener
"""
def __init__(self):
threading.Thread.__init__(self)
def run(self):
# get the items in the config
key = config.get('PUSHER', 'key')
secret = config.get('PUSHER', 'secret')
app_id = config.get('PUSHER', 'app_id')
# start the connection to the Pusher client
pusher = pusherclient.Pusher(key, secret=secret)
pusher.connection.bind('pusher:connection_established', self.connect_handler)
print '[*] Waiting for Pusher messages'
while True:
time.sleep(1)
def connect_handler(self, data):
channel = pusher.subscribe(config.get('PUSHER', 'channel'))
channel.bind(config.get('PUSHER', 'event'), self.channel_callback)
print '[-] Connected to Pusher'
def channel_callback(self, data):
# execute your command from the config
os.system(config.get('PUSHER', 'command'))
def main():
    """Start one daemonized listener thread per configured transport
    (MQ server, Amazon SQS, Pusher) and wait on them until interrupted.

    A listener is started only when its config section has a non-empty
    endpoint/key (length > 1).
    """
    print 'Starting listeners...To exit press CTRL+C'
    # create a list of threads
    threads = []
    # open the config file
    config.readfp(open('deployer.cfg'))
    mq = MQServer()
    sqs = AmazonSQS()
    p = PusherWebSocket()
    try:
        if len(config.get('MQSERVER', 'endpoint')) > 1:
            mq.daemon = True  # daemonize the thread
            threads.append(mq)  # append the threads to the thread list
            mq.start()  # start the thread
        if len(config.get('AWSSQS', 'key')) > 1:
            sqs.daemon = True  # daemonize the thread
            threads.append(sqs)  # append the threads to the thread list
            sqs.start()  # start the thread
        if len(config.get('PUSHER', 'key')) > 1:
            p.daemon = True  # daemonize the thread
            threads.append(p)  # append the threads to the thread list
            p.start()  # start the thread
        for thread in threads:
            # check to see the thread is still alive; join with a timeout so
            # KeyboardInterrupt can be delivered to the main thread
            while thread.isAlive():
                thread.join(1)
    except (KeyboardInterrupt, SystemExit):
        print '\n!Received keyboard interrupt, quitting threads.\n'


if __name__ == '__main__':
    main()
| 29.969697 | 173 | 0.61092 | 3,405 | 0.688574 | 0 | 0 | 0 | 0 | 0 | 0 | 1,658 | 0.335288 |
68510211118d2b8c5a255e3f39fd3647748d0494 | 245 | py | Python | nipype/workflows/dmri/mrtrix/__init__.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | nipype/workflows/dmri/mrtrix/__init__.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | nipype/workflows/dmri/mrtrix/__init__.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 2 | 2017-09-23T16:22:00.000Z | 2019-08-01T14:18:52.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .diffusion import create_mrtrix_dti_pipeline
from .connectivity_mapping import create_connectivity_pipeline
from .group_connectivity import (create_group_connectivity_pipeline)
| 40.833333 | 68 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.093878 |
6851217599a86b71913e6f6728bab12730f9d8ae | 1,528 | py | Python | main.py | dunderstab/SembleLang | 395d30419d52107b2fa248555b839bd852992594 | [
"MIT"
] | null | null | null | main.py | dunderstab/SembleLang | 395d30419d52107b2fa248555b839bd852992594 | [
"MIT"
] | null | null | null | main.py | dunderstab/SembleLang | 395d30419d52107b2fa248555b839bd852992594 | [
"MIT"
] | null | null | null | import src.lex
import src.parse
import src.eval
import src.tools
import src.optimize
import src.pre
import re
import sys
def main(file, n=None, first=False):
    """Compile a Semble source file down to a native 32-bit executable.

    Pipeline: preprocess -> lex -> parse -> emit assembly -> optimize ->
    assemble (as) -> link (ld).  Intermediate artifacts are written to the
    working directory and removed afterwards unless `debug` is True.

    :param file: path to the .smb source file
    :param n: optional output executable name (overrides the preprocessor's)
    :param first: if True, import the base library (currently disabled)
    """
    import os
    debug = False
    version = "Beta 2.5"  # NOTE(review): informational only, never printed
    l = src.tools.readSembleFile(file)
    #if first:
    #    l += src.pre.importf("base.smb", l)[0]
    l, fname = src.pre.process(l)
    # Dump the preprocessed source for debugging.
    with open("newcode.smb", "w") as newcode:
        newcode.write(l)
    x = src.lex.lex(l)
    with open("lexout.txt", "w") as fw:
        fw.write(str(x).replace("[", "[\n").replace("]", "]\n"))
    v = src.parse.parse(x)
    with open("parseout.txt", "w") as fw:
        fw.write(str(v).replace("[", "[\n").replace("]", "]\n"))
    # Emit and optimize the assembly, then assemble and link against libc.
    src.eval.cmpf(v, "semble.asm")
    src.optimize.optimize("semble.asm")
    os.system("as --32 semble.asm -o semble.o")
    if n != None:
        fname = n
    os.system("ld -m elf_i386 -dynamic-linker /lib/ld-linux.so.2 -o " + fname + " semble.o -lc")
    if not debug:
        # Clean up all intermediate files.
        os.system("rm semble.o parseout.txt lexout.txt newcode.smb console.txt")


if __name__ == '__main__':
    #print(src.lex.checkFuncCall("hello(helo,hello, h)"))
    #if re.match(r"^\[\d+\]\[(.*\,)*(.*)\]$", "[7][6, 6,]"):
    #    print("hello")
    #print(src.lex.checkIndexRef("hello[5]"))
    #print(re.match(r"^(\[[^\n\]]+\])+$", "[56][65][67]"))
    #src.lex.getIndexs("[56][65][67]")
    #print(src.lex.checkIndexRef("people.names[3]"))
    #print(src.lex.lex("5 + 5 * 5\n"))
    #print("\n")
    #print(src.parse.organizeEquation(src.lex.lex("5 + 5 * 5 + 5 + 5\n")))
    main(sys.argv[1])
    print("Compiled Succesfully")
print("Compiled Succesfully") | 25.04918 | 94 | 0.59555 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 779 | 0.509817 |
6851afbd7c56c12caaf48e494760ed9de7cf64a5 | 5,005 | py | Python | lib/browser.py | qwghlm/WhensMyBus | bef206c15a5efdeeca234a9f31d98b2ec33342af | [
"MIT"
] | 4 | 2015-01-02T20:31:43.000Z | 2017-03-02T11:27:39.000Z | lib/browser.py | qwghlm/WhensMyBus | bef206c15a5efdeeca234a9f31d98b2ec33342af | [
"MIT"
] | 1 | 2016-10-17T21:23:48.000Z | 2016-10-17T21:23:48.000Z | lib/browser.py | qwghlm/WhensMyBus | bef206c15a5efdeeca234a9f31d98b2ec33342af | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Data browser for When's My Transport, with caching and JSON/XML parsing
"""
import json
import logging
import os
import urllib2
import time
from xml.dom.minidom import parseString
from xml.etree.ElementTree import fromstring
from lib.exceptions import WhensMyTransportException
#
# API URLs - for live APIs and test data we have cached for unit testing
#
HOME_DIR = os.path.dirname(os.path.abspath(__file__)) + '/..'
URL_SETS = {
'live': {
'BUS_URL': "http://countdown.tfl.gov.uk/stopBoard/%s",
'DLR_URL': "http://www.dlrlondon.co.uk/xml/mobile/%s.xml",
'TUBE_URL': "http://cloud.tfl.gov.uk/TrackerNet/PredictionDetailed/%s/%s",
'STATUS_URL': "http://cloud.tfl.gov.uk/TrackerNet/StationStatus/IncidentsOnly",
},
'test': {
'BUS_URL': "file://" + HOME_DIR + "/tests/data/bus/%s.json",
'DLR_URL': "file://" + HOME_DIR + "/tests/data/dlr/%s.xml",
'TUBE_URL': "file://" + HOME_DIR + "/tests/data/tube/%s-%s.xml",
'STATUS_URL': "file://" + HOME_DIR + "/tests/data/tube/status.xml",
}
}
CACHE_MAXIMUM_AGE = 30 # 30 seconds maximum cache age
class WMTURLProvider:
    """
    Simple wrapper that provides URLs for the TfL APIs, or test data depending on how we have set this up
    """
    #pylint: disable=R0903

    def __init__(self, use_test_data=False):
        # Select the cached test fixtures or the live API endpoints.
        self.urls = URL_SETS['test' if use_test_data else 'live']

    def __getattr__(self, key):
        # Expose each URL key (e.g. BUS_URL) as an attribute.
        return self.urls[key]
class WMTBrowser:
    """
    A simple JSON/XML fetcher with caching. Not designed to be used for many thousands of URLs, or for concurrent access

    (Python 2 module: note the `except Type, name` syntax below.)
    """
    def __init__(self):
        self.opener = urllib2.build_opener()
        self.opener.addheaders = [('User-agent', 'When\'s My Transport?'),
                                  ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')]
        logging.debug("Starting up browser")
        # url -> {'data': raw response body, 'time': fetch timestamp}
        self.cache = {}

    def fetch_url(self, url, default_exception_code):
        """
        Fetch a URL and returns the raw data as a string

        Raises WhensMyTransportException(default_exception_code) on any
        network or HTTP error.
        """
        # If URL is in cache and still considered fresh, fetch that
        if url in self.cache and (time.time() - self.cache[url]['time']) < CACHE_MAXIMUM_AGE:
            logging.debug("Using cached URL %s", url)
            url_data = self.cache[url]['data']
        # Else fetch URL and store
        else:
            logging.debug("Fetching URL %s", url)
            try:
                response = self.opener.open(url)
                url_data = response.read()
                self.cache[url] = {'data': url_data, 'time': time.time()}
            # Handle browsing error
            except urllib2.HTTPError, exc:
                logging.error("HTTP Error %s reading %s, aborting", exc.code, url)
                raise WhensMyTransportException(default_exception_code)
            except Exception, exc:
                logging.error("%s (%s) encountered for %s, aborting", exc.__class__.__name__, exc, url)
                raise WhensMyTransportException(default_exception_code)
        return url_data

    def fetch_json(self, url, default_exception_code='tfl_server_down'):
        """
        Fetch a JSON URL and returns Python object representation of it

        Returns None when the response body is empty.
        """
        json_data = self.fetch_url(url, default_exception_code)
        if json_data:
            try:
                obj = json.loads(json_data)
                return obj
            # If the JSON parser is choking, probably a 503 Error message in HTML so raise a ValueError
            except ValueError, exc:
                # Evict the bad payload so the next call re-fetches it.
                del self.cache[url]
                logging.error("%s encountered when parsing %s - likely not JSON!", exc, url)
                raise WhensMyTransportException(default_exception_code)
        else:
            return None

    def fetch_xml_tree(self, url, default_exception_code='tfl_server_down'):
        """
        Fetch an XML URL and returns Python object representation of it as an ElementTree

        The document's default namespace prefix is stripped from every tag.
        Returns None when the response body is empty.
        """
        xml_data = self.fetch_url(url, default_exception_code)
        if xml_data:
            try:
                tree = fromstring(xml_data)
                namespace = '{%s}' % parseString(xml_data).firstChild.getAttribute('xmlns')
                # Remove horrible namespace functionality
                if namespace:
                    for elem in tree.getiterator():
                        if elem.tag.startswith(namespace):
                            elem.tag = elem.tag[len(namespace):]
                return tree
            # If the XML parser is choking, probably a 503 Error message in HTML so raise a ValueError
            except Exception, exc:
                # Evict the bad payload so the next call re-fetches it.
                del self.cache[url]
                logging.error("%s encountered when parsing %s - likely not XML!", exc, url)
                raise WhensMyTransportException(default_exception_code)
        else:
            return None
| 39.409449 | 120 | 0.600999 | 3,851 | 0.769431 | 0 | 0 | 0 | 0 | 0 | 0 | 1,946 | 0.388811 |
6851b38c6e5c265b8398f65f866f61d65d0c141e | 1,099 | py | Python | oommfpy/tools/plot_tools.py | davidcortesortuno/oommfpy | daa56f96fd0d301d42a4bc260f7142f0a8e62f8d | [
"BSD-2-Clause"
] | 9 | 2019-05-25T07:42:14.000Z | 2022-02-22T21:08:47.000Z | oommfpy/tools/plot_tools.py | davidcortesortuno/oommfpy | daa56f96fd0d301d42a4bc260f7142f0a8e62f8d | [
"BSD-2-Clause"
] | 9 | 2019-05-25T07:41:57.000Z | 2021-11-27T14:12:28.000Z | oommfpy/tools/plot_tools.py | davidcortesortuno/oommfpy | daa56f96fd0d301d42a4bc260f7142f0a8e62f8d | [
"BSD-2-Clause"
] | 7 | 2019-07-21T05:42:39.000Z | 2022-03-28T13:57:03.000Z | import colorsys
import numpy as np
# -----------------------------------------------------------------------------
# Utilities to generate a HSL colourmap from the magnetisation field data
def convert_to_RGB(hls_color):
    """Convert a single HLS triplet into an RGB triplet.

    The hue component is expected in radians, in [0, 2 * pi); lightness and
    saturation are in [0, 1].

    :param hls_color: sequence of (hue, lightness, saturation)
    :returns: numpy array of (r, g, b) values in [0, 1]
    """
    hue, lightness, saturation = hls_color
    return np.array(colorsys.hls_to_rgb(hue / (2 * np.pi),
                                        lightness,
                                        saturation))


def generate_colours(field_data, colour_model='rgb'):
    """Map a 3D vector field onto colours using an HLS encoding.

    The in-plane angle of each vector sets the hue and the out-of-plane (z)
    component sets the lightness; saturation is fixed at 1.

    :param field_data: (n, 3) array of (x, y, z) components, assumed
        normalised to [-1, 1] -- TODO confirm with callers
    :param colour_model: either 'rgb' or 'hls'
    :returns: (n, 3) array of colours in the requested model
    :raises ValueError: if ``colour_model`` is not recognised
    """
    hls = np.ones_like(field_data)
    # Hue: in-plane angle, shifted from (-pi, pi] into [0, 2 * pi)
    hls[:, 0] = np.arctan2(field_data[:, 1],
                           field_data[:, 0]
                           )
    hls[:, 0][hls[:, 0] < 0] = hls[:, 0][hls[:, 0] < 0] + 2 * np.pi
    # Lightness: map z in [-1, 1] to [0, 1]
    hls[:, 1] = 0.5 * (field_data[:, 2] + 1)

    if colour_model == 'rgb':
        rgbs = np.apply_along_axis(convert_to_RGB, 1, hls)
        return rgbs
    elif colour_model == 'hls':
        return hls
    else:
        # ValueError is more specific than the bare Exception raised before,
        # and remains backward compatible for callers catching Exception.
        raise ValueError('Specify a valid colour model: rgb or hls')
# -----------------------------------------------------------------------------
| 28.921053 | 79 | 0.44404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 336 | 0.305732 |
68532915a517819212ed6da9e432ef69c63e0d22 | 299 | py | Python | 83. Remove Duplicates from Sorted List.py | fossabot/leetcode-2 | 335f1aa3ee785320515c3d3f03c2cb2df3bc13ba | [
"MIT"
] | 2 | 2018-02-26T09:12:19.000Z | 2019-06-07T13:38:10.000Z | 83. Remove Duplicates from Sorted List.py | fossabot/leetcode-2 | 335f1aa3ee785320515c3d3f03c2cb2df3bc13ba | [
"MIT"
] | 1 | 2018-12-24T07:03:34.000Z | 2018-12-24T07:03:34.000Z | 83. Remove Duplicates from Sorted List.py | fossabot/leetcode-2 | 335f1aa3ee785320515c3d3f03c2cb2df3bc13ba | [
"MIT"
] | 2 | 2018-12-24T07:01:03.000Z | 2019-06-07T13:38:07.000Z | class Solution(object):
def deleteDuplicates(self, head):
initial = head
while head:
if head.next and head.val == head.next.val:
head.next = head.next.next
else:
head = head.next
head = initial
return head
| 24.916667 | 55 | 0.51505 | 297 | 0.993311 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6853ac15da1a977cf572b5b81090168754bb6148 | 9,165 | py | Python | RecSysFramework/Evaluation/Comparator.py | damicoedoardo/NNMF | 69fff4848b3243a2d3347bec3e1c9a01ae40e51a | [
"MIT"
] | 2 | 2020-12-11T12:46:00.000Z | 2021-08-17T13:29:41.000Z | RecSysFramework/Evaluation/Comparator.py | damicoedoardo/NNMF | 69fff4848b3243a2d3347bec3e1c9a01ae40e51a | [
"MIT"
] | null | null | null | RecSysFramework/Evaluation/Comparator.py | damicoedoardo/NNMF | 69fff4848b3243a2d3347bec3e1c9a01ae40e51a | [
"MIT"
] | null | null | null | '''
Created on Wed Sep 18 2019
@author XXX
'''
import numpy as np
import scipy.sparse as sps
import time
import sys
import copy
from tqdm import tqdm
from enum import Enum
from RecSysFramework.Utils import seconds_to_biggest_unit
from RecSysFramework.Utils.compute_popularity import compute_popularity_user
from RecSysFramework.Utils.WriteTextualFile import WriteTextualFile
from RecSysFramework.Utils.Log import dict_to_string
from scipy.stats import spearmanr
import rbo
class Comparator(object):
    """Abstract comparator.

    Used in general for statistics that take into account more recommenders
    jointly, e.g. variety of recommendations: it measures the difference
    between the items recommended by two recommenders.
    """

    COMPARATOR_NAME = "Comparator"
    # Metric names accepted by compare(); see the NOTE there about 'spearman'.
    metrics = ['jaccard', 'spearman', 'RBO']

    def __init__(self, URM_test, recommenders_to_compare, cutoff=5, metrics_list=['jaccard', 'spearman', 'RBO'],
                 minRatingsPerUser=1, exclude_seen=True, exclude_top_pop=False, save_to_file=True, verbose=True):
        """
        Arguments:
            recommenders_to_compare {RecommenderBase} -- instances to be compared
            URM_test {[type]} -- ground truth user-rating matrix (may be None)

        Keyword Arguments:
            cutoff {int} -- length of the recommendation lists to consider
            metrics_list {list} -- list of metrics to be computed
                (NOTE: mutable default argument; harmless here because it is
                only read, never mutated)
            minRatingsPerUser {int} -- recommendations are provided only for users
                with at least this number of ratings (default: {1})
            exclude_seen {bool} -- shall we exclude from the recommendations the
                items seen at training time? (default: {True})
            exclude_top_pop {bool} -- shall we exclude from the recommendations the
                most popular items? (default: {False})
            save_to_file {bool} -- if True, the results are saved into a file,
                otherwise just printed
            verbose {bool} -- if True, results are also printed to stdout
        """
        super(Comparator, self).__init__()

        self.minRatingsPerUser = minRatingsPerUser
        self.exclude_seen = exclude_seen
        self.metrics_list = metrics_list
        self.exclude_top_pop = exclude_top_pop
        self.recommenders_to_compare = recommenders_to_compare
        self.cutoff = cutoff
        self.save_to_file = save_to_file
        self.verbose = verbose

        if save_to_file:
            # Output file name is the concatenation of the recommender names.
            recs = ''
            for r in recommenders_to_compare:
                recs += r.RECOMMENDER_NAME + '_'
            self.file = WriteTextualFile(
                'ComparationResults', recs, append_datetime=True)

        if URM_test is not None:
            self.URM_test = sps.csr_matrix(URM_test)
            self.n_users, self.n_items = URM_test.shape
            # Number of test interactions per user, from the CSR row pointer.
            numRatings = np.ediff1d(self.URM_test.tocsr().indptr)
            self.usersToEvaluate = np.arange(
                self.n_users)[numRatings >= self.minRatingsPerUser]

    def _evaluate_recommender(self, recommender_object):
        """ should return
            [{user_x: [rec_items_for_x], user_y: [rec_items_for_y]},
             {user_i: [rec_items_for_i], user_j: [rec_items_for_j]},
             ...
            ],
            [descr1,
             descr2,
             ...
            ]
            A subclass will give an implementation to this method
        """
        pass

    def _print(self, text, only_on_file=False):
        # NOTE(review): `only_on_file` is accepted but never used; stdout
        # output is controlled solely by self.verbose -- confirm intent.
        if self.verbose:
            print(text)
        if self.save_to_file:
            self.file.write_line(text)

    def compare(self):
        """Compare the first two recommenders according to the metrics defined.

        NOTE(review): only 'jaccard' and 'RBO' are actually computed below.
        'spearman' passes the assertion but leaves `result` unset (NameError
        on the first iteration) or stale -- confirm before enabling it.

        Returns:
            [string] -- string describing the results
            [dict] -- keys are '{metric}_{descr}_{cutoff}', values the scores
        """
        return_string = ''
        return_dict = {}
        for m in self.metrics_list:
            assert m in self.metrics, "metric provided should be among {}".format(
                self.metrics)
            return_string += 'cutoff: {}\n'.format(str(self.cutoff))
            return_string += 'computing {}. considering only the first two recommenders provided\n'.format(
                m)
            if hasattr(self.recommenders_to_compare[0], 'model_parameters') and hasattr(self.recommenders_to_compare[1], 'model_parameters'):
                return_string += 'recommender 1: {}\n\n'.format(dict_to_string(
                    self.recommenders_to_compare[0].model_parameters, style='constructor'))
                return_string += 'recommender 2: {}\n\n'.format(dict_to_string(
                    self.recommenders_to_compare[1].model_parameters, style='constructor'))
            evaluation_results_first, descr = self._evaluate_recommender(
                self.recommenders_to_compare[0])
            evaluation_results_second, _ = self._evaluate_recommender(
                self.recommenders_to_compare[1])
            for idx in range(len(evaluation_results_second)):
                if m == 'jaccard':
                    result = self.compute_jaccard(
                        evaluation_results_first[idx], evaluation_results_second[idx])
                elif m == 'RBO':
                    result = self.compute_RBO(
                        evaluation_results_first[idx], evaluation_results_second[idx])
                return_string += '{} on {}: {}\n'.format(
                    m, descr[idx], result)
                return_dict['{}_{}_{}'.format(
                    m, descr[idx], self.cutoff)] = result
        self._print(return_string)
        return return_string, return_dict

    def compute_jaccard(self, d1, d2):
        """Mean, over users, of the Jaccard similarity between the two
        recommendation lists (order-insensitive overlap)."""
        j = []
        for key, l1 in d1.items():
            s1 = set(l1)
            s2 = set(d2[key])
            j.append(len(s1 & s2)/len(s1 | s2))
        return sum(j)/len(j)

    def compute_RBO(self, d1, d2):
        """Mean, over users, of the Rank-Biased Overlap between the two
        recommendation lists (rank-sensitive overlap)."""
        j = []
        for key, l1 in d1.items():
            l2 = d2[key]
            j.append(rbo.RankingSimilarity(l1, l2).rbo())
        return sum(j)/len(j)
class ComparatorHoldout(Comparator):
    """Comparator over a holdout split: all eligible users form one group."""

    EVALUATOR_NAME = "ComparatorHoldout"

    def _evaluate_recommender(self, recommender_object):
        """Return the recommendation lists for every eligible user as a
        single group labelled 'all_users'."""
        recommendations, _ = recommender_object.recommend(
            self.usersToEvaluate,
            remove_seen_flag=self.exclude_seen,
            cutoff=self.cutoff,
            remove_top_pop_flag=self.exclude_top_pop,
            return_scores=True)
        return [dict(zip(self.usersToEvaluate, recommendations))], ['all_users']
class ComparatorHoldoutUserPopularity(Comparator):
    """Comparator that evaluates the recommenders on user groups of
    increasing popularity.

    Users are sorted by training popularity; each cut selects the users whose
    cumulative interaction count stays below that fraction of the total.
    """

    EVALUATOR_NAME = "ComparatorHoldoutUserPopularity"

    def __init__(self, URM_train, *pos_args, cuts=[0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8, 1], **key_args):
        # `cuts` is only iterated, never mutated, so the mutable default is safe.
        self.URM_train = URM_train
        self.cuts = cuts
        super(ComparatorHoldoutUserPopularity,
              self).__init__(*pos_args, **key_args)

    def _recommend_in_batch(self, recommender_object, users, remove_seen_flag, cutoff, remove_top_pop_flag):
        """Recommend for `users` in fixed-size batches; returns one flat list
        with one entry (a list of items) per user, in order."""
        r = []
        size = 1000
        n_users = len(users)
        n_batch = n_users // size
        for idx in range(n_batch):
            r += recommender_object.recommend(
                users[size*idx: size*(idx+1)], remove_seen_flag=remove_seen_flag, cutoff=cutoff, remove_top_pop_flag=remove_top_pop_flag, return_scores=True)[0]
        # Bug fix: the tail slice used to start at (size * n_batch) % n_users,
        # which is 0 whenever n_users is an exact multiple of `size`, so every
        # user's recommendations were appended a second time. Slice from
        # size * n_batch and skip the call entirely when there is no tail.
        if size * n_batch < n_users:
            r += recommender_object.recommend(
                users[size*n_batch: n_users], remove_seen_flag=remove_seen_flag, cutoff=cutoff, remove_top_pop_flag=remove_top_pop_flag, return_scores=True)[0]
        return r

    def _evaluate_recommender(self, recommender_object):
        """Recommend once for every user, then slice the results per
        popularity cut; returns ([{user: items} per cut], [cut labels])."""
        pop = compute_popularity_user(self.URM_train, ordered=True)
        r = []
        descr = []
        users, interactions = zip(*pop)
        users = np.array(users)
        interactions = np.array(interactions)
        cum_sum_interactions = np.cumsum(interactions)
        tot_interactions = np.sum(interactions)
        # Recommend for users sorted by id so that row i of the result matrix
        # corresponds to user id i.
        # NOTE(review): this assumes user ids are exactly 0..n_users-1 --
        # confirm against compute_popularity_user.
        recommended_items_all_users = self._recommend_in_batch(recommender_object,
                                                               np.sort(users),
                                                               self.exclude_seen,
                                                               self.cutoff,
                                                               self.exclude_top_pop,
                                                               )
        recommended_items_all_users = np.array(recommended_items_all_users)
        for cut in self.cuts:
            users_in_cut = users[cum_sum_interactions < cut*tot_interactions]
            recommended_items_batch_list = recommended_items_all_users[users_in_cut, :].tolist()
            r.append(dict(zip(users_in_cut, recommended_items_batch_list)))
            descr.append('{}'.format(cut))
        return r, descr
| 41.098655 | 167 | 0.600327 | 8,681 | 0.94719 | 0 | 0 | 0 | 0 | 0 | 0 | 2,258 | 0.246372 |
6853c8f28ea7f7496002136021b31195092bc07c | 3,233 | py | Python | app/grid_model.py | vinhta314/game-of-life-visualiser | 957ba490c12836e38e6b40024046c10466780739 | [
"MIT"
] | null | null | null | app/grid_model.py | vinhta314/game-of-life-visualiser | 957ba490c12836e38e6b40024046c10466780739 | [
"MIT"
] | null | null | null | app/grid_model.py | vinhta314/game-of-life-visualiser | 957ba490c12836e38e6b40024046c10466780739 | [
"MIT"
] | null | null | null | import numpy as np
class CellularAutomationModel:
    """Conway's Game of Life on a fixed-size grid with clipped (non-wrapping)
    edges.

    The grid is a 2D numpy array of 0 (dead) / 1 (alive) cells indexed as
    ``grid[x, y]`` with x in [0, grid_width) and y in [0, grid_height).
    """

    grid_width = 40
    grid_height = 40

    def __init__(self):
        self.grid = self._randomised_grid()

    def evolve(self):
        """
        Evolve the current grid state using Conway's Game of Life algorithm.

        :returns
            dict: A dictionary representation of the state of cells in the grid
        """
        # Compute the next generation from a frozen snapshot so updates made
        # during the sweep do not affect neighbour counts.
        base_grid = self.grid.copy()
        for y in range(self.grid_height):
            for x in range(self.grid_width):
                cell_state = base_grid[x, y]
                n_neighbours = self._calculate_alive_neighbours(x, y, cell_state, grid=base_grid)
                self.grid[x, y] = self._next_cell_state(cell_state, n_neighbours)
        return self._json_formatted_grid()

    def toggle_cell_state(self, x, y):
        """
        Reverses the cell state for a particular cell coordinate.
        """
        self.grid[x][y] = 0 if self.grid[x][y] == 1 else 1

    def reset_grid(self):
        """
        Resets the grid array to a random state.

        :returns
            dict: A dictionary representation of the state of cells in the grid
        """
        self.grid = self._randomised_grid()
        return self._json_formatted_grid()

    def _calculate_alive_neighbours(self, x, y, cell_state, grid):
        """
        Returns the number of alive nearest neighbours (up to 8).
        """
        surrounding_arr = self._surrounding_arr(x, y, grid)
        # The slice includes the cell itself, so subtract its own state.
        return int(surrounding_arr.sum()) - cell_state

    def _json_formatted_grid(self):
        """
        Returns a python dictionary which represents the current state of the cells in the grid.
            key: An integer that represents a single cell based on the coordinate position
                 (cell_id = x + y * grid_width).
            value: The cell state <0 or 1> to represent whether a cell is dead or alive.
        """
        json_grid = {}
        for x in range(self.grid_width):
            for y in range(self.grid_height):
                cell_id = int(x + y*self.grid_width)
                json_grid[cell_id] = int(self.grid[x, y])
        return json_grid

    def _randomised_grid(self):
        """
        Returns a 2d array of randomly assigned 0/1 values.

        Bug fix: the shape used to be (height, width) while every access is
        grid[x, y]; that only worked because the grid is square. The first
        axis is now x (width) to match the indexing convention.
        """
        return np.random.randint(2, size=(self.grid_width, self.grid_height))

    @staticmethod
    def _surrounding_arr(x, y, grid):
        """
        Returns the sub-array of cells within one step of (x, y), clipped at
        the grid edges (no toroidal wrap-around).

        Bug fix: the old branching only clamped when x == 0 *or* y == 0, so
        at the corner (0, 0) the second slice became [-1:2], which is empty;
        the corner cell therefore saw no neighbours at all. Clamping both
        lower bounds with max() handles every edge and corner correctly.
        """
        return grid[max(x - 1, 0):x + 2, max(y - 1, 0):y + 2]

    @staticmethod
    def _next_cell_state(cell_state, n_neighbours):
        """
        Returns the new cell state 0 (dead) or 1 (alive). New state is determined using the current cell state
        and number of alive neighbours based on the rules in Conway's Game of Life.
        """
        if (cell_state == 1 and (n_neighbours not in range(2, 4))) or (cell_state == 0 and n_neighbours != 3):
            return 0
        return 1
| 32.989796 | 110 | 0.590473 | 3,211 | 0.993195 | 0 | 0 | 887 | 0.274358 | 0 | 0 | 1,217 | 0.376431 |
68584b0872eb1fd43baeac7af1a1487c2977e004 | 21,365 | py | Python | session.py | cheng6076/virnng | d790c02833865c43cb8afb2552a75c9445365f24 | [
"Apache-2.0"
] | 13 | 2017-05-18T22:44:22.000Z | 2020-09-16T14:19:49.000Z | session.py | cheng6076/virnng | d790c02833865c43cb8afb2552a75c9445365f24 | [
"Apache-2.0"
] | 1 | 2018-07-02T12:08:15.000Z | 2018-07-02T12:08:15.000Z | session.py | cheng6076/virnng | d790c02833865c43cb8afb2552a75c9445365f24 | [
"Apache-2.0"
] | 3 | 2017-11-08T11:51:17.000Z | 2019-11-03T23:18:50.000Z | from encoder import Encoder
from decoder import Decoder
from parser import Parser
from baseline import *
from language_model import LanguageModel
from util import Reader
import dynet as dy
from misc import compute_eval_score, compute_perplexity
import os
initializers = {'glorot': dy.GlorotInitializer(),
'constant': dy.ConstInitializer(0.01),
'uniform': dy.UniformInitializer(0.1),
'normal': dy.NormalInitializer(mean = 0, var = 1)
}
optimizers = {
"sgd": dy.SimpleSGDTrainer,
"adam": dy.AdamTrainer,
"adadelta": dy.AdadeltaTrainer,
"adagrad": dy.AdagradTrainer
}
class Session(object):
def __init__(self, options):
self.reader = Reader(options.data_dir, options.data_augment)
self.options = options
def supervised_enc(self):
encoder = self.create_encoder()
if os.path.exists(self.options.result_dir + 'model_enc'):
self.load_encoder(encoder)
enc_trainer = optimizers[self.options.optimizer](encoder.model)
lr = self.options.lr #used only for sgd
i = 0
best_f1 = 0
print ('supervised training for encoder...')
for epoch in range(self.options.epochs):
sents = 0
total_loss = 0.0
train = self.reader.next_example(0)
train_size = len(self.reader.data[0])
for data in train:
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
loss = encoder.train(s1, s2, s3, pos, act, self.options.enc_dropout)
sents += 1
if loss is not None:
total_loss += loss.scalar_value()
loss.backward()
if self.options.optimizer == 'sgd':
enc_trainer.update(lr)
else:
enc_trainer.update()
e = float(i) / train_size
if i % self.options.print_every == 0:
print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
sents = 0
total_loss = 0.0
if i!=0 and i % self.options.save_every == 0:
print('computing loss on validation set...')
valid = self.reader.next_example(2) #fix this
valid_size = len(self.reader.data[2])
rf = open(self.options.result_dir+'result', 'w')
for vdata in valid:
s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
_, output, _ = encoder.parse(s1, s2, s3, pos)
rf.write(output + '\n')
rf.close()
f1 = compute_eval_score(self.options.result_dir)
if f1 > best_f1:
best_f1 = f1
print ('highest f1: {}'.format(f1))
print ('saving model...')
encoder.Save(self.options.result_dir + 'model_enc')
else:
lr = lr * self.options.decay
i += 1
def supervised_dec(self):
decoder = self.create_decoder()
if os.path.exists(self.options.result_dir + 'model_dec'):
self.load_decoder(decoder)
dec_trainer = optimizers[self.options.optimizer](decoder.model)
lr = self.options.lr #used only for sgd
i = 0
lowest_valid_loss = 9999
print ('supervised training for decoder...')
for epoch in range(self.options.epochs):
sents = 0
total_loss = 0.0
train = self.reader.next_example(0)
train_size = len(self.reader.data[0])
for data in train:
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
loss, loss_act, loss_word = decoder.compute_loss(s3, act, self.options.dec_dropout)
sents += 1
if loss is not None:
total_loss += loss.scalar_value()
loss.backward()
if self.options.optimizer == 'sgd':
dec_trainer.update(lr)
else:
dec_trainer.update()
e = float(i) / train_size
if i % self.options.print_every == 0:
print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
sents = 0
total_loss = 0.0
if i!=0 and i % self.options.save_every == 0:
print('computing loss on validation set...')
total_valid_loss = 0
valid = self.reader.next_example(1)
valid_size = len(self.reader.data[1])
for vdata in valid:
s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
valid_loss, _, _ = decoder.compute_loss(s3, act)
if valid_loss is not None:
total_valid_loss += valid_loss.scalar_value()
total_valid_loss = total_valid_loss * 1.0 / valid_size
if total_valid_loss < lowest_valid_loss:
lowest_valid_loss = total_valid_loss
print ('saving model...')
decoder.Save(self.options.result_dir + 'model_dec')
else:
lr = lr * self.options.decay
i += 1
def unsupervised_with_baseline(self):
decoder = self.create_decoder()
assert(os.path.exists(self.options.result_dir + 'model_dec'))
self.load_decoder(decoder)
encoder = self.create_encoder()
assert(os.path.exists(self.options.result_dir + 'model_enc'))
self.load_encoder(encoder)
baseline = self.create_baseline()
if os.path.exists(self.options.result_dir + 'baseline'):
self.load_baseline(baseline)
enc_trainer = optimizers[self.options.optimizer](encoder.model)
dec_trainer = optimizers[self.options.optimizer](decoder.model)
baseline_trainer = optimizers[self.options.optimizer](baseline.model)
lr = self.options.lr #used only for sgd
i = 0
lowest_valid_loss = 9999
print ('unsupervised training...')
for epoch in range(self.options.epochs):
sents = 0
total_loss = 0.0
train = self.reader.next_example(0)
train_size = len(self.reader.data[0])
for data in train:
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
sents += 1
# random sample
enc_loss_act, _, act = encoder.parse(s1, s2, s3, pos, sample=True)
_, dec_loss_act, dec_loss_word = decoder.compute_loss(s3, act)
# save reward
logpx = -dec_loss_word.scalar_value()
total_loss -= logpx
# reconstruction and regularization loss backprop to theta_d
dec_loss_total = dec_loss_word + dec_loss_act * dy.scalarInput(self.options.dec_reg)
dec_loss_total = dec_loss_total * dy.scalarInput(1.0 / self.options.mcsamples)
dec_loss_total.scalar_value()
dec_loss_total.backward()
# update decoder
if self.options.optimizer == 'sgd':
dec_trainer.update(lr)
else:
dec_trainer.update()
if self.options.enc_update > 0:
# compute baseline and backprop to theta_b
b = baseline(s3)
logpxb = b.scalar_value()
b_loss = dy.squared_distance(b, dy.scalarInput(logpx))
b_loss.value()
b_loss.backward()
# update baseline
if self.options.optimizer == 'sgd':
baseline_trainer.update(lr)
else:
baseline_trainer.update()
# policy and and regularization loss backprop to theta_e
enc_loss_act = encoder.train(s1, s2, s3, pos, act)
enc_loss_policy = enc_loss_act * dy.scalarInput((logpx - logpxb) / len(s1))
enc_loss_total = enc_loss_policy * dy.scalarInput(self.options.enc_update) - enc_loss_act * dy.scalarInput(self.options.enc_reg)
enc_loss_total = enc_loss_total * dy.scalarInput(1.0 / self.options.mcsamples)
enc_loss_total.value()
enc_loss_total.backward()
# update encoder
if self.options.optimizer == 'sgd':
enc_trainer.update(lr)
else:
enc_trainer.update()
e = float(i) / train_size
if i % self.options.print_every == 0:
print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
sents = 0
total_loss = 0.0
if i!=0 and i % self.options.save_every == 0:
print('computing loss on validation set...')
total_valid_loss = 0
valid = self.reader.next_example(1)
valid_size = len(self.reader.data[1])
for vdata in valid:
s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
_, _, valid_word_loss = decoder.compute_loss(s3, act)
if valid_word_loss is not None:
total_valid_loss += valid_word_loss.scalar_value()
total_valid_loss = total_valid_loss * 1.0 / valid_size
if total_valid_loss < lowest_valid_loss:
lowest_valid_loss = total_valid_loss
print ('saving model...')
encoder.Save(self.options.result_dir + 'model_enc')
decoder.Save(self.options.result_dir + 'model_dec')
baseline.Save(self.options.result_dir + 'baseline')
else:
lr = lr * self.options.decay
i += 1
def unsupervised_without_baseline(self):
decoder = self.create_decoder()
assert(os.path.exists(self.options.result_dir + 'model_dec'))
self.load_decoder(decoder)
encoder = self.create_encoder()
assert(os.path.exists(self.options.result_dir + 'model_enc'))
self.load_encoder(encoder)
enc_trainer = optimizers[self.options.optimizer](encoder.model)
dec_trainer = optimizers[self.options.optimizer](decoder.model)
lr = self.options.lr #used only for sgd
i = 0
lowest_valid_loss = 9999
print ('unsupervised training...')
for epoch in range(self.options.epochs):
sents = 0
total_loss = 0.0
train = self.reader.next_example(0)
train_size = len(self.reader.data[0])
for data in train:
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
sents += 1
# max sample
enc_loss_act, _, act = encoder.parse(s1, s2, s3, pos, sample=False)
_, dec_loss_act, dec_loss_word = decoder.compute_loss(s3, act)
logpxb = -dec_loss_word.scalar_value()
total_loss -= logpxb
# random sample
enc_loss_act, _, act = encoder.parse(s1, s2, s3, pos, sample=True)
_, dec_loss_act, dec_loss_word = decoder.compute_loss(s3, act)
# save reward
logpx = -dec_loss_word.scalar_value()
# reconstruction and regularization loss backprop to theta_d
dec_loss_total = dec_loss_word + dec_loss_act * dy.scalarInput(self.options.dec_reg)
dec_loss_total = dec_loss_total * dy.scalarInput(1.0 / self.options.mcsamples)
dec_loss_total.scalar_value()
dec_loss_total.backward()
# update decoder
if self.options.optimizer == 'sgd':
dec_trainer.update(lr)
else:
dec_trainer.update()
if self.options.enc_update > 0:
# policy and and regularization loss backprop to theta_e
enc_loss_act = encoder.train(s1, s2, s3, pos, act)
enc_loss_policy = enc_loss_act * dy.scalarInput((logpx - logpxb) / len(s1))
enc_loss_total = enc_loss_policy * dy.scalarInput(self.options.enc_update) - enc_loss_act * dy.scalarInput(self.options.enc_reg)
enc_loss_total = enc_loss_total * dy.scalarInput(1.0 / self.options.mcsamples)
enc_loss_total.value()
enc_loss_total.backward()
if self.options.optimizer == 'sgd':
enc_trainer.update(lr)
else:
enc_trainer.update()
e = float(i) / train_size
if i % self.options.print_every == 0:
print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
sents = 0
total_loss = 0.0
if i!=0 and i % self.options.save_every == 0:
print('computing loss on validation set...')
total_valid_loss = 0
valid = self.reader.next_example(1)
valid_size = len(self.reader.data[1])
for vdata in valid:
s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
_, _, valid_word_loss = decoder.compute_loss(s3, act)
if valid_word_loss is not None:
total_valid_loss += valid_word_loss.scalar_value()
total_valid_loss = total_valid_loss * 1.0 / valid_size
if total_valid_loss < lowest_valid_loss:
lowest_valid_loss = total_valid_loss
print ('saving model...')
encoder.Save(self.options.result_dir + 'model_enc')
decoder.Save(self.options.result_dir + 'model_dec')
else:
lr = lr * self.options.decay
i += 1
def pretrain_baseline(self):
baseline = self.create_baseline()
if os.path.exists(self.options.result_dir + 'baseline'):
self.load_baseline(baseline)
baseline_trainer = optimizers[self.options.optimizer](baseline.model)
lr = self.options.lr #used only for sgd
i = 0
lowest_valid_loss = 9999
print ('train baseline, for simplicity use the same data here')
for epoch in range(self.options.epochs):
sents = 0
total_loss = 0.0
train = self.reader.next_example(0)
train_size = len(self.reader.data[0])
for data in train:
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
sents += 1
loss = -baseline(s3)
if loss is not None:
total_loss += loss.scalar_value()
loss.backward()
if self.options.optimizer == 'sgd':
baseline_trainer.update(lr)
else:
baseline_trainer.update()
e = float(i) / train_size
if i % self.options.print_every == 0:
print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
sents = 0
total_loss = 0.0
if i!=0 and i % self.options.save_every == 0:
print('computing loss on validation set...')
total_valid_loss = 0
valid = self.reader.next_example(1)
valid_size = len(self.reader.data[1])
for vdata in valid:
s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
valid_loss = -baseline(s3)
if valid_loss is not None:
total_valid_loss += valid_loss.scalar_value()
total_valid_loss = total_valid_loss * 1.0 / valid_size
if total_valid_loss < lowest_valid_loss:
lowest_valid_loss = total_valid_loss
print ('saving model...')
baseline.Save(self.options.result_dir + 'baseline')
else:
lr = lr * self.options.decay
i += 1
def parsing(self):
decoder = self.create_decoder()
assert(os.path.exists(self.options.result_dir + 'model_dec'))
self.load_decoder(decoder)
encoder = self.create_encoder()
assert(os.path.exists(self.options.result_dir + 'model_enc'))
self.load_encoder(encoder)
print('parsing...')
rf = open(os.path.join(self.options.result_dir, 'result'), 'w')
test = self.reader.next_example(2)
p = Parser(encoder, decoder)
for dataid, data in enumerate(test):
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
output = p(s1, s2, s3, pos, self.options.nsamples)
rf.write(output + '\n')
rf.close()
f1 = compute_eval_score(self.options.result_dir)
print('bracket F1 score is {}'.format(f1))
def language_modeling(self):
decoder = self.create_decoder()
assert(os.path.exists(self.options.result_dir + 'model_dec'))
self.load_decoder(decoder)
encoder = self.create_encoder()
assert(os.path.exists(self.options.result_dir + 'model_enc'))
self.load_encoder(encoder)
print('computing language model score...')
test = self.reader.next_example(2)
lm = LanguageModel(encoder, decoder)
total_ll = 0
total_tokens = 0
for dataid, data in enumerate(test):
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
if len(s1) <= 1:
continue
total_ll += lm(s1, s2, s3, pos, self.options.nsamples)
total_tokens += len(s1)
perp = compute_perplexity(total_ll, total_tokens)
print('perplexity: {}'.format(perp))
def create_decoder(self):
return Decoder(self.reader,
self.options.nlayers,
self.options.word_dim,
self.options.pretrained_dim,
self.options.action_dim,
self.options.dec_lstm_dim,
self.options.embedding_file)
def create_encoder(self):
return Encoder(self.reader,
self.options.nlayers,
self.options.word_dim,
self.options.pretrained_dim,
self.options.pos_dim,
self.options.action_dim,
self.options.enc_lstm_dim,
self.options.embedding_file)
def create_baseline(self):
baseline = None
if self.options.baseline == 'rnnlm':
baseline = LanguageModelBaseline(self.reader,
self.options.word_dim,
self.options.pretrained_dim,
self.options.dec_lstm_dim,
self.options.embedding_file)
elif self.options.baseline == 'rnnauto':
baseline = RNNAutoencBaseline(self.reader,
self.options.word_dim,
self.options.pretrained_dim,
self.options.dec_lstm_dim,
self.options.embedding_file)
elif self.options.baseline == 'mlp':
baseline = MLPAutoencBaseline(self.reader,
self.options.word_dim,
self.options.pretrained_dim,
self.options.embedding_file)
else:
raise NotImplementedError("Baseline Not Implmented")
return baseline
def load_decoder(self, decoder):
decoder.Load(self.options.result_dir + 'model_dec')
def load_encoder(self, encoder):
encoder.Load(self.options.result_dir + 'model_enc')
def load_baseline(self, baseline):
baseline.Load(self.options.result_dir + 'baseline')
| 42.475149 | 148 | 0.514299 | 20,657 | 0.966862 | 0 | 0 | 0 | 0 | 0 | 0 | 1,654 | 0.077416 |
68587aa670f83d82520736428332f2bf416f400e | 254 | py | Python | funcion_mutacion.py | J0SU3IC3/Proyecto_Algoritmo_Genetico | bcf98d335def651726b17154624c96ee3b8cfdeb | [
"BSD-2-Clause"
] | null | null | null | funcion_mutacion.py | J0SU3IC3/Proyecto_Algoritmo_Genetico | bcf98d335def651726b17154624c96ee3b8cfdeb | [
"BSD-2-Clause"
] | null | null | null | funcion_mutacion.py | J0SU3IC3/Proyecto_Algoritmo_Genetico | bcf98d335def651726b17154624c96ee3b8cfdeb | [
"BSD-2-Clause"
] | null | null | null | import random
def mutacion(indiv):
alea_1=random.randint(0,len(indiv)-1)
alea_2 = random.randint(0, len(indiv)-1)
interc_1=indiv[alea_1]
interc_2=indiv[alea_2]
indiv[alea_1] = interc_2
indiv[alea_2] = interc_1
return indiv
| 19.538462 | 44 | 0.681102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
685b7db6474e2581fd6fc7a88d44f479e33ae88c | 4,724 | py | Python | examples/import_hostgroups.py | jkraenzle/steelscript-netprofiler | 970a8f492203875a35cc13e94237740b31eb01b4 | [
"MIT"
] | 5 | 2016-02-29T01:16:36.000Z | 2019-12-08T19:04:54.000Z | examples/import_hostgroups.py | jkraenzle/steelscript-netprofiler | 970a8f492203875a35cc13e94237740b31eb01b4 | [
"MIT"
] | 5 | 2015-08-18T19:07:44.000Z | 2020-06-04T15:56:38.000Z | examples/import_hostgroups.py | jkraenzle/steelscript-netprofiler | 970a8f492203875a35cc13e94237740b31eb01b4 | [
"MIT"
] | 3 | 2016-02-29T01:16:37.000Z | 2020-06-04T00:43:38.000Z | #!/usr/bin/env python
# Copyright (c) 2019 Riverbed Technology, Inc.
#
# This software is licensed under the terms and conditions of the MIT License
# accompanying the software ("License"). This software is distributed "AS IS"
# as set forth in the License.
import csv
import sys
import string
import optparse
from collections import defaultdict
from steelscript.netprofiler.core.app import NetProfilerApp
from steelscript.netprofiler.core.hostgroup import HostGroupType, HostGroup
from steelscript.commands.steel import prompt_yn
from steelscript.common.exceptions import RvbdException
# This script will take a file with subnets and SiteNames
# and create a HostGroupType on the target NetProfiler.
# If the HostGroupType already exists, it will be deleted,
# before creating a new one with the same name.
#
# See the EXAMPLE text below for the format of the input
# file. Note that multiple SiteNames with different
# IP address spaces can be included.
EXAMPLE_WARN = """
Invalid file format
Ensure file has correct header.
example file:
subnet SiteName
10.143.58.64/26 CZ-Prague-HG
10.194.32.0/23 MX-SantaFe-HG
10.170.55.0/24 KR-Seoul-HG
10.234.9.0/24 ID-Surabaya-HG
10.143.58.63/23 CZ-Prague-HG
"""
class HostGroupImport(NetProfilerApp):
def add_options(self, parser):
super(HostGroupImport, self).add_options(parser)
group = optparse.OptionGroup(parser, "HostGroup Options")
group.add_option('--hostgroup', action='store',
help='Name of hostgroup to overwrite')
group.add_option('-i', '--input-file', action='store',
help='File path to hostgroup file')
parser.add_option_group(group)
def validate_args(self):
"""Ensure all arguments are present."""
super(HostGroupImport, self).validate_args()
if not self.options.input_file:
self.parser.error('Host group file is required, specify with '
'"-i" or "--input-file"')
if not self.options.hostgroup:
self.parser.error('Hostgroup name is required, specify with '
'"--hostgroup"')
def validate(self, name):
valid = set(string.letters + string.digits + '.-_')
return all(c in valid for c in name)
def import_file(self):
"""Process the input file and load into dict."""
groups = defaultdict(list)
with open(self.options.input_file, 'rb') as f:
dialect = csv.Sniffer().sniff(f.read(1024))
f.seek(0)
reader = csv.reader(f, dialect)
header = reader.next()
if header != ['subnet', 'SiteName']:
print(EXAMPLE_WARN)
for i, row in enumerate(reader):
cidr, group = row
if not self.validate(group):
print('Invalid group name on line {0}: {1}'
''.format(i+2, group))
sys.exit()
groups[group].append(cidr)
return groups
def update_hostgroups(self, groups):
"""Replace existing HostGroupType with contents of groups dict."""
# First find any existing HostGroupType
try:
hgtype = HostGroupType.find_by_name(self.netprofiler,
self.options.hostgroup)
hgtype.config = []
hgtype.groups = {}
print('Existing HostGroupType "{0}" found.'
''.format(self.options.hostgroup))
except RvbdException:
print('No existing HostGroupType found, creating a new one.')
hgtype = HostGroupType.create(self.netprofiler,
self.options.hostgroup)
# Add new values
for group, cidrs in groups.items():
hg = HostGroup(hgtype, group)
hg.add(cidrs)
# Save to NetProfiler
hgtype.save()
print ('HostGroupType "%s" configuration saved.'
% self.options.hostgroup)
def main(self):
"""Confirm overwrite then update hostgroups."""
confirm = ('The contents of hostgroup {0} will be overwritten '
'by the file {1}, are you sure?'
''.format(self.options.hostgroup, self.options.input_file))
if not prompt_yn(confirm):
print('Okay, aborting.')
sys.exit()
groups = self.import_file()
self.update_hostgroups(groups)
print('Successfully updated {0} on {1}'.format(self.options.hostgroup,
self.netprofiler.host))
if __name__ == '__main__':
HostGroupImport().run()
| 34.231884 | 78 | 0.605419 | 3,450 | 0.730313 | 0 | 0 | 0 | 0 | 0 | 0 | 1,721 | 0.36431 |
685bdb02c954b53c73259dab606713b94ba02c39 | 330 | py | Python | exec.py | developerHaneum/listBug | d3828813b36a533aef93ac691eeafb08a62a03c0 | [
"MIT"
] | 2 | 2021-07-07T06:38:05.000Z | 2021-07-07T07:07:35.000Z | exec.py | developerHaneum/listBug | d3828813b36a533aef93ac691eeafb08a62a03c0 | [
"MIT"
] | null | null | null | exec.py | developerHaneum/listBug | d3828813b36a533aef93ac691eeafb08a62a03c0 | [
"MIT"
] | 1 | 2021-07-07T06:38:06.000Z | 2021-07-07T06:38:06.000Z | from clint.textui import *
def Write():
i = 0
while True:
i += 1
text = input(colored.cyan("%d: "%i))
if text == "q":
sys.exit(0)
def Print():
print(colored.cyan("-- Bug list --"))
print(colored.cyan("-- ") + colored.red("<q:quit>") + colored.cyan(" --"))
Write()
Print()
| 22 | 78 | 0.490909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.136364 |
685bf7a01002d00376abfb0f9c41e4c0b30ff038 | 229 | py | Python | datasets/hscic/scrape.py | nhsengland/publish-o-matic | dc8f16cb83a2360989afa44d887e63b5cde6af29 | [
"MIT"
] | null | null | null | datasets/hscic/scrape.py | nhsengland/publish-o-matic | dc8f16cb83a2360989afa44d887e63b5cde6af29 | [
"MIT"
] | 11 | 2015-03-02T16:30:20.000Z | 2016-11-29T12:16:15.000Z | datasets/hscic/scrape.py | nhsengland/publish-o-matic | dc8f16cb83a2360989afa44d887e63b5cde6af29 | [
"MIT"
] | 2 | 2020-12-25T20:38:31.000Z | 2021-04-11T07:35:01.000Z |
from datasets.hscic.hscic_datasets import scrape as datasets_scrape
from datasets.hscic.hscic_indicators import scrape as indicators_scrape
def main(workspace):
datasets_scrape(workspace)
indicators_scrape(workspace) | 22.9 | 71 | 0.829694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
685caf0e5603d61d8ec4f3adc7a5d8734b0c911d | 5,754 | py | Python | src/tests/behave/api/features/client.py | shaneutt/mercury | d8d3c9a86ab3235d4e36583fcee6f656e5209b7e | [
"Apache-2.0"
] | 4 | 2017-07-21T20:56:46.000Z | 2018-04-30T13:37:37.000Z | src/tests/behave/api/features/client.py | jr0d/mercury | d8d3c9a86ab3235d4e36583fcee6f656e5209b7e | [
"Apache-2.0"
] | 11 | 2017-08-24T04:55:58.000Z | 2021-12-13T19:36:21.000Z | src/tests/behave/api/features/client.py | shaneutt/mercury | d8d3c9a86ab3235d4e36583fcee6f656e5209b7e | [
"Apache-2.0"
] | 6 | 2017-08-18T15:59:15.000Z | 2018-08-21T19:54:34.000Z | import copy
import json
import requests
import datetime
from src.tests.behave.common.config import get_conflagration
class APIClient(object):
def __init__(self, base_url, request_kwargs=None, client_kwargs=None):
self.cfg = get_conflagration()
token = self.get_identity_token()
timeout = None
ssl_certificate_verify = False
verbose = True
if request_kwargs:
timeout = request_kwargs.get("timeout", timeout)
ssl_certificate_verify = request_kwargs.get(
"ssl_certificate_verify", ssl_certificate_verify
)
verbose = request_kwargs.get("verbose", verbose)
self.base_url = base_url
self.headers = {
"Content-Type": "application/json",
"X-Auth-Token": token,
}
if client_kwargs:
if not client_kwargs["authorized"]:
self.headers["X-Auth-Token"] = ""
self.verify = ssl_certificate_verify
self.verbose = verbose
self.request_kwargs = dict()
self.request_kwargs["url"] = self.base_url
self.request_kwargs["headers"] = self.headers
self.request_kwargs["verify"] = self.verify
if timeout:
self.request_kwargs["timeout"] = timeout
def get(
self,
resource_id=None,
params=None,
url=None,
url_suffix=None,
headers=None,
):
request_kwargs = copy.deepcopy(self.request_kwargs)
if url:
request_kwargs["url"] = url
if resource_id:
resource_url = request_kwargs["url"]
request_kwargs["url"] = "{0}/{1}".format(resource_url, resource_id)
if url_suffix:
resource_url = request_kwargs["url"]
request_kwargs["url"] = "{0}/{1}".format(resource_url, url_suffix)
if params:
request_kwargs["params"] = params
if headers:
request_kwargs["headers"].update(headers)
resp = requests.get(**request_kwargs)
if self.verbose:
d = datetime.datetime.now()
s = "{}-{}-{} {}:{}:{}".format(
d.year, d.month, d.day, d.hour, d.minute, d.second
)
print("{0}GET REQUEST{1}".format("*" * 20, "*" * 24))
print("{0}{1}{2}".format("*" * 16, s, "*" * 20))
print(request_kwargs)
print("{0}RESPONSE{1}".format("*" * 20, "*" * 27))
print(resp.content)
print("*" * 48)
return resp
def post(self, data, url_suffix=None, headers=None, resource_id=None):
request_kwargs = copy.deepcopy(self.request_kwargs)
if resource_id:
resource_url = request_kwargs["url"]
request_kwargs["url"] = "{0}/{1}".format(resource_url, resource_id)
if url_suffix:
resource_url = request_kwargs["url"]
request_kwargs["url"] = "{0}/{1}".format(resource_url, url_suffix)
if headers:
request_kwargs["headers"].update(headers)
request_kwargs["data"] = data
resp = requests.post(**request_kwargs)
if self.verbose:
d = datetime.datetime.now()
s = "{}-{}-{} {}:{}:{}".format(
d.year, d.month, d.day, d.hour, d.minute, d.second
)
print("{0}POST REQUEST{1}".format("*" * 20, "*" * 24))
print("{0}{1}{2}".format("*" * 16, s, "*" * 21))
print(request_kwargs)
print("{0}RESPONSE{1}".format("*" * 20, "*" * 27))
print(resp.content)
print("*" * 48)
return resp
def delete(self, url_suffix=None, headers=None, resource_id=None):
request_kwargs = copy.deepcopy(self.request_kwargs)
if resource_id:
resource_url = request_kwargs["url"]
request_kwargs["url"] = "{0}/{1}".format(resource_url, resource_id)
if url_suffix:
resource_url = request_kwargs["url"]
request_kwargs["url"] = "{0}/{1}".format(resource_url, url_suffix)
if headers:
request_kwargs["headers"].update(headers)
resp = requests.delete(**request_kwargs)
if self.verbose:
d = datetime.datetime.now()
s = "{}-{}-{} {}:{}:{}".format(
d.year, d.month, d.day, d.hour, d.minute, d.second
)
print("{0}DELETE REQUEST{1}".format("*" * 20, "*" * 24))
print("{0}{1}{2}".format("*" * 18, s, "*" * 21))
print(request_kwargs)
print("{0}RESPONSE{1}".format("*" * 20, "*" * 27))
print(resp.content)
print("*" * 48)
return resp
def get_identity_token(self):
internal_idenity_url = self.cfg.INTERNAL_IDENTITY.internal_identity_url
internal_identity_username = (
self.cfg.INTERNAL_IDENTITY.internal_identity_username
)
internal_identity_password = (
self.cfg.INTERNAL_IDENTITY.internal_identity_password
)
domain = self.cfg.INTERNAL_IDENTITY.domain
domain_name = self.cfg.INTERNAL_IDENTITY.domain_name
identity_headers = {"Content-Type": "application/json"}
identity_data = {
"auth": {
"passwordCredentials": {
"username": internal_identity_username,
"password": internal_identity_password,
},
domain: {"name": domain_name},
}
}
token_resp = requests.post(
internal_idenity_url,
headers=identity_headers,
data=json.dumps(identity_data),
)
token = token_resp.json()["access"]["token"]["id"]
return token
| 36.649682 | 79 | 0.550574 | 5,634 | 0.979145 | 0 | 0 | 0 | 0 | 0 | 0 | 673 | 0.116962 |
685d6be8bc9e1c64b0fa673c0b867858db37f70e | 10,556 | py | Python | Instrument_Examples/DMM6500/Upload_and_Execute_a_Test_Sequence_to_the_Series_2260B_Power_Supply/Upload_and_Execute_Test_Sequence_File_to_2260B_Power_Supply_Rev_B.py | 398786172/keithley | f78c5220841775a45ae60645c774e8b443b02ec3 | [
"BSD-Source-Code"
] | 31 | 2019-04-11T14:25:39.000Z | 2022-03-18T15:09:33.000Z | Instrument_Examples/DMM6500/Upload_and_Execute_a_Test_Sequence_to_the_Series_2260B_Power_Supply/Upload_and_Execute_Test_Sequence_File_to_2260B_Power_Supply_Rev_B.py | 398786172/keithley | f78c5220841775a45ae60645c774e8b443b02ec3 | [
"BSD-Source-Code"
] | 27 | 2019-04-10T20:21:52.000Z | 2021-12-09T01:59:32.000Z | Instrument_Examples/DMM6500/Upload_and_Execute_a_Test_Sequence_to_the_Series_2260B_Power_Supply/Upload_and_Execute_Test_Sequence_File_to_2260B_Power_Supply_Rev_B.py | 398786172/keithley | f78c5220841775a45ae60645c774e8b443b02ec3 | [
"BSD-Source-Code"
] | 30 | 2019-06-08T09:38:20.000Z | 2022-03-18T15:10:37.000Z | """***********************************************************
*** Copyright Tektronix, Inc. ***
*** See www.tek.com/sample-license for licensing terms. ***
***********************************************************"""
import socket
import struct
import math
import time
import sys
echo_cmd = 0
"""*********************************************************************************
Function: instrument_connect(my_socket, ip_address string, my_port int, timeout
do_reset, do_id_query)
Purpose: Open an instance of an instrument object for remote communication
over LAN/Ethernet.
Parameters:
my_socket - Instance of a socket object.
ip_address (string) - The TCP/IP address string associated with the
target instrument.
my_port (int) - The instrument connection port.
timeout (int) - The timeout limit for query/communication exchanges.
do_reset (int) - Determines whether the instrument is to be reset
upon connection to the instrument. Setting to 1
will perform the reset; setting to zero avoids it.
do_clear (int) - Determines whether the instrument is to be cleared
        do_id_query (int) - Determines whether the instrument is to echo its
                            identification string after it is initialized.
Returns:
my_socket - Updated instance of a socket object that includes
attributes of a valid connection.
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_connect(my_socket, my_address, my_port, timeout, do_reset, do_clear, do_id_query):
    """Open a LAN/Ethernet connection to the target instrument.

    Parameters:
        my_socket - Instance of a socket object.
        my_address (string) - The TCP/IP address string of the target instrument.
        my_port (int) - The instrument connection port.
        timeout (int) - Timeout (seconds) applied to connect and to all later
                        query/communication exchanges.
        do_reset (int) - When 1, issue *RST after connecting.
        do_clear (int) - When 1, issue *CLS after connecting.
        do_id_query (int) - When 1, query *IDN? and print the reply.

    Returns:
        my_socket - The connected socket object.
    """
    # Apply the timeout BEFORE connecting so the connect attempt itself honors
    # the caller-supplied limit instead of the OS default (which can block for
    # minutes on an unreachable host).
    my_socket.settimeout(timeout)
    my_socket.connect((my_address, my_port))  # input to connect must be a tuple
    # Disable Nagle's algorithm so short command packets are sent immediately.
    my_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    if do_reset == 1:
        instrument_write(my_socket, "*RST")
    if do_clear == 1:
        instrument_write(my_socket, "*CLS")
    if do_id_query == 1:
        tmp_id = instrument_query(my_socket, "*IDN?", 100)
        print(tmp_id)
    return my_socket
"""*********************************************************************************
Function: instrument_disconnect(my_socket)
Purpose: Break the LAN/Ethernet connection between the controlling computer
and the target instrument.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
Returns:
None
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_disconnect(my_socket):
    """Close the LAN/Ethernet connection to the target instrument."""
    my_socket.close()
"""*********************************************************************************
Function: instrument_write(my_socket, my_command)
Purpose: This function issues control commands to the target instrument.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
my_command (string) - The command issued to the instrument to make it
perform some action or service.
Returns:
None
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_write(my_socket, my_command):
    """Send a control command to the instrument, appending a newline terminator.

    When the module-level echo_cmd flag is 1, the command is also printed
    to the console for debugging.
    """
    if echo_cmd == 1:
        print(my_command)
    # Terminate with a newline and transmit as bytes in a single call.
    my_socket.send("{0}\n".format(my_command).encode())
"""*********************************************************************************
Function: instrument_read(my_socket, receive_size)
Purpose: This function asks the connected instrument to reply with some
previously requested information, typically queued up from a call
to instrument_write().
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
receive_size (int) - Size of the data/string to be returned to
the caller.
Returns:
reply_string (string) - The requested information returned from the
target instrument.
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_read(my_socket, receive_size):
    """Receive up to receive_size bytes from the instrument and decode to str."""
    raw_reply = my_socket.recv(receive_size)
    return raw_reply.decode()
"""*********************************************************************************
Function: instrument_query(my_socket, my_command, receive_size)
Purpose: This function issues control commands to the target instrument with
the expectation that data will be returned. For this function
instance, the returned data is (typically) in string format.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
my_command (string) - The command issued to the instrument to make it
perform some action or service.
receive_size (int) - The approximate number of bytes of data the caller
expects to be returned in the response from the
instrument.
Returns:
reply_string (string) - The requested information returned from the
target instrument. Obtained by way of a caller
to instrument_read().
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_query(my_socket, my_command, receive_size):
    """Issue a command that produces output and return the decoded reply.

    Parameters:
        my_socket - TCP instrument connection object.
        my_command (string) - command issued to the instrument.
        receive_size (int) - approximate number of bytes expected back.

    Returns:
        The instrument's reply, obtained via instrument_read().
    """
    instrument_write(my_socket, my_command)
    reply_string = instrument_read(my_socket, receive_size)
    return reply_string
"""*********************************************************************************
Function: write_data(output_data_path, data_str)
Purpose: This function issues control commands to the target instrument.
Parameters:
output_data_path (string) - The file name and path of the file to be written
to. Note that the file is opened in append mode
and no previously existing data will be over-
written.
data_str (string) - The data to be written to file. It is up to the
user to format this data external to this
function prior to using it.
Returns:
None
Revisions:
2020-01-03 JJB Initial revision.
*********************************************************************************"""
def write_data(output_data_path, data_str):
    """Append one line of (pre-formatted) data to the target file.

    Parameters:
        output_data_path (string) - File name and path of the file to be
                                    written to. The file is opened in append
                                    mode so previously existing data is
                                    never overwritten.
        data_str (string) - The data to be written. It is up to the caller
                            to format this data before passing it in.

    Returns:
        None
    """
    # Use a context manager so the handle is closed even if write() raises;
    # the original explicit open()/close() pair leaked the handle on error.
    with open(output_data_path, "a") as ofile:
        ofile.write("{0}\n".format(data_str))
def upload_test_sequence(instrument_object, file_and_path):
    """Push each row of a test-sequence CSV (minus the header row) to the DMM.

    Every non-header line is forwarded verbatim to the instrument-side
    append_test_to_global_table() TSP function.
    """
    with open(file_and_path) as file_in:
        header_seen = False
        for line in file_in:
            if header_seen:
                instrument_write(instrument_object, "append_test_to_global_table(\"{0}\")".format(line.rstrip('\r\n')))
            else:
                # The first line in the sequence file is the header and is
                # not intended to be part of the test sequence.
                header_seen = True
"""*********************************************************************************
This example shows how the user of a Keithley DMM6500 can load a TSP script file
and execute embedded functions. This allow the user to customize test operations
at the instrument level. In particular, this example shows how a user might
create a direct socket connection to the Series 2260B power supply and execute
a supply output test sequence that defines voltage/current levels, durations for
each defined step, and slew control.
This program is dependent on two additional files:
A. The series_2260B_sequence_control.tsp script which....
1. Promotes the transfer of the test sequence file to a Lua table
on the DMM.
2. Initiates the sockets connection to the 2260B
3. Executes the uploaded test sequence.
B. A 2260B test sequence in *.csv format.
*********************************************************************************"""
# --- Configuration: adjust addresses/ports for your bench setup ------------
my_ip_address = "192.168.1.104" # Define your instrument's IP address here.
my_port = 5025 # Define your instrument's port number here.
do_instr_reset = 1
do_instr_clear = 1
do_instr_id_query = 1
t1 = time.time()
# Open the socket connections...
my_instr = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Establish a TCP/IP socket object
# NOTE(review): 20000 is presumably the socket timeout in ms -- confirm
# against instrument_connect()'s signature (defined earlier in this file).
instrument_connect(my_instr, my_ip_address, my_port, 20000, do_instr_reset, do_instr_clear, do_instr_id_query)
# Ready the instrument to receive the target TSP file contents
file = "series_2260B_sequence_control.tsp"
func_file = open(file, "r")
contents = func_file.read()
func_file.close()
# Remove any previously loaded copy of the wrapper script on the instrument.
instrument_write(my_instr, "if loadfuncs ~= nil then script.delete('loadfuncs') end")
# Load the script file in one large chunk then close out the loadfuncs wrapper script.
instrument_write(my_instr, "loadscript loadfuncs\n{0}\nendscript\n".format(contents))
# Call loadfuncs to load the contents of the script file into active memory
print(instrument_query(my_instr, "loadfuncs()", 32)) # Note that we are echoing a queried function here.
# You will note that the final line in the functions.tsp
# script file is a print() command that will push its
# contents to the output data queue.
instrument_write(my_instr, "do_beep(0.250, 1000, 3)")
# Upload the CSV test sequence into a Lua table on the DMM, then drive the
# Series 2260B supply through the uploaded sequence.
file = "Test_Sequence_06.csv"
upload_test_sequence(my_instr, file)
ip_address_2260B = "192.168.1.117"
instrument_write(my_instr, "connect_to_2260B(\"{0}\")".format(ip_address_2260B))
instrument_write(my_instr, "enable_2260B_output({0}, {1}, {2})".format(0.0, 1.0, "ON"))
instrument_write(my_instr, "ps2260_execute_test_sequence()")
instrument_write(my_instr, "enable_2260B_output({0}, {1}, {2})".format(0.0, 1.0, "OFF"))
instrument_write(my_instr, "disconnect_from_2260B()")
instrument_disconnect(my_instr)
t2 = time.time() # Stop the timer...
# Notify the user of completion and the data streaming rate achieved.
print("done")
print("Total Time Elapsed: {0:.3f} s".format(t2 - t1))
input("Press Enter to continue...")
exit() | 38.108303 | 120 | 0.597764 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,914 | 0.749716 |
685e4d13ea104ac619e52a74bbf71675b69b19ec | 1,143 | py | Python | autopilot.py | SpeedrunnerG55/EDAutopilot | e8553f66e51731a3e8ed04f168e36524cdd0678d | [
"MIT"
] | 5 | 2020-05-08T12:58:39.000Z | 2020-06-21T23:39:00.000Z | autopilot.py | SpeedrunnerG55/EDAutopilot | e8553f66e51731a3e8ed04f168e36524cdd0678d | [
"MIT"
] | null | null | null | autopilot.py | SpeedrunnerG55/EDAutopilot | e8553f66e51731a3e8ed04f168e36524cdd0678d | [
"MIT"
] | null | null | null | import cv2
from dev_autopilot import autopilot
from emulation import get_bindings, clear_input
import threading
import kthread
from pynput import keyboard
from programInfo import showInfo
STATE = 0
def start_action():
    """Restart the autopilot: kill any running instance, then spawn a fresh one."""
    stop_action()
    # KThread is a killable thread; the name is how stop_action() finds it later.
    kthread.KThread(target = autopilot, name = "EDAutopilot").start()
def stop_action():
    """Stop the autopilot thread and release any held inputs."""
    # Close any OpenCV preview/debug windows the autopilot opened.
    cv2.destroyAllWindows()
    # Kill every thread named 'EDAutopilot' (created by start_action()).
    for thread in threading.enumerate():
        if thread.getName() == 'EDAutopilot':
            thread.kill()
    # Release any keys the autopilot may have left pressed.
    clear_input(get_bindings())
def on_press(key):
    """Global hotkey handler: Home starts the autopilot, End stops it."""
    try:
        if key == keyboard.Key.home:
            print('start action')
            start_action()
        if key == keyboard.Key.end:
            print('stop action')
            stop_action()
    except AttributeError:
        # NOTE(review): the comparisons above do not obviously raise
        # AttributeError; this handler looks vestigial from the pynput
        # example that accessed key.char -- confirm before removing.
        print('special key {0} pressed'.format(key))
def on_release(key):
    """On Esc: tear everything down and end the pynput listener.

    Returning False from a pynput callback stops the Listener loop.
    """
    if key == keyboard.Key.esc:
        # Stop listener
        cv2.destroyAllWindows()
        stop_action()
        return False
# Show program info, then block on the global keyboard listener.
showInfo();
# Collect events until released
with keyboard.Listener(on_press=on_press,on_release=on_release) as listener:
    listener.join()
| 25.4 | 77 | 0.63867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 126 | 0.110236 |
685ec69b006b0a9078bc34ecf58f2a15ca333c06 | 3,284 | py | Python | colour/examples/temperature/examples_cct.py | soma2000-lang/colour | bb7ee23ac65e09613af78bd18dd98dffb1a2904a | [
"BSD-3-Clause"
] | 1 | 2022-02-12T06:28:15.000Z | 2022-02-12T06:28:15.000Z | colour/examples/temperature/examples_cct.py | soma2000-lang/colour | bb7ee23ac65e09613af78bd18dd98dffb1a2904a | [
"BSD-3-Clause"
] | null | null | null | colour/examples/temperature/examples_cct.py | soma2000-lang/colour | bb7ee23ac65e09613af78bd18dd98dffb1a2904a | [
"BSD-3-Clause"
] | null | null | null | """
Showcases correlated colour temperature computations.
"""
import colour
from colour.utilities import message_box
message_box("Correlated Colour Temperature Computations")
cmfs = colour.MSDS_CMFS["CIE 1931 2 Degree Standard Observer"]
illuminant = colour.SDS_ILLUMINANTS["D65"]
xy = colour.XYZ_to_xy(colour.sd_to_XYZ(illuminant, cmfs) / 100)
uv = colour.UCS_to_uv(colour.XYZ_to_UCS(colour.xy_to_XYZ(xy)))
message_box(
f'Converting to "CCT" and "D_uv" from given "CIE UCS" colourspace "uv" '
f'chromaticity coordinates using "Ohno (2013)" method:\n\n\t{uv}'
)
print(colour.uv_to_CCT(uv, cmfs=cmfs))
print(colour.temperature.uv_to_CCT_Ohno2013(uv, cmfs=cmfs))
print("\n")
message_box("Faster computation with 3 iterations but a lot less precise.")
print(colour.uv_to_CCT(uv, cmfs=cmfs, iterations=3))
print(colour.temperature.uv_to_CCT_Ohno2013(uv, cmfs=cmfs, iterations=3))
print("\n")
message_box(
f'Converting to "CCT" and "D_uv" from given "CIE UCS" colourspace "uv" '
f'chromaticity coordinates using "Robertson (1968)" method:\n\n\t{uv}'
)
print(colour.uv_to_CCT(uv, method="Robertson 1968"))
print(colour.temperature.uv_to_CCT_Robertson1968(uv))
print("\n")
CCT_D_uv = [6503.49254150, 0.00320598]
message_box(
f'Converting to "CIE UCS" colourspace "uv" chromaticity coordinates from '
f'given "CCT" and "D_uv" using "Ohno (2013)" method:\n\n\t{CCT_D_uv}'
)
print(colour.CCT_to_uv(CCT_D_uv, cmfs=cmfs))
print(colour.temperature.CCT_to_uv_Ohno2013(CCT_D_uv, cmfs=cmfs))
print("\n")
message_box(
f'Converting to "CIE UCS" colourspace "uv" chromaticity coordinates from '
f'given "CCT" and "D_uv" using "Robertson (1968)" method:\n\n\t{CCT_D_uv}'
)
print(colour.CCT_to_uv(CCT_D_uv, method="Robertson 1968"))
print(colour.temperature.CCT_to_uv_Robertson1968(CCT_D_uv))
print("\n")
CCT = 6503.49254150
message_box(
f'Converting to "CIE UCS" colourspace "uv" chromaticity coordinates from '
f'given "CCT" using "Krystek (1985)" method:\n\n\t({CCT})'
)
print(colour.CCT_to_uv(CCT, method="Krystek 1985"))
print(colour.temperature.CCT_to_uv_Krystek1985(CCT))
print("\n")
xy = colour.CCS_ILLUMINANTS["CIE 1931 2 Degree Standard Observer"]["D65"]
message_box(
f'Converting to "CCT" from given "CIE xy" chromaticity coordinates using '
f'"McCamy (1992)" method:\n\n\t{xy}'
)
print(colour.xy_to_CCT(xy, method="McCamy 1992"))
print(colour.temperature.xy_to_CCT_McCamy1992(xy))
print("\n")
message_box(
f'Converting to "CCT" from given "CIE xy" chromaticity coordinates using '
f'"Hernandez-Andres, Lee and Romero (1999)" method:\n\n\t{xy}'
)
print(colour.xy_to_CCT(xy, method="Hernandez 1999"))
print(colour.temperature.xy_to_CCT_Hernandez1999(xy))
print("\n")
CCT = 6503.49254150
message_box(
f'Converting to "CIE xy" chromaticity coordinates from given "CCT" using '
f'"Kang, Moon, Hong, Lee, Cho and Kim (2002)" method:\n\n\t{CCT}'
)
print(colour.CCT_to_xy(CCT, method="Kang 2002"))
print(colour.temperature.CCT_to_xy_Kang2002(CCT))
print("\n")
message_box(
f'Converting to "CIE xy" chromaticity coordinates from given "CCT" using '
f'"CIE Illuminant D Series" method:\n\n\t{CCT}'
)
print(colour.CCT_to_xy(CCT, method="CIE Illuminant D Series"))
print(colour.temperature.CCT_to_xy_CIE_D(CCT))
| 32.196078 | 78 | 0.739647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,606 | 0.489038 |
685ee66a7e20613fe4b2dd17fc3161351e75e8b6 | 9,940 | py | Python | qsync.py | Bibliome/misc-utils | e44ac3232f2763836d844576d812a552e54e8a38 | [
"MIT"
] | 1 | 2017-02-24T09:57:24.000Z | 2017-02-24T09:57:24.000Z | qsync.py | Bibliome/misc-utils | e44ac3232f2763836d844576d812a552e54e8a38 | [
"MIT"
] | 4 | 2018-10-10T09:59:48.000Z | 2019-01-17T15:32:48.000Z | qsync.py | Bibliome/misc-utils | e44ac3232f2763836d844576d812a552e54e8a38 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import drmaa
import shlex
from optparse import OptionParser
from sys import stderr, stdin, exit
from datetime import datetime
import traceback
def Stop(pool, jt, info):
    '''Job failure function that stops synchronization.

    Records the failed template and asks the pool to halt.
    '''
    pool.failed_jobs.append(jt)
    pool.all_done = False
    pool.shall_stop = True
def Proceed(pool, jt, info):
    '''Job failure function that proceeds with the remaining jobs.

    Records the failed template but does not stop synchronization.
    '''
    pool.failed_jobs.append(jt)
    pool.all_done = False
def Resubmit(max_tries, fail):
    '''Job failure function factory that resubmits a failed job.

    :Parameters:
    max_tries: maximum number of submissions for a job.
    fail: failure function to call once the maximum number of tries is reached.
    '''
    def resubmit_function(pool, jt, info):
        if jt.failures < max_tries:
            # Still under the retry budget: submit the same template again.
            jt.jobid = pool.session.runJob(jt)
            pool.log('job specified at ' + jt.source + ' resubmitted with id ' + jt.jobid)
            pool.current_jobs[jt.jobid] = jt
        else:
            fail(pool, jt, info)
    return resubmit_function
class JobPool:
    '''
    A pool of jobs submitted through a DRMAA session.

    :Members:
    session:      DRMAA session used for submission and status polling
    logfile:      file where actions and status are written
    current_jobs: submitted-but-unfinished jobs, keyed by DRMAA job id
    all_done:     True while every finished job has been successful
    shall_stop:   True once a failure handler requested synchronization to stop
    failed_jobs:  job templates whose runs failed
    '''
    def __init__(self, session, logfile):
        self.session = session
        self.logfile = logfile
        self.current_jobs = {}
        self.all_done = True
        self.shall_stop = False
        self.failed_jobs = []

    def log(self, msg=''):
        '''Logs a timestamped message (flushed immediately so progress is visible).'''
        d = datetime.now()
        self.logfile.write('[' + d.strftime('%Y-%m-%d %H:%M:%S') + '] ' + msg + '\n')
        self.logfile.flush()

    def createJobTemplate(self):
        '''Creates a job template (delegates to self.session)'''
        return self.session.createJobTemplate()

    def runJob(self, jt):
        '''Submits a job.

        This method delegates to self.session, then keeps track of the submitted job.

        :Parameters:
        jt: job template, with a member 'source' indicating where this template was specified

        :Returns: the DRMAA job id.
        '''
        jt.jobid = self.session.runJob(jt)
        if jt.source is None:
            # BUGFIX: this previously assigned the undefined name 'jobid',
            # raising NameError for templates without a source.
            jt.source = jt.jobid
        jt.failures = 0
        self.log('job specified at ' + jt.source + ' submitted with id ' + jt.jobid)
        self.current_jobs[jt.jobid] = jt
        return jt.jobid

    def waitall(self, fail=Proceed, interval=60):
        '''Waits for all submitted jobs to finish.

        :Parameters:
        fail: function called in case of failure; it must accept 3 parameters:
              this object, the JobTemplate object and the DRMAA JobInfo object.
        interval: check for job status every number of seconds.
        '''
        start = datetime.now()
        running = 0
        while self.current_jobs:
            joblist = list(self.current_jobs.keys()) # create fresh list to work around Python 3 iterator
            try:
                self.log('synchronizing %d jobs (%d running), see you in %d seconds' % (len(joblist), running, interval))
                self.session.synchronize(joblist, interval, False)
            except drmaa.errors.ExitTimeoutException:
                # Not everything finished within 'interval'; poll statuses anyway.
                pass
            running = 0
            for jobid in joblist:
                status = self.session.jobStatus(jobid)
                if status == drmaa.JobState.DONE:
                    try:
                        info = self.session.wait(jobid, drmaa.Session.TIMEOUT_NO_WAIT)
                    except drmaa.errors.ExitTimeoutException:
                        # BUGFIX: previously fell through with 'info' unbound
                        # (NameError) or stale; retry this job on the next poll.
                        continue
                    if info.wasAborted:
                        self.log('job specified at %s with id %s aborted' % (self.current_jobs[jobid].source, jobid))
                        self._failed(jobid, fail, info)
                    elif info.hasSignal:
                        # BUGFIX: message no longer claims the job 'aborted'.
                        self.log('job specified at %s with id %s received signal %d' % (self.current_jobs[jobid].source, jobid, info.terminatedSignal))
                        self._failed(jobid, fail, info)
                    elif info.exitStatus != 0:
                        self.log('job specified at %s with id %s exited with status %d' % (self.current_jobs[jobid].source, jobid, info.exitStatus))
                        self._failed(jobid, fail, info)
                    else:
                        self.log('job specified at %s with id %s is done' % (self.current_jobs[jobid].source, jobid))
                        del self.current_jobs[jobid]
                elif status == drmaa.JobState.FAILED:
                    self.log('job specified at %s with id %s failed somehow' % (self.current_jobs[jobid].source, jobid))
                    self._failed(jobid, fail, None)
                elif status == drmaa.JobState.RUNNING:
                    running += 1
            if self.shall_stop:
                break
        if self.all_done:
            delta = datetime.now() - start
            self.log('all jobs completed successfully in ' + str(delta) + ', you\'re welcome')
        else:
            self.log('sorry, the following jobs have failed:')
            for job in self.failed_jobs:
                self.log(job.source + ' with id ' + str(job.jobid))

    def _failed(self, jobid, fail, info):
        '''Records one failure for jobid and dispatches to the failure handler.'''
        jt = self.current_jobs[jobid]
        jt.failures += 1
        del self.current_jobs[jobid]
        fail(self, jt, info)

    def runall(self, jobs, fail=Proceed, interval=60):
        '''Submits jobs and waits for them to finish.

        :Parameters:
        jobs: a sequence of job templates
        fail: job failure function
        interval: job status check interval in seconds

        :Return value:
        True if all jobs finished successfully, False otherwise.
        '''
        for jt in jobs:
            self.runJob(jt)
        self.waitall(fail, interval)
        return self.all_done

    def terminate(self):
        '''Terminates all remaining jobs.'''
        self.log('terminating remaining jobs')
        self.session.control(drmaa.Session.JOB_IDS_SESSION_ALL, drmaa.JobControlAction.TERMINATE)
        self.current_jobs = {}
class QSyncBase:
    '''Base driver: runs a collection of DRMAA jobs and synchronizes on them.'''
    def __init__(self):
        pass

    def create_jobs(self, session):
        '''Return/yield the job templates to run. Subclasses must override.'''
        # BUGFIX: 'raise NotImplemented()' raised a TypeError because
        # NotImplemented is a value, not an exception type.
        raise NotImplementedError()

    def go(self, interval=60, force_interval=False, fail=Proceed, logfile=stderr):
        '''Runs all jobs through a fresh DRMAA session.

        :Parameters:
        interval: poll interval in seconds (must be >= 1; values <= 10 need force_interval)
        force_interval: accept poll intervals of 10 seconds or less
        fail: job failure function (Stop, Proceed, or a Resubmit(...) wrapper)
        logfile: stream where progress is logged

        :Returns: True if all jobs succeeded, False if some failed,
                  None if an unexpected exception aborted the run.
        '''
        if interval < 1:
            raise Exception('illegal interval: %d' % interval)
        if interval <= 10 and not force_interval:
            # BUGFIX: the message now actually embeds the interval value
            # (the old literal contained an unformatted %d and a missing paren).
            raise Exception('unwise interval: %d (use force_interval if you want this anyway)' % interval)
        session = drmaa.Session()
        session.initialize()
        jobs = self.create_jobs(session)
        pool = JobPool(session, logfile)
        try:
            r = pool.runall(jobs, fail, interval)
            if not r:
                pool.terminate()
            return r
        except BaseException:
            # Best effort cleanup: log, dump the traceback, kill leftovers.
            pool.log('wow, some exception here...')
            traceback.print_exc()
            pool.terminate()
        finally:
            session.exit()
class QSync(OptionParser, QSyncBase):
    '''Command-line front end: reads job command lines from files (or stdin)
    and synchronizes them through a DRMAA session.'''
    def __init__(self):
        OptionParser.__init__(self, usage='Usage: %prog [OPTIONS] [FILE...]')
        self.set_defaults(fail=Proceed)
        self.add_option('-s', '--stop-on-failure', action='store_const', const=Stop, dest='fail', help='if one job fails, stop synchronization and terminate all remaining jobs')
        self.add_option('-p', '--proceed-on-failure', action='store_const', const=Proceed, dest='fail', help='continue running jobs even if some fail (default behaviour)')
        self.add_option('-r', '--resubmit-on-failure', action='store', type='int', dest='resubmit', help='resubmit failed jobs at most N times each', metavar='N')
        self.add_option('-l', '--log-file', action='store', type='string', dest='logfile', default=None, help='write log into FILE (default: stderr)', metavar='FILE')
        self.add_option('-i', '--interval', action='store', type='int', dest='interval', default=60, help='wait T seconds before polling job status, values below 10 require --force-interval (default: %default)', metavar='T')
        self.add_option('--force-interval', action='store_true', dest='force_interval', default=False, help='accept poll intervals below 10 seconds')

    def run(self):
        '''Parses command-line options and runs all jobs.

        :Returns: go()'s result (True if every job succeeded).
        '''
        options, self.filenames = self.parse_args()
        fail = options.fail
        if options.resubmit:
            if options.resubmit < 1:
                raise Exception('illegal number of resubmissions: %d' % options.resubmit)
            fail = Resubmit(options.resubmit, fail)
        logfile = stderr
        if options.logfile:
            logfile = open(options.logfile, 'w')
        # BUGFIX: propagate go()'s result; without this return the __main__
        # guard always saw None and exited with status 1 even on success.
        return self.go(interval=options.interval, force_interval=options.force_interval, fail=fail, logfile=logfile)

    @staticmethod
    def _create_job(session, filename, f):
        '''Yields one JobTemplate per command line read from stream f.

        A line may begin with native scheduler options, separated from the
        command itself by "--".
        '''
        for n, line in enumerate(f):
            jt = session.createJobTemplate()
            b, dd, a = line.partition('--')
            if dd != '':
                jt.nativeSpecification = b
                line = a
            args = shlex.split(line)
            jt.remoteCommand = args[0]
            jt.args = args[1:]
            # Record where this job was specified for later log messages.
            jt.source = '%s:%d' % (filename, n + 1)
            yield jt

    def create_jobs(self, session):
        '''Yields job templates from the files given on the command line, or stdin.'''
        if self.filenames:
            for filename in self.filenames:
                f = open(filename)
                for p in QSync._create_job(session, filename, f):
                    yield p
                f.close()
        else:
            for p in QSync._create_job(session, '<stdin>', stdin):
                yield p
if __name__ == '__main__':
    # Run the synchronizer; a falsy result signals failure via exit status 1.
    if not QSync().run():
        exit(1)
| 40.242915 | 224 | 0.593763 | 8,761 | 0.881388 | 799 | 0.080382 | 455 | 0.045775 | 0 | 0 | 3,090 | 0.310865 |
685f5db033e1479b41714578109a39dd1abc851f | 678 | py | Python | setup.py | Bpowers4/jupyterlab-heroku | 817a841a54f6196942e152704d1e654301def5d9 | [
"BSD-3-Clause"
] | null | null | null | setup.py | Bpowers4/jupyterlab-heroku | 817a841a54f6196942e152704d1e654301def5d9 | [
"BSD-3-Clause"
] | null | null | null | setup.py | Bpowers4/jupyterlab-heroku | 817a841a54f6196942e152704d1e654301def5d9 | [
"BSD-3-Clause"
] | null | null | null | import os
import setuptools

# Use the README as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

here = os.path.dirname(os.path.abspath(__file__))

# Read the package version from _version.py without importing the package.
version_ns = {}
with open(os.path.join(here, "jupyterlab_heroku", "_version.py")) as f:
    exec(f.read(), {}, version_ns)

setuptools.setup(
    name="jupyterlab_heroku",
    version=version_ns["__version__"],
    author="Jeremy Tuloup",
    description="A server extension for the JupyterLab Heroku extension",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages(),
    install_requires=["jupyterlab"],
    package_data={"jupyterlab_heroku": ["*"]},
)
| 29.478261 | 73 | 0.715339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 198 | 0.292035 |
685f6ab1c2d0c0bbd4bb81f5a339300b7344f2f1 | 258 | py | Python | placement/serializers.py | uditpd3000/SCP-Backend | 604c9040faf5652d31bf923d647684ff8f272dc7 | [
"Apache-2.0"
] | 5 | 2021-06-21T18:36:56.000Z | 2021-08-04T04:07:56.000Z | placement/serializers.py | uditpd3000/SCP-Backend | 604c9040faf5652d31bf923d647684ff8f272dc7 | [
"Apache-2.0"
] | 23 | 2021-05-31T13:41:13.000Z | 2021-07-23T08:12:17.000Z | placement/serializers.py | uditpd3000/SCP-Backend | 604c9040faf5652d31bf923d647684ff8f272dc7 | [
"Apache-2.0"
] | 30 | 2021-05-31T10:53:14.000Z | 2022-03-01T12:56:36.000Z | from rest_framework import serializers
from .models import Placement
class PlacementSerializer(serializers.ModelSerializer):
    """Serializes Placement records for the REST API."""
    class Meta:
        # Expose only these Placement fields over the API.
        model = Placement
        fields = ("key", "placement_name", "company", "role", "description", "deadline")
| 28.666667 | 88 | 0.72093 | 186 | 0.72093 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.228682 |
686056fb7ad084c4bde838715d568d384a2cd5ae | 509 | py | Python | tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_695309c2.py | liuxiaomiao123/NeuroMathAcademy | 16a7969604a300bf9fbb86f8a5b26050ebd14c65 | [
"CC-BY-4.0"
] | 26 | 2020-07-01T20:38:44.000Z | 2021-06-20T06:37:27.000Z | tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_695309c2.py | NinaHKivanani/course-content | 3c91dd1a669cebce892486ba4f8086b1ef2e1e49 | [
"CC-BY-4.0"
] | 3 | 2020-06-23T03:46:36.000Z | 2020-07-07T05:26:01.000Z | tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_695309c2.py | NinaHKivanani/course-content | 3c91dd1a669cebce892486ba4f8086b1ef2e1e49 | [
"CC-BY-4.0"
] | 16 | 2020-07-06T06:48:02.000Z | 2021-07-30T08:18:52.000Z |
# set random number generator
np.random.seed(2020)

# initialize step_end and v
step_end = int(t_max / dt)  # number of Euler steps in the simulation window
v = el  # assumes el is the leak reversal potential defined earlier -- TODO confirm
t = 0

with plt.xkcd():
  # initialize the figure
  plt.figure()
  plt.title('$V_m$ with random I(t)')
  plt.xlabel('time (s)')
  plt.ylabel(r'$V_m$ (V)')

  # loop for step_end steps
  for step in range(step_end):
    t = step * dt
    plt.plot(t, v, 'k.')
    # Random input current around i_mean, then one Euler step of the membrane equation.
    i = i_mean * (1 + 0.1 * (t_max / dt)**(0.5) * (2 * np.random.random() - 1))
    v = v + (dt / tau) * (el - v + r * i)
plt.show() | 20.36 | 79 | 0.565815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.302554 |
6861bdce3968ccd85c24124237aad059f44788f8 | 1,509 | py | Python | varclr/utils/similarity_search.py | qibinc/VarCLR | 1c59ae4a8e27b71de2f57db13d3278a8aecfc9e9 | [
"MIT"
] | 28 | 2021-12-03T22:39:10.000Z | 2022-03-13T02:38:04.000Z | varclr/utils/similarity_search.py | qibinc/VarCLR | 1c59ae4a8e27b71de2f57db13d3278a8aecfc9e9 | [
"MIT"
] | 1 | 2021-12-16T11:33:35.000Z | 2021-12-27T00:52:59.000Z | varclr/utils/similarity_search.py | qibinc/VarCLR | 1c59ae4a8e27b71de2f57db13d3278a8aecfc9e9 | [
"MIT"
] | 3 | 2021-12-05T18:30:19.000Z | 2021-12-16T11:01:28.000Z | import sys
from collections import defaultdict
import torch
from varclr.utils.infer import MockArgs
from varclr.data.preprocessor import CodePreprocessor
if __name__ == "__main__":
ret = torch.load(sys.argv[2])
vars, embs = ret["vars"], ret["embs"]
embs /= embs.norm(dim=1, keepdim=True)
embs = embs.cuda()
var2idx = dict([(var, idx) for idx, var in enumerate(vars)])
processor = CodePreprocessor(MockArgs())
Ks = [1, 5, 10, 25, 50, 100, 250, 500, 1000]
topk_succ = defaultdict(int)
tot = 0
with open(sys.argv[1], "r") as f:
for line in f:
try:
var1, var2 = line.strip().split()
except ValueError:
print("skpped: ", line)
def canon(var):
return "".join(
[
word.capitalize() if idx > 0 else word
for idx, word in enumerate(processor(var).split())
]
)
var1, var2 = canon(var1), canon(var2)
if var1 not in var2idx or var2 not in var2idx:
print(f"variable {var1} or {var2} not found")
continue
tot += 1
for k in Ks:
result = torch.topk(embs @ embs[var2idx[var1]], k=k + 1)
topk_succ[k] += var2 in [vars[idx] for idx in result.indices][1:]
print(f"Total {tot} variable pairs")
for k in Ks:
print(f"Recall@{k} = {100 * topk_succ[k] / tot:.1f}")
| 32.804348 | 81 | 0.524851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.099404 |
68620b151d37333bd311ec05c4015d1947dbf90c | 11,285 | py | Python | plugin/CustomerSupportArchive/chipDiagnostics/tools/flowcorr.py | iontorrent/TS | 7591590843c967435ee093a3ffe9a2c6dea45ed8 | [
"Apache-2.0"
] | 125 | 2015-01-22T05:43:23.000Z | 2022-03-22T17:15:59.000Z | plugin/CustomerSupportArchive/Lane_Diagnostics/tools/flowcorr.py | iontorrent/TS | 7591590843c967435ee093a3ffe9a2c6dea45ed8 | [
"Apache-2.0"
] | 59 | 2015-02-10T09:13:06.000Z | 2021-11-11T02:32:38.000Z | plugin/CustomerSupportArchive/autoCal/tools/flowcorr.py | iontorrent/TS | 7591590843c967435ee093a3ffe9a2c6dea45ed8 | [
"Apache-2.0"
] | 98 | 2015-01-17T01:25:10.000Z | 2022-03-18T17:29:42.000Z | import os
import numpy as np
from . import imtools, datprops
from .datfile import DatFile
from .chiptype import ChipType
moduleDir = os.path.abspath( os.path.dirname( __file__ ) )
class FlowCorr:
    '''Loads or measures the per-well flow correction for a chip.

    NOTE: this module is written for Python 2 (integer '/' division on
    row/column counts is relied upon in several places below).
    '''
    def __init__( self, chiptype, xblock=None, yblock=None, rootdir='.', method='' ):
        '''
        Initialize a flowcorr object
        chiptype: a ChipType object
        xblock:   The full-chip column origin; setting to None returns a full chip
        yblock:   The full-chip row origin; setting to None returns a full chip
        rootdir:  root directory to look for flowcorr files.
                  search will also look up a level, within the
                  module directory, and in the dats directory
        method:   if specified, automatically loads the corresponding flowcorr
                      'buffer'
                      'file'
                  if advanced options need to be passed into the load functions,
                  they should be called separately with method being left empty
        '''
        self.chiptype = ChipType(chiptype)
        self.xblock = xblock
        self.yblock = yblock
        # Ordered list of directories searched for flowcorr data files.
        self.searchpath = [ rootdir,
                            os.path.join( rootdir, '..' ),
                            os.path.join( moduleDir, '../dats' ),
                            moduleDir,
                            os.path.join( moduleDir, 'dats' ) ]
        if method.lower() == 'buffer':
            self.frombuffer()
        elif method.lower() == 'file':
            # NOTE(review): fromfile() requires an fc_type argument, so
            # method='file' raises TypeError here -- confirm intended usage.
            self.fromfile()
        elif not method:
            pass
        else:
            raise ValueError( 'Flowcorr method "%s" is undefined' % method )

    def frombuffer(self, flow_file='C2_step.dat', force=False, framerate=15):
        '''
        Returns the flow correction measured from a buffered flow
        flow_file: measurement file used to calculate the flowcorr
        force:     calculate the data from raw, even if an existing analysis is present
        framerate: fps
        '''
        try:
            if force:
                # Skip the cached analysis and fall through to the raw path.
                raise IOError
            self.filename = os.path.join( self.searchpath[0], 'flowcorr_slopes.dat' )
            self.flowcorr = datprops.read_dat( self.filename, 'flowcorr', chiptype=self.chiptype )
        except IOError:
            # Read the dat file from the first search directory that has it.
            found = False
            for dirname in self.searchpath:
                self.filename = os.path.join( dirname, flow_file )
                if os.path.exists( self.filename ):
                    found = True
                    break
            if not found:
                raise IOError( '%s was not found' % self.filename )
            data = DatFile( self.filename, chiptype=self.chiptype )
            # Calculate properties
            self.flowcorr = data.measure_slope( method='maxslope' )
            self.time_offset = np.min(data.measure_t0( method='maxslope' )) #TODO: This is not very robust. should just shift t0 here and record the offest instead of trying to do things later with it
            self.pinned = data.measure_pinned()
            # remove pins
            self.flowcorr[ self.pinned ] = 1
            # Save a few more variables
            # BUGFIX: 'meathod=' kwarg typo corrected to 'method='.
            self.t0 = data.measure_t0( method='maxslope' )
            # NOTE(review): measure_actpix is accessed without parentheses --
            # confirm it is a property rather than a method on DatFile.
            self.actpix = data.measure_actpix
            self.phpoint = data.measure_plateau()
        return self.flowcorr

    def fromfile( self, fc_type ):
        '''
        Loads the flow correction from file based on the chip type and scales up from miniblocks to full chips or analysis blocks.
        This method only differentiates based on thumbnail or full chip/analysis block. All other differences are rolled into ChipType.
        fc_type: can be 'ecc' or 'wt'.
                 flowcorr file is defined by self.chiptype.flowcorr_<fc_type>
        '''
        # Thumbnails are enough different to have their own function
        if self.chiptype.tn == 'self':
            return self.tn_fromfile( fc_type )
        # Spatial thumbnails are just subsampled data. We don't need special loading
        # Calculate the size of the flowcorr files
        xMiniBlocks = self.chiptype.chipC / self.chiptype.miniC
        yMiniBlocks = self.chiptype.chipR / self.chiptype.miniR
        # Set the flowcorr path starting local before using the default
        for path in self.searchpath:
            filename = os.path.join( path, '%s.dat' % getattr( self.chiptype, 'flowcorr_%s' % fc_type ) )
            try:
                flowcorr = datprops.read_dat( filename, metric='flowcorr' )
                break
            except IOError:
                continue
        else:
            # BUGFIX: the raise was previously placed so that it either fired
            # unconditionally or was unreachable; for/else raises only when
            # no search directory yielded a readable file.
            raise IOError( 'Could not find a flowcorr file' )
        # Scale the flowcorr data to the entire well
        sizes = [ ( 96, 168 ),   # This is an unscaled P1-sized flowcorr file. This is the most likely size when reading fc_flowcorr.dat
                  ( yMiniBlocks, xMiniBlocks ),   # This is the historical per-chip file. This is ( 96, 168 ) for a P1/540 chip
                  ( self.chiptype.chipR, self.chiptype.chipC ) ]   # This is the pre-compiled value
        try:
            fc_xMiniBlocks = self.chiptype.fullchip.chipC / self.chiptype.fullchip.miniC
            fc_yMiniBlocks = self.chiptype.fullchip.chipR / self.chiptype.fullchip.miniR
            sizes.append( ( fc_yMiniBlocks, fc_xMiniBlocks ) )
            sizes.append( ( self.chiptype.fullchip.chipR, self.chiptype.fullchip.chipC ) )
        except AttributeError:
            # No fullchip attribute on this chip type; stick with base sizes.
            pass
        for size in sizes:
            try:
                flowcorr = flowcorr.reshape( size )
                break
            except ValueError:
                # Keep going until you itterate through all possible sizes. If you still get an error, then die
                if size == sizes[-1]:
                    print( 'Possible Sizes' )
                    print( sizes )
                    print( 'Elements' )
                    print( flowcorr.shape )
                    raise ValueError( 'Could not determine flowcorr size' )
                continue
        # Resize the image to the current size
        if self.chiptype.burger is None:
            # This is a standard resize operation
            flowcorr = imtools.imresize( flowcorr, ( self.chiptype.chipR, self.chiptype.chipC ) )
        elif self.chiptype.spatn != 'self':
            # This is burger mode on a full size chip
            flowcorr = imtools.imresize( flowcorr, ( self.chiptype.burger.chipR, self.chiptype.burger.chipC ) )
            # Clip off the top and bottom
            first = ( flowcorr.shape[0] - self.chiptype.chipR ) / 2
            last = first + self.chiptype.chipR
            flowcorr = flowcorr[ first:last, : ]
        else:
            # This is burger mode on a spatial thumbnail
            # This has the effect of adding more rows beyond the 800 typically used for a spatial thumbnail
            rows = self.chiptype.chipR * self.chiptype.burger.chipR / self.chiptype.fullchip.chipR
            flowcorr = imtools.imresize( flowcorr, ( rows, self.chiptype.chipC ) )
            # Clip off the top and bottom
            first = ( flowcorr.shape[0] - self.chiptype.chipR ) / 2
            last = first + self.chiptype.chipR
            flowcorr = flowcorr[ first:last, : ]
        # Reduce to a single analysis block
        if ( self.xblock is not None and self.yblock is not None and
             self.xblock != -1 and self.yblock != -1 ):
            flowcorr = flowcorr[ self.yblock: self.chiptype.blockR + self.yblock,
                                 self.xblock: self.chiptype.blockC + self.xblock ]
        self.flowcorr = flowcorr
        return flowcorr

    def tn_fromfile( self, fc_type ):
        '''
        Gets the per-well flowcorrection for a STANDARD (not spatial) thumbnail
        '''
        # Calculate the size of the flowcorr files
        xMiniBlocks = self.chiptype.chipC / self.chiptype.miniC
        yMiniBlocks = self.chiptype.chipR / self.chiptype.miniR
        # Set the flowcorr path starting local before using the default
        for path in self.searchpath:
            filename = os.path.join( path, '%s.dat' % getattr( self.chiptype, 'flowcorr_%s' % fc_type ) )
            try:
                flowcorr = datprops.read_dat( filename, metric='flowcorr' )
                break
            except IOError:
                continue
        else:
            # BUGFIX: same for/else correction as in fromfile() above.
            raise IOError( 'Could not find a flowcorr file' )
        # Scale the flowcorr data to the entire well
        sizes = ( ( 96, 168 ),   # This is an unscaled P1-sized flowcorr file.
                  ( 48, 96 ),    # This is an unscaled P0-sized flowcorr file.
                  ( yMiniBlocks, xMiniBlocks ),   # This is the historical thumbnail flowcorr (swapped x & y - STP 7/13/2015)
                  ( self.chiptype.fullchip.chipR, self.chiptype.fullchip.chipC ) )   # This is the pre-compiled value
        for size in sizes:
            try:
                flowcorr = flowcorr.reshape( size )
                break
            except ValueError:
                # Keep going until you itterate through all possible sizes. If you still get an error, then die
                if size == sizes[-1]:
                    raise ValueError( 'Could not determine flowcorr size' )
                continue
        # Resize the image to the full chip size
        if self.chiptype.burger is None:
            # This is a standard resize operation based on the full chip
            flowcorr = imtools.imresize( flowcorr, ( self.chiptype.fullchip.chipR, self.chiptype.fullchip.chipC ) )
        else:
            # This is burger mode on a regular thumbnail. Full chip is actually specified by burger and then we have to clip
            flowcorr = imtools.imresize( flowcorr, ( self.chiptype.burger.chipR, self.chiptype.burger.chipC ) )
            # Clip off the top and bottom
            first = ( flowcorr.shape[0] - self.chiptype.fullchip.chipR ) / 2
            last = first + self.chiptype.fullchip.chipR
            flowcorr = flowcorr[ first:last, : ]
        # Reduce to thumbnail data by sampling the center of each full-chip block
        tnflowcorr = np.zeros( ( self.chiptype.chipR, self.chiptype.chipC ) )
        for r in range( self.chiptype.yBlocks ):
            tn_rstart = r*self.chiptype.blockR
            tn_rend = tn_rstart + self.chiptype.blockR
            #fc_rstart = int( (r+0.5)*self.chiptype.fullchip.blockR ) - self.chiptype.blockR/2
            # middle of block in case the thumbnail different yBlocks center within the block
            fc_rstart = int( (r+0.5)*(self.chiptype.fullchip.chipR/self.chiptype.yBlocks) ) - self.chiptype.blockR/2
            fc_rend = fc_rstart + self.chiptype.blockR
            for c in range( self.chiptype.xBlocks ):
                tn_cstart = c*self.chiptype.blockC
                tn_cend = tn_cstart + self.chiptype.blockC
                fc_cstart = int( (c+0.5)*self.chiptype.fullchip.blockC ) - self.chiptype.blockC/2
                fc_cend = fc_cstart + self.chiptype.blockC
                tnflowcorr[ tn_rstart:tn_rend, tn_cstart:tn_cend ] = flowcorr[ fc_rstart:fc_rend, fc_cstart:fc_cend ]
        self.flowcorr = tnflowcorr
        return self.flowcorr
| 49.933628 | 203 | 0.589012 | 11,102 | 0.983784 | 0 | 0 | 0 | 0 | 0 | 0 | 3,968 | 0.351617 |
6862d3264687638cc92a1818114ac16c66aeabc5 | 688 | py | Python | classifier/cal.py | EnviewFulda/SeagrassExplorer | 0f6a0e15d1ffb8220e951c52ec1634e7a0763f4e | [
"BSD-2-Clause"
] | null | null | null | classifier/cal.py | EnviewFulda/SeagrassExplorer | 0f6a0e15d1ffb8220e951c52ec1634e7a0763f4e | [
"BSD-2-Clause"
] | null | null | null | classifier/cal.py | EnviewFulda/SeagrassExplorer | 0f6a0e15d1ffb8220e951c52ec1634e7a0763f4e | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import numpy as np
def ini():
    """Placeholder initialization hook.

    Currently performs no work and returns ``None``.
    """
    return None
def accuracy(Yte_predict, Yte):
    """Fraction of predicted labels that agree with the true labels.

    Args:
        Yte_predict (list): predicted labels (by computer)
        Yte (list): true labels (by human)

    Returns:
        float: mean of the element-wise equality mask
    """
    agreement = Yte_predict == Yte
    return np.mean(agreement)
def ratio(Yte_predict):
    """Relative share of entries labelled ``1``.

    Args:
        Yte_predict (list): predicted labels (by computer)

    Returns:
        float: proportion of ``1`` labels in the array
    """
    positives = Yte_predict == 1
    return np.mean(positives)
| 16.780488 | 69 | 0.616279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 495 | 0.719477 |
6864b398d75f0afbf2d3bc574f814cc0b57e494e | 2,354 | py | Python | recohut/models/dnn.py | sparsh-ai/recohut | 4121f665761ffe38c9b6337eaa9293b26bee2376 | [
"Apache-2.0"
] | null | null | null | recohut/models/dnn.py | sparsh-ai/recohut | 4121f665761ffe38c9b6337eaa9293b26bee2376 | [
"Apache-2.0"
] | 1 | 2022-01-12T05:40:57.000Z | 2022-01-12T05:40:57.000Z | recohut/models/dnn.py | RecoHut-Projects/recohut | 4121f665761ffe38c9b6337eaa9293b26bee2376 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/models/models.dnn.ipynb (unless otherwise specified).
__all__ = ['Multi_Layer_Perceptron', 'CollabFNet']
# Cell
import torch
import torch.nn as nn
import torch.nn.functional as F
# Cell
class Multi_Layer_Perceptron(nn.Module):
    """MLP recommender: user/item embeddings are concatenated and pushed
    through a stack of fully-connected ReLU layers, ending in a sigmoid score.

    ``args`` must expose ``factor_num`` (embedding dimension) and ``layers``
    (list of FC widths). Because the two embeddings are concatenated,
    ``layers[0]`` must equal ``2 * factor_num`` for the first Linear to fit.
    """
    def __init__(self, args, num_users, num_items):
        super(Multi_Layer_Perceptron, self).__init__()
        self.num_users = num_users
        self.num_items = num_items
        self.factor_num = args.factor_num
        self.layers = args.layers
        # One embedding table per entity type, both of width factor_num.
        self.embedding_user = nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.factor_num)
        self.embedding_item = nn.Embedding(num_embeddings=self.num_items, embedding_dim=self.factor_num)
        self.fc_layers = nn.ModuleList()
        # Consecutive pairs of `layers` define each Linear's (in, out) sizes.
        for idx, (in_size, out_size) in enumerate(zip(self.layers[:-1], self.layers[1:])):
            self.fc_layers.append(nn.Linear(in_size, out_size))
        # Final projection to a single logit, squashed to (0, 1) below.
        self.affine_output = nn.Linear(in_features=self.layers[-1], out_features=1)
        self.logistic = nn.Sigmoid()

    def forward(self, user_indices, item_indices):
        """Return predicted interaction scores in (0, 1) for index batches."""
        user_embedding = self.embedding_user(user_indices)
        item_embedding = self.embedding_item(item_indices)
        vector = torch.cat([user_embedding, item_embedding], dim=-1)  # the concatenated latent vector
        for idx, _ in enumerate(range(len(self.fc_layers))):
            vector = self.fc_layers[idx](vector)
            vector = nn.ReLU()(vector)
            # vector = nn.BatchNorm1d()(vector)
            # vector = nn.Dropout(p=0.5)(vector)
        logits = self.affine_output(vector)
        rating = self.logistic(logits)
        return rating

    def init_weight(self):
        # Intentionally a no-op; default PyTorch initialisation is used.
        pass
# Cell
class CollabFNet(nn.Module):
    """Feed-forward collaborative filter over concatenated user/item embeddings."""

    def __init__(self, num_users, num_items, emb_size=100, n_hidden=10):
        super(CollabFNet, self).__init__()
        # Module creation order is kept identical to the previous version so
        # seeded parameter initialisation reproduces the same weights.
        self.user_emb = nn.Embedding(num_users, emb_size)
        self.item_emb = nn.Embedding(num_items, emb_size)
        self.lin1 = nn.Linear(emb_size * 2, n_hidden)
        self.lin2 = nn.Linear(n_hidden, 1)
        self.drop1 = nn.Dropout(0.1)

    def forward(self, u, v):
        """Score each (u, v) index pair; returns a (batch, 1) tensor."""
        features = torch.cat((self.user_emb(u), self.item_emb(v)), dim=1)
        hidden = self.drop1(F.relu(features))
        hidden = F.relu(self.lin1(hidden))
        return self.lin2(hidden)
6865502dfb12453a7c664da7a679fbf4159aa717 | 477 | py | Python | src/features/test_outlier_correction.py | KennedyMurphy/elomerchant | b6561de4e13a0bcfcda72bb99cec722a58e8f09e | [
"FTL"
] | null | null | null | src/features/test_outlier_correction.py | KennedyMurphy/elomerchant | b6561de4e13a0bcfcda72bb99cec722a58e8f09e | [
"FTL"
] | 12 | 2019-12-26T17:02:54.000Z | 2022-03-21T22:16:55.000Z | src/features/test_outlier_correction.py | KennedyMurphy/elomerchant | b6561de4e13a0bcfcda72bb99cec722a58e8f09e | [
"FTL"
] | null | null | null | import unittest
import pandas as pd
import numpy as np
import src.features.outlier_correction as oc
class TestFlagNormalOutliers(unittest.TestCase):
    """Unit tests for ``oc.flag_normal_outliers``."""
    def setUp(self):
        # 1000 draws from N(0, 0.1) with two planted extreme values (+/-5),
        # far outside anything the base distribution would plausibly produce.
        self.series = pd.Series(np.random.normal(0, 0.1, 1000))
        self.series.loc[500] = -5
        self.series.loc[42] = 5

    def test_flag(self):
        # With threshold 5 (presumably in standard deviations -- confirm in
        # outlier_correction), both planted points must be flagged.
        outliers = oc.flag_normal_outliers(self.series, 5)
        self.assertTrue(outliers.loc[500])
        self.assertTrue(outliers.loc[42])
68691956fa1e08fddebbb9eacec7b248df8c8d1e | 3,613 | py | Python | Practica2/ejercicio1.1.py | martapastor/GIW-Practicas | 550e98b376fdea51c8594da6d1b616286c74bed5 | [
"MIT"
] | null | null | null | Practica2/ejercicio1.1.py | martapastor/GIW-Practicas | 550e98b376fdea51c8594da6d1b616286c74bed5 | [
"MIT"
] | null | null | null | Practica2/ejercicio1.1.py | martapastor/GIW-Practicas | 550e98b376fdea51c8594da6d1b616286c74bed5 | [
"MIT"
] | null | null | null | import csv
import operator
if __name__ == "__main__":
# We use try-except statements to properly handle errors, as in case the
# input file does not exist in the directory.
try:
# We open the .csv file loading the filds separated by a ; delimiter:
csv_archivo_locales = open("Locales.csv", encoding="utf8", errors='ignore')
locales = csv.reader(csv_archivo_locales, delimiter=";")
csv_archivo_terrazas = open("Terrazas.csv", encoding="utf8", errors='ignore')
terrazas = csv.reader(csv_archivo_terrazas, delimiter=";")
# We skip the first line before saving the results into a list for later
# processing by using the next() statement. The reason why we do not use
# the line_num function is because we would need to include it in a loop
# and read file line by line whereas this way, with just two instructions
# we get the same result:
next(locales, None)
lista_locales = list(locales)
next(terrazas, None)
lista_terrazas = list(terrazas)
# When we read the fields from the CSV, they are stored as strings, so we
# need to explicitely convert the ID to int to be able to sort them:
for i in lista_locales:
i[0] = int(i[0])
for j in lista_terrazas:
j[0] = int(j[0])
# We sort the lists taking into account the ID column which is in the
# first position to get it with the itemergetter function:
sorted_lista_locales = sorted(lista_locales, key=operator.itemgetter(0), reverse = False)
sorted_lista_terrazas = sorted(lista_terrazas, key=operator.itemgetter(0), reverse = False)
# For each entry in lista_terrazas, we check where is its corresponding
# entry in lista_locales. As they are sorted ascendently, all the entries
# before the wanted one are included in the result list as they do not
# appear in lista_terrazas. Moreover, we keep the index counter to just
# traverse the lista_locales once.
index = 0
no_terrazas = []
for terraza in sorted_lista_terrazas:
while (terraza[0] > sorted_lista_locales[index][0]):
no_terrazas.append(sorted_lista_locales[index])
index += 1
# It is important to perform this step once we have reached the local
# entry from the lista_locales, as if not, the next entry from
# lista_terrazas will be greater than the found one in lista_locales
# and will wrongly include it in the result list:
if (terraza[0] == sorted_lista_locales[index][0]):
index += 1
# We open the output file to store the data retrieved:
csvFileObj = open("NoTerrazas.csv", "w")
csvWriter = csv.writer(csvFileObj, delimiter=";")
# For each row in the final list, we write it to the CSV output file:
for row in no_terrazas:
csvWriter.writerow(row)
print ("The file has been successfully created.")
# We close the opened CSV file:
csvFileObj.close()
# NOTE: we have realized that a few error occurs when comparing the total
# amount of entries in NoTerrazas.csv with the entries in Locales.csv and
# Terrazas.csv, as it is expected to have NoTerrazas = Locales - Terrazas
# However, as the exercice statement indicates that some inconsistency
# may exist in the files, we assume the error is due to this unsteadiness.
except IOError:
print("The file does not exist.")
| 46.320513 | 99 | 0.65375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,012 | 0.556878 |
6869245bc8389ed7735b3d7033bfcf7e4bd56869 | 3,076 | py | Python | sensor_test.py | YoungYoung619/driving-desicion-in-carla | 7641c4b970d780b7c9ea242c72f186d8dbe5d296 | [
"MIT"
] | 1 | 2022-03-31T08:30:55.000Z | 2022-03-31T08:30:55.000Z | sensor_test.py | YoungYoung619/driving-desicion-in-carla | 7641c4b970d780b7c9ea242c72f186d8dbe5d296 | [
"MIT"
] | 1 | 2019-08-02T12:24:02.000Z | 2019-08-02T12:24:02.000Z | sensor_test.py | YoungYoung619/driving-desicion-in-carla | 7641c4b970d780b7c9ea242c72f186d8dbe5d296 | [
"MIT"
] | 3 | 2020-05-17T11:21:55.000Z | 2021-11-15T06:32:01.000Z | """
Copyright (c) College of Mechatronics and Control Engineering, Shenzhen University.
All rights reserved.
Description :
Author:Team Li
"""
import glob, os, sys, time
from threading import Thread
import cv2
from carla_utils.world_ops import *
from carla_utils.sensor_ops import *
try:
sys.path.append(glob.glob('**/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
import carla
except:
raise ImportError('Please check your carla file')
def check_whether_respawn_actors(world, vehicles):
    """Periodically respawn actors that have gone static.

    Runs forever (intended for a daemon thread): every 20 seconds, if more
    than 80% of ``vehicles`` are static per ``carla_actors_static``, calls
    ``respawn_static_actors`` on them.
    """
    while True:
        if carla_actors_static(vehicles, bigger_than=0.8):
            respawn_static_actors(world, vehicles)
        time.sleep(20)
if __name__ == '__main__':
    # Connect to a locally running CARLA simulator.
    client = carla.Client('127.0.0.1', 2000)
    client.set_timeout(10.0)  # seconds
    logger.info('connect success...')
    world = client.get_world()
    ## spawn vehicles in carla world
    spawn_vehicles(world, number=5)
    actor_list = world.get_actors()
    vehicles = list(actor_list.filter('vehicle*'))
    # Three cameras (RGB, depth, semantic segmentation), all attached to the
    # first spawned vehicle with the same mounting transform and FOV.
    bgr_camera_config = {'data_type': 'sensor.camera.rgb', 'image_size_x': 418,
                         'image_size_y': 278, 'fov': 110, 'sensor_tick': 0.05,
                         'transform': carla.Transform(carla.Location(x=0.8, z=1.7)),
                         'attach_to':vehicles[0]}
    bgr_sensor = bgr_camera(world, bgr_camera_config)
    depth_camera_config = {'data_type': 'sensor.camera.depth', 'image_size_x': 418,
                           'image_size_y': 278, 'fov': 110, 'sensor_tick': 0.02,
                           'transform': carla.Transform(carla.Location(x=0.8, z=1.7)),
                           'attach_to':vehicles[0]}
    depth_sensor = depth_camera(world, depth_camera_config)
    semantic_camera_config = {'data_type': 'sensor.camera.semantic_segmentation', 'image_size_x': 418,
                              'image_size_y': 278, 'fov': 110, 'sensor_tick': 0.02,
                              'transform': carla.Transform(carla.Location(x=0.8, z=1.7)),
                              'attach_to':vehicles[0]}
    semantic_sensor = semantic_camera(world, semantic_camera_config)
    ## start check
    # Daemon thread: respawns static vehicles in the background; dies with
    # the main thread when the display loop below exits.
    t = Thread(target=check_whether_respawn_actors, args=(world, vehicles))
    t.daemon = True
    t.start()
    logger.info('Press key a to stop...')
    while True:
        bgr = bgr_sensor.get()
        depth = depth_sensor.get()
        semantic = semantic_sensor.get()
        cv2.imshow('Color', bgr)
        cv2.imshow('Depth', depth)
        cv2.imshow('Semantic', semantic)
        a = cv2.waitKey(10)
        # 97 is the ASCII code of 'a': close windows and stop displaying.
        if a == 97:
            cv2.destroyAllWindows()
            break
    # Clean up: destroy every vehicle and sensor actor spawned in the world.
    vehicles = list(actor_list.filter('vehicle*'))
    for vehicle in vehicles:
        vehicle.destroy()
    logger.info('Destroy all vehicles...')
    sensors = list(actor_list.filter('sensor*'))
    for sensor in sensors:
        sensor.destroy()
    logger.info('Destroy all sensors...')
| 32.723404 | 102 | 0.619636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 830 | 0.269656 |
68696cfa2203f9ed2457ed75b22b08dfe6fa5fca | 2,792 | py | Python | config.py | aEnigmatic/nwwatch | d9d81501d0d5eb717fcf4d2bb3de2b16180b23d2 | [
"MIT"
] | null | null | null | config.py | aEnigmatic/nwwatch | d9d81501d0d5eb717fcf4d2bb3de2b16180b23d2 | [
"MIT"
] | null | null | null | config.py | aEnigmatic/nwwatch | d9d81501d0d5eb717fcf4d2bb3de2b16180b23d2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
# Locate the New World client log. The WSLInterop file exists only inside
# Windows Subsystem for Linux, so its absence means we are on native Windows.
if not os.path.isfile("/proc/sys/fs/binfmt_misc/WSLInterop"):
    # windows: build the path directly from %LOCALAPPDATA%.
    LOGFILE = os.environ['LOCALAPPDATA'] + "\\AGS\\New World\\Game.log"
else:
    # wsl: ask cmd.exe for the Windows-side %LocalAppData%, then convert the
    # resulting Windows path to a WSL path with wslpath.
    LOGFILE = os.popen('cmd.exe /c "echo %LocalAppData%"').read().strip() + "\\AGS\\New World\\Game.log"
    LOGFILE = os.popen("wslpath '{}'".format(LOGFILE)).read().strip()
# INSTRUCTIONS
# 1. Uncomment the names of the plugins you wish to use
# 2. Configure the relevant variables below
# Need help? Check out the wiki for a sample config file - https://github.com/Rawr0/nwwatch/wiki
###############################
## PLUGIN SETTINGS ##
###############################
# It is recommended you leave these two as default, no testing has been performed with different values
NW_FILE_CHECK_FREQUENCY = 60 # How frequently the script checks your queue position. Default: 60
NW_ALERT_AT_QUEUE_POSITION = 25 # Send a notification when you are at this position in queue (or less). It is recommended this be 25 or greater.
PLUGINS_ENABLED = { # Remove the hash (#) before a line to enable it
#"NotifyByPushover",
#"NotifyBySMS",
#"NotifyByDiscord"
}
# Want to test your notifications? Enable it above and then set "TEST_MODE" to True. A notification will be triggered as soon as the script starts
TEST_MODE = True
###############################
## PLUGIN SPECIFIC VARIABLES ##
###############################
# Plugin: NotifyByPushover
PUSHOVER_TOKEN = "<VALUEHERE>"
PUSHOVER_USER = "<VALUEHERE>"
PUSHOVER_DEVICE = "<VALUEHERE>"
PUSHOVER_HIGHPRIORITY = True
# Plugin: NotifyBySMS (sinch.com) (Note: Paid service)
# Note: The SMS provider is currently going through a rebranding and, as a result, the APIs below could stop working.
# If any issues are encountered, please raise an issue on Github
SMS_PLAN_ID = ""
SMS_TOKEN = ""
SMS_SOURCE = "New World" # Source phone number, including country code (eg. +15551231234) or alphanumeric string
SMS_TARGET = "" # Destination phone number, including country code (eg. +15555551234 or +61411000000)
# Plugin: NotifyByDiscord
# In a Discord server you own/manage, navigate to Server Settings -> Integrations -> Webooks -> New Webhook. Click "Copy Webhook URL" and paste it below
DISCORD_WEBHOOKURL = "https://discord.com/api/webhooks/xxxxxxxxxxxxxx/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
DISCORD_TTS = False # Use Discord "Text to speech" to speak the announcement
###############################
## INTERNAL VARIABLES ##
###############################
# Don't change these unless you're having issues
NW_LOGFILE_CHECK_LENGTH = 100 # Number of lines to monitor in the logfile
NW_SEARCH_REGEX = ".*Waiting in login queue.*Position \((.*)\)$"
NW_SEARCH_REGEX_INDEX = 1
| 42.30303 | 152 | 0.678009 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,153 | 0.771132 |
686a214c567ebd3ab53753f4ce246c0fac04cd72 | 109 | py | Python | Python/Arrays/Solution.py | chessmastersan/HackerRank | 850319e6f79e7473afbb847d28edde7b2cdfc37d | [
"MIT"
] | 2 | 2019-08-07T19:58:20.000Z | 2019-08-27T00:06:09.000Z | Python/Arrays/Solution.py | chessmastersan/HackerRank | 850319e6f79e7473afbb847d28edde7b2cdfc37d | [
"MIT"
] | 1 | 2020-06-11T19:09:48.000Z | 2020-06-11T19:09:48.000Z | Python/Arrays/Solution.py | chessmastersan/HackerRank | 850319e6f79e7473afbb847d28edde7b2cdfc37d | [
"MIT"
] | 7 | 2019-08-27T00:06:11.000Z | 2021-12-11T10:01:45.000Z | #author SANKALP SAXENA
def arrays(arr):
arr.reverse()
l = numpy.array(arr, float)
return l
| 10.9 | 31 | 0.614679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.220183 |
686c412499560689228388863939962fab82d49c | 2,508 | py | Python | test/connect/test_KafkaConnection.py | pip-services3-python/pip-services3-kafka-python | 8c6df619af56c033de68b518004c93e45a335517 | [
"MIT"
] | null | null | null | test/connect/test_KafkaConnection.py | pip-services3-python/pip-services3-kafka-python | 8c6df619af56c033de68b518004c93e45a335517 | [
"MIT"
] | null | null | null | test/connect/test_KafkaConnection.py | pip-services3-python/pip-services3-kafka-python | 8c6df619af56c033de68b518004c93e45a335517 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import time
import pytest
from pip_services3_commons.config import ConfigParams
from pip_services3_kafka.connect.KafkaConnection import KafkaConnection
broker_host = os.environ.get('KAFKA_SERVICE_HOST') or 'localhost'
broker_port = os.environ.get('KAFKA_SERVICE_PORT') or 9092
broker_topic = os.environ.get('KAFKA_TOPIC') or 'test'
broker_user = os.environ.get('KAFKA_USER') # or 'kafka'
broker_pass = os.environ.get('KAFKA_PASS') # or 'pass123'
@pytest.mark.skipif(not broker_host and not broker_port, reason="Kafka server is not configured")
class TestKafkaConnection:
    """Integration tests for KafkaConnection against a live broker.

    Skipped entirely when neither KAFKA_SERVICE_HOST nor KAFKA_SERVICE_PORT
    is configured in the environment.
    """
    connection: KafkaConnection

    def setup_method(self):
        # Build a fresh, configured (but not yet opened) connection per test.
        config = ConfigParams.from_tuples(
            'topic', broker_topic,
            'connection.protocol', 'tcp',
            'connection.host', broker_host,
            'connection.port', broker_port,
            'credential.username', broker_user,
            'credential.password', broker_pass,
            'credential.mechanism', 'plain'
        )
        self.connection = KafkaConnection()
        self.connection.configure(config)

    def test_open_close(self):
        """Opening exposes a live connection object; closing clears it."""
        self.connection.open(None)
        assert self.connection.is_open() is True
        assert self.connection.get_connection() is not None
        self.connection.close(None)
        assert self.connection.is_open() is False
        assert self.connection.get_connection() is None

    def test_list_topics(self):
        """read_queue_names returns a list while the connection is open."""
        self.connection.open(None)
        assert self.connection.is_open() is True
        assert self.connection.get_connection() is not None
        topics = self.connection.read_queue_names()
        assert isinstance(topics, list)
        self.connection.close(None)
        assert self.connection.is_open() is False
        assert self.connection.get_connection() is None

    def test_create_delete_topics(self):
        """Created topics appear in the broker listing; deleted ones vanish."""
        topics = ['new_topic1', 'new_topic2']
        self.connection.open(None)
        self.connection.create_queue(topics[0])
        self.connection.create_queue(topics[1])
        # Give the broker a moment to propagate topic metadata.
        time.sleep(0.5)
        kafka_topics = self.connection.read_queue_names()
        assert topics[0] in kafka_topics
        assert topics[1] in kafka_topics
        self.connection.delete_queue(topics[0])
        self.connection.delete_queue(topics[1])
        time.sleep(0.5)
        kafka_topics = self.connection.read_queue_names()
        assert topics[0] not in kafka_topics
        assert topics[1] not in kafka_topics
686d545f481b3b15e8e0f634c3a1126c10626456 | 5,964 | py | Python | src/command_modules/azure-cli-monitor/azure/cli/command_modules/monitor/operations/actions.py | mickeymitic/azure-cli | 92af6b3cea52f99eee84df93f5d3e2003a273d04 | [
"MIT"
] | 1 | 2018-01-30T05:55:29.000Z | 2018-01-30T05:55:29.000Z | src/command_modules/azure-cli-monitor/azure/cli/command_modules/monitor/operations/actions.py | mickeymitic/azure-cli | 92af6b3cea52f99eee84df93f5d3e2003a273d04 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-monitor/azure/cli/command_modules/monitor/operations/actions.py | mickeymitic/azure-cli | 92af6b3cea52f99eee84df93f5d3e2003a273d04 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse
from azure.cli.command_modules.monitor.util import get_aggregation_map, get_operator_map
def period_type(value):
import re
def _get_substring(indices):
if indices == tuple([-1, -1]):
return ''
return value[indices[0]: indices[1]]
regex = r'(p)?(\d+y)?(\d+m)?(\d+d)?(t)?(\d+h)?(\d+m)?(\d+s)?'
match = re.match(regex, value.lower())
match_len = match.regs[0]
if match_len != tuple([0, len(value)]):
raise ValueError
# simply return value if a valid ISO8601 string is supplied
if match.regs[1] != tuple([-1, -1]) and match.regs[5] != tuple([-1, -1]):
return value
# if shorthand is used, only support days, minutes, hours, seconds
# ensure M is interpretted as minutes
days = _get_substring(match.regs[4])
minutes = _get_substring(match.regs[6]) or _get_substring(match.regs[3])
hours = _get_substring(match.regs[7])
seconds = _get_substring(match.regs[8])
return 'P{}T{}{}{}'.format(days, minutes, hours, seconds).upper()
# pylint: disable=too-few-public-methods
class ConditionAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
from azure.mgmt.monitor.models import ThresholdRuleCondition, RuleMetricDataSource
# get default description if not specified
if namespace.description is None:
namespace.description = ' '.join(values)
if len(values) == 1:
# workaround because CMD.exe eats > character... Allows condition to be
# specified as a quoted expression
values = values[0].split(' ')
if len(values) < 5:
from knack.util import CLIError
raise CLIError('usage error: --condition METRIC {>,>=,<,<=} THRESHOLD {avg,min,max,total,last} DURATION')
metric_name = ' '.join(values[:-4])
operator = get_operator_map()[values[-4]]
threshold = int(values[-3])
aggregation = get_aggregation_map()[values[-2].lower()]
window = period_type(values[-1])
metric = RuleMetricDataSource(None, metric_name) # target URI will be filled in later
condition = ThresholdRuleCondition(operator, threshold, metric, window, aggregation)
namespace.condition = condition
# pylint: disable=protected-access
class AlertAddAction(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AlertAddAction, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
from knack.util import CLIError
_type = values[0].lower()
if _type == 'email':
from azure.mgmt.monitor.models import RuleEmailAction
return RuleEmailAction(custom_emails=values[1:])
elif _type == 'webhook':
from azure.mgmt.monitor.models import RuleWebhookAction
uri = values[1]
try:
properties = dict(x.split('=', 1) for x in values[2:])
except ValueError:
raise CLIError('usage error: {} webhook URI [KEY=VALUE ...]'.format(option_string))
return RuleWebhookAction(uri, properties)
raise CLIError('usage error: {} TYPE KEY [ARGS]'.format(option_string))
class AlertRemoveAction(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AlertRemoveAction, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
# TYPE is artificially enforced to create consistency with the --add-action argument
# but it could be enhanced to do additional validation in the future.
from knack.util import CLIError
_type = values[0].lower()
if _type not in ['email', 'webhook']:
raise CLIError('usage error: {} TYPE KEY [KEY ...]'.format(option_string))
return values[1:]
class MultiObjectsDeserializeAction(argparse._AppendAction): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
type_name = values[0]
type_properties = values[1:]
try:
super(MultiObjectsDeserializeAction, self).__call__(parser,
namespace,
self.get_deserializer(type_name)(*type_properties),
option_string)
except KeyError:
raise ValueError('usage error: the type "{}" is not recognizable.'.format(type_name))
except TypeError:
raise ValueError(
'usage error: Failed to parse "{}" as object of type "{}".'.format(' '.join(values), type_name))
except ValueError as ex:
raise ValueError(
'usage error: Failed to parse "{}" as object of type "{}". {}'.format(
' '.join(values), type_name, str(ex)))
def get_deserializer(self, type_name):
raise NotImplementedError()
class ActionGroupReceiverParameterAction(MultiObjectsDeserializeAction):
def get_deserializer(self, type_name):
from azure.mgmt.monitor.models import EmailReceiver, SmsReceiver, WebhookReceiver
return {'email': EmailReceiver, 'sms': SmsReceiver, 'webhook': WebhookReceiver}[type_name]
| 46.232558 | 117 | 0.618545 | 4,480 | 0.751174 | 0 | 0 | 0 | 0 | 0 | 0 | 1,516 | 0.254192 |
686f8c834ca06e0613cf090830a8208f91cd4c47 | 2,465 | py | Python | tests/test_gremlin_pl.py | joshim5/mogwai | 917fe5b2dea9c3adc3a3d1dfe41ae33c3ae86f55 | [
"BSD-3-Clause"
] | 24 | 2020-11-20T19:10:23.000Z | 2022-03-13T13:26:56.000Z | tests/test_gremlin_pl.py | joshim5/mogwai | 917fe5b2dea9c3adc3a3d1dfe41ae33c3ae86f55 | [
"BSD-3-Clause"
] | 10 | 2020-10-21T21:42:14.000Z | 2020-11-18T07:57:30.000Z | tests/test_gremlin_pl.py | joshim5/mogwai | 917fe5b2dea9c3adc3a3d1dfe41ae33c3ae86f55 | [
"BSD-3-Clause"
] | 7 | 2020-12-27T00:44:18.000Z | 2021-11-07T05:16:49.000Z | import itertools
import numpy as np
import torch
import unittest
from mogwai.data_loading import one_hot
from mogwai.models import GremlinPseudolikelihood
class TestGremlinPL(unittest.TestCase):
def setUp(self):
torch.manual_seed(0)
N = 100
L = 20
A = 8
msa = torch.randint(0, A, [N, L])
msa = torch.FloatTensor(one_hot(msa.numpy()))
msa_counts = msa.sum(0)
self.msa = msa
self.model = GremlinPseudolikelihood(N, L, msa_counts, vocab_size=A)
# Need nonzero weights but don't want to take a grad for this test
wt = self.model.weight.data
self.model.weight.data = torch.randn_like(wt)
# Used for data leakage test.
self.A = A
def test_parameter_shapes(self):
self.assertTupleEqual(self.model.weight.shape, (20, 8, 20, 8))
self.assertTupleEqual(self.model.bias.shape, (20, 8))
def test_forward_shape(self):
batch = self.msa[:64]
loss, logits = self.model(batch)
self.assertTupleEqual(logits.shape, (64, 20, 8))
def onehot_vector(self, idx: int):
oh = torch.zeros(self.A)
oh[idx] = 1.0
return oh
@torch.no_grad()
def test_data_leakage(self):
# Confirm that logits for position 0 do not change
# when sequence at position 0 is exhaustively changed.
logits_list = []
example = self.msa[0]
seq_pos = 0
for i in range(self.A):
example[seq_pos] = self.onehot_vector(i)
_, logits = self.model(example.unsqueeze(0))
logits_list.append(logits[0, seq_pos])
all_pairs = itertools.combinations(logits_list, 2)
for x, y in all_pairs:
np.testing.assert_array_almost_equal(x.numpy(), y.numpy())
class TestGremlinPLGrad(unittest.TestCase):
def setUp(self):
torch.manual_seed(0)
N = 100
L = 20
A = 8
msa = torch.randint(0, A, [N, L])
msa = torch.FloatTensor(one_hot(msa.numpy()))
msa_counts = msa.sum(0)
self.msa = msa
self.model = GremlinPseudolikelihood(N, L, msa_counts, vocab_size=A)
def test_gradient(self):
# Tests that backward runs.
batch = self.msa[:64]
loss, _ = self.model(batch)
loss.backward()
# TODO: Presumably there's a less stupid approach
self.assertTrue(True)
# Allow running this test module directly: ``python test_gremlin_pl.py``.
if __name__ == "__main__":
    unittest.main()
| 28.333333 | 76 | 0.610142 | 2,254 | 0.914402 | 0 | 0 | 601 | 0.243813 | 0 | 0 | 285 | 0.115619 |
68717b37b4acbe351d989e9f887e2dec9723c9d1 | 10,233 | py | Python | autograd_cupy/cupy_jvps.py | ericmjl/autograd-cupy | 493a90cabae42f9e0fdbea77cef758aff659604f | [
"MIT"
] | 3 | 2018-08-03T00:11:17.000Z | 2018-12-27T17:47:54.000Z | autograd_cupy/cupy_jvps.py | ericmjl/autograd-cupy | 493a90cabae42f9e0fdbea77cef758aff659604f | [
"MIT"
] | null | null | null | autograd_cupy/cupy_jvps.py | ericmjl/autograd-cupy | 493a90cabae42f9e0fdbea77cef758aff659604f | [
"MIT"
] | null | null | null | from . import cupy_wrapper as acp
from .cupy_vjps import (
untake,
balanced_eq,
match_complex,
replace_zero,
dot_adjoint_0,
dot_adjoint_1,
tensordot_adjoint_0,
tensordot_adjoint_1,
nograd_functions,
)
from autograd.extend import (
defjvp,
defjvp_argnum,
def_linear,
vspace,
JVPNode,
register_notrace,
)
from autograd.util import func
from .cupy_boxes import ArrayBox
for fun in nograd_functions:
register_notrace(JVPNode, fun)
defjvp(func(ArrayBox.__getitem__), "same")
defjvp(untake, "same")
defjvp_argnum(
acp.array_from_args,
lambda argnum, g, ans, args, kwargs: untake(g, argnum - 2, vspace(ans)),
)
defjvp(
acp._array_from_scalar_or_array,
None,
None,
lambda g, ans, args, kwargs, _: acp._array_from_scalar_or_array(
args, kwargs, g
),
)
# ----- Functions that are constant w.r.t. continuous inputs -----
# defjvp(acp.nan_to_num, lambda g, ans, x: acp.where(acp.isfinite(x), g, 0.))
# ----- Binary ufuncs (linear) -----
def_linear(acp.multiply)
# ----- Binary ufuncs -----
defjvp(
acp.add,
lambda g, ans, x, y: broadcast(g, ans),
lambda g, ans, x, y: broadcast(g, ans),
)
defjvp(
acp.subtract,
lambda g, ans, x, y: broadcast(g, ans),
lambda g, ans, x, y: broadcast(-g, ans),
)
defjvp(acp.divide, "same", lambda g, ans, x, y: -g * x / y ** 2)
defjvp(
acp.maximum,
lambda g, ans, x, y: g * balanced_eq(x, ans, y),
lambda g, ans, x, y: g * balanced_eq(y, ans, x),
)
defjvp(
acp.minimum,
lambda g, ans, x, y: g * balanced_eq(x, ans, y),
lambda g, ans, x, y: g * balanced_eq(y, ans, x),
)
defjvp(
acp.fmax,
lambda g, ans, x, y: g * balanced_eq(x, ans, y),
lambda g, ans, x, y: g * balanced_eq(y, ans, x),
)
defjvp(
acp.fmin,
lambda g, ans, x, y: g * balanced_eq(x, ans, y),
lambda g, ans, x, y: g * balanced_eq(y, ans, x),
)
defjvp(
acp.logaddexp,
lambda g, ans, x, y: g * acp.exp(x - ans),
lambda g, ans, x, y: g * acp.exp(y - ans),
)
defjvp(
acp.logaddexp2,
lambda g, ans, x, y: g * 2 ** (x - ans),
lambda g, ans, x, y: g * 2 ** (y - ans),
)
defjvp(acp.true_divide, "same", lambda g, ans, x, y: -g * x / y ** 2)
defjvp(
acp.mod,
lambda g, ans, x, y: broadcast(g, ans),
lambda g, ans, x, y: -g * acp.floor(x / y),
)
defjvp(
acp.remainder,
lambda g, ans, x, y: broadcast(g, ans),
lambda g, ans, x, y: -g * acp.floor(x / y),
)
defjvp(
acp.power,
lambda g, ans, x, y: g * y * x ** acp.where(y, y - 1, 1.0),
lambda g, ans, x, y: g * acp.log(replace_zero(x, 1.0)) * x ** y,
)
defjvp(
acp.arctan2,
lambda g, ans, x, y: g * y / (x ** 2 + y ** 2),
lambda g, ans, x, y: g * -x / (x ** 2 + y ** 2),
)
# ----- Simple grads (linear) -----
defjvp(acp.negative, "same")
defjvp(acp.rad2deg, "same")
defjvp(acp.degrees, "same")
defjvp(acp.deg2rad, "same")
defjvp(acp.radians, "same")
defjvp(acp.reshape, "same")
defjvp(acp.roll, "same")
defjvp(acp.array_split, "same")
defjvp(acp.split, "same")
defjvp(acp.vsplit, "same")
defjvp(acp.hsplit, "same")
defjvp(acp.dsplit, "same")
defjvp(acp.ravel, "same")
defjvp(acp.expand_dims, "same")
defjvp(acp.squeeze, "same")
defjvp(acp.diag, "same")
defjvp(acp.diagonal, "same")
defjvp(acp.make_diagonal, "same")
defjvp(acp.flipud, "same")
defjvp(acp.fliplr, "same")
defjvp(acp.rot90, "same")
defjvp(acp.trace, "same")
defjvp(acp.full, "same", argnums=(1,))
defjvp(acp.triu, "same")
defjvp(acp.tril, "same")
defjvp(acp.swapaxes, "same")
defjvp(acp.rollaxis, "same")
defjvp(acp.moveaxis, "same")
# def_linear(acp.cross)
# ----- Simple grads -----
defjvp(
acp.abs,
lambda g, ans, x: acp.real(g * replace_zero(acp.conj(x), 0.0))
/ replace_zero(ans, 1.0),
)
# defjvp(acp.fabs, lambda g, ans, x : acp.sign(x) * g) # fabs doesn't take complex
# numbers.
defjvp(acp.absolute, lambda g, ans, x: acp.real(g * acp.conj(x)) / ans)
defjvp(acp.reciprocal, lambda g, ans, x: -g / x ** 2)
defjvp(acp.exp, lambda g, ans, x: ans * g)
defjvp(acp.exp2, lambda g, ans, x: ans * acp.log(2) * g)
defjvp(acp.expm1, lambda g, ans, x: (ans + 1) * g)
defjvp(acp.log, lambda g, ans, x: g / x)
defjvp(acp.log2, lambda g, ans, x: g / x / acp.log(2))
defjvp(acp.log10, lambda g, ans, x: g / x / acp.log(10))
defjvp(acp.log1p, lambda g, ans, x: g / (x + 1))
defjvp(acp.sin, lambda g, ans, x: g * acp.cos(x))
defjvp(acp.cos, lambda g, ans, x: -g * acp.sin(x))
defjvp(acp.tan, lambda g, ans, x: g / acp.cos(x) ** 2)
defjvp(acp.arcsin, lambda g, ans, x: g / acp.sqrt(1 - x ** 2))
defjvp(acp.arccos, lambda g, ans, x: -g / acp.sqrt(1 - x ** 2))
defjvp(acp.arctan, lambda g, ans, x: g / (1 + x ** 2))
defjvp(acp.sinh, lambda g, ans, x: g * acp.cosh(x))
defjvp(acp.cosh, lambda g, ans, x: g * acp.sinh(x))
defjvp(acp.tanh, lambda g, ans, x: g / acp.cosh(x) ** 2)
defjvp(acp.arcsinh, lambda g, ans, x: g / acp.sqrt(x ** 2 + 1))
defjvp(acp.arccosh, lambda g, ans, x: g / acp.sqrt(x ** 2 - 1))
defjvp(acp.arctanh, lambda g, ans, x: g / (1 - x ** 2))
defjvp(acp.square, lambda g, ans, x: g * 2 * x)
defjvp(acp.sqrt, lambda g, ans, x: g * 0.5 * x ** -0.5)
# defjvp(acp.sinc, lambda g, ans, x : g * (acp.cos(acp.pi*x)*acp.pi*x -
# acp.sin(acp.pi*x))/(acp.pi*x**2))
defjvp(
acp.clip,
lambda g, ans, x, a_min, a_max: g
* acp.logical_and(ans != a_min, ans != a_max),
)
# defjvp(acp.real_if_close, lambda g, ans, x : match_complex(ans, g))
defjvp(acp.real, lambda g, ans, x: acp.real(g))
defjvp(acp.imag, lambda g, ans, x: match_complex(ans, -1j * g))
defjvp(acp.conj, lambda g, ans, x: acp.conj(g))
defjvp(
acp.angle,
lambda g, ans, x: match_complex(
ans, g * acp.conj(x * 1j) / acp.abs(x) ** 2
),
)
defjvp(
acp.where,
None,
lambda g, ans, c, x=None, y=None: acp.where(c, g, acp.zeros(g.shape)),
lambda g, ans, c, x=None, y=None: acp.where(c, acp.zeros(g.shape), g),
)
# ----- Trickier grads -----
defjvp(acp.kron, "same", "same")
# defjvp(acp.diff, 'same')
defjvp(acp.repeat, "same")
defjvp(acp.tile, "same")
defjvp(acp.transpose, "same")
defjvp(acp.sum, "same")
defjvp(acp.mean, "same")
defjvp(
acp.prod,
lambda g, ans, x, axis=None, keepdims=False: ans
* acp.sum(g / x, axis=axis, keepdims=keepdims),
)
defjvp(
acp.linspace,
lambda g, ans, start, stop, *args, **kwargs: acp.linspace(
g, 0, *args, **kwargs
),
lambda g, ans, start, stop, *args, **kwargs: acp.linspace(
0, g, *args, **kwargs
),
)
def forward_grad_np_var(g, ans, x, axis=None, ddof=0, keepdims=False):
    """Forward-mode (JVP) rule for var.

    g is the tangent of x, ans the primal variance; axis/ddof/keepdims
    mirror the primal call. Returns the directional derivative of var(x).
    """
    if axis is None:
        num_reps = acp.size(g)
    elif isinstance(axis, int):
        num_reps = g.shape[axis]
    elif isinstance(axis, tuple):
        # Use g.shape (as forward_grad_np_std does) rather than np.shape(g):
        # keeps the two sibling rules consistent and avoids relying on the
        # bare numpy namespace here.
        num_reps = acp.prod(acp.array(g.shape)[list(axis)])
    else:
        # Previously an unsupported axis type fell through to a confusing
        # NameError on num_reps; fail explicitly instead.
        raise TypeError("axis must be None, an int, or a tuple of ints")
    x_minus_mean = acp.conj(x - acp.mean(x, axis=axis, keepdims=True))
    return (
        2.0
        * acp.sum(acp.real(g * x_minus_mean), axis=axis, keepdims=keepdims)
        / (num_reps - ddof)
    )


defjvp(acp.var, forward_grad_np_var)
def forward_grad_np_std(g, ans, x, axis=None, ddof=0, keepdims=False):
    """Forward-mode (JVP) rule for std; ans is the primal standard deviation."""
    # Number of samples participating in each reduced std value.
    if axis is None:
        num_reps = acp.size(g)
    elif isinstance(axis, int):
        num_reps = g.shape[axis]
    elif isinstance(axis, tuple):
        num_reps = acp.prod(acp.array(g.shape)[list(axis)])
    if num_reps <= 1:
        # Degenerate reduction: treat the derivative as zero.
        return acp.zeros_like(ans)
    centered = acp.conj(x - acp.mean(x, axis=axis, keepdims=True))
    numerator = acp.sum(acp.real(g * centered), axis=axis, keepdims=keepdims)
    return numerator / ((num_reps - ddof) * ans)


defjvp(acp.std, forward_grad_np_std)
def fwd_grad_chooser(g, ans, x, axis=None, keepdims=False):
    """JVP for max/min-style reductions: average g over the chosen (tied) entries."""
    if acp.isscalar(x):
        return g
    # Re-broadcast ans against x if the reduced axes were dropped.
    restored = ans
    if not keepdims:
        if isinstance(axis, int):
            restored = acp.expand_dims(restored, axis)
        elif isinstance(axis, tuple):
            for ax in sorted(axis):
                restored = acp.expand_dims(restored, ax)
    mask = x == restored
    numer = acp.sum(g * mask, axis=axis, keepdims=keepdims)
    denom = acp.sum(mask, axis=axis, keepdims=keepdims)
    return numer / denom


defjvp(acp.max, fwd_grad_chooser)
defjvp(acp.min, fwd_grad_chooser)
defjvp(acp.amax, fwd_grad_chooser)
defjvp(acp.amin, fwd_grad_chooser)
# cumsum is linear, so "same" applies cumsum itself to the tangent.
defjvp(acp.cumsum, "same")

# Contractions/products are linear in each argument separately; def_linear
# registers the corresponding pass-through JVPs.
def_linear(acp.inner)
def_linear(acp.matmul)
def_linear(acp.dot)
def_linear(acp.tensordot)
def_linear(acp.outer)
def_linear(dot_adjoint_0)
def_linear(dot_adjoint_1)
def_linear(tensordot_adjoint_0)
def_linear(tensordot_adjoint_1)
def fwd_grad_concatenate_args(argnum, g, ans, axis_args, kwargs):
    """JVP of concatenate w.r.t. argument `argnum`.

    axis_args[0] is the axis; the remaining entries are the arrays being
    concatenated. The tangent is g in slot `argnum` and zeros elsewhere.
    """
    pieces = [
        g if i == argnum else acp.zeros_like(arg)
        for i, arg in enumerate(axis_args)
        if i >= 1
    ]
    return acp.concatenate_args(axis_args[0], *pieces)


defjvp_argnum(acp.concatenate_args, fwd_grad_concatenate_args)
def fwd_grad_sort(g, ans, x, axis=-1, kind="quicksort", order=None):
    """JVP for sort: permute the tangent by the same argsort permutation."""
    permutation = acp.argsort(x, axis, kind, order)
    return g[permutation]


defjvp(acp.sort, fwd_grad_sort)
# msort is sort along axis 0.
defjvp(acp.msort, lambda g, ans, x: fwd_grad_sort(g, ans, x, axis=0))
def fwd_grad_partition(g, ans, x, kth, axis=-1, kind="introselect", order=None):
    """JVP for partition: permute the tangent by the argpartition permutation."""
    permutation = acp.argpartition(x, kth, axis, kind, order)
    return g[permutation]


defjvp(acp.partition, fwd_grad_partition)
def atleast_jvpmaker(fun):
    """Build a JVP for an acp.atleast_* function (single-array case only)."""

    def jvp(g, ans, *arys):
        if len(arys) > 1:
            raise NotImplementedError("Can't handle multiple arguments yet.")
        # Reshaping is linear: apply the same reshape to the tangent.
        return fun(g)

    return jvp


defjvp(acp.atleast_1d, atleast_jvpmaker(acp.atleast_1d))
defjvp(acp.atleast_2d, atleast_jvpmaker(acp.atleast_2d))
defjvp(acp.atleast_3d, atleast_jvpmaker(acp.atleast_3d))

# einsum is linear in each operand.
def_linear(acp.einsum)
# TODO(mattjj): can we call np.broadcast_to or a related function instead?
def broadcast(x, target):
    """Broadcast x to match target's rank, shape, and complex-ness.

    Leading singleton axes are prepended, size-1 axes are repeated out to
    the target size, and x is promoted to complex if target is complex.
    """
    target_shape, target_ndim, target_dtype, target_iscomplex = acp.metadata(
        target
    )
    # Prepend axes until the ranks agree.
    while acp.ndim(x) < target_ndim:
        x = acp.expand_dims(x, 0)
    # Note: x is rebound inside the loop, but enumerate captured the shape
    # of the pre-loop x; repeats only grow axes that were size 1, so the
    # remaining sizes stay valid.
    for axis, size in enumerate(x.shape):
        if size == 1:
            x = acp.repeat(x, target_shape[axis], axis=axis)
    if target_iscomplex and not acp.iscomplexobj(x):
        x = x + 0j  # TODO(mattjj): this might promote the dtype
    return x
| 28.267956 | 90 | 0.626991 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,051 | 0.102707 |
68725e7d29c539b971a9d4373987ad3774e42e53 | 1,950 | py | Python | testAppium/conf/getStartActivityConfig.py | moulage/appium-android | 082e4018673fecd260552d758f8a8ba154838b9a | [
"Apache-2.0"
] | null | null | null | testAppium/conf/getStartActivityConfig.py | moulage/appium-android | 082e4018673fecd260552d758f8a8ba154838b9a | [
"Apache-2.0"
] | null | null | null | testAppium/conf/getStartActivityConfig.py | moulage/appium-android | 082e4018673fecd260552d758f8a8ba154838b9a | [
"Apache-2.0"
] | null | null | null | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author:wanghui
# @Time:
# @File:getStartActivityConfig.py
import configparser
import os

# Directory containing this module; used to locate the config file.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class GetStartActivityConfig(object):
    """Reader/validator for the start-activity config file (configparser-based)."""

    def __init__(self):
        self.config = configparser.ConfigParser()
        self.read_config()

    def read_config(self):
        """Load the config file; on failure print the error and keep an empty parser."""
        try:
            # NOTE(review): joining with '' yields the directory path itself,
            # so no file is actually read here — confirm the intended name.
            self.config.read(os.path.join(BASE_DIR, ''))
        except Exception as e:
            print(f'配置文件不正确: ', e)
            return None

    def get_set_up(self):
        """Return the (key, value) pairs of the SETUP section."""
        return self.config.items('SETUP')

    def get_tear_down(self):
        """Return the (key, value) pairs of the TEARDOWN section."""
        return self.config.items('TEARDOWN')

    def get_sections(self):
        """Return every section name in the config."""
        return self.config.sections()

    def get_option(self, section):
        """Return all option names under `section`."""
        return self.config.options(section)

    def get_section_items(self, section):
        """Return all (key, value) pairs under `section`."""
        return self.config.items(section)

    def get_section_password(self, section, option):
        """Return the value stored for `option` in `section`."""
        return self.config.get(section, option)

    def check_config(self, *arg):
        """Check the configuration.

        One argument: does that section exist? Three arguments
        (section, option, value): does the option hold that value?
        Any other arity, or any error, yields False.
        """
        try:
            self.read_config()
            if len(arg) == 1:
                return self.config.has_section(arg[0])
            if len(arg) == 3:
                return self.config[arg[0]][arg[1]] == arg[2]
            return False
        except Exception:
            return False
def main():
    """Ad-hoc smoke test: build the reader and dump the SETUP section."""
    config = GetStartActivityConfig()
    # NOTE(review): raises configparser.NoSectionError if no SETUP section
    # was loaded (see read_config) — confirm a config file actually exists.
    print(config.get_set_up())
    # print(config.get_section_items('360'))
    # print(config.get_section_password('360', 'platformVersion'))


if __name__ == '__main__':
    main()
| 25 | 66 | 0.578462 | 1,674 | 0.794118 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.282732 |
6872e6802247502095c67e090c8829c461de73f4 | 6,773 | py | Python | Sudoku/norvig_solver.py | ancientHacker/sudoku-generator | a1b395cef2cc699951e17e56fc699df83309d5e1 | [
"MIT"
] | 4 | 2020-03-29T20:10:52.000Z | 2021-01-04T07:46:21.000Z | Sudoku/norvig_solver.py | ancientHacker/sudoku-generator | a1b395cef2cc699951e17e56fc699df83309d5e1 | [
"MIT"
] | null | null | null | Sudoku/norvig_solver.py | ancientHacker/sudoku-generator | a1b395cef2cc699951e17e56fc699df83309d5e1 | [
"MIT"
] | 6 | 2019-07-05T14:54:42.000Z | 2022-01-28T10:34:05.000Z | # MIT License
#
# Copyright (c) 2019 Daniel Brotsky
#
# Portions copyright (c) 2017 by Peter Norvig and Naoki Shibuya
# (See https://towardsdatascience.com/peter-norvigs-sudoku-solver-25779bb349ce)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Dict
from .board import Board
class Solver:
    """This solver was originally published by Peter Norvig.
    It was later updated to py3 by Naoki Shibuya.
    See https://towardsdatascience.com/peter-norvigs-sudoku-solver-25779bb349ce
    for the code and an explanation of how it works. Small adaptations have been made
    to adapt for multiple sizes of puzzle and to invoke the solver from the generator.
    """

    # digits, row labels and col labels up to 16 (max puzzle size)
    all_digits = '123456789ABCDEFG'
    all_rows = 'ABCDEFGHIJKLMNOP'
    all_cols = 'abcdefghijklmnop'

    def __init__(self, board: Board):
        """Captures the board so you can solve it non-destructively."""
        # Trim the label alphabets down to the actual board size.
        self.digits = self.all_digits[:board.side_length]
        self.rows = self.all_rows[:board.side_length]
        self.cols = self.all_cols[:board.side_length]
        # Square names are row-label + column-label, e.g. 'Aa'.
        self.squares = self.cross(self.rows, self.cols)
        # Units: one per column, one per row, and one per rank x rank box.
        self.unit_list = ([self.cross(self.rows, c) for c in self.cols] +
                          [self.cross(r, self.cols) for r in self.rows] +
                          [self.cross(rs, cs)
                           for rs in self.rank_groups(self.rows, board.rank, board.side_length)
                           for cs in self.rank_groups(self.cols, board.rank, board.side_length)])
        # units[s]: units containing square s; peers[s]: all other squares
        # that share at least one unit with s.
        self.units = dict((s, [u for u in self.unit_list if s in u]) for s in self.squares)
        self.peers = dict((s, set(sum(self.units[s], [])) - {s}) for s in self.squares)
        self.values = self.board2values(board)
        self.solution = {}

    def board2values(self, board):
        """Convert the captured Board into the internal {square: digits} dict."""
        # NOTE(review): str(c) yields two characters for cell values >= 10,
        # which would misalign the grid for side_length > 9 — confirm how
        # Board stores its cells before using larger puzzles here.
        return self.parse_grid(''.join([str(c) for c in board.cells]))

    def values2board(self, values):
        """Convert a (fully assigned) values dict back to a Board, or {} on failure."""
        try:
            return Board([self.all_digits.index(values[s][0])+1 for s in self.squares])
        except ValueError:
            return {}

    def is_solution(self):
        """True when every square has exactly one remaining candidate digit."""
        return all(len(self.values[s]) == 1 for s in self.squares)

    def cross(self, aa: [str], bb: [str]):
        """Cross product of elements in aa and elements in bb."""
        return [a + b for a in aa for b in bb]

    def rank_groups(self, indices: str, step: int, end: int) -> [str]:
        """Split `indices` into consecutive groups of `step` labels (box bands)."""
        return [indices[start:start + step] for start in range(0, end, step)]

    def parse_grid(self, grid: str) -> Dict[str, str]:
        """Convert grid to a dict of possible values, {square: digits}, or
        return {} if a contradiction is detected."""
        # To start, every square can be any digit; then assign values from the grid.
        values = dict((s, self.digits) for s in self.squares)
        for s, d in self.grid_values(grid).items():
            if d in self.digits and not self.assign(values, s, d):
                return {}  # (Fail if we can't assign d to square s.)
        return values

    def grid_values(self, grid: str) -> Dict[str, str]:
        """Convert grid into a dict of {square: char} with '0' or '.' for empties."""
        chars = [c for c in grid if c in self.digits or c in '0.-']
        if len(chars) != len(self.squares):
            raise ValueError("Grid contains illegal characters")
        return dict(zip(self.squares, chars))

    def assign(self, values: Dict[str, str], s: str, d: str):
        """Eliminate all the other values (except d) from values[s] and propagate.
        Return values, except return {} if a contradiction is detected."""
        other_values = values[s].replace(d, '')
        if all(self.eliminate(values, s, d2) for d2 in other_values):
            return values
        else:
            return {}

    def eliminate(self, values: Dict[str, str], s: str, d: str):
        """Eliminate d from values[s]; propagate when values or places <= 2.
        Return values, except return {} if a contradiction is detected."""
        if d not in values[s]:
            return values  # Already eliminated
        values[s] = values[s].replace(d, '')
        # (1) If a square s is reduced to one value d2, then eliminate d2 from the peers.
        if len(values[s]) == 0:
            return {}  # Contradiction: removed last value
        elif len(values[s]) == 1:
            d2 = values[s]
            if not all(self.eliminate(values, s2, d2) for s2 in self.peers[s]):
                return {}
        # (2) If a unit u is reduced to only one place for a value d, then put it there.
        for u in self.units[s]:
            places = [s for s in u if d in values[s]]
            if len(places) == 0:
                return {}  # Contradiction: no place for this value
            elif len(places) == 1:
                # d can only be in one place in unit; assign it there
                if not self.assign(values, places[0], d):
                    return {}
        return values

    def can_solve(self):
        """Attempt a full solve; on success store the Board in self.solution."""
        values = self.search(self.values)
        if values:
            self.solution = self.values2board(values)
            return True
        return False

    def search(self, values: Dict[str, str]) -> Dict[str, str]:
        """Using depth-first search and propagation, try all possible values."""
        if not values:
            return {}  # Failed earlier
        if all(len(values[s]) == 1 for s in self.squares):
            return values  # Solved!
        # Chose the unfilled square s with the fewest possibilities
        n, s = min((len(values[s]), s) for s in self.squares if len(values[s]) > 1)
        return next(filter(len, (self.search(self.assign(values.copy(), s, d)) for d in values[s])), {})
| 47.363636 | 104 | 0.630297 | 5,465 | 0.80688 | 0 | 0 | 0 | 0 | 0 | 0 | 2,950 | 0.435553 |
6873156e99d844af13d6d77379f2ea3cbcc2572b | 1,442 | py | Python | deserialize/decorators/default.py | KrzysztofSajko/deserialize | f9192cfc9913003c7362d0ae246e3475e78690d8 | [
"MIT"
] | 18 | 2019-06-27T12:08:36.000Z | 2022-03-11T03:36:50.000Z | deserialize/decorators/default.py | KrzysztofSajko/deserialize | f9192cfc9913003c7362d0ae246e3475e78690d8 | [
"MIT"
] | 19 | 2019-01-05T16:52:28.000Z | 2021-11-12T12:17:42.000Z | deserialize/decorators/default.py | KrzysztofSajko/deserialize | f9192cfc9913003c7362d0ae246e3475e78690d8 | [
"MIT"
] | 5 | 2019-06-26T13:41:44.000Z | 2021-11-02T17:03:38.000Z | """Decorators used for adding functionality to the library."""
from deserialize.exceptions import NoDefaultSpecifiedException
def default(key_name, default_value):
"""A decorator function for mapping default values to key names."""
def store_defaults_map(class_reference):
"""Store the defaults map."""
if not hasattr(class_reference, "__deserialize_defaults_map__"):
setattr(class_reference, "__deserialize_defaults_map__", {})
class_reference.__deserialize_defaults_map__[key_name] = default_value
return class_reference
return store_defaults_map
def _has_default(class_reference, key_name):
"""Returns True if this key has a default, False otherwise.
:returns: True if this key has a default, False otherwise.
"""
if not hasattr(class_reference, "__deserialize_defaults_map__"):
return False
return key_name in class_reference.__deserialize_defaults_map__
def _get_default(class_reference, key_name):
"""Get the default value for the given class and key name.
:raises NoDefaultSpecifiedException: If a default hasn't been specified
"""
if not hasattr(class_reference, "__deserialize_defaults_map__"):
raise NoDefaultSpecifiedException()
if key_name in class_reference.__deserialize_defaults_map__:
return class_reference.__deserialize_defaults_map__[key_name]
raise NoDefaultSpecifiedException()
| 30.680851 | 78 | 0.751734 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 552 | 0.382802 |
68777c8ef19f2208020f8f076d6002af81ebebd7 | 3,608 | py | Python | doc/source/notebooks/regression.py | codelover-without-talent/GPflow | 1af7b1ca7da6687974150a1440d821a106b2159d | [
"Apache-2.0"
] | 1 | 2018-08-22T06:34:59.000Z | 2018-08-22T06:34:59.000Z | doc/source/notebooks/regression.py | codelover-without-talent/GPflow | 1af7b1ca7da6687974150a1440d821a106b2159d | [
"Apache-2.0"
] | null | null | null | doc/source/notebooks/regression.py | codelover-without-talent/GPflow | 1af7b1ca7da6687974150a1440d821a106b2159d | [
"Apache-2.0"
] | 2 | 2019-03-09T11:46:11.000Z | 2021-12-20T10:22:34.000Z | from matplotlib import pyplot as plt
import gpflow
import tensorflow as tf
import os
import numpy as np
import cProfile
def outputGraph(model, dirName, fileName):
    """Serialize the model's TensorFlow graph to dirName/fileName (binary)."""
    model.compile()
    # Ensure the output directory exists and any stale file is removed.
    if not os.path.isdir(dirName):
        os.mkdir(dirName)
    target_path = os.path.join(dirName, fileName)
    if os.path.isfile(target_path):
        os.remove(target_path)
    tf.train.write_graph(model.session.graph_def, dirName + '/', fileName, as_text=False)
# build a very simple data set:
def getData():
    """Generate a tiny 1-D regression data set: 30 noisy samples of a wavy function."""
    rng = np.random.RandomState(1)
    num_points = 30
    X = rng.rand(num_points, 1)
    noise = rng.randn(num_points, 1) * 0.1
    Y = np.sin(12 * X) + 0.66 * np.cos(25 * X) + noise + 3
    return X, Y
def plotData(X, Y):
    """Scatter-plot the raw training data as black crosses in a new figure."""
    plt.figure()
    plt.plot(X, Y, 'kx', mew=2)
def getRegressionModel(X, Y):
    """Build a GPR model with a Matern-5/2 kernel and linear mean function."""
    # build the GPR object
    k = gpflow.kernels.Matern52(1)
    meanf = gpflow.mean_functions.Linear(1, 0)
    m = gpflow.models.GPR(X, Y, k, meanf)
    m.likelihood.variance = 0.01
    # Use print() calls (Python 3 compatible; the original py2 print
    # statements are a syntax error on py3).
    print("Here are the parameters before optimization")
    # A bare 'm' expression only echoes in a REPL; print it so scripts
    # show the parameter table too.
    print(m)
    return m
def optimizeModel(m):
    """Run the model's built-in optimizer and display the fitted parameters."""
    m.optimize()
    # print() form is Python 3 compatible; the bare 'm' expression below it
    # was a REPL-only no-op, so print the model explicitly.
    print("Here are the parameters after optimization")
    print(m)
def plotOptimizationResult(X, Y, m):
    """Plot the data with the posterior mean and ±2σ predictive bands."""
    grid = np.linspace(-0.1, 1.1, 100)[:, None]
    mean, var = m.predict_y(grid)
    band = 2 * np.sqrt(var)
    plt.figure()
    plt.plot(X, Y, 'kx', mew=2)
    plt.plot(grid, mean, 'b', lw=2)
    plt.plot(grid, mean + band, 'b--', grid, mean - band, 'b--', lw=1.2)
def setModelPriors(m):
    """Attach (rather arbitrary) priors to every model hyperparameter."""
    # we'll choose rather arbitrary priors.
    m.kern.lengthscales.prior = gpflow.priors.Gamma(1., 1.)
    m.kern.variance.prior = gpflow.priors.Gamma(1., 1.)
    m.likelihood.variance.prior = gpflow.priors.Gamma(1., 1.)
    m.mean_function.A.prior = gpflow.priors.Gaussian(0., 10.)
    m.mean_function.b.prior = gpflow.priors.Gaussian(0., 10.)
    # print() form is Python 3 compatible (the py2 print statement was a
    # syntax error on py3).
    print("model with priors ", m)
def getSamples(m):
    """Draw 100 posterior hyperparameter samples from the model."""
    return m.sample(100, epsilon=0.1)
def plotSamples(X, Y, m, samples):
    """Visualise hyperparameter samples: traces, pairwise scatters, and
    the family of posterior mean functions they imply."""
    xx = np.linspace(-0.1, 1.1, 100)[:,None]
    # Trace plot of every sampled parameter.
    plt.figure()
    plt.plot(samples)
    # Pairwise scatter plots of noise variance / signal variance / lengthscale.
    f, axs = plt.subplots(1,3, figsize=(12,4), tight_layout=True)
    axs[0].plot(samples[:,0], samples[:,1], 'k.', alpha = 0.15)
    axs[0].set_xlabel('noise_variance')
    axs[0].set_ylabel('signal_variance')
    axs[1].plot(samples[:,0], samples[:,2], 'k.', alpha = 0.15)
    axs[1].set_xlabel('noise_variance')
    axs[1].set_ylabel('lengthscale')
    axs[2].plot(samples[:,2], samples[:,1], 'k.', alpha = 0.1)
    axs[2].set_xlabel('lengthscale')
    axs[2].set_ylabel('signal_variance')
    #an attempt to plot the function posterior
    #Note that we should really sample the function values here, instead of just using the mean.
    #We are under-representing the uncertainty here.
    # TODO: get full_covariance of the predictions (predict_f only?)
    plt.figure()
    # NOTE: m.set_state mutates the model in place for each sample; the
    # model is left holding the last sample's state afterwards.
    for s in samples:
        m.set_state(s)
        mean, _ = m.predict_y(xx)
        plt.plot(xx, mean, 'b', lw=2, alpha = 0.05)
    plt.plot(X, Y, 'kx', mew=2)
def showAllPlots():
    """Block until every open matplotlib window is closed."""
    plt.show()
def runExperiments(plotting=True, outputGraphs=False):
    """Run the full regression demo: fit, add priors, sample, and plot.

    plotting: draw figures at each stage; outputGraphs: dump the TF
    graph before/after priors are attached.
    """
    X, Y = getData()
    if plotting:
        plotData(X, Y)
    model = getRegressionModel(X, Y)
    if outputGraphs:
        outputGraph(model, 'models', 'pointHypers')
    optimizeModel(model)
    if plotting:
        plotOptimizationResult(X, Y, model)
    setModelPriors(model)
    if outputGraphs:
        outputGraph(model, 'models', 'bayesHypers')
    samples = getSamples(model)
    if plotting:
        plotSamples(X, Y, model, samples)
        showAllPlots()
if __name__ == '__main__':
    # Run the demo directly; swap in the cProfile line below to profile
    # the non-plotting path instead.
    runExperiments()
    #cProfile.run('runExperiments(plotting=False)')
| 29.818182 | 96 | 0.641075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 678 | 0.187916 |
6878add955a141a4910a16b4698b123a6f9a5ed7 | 29,160 | py | Python | agnosia_tools/pointcloud.py | adurdin/agnosia-blender | addef1a0cb555c22186f7059b2da257fd74cc83d | [
"MIT"
] | null | null | null | agnosia_tools/pointcloud.py | adurdin/agnosia-blender | addef1a0cb555c22186f7059b2da257fd74cc83d | [
"MIT"
] | null | null | null | agnosia_tools/pointcloud.py | adurdin/agnosia-blender | addef1a0cb555c22186f7059b2da257fd74cc83d | [
"MIT"
] | null | null | null | import bpy
import bmesh
import base64
import math
import mathutils
import random
import struct
import zlib
from array import array
from itertools import islice
from bpy.props import IntProperty, PointerProperty, StringProperty
from bpy.types import Object, Operator, Panel, PropertyGroup
from mathutils import Vector
from mathutils.bvhtree import BVHTree
#---------------------------------------------------------------------------#
# Operators
class AgnosiaCreatePointcloudOperator(Operator):
    """Create a pointcloud object that samples the currently selected mesh."""
    bl_idname = "object.create_pointcloud"
    bl_label = "Create pointcloud"
    bl_options = {'REGISTER'}

    def execute(self, context):
        target = context.object
        # Validate preconditions; report the first failure as a warning.
        problem = None
        if context.mode != "OBJECT":
            problem = "Create pointcloud: must be in Object mode."
        elif (target is None) or (target.type != 'MESH'):
            problem = "Create pointcloud: must select a Mesh object."
        elif target.pointclouds:
            problem = "Create pointcloud: can't create a pointcloud from a pointcloud."
        if problem is not None:
            self.report({'WARNING'}, problem)
            return {'CANCELLED'}

        # Deselect and hide the mesh that will be sampled.
        target.select_set(False)
        target.hide_set(True)

        # Build the pointcloud, make it the active selection, and kick off
        # the modal update that fills in its points.
        cloud_obj = create_pointcloud_from(context, target)
        context.view_layer.objects.active = cloud_obj
        cloud_obj.select_set(True)
        bpy.ops.object.update_pointcloud()
        return {'FINISHED'}
class AgnosiaUpdatePointcloudOperator(Operator):
    """Modal operator that incrementally regenerates the active object's
    pointcloud, advancing the sampling generator on a timer tick."""
    bl_idname = "object.update_pointcloud"
    bl_label = "Update pointcloud"
    bl_options = set()

    # Per-instance modal state.
    _timer = None
    _generator = None
    _finished = False
    _cancelled = False
    _object = None

    # Class variable: maps object -> the operator instance currently
    # updating it, so a new run can abort a prior one.
    _running_on = {}

    def execute(self, context):
        self._object = context.object
        # Only allow one instance of the operator to run on any given object at a time.
        prior_op = self.__class__._running_on.get(self._object)
        if prior_op is not None:
            prior_op.abort()
        self.__class__._running_on[self._object] = self
        self._generator = update_pointcloud_iter(self._object)
        self._cancelled = False
        self._finished = False
        # Drive the generator from TIMER events (every 0.1s).
        wm = context.window_manager
        self._timer = wm.event_timer_add(0.1, window=context.window)
        wm.modal_handler_add(self)
        return {'RUNNING_MODAL'}

    def modal(self, context, event):
        # Right-click or Escape cancels the update.
        if event.type in {'RIGHTMOUSE', 'ESC'}:
            self._cancelled = True
        if self._cancelled or self._finished:
            # Remove ourselves
            if self.__class__._running_on.get(self._object) == self:
                del self.__class__._running_on[self._object]
            # Remove the timer
            wm = context.window_manager
            wm.event_timer_remove(self._timer)
            self._timer = None
            if self._cancelled:
                return {'CANCELLED'}
            elif self._finished:
                return {'FINISHED'}
        elif event.type == 'TIMER':
            # Advance one sampling step; StopIteration means we're done.
            try:
                next(self._generator)
            except StopIteration:
                self._finished = True
        # Let all other events through to the rest of the UI.
        return {'PASS_THROUGH'}

    def abort(self):
        """Ask this instance to cancel on its next modal tick."""
        self._cancelled = True
class AgnosiaPointcloudExportOperator(Operator):
    """Export the active object's pointcloud to a .bin file (xyz + rgb per point)."""
    bl_idname = "object.export_pointcloud"
    bl_label = "Export pointcloud"
    bl_options = {'REGISTER'}

    filepath : bpy.props.StringProperty(subtype="FILE_PATH")

    @classmethod
    def poll(cls, context):
        obj = context.object
        return (
            (context.mode == 'OBJECT')
            and (obj is not None)
            and (len(obj.pointclouds) > 0)
        )

    def invoke(self, context, event):
        # Pop the file-select dialog; execute() runs once a path is chosen.
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}

    def execute(self, context):
        cloud = context.object.pointclouds[0]

        def clamp_byte(value):
            # Map a 0..1 float channel to 0..255, clamping out-of-range values.
            return max(0, min(255, int(value * 255.0)))

        def chunks(seq, size):
            # Yield consecutive `size`-element slices of seq until exhausted.
            it = iter(seq)
            while True:
                piece = list(islice(it, size))
                if not piece:
                    return
                yield piece

        vertices = cloud.raw_vertices
        colors = cloud.raw_colors
        # Note: stored normals are not part of the .bin record format.
        with PointcloudBinWriter(self.filepath) as writer:
            for xyz, rgba in zip(chunks(vertices, 3), chunks(colors, 4)):
                rgb = [clamp_byte(channel) for channel in rgba[:3]]
                writer.write(*(xyz + rgb))
        return {'FINISHED'}
#---------------------------------------------------------------------------#
# Panels
class AGNOSIA_PT_pointcloud(Panel):
    """Sidebar panel exposing the pointcloud settings of the active object."""
    bl_label = "Pointcloud"
    bl_idname = "AGNOSIA_PT_pointcloud"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_category = "Agnosia"
    bl_context = "objectmode"

    @classmethod
    def poll(self, context):
        # Only show for a selected object carrying pointcloud data.
        obj = context.object
        return bool(
            obj is not None
            and obj.select_get()
            and obj.pointclouds
        )

    def draw(self, context):
        obj = context.object
        cloud = obj.pointclouds[0]
        layout = self.layout

        info_row = layout.row(align=True)
        info_box = info_row.box()
        info_box.label(text="There is nothing here that you recognise. Yet.")

        settings = layout.box()
        settings.prop(cloud, 'target')
        settings.prop(cloud, 'point_count')
        settings.prop(cloud, 'seed')

        layout.operator('object.export_pointcloud', text="Export .bin")
#---------------------------------------------------------------------------#
# Pointcloud property and data
def _pointcloud_property_update(self, context):
    """Property-update callback: re-run sampling whenever a pointcloud setting changes."""
    bpy.ops.object.update_pointcloud()
class PointcloudProperty(PropertyGroup):
    """Per-object pointcloud settings plus the sampled point data.

    The raw float arrays are persisted as zlib-compressed, base64-encoded
    strings (Blender string properties survive .blend saves), with a
    per-wrapper decoded cache to avoid repeated unpacking.
    """
    # Mesh object to sample; changing any of these re-triggers sampling.
    target : PointerProperty(name="Sample", type=Object, update=_pointcloud_property_update)
    point_count : IntProperty(name="Point count", default=1024, min=128, step=64, update=_pointcloud_property_update)
    seed : IntProperty(name="Seed", default=0, update=_pointcloud_property_update)
    # Packed (compressed + base64) storage for the raw arrays.
    raw_vertices_string : StringProperty(name="_RawVerticesString", default="")
    raw_normals_string : StringProperty(name="_RawNormalsString", default="")
    raw_colors_string : StringProperty(name="_RawColorsString", default="")

    # Lazily-created dict of decoded arrays, kept in self.__dict__ (see
    # raw_cache). NOTE(review): PropertyGroup wrappers can be re-created by
    # Blender, so this cache may not persist across accesses — confirm.
    _raw_cache = None

    @staticmethod
    def _pack_array(a):
        # array('f') -> compressed base64 text; falsy/None input packs to "".
        if a:
            b = a.tobytes()
            c = zlib.compress(b)
            d = base64.encodebytes(c)
            return d.decode('ascii')
        else:
            return ""

    @staticmethod
    def _unpack_array(s, typecode):
        # Inverse of _pack_array; "" unpacks to an empty array(typecode).
        if s:
            a = array(typecode)
            b = bytes(s, 'ascii')
            c = base64.decodebytes(b)
            d = zlib.decompress(c)
            a.frombytes(d)
            return a
        else:
            return array(typecode)

    @property
    def raw_cache(self):
        # Stored in __dict__ directly to bypass the PropertyGroup RNA layer.
        cache = self.__dict__.get('_raw_cache')
        if cache is None:
            cache = {}
            self.__dict__['_raw_cache'] = cache
        return cache

    @property
    def raw_vertices(self):
        # Decoded xyz floats (3 per point); decoded on first access.
        if self.raw_vertices_string:
            value = self.raw_cache.get('vertices')
            if value is None:
                value = self._unpack_array(self.raw_vertices_string, 'f')
                self.raw_cache['vertices'] = value
            return value
        else:
            return array('f')

    @property
    def raw_normals(self):
        # Decoded normal floats (3 per point); decoded on first access.
        if self.raw_normals_string:
            value = self.raw_cache.get('normals')
            if value is None:
                value = self._unpack_array(self.raw_normals_string, 'f')
                self.raw_cache['normals'] = value
            return value
        else:
            return array('f')

    @property
    def raw_colors(self):
        # Decoded RGBA floats (4 per point); decoded on first access.
        if self.raw_colors_string:
            value = self.raw_cache.get('colors')
            if value is None:
                value = self._unpack_array(self.raw_colors_string, 'f')
                self.raw_cache['colors'] = value
            return value
        else:
            return array('f')

    def set_raw_data(self, vertices, normals=None, colors=None):
        """Validate and store the point arrays (vertices: 3 floats/point,
        normals: 3 floats/point, colors: 4 floats/point), refreshing both
        the packed strings and the decoded cache."""
        if (not isinstance(vertices, array)) or (vertices.typecode != 'f'):
            raise ValueError("vertices must be type array('f')")
        if len(vertices) % 3 != 0:
            raise ValueError("vertices length must be multiple of 3")
        vertex_count = len(vertices) // 3
        if (normals is not None):
            if (not isinstance(normals, array)) or (normals.typecode != 'f'):
                raise ValueError("normals must be type array('f')")
            if len(normals) != (3 * vertex_count):
                raise ValueError("len(normals) must be 3 * vertex_count")
        if (colors is not None):
            if (not isinstance(colors, array)) or (colors.typecode != 'f'):
                raise ValueError("colors must be type array('f')")
            if len(colors) != 4 * vertex_count:
                raise ValueError("len(colors) must be 4 * vertex_count")
        self.raw_vertices_string = self._pack_array(vertices)
        self.raw_normals_string = self._pack_array(normals)
        self.raw_colors_string = self._pack_array(colors)
        # Cached
        self.raw_cache['vertices'] = vertices
        self.raw_cache['normals'] = normals
        self.raw_cache['colors'] = colors
#---------------------------------------------------------------------------#
# Material
def layout_nodes(node_tree, root_node):
    """Make all the nodes in node_tree, starting from root_node, nice and tidy.

    Walks the links backwards from root_node, groups nodes into columns by
    link depth, sorts each column by the sockets they feed, then lays the
    columns out right-to-left, each vertically centered.
    """
    from collections import defaultdict
    from math import ceil
    # Lookup table of nodes to their incoming links
    incoming = defaultdict(list)
    for l in node_tree.links:
        incoming[l.to_node].append(l)
    # Lookup table of nodes to their sort keys
    sort_keys = {}
    sort_keys[root_node] = ('_root',)
    all_columns = [[root_node]]
    links = list(incoming[root_node])
    # Arrange all the nodes from the root nodes into columns,
    # with each column's nodes in order by the outputs and nodes they feed into.
    while links:
        # Drop all the nodes on all the links into this column
        column = []
        for l in links:
            # k = ((l.to_socket.name, l.from_socket.name), ) + sort_keys[l.to_node]
            k = (l.to_socket.name, ) + sort_keys[l.to_node]
            other_k = sort_keys.get(l.from_node, None)
            if other_k is not None:
                # Node already reached via another path: keep the larger key.
                k = max(k, other_k)
            sort_keys[l.from_node] = k
            if l.from_node not in column:
                column.append(l.from_node)
        column.sort(key=sort_keys.get)
        all_columns.append(column)
        # Get the next set of links to sort
        links = []
        for n in column:
            links.extend(incoming[n])
    # Now lay out all the nodes right-to-left, with each column vertically
    # centered with respect to all the other columns. Coordinates are +Y up, +X right.
    grid_size = 20.0
    def total_height(n):
        # Height of a node including its title bar. Not exact numbers, but good enough.
        return (20.0 if n.hide else (n.height + 30.0))
    column_location = Vector((0.0, 0.0)) # x: right edge, y: center.
    spacing = Vector((3.0, 2.0)) * grid_size
    for i, column in enumerate(all_columns):
        # Calculate the total size
        max_node_width = max(ceil(n.width) for n in column)
        total_node_height = sum(ceil(total_height(n)) for n in column)
        total_spacing = spacing[1] * (len(column) - 1)
        column_width = ceil(max_node_width / grid_size) * grid_size
        column_height = total_node_height + total_spacing
        # Lay out these nodes vertically down the column.
        x = column_location[0] - (column_width / 2.0)
        y = column_location[1] + (column_height / 2.0)
        for n in column:
            node_x = round(x - n.width / 2.0)
            node_y = y #round(y - total_height(n) / 2.0)
            n.location = Vector((node_x, node_y))
            y -= (ceil(total_height(n) + spacing[1]))
        column_location[0] -= (column_width + spacing[0])
def define_pointcloud_material(material):
    """Build the node graph for the pointcloud material.

    Per-point colors and normals come from the 'PointColor' / 'PointNormal'
    vertex color layers; normals are unpacked from the [0,1] color range
    back to [-1,1] via (value - 0.5) * 2 per channel.
    """
    material.use_nodes = True
    tree = material.node_tree
    nodes = tree.nodes
    links = tree.links
    nodes.clear()
    output = nodes.new(type='ShaderNodeOutputMaterial')
    # output.location = self._grid_location(6, 4)
    diffuse = nodes.new(type='ShaderNodeBsdfDiffuse')
    diffuse.inputs['Color'].default_value = (1.0, 1.0, 1.0, 1.0)
    diffuse.inputs['Roughness'].default_value = 0.5
    links.new(diffuse.outputs['BSDF'], output.inputs['Surface'])
    # Get colors and normals from the vertex color layers.
    colors = nodes.new(type='ShaderNodeAttribute')
    colors.label = "PointColor Attribute"
    colors.attribute_name = 'PointColor'
    links.new(colors.outputs['Color'], diffuse.inputs['Color'])
    normals = nodes.new(type='ShaderNodeAttribute')
    # Fix: this previously re-assigned colors.label, leaving the normals
    # node unlabeled and mislabeling the colors node.
    normals.label = "PointNormal Attribute"
    normals.attribute_name = 'PointNormal'
    # Create nodes to unpack the normals from the second vertex color layer.
    combine = nodes.new(type='ShaderNodeCombineXYZ')
    combine.hide = True
    links.new(combine.outputs['Vector'], diffuse.inputs['Normal'])
    separate = nodes.new(type='ShaderNodeSeparateXYZ')
    separate.hide = True
    links.new(normals.outputs['Vector'], separate.inputs['Vector'])
    # Each of the X, Y, and Z channels needs (foo - 0.5) * 2.0
    for i in range(3):
        sub = nodes.new(type='ShaderNodeMath')
        sub.label = " - 0.5"
        sub.operation = 'SUBTRACT'
        sub.hide = True
        sub.inputs[1].default_value = 0.5
        links.new(separate.outputs[i], sub.inputs[0])
        mul = nodes.new(type='ShaderNodeMath')
        mul.label = " * 2.0"
        mul.operation = 'MULTIPLY'
        mul.hide = True
        mul.inputs[1].default_value = 2.0
        links.new(sub.outputs[0], mul.inputs[0])
        links.new(mul.outputs[0], combine.inputs[i])
    layout_nodes(tree, output)
def get_pointcloud_material():
    """Return the shared pointcloud material, creating it on first use."""
    name = 'PointcloudMaterial'
    material = bpy.data.materials.get(name)
    if not material:
        material = bpy.data.materials.new(name)
        define_pointcloud_material(material)
    return material
def assign_material(o, mat):
    """Make `mat` the object's first material slot (replacing any existing one)."""
    slots = o.data.materials
    if slots:
        slots[0] = mat
    else:
        slots.append(mat)
#---------------------------------------------------------------------------#
# Pointcloud objects.
def create_pointcloud_from(context, target):
    """Create a new pointcloud object configured to sample `target`."""
    o = create_empty_mesh_obj(context, 'Pointcloud')
    pc = o.pointclouds.add()
    pc.target = target
    # Blender's IntProperty is a signed 32-bit int and randint's upper bound
    # is inclusive, so 2**31 itself would overflow the property; the valid
    # range is [-2**31, 2**31 - 1].
    pc.seed = random.randint(-2**31, 2**31 - 1)
    return o
def create_empty_mesh_obj(context, name):
    """Create and link a new object named `name` wrapping an empty mesh."""
    mesh = bpy.data.meshes.new(name + 'Mesh')
    obj = bpy.data.objects.new(name, mesh)
    obj.show_name = True
    context.scene.collection.objects.link(obj)
    return obj
def update_pointcloud_iter(o):
    """Generator that incrementally resamples object o's pointcloud.

    Yields once per sampling batch (letting the modal operator interleave
    UI updates); after each yield it stores the accumulated data on the
    pointcloud property and rebuilds the visualization mesh.
    """
    if not o.pointclouds:
        return
    pc = o.pointclouds[0]
    target = pc.target
    # Only sample real, non-pointcloud mesh targets.
    if (target is None) or (target.type != 'MESH') or (target.pointclouds):
        return
    # Seeded RNG so the same settings reproduce the same cloud.
    seed = pc.seed
    rng = random.Random(seed)
    for data in generate_points(pc.target, pc.point_count, rng, step_count=4096):
        yield
        # Flatten the per-point vectors into flat float arrays for storage.
        vertices_arr = array('f', (f for vec in data[0] for f in vec))
        normals_arr = array('f', (f for vec in data[1] for f in vec))
        colors_arr = array('f', (f for vec in data[2] for f in vec))
        pc.set_raw_data(vertices_arr, normals=normals_arr, colors=colors_arr)
        o.data = create_pointcloud_mesh(o.data.name, data)
        assign_material(o, get_pointcloud_material())
def generate_points(target, count, rng=random, step_count=0):
    """Generator yielding (vertices, normals, colors) accumulated in batches.

    Samples up to `step_count` points per batch (defaulting to all of them),
    yielding the running totals after each batch and once more at the end.
    """
    if not step_count:
        step_count = count
    vertices, normals, colors = [], [], []
    produced = 0
    while produced < count:
        batch = min(step_count, count - produced)
        # Alternative strategies exist (sphere_sample_obj, volume_sample_obj);
        # surface sampling is the one in use.
        sample = surface_sample_obj(target, batch, rng)
        vertices += sample[0]
        normals += sample[1]
        colors += sample[2]
        produced += batch
        if produced < count:
            yield [vertices, normals, colors]
    yield [vertices, normals, colors]
#---------------------------------------------------------------------------#
# Meshes for in-Blender visualization.
def create_pointcloud_mesh(name, data):
    # Build a Blender mesh named *name* that visualizes the sampled
    # (vertices, normals, colors) data as one small quad per point.
    # Returns an empty mesh when there are no vertices.
    mesh = bpy.data.meshes.new(name)
    (vertices, normals, colors) = data
    # Expand each vertex to make a quad facing the -y axis.
    if vertices:
        (vertices, faces, normals, colors) = \
            expand_vertex_data_to_mesh(vertices, normals, colors)
        mesh.from_pydata(vertices, [], faces)
        mesh.validate(verbose=True, clean_customdata=False)
        mesh.update()
        # Apply per-vertex colors and normals
        color_layer = mesh.vertex_colors.new(name='PointColor')
        for (i, color) in enumerate(colors):
            color_layer.data[i].color = color
        normal_layer = mesh.vertex_colors.new(name='PointNormal')
        for (i, normal) in enumerate(normals):
            # Pack the normals into the color data: remap [-1, 1] -> [0, 1]
            # so the values survive the color channel; alpha is unused.
            n = (normal / 2.0) + Vector((0.5, 0.5, 0.5))
            normal_layer.data[i].color = (n[0], n[1], n[2], 0.0)
    return mesh
def expand_vertex_data_to_mesh(vertices, normals, colors):
    """Expand per-point data into quad geometry for visualization.

    Every input point becomes a quad of four vertices facing the -y
    axis; its normal and color are replicated for each corner.  Returns
    (expanded_vertices, faces, expanded_normals, expanded_colors).
    """
    # Size of the mesh representing a point.
    scale = 0.05
    quad = (
        Vector((1, 0, 1)) * scale,
        Vector((-1, 0, 1)) * scale,
        Vector((-1, 0, -1)) * scale,
        Vector((1, 0, -1)) * scale,
    )
    # Replicate the source data onto the four quad corners.
    expanded_vertices = [v + corner for v in vertices for corner in quad]
    expanded_normals = [n for n in normals for _ in range(4)]
    expanded_colors = [c for c in colors for _ in range(4)]
    # One face per source point, over its four consecutive vertices.
    faces = [(4 * i, 4 * i + 1, 4 * i + 2, 4 * i + 3)
             for i in range(len(vertices))]
    return (expanded_vertices, faces, expanded_normals, expanded_colors)
#---------------------------------------------------------------------------#
# Sampling.
def sphere_sample_obj(o, count, rng):
    """Sample *o* by raycasting inward from an enclosing sphere.

    Rays start at random points on a sphere just outside the object's
    bounding radius and aim at the origin; each surface hit yields one
    sample.  Returns (vertices, normals, colors) lists of length *count*.
    """
    vertices, normals, colors = [], [], []
    radius = object_bounding_radius(o) + 0.1
    point_source = iter(sphere_surface_points(radius, rng))
    while len(vertices) < count:
        start = next(point_source)
        hit, position, normal, index = raycast_to_origin(o, start)
        if not hit:
            continue
        vertices.append(position)
        normals.append(normal)
        colors.append((1.0, 0.0, 1.0, 1.0))  # debug magenta
    return (vertices, normals, colors)
def volume_sample_obj(o, count, rng):
    # Sample the object by generating points within its bounds and
    # testing if they're inside it. Assumes the mesh is watertight.
    # Candidate points are drawn uniformly from the bounding cube and
    # accepted when raycast_to_exterior() reports an interior hit.
    # NOTE(review): the bmesh `bm` is never freed (bm.free()); fine for
    # one-shot use, but may accumulate if called repeatedly — verify.
    vertices = []
    normals = []
    colors = []
    bm = bmesh.new()
    bm.from_mesh(o.data)
    bvh = BVHTree.FromBMesh(bm)
    halfwidth = object_bounding_halfwidth(o) + 0.1
    it = iter(cube_volume_points(halfwidth, rng))
    while len(vertices) < count:
        pt = next(it)
        (location, normal, index, distance) = raycast_to_exterior(bvh, pt)
        # A hit location means the candidate point was inside the mesh.
        pt_is_inside = (location is not None)
        if pt_is_inside:
            vertices.append(location)
            # The stored normal is that of the surface hit on the way out.
            normals.append(normal)
            # TEMP: color each point by its coordinates
            r = (abs(location[0]) / halfwidth)
            g = (abs(location[1]) / halfwidth)
            b = (abs(location[2]) / halfwidth)
            colors.append((r, g, b, 1.0))
    return (vertices, normals, colors)
def surface_sample_obj(o, count, rng):
    """Sample *count* points uniformly over the surface of mesh object *o*.

    Draws *count* sorted, uniform "area targets" over the total surface
    area, then sweeps the polygons once, spawning a uniformly placed
    point on a polygon each time the cumulative area passes a target.
    Returns (vertices, normals, colors).

    Fixes over the original:
    - once the targets were exhausted, the stale `target` stayed below
      the growing `area_so_far`, so every remaining polygon spawned one
      extra point (the function returned more than *count* points);
      sampling now stops completely when the last target is consumed.
    - count == 0 no longer raises IndexError on the initial pop().
    - the loop-invariant bounding halfwidth is computed once, not per point.
    """
    vertices = []
    normals = []
    colors = []
    mesh = o.data
    # Loop-invariant: a property of the whole object, hoisted out of the
    # per-point work below.
    halfwidth = object_bounding_halfwidth(o) + 0.1
    # Find the surface area of each poly and the whole mesh.
    poly_areas = [p.area for p in mesh.polygons]
    surface_area = sum(poly_areas)
    # Generate uniform random area targets; sorting lets us sweep the
    # polygon list a single time.
    area_targets = sorted(rng.uniform(0, surface_area) for _ in range(count))
    if not area_targets:
        # Nothing requested (count == 0).
        return (vertices, normals, colors)
    # Iterate the polys to see which reaches the target.
    area_so_far = 0
    target = area_targets.pop(0)
    for i, poly in enumerate(mesh.polygons):
        area_so_far += poly_areas[i]
        while target is not None and target <= area_so_far:
            # Spawn a point on this polygon.
            poly_vertices = [Vector(mesh.vertices[j].co) for j in poly.vertices]
            location = polygon_surface_point(poly_vertices, rng)
            normal = poly.normal
            # Save the point.
            vertices.append(location)
            normals.append(normal)
            # TEMP: color each point by its (normalized) coordinates
            r = (abs(location[0]) / halfwidth)
            g = (abs(location[1]) / halfwidth)
            b = (abs(location[2]) / halfwidth)
            colors.append((r, g, b, 1.0))
            # Get a new target
            if area_targets:
                target = area_targets.pop(0)
            else:
                # All requested points placed: stop sampling entirely.
                target = None
        if target is None:
            break
    if not vertices:
        print("ERROR: didn't generate any vertices!")
    return (vertices, normals, colors)
def object_bounding_radius(o):
    """Distance from the object origin to its farthest bounding-box corner."""
    from math import sqrt
    distances = (sqrt(x * x + y * y + z * z) for (x, y, z) in o.bound_box)
    return max(distances, default=0.0)
def object_bounding_halfwidth(o):
    """Largest absolute bounding-box coordinate along any single axis."""
    extents = (max(abs(x), abs(y), abs(z)) for (x, y, z) in o.bound_box)
    return max(extents, default=0.0)
def sphere_surface_points(radius, rng):
    """Yield an endless stream of points uniformly distributed on the
    surface of a sphere with the given radius.

    Uses the inverse-CDF parameterization: theta uniform on [0, 2*pi),
    phi = acos(2v - 1), which makes the area element uniform.

    Fixes over the original: the `from math import ...` was executed on
    every loop iteration (it sat inside `while True`), and `sqrt` was
    imported but never used.
    """
    from math import acos, cos, pi, sin
    while True:
        u = rng.random()
        v = rng.random()
        theta = 2 * pi * u
        phi = acos(2 * v - 1)
        x = radius * cos(theta) * sin(phi)
        y = radius * sin(theta) * sin(phi)
        z = radius * cos(phi)
        yield Vector((x, y, z))
def cube_volume_points(halfwidth, rng):
    """Yield an endless stream of points uniformly distributed inside a
    cube of the given halfwidth, centered on the origin."""
    while True:
        # Three independent draws in [-halfwidth, halfwidth], in x/y/z order.
        coords = tuple(halfwidth * rng.uniform(-1, 1) for _ in range(3))
        yield Vector(coords)
def polygon_surface_point(vertices, rng):
    """Return a uniformly distributed random point on the polygon's surface.

    The polygon is fan-triangulated from vertices[0]; a triangle is
    picked with probability proportional to its area, then a point is
    drawn uniformly inside it using the sqrt(r1) barycentric trick.
    Falls back to vertices[0] if float error prevents a triangle from
    being selected.
    """
    # Triangle fan: vertices abc of each triangle, plus edge vectors ab, ac.
    tris = [
        (
            vertices[0],                      # a
            vertices[i + 1],                  # b
            vertices[i + 2],                  # c
            (vertices[i + 1] - vertices[0]),  # ab
            (vertices[i + 2] - vertices[0]),  # ac
        )
        for i in range(len(vertices) - 2)
    ]
    # Find the area of each tri and the total polygon area.
    tri_areas = [
        (ab.cross(ac)).length / 2.0
        for (_, _, _, ab, ac) in tris
    ]
    surface_area = sum(tri_areas)
    # Pick a target tri by area.
    target = rng.uniform(0, surface_area)
    area_so_far = 0
    for (i, (a, b, c, ab, ac)) in enumerate(tris):
        area_so_far += tri_areas[i]
        # This was a `while` in the original, but the body always
        # returns, so a plain `if` expresses the intent.
        if target <= area_so_far:
            # Pick a uniform point in this tri.
            r1 = rng.random()
            r2 = rng.random()
            r1root = math.sqrt(r1)
            return ((1 - r1root) * a
                    + r1root * (1 - r2) * b
                    + r1root * r2 * c)
    # Shouldn't get here, but if we do, just return a vertex.
    print("ERROR: failed to find target tri.")
    return vertices[0]
def raycast_to_origin(o, pt):
    """Raycast object *o* from *pt* (object space) towards its origin.

    Returns o.ray_cast's tuple: (result, position, normal, index).
    """
    direction = (Vector((0.0, 0.0, 0.0)) - pt).normalized()
    return o.ray_cast(pt, direction)
def raycast_to_exterior(bvh, pt):
    """Raycast the BVHTree bvh from pt to the object's exterior.

    If pt is on the object's interior, return (location, normal, index,
    distance) for the first surface hit on the way out; if it's on the
    exterior (or too close to the origin to aim), return
    (None, None, None, None).

    Cleanup over the original: removed the dead locals `tiny_step`,
    `first_outward` and `outward_crossings` (leftovers of an abandoned
    iterative crossing-count approach that the old comment still
    described) — only a single outward ray is ever cast.
    """
    NO_HIT = (None, None, None, None)
    # If the point's too close to the origin, we can't get a proper
    # direction, so just skip it.
    origin = Vector((0, 0, 0))
    from_origin = (pt - origin)
    if (from_origin.length < 0.0001):
        return NO_HIT
    direction = from_origin.normalized()
    # Cast one ray outwards (away from the origin).
    (location, normal, index, distance) = bvh.ray_cast(pt, direction)
    if location is None:
        # Didn't hit anything, so pt cannot be inside the mesh.
        return NO_HIT
    # Check if the face is oriented towards the ray or away from it:
    # an inward-facing (front) hit means the ray started outside that
    # surface, i.e. pt was outside the object.
    inward_facing = (direction.dot(normal) < 0)
    if inward_facing:
        # Must have been outside the object.
        return NO_HIT
    return (location, normal, index, distance)
#---------------------------------------------------------------------------#
# Pointcloud file types
## Structures in binary pointcloud file
def bin_size(size):
    """Pack *size* as a native-order, standard-size unsigned 32-bit int."""
    return struct.Struct('=L').pack(size)
def bin_point(x, y, z, r, g, b):
    """Pack one point record: three float32 coordinates, an RGB byte
    triple, and one pad byte (16 bytes total, native byte order)."""
    return struct.Struct('=fffBBBx').pack(x, y, z, r, g, b)
## Binary pointcloud writing
class PointcloudBinWriter:
    """Context-manager writer for the binary pointcloud format.

    File format:
        uint32_t size_of_data
        struct record {
            float x, y, z;
            uint8_t r, g, b;
            uint8_t pad;
        } records[size / sizeof(struct record)]

    The size header is written as zero on entry and back-filled on a
    successful exit.  Fix over the original: the file handle is now
    closed even when the with-block raises (previously it leaked on
    error); in that case the header stays zero, so readers see an
    empty cloud.
    """
    def __init__(self, filename):
        self.filename = filename
        self.file = None
        self.count = 0  # number of point records written
        self.size = 0   # number of payload bytes written (excludes header)

    def write(self, x, y, z, r, g, b):
        """Append one point record to the open file."""
        assert (self.file is not None), "File is not open."
        blob = bin_point(x, y, z, r, g, b)
        self.file.write(blob)
        self.size += len(blob)
        self.count += 1

    def __len__(self):
        return self.count

    def __enter__(self):
        self.file = open(self.filename, 'wb')
        # The file starts with the size of its data. We write a zero
        # initially, and fill in the actual size on __exit__().
        self.file.write(bin_size(0))
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        try:
            if not exc_type and not exc_value:
                # Success: back-fill the real payload size.
                self.file.seek(0)
                self.file.write(bin_size(self.size))
        finally:
            # Always release the handle, even on error (the original
            # only closed the file on the success path).
            self.file.close()
#---------------------------------------------------------------------------#
# Utils.
from contextlib import contextmanager
@contextmanager
def tempfile(suffix='', dir=None):
    """Context for temporary file.

    Will find a free temporary filename upon entering
    and will try to delete the file on leaving, even in case of an exception.

    Parameters
    ----------
    suffix : string
        optional file suffix
    dir : string
        optional directory to save temporary file in
    """
    # From: https://stackoverflow.com/a/29491523
    import errno
    import os
    import tempfile as tmp
    tf = tmp.NamedTemporaryFile(delete=False, suffix=suffix, dir=dir)
    tf.file.close()
    try:
        yield tf.name
    finally:
        try:
            os.remove(tf.name)
        except OSError as e:
            # The file may legitimately be gone already (e.g. moved away
            # by file_atomic); only "no such file" is benign.  Uses
            # errno.ENOENT instead of the original magic number 2.
            if e.errno != errno.ENOENT:
                raise
@contextmanager
def file_atomic(filepath, *args, **kwargs):
    """Open temporary file object that atomically moves to destination upon
    exiting.

    Allows reading and writing to and from the same filename.

    The file will not be moved to destination in case of an exception.

    Parameters
    ----------
    filepath : string
        the file path to be opened
    fsync : bool
        whether to force write the file to disk
    *args : mixed
        Any valid arguments for :code:`open`
    **kwargs : mixed
        Any valid keyword arguments for :code:`open`
    """
    # From: https://stackoverflow.com/a/29491523
    import os
    # FIX: 'fsync' must be *removed* from kwargs before they are
    # forwarded to open(); the original used .get(), so passing
    # fsync=True raised "TypeError: 'fsync' is an invalid keyword
    # argument" inside open().
    fsync = kwargs.pop('fsync', False)
    with tempfile(dir=os.path.dirname(os.path.abspath(filepath))) as tmppath:
        with open(tmppath, *args, **kwargs) as file:
            try:
                yield file
            finally:
                if fsync:
                    file.flush()
                    os.fsync(file.fileno())
        # Atomic on POSIX: the destination only ever sees the complete file.
        os.rename(tmppath, filepath)
6878bc63f5a5d101c27062d6aecd9e1581e3ee6e | 335 | py | Python | database/__init__.py | eddycheong/skeleton-flask-sqlalchemy | 117e4cac0bf4d912f9546e2aeac77bccc2b7e3c0 | [
"MIT"
] | null | null | null | database/__init__.py | eddycheong/skeleton-flask-sqlalchemy | 117e4cac0bf4d912f9546e2aeac77bccc2b7e3c0 | [
"MIT"
] | null | null | null | database/__init__.py | eddycheong/skeleton-flask-sqlalchemy | 117e4cac0bf4d912f9546e2aeac77bccc2b7e3c0 | [
"MIT"
] | null | null | null | import configparser
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
config = configparser.ConfigParser()
config.read('alembic.ini')
connection_url = config['alembic']['sqlalchemy.url']
Engine = create_engine(connection_url, connect_args={'check_same_thread': False})
Session = sessionmaker(bind=Engine)
| 27.916667 | 81 | 0.81194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.170149 |
6879269c1812f94d1e6cc9bd8f13c7d884e2c991 | 4,682 | py | Python | wlauto/workloads/octaned8/__init__.py | joesavage/workload-automation | 3a863fa14369d9bf1f20f82eb5ab4582499c6b99 | [
"Apache-2.0"
] | 5 | 2016-04-27T13:51:12.000Z | 2016-06-23T12:38:14.000Z | wlauto/workloads/octaned8/__init__.py | joesavage/workload-automation | 3a863fa14369d9bf1f20f82eb5ab4582499c6b99 | [
"Apache-2.0"
] | 110 | 2016-05-05T19:13:26.000Z | 2017-01-20T16:18:02.000Z | wlauto/workloads/octaned8/__init__.py | joesavage/workload-automation | 3a863fa14369d9bf1f20f82eb5ab4582499c6b99 | [
"Apache-2.0"
] | 1 | 2016-04-27T15:18:55.000Z | 2016-04-27T15:18:55.000Z | # Copyright 2014-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#pylint: disable=E1101,W0201
import os
import re
from wlauto import Workload, Parameter, Executable
from wlauto.common.resources import File
from wlauto.exceptions import ConfigError
# Octane d8 result lines all follow the pattern "<Benchmark>: <score>",
# except the aggregate line, which reads "Score (...): <score>".
_BENCHMARKS = (
    "Richards", "DeltaBlue", "Crypto", "RayTrace", "EarleyBoyer", "RegExp",
    "Splay", "SplayLatency", "NavierStokes", "PdfJS", "Mandreel",
    "MandreelLatency", "Gameboy", "CodeLoad", "Box2D", "zlib",
)
regex_map = {name: re.compile(name + r': (\d+.*)') for name in _BENCHMARKS}
regex_map["Score"] = re.compile(r'Score .*: (\d+.*)')
class Octaned8(Workload):
    # Workload plugin: runs the Octane JavaScript benchmark through a
    # standalone V8 `d8` shell on an Android target and parses the
    # per-benchmark scores from its output using `regex_map`.

    name = 'octaned8'
    description = """
    Runs the Octane d8 benchmark.
    This workload runs d8 binaries built from source and placed in the dependencies folder along
    with test assets from https://github.com/chromium/octane which also need to be placed in an
    assets folder within the dependencies folder.
    Original source from::
    https://github.com/v8/v8/wiki/D8%20on%20Android
    """

    parameters = [
        Parameter('run_timeout', kind=int, default=180,
                  description='Timeout, in seconds, for the test execution.'),
    ]

    supported_platforms = ['android']
    # Files pushed/installed on the device: the d8 shell and its V8 blobs.
    executables = ['d8', 'natives_blob.bin', 'snapshot_blob.bin']

    def initialize(self, context):  # pylint: disable=no-self-use
        # One-time setup: push and extract the Octane JS assets into the
        # device working directory, then install the d8 binaries.
        assets_dir = self.device.path.join(self.device.working_directory, 'assets')
        self.device.execute('mkdir -p {}'.format(assets_dir))

        assets_tar = 'octaned8-assets.tar'
        fpath = context.resolver.get(File(self, assets_tar))
        self.device.push_file(fpath, assets_dir, timeout=300)
        self.command = 'cd {}; {} busybox tar -x -f {}'.format(assets_dir, self.device.busybox, assets_tar)
        self.output = self.device.execute(self.command, timeout=self.run_timeout)

        for f in self.executables:
            binFile = context.resolver.get(Executable(self, self.device.abi, f))
            # NOTE(review): device_exe is overwritten on every iteration;
            # only the last installed path is retained — confirm intended.
            self.device_exe = self.device.install(binFile)

    def setup(self, context):
        # Build the benchmark command; stdout+stderr are captured to a
        # device-side file that update_result() pulls back.
        self.logger.info('Copying d8 binaries to device')
        assets_dir = self.device.path.join(self.device.working_directory, 'assets')
        device_file = self.device.path.join(self.device.working_directory, 'octaned8.output')
        self.command = 'cd {}; {}/d8 ./run.js >> {} 2>&1'.format(assets_dir, self.device.binaries_directory, device_file)

    def run(self, context):
        # Execute the benchmark on the device (bounded by run_timeout).
        self.logger.info('Starting d8 tests')
        self.output = self.device.execute(self.command, timeout=self.run_timeout)

    def update_result(self, context):
        # Pull the output file and extract one metric per matching regex.
        host_file = os.path.join(context.output_directory, 'octaned8.output')
        device_file = self.device.path.join(self.device.working_directory, 'octaned8.output')
        self.device.pull_file(device_file, host_file)

        with open(os.path.join(host_file)) as octaned8_file:
            for line in octaned8_file:
                # NOTE(review): dict.iteritems() is Python 2 only; this
                # matches the wlauto era of the code — use .items() if it
                # ever runs under Python 3.
                for label, regex in regex_map.iteritems():
                    match = regex.search(line)
                    if match:
                        context.result.add_metric(label, float(match.group(1)))
        self.device.execute('rm {}'.format(device_file))

    def finalize(self, context):
        # Remove installed binaries and the extracted assets from the device.
        for f in self.executables:
            self.device.uninstall_executable(f)
            self.device.execute('rm {}'.format(self.device.path.join(self.device.working_directory, f)))
        assets_dir = self.device.path.join(self.device.working_directory, 'assets')
        self.device.execute('rm -rf {}'.format(assets_dir))
| 40.362069 | 121 | 0.650577 | 3,008 | 0.64246 | 0 | 0 | 0 | 0 | 0 | 0 | 1,882 | 0.401965 |
6879a284797d76bfbb26fa832ed9b1a3cdbb362e | 11,872 | py | Python | mrplot/modules/design/variable_design.py | enzofabricio/mrplot | 45e865241ea6ed7a4e524cbcbac54b75f2976696 | [
"MIT"
] | null | null | null | mrplot/modules/design/variable_design.py | enzofabricio/mrplot | 45e865241ea6ed7a4e524cbcbac54b75f2976696 | [
"MIT"
] | null | null | null | mrplot/modules/design/variable_design.py | enzofabricio/mrplot | 45e865241ea6ed7a4e524cbcbac54b75f2976696 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'variable_design.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated UI definition for the 'Variables' window.

    NOTE: this class was generated from a Qt Designer .ui file (see the
    file header warning) — regenerate from the .ui source instead of
    editing by hand.
    """
    def setupUi(self, MainWindow):
        """Instantiate and position every widget on *MainWindow* (fixed 929x461)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.setWindowModality(QtCore.Qt.WindowModal)
        MainWindow.resize(929, 461)
        MainWindow.setMinimumSize(QtCore.QSize(929, 461))
        MainWindow.setMaximumSize(QtCore.QSize(929, 461))
        MainWindow.setStyleSheet("background-color: rgb(70, 70, 70);")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # X/Y variable editors and their read-only expression displays.
        self.x_variable_lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.x_variable_lineEdit.setGeometry(QtCore.QRect(10, 40, 651, 51))
        font = QtGui.QFont()
        font.setFamily("Courier")
        font.setPointSize(10)
        self.x_variable_lineEdit.setFont(font)
        self.x_variable_lineEdit.setStyleSheet("color: rgb(255, 255, 255);\n"
"background-color: rgb(53, 53, 53);\n"
"\n"
"")
        self.x_variable_lineEdit.setObjectName("x_variable_lineEdit")
        self.newVariableButton = QtWidgets.QPushButton(self.centralwidget)
        self.newVariableButton.setGeometry(QtCore.QRect(10, 380, 35, 35))
        font = QtGui.QFont()
        font.setPointSize(8)
        self.newVariableButton.setFont(font)
        self.newVariableButton.setStyleSheet("color: rgb(255, 255, 255);")
        self.newVariableButton.setText("")
        self.newVariableButton.setObjectName("newVariableButton")
        self.variablesList_treeWidget = QtWidgets.QTreeWidget(self.centralwidget)
        self.variablesList_treeWidget.setGeometry(QtCore.QRect(10, 190, 651, 181))
        self.variablesList_treeWidget.setStyleSheet("color: rgb(1, 1, 1);\n"
"background-color: rgb(120, 120, 120);")
        self.variablesList_treeWidget.setObjectName("variablesList_treeWidget")
        self.variablesList_treeWidget.header().setDefaultSectionSize(120)
        self.RemoveVar_Button = QtWidgets.QPushButton(self.centralwidget)
        self.RemoveVar_Button.setGeometry(QtCore.QRect(50, 380, 35, 35))
        self.RemoveVar_Button.setStyleSheet("color: rgb(255, 255, 255);")
        self.RemoveVar_Button.setText("")
        self.RemoveVar_Button.setObjectName("RemoveVar_Button")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(10, 10, 91, 16))
        font = QtGui.QFont()
        font.setPointSize(8)
        self.label.setFont(font)
        self.label.setStyleSheet("color: rgb(255, 255, 255);")
        self.label.setObjectName("label")
        self.x_expr_lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.x_expr_lineEdit.setGeometry(QtCore.QRect(110, 10, 551, 20))
        font = QtGui.QFont()
        font.setFamily("Courier")
        self.x_expr_lineEdit.setFont(font)
        self.x_expr_lineEdit.setStyleSheet("color: rgb(204, 204, 204);")
        self.x_expr_lineEdit.setReadOnly(True)
        self.x_expr_lineEdit.setObjectName("x_expr_lineEdit")
        self.y_variable_lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.y_variable_lineEdit.setGeometry(QtCore.QRect(10, 130, 651, 51))
        font = QtGui.QFont()
        font.setFamily("Courier")
        font.setPointSize(10)
        self.y_variable_lineEdit.setFont(font)
        self.y_variable_lineEdit.setStyleSheet("color: rgb(255, 255, 255);\n"
"background-color: rgb(53, 53, 53);\n"
"\n"
"")
        self.y_variable_lineEdit.setObjectName("y_variable_lineEdit")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(10, 100, 91, 16))
        font = QtGui.QFont()
        font.setPointSize(8)
        self.label_2.setFont(font)
        self.label_2.setStyleSheet("color: rgb(255, 255, 255);")
        self.label_2.setObjectName("label_2")
        self.y_expr_lineEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.y_expr_lineEdit.setGeometry(QtCore.QRect(110, 100, 551, 20))
        font = QtGui.QFont()
        font.setFamily("Courier")
        self.y_expr_lineEdit.setFont(font)
        self.y_expr_lineEdit.setStyleSheet("color: rgb(204, 204, 204);")
        self.y_expr_lineEdit.setReadOnly(True)
        self.y_expr_lineEdit.setObjectName("y_expr_lineEdit")
        # Dialog action buttons (Ok / Apply / Cancel).
        self.apply_pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.apply_pushButton.setGeometry(QtCore.QRect(500, 380, 75, 35))
        self.apply_pushButton.setStyleSheet("color: rgb(255, 255, 255);")
        self.apply_pushButton.setObjectName("apply_pushButton")
        self.cancel_pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.cancel_pushButton.setGeometry(QtCore.QRect(580, 380, 75, 35))
        self.cancel_pushButton.setStyleSheet("color: rgb(255, 255, 255);")
        self.cancel_pushButton.setObjectName("cancel_pushButton")
        self.ok_btn = QtWidgets.QPushButton(self.centralwidget)
        self.ok_btn.setGeometry(QtCore.QRect(420, 380, 75, 35))
        self.ok_btn.setStyleSheet("color: rgb(255, 255, 255);")
        self.ok_btn.setObjectName("ok_btn")
        # "Global expressions" group.
        self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox.setGeometry(QtCore.QRect(670, 0, 251, 181))
        self.groupBox.setStyleSheet("color: rgb(200, 200, 200);")
        self.groupBox.setObjectName("groupBox")
        self.grab_gbl_btn = QtWidgets.QPushButton(self.groupBox)
        self.grab_gbl_btn.setGeometry(QtCore.QRect(90, 150, 75, 23))
        self.grab_gbl_btn.setObjectName("grab_gbl_btn")
        self.rm_gbl_btn = QtWidgets.QPushButton(self.groupBox)
        self.rm_gbl_btn.setGeometry(QtCore.QRect(170, 150, 71, 23))
        self.rm_gbl_btn.setObjectName("rm_gbl_btn")
        self.global_treeview = QtWidgets.QTreeView(self.groupBox)
        self.global_treeview.setGeometry(QtCore.QRect(10, 20, 231, 121))
        self.global_treeview.setObjectName("global_treeview")
        self.set_gbl_btn = QtWidgets.QPushButton(self.groupBox)
        self.set_gbl_btn.setGeometry(QtCore.QRect(10, 150, 75, 23))
        self.set_gbl_btn.setToolTip("")
        self.set_gbl_btn.setObjectName("set_gbl_btn")
        # "Plot_alf | Plot_num" group.
        self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox_2.setGeometry(QtCore.QRect(670, 190, 251, 221))
        self.groupBox_2.setStyleSheet("color: rgb(200, 200, 200);")
        self.groupBox_2.setObjectName("groupBox_2")
        self.search_alf_le = QtWidgets.QLineEdit(self.groupBox_2)
        self.search_alf_le.setGeometry(QtCore.QRect(10, 190, 110, 20))
        self.search_alf_le.setStyleSheet("color: rgb(200, 200, 200);")
        self.search_alf_le.setObjectName("search_alf_le")
        self.search_num_le = QtWidgets.QLineEdit(self.groupBox_2)
        self.search_num_le.setGeometry(QtCore.QRect(130, 190, 110, 20))
        self.search_num_le.setStyleSheet("color: rgb(200, 200, 200);")
        self.search_num_le.setObjectName("search_num_le")
        self.num_view = QtWidgets.QListView(self.groupBox_2)
        self.num_view.setGeometry(QtCore.QRect(130, 60, 110, 121))
        self.num_view.setStyleSheet("color: rgb(150, 150, 150);\n"
"background-color: rgb(60, 60, 60);\n"
"")
        self.num_view.setObjectName("num_view")
        self.alf_view = QtWidgets.QListView(self.groupBox_2)
        self.alf_view.setGeometry(QtCore.QRect(10, 60, 110, 121))
        self.alf_view.setStyleSheet("color: rgb(150, 150, 150);\n"
"background-color: rgb(60, 60, 60);\n"
"")
        self.alf_view.setObjectName("alf_view")
        self.alf_label_2 = QtWidgets.QLabel(self.groupBox_2)
        self.alf_label_2.setGeometry(QtCore.QRect(140, 40, 51, 16))
        font = QtGui.QFont()
        font.setPointSize(8)
        self.alf_label_2.setFont(font)
        self.alf_label_2.setStyleSheet("color: rgb(255, 255, 255);")
        self.alf_label_2.setObjectName("alf_label_2")
        self.alf_label = QtWidgets.QLabel(self.groupBox_2)
        self.alf_label.setGeometry(QtCore.QRect(10, 40, 51, 16))
        font = QtGui.QFont()
        font.setPointSize(8)
        self.alf_label.setFont(font)
        self.alf_label.setStyleSheet("color: rgb(255, 255, 255);")
        self.alf_label.setObjectName("alf_label")
        self.add2Y_checkBox = QtWidgets.QCheckBox(self.groupBox_2)
        self.add2Y_checkBox.setGeometry(QtCore.QRect(90, 20, 61, 17))
        self.add2Y_checkBox.setStyleSheet("color: rgb(255, 255, 255);")
        self.add2Y_checkBox.setObjectName("add2Y_checkBox")
        self.add2XcheckBox = QtWidgets.QCheckBox(self.groupBox_2)
        self.add2XcheckBox.setGeometry(QtCore.QRect(10, 20, 70, 17))
        self.add2XcheckBox.setStyleSheet("color: rgb(255, 255, 255);")
        self.add2XcheckBox.setObjectName("add2XcheckBox")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 929, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign all translatable, user-visible strings (re-run on locale change)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Variables"))
        self.variablesList_treeWidget.headerItem().setText(0, _translate("MainWindow", "Variable Name"))
        self.variablesList_treeWidget.headerItem().setText(1, _translate("MainWindow", "Label"))
        self.variablesList_treeWidget.headerItem().setText(2, _translate("MainWindow", "x"))
        self.variablesList_treeWidget.headerItem().setText(3, _translate("MainWindow", "y"))
        self.variablesList_treeWidget.headerItem().setText(4, _translate("MainWindow", "Restart"))
        self.variablesList_treeWidget.headerItem().setText(5, _translate("MainWindow", "csv"))
        self.variablesList_treeWidget.headerItem().setText(6, _translate("MainWindow", "Plot"))
        self.label.setText(_translate("MainWindow", "Current xVariable"))
        self.label_2.setText(_translate("MainWindow", "Current yVariable"))
        self.apply_pushButton.setText(_translate("MainWindow", "Apply"))
        self.cancel_pushButton.setText(_translate("MainWindow", "Cancel"))
        self.ok_btn.setText(_translate("MainWindow", "Ok"))
        self.groupBox.setTitle(_translate("MainWindow", "Global expressions"))
        self.grab_gbl_btn.setText(_translate("MainWindow", "grab"))
        self.rm_gbl_btn.setText(_translate("MainWindow", "remove"))
        self.set_gbl_btn.setText(_translate("MainWindow", "set"))
        self.groupBox_2.setTitle(_translate("MainWindow", "Plot_alf | Plot_num"))
        self.search_alf_le.setPlaceholderText(_translate("MainWindow", "find plot_alfa"))
        self.search_num_le.setPlaceholderText(_translate("MainWindow", "find plot_num"))
        self.alf_label_2.setText(_translate("MainWindow", "plot_num"))
        self.alf_label.setText(_translate("MainWindow", "plot_alf"))
        self.add2Y_checkBox.setText(_translate("MainWindow", "Add to Y"))
        self.add2XcheckBox.setText(_translate("MainWindow", "Add to X"))
if __name__ == "__main__":
    # Stand-alone preview: build the window and hand control to Qt.
    import sys
    application = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    form = Ui_MainWindow()
    form.setupUi(window)
    window.show()
    sys.exit(application.exec_())
| 54.962963 | 105 | 0.685057 | 11,372 | 0.957884 | 0 | 0 | 0 | 0 | 0 | 0 | 2,082 | 0.175371 |
687a1f3c513c197ca73d029ec4ca40557a75d7df | 2,597 | py | Python | 4-2/TextMining/Class_/week7/LanguageModelWithN_gram.py | define16/Class | 8b0771a348b2bcb19ba338ebff94326828a293ea | [
"Apache-2.0"
] | null | null | null | 4-2/TextMining/Class_/week7/LanguageModelWithN_gram.py | define16/Class | 8b0771a348b2bcb19ba338ebff94326828a293ea | [
"Apache-2.0"
] | null | null | null | 4-2/TextMining/Class_/week7/LanguageModelWithN_gram.py | define16/Class | 8b0771a348b2bcb19ba338ebff94326828a293ea | [
"Apache-2.0"
] | null | null | null | '''
n-gram은 이와 같은 확률적 언어 모델의 대표적인 것으로서,
n개 단어의 연쇄를 확률적으로 표현해 두면 실제로
발성된 문장의 기록을 계산할 수 있다.
'''
# Step 1 : Bag of Words — unigram language model over the Reuters corpus.
from nltk.corpus import reuters
from collections import Counter, defaultdict
counts = Counter(reuters.words())
total_count = len(reuters.words())
# The 20 most common words in the corpus.
print(counts.most_common(n=20))
# Convert raw counts into relative frequencies (probabilities).
for word in counts:
    counts[word] /= float(total_count)
# Sanity check: the frequencies should sum to 1.0.
print(sum(counts.values()))
import random
# Generate 100 words by inverse-CDF sampling from the unigram distribution.
text = []
for _ in range(100) :
    r = random.random()
    accumulator = 0
    for word, freq in counts.items():
        accumulator += freq
        if accumulator >= r:
            text.append(word)
            break
print(" ".join(text))
# Probability of the generated text under the unigram model
# (product of the individual word probabilities).
from operator import mul
from functools import reduce
print(reduce(mul, [counts[w] for w in text], 1.0))
# Step 2 : Bi-gram & Tri-gram — build a trigram model from Reuters sentences.
from nltk import bigrams, trigrams
first_sentence = reuters.sents()[0]
print(first_sentence)
### Bi-gram output
print("\n----- Bi-gram 결과 확 확인인 -----")
print(list(bigrams(first_sentence)))
### Padded bi-gram output (None padding at both sentence ends)
print("\n----- 패딩된 Bi-gram 결과 확인 -----")
print((list(bigrams(first_sentence, pad_left=True, pad_right=True))))
### Tri-gram output
print("\n----- Tri-gram 결과 확인 -----")
print(list(trigrams(first_sentence)))
### Padded tri-gram output
print("\n----- 패딩된 Tri-gram 결과 확인 -----")
print((list(trigrams(first_sentence, pad_left=True, pad_right=True))))
### Build a tri-gram model from the Reuters corpus:
### model[(w1, w2)][w3] = count of w3 following the bigram (w1, w2).
print("\n----- Tri-gram 모델 생성 -----")
model = defaultdict(lambda : defaultdict(lambda : 0))
for sentence in reuters.sents() :
    print("\n문장 : ", sentence)
    for w1, w2, w3 in trigrams(sentence, pad_right=True, pad_left=True):
        model[(w1,w2)][w3] += 1
# Two occurrences of 'economists' following 'what the' in the corpus.
print(model['what','the']['economists'])
# Normalize counts into conditional probabilities P(w3 | w1, w2).
for w1_w2 in model:
    total_count = float(sum(model[w1_w2].values()))
    for w3 in model[w1_w2]:
        model[w1_w2][w3] /= total_count
print(model['what', 'the']['economists'])
### Generate text with the tri-gram language model: start from the
### (None, None) sentence-start state and sample the next word from
### P(w | last two words) until the sentence-end state is reached.
print("\n----- 언어 모델을 이용해 텍스트 생성하기 -----")
import random
text = [None, None]
prob = 1.0
sentence_finished = False
while not sentence_finished:
    r = random.random()
    accumulator = .0
    # Inverse-CDF sampling over the conditional distribution.
    for word in model[tuple(text[-2:])].keys():
        accumulator += model[tuple(text[-2:])][word]
        if accumulator >= r:
            # Track the joint probability of the generated sequence.
            prob *= model[tuple(text[-2:])][word]
            text.append(word)
            print("aa : " ,text)
            break
    # Two trailing Nones mean the padded sentence has ended.
    if text[-2:] == [None, None]:
        sentence_finished = True
print("텍스트의 확률 : ", prob)
print(" ".join([t for t in text if t])) | 23.609091 | 72 | 0.628032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,064 | 0.355496 |
687be2e6d6e71a7b227db867acdd0430e68a8454 | 1,977 | py | Python | testing/composite.py | aheadley/pynemap | f2aa373e7c43755ce73c0a60110b609018cd2dcb | [
"MIT"
] | 6 | 2015-10-15T08:28:19.000Z | 2021-11-17T23:02:02.000Z | testing/composite.py | aheadley/pynemap | f2aa373e7c43755ce73c0a60110b609018cd2dcb | [
"MIT"
] | null | null | null | testing/composite.py | aheadley/pynemap | f2aa373e7c43755ce73c0a60110b609018cd2dcb | [
"MIT"
] | null | null | null | #!/usr/bin/python
import numpy
class Color(object):
    """An RGBA color with independent channel attributes.

    Channels are conventionally in [lower_bound, upper_bound], though no
    clamping is performed.
    """
    lower_bound = 0
    upper_bound = 255

    def __init__(self, r, g, b, a):
        self.r, self.g, self.b, self.a = r, g, b, a

    def __str__(self):
        return '({0}, {1}, {2}, {3})'.format(self.r, self.g, self.b, self.a)
def composite_pixels(src, dest):
    """Blend two Colors with normalized (0..1) channels and alphas.

    Each output channel combines the source over the destination,
    weighted by both alphas; returns a new Color.
    """
    def blend(s, d):
        return src.a * s * dest.a + s * (1 - dest.a) + d * dest.a * (1 - src.a)

    out_alpha = src.a * dest.a + src.a * (1 - dest.a) + dest.a * (1 - src.a)
    return Color(blend(src.r, dest.r),
                 blend(src.g, dest.g),
                 blend(src.b, dest.b),
                 out_alpha)
def simple_composite_pixels(src, dest):
    """'Over' composite of two Colors with 0-255 integer channels.

    Pre-multiplies each channel by its alpha, scales by 255, and
    combines src over dest; returns a new Color.
    """
    def over(s, d):
        return (s * src.a)/255 + ((d * dest.a) * (255 - src.a))/255**2

    return Color(over(src.r, dest.r),
                 over(src.g, dest.g),
                 over(src.b, dest.b),
                 src.a + dest.a - (src.a * dest.a)/255)
def overlay_pixel(src, dest):
    """Alpha-composite *src* over *dest* ("over" operator, byte channels).

    Both arguments are (r, g, b, a) sequences with 0-255 channels.
    Returns a numpy uint8 array (r, g, b, a).

    Fixes over the original: the four channel expressions were evaluated
    twice (once for the array, once for the debug print) — they are now
    computed once; the Python 2 print statements (a syntax error under
    Python 3) are now print() calls.  Debug prints are retained.
    """
    channels = [
        (src[0] * src[3]) / 255 + ((dest[0] * dest[3]) * (255 - src[3])) / 255 ** 2,
        (src[1] * src[3]) / 255 + ((dest[1] * dest[3]) * (255 - src[3])) / 255 ** 2,
        (src[2] * src[3]) / 255 + ((dest[2] * dest[3]) * (255 - src[3])) / 255 ** 2,
        src[3] + dest[3] - (src[3] * dest[3]) / 255,
    ]
    a = numpy.array(channels, dtype=numpy.uint8)
    print(channels)
    print(src, dest, a)
    return a
if __name__ == '__main__':
    # Example colors: opaque grey/green/brown, translucent blue, transparent.
    a = (120, 120, 120, 255)
    b = (117, 176, 73, 255)
    c = (134, 96, 67, 255)
    d = (38, 92, 255, 100)
    e = (0, 0, 0, 0)
    # Ported to Python 3 print() calls (the original used Python 2 print
    # statements, which are a syntax error under Python 3).
    print(overlay_pixel(a, e))
    print((a[0] * a[3]) / 255 + ((e[0] * e[3]) * (255 - a[3])) / 255 ** 2)
687d1421e017adb345f125a4ddfc5d44f177f7ca | 10,139 | py | Python | util.py | tudat-team/webservices-dispatch-action | 6ed60ab2046e8541c8e09e5a94bedc9673dcc4f9 | [
"MIT"
] | null | null | null | util.py | tudat-team/webservices-dispatch-action | 6ed60ab2046e8541c8e09e5a94bedc9673dcc4f9 | [
"MIT"
] | null | null | null | util.py | tudat-team/webservices-dispatch-action | 6ed60ab2046e8541c8e09e5a94bedc9673dcc4f9 | [
"MIT"
] | null | null | null | import os
import requests
import urllib3.util.retry
import logging
import subprocess
import re
import pprint
from datetime import datetime, timedelta
import pygit2
from github import Github
from github.GithubException import UnknownObjectException
# Module-level logger; basicConfig sets the root logging level to INFO
# (the previous comment said "all", but INFO is what is configured).
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def get_project_and_feedstock_repos(github_client, repo_name):
    """
    Resolve a project repository together with its feedstock repository.

    The feedstock is assumed to live at ``<repo_name>-feedstock``.

    Parameters
    ----------
    github_client : github.MainClass.Github
        Authenticated GitHub client.
    repo_name : str
        Name of the project repository.

    Returns
    -------
    tuple
        ``(project_repo, feedstock_repo, feedstock_repo_name)``, or
        ``(None, None, None)`` when the feedstock repository does not exist.
    """
    feedstock_name = repo_name + "-feedstock"
    project = github_client.get_repo(repo_name)
    try:
        feedstock = github_client.get_repo(feedstock_name)
    except UnknownObjectException:
        # No feedstock repository -> nothing to update; signal with Nones.
        LOGGER.error(
            "repository_dispatch event: feedstock repository of '%s' not found" % repo_name)
        return None, None, None
    return project, feedstock, feedstock_name
def get_project_version(repo_dir, VERSION_PEP440=re.compile(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(\.(?P<release>[a-z]+)(?P<dev>\d+))?')):
    """
    Read and normalize the project version from the ``version`` file.

    Parameters
    ----------
    repo_dir : str
        Path to the checked-out repository containing a ``version`` file.
    VERSION_PEP440 : re.Pattern
        Pattern used to parse the version string.

    Returns
    -------
    str or None
        Normalized version (``major.minor.patch`` with an optional
        ``.<release><dev>`` suffix), or ``None`` when the file contents
        do not match the pattern.
    """
    with open(os.path.join(repo_dir, "version"), 'r') as fp:
        raw_version = fp.read().rstrip("\n")
    match = VERSION_PEP440.match(raw_version)
    if not match:
        LOGGER.error(
            "repository_dispatch event: could not parse version")
        return None
    parts = match.groupdict()
    version = "%s.%s.%s" % (parts["major"], parts["minor"], parts["patch"])
    if parts["release"]:
        # Pre-release suffix, e.g. "1.2.3.dev4".
        version += ".%s%s" % (parts["release"], parts["dev"])
    LOGGER.info("version: %s", version)
    LOGGER.info("major: %s", parts["major"])
    LOGGER.info("minor: %s", parts["minor"])
    LOGGER.info("patch: %s", parts["patch"])
    LOGGER.info("release: %s", parts["release"])
    LOGGER.info("dev: %s", parts["dev"])
    return version
def get_commit_tags(repo, commit_hash, supported_tags=["ci", "rerender"]):
    """
    Get the tags of a commit.

    Parameters
    ----------
    repo : github.Repository.Repository
        The repository.
    commit_hash : str
        The commit hash.
    supported_tags : list[str]
        List of tags that are to be searched for. (Default is never
        mutated, so the mutable default is safe here.)

    Returns
    -------
    tags : dict[str, bool]
        Dictionary of tags found, with supported tags as keys and the
        value denoting whether they were found.
    commit_message : str or None
        New commit message cleaned of the tag in brackets, but with the
        tag in front instead; ``None`` when no supported tag is present.
    """
    # Get commit from its sha
    commit = repo.get_commit(sha=commit_hash)
    message = commit.raw_data["commit"]["message"]
    # Extract the first "[tag]" occurrence from the message, if any
    tag = re.search(r'\[(.*?)\]', message)
    if tag is None:
        LOGGER.info("no tag detected")
        return {possible_tag: False for possible_tag in supported_tags}, None
    tag = tag.group(1)
    LOGGER.info("tag: %s", tag)
    if tag.lower() not in supported_tags:
        # Fixed: the closing parenthesis was missing from this log message.
        LOGGER.info(
            "no supported tag detected (was '%s', supported are %s)" % (tag, supported_tags)
        )
        return {possible_tag: False for possible_tag in supported_tags}, None
    # Remove the "[tag]" marker, collapse excess whitespace, then prepend
    # the upper-cased tag, e.g. "[ci] fix thing" -> "CI: fix thing".
    commit_message = message.replace(f'[{tag}]', "")
    commit_message = re.sub(r'\s+', " ", commit_message).strip()
    commit_message = "%s: %s" % (tag.upper(), commit_message)
    return {possible_tag: tag.lower() == possible_tag for possible_tag in supported_tags}, commit_message
def was_branch_last_commit_recent(repo, branch_name, time_treshold=timedelta(hours=24)):
    """
    Tell whether the newest commit on a branch is younger than a threshold.

    Parameters
    ----------
    repo : github.Repository.Repository
        The repository.
    branch_name : str
        Branch to inspect.
    time_treshold : datetime.timedelta
        Maximum age for the last commit to count as recent.

    Returns
    -------
    bool
        True when the branch's last commit is within the threshold.
    """
    branch = repo.get_branch(branch_name)
    committed_at = datetime.strptime(
        branch.commit.raw_data["commit"]["author"]["date"],
        "%Y-%m-%dT%H:%M:%SZ")
    # NOTE(review): GitHub timestamps are UTC while datetime.now() is local
    # time; the original comparison is kept as-is -- confirm intent.
    if committed_at <= datetime.now() - time_treshold:
        return False
    LOGGER.info(
        "since is within specified time, nightly release will follow")
    return True
def push_all_to_github(repo, branch_name, directory, commit_message):
    """
    Stage, commit, and force-push everything in ``directory`` to GitHub.

    Parameters
    ----------
    repo : str
        Repository identifier interpolated into the push URL
        (presumably ``owner/name`` -- confirm against callers).
    branch_name : str
        Branch whose tags are pushed alongside the branches.
    directory : str
        Local working tree to operate in.
    commit_message : str
        Message for the created commit.
    """
    def _git(*args):
        # Every git command runs inside the working tree.
        subprocess.run(["git"] + list(args), cwd=directory)

    _git("add", ".")
    _git("commit", "-m", commit_message)
    # Authenticated push URL; GH_TOKEN must be set in the environment.
    repo_auth_url = "https://%s@github.com/%s.git" % (os.environ["GH_TOKEN"], repo)
    _git("push", "--all", "-f", repo_auth_url)
    _git("push", repo_auth_url, branch_name, "--tags")
def create_api_sessions(github_token):
    """Build authenticated GitHub API clients.

    Parameters
    ----------
    github_token : str
        The GitHub access token.

    Returns
    -------
    session : requests.Session
        A `requests` session w/ the beta `check_run` API configured.
    gh : github.MainClass.Github
        A `Github` object from the PyGithub package.
    """
    # based on
    # https://alexwlchan.net/2019/03/
    # creating-a-github-action-to-auto-merge-pull-requests/
    # with lots of edits
    accept_types = [
        "application/vnd.github.v3+json",
        # special beta api for check_suites endpoint
        "application/vnd.github.antiope-preview+json",
    ]
    session = requests.Session()
    session.headers = {
        "Accept": "; ".join(accept_types),
        "Authorization": f"token {github_token}",
        "User-Agent": f"GitHub Actions script in {__file__}"
    }

    def _raise_for_status(resp, *args, **kwargs):
        # Surface the response body before re-raising, for easier debugging.
        try:
            resp.raise_for_status()
        except Exception as e:
            print('ERROR:', resp.text)
            raise e

    session.hooks["response"].append(_raise_for_status)

    # PyGithub client with retries for flaky API calls.
    gh = Github(
        github_token,
        retry=urllib3.util.retry.Retry(total=10, backoff_factor=0.1))
    return session, gh
def clone_repo(clone_url, clone_path, branch, auth_token):
    """
    Clone a repository to disk and check out the given branch.

    Parameters
    ----------
    clone_url : str
        HTTPS clone URL of the repository.
    clone_path : str
        Local directory to clone into.
    branch : str
        Name of the branch to check out after cloning.
    auth_token : str
        GitHub authentication token (app installation token here).

    Returns
    -------
    pygit2_repo : pygit2.Repository
        The cloned repository object.
    pygit2_ref : pygit2.Reference
        The reference for the checked-out branch.
    """
    # Use pygit2 to clone the repo to disk
    # if using github app pem key token, use x-access-token like below
    # if you were using a personal access token, use auth_method = 'x-oauth-basic' AND reverse the auth_method and token parameters
    auth_method = 'x-access-token'
    callbacks = pygit2.RemoteCallbacks(pygit2.UserPass(auth_method, auth_token))
    pygit2_repo = pygit2.clone_repository(clone_url, clone_path,
                                          callbacks=callbacks)
    # Resolve the remote-tracking ref for the branch and check out its tree.
    pygit2_branch = pygit2_repo.branches['origin/' + branch]
    pygit2_ref = pygit2_repo.lookup_reference(pygit2_branch.name)
    pygit2_repo.checkout(pygit2_ref)
    # Checkout correct branch via the git CLI as well -- NOTE(review):
    # presumably to leave HEAD on a local branch rather than the remote ref;
    # confirm before changing.
    subprocess.run(["git", "checkout", branch], cwd=clone_path)
    return pygit2_repo, pygit2_ref
def get_var_values(var_retrieve, root=''):
    """Read one regex-captured value per ``(name, file, pattern)`` spec.

    Parameters
    ----------
    var_retrieve : iterable of (str, str, re.Pattern)
        Variable name, file name (relative to ``root``), and a compiled
        pattern whose group 1 captures the value.
    root : str
        Directory prefix for the files.

    Returns
    -------
    dict
        Mapping of variable name to the captured string.
    """
    values = {}
    for name, filename, pattern in var_retrieve:
        with open(os.path.join(root, filename), 'r') as handle:
            text = handle.read()
        # Raises AttributeError when the pattern does not match -- same
        # behavior as the original.
        values[name] = pattern.search(text).group(1)
    return values
def update_var_values(var_retrieved, version_tag, git_rev=None, root=''):
    """Compute updated recipe variable values for a new release.

    ``build`` is bumped by one when the recorded version equals the tag
    and reset to 0 otherwise; ``git_rev`` defaults to the version tag;
    ``version`` becomes the tag. All other keys pass through untouched.
    ``root`` is accepted for interface compatibility but unused.
    """
    effective_rev = version_tag if git_rev is None else git_rev
    updated = {}
    for name, value in var_retrieved.items():
        if name == 'version':
            updated[name] = version_tag
        elif name == 'git_rev':
            updated[name] = effective_rev
        elif name == 'build':
            # NOTE(Geoffrey): we are only bumping build number at the moment.
            updated[name] = int(value) + 1 if var_retrieved['version'] == version_tag else 0
        else:
            updated[name] = value
    return updated
def substitute_vars_in_file(vars_substitute, directory):
    """Apply regex substitutions to files under ``directory``.

    Parameters
    ----------
    vars_substitute : iterable of (str, re.Pattern, str, object)
        File name (relative to ``directory``), compiled pattern to
        replace, replacement template containing ``{}``, and the value
        substituted into the template.
    directory : str
        Directory containing the files to rewrite in place.
    """
    for filename, pattern, template, value in vars_substitute:
        target = os.path.join(directory, filename)
        with open(target, "r") as handle:
            contents = handle.read()
        # Fill the template with the value, then rewrite matching spans.
        contents = pattern.sub(template.replace("{}", str(value)), contents)
        with open(target, "w") as handle:
            handle.write(contents)
687da6b0eb4b67490262b063fd83e99351f1851c | 1,538 | py | Python | ptxt/journal/models.py | mvasilkov/scrapheap | 53e30b88879ab8e4d80867b0ec7fa631ce46e55e | [
"MIT"
] | 2 | 2021-11-29T13:51:27.000Z | 2021-12-12T14:59:42.000Z | ptxt/journal/models.py | mvasilkov/scrapheap | 53e30b88879ab8e4d80867b0ec7fa631ce46e55e | [
"MIT"
] | null | null | null | ptxt/journal/models.py | mvasilkov/scrapheap | 53e30b88879ab8e4d80867b0ec7fa631ce46e55e | [
"MIT"
] | null | null | null | from django.core.validators import MinLengthValidator
from django.contrib.auth.models import User
from django.contrib.auth.validators import UnicodeUsernameValidator
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.utils.encoding import iri_to_uri
from mongo.objectid import ObjectId
from mur.commonmark import commonmark
def _objectid():
    """Default factory for Post.objectid: a fresh ObjectId rendered as a string
    (fits the 24-char CharField below)."""
    return str(ObjectId())
class Post(models.Model):
    """A user-authored journal post, addressed by ``/<username>/<path>/``."""

    # Path must be at least 6 chars and use username-safe characters only.
    path_validators = [MinLengthValidator(6), UnicodeUsernameValidator()]

    # Stable external identifier generated once per post; never editable.
    objectid = models.CharField(max_length=24, default=_objectid, editable=False, unique=True)
    user = models.ForeignKey(User, on_delete=models.PROTECT, related_name='posts')
    path = models.CharField(max_length=127, validators=path_validators)
    # Raw markup as entered; contents_html is rendered from it on save.
    contents = models.TextField()
    contents_html = models.TextField(default='', editable=False)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    def get_absolute_url(self):
        # The string returned from get_absolute_url() must contain only ASCII characters.
        return iri_to_uri(f'/{self.user}/{self.path}/')

    def __str__(self):
        return f'Post({self.user}/{self.path})'

    class Meta:
        # One path per user; the same path may exist under different users.
        unique_together = ('user', 'path')
@receiver(pre_save, sender=Post)
def update_html(sender, instance, update_fields, **kwargs):
    """Render ``contents`` to ``contents_html`` before each Post save.

    Skipped when a partial save explicitly excludes ``contents``; a full
    save (``update_fields`` is None) always re-renders.
    """
    contents_affected = not update_fields or 'contents' in update_fields
    if contents_affected:
        instance.contents_html = commonmark(instance.contents)
| 34.954545 | 94 | 0.755527 | 864 | 0.561769 | 0 | 0 | 224 | 0.145644 | 0 | 0 | 172 | 0.111834 |
687e76240c6372225f7e38483f2cc12aea5247cb | 611 | py | Python | autocomplete/survey.py | converj/reasonSurvey | dd40784da4b07cfeb9fd873bab820d627cb026c1 | [
"Apache-2.0"
] | null | null | null | autocomplete/survey.py | converj/reasonSurvey | dd40784da4b07cfeb9fd873bab820d627cb026c1 | [
"Apache-2.0"
] | null | null | null | autocomplete/survey.py | converj/reasonSurvey | dd40784da4b07cfeb9fd873bab820d627cb026c1 | [
"Apache-2.0"
] | null | null | null | # Import external modules.
from google.appengine.ext import ndb
import logging
# Import local modules.
from configAutocomplete import const as conf
from constants import Constants
class Survey(ndb.Model):
    """Datastore (NDB) record describing an autocomplete survey."""

    surveyId = ndb.StringProperty()  # Primary key
    title = ndb.StringProperty()
    introduction = ndb.StringProperty()
    creator = ndb.StringProperty()  # NOTE(review): presumably a user id -- confirm against callers
    allowEdit = ndb.BooleanProperty()
    # When True, respondents can no longer submit or change answers.
    freezeUserInput = ndb.BooleanProperty( default=False )
    hideReasons = ndb.BooleanProperty( default=False )  # Experimental option
    questionIds = ndb.StringProperty( repeated=True )  # Ordered
688059ffe96ee1e3cfd3aaf09375c53be6410286 | 50 | py | Python | app/__init__.py | SLB974/GrandPyBot-dev | 7a0268d4ffa58c37eed37253c6afb00874dbabe4 | [
"MIT"
] | null | null | null | app/__init__.py | SLB974/GrandPyBot-dev | 7a0268d4ffa58c37eed37253c6afb00874dbabe4 | [
"MIT"
] | null | null | null | app/__init__.py | SLB974/GrandPyBot-dev | 7a0268d4ffa58c37eed37253c6afb00874dbabe4 | [
"MIT"
] | null | null | null | from flask import Flask
from app.views import app
| 16.666667 | 25 | 0.82 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6880d63fbc74e65ed6cf0cc2590cb8b33e1d322f | 394 | py | Python | wavenetlike/examples/analyzer_example.py | redwrasse/wavenetlike | ce15d58eafe03437ee485532ea3b43580e1895a9 | [
"MIT"
] | null | null | null | wavenetlike/examples/analyzer_example.py | redwrasse/wavenetlike | ce15d58eafe03437ee485532ea3b43580e1895a9 | [
"MIT"
] | 17 | 2020-12-10T19:50:39.000Z | 2021-07-29T03:19:42.000Z | wavenetlike/examples/analyzer_example.py | redwrasse/wavenetlike | ce15d58eafe03437ee485532ea3b43580e1895a9 | [
"MIT"
] | null | null | null | from wavenetlike.analyzers import DatasetAnalyzer
from wavenetlike.datasetid import TorchAudioDataSetId
def analyzer_example():
    """Run the dataset analyzer over the SPEECHCOMMANDS torchaudio dataset
    and print the analysis result."""
    dataset_id = TorchAudioDataSetId("SPEECHCOMMANDS")
    analyzer = DatasetAnalyzer(dataset_id)
    analyzer.analyze_dataset()
    print(analyzer.get_analysis_result())
if __name__ == "__main__":
    # Run the example only when executed as a script, not on import.
    analyzer_example()
| 26.266667 | 54 | 0.78934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.06599 |
688122f978cd09ce576380b1318b3268f2968ae6 | 271 | py | Python | python/cuspatial/cuspatial/utils/traj_utils.py | kkraus14/cuspatial | efe4c6de76c7e4f4888fb27425f3cfb42493a5ac | [
"Apache-2.0"
] | 2 | 2020-01-30T15:10:05.000Z | 2020-07-13T04:16:42.000Z | python/cuspatial/cuspatial/utils/traj_utils.py | kkraus14/cuspatial | efe4c6de76c7e4f4888fb27425f3cfb42493a5ac | [
"Apache-2.0"
] | null | null | null | python/cuspatial/cuspatial/utils/traj_utils.py | kkraus14/cuspatial | efe4c6de76c7e4f4888fb27425f3cfb42493a5ac | [
"Apache-2.0"
def get_ts_struct(ts):
    """Unpack a bit-packed trajectory timestamp into its fields.

    Field layout, from least-significant bit (widths implied by the
    shifts below): year 6, month 4, day 5, hour 5, minute 6, second 6,
    weekday 3, day-of-year 9, millisecond 10, pid 10.

    Parameters
    ----------
    ts : int
        Packed timestamp value.

    Returns
    -------
    tuple
        ``(y, m, d, hh, mm, ss, wd, yd, ms, pid)``

    Notes
    -----
    Fixed: the weekday mask was ``0x8`` (selecting only bit 3), which
    cannot hold a 3-bit weekday; ``0x7`` matches the 3-bit shift that
    follows it.
    """
    y = ts & 0x3f
    ts = ts >> 6
    m = ts & 0xf
    ts = ts >> 4
    d = ts & 0x1f
    ts = ts >> 5
    hh = ts & 0x1f
    ts = ts >> 5
    mm = ts & 0x3f
    ts = ts >> 6
    ss = ts & 0x3f
    ts = ts >> 6
    wd = ts & 0x7  # was 0x8: masked a single bit instead of the low 3 bits
    ts = ts >> 3
    yd = ts & 0x1ff
    ts = ts >> 9
    ms = ts & 0x3ff
    ts = ts >> 10
    pid = ts & 0x3ff
    return y, m, d, hh, mm, ss, wd, yd, ms, pid
| 11.291667 | 35 | 0.616236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |