| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
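Each row of the dataset is one source file plus repository metadata. A minimal sketch of how rows with this schema could be inspected, assuming the split has been exported to a JSON Lines file (the file path, size cutoff, and star threshold below are illustrative, not part of the dataset):

import json

ROWS_PATH = "rows.jsonl"  # hypothetical export; one JSON object per line with the columns above

with open(ROWS_PATH, encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        stars = row["max_stars_count"] or 0  # counts can be null (shown as ⌀ in the schema)
        if row["ext"] == "py" and row["size"] < 10_000 and stars >= 1:
            print(row["hexsha"], row["max_stars_repo_name"], row["max_stars_repo_path"])
            print(row["content"][:200])  # first 200 characters of the file contents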
---

hexsha: 7950ccf0237b0c5403a45b351cd7e235a59d5cd8 | size: 1,356 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: stl_dsa/users/tests/test_views.py @ renodubois/site (head 028caa79cbb6d116aeb57aaf12a693cda6382072, licenses ["MIT"])
max_stars_count: null | stars events: null .. null
max_issues_count: null | issues events: null .. null
max_forks_count: null | forks events: null .. null
content:
import pytest
from stl_dsa.users.models import User
from stl_dsa.users.views import UserRedirectView, UserUpdateView
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
"""
TODO:
extracting view initialization code as class-scoped fixture
would be great if only pytest-django supported non-function-scoped
fixture db access -- this is a work-in-progress for now:
https://github.com/pytest-dev/pytest-django/pull/258
"""
def test_get_success_url(self, user: User, rf):
view = UserUpdateView()
request = rf.get("/fake-url/")
request.user = user
view.request = request
assert view.get_success_url() == f"/users/{user.id}/"
def test_get_user_object(self, user: User, rf):
view = UserUpdateView()
request = rf.get("/fake-url/")
request.user = user
view.request = request
view_user = view.get_user_object()
assert view_user == user
def test_membership_status_returned(self, client):
response = client.get("/user")
class TestUserRedirectView:
def test_get_redirect_url(self, user: User, rf):
view = UserRedirectView()
request = rf.get("/fake-url")
request.user = user
view.request = request
assert view.get_redirect_url() == f"/users/{user.id}/"
avg_line_length: 28.851064 | max_line_length: 74 | alphanum_fraction: 0.652655

---

hexsha: 7950cd2c66eb54c7c1ac32dc6ed3e18f07417dcc | size: 10,250 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: ikbtleaves/sub_transform.py @ uw-biorobotics/IKBT (head be1923b441e5bac6662baf64b186cd69f7e31e31, licenses ["BSD-3-Clause"])
max_stars_count: 129 | stars events: 2017-11-17T15:59:31.000Z .. 2022-03-19T14:37:56.000Z
max_issues_count: 36 | issues events: 2018-03-07T01:18:45.000Z .. 2021-11-17T02:59:05.000Z
max_forks_count: 33 | forks events: 2017-09-22T22:42:37.000Z .. 2022-03-16T22:52:07.000Z
content:
#!/usr/bin/python
#
# Implement a transform in which we identify
# RHS elements which can be substituted into
# another RHS to eliminate unknowns.
#
# This is a new approach rather than making it a SOLVER
# it is just a transform which allows other solvers to work.
#
# Copyright 2017 University of Washington
# Developed by Dianmu Zhang and Blake Hannaford
# BioRobotics Lab, University of Washington
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
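# A rough illustration of the substitution idea (expressions taken from the test
# setup below; r_23 stands for the corresponding LHS element of Td):
#
#   e1 = sp.sin(th_1)*sp.cos(th_2)                  # Ts[1,2]
#   e2 = sp.sin(th_1)*sp.cos(th_2) + sp.sin(th_5)   # Ts[1,1] contains e1
#   e2.subs(e1, r_23)   ->   r_23 + sin(th_5)       # three unknowns reduced to one
#
# The transform scans Ts for such pairs and rewrites an element only when the
# substitution lowers its count of unknowns.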
import sympy as sp
import numpy as np
from sys import exit
from ikbtfunctions.helperfunctions import *
from ikbtbasics.kin_cl import *
from ikbtbasics.ik_classes import * # special classes for Inverse kinematics in sympy
import b3 as b3 # behavior trees
import unittest # used by the unittest-based test classes below
class test_sub_transform(b3.Action): # tester for your ID
def tick(self, tick):
#test_number = tick.blackboard.get('test_number') # if present
R = tick.blackboard.get('Robot')
sp.var('a b c d e f g')
# set up bb data for testing
Td = ik_lhs()
Ts = sp.zeros(4)
Ts[1,1] = sp.sin(th_1)*sp.cos(th_2)+sp.sin(th_5)
Ts[1,2] = sp.sin(th_1)*sp.cos(th_2)
Ts[2,1] = a+b+c+d
Ts[2,2] = a+b+c # for using sum of angles identities
Ts[2,3] = a*b+c
Ts[2,0] = a
Ts[0,0] = e+f+g
Ts[0,1] = sp.sin(e+f+g)
testm = matrix_equation(Td,Ts)
ua = unknown(a)
ub = unknown(b)
uc = unknown(c)
ud = unknown(d)
ue = unknown(e)
uf = unknown(f)
ug = unknown(g)
uth2 = unknown(th_2)
uth3 = unknown(th_3)
uth4 = unknown(th_4)
uth5 = unknown(th_5)
variables = [ua,ub,uc,ud,ue,uf,ug,uth2, uth3, uth4, uth5]
R.mequation_list = [testm]
[L1, L2] = R.scan_Mequation(testm, variables) # lists of 1unk and 2unk equations
print(' INITIAL Ts:')
Tm = R.mequation_list[0] # for a single test as above
sp.pprint(Tm.Ts)
print('')
tick.blackboard.set('eqns_1u', L1)
tick.blackboard.set('eqns_2u', L2)
tick.blackboard.set('unknowns',variables)
tick.blackboard.set('Robot',R)
return b3.SUCCESS
class sub_transform(b3.Action): # action leaf for
def tick(self, tick):
unknowns = tick.blackboard.get('unknowns') # the current list of unknowns
R = tick.blackboard.get('Robot') # the current robot instance
if(self.BHdebug):
print("running: ", self.Name)
print('number of matrix equations: ', len(R.mequation_list))
print('first matrix equation: ', R.mequation_list[0])
print('number of input equations: ', len(R.mequation_list[0].get_kequation_list()))
print("unknowns:")
for u in unknowns:
print(u.symbol, ', solved: ',u.solved)
print('')
# We're going to look at the first N equations in the mequation_list
N = 1 # were only looking at first few (1 or 2) matrix equations (at least for now)
assert (N <= len(R.mequation_list)), 'sub_transform test wants too many meqns '
# identify elements of eqns where another element can be substituted in
# to eliminate unknowns
#
found = False
sp.var('a z')
z = a-a # (define symbolic zero!)
cols = [0,1,2,3]
rows = [0,1,2] # we don't care about row 4 ([0,0,0,1])!
for m in range(0,N):
for i in rows:
for j in cols:
e2 = R.mequation_list[m].Ts[i,j]
for k in rows:
for l in cols:
e1 = R.mequation_list[m].Ts[k,l]
# substitute with e1 or -e1 ####################################3 ******* adapt ".has" to both LHS and RHS??
if((e1 != e2) and e2 != z and e2.has(e1)): # we found a substitution
if(self.BHdebug):
print('')
print(self.Name, ' found a sub transform (+)')
print(e1, ' / ', e2)
print('new: ', e2, ' = ', e2.subs(e1, e2) )
nold = count_unknowns(unknowns, e2)
new = e2.subs(e1, R.mequation_list[m].Td[k,l]) # substitute
nnew = count_unknowns(unknowns, new)
if(self.BHdebug):
print('Unknowns: old/new:', nold, '/', nnew)
print('Prop Sub: ', e2, '/', new)
if(nnew < nold):
R.mequation_list[m].Ts[i,j] = new
found = True
elif((e1 != e2) and e2 != z and e2.has(-e1)): # we found a substitution -e1
if(self.BHdebug):
print(self.Name, ' found a (-) sub transform')
print(e1, '/', e2)
nold = count_unknowns(unknowns, e2)
new = e2.subs(-e1, -R.mequation_list[m].Td[k,l]) # substitute with -e1
nnew = count_unknowns(unknowns, new)
if(self.BHdebug):
print('Unknowns: old/new:', nold, '/', nnew)
print('Prop Sub: ', e2, '/', new)
if(nnew < nold): # only do this to *reduce* # of unknowns!
R.mequation_list[m].Ts[i,j] = new
found = True
if found:
# put the tmp_eqns list back into R !!!! ******************************
[L1, L2, L3p] = R.scan_for_equations(unknowns)
tick.blackboard.set('eqns_1u', L1)
tick.blackboard.set('eqns_2u', L2)
tick.blackboard.set('eqns_3pu', L3p)
tick.blackboard.set('Robot', R)
return b3.SUCCESS
#else:
#return b3.FAILURE
#class test_sincos_solve(b3.Action): # tester for sincos solver
#def tick(self, tick):
## set up bb data for testing sincos_solve
#####################################################################################
# Test code below. See sincos_solver.py for example
#
class TestSolver006(unittest.TestCase): # change TEMPLATE to unique name (2 places)
def setUp(self):
self.DB = False # debug flag
print('=============== Test sub_transform.py =====================')
return
def runTest(self):
self.test_subber()
def test_subber(self):
sub_tester = b3.BehaviorTree()
bb = b3.Blackboard()
bb.set('Robot', Robot())
setup = test_sub_transform()
trans = sub_transform()
trans.Name = 'Substitution Transf'
trans.BHdebug = True
test = b3.Sequence([setup, trans])
sub_tester.root = test
sub_tester.tick("Test the substitution test tree", bb)
# now examine results
R = bb.get('Robot')
Tm = R.mequation_list[0] # for a single test as above
sp.var('a b c d r_11 r_23 r_31 r_33 r_43 ')
fs = " sub_transform FAIL"
self.assertTrue(Tm.Ts[1,1]== r_23+sp.sin(th_5), fs)
self.assertTrue(Tm.Ts[1,2]== sp.sin(th_1)*sp.cos(th_2), fs)
self.assertTrue(Tm.Ts[2,1]== d+r_33, fs)
self.assertTrue(Tm.Ts[2,3]== b*r_31+c, fs)
self.assertTrue(Tm.Ts[2,0]==a, fs)
self.assertTrue(Tm.Ts[0,1]==sp.sin(r_11), fs)
print('\n\n Passed 6 assertions\n\n')
#
# Can run your test from command line by invoking this file
#
# - or - call your TestSolverTEMPLATE() from elsewhere
#
def run_test():
print('\n\n=============== Test sub_transform nodes=====================')
testsuite = unittest.TestLoader().loadTestsFromTestCase(TestSolver006) # replace TEMPLATE
unittest.TextTestRunner(verbosity=2).run(testsuite)
if __name__ == "__main__":
print('\n\n=============== Test sub_transform nodes=====================')
testsuite = unittest.TestLoader().loadTestsFromTestCase(TestSolver006) # replace TEMPLATE
unittest.TextTestRunner(verbosity=2).run(testsuite)
avg_line_length: 44.372294 | max_line_length: 757 | alphanum_fraction: 0.532878

---

hexsha: 7950cd9b5c0c000e6cc2c25a267c3e4e6a56b4e3 | size: 14,259 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: Old-Bots/TAG/main.py @ CDFalcon/Discord-Bots (head 10baa0b883cbf57b2c5f0719ac3df9797ad50520, licenses ["MIT"])
max_stars_count: 3 | stars events: 2018-09-14T18:38:46.000Z .. 2018-09-15T16:26:46.000Z
max_issues_count: null | issues events: null .. null
max_forks_count: null | forks events: null .. null
content:
#
# main.py
#
# Created by CDFalcon on 4/18/18.
# Copyright (c) 2018 CDFalcon. All rights reserved.
#
#Imports#
#-----------------------------------------------------------------------------#
import discord
from discord.ext import commands
from discord.ext.commands import Bot
from datetime import datetime
import random
import settings
#Classwork#
#-----------------------------------------------------------------------------#
tag = commands.Bot(command_prefix='?')
tag.remove_command("help")
#Functions#
#-----------------------------------------------------------------------------#
async def isAuthorized(context):
hasRoles = False
for role in settings.ADMIN_ROLES:
if discord.utils.get(context.guild.roles, name=role) in context.author.roles:
hasRoles = True
return hasRoles
async def isHubAdmin(context, *args):
try:
await context.message.delete()
except:
await context.author.send(settings.ERROR__WRONG_CHANNEL)
return False #21
if len(args) != 2:
await context.author.send(settings.ERROR__INVALID_ARGS)
return False
partner = args[0]
try:
if (discord.utils.get(context.guild.roles, name = partner + " Admin")) not in context.author.roles:
await context.author.send(settings.ERROR__NOT_HUB_ADMIN)
return False
except:
return await context.author.send(settings.ERROR__INVALID_ARGS)
return True
#Events#
#-----------------------------------------------------------------------------#
@tag.event
async def on_ready():
print("Ready")
await tag.change_presence(game=discord.Game(name=(settings.VERSION))) #50
@tag.event
async def on_voice_state_update(member, previous, current):
if current.channel == discord.utils.get(member.guild.voice_channels, id=settings.DY_CHAN_ID):
numbers = '1234567890'
channelNumber = int(''.join(random.sample(numbers, 5)))
await member.guild.create_voice_channel(str(channelNumber), category = discord.utils.get(member.guild.categories, id=settings.DY_CAT_ID))
await member.move_to(discord.utils.get(member.guild.voice_channels, name=str(channelNumber)))
try:
if previous.channel.category_id == settings.DY_CAT_ID:
if len(previous.channel.members) == 0:
await previous.channel.delete()
except:
pass
#General Commands#
#-----------------------------------------------------------------------------#
@tag.command(pass_context=True)
async def help(context):
try:
await context.author.send(settings.HELP_MENU)
await context.message.delete()
except:
await context.author.send(settings.ERROR__WRONG_CHANNEL)
@tag.command(pass_context=True)
async def join(context, *args):
try:
await context.message.delete()
except:
return await context.author.send(settings.ERROR__WRONG_CHANNEL)
if not args or len(args) > 2:
return await context.author.send(settings.ERROR__INVALID_ARGS)
password = 0
if len(args) == 2:
password = args[1]
partner = args[0]
partnerRole = (discord.utils.get(context.guild.roles, name = partner))
try:
if partnerRole.color != discord.Color.dark_blue():
return await context.author.send(settings.ERROR__INVALID_HUB)
except:
return await context.author.send(settings.ERROR__INVALID_HUB)
try:
if (discord.utils.get(context.guild.channels, name = partner + " password")).id != int(password):
return await context.author.send(settings.ERROR__WRONG_PASSWORD) #91
except:
pass
if (discord.utils.get(context.guild.roles, name = "Banned from " + partner)) not in context.author.roles:
await context.author.send(settings.JOIN_MESSAGE_START + partner + settings.JOIN_MESSAGE_END)
return await context.author.add_roles(partnerRole)
else:
return await context.author.send("**You are banned from **`" + partner + "` **and therefore cannot join.**")
@tag.command(pass_context=True)
async def leave(context, *args):
try:
await context.message.delete()
except:
return await context.author.send(settings.ERROR__WRONG_CHANNEL)
if len(args) != 1:
return await context.author.send(settings.ERROR__INVALID_ARGS)
partnerRole = (discord.utils.get(context.guild.roles, name = args[0]))
try:
await context.author.remove_roles(partnerRole) #111
except:
return await context.author.send(settings.ERROR__INVALID_HUB)
return await context.author.send(settings.LEAVE_MESSAGE_START + args[0] + settings.LEAVE_MESSAGE_END)
#Hub Admin Commands#
#-----------------------------------------------------------------------------#
@tag.command(pass_context=True)
async def createPassword(context, *args):
if await isHubAdmin(context, *args) == False:
return
partner = args[0]
try:
await (discord.utils.get(context.guild.channels, name = partner + " password")).delete()
except:
await context.author.send("**No current password found, creating a new password.**")
try:
if len(args) != 2:
newPassword = await context.guild.create_voice_channel(partner + " password", category = (discord.utils.get(context.guild.categories, id = settings.PASSWORD_ID)))
return await context.author.send("**Your new password is **`" + str(newPassword.id) + "`**.**")
elif args[1] == "true":
return await context.author.send("**Password deleted.**")
except: #130
return await context.author.send(settings.ERROR__INVALID_ARGS)
@tag.command(pass_context=True)
async def ban(context, *args):
if await isHubAdmin(context, *args) == False:
return
partner = args[0]
try:
await (discord.utils.get(context.guild.members, name = args[1])).add_roles(discord.utils.get(context.guild.roles, name = "Banned from " + partner))
await (discord.utils.get(context.guild.members, name = args[1])).remove_roles(discord.utils.get(context.guild.roles, name = partner))
await (discord.utils.get(context.guild.members, name = args[1])).remove_roles(discord.utils.get(context.guild.roles, name = partner + " Mod"))
except:
return await context.author.send(settings.ERROR__INVALID_ARGS)
await context.author.send("`" + args[1] + "` **has been banned from your hub.**")
@tag.command(pass_context=True) #143
async def unban(context, *args):
if await isHubAdmin(context, *args) == False:
return
partner = args[0]
try:
await (discord.utils.get(context.guild.members, name = args[1])).remove_roles(discord.utils.get(context.guild.roles, name = "Banned from " + partner))
except:
return await context.author.send(settings.ERROR__INVALID_ARGS)
await context.author.send("`" + args[1] + "` **has been unbanned from your hub.**")
@tag.command(pass_context=True)
async def mod(context, *args):
if await isHubAdmin(context, *args) == False:
return
partner = args[0]
try:
await (discord.utils.get(context.guild.members, name = args[1])).add_roles(discord.utils.get(context.guild.roles, name = partner + " Mod"))
await (discord.utils.get(context.guild.members, name = args[1])).remove_roles(discord.utils.get(context.guild.roles, name = partner))
except:
return await context.author.send(settings.ERROR__INVALID_ARGS)
await context.author.send("`" + args[1] + "` **has been added as a mod for your hub.**")
@tag.command(pass_context=True)
async def unmod(context, *args):
if await isHubAdmin(context, *args) == False:
return
partner = args[0]
try:
await (discord.utils.get(context.guild.members, name = args[1])).remove_roles(discord.utils.get(context.guild.roles, name = partner + " Mod"))
except:
return await context.author.send(settings.ERROR__INVALID_ARGS) #172
await context.author.send("`" + args[1] + "` **has been removed as a mod from your hub.**")
@tag.command(pass_context=True)
async def hide(context, *args):
if await isHubAdmin(context, *args) == False:
return
partner = args[0]
try:
channelToBeHidden = (discord.utils.get(context.guild.channels, name = args[1]))
if channelToBeHidden.category == discord.utils.get(context.guild.categories, name = partner):
await channelToBeHidden.set_permissions(discord.utils.get(context.guild.roles, name = partner), read_messages = False, read_message_history = False, connect = False)
except:
return await context.author.send(settings.ERROR__INVALID_ARGS)
await context.author.send("**Channel hidden.**")
@tag.command(pass_context=True)
async def unhide(context, *args):
if await isHubAdmin(context, *args) == False:
return
partner = args[0] #190
try:
channelToBeHidden = (discord.utils.get(context.guild.channels, name = args[1]))
if channelToBeHidden.category == discord.utils.get(context.guild.categories, name = partner):
await channelToBeHidden.set_permissions(discord.utils.get(context.guild.roles, name = partner), read_messages = True, read_message_history = True, connect = True)
except:
return await context.author.send(settings.ERROR__INVALID_ARGS)
await context.author.send("**Channel unhidden.**")
#TAG Admin Commands#
#-----------------------------------------------------------------------------#
@tag.command(pass_context=True)
async def addPartner(context, *args):
try: #200
if(await isAuthorized(context)):
await context.message.delete()
else:
return await context.author.send(settings.ERROR__NOT_TAG_ADMIN)
except:
return await context.author.send(settings.ERROR__WRONG_CHANNEL)
if not args:
return await context.author.send(settings.ERROR__INVALID_ARGS)
else:
newPartner = args[0]
if len(args) < 2 or len(args) > 3:
return await context.author.send(settings.ERROR__INVALID_ARGS)
firstAdmin = (discord.utils.get(context.guild.members, name = args[1]))
if len(args) == 3:
if args[2] == "true":
password = await context.guild.create_voice_channel(newPartner + " password", category=(discord.utils.get(context.guild.categories, id = settings.PASSWORD_ID)))
await firstAdmin.send("Your hub's password is `" + str(password.id) + "`.")
else:
await context.author.send(settings.ERROR__INVALID_ARGUMENT)
await firstAdmin.send(settings.NEW_HUB_MESSAGE)
newCategory = await context.guild.create_category(newPartner)
newAdminRole = await context.guild.create_role(name = newPartner + " Admin", color = discord.Color.red())
await firstAdmin.add_roles(newAdminRole)
newRole = await context.guild.create_role(name = newPartner, color = discord.Color.dark_blue())
newModRole = await context.guild.create_role(name = newPartner + " Mod", color = discord.Color.dark_purple())
newBanRole = await context.guild.create_role(name = "Banned from " + newPartner, color = discord.Color.greyple())
await newCategory.set_permissions(context.guild.default_role, read_messages = False, read_message_history = False, connect = False)
await newCategory.set_permissions(newAdminRole, read_messages = True, read_message_history = True, connect = True, manage_channels = True, manage_messages = True, move_members = True)
await newCategory.set_permissions(newModRole, read_messages = True, read_message_history = True, connect = True, manage_messages = True)
await newCategory.set_permissions(newRole, read_messages = True, read_message_history = True, connect = True)
await newCategory.set_permissions(newBanRole, read_messages = False, read_message_history = False, connect = False)
await context.guild.create_text_channel("General Chat", category = newCategory)
recruit = await context.guild.create_text_channel(newPartner + "-recruitment", category = (discord.utils.get(context.guild.categories, id = settings.RECRUIT_CAT_ID)))
await recruit.set_permissions(newModRole, manage_messages = True)
await recruit.set_permissions(newAdminRole, manage_messages= True)
@tag.command(pass_context=True)
async def removePartner(context, *args):
try:
if(await isAuthorized(context)):
await context.message.delete()
else:
return await context.author.send(settings.ERROR__NOT_TAG_ADMIN)
except:
return await context.author.send(settings.ERROR__WRONG_CHANNEL)
if not args:
return await context.author.send(settings.ERROR__INVALID_ARGS)
else:
partner = args[0]
if len(args) > 1: #260
return await context.author.send(settings.ERROR__INVALID_ARGS)
try:
await (discord.utils.get(context.guild.roles, name = partner)).delete()
await (discord.utils.get(context.guild.roles, name = (partner + " Admin"))).delete()
await (discord.utils.get(context.guild.roles, name = (partner + " Mod"))).delete()
await (discord.utils.get(context.guild.roles, name = ("Banned from " + partner))).delete()
except:
return await context.author.send(settings.ERROR__INVALID_HUB)
category = (discord.utils.get(context.guild.categories, name = partner))
for channel in context.guild.channels:
if channel.category == category:
await channel.delete()
try:
await (discord.utils.get(context.guild.channels, name = partner + " password")).delete()
except:
pass
try:
await (discord.utils.get(context.guild.channels, name = partner + "-recruitment")).delete()
except:
pass
return await category.delete()
#Script Start#
#-----------------------------------------------------------------------------#
tag.run(settings.BOT_TOKEN) #283
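# Illustrative command usage (prefix '?'; hub, user, and password values are placeholders):
#
#   ?join PartnerName 123456789        # join a hub; the password is only needed if one is set
#   ?leave PartnerName
#   ?createPassword PartnerName        # hub admin: create or replace the hub password
#   ?ban PartnerName SomeUser          # hub admin commands take the hub name first
#   ?addPartner PartnerName FirstAdmin true   # TAG admin: create a hub, optionally with a password
#   ?removePartner PartnerName         # TAG admin: delete the hub, its roles and channels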
avg_line_length: 37.622691 | max_line_length: 187 | alphanum_fraction: 0.644786

---

hexsha: 7950cdad367d74e03220a993eb68d992fd97d49d | size: 207 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: agent/model/AgentModel.py @ aaitor/agent (head 835ddf5037b1b6254eda57f056f54195670c17ff, licenses ["Apache-2.0"])
max_stars_count: null | stars events: null .. null
max_issues_count: null | issues events: null .. null
max_forks_count: 1 | forks events: 2019-08-28T09:19:05.000Z .. 2019-08-28T09:19:05.000Z
content:
import json
class AgentModel:
def __init__(self):
pass
def toJson(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
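# Illustrative use (the Agent subclass and the "alice" value are hypothetical):
# subclasses holding plain attributes serialise directly via toJson().
#
#   class Agent(AgentModel):
#       def __init__(self, name):
#           super().__init__()
#           self.name = name
#
#   Agent("alice").toJson()   # -> '{\n    "name": "alice"\n}'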
avg_line_length: 17.25 | max_line_length: 61 | alphanum_fraction: 0.574879

---

hexsha: 7950ce977d4c3dffa699bfa53e41f5a52a0b5dea | size: 6,015 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: recovery/redisUsersRecovery.py @ SlavomirMazurPantheon/backend (head f2e3e6e3a70a0038f706fffec411cc627a480969, licenses ["Apache-2.0"])
max_stars_count: 2 | stars events: 2020-08-19T16:44:48.000Z .. 2021-04-30T06:48:16.000Z
max_issues_count: 243 | issues events: 2018-08-21T09:12:57.000Z .. 2022-03-31T12:31:48.000Z
max_forks_count: 25 | forks events: 2018-08-21T08:45:43.000Z .. 2021-12-12T13:51:47.000Z
content:
# Copyright The IETF Trust 2021, All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Richard Zilincik'
__copyright__ = 'Copyright The IETF Trust 2021, All Rights Reserved'
__license__ = 'Apache License, Version 2.0'
__email__ = 'richard.zilincik@pantheon.tech'
import argparse
import datetime
import json
import os
import time
from redis import Redis
import utility.log as log
from utility.create_config import create_config
from utility.staticVariables import backup_date_format
from utility.util import get_list_of_backups, job_log
class ScriptConfig:
def __init__(self):
self.help = 'Save or load the users database stored on redis. An automatic backup is made' \
' before a load is performed'
config = create_config()
self.log_directory = config.get('Directory-Section', 'logs')
self.temp_dir = config.get('Directory-Section', 'temp')
self.cache_directory = config.get('Directory-Section', 'cache')
self.redis_host = config.get('DB-Section', 'redis-host')
self.redis_port = config.get('DB-Section', 'redis-port')
# self.var_yang = config.get('Directory-Section', 'var')
parser = argparse.ArgumentParser(description=self.help)
parser.add_argument('--name_save',
default=datetime.datetime.utcnow().strftime(backup_date_format),
type=str, help='Set name of the file to save. Default name is date and time in UTC')
parser.add_argument('--name_load', type=str, default='',
help='Set name of the file to load. Default will take a last saved file')
parser.add_argument('--type', default='save', type=str, choices=['save', 'load'],
help='Set whether you want to save a file or load a file. Default is save')
self.args = parser.parse_args()
self.defaults = [parser.get_default(key) for key in self.args.__dict__.keys()]
def get_args_list(self):
args_dict = {}
keys = list(self.args.__dict__.keys())
types = [type(value).__name__ for value in self.args.__dict__.values()]
for i, key in enumerate(keys):
args_dict[key] = dict(type=types[i], default=self.defaults[i])
return args_dict
def get_help(self):
ret = {}
ret['help'] = self.help
ret['options'] = {}
ret['options']['type'] = 'Set whether you want to save a file or load a file. Default is save'
ret['options']['name_load'] = 'Set name of the file to load. Default will take a last saved file'
ret['options']['name_save'] = 'Set name of the file to save. Default name is date and time in UTC'
return ret
def main(scriptConf=None):
start_time = int(time.time())
if scriptConf is None:
scriptConf = ScriptConfig()
log_directory = scriptConf.log_directory
cache_directory = scriptConf.cache_directory
temp_dir = scriptConf.temp_dir
redis_host = scriptConf.redis_host
redis_port = scriptConf.redis_port
args = scriptConf.args
backups = os.path.join(cache_directory, 'redis-users')
LOGGER = log.get_logger('recovery', os.path.join(log_directory, 'yang.log'))
LOGGER.info('Starting {} process of redis users database'.format(args.type))
if args.type == 'save':
data = {}
redis = Redis(host=redis_host, port=redis_port, db=2)
cursor = 0
while 1:
cursor, keys = redis.scan(cursor)
for key in keys:
key_type = redis.type(key).decode()
if key_type == 'string':
value = redis.get(key).decode()
elif key_type == 'set':
value = [i.decode() for i in redis.smembers(key)]
elif key_type == 'hash':
hash_table = redis.hgetall(key)
value = {hash_key.decode(): hash_table[hash_key].decode() for hash_key in hash_table}
else:
print(key_type)
assert False
data[key.decode()] = value
if cursor == 0:
break
if not os.path.isdir(backups):
os.mkdir(backups)
args.name_save += '.json'
with open(os.path.join(backups, args.name_save), 'w') as f:
json.dump(data, f)
LOGGER.info('Data saved to {} successfully'.format(args.name_save))
filename = '{} - save'.format(os.path.basename(__file__).split('.py')[0])
job_log(start_time, temp_dir, filename, status='Success')
elif args.type == 'load':
if args.name_load:
file_name = '{}.json'.format(os.path.join(backups, args.name_load))
else:
list_of_backups = get_list_of_backups(backups)
file_name = os.path.join(backups, list_of_backups[-1])
with open(file_name) as f:
data = json.load(f)
redis = Redis(host=redis_host, port=redis_port, db=2)
redis.flushdb()
for key, value in data.items():
if isinstance(value, str):
redis.set(key, value)
elif isinstance(value, list):
redis.sadd(key, *value)
elif isinstance(value, dict):
redis.hset(key, mapping=value)
LOGGER.info('Data loaded from {} successfully'.format(file_name))
LOGGER.info('Job finished successfully')
if __name__ == '__main__':
main()
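# Illustrative invocations (option names come from the argparse setup above; the
# backup name is a placeholder):
#
#   python redisUsersRecovery.py --type save --name_save nightly-backup
#   python redisUsersRecovery.py --type load --name_load nightly-backup
#   python redisUsersRecovery.py --type load    # without --name_load, the most recent backup is used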
avg_line_length: 40.641892 | max_line_length: 112 | alphanum_fraction: 0.622943

---

hexsha: 7950cf0237bff15f997ec676e381060a44dd2735 | size: 5,448 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: aiida/cmdline/commands/cmd_comment.py @ iriberri/aiida_core (head c4a1ec5dac92ee62c59d39ca580bde449f3abf73, licenses ["BSD-2-Clause"])
max_stars_count: null | stars events: null .. null
max_issues_count: null | issues events: null .. null
max_forks_count: 1 | forks events: 2018-12-21T11:10:09.000Z .. 2018-12-21T11:10:09.000Z
content:
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=superfluous-parens
"""
This allows to manage comments from command line.
"""
import click
from aiida.cmdline.commands.cmd_verdi import verdi
from aiida.cmdline.params import arguments, options
from aiida.cmdline.utils import decorators, echo, multi_line_input
@verdi.group('comment')
def verdi_comment():
"""Inspect, create and manage comments."""
pass
@verdi_comment.command()
@click.option('--comment', '-c', type=str, required=False)
@arguments.NODES(required=True)
@decorators.with_dbenv()
def add(comment, nodes):
"""
Add comment to one or more nodes in the database
"""
from aiida.orm.backend import construct_backend
backend = construct_backend()
user = backend.users.get_automatic_user()
if not comment:
comment = multi_line_input.edit_comment()
for node in nodes:
node.add_comment(comment, user)
echo.echo_info("Comment added to node(s) '{}'".format(", ".join([str(node.pk) for node in nodes])))
@verdi_comment.command()
@options.USER()
@arguments.NODES()
@decorators.with_dbenv()
def show(user, nodes):
"""
Show the comments of (a) node(s) in the database
"""
for node in nodes:
all_comments = node.get_comments()
if user is not None:
to_print = [i for i in all_comments if i['user__email'] == user.email]
if not to_print:
valid_users = ", ".join(set(["'" + i['user__email'] + "'" for i in all_comments]))
echo.echo_info("Nothing found for user '{}'.\n"
"Valid users found for Node {} are: {}.".format(user, node.pk, valid_users))
else:
to_print = all_comments
for i in to_print:
comment_msg = [
"***********************************************************", "Comment of '{}' on {}".format(
i['user__email'], i['ctime'].strftime("%Y-%m-%d %H:%M")), "PK {} ID {}. Last modified on {}".format(
node.pk, i['pk'], i['mtime'].strftime("%Y-%m-%d %H:%M")), "", "{}".format(i['content']), ""
]
echo.echo_info("\n".join(comment_msg))
# If there is nothing to print, print a message
if not to_print:
echo.echo_info("No comments found.")
@verdi_comment.command()
@click.option(
'--all',
'-a',
'remove_all',
default=False,
is_flag=True,
help='If used, deletes all the comments of the active user attached to the node')
@options.FORCE()
@arguments.NODE()
@click.argument('comment_id', type=int, required=False, metavar='COMMENT_ID')
@decorators.with_dbenv()
def remove(remove_all, force, node, comment_id):
"""
Remove comment(s) of a node. The user can only remove their own comments.
pk = The pk (an integer) of the node
id = #ID of the comment to be removed from node #PK
"""
# Note: in fact, the user can still manually delete any comment
from aiida.orm.backend import construct_backend
backend = construct_backend()
user = backend.users.get_automatic_user()
if comment_id is None and not remove_all:
echo.echo_error("One argument between -a and ID must be provided")
return 101
if comment_id is not None and remove_all:
echo.echo_error("Cannot use -a together with a comment id")
return 102
if remove_all:
comment_id = None
if not force:
if remove_all:
click.confirm("Delete all comments of user {} on node <{}>? ".format(user, node.pk), abort=True)
else:
click.confirm("Delete comment? ", abort=True)
comments = node.get_comment_obj(comment_id=comment_id, user=user)
for comment in comments:
comment.delete()
echo.echo_info("Deleted {} comments.".format(len(comments)))
return 0
@verdi_comment.command()
@click.option('--comment', '-c', type=str, required=False)
@arguments.NODE()
@click.argument('comment_id', type=int, metavar='COMMENT_ID')
@decorators.with_dbenv()
def update(comment, node, comment_id):
"""
Update a comment.
id = The id of the comment
comment = The comment (a string) to be added to the node(s)
"""
from aiida.orm.backend import construct_backend
backend = construct_backend()
user = backend.users.get_automatic_user()
# read the comment from terminal if it is not on command line
if comment is None:
try:
current_comment = node.get_comments(comment_id)[0]
except IndexError:
echo.echo_error("Comment with id '{}' not found".format(comment_id))
return 1
comment = multi_line_input.edit_comment(current_comment['content'])
# pylint: disable=protected-access
node._update_comment(comment, comment_id, user)
return 0
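# Illustrative CLI usage (node PK 1234 and comment ID 5 are placeholders):
#
#   verdi comment add --comment "first run looks fine" 1234
#   verdi comment show 1234
#   verdi comment update --comment "corrected" 1234 5
#   verdi comment remove --force 1234 5
#   verdi comment remove --all 1234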
avg_line_length: 33.219512 | max_line_length: 120 | alphanum_fraction: 0.598201

---

hexsha: 7950cf5396b543953cb295e0133641d75bd4d635 | size: 1,541 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: CMU/hand_labels/normdat.py @ naoc-1861355/Public_Handpose_datasets (head a0119226859ffad64bd7ceac8f8b4b67d2aebf8b, licenses ["MIT"])
max_stars_count: 1 | stars events: 2022-01-11T03:43:17.000Z .. 2022-01-11T03:43:17.000Z
max_issues_count: null | issues events: null .. null
max_forks_count: null | forks events: null .. null
content:
import json
import os.path
import cv2
import numpy as np
from utils import generate_json_2d
def normdat(outpath):
"""
normdat converts this dataset to the standard ezxr format output
Args:
:param outpath : root output path of the formatted files
Returns:
:return: None
"""
# Input data paths
# paths = ['synth1/', 'synth2/', 'synth3/', 'synth4/']
paths = ['manual_test/', 'manual_train/']
inpath = paths[0]
outpath = outpath + inpath
if not os.path.isdir(outpath):
os.makedirs(outpath)
files = sorted([f for f in os.listdir(inpath) if f.endswith('.json')])
for f in files:
with open(inpath + f, 'r') as fid:
dat = json.load(fid)
pts = np.array(dat['hand_pts'], dtype=float)
# Left hands are marked, but otherwise follow the same point order
is_left = dat['is_left']
# find bounding point for each img (hand)
x_min = min(pts[:, 0])
x_max = max(pts[:, 0])
y_min = min(pts[:, 1])
y_max = max(pts[:, 1])
hand_bbox = [x_min, x_max, y_min, y_max]
dict_kp = generate_json_2d(pts, hand_bbox, is_left)
# copy and dump .jpg and .json
img = cv2.imread(inpath + f[0:-5] + '.jpg')
cv2.imwrite(outpath + f[0:-5] + '.jpg', img)
with open(outpath + f[0:-5] + '.json', 'w') as outfile:
json.dump(dict_kp, outfile)
def main():
outpath = './data/'
normdat(outpath)
if __name__ == '__main__':
main()
avg_line_length: 25.683333 | max_line_length: 82 | alphanum_fraction: 0.579494

---

hexsha: 7950d0557725ab94ef9754a4d635e1267072ebc8 | size: 825 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: research/attention_ocr/python/datasets/__init__.py @ udaylunawat/models (head a07de068e5fe56d0b2bd3c155844b35954c180a3, licenses ["Apache-2.0"])
max_stars_count: null | stars events: null .. null
max_issues_count: null | issues events: null .. null
max_forks_count: null | forks events: null .. null
content:
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from datasets import fsns
from datasets import fsns_test
from datasets import number_plates
__all__ = [fsns, fsns_test, number_plates]
avg_line_length: 39.285714 | max_line_length: 80 | alphanum_fraction: 0.701818

---

hexsha: 7950d08eb03fa8769a6f5ab780ffdfeae10cffb9 | size: 1,521 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: app/auth/views.py @ 01king-ori/Kingsblog (head 624a0e738f5ff8728bbcc327c5e6dd4144901e3f, licenses ["MIT"])
max_stars_count: null | stars events: null .. null
max_issues_count: null | issues events: null .. null
max_forks_count: null | forks events: null .. null
content:
from flask import render_template, redirect, url_for, flash, request
from flask_login import login_user,logout_user,login_required
from . import auth
from ..models import User
from .forms import LoginForm, RegistrationForm
from .. import db
from ..email import mail_message
@auth.route('/login', methods=['GET', 'POST'])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user = User.query.filter_by(email=login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user, login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.home'))
flash('Invalid username or Password')
title = "Welcome to the BLOG"
return render_template('auth/login.html', login_form=login_form, title=title)
@auth.route('/register', methods=["GET", "POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data, username=form.username.data, password=form.password.data)
db.session.add(user)
db.session.commit()
mail_message("Welcome to the BLOG","email/subscriber",user.email,user=user)
return redirect(url_for('auth.login'))
title = "New Account"
return render_template('auth/register.html', registration_form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
avg_line_length: 31.040816 | max_line_length: 100 | alphanum_fraction: 0.692965

---

hexsha: 7950d0bcd2bbd793375dfb268d3ce08907f6e4bd | size: 8,701 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: pdns_auth_tsigkey.py @ massonpj/ansible-pdns-auth-api (head 7cd6389e28a4eb359998669a92f075ee402ccced, licenses ["Apache-2.0"])
max_stars_count: null | stars events: null .. null
max_issues_count: null | issues events: null .. null
max_forks_count: 1 | forks events: 2020-08-24T11:07:46.000Z .. 2020-08-24T11:07:46.000Z
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Kevin P. Fleming <kevin@km6g.us>
# Apache License 2.0 (see LICENSE)
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
%YAML 1.2
---
module: pdns_auth_tsigkey
short_description: Manages a TSIG key in a PowerDNS Authoritative server
description:
- This module allows a task to manage the presence and content
of a TSIG key in a PowerDNS Authoritative server.
requirements:
- bravado
options:
state:
description:
- If C(present) the key will be created if necessary; if it
already exists, its configuration will be updated to match
the provided attributes.
- If C(absent) the key will be removed if it exists.
- If C(exists) the key's existence will be checked, but it
will not be modified.
choices: [ 'present', 'absent', 'exists' ]
type: str
required: false
default: 'present'
name:
description:
- Name of the key to be managed.
type: str
required: true
server_id:
description:
- ID of the server instance which holds the key.
type: str
required: false
default: 'localhost'
api_url:
description:
- URL of the API endpoint in the server.
type: str
required: false
default: 'http://localhost:8081'
api_key:
description:
- Key (token) used to authenticate to the API endpoint in the server.
type: str
required: true
api_spec_file:
description:
- Path to a file containing the OpenAPI (Swagger) specification for the
API version implemented by the server.
type: path
required: true
algorithm:
description:
- The message digest algorithm, as specified by RFC 2845 and its updates,
which will be used to validate requests including this key.
- Required when C(state) is C(present).
choices: [ 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384', 'hmac-sha512' ]
type: str
required: false
default: 'hmac-md5'
key:
description:
- The base-64 encoded key value.
type: str
author:
- Kevin P. Fleming (@kpfleming)
"""
EXAMPLES = """
%YAML 1.2
---
# create and populate a file which holds the API specification
- name: temp file to hold spec
tempfile:
state: file
suffix: '.json'
register: temp_file
- name: populate spec file
copy:
src: api-swagger.json
dest: "{{ temp_file.path }}"
- name: check that key exists
pdns_auth_tsigkey:
name: key1
state: exists
api_key: 'foobar'
api_spec_file: "{{ temp_file.path }}"
- name: create key with default algorithm
pdns_auth_tsigkey:
name: key2
state: present
api_key: 'foobar'
api_spec_file: "{{ temp_file.path }}"
- name: remove key
pdns_auth_tsigkey:
name: key2
state: absent
api_key: 'foobar'
api_spec_file: "{{ temp_file.path }}"
- name: create key with algorithm and content
pdns_auth_tsigkey:
name: key3
state: present
api_key: 'foobar'
api_spec_file: "{{ temp_file.path }}"
algorithm: hmac-sha256
key: '+8fQxgYhf5PVGPKclKnk8ReujIfWXOw/aEzzPPhDi6AGagpg/r954FPZdzgFfUjnmjMSA1Yu7vo6DQHVoGnRkw=='
"""
RETURN = """
%YAML 1.2
---
key:
description: Information about the key
returned: always
type: complex
contains:
name:
description: Name
returned: always
type: str
exists:
description: Indicate whether the key exists
returned: always
type: bool
algorithm:
description:
- The message digest algorithm, as specified by RFC 2845 and its updates,
which will be used to validate requests including this key.
returned: always
type: str
key:
description:
- The base-64 encoded key value.
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule
from urllib.parse import urlparse
def main():
module_args = {
"state": {
"type": "str",
"default": "present",
"choices": ["present", "absent", "exists"],
},
"name": {"type": "str", "required": True,},
"server_id": {"type": "str", "default": "localhost",},
"api_url": {"type": "str", "default": "http://localhost:8081",},
"api_key": {"type": "str", "required": True, "no_log": True,},
"api_spec_file": {"type": "path", "required": True,},
"algorithm": {
"type": "str",
"default": "hmac-md5",
"choices": [
"hmac-md5",
"hmac-sha1",
"hmac-sha224",
"hmac-sha256",
"hmac-sha384",
"hmac-sha512",
],
},
"key": {"type": "str"},
}
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
try:
from bravado.requests_client import RequestsClient
from bravado.client import SwaggerClient
from bravado.swagger_model import load_file
except ImportError:
module.fail_json(
msg="The pdns_auth_tsigkey module requires the 'bravado' package."
)
result = {
"changed": False,
}
state = module.params["state"]
server_id = module.params["server_id"]
key = module.params["name"]
if module.check_mode:
module.exit_json(**result)
url = urlparse(module.params["api_url"])
http_client = RequestsClient()
http_client.set_api_key(
url.netloc, module.params["api_key"], param_name="X-API-Key", param_in="header"
)
spec = load_file(module.params["api_spec_file"])
spec["host"] = url.netloc
spec["schemes"] = [url.scheme]
api = SwaggerClient.from_spec(spec, http_client=http_client)
result["key"] = {"name": key, "exists": False}
# first step is to get information about the key, if it exists
# this is required to translate the user-friendly key name into
# the key_id required for subsequent API calls
partial_key_info = [
k
for k in api.tsigkey.listTSIGKeys(server_id=server_id).result()
if k["name"] == key
]
if len(partial_key_info) == 0:
if (state == "exists") or (state == "absent"):
# exit as there is nothing left to do
module.exit_json(**result)
else:
# state must be 'present'
key_id = None
else:
# get the full key info and populate the result dict
key_id = partial_key_info[0]["id"]
key_info = api.tsigkey.getTSIGKey(
server_id=server_id, tsigkey_id=key_id
).result()
result["key"]["exists"] = True
result["key"]["algorithm"] = key_info["algorithm"]
result["key"]["key"] = key_info["key"]
# if only an existence check was requested,
# the operation is complete
if state == "exists":
module.exit_json(**result)
# if absence was requested, remove the key and exit
if state == "absent":
api.tsigkey.deleteTSIGKey(server_id=server_id, tsigkey_id=key_id).result()
result["changed"] = True
module.exit_json(**result)
# state must be 'present'
if not key_id:
# create the requested key
key_struct = {
"name": key,
"algorithm": module.params["algorithm"],
}
if module.params["key"]:
key_struct["key"] = module.params["key"]
key_info = api.tsigkey.createTSIGKey(
server_id=server_id, tsigkey=key_struct
).result()
result["changed"] = True
result["key"]["exists"] = True
result["key"]["algorithm"] = key_info["algorithm"]
result["key"]["key"] = key_info["key"]
else:
# compare the key's attributes to the provided
# options and update it if necessary
key_struct = {}
if module.params["algorithm"]:
if module.params["algorithm"] != key_info["algorithm"]:
key_struct["algorithm"] = module.params["algorithm"]
if module.params["key"]:
if module.params["key"] != key_info["key"]:
key_struct["key"] = module.params["key"]
if len(key_struct):
key_info = api.tsigkey.putTSIGKey(
server_id=server_id, tsigkey_id=key_id, tsigkey=key_struct
).result()
result["changed"] = True
if result["changed"]:
result["key"]["algorithm"] = key_info["algorithm"]
result["key"]["key"] = key_info["key"]
module.exit_json(**result)
if __name__ == "__main__":
main()
avg_line_length: 27.710191 | max_line_length: 101 | alphanum_fraction: 0.604643

---

hexsha: 7950d15e58f29bc7fd9a2365bf7ea9e1c0bf291f | size: 3,495 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: tests/test_repr.py @ interrogator/loguru (head 892ca5ef415af000a5fe77f3632f3903da46b39f, licenses ["MIT"])
max_stars_count: null | stars events: null .. null
max_issues_count: null | issues events: null .. null
max_forks_count: null | forks events: null .. null
content:
from loguru import logger
import logging
import sys
import pathlib
import re
def test_no_handler():
assert repr(logger) == "<loguru.logger handlers=[]>"
def test_stderr():
logger.add(sys.__stderr__)
assert repr(logger) == "<loguru.logger handlers=[(id=0, level=10, sink=<stderr>)]>"
def test_stdout():
logger.add(sys.__stdout__)
assert repr(logger) == "<loguru.logger handlers=[(id=0, level=10, sink=<stdout>)]>"
def test_file_object(tmpdir):
path = str(tmpdir.join("test.log"))
file = open(path, "w")
logger.add(file)
assert repr(logger) == "<loguru.logger handlers=[(id=0, level=10, sink=%s)]>" % path
def test_file_str(tmpdir):
path = str(tmpdir.join("test.log"))
logger.add(path)
assert repr(logger) == "<loguru.logger handlers=[(id=0, level=10, sink=%s)]>" % path
def test_file_pathlib(tmpdir):
path = str(tmpdir.join("test.log"))
logger.add(pathlib.Path(path))
assert repr(logger) == "<loguru.logger handlers=[(id=0, level=10, sink=%s)]>" % path
def test_stream_object():
class MyStream:
def __init__(self, name):
self.name = name
def write(self, m):
pass
def __repr__(self):
return "MyStream()"
logger.add(MyStream("<foobar>"))
assert repr(logger) == "<loguru.logger handlers=[(id=0, level=10, sink=<foobar>)]>"
def test_stream_object_without_name_attr():
class MyStream:
def write(self, m):
pass
def __repr__(self):
return "MyStream()"
logger.add(MyStream())
assert repr(logger) == "<loguru.logger handlers=[(id=0, level=10, sink=MyStream())]>"
def test_function():
def my_function(message):
pass
logger.add(my_function)
assert repr(logger) == "<loguru.logger handlers=[(id=0, level=10, sink=my_function)]>"
def test_function_without_name():
class Function:
def __call__(self, message):
pass
def __repr__(self):
return "<Function>"
def __getattr__(self, name):
if name == "__name__":
raise AttributeError
return getattr(self.__class__, name)
function = Function()
logger.add(function)
assert repr(logger) == "<loguru.logger handlers=[(id=0, level=10, sink=<Function>)]>"
def test_standard_handler():
handler = logging.StreamHandler(sys.__stderr__)
logger.add(handler)
if sys.version_info >= (3, 6):
r = "<loguru.logger handlers=[(id=0, level=10, sink=<StreamHandler <stderr> (NOTSET)>)]>"
assert repr(logger) == r
else:
r = r"<loguru\.logger handlers=\[\(id=0, level=10, sink=<logging\.StreamHandler .*>\)\]>"
assert re.match(r, repr(logger))
def test_multiple_handlers():
logger.add(sys.__stdout__)
logger.add(sys.__stderr__)
r = "<loguru.logger handlers=[(id=0, level=10, sink=<stdout>), (id=1, level=10, sink=<stderr>)]>"
assert repr(logger) == r
def test_handler_removed():
i = logger.add(sys.__stdout__)
logger.add(sys.__stderr__)
logger.remove(i)
assert repr(logger) == "<loguru.logger handlers=[(id=1, level=10, sink=<stderr>)]>"
def test_handler_level_name():
logger.add(sys.__stderr__, level="TRACE")
assert repr(logger) == "<loguru.logger handlers=[(id=0, level=5, sink=<stderr>)]>"
def test_handler_level_num():
logger.add(sys.__stderr__, level=33)
assert repr(logger) == "<loguru.logger handlers=[(id=0, level=33, sink=<stderr>)]>"
avg_line_length: 27.519685 | max_line_length: 101 | alphanum_fraction: 0.629757

---

hexsha: 7950d1e65a20728de269d38416a20a07bda70281 | size: 11,425 | ext: py | lang: Python
max_stars / max_issues / max_forks repo: aiida/tools/data/array/kpoints/__init__.py @ joepvd/aiida_core (head 6e9711046753332933f982971db1d7ac7e7ade58, licenses ["BSD-2-Clause"])
max_stars_count: null | stars events: null .. null
max_issues_count: null | issues events: null .. null
max_forks_count: null | forks events: null .. null
content:
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from aiida.orm.data.array.kpoints import KpointsData
from aiida.orm.data.parameter import ParameterData
from aiida.tools.data.array.kpoints import legacy
from aiida.tools.data.array.kpoints import seekpath
__all__ = ['get_kpoints_path', 'get_explicit_kpoints_path']
def get_kpoints_path(structure, method='seekpath', **kwargs):
"""
Returns a dictionary whose contents depend on the method but includes at least the following keys
* parameters: ParameterData node
The contents of the parameters depends on the method but contains at least the keys
* 'point_coords': a dictionary with 'kpoints-label': [float coordinates]
* 'path': a list of length-2 tuples, with the labels of the starting
and ending point of each label section
The 'seekpath' method which is the default also returns the following additional nodes
* primitive_structure: StructureData with the primitive cell
* conv_structure: StructureData with the conventional cell
Note that the generated kpoints for the seekpath method only apply on the returned primitive_structure
and not on the input structure that was provided
:param structure: a StructureData node
:param method: the method to use for kpoint generation, options are 'seekpath' and 'legacy'.
It is strongly advised to use the default 'seekpath' as the 'legacy' implementation is known to have
bugs for certain structure cells
:param kwargs: optional keyword arguments that depend on the selected method
:returns: dictionary as described above in the docstring
"""
if method not in _get_kpoints_path_methods.keys():
raise ValueError("the method '{}' is not implemented".format(method))
if method == 'seekpath':
try:
seekpath.check_seekpath_is_installed()
except ImportError as exception:
raise ValueError("selected method is 'seekpath' but the package is not installed\n"
"Either install it or pass method='legacy' as input to the function call")
method = _get_kpoints_path_methods[method]
return method(structure, **kwargs)
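# Illustrative use (assumes an existing StructureData node bound to `structure`;
# the specific labels shown are examples only):
#
#   result = get_kpoints_path(structure, method='seekpath')
#   params = result['parameters'].get_dict()
#   params['path']          # e.g. [('GAMMA', 'X'), ('X', 'M'), ...]
#   params['point_coords']  # e.g. {'GAMMA': [0.0, 0.0, 0.0], ...}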
def get_explicit_kpoints_path(structure, method='seekpath', **kwargs):
"""
Returns a dictionary whose contents depend on the method but includes at least the following keys
* parameters: ParameterData node
* explicit_kpoints: KpointsData node with explicit kpoints path
The contents of the parameters depends on the method but contains at least the keys
* 'point_coords': a dictionary with 'kpoints-label': [float coordinates]
* 'path': a list of length-2 tuples, with the labels of the starting
and ending point of each label section
The 'seekpath' method which is the default also returns the following additional nodes
* primitive_structure: StructureData with the primitive cell
* conv_structure: StructureData with the conventional cell
Note that the generated kpoints for the seekpath method only apply on the returned primitive_structure
and not on the input structure that was provided
:param structure: a StructureData node
:param method: the method to use for kpoint generation, options are 'seekpath' and 'legacy'.
It is strongly advised to use the default 'seekpath' as the 'legacy' implementation is known to have
bugs for certain structure cells
:param kwargs: optional keyword arguments that depend on the selected method
:returns: dictionary as described above in the docstring
"""
if method not in _get_explicit_kpoints_path_methods.keys():
raise ValueError("the method '{}' is not implemented".format(method))
if method == 'seekpath':
try:
seekpath.check_seekpath_is_installed()
except ImportError as exception:
raise ValueError("selected method is 'seekpath' but the package is not installed\n"
"Either install it or pass method='legacy' as input to the function call")
method = _get_explicit_kpoints_path_methods[method]
return method(structure, **kwargs)
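# Hedged usage sketch (not part of the original module): as above, but the
# result additionally carries an explicit kpoints node:
#
#     result = get_explicit_kpoints_path(structure)
#     kpoints = result['explicit_kpoints']   # KpointsData with labelled kpoints
#     labels = kpoints.labels                # e.g. [(0, 'GAMMA'), (20, 'X'), ...]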
def _seekpath_get_kpoints_path(structure, **kwargs):
"""
Call the get_kpoints_path wrapper function for Seekpath
:param structure: a StructureData node
:param with_time_reversal: if False, and the group has no inversion
symmetry, additional lines are returned
:param recipe: choose the reference publication that defines the special points and paths.
Currently, the following value is implemented:
- ``hpkot``: HPKOT paper:
Y. Hinuma, G. Pizzi, Y. Kumagai, F. Oba, I. Tanaka, Band structure
diagram paths based on crystallography, Comp. Mat. Sci. 128, 140 (2017).
DOI: 10.1016/j.commatsci.2016.10.015
:param threshold: the threshold to use to verify if we are in
an edge case (e.g., a tetragonal cell, but ``a==c``). For instance,
in the tI lattice, if ``abs(a-c) < threshold``, a
:py:exc:`~seekpath.hpkot.EdgeCaseWarning` is issued.
Note that depending on the bravais lattice, the meaning of the
threshold is different (angle, length, ...)
:param symprec: the symmetry precision used internally by SPGLIB
:param angle_tolerance: the angle_tolerance used internally by SPGLIB
"""
assert structure.pbc == (True, True, True), 'Seekpath only implemented for three-dimensional structures'
recognized_args = ['with_time_reversal', 'recipe', 'threshold', 'symprec', 'angle_tolerance']
unknown_args = set(kwargs).difference(recognized_args)
if unknown_args:
raise ValueError("unknown arguments {}".format(unknown_args))
return seekpath.get_kpoints_path(structure, kwargs)
def _seekpath_get_explicit_kpoints_path(structure, **kwargs):
"""
Call the get_explicit_kpoints_path wrapper function for Seekpath
:param structure: a StructureData node
:param with_time_reversal: if False, and the group has no inversion
symmetry, additional lines are returned
:param reference_distance: a reference target distance between neighboring
k-points in the path, in units of 1/ang. The actual value will be as
close as possible to this value, to have an integer number of points in
each path
:param recipe: choose the reference publication that defines the special points and paths.
Currently, the following value is implemented:
- ``hpkot``: HPKOT paper:
Y. Hinuma, G. Pizzi, Y. Kumagai, F. Oba, I. Tanaka, Band structure
diagram paths based on crystallography, Comp. Mat. Sci. 128, 140 (2017).
DOI: 10.1016/j.commatsci.2016.10.015
:param threshold: the threshold to use to verify if we are in
an edge case (e.g., a tetragonal cell, but ``a==c``). For instance,
in the tI lattice, if ``abs(a-c) < threshold``, a
:py:exc:`~seekpath.hpkot.EdgeCaseWarning` is issued.
Note that depending on the bravais lattice, the meaning of the
threshold is different (angle, length, ...)
:param symprec: the symmetry precision used internally by SPGLIB
:param angle_tolerance: the angle_tolerance used internally by SPGLIB
"""
assert structure.pbc == (True, True, True), 'Seekpath only implemented for three-dimensional structures'
recognized_args = ['with_time_reversal', 'reference_distance', 'recipe', 'threshold', 'symprec', 'angle_tolerance']
unknown_args = set(kwargs).difference(recognized_args)
if unknown_args:
raise ValueError("unknown arguments {}".format(unknown_args))
return seekpath.get_explicit_kpoints_path(structure, kwargs)
def _legacy_get_kpoints_path(structure, **kwargs):
"""
Call the get_kpoints_path of the legacy implementation
:param structure: a StructureData node
:param bool cartesian: if set to true, reads the coordinates eventually passed in value as cartesian coordinates
:param epsilon_length: threshold on lengths comparison, used to get the bravais lattice info
:param epsilon_angle: threshold on angles comparison, used to get the bravais lattice info
"""
args_recognized = ['cartesian', 'epsilon_length', 'epsilon_angle']
args_unknown = set(kwargs).difference(args_recognized)
if args_unknown:
raise ValueError("unknown arguments {}".format(args_unknown))
point_coords, path, bravais_info = legacy.get_kpoints_path(
cell=structure.cell, pbc=structure.pbc, **kwargs
)
parameters = {
'bravais_info': bravais_info,
'point_coords': point_coords,
'path': path,
}
return {'parameters': ParameterData(dict=parameters)}
def _legacy_get_explicit_kpoints_path(structure, **kwargs):
"""
Call the get_explicit_kpoints_path of the legacy implementation
:param structure: a StructureData node
:param float kpoint_distance: parameter controlling the distance between kpoints. Distance is
given in crystal coordinates, i.e. the distance is computed in the space of b1, b2, b3.
The distance set will be the closest possible to this value, compatible with the requirement
of putting equispaced points between two special points (since extrema are included).
:param bool cartesian: if set to true, reads the coordinates eventually passed in value as cartesian coordinates
:param float epsilon_length: threshold on lengths comparison, used to get the bravais lattice info
:param float epsilon_angle: threshold on angles comparison, used to get the bravais lattice info
"""
args_recognized = ['value', 'kpoint_distance', 'cartesian', 'epsilon_length', 'epsilon_angle']
args_unknown = set(kwargs).difference(args_recognized)
if args_unknown:
raise ValueError("unknown arguments {}".format(args_unknown))
point_coords, path, bravais_info, explicit_kpoints, labels = legacy.get_explicit_kpoints_path(
cell=structure.cell, pbc=structure.pbc, **kwargs
)
kpoints = KpointsData()
kpoints.set_cell(structure.cell)
kpoints.set_kpoints(explicit_kpoints)
kpoints.labels = labels
parameters = {
'bravais_info': bravais_info,
'point_coords': point_coords,
'path': path,
}
return {
'parameters': ParameterData(dict=parameters),
'explicit_kpoints': kpoints
}
_get_kpoints_path_methods = {
'legacy': _legacy_get_kpoints_path,
'seekpath': _seekpath_get_kpoints_path,
}
_get_explicit_kpoints_path_methods = {
'legacy': _legacy_get_explicit_kpoints_path,
'seekpath': _seekpath_get_explicit_kpoints_path,
}
| 44.803922
| 119
| 0.698818
|
7950d29a64dbb28b60bf8229788687f93a122298
| 11
|
py
|
Python
|
src/test/resources/expressions/enclosure/display/list.py
|
oxisto/reticulated-python
|
a38c8bd9c842be4f4c8ddc73c61c70aeceb07248
|
[
"Apache-2.0"
] | 3
|
2019-11-23T10:19:43.000Z
|
2021-03-19T03:18:30.000Z
|
src/test/resources/expressions/enclosure/display/list.py
|
oxisto/reticulated-python
|
a38c8bd9c842be4f4c8ddc73c61c70aeceb07248
|
[
"Apache-2.0"
] | 46
|
2019-11-23T12:11:52.000Z
|
2022-03-07T13:39:12.000Z
|
src/test/resources/expressions/enclosure/display/list.py
|
oxisto/reticulated-python
|
a38c8bd9c842be4f4c8ddc73c61c70aeceb07248
|
[
"Apache-2.0"
] | 3
|
2020-03-02T13:48:45.000Z
|
2020-03-06T09:33:25.000Z
|
[1, ['a']]
| 5.5
| 10
| 0.181818
|
7950d36494c5876ecc143027629021ed60c36553
| 4,913
|
py
|
Python
|
tilt/wlt/wallet.py
|
inc/tilt
|
2d5e9040cc28e325ae3365f04d3c89c402ebba0f
|
[
"BSD-1-Clause"
] | 1
|
2021-04-13T11:08:42.000Z
|
2021-04-13T11:08:42.000Z
|
tilt/wlt/wallet.py
|
inc/tilt
|
2d5e9040cc28e325ae3365f04d3c89c402ebba0f
|
[
"BSD-1-Clause"
] | null | null | null |
tilt/wlt/wallet.py
|
inc/tilt
|
2d5e9040cc28e325ae3365f04d3c89c402ebba0f
|
[
"BSD-1-Clause"
] | 2
|
2021-04-13T11:09:00.000Z
|
2021-04-25T14:09:06.000Z
|
#!/bin/python3
#
# Tilt - Wallet Manager
# Copyright (c) 2021 Lone Dynamics Corporation. All rights reserved.
#
import json
import os
import time
import glob
import logging
import sys
import pprint
import zipfile
import bitcoinlib
from bitcoinlib.keys import Key
from cryptography.fernet import Fernet
import tilt.utils as tilt
class WalletManager:
def __init__(self):
tilt.setup()
self.tiltdir = os.path.expanduser("~/.tilt")
self.walletdir = self.tiltdir + '/wallet'
wallet_key = tilt.get_config("wallet_key")
self.fernet = Fernet(wallet_key)
return
# create a new address/key pair, return the new address
def create(self, currency, meta={}, label=None, unused=False):
k = Key(network=self.currency_to_network(currency))
address = k.address()
wif_plain_bytes = k.wif().encode('utf-8')
wif_cipher_bytes = self.fernet.encrypt(wif_plain_bytes).decode('utf-8')
r = {
'currency': currency,
'address': address,
'cipher_wif': wif_cipher_bytes,
'meta': meta,
'label': label,
'unused': unused,
'ts': int(time.time())
}
fn = self.walletdir + "/" + currency + "." + address + ".json"
with open(os.open(fn, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f:
f.write(json.dumps(r))
return address
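# Hedged example (not part of the original class): generating the Fernet key
# expected under the "wallet_key" config entry, then round-tripping one address:
#
#     from cryptography.fernet import Fernet
#     print(Fernet.generate_key().decode())   # store this as the wallet_key config value
#
#     wm = WalletManager()
#     addr = wm.create('tbtc', label='example')
#     print(wm.get('tbtc', addr)['plain_wif'])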
# create new unused address/key pairs, return the new addresses
def create_unused(self, currency, quantity=1):
addrs = []
for i in range(quantity):
addrs.append(self.create(currency, unused=True))
return addrs
def load(self, filename):
with open(filename, "r") as f:
return json.loads(f.read())
def decrypt(self, filename):
w = self.load(filename)
wif_cipher_bytes = w['cipher_wif'].encode('utf-8')
w['plain_wif'] = self.fernet.decrypt(wif_cipher_bytes).decode('utf-8')
del w['cipher_wif']
return w
def exists(self, currency, address):
fn = self.walletdir + "/" + currency + "." + address + ".json"
return os.path.isfile(fn)
def get(self, currency, address):
fn = self.walletdir + "/" + currency + "." + address + ".json"
return self.decrypt(fn)
def show(self, currency, address):
pp = pprint.PrettyPrinter()
pp.pprint(self.get(currency, address))
def list(self, currency, show_labels=False, show_balances=False,
show_unused=False, confs=6):
if currency:
fn = self.walletdir + "/" + currency + ".*.json"
else:
fn = self.walletdir + "/*.json"
balances = {}
if show_balances:
res = tilt.balances(confs)
balances = res['balances']
files = list(glob.iglob(fn))
files.sort(key=os.path.getmtime)
for fn in files:
fs = os.path.basename(fn).split('.')
with open(fn) as f:
w = json.loads(f.read())
if 'unused' in w and w['unused'] and not show_unused:
continue
print(fs[0], "\t", fs[1], end='')
if fs[0] in balances and fs[1] in balances[fs[0]]:
balance = balances[fs[0]][fs[1]]
else:
balance = 0
if show_balances:
print("\t", balance, end='')
if show_labels:
label = ''
if 'label' in w and w['label']: label = w['label']
print("\t", label, end='')
print('', flush=True)
def freeze(self):
zfn = 'tilt-freeze-' + str(int(time.time())) + '.zip'
with zipfile.ZipFile(zfn, 'w') as z:
files = list(glob.iglob(self.walletdir + '/*.json'))
for f in files:
an = os.path.basename(f)
plainjson = json.dumps(self.decrypt(f))
z.writestr('wallet/' + an, plainjson)
logging.info("froze %s files" % len(files))
def destroy(self, zfn):
logging.warning("about to permanently delete every file in" \
" ~/.tilt/wallet that also exists in " + zfn + \
"; type 'yes' to proceed:")
res = input()
if res != "yes":
print("aborted")
return
with zipfile.ZipFile(zfn, 'r') as z:
files = z.namelist()
for f in files:
print("deleting", f)
def currency_to_network(self, currency):
if currency == "btc": return "bitcoin"
if currency == "tbtc": return "testnet"
if currency == "ltc": return "litecoin"
if currency == "tltc": return "litecoin_testnet"
if currency == "doge": return "dogecoin"
if currency == "tdoge": return "dogecoin_testnet"
raise Exception("unsupported currency", currency)
| 30.515528
| 79
| 0.549766
|
7950d391f7996ef25b858ff1e5ca9cbc483d6071
| 16,337
|
py
|
Python
|
testing/scripts/run_android_wpt.py
|
mduclehcm/react-native-skia
|
de1ede5332ed66f731be4389cd625f95c32e2733
|
[
"MIT"
] | 643
|
2021-08-02T05:04:20.000Z
|
2022-03-27T22:56:02.000Z
|
testing/scripts/run_android_wpt.py
|
mduclehcm/react-native-skia
|
de1ede5332ed66f731be4389cd625f95c32e2733
|
[
"MIT"
] | 18
|
2021-05-13T05:53:06.000Z
|
2022-03-31T21:24:25.000Z
|
testing/scripts/run_android_wpt.py
|
mduclehcm/react-native-skia
|
de1ede5332ed66f731be4389cd625f95c32e2733
|
[
"MIT"
] | 16
|
2021-08-31T07:08:45.000Z
|
2022-02-14T12:36:15.000Z
|
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Web Platform Tests (WPT) on Android browsers.
This script supports running tests on the Chromium Waterfall by mapping isolated
script flags to WPT flags.
It is also useful for local reproduction by performing APK installation and
configuring the browser to resolve test hosts. Be sure to invoke this
executable directly rather than using python run_android_wpt.py so that
WPT dependencies in Chromium vpython are found.
If you need more advanced test control, please use the runner located at
//third_party/blink/web_tests/external/wpt/wpt.
Here's the mapping [isolate script flag] : [wpt flag]
--isolated-script-test-output : --log-chromium
--total-shards : --total-chunks
--shard-index : --this-chunk
"""
# TODO(aluo): Combine or factor out commons parts with run_wpt_tests.py script.
import argparse
import contextlib
import json
import logging
import os
import shutil
import sys
import common
import wpt_common
logger = logging.getLogger(__name__)
SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
BUILD_ANDROID = os.path.join(SRC_DIR, 'build', 'android')
BLINK_TOOLS_DIR = os.path.join(
SRC_DIR, 'third_party', 'blink', 'tools')
CATAPULT_DIR = os.path.join(SRC_DIR, 'third_party', 'catapult')
DEFAULT_WPT = os.path.join(wpt_common.WEB_TESTS_DIR, 'external', 'wpt', 'wpt')
PYUTILS = os.path.join(CATAPULT_DIR, 'common', 'py_utils')
if PYUTILS not in sys.path:
sys.path.append(PYUTILS)
if BLINK_TOOLS_DIR not in sys.path:
sys.path.append(BLINK_TOOLS_DIR)
if BUILD_ANDROID not in sys.path:
sys.path.append(BUILD_ANDROID)
import devil_chromium
from blinkpy.web_tests.port.android import (
PRODUCTS, PRODUCTS_TO_EXPECTATION_FILE_PATHS, ANDROID_WEBLAYER,
ANDROID_WEBVIEW, CHROME_ANDROID, ANDROID_DISABLED_TESTS)
from devil import devil_env
from devil.android import apk_helper
from devil.android import device_utils
from devil.android.tools import system_app
from devil.android.tools import webview_app
from py_utils.tempfile_ext import NamedTemporaryDirectory
class PassThroughArgs(argparse.Action):
pass_through_args = []
def __call__(self, parser, namespace, values, option_string=None):
if option_string:
if self.nargs == 0:
self.add_unique_pass_through_arg(option_string)
elif self.nargs is None:
self.add_unique_pass_through_arg('{}={}'.format(option_string, values))
else:
raise ValueError("nargs {} not supported: {} {}".format(
self.nargs, option_string, values))
@classmethod
def add_unique_pass_through_arg(cls, arg):
if arg not in cls.pass_through_args:
cls.pass_through_args.append(arg)
def _get_adapter(device):
usage = '%(prog)s --product={' + ','.join(PRODUCTS) + '} ...'
product_parser = argparse.ArgumentParser(
add_help=False, prog='run_android_wpt.py', usage=usage)
product_parser.add_argument(
'--product', action='store', required=True, choices=PRODUCTS)
options, _ = product_parser.parse_known_args()
product = options.product
if product == ANDROID_WEBLAYER:
return WPTWeblayerAdapter(device)
elif product == ANDROID_WEBVIEW:
return WPTWebviewAdapter(device)
else:
return WPTClankAdapter(device)
class WPTAndroidAdapter(wpt_common.BaseWptScriptAdapter):
def __init__(self, device):
self.pass_through_wpt_args = []
self.pass_through_binary_args = []
self._metadata_dir = None
self._device = device
super(WPTAndroidAdapter, self).__init__()
# Arguments from add_extra_arguments were added so
# it's safe to parse the arguments and set self._options
self.parse_args()
@property
def rest_args(self):
rest_args = super(WPTAndroidAdapter, self).rest_args
# Here we add all of the arguments required to run WPT tests on Android.
rest_args.extend([self.options.wpt_path])
# vpython has packages needed by wpt, so force it to skip the setup
rest_args.extend(["--venv=../../", "--skip-venv-setup"])
rest_args.extend(["run",
"--test-type=" + self.options.test_type,
"--webdriver-binary",
self.options.webdriver_binary,
"--headless",
"--no-pause-after-test",
"--no-capture-stdio",
"--no-manifest-download",
])
# if metadata was created then add the metadata directory
# to the list of wpt arguments
if self._metadata_dir:
rest_args.extend(['--metadata', self._metadata_dir])
if self.options.verbose >= 3:
rest_args.extend(["--log-mach=-", "--log-mach-level=debug",
"--log-mach-verbose"])
if self.options.verbose >= 4:
rest_args.extend(['--webdriver-arg=--verbose',
'--webdriver-arg="--log-path=-"'])
rest_args.extend(self.pass_through_wpt_args)
return rest_args
def _extra_metadata_builder_args(self):
raise NotImplementedError
def _maybe_build_metadata(self):
metadata_builder_cmd = [
sys.executable,
os.path.join(wpt_common.BLINK_TOOLS_DIR, 'build_wpt_metadata.py'),
'--android-product',
self.options.product,
'--ignore-default-expectations',
'--metadata-output-dir',
self._metadata_dir,
'--additional-expectations',
ANDROID_DISABLED_TESTS,
]
metadata_builder_cmd.extend(self._extra_metadata_builder_args())
return common.run_command(metadata_builder_cmd)
def run_test(self):
with NamedTemporaryDirectory() as self._metadata_dir, self._install_apks():
metadata_command_ret = self._maybe_build_metadata()
if metadata_command_ret != 0:
return metadata_command_ret
return super(WPTAndroidAdapter, self).run_test()
def _install_apks(self):
raise NotImplementedError
def clean_up_after_test_run(self):
# Avoid having a dangling reference to the temp directory
# which was deleted
self._metadata_dir = None
def add_extra_arguments(self, parser):
# TODO: |pass_through_args| are broken and need to be supplied by way of
# "--binary-arg".
class BinaryPassThroughArgs(PassThroughArgs):
pass_through_args = self.pass_through_binary_args
class WPTPassThroughArgs(PassThroughArgs):
pass_through_args = self.pass_through_wpt_args
# Add this so that product argument does not go in self._rest_args
# when self.parse_args() is called
parser.add_argument('--product', help=argparse.SUPPRESS)
parser.add_argument('--webdriver-binary', required=True,
help='Path of the webdriver binary. It needs to have'
' the same major version as the apk.')
parser.add_argument('--wpt-path', default=DEFAULT_WPT,
help='Controls the path of the WPT runner to use'
' (therefore tests). Defaults the revision rolled into'
' Chromium.')
parser.add_argument('--test-type', default='testharness',
help='Specify to experiment with other test types.'
' Currently only the default is expected to work.')
parser.add_argument('--verbose', '-v', action='count',
help='Verbosity level.')
parser.add_argument('--include', metavar='TEST_OR_DIR',
action=WPTPassThroughArgs,
help='Test(s) to run, defaults to run all tests.')
parser.add_argument('--list-tests', action=WPTPassThroughArgs, nargs=0,
help="Don't run any tests, just print out a list of"
' tests that would be run.')
parser.add_argument('--webdriver-arg', action=WPTPassThroughArgs,
help='WebDriver args.')
parser.add_argument('--log-wptreport', metavar='WPT_REPORT_FILE',
action=WPTPassThroughArgs,
help="Log wptreport with subtest details.")
parser.add_argument('--log-raw', metavar='RAW_REPORT_FILE',
action=WPTPassThroughArgs,
help="Log raw report.")
parser.add_argument('--log-html', metavar='HTML_REPORT_FILE',
action=WPTPassThroughArgs,
help="Log html report.")
parser.add_argument('--log-xunit', metavar='XUNIT_REPORT_FILE',
action=WPTPassThroughArgs,
help="Log xunit report.")
parser.add_argument('--enable-features', action=BinaryPassThroughArgs,
help='Chromium features to enable during testing.')
parser.add_argument('--disable-features', action=BinaryPassThroughArgs,
help='Chromium features to disable during testing.')
parser.add_argument('--disable-field-trial-config',
action=BinaryPassThroughArgs,
help='Disable test trials for Chromium features.')
parser.add_argument('--force-fieldtrials', action=BinaryPassThroughArgs,
help='Force trials for Chromium features.')
parser.add_argument('--force-fieldtrial-params',
action=BinaryPassThroughArgs,
help='Force trial params for Chromium features.')
class WPTWeblayerAdapter(WPTAndroidAdapter):
WEBLAYER_SHELL_PKG = 'org.chromium.weblayer.shell'
WEBLAYER_SUPPORT_PKG = 'org.chromium.weblayer.support'
@contextlib.contextmanager
def _install_apks(self):
install_weblayer_shell_as_needed = maybe_install_user_apk(
self._device, self.options.weblayer_shell, self.WEBLAYER_SHELL_PKG)
install_weblayer_support_as_needed = maybe_install_user_apk(
self._device, self.options.weblayer_support, self.WEBLAYER_SUPPORT_PKG)
install_webview_provider_as_needed = maybe_install_webview_provider(
self._device, self.options.webview_provider)
with install_weblayer_shell_as_needed, \
install_weblayer_support_as_needed, \
install_webview_provider_as_needed:
yield
def _extra_metadata_builder_args(self):
return [
'--additional-expectations',
PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBLAYER]]
def add_extra_arguments(self, parser):
super(WPTWeblayerAdapter, self).add_extra_arguments(parser)
parser.add_argument('--weblayer-shell',
help='WebLayer Shell apk to install.')
parser.add_argument('--weblayer-support',
help='WebLayer Support apk to install.')
parser.add_argument('--webview-provider',
help='Webview provider apk to install.')
@property
def rest_args(self):
args = super(WPTWeblayerAdapter, self).rest_args
args.append(ANDROID_WEBLAYER)
return args
class WPTWebviewAdapter(WPTAndroidAdapter):
SYSTEM_WEBVIEW_SHELL_PKG = 'org.chromium.webview_shell'
@contextlib.contextmanager
def _install_apks(self):
install_shell_as_needed = maybe_install_user_apk(
self._device, self.options.system_webview_shell,
self.SYSTEM_WEBVIEW_SHELL_PKG)
install_webview_provider_as_needed = maybe_install_webview_provider(
self._device, self.options.webview_provider)
with install_shell_as_needed, install_webview_provider_as_needed:
yield
def _extra_metadata_builder_args(self):
return [
'--additional-expectations',
PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBVIEW]]
def add_extra_arguments(self, parser):
super(WPTWebviewAdapter, self).add_extra_arguments(parser)
parser.add_argument('--system-webview-shell',
help=('System WebView Shell apk to install. If not '
'specified then the on-device WebView apk '
'will be used.'))
parser.add_argument('--webview-provider',
help='Webview provider APK to install.')
@property
def rest_args(self):
args = super(WPTWebviewAdapter, self).rest_args
args.append(ANDROID_WEBVIEW)
return args
class WPTClankAdapter(WPTAndroidAdapter):
@contextlib.contextmanager
def _install_apks(self):
install_clank_as_needed = maybe_install_user_apk(
self._device, self.options.chrome_apk)
with install_clank_as_needed:
yield
def _extra_metadata_builder_args(self):
return [
'--additional-expectations',
PRODUCTS_TO_EXPECTATION_FILE_PATHS[CHROME_ANDROID]]
def add_extra_arguments(self, parser):
super(WPTClankAdapter, self).add_extra_arguments(parser)
parser.add_argument(
'--chrome-apk', help='Chrome apk to install.')
parser.add_argument(
'--chrome-package-name',
help=('The package name of Chrome to test,'
' defaults to that of the compiled Chrome apk.'))
@property
def rest_args(self):
args = super(WPTClankAdapter, self).rest_args
if not self.options.chrome_package_name and not self.options.chrome_apk:
raise Exception('Either the --chrome-package-name or --chrome-apk '
'command line arguments must be used.')
if not self.options.chrome_package_name:
self.options.chrome_package_name = apk_helper.GetPackageName(
self.options.chrome_apk)
logger.info("Using Chrome apk's default package %s." %
self.options.chrome_package_name)
args.extend(['--package-name', self.options.chrome_package_name])
# add the product positional argument
args.append(CHROME_ANDROID)
return args
def maybe_install_webview_provider(device, apk):
if apk:
logger.info('Will install WebView apk at ' + apk)
return webview_app.UseWebViewProvider(device, apk)
else:
return no_op()
def maybe_install_user_apk(device, apk, expected_pkg=None):
"""contextmanager to install apk on device.
Args:
device: DeviceUtils instance on which to install the apk.
apk: Apk file path on host.
expected_pkg: Optional, check that apk's package name matches.
Returns:
If apk evaluates to false, returns a do-nothing contextmanager.
Otherwise, returns a contextmanager to install apk on device.
"""
if apk:
pkg = apk_helper.GetPackageName(apk)
if expected_pkg and pkg != expected_pkg:
raise ValueError('{} has incorrect package name: {}, expected {}.'.format(
apk, pkg, expected_pkg))
install_as_needed = app_installed(device, apk)
logger.info('Will install ' + pkg + ' at ' + apk)
else:
install_as_needed = no_op()
return install_as_needed
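# Hedged usage sketch (apk path and package name are illustrative):
#
#     with maybe_install_user_apk(device,
#                                 'out/Default/apks/SystemWebViewShell.apk',
#                                 'org.chromium.webview_shell'):
#         ...  # run tests while the apk is installed; it is uninstalled on exit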
@contextlib.contextmanager
def app_installed(device, apk):
pkg = apk_helper.GetPackageName(apk)
device.Install(apk)
try:
yield
finally:
device.Uninstall(pkg)
# Dummy contextmanager to simplify multiple optional managers.
@contextlib.contextmanager
def no_op():
yield
# This is not really a "script test" so does not need to manually add
# any additional compile targets.
def main_compile_targets(args):
json.dump([], args.output)
def main():
devil_chromium.Initialize()
devices = device_utils.DeviceUtils.HealthyDevices()
if not devices:
logger.error('There are no devices attached to this host. Exiting script.')
return 1
# Only 1 device is supported for Android locally, this will work well with
# sharding support via swarming infra.
device = devices[0]
adapter = _get_adapter(device)
if adapter.options.verbose:
if adapter.options.verbose == 1:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.DEBUG)
# WPT setup for chrome and webview requires that PATH contains adb.
platform_tools_path = os.path.dirname(devil_env.config.FetchPath('adb'))
os.environ['PATH'] = ':'.join([platform_tools_path] +
os.environ['PATH'].split(':'))
return adapter.run_test()
if __name__ == '__main__':
# Conform minimally to the protocol defined by ScriptTest.
if 'compile_targets' in sys.argv:
funcs = {
'run': None,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs))
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger()
sys.exit(main())
| 35.984581
| 80
| 0.689294
|
7950d5102694d907493bb089b21903d6788ede4b
| 1,205
|
py
|
Python
|
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/file/cmd/move/data/dsz/__init__.py
|
bidhata/EquationGroupLeaks
|
1ff4bc115cb2bd5bf2ed6bf769af44392926830c
|
[
"Unlicense"
] | 9
|
2019-11-22T04:58:40.000Z
|
2022-02-26T16:47:28.000Z
|
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/file/cmd/move/data/dsz/__init__.py
|
bidhata/EquationGroupLeaks
|
1ff4bc115cb2bd5bf2ed6bf769af44392926830c
|
[
"Unlicense"
] | null | null | null |
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/file/cmd/move/data/dsz/__init__.py
|
bidhata/EquationGroupLeaks
|
1ff4bc115cb2bd5bf2ed6bf769af44392926830c
|
[
"Unlicense"
] | 8
|
2017-09-27T10:31:18.000Z
|
2022-01-08T10:30:46.000Z
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
import dsz
import dsz.cmd
import dsz.data
import dsz.lp
class Move(dsz.data.Task):
def __init__(self, cmd=None):
dsz.data.Task.__init__(self, cmd)
def _LoadData(self):
try:
self.MoveResults = Move.MoveResults(dsz.cmd.data.Get('MoveResults', dsz.TYPE_OBJECT)[0])
except:
self.MoveResults = None
return
class MoveResults(dsz.data.DataBean):
def __init__(self, obj):
try:
self.delay = dsz.cmd.data.ObjectGet(obj, 'delay', dsz.TYPE_BOOL)[0]
except:
self.delay = None
try:
self.destination = dsz.cmd.data.ObjectGet(obj, 'destination', dsz.TYPE_STRING)[0]
except:
self.destination = None
try:
self.source = dsz.cmd.data.ObjectGet(obj, 'source', dsz.TYPE_STRING)[0]
except:
self.source = None
return
dsz.data.RegisterCommand('Move', Move)
MOVE = Move
move = Move
| 25.638298
| 100
| 0.580913
|
7950d5690cb9cdf85abbba0ca2ddaf60421183b3
| 3,099
|
py
|
Python
|
projDir/uw/scripts/ftpCSapr2Images.py
|
NCAR/lrose-projects-relampago
|
8208e4bd83ac8007a04987c0531fb60cc629a05a
|
[
"BSD-2-Clause"
] | 1
|
2018-12-03T19:51:14.000Z
|
2018-12-03T19:51:14.000Z
|
projDir/uw/scripts/ftpCSapr2Images.py
|
NCAR/lrose-projects-relampago
|
8208e4bd83ac8007a04987c0531fb60cc629a05a
|
[
"BSD-2-Clause"
] | null | null | null |
projDir/uw/scripts/ftpCSapr2Images.py
|
NCAR/lrose-projects-relampago
|
8208e4bd83ac8007a04987c0531fb60cc629a05a
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
import sys
import os
import time
import datetime
from datetime import timedelta
import requests
from bs4 import BeautifulSoup
from ftplib import FTP
#if len(sys.argv) != 2:
# print >>sys.stderr, "Useage: ",sys.argv[0]," [YYYY_MM_DD]"
# quit()
#date = sys.argv[1]
# get current date and time minus one hour
UTC_OFFSET_TIMEDELTA = datetime.datetime.utcnow() - datetime.datetime.now()
date_1_hour_ago = datetime.datetime.now() - timedelta(hours=1) + UTC_OFFSET_TIMEDELTA
date = date_1_hour_ago.strftime("%Y_%m_%d")
dateNoHyphens = date_1_hour_ago.strftime("%Y%m%d")
hour = date_1_hour_ago.strftime("%H")
#nowTime = time.gmtime()
#now = datetime.datetime(nowTime.tm_year, nowTime.tm_mon, nowTime.tm_mday,
# nowTime.tm_hour, nowTime.tm_min, nowTime.tm_sec)
#date = now.strftime("%Y_%m_%d")
#date = '2018_11_01'
url = 'https://engineering.arm.gov/~radar/amf1_csapr2_incoming_images/hsrhi/'+date+'/'
ext = 'png'
homeDir = os.getenv('HOME')
outDir = os.path.join(homeDir, 'radar/csapr2/' + date)
category = 'radar'
platform = 'DOE_CSapr2'
ftpCatalogServer = 'catalog.eol.ucar.edu'
ftpCatalogUser = 'anonymous'
catalogDestDir = '/pub/incoming/catalog/relampago'
debug = 1
def listFD(url, ext=''):
page = requests.get(url).text
print page
soup = BeautifulSoup(page, 'html.parser')
return [url + '/' + node.get('href') for node in soup.find_all('a') if node.get('href').endswith(ext)]
if not os.path.exists(outDir):
os.makedirs(outDir)
os.chdir(outDir)
for file in listFD(url, ext):
tmp = os.path.basename(file)
(f,e) = os.path.splitext(tmp)
parts = f.split('_')
(fdate,ftime) = parts[3].split('-')
fhour = ftime[0:2]
if fdate == dateNoHyphens and fhour == hour:
print file
cmd = 'wget '+file
os.system(cmd)
# correct names of -0.0 files
#cmd = 'mmv "*_-0.0.png" "#1_00.0.png"'
#os.system(cmd)
# rename files and ftp them
for file in os.listdir(outDir):
if file.startswith('cor_'):
if debug:
print >>sys.stderr, "file = ",file
(filename, file_ext) = os.path.splitext(file)
parts = filename.split('_')
(date,time) = parts[3].split('-')
angle_parts = parts[5].split('.')
if len(angle_parts[0]) == 1:
angle = '00'+angle_parts[0]
elif len(angle_parts[0]) == 2:
angle = '0'+angle_parts[0]
else:
angle = angle_parts[0]
product = parts[2]+'_'+parts[4]+'_'+angle
file_cat = category+'.'+platform+'.'+date+time+'.'+product+file_ext
if debug:
print >>sys.stderr, "file_cat = ",file_cat
cmd = 'mv '+file+' '+file_cat
os.system(cmd)
# ftp file
try:
catalogFTP = FTP(ftpCatalogServer,ftpCatalogUser)
catalogFTP.cwd(catalogDestDir)
file = open(file_cat,'rb')
catalogFTP.storbinary('STOR '+file_cat,file)
file.close()
catalogFTP.quit()
except Exception as e:
print >>sys.stderr, "FTP failed, exception: ", e
| 28.694444
| 106
| 0.622136
|
7950d5c597bdfe185bc2350c8cec2863843cc600
| 7,531
|
py
|
Python
|
lib/googlecloudsdk/command_lib/interactive/parser.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/command_lib/interactive/parser.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/command_lib/interactive/parser.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A basic command line parser.
This command line parser does the bare minimum required to understand the
commands and flags being used as well as perform completion. This is not a
replacement for argparse (yet).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import enum
from googlecloudsdk.calliope import cli_tree
from googlecloudsdk.command_lib.interactive import lexer
import six
LOOKUP_COMMANDS = cli_tree.LOOKUP_COMMANDS
LOOKUP_CHOICES = cli_tree.LOOKUP_CHOICES
LOOKUP_COMPLETER = cli_tree.LOOKUP_COMPLETER
LOOKUP_FLAGS = cli_tree.LOOKUP_FLAGS
LOOKUP_GROUPS = cli_tree.LOOKUP_GROUPS
LOOKUP_IS_GROUP = cli_tree.LOOKUP_IS_GROUP
LOOKUP_IS_HIDDEN = cli_tree.LOOKUP_IS_HIDDEN
LOOKUP_IS_SPECIAL = 'interactive.is_special'
LOOKUP_NAME = cli_tree.LOOKUP_NAME
LOOKUP_NARGS = cli_tree.LOOKUP_NARGS
LOOKUP_POSITIONALS = cli_tree.LOOKUP_POSITIONALS
LOOKUP_TYPE = cli_tree.LOOKUP_TYPE
LOOKUP_CLI_VERSION = cli_tree.LOOKUP_CLI_VERSION
class ArgTokenType(enum.Enum):
UNKNOWN = 0 # Unknown token type in any position
PREFIX = 1 # Potential command name, maybe after lex.SHELL_TERMINATOR_CHARS
GROUP = 2 # Command arg with subcommands
COMMAND = 3 # Command arg
FLAG = 4 # Flag arg
FLAG_ARG = 5 # Flag value arg
POSITIONAL = 6 # Positional arg
SPECIAL = 7 # Special keyword that is followed by PREFIX.
class ArgToken(object):
"""Shell token info.
Attributes:
value: A string associated with the token.
token_type: Instance of ArgTokenType
tree: A subtree of CLI root.
start: The index of the first char in the original string.
end: The index directly after the last char in the original string.
"""
def __init__(self, value, token_type=ArgTokenType.UNKNOWN, tree=None,
start=None, end=None):
self.value = value
self.token_type = token_type
self.tree = tree
self.start = start
self.end = end
def __eq__(self, other):
"""Equality based on properties."""
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return False
def __repr__(self):
"""Improve debugging during tests."""
return 'ArgToken({}, {}, {}, {})'.format(self.value, self.token_type,
self.start, self.end)
class Parser(object):
"""Shell command line parser.
Attributes:
args: The list of ArgTokens from the most recent ParseCommand() call.
context: The default command prompt context string.
cmd: The CLI (sub)tree node reached by the command parsed so far.
hidden: Bool, if True hidden commands and flags are not filtered out.
positionals_seen: The number of positional args consumed so far.
root: The root of the CLI tree.
statement: The statement counter.
tokens: The list of lexed shell tokens not yet consumed.
"""
def __init__(self, root, context=None, hidden=False):
self.root = root
self.hidden = hidden
self.args = []
self.cmd = self.root
self.positionals_seen = 0
self.previous_line = None
self.statement = 0
self.tokens = None
self.SetContext(context)
def SetContext(self, context=None):
"""Sets the default command prompt context."""
self.context = six.text_type(context or '')
def ParseCommand(self, line):
"""Parses the next command from line and returns a list of ArgTokens.
The parse stops at the first token that is not an ARG or FLAG. That token is
not consumed. The caller can examine the return value to determine the
parts of the line that were ignored and the remainder of the line that was
not lexed/parsed yet.
Args:
line: a string containing the current command line
Returns:
A list of ArgTokens.
"""
self.tokens = lexer.GetShellTokens(line)
self.cmd = self.root
self.positionals_seen = 0
self.args = []
unknown = False
while self.tokens:
token = self.tokens.pop(0)
value = token.UnquotedValue()
if token.lex == lexer.ShellTokenType.TERMINATOR:
unknown = False
self.cmd = self.root
self.args.append(ArgToken(value, ArgTokenType.SPECIAL, self.cmd,
token.start, token.end))
elif token.lex == lexer.ShellTokenType.FLAG:
self.ParseFlag(token, value)
elif token.lex == lexer.ShellTokenType.ARG and not unknown:
if value in self.cmd[LOOKUP_COMMANDS]:
self.cmd = self.cmd[LOOKUP_COMMANDS][value]
if self.cmd[LOOKUP_IS_GROUP]:
token_type = ArgTokenType.GROUP
elif LOOKUP_IS_SPECIAL in self.cmd:
token_type = ArgTokenType.SPECIAL
self.cmd = self.root
else:
token_type = ArgTokenType.COMMAND
self.args.append(ArgToken(value, token_type, self.cmd,
token.start, token.end))
elif self.cmd == self.root and '=' in value:
token_type = ArgTokenType.SPECIAL
self.cmd = self.root
self.args.append(ArgToken(value, token_type, self.cmd,
token.start, token.end))
elif self.positionals_seen < len(self.cmd[LOOKUP_POSITIONALS]):
positional = self.cmd[LOOKUP_POSITIONALS][self.positionals_seen]
self.args.append(ArgToken(value, ArgTokenType.POSITIONAL,
positional, token.start, token.end))
if positional[LOOKUP_NARGS] not in ('*', '+'):
self.positionals_seen += 1
elif not value: # trailing space
break
else:
unknown = True
if self.cmd == self.root:
token_type = ArgTokenType.PREFIX
else:
token_type = ArgTokenType.UNKNOWN
self.args.append(ArgToken(value, token_type, self.cmd,
token.start, token.end))
else:
unknown = True
self.args.append(ArgToken(value, ArgTokenType.UNKNOWN, self.cmd,
token.start, token.end))
return self.args
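# Hedged illustration (not part of the original module): with a loaded CLI
# tree, something like
#
#     parser = Parser(root)
#     args = parser.ParseCommand('compute instances list --format json ')
#
# would yield ArgTokens typed roughly GROUP ('compute'), GROUP ('instances'),
# COMMAND ('list'), FLAG ('--format'), FLAG_ARG ('json'), assuming those nodes
# exist in the tree.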
def ParseFlag(self, token, name):
"""Parses the flag token and appends it to the arg list."""
name_start = token.start
name_end = token.end
value = None
value_start = None
value_end = None
if '=' in name:
# inline flag value
name, value = name.split('=', 1)
name_end = name_start + len(name)
value_start = name_end + 1
value_end = value_start + len(value)
flag = self.cmd[LOOKUP_FLAGS].get(name)
if not flag or not self.hidden and flag[LOOKUP_IS_HIDDEN]:
self.args.append(ArgToken(name, ArgTokenType.UNKNOWN, self.cmd,
token.start, token.end))
return
if flag[LOOKUP_TYPE] != 'bool' and value is None and self.tokens:
# next arg is the flag value
token = self.tokens.pop(0)
value = token.UnquotedValue()
value_start = token.start
value_end = token.end
self.args.append(ArgToken(name, ArgTokenType.FLAG, flag,
name_start, name_end))
if value is not None:
self.args.append(ArgToken(value, ArgTokenType.FLAG_ARG, None,
value_start, value_end))
| 31.776371
| 80
| 0.656354
|
7950d5d7433ed236942c163a5b774ce167c380ba
| 5,011
|
py
|
Python
|
paddle/ds2.py
|
tensor-tang/DeepSpeech2
|
6ea38aa2a47a1045770d387c0474b266dc5aa311
|
[
"Apache-2.0"
] | null | null | null |
paddle/ds2.py
|
tensor-tang/DeepSpeech2
|
6ea38aa2a47a1045770d387c0474b266dc5aa311
|
[
"Apache-2.0"
] | 1
|
2017-06-05T14:05:35.000Z
|
2017-06-05T14:05:35.000Z
|
paddle/ds2.py
|
tensor-tang/DeepSpeech2
|
6ea38aa2a47a1045770d387c0474b266dc5aa311
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from paddle.trainer_config_helpers import *
use_dummy = get_config_arg("use_dummy", bool, True)
batch_size = get_config_arg('batch_size', int, 1)
is_predict = get_config_arg("is_predict", bool, False)
is_test = get_config_arg("is_test", bool, False)
layer_num = get_config_arg('layer_num', int, 6)
####################Data Configuration ##################
# 10ms as one step
dataSpec = dict(
uttLengths = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500],
counts = [3, 10, 11, 13, 14, 13, 9, 8, 5, 4, 3, 2, 2, 2, 1],
lblLengths = [7, 17, 35, 48, 62, 78, 93, 107, 120, 134, 148, 163, 178, 193, 209],
freqBins = 161,
charNum = 29, # 29 chars
scaleNum = 1280
)
num_classes = dataSpec['charNum']
if not is_predict:
train_list = 'data/train.list' if not is_test else None
test_list = None #'data/test.list'
args = {
'uttLengths': dataSpec['uttLengths'],
'counts': dataSpec['counts'],
'lblLengths': dataSpec['lblLengths'],
'freqBins': dataSpec['freqBins'],
'charNum': dataSpec['charNum'],
'scaleNum': dataSpec['scaleNum'],
'batch_size': batch_size
}
define_py_data_sources2(
train_list,
test_list,
module='dummy_provider' if use_dummy else 'image_provider',
obj='process',
args=args)
###################### Algorithm Configuration #############
settings(
batch_size=batch_size,
learning_rate=1e-3,
# learning_method=AdamOptimizer(),
# regularization=L2Regularization(8e-4),
)
####################### Deep Speech 2 Configuration #############
### TODO:
### 1. change all relu to clipped relu
### 2. rnn
def mkldnn_CBR(input, kh, kw, sh, sw, ic, oc, clipped = 20):
tmp = mkldnn_conv(
input = input,
num_channels = ic,
num_filters = oc,
filter_size = [kw, kh],
stride = [sw, sh],
act = LinearActivation()
)
return mkldnn_bn(
input = tmp,
num_channels = oc,
act = MkldnnReluActivation())
def BiDRNN(input, dim_out, dim_in=None):
if dim_in is None:
dim_in = dim_out
tmp = mkldnn_fc(input=input, dim_in=dim_in, dim_out=dim_out,
bias_attr=False, act=LinearActivation()) # maybe act=None
tmp = mkldnn_bn(input = tmp, isSeq=True, num_channels = dim_out, act = None)
return mkldnn_rnn(
input=tmp,
input_mode=MkldnnRnnConfig.SKIP_INPUT,
alg_kind = MkldnnRnnConfig.RNN_RELU, # try to use clipped
use_bi_direction = True,
sum_output = True,
layer_num=1)
######## DS2 model ########
tmp = data_layer(name = 'data', size = dataSpec['freqBins'])
tmp = mkldnn_reorder(input = tmp,
format_from='nchw',
format_to='nhwc',
dims_from=[-1, -1, 1, dataSpec['freqBins']],
bs_index=0)
tmp = mkldnn_reshape(input=tmp,
name="view_to_noseq",
reshape_type=ReshapeType.TO_NON_SEQUENCE,
img_dims=[1, dataSpec['freqBins'], -1])
# conv, bn, relu
tmp = mkldnn_CBR(tmp, 5, 20, 2, 2, 1, 32)
tmp = mkldnn_CBR(tmp, 5, 10, 1, 2, 32, 32)
# (bs, 32, 75, seq) to (seq,bs,2400)
tmp = mkldnn_reorder(
input = tmp,
format_from='nhwc',
format_to='chwn',
dims_from=[1, -1, 2400, -1],
bs_index=1)
tmp = mkldnn_reshape(input=tmp,
name="view_to_mklseq",
reshape_type=ReshapeType.TO_MKL_SEQUENCE,
img_dims=[2400, 1, 1],
seq_len=-1)
tmp = BiDRNN(tmp, 1760, 2400)
for i in xrange(layer_num):
tmp = BiDRNN(tmp, 1760)
# since ctc should +1 of the dim
ctc_dim = num_classes + 1
tmp = mkldnn_fc(input=tmp,
dim_in = 1760,
dim_out = ctc_dim,
act=LinearActivation()) #act=None
# (seq, bs, dim) to (bs, dim, seq)
tmp = mkldnn_reorder(
input = tmp,
format_from='chwn',
format_to='nhwc',
dims_from=[-1, -1, ctc_dim, 1],
bs_index=1)
# (bs, dim, seq) to (bs, seq, dim)
tmp = mkldnn_reorder(
input = tmp,
format_from='nchw',
format_to='nhwc',
dims_from=[-1, ctc_dim, -1, 1],
bs_index=0)
output = mkldnn_reshape(input=tmp,
name="view_to_paddle_seq",
reshape_type=ReshapeType.TO_PADDLE_SEQUENCE,
img_dims=[ctc_dim, 1, 1],
seq_len=-1)
if not is_predict:
lbl = data_layer(name='label', size=num_classes)
cost = warp_ctc_layer(input=output, name = "WarpCTC", blank = 0, label=lbl, size = ctc_dim) # CTC size should +1
# use ctc so we can use multi threads
# cost = ctc_layer(input=output, name = "CTC", label=lbl, size = num_classes + 1) # CTC size should +1
outputs(cost)
else:
outputs(output)
| 31.71519
| 116
| 0.564558
|
7950d647ae6c25dc76dfceebbd1d4b4f40951066
| 566
|
py
|
Python
|
pirates/flag/DistributedFlagShop.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3
|
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/flag/DistributedFlagShop.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/flag/DistributedFlagShop.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.flag.DistributedFlagShop
from pandac.PandaModules import *
from direct.distributed.DistributedObject import DistributedObject
import FlagGlobals
from Flag import Flag
class DistributedFlagShop(DistributedObject):
__module__ = __name__
notify = directNotify.newCategory('DistributedFlagShop')
def __init__(self, cr):
DistributedObject.__init__(self, cr)
| 37.733333
| 104
| 0.773852
|
7950d6fe5019e5f5a01c5b4808ca9d15b68b8aa8
| 2,296
|
py
|
Python
|
alipay/aop/api/domain/AlipayUserMpointPreconsultModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayUserMpointPreconsultModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayUserMpointPreconsultModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserMpointPreconsultModel(object):
def __init__(self):
self._biz_sub_type = None
self._biz_type = None
self._point = None
self._user_id = None
@property
def biz_sub_type(self):
return self._biz_sub_type
@biz_sub_type.setter
def biz_sub_type(self, value):
self._biz_sub_type = value
@property
def biz_type(self):
return self._biz_type
@biz_type.setter
def biz_type(self, value):
self._biz_type = value
@property
def point(self):
return self._point
@point.setter
def point(self, value):
self._point = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.biz_sub_type:
if hasattr(self.biz_sub_type, 'to_alipay_dict'):
params['biz_sub_type'] = self.biz_sub_type.to_alipay_dict()
else:
params['biz_sub_type'] = self.biz_sub_type
if self.biz_type:
if hasattr(self.biz_type, 'to_alipay_dict'):
params['biz_type'] = self.biz_type.to_alipay_dict()
else:
params['biz_type'] = self.biz_type
if self.point:
if hasattr(self.point, 'to_alipay_dict'):
params['point'] = self.point.to_alipay_dict()
else:
params['point'] = self.point
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserMpointPreconsultModel()
if 'biz_sub_type' in d:
o.biz_sub_type = d['biz_sub_type']
if 'biz_type' in d:
o.biz_type = d['biz_type']
if 'point' in d:
o.point = d['point']
if 'user_id' in d:
o.user_id = d['user_id']
return o
| 26.697674
| 75
| 0.570557
|
7950d793ebe8d0398dd4ae9be65b1d289c8b5d04
| 27,383
|
py
|
Python
|
onnx/helper.py
|
vinitra/onnx
|
531e6dd459003fc8d13b8abb66b29a72a571c865
|
[
"MIT"
] | null | null | null |
onnx/helper.py
|
vinitra/onnx
|
531e6dd459003fc8d13b8abb66b29a72a571c865
|
[
"MIT"
] | null | null | null |
onnx/helper.py
|
vinitra/onnx
|
531e6dd459003fc8d13b8abb66b29a72a571c865
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import numbers
from six import text_type, integer_types, binary_type
import google.protobuf.message
from onnx import TensorProto, SparseTensorProto, AttributeProto, ValueInfoProto, \
TensorShapeProto, NodeProto, ModelProto, GraphProto, OperatorSetIdProto, \
TypeProto, SequenceProto, MapProto, IR_VERSION, TrainingInfoProto
from onnx import defs
from onnx import mapping
from onnx.mapping import STORAGE_TENSOR_TYPE_TO_FIELD
from typing import Text, Sequence, Any, Optional, Dict, Union, TypeVar, Callable, Tuple, List, cast
import numpy as np # type: ignore
VersionRowType = Union[Tuple[Text, int, int, int], Tuple[Text, int, int, int, int]]
VersionTableType = List[VersionRowType]
AssignmentBindingType = List[Tuple[Text, Text]]
# This is a copy of the documented version in https://github.com/onnx/onnx/blob/master/docs/Versioning.md#released-versions
# Both must be updated whenever a new version of ONNX is released.
VERSION_TABLE = [
# Release-version, IR version, ai.onnx version, ai.onnx.ml version, (optional) ai.onnx.training version
('1.0', 3, 1, 1),
('1.1', 3, 5, 1),
('1.1.2', 3, 6, 1),
('1.2', 3, 7, 1),
('1.3', 3, 8, 1),
('1.4.1', 4, 9, 1),
('1.5.0', 5, 10, 1),
('1.6.0', 6, 11, 2),
('1.7.0', 7, 12, 2, 1)
] # type: VersionTableType
VersionMapType = Dict[Tuple[Text, int], int]
# create a map from (opset-domain, opset-version) to ir-version from above table
def create_op_set_id_version_map(table): # type: (VersionTableType) -> VersionMapType
result = dict() # type: VersionMapType
def process(release_version, ir_version, *args): # type: (Text, int, Any) -> None
for pair in zip(['ai.onnx', 'ai.onnx.ml', 'ai.onnx.training'], args):
if (pair not in result):
result[pair] = ir_version
for row in table:
process(*row)
return result
OP_SET_ID_VERSION_MAP = create_op_set_id_version_map(VERSION_TABLE)
# Given list of opset ids, determine minimum IR version required
def find_min_ir_version_for(opsetidlist): # type: (List[OperatorSetIdProto]) -> int
default_min_version = 3
def find_min(domain, version): # type: (Union[Text, None], int) -> int
key = (domain if domain else 'ai.onnx', version)
if (key in OP_SET_ID_VERSION_MAP):
return OP_SET_ID_VERSION_MAP[key]
else:
raise ValueError("Unsupported opset-version.")
if (opsetidlist):
return max([find_min(x.domain, x.version) for x in opsetidlist])
return default_min_version # if no opsets specified
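# Hedged example of the lookup above (per VERSION_TABLE, ai.onnx opset 11 was
# first released with IR version 6):
#
#     find_min_ir_version_for([make_opsetid('ai.onnx', 11)])  # -> 6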
def make_node(
op_type, # type: Text
inputs, # type: Sequence[Text]
outputs, # type: Sequence[Text]
name=None, # type: Optional[Text]
doc_string=None, # type: Optional[Text]
domain=None, # type: Optional[Text]
**kwargs # type: Any
): # type: (...) -> NodeProto
"""Construct a NodeProto.
Arguments:
op_type (string): The name of the operator to construct
inputs (list of string): list of input names
outputs (list of string): list of output names
name (string, default None): optional unique identifier for NodeProto
doc_string (string, default None): optional documentation string for NodeProto
domain (string, default None): optional domain for NodeProto.
If it's None, we will just use default domain (which is empty)
**kwargs (dict): the attributes of the node. The acceptable values
are documented in :func:`make_attribute`.
"""
node = NodeProto()
node.op_type = op_type
node.input.extend(inputs)
node.output.extend(outputs)
if name:
node.name = name
if doc_string:
node.doc_string = doc_string
if domain is not None:
node.domain = domain
if kwargs:
node.attribute.extend(
make_attribute(key, value)
for key, value in sorted(kwargs.items()))
return node
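# Minimal sketch (not part of the original module): a single Relu node.
#
#     node = make_node('Relu', inputs=['x'], outputs=['y'], name='relu_0')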
def make_operatorsetid(
domain, # type: Text
version, # type: int
): # type: (...) -> OperatorSetIdProto
"""Construct an OperatorSetIdProto.
Arguments:
domain (string): The domain of the operator set id
version (integer): Version of operator set id
"""
operatorsetid = OperatorSetIdProto()
operatorsetid.domain = domain
operatorsetid.version = version
return operatorsetid
def make_graph(
nodes, # type: Sequence[NodeProto]
name, # type: Text
inputs, # type: Sequence[ValueInfoProto]
outputs, # type: Sequence[ValueInfoProto]
initializer=None, # type: Optional[Sequence[TensorProto]]
doc_string=None, # type: Optional[Text]
value_info=[], # type: Sequence[ValueInfoProto]
sparse_initializer=None, # type: Optional[Sequence[SparseTensorProto]]
): # type: (...) -> GraphProto
if initializer is None:
initializer = []
if sparse_initializer is None:
sparse_initializer = []
if value_info is None:
value_info = []
graph = GraphProto()
graph.node.extend(nodes)
graph.name = name
graph.input.extend(inputs)
graph.output.extend(outputs)
graph.initializer.extend(initializer)
graph.sparse_initializer.extend(sparse_initializer)
graph.value_info.extend(value_info)
if doc_string:
graph.doc_string = doc_string
return graph
def make_opsetid(domain, version): # type: (Text, int) -> OperatorSetIdProto
opsetid = OperatorSetIdProto()
opsetid.domain = domain
opsetid.version = version
return opsetid
def make_model(graph, **kwargs): # type: (GraphProto, **Any) -> ModelProto
model = ModelProto()
# Touch model.ir_version so it is stored as the version from which it is
# generated.
model.ir_version = IR_VERSION
model.graph.CopyFrom(graph)
opset_imports = None # type: Optional[Sequence[OperatorSetIdProto]]
opset_imports = kwargs.pop('opset_imports', None) # type: ignore
if opset_imports is not None:
model.opset_import.extend(opset_imports)
else:
# Default import
imp = model.opset_import.add()
imp.version = defs.onnx_opset_version()
for k, v in kwargs.items():
# TODO: Does this work with repeated fields?
setattr(model, k, v)
return model
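# Hedged sketch tying the helpers above together (a one-node identity model;
# names and shapes are illustrative):
#
#     x = make_tensor_value_info('x', TensorProto.FLOAT, [1, 3])
#     y = make_tensor_value_info('y', TensorProto.FLOAT, [1, 3])
#     g = make_graph([make_node('Identity', ['x'], ['y'])], 'g', [x], [y])
#     m = make_model(g, producer_name='example')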
# An extension of make_model that infers an IR_VERSION for the model,
# if not specified, using a best-effort-basis.
def make_model_gen_version(graph, **kwargs): # type: (GraphProto, **Any) -> ModelProto
ir_version_field = str('ir_version')
if (ir_version_field not in kwargs):
opset_imports_field = str('opset_imports')
imports = (kwargs[opset_imports_field] if opset_imports_field in kwargs else [])
kwargs[ir_version_field] = find_min_ir_version_for(imports)
return make_model(graph, **kwargs)
def set_model_props(model, dict_value): # type: (ModelProto, Dict[Text, Text]) -> None
del model.metadata_props[:]
for (k, v) in dict_value.items():
entry = model.metadata_props.add()
entry.key = k
entry.value = v
# model.metadata_properties.append(entry)
def split_complex_to_pairs(ca): # type: (Sequence[np.complex64]) -> Sequence[int]
return [(ca[i // 2].real if (i % 2 == 0) else ca[i // 2].imag)
for i in range(len(ca) * 2)]
def make_tensor(
name, # type: Text
data_type, # type: int
dims, # type: Sequence[int]
vals, # type: Any
raw=False # type: bool
): # type: (...) -> TensorProto
'''
Make a TensorProto with specified arguments. If raw is False, this
function will choose the corresponding proto field to store the
values based on data_type. If raw is True, use "raw_data" proto
field to store the values, and values should be of type bytes in
this case.
'''
tensor = TensorProto()
tensor.data_type = data_type
tensor.name = name
if data_type == TensorProto.STRING:
assert not raw, "Can not use raw_data to store string type"
# Check number of vals specified equals tensor size
size = 1 if (not raw) else (mapping.TENSOR_TYPE_TO_NP_TYPE[data_type].itemsize)
for d in dims:
size = size * d
if (len(vals) != size):
raise ValueError("Number of values does not match tensor's size.")
if (data_type == TensorProto.COMPLEX64
or data_type == TensorProto.COMPLEX128):
vals = split_complex_to_pairs(vals)
if raw:
tensor.raw_data = vals
else:
field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[
mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[data_type]]
getattr(tensor, field).extend(vals)
tensor.dims.extend(dims)
return tensor
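# Sketch of the two storage modes described above (values are illustrative):
#
#     t_typed = make_tensor('w', TensorProto.FLOAT, dims=[2, 2],
#                           vals=[1.0, 2.0, 3.0, 4.0])
#     t_raw = make_tensor('w_raw', TensorProto.FLOAT, dims=[2, 2],
#                         vals=np.array([1, 2, 3, 4], np.float32).tobytes(),
#                         raw=True)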
def make_sparse_tensor(
values, # type: TensorProto
indices, # type: TensorProto
dims # type: Sequence[int]
): # type: (...) -> SparseTensorProto
sparse = SparseTensorProto()
sparse.values.CopyFrom(values)
sparse.indices.CopyFrom(indices)
sparse.dims.extend(dims)
return sparse
def make_sequence(
name, # type: Text
elem_type, # type: int
values, # type: Sequence[Any]
): # type: (...) -> SequenceProto
'''
Make a Sequence with specified value arguments.
'''
sequence = SequenceProto()
sequence.name = name
sequence.elem_type = elem_type
values_field = mapping.STORAGE_ELEMENT_TYPE_TO_FIELD[elem_type]
getattr(sequence, values_field).CopyFrom(values)
return sequence
def make_map(
name, # type: Text
key_type, # type: int
keys, # type: List[Any]
values # type: SequenceProto
): # type: (...) -> MapProto
'''
Make a Map with specified key-value pair arguments.
Criteria for conversion:
- Keys and Values must have the same number of elements
- Every key in keys must be of the same type
- Every value in values must be of the same type
'''
map = MapProto()
valid_key_int_types = [TensorProto.INT8, TensorProto.INT16, TensorProto.INT32,
TensorProto.INT64, TensorProto.UINT8, TensorProto.UINT16,
TensorProto.UINT32, TensorProto.UINT64]
map.name = name
map.key_type = key_type
if key_type == TensorProto.STRING:
map.string_keys.extend(keys)
elif key_type in valid_key_int_types:
map.keys.extend(keys)
map.values.CopyFrom(values)
return map
def _to_bytes_or_false(val): # type: (Union[Text, bytes]) -> Union[bytes, bool]
"""An internal graph to convert the input to a bytes or to False.
The criteria for conversion is as follows and should be python 2 and 3
compatible:
- If val is py2 str or py3 bytes: return bytes
- If val is py2 unicode or py3 str: return val.encode('utf-8')
- Otherwise, return False
"""
if isinstance(val, bytes):
return val
try:
return val.encode('utf-8')
except AttributeError:
return False
def make_attribute(
key, # type: Text
value, # type: Any
doc_string=None # type: Optional[Text]
): # type: (...) -> AttributeProto
"""Makes an AttributeProto based on the value type."""
attr = AttributeProto()
attr.name = key
if doc_string:
attr.doc_string = doc_string
is_iterable = isinstance(value, collections.Iterable)
bytes_or_false = _to_bytes_or_false(value)
# First, singular cases
# float
if isinstance(value, float):
attr.f = value
attr.type = AttributeProto.FLOAT
# integer
elif isinstance(value, numbers.Integral):
attr.i = cast(int, value)
attr.type = AttributeProto.INT
# string
elif bytes_or_false is not False:
assert isinstance(bytes_or_false, bytes)
attr.s = bytes_or_false
attr.type = AttributeProto.STRING
elif isinstance(value, TensorProto):
attr.t.CopyFrom(value)
attr.type = AttributeProto.TENSOR
elif isinstance(value, SparseTensorProto):
attr.sparse_tensor.CopyFrom(value)
attr.type = AttributeProto.SPARSE_TENSOR
elif isinstance(value, GraphProto):
attr.g.CopyFrom(value)
attr.type = AttributeProto.GRAPH
    # Second, iterable cases
elif is_iterable:
byte_array = [_to_bytes_or_false(v) for v in value]
if all(isinstance(v, numbers.Integral) for v in value):
# Turn np.int32/64 into Python built-in int.
attr.ints.extend(int(v) for v in value)
attr.type = AttributeProto.INTS
elif all(isinstance(v, numbers.Real) for v in value):
# Since ints and floats are members of Real, this allows a mix of ints and floats
# (and converts the ints to floats).
attr.floats.extend(float(v) for v in value)
attr.type = AttributeProto.FLOATS
elif all(map(lambda bytes_or_false: bytes_or_false is not False, byte_array)):
attr.strings.extend(cast(List[bytes], byte_array))
attr.type = AttributeProto.STRINGS
elif all(isinstance(v, TensorProto) for v in value):
attr.tensors.extend(value)
attr.type = AttributeProto.TENSORS
elif all(isinstance(v, SparseTensorProto) for v in value):
attr.sparse_tensors.extend(value)
attr.type = AttributeProto.SPARSE_TENSORS
elif all(isinstance(v, GraphProto) for v in value):
attr.graphs.extend(value)
attr.type = AttributeProto.GRAPHS
else:
raise ValueError(
"You passed in an iterable attribute but I cannot figure out "
"its applicable type.")
else:
raise TypeError(
'value "{}" is not valid attribute data type.'.format(value))
return attr
def get_attribute_value(attr): # type: (AttributeProto) -> Any
if attr.type == AttributeProto.FLOAT:
return attr.f
if attr.type == AttributeProto.INT:
return attr.i
if attr.type == AttributeProto.STRING:
return attr.s
if attr.type == AttributeProto.TENSOR:
return attr.t
if attr.type == AttributeProto.GRAPH:
return attr.g
if attr.type == AttributeProto.FLOATS:
return list(attr.floats)
if attr.type == AttributeProto.INTS:
return list(attr.ints)
if attr.type == AttributeProto.STRINGS:
return list(attr.strings)
if attr.type == AttributeProto.TENSORS:
return list(attr.tensors)
if attr.type == AttributeProto.GRAPHS:
return list(attr.graphs)
raise ValueError("Unsupported ONNX attribute: {}".format(attr))
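# Editor's illustration: make_attribute infers the AttributeProto type from the
# Python value and get_attribute_value recovers it. Attribute names are
# hypothetical.
#
#     a = make_attribute("alpha", 0.5)
#     assert a.type == AttributeProto.FLOAT and get_attribute_value(a) == 0.5
#     b = make_attribute("kernel_shape", [3, 3])
#     assert b.type == AttributeProto.INTS and get_attribute_value(b) == [3, 3]
#     c = make_attribute("mode", "constant")
#     assert get_attribute_value(c) == b"constant"  # strings are stored as bytes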
def make_empty_tensor_value_info(name): # type: (Text) -> ValueInfoProto
value_info_proto = ValueInfoProto()
value_info_proto.name = name
return value_info_proto
def make_tensor_value_info(
name, # type: Text
elem_type, # type: int
shape, # type: Optional[Sequence[Union[Text, int]]]
doc_string="", # type: Text
shape_denotation=None, # type: Optional[List[Text]]
): # type: (...) -> ValueInfoProto
"""Makes a ValueInfoProto based on the data type and shape."""
value_info_proto = ValueInfoProto()
value_info_proto.name = name
if doc_string:
value_info_proto.doc_string = doc_string
tensor_type_proto = value_info_proto.type.tensor_type
tensor_type_proto.elem_type = elem_type
tensor_shape_proto = tensor_type_proto.shape
if shape is not None:
# You might think this is a no-op (extending a normal Python
# list by [] certainly is), but protobuf lists work a little
# differently; if a field is never set, it is omitted from the
# resulting protobuf; a list that is explicitly set to be
# empty will get an (empty) entry in the protobuf. This
# difference is visible to our consumers, so make sure we emit
# an empty shape!
tensor_shape_proto.dim.extend([])
if shape_denotation:
if len(shape_denotation) != len(shape):
raise ValueError(
'Invalid shape_denotation. '
'Must be of the same length as shape.')
for i, d in enumerate(shape):
dim = tensor_shape_proto.dim.add()
if d is None:
pass
elif isinstance(d, integer_types):
dim.dim_value = d
elif isinstance(d, text_type):
dim.dim_param = d
else:
raise ValueError(
'Invalid item in shape: {}. '
'Needs to of integer_types or text_type.'.format(d))
if shape_denotation:
dim.denotation = shape_denotation[i]
return value_info_proto
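# Editor's illustration: declaring a float input with a symbolic batch
# dimension. The value name "X" and the dim name "batch" are hypothetical.
#
#     vi = make_tensor_value_info("X", TensorProto.FLOAT, ["batch", 3, 224, 224])
#     assert vi.type.tensor_type.elem_type == TensorProto.FLOAT
#     assert vi.type.tensor_type.shape.dim[0].dim_param == "batch"
#     assert vi.type.tensor_type.shape.dim[1].dim_value == 3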
def make_sequence_value_info(
name, # type: Text
elem_type, # type: int
shape, # type: Optional[Sequence[Union[Text, int]]]
doc_string="", # type: Text
elem_shape_denotation=None, # type: Optional[List[Text]]
): # type: (...) -> ValueInfoProto
"""Makes a ValueInfoProto based on the data type and shape for Sequence."""
value_info_proto = ValueInfoProto()
value_info_proto.name = name
if doc_string:
value_info_proto.doc_string = doc_string
sequence_type_proto = value_info_proto.type.sequence_type
sequence_type_proto.elem_type.tensor_type.elem_type = elem_type
tensor_value_info = make_tensor_value_info(name, elem_type, shape, doc_string, elem_shape_denotation)
if shape is not None:
sequence_type_proto.elem_type.tensor_type.shape.CopyFrom(tensor_value_info.type.tensor_type.shape)
return value_info_proto
def _sanitize_str(s): # type: (Union[Text, bytes]) -> Text
if isinstance(s, text_type):
sanitized = s
elif isinstance(s, binary_type):
sanitized = s.decode('utf-8', errors='ignore')
else:
sanitized = str(s)
if len(sanitized) < 64:
return sanitized
return sanitized[:64] + '...<+len=%d>' % (len(sanitized) - 64)
def printable_attribute(attr, subgraphs=False): # type: (AttributeProto, bool) -> Union[Text, Tuple[Text, List[GraphProto]]]
content = []
content.append(attr.name)
content.append("=")
def str_float(f): # type: (float) -> Text
# NB: Different Python versions print different numbers of trailing
# decimals, specifying this explicitly keeps it consistent for all
# versions
return '{:.15g}'.format(f)
def str_int(i): # type: (int) -> Text
# NB: In Python 2, longs will repr() as '2L', which is ugly and
# unnecessary. Explicitly format it to keep it consistent.
return '{:d}'.format(i)
def str_str(s): # type: (Text) -> Text
return repr(s)
_T = TypeVar('_T') # noqa
def str_list(str_elem, xs): # type: (Callable[[_T], Text], Sequence[_T]) -> Text
return '[' + ', '.join(map(str_elem, xs)) + ']'
    # for now, this logic should continue to work as long as we are running on a proto2
    # implementation. If/when we switch to proto3, we will need to use attr.type
# To support printing subgraphs, if we find a graph attribute, print out
# its name here and pass the graph itself up to the caller for later
# printing.
graphs = []
if attr.HasField("f"):
content.append(str_float(attr.f))
elif attr.HasField("i"):
content.append(str_int(attr.i))
elif attr.HasField("s"):
# TODO: Bit nervous about Python 2 / Python 3 determinism implications
content.append(repr(_sanitize_str(attr.s)))
elif attr.HasField("t"):
if len(attr.t.dims) > 0:
content.append("<Tensor>")
else:
# special case to print scalars
field = STORAGE_TENSOR_TYPE_TO_FIELD[attr.t.data_type]
content.append('<Scalar Tensor {}>'.format(str(getattr(attr.t, field))))
elif attr.HasField("g"):
content.append("<graph {}>".format(attr.g.name))
graphs.append(attr.g)
elif attr.floats:
content.append(str_list(str_float, attr.floats))
elif attr.ints:
content.append(str_list(str_int, attr.ints))
elif attr.strings:
# TODO: Bit nervous about Python 2 / Python 3 determinism implications
content.append(str(list(map(_sanitize_str, attr.strings))))
elif attr.tensors:
content.append("[<Tensor>, ...]")
elif attr.graphs:
content.append('[')
for i, g in enumerate(attr.graphs):
comma = ',' if i != len(attr.graphs) - 1 else ''
content.append('<graph {}>{}'.format(g.name, comma))
content.append(']')
graphs.extend(attr.graphs)
else:
content.append("<Unknown>")
if subgraphs:
return ' '.join(content), graphs
else:
return ' '.join(content)
def printable_dim(dim): # type: (TensorShapeProto.Dimension) -> Text
which = dim.WhichOneof('value')
assert which is not None
return str(getattr(dim, which))
def printable_type(t): # type: (TypeProto) -> Text
if t.WhichOneof('value') == "tensor_type":
s = TensorProto.DataType.Name(t.tensor_type.elem_type)
if t.tensor_type.HasField('shape'):
if len(t.tensor_type.shape.dim):
s += str(', ' + 'x'.join(map(printable_dim, t.tensor_type.shape.dim)))
else:
s += str(', scalar')
return s
if t.WhichOneof('value') is None:
return ""
return 'Unknown type {}'.format(t.WhichOneof('value'))
def printable_value_info(v): # type: (ValueInfoProto) -> Text
s = '%{}'.format(v.name)
if v.type:
s = '{}[{}]'.format(s, printable_type(v.type))
return s
def printable_tensor_proto(t): # type: (TensorProto) -> Text
s = '%{}['.format(t.name)
s += TensorProto.DataType.Name(t.data_type)
if t.dims is not None:
if len(t.dims):
s += str(', ' + 'x'.join(map(str, t.dims)))
else:
s += str(', scalar')
s += ']'
return s
def printable_node(node, prefix='', subgraphs=False): # type: (NodeProto, Text, bool) -> Union[Text, Tuple[Text, List[GraphProto]]]
content = []
if len(node.output):
content.append(
', '.join(['%{}'.format(name) for name in node.output]))
content.append('=')
# To deal with nested graphs
graphs = [] # type: List[GraphProto]
printed_attrs = []
for attr in node.attribute:
if subgraphs:
printed_attr, gs = printable_attribute(attr, subgraphs)
assert isinstance(gs, list)
graphs.extend(gs)
printed_attrs.append(printed_attr)
else:
printed = printable_attribute(attr)
assert isinstance(printed, Text)
printed_attrs.append(printed)
printed_attributes = ', '.join(sorted(printed_attrs))
printed_inputs = ', '.join(['%{}'.format(name) for name in node.input])
if node.attribute:
content.append("{}[{}]({})".format(node.op_type, printed_attributes, printed_inputs))
else:
content.append("{}({})".format(node.op_type, printed_inputs))
if subgraphs:
return prefix + ' '.join(content), graphs
else:
return prefix + ' '.join(content)
def printable_graph(graph, prefix=''): # type: (GraphProto, Text) -> Text
content = []
indent = prefix + ' '
# header
header = ['graph', graph.name]
initializers = {t.name for t in graph.initializer}
if len(graph.input):
header.append("(")
in_strs = [] # required inputs
in_with_init_strs = [] # optional inputs with initializer providing default value
for inp in graph.input:
if inp.name not in initializers:
in_strs.append(printable_value_info(inp))
else:
in_with_init_strs.append(printable_value_info(inp))
if in_strs:
content.append(prefix + ' '.join(header))
header = []
for line in in_strs:
content.append(prefix + ' ' + line)
header.append(")")
if in_with_init_strs:
header.append("optional inputs with matching initializers (")
content.append(prefix + ' '.join(header))
header = []
for line in in_with_init_strs:
content.append(prefix + ' ' + line)
header.append(")")
# from IR 4 onwards an initializer is not required to have a matching graph input
# so output the name, type and shape of those as well
if len(in_with_init_strs) < len(initializers):
graph_inputs = {i.name for i in graph.input}
init_strs = [printable_tensor_proto(i) for i in graph.initializer
if i.name not in graph_inputs]
header.append("initializers (")
content.append(prefix + ' '.join(header))
header = []
for line in init_strs:
content.append(prefix + ' ' + line)
header.append(")")
header.append('{')
content.append(prefix + ' '.join(header))
graphs = [] # type: List[GraphProto]
# body
for node in graph.node:
pn, gs = printable_node(node, indent, subgraphs=True)
assert isinstance(gs, list)
content.append(pn)
graphs.extend(gs)
# tail
tail = ['return']
if len(graph.output):
tail.append(
', '.join(['%{}'.format(out.name) for out in graph.output]))
content.append(indent + ' '.join(tail))
# closing bracket
content.append(prefix + '}')
for g in graphs:
content.append('\n' + printable_graph(g))
return '\n'.join(content)
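# Editor's illustration: printable_graph renders a GraphProto as readable
# pseudo-code. The tiny graph below is assembled directly through the protobuf
# API so the sketch stays self-contained; names are hypothetical.
#
#     g = GraphProto()
#     g.name = "tiny"
#     node = g.node.add()
#     node.op_type = "Relu"
#     node.input.append("x")
#     node.output.append("y")
#     print(printable_graph(g))
#     # graph tiny {
#     #   %y = Relu(%x)
#     #   return
#     # }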
def strip_doc_string(proto): # type: (google.protobuf.message.Message) -> None
"""
Empties `doc_string` field on any nested protobuf messages
"""
assert isinstance(proto, google.protobuf.message.Message)
for descriptor in proto.DESCRIPTOR.fields:
if descriptor.name == 'doc_string':
proto.ClearField(descriptor.name)
elif descriptor.type == descriptor.TYPE_MESSAGE:
if descriptor.label == descriptor.LABEL_REPEATED:
for x in getattr(proto, descriptor.name):
strip_doc_string(x)
elif proto.HasField(descriptor.name):
strip_doc_string(getattr(proto, descriptor.name))
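# Editor's illustration: doc strings set during construction are removed
# recursively by strip_doc_string. The value name "X" is hypothetical.
#
#     vi = make_tensor_value_info("X", TensorProto.FLOAT, [1], doc_string="input")
#     assert vi.doc_string == "input"
#     strip_doc_string(vi)
#     assert vi.doc_string == ""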
def make_training_info(algorithm, algorithm_bindings, initialization, initialization_bindings): # type: (GraphProto, AssignmentBindingType, Optional[GraphProto], Optional[AssignmentBindingType]) -> TrainingInfoProto
training_info = TrainingInfoProto()
training_info.algorithm.CopyFrom(algorithm)
for k, v in algorithm_bindings:
binding = training_info.update_binding.add()
binding.key = k
binding.value = v
if initialization:
training_info.initialization.CopyFrom(initialization)
if initialization_bindings:
for k, v in initialization_bindings:
binding = training_info.initialization_binding.add()
binding.key = k
binding.value = v
return training_info
| 36.220899
| 217
| 0.635577
|
7950d7dbad68be4cd56f2630ba48da83d18298e2
| 1,481
|
py
|
Python
|
sillygamble/wallet/admin.py
|
bitcoinuprising/silly-gamble
|
d6c7e92d90b4f3e06ab2ceda0f0f1ea7acec72f7
|
[
"MIT"
] | 2
|
2018-01-18T13:07:03.000Z
|
2020-03-05T07:30:45.000Z
|
sillygamble/wallet/admin.py
|
bitcoinuprising/silly-gamble
|
d6c7e92d90b4f3e06ab2ceda0f0f1ea7acec72f7
|
[
"MIT"
] | 1
|
2018-10-02T09:06:05.000Z
|
2018-10-05T14:12:19.000Z
|
sillygamble/wallet/admin.py
|
bitcoinuprising/silly-gamble
|
d6c7e92d90b4f3e06ab2ceda0f0f1ea7acec72f7
|
[
"MIT"
] | 7
|
2018-01-18T13:10:52.000Z
|
2019-12-02T02:58:04.000Z
|
from django.contrib import admin
from .models import Wallet, Transaction
from .services.wallet import WalletImportTransaction
# Register your models here.
# class DepositInline(admin.TabularInline):
# model = Deposit
# extra = 0
class TransactionInline(admin.TabularInline):
model = Transaction
extra = 0
def import_transactions(modeladmin, request, queryset):
    importer = WalletImportTransaction(request, queryset)
    importer.run()
import_transactions.short_description = "Import new transactions"
class WalletAdmin(admin.ModelAdmin):
list_display = ['__str__', 'active', 'created_at']
list_filter = ['active', 'created_at']
search_fields = ['wallet_id', 'label']
inlines = [
TransactionInline,
]
actions = [import_transactions]
class DepositAdmin(admin.ModelAdmin):
list_display = ['deposit_id', 'from_wallet', 'to_wallet', 'bitcoin_amount', 'spent', 'created_at']
list_filter = ['spent', 'created_at']
search_fields = ['deposit_id', 'from_wallet', 'to_wallet']
class TransactionAdmin(admin.ModelAdmin):
list_display = ['__str__', 'from_wallet', 'to_wallet', 'amount_out', 'amount', 'created_at']
list_filter = ['spent', 'created_at']
search_fields = ['__str__', 'transaction_id', 'from_wallet', 'to_wallet']
admin.site.register(Wallet, WalletAdmin)
# admin.site.register(Deposit, DepositAdmin)
admin.site.register(Transaction, TransactionAdmin)
| 33.659091
| 102
| 0.731938
|
7950d7edd6c4fb9665b7adf742ec344ca02cfbb6
| 1,268
|
py
|
Python
|
setup.py
|
itsbenweeks/python-lsp-jsonrpc
|
8aee0038336e83d649b59813a31b5b75b2c81074
|
[
"MIT"
] | 2
|
2021-02-21T17:21:27.000Z
|
2021-03-05T11:22:13.000Z
|
setup.py
|
itsbenweeks/python-lsp-jsonrpc
|
8aee0038336e83d649b59813a31b5b75b2c81074
|
[
"MIT"
] | null | null | null |
setup.py
|
itsbenweeks/python-lsp-jsonrpc
|
8aee0038336e83d649b59813a31b5b75b2c81074
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2017-2020 Palantir Technologies, Inc.
# Copyright 2021- Python Language Server Contributors.
import ast
import os
from setuptools import find_packages, setup
HERE = os.path.abspath(os.path.dirname(__file__))
def get_version(module='pylsp_jsonrpc'):
"""Get version."""
with open(os.path.join(HERE, module, '_version.py'), 'r') as f:
data = f.read()
lines = data.split('\n')
for line in lines:
if line.startswith('VERSION_INFO'):
version_tuple = ast.literal_eval(line.split('=')[-1].strip())
version = '.'.join(map(str, version_tuple))
break
return version
README = open('README.md', 'r').read()
setup(
name='python-lsp-jsonrpc',
version=get_version(),
description='JSON RPC 2.0 server library',
long_description=README,
long_description_content_type='text/markdown',
url='https://github.com/python-lsp/python-lsp-jsonrpc',
author='Python Language Server Contributors',
packages=find_packages(exclude=['contrib', 'docs', 'test']),
install_requires=[
'ujson>=3.0.0',
],
extras_require={
'test': ['pylint', 'pycodestyle', 'pyflakes', 'pytest',
'pytest-cov', 'coverage'],
},
)
| 27.565217
| 73
| 0.638801
|
7950d7fe353371ee126bd46c09a8ba594b469841
| 78
|
py
|
Python
|
pastycake/notifier.py
|
9b/pastycake
|
f02363d822dae7111ecc70a1ad435d88d57be939
|
[
"BSD-3-Clause"
] | 18
|
2015-02-02T16:12:44.000Z
|
2021-01-22T01:04:23.000Z
|
pastycake/notifier.py
|
5l1v3r1/pastycake
|
f02363d822dae7111ecc70a1ad435d88d57be939
|
[
"BSD-3-Clause"
] | null | null | null |
pastycake/notifier.py
|
5l1v3r1/pastycake
|
f02363d822dae7111ecc70a1ad435d88d57be939
|
[
"BSD-3-Clause"
] | 2
|
2020-05-11T15:15:24.000Z
|
2021-06-21T12:21:06.000Z
|
import abc
class Notifier(object):
__metaclass__ = abc.ABCMeta
pass
| 11.142857
| 31
| 0.705128
|
7950d82dd9afba67bda6e05db43e6520ffa748c1
| 957
|
py
|
Python
|
python/dgl/backend/set_default_backend.py
|
yuanqing-wang/dgl
|
434f9542b5a95c4700020d07d6622a5dd45a6465
|
[
"Apache-2.0"
] | null | null | null |
python/dgl/backend/set_default_backend.py
|
yuanqing-wang/dgl
|
434f9542b5a95c4700020d07d6622a5dd45a6465
|
[
"Apache-2.0"
] | null | null | null |
python/dgl/backend/set_default_backend.py
|
yuanqing-wang/dgl
|
434f9542b5a95c4700020d07d6622a5dd45a6465
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import os
import json
def set_default_backend(backend_name):
default_dir = os.path.join(os.path.expanduser('~'), '.dgl')
if not os.path.exists(default_dir):
os.makedirs(default_dir)
config_path = os.path.join(default_dir, 'config.json')
with open(config_path, "w") as config_file:
json.dump({'backend': backend_name.lower()}, config_file)
print('Setting the default backend to "{}". You can change it in the '
'~/.dgl/config.json file or export the DGLBACKEND environment variable. '
          'Valid options are: pytorch, mxnet, tensorflow, jax (all lowercase)'.format(
backend_name))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("backend", nargs=1, type=str, choices=[
'jax', 'pytorch', 'tensorflow', 'mxnet'], help="Set default backend")
args = parser.parse_args()
set_default_backend(args.backend[0])
| 41.608696
| 93
| 0.663532
|
7950d95b757684445173875002354fa0118a81c3
| 431
|
py
|
Python
|
app/core/migrations/0006_recipe_image.py
|
guma44/recipe-app-api
|
715a0b6a0dce05756c72f93e25c7fa88efbdc6a1
|
[
"MIT"
] | null | null | null |
app/core/migrations/0006_recipe_image.py
|
guma44/recipe-app-api
|
715a0b6a0dce05756c72f93e25c7fa88efbdc6a1
|
[
"MIT"
] | null | null | null |
app/core/migrations/0006_recipe_image.py
|
guma44/recipe-app-api
|
715a0b6a0dce05756c72f93e25c7fa88efbdc6a1
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2021-02-15 11:41
import core.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_recipe'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='image',
field=models.ImageField(null=True, upload_to=core.models.recipe_image_file_path),
),
]
| 21.55
| 93
| 0.62181
|
7950dc0852c9a6990a68a4ef14e017efc8e20fef
| 11,341
|
py
|
Python
|
ote_sdk/ote_sdk/usecases/exportable_code/streamer/streamer.py
|
ntyukaev/training_extensions
|
c897d42e50828fea853ceda0795e1f0e7d6e9909
|
[
"Apache-2.0"
] | null | null | null |
ote_sdk/ote_sdk/usecases/exportable_code/streamer/streamer.py
|
ntyukaev/training_extensions
|
c897d42e50828fea853ceda0795e1f0e7d6e9909
|
[
"Apache-2.0"
] | null | null | null |
ote_sdk/ote_sdk/usecases/exportable_code/streamer/streamer.py
|
ntyukaev/training_extensions
|
c897d42e50828fea853ceda0795e1f0e7d6e9909
|
[
"Apache-2.0"
] | 1
|
2020-12-13T22:13:51.000Z
|
2020-12-13T22:13:51.000Z
|
"""
Streamer for reading input
"""
# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import abc
import multiprocessing
import queue
import sys
from enum import Enum
from pathlib import Path
from typing import Iterable, Iterator, List, NamedTuple, Optional, Tuple, Union
import cv2
import numpy as np
from natsort import natsorted
class MediaType(Enum):
"""
This Enum represents the types of input
"""
IMAGE = 1
VIDEO = 2
CAMERA = 3
class MediaExtensions(NamedTuple):
"""
This NamedTuple represents the extensions for input
"""
IMAGE: Tuple[str, ...]
VIDEO: Tuple[str, ...]
MEDIA_EXTENSIONS = MediaExtensions(
IMAGE=(".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif", ".tiff", ".webp"),
VIDEO=(".avi", ".mp4"),
)
def get_media_type(path: Optional[Union[str, Path]]) -> MediaType:
"""
Get Media Type from the input path.
:param path: Path to file or directory.
Could be None, which implies camera media type.
"""
if isinstance(path, str):
path = Path(path)
media_type: MediaType
if path is None:
media_type = MediaType.CAMERA
elif path.is_dir():
if _get_filenames(path, MediaType.IMAGE):
media_type = MediaType.IMAGE
elif path.is_file():
if _is_file_with_supported_extensions(path, _get_extensions(MediaType.IMAGE)):
media_type = MediaType.IMAGE
elif _is_file_with_supported_extensions(path, _get_extensions(MediaType.VIDEO)):
media_type = MediaType.VIDEO
else:
raise ValueError("File extension not supported.")
else:
raise ValueError("File or folder does not exist")
return media_type
def _get_extensions(media_type: MediaType) -> Tuple[str, ...]:
"""
Get extensions of the input media type.
:param media_type: Type of the media. Either image or video.
:return: Supported extensions for the corresponding media type.
:example:
>>> _get_extensions(media_type=MediaType.IMAGE)
('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
>>> _get_extensions(media_type=MediaType.VIDEO)
('.avi', '.mp4')
"""
return getattr(MEDIA_EXTENSIONS, media_type.name)
def _is_file_with_supported_extensions(path: Path, extensions: Tuple[str, ...]) -> bool:
"""
Check if the file is supported for the media type
:param path: File path to check
:param extensions: Supported extensions for the media type
:example:
>>> from pathlib import Path
>>> path = Path("./demo.mp4")
>>> extensions = _get_extensions(media_type=MediaType.VIDEO)
>>> _is_file_with_supported_extensions(path, extensions)
True
>>> path = Path("demo.jpg")
>>> extensions = _get_extensions(media_type=MediaType.IMAGE)
>>> _is_file_with_supported_extensions(path, extensions)
True
>>> path = Path("demo.mp3")
>>> extensions = _get_extensions(media_type=MediaType.IMAGE)
>>> _is_file_with_supported_extensions(path, extensions)
False
"""
return path.suffix.lower() in extensions
def _get_filenames(path: Union[str, Path], media_type: MediaType) -> List[str]:
"""
Get filenames from a directory or a path to a file.
:param path: Path to the file or to the location that contains files.
:param media_type: Type of the media (image or video)
:example:
>>> path = "../images"
>>> _get_filenames(path, media_type=MediaType.IMAGE)
['images/4.jpeg', 'images/1.jpeg', 'images/5.jpeg', 'images/3.jpeg', 'images/2.jpeg']
"""
extensions = _get_extensions(media_type)
filenames: List[str] = []
if media_type == MediaType.CAMERA:
raise ValueError(
"Cannot get filenames for camera. Only image and video files are supported."
)
if isinstance(path, str):
path = Path(path)
if path.is_file():
if _is_file_with_supported_extensions(path, extensions):
filenames = [path.as_posix()]
else:
raise ValueError("Extension not supported for media type")
if path.is_dir():
for filename in path.rglob("*"):
if _is_file_with_supported_extensions(filename, extensions):
filenames.append(filename.as_posix())
filenames = natsorted(filenames) # type: ignore[assignment]
if len(filenames) == 0:
raise FileNotFoundError(f"No {media_type.name} file found in {path}!")
return filenames
def _read_video_stream(stream: cv2.VideoCapture) -> Iterator[np.ndarray]:
"""
Read video and yield the frame.
:param stream: Video stream captured via OpenCV's VideoCapture
:return: Individual frame
"""
while True:
frame_available, frame = stream.read()
if not frame_available:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
yield frame
stream.release()
class BaseStreamer(metaclass=abc.ABCMeta):
"""
Base Streamer interface to implement Image, Video and Camera streamers.
"""
@abc.abstractmethod
def get_stream(self, stream_input):
"""
Get the streamer object, depending on the media type.
:param stream_input: Path to the stream or
camera device index in case to capture from camera.
:return: Streamer object.
"""
raise NotImplementedError
@abc.abstractmethod
def __iter__(self) -> Iterator[np.ndarray]:
"""
Iterate through the streamer object that is a Python Generator object.
:return: Yield the image or video frame.
"""
raise NotImplementedError
def _process_run(streamer: BaseStreamer, buffer: multiprocessing.Queue):
"""
    Private function that is run by the worker process.
    Blocks while the buffer is full and resumes as soon as space becomes
    available, so frames are delivered in order rather than dropped.
:param streamer: The streamer to retrieve frames from
:param buffer: The buffer to place the retrieved frames in
"""
for frame in streamer:
buffer.put(frame)
class ThreadedStreamer(BaseStreamer):
"""
    Runs a BaseStreamer in a separate process.
    :param streamer: The streamer to run in a background process
    :param buffer_size: Number of frames to buffer internally
:example:
>>> streamer = VideoStreamer(path="../demo.mp4")
>>> threaded_streamer = ThreadedStreamer(streamer)
... for frame in threaded_streamer:
... pass
"""
def __init__(self, streamer: BaseStreamer, buffer_size: int = 2):
self.buffer_size = buffer_size
self.streamer = streamer
def get_stream(self, _=None) -> BaseStreamer:
return self.streamer
def __iter__(self) -> Iterator[np.ndarray]:
buffer: multiprocessing.Queue = multiprocessing.Queue(maxsize=self.buffer_size)
process = multiprocessing.Process(
target=_process_run, args=(self.get_stream(), buffer)
)
        # Make the process a daemon so that it will exit when the main program exits as well
process.daemon = True
process.start()
try:
while process.is_alive() or not buffer.empty():
try:
yield buffer.get(timeout=0.1)
except queue.Empty:
pass
except GeneratorExit:
process.terminate()
finally:
process.join(timeout=0.1)
            # Process.kill() is only available in Python 3.7 and newer.
# Skip it if running an older Python version.
if sys.version_info >= (3, 7) and process.exitcode is None:
process.kill()
class VideoStreamer(BaseStreamer):
"""
Video Streamer
:param path: Path to the video file or directory.
:example:
>>> streamer = VideoStreamer(path="../demo.mp4")
... for frame in streamer:
... pass
"""
def __init__(self, path: str) -> None:
self.media_type = MediaType.VIDEO
self.filenames = _get_filenames(path, media_type=MediaType.VIDEO)
def get_stream(self, stream_input: str) -> cv2.VideoCapture:
return cv2.VideoCapture(stream_input)
def __iter__(self) -> Iterator[np.ndarray]:
for filename in self.filenames:
stream = self.get_stream(stream_input=filename)
yield from _read_video_stream(stream)
class CameraStreamer(BaseStreamer):
"""
Stream video frames from camera
:param camera_device: Camera device index e.g, 0, 1
:example:
>>> streamer = CameraStreamer(camera_device=0)
... for frame in streamer:
... cv2.imshow("Window", frame)
... if ord("q") == cv2.waitKey(1):
... break
"""
def __init__(self, camera_device: Optional[int] = None):
self.media_type = MediaType.CAMERA
self.camera_device = 0 if camera_device is None else camera_device
def get_stream(self, stream_input: int):
return cv2.VideoCapture(stream_input)
def __iter__(self) -> Iterator[np.ndarray]:
stream = self.get_stream(stream_input=self.camera_device)
yield from _read_video_stream(stream)
class ImageStreamer(BaseStreamer):
"""
Stream from image file or directory.
:param path: Path to an image or directory.
:example:
>>> streamer = ImageStreamer(path="../images")
... for frame in streamer:
... cv2.imshow("Window", frame)
... cv2.waitKey(0)
"""
def __init__(self, path: str) -> None:
self.media_type = MediaType.IMAGE
self.filenames = _get_filenames(path=path, media_type=MediaType.IMAGE)
@staticmethod
def get_stream(stream_input: str) -> Iterable[np.ndarray]:
image = cv2.imread(stream_input)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
yield image
def __iter__(self) -> Iterator[np.ndarray]:
for filename in self.filenames:
yield from self.get_stream(stream_input=filename)
def get_streamer(
path: Optional[str] = None,
camera_device: Optional[int] = None,
threaded: bool = False,
) -> BaseStreamer:
"""
Get streamer object based on the file path or camera device index provided.
:param path: Path to file or directory.
:param camera_device: Camera device index.
:param threaded: Threaded streaming option
"""
if path is not None and camera_device is not None:
raise ValueError(
"Both path and camera device is provided. Choose either camera or path to a image/video file."
)
media_type = get_media_type(path)
streamer: BaseStreamer
if path is not None and media_type == MediaType.IMAGE:
streamer = ImageStreamer(path)
elif path is not None and media_type == MediaType.VIDEO:
streamer = VideoStreamer(path)
elif media_type == MediaType.CAMERA:
if camera_device is None:
camera_device = 0
streamer = CameraStreamer(camera_device)
else:
raise ValueError("Unknown media type")
if threaded:
streamer = ThreadedStreamer(streamer)
return streamer
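# Editor's illustration: a hedged usage sketch of get_streamer; the file path
# is hypothetical and must point at an existing video for the code to run.
#
#     streamer = get_streamer(path="./demo.mp4", threaded=True)
#     for frame in streamer:
#         pass  # each frame is an RGB numpy array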
| 29.610966
| 106
| 0.638127
|
7950ddaf3f54e0fd5a71728e8fd2d8f0da0b959f
| 3,775
|
py
|
Python
|
examples/opencv_app.py
|
saddy001/remi
|
1dd886a55b0d2750880253508df43c90db4f0b08
|
[
"Apache-2.0"
] | 1
|
2018-03-30T16:57:49.000Z
|
2018-03-30T16:57:49.000Z
|
examples/opencv_app.py
|
saddy001/remi
|
1dd886a55b0d2750880253508df43c90db4f0b08
|
[
"Apache-2.0"
] | null | null | null |
examples/opencv_app.py
|
saddy001/remi
|
1dd886a55b0d2750880253508df43c90db4f0b08
|
[
"Apache-2.0"
] | null | null | null |
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import io
import cv2
import remi.gui as gui
from remi import start, App
class OpencvVideoWidget(gui.Image):
def __init__(self, video_source=0, fps=5, **kwargs):
super(OpencvVideoWidget, self).__init__("/%s/get_image_data" % id(self), **kwargs)
self.fps = fps
self.capture = cv2.VideoCapture(video_source)
javascript_code = gui.Tag()
javascript_code.type = 'script'
javascript_code.attributes['type'] = 'text/javascript'
javascript_code.add_child('code', """
function update_image%(id)s(){
if(document.getElementById('%(id)s').getAttribute('play')=='False')
return;
var url = '/%(id)s/get_image_data';
var xhr = new XMLHttpRequest();
xhr.open('GET', url, true);
xhr.responseType = 'blob'
xhr.onload = function(e){
var urlCreator = window.URL || window.webkitURL;
var imageUrl = urlCreator.createObjectURL(this.response);
document.getElementById('%(id)s').src = imageUrl;
}
xhr.send();
};
setInterval( update_image%(id)s, %(update_rate)s );
""" % {'id': id(self), 'update_rate': 1000.0 / self.fps})
self.add_child('javascript_code', javascript_code)
self.play()
def play(self):
self.attributes['play'] = True
def stop(self):
self.attributes['play'] = False
def get_image_data(self):
ret, frame = self.capture.read()
if ret:
ret, jpeg = cv2.imencode('.jpg', frame)
if ret:
headers = {'Content-type': 'image/jpeg'}
# tostring is an alias to tobytes, which wasn't added till numpy 1.9
return [jpeg.tostring(), headers]
return None, None
class MyApp(App):
def __init__(self, *args):
super(MyApp, self).__init__(*args)
def main(self, name='world'):
        # the arguments are width, height and margin
wid = gui.Widget(width=640, height=600, margin='0px auto')
self.opencvideo_widget = OpencvVideoWidget(0, 10, width=620, height=530)
self.opencvideo_widget.style['margin'] = '10px'
menu = gui.Menu(width=620, height=30)
m1 = gui.MenuItem('Video', width=100, height=30)
m11 = gui.MenuItem('Play', width=100, height=30)
m12 = gui.MenuItem('Stop', width=100, height=30)
m11.set_on_click_listener(self.menu_play_clicked)
m12.set_on_click_listener(self.menu_stop_clicked)
menu.append(m1)
m1.append(m11)
m1.append(m12)
wid.append(menu)
wid.append(self.opencvideo_widget)
# returning the root widget
return wid
def menu_play_clicked(self, widget):
self.opencvideo_widget.play()
def menu_stop_clicked(self, widget):
self.opencvideo_widget.stop()
if __name__ == "__main__":
# optional parameters
# start(MyApp,address='127.0.0.1', port=8081, multiple_instance=False,enable_file_cache=True, update_interval=0.1, start_browser=True)
start(MyApp)
| 34.633028
| 138
| 0.615629
|
7950de24fdc3f3cedd834064297305bb42a59917
| 10,871
|
py
|
Python
|
tests/tasks/prefect/test_flow_run.py
|
knockrentals/prefect_core
|
d1e4413f1fa18baef0db6dba0c053b04ce593577
|
[
"Apache-2.0"
] | null | null | null |
tests/tasks/prefect/test_flow_run.py
|
knockrentals/prefect_core
|
d1e4413f1fa18baef0db6dba0c053b04ce593577
|
[
"Apache-2.0"
] | null | null | null |
tests/tasks/prefect/test_flow_run.py
|
knockrentals/prefect_core
|
d1e4413f1fa18baef0db6dba0c053b04ce593577
|
[
"Apache-2.0"
] | null | null | null |
from datetime import timedelta
import pendulum
import pytest
from unittest.mock import MagicMock
import prefect
from prefect.client.client import FlowRunInfoResult, ProjectInfo
from prefect.engine import signals, state
from prefect.run_configs import UniversalRun
from prefect.tasks.prefect.flow_run import StartFlowRun
@pytest.fixture()
def client(monkeypatch):
cloud_client = MagicMock(
graphql=MagicMock(
return_value=MagicMock(
data=MagicMock(flow=[MagicMock(id="abc123"), MagicMock(id="def456")])
)
),
create_flow_run=MagicMock(return_value="xyz890"),
get_cloud_url=MagicMock(return_value="https://api.prefect.io/flow/run/url"),
create_task_run_artifact=MagicMock(return_value="id"),
get_flow_run_info=MagicMock(
return_value=FlowRunInfoResult(
id="my-flow-run-id",
name="test-run",
flow_id="xyz890",
version=1,
task_runs=[],
state=state.Success(),
scheduled_start_time=None,
project=ProjectInfo(id="my-project-id", name="Test Project"),
parameters={"test": "ing"},
context={},
)
),
)
monkeypatch.setattr(
"prefect.tasks.prefect.flow_run.Client", MagicMock(return_value=cloud_client)
)
monkeypatch.setattr(
"prefect.artifacts.Client", MagicMock(return_value=cloud_client)
)
yield cloud_client
def test_deprecated_old_name():
from prefect.tasks.prefect import FlowRunTask
with pytest.warns(UserWarning, match="StartFlowRun"):
task = FlowRunTask(name="My flow run")
assert isinstance(task, StartFlowRun)
assert task.name == "My flow run"
class TestStartFlowRunCloud:
def test_initialization(self, cloud_api):
now = pendulum.now()
run_config = UniversalRun()
# verify that the task is initialized as expected
task = StartFlowRun(
name="My Flow Run Task",
checkpoint=False,
project_name="Test Project",
flow_name="Test Flow",
new_flow_context={"foo": "bar"},
parameters={"test": "ing"},
run_config=run_config,
run_name="test-run",
scheduled_start_time=now,
)
assert task.name == "My Flow Run Task"
assert task.checkpoint is False
assert task.project_name == "Test Project"
assert task.flow_name == "Test Flow"
assert task.new_flow_context == {"foo": "bar"}
assert task.parameters == {"test": "ing"}
assert task.run_config == run_config
assert task.run_name == "test-run"
assert task.scheduled_start_time == now
def test_init_errors_if_tasks_passed_to_parameters(self, cloud_api):
with pytest.raises(TypeError, match="An instance of `Task` was passed"):
StartFlowRun(
name="testing", parameters={"a": 1, "b": prefect.Parameter("b")}
)
@pytest.mark.parametrize("idempotency_key", [None, "my-key"])
@pytest.mark.parametrize("task_run_id", [None, "test-id"])
def test_flow_run_task_submit_args(
self, client, cloud_api, idempotency_key, task_run_id
):
run_config = UniversalRun()
# verify that create_flow_run was called
task = StartFlowRun(
project_name="Test Project",
flow_name="Test Flow",
parameters={"test": "ing"},
run_config=run_config,
run_name="test-run",
)
# verify that run returns the new flow run ID
with prefect.context(task_run_id=task_run_id):
assert task.run(idempotency_key=idempotency_key) == "xyz890"
# verify the GraphQL query was called with the correct arguments
query_args = list(client.graphql.call_args_list[0][0][0]["query"].keys())[0]
assert "Test Project" in query_args
assert "Test Flow" in query_args
# verify create_flow_run was called with the correct arguments
assert client.create_flow_run.call_args[1] == dict(
flow_id="abc123",
parameters={"test": "ing"},
run_config=run_config,
idempotency_key=idempotency_key or task_run_id,
context=None,
run_name="test-run",
scheduled_start_time=None,
)
def test_flow_run_task_uses_scheduled_start_time(self, client, cloud_api):
in_one_hour = pendulum.now().add(hours=1)
# verify that create_flow_run was called
task = StartFlowRun(
project_name="Test Project",
flow_name="Test Flow",
scheduled_start_time=in_one_hour,
)
# verify that run returns the new flow run ID
assert task.run() == "xyz890"
# verify create_flow_run was called with the correct arguments
client.create_flow_run.assert_called_once_with(
flow_id="abc123",
parameters=None,
idempotency_key=None,
context=None,
run_name=None,
scheduled_start_time=in_one_hour,
run_config=None,
)
def test_flow_run_task_without_flow_name(self, cloud_api):
# verify that a ValueError is raised without a flow name
task = StartFlowRun(project_name="Test Project")
with pytest.raises(ValueError, match="Must provide a flow name."):
task.run()
def test_flow_run_task_without_project_name(self, cloud_api):
# verify that a ValueError is raised without a project name
task = StartFlowRun(flow_name="Test Flow")
with pytest.raises(ValueError, match="Must provide a project name."):
task.run()
def test_flow_run_task_with_no_matching_flow(self, client, cloud_api):
# verify a ValueError is raised if the client returns no flows
task = StartFlowRun(flow_name="Test Flow", project_name="Test Project")
client.graphql = MagicMock(return_value=MagicMock(data=MagicMock(flow=[])))
with pytest.raises(ValueError, match="Flow 'Test Flow' not found."):
task.run()
def test_flow_run_link_artifact(self, client, cloud_api):
task = StartFlowRun(
project_name="Test Project",
flow_name="Test Flow",
parameters={"test": "ing"},
run_name="test-run",
)
with prefect.context(running_with_backend=True, task_run_id="trid"):
task.run()
client.create_task_run_artifact.assert_called_once_with(
data={"link": "/flow/run/url"}, kind="link", task_run_id="trid"
)
class TestStartFlowRunServer:
def test_initialization(self, server_api):
now = pendulum.now()
# verify that the task is initialized as expected
task = StartFlowRun(
name="My Flow Run Task",
project_name="Demo",
checkpoint=False,
flow_name="Test Flow",
new_flow_context={"foo": "bar"},
parameters={"test": "ing"},
run_name="test-run",
scheduled_start_time=now,
)
assert task.name == "My Flow Run Task"
assert task.checkpoint is False
assert task.flow_name == "Test Flow"
assert task.new_flow_context == {"foo": "bar"}
assert task.parameters == {"test": "ing"}
assert task.run_name == "test-run"
assert task.scheduled_start_time == now
def test_flow_run_task(self, client, server_api):
# verify that create_flow_run was called
task = StartFlowRun(
flow_name="Test Flow",
project_name="Demo",
parameters={"test": "ing"},
run_name="test-run",
)
# verify that run returns the new flow run ID
assert task.run() == "xyz890"
# verify the GraphQL query was called with the correct arguments
query_args = list(client.graphql.call_args_list[0][0][0]["query"].keys())[0]
assert "Test Flow" in query_args
# verify create_flow_run was called with the correct arguments
client.create_flow_run.assert_called_once_with(
flow_id="abc123",
parameters={"test": "ing"},
idempotency_key=None,
context=None,
run_name="test-run",
scheduled_start_time=None,
run_config=None,
)
def test_flow_run_task_with_wait(self, client, server_api):
# verify that create_flow_run was called
task = StartFlowRun(
flow_name="Test Flow",
project_name="Demo",
parameters={"test": "ing"},
run_name="test-run",
wait=True,
poll_interval=timedelta(seconds=3),
)
assert task.poll_interval == timedelta(seconds=3)
# Run flow, and assert that signals a success
with pytest.raises(signals.SUCCESS) as exc_info:
task.run()
flow_state_signal = exc_info.value
assert isinstance(flow_state_signal.state, state.Success)
# Check flow ID
assert str(flow_state_signal).split(" ")[0] == "xyz890"
# verify the GraphQL query was called with the correct arguments
query_args = list(client.graphql.call_args_list[0][0][0]["query"].keys())[0]
assert "Test Flow" in query_args
# verify create_flow_run was called with the correct arguments
client.create_flow_run.assert_called_once_with(
flow_id="abc123",
parameters={"test": "ing"},
idempotency_key=None,
context=None,
run_name="test-run",
scheduled_start_time=None,
run_config=None,
)
def test_flow_run_task_poll_interval_too_short(self):
with pytest.raises(ValueError):
task = StartFlowRun(
flow_name="Test Flow",
project_name="Demo",
parameters={"test": "ing"},
run_name="test-run",
wait=True,
poll_interval=timedelta(seconds=2),
)
def test_flow_run_task_without_flow_name(self, server_api):
# verify that a ValueError is raised without a flow name
task = StartFlowRun()
with pytest.raises(ValueError, match="Must provide a flow name."):
task.run()
def test_flow_run_task_with_no_matching_flow(self, client, server_api):
# verify a ValueError is raised if the client returns no flows
task = StartFlowRun(flow_name="Test Flow", project_name="Demo")
client.graphql = MagicMock(return_value=MagicMock(data=MagicMock(flow=[])))
with pytest.raises(ValueError, match="Flow 'Test Flow' not found."):
task.run()
| 38.14386
| 85
| 0.615123
|
7950de6f095226cc19c10921e1c58a21c9dad234
| 2,185
|
py
|
Python
|
src/main/resources/redmine/Server.py
|
xebialabs-community/xlr-redmine-plugin
|
8257bb050ce6a0fa40b98dfae074d0802365f818
|
[
"MIT"
] | null | null | null |
src/main/resources/redmine/Server.py
|
xebialabs-community/xlr-redmine-plugin
|
8257bb050ce6a0fa40b98dfae074d0802365f818
|
[
"MIT"
] | null | null | null |
src/main/resources/redmine/Server.py
|
xebialabs-community/xlr-redmine-plugin
|
8257bb050ce6a0fa40b98dfae074d0802365f818
|
[
"MIT"
] | null | null | null |
#
# Copyright 2019 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from org.apache.http.client import ClientProtocolException
params = {'url': configuration.url, 'proxyHost': configuration.proxyHost, 'proxyPort': configuration.proxyPort, 'proxyUsername': configuration.proxyUsername, 'proxyPassword': configuration.proxyPassword}
response = None
try:
headers = None
if configuration.apiKey:
headers = {'X-Redmine-API-Key': configuration.apiKey }
elif configuration.password:
params['username'] = configuration.username
params['password'] = configuration.password
response = HttpRequest(params).get('/issues.json',
contentType='application/json', headers=headers)
except ClientProtocolException:
raise Exception("URL is not valid")
# Redmine api returns 403 in case you are authenticated but not enough permissions
if response.status != 403 and response.status != 200:
reason = "Unknown"
if response.status == 400:
reason = "Bad request"
elif response.status == 401:
reason = "Unauthorized"
raise Exception("HTTP response code %s, reason %s" % (response.status, reason))
| 62.428571
| 462
| 0.747368
|
7950df4d9487228feba7ed00fb1b4690ba7b89c5
| 7,461
|
py
|
Python
|
eth2/beacon/types/states.py
|
onyb/trinity
|
347e2beac23c5c1bb4aab136bb44c162467f6ff7
|
[
"MIT"
] | null | null | null |
eth2/beacon/types/states.py
|
onyb/trinity
|
347e2beac23c5c1bb4aab136bb44c162467f6ff7
|
[
"MIT"
] | null | null | null |
eth2/beacon/types/states.py
|
onyb/trinity
|
347e2beac23c5c1bb4aab136bb44c162467f6ff7
|
[
"MIT"
] | null | null | null |
from typing import Sequence, Type, TypeVar
from eth.constants import ZERO_HASH32
from eth_typing import Hash32
from eth_utils import humanize_hash
from ssz.hashable_container import HashableContainer
from ssz.sedes import Bitvector, List, Vector, bytes32, uint64
from eth2.beacon.constants import JUSTIFICATION_BITS_LENGTH, ZERO_ROOT
from eth2.beacon.helpers import compute_epoch_at_slot
from eth2.beacon.typing import Bitfield, Epoch, Gwei, Root, Slot, Timestamp
from eth2.configs import Eth2Config
from .block_headers import BeaconBlockHeader, default_beacon_block_header
from .checkpoints import Checkpoint, default_checkpoint
from .defaults import (
default_slot,
default_timestamp,
default_tuple,
default_tuple_of_size,
)
from .eth1_data import Eth1Data, default_eth1_data
from .forks import Fork, default_fork
from .pending_attestations import PendingAttestation
from .validators import Validator
default_justification_bits = Bitfield((False,) * JUSTIFICATION_BITS_LENGTH)
TBeaconState = TypeVar("TBeaconState", bound="BeaconState")
# Use mainnet constants for defaults. We can't import the config object because of an import cycle.
# TODO: When py-ssz is updated to support size configs, the config will be passed to the `create`
# classmethod and we can create the defaults dynamically there.
default_block_roots = default_tuple_of_size(2 ** 13, ZERO_ROOT)
default_state_roots = default_tuple_of_size(2 ** 13, ZERO_HASH32)
default_randao_mixes = default_tuple_of_size(2 ** 16, ZERO_HASH32)
default_slashings = default_tuple_of_size(2 ** 13, Gwei(0))
class BeaconState(HashableContainer):
fields = [
# Versioning
("genesis_time", uint64),
("slot", uint64),
("fork", Fork),
# History
("latest_block_header", BeaconBlockHeader),
(
"block_roots",
Vector(bytes32, 1),
), # Needed to process attestations, older to newer # noqa: E501
("state_roots", Vector(bytes32, 1)),
(
"historical_roots",
List(bytes32, 1),
), # allow for a log-sized Merkle proof from any block to any historical block root # noqa: E501
# Ethereum 1.0 chain
("eth1_data", Eth1Data),
("eth1_data_votes", List(Eth1Data, 1)),
("eth1_deposit_index", uint64),
# Validator registry
("validators", List(Validator, 1)),
("balances", List(uint64, 1)),
# Shuffling
("randao_mixes", Vector(bytes32, 1)),
# Slashings
(
"slashings",
Vector(uint64, 1),
), # Balances slashed at every withdrawal period # noqa: E501
# Attestations
("previous_epoch_attestations", List(PendingAttestation, 1)),
("current_epoch_attestations", List(PendingAttestation, 1)),
# Justification
("justification_bits", Bitvector(JUSTIFICATION_BITS_LENGTH)),
("previous_justified_checkpoint", Checkpoint),
("current_justified_checkpoint", Checkpoint),
# Finality
("finalized_checkpoint", Checkpoint),
]
@classmethod
def create(
cls: Type[TBeaconState],
*,
genesis_time: Timestamp = default_timestamp,
slot: Slot = default_slot,
fork: Fork = default_fork,
latest_block_header: BeaconBlockHeader = default_beacon_block_header,
block_roots: Sequence[Root] = default_block_roots,
state_roots: Sequence[Hash32] = default_state_roots,
historical_roots: Sequence[Hash32] = default_tuple,
eth1_data: Eth1Data = default_eth1_data,
eth1_data_votes: Sequence[Eth1Data] = default_tuple,
eth1_deposit_index: int = 0,
validators: Sequence[Validator] = default_tuple,
balances: Sequence[Gwei] = default_tuple,
randao_mixes: Sequence[Hash32] = default_randao_mixes,
slashings: Sequence[Gwei] = default_slashings,
previous_epoch_attestations: Sequence[PendingAttestation] = default_tuple,
current_epoch_attestations: Sequence[PendingAttestation] = default_tuple,
justification_bits: Bitfield = default_justification_bits,
previous_justified_checkpoint: Checkpoint = default_checkpoint,
current_justified_checkpoint: Checkpoint = default_checkpoint,
finalized_checkpoint: Checkpoint = default_checkpoint,
config: Eth2Config = None,
validator_and_balance_length_check: bool = True,
) -> TBeaconState:
# We usually want to check that the lengths of each list are the same
# In some cases, e.g. SSZ fuzzing, they are not and we still want to instantiate an object.
if validator_and_balance_length_check:
if len(validators) != len(balances):
raise ValueError(
f"The length of validators ({len(validators)}) and balances ({len(balances)}) "
"lists should be the same."
)
if config:
# try to provide sane defaults
if block_roots == default_tuple:
block_roots = default_tuple_of_size(
config.SLOTS_PER_HISTORICAL_ROOT, ZERO_ROOT
)
if state_roots == default_tuple:
state_roots = default_tuple_of_size(
config.SLOTS_PER_HISTORICAL_ROOT, ZERO_HASH32
)
if randao_mixes == default_tuple:
randao_mixes = default_tuple_of_size(
config.EPOCHS_PER_HISTORICAL_VECTOR, ZERO_HASH32
)
if slashings == default_tuple:
slashings = default_tuple_of_size(
config.EPOCHS_PER_SLASHINGS_VECTOR, Gwei(0)
)
return super().create(
genesis_time=genesis_time,
slot=slot,
fork=fork,
latest_block_header=latest_block_header,
block_roots=block_roots,
state_roots=state_roots,
historical_roots=historical_roots,
eth1_data=eth1_data,
eth1_data_votes=eth1_data_votes,
eth1_deposit_index=eth1_deposit_index,
validators=validators,
balances=balances,
randao_mixes=randao_mixes,
slashings=slashings,
previous_epoch_attestations=previous_epoch_attestations,
current_epoch_attestations=current_epoch_attestations,
justification_bits=justification_bits,
previous_justified_checkpoint=previous_justified_checkpoint,
current_justified_checkpoint=current_justified_checkpoint,
finalized_checkpoint=finalized_checkpoint,
)
def __str__(self) -> str:
return (
f"[hash_tree_root]={humanize_hash(self.hash_tree_root)}, slot={self.slot}"
)
@property
def validator_count(self) -> int:
return len(self.validators)
def current_epoch(self, slots_per_epoch: int) -> Epoch:
return compute_epoch_at_slot(self.slot, slots_per_epoch)
def previous_epoch(self, slots_per_epoch: int, genesis_epoch: Epoch) -> Epoch:
current_epoch = self.current_epoch(slots_per_epoch)
if current_epoch == genesis_epoch:
return genesis_epoch
else:
return Epoch(current_epoch - 1)
def next_epoch(self, slots_per_epoch: int) -> Epoch:
return Epoch(self.current_epoch(slots_per_epoch) + 1)
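# Editor's illustration: a hedged sketch of creating an empty state with the
# mainnet-sized defaults above and asking for its epoch. SLOTS_PER_EPOCH=32 and
# a default slot of 0 are assumptions made for the example only.
#
#     state = BeaconState.create()
#     assert state.validator_count == 0
#     assert state.current_epoch(slots_per_epoch=32) == 0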
| 40.548913
| 106
| 0.667873
|
7950e128e74649ab0a837e8ededf4cdf3faf0b64
| 5,495
|
py
|
Python
|
deprecated/matrix.py
|
s-geronimoanderson/compat-id
|
3ae52dd3d3e92285de425304ccde02f87d2ae880
|
[
"Apache-2.0"
] | null | null | null |
deprecated/matrix.py
|
s-geronimoanderson/compat-id
|
3ae52dd3d3e92285de425304ccde02f87d2ae880
|
[
"Apache-2.0"
] | null | null | null |
deprecated/matrix.py
|
s-geronimoanderson/compat-id
|
3ae52dd3d3e92285de425304ccde02f87d2ae880
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from scipy.sparse import coo_matrix
class Matrix:
"""A sparse matrix class (indexed from zero). Replace with NumPy arrays."""
def __init__(self, matrix=None, size=None):
"""Size is a tuple (m,n) representing m rows and n columns."""
if matrix is None:
self.data = {}
if size is None:
self.column_count = 0
self.row_count = 0
self.size = (self.row_count, self.column_count)
else:
self.row_count, self.column_count = size[:2]
self.size = size
else:
"""Initialize to be a clone of the given matrix."""
self.column_count = matrix.column_count
self.data = matrix.data
self.row_count = matrix.row_count
self.size = matrix.size
def get(self, subscript):
"""Return the matrix element indexed by the given (valid) subscript."""
row, column = subscript[:2]
if self.__is_valid(subscript):
# Get the value if it's present, else a zero.
result = self.data.get(subscript, 0)
else:
raise IndexError
return result
    def set(self, subscript, value):
        """Set the matrix element indexed by the given (valid) subscript."""
        if not self.__is_valid(subscript):
            raise IndexError
        if value != 0:
            self.data[subscript] = value
        else:
            # Keep the representation sparse: writing a zero clears any entry.
            self.data.pop(subscript, None)
    def __is_valid(self, subscript):
        """Return whether the given subscript is within the matrix's bounds."""
        row, column = subscript[:2]
        return 0 <= row < self.row_count and 0 <= column < self.column_count
def __is_valid_row(self, row_number):
"""Return whether the given row is within the matrix's bounds."""
return self.__is_valid((row_number, 0))
    def __str__(self):
        """Return a NumPy-like matrix representation."""
        result = ""
        for row in range(self.row_count):
            current = []
            for column in range(self.column_count):
                subscript = (row, column)
                current.append(self.get(subscript))
            if result == "":
                result = "[{}".format(current)
            else:
                result = "{0}\n {1}".format(result, current)
        return "{}]".format(result)
def extend_columns(self, matrix):
raise NotImplementedError
    def extend_rows(self, matrix):
        """Extend the current matrix with the given matrix's rows."""
        row_count, column_count = matrix.size[:2]
        if column_count != self.column_count:
            raise ValueError
        # Remember where the appended rows start before growing the matrix.
        base_row_count = self.row_count
        self.row_count += row_count
        self.size = (self.row_count, self.column_count)
        for key, value in matrix.data.items():
            row, column = key[:2]
            self.set((base_row_count + row, column), value)
        return self
    def replace_row(self, row_number, vector):
        """Replace the specified row with the given vector."""
        if not self.__is_valid_row(row_number):
            raise ValueError
        row_count, column_count = vector.size[:2]
        if row_count != 1 and column_count != 1:
            raise ValueError
        # Eliminate current row entries (collect keys first so the dict is not
        # mutated while being iterated).
        for key in [key for key in self.data if key[0] == row_number]:
            self.data.pop(key, None)
        # Update row with vector elements.
        if row_count == 1:
            new_row = vector.transpose()
        else:
            new_row = vector
        for key, value in new_row.data.items():
            row, _ = key[:2]
            self.set((row_number, row), value)
        return self
def submatrix(self, row_set, column_set):
"""Return a submatrix with the given rows and columns."""
        submatrix = Matrix(size=(len(row_set), len(column_set)))
raise NotImplementedError
    def to_vec(self):
        """Return an m*n length vector comprising all the matrix's columns."""
        row_count = self.row_count
        vector = Matrix(size=(row_count * self.column_count, 1))
        for key, value in self.data.items():
            row, column = key[:2]
            # Stacking columns: entry (row, column) lands at column * m + row.
            subscript = (column * row_count + row, 0)
            vector.set(subscript, value)
        return vector
def to_ijv(self):
"""Return the matrix in ijv (triplet) array format."""
row_indices = []
column_indices = []
nonzero_elements = []
k = 0
for key, value in self.data.items():
if value == 0:
continue
row, col = key[:2]
row_indices.append(row)
column_indices.append(col)
nonzero_elements.append(value)
k += 1
return row_indices, column_indices, nonzero_elements
    def to_coo_matrix(self):
        """Return the matrix in COOrdinate format."""
        row_indices, column_indices, nonzero_elements = self.to_ijv()
        return coo_matrix((nonzero_elements, (row_indices, column_indices)),
                          shape=self.size)
    def transpose(self):
        """Transpose the matrix in place and return it."""
        m, n = self.size[:2]
        transposed_data = {}
        for key, value in self.data.items():
            i, j = key[:2]
            transposed_data[(j, i)] = value
        self.data = transposed_data
        self.row_count, self.column_count = n, m
        self.size = (n, m)
        return self
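# Editor's illustration: a short usage sketch of the Matrix class, assuming the
# bug fixes applied above.
#
#     m = Matrix(size=(2, 2))
#     m.set((0, 1), 5)
#     assert m.get((0, 1)) == 5 and m.get((1, 0)) == 0
#     rows, cols, vals = m.to_ijv()
#     assert (rows, cols, vals) == ([0], [1], [5])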
| 36.879195
| 79
| 0.571611
|
7950e187cab9539ccfb7de350c2196fd7a3a9a64
| 43,521
|
py
|
Python
|
tensorflow/python/framework/type_spec.py
|
kim-com/tensorflow
|
4301e3f34b8da528c58bdafe05cd66c8a55fce9e
|
[
"Apache-2.0"
] | 1
|
2022-03-29T23:09:34.000Z
|
2022-03-29T23:09:34.000Z
|
tensorflow/python/framework/type_spec.py
|
kim-com/tensorflow
|
4301e3f34b8da528c58bdafe05cd66c8a55fce9e
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/framework/type_spec.py
|
kim-com/tensorflow
|
4301e3f34b8da528c58bdafe05cd66c8a55fce9e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Type specifications for TensorFlow APIs."""
import abc
import collections
import functools
import re
from typing import List, Optional, Sequence, Any
import warnings
import numpy as np
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.types import trace
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# Use LazyLoader to avoid circular dependencies.
tensor_spec = LazyLoader(
"tensor_spec", globals(),
"tensorflow.python.framework.tensor_spec")
ops = LazyLoader("ops", globals(),
"tensorflow.python.framework.ops")
@tf_export("TypeSpec", v1=["TypeSpec", "data.experimental.Structure"])
class TypeSpec(trace.TraceType, metaclass=abc.ABCMeta):
"""Specifies a TensorFlow value type.
A `tf.TypeSpec` provides metadata describing an object accepted or returned
by TensorFlow APIs. Concrete subclasses, such as `tf.TensorSpec` and
`tf.RaggedTensorSpec`, are used to describe different value types.
For example, `tf.function`'s `input_signature` argument accepts a list
(or nested structure) of `TypeSpec`s.
Creating new subclasses of `TypeSpec` (outside of TensorFlow core) is not
currently supported. In particular, we may make breaking changes to the
private methods and properties defined by this base class.
Example:
>>> spec = tf.RaggedTensorSpec(shape=[None, None], dtype=tf.int32)
>>> @tf.function(input_signature=[spec])
... def double(x):
... return x * 2
>>> print(double(tf.ragged.constant([[1, 2], [3]])))
<tf.RaggedTensor [[2, 4], [6]]>
"""
# === Subclassing ===
#
# Each `TypeSpec` subclass must define:
#
# * A "component encoding" for values.
# * A "serialization" for types.
#
# The component encoding for a value is a nested structure of `tf.Tensor`
# or `CompositeTensor` that can be used by the `TypeSpec` to reconstruct
# the value. Each individual `TypeSpec` must use the same nested structure
# for all values -- this structure is defined by the `component_specs`
# attribute. Decomposing values into components, and reconstructing them
# from those components, should be inexpensive. In particular, it should
# *not* require any TensorFlow ops.
#
# The serialization for a `TypeSpec` is a nested tuple of values that can
# be used to reconstruct the `TypeSpec`. See the documentation for
# `_serialize()` for more information.
__slots__ = []
@abc.abstractproperty
def value_type(self):
"""The Python type for values that are compatible with this TypeSpec.
In particular, all values that are compatible with this TypeSpec must be an
instance of this type.
"""
raise NotImplementedError("%s.value_type" % type(self).__name__)
def is_subtype_of(self, other: trace.TraceType) -> bool:
"""Returns True if `self` is a subtype of `other`.
Implements the tf.types.experimental.func.TraceType interface.
If not overridden by a subclass, the default behavior is to assume the
TypeSpec is covariant upon attributes that implement TraceType and
invariant upon rest of the attributes as well as the structure and type
of the TypeSpec.
Args:
other: A TraceType object.
"""
if type(self) is not type(other):
return False
is_subtype = True
def check_attribute(attribute_self, attribute_other):
nonlocal is_subtype
if not is_subtype:
return
if isinstance(attribute_self, trace.TraceType):
if not attribute_self.is_subtype_of(attribute_other):
is_subtype = False
return
else:
if attribute_self != attribute_other:
is_subtype = False
try:
# TODO(b/217959193): Replace _serialize with parameter decomposition.
nest.map_structure(check_attribute, self._serialize(),
other._serialize()) # pylint: disable=protected-access
except (ValueError, TypeError):
return False
return is_subtype
def most_specific_common_supertype(
self,
others: Sequence[trace.TraceType]) -> Optional["TypeSpec"]:
"""Returns the most specific supertype TypeSpec of `self` and `others`.
Implements the tf.types.experimental.func.TraceType interface.
If not overridden by a subclass, the default behavior is to assume the
TypeSpec is covariant upon attributes that implement TraceType and
invariant upon rest of the attributes as well as the structure and type
of the TypeSpec.
Args:
others: A sequence of TraceTypes.
"""
if any(type(self) is not type(other) for other in others):
return None
has_supertype = True
def make_supertype_attribute(attribute_self, *attribute_others):
nonlocal has_supertype
if not has_supertype:
return
if isinstance(attribute_self, trace.TraceType):
attribute_supertype = attribute_self.most_specific_common_supertype(
attribute_others)
if attribute_supertype is None:
has_supertype = False
return
return attribute_supertype
else:
if not all(attribute_self == attribute_other
for attribute_other in attribute_others):
has_supertype = False
return
return attribute_self
try:
# TODO(b/217959193): Replace _serialize with parameter decomposition.
serialized_supertype = nest.map_structure(
make_supertype_attribute, self._serialize(),
*(o._serialize() for o in others)) # pylint: disable=protected-access
except (ValueError, TypeError):
return None
return self._deserialize(serialized_supertype) if has_supertype else None
# TODO(b/202447704): Reduce internal usages.
def is_compatible_with(self, spec_or_value):
"""Returns true if `spec_or_value` is compatible with this TypeSpec."""
# === Subclassing ===
# If not overridden by subclasses, the default behavior is to convert
# `spec_or_value` to a `TypeSpec` (if it isn't already); and then to
# consider two `TypeSpec`s compatible if they have the same type, and
# the values returned by `_serialize` are compatible (where
# `tf.TensorShape`, `tf.TensorSpec`, and `tf.DType` are checked for
# compatibility using their `is_compatible_with` method; and all other
# types are considered compatible if they are equal).
if not isinstance(spec_or_value, TypeSpec):
spec_or_value = type_spec_from_value(spec_or_value)
if type(self) is not type(spec_or_value):
return False
return self.__is_compatible(self._serialize(), spec_or_value._serialize()) # pylint: disable=protected-access
@deprecation.deprecated(None, "Use most_specific_common_supertype instead.")
def most_specific_compatible_type(self, other: "TypeSpec") -> "TypeSpec":
"""Returns the most specific TypeSpec compatible with `self` and `other`.
Deprecated. Please use `most_specific_common_supertype` instead.
Do not override this function.
Args:
other: A `TypeSpec`.
Raises:
ValueError: If there is no TypeSpec that is compatible with both `self`
and `other`.
"""
result = self.most_specific_common_supertype([other])
if result is None:
raise ValueError("No TypeSpec is compatible with both %s and %s" %
(self, other))
return result
def _with_tensor_ranks_only(self) -> "TypeSpec":
"""Returns a TypeSpec compatible with `self`, with tensor shapes relaxed.
Returns:
A `TypeSpec` that is compatible with `self`, where any `TensorShape`
information has been relaxed to include only tensor rank (and not
the dimension sizes for individual axes).
"""
# === Subclassing ===
# If not overridden by a subclass, the default behavior is to serialize
# this TypeSpec, relax any TensorSpec or TensorShape values, and
# deserialize the result.
def relax(value):
if isinstance(value, TypeSpec):
return value._with_tensor_ranks_only() # pylint: disable=protected-access
elif (isinstance(value, tensor_shape.TensorShape) and
value.rank is not None):
return tensor_shape.TensorShape([None] * value.rank)
else:
return value
return self._deserialize(nest.map_structure(relax, self._serialize()))
# TODO(b/206014848): Helper function to support logic that does not consider
# Tensor name. Will be removed once load-bearing usages of Tensor name are
# fixed.
def _without_tensor_names(self) -> "TypeSpec":
"""Returns a TypeSpec compatible with `self`, with tensor names removed.
Returns:
A `TypeSpec` that is compatible with `self`, where the name of any
`TensorSpec` is set to `None`.
"""
# === Subclassing ===
# If not overridden by a subclass, the default behavior is to serialize
# this TypeSpec, set the TensorSpecs' names to None, and deserialize the
# result.
def rename(value):
if isinstance(value, TypeSpec):
return value._without_tensor_names() # pylint: disable=protected-access
return value
return self._deserialize(nest.map_structure(rename, self._serialize()))
# === Component encoding for values ===
@abc.abstractmethod
def _to_components(self, value):
"""Encodes `value` as a nested structure of `Tensor` or `CompositeTensor`.
Args:
value: A value compatible with this `TypeSpec`. (Caller is responsible
for ensuring compatibility.)
Returns:
A nested structure of `tf.Tensor` or `tf.CompositeTensor` compatible with
`self._component_specs`, which can be used to reconstruct `value`.
"""
# === Subclassing ===
# This method must be inexpensive (do not call TF ops).
raise NotImplementedError("%s._to_components()" % type(self).__name__)
@abc.abstractmethod
def _from_components(self, components):
"""Reconstructs a value from a nested structure of Tensor/CompositeTensor.
Args:
components: A nested structure of `tf.Tensor` or `tf.CompositeTensor`,
compatible with `self._component_specs`. (Caller is responsible for
ensuring compatibility.)
Returns:
A value that is compatible with this `TypeSpec`.
"""
# === Subclassing ===
# This method must be inexpensive (do not call TF ops).
raise NotImplementedError("%s._from_components()" % type(self).__name__)
@abc.abstractproperty
def _component_specs(self):
"""A nested structure of TypeSpecs for this type's components.
Returns:
A nested structure describing the component encodings that are returned
by this TypeSpec's `_to_components` method. In particular, for a
TypeSpec `spec` and a compatible value `value`:
```
nest.map_structure(lambda t, c: assert t.is_compatible_with(c),
spec._component_specs, spec._to_components(value))
```
"""
raise NotImplementedError("%s._component_specs()" % type(self).__name__)
# === Tensor list encoding for values ===
def _to_tensor_list(self, value) -> List["ops.Tensor"]:
"""Encodes `value` as a flat list of `tf.Tensor`.
By default, this just flattens `self._to_components(value)` using
`nest.flatten`. However, subclasses may override this to return a
different tensor encoding for values. In particular, some subclasses
of `BatchableTypeSpec` override this method to return a "boxed" encoding
for values, which then can be batched or unbatched. See
`BatchableTypeSpec` for more details.
Args:
      value: A value compatible with this `TypeSpec`. (Caller is responsible
for ensuring compatibility.)
Returns:
A list of `tf.Tensor`, compatible with `self._flat_tensor_specs`, which
can be used to reconstruct `value`.
"""
return nest.flatten(self._to_components(value), expand_composites=True)
def _from_tensor_list(self, tensor_list: List["ops.Tensor"]) -> Any:
"""Reconstructs a value from a flat list of `tf.Tensor`.
Args:
tensor_list: A flat list of `tf.Tensor`, compatible with
`self._flat_tensor_specs`.
Returns:
A value that is compatible with this `TypeSpec`.
Raises:
ValueError: If `tensor_list` is not compatible with
`self._flat_tensor_specs`.
"""
self.__check_tensor_list(tensor_list)
return self._from_compatible_tensor_list(tensor_list)
def _from_compatible_tensor_list(
self, tensor_list: List["ops.Tensor"]) -> Any:
"""Reconstructs a value from a compatible flat list of `tf.Tensor`.
Args:
tensor_list: A flat list of `tf.Tensor`, compatible with
`self._flat_tensor_specs`. (Caller is responsible for ensuring
compatibility.)
Returns:
A value that is compatible with this `TypeSpec`.
"""
return self._from_components(
nest.pack_sequence_as(
self._component_specs, tensor_list, expand_composites=True))
@property
def _flat_tensor_specs(self):
"""A list of TensorSpecs compatible with self._to_tensor_list(v)."""
return nest.flatten(self._component_specs, expand_composites=True)
# === Serialization for types ===
@abc.abstractmethod
def _serialize(self):
"""Returns a nested tuple containing the state of this TypeSpec.
The serialization may contain the following value types: boolean,
integer, string, float, None, `TensorSpec`, `tf.TensorShape`, `tf.DType`,
`np.ndarray`, `TypeSpec`, and nested tuples, namedtuples, dicts, and
OrderedDicts of any of the above.
This method is used to provide default definitions for: equality
testing (__eq__, __ne__), hashing (__hash__), pickling (__reduce__),
string representation (__repr__), `self.is_compatible_with()`,
`self.most_specific_compatible_type()`, and protobuf serialization
(e.g. TensorInfo and StructuredValue).
"""
raise NotImplementedError("%s._serialize()" % type(self).__name__)
@classmethod
def _deserialize(cls, serialization):
"""Reconstructs a TypeSpec from a value returned by `serialize`.
Args:
serialization: A value returned by _serialize. In some contexts,
`namedtuple`s in `serialization` may not have the identical type that
was returned by `_serialize` (but its type will still be a `namedtuple`
type with the same type name and field names). For example, the code
that loads a SavedModel does not have access to the original
`namedtuple` type, so it dynamically creates a new `namedtuple` type
with the same type name and field names as the original one. If
necessary, you can check `serialization` for these duck-typed
        `namedtuple` types, and restore them to the original type. (E.g., this
would be necessary if you rely on type checks such as `isinstance` for
this `TypeSpec`'s member variables).
Returns:
A `TypeSpec` of type `cls`.
"""
return cls(*serialization)
# === Operators ===
def __eq__(self, other) -> bool:
# pylint: disable=protected-access
return (type(other) is type(self) and
self.__get_cmp_key() == other.__get_cmp_key())
def __ne__(self, other) -> bool:
return not self == other
def __hash__(self) -> int:
return hash(self.__get_cmp_key())
def __reduce__(self):
return type(self), self._serialize()
def __repr__(self) -> str:
return "%s%r" % (type(self).__name__, self._serialize())
# === Legacy Output ===
# TODO(b/133606651) Document and/or deprecate the legacy_output methods.
# (These are used by tf.data.)
def _to_legacy_output_types(self):
raise NotImplementedError("%s._to_legacy_output_types()" %
type(self).__name__)
def _to_legacy_output_shapes(self):
raise NotImplementedError("%s._to_legacy_output_shapes()" %
type(self).__name__)
def _to_legacy_output_classes(self):
return self.value_type
# === Private Helper Methods ===
# TODO(b/216206374): Currently this usage is used to represent a Tensor
# argument not a TensorSpec argument as it should be.
def __tf_tracing_type__(self,
context: trace.TracingContext) -> trace.TraceType:
if context.include_tensor_ranks_only:
return self._with_tensor_ranks_only()
else:
return self
def __check_tensor_list(self, tensor_list):
"""Raises an exception if tensor_list incompatible w/ flat_tensor_specs."""
expected = self._flat_tensor_specs
specs = [type_spec_from_value(t) for t in tensor_list]
if len(specs) != len(expected):
raise ValueError(f"Cannot create a {self.value_type.__name__} from the "
f"tensor list because the TypeSpec expects "
f"{len(expected)} items, but the provided tensor list "
f"has {len(specs)} items.")
for i, (s1, s2) in enumerate(zip(specs, expected)):
if not s1.is_compatible_with(s2):
raise ValueError(f"Cannot create a {self.value_type.__name__} from the "
f"tensor list because item {i} ({tensor_list[i]!r}) "
f"is incompatible with the expected TypeSpec {s2}.")
def __get_cmp_key(self):
"""Returns a hashable eq-comparable key for `self`."""
# TODO(b/133606651): Decide whether to cache this value.
return (type(self), self.__make_cmp_key(self._serialize()))
def __make_cmp_key(self, value):
"""Converts `value` to a hashable key."""
if isinstance(value, (int, float, bool, np.generic, dtypes.DType, TypeSpec,
tensor_shape.TensorShape)):
return value
if isinstance(value, compat.bytes_or_text_types):
return value
if value is None:
return value
if isinstance(value, dict):
return tuple([
tuple([self.__make_cmp_key(key),
self.__make_cmp_key(value[key])])
for key in sorted(value.keys())
])
if isinstance(value, tuple):
return tuple([self.__make_cmp_key(v) for v in value])
if isinstance(value, list):
return (list, tuple([self.__make_cmp_key(v) for v in value]))
if isinstance(value, np.ndarray):
return (np.ndarray, value.shape,
TypeSpec.__nested_list_to_tuple(value.tolist()))
raise ValueError(f"Cannot generate a hashable key for {self} because "
f"the _serialize() method "
f"returned an unsupproted value of type {type(value)}")
@staticmethod
def __nested_list_to_tuple(value):
"""Converts a nested list to a corresponding nested tuple."""
if isinstance(value, list):
return tuple(TypeSpec.__nested_list_to_tuple(v) for v in value)
return value
@staticmethod
def __same_types(a, b):
"""Returns whether a and b have the same type, up to namedtuple equivalence.
Consistent with tf.nest.assert_same_structure(), two namedtuple types
are considered the same iff they agree in their class name (without
qualification by module name) and in their sequence of field names.
This makes namedtuples recreated by nested_structure_coder compatible with
their original Python definition.
Args:
a: a Python object.
b: a Python object.
Returns:
A boolean that is true iff type(a) and type(b) are the same object
or equivalent namedtuple types.
"""
if nest.is_namedtuple(a) and nest.is_namedtuple(b):
return nest.same_namedtuples(a, b)
else:
return type(a) is type(b)
@staticmethod
def __is_compatible(a, b):
"""Returns true if the given type serializations compatible."""
if isinstance(a, TypeSpec):
return a.is_compatible_with(b)
if not TypeSpec.__same_types(a, b):
return False
if isinstance(a, (list, tuple)):
return (len(a) == len(b) and
all(TypeSpec.__is_compatible(x, y) for (x, y) in zip(a, b)))
if isinstance(a, dict):
return (len(a) == len(b) and sorted(a.keys()) == sorted(b.keys()) and
all(TypeSpec.__is_compatible(a[k], b[k]) for k in a.keys()))
if isinstance(a, (tensor_shape.TensorShape, dtypes.DType)):
return a.is_compatible_with(b)
return a == b
# TODO(b/221459366): Remove after usages are removed.
@staticmethod
def __most_specific_compatible_type_serialization(a, b):
"""Helper for most_specific_compatible_type.
Combines two type serializations as follows:
* If they are both tuples of the same length, then recursively combine
the respective tuple elements.
* If they are both dicts with the same keys, then recursively combine
the respective dict elements.
* If they are both TypeSpecs, then combine using
TypeSpec.most_specific_compatible_type.
* If they are both TensorShapes, then combine using
TensorShape.most_specific_compatible_shape.
* If they are both TensorSpecs with the same dtype, then combine using
TensorShape.most_specific_compatible_shape to combine shapes.
* If they are equal, then return a.
* If none of the above, then raise a ValueError.
Args:
a: A serialized TypeSpec or nested component from a serialized TypeSpec.
b: A serialized TypeSpec or nested component from a serialized TypeSpec.
Returns:
A value with the same type and structure as `a` and `b`.
Raises:
ValueError: If `a` and `b` are incompatible.
"""
if not TypeSpec.__same_types(a, b):
raise ValueError(
f"Encountered incompatible types while determining the most specific "
f"compatible type. "
f"The Python type structures of `a` and `b` are different. "
f"`a` : {a!r} `b` : {b!r}")
if nest.is_namedtuple(a):
assert a._fields == b._fields # Implied by __same_types(a, b).
return type(a)(*[
TypeSpec.__most_specific_compatible_type_serialization(x, y)
for (x, y) in zip(a, b)
])
if isinstance(a, (list, tuple)):
if len(a) != len(b):
raise ValueError(
f"Encountered incompatible types while determining the most specific "
f"compatible type. "
f"Type spec structure `a` has a length of {len(a)} and "
f"type spec structure `b` has a different length of {len(b)}."
f"`a` : {a!r} `b` : {b!r}")
return tuple(
TypeSpec.__most_specific_compatible_type_serialization(x, y)
for (x, y) in zip(a, b))
if isinstance(a, collections.OrderedDict):
a_keys, b_keys = a.keys(), b.keys()
if len(a) != len(b) or a_keys != b_keys:
raise ValueError(
f"Encountered incompatible types while determining the most specific "
f"compatible type. "
f"Type spec structure `a` has keys {a_keys} and "
f"type spec structure `b` has different keys {b_keys}."
f"`a` : {a!r} `b` : {b!r}")
return collections.OrderedDict([
(k,
TypeSpec.__most_specific_compatible_type_serialization(a[k], b[k]))
for k in a_keys
])
if isinstance(a, dict):
a_keys, b_keys = sorted(a.keys()), sorted(b.keys())
if len(a) != len(b) or a_keys != b_keys:
raise ValueError(
f"Encountered incompatible types while determining the most specific "
f"compatible type. "
f"Type spec structure `a` has keys {a_keys} and "
f"type spec structure `b` has different keys {b_keys}."
f"`a` : {a!r} `b` : {b!r}")
return {
k: TypeSpec.__most_specific_compatible_type_serialization(a[k], b[k])
for k in a_keys
}
if isinstance(a, tensor_shape.TensorShape):
return a.most_specific_compatible_shape(b)
if isinstance(a, list):
raise AssertionError(
f"{type(a).__name__}._serialize() should not return list values.")
if isinstance(a, TypeSpec):
return a.most_specific_compatible_type(b)
if a != b:
raise ValueError(
f"Encountered incompatible types while determining the most specific "
f"compatible type. "
f"Type spec structure `a` and `b` are different. "
f"`a` : {a!r} `b` : {b!r}")
return a
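# --- Hedged illustration (not part of the original file) ---------------------
# The covariant subtype/supertype rules above can be exercised with a concrete
# TypeSpec such as tf.TensorSpec.  The snippet below is a sketch; exact reprs
# may differ between TensorFlow versions.
#
#   >>> a = tf.TensorSpec([2, 3], tf.float32)
#   >>> b = tf.TensorSpec([None, 3], tf.float32)
#   >>> a.is_subtype_of(b)      # a fully known shape refines a relaxed one
#   True
#   >>> a.most_specific_common_supertype([b])
#   TensorSpec(shape=(None, 3), dtype=tf.float32, name=None)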
class TypeSpecBatchEncoder(object, metaclass=abc.ABCMeta):
"""Class used to encode and decode composite tensor values for batching.
In order to be batched and unbatched by APIs such as `tf.data.Dataset` and
`tf.map_fn`, composite tensors must be encoded using flat tensors that can
themselves be batched or unbatched. `TypeSpecBatchEncoder`s are
responsible for implementing this encoding.
If a composite tensor's shape is a prefix of the shape of all of its
component tensors, then this encoding can usually be performed by just
returning those component tensors as a list. But if the composite tensor
has components whose shape has a more complex relationship to the shape
of the composite tensor, then a custom `TypeSpecBatchEncoder` may
need to be implemented.
"""
@abc.abstractmethod
def batch(self, spec, batch_size):
"""Returns the TypeSpec representing a batch of values described by `spec`.
Args:
spec: The `TypeSpec` for an individual value.
batch_size: An `int` indicating the number of values that are batched
together, or `None` if the batch size is not known.
Returns:
A `TypeSpec` for a batch of values.
"""
raise NotImplementedError(f"{type(self).__name__}.batch")
@abc.abstractmethod
def unbatch(self, spec):
"""Returns the TypeSpec for a single unbatched element in `spec`.
Args:
spec: The `TypeSpec` for a batch of values.
Returns:
A `TypeSpec` for an individual value.
"""
raise NotImplementedError(f"{type(self).__name__}.unbatch")
@abc.abstractmethod
def encode(self, spec, value, minimum_rank=0):
"""Encodes `value` as a nest of batchable `Tensor` or `CompositeTensor`.
Args:
spec: The TypeSpec of the value to encode.
value: A value compatible with `spec`.
minimum_rank: The minimum rank for the returned Tensors, CompositeTensors,
and ExtensionType values. This can be used to ensure that the encoded
values can be unbatched this number of times. If `minimum_rank>0`,
then `t.shape[:minimum_rank]` must be compatible for all values `t`
returned by `encode`.
Returns:
A nest (as defined by `tf.nest`) of `tf.Tensor`s, batchable
`tf.CompositeTensor`s, or `tf.ExtensionType`s. Stacking, unstacking, or
concatenating these encoded values and then decoding the result must be
equivalent to stacking, unstacking, or concatenating the original values.
"""
raise NotImplementedError(f"{type(self).__name__}.encode")
@abc.abstractmethod
def decode(self, spec, encoded_value):
"""Decodes `value` from a batchable tensor encoding.
Args:
spec: The TypeSpec for the result value. If encoded values with spec `s`
were batched, then `spec` should be `s.batch(batch_size)`; or if encoded
values with spec `s` were unbatched, then `spec` should be
`s.unbatch()`.
encoded_value: A nest of values returned by `encode`; or a nest of
values that was formed by stacking, unstacking, or concatenating the
corresponding elements of values returned by `encode`.
Returns:
      A value compatible with `spec`.
"""
raise NotImplementedError(f"{type(self).__name__}.decode")
@abc.abstractmethod
def encoding_specs(self, spec):
"""Returns a nest of `TypeSpec`(s) describing the encoding for `spec`.
Args:
spec: The TypeSpec whose encoding should be described.
Returns:
      A nest (as defined by `tf.nest`) of `tf.TypeSpec`, describing the values
that are returned by `self.encode(spec, ...)`. All TypeSpecs in this
nest must be batchable.
"""
raise NotImplementedError(f"{type(self).__name__}.encoding_specs")
class LegacyTypeSpecBatchEncoder(TypeSpecBatchEncoder):
"""TypeSpecBatchEncoder for legacy composite tensor classes.
TODO(edloper): Update existing composite tensors to use non-legacy
  CompositeTensorBatchEncoders.
"""
def batch(self, type_spec, batch_size):
return type_spec._batch(batch_size) # pylint: disable=protected-access
def unbatch(self, type_spec):
return type_spec._unbatch() # pylint: disable=protected-access
def encode(self, type_spec, value, minimum_rank=0):
if minimum_rank == 0:
return type_spec._to_tensor_list(value) # pylint: disable=protected-access
elif minimum_rank == 1:
if not isinstance(type_spec, BatchableTypeSpec):
raise ValueError(f"{type_spec.__name__}.encode does not support "
"minimum_rank>0.")
return type_spec._to_batched_tensor_list(value) # pylint: disable=protected-access
else:
raise ValueError(f"{type_spec.__name__}.encode does not support "
"minimum_rank>1.")
def decode(self, type_spec, encoded_value):
return type_spec._from_tensor_list(encoded_value) # pylint: disable=protected-access
def encoding_specs(self, spec):
return spec._flat_tensor_specs # pylint: disable=protected-access
class BatchableTypeSpec(TypeSpec, metaclass=abc.ABCMeta):
"""TypeSpec with a batchable tensor encoding.
The batchable tensor encoding is a list of `tf.Tensor`s that supports
batching and unbatching. In particular, stacking (or unstacking)
values with the same `TypeSpec` must be equivalent to stacking (or
unstacking) each of their tensor lists. Unlike the component encoding
(returned by `self._to_components)`, the batchable tensor encoding
may require using encoding/decoding ops.
If a subclass's batchable tensor encoding is not simply a flattened version
of the component encoding, then the subclass must override `_to_tensor_list`,
  `_from_tensor_list`, and `_flat_tensor_specs`.
"""
__slots__ = []
__batch_encoder__ = LegacyTypeSpecBatchEncoder()
@abc.abstractmethod
def _batch(self, batch_size) -> TypeSpec:
"""Returns a TypeSpec representing a batch of objects with this TypeSpec.
Args:
batch_size: An `int` representing the number of elements in a batch, or
`None` if the batch size may vary.
Returns:
A `TypeSpec` representing a batch of objects with this TypeSpec.
"""
raise NotImplementedError(f"{type(self).__name__}._batch")
@abc.abstractmethod
def _unbatch(self) -> TypeSpec:
"""Returns a TypeSpec representing a single element this TypeSpec.
Returns:
A `TypeSpec` representing a single element of objects with this TypeSpec.
"""
raise NotImplementedError(f"{type(self).__name__}._unbatch")
@property
def _flat_tensor_specs(self) -> List[TypeSpec]:
"""A list of TensorSpecs compatible with self._to_tensor_list(v)."""
component_flat_tensor_specs = nest.map_structure(
functools.partial(get_batchable_flat_tensor_specs, context_spec=self),
self._component_specs)
return nest.flatten(component_flat_tensor_specs)
def _to_tensor_list(
self,
value: composite_tensor.CompositeTensor) -> List["ops.Tensor"]:
"""Encodes `value` as a flat list of `ops.Tensor`."""
component_tensor_lists = nest.map_structure(
batchable_to_tensor_list,
self._component_specs,
self._to_components(value))
return nest.flatten(component_tensor_lists)
def _to_batched_tensor_list(
self,
value: composite_tensor.CompositeTensor) -> List["ops.Tensor"]:
"""Encodes `value` as a flat list of `ops.Tensor` each with rank>0."""
get_spec_tensor_list = lambda spec, v: ( # pylint: disable=g-long-lambda
batchable_to_tensor_list(spec, v, minimum_rank=1)
if isinstance(spec, BatchableTypeSpec) else spec._to_tensor_list(v)) # pylint: disable=protected-access
component_batched_tensor_lists = nest.map_structure(
get_spec_tensor_list, self._component_specs, self._to_components(value))
tensor_list = nest.flatten(component_batched_tensor_lists)
if any(t.shape.ndims == 0 for t in tensor_list):
raise ValueError(
f"While converting {value} to a list of tensors for batching, "
f"found a scalar item which cannot be batched.")
return tensor_list
def _from_compatible_tensor_list(
self, tensor_list: List["ops.Tensor"]
) -> composite_tensor.CompositeTensor:
"""Reconstructs a value from a compatible flat list of `ops.Tensor`."""
flat_specs = nest.map_structure(
functools.partial(get_batchable_flat_tensor_specs, context_spec=self),
self._component_specs)
nested_tensor_list = nest.pack_sequence_as(flat_specs, tensor_list)
components = nest.map_structure_up_to(
self._component_specs,
batchable_from_tensor_list,
self._component_specs,
nested_tensor_list)
return self._from_components(components)
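# --- Hedged illustration (not part of the original file) ---------------------
# The batchable encoding is what lets composite tensors flow through batching
# APIs such as tf.data.  At the spec level, the effect of the private
# _batch/_unbatch hooks looks roughly like this (internal API, sketch only):
#
#   >>> spec = tf.RaggedTensorSpec(shape=[None, None], dtype=tf.int32)
#   >>> batched = spec._batch(8)        # prepends a leading batch dimension of 8
#   >>> batched._unbatch()              # drops it again, recovering spec's shape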
def get_batchable_flat_tensor_specs(spec, context_spec=None):
"""Returns the flat tensor specs for `spec`."""
if isinstance(spec, tensor_spec.TensorSpec):
return [spec]
elif hasattr(spec, "__batch_encoder__"):
encoding_specs = nest.map_structure(
functools.partial(get_batchable_flat_tensor_specs,
context_spec=context_spec),
spec.__batch_encoder__.encoding_specs(spec))
return nest.flatten(encoding_specs)
else:
# TODO(edloper) Fix existing CompositeTensors that permit this, and
# then turn this warning into an error.
warnings.warn(f"Batchable type {context_spec} contains non-batchable "
f"field or component with type {spec}.")
return spec._flat_tensor_specs # pylint: disable=protected-access
def batchable_to_tensor_list(spec, value, minimum_rank=0):
"""Returns a list of tensors encoding `value`, whose type is `spec`."""
if isinstance(spec, tensor_spec.TensorSpec):
return [value]
elif hasattr(spec, "__batch_encoder__"):
encoded_value = spec.__batch_encoder__.encode(spec, value, minimum_rank)
encoded_specs = spec.__batch_encoder__.encoding_specs(spec)
encoded_flats = nest.map_structure(
functools.partial(batchable_to_tensor_list, minimum_rank=minimum_rank),
encoded_specs,
encoded_value)
return nest.flatten(encoded_flats)
else:
return spec._to_tensor_list(value) # pylint: disable=protected-access
def batchable_from_tensor_list(spec, tensor_list):
"""Returns a value with type `spec` decoded from `tensor_list`."""
if isinstance(spec, tensor_spec.TensorSpec):
assert len(tensor_list) == 1
return tensor_list[0]
elif hasattr(spec, "__batch_encoder__"):
encoded_specs = spec.__batch_encoder__.encoding_specs(spec)
flat_specs = nest.map_structure(get_batchable_flat_tensor_specs,
encoded_specs)
encoded_flats = nest.pack_sequence_as(flat_specs, tensor_list)
encoded_value = nest.map_structure_up_to(
encoded_specs,
batchable_from_tensor_list,
encoded_specs,
encoded_flats)
return spec.__batch_encoder__.decode(spec, encoded_value)
else:
return spec._from_compatible_tensor_list(tensor_list) # pylint: disable=protected-access
@tf_export("type_spec_from_value")
def type_spec_from_value(value) -> TypeSpec:
"""Returns a `tf.TypeSpec` that represents the given `value`.
Examples:
>>> tf.type_spec_from_value(tf.constant([1, 2, 3]))
TensorSpec(shape=(3,), dtype=tf.int32, name=None)
>>> tf.type_spec_from_value(np.array([4.0, 5.0], np.float64))
TensorSpec(shape=(2,), dtype=tf.float64, name=None)
>>> tf.type_spec_from_value(tf.ragged.constant([[1, 2], [3, 4, 5]]))
RaggedTensorSpec(TensorShape([2, None]), tf.int32, 1, tf.int64)
>>> example_input = tf.ragged.constant([[1, 2], [3]])
>>> @tf.function(input_signature=[tf.type_spec_from_value(example_input)])
... def f(x):
... return tf.reduce_sum(x, axis=1)
Args:
value: A value that can be accepted or returned by TensorFlow APIs. Accepted
types for `value` include `tf.Tensor`, any value that can be converted to
`tf.Tensor` using `tf.convert_to_tensor`, and any subclass of
`CompositeTensor` (such as `tf.RaggedTensor`).
Returns:
A `TypeSpec` that is compatible with `value`.
Raises:
TypeError: If a TypeSpec cannot be built for `value`, because its type
is not supported.
"""
spec = _type_spec_from_value(value)
if spec is not None:
return spec
# Fallback: try converting value to a tensor.
try:
tensor = ops.convert_to_tensor(value)
spec = _type_spec_from_value(tensor)
if spec is not None:
return spec
except (ValueError, TypeError) as e:
logging.vlog(
3, "Failed to convert %r to tensor: %s" % (type(value).__name__, e))
raise TypeError(f"Could not build a TypeSpec for {value} of "
f"unsupported type {type(value)}.")
def _type_spec_from_value(value) -> TypeSpec:
"""Returns a `TypeSpec` that represents the given `value`."""
if isinstance(value, ops.Tensor):
# Note: we do not include Tensor names when constructing TypeSpecs.
return tensor_spec.TensorSpec(value.shape, value.dtype)
if isinstance(value, composite_tensor.CompositeTensor):
return value._type_spec # pylint: disable=protected-access
# If `value` is a list and all of its elements can be represented by the same
# batchable type spec, then we can represent the entire list using a single
# type spec that captures the type accurately (unlike the `convert_to_tensor`
# fallback).
if isinstance(value, list) and value:
subspecs = [_type_spec_from_value(v) for v in value]
if isinstance(subspecs[0], BatchableTypeSpec):
merged_subspec = subspecs[0]
try:
for subspec in subspecs[1:]:
merged_subspec = merged_subspec.most_specific_compatible_type(subspec)
return merged_subspec._batch(len(subspecs)) # pylint: disable=protected-access
except (ValueError, TypeError):
pass # incompatible subspecs
for entry in reversed(_TYPE_CONVERSION_FUNCTION_REGISTRY):
type_object, converter_fn, allow_subclass = entry
if ((type(value) is type_object) or # pylint: disable=unidiomatic-typecheck
(allow_subclass and isinstance(value, type_object))):
return converter_fn(value)
return None
_TYPE_CONVERSION_FUNCTION_REGISTRY = []
def register_type_spec_from_value_converter(type_object,
converter_fn,
allow_subclass=False):
"""Registers a function for converting values with a given type to TypeSpecs.
If multiple registered `type_object`s match a value, then the most recent
registration takes precedence. Custom converters should not be defined for
`CompositeTensor`s; use `CompositeTensor._type_spec` instead.
Args:
type_object: A Python `type` object representing the type of values accepted
by `converter_fn`.
converter_fn: A function that takes one argument (an instance of the type
represented by `type_object`) and returns a `TypeSpec`.
allow_subclass: If true, then use `isinstance(value, type_object)` to check
for matches. If false, then use `type(value) is type_object`.
"""
_, type_object = tf_decorator.unwrap(type_object)
_TYPE_CONVERSION_FUNCTION_REGISTRY.append(
(type_object, converter_fn, allow_subclass))
_pywrap_utils.RegisterType("TypeSpec", TypeSpec)
_TYPE_SPEC_TO_NAME = {}
_NAME_TO_TYPE_SPEC = {}
# Regular expression for valid TypeSpec names.
_REGISTERED_NAME_RE = re.compile(r"^(\w+\.)+\w+$")
# TODO(b/173744905) tf_export this as "tf.register_type_spec". (And add a
# usage example to the docstring, once the API is public.)
#
# TODO(b/173744905) Update this decorator to apply to ExtensionType rather than
# TypeSpec (once we do refactoring to move to_components/from_components from
# TypeSpec to ExtensionType).
def register(name):
"""Decorator used to register a globally unique name for a TypeSpec subclass.
Args:
name: The name of the type spec. Must be globally unique. Must have the
form `"{project_name}.{type_name}"`. E.g. `"my_project.MyTypeSpec"`.
Returns:
A class decorator that registers the decorated class with the given name.
"""
if not isinstance(name, str):
raise TypeError("Expected `name` to be a string; got %r" % (name,))
if not _REGISTERED_NAME_RE.match(name):
raise ValueError(
"Registered name must have the form '{project_name}.{type_name}' "
"(e.g. 'my_project.MyTypeSpec'); got %r." % name)
def decorator_fn(cls):
if not (isinstance(cls, type) and issubclass(cls, TypeSpec)):
raise TypeError("Expected `cls` to be a TypeSpec; got %r" % (cls,))
if cls in _TYPE_SPEC_TO_NAME:
raise ValueError("Class %s.%s has already been registered with name %s." %
(cls.__module__, cls.__name__, _TYPE_SPEC_TO_NAME[cls]))
if name in _NAME_TO_TYPE_SPEC:
raise ValueError("Name %s has already been registered for class %s.%s." %
(name, _NAME_TO_TYPE_SPEC[name].__module__,
_NAME_TO_TYPE_SPEC[name].__name__))
_TYPE_SPEC_TO_NAME[cls] = name
_NAME_TO_TYPE_SPEC[name] = cls
return cls
return decorator_fn
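# --- Hedged usage sketch (not part of the original file) ---------------------
# Registration attaches a stable, globally unique name to a TypeSpec subclass
# so it can be recovered after serialization.  MaskedTensorSpec below is a
# hypothetical subclass used purely for illustration.
#
#   @register("my_project.MaskedTensorSpec")
#   class MaskedTensorSpec(BatchableTypeSpec):
#     ...
#
#   get_name(MaskedTensorSpec)              # -> "my_project.MaskedTensorSpec"
#   lookup("my_project.MaskedTensorSpec")   # -> MaskedTensorSpec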
# TODO(edloper) tf_export this as "tf.get_type_spec_name" (or some similar name)
def get_name(cls):
"""Returns the registered name for TypeSpec `cls`."""
if not (isinstance(cls, type) and issubclass(cls, TypeSpec)):
raise TypeError("Expected `cls` to be a TypeSpec; got %r" % (cls,))
if cls not in _TYPE_SPEC_TO_NAME:
raise ValueError("TypeSpec %s.%s has not been registered." %
(cls.__module__, cls.__name__))
return _TYPE_SPEC_TO_NAME[cls]
# TODO(edloper) tf_export this as "tf.lookup_type_spec" (or some similar name)
def lookup(name):
"""Returns the TypeSpec that has been registered with name `name`."""
if not isinstance(name, str):
raise TypeError("Expected `name` to be a string; got %r" % (name,))
if name not in _NAME_TO_TYPE_SPEC:
raise ValueError("No TypeSpec has been registered with name %r" % (name,))
return _NAME_TO_TYPE_SPEC[name]
| 39.34991
| 114
| 0.691505
|
7950e31c6c57cdca61de8e44feaba28cdb00dc0a
| 10,409
|
py
|
Python
|
vspk/v4_0/nuvcentereamconfig.py
|
cldelcourt/vspk-python
|
cdea810cd220e6ddc131407735941b9a26b2edda
|
[
"BSD-3-Clause"
] | null | null | null |
vspk/v4_0/nuvcentereamconfig.py
|
cldelcourt/vspk-python
|
cdea810cd220e6ddc131407735941b9a26b2edda
|
[
"BSD-3-Clause"
] | null | null | null |
vspk/v4_0/nuvcentereamconfig.py
|
cldelcourt/vspk-python
|
cdea810cd220e6ddc131407735941b9a26b2edda
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUMetadatasFetcher
from bambou import NURESTObject
class NUVCenterEAMConfig(NURESTObject):
""" Represents a VCenterEAMConfig in the VSD
Notes:
The EAM solution configuration.
"""
__rest_name__ = "eamconfig"
__resource_name__ = "eamconfigs"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a VCenterEAMConfig instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> vcentereamconfig = NUVCenterEAMConfig(id=u'xxxx-xxx-xxx-xxx', name=u'VCenterEAMConfig')
>>> vcentereamconfig = NUVCenterEAMConfig(data=my_dict)
"""
super(NUVCenterEAMConfig, self).__init__()
# Read/Write Attributes
self._eam_server_ip = None
self._eam_server_port_number = None
self._eam_server_port_type = None
self._entity_scope = None
self._extension_key = None
self._external_id = None
self._last_updated_by = None
self._ovf_url = None
self._vib_url = None
self.expose_attribute(local_name="eam_server_ip", remote_name="eamServerIP", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="eam_server_port_number", remote_name="eamServerPortNumber", attribute_type=int, is_required=True, is_unique=False)
self.expose_attribute(local_name="eam_server_port_type", remote_name="eamServerPortType", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="extension_key", remote_name="extensionKey", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ovf_url", remote_name="ovfURL", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="vib_url", remote_name="vibURL", attribute_type=str, is_required=False, is_unique=False)
# Fetchers
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def eam_server_ip(self):
""" Get eam_server_ip value.
Notes:
The EAM server IP
This attribute is named `eamServerIP` in VSD API.
"""
return self._eam_server_ip
@eam_server_ip.setter
def eam_server_ip(self, value):
""" Set eam_server_ip value.
Notes:
The EAM server IP
This attribute is named `eamServerIP` in VSD API.
"""
self._eam_server_ip = value
@property
def eam_server_port_number(self):
""" Get eam_server_port_number value.
Notes:
The EAM server port number
This attribute is named `eamServerPortNumber` in VSD API.
"""
return self._eam_server_port_number
@eam_server_port_number.setter
def eam_server_port_number(self, value):
""" Set eam_server_port_number value.
Notes:
The EAM server port number
This attribute is named `eamServerPortNumber` in VSD API.
"""
self._eam_server_port_number = value
@property
def eam_server_port_type(self):
""" Get eam_server_port_type value.
Notes:
The EAM server port Type
This attribute is named `eamServerPortType` in VSD API.
"""
return self._eam_server_port_type
@eam_server_port_type.setter
def eam_server_port_type(self, value):
""" Set eam_server_port_type value.
Notes:
The EAM server port Type
This attribute is named `eamServerPortType` in VSD API.
"""
self._eam_server_port_type = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def extension_key(self):
""" Get extension_key value.
Notes:
Key of the extension that the solution registers
This attribute is named `extensionKey` in VSD API.
"""
return self._extension_key
@extension_key.setter
def extension_key(self, value):
""" Set extension_key value.
Notes:
Key of the extension that the solution registers
This attribute is named `extensionKey` in VSD API.
"""
self._extension_key = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def ovf_url(self):
""" Get ovf_url value.
Notes:
The url for the ovf
This attribute is named `ovfURL` in VSD API.
"""
return self._ovf_url
@ovf_url.setter
def ovf_url(self, value):
""" Set ovf_url value.
Notes:
The url for the ovf
This attribute is named `ovfURL` in VSD API.
"""
self._ovf_url = value
@property
def vib_url(self):
""" Get vib_url value.
Notes:
The url for the optional vib
This attribute is named `vibURL` in VSD API.
"""
return self._vib_url
@vib_url.setter
def vib_url(self, value):
""" Set vib_url value.
Notes:
The url for the optional vib
This attribute is named `vibURL` in VSD API.
"""
self._vib_url = value
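# --- Hedged usage sketch (not part of the generated class) -------------------
# Typical VSPK usage creates the EAM config under a parent entity after opening
# a session; the parent object, attribute values, and URL below are assumptions
# for illustration only.
#
#     config = NUVCenterEAMConfig(eam_server_ip="10.0.0.5",
#                                 eam_server_port_number=80,
#                                 eam_server_port_type="http",
#                                 ovf_url="http://repo/nuage-vrs.ovf")
#     parent.create_child(config)   # `parent` is a hypothetical NURESTObject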
| 29.571023
| 175
| 0.596311
|
7950e4f9722b8b2924e216c80cad1587fcfc60d2
| 7,691
|
py
|
Python
|
docs/conf.py
|
julienmendes/corona
|
25b085090df1c0a6f415be96fb21bcf1373c230d
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
julienmendes/corona
|
25b085090df1c0a6f415be96fb21bcf1373c230d
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
julienmendes/corona
|
25b085090df1c0a6f415be96fb21bcf1373c230d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# corona documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'corona'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'coronadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'corona.tex',
u'corona Documentation',
u"JM", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'corona', u'corona Documentation',
[u"JM"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'corona', u'corona Documentation',
u"JM", 'corona',
'coronavirus', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
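# -- Hedged example (not part of the generated config) --------------------------
# Enabling built-in Sphinx extensions is a common next step; these are standard
# 'sphinx.ext.*' modules, shown commented out in the same style as the defaults
# above.
# extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode']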
| 31.391837
| 80
| 0.70472
|
7950e50045c40383a6db8405526dcd960304ab3f
| 598
|
py
|
Python
|
Application/ReclamaCaicoProject/ReclamaCaicoApp/migrations/0010_comentario_user.py
|
WesleyVitor/ReclamaCaico
|
df67997821fc00236f1d9c77e8685ed8e4a6934b
|
[
"MIT"
] | null | null | null |
Application/ReclamaCaicoProject/ReclamaCaicoApp/migrations/0010_comentario_user.py
|
WesleyVitor/ReclamaCaico
|
df67997821fc00236f1d9c77e8685ed8e4a6934b
|
[
"MIT"
] | null | null | null |
Application/ReclamaCaicoProject/ReclamaCaicoApp/migrations/0010_comentario_user.py
|
WesleyVitor/ReclamaCaico
|
df67997821fc00236f1d9c77e8685ed8e4a6934b
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-09-07 14:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('ReclamaCaicoApp', '0009_auto_20190907_1132'),
]
operations = [
migrations.AddField(
model_name='comentario',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
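# Hedged note (not part of the generated migration): the schema change above
# corresponds roughly to this field on the Comentario model, assuming the
# project's configured AUTH_USER_MODEL:
#
#   user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
#                            on_delete=models.CASCADE)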
| 27.181818
| 121
| 0.682274
|
7950e5a8ce8111b5af259667b7e8d572addd1c07
| 1,485
|
py
|
Python
|
Incident-Response/Tools/Loki/loki-package-builder.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 1
|
2021-07-24T17:22:50.000Z
|
2021-07-24T17:22:50.000Z
|
Incident-Response/Tools/Loki/loki-package-builder.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-28T03:40:31.000Z
|
2022-02-28T03:40:52.000Z
|
Incident-Response/Tools/Loki/loki-package-builder.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-25T08:34:51.000Z
|
2022-03-16T17:29:44.000Z
|
import sys
import argparse
import io
from lib.privrules import *
def parse_arguments():
parser = argparse.ArgumentParser(description='Package builder for Loki')
parser.add_argument('--ruledir', help='directory containing the rules to build into Loki', required=True)
parser.add_argument('--target', help='target where to store the compiled ruleset', required=True)
return parser.parse_args()
def main():
args = parse_arguments()
rules = read_rules_from_dir(args.ruledir)
# stop if no private rules were found
    if rules is None:
return
buffer = io.BytesIO()
rules.save(file=buffer)
serialized_rules = buffer.getvalue()
serialized_rules_compressed = compress(serialized_rules)
rsakey = generate_RSA_key(RSA_KEY_SIZE)
rsa_cipher = get_cipher_RSA_PKCS1_OAEP(rsakey.publickey())
aes_iv = Random.new().read(AES.block_size)
aeskey = generate_AES_key(32)
aes_cipher = get_cipher_AES(aeskey, aes_iv)
encrypted_rules = encrypt(serialized_rules_compressed, aes_cipher)
encrypted_rules = aes_iv + encrypted_rules
encrypted_aes_key = encrypt(aeskey, rsa_cipher)
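    # Package layout: RSA-encrypted AES key, then the AES IV, then the
    # AES-encrypted, compressed rule data.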
encrypted_rules = encrypted_aes_key + encrypted_rules
with open(args.target, "wb") as f:
        f.write(encrypted_rules)
    export_RSA_key(rsakey, "%s.key" % args.target)
if decrypt_rules(args.target) == None:
print("unable to decrypt package")
sys.exit(-1)
if __name__ == "__main__":
main()
| 31.595745
| 109
| 0.713805
|
7950e5b8306e665c63e9ed17dec883e394ece72b
| 234,804
|
py
|
Python
|
venv/Lib/site-packages/matplotlib/tests/test_axes.py
|
amelliaaas/tugastkc4
|
f442382c72379e911f3780543b95345a3b1c9407
|
[
"Apache-2.0"
] | 7
|
2021-09-20T19:23:05.000Z
|
2022-01-22T13:28:01.000Z
|
venv/Lib/site-packages/matplotlib/tests/test_axes.py
|
amelliaaas/tugastkc4
|
f442382c72379e911f3780543b95345a3b1c9407
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/matplotlib/tests/test_axes.py
|
amelliaaas/tugastkc4
|
f442382c72379e911f3780543b95345a3b1c9407
|
[
"Apache-2.0"
] | 20
|
2021-11-07T13:55:56.000Z
|
2021-12-02T10:54:01.000Z
|
from collections import namedtuple
import datetime
from decimal import Decimal
import io
from itertools import product
import platform
from types import SimpleNamespace
try:
from contextlib import nullcontext
except ImportError:
from contextlib import ExitStack as nullcontext # Py3.6.
import dateutil.tz
import numpy as np
from numpy import ma
from cycler import cycler
import pytest
import matplotlib
import matplotlib as mpl
from matplotlib.testing.decorators import (
image_comparison, check_figures_equal, remove_ticks_and_titles)
import matplotlib.colors as mcolors
import matplotlib.dates as mdates
from matplotlib.figure import Figure
import matplotlib.font_manager as mfont_manager
import matplotlib.markers as mmarkers
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
from numpy.testing import (
assert_allclose, assert_array_equal, assert_array_almost_equal)
from matplotlib import rc_context
from matplotlib.cbook import MatplotlibDeprecationWarning
# Note: Some test cases are run twice: once normally and once with labeled data
# These two must be defined in the same test function or need to have
# different baseline images to prevent race conditions when pytest runs
# the tests with multiple threads.
def test_get_labels():
fig, ax = plt.subplots()
ax.set_xlabel('x label')
ax.set_ylabel('y label')
assert ax.get_xlabel() == 'x label'
assert ax.get_ylabel() == 'y label'
@check_figures_equal()
def test_label_loc_vertical(fig_test, fig_ref):
ax = fig_test.subplots()
sc = ax.scatter([1, 2], [1, 2], c=[1, 2], label='scatter')
ax.legend()
ax.set_ylabel('Y Label', loc='top')
ax.set_xlabel('X Label', loc='right')
cbar = fig_test.colorbar(sc)
cbar.set_label("Z Label", loc='top')
ax = fig_ref.subplots()
sc = ax.scatter([1, 2], [1, 2], c=[1, 2], label='scatter')
ax.legend()
ax.set_ylabel('Y Label', y=1, ha='right')
ax.set_xlabel('X Label', x=1, ha='right')
cbar = fig_ref.colorbar(sc)
cbar.set_label("Z Label", y=1, ha='right')
@check_figures_equal()
def test_label_loc_horizontal(fig_test, fig_ref):
ax = fig_test.subplots()
sc = ax.scatter([1, 2], [1, 2], c=[1, 2], label='scatter')
ax.legend()
ax.set_ylabel('Y Label', loc='bottom')
ax.set_xlabel('X Label', loc='left')
cbar = fig_test.colorbar(sc, orientation='horizontal')
cbar.set_label("Z Label", loc='left')
ax = fig_ref.subplots()
sc = ax.scatter([1, 2], [1, 2], c=[1, 2], label='scatter')
ax.legend()
ax.set_ylabel('Y Label', y=0, ha='left')
ax.set_xlabel('X Label', x=0, ha='left')
cbar = fig_ref.colorbar(sc, orientation='horizontal')
cbar.set_label("Z Label", x=0, ha='left')
@check_figures_equal()
def test_label_loc_rc(fig_test, fig_ref):
with matplotlib.rc_context({"xaxis.labellocation": "right",
"yaxis.labellocation": "top"}):
ax = fig_test.subplots()
sc = ax.scatter([1, 2], [1, 2], c=[1, 2], label='scatter')
ax.legend()
ax.set_ylabel('Y Label')
ax.set_xlabel('X Label')
cbar = fig_test.colorbar(sc, orientation='horizontal')
cbar.set_label("Z Label")
ax = fig_ref.subplots()
sc = ax.scatter([1, 2], [1, 2], c=[1, 2], label='scatter')
ax.legend()
ax.set_ylabel('Y Label', y=1, ha='right')
ax.set_xlabel('X Label', x=1, ha='right')
cbar = fig_ref.colorbar(sc, orientation='horizontal')
cbar.set_label("Z Label", x=1, ha='right')
@check_figures_equal(extensions=["png"])
def test_acorr(fig_test, fig_ref):
np.random.seed(19680801)
Nx = 512
x = np.random.normal(0, 1, Nx).cumsum()
maxlags = Nx-1
ax_test = fig_test.subplots()
ax_test.acorr(x, maxlags=maxlags)
ax_ref = fig_ref.subplots()
# Normalized autocorrelation
norm_auto_corr = np.correlate(x, x, mode="full")/np.dot(x, x)
lags = np.arange(-maxlags, maxlags+1)
norm_auto_corr = norm_auto_corr[Nx-1-maxlags:Nx+maxlags]
ax_ref.vlines(lags, [0], norm_auto_corr)
ax_ref.axhline(y=0, xmin=0, xmax=1)
@check_figures_equal(extensions=["png"])
def test_spy(fig_test, fig_ref):
np.random.seed(19680801)
a = np.ones(32 * 32)
a[:16 * 32] = 0
np.random.shuffle(a)
a = a.reshape((32, 32))
axs_test = fig_test.subplots(2)
axs_test[0].spy(a)
axs_test[1].spy(a, marker=".", origin="lower")
axs_ref = fig_ref.subplots(2)
axs_ref[0].imshow(a, cmap="gray_r", interpolation="nearest")
axs_ref[0].xaxis.tick_top()
axs_ref[1].plot(*np.nonzero(a)[::-1], ".", markersize=10)
axs_ref[1].set(
aspect=1, xlim=axs_ref[0].get_xlim(), ylim=axs_ref[0].get_ylim()[::-1])
for ax in axs_ref:
ax.xaxis.set_ticks_position("both")
def test_spy_invalid_kwargs():
fig, ax = plt.subplots()
for unsupported_kw in [{'interpolation': 'nearest'},
{'marker': 'o', 'linestyle': 'solid'}]:
with pytest.raises(TypeError):
ax.spy(np.eye(3, 3), **unsupported_kw)
@check_figures_equal(extensions=["png"])
def test_matshow(fig_test, fig_ref):
mpl.style.use("mpl20")
a = np.random.rand(32, 32)
fig_test.add_subplot().matshow(a)
ax_ref = fig_ref.add_subplot()
ax_ref.imshow(a)
ax_ref.xaxis.tick_top()
ax_ref.xaxis.set_ticks_position('both')
@image_comparison(['formatter_ticker_001',
'formatter_ticker_002',
'formatter_ticker_003',
'formatter_ticker_004',
'formatter_ticker_005',
])
def test_formatter_ticker():
import matplotlib.testing.jpl_units as units
units.register()
# This should affect the tick size. (Tests issue #543)
matplotlib.rcParams['lines.markeredgewidth'] = 30
    # This essentially tests whether user-specified labels get overwritten
    # by the auto-labeling functionality of the axes.
xdata = [x*units.sec for x in range(10)]
ydata1 = [(1.5*y - 0.5)*units.km for y in range(10)]
ydata2 = [(1.75*y - 1.0)*units.km for y in range(10)]
ax = plt.figure().subplots()
ax.set_xlabel("x-label 001")
ax = plt.figure().subplots()
ax.set_xlabel("x-label 001")
ax.plot(xdata, ydata1, color='blue', xunits="sec")
ax = plt.figure().subplots()
ax.set_xlabel("x-label 001")
ax.plot(xdata, ydata1, color='blue', xunits="sec")
ax.set_xlabel("x-label 003")
ax = plt.figure().subplots()
ax.plot(xdata, ydata1, color='blue', xunits="sec")
ax.plot(xdata, ydata2, color='green', xunits="hour")
ax.set_xlabel("x-label 004")
# See SF bug 2846058
# https://sourceforge.net/tracker/?func=detail&aid=2846058&group_id=80706&atid=560720
ax = plt.figure().subplots()
ax.plot(xdata, ydata1, color='blue', xunits="sec")
ax.plot(xdata, ydata2, color='green', xunits="hour")
ax.set_xlabel("x-label 005")
ax.autoscale_view()
def test_funcformatter_auto_formatter():
def _formfunc(x, pos):
return ''
ax = plt.figure().subplots()
assert ax.xaxis.isDefault_majfmt
assert ax.xaxis.isDefault_minfmt
assert ax.yaxis.isDefault_majfmt
assert ax.yaxis.isDefault_minfmt
ax.xaxis.set_major_formatter(_formfunc)
assert not ax.xaxis.isDefault_majfmt
assert ax.xaxis.isDefault_minfmt
assert ax.yaxis.isDefault_majfmt
assert ax.yaxis.isDefault_minfmt
targ_funcformatter = mticker.FuncFormatter(_formfunc)
assert isinstance(ax.xaxis.get_major_formatter(),
mticker.FuncFormatter)
assert ax.xaxis.get_major_formatter().func == targ_funcformatter.func
def test_strmethodformatter_auto_formatter():
formstr = '{x}_{pos}'
ax = plt.figure().subplots()
assert ax.xaxis.isDefault_majfmt
assert ax.xaxis.isDefault_minfmt
assert ax.yaxis.isDefault_majfmt
assert ax.yaxis.isDefault_minfmt
ax.yaxis.set_minor_formatter(formstr)
assert ax.xaxis.isDefault_majfmt
assert ax.xaxis.isDefault_minfmt
assert ax.yaxis.isDefault_majfmt
assert not ax.yaxis.isDefault_minfmt
targ_strformatter = mticker.StrMethodFormatter(formstr)
assert isinstance(ax.yaxis.get_minor_formatter(),
mticker.StrMethodFormatter)
assert ax.yaxis.get_minor_formatter().fmt == targ_strformatter.fmt
@image_comparison(["twin_axis_locators_formatters"])
def test_twin_axis_locators_formatters():
vals = np.linspace(0, 1, num=5, endpoint=True)
locs = np.sin(np.pi * vals / 2.0)
majl = plt.FixedLocator(locs)
minl = plt.FixedLocator([0.1, 0.2, 0.3])
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
ax1.plot([0.1, 100], [0, 1])
ax1.yaxis.set_major_locator(majl)
ax1.yaxis.set_minor_locator(minl)
ax1.yaxis.set_major_formatter(plt.FormatStrFormatter('%08.2lf'))
ax1.yaxis.set_minor_formatter(plt.FixedFormatter(['tricks', 'mind',
'jedi']))
ax1.xaxis.set_major_locator(plt.LinearLocator())
ax1.xaxis.set_minor_locator(plt.FixedLocator([15, 35, 55, 75]))
ax1.xaxis.set_major_formatter(plt.FormatStrFormatter('%05.2lf'))
ax1.xaxis.set_minor_formatter(plt.FixedFormatter(['c', '3', 'p', 'o']))
ax1.twiny()
ax1.twinx()
def test_twinx_cla():
fig, ax = plt.subplots()
ax2 = ax.twinx()
ax3 = ax2.twiny()
plt.draw()
assert not ax2.xaxis.get_visible()
assert not ax2.patch.get_visible()
ax2.cla()
ax3.cla()
assert not ax2.xaxis.get_visible()
assert not ax2.patch.get_visible()
assert ax2.yaxis.get_visible()
assert ax3.xaxis.get_visible()
assert not ax3.patch.get_visible()
assert not ax3.yaxis.get_visible()
assert ax.xaxis.get_visible()
assert ax.patch.get_visible()
assert ax.yaxis.get_visible()
@pytest.mark.parametrize('twin', ('x', 'y'))
@check_figures_equal(extensions=['png'], tol=0.19)
def test_twin_logscale(fig_test, fig_ref, twin):
twin_func = f'twin{twin}' # test twinx or twiny
set_scale = f'set_{twin}scale'
x = np.arange(1, 100)
# Change scale after twinning.
ax_test = fig_test.add_subplot(2, 1, 1)
ax_twin = getattr(ax_test, twin_func)()
getattr(ax_test, set_scale)('log')
ax_twin.plot(x, x)
# Twin after changing scale.
ax_test = fig_test.add_subplot(2, 1, 2)
getattr(ax_test, set_scale)('log')
ax_twin = getattr(ax_test, twin_func)()
ax_twin.plot(x, x)
for i in [1, 2]:
ax_ref = fig_ref.add_subplot(2, 1, i)
getattr(ax_ref, set_scale)('log')
ax_ref.plot(x, x)
# This is a hack because twinned Axes double-draw the frame.
# Remove this when that is fixed.
Path = matplotlib.path.Path
fig_ref.add_artist(
matplotlib.patches.PathPatch(
Path([[0, 0], [0, 1],
[0, 1], [1, 1],
[1, 1], [1, 0],
[1, 0], [0, 0]],
[Path.MOVETO, Path.LINETO] * 4),
transform=ax_ref.transAxes,
facecolor='none',
edgecolor=mpl.rcParams['axes.edgecolor'],
linewidth=mpl.rcParams['axes.linewidth'],
capstyle='projecting'))
remove_ticks_and_titles(fig_test)
remove_ticks_and_titles(fig_ref)
@image_comparison(['twin_autoscale.png'])
def test_twinx_axis_scales():
x = np.array([0, 0.5, 1])
y = 0.5 * x
x2 = np.array([0, 1, 2])
y2 = 2 * x2
fig = plt.figure()
ax = fig.add_axes((0, 0, 1, 1), autoscalex_on=False, autoscaley_on=False)
ax.plot(x, y, color='blue', lw=10)
ax2 = plt.twinx(ax)
ax2.plot(x2, y2, 'r--', lw=5)
ax.margins(0, 0)
ax2.margins(0, 0)
def test_twin_inherit_autoscale_setting():
fig, ax = plt.subplots()
ax_x_on = ax.twinx()
ax.set_autoscalex_on(False)
ax_x_off = ax.twinx()
assert ax_x_on.get_autoscalex_on()
assert not ax_x_off.get_autoscalex_on()
ax_y_on = ax.twiny()
ax.set_autoscaley_on(False)
ax_y_off = ax.twiny()
assert ax_y_on.get_autoscaley_on()
assert not ax_y_off.get_autoscaley_on()
def test_inverted_cla():
# GitHub PR #5450. Setting autoscale should reset
# axes to be non-inverted.
# plotting an image, then 1d graph, axis is now down
fig = plt.figure(0)
ax = fig.gca()
    # 1. test that a new axis is not inverted by default
assert not ax.xaxis_inverted()
assert not ax.yaxis_inverted()
img = np.random.random((100, 100))
ax.imshow(img)
    # 2. test that an image axis is inverted
assert not ax.xaxis_inverted()
assert ax.yaxis_inverted()
    # 3. test that after clearing and plotting a line, the axes are
    # not inverted
ax.cla()
x = np.linspace(0, 2*np.pi, 100)
ax.plot(x, np.cos(x))
assert not ax.xaxis_inverted()
assert not ax.yaxis_inverted()
# 4. autoscaling should not bring back axes to normal
ax.cla()
ax.imshow(img)
plt.autoscale()
assert not ax.xaxis_inverted()
assert ax.yaxis_inverted()
    # 5. two shared axes. Inverting the master axis should invert the shared
    # axes; clearing the master axis should bring the shared axes back to
    # normal.
ax0 = plt.subplot(211)
ax1 = plt.subplot(212, sharey=ax0)
ax0.yaxis.set_inverted(True)
assert ax1.yaxis_inverted()
ax1.plot(x, np.cos(x))
ax0.cla()
assert not ax1.yaxis_inverted()
ax1.cla()
# 6. clearing the nonmaster should not touch limits
ax0.imshow(img)
ax1.plot(x, np.cos(x))
ax1.cla()
assert ax.yaxis_inverted()
# clean up
plt.close(fig)
@check_figures_equal(extensions=["png"])
def test_minorticks_on_rcParams_both(fig_test, fig_ref):
with matplotlib.rc_context({"xtick.minor.visible": True,
"ytick.minor.visible": True}):
ax_test = fig_test.subplots()
ax_test.plot([0, 1], [0, 1])
ax_ref = fig_ref.subplots()
ax_ref.plot([0, 1], [0, 1])
ax_ref.minorticks_on()
@image_comparison(["autoscale_tiny_range"], remove_text=True)
def test_autoscale_tiny_range():
# github pull #904
fig, axs = plt.subplots(2, 2)
for i, ax in enumerate(axs.flat):
y1 = 10**(-11 - i)
ax.plot([0, 1], [1, 1 + y1])
@pytest.mark.style('default')
def test_autoscale_tight():
fig, ax = plt.subplots(1, 1)
ax.plot([1, 2, 3, 4])
ax.autoscale(enable=True, axis='x', tight=False)
ax.autoscale(enable=True, axis='y', tight=True)
assert_allclose(ax.get_xlim(), (-0.15, 3.15))
assert_allclose(ax.get_ylim(), (1.0, 4.0))
@pytest.mark.style('default')
def test_autoscale_log_shared():
# related to github #7587
# array starts at zero to trigger _minpos handling
x = np.arange(100, dtype=float)
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.loglog(x, x)
ax2.semilogx(x, x)
ax1.autoscale(tight=True)
ax2.autoscale(tight=True)
plt.draw()
lims = (x[1], x[-1])
assert_allclose(ax1.get_xlim(), lims)
assert_allclose(ax1.get_ylim(), lims)
assert_allclose(ax2.get_xlim(), lims)
assert_allclose(ax2.get_ylim(), (x[0], x[-1]))
@pytest.mark.style('default')
def test_use_sticky_edges():
fig, ax = plt.subplots()
ax.imshow([[0, 1], [2, 3]], origin='lower')
assert_allclose(ax.get_xlim(), (-0.5, 1.5))
assert_allclose(ax.get_ylim(), (-0.5, 1.5))
ax.use_sticky_edges = False
ax.autoscale()
xlim = (-0.5 - 2 * ax._xmargin, 1.5 + 2 * ax._xmargin)
ylim = (-0.5 - 2 * ax._ymargin, 1.5 + 2 * ax._ymargin)
assert_allclose(ax.get_xlim(), xlim)
assert_allclose(ax.get_ylim(), ylim)
# Make sure it is reversible:
ax.use_sticky_edges = True
ax.autoscale()
assert_allclose(ax.get_xlim(), (-0.5, 1.5))
assert_allclose(ax.get_ylim(), (-0.5, 1.5))
@check_figures_equal(extensions=["png"])
def test_sticky_shared_axes(fig_test, fig_ref):
# Check that sticky edges work whether they are set in an axes that is a
# "master" in a share, or an axes that is a "follower".
Z = np.arange(15).reshape(3, 5)
ax0 = fig_test.add_subplot(211)
ax1 = fig_test.add_subplot(212, sharex=ax0)
ax1.pcolormesh(Z)
ax0 = fig_ref.add_subplot(212)
ax1 = fig_ref.add_subplot(211, sharex=ax0)
ax0.pcolormesh(Z)
@image_comparison(['offset_points'], remove_text=True)
def test_basic_annotate():
# Setup some data
t = np.arange(0.0, 5.0, 0.01)
s = np.cos(2.0*np.pi * t)
# Offset Points
fig = plt.figure()
ax = fig.add_subplot(autoscale_on=False, xlim=(-1, 5), ylim=(-3, 5))
line, = ax.plot(t, s, lw=3, color='purple')
ax.annotate('local max', xy=(3, 1), xycoords='data',
xytext=(3, 3), textcoords='offset points')
def test_annotate_parameter_warn():
fig, ax = plt.subplots()
with pytest.warns(MatplotlibDeprecationWarning,
match=r"The \'s\' parameter of annotate\(\) "
"has been renamed \'text\'"):
ax.annotate(s='now named text', xy=(0, 1))
@image_comparison(['arrow_simple.png'], remove_text=True)
def test_arrow_simple():
# Simple image test for ax.arrow
# kwargs that take discrete values
length_includes_head = (True, False)
shape = ('full', 'left', 'right')
head_starts_at_zero = (True, False)
# Create outer product of values
kwargs = product(length_includes_head, shape, head_starts_at_zero)
fig, axs = plt.subplots(3, 4)
for i, (ax, kwarg) in enumerate(zip(axs.flat, kwargs)):
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
# Unpack kwargs
(length_includes_head, shape, head_starts_at_zero) = kwarg
theta = 2 * np.pi * i / 12
# Draw arrow
ax.arrow(0, 0, np.sin(theta), np.cos(theta),
width=theta/100,
length_includes_head=length_includes_head,
shape=shape,
head_starts_at_zero=head_starts_at_zero,
head_width=theta / 10,
head_length=theta / 10)
def test_arrow_empty():
_, ax = plt.subplots()
# Create an empty FancyArrow
ax.arrow(0, 0, 0, 0, head_length=0)
def test_arrow_in_view():
_, ax = plt.subplots()
ax.arrow(1, 1, 1, 1)
assert ax.get_xlim() == (0.8, 2.2)
assert ax.get_ylim() == (0.8, 2.2)
def test_annotate_default_arrow():
# Check that we can make an annotation arrow with only default properties.
fig, ax = plt.subplots()
ann = ax.annotate("foo", (0, 1), xytext=(2, 3))
assert ann.arrow_patch is None
ann = ax.annotate("foo", (0, 1), xytext=(2, 3), arrowprops={})
assert ann.arrow_patch is not None
@image_comparison(['fill_units.png'], savefig_kwarg={'dpi': 60})
def test_fill_units():
import matplotlib.testing.jpl_units as units
units.register()
# generate some data
t = units.Epoch("ET", dt=datetime.datetime(2009, 4, 27))
value = 10.0 * units.deg
day = units.Duration("ET", 24.0 * 60.0 * 60.0)
dt = np.arange('2009-04-27', '2009-04-29', dtype='datetime64[D]')
dtn = mdates.date2num(dt)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
ax1.plot([t], [value], yunits='deg', color='red')
ind = [0, 0, 1, 1]
ax1.fill(dtn[ind], [0.0, 0.0, 90.0, 0.0], 'b')
ax2.plot([t], [value], yunits='deg', color='red')
ax2.fill([t, t, t + day, t + day],
[0.0, 0.0, 90.0, 0.0], 'b')
ax3.plot([t], [value], yunits='deg', color='red')
ax3.fill(dtn[ind],
[0 * units.deg, 0 * units.deg, 90 * units.deg, 0 * units.deg],
'b')
ax4.plot([t], [value], yunits='deg', color='red')
ax4.fill([t, t, t + day, t + day],
[0 * units.deg, 0 * units.deg, 90 * units.deg, 0 * units.deg],
facecolor="blue")
fig.autofmt_xdate()
def test_plot_format_kwarg_redundant():
with pytest.warns(UserWarning, match="marker .* redundantly defined"):
plt.plot([0], [0], 'o', marker='x')
with pytest.warns(UserWarning, match="linestyle .* redundantly defined"):
plt.plot([0], [0], '-', linestyle='--')
with pytest.warns(UserWarning, match="color .* redundantly defined"):
plt.plot([0], [0], 'r', color='blue')
# smoke-test: should not warn
plt.errorbar([0], [0], fmt='none', color='blue')
@image_comparison(['single_point', 'single_point'])
def test_single_point():
# Issue #1796: don't let lines.marker affect the grid
matplotlib.rcParams['lines.marker'] = 'o'
matplotlib.rcParams['axes.grid'] = True
fig, (ax1, ax2) = plt.subplots(2)
ax1.plot([0], [0], 'o')
ax2.plot([1], [1], 'o')
# Reuse testcase from above for a labeled data test
data = {'a': [0], 'b': [1]}
fig, (ax1, ax2) = plt.subplots(2)
ax1.plot('a', 'a', 'o', data=data)
ax2.plot('b', 'b', 'o', data=data)
@image_comparison(['single_date.png'], style='mpl20')
def test_single_date():
# use former defaults to match existing baseline image
plt.rcParams['axes.formatter.limits'] = -7, 7
dt = mdates.date2num(np.datetime64('0000-12-31'))
time1 = [721964.0]
data1 = [-65.54]
fig, ax = plt.subplots(2, 1)
ax[0].plot_date(time1 + dt, data1, 'o', color='r')
ax[1].plot(time1, data1, 'o', color='r')
@check_figures_equal(extensions=["png"])
def test_shaped_data(fig_test, fig_ref):
row = np.arange(10).reshape((1, -1))
col = np.arange(0, 100, 10).reshape((-1, 1))
axs = fig_test.subplots(2)
axs[0].plot(row) # Actually plots nothing (columns are single points).
axs[1].plot(col) # Same as plotting 1d.
axs = fig_ref.subplots(2)
# xlim from the implicit "x=0", ylim from the row datalim.
axs[0].set(xlim=(-.06, .06), ylim=(0, 9))
axs[1].plot(col.ravel())
def test_structured_data():
# support for structured data
pts = np.array([(1, 1), (2, 2)], dtype=[("ones", float), ("twos", float)])
    # this should not read the second name as a format and raise ValueError
axs = plt.figure().subplots(2)
axs[0].plot("ones", "twos", data=pts)
axs[1].plot("ones", "twos", "r", data=pts)
@image_comparison(['aitoff_proj'], extensions=["png"],
remove_text=True, style='mpl20')
def test_aitoff_proj():
"""
Test aitoff projection ref.:
https://github.com/matplotlib/matplotlib/pull/14451
"""
x = np.linspace(-np.pi, np.pi, 20)
y = np.linspace(-np.pi / 2, np.pi / 2, 20)
X, Y = np.meshgrid(x, y)
fig, ax = plt.subplots(figsize=(8, 4.2),
subplot_kw=dict(projection="aitoff"))
ax.grid()
ax.plot(X.flat, Y.flat, 'o', markersize=4)
@image_comparison(['axvspan_epoch'])
def test_axvspan_epoch():
import matplotlib.testing.jpl_units as units
units.register()
# generate some data
t0 = units.Epoch("ET", dt=datetime.datetime(2009, 1, 20))
tf = units.Epoch("ET", dt=datetime.datetime(2009, 1, 21))
dt = units.Duration("ET", units.day.convert("sec"))
ax = plt.gca()
ax.axvspan(t0, tf, facecolor="blue", alpha=0.25)
ax.set_xlim(t0 - 5.0*dt, tf + 5.0*dt)
@image_comparison(['axhspan_epoch'], tol=0.02)
def test_axhspan_epoch():
import matplotlib.testing.jpl_units as units
units.register()
# generate some data
t0 = units.Epoch("ET", dt=datetime.datetime(2009, 1, 20))
tf = units.Epoch("ET", dt=datetime.datetime(2009, 1, 21))
dt = units.Duration("ET", units.day.convert("sec"))
ax = plt.gca()
ax.axhspan(t0, tf, facecolor="blue", alpha=0.25)
ax.set_ylim(t0 - 5.0*dt, tf + 5.0*dt)
@image_comparison(['hexbin_extent.png', 'hexbin_extent.png'], remove_text=True)
def test_hexbin_extent():
# this test exposes sf bug 2856228
fig, ax = plt.subplots()
data = (np.arange(2000) / 2000).reshape((2, 1000))
x, y = data
ax.hexbin(x, y, extent=[.1, .3, .6, .7])
# Reuse testcase from above for a labeled data test
data = {"x": x, "y": y}
fig, ax = plt.subplots()
ax.hexbin("x", "y", extent=[.1, .3, .6, .7], data=data)
@image_comparison(['hexbin_empty.png'], remove_text=True)
def test_hexbin_empty():
# From #3886: creating hexbin from empty dataset raises ValueError
ax = plt.gca()
ax.hexbin([], [])
def test_hexbin_pickable():
# From #1973: Test that picking a hexbin collection works
fig, ax = plt.subplots()
data = (np.arange(200) / 200).reshape((2, 100))
x, y = data
hb = ax.hexbin(x, y, extent=[.1, .3, .6, .7], picker=-1)
mouse_event = SimpleNamespace(x=400, y=300)
assert hb.contains(mouse_event)[0]
@image_comparison(['hexbin_log.png'], style='mpl20')
def test_hexbin_log():
# Issue #1636 (and also test log scaled colorbar)
# Remove this line when this test image is regenerated.
plt.rcParams['pcolormesh.snap'] = False
np.random.seed(19680801)
n = 100000
x = np.random.standard_normal(n)
y = 2.0 + 3.0 * x + 4.0 * np.random.standard_normal(n)
y = np.power(2, y * 0.5)
fig, ax = plt.subplots()
h = ax.hexbin(x, y, yscale='log', bins='log')
plt.colorbar(h)
def test_inverted_limits():
# Test gh:1553
    # Calling invert_xaxis prior to plotting should not disable autoscaling,
    # and the inverted direction should still be maintained
fig, ax = plt.subplots()
ax.invert_xaxis()
ax.plot([-5, -3, 2, 4], [1, 2, -3, 5])
assert ax.get_xlim() == (4, -5)
assert ax.get_ylim() == (-3, 5)
plt.close()
fig, ax = plt.subplots()
ax.invert_yaxis()
ax.plot([-5, -3, 2, 4], [1, 2, -3, 5])
assert ax.get_xlim() == (-5, 4)
assert ax.get_ylim() == (5, -3)
# Test inverting nonlinear axes.
fig, ax = plt.subplots()
ax.set_yscale("log")
ax.set_ylim(10, 1)
assert ax.get_ylim() == (10, 1)
@image_comparison(['nonfinite_limits'])
def test_nonfinite_limits():
x = np.arange(0., np.e, 0.01)
# silence divide by zero warning from log(0)
with np.errstate(divide='ignore'):
y = np.log(x)
x[len(x)//2] = np.nan
fig, ax = plt.subplots()
ax.plot(x, y)
@pytest.mark.style('default')
@pytest.mark.parametrize('plot_fun',
['scatter', 'plot', 'fill_between'])
@check_figures_equal(extensions=["png"])
def test_limits_empty_data(plot_fun, fig_test, fig_ref):
# Check that plotting empty data doesn't change autoscaling of dates
x = np.arange("2010-01-01", "2011-01-01", dtype="datetime64[D]")
ax_test = fig_test.subplots()
ax_ref = fig_ref.subplots()
getattr(ax_test, plot_fun)([], [])
for ax in [ax_test, ax_ref]:
getattr(ax, plot_fun)(x, range(len(x)), color='C0')
@image_comparison(['imshow', 'imshow'], remove_text=True, style='mpl20')
def test_imshow():
# use former defaults to match existing baseline image
matplotlib.rcParams['image.interpolation'] = 'nearest'
# Create a NxN image
N = 100
(x, y) = np.indices((N, N))
x -= N//2
y -= N//2
r = np.sqrt(x**2+y**2-x*y)
    # Plot the image
fig, ax = plt.subplots()
ax.imshow(r)
# Reuse testcase from above for a labeled data test
data = {"r": r}
fig, ax = plt.subplots()
ax.imshow("r", data=data)
@image_comparison(['imshow_clip'], style='mpl20')
def test_imshow_clip():
# As originally reported by Gellule Xg <gellule.xg@free.fr>
# use former defaults to match existing baseline image
matplotlib.rcParams['image.interpolation'] = 'nearest'
# Create a NxN image
N = 100
(x, y) = np.indices((N, N))
x -= N//2
y -= N//2
r = np.sqrt(x**2+y**2-x*y)
# Create a contour plot at N/4 and extract both the clip path and transform
fig, ax = plt.subplots()
c = ax.contour(r, [N/4])
x = c.collections[0]
clip_path = x.get_paths()[0]
clip_transform = x.get_transform()
clip_path = mtransforms.TransformedPath(clip_path, clip_transform)
# Plot the image clipped by the contour
ax.imshow(r, clip_path=clip_path)
@check_figures_equal(extensions=["png"])
def test_imshow_norm_vminvmax(fig_test, fig_ref):
"""Parameters vmin, vmax should be ignored if norm is given."""
a = [[1, 2], [3, 4]]
ax = fig_ref.subplots()
ax.imshow(a, vmin=0, vmax=5)
ax = fig_test.subplots()
with pytest.warns(MatplotlibDeprecationWarning,
match="Passing parameters norm and vmin/vmax "
"simultaneously is deprecated."):
ax.imshow(a, norm=mcolors.Normalize(-10, 10), vmin=0, vmax=5)
@image_comparison(['polycollection_joinstyle'], remove_text=True)
def test_polycollection_joinstyle():
# Bug #2890979 reported by Matthew West
fig, ax = plt.subplots()
verts = np.array([[1, 1], [1, 2], [2, 2], [2, 1]])
c = mpl.collections.PolyCollection([verts], linewidths=40)
ax.add_collection(c)
ax.set_xbound(0, 3)
ax.set_ybound(0, 3)
@pytest.mark.parametrize(
'x, y1, y2', [
(np.zeros((2, 2)), 3, 3),
(np.arange(0.0, 2, 0.02), np.zeros((2, 2)), 3),
(np.arange(0.0, 2, 0.02), 3, np.zeros((2, 2)))
], ids=[
'2d_x_input',
'2d_y1_input',
'2d_y2_input'
]
)
def test_fill_between_input(x, y1, y2):
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.fill_between(x, y1, y2)
@pytest.mark.parametrize(
'y, x1, x2', [
(np.zeros((2, 2)), 3, 3),
(np.arange(0.0, 2, 0.02), np.zeros((2, 2)), 3),
(np.arange(0.0, 2, 0.02), 3, np.zeros((2, 2)))
], ids=[
'2d_y_input',
'2d_x1_input',
'2d_x2_input'
]
)
def test_fill_betweenx_input(y, x1, x2):
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.fill_betweenx(y, x1, x2)
@image_comparison(['fill_between_interpolate'], remove_text=True)
def test_fill_between_interpolate():
x = np.arange(0.0, 2, 0.02)
y1 = np.sin(2*np.pi*x)
y2 = 1.2*np.sin(4*np.pi*x)
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot(x, y1, x, y2, color='black')
ax1.fill_between(x, y1, y2, where=y2 >= y1, facecolor='white', hatch='/',
interpolate=True)
ax1.fill_between(x, y1, y2, where=y2 <= y1, facecolor='red',
interpolate=True)
# Test support for masked arrays.
y2 = np.ma.masked_greater(y2, 1.0)
# Test that plotting works for masked arrays with the first element masked
y2[0] = np.ma.masked
ax2.plot(x, y1, x, y2, color='black')
ax2.fill_between(x, y1, y2, where=y2 >= y1, facecolor='green',
interpolate=True)
ax2.fill_between(x, y1, y2, where=y2 <= y1, facecolor='red',
interpolate=True)
@image_comparison(['fill_between_interpolate_decreasing'],
style='mpl20', remove_text=True)
def test_fill_between_interpolate_decreasing():
p = np.array([724.3, 700, 655])
t = np.array([9.4, 7, 2.2])
prof = np.array([7.9, 6.6, 3.8])
fig, ax = plt.subplots(figsize=(9, 9))
ax.plot(t, p, 'tab:red')
ax.plot(prof, p, 'k')
ax.fill_betweenx(p, t, prof, where=prof < t,
facecolor='blue', interpolate=True, alpha=0.4)
ax.fill_betweenx(p, t, prof, where=prof > t,
facecolor='red', interpolate=True, alpha=0.4)
ax.set_xlim(0, 30)
ax.set_ylim(800, 600)
# test_symlog and test_symlog2 used to have baseline images in all three
# formats, but the png and svg baselines got invalidated by the removal of
# minor tick overstriking.
@image_comparison(['symlog.pdf'])
def test_symlog():
x = np.array([0, 1, 2, 4, 6, 9, 12, 24])
y = np.array([1000000, 500000, 100000, 100, 5, 0, 0, 0])
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set_yscale('symlog')
ax.set_xscale('linear')
ax.set_ylim(-1, 10000000)
@image_comparison(['symlog2.pdf'], remove_text=True)
def test_symlog2():
    # Numbers from -50 to 50, with 0.001 as step
x = np.arange(-50, 50, 0.001)
fig, axs = plt.subplots(5, 1)
for ax, linthresh in zip(axs, [20., 2., 1., 0.1, 0.01]):
ax.plot(x, x)
ax.set_xscale('symlog', linthresh=linthresh)
ax.grid(True)
axs[-1].set_ylim(-0.1, 0.1)
def test_pcolorargs_5205():
# Smoketest to catch issue found in gh:5205
x = [-1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5]
y = [-1.5, -1.25, -1.0, -0.75, -0.5, -0.25, 0,
0.25, 0.5, 0.75, 1.0, 1.25, 1.5]
X, Y = np.meshgrid(x, y)
Z = np.hypot(X, Y)
plt.pcolor(Z)
plt.pcolor(list(Z))
plt.pcolor(x, y, Z[:-1, :-1])
plt.pcolor(X, Y, list(Z[:-1, :-1]))
@image_comparison(['pcolormesh'], remove_text=True)
def test_pcolormesh():
# Remove this line when this test image is regenerated.
plt.rcParams['pcolormesh.snap'] = False
n = 12
x = np.linspace(-1.5, 1.5, n)
y = np.linspace(-1.5, 1.5, n*2)
X, Y = np.meshgrid(x, y)
Qx = np.cos(Y) - np.cos(X)
Qz = np.sin(Y) + np.sin(X)
Qx = (Qx + 1.1)
Z = np.hypot(X, Y) / 5
Z = (Z - Z.min()) / Z.ptp()
# The color array can include masked values:
Zm = ma.masked_where(np.abs(Qz) < 0.5 * np.max(Qz), Z)
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
ax1.pcolormesh(Qx, Qz, Z[:-1, :-1], lw=0.5, edgecolors='k')
ax2.pcolormesh(Qx, Qz, Z[:-1, :-1], lw=2, edgecolors=['b', 'w'])
ax3.pcolormesh(Qx, Qz, Z, shading="gouraud")
@image_comparison(['pcolormesh_alpha'], extensions=["png", "pdf"],
remove_text=True)
def test_pcolormesh_alpha():
# Remove this line when this test image is regenerated.
plt.rcParams['pcolormesh.snap'] = False
n = 12
X, Y = np.meshgrid(
np.linspace(-1.5, 1.5, n),
np.linspace(-1.5, 1.5, n*2)
)
Qx = X
Qy = Y + np.sin(X)
Z = np.hypot(X, Y) / 5
Z = (Z - Z.min()) / Z.ptp()
vir = plt.get_cmap("viridis", 16)
# make another colormap with varying alpha
colors = vir(np.arange(16))
colors[:, 3] = 0.5 + 0.5*np.sin(np.arange(16))
cmap = mcolors.ListedColormap(colors)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
for ax in ax1, ax2, ax3, ax4:
ax.add_patch(mpatches.Rectangle(
(0, -1.5), 1.5, 3, facecolor=[.7, .1, .1, .5], zorder=0
))
# ax1, ax2: constant alpha
ax1.pcolormesh(Qx, Qy, Z[:-1, :-1], cmap=vir, alpha=0.4,
shading='flat', zorder=1)
ax2.pcolormesh(Qx, Qy, Z, cmap=vir, alpha=0.4, shading='gouraud', zorder=1)
# ax3, ax4: alpha from colormap
ax3.pcolormesh(Qx, Qy, Z[:-1, :-1], cmap=cmap, shading='flat', zorder=1)
ax4.pcolormesh(Qx, Qy, Z, cmap=cmap, shading='gouraud', zorder=1)
@image_comparison(['pcolormesh_datetime_axis.png'],
remove_text=False, style='mpl20')
def test_pcolormesh_datetime_axis():
# Remove this line when this test image is regenerated.
plt.rcParams['pcolormesh.snap'] = False
fig = plt.figure()
fig.subplots_adjust(hspace=0.4, top=0.98, bottom=.15)
base = datetime.datetime(2013, 1, 1)
x = np.array([base + datetime.timedelta(days=d) for d in range(21)])
y = np.arange(21)
z1, z2 = np.meshgrid(np.arange(20), np.arange(20))
z = z1 * z2
plt.subplot(221)
plt.pcolormesh(x[:-1], y[:-1], z[:-1, :-1])
plt.subplot(222)
plt.pcolormesh(x, y, z)
x = np.repeat(x[np.newaxis], 21, axis=0)
y = np.repeat(y[:, np.newaxis], 21, axis=1)
plt.subplot(223)
plt.pcolormesh(x[:-1, :-1], y[:-1, :-1], z[:-1, :-1])
plt.subplot(224)
plt.pcolormesh(x, y, z)
for ax in fig.get_axes():
for label in ax.get_xticklabels():
label.set_ha('right')
label.set_rotation(30)
@image_comparison(['pcolor_datetime_axis.png'],
remove_text=False, style='mpl20')
def test_pcolor_datetime_axis():
fig = plt.figure()
fig.subplots_adjust(hspace=0.4, top=0.98, bottom=.15)
base = datetime.datetime(2013, 1, 1)
x = np.array([base + datetime.timedelta(days=d) for d in range(21)])
y = np.arange(21)
z1, z2 = np.meshgrid(np.arange(20), np.arange(20))
z = z1 * z2
plt.subplot(221)
plt.pcolor(x[:-1], y[:-1], z[:-1, :-1])
plt.subplot(222)
plt.pcolor(x, y, z)
x = np.repeat(x[np.newaxis], 21, axis=0)
y = np.repeat(y[:, np.newaxis], 21, axis=1)
plt.subplot(223)
plt.pcolor(x[:-1, :-1], y[:-1, :-1], z[:-1, :-1])
plt.subplot(224)
plt.pcolor(x, y, z)
for ax in fig.get_axes():
for label in ax.get_xticklabels():
label.set_ha('right')
label.set_rotation(30)
def test_pcolorargs():
n = 12
x = np.linspace(-1.5, 1.5, n)
y = np.linspace(-1.5, 1.5, n*2)
X, Y = np.meshgrid(x, y)
Z = np.hypot(X, Y) / 5
_, ax = plt.subplots()
with pytest.raises(TypeError):
ax.pcolormesh(y, x, Z)
with pytest.raises(TypeError):
ax.pcolormesh(X, Y, Z.T)
with pytest.raises(TypeError):
ax.pcolormesh(x, y, Z[:-1, :-1], shading="gouraud")
with pytest.raises(TypeError):
ax.pcolormesh(X, Y, Z[:-1, :-1], shading="gouraud")
x[0] = np.NaN
with pytest.raises(ValueError):
ax.pcolormesh(x, y, Z[:-1, :-1])
with np.errstate(invalid='ignore'):
x = np.ma.array(x, mask=(x < 0))
with pytest.raises(ValueError):
ax.pcolormesh(x, y, Z[:-1, :-1])
# Expect a warning with non-increasing coordinates
x = [359, 0, 1]
y = [-10, 10]
X, Y = np.meshgrid(x, y)
Z = np.zeros(X.shape)
with pytest.warns(UserWarning,
match='are not monotonically increasing or decreasing'):
ax.pcolormesh(X, Y, Z, shading='auto')
@check_figures_equal(extensions=["png"])
def test_pcolornearest(fig_test, fig_ref):
ax = fig_test.subplots()
x = np.arange(0, 10)
y = np.arange(0, 3)
np.random.seed(19680801)
Z = np.random.randn(2, 9)
ax.pcolormesh(x, y, Z, shading='flat')
ax = fig_ref.subplots()
# specify the centers
x2 = x[:-1] + np.diff(x) / 2
y2 = y[:-1] + np.diff(y) / 2
ax.pcolormesh(x2, y2, Z, shading='nearest')
@check_figures_equal(extensions=["png"])
def test_pcolornearestunits(fig_test, fig_ref):
ax = fig_test.subplots()
x = [datetime.datetime.fromtimestamp(x * 3600) for x in range(10)]
y = np.arange(0, 3)
np.random.seed(19680801)
Z = np.random.randn(2, 9)
ax.pcolormesh(x, y, Z, shading='flat')
ax = fig_ref.subplots()
# specify the centers
x2 = [datetime.datetime.fromtimestamp((x + 0.5) * 3600) for x in range(9)]
y2 = y[:-1] + np.diff(y) / 2
ax.pcolormesh(x2, y2, Z, shading='nearest')
@check_figures_equal(extensions=["png"])
def test_pcolordropdata(fig_test, fig_ref):
ax = fig_test.subplots()
x = np.arange(0, 10)
y = np.arange(0, 4)
np.random.seed(19680801)
Z = np.random.randn(3, 9)
# fake dropping the data
ax.pcolormesh(x[:-1], y[:-1], Z[:-1, :-1], shading='flat')
ax = fig_ref.subplots()
# test dropping the data...
x2 = x[:-1]
y2 = y[:-1]
with pytest.warns(MatplotlibDeprecationWarning):
ax.pcolormesh(x2, y2, Z, shading='flat')
@check_figures_equal(extensions=["png"])
def test_pcolorauto(fig_test, fig_ref):
ax = fig_test.subplots()
x = np.arange(0, 10)
y = np.arange(0, 4)
np.random.seed(19680801)
Z = np.random.randn(3, 9)
ax.pcolormesh(x, y, Z, shading='auto')
ax = fig_ref.subplots()
# specify the centers
x2 = x[:-1] + np.diff(x) / 2
y2 = y[:-1] + np.diff(y) / 2
ax.pcolormesh(x2, y2, Z, shading='auto')
@image_comparison(['canonical'])
def test_canonical():
fig, ax = plt.subplots()
ax.plot([1, 2, 3])
@image_comparison(['arc_angles.png'], remove_text=True, style='default')
def test_arc_angles():
# Ellipse parameters
w = 2
h = 1
centre = (0.2, 0.5)
scale = 2
fig, axs = plt.subplots(3, 3)
for i, ax in enumerate(axs.flat):
theta2 = i * 360 / 9
theta1 = theta2 - 45
ax.add_patch(mpatches.Ellipse(centre, w, h, alpha=0.3))
ax.add_patch(mpatches.Arc(centre, w, h, theta1=theta1, theta2=theta2))
# Straight lines intersecting start and end of arc
ax.plot([scale * np.cos(np.deg2rad(theta1)) + centre[0],
centre[0],
scale * np.cos(np.deg2rad(theta2)) + centre[0]],
[scale * np.sin(np.deg2rad(theta1)) + centre[1],
centre[1],
scale * np.sin(np.deg2rad(theta2)) + centre[1]])
ax.set_xlim(-scale, scale)
ax.set_ylim(-scale, scale)
# This looks the same, but it triggers a different code path when it
# gets large enough.
w *= 10
h *= 10
centre = (centre[0] * 10, centre[1] * 10)
scale *= 10
@image_comparison(['arc_ellipse'], remove_text=True)
def test_arc_ellipse():
xcenter, ycenter = 0.38, 0.52
width, height = 1e-1, 3e-1
angle = -30
theta = np.deg2rad(np.arange(360))
x = width / 2. * np.cos(theta)
y = height / 2. * np.sin(theta)
rtheta = np.deg2rad(angle)
R = np.array([
[np.cos(rtheta), -np.sin(rtheta)],
[np.sin(rtheta), np.cos(rtheta)]])
x, y = np.dot(R, np.array([x, y]))
x += xcenter
y += ycenter
fig = plt.figure()
ax = fig.add_subplot(211, aspect='auto')
ax.fill(x, y, alpha=0.2, facecolor='yellow', edgecolor='yellow',
linewidth=1, zorder=1)
e1 = mpatches.Arc((xcenter, ycenter), width, height,
angle=angle, linewidth=2, fill=False, zorder=2)
ax.add_patch(e1)
ax = fig.add_subplot(212, aspect='equal')
ax.fill(x, y, alpha=0.2, facecolor='green', edgecolor='green', zorder=1)
e2 = mpatches.Arc((xcenter, ycenter), width, height,
angle=angle, linewidth=2, fill=False, zorder=2)
ax.add_patch(e2)
def test_marker_as_markerstyle():
fix, ax = plt.subplots()
m = mmarkers.MarkerStyle('o')
ax.plot([1, 2, 3], [3, 2, 1], marker=m)
ax.scatter([1, 2, 3], [4, 3, 2], marker=m)
ax.errorbar([1, 2, 3], [5, 4, 3], marker=m)
@image_comparison(['markevery'], remove_text=True)
def test_markevery():
x = np.linspace(0, 10, 100)
y = np.sin(x) * np.sqrt(x/10 + 0.5)
# check marker only plot
fig, ax = plt.subplots()
ax.plot(x, y, 'o', label='default')
ax.plot(x, y, 'd', markevery=None, label='mark all')
ax.plot(x, y, 's', markevery=10, label='mark every 10')
ax.plot(x, y, '+', markevery=(5, 20), label='mark every 5 starting at 10')
ax.legend()
@image_comparison(['markevery_line'], remove_text=True)
def test_markevery_line():
x = np.linspace(0, 10, 100)
y = np.sin(x) * np.sqrt(x/10 + 0.5)
# check line/marker combos
fig, ax = plt.subplots()
ax.plot(x, y, '-o', label='default')
ax.plot(x, y, '-d', markevery=None, label='mark all')
ax.plot(x, y, '-s', markevery=10, label='mark every 10')
ax.plot(x, y, '-+', markevery=(5, 20), label='mark every 5 starting at 10')
ax.legend()
@image_comparison(['markevery_linear_scales'], remove_text=True, tol=0.001)
def test_markevery_linear_scales():
cases = [None,
8,
(30, 8),
[16, 24, 30], [0, -1],
slice(100, 200, 3),
0.1, 0.3, 1.5,
(0.0, 0.1), (0.45, 0.1)]
cols = 3
gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)
delta = 0.11
x = np.linspace(0, 10 - 2 * delta, 200) + delta
y = np.sin(x) + 1.0 + delta
for i, case in enumerate(cases):
row = (i // cols)
col = i % cols
plt.subplot(gs[row, col])
plt.title('markevery=%s' % str(case))
plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)
@image_comparison(['markevery_linear_scales_zoomed'], remove_text=True)
def test_markevery_linear_scales_zoomed():
cases = [None,
8,
(30, 8),
[16, 24, 30], [0, -1],
slice(100, 200, 3),
0.1, 0.3, 1.5,
(0.0, 0.1), (0.45, 0.1)]
cols = 3
gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)
delta = 0.11
x = np.linspace(0, 10 - 2 * delta, 200) + delta
y = np.sin(x) + 1.0 + delta
for i, case in enumerate(cases):
row = (i // cols)
col = i % cols
plt.subplot(gs[row, col])
plt.title('markevery=%s' % str(case))
plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)
plt.xlim((6, 6.7))
plt.ylim((1.1, 1.7))
@image_comparison(['markevery_log_scales'], remove_text=True)
def test_markevery_log_scales():
cases = [None,
8,
(30, 8),
[16, 24, 30], [0, -1],
slice(100, 200, 3),
0.1, 0.3, 1.5,
(0.0, 0.1), (0.45, 0.1)]
cols = 3
gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)
delta = 0.11
x = np.linspace(0, 10 - 2 * delta, 200) + delta
y = np.sin(x) + 1.0 + delta
for i, case in enumerate(cases):
row = (i // cols)
col = i % cols
plt.subplot(gs[row, col])
plt.title('markevery=%s' % str(case))
plt.xscale('log')
plt.yscale('log')
plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)
@image_comparison(['markevery_polar'], style='default', remove_text=True)
def test_markevery_polar():
cases = [None,
8,
(30, 8),
[16, 24, 30], [0, -1],
slice(100, 200, 3),
0.1, 0.3, 1.5,
(0.0, 0.1), (0.45, 0.1)]
cols = 3
gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)
r = np.linspace(0, 3.0, 200)
theta = 2 * np.pi * r
for i, case in enumerate(cases):
row = (i // cols)
col = i % cols
plt.subplot(gs[row, col], polar=True)
plt.title('markevery=%s' % str(case))
plt.plot(theta, r, 'o', ls='-', ms=4, markevery=case)
@image_comparison(['marker_edges'], remove_text=True)
def test_marker_edges():
x = np.linspace(0, 1, 10)
fig, ax = plt.subplots()
ax.plot(x, np.sin(x), 'y.', ms=30.0, mew=0, mec='r')
ax.plot(x+0.1, np.sin(x), 'y.', ms=30.0, mew=1, mec='r')
ax.plot(x+0.2, np.sin(x), 'y.', ms=30.0, mew=2, mec='b')
@image_comparison(['bar_tick_label_single.png', 'bar_tick_label_single.png'])
def test_bar_tick_label_single():
# From 2516: plot bar with array of string labels for x axis
ax = plt.gca()
ax.bar(0, 1, align='edge', tick_label='0')
# Reuse testcase from above for a labeled data test
data = {"a": 0, "b": 1}
fig, ax = plt.subplots()
ax = plt.gca()
ax.bar("a", "b", align='edge', tick_label='0', data=data)
def test_nan_bar_values():
fig, ax = plt.subplots()
ax.bar([0, 1], [np.nan, 4])
def test_bar_ticklabel_fail():
fig, ax = plt.subplots()
ax.bar([], [])
@image_comparison(['bar_tick_label_multiple.png'])
def test_bar_tick_label_multiple():
# From 2516: plot bar with array of string labels for x axis
ax = plt.gca()
ax.bar([1, 2.5], [1, 2], width=[0.2, 0.5], tick_label=['a', 'b'],
align='center')
@image_comparison(['bar_tick_label_multiple_old_label_alignment.png'])
def test_bar_tick_label_multiple_old_alignment():
# Test that the alignment for class is backward compatible
matplotlib.rcParams["ytick.alignment"] = "center"
ax = plt.gca()
ax.bar([1, 2.5], [1, 2], width=[0.2, 0.5], tick_label=['a', 'b'],
align='center')
@check_figures_equal(extensions=["png"])
def test_bar_decimal_center(fig_test, fig_ref):
ax = fig_test.subplots()
x0 = [1.5, 8.4, 5.3, 4.2]
y0 = [1.1, 2.2, 3.3, 4.4]
x = [Decimal(x) for x in x0]
y = [Decimal(y) for y in y0]
# Test image - vertical, align-center bar chart with Decimal() input
ax.bar(x, y, align='center')
# Reference image
ax = fig_ref.subplots()
ax.bar(x0, y0, align='center')
@check_figures_equal(extensions=["png"])
def test_barh_decimal_center(fig_test, fig_ref):
ax = fig_test.subplots()
x0 = [1.5, 8.4, 5.3, 4.2]
y0 = [1.1, 2.2, 3.3, 4.4]
x = [Decimal(x) for x in x0]
y = [Decimal(y) for y in y0]
# Test image - horizontal, align-center bar chart with Decimal() input
ax.barh(x, y, height=[0.5, 0.5, 1, 1], align='center')
# Reference image
ax = fig_ref.subplots()
ax.barh(x0, y0, height=[0.5, 0.5, 1, 1], align='center')
@check_figures_equal(extensions=["png"])
def test_bar_decimal_width(fig_test, fig_ref):
x = [1.5, 8.4, 5.3, 4.2]
y = [1.1, 2.2, 3.3, 4.4]
w0 = [0.7, 1.45, 1, 2]
w = [Decimal(i) for i in w0]
# Test image - vertical bar chart with Decimal() width
ax = fig_test.subplots()
ax.bar(x, y, width=w, align='center')
# Reference image
ax = fig_ref.subplots()
ax.bar(x, y, width=w0, align='center')
@check_figures_equal(extensions=["png"])
def test_barh_decimal_height(fig_test, fig_ref):
x = [1.5, 8.4, 5.3, 4.2]
y = [1.1, 2.2, 3.3, 4.4]
h0 = [0.7, 1.45, 1, 2]
h = [Decimal(i) for i in h0]
# Test image - horizontal bar chart with Decimal() height
ax = fig_test.subplots()
ax.barh(x, y, height=h, align='center')
# Reference image
ax = fig_ref.subplots()
ax.barh(x, y, height=h0, align='center')
def test_bar_color_none_alpha():
ax = plt.gca()
rects = ax.bar([1, 2], [2, 4], alpha=0.3, color='none', edgecolor='r')
for rect in rects:
assert rect.get_facecolor() == (0, 0, 0, 0)
assert rect.get_edgecolor() == (1, 0, 0, 0.3)
def test_bar_edgecolor_none_alpha():
ax = plt.gca()
rects = ax.bar([1, 2], [2, 4], alpha=0.3, color='r', edgecolor='none')
for rect in rects:
assert rect.get_facecolor() == (1, 0, 0, 0.3)
assert rect.get_edgecolor() == (0, 0, 0, 0)
@image_comparison(['barh_tick_label.png'])
def test_barh_tick_label():
# From 2516: plot barh with array of string labels for y axis
ax = plt.gca()
ax.barh([1, 2.5], [1, 2], height=[0.2, 0.5], tick_label=['a', 'b'],
align='center')
def test_bar_timedelta():
"""Smoketest that bar can handle width and height in delta units."""
fig, ax = plt.subplots()
ax.bar(datetime.datetime(2018, 1, 1), 1.,
width=datetime.timedelta(hours=3))
ax.bar(datetime.datetime(2018, 1, 1), 1.,
xerr=datetime.timedelta(hours=2),
width=datetime.timedelta(hours=3))
fig, ax = plt.subplots()
ax.barh(datetime.datetime(2018, 1, 1), 1,
height=datetime.timedelta(hours=3))
ax.barh(datetime.datetime(2018, 1, 1), 1,
height=datetime.timedelta(hours=3),
yerr=datetime.timedelta(hours=2))
fig, ax = plt.subplots()
ax.barh([datetime.datetime(2018, 1, 1), datetime.datetime(2018, 1, 1)],
np.array([1, 1.5]),
height=datetime.timedelta(hours=3))
ax.barh([datetime.datetime(2018, 1, 1), datetime.datetime(2018, 1, 1)],
np.array([1, 1.5]),
height=[datetime.timedelta(hours=t) for t in [1, 2]])
ax.broken_barh([(datetime.datetime(2018, 1, 1),
datetime.timedelta(hours=1))],
(10, 20))
def test_boxplot_dates_pandas(pd):
# smoke test for boxplot and dates in pandas
data = np.random.rand(5, 2)
years = pd.date_range('1/1/2000',
periods=2, freq=pd.DateOffset(years=1)).year
plt.figure()
plt.boxplot(data, positions=years)
def test_pcolor_regression(pd):
from pandas.plotting import (
register_matplotlib_converters,
deregister_matplotlib_converters,
)
fig = plt.figure()
ax = fig.add_subplot(111)
times = [datetime.datetime(2021, 1, 1)]
while len(times) < 7:
times.append(times[-1] + datetime.timedelta(seconds=120))
y_vals = np.arange(5)
time_axis, y_axis = np.meshgrid(times, y_vals)
shape = (len(y_vals) - 1, len(times) - 1)
z_data = np.arange(shape[0] * shape[1])
z_data.shape = shape
try:
register_matplotlib_converters()
im = ax.pcolormesh(time_axis, y_axis, z_data)
# make sure this does not raise!
fig.canvas.draw()
finally:
deregister_matplotlib_converters()
def test_bar_pandas(pd):
# Smoke test for pandas
df = pd.DataFrame(
{'year': [2018, 2018, 2018],
'month': [1, 1, 1],
'day': [1, 2, 3],
'value': [1, 2, 3]})
df['date'] = pd.to_datetime(df[['year', 'month', 'day']])
monthly = df[['date', 'value']].groupby(['date']).sum()
dates = monthly.index
forecast = monthly['value']
baseline = monthly['value']
fig, ax = plt.subplots()
ax.bar(dates, forecast, width=10, align='center')
ax.plot(dates, baseline, color='orange', lw=4)
def test_bar_pandas_indexed(pd):
# Smoke test for indexed pandas
df = pd.DataFrame({"x": [1., 2., 3.], "width": [.2, .4, .6]},
index=[1, 2, 3])
fig, ax = plt.subplots()
ax.bar(df.x, 1., width=df.width)
@check_figures_equal()
@pytest.mark.style('default')
def test_bar_hatches(fig_test, fig_ref):
ax_test = fig_test.subplots()
ax_ref = fig_ref.subplots()
x = [1, 2]
y = [2, 3]
hatches = ['x', 'o']
for i in range(2):
ax_ref.bar(x[i], y[i], color='C0', hatch=hatches[i])
ax_test.bar(x, y, hatch=hatches)
def test_pandas_minimal_plot(pd):
    # smoke test that series and index objects do not warn
x = pd.Series([1, 2], dtype="float64")
plt.plot(x, x)
plt.plot(x.index, x)
plt.plot(x)
plt.plot(x.index)
@image_comparison(['hist_log'], remove_text=True)
def test_hist_log():
data0 = np.linspace(0, 1, 200)**3
data = np.concatenate([1 - data0, 1 + data0])
fig, ax = plt.subplots()
ax.hist(data, fill=False, log=True)
@check_figures_equal(extensions=["png"])
def test_hist_log_2(fig_test, fig_ref):
axs_test = fig_test.subplots(2, 3)
axs_ref = fig_ref.subplots(2, 3)
for i, histtype in enumerate(["bar", "step", "stepfilled"]):
# Set log scale, then call hist().
axs_test[0, i].set_yscale("log")
axs_test[0, i].hist(1, 1, histtype=histtype)
# Call hist(), then set log scale.
axs_test[1, i].hist(1, 1, histtype=histtype)
axs_test[1, i].set_yscale("log")
# Use hist(..., log=True).
for ax in axs_ref[:, i]:
ax.hist(1, 1, log=True, histtype=histtype)
def test_hist_log_barstacked():
fig, axs = plt.subplots(2)
axs[0].hist([[0], [0, 1]], 2, histtype="barstacked")
axs[0].set_yscale("log")
axs[1].hist([0, 0, 1], 2, histtype="barstacked")
axs[1].set_yscale("log")
fig.canvas.draw()
assert axs[0].get_ylim() == axs[1].get_ylim()
@image_comparison(['hist_bar_empty.png'], remove_text=True)
def test_hist_bar_empty():
# From #3886: creating hist from empty dataset raises ValueError
ax = plt.gca()
ax.hist([], histtype='bar')
@image_comparison(['hist_step_empty.png'], remove_text=True)
def test_hist_step_empty():
# From #3886: creating hist from empty dataset raises ValueError
ax = plt.gca()
ax.hist([], histtype='step')
@image_comparison(['hist_step_filled.png'], remove_text=True)
def test_hist_step_filled():
np.random.seed(0)
x = np.random.randn(1000, 3)
n_bins = 10
kwargs = [{'fill': True}, {'fill': False}, {'fill': None}, {}]*2
types = ['step']*4+['stepfilled']*4
fig, axs = plt.subplots(nrows=2, ncols=4)
for kg, _type, ax in zip(kwargs, types, axs.flat):
ax.hist(x, n_bins, histtype=_type, stacked=True, **kg)
ax.set_title('%s/%s' % (kg, _type))
ax.set_ylim(bottom=-50)
patches = axs[0, 0].patches
assert all(p.get_facecolor() == p.get_edgecolor() for p in patches)
@image_comparison(['hist_density.png'])
def test_hist_density():
np.random.seed(19680801)
data = np.random.standard_normal(2000)
fig, ax = plt.subplots()
ax.hist(data, density=True)
def test_hist_unequal_bins_density():
# Test correct behavior of normalized histogram with unequal bins
# https://github.com/matplotlib/matplotlib/issues/9557
rng = np.random.RandomState(57483)
t = rng.randn(100)
bins = [-3, -1, -0.5, 0, 1, 5]
mpl_heights, _, _ = plt.hist(t, bins=bins, density=True)
np_heights, _ = np.histogram(t, bins=bins, density=True)
assert_allclose(mpl_heights, np_heights)
def test_hist_datetime_datasets():
data = [[datetime.datetime(2017, 1, 1), datetime.datetime(2017, 1, 1)],
[datetime.datetime(2017, 1, 1), datetime.datetime(2017, 1, 2)]]
fig, ax = plt.subplots()
ax.hist(data, stacked=True)
ax.hist(data, stacked=False)
@pytest.mark.parametrize("bins_preprocess",
[mpl.dates.date2num,
lambda bins: bins,
lambda bins: np.asarray(bins).astype('datetime64')],
ids=['date2num', 'datetime.datetime',
'np.datetime64'])
def test_hist_datetime_datasets_bins(bins_preprocess):
data = [[datetime.datetime(2019, 1, 5), datetime.datetime(2019, 1, 11),
datetime.datetime(2019, 2, 1), datetime.datetime(2019, 3, 1)],
[datetime.datetime(2019, 1, 11), datetime.datetime(2019, 2, 5),
datetime.datetime(2019, 2, 18), datetime.datetime(2019, 3, 1)]]
date_edges = [datetime.datetime(2019, 1, 1), datetime.datetime(2019, 2, 1),
datetime.datetime(2019, 3, 1)]
fig, ax = plt.subplots()
_, bins, _ = ax.hist(data, bins=bins_preprocess(date_edges), stacked=True)
np.testing.assert_allclose(bins, mpl.dates.date2num(date_edges))
_, bins, _ = ax.hist(data, bins=bins_preprocess(date_edges), stacked=False)
np.testing.assert_allclose(bins, mpl.dates.date2num(date_edges))
@pytest.mark.parametrize('data, expected_number_of_hists',
[([], 1),
([[]], 1),
([[], []], 2)])
def test_hist_with_empty_input(data, expected_number_of_hists):
hists, _, _ = plt.hist(data)
hists = np.asarray(hists)
if hists.ndim == 1:
assert 1 == expected_number_of_hists
else:
assert hists.shape[0] == expected_number_of_hists
@pytest.mark.parametrize("histtype, zorder",
[("bar", mpl.patches.Patch.zorder),
("step", mpl.lines.Line2D.zorder),
("stepfilled", mpl.patches.Patch.zorder)])
def test_hist_zorder(histtype, zorder):
ax = plt.figure().add_subplot()
ax.hist([1, 2], histtype=histtype)
assert ax.patches
for patch in ax.patches:
assert patch.get_zorder() == zorder
@check_figures_equal(extensions=['png'])
def test_stairs(fig_test, fig_ref):
import matplotlib.lines as mlines
y = np.array([6, 14, 32, 37, 48, 32, 21, 4]) # hist
x = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9.]) # bins
test_axes = fig_test.subplots(3, 2).flatten()
test_axes[0].stairs(y, x, baseline=None)
test_axes[1].stairs(y, x, baseline=None, orientation='horizontal')
test_axes[2].stairs(y, x)
test_axes[3].stairs(y, x, orientation='horizontal')
test_axes[4].stairs(y, x)
test_axes[4].semilogy()
test_axes[5].stairs(y, x, orientation='horizontal')
test_axes[5].semilogx()
# defaults of `PathPatch` to be used for all following Line2D
style = {'solid_joinstyle': 'miter', 'solid_capstyle': 'butt'}
ref_axes = fig_ref.subplots(3, 2).flatten()
ref_axes[0].plot(x, np.append(y, y[-1]), drawstyle='steps-post', **style)
ref_axes[1].plot(np.append(y[0], y), x, drawstyle='steps-post', **style)
ref_axes[2].plot(x, np.append(y, y[-1]), drawstyle='steps-post', **style)
ref_axes[2].add_line(mlines.Line2D([x[0], x[0]], [0, y[0]], **style))
ref_axes[2].add_line(mlines.Line2D([x[-1], x[-1]], [0, y[-1]], **style))
ref_axes[2].set_ylim(0, None)
ref_axes[3].plot(np.append(y[0], y), x, drawstyle='steps-post', **style)
ref_axes[3].add_line(mlines.Line2D([0, y[0]], [x[0], x[0]], **style))
ref_axes[3].add_line(mlines.Line2D([0, y[-1]], [x[-1], x[-1]], **style))
ref_axes[3].set_xlim(0, None)
ref_axes[4].plot(x, np.append(y, y[-1]), drawstyle='steps-post', **style)
ref_axes[4].add_line(mlines.Line2D([x[0], x[0]], [0, y[0]], **style))
ref_axes[4].add_line(mlines.Line2D([x[-1], x[-1]], [0, y[-1]], **style))
ref_axes[4].semilogy()
ref_axes[5].plot(np.append(y[0], y), x, drawstyle='steps-post', **style)
ref_axes[5].add_line(mlines.Line2D([0, y[0]], [x[0], x[0]], **style))
ref_axes[5].add_line(mlines.Line2D([0, y[-1]], [x[-1], x[-1]], **style))
ref_axes[5].semilogx()
@check_figures_equal(extensions=['png'])
def test_stairs_fill(fig_test, fig_ref):
h, bins = [1, 2, 3, 4, 2], [0, 1, 2, 3, 4, 5]
bs = -2
# Test
test_axes = fig_test.subplots(2, 2).flatten()
test_axes[0].stairs(h, bins, fill=True)
test_axes[1].stairs(h, bins, orientation='horizontal', fill=True)
test_axes[2].stairs(h, bins, baseline=bs, fill=True)
test_axes[3].stairs(h, bins, baseline=bs, orientation='horizontal',
fill=True)
    # Ref
ref_axes = fig_ref.subplots(2, 2).flatten()
ref_axes[0].fill_between(bins, np.append(h, h[-1]), step='post', lw=0)
ref_axes[0].set_ylim(0, None)
ref_axes[1].fill_betweenx(bins, np.append(h, h[-1]), step='post', lw=0)
ref_axes[1].set_xlim(0, None)
ref_axes[2].fill_between(bins, np.append(h, h[-1]),
np.ones(len(h)+1)*bs, step='post', lw=0)
ref_axes[2].set_ylim(bs, None)
ref_axes[3].fill_betweenx(bins, np.append(h, h[-1]),
np.ones(len(h)+1)*bs, step='post', lw=0)
ref_axes[3].set_xlim(bs, None)
@check_figures_equal(extensions=['png'])
def test_stairs_update(fig_test, fig_ref):
# fixed ylim because stairs() does autoscale, but updating data does not
ylim = -3, 4
# Test
test_ax = fig_test.add_subplot()
h = test_ax.stairs([1, 2, 3])
test_ax.set_ylim(ylim)
h.set_data([3, 2, 1])
h.set_data(edges=np.arange(4)+2)
h.set_data([1, 2, 1], np.arange(4)/2)
h.set_data([1, 2, 3])
h.set_data(None, np.arange(4))
assert np.allclose(h.get_data()[0], np.arange(1, 4))
assert np.allclose(h.get_data()[1], np.arange(4))
h.set_data(baseline=-2)
assert h.get_data().baseline == -2
# Ref
ref_ax = fig_ref.add_subplot()
h = ref_ax.stairs([1, 2, 3], baseline=-2)
ref_ax.set_ylim(ylim)
@check_figures_equal(extensions=['png'])
def test_stairs_baseline_0(fig_test, fig_ref):
# Test
test_ax = fig_test.add_subplot()
test_ax.stairs([5, 6, 7], baseline=None)
# Ref
ref_ax = fig_ref.add_subplot()
style = {'solid_joinstyle': 'miter', 'solid_capstyle': 'butt'}
ref_ax.plot(range(4), [5, 6, 7, 7], drawstyle='steps-post', **style)
ref_ax.set_ylim(0, None)
def test_stairs_empty():
ax = plt.figure().add_subplot()
ax.stairs([], [42])
assert ax.get_xlim() == (39, 45)
assert ax.get_ylim() == (-0.06, 0.06)
def test_stairs_invalid_nan():
with pytest.raises(ValueError, match='Nan values in "edges"'):
plt.stairs([1, 2], [0, np.nan, 1])
def test_stairs_invalid_mismatch():
with pytest.raises(ValueError, match='Size mismatch'):
plt.stairs([1, 2], [0, 1])
def test_stairs_invalid_update():
h = plt.stairs([1, 2], [0, 1, 2])
with pytest.raises(ValueError, match='Nan values in "edges"'):
h.set_data(edges=[1, np.nan, 2])
def test_stairs_invalid_update2():
h = plt.stairs([1, 2], [0, 1, 2])
with pytest.raises(ValueError, match='Size mismatch'):
h.set_data(edges=np.arange(5))
@image_comparison(['test_stairs_options.png'], remove_text=True)
def test_stairs_options():
x, y = np.array([1, 2, 3, 4, 5]), np.array([1, 2, 3, 4]).astype(float)
yn = y.copy()
yn[1] = np.nan
fig, ax = plt.subplots()
ax.stairs(y*3, x, color='green', fill=True, label="A")
ax.stairs(y, x*3-3, color='red', fill=True,
orientation='horizontal', label="B")
ax.stairs(yn, x, color='orange', ls='--', lw=2, label="C")
ax.stairs(yn/3, x*3-2, ls='--', lw=2, baseline=0.5,
orientation='horizontal', label="D")
ax.stairs(y[::-1]*3+13, x-1, color='red', ls='--', lw=2, baseline=None,
label="E")
ax.stairs(y[::-1]*3+14, x, baseline=26,
color='purple', ls='--', lw=2, label="F")
ax.stairs(yn[::-1]*3+15, x+1, baseline=np.linspace(27, 25, len(y)),
color='blue', ls='--', lw=2, label="G", fill=True)
ax.stairs(y[:-1][::-1]*2+11, x[:-1]+0.5, color='black', ls='--', lw=2,
baseline=12, hatch='//', label="H")
ax.legend(loc=0)
@image_comparison(['test_stairs_datetime.png'])
def test_stairs_datetime():
f, ax = plt.subplots(constrained_layout=True)
ax.stairs(np.arange(36),
np.arange(np.datetime64('2001-12-27'),
np.datetime64('2002-02-02')))
plt.xticks(rotation=30)
def contour_dat():
x = np.linspace(-3, 5, 150)
y = np.linspace(-3, 5, 120)
z = np.cos(x) + np.sin(y[:, np.newaxis])
return x, y, z
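# contour_dat() builds a smooth cos(x) + sin(y) field on a 150 x 120 grid;
# the contour tests below reuse it so their level sets are deterministic.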
@image_comparison(['contour_hatching'], remove_text=True, style='mpl20')
def test_contour_hatching():
x, y, z = contour_dat()
fig, ax = plt.subplots()
ax.contourf(x, y, z, 7, hatches=['/', '\\', '//', '-'],
cmap=plt.get_cmap('gray'),
extend='both', alpha=0.5)
@image_comparison(['contour_colorbar'], style='mpl20')
def test_contour_colorbar():
# Remove this line when this test image is regenerated.
plt.rcParams['pcolormesh.snap'] = False
x, y, z = contour_dat()
fig, ax = plt.subplots()
cs = ax.contourf(x, y, z, levels=np.arange(-1.8, 1.801, 0.2),
cmap=plt.get_cmap('RdBu'),
vmin=-0.6,
vmax=0.6,
extend='both')
cs1 = ax.contour(x, y, z, levels=np.arange(-2.2, -0.599, 0.2),
colors=['y'],
linestyles='solid',
linewidths=2)
cs2 = ax.contour(x, y, z, levels=np.arange(0.6, 2.2, 0.2),
colors=['c'],
linewidths=2)
cbar = fig.colorbar(cs, ax=ax)
cbar.add_lines(cs1)
cbar.add_lines(cs2, erase=False)
@image_comparison(['hist2d', 'hist2d'], remove_text=True, style='mpl20')
def test_hist2d():
# Remove this line when this test image is regenerated.
plt.rcParams['pcolormesh.snap'] = False
np.random.seed(0)
# make it not symmetric in case we switch x and y axis
x = np.random.randn(100)*2+5
y = np.random.randn(100)-2
fig, ax = plt.subplots()
ax.hist2d(x, y, bins=10, rasterized=True)
# Reuse testcase from above for a labeled data test
data = {"x": x, "y": y}
fig, ax = plt.subplots()
ax.hist2d("x", "y", bins=10, data=data, rasterized=True)
@image_comparison(['hist2d_transpose'], remove_text=True, style='mpl20')
def test_hist2d_transpose():
# Remove this line when this test image is regenerated.
plt.rcParams['pcolormesh.snap'] = False
np.random.seed(0)
# make sure the output from np.histogram is transposed before
# passing to pcolorfast
x = np.array([5]*100)
y = np.random.randn(100)-2
fig, ax = plt.subplots()
ax.hist2d(x, y, bins=10, rasterized=True)
def test_hist2d_density():
x, y = np.random.random((2, 100))
ax = plt.figure().subplots()
for obj in [ax, plt]:
obj.hist2d(x, y, density=True)
class TestScatter:
@image_comparison(['scatter'], style='mpl20', remove_text=True)
def test_scatter_plot(self):
data = {"x": np.array([3, 4, 2, 6]), "y": np.array([2, 5, 2, 3]),
"c": ['r', 'y', 'b', 'lime'], "s": [24, 15, 19, 29],
"c2": ['0.5', '0.6', '0.7', '0.8']}
fig, ax = plt.subplots()
ax.scatter(data["x"] - 1., data["y"] - 1., c=data["c"], s=data["s"])
ax.scatter(data["x"] + 1., data["y"] + 1., c=data["c2"], s=data["s"])
ax.scatter("x", "y", c="c", s="s", data=data)
@image_comparison(['scatter_marker.png'], remove_text=True)
def test_scatter_marker(self):
fig, (ax0, ax1, ax2) = plt.subplots(ncols=3)
ax0.scatter([3, 4, 2, 6], [2, 5, 2, 3],
c=[(1, 0, 0), 'y', 'b', 'lime'],
s=[60, 50, 40, 30],
edgecolors=['k', 'r', 'g', 'b'],
marker='s')
ax1.scatter([3, 4, 2, 6], [2, 5, 2, 3],
c=[(1, 0, 0), 'y', 'b', 'lime'],
s=[60, 50, 40, 30],
edgecolors=['k', 'r', 'g', 'b'],
marker=mmarkers.MarkerStyle('o', fillstyle='top'))
# unit area ellipse
rx, ry = 3, 1
area = rx * ry * np.pi
theta = np.linspace(0, 2 * np.pi, 21)
verts = np.column_stack([np.cos(theta) * rx / area,
np.sin(theta) * ry / area])
ax2.scatter([3, 4, 2, 6], [2, 5, 2, 3],
c=[(1, 0, 0), 'y', 'b', 'lime'],
s=[60, 50, 40, 30],
edgecolors=['k', 'r', 'g', 'b'],
marker=verts)
@image_comparison(['scatter_2D'], remove_text=True, extensions=['png'])
def test_scatter_2D(self):
x = np.arange(3)
y = np.arange(2)
x, y = np.meshgrid(x, y)
z = x + y
fig, ax = plt.subplots()
ax.scatter(x, y, c=z, s=200, edgecolors='face')
@check_figures_equal(extensions=["png"])
def test_scatter_decimal(self, fig_test, fig_ref):
x0 = np.array([1.5, 8.4, 5.3, 4.2])
y0 = np.array([1.1, 2.2, 3.3, 4.4])
x = np.array([Decimal(i) for i in x0])
y = np.array([Decimal(i) for i in y0])
c = ['r', 'y', 'b', 'lime']
s = [24, 15, 19, 29]
# Test image - scatter plot with Decimal() input
ax = fig_test.subplots()
ax.scatter(x, y, c=c, s=s)
# Reference image
ax = fig_ref.subplots()
ax.scatter(x0, y0, c=c, s=s)
def test_scatter_color(self):
# Try to catch cases where 'c' kwarg should have been used.
with pytest.raises(ValueError):
plt.scatter([1, 2], [1, 2], color=[0.1, 0.2])
with pytest.raises(ValueError):
plt.scatter([1, 2, 3], [1, 2, 3], color=[1, 2, 3])
def test_scatter_unfilled(self):
coll = plt.scatter([0, 1, 2], [1, 3, 2], c=['0.1', '0.3', '0.5'],
marker=mmarkers.MarkerStyle('o', fillstyle='none'),
linewidths=[1.1, 1.2, 1.3])
assert coll.get_facecolors().shape == (0, 4) # no facecolors
assert_array_equal(coll.get_edgecolors(), [[0.1, 0.1, 0.1, 1],
[0.3, 0.3, 0.3, 1],
[0.5, 0.5, 0.5, 1]])
assert_array_equal(coll.get_linewidths(), [1.1, 1.2, 1.3])
@pytest.mark.style('default')
def test_scatter_unfillable(self):
coll = plt.scatter([0, 1, 2], [1, 3, 2], c=['0.1', '0.3', '0.5'],
marker='x',
linewidths=[1.1, 1.2, 1.3])
assert_array_equal(coll.get_facecolors(), coll.get_edgecolors())
assert_array_equal(coll.get_edgecolors(), [[0.1, 0.1, 0.1, 1],
[0.3, 0.3, 0.3, 1],
[0.5, 0.5, 0.5, 1]])
assert_array_equal(coll.get_linewidths(), [1.1, 1.2, 1.3])
def test_scatter_size_arg_size(self):
x = np.arange(4)
with pytest.raises(ValueError, match='same size as x and y'):
plt.scatter(x, x, x[1:])
with pytest.raises(ValueError, match='same size as x and y'):
plt.scatter(x[1:], x[1:], x)
with pytest.raises(ValueError, match='float array-like'):
plt.scatter(x, x, 'foo')
def test_scatter_edgecolor_RGB(self):
# Github issue 19066
coll = plt.scatter([1, 2, 3], [1, np.nan, np.nan],
edgecolor=(1, 0, 0))
assert mcolors.same_color(coll.get_edgecolor(), (1, 0, 0))
coll = plt.scatter([1, 2, 3, 4], [1, np.nan, np.nan, 1],
edgecolor=(1, 0, 0, 1))
assert mcolors.same_color(coll.get_edgecolor(), (1, 0, 0, 1))
@check_figures_equal(extensions=["png"])
def test_scatter_invalid_color(self, fig_test, fig_ref):
ax = fig_test.subplots()
cmap = plt.get_cmap("viridis", 16)
cmap.set_bad("k", 1)
# Set a nonuniform size to prevent the last call to `scatter` (plotting
# the invalid points separately in fig_ref) from using the marker
# stamping fast path, which would result in slightly offset markers.
ax.scatter(range(4), range(4),
c=[1, np.nan, 2, np.nan], s=[1, 2, 3, 4],
cmap=cmap, plotnonfinite=True)
ax = fig_ref.subplots()
cmap = plt.get_cmap("viridis", 16)
ax.scatter([0, 2], [0, 2], c=[1, 2], s=[1, 3], cmap=cmap)
ax.scatter([1, 3], [1, 3], s=[2, 4], color="k")
@check_figures_equal(extensions=["png"])
def test_scatter_no_invalid_color(self, fig_test, fig_ref):
# With plotnonfinite=False we plot only 2 points.
ax = fig_test.subplots()
cmap = plt.get_cmap("viridis", 16)
cmap.set_bad("k", 1)
ax.scatter(range(4), range(4),
c=[1, np.nan, 2, np.nan], s=[1, 2, 3, 4],
cmap=cmap, plotnonfinite=False)
ax = fig_ref.subplots()
ax.scatter([0, 2], [0, 2], c=[1, 2], s=[1, 3], cmap=cmap)
@check_figures_equal(extensions=["png"])
def test_scatter_norm_vminvmax(self, fig_test, fig_ref):
"""Parameters vmin, vmax should be ignored if norm is given."""
x = [1, 2, 3]
ax = fig_ref.subplots()
ax.scatter(x, x, c=x, vmin=0, vmax=5)
ax = fig_test.subplots()
with pytest.warns(MatplotlibDeprecationWarning,
match="Passing parameters norm and vmin/vmax "
"simultaneously is deprecated."):
ax.scatter(x, x, c=x, norm=mcolors.Normalize(-10, 10),
vmin=0, vmax=5)
@check_figures_equal(extensions=["png"])
def test_scatter_single_point(self, fig_test, fig_ref):
ax = fig_test.subplots()
ax.scatter(1, 1, c=1)
ax = fig_ref.subplots()
ax.scatter([1], [1], c=[1])
@check_figures_equal(extensions=["png"])
def test_scatter_different_shapes(self, fig_test, fig_ref):
x = np.arange(10)
ax = fig_test.subplots()
ax.scatter(x, x.reshape(2, 5), c=x.reshape(5, 2))
ax = fig_ref.subplots()
ax.scatter(x.reshape(5, 2), x, c=x.reshape(2, 5))
# Parameters for *test_scatter_c*. NB: assuming that the
# scatter plot will have 4 elements. The tuple scheme is:
# (*c* parameter case, exception regexp key or None if no exception)
params_test_scatter_c = [
# single string:
('0.5', None),
# Single letter-sequences
(["rgby"], "conversion"),
# Special cases
("red", None),
("none", None),
(None, None),
(["r", "g", "b", "none"], None),
# Non-valid color spec (FWIW, 'jaune' means yellow in French)
("jaune", "conversion"),
(["jaune"], "conversion"), # wrong type before wrong size
(["jaune"]*4, "conversion"),
# Value-mapping like
([0.5]*3, None),  # should emit a warning to alert the user, though
([0.5]*4, None), # NB: no warning as matching size allows mapping
([0.5]*5, "shape"),
# list of strings:
(['0.5', '0.4', '0.6', '0.7'], None),
(['0.5', 'red', '0.6', 'C5'], None),
(['0.5', 0.5, '0.6', 'C5'], "conversion"),
# RGB values
([[1, 0, 0]], None),
([[1, 0, 0]]*3, "shape"),
([[1, 0, 0]]*4, None),
([[1, 0, 0]]*5, "shape"),
# RGBA values
([[1, 0, 0, 0.5]], None),
([[1, 0, 0, 0.5]]*3, "shape"),
([[1, 0, 0, 0.5]]*4, None),
([[1, 0, 0, 0.5]]*5, "shape"),
# Mix of valid color specs
([[1, 0, 0, 0.5]]*3 + [[1, 0, 0]], None),
([[1, 0, 0, 0.5], "red", "0.0"], "shape"),
([[1, 0, 0, 0.5], "red", "0.0", "C5"], None),
([[1, 0, 0, 0.5], "red", "0.0", "C5", [0, 1, 0]], "shape"),
# Mix of valid and non valid color specs
([[1, 0, 0, 0.5], "red", "jaune"], "conversion"),
([[1, 0, 0, 0.5], "red", "0.0", "jaune"], "conversion"),
([[1, 0, 0, 0.5], "red", "0.0", "C5", "jaune"], "conversion"),
]
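# Each case above is fed to Axes._parse_scatter_color_args with xsize=4 (see
# test_scatter_c below): a 'shape' key expects the element-count check to
# fail, a 'conversion' key expects the color-conversion check to fail, and
# None means the c spec should be accepted without error.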
@pytest.mark.parametrize('c_case, re_key', params_test_scatter_c)
def test_scatter_c(self, c_case, re_key):
def get_next_color():
return 'blue' # currently unused
xsize = 4
# Additional checking of *c* (introduced in #11383).
REGEXP = {
"shape": "^'c' argument has [0-9]+ elements", # shape mismatch
"conversion": "^'c' argument must be a color", # bad vals
}
if re_key is None:
mpl.axes.Axes._parse_scatter_color_args(
c=c_case, edgecolors="black", kwargs={}, xsize=xsize,
get_next_color_func=get_next_color)
else:
with pytest.raises(ValueError, match=REGEXP[re_key]):
mpl.axes.Axes._parse_scatter_color_args(
c=c_case, edgecolors="black", kwargs={}, xsize=xsize,
get_next_color_func=get_next_color)
@pytest.mark.style('default')
@check_figures_equal(extensions=["png"])
def test_scatter_single_color_c(self, fig_test, fig_ref):
rgb = [[1, 0.5, 0.05]]
rgba = [[1, 0.5, 0.05, .5]]
# set via color kwarg
ax_ref = fig_ref.subplots()
ax_ref.scatter(np.ones(3), range(3), color=rgb)
ax_ref.scatter(np.ones(4)*2, range(4), color=rgba)
# set via broadcasting via c
ax_test = fig_test.subplots()
ax_test.scatter(np.ones(3), range(3), c=rgb)
ax_test.scatter(np.ones(4)*2, range(4), c=rgba)
def test_scatter_linewidths(self):
x = np.arange(5)
fig, ax = plt.subplots()
for i in range(3):
pc = ax.scatter(x, np.full(5, i), c=f'C{i}', marker='x', s=100,
linewidths=i + 1)
assert pc.get_linewidths() == i + 1
pc = ax.scatter(x, np.full(5, 3), c='C3', marker='x', s=100,
linewidths=[*range(1, 5), None])
assert_array_equal(pc.get_linewidths(),
[*range(1, 5), mpl.rcParams['lines.linewidth']])
def _params(c=None, xsize=2, *, edgecolors=None, **kwargs):
return (c, edgecolors, kwargs if kwargs is not None else {}, xsize)
_result = namedtuple('_result', 'c, colors')
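# _params packs the positional arguments of _parse_scatter_color_args in
# call order (c, edgecolors, kwargs, xsize); _result records the expected
# returned c and the RGBA array it should be mapped to.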
@pytest.mark.parametrize(
'params, expected_result',
[(_params(),
_result(c='b', colors=np.array([[0, 0, 1, 1]]))),
(_params(c='r'),
_result(c='r', colors=np.array([[1, 0, 0, 1]]))),
(_params(c='r', colors='b'),
_result(c='r', colors=np.array([[1, 0, 0, 1]]))),
# color
(_params(color='b'),
_result(c='b', colors=np.array([[0, 0, 1, 1]]))),
(_params(color=['b', 'g']),
_result(c=['b', 'g'], colors=np.array([[0, 0, 1, 1], [0, .5, 0, 1]]))),
])
def test_parse_scatter_color_args(params, expected_result):
def get_next_color():
return 'blue' # currently unused
c, colors, _edgecolors = mpl.axes.Axes._parse_scatter_color_args(
*params, get_next_color_func=get_next_color)
assert c == expected_result.c
assert_allclose(colors, expected_result.colors)
del _params
del _result
@pytest.mark.parametrize(
'kwargs, expected_edgecolors',
[(dict(), None),
(dict(c='b'), None),
(dict(edgecolors='r'), 'r'),
(dict(edgecolors=['r', 'g']), ['r', 'g']),
(dict(edgecolor='r'), 'r'),
(dict(edgecolors='face'), 'face'),
(dict(edgecolors='none'), 'none'),
(dict(edgecolor='r', edgecolors='g'), 'r'),
(dict(c='b', edgecolor='r', edgecolors='g'), 'r'),
(dict(color='r'), 'r'),
(dict(color='r', edgecolor='g'), 'g'),
])
def test_parse_scatter_color_args_edgecolors(kwargs, expected_edgecolors):
def get_next_color():
return 'blue' # currently unused
c = kwargs.pop('c', None)
edgecolors = kwargs.pop('edgecolors', None)
_, _, result_edgecolors = \
mpl.axes.Axes._parse_scatter_color_args(
c, edgecolors, kwargs, xsize=2, get_next_color_func=get_next_color)
assert result_edgecolors == expected_edgecolors
def test_parse_scatter_color_args_error():
def get_next_color():
return 'blue' # currently unused
with pytest.raises(ValueError,
match="RGBA values should be within 0-1 range"):
c = np.array([[0.1, 0.2, 0.7], [0.2, 0.4, 1.4]]) # value > 1
mpl.axes.Axes._parse_scatter_color_args(
c, None, kwargs={}, xsize=2, get_next_color_func=get_next_color)
def test_as_mpl_axes_api():
# tests the _as_mpl_axes api
from matplotlib.projections.polar import PolarAxes
class Polar:
def __init__(self):
self.theta_offset = 0
def _as_mpl_axes(self):
# implement the matplotlib axes interface
return PolarAxes, {'theta_offset': self.theta_offset}
prj = Polar()
prj2 = Polar()
prj2.theta_offset = np.pi
prj3 = Polar()
# testing axes creation with plt.axes
ax = plt.axes([0, 0, 1, 1], projection=prj)
assert type(ax) == PolarAxes
with pytest.warns(
MatplotlibDeprecationWarning,
match=r'Calling gca\(\) with keyword arguments was deprecated'):
ax_via_gca = plt.gca(projection=prj)
assert ax_via_gca is ax
plt.close()
# testing axes creation with gca
with pytest.warns(
MatplotlibDeprecationWarning,
match=r'Calling gca\(\) with keyword arguments was deprecated'):
ax = plt.gca(projection=prj)
assert type(ax) == mpl.axes._subplots.subplot_class_factory(PolarAxes)
with pytest.warns(
MatplotlibDeprecationWarning,
match=r'Calling gca\(\) with keyword arguments was deprecated'):
ax_via_gca = plt.gca(projection=prj)
assert ax_via_gca is ax
# try getting the axes given a different polar projection
with pytest.warns(
MatplotlibDeprecationWarning,
match=r'Calling gca\(\) with keyword arguments was deprecated'):
ax_via_gca = plt.gca(projection=prj2)
assert ax_via_gca is ax
assert ax.get_theta_offset() == 0
# try getting the axes given an == (not is) polar projection
with pytest.warns(
MatplotlibDeprecationWarning,
match=r'Calling gca\(\) with keyword arguments was deprecated'):
ax_via_gca = plt.gca(projection=prj3)
assert ax_via_gca is ax
plt.close()
# testing axes creation with subplot
ax = plt.subplot(121, projection=prj)
assert type(ax) == mpl.axes._subplots.subplot_class_factory(PolarAxes)
plt.close()
def test_pyplot_axes():
# test focusing of Axes in other Figure
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
plt.sca(ax1)
assert ax1 is plt.gca()
assert fig1 is plt.gcf()
plt.close(fig1)
plt.close(fig2)
@image_comparison(['log_scales'])
def test_log_scales():
fig, ax = plt.subplots()
ax.plot(np.log(np.linspace(0.1, 100)))
ax.set_yscale('log', base=5.5)
ax.invert_yaxis()
ax.set_xscale('log', base=9.0)
def test_log_scales_no_data():
_, ax = plt.subplots()
ax.set(xscale="log", yscale="log")
ax.xaxis.set_major_locator(mticker.MultipleLocator(1))
assert ax.get_xlim() == ax.get_ylim() == (1, 10)
def test_log_scales_invalid():
fig, ax = plt.subplots()
ax.set_xscale('log')
with pytest.warns(UserWarning, match='Attempted to set non-positive'):
ax.set_xlim(-1, 10)
ax.set_yscale('log')
with pytest.warns(UserWarning, match='Attempted to set non-positive'):
ax.set_ylim(-1, 10)
@image_comparison(['stackplot_test_image', 'stackplot_test_image'])
def test_stackplot():
fig = plt.figure()
x = np.linspace(0, 10, 10)
y1 = 1.0 * x
y2 = 2.0 * x + 1
y3 = 3.0 * x + 2
ax = fig.add_subplot(1, 1, 1)
ax.stackplot(x, y1, y2, y3)
ax.set_xlim((0, 10))
ax.set_ylim((0, 70))
# Reuse testcase from above for a labeled data test
data = {"x": x, "y1": y1, "y2": y2, "y3": y3}
fig, ax = plt.subplots()
ax.stackplot("x", "y1", "y2", "y3", data=data)
ax.set_xlim((0, 10))
ax.set_ylim((0, 70))
@image_comparison(['stackplot_test_baseline'], remove_text=True)
def test_stackplot_baseline():
np.random.seed(0)
def layers(n, m):
a = np.zeros((m, n))
for i in range(n):
for j in range(5):
x = 1 / (.1 + np.random.random())
y = 2 * np.random.random() - .5
z = 10 / (.1 + np.random.random())
a[:, i] += x * np.exp(-((np.arange(m) / m - y) * z) ** 2)
return a
d = layers(3, 100)
d[50, :] = 0 # test for fixed weighted wiggle (issue #6313)
fig, axs = plt.subplots(2, 2)
axs[0, 0].stackplot(range(100), d.T, baseline='zero')
axs[0, 1].stackplot(range(100), d.T, baseline='sym')
axs[1, 0].stackplot(range(100), d.T, baseline='wiggle')
axs[1, 1].stackplot(range(100), d.T, baseline='weighted_wiggle')
def _bxp_test_helper(
stats_kwargs={}, transform_stats=lambda s: s, bxp_kwargs={}):
np.random.seed(937)
logstats = mpl.cbook.boxplot_stats(
np.random.lognormal(mean=1.25, sigma=1., size=(37, 4)), **stats_kwargs)
fig, ax = plt.subplots()
if bxp_kwargs.get('vert', True):
ax.set_yscale('log')
else:
ax.set_xscale('log')
# Work around baseline images generated back when bxp did not respect the
# boxplot.boxprops.linewidth rcParam when patch_artist is False.
if not bxp_kwargs.get('patch_artist', False):
mpl.rcParams['boxplot.boxprops.linewidth'] = \
mpl.rcParams['lines.linewidth']
ax.bxp(transform_stats(logstats), **bxp_kwargs)
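# _bxp_test_helper draws bxp() for log-normal samples on a log-scaled axis;
# transform_stats lets individual tests edit the boxplot_stats dicts (add
# labels, drop fliers, ...) and bxp_kwargs is forwarded straight to ax.bxp().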
@image_comparison(['bxp_baseline.png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_baseline():
_bxp_test_helper()
@image_comparison(['bxp_rangewhis.png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_rangewhis():
_bxp_test_helper(stats_kwargs=dict(whis=[0, 100]))
@image_comparison(['bxp_percentilewhis.png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_percentilewhis():
_bxp_test_helper(stats_kwargs=dict(whis=[5, 95]))
@image_comparison(['bxp_with_xlabels.png'],
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_with_xlabels():
def transform(stats):
for s, label in zip(stats, list('ABCD')):
s['label'] = label
return stats
_bxp_test_helper(transform_stats=transform)
@image_comparison(['bxp_horizontal.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default',
tol=0.1)
def test_bxp_horizontal():
_bxp_test_helper(bxp_kwargs=dict(vert=False))
@image_comparison(['bxp_with_ylabels.png'],
savefig_kwarg={'dpi': 40},
style='default',
tol=0.1)
def test_bxp_with_ylabels():
def transform(stats):
for s, label in zip(stats, list('ABCD')):
s['label'] = label
return stats
_bxp_test_helper(transform_stats=transform, bxp_kwargs=dict(vert=False))
@image_comparison(['bxp_patchartist.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_patchartist():
_bxp_test_helper(bxp_kwargs=dict(patch_artist=True))
@image_comparison(['bxp_custompatchartist.png'],
remove_text=True,
savefig_kwarg={'dpi': 100},
style='default')
def test_bxp_custompatchartist():
_bxp_test_helper(bxp_kwargs=dict(
patch_artist=True,
boxprops=dict(facecolor='yellow', edgecolor='green', ls=':')))
@image_comparison(['bxp_customoutlier.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_customoutlier():
_bxp_test_helper(bxp_kwargs=dict(
flierprops=dict(linestyle='none', marker='d', mfc='g')))
@image_comparison(['bxp_withmean_custompoint.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_showcustommean():
_bxp_test_helper(bxp_kwargs=dict(
showmeans=True,
meanprops=dict(linestyle='none', marker='d', mfc='green'),
))
@image_comparison(['bxp_custombox.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_custombox():
_bxp_test_helper(bxp_kwargs=dict(
boxprops=dict(linestyle='--', color='b', lw=3)))
@image_comparison(['bxp_custommedian.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_custommedian():
_bxp_test_helper(bxp_kwargs=dict(
medianprops=dict(linestyle='--', color='b', lw=3)))
@image_comparison(['bxp_customcap.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_customcap():
_bxp_test_helper(bxp_kwargs=dict(
capprops=dict(linestyle='--', color='g', lw=3)))
@image_comparison(['bxp_customwhisker.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_customwhisker():
_bxp_test_helper(bxp_kwargs=dict(
whiskerprops=dict(linestyle='-', color='m', lw=3)))
@image_comparison(['bxp_withnotch.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_shownotches():
_bxp_test_helper(bxp_kwargs=dict(shownotches=True))
@image_comparison(['bxp_nocaps.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_nocaps():
_bxp_test_helper(bxp_kwargs=dict(showcaps=False))
@image_comparison(['bxp_nobox.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_nobox():
_bxp_test_helper(bxp_kwargs=dict(showbox=False))
@image_comparison(['bxp_no_flier_stats.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_no_flier_stats():
def transform(stats):
for s in stats:
s.pop('fliers', None)
return stats
_bxp_test_helper(transform_stats=transform,
bxp_kwargs=dict(showfliers=False))
@image_comparison(['bxp_withmean_point.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_showmean():
_bxp_test_helper(bxp_kwargs=dict(showmeans=True, meanline=False))
@image_comparison(['bxp_withmean_line.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_showmeanasline():
_bxp_test_helper(bxp_kwargs=dict(showmeans=True, meanline=True))
@image_comparison(['bxp_scalarwidth.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_scalarwidth():
_bxp_test_helper(bxp_kwargs=dict(widths=.25))
@image_comparison(['bxp_customwidths.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_customwidths():
_bxp_test_helper(bxp_kwargs=dict(widths=[0.10, 0.25, 0.65, 0.85]))
@image_comparison(['bxp_custompositions.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_bxp_custompositions():
_bxp_test_helper(bxp_kwargs=dict(positions=[1, 5, 6, 7]))
def test_bxp_bad_widths():
with pytest.raises(ValueError):
_bxp_test_helper(bxp_kwargs=dict(widths=[1]))
def test_bxp_bad_positions():
with pytest.raises(ValueError):
_bxp_test_helper(bxp_kwargs=dict(positions=[2, 3]))
@image_comparison(['boxplot', 'boxplot'], tol=1.28, style='default')
def test_boxplot():
# Randomness used for bootstrapping.
np.random.seed(937)
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
fig, ax = plt.subplots()
ax.boxplot([x, x], bootstrap=10000, notch=1)
ax.set_ylim((-30, 30))
# Reuse testcase from above for a labeled data test
data = {"x": [x, x]}
fig, ax = plt.subplots()
ax.boxplot("x", bootstrap=10000, notch=1, data=data)
ax.set_ylim((-30, 30))
@image_comparison(['boxplot_sym2.png'], remove_text=True, style='default')
def test_boxplot_sym2():
# Randomness used for bootstrapping.
np.random.seed(937)
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
fig, [ax1, ax2] = plt.subplots(1, 2)
ax1.boxplot([x, x], bootstrap=10000, sym='^')
ax1.set_ylim((-30, 30))
ax2.boxplot([x, x], bootstrap=10000, sym='g')
ax2.set_ylim((-30, 30))
@image_comparison(['boxplot_sym.png'],
remove_text=True,
savefig_kwarg={'dpi': 40},
style='default')
def test_boxplot_sym():
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
fig, ax = plt.subplots()
ax.boxplot([x, x], sym='gs')
ax.set_ylim((-30, 30))
@image_comparison(['boxplot_autorange_false_whiskers.png',
'boxplot_autorange_true_whiskers.png'],
style='default')
def test_boxplot_autorange_whiskers():
# Randomness used for bootstrapping.
np.random.seed(937)
x = np.ones(140)
x = np.hstack([0, x, 2])
fig1, ax1 = plt.subplots()
ax1.boxplot([x, x], bootstrap=10000, notch=1)
ax1.set_ylim((-5, 5))
fig2, ax2 = plt.subplots()
ax2.boxplot([x, x], bootstrap=10000, notch=1, autorange=True)
ax2.set_ylim((-5, 5))
def _rc_test_bxp_helper(ax, rc_dict):
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
with matplotlib.rc_context(rc_dict):
ax.boxplot([x, x])
return ax
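# _rc_test_bxp_helper draws a boxplot inside rc_context(rc_dict) so that each
# axis in test_boxplot_rc_parameters picks up its own temporary rcParams.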
@image_comparison(['boxplot_rc_parameters'],
savefig_kwarg={'dpi': 100}, remove_text=True,
tol=1, style='default')
def test_boxplot_rc_parameters():
# Randomness used for bootstrapping.
np.random.seed(937)
fig, ax = plt.subplots(3)
rc_axis0 = {
'boxplot.notch': True,
'boxplot.whiskers': [5, 95],
'boxplot.bootstrap': 10000,
'boxplot.flierprops.color': 'b',
'boxplot.flierprops.marker': 'o',
'boxplot.flierprops.markerfacecolor': 'g',
'boxplot.flierprops.markeredgecolor': 'b',
'boxplot.flierprops.markersize': 5,
'boxplot.flierprops.linestyle': '--',
'boxplot.flierprops.linewidth': 2.0,
'boxplot.boxprops.color': 'r',
'boxplot.boxprops.linewidth': 2.0,
'boxplot.boxprops.linestyle': '--',
'boxplot.capprops.color': 'c',
'boxplot.capprops.linewidth': 2.0,
'boxplot.capprops.linestyle': '--',
'boxplot.medianprops.color': 'k',
'boxplot.medianprops.linewidth': 2.0,
'boxplot.medianprops.linestyle': '--',
}
rc_axis1 = {
'boxplot.vertical': False,
'boxplot.whiskers': [0, 100],
'boxplot.patchartist': True,
}
rc_axis2 = {
'boxplot.whiskers': 2.0,
'boxplot.showcaps': False,
'boxplot.showbox': False,
'boxplot.showfliers': False,
'boxplot.showmeans': True,
'boxplot.meanline': True,
'boxplot.meanprops.color': 'c',
'boxplot.meanprops.linewidth': 2.0,
'boxplot.meanprops.linestyle': '--',
'boxplot.whiskerprops.color': 'r',
'boxplot.whiskerprops.linewidth': 2.0,
'boxplot.whiskerprops.linestyle': '-.',
}
dict_list = [rc_axis0, rc_axis1, rc_axis2]
for axis, rc_axis in zip(ax, dict_list):
_rc_test_bxp_helper(axis, rc_axis)
assert (matplotlib.patches.PathPatch in
[type(t) for t in ax[1].get_children()])
@image_comparison(['boxplot_with_CIarray.png'],
remove_text=True, savefig_kwarg={'dpi': 40}, style='default')
def test_boxplot_with_CIarray():
# Randomness used for bootstrapping.
np.random.seed(937)
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
fig, ax = plt.subplots()
CIs = np.array([[-1.5, 3.], [-1., 3.5]])
# show a boxplot with Matplotlib medians and confidence intervals, and
# another with manual values
ax.boxplot([x, x], bootstrap=10000, usermedians=[None, 1.0],
conf_intervals=CIs, notch=1)
ax.set_ylim((-30, 30))
@image_comparison(['boxplot_no_inverted_whisker.png'],
remove_text=True, savefig_kwarg={'dpi': 40}, style='default')
def test_boxplot_no_weird_whisker():
x = np.array([3, 9000, 150, 88, 350, 200000, 1400, 960],
dtype=np.float64)
ax1 = plt.axes()
ax1.boxplot(x)
ax1.set_yscale('log')
ax1.yaxis.grid(False, which='minor')
ax1.xaxis.grid(False)
def test_boxplot_bad_medians():
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.boxplot(x, usermedians=[1, 2])
with pytest.raises(ValueError):
ax.boxplot([x, x], usermedians=[[1, 2], [1, 2]])
def test_boxplot_bad_ci():
x = np.linspace(-7, 7, 140)
x = np.hstack([-25, x, 25])
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.boxplot([x, x], conf_intervals=[[1, 2]])
with pytest.raises(ValueError):
ax.boxplot([x, x], conf_intervals=[[1, 2], [1]])
def test_boxplot_zorder():
x = np.arange(10)
fig, ax = plt.subplots()
assert ax.boxplot(x)['boxes'][0].get_zorder() == 2
assert ax.boxplot(x, zorder=10)['boxes'][0].get_zorder() == 10
def test_boxplot_marker_behavior():
plt.rcParams['lines.marker'] = 's'
plt.rcParams['boxplot.flierprops.marker'] = 'o'
plt.rcParams['boxplot.meanprops.marker'] = '^'
fig, ax = plt.subplots()
test_data = np.arange(100)
test_data[-1] = 150 # a flier point
bxp_handle = ax.boxplot(test_data, showmeans=True)
for bxp_lines in ['whiskers', 'caps', 'boxes', 'medians']:
for each_line in bxp_handle[bxp_lines]:
# Ensure that the rcParams['lines.marker'] is overridden by ''
assert each_line.get_marker() == ''
# Ensure that markers for fliers and means aren't overridden with ''
assert bxp_handle['fliers'][0].get_marker() == 'o'
assert bxp_handle['means'][0].get_marker() == '^'
@image_comparison(['boxplot_mod_artists_after_plotting.png'],
remove_text=True, savefig_kwarg={'dpi': 40}, style='default')
def test_boxplot_mod_artist_after_plotting():
x = [0.15, 0.11, 0.06, 0.06, 0.12, 0.56, -0.56]
fig, ax = plt.subplots()
bp = ax.boxplot(x, sym="o")
for key in bp:
for obj in bp[key]:
obj.set_color('green')
@image_comparison(['violinplot_vert_baseline.png',
'violinplot_vert_baseline.png'])
def test_vert_violinplot_baseline():
# First 9 digits of frac(sqrt(2))
np.random.seed(414213562)
data = [np.random.normal(size=100) for _ in range(4)]
ax = plt.axes()
ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,
showmedians=0)
# Reuse testcase from above for a labeled data test
data = {"d": data}
fig, ax = plt.subplots()
ax.violinplot("d", positions=range(4), showmeans=0, showextrema=0,
showmedians=0, data=data)
@image_comparison(['violinplot_vert_showmeans.png'])
def test_vert_violinplot_showmeans():
ax = plt.axes()
# First 9 digits of frac(sqrt(3))
np.random.seed(732050807)
data = [np.random.normal(size=100) for _ in range(4)]
ax.violinplot(data, positions=range(4), showmeans=1, showextrema=0,
showmedians=0)
@image_comparison(['violinplot_vert_showextrema.png'])
def test_vert_violinplot_showextrema():
ax = plt.axes()
# First 9 digits of frac(sqrt(5))
np.random.seed(236067977)
data = [np.random.normal(size=100) for _ in range(4)]
ax.violinplot(data, positions=range(4), showmeans=0, showextrema=1,
showmedians=0)
@image_comparison(['violinplot_vert_showmedians.png'])
def test_vert_violinplot_showmedians():
ax = plt.axes()
# First 9 digits of frac(sqrt(7))
np.random.seed(645751311)
data = [np.random.normal(size=100) for _ in range(4)]
ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,
showmedians=1)
@image_comparison(['violinplot_vert_showall.png'])
def test_vert_violinplot_showall():
ax = plt.axes()
# First 9 digits of frac(sqrt(11))
np.random.seed(316624790)
data = [np.random.normal(size=100) for _ in range(4)]
ax.violinplot(data, positions=range(4), showmeans=1, showextrema=1,
showmedians=1,
quantiles=[[0.1, 0.9], [0.2, 0.8], [0.3, 0.7], [0.4, 0.6]])
@image_comparison(['violinplot_vert_custompoints_10.png'])
def test_vert_violinplot_custompoints_10():
ax = plt.axes()
# First 9 digits of frac(sqrt(13))
np.random.seed(605551275)
data = [np.random.normal(size=100) for _ in range(4)]
ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,
showmedians=0, points=10)
@image_comparison(['violinplot_vert_custompoints_200.png'])
def test_vert_violinplot_custompoints_200():
ax = plt.axes()
# First 9 digits of frac(sqrt(17))
np.random.seed(123105625)
data = [np.random.normal(size=100) for _ in range(4)]
ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,
showmedians=0, points=200)
@image_comparison(['violinplot_horiz_baseline.png'])
def test_horiz_violinplot_baseline():
ax = plt.axes()
# First 9 digits of frac(sqrt(19))
np.random.seed(358898943)
data = [np.random.normal(size=100) for _ in range(4)]
ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
showextrema=0, showmedians=0)
@image_comparison(['violinplot_horiz_showmedians.png'])
def test_horiz_violinplot_showmedians():
ax = plt.axes()
# First 9 digits of frac(sqrt(23))
np.random.seed(795831523)
data = [np.random.normal(size=100) for _ in range(4)]
ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
showextrema=0, showmedians=1)
@image_comparison(['violinplot_horiz_showmeans.png'])
def test_horiz_violinplot_showmeans():
ax = plt.axes()
# First 9 digits of frac(sqrt(29))
np.random.seed(385164807)
data = [np.random.normal(size=100) for _ in range(4)]
ax.violinplot(data, positions=range(4), vert=False, showmeans=1,
showextrema=0, showmedians=0)
@image_comparison(['violinplot_horiz_showextrema.png'])
def test_horiz_violinplot_showextrema():
ax = plt.axes()
# First 9 digits of frac(sqrt(31))
np.random.seed(567764362)
data = [np.random.normal(size=100) for _ in range(4)]
ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
showextrema=1, showmedians=0)
@image_comparison(['violinplot_horiz_showall.png'])
def test_horiz_violinplot_showall():
ax = plt.axes()
# First 9 digits of frac(sqrt(37))
np.random.seed(82762530)
data = [np.random.normal(size=100) for _ in range(4)]
ax.violinplot(data, positions=range(4), vert=False, showmeans=1,
showextrema=1, showmedians=1,
quantiles=[[0.1, 0.9], [0.2, 0.8], [0.3, 0.7], [0.4, 0.6]])
@image_comparison(['violinplot_horiz_custompoints_10.png'])
def test_horiz_violinplot_custompoints_10():
ax = plt.axes()
# First 9 digits of frac(sqrt(41))
np.random.seed(403124237)
data = [np.random.normal(size=100) for _ in range(4)]
ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
showextrema=0, showmedians=0, points=10)
@image_comparison(['violinplot_horiz_custompoints_200.png'])
def test_horiz_violinplot_custompoints_200():
ax = plt.axes()
# First 9 digits of frac(sqrt(43))
np.random.seed(557438524)
data = [np.random.normal(size=100) for _ in range(4)]
ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
showextrema=0, showmedians=0, points=200)
def test_violinplot_bad_positions():
ax = plt.axes()
# First 9 digits of frac(sqrt(47))
np.random.seed(855654600)
data = [np.random.normal(size=100) for _ in range(4)]
with pytest.raises(ValueError):
ax.violinplot(data, positions=range(5))
def test_violinplot_bad_widths():
ax = plt.axes()
# First 9 digits of frac(sqrt(53))
np.random.seed(280109889)
data = [np.random.normal(size=100) for _ in range(4)]
with pytest.raises(ValueError):
ax.violinplot(data, positions=range(4), widths=[1, 2, 3])
def test_violinplot_bad_quantiles():
ax = plt.axes()
# First 9 digits of frac(sqrt(73))
np.random.seed(544003745)
data = [np.random.normal(size=100)]
# Quantile list length does not match the number of datasets
with pytest.raises(ValueError):
ax.violinplot(data, quantiles=[[0.1, 0.2], [0.5, 0.7]])
def test_violinplot_outofrange_quantiles():
ax = plt.axes()
# First 9 digits of frac(sqrt(79))
np.random.seed(888194417)
data = [np.random.normal(size=100)]
# Quantile value above the valid range [0, 1]
with pytest.raises(ValueError):
ax.violinplot(data, quantiles=[[0.1, 0.2, 0.3, 1.05]])
# Quantile value below 0
with pytest.raises(ValueError):
ax.violinplot(data, quantiles=[[-0.05, 0.2, 0.3, 0.75]])
@check_figures_equal(extensions=["png"])
def test_violinplot_single_list_quantiles(fig_test, fig_ref):
# Ensures quantile list for 1D can be passed in as single list
# First 9 digits of frac(sqrt(83))
np.random.seed(110433579)
data = [np.random.normal(size=100)]
# Test image
ax = fig_test.subplots()
ax.violinplot(data, quantiles=[0.1, 0.3, 0.9])
# Reference image
ax = fig_ref.subplots()
ax.violinplot(data, quantiles=[[0.1, 0.3, 0.9]])
@check_figures_equal(extensions=["png"])
def test_violinplot_pandas_series(fig_test, fig_ref, pd):
np.random.seed(110433579)
s1 = pd.Series(np.random.normal(size=7), index=[9, 8, 7, 6, 5, 4, 3])
s2 = pd.Series(np.random.normal(size=9), index=list('ABCDEFGHI'))
s3 = pd.Series(np.random.normal(size=11))
fig_test.subplots().violinplot([s1, s2, s3])
fig_ref.subplots().violinplot([s1.values, s2.values, s3.values])
def test_manage_xticks():
_, ax = plt.subplots()
ax.set_xlim(0, 4)
old_xlim = ax.get_xlim()
np.random.seed(0)
y1 = np.random.normal(10, 3, 20)
y2 = np.random.normal(3, 1, 20)
ax.boxplot([y1, y2], positions=[1, 2], manage_ticks=False)
new_xlim = ax.get_xlim()
assert_array_equal(old_xlim, new_xlim)
def test_boxplot_not_single():
fig, ax = plt.subplots()
ax.boxplot(np.random.rand(100), positions=[3])
ax.boxplot(np.random.rand(100), positions=[5])
fig.canvas.draw()
assert ax.get_xlim() == (2.5, 5.5)
assert list(ax.get_xticks()) == [3, 5]
assert [t.get_text() for t in ax.get_xticklabels()] == ["3", "5"]
def test_tick_space_size_0():
# allow font size to be zero, which affects ticks when there is
# no other text in the figure.
plt.plot([0, 1], [0, 1])
matplotlib.rcParams.update({'font.size': 0})
b = io.BytesIO()
plt.savefig(b, dpi=80, format='raw')
@image_comparison(['errorbar_basic', 'errorbar_mixed', 'errorbar_basic'])
def test_errorbar():
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
yerr = 0.1 + 0.2*np.sqrt(x)
xerr = 0.1 + yerr
# First illustrate basic pyplot interface, using defaults where possible.
fig = plt.figure()
ax = fig.gca()
ax.errorbar(x, y, xerr=0.2, yerr=0.4)
ax.set_title("Simplest errorbars, 0.2 in x, 0.4 in y")
# Now switch to a more OO interface to exercise more features.
fig, axs = plt.subplots(nrows=2, ncols=2, sharex=True)
ax = axs[0, 0]
ax.errorbar(x, y, yerr=yerr, fmt='o')
ax.set_title('Vert. symmetric')
# With 4 subplots, reduce the number of axis ticks to avoid crowding.
ax.locator_params(nbins=4)
ax = axs[0, 1]
ax.errorbar(x, y, xerr=xerr, fmt='o', alpha=0.4)
ax.set_title('Hor. symmetric w/ alpha')
ax = axs[1, 0]
ax.errorbar(x, y, yerr=[yerr, 2*yerr], xerr=[xerr, 2*xerr], fmt='--o')
ax.set_title('H, V asymmetric')
ax = axs[1, 1]
ax.set_yscale('log')
# Here we have to be careful to keep all y values positive:
ylower = np.maximum(1e-2, y - yerr)
yerr_lower = y - ylower
ax.errorbar(x, y, yerr=[yerr_lower, 2*yerr], xerr=xerr,
fmt='o', ecolor='g', capthick=2)
ax.set_title('Mixed sym., log y')
fig.suptitle('Variable errorbars')
# Reuse the first testcase from above for a labeled data test
data = {"x": x, "y": y}
fig = plt.figure()
ax = fig.gca()
ax.errorbar("x", "y", xerr=0.2, yerr=0.4, data=data)
ax.set_title("Simplest errorbars, 0.2 in x, 0.4 in y")
def test_errorbar_colorcycle():
f, ax = plt.subplots()
x = np.arange(10)
y = 2*x
e1, _, _ = ax.errorbar(x, y, c=None)
e2, _, _ = ax.errorbar(x, 2*y, c=None)
ln1, = ax.plot(x, 4*y)
assert mcolors.to_rgba(e1.get_color()) == mcolors.to_rgba('C0')
assert mcolors.to_rgba(e2.get_color()) == mcolors.to_rgba('C1')
assert mcolors.to_rgba(ln1.get_color()) == mcolors.to_rgba('C2')
@check_figures_equal()
def test_errorbar_cycle_ecolor(fig_test, fig_ref):
x = np.arange(0.1, 4, 0.5)
y = [np.exp(-x+n) for n in range(4)]
axt = fig_test.subplots()
axr = fig_ref.subplots()
for yi, color in zip(y, ['C0', 'C1', 'C2', 'C3']):
axt.errorbar(x, yi, yerr=(yi * 0.25), linestyle='-',
marker='o', ecolor='black')
axr.errorbar(x, yi, yerr=(yi * 0.25), linestyle='-',
marker='o', color=color, ecolor='black')
def test_errorbar_shape():
fig = plt.figure()
ax = fig.gca()
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
yerr1 = 0.1 + 0.2*np.sqrt(x)
yerr = np.vstack((yerr1, 2*yerr1)).T
xerr = 0.1 + yerr
with pytest.raises(ValueError):
ax.errorbar(x, y, yerr=yerr, fmt='o')
with pytest.raises(ValueError):
ax.errorbar(x, y, xerr=xerr, fmt='o')
with pytest.raises(ValueError):
ax.errorbar(x, y, yerr=yerr, xerr=xerr, fmt='o')
@image_comparison(['errorbar_limits'])
def test_errorbar_limits():
x = np.arange(0.5, 5.5, 0.5)
y = np.exp(-x)
xerr = 0.1
yerr = 0.2
ls = 'dotted'
fig, ax = plt.subplots()
# standard error bars
ax.errorbar(x, y, xerr=xerr, yerr=yerr, ls=ls, color='blue')
# including upper limits
uplims = np.zeros_like(x)
uplims[[1, 5, 9]] = True
ax.errorbar(x, y+0.5, xerr=xerr, yerr=yerr, uplims=uplims, ls=ls,
color='green')
# including lower limits
lolims = np.zeros_like(x)
lolims[[2, 4, 8]] = True
ax.errorbar(x, y+1.0, xerr=xerr, yerr=yerr, lolims=lolims, ls=ls,
color='red')
# including upper and lower limits
ax.errorbar(x, y+1.5, marker='o', ms=8, xerr=xerr, yerr=yerr,
lolims=lolims, uplims=uplims, ls=ls, color='magenta')
# including xlower and xupper limits
xerr = 0.2
yerr = np.full_like(x, 0.2)
yerr[[3, 6]] = 0.3
xlolims = lolims
xuplims = uplims
lolims = np.zeros_like(x)
uplims = np.zeros_like(x)
lolims[[6]] = True
uplims[[3]] = True
ax.errorbar(x, y+2.1, marker='o', ms=8, xerr=xerr, yerr=yerr,
xlolims=xlolims, xuplims=xuplims, uplims=uplims,
lolims=lolims, ls='none', mec='blue', capsize=0,
color='cyan')
ax.set_xlim((0, 5.5))
ax.set_title('Errorbar upper and lower limits')
def test_errorbar_nonefmt():
# Check that passing 'none' as a format still plots errorbars
x = np.arange(5)
y = np.arange(5)
plotline, _, barlines = plt.errorbar(x, y, xerr=1, yerr=1, fmt='none')
assert plotline is None
for errbar in barlines:
assert np.all(errbar.get_color() == mcolors.to_rgba('C0'))
def test_errorbar_line_specific_kwargs():
# Check that passing line-specific keyword arguments will not result in
# errors.
x = np.arange(5)
y = np.arange(5)
plotline, _, _ = plt.errorbar(x, y, xerr=1, yerr=1, ls='None',
marker='s', fillstyle='full',
drawstyle='steps-mid',
dash_capstyle='round',
dash_joinstyle='miter',
solid_capstyle='butt',
solid_joinstyle='bevel')
assert plotline.get_fillstyle() == 'full'
assert plotline.get_drawstyle() == 'steps-mid'
@check_figures_equal(extensions=['png'])
def test_errorbar_with_prop_cycle(fig_test, fig_ref):
ax = fig_ref.subplots()
ax.errorbar(x=[2, 4, 10], y=[0, 1, 2], yerr=0.5,
ls='--', marker='s', mfc='k')
ax.errorbar(x=[2, 4, 10], y=[2, 3, 4], yerr=0.5, color='tab:green',
ls=':', marker='s', mfc='y')
ax.errorbar(x=[2, 4, 10], y=[4, 5, 6], yerr=0.5, fmt='tab:blue',
ls='-.', marker='o', mfc='c')
ax.set_xlim(1, 11)
_cycle = cycler(ls=['--', ':', '-.'], marker=['s', 's', 'o'],
mfc=['k', 'y', 'c'], color=['b', 'g', 'r'])
plt.rc("axes", prop_cycle=_cycle)
ax = fig_test.subplots()
ax.errorbar(x=[2, 4, 10], y=[0, 1, 2], yerr=0.5)
ax.errorbar(x=[2, 4, 10], y=[2, 3, 4], yerr=0.5, color='tab:green')
ax.errorbar(x=[2, 4, 10], y=[4, 5, 6], yerr=0.5, fmt='tab:blue')
ax.set_xlim(1, 11)
def test_errorbar_every_invalid():
x = np.linspace(0, 1, 15)
y = x * (1-x)
yerr = y/6
ax = plt.figure().subplots()
with pytest.raises(ValueError, match='not a tuple of two integers'):
ax.errorbar(x, y, yerr, errorevery=(1, 2, 3))
with pytest.raises(ValueError, match='not a tuple of two integers'):
ax.errorbar(x, y, yerr, errorevery=(1.3, 3))
with pytest.raises(ValueError, match='not a valid NumPy fancy index'):
ax.errorbar(x, y, yerr, errorevery=[False, True])
with pytest.raises(ValueError, match='not a recognized value'):
ax.errorbar(x, y, yerr, errorevery='foobar')
@check_figures_equal()
def test_errorbar_every(fig_test, fig_ref):
x = np.linspace(0, 1, 15)
y = x * (1-x)
yerr = y/6
ax_ref = fig_ref.subplots()
ax_test = fig_test.subplots()
for color, shift in zip('rgbk', [0, 0, 2, 7]):
y += .02
# Check errorevery using an explicit offset and step.
ax_test.errorbar(x, y, yerr, errorevery=(shift, 4),
capsize=4, c=color)
# Using manual errorbars
# n.b. errorbar draws the main plot at z=2.1 by default
ax_ref.plot(x, y, c=color, zorder=2.1)
ax_ref.errorbar(x[shift::4], y[shift::4], yerr[shift::4],
capsize=4, c=color, fmt='none')
# Check that markevery is propagated to line, without affecting errorbars.
ax_test.errorbar(x, y + 0.1, yerr, markevery=(1, 4), capsize=4, fmt='o')
ax_ref.plot(x[1::4], y[1::4] + 0.1, 'o', zorder=2.1)
ax_ref.errorbar(x, y + 0.1, yerr, capsize=4, fmt='none')
# Check that passing a slice to markevery/errorevery works.
ax_test.errorbar(x, y + 0.2, yerr, errorevery=slice(2, None, 3),
markevery=slice(2, None, 3),
capsize=4, c='C0', fmt='o')
ax_ref.plot(x[2::3], y[2::3] + 0.2, 'o', c='C0', zorder=2.1)
ax_ref.errorbar(x[2::3], y[2::3] + 0.2, yerr[2::3],
capsize=4, c='C0', fmt='none')
# Check that passing an iterable to markevery/errorevery works.
ax_test.errorbar(x, y + 0.2, yerr, errorevery=[False, True, False] * 5,
markevery=[False, True, False] * 5,
capsize=4, c='C1', fmt='o')
ax_ref.plot(x[1::3], y[1::3] + 0.2, 'o', c='C1', zorder=2.1)
ax_ref.errorbar(x[1::3], y[1::3] + 0.2, yerr[1::3],
capsize=4, c='C1', fmt='none')
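# The cases above show the accepted errorevery/markevery forms: an
# (offset, step) tuple, a slice, and a boolean mask; each reference axes
# reproduces the same picture manually by subsetting x, y and yerr.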
@pytest.mark.parametrize('elinewidth', [[1, 2, 3],
np.array([1, 2, 3]),
1])
def test_errorbar_linewidth_type(elinewidth):
plt.errorbar([1, 2, 3], [1, 2, 3], yerr=[1, 2, 3], elinewidth=elinewidth)
@image_comparison(['hist_stacked_stepfilled', 'hist_stacked_stepfilled'])
def test_hist_stacked_stepfilled():
# make some data
d1 = np.linspace(1, 3, 20)
d2 = np.linspace(0, 10, 50)
fig, ax = plt.subplots()
ax.hist((d1, d2), histtype="stepfilled", stacked=True)
# Reuse testcase from above for a labeled data test
data = {"x": (d1, d2)}
fig, ax = plt.subplots()
ax.hist("x", histtype="stepfilled", stacked=True, data=data)
@image_comparison(['hist_offset'])
def test_hist_offset():
# make some data
d1 = np.linspace(0, 10, 50)
d2 = np.linspace(1, 3, 20)
fig, ax = plt.subplots()
ax.hist(d1, bottom=5)
ax.hist(d2, bottom=15)
@image_comparison(['hist_step.png'], remove_text=True)
def test_hist_step():
# make some data
d1 = np.linspace(1, 3, 20)
fig, ax = plt.subplots()
ax.hist(d1, histtype="step")
ax.set_ylim(0, 10)
ax.set_xlim(-1, 5)
@image_comparison(['hist_step_horiz.png'])
def test_hist_step_horiz():
# make some data
d1 = np.linspace(0, 10, 50)
d2 = np.linspace(1, 3, 20)
fig, ax = plt.subplots()
ax.hist((d1, d2), histtype="step", orientation="horizontal")
@image_comparison(['hist_stacked_weights'])
def test_hist_stacked_weighted():
# make some data
d1 = np.linspace(0, 10, 50)
d2 = np.linspace(1, 3, 20)
w1 = np.linspace(0.01, 3.5, 50)
w2 = np.linspace(0.05, 2., 20)
fig, ax = plt.subplots()
ax.hist((d1, d2), weights=(w1, w2), histtype="stepfilled", stacked=True)
@pytest.mark.parametrize("use_line_collection", [True, False],
ids=['w/ line collection', 'w/o line collection'])
@image_comparison(['stem.png'], style='mpl20', remove_text=True)
def test_stem(use_line_collection):
x = np.linspace(0.1, 2 * np.pi, 100)
fig, ax = plt.subplots()
# Label is a single space so that a legend is drawn, but without rendering
# any visible text
ax.stem(x, np.cos(x),
linefmt='C2-.', markerfmt='k+', basefmt='C1-.', label=' ',
use_line_collection=use_line_collection)
ax.legend()
def test_stem_args():
fig, ax = plt.subplots()
x = list(range(10))
y = list(range(10))
# Test the call signatures
ax.stem(y)
ax.stem(x, y)
ax.stem(x, y, 'r--')
ax.stem(x, y, 'r--', basefmt='b--')
def test_stem_dates():
fig, ax = plt.subplots(1, 1)
xs = [dateutil.parser.parse("2013-9-28 11:00:00"),
dateutil.parser.parse("2013-9-28 12:00:00")]
ys = [100, 200]
ax.stem(xs, ys, "*-")
@pytest.mark.parametrize("use_line_collection", [True, False],
ids=['w/ line collection', 'w/o line collection'])
@image_comparison(['stem_orientation.png'], style='mpl20', remove_text=True)
def test_stem_orientation(use_line_collection):
x = np.linspace(0.1, 2*np.pi, 50)
fig, ax = plt.subplots()
ax.stem(x, np.cos(x),
linefmt='C2-.', markerfmt='kx', basefmt='C1-.',
use_line_collection=use_line_collection, orientation='horizontal')
@image_comparison(['hist_stacked_stepfilled_alpha'])
def test_hist_stacked_stepfilled_alpha():
# make some data
d1 = np.linspace(1, 3, 20)
d2 = np.linspace(0, 10, 50)
fig, ax = plt.subplots()
ax.hist((d1, d2), histtype="stepfilled", stacked=True, alpha=0.5)
@image_comparison(['hist_stacked_step'])
def test_hist_stacked_step():
# make some data
d1 = np.linspace(1, 3, 20)
d2 = np.linspace(0, 10, 50)
fig, ax = plt.subplots()
ax.hist((d1, d2), histtype="step", stacked=True)
@image_comparison(['hist_stacked_normed'])
def test_hist_stacked_density():
# make some data
d1 = np.linspace(1, 3, 20)
d2 = np.linspace(0, 10, 50)
fig, ax = plt.subplots()
ax.hist((d1, d2), stacked=True, density=True)
@image_comparison(['hist_step_bottom.png'], remove_text=True)
def test_hist_step_bottom():
# make some data
d1 = np.linspace(1, 3, 20)
fig, ax = plt.subplots()
ax.hist(d1, bottom=np.arange(10), histtype="stepfilled")
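# The geometry tests below compare the exact vertex list of the histogram
# patch: the outline walks up the left edge, across the top of each bin at
# its count (plus any ``bottom`` offset), down the right edge, and, for
# 'stepfilled', back along the bottom to close the polygon.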
def test_hist_stepfilled_geometry():
bins = [0, 1, 2, 3]
data = [0, 0, 1, 1, 1, 2]
_, _, (polygon, ) = plt.hist(data,
bins=bins,
histtype='stepfilled')
xy = [[0, 0], [0, 2], [1, 2], [1, 3], [2, 3], [2, 1], [3, 1],
[3, 0], [2, 0], [2, 0], [1, 0], [1, 0], [0, 0]]
assert_array_equal(polygon.get_xy(), xy)
def test_hist_step_geometry():
bins = [0, 1, 2, 3]
data = [0, 0, 1, 1, 1, 2]
_, _, (polygon, ) = plt.hist(data,
bins=bins,
histtype='step')
xy = [[0, 0], [0, 2], [1, 2], [1, 3], [2, 3], [2, 1], [3, 1], [3, 0]]
assert_array_equal(polygon.get_xy(), xy)
def test_hist_stepfilled_bottom_geometry():
bins = [0, 1, 2, 3]
data = [0, 0, 1, 1, 1, 2]
_, _, (polygon, ) = plt.hist(data,
bins=bins,
bottom=[1, 2, 1.5],
histtype='stepfilled')
xy = [[0, 1], [0, 3], [1, 3], [1, 5], [2, 5], [2, 2.5], [3, 2.5],
[3, 1.5], [2, 1.5], [2, 2], [1, 2], [1, 1], [0, 1]]
assert_array_equal(polygon.get_xy(), xy)
def test_hist_step_bottom_geometry():
bins = [0, 1, 2, 3]
data = [0, 0, 1, 1, 1, 2]
_, _, (polygon, ) = plt.hist(data,
bins=bins,
bottom=[1, 2, 1.5],
histtype='step')
xy = [[0, 1], [0, 3], [1, 3], [1, 5], [2, 5], [2, 2.5], [3, 2.5], [3, 1.5]]
assert_array_equal(polygon.get_xy(), xy)
def test_hist_stacked_stepfilled_geometry():
bins = [0, 1, 2, 3]
data_1 = [0, 0, 1, 1, 1, 2]
data_2 = [0, 1, 2]
_, _, patches = plt.hist([data_1, data_2],
bins=bins,
stacked=True,
histtype='stepfilled')
assert len(patches) == 2
polygon, = patches[0]
xy = [[0, 0], [0, 2], [1, 2], [1, 3], [2, 3], [2, 1], [3, 1],
[3, 0], [2, 0], [2, 0], [1, 0], [1, 0], [0, 0]]
assert_array_equal(polygon.get_xy(), xy)
polygon, = patches[1]
xy = [[0, 2], [0, 3], [1, 3], [1, 4], [2, 4], [2, 2], [3, 2],
[3, 1], [2, 1], [2, 3], [1, 3], [1, 2], [0, 2]]
assert_array_equal(polygon.get_xy(), xy)
def test_hist_stacked_step_geometry():
bins = [0, 1, 2, 3]
data_1 = [0, 0, 1, 1, 1, 2]
data_2 = [0, 1, 2]
_, _, patches = plt.hist([data_1, data_2],
bins=bins,
stacked=True,
histtype='step')
assert len(patches) == 2
polygon, = patches[0]
xy = [[0, 0], [0, 2], [1, 2], [1, 3], [2, 3], [2, 1], [3, 1], [3, 0]]
assert_array_equal(polygon.get_xy(), xy)
polygon, = patches[1]
xy = [[0, 2], [0, 3], [1, 3], [1, 4], [2, 4], [2, 2], [3, 2], [3, 1]]
assert_array_equal(polygon.get_xy(), xy)
def test_hist_stacked_stepfilled_bottom_geometry():
bins = [0, 1, 2, 3]
data_1 = [0, 0, 1, 1, 1, 2]
data_2 = [0, 1, 2]
_, _, patches = plt.hist([data_1, data_2],
bins=bins,
stacked=True,
bottom=[1, 2, 1.5],
histtype='stepfilled')
assert len(patches) == 2
polygon, = patches[0]
xy = [[0, 1], [0, 3], [1, 3], [1, 5], [2, 5], [2, 2.5], [3, 2.5],
[3, 1.5], [2, 1.5], [2, 2], [1, 2], [1, 1], [0, 1]]
assert_array_equal(polygon.get_xy(), xy)
polygon, = patches[1]
xy = [[0, 3], [0, 4], [1, 4], [1, 6], [2, 6], [2, 3.5], [3, 3.5],
[3, 2.5], [2, 2.5], [2, 5], [1, 5], [1, 3], [0, 3]]
assert_array_equal(polygon.get_xy(), xy)
def test_hist_stacked_step_bottom_geometry():
bins = [0, 1, 2, 3]
data_1 = [0, 0, 1, 1, 1, 2]
data_2 = [0, 1, 2]
_, _, patches = plt.hist([data_1, data_2],
bins=bins,
stacked=True,
bottom=[1, 2, 1.5],
histtype='step')
assert len(patches) == 2
polygon, = patches[0]
xy = [[0, 1], [0, 3], [1, 3], [1, 5], [2, 5], [2, 2.5], [3, 2.5], [3, 1.5]]
assert_array_equal(polygon.get_xy(), xy)
polygon, = patches[1]
xy = [[0, 3], [0, 4], [1, 4], [1, 6], [2, 6], [2, 3.5], [3, 3.5], [3, 2.5]]
assert_array_equal(polygon.get_xy(), xy)
@image_comparison(['hist_stacked_bar'])
def test_hist_stacked_bar():
# make some data
d = [[100, 100, 100, 100, 200, 320, 450, 80, 20, 600, 310, 800],
[20, 23, 50, 11, 100, 420], [120, 120, 120, 140, 140, 150, 180],
[60, 60, 60, 60, 300, 300, 5, 5, 5, 5, 10, 300],
[555, 555, 555, 30, 30, 30, 30, 30, 100, 100, 100, 100, 30, 30],
[30, 30, 30, 30, 400, 400, 400, 400, 400, 400, 400, 400]]
colors = [(0.5759849696758961, 1.0, 0.0), (0.0, 1.0, 0.350624650815206),
(0.0, 1.0, 0.6549834156005998), (0.0, 0.6569064625276622, 1.0),
(0.28302699607823545, 0.0, 1.0), (0.6849123462299822, 0.0, 1.0)]
labels = ['green', 'orange', ' yellow', 'magenta', 'black']
fig, ax = plt.subplots()
ax.hist(d, bins=10, histtype='barstacked', align='mid', color=colors,
label=labels)
ax.legend(loc='upper right', bbox_to_anchor=(1.0, 1.0), ncol=1)
def test_hist_barstacked_bottom_unchanged():
b = np.array([10, 20])
plt.hist([[0, 1], [0, 1]], 2, histtype="barstacked", bottom=b)
assert b.tolist() == [10, 20]
def test_hist_emptydata():
fig, ax = plt.subplots()
ax.hist([[], range(10), range(10)], histtype="step")
def test_hist_labels():
# test singleton labels OK
fig, ax = plt.subplots()
_, _, bars = ax.hist([0, 1], label=0)
assert bars[0].get_label() == '0'
_, _, bars = ax.hist([0, 1], label=[0])
assert bars[0].get_label() == '0'
_, _, bars = ax.hist([0, 1], label=None)
assert bars[0].get_label() == '_nolegend_'
_, _, bars = ax.hist([0, 1], label='0')
assert bars[0].get_label() == '0'
_, _, bars = ax.hist([0, 1], label='00')
assert bars[0].get_label() == '00'
@image_comparison(['transparent_markers'], remove_text=True)
def test_transparent_markers():
np.random.seed(0)
data = np.random.random(50)
fig, ax = plt.subplots()
ax.plot(data, 'D', mfc='none', markersize=100)
@image_comparison(['rgba_markers'], remove_text=True)
def test_rgba_markers():
fig, axs = plt.subplots(ncols=2)
rcolors = [(1, 0, 0, 1), (1, 0, 0, 0.5)]
bcolors = [(0, 0, 1, 1), (0, 0, 1, 0.5)]
alphas = [None, 0.2]
kw = dict(ms=100, mew=20)
for i, alpha in enumerate(alphas):
for j, rcolor in enumerate(rcolors):
for k, bcolor in enumerate(bcolors):
axs[i].plot(j+1, k+1, 'o', mfc=bcolor, mec=rcolor,
alpha=alpha, **kw)
axs[i].plot(j+1, k+3, 'x', mec=rcolor, alpha=alpha, **kw)
for ax in axs:
ax.axis([-1, 4, 0, 5])
@image_comparison(['mollweide_grid'], remove_text=True)
def test_mollweide_grid():
# test that both horizontal and vertical gridlines appear on the Mollweide
# projection
fig = plt.figure()
ax = fig.add_subplot(projection='mollweide')
ax.grid()
def test_mollweide_forward_inverse_closure():
# test that the round-trip Mollweide forward->inverse transformation is an
# approximate identity
fig = plt.figure()
ax = fig.add_subplot(projection='mollweide')
# set up 1-degree grid in longitude, latitude
lon = np.linspace(-np.pi, np.pi, 360)
lat = np.linspace(-np.pi / 2.0, np.pi / 2.0, 180)
lon, lat = np.meshgrid(lon, lat)
ll = np.vstack((lon.flatten(), lat.flatten())).T
# perform forward transform
xy = ax.transProjection.transform(ll)
# perform inverse transform
ll2 = ax.transProjection.inverted().transform(xy)
# compare
np.testing.assert_array_almost_equal(ll, ll2, 3)
def test_mollweide_inverse_forward_closure():
# test that the round-trip Mollweide inverse->forward transformation is an
# approximate identity
fig = plt.figure()
ax = fig.add_subplot(projection='mollweide')
# set up grid in x, y
x = np.linspace(0, 1, 500)
x, y = np.meshgrid(x, x)
xy = np.vstack((x.flatten(), y.flatten())).T
# perform inverse transform
ll = ax.transProjection.inverted().transform(xy)
# perform forward transform
xy2 = ax.transProjection.transform(ll)
# compare
np.testing.assert_array_almost_equal(xy, xy2, 3)
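# Both closure tests verify that the Mollweide transProjection and its
# inverse compose to (approximately) the identity, to 3 decimal places, over
# a dense grid of points.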
@image_comparison(['test_alpha'], remove_text=True)
def test_alpha():
np.random.seed(0)
data = np.random.random(50)
fig, ax = plt.subplots()
# alpha=.5 markers, solid line
ax.plot(data, '-D', color=[1, 0, 0], mfc=[1, 0, 0, .5],
markersize=20, lw=10)
# everything solid by kwarg
ax.plot(data + 2, '-D', color=[1, 0, 0, .5], mfc=[1, 0, 0, .5],
markersize=20, lw=10,
alpha=1)
# everything alpha=.5 by kwarg
ax.plot(data + 4, '-D', color=[1, 0, 0], mfc=[1, 0, 0],
markersize=20, lw=10,
alpha=.5)
# everything alpha=.5 by colors
ax.plot(data + 6, '-D', color=[1, 0, 0, .5], mfc=[1, 0, 0, .5],
markersize=20, lw=10)
# alpha=.5 line, solid markers
ax.plot(data + 8, '-D', color=[1, 0, 0, .5], mfc=[1, 0, 0],
markersize=20, lw=10)
@image_comparison(['eventplot', 'eventplot'], remove_text=True)
def test_eventplot():
np.random.seed(0)
data1 = np.random.random([32, 20]).tolist()
data2 = np.random.random([6, 20]).tolist()
data = data1 + data2
num_datasets = len(data)
colors1 = [[0, 1, .7]] * len(data1)
colors2 = [[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, .75, 0],
[1, 0, 1],
[0, 1, 1]]
colors = colors1 + colors2
lineoffsets1 = 12 + np.arange(0, len(data1)) * .33
lineoffsets2 = [-15, -3, 1, 1.5, 6, 10]
lineoffsets = lineoffsets1.tolist() + lineoffsets2
linelengths1 = [.33] * len(data1)
linelengths2 = [5, 2, 1, 1, 3, 1.5]
linelengths = linelengths1 + linelengths2
fig = plt.figure()
axobj = fig.add_subplot()
colls = axobj.eventplot(data, colors=colors, lineoffsets=lineoffsets,
linelengths=linelengths)
num_collections = len(colls)
assert num_collections == num_datasets
# Reuse testcase from above for a labeled data test
data = {"pos": data, "c": colors, "lo": lineoffsets, "ll": linelengths}
fig = plt.figure()
axobj = fig.add_subplot()
colls = axobj.eventplot("pos", colors="c", lineoffsets="lo",
linelengths="ll", data=data)
num_collections = len(colls)
assert num_collections == num_datasets
@image_comparison(['test_eventplot_defaults.png'], remove_text=True)
def test_eventplot_defaults():
"""
test that eventplot produces the correct output given the default params
(see bug #3728)
"""
np.random.seed(0)
data1 = np.random.random([32, 20]).tolist()
data2 = np.random.random([6, 20]).tolist()
data = data1 + data2
fig = plt.figure()
axobj = fig.add_subplot()
axobj.eventplot(data)
@pytest.mark.parametrize('colors', [
('0.5',), # string color with multiple characters: not OK before #8193 fix
('tab:orange', 'tab:pink', 'tab:cyan', 'bLacK'), # case-insensitive
('red', (0, 1, 0), None, (1, 0, 1, 0.5)), # a tricky case mixing types
])
def test_eventplot_colors(colors):
"""Test the *colors* parameter of eventplot. Inspired by issue #8193."""
data = [[0], [1], [2], [3]] # 4 successive events of different nature
# Build the list of the expected colors
expected = [c if c is not None else 'C0' for c in colors]
# Convert the list into an array of RGBA values
# NB: ['rgbk'] is not a valid argument for to_rgba_array, while 'rgbk' is.
if len(expected) == 1:
expected = expected[0]
expected = np.broadcast_to(mcolors.to_rgba_array(expected), (len(data), 4))
fig, ax = plt.subplots()
if len(colors) == 1: # tuple with a single string (like '0.5' or 'rgbk')
colors = colors[0]
collections = ax.eventplot(data, colors=colors)
for coll, color in zip(collections, expected):
assert_allclose(coll.get_color(), color)
@image_comparison(['test_eventplot_problem_kwargs.png'], remove_text=True)
def test_eventplot_problem_kwargs(recwarn):
"""
    test that 'singular' versions of LineCollection props raise a
    MatplotlibDeprecationWarning rather than overriding the 'plural' versions
    (e.g. to prevent 'color' from overriding 'colors', see issue #4297)
"""
np.random.seed(0)
data1 = np.random.random([20]).tolist()
data2 = np.random.random([10]).tolist()
data = [data1, data2]
fig = plt.figure()
axobj = fig.add_subplot()
axobj.eventplot(data,
colors=['r', 'b'],
color=['c', 'm'],
linewidths=[2, 1],
linewidth=[1, 2],
linestyles=['solid', 'dashed'],
linestyle=['dashdot', 'dotted'])
    # check that three MatplotlibDeprecationWarnings were raised
assert len(recwarn) == 3
assert all(issubclass(wi.category, MatplotlibDeprecationWarning)
for wi in recwarn)
def test_empty_eventplot():
fig, ax = plt.subplots(1, 1)
ax.eventplot([[]], colors=[(0.0, 0.0, 0.0, 0.0)])
plt.draw()
@pytest.mark.parametrize('data', [[[]], [[], [0, 1]], [[0, 1], []]])
@pytest.mark.parametrize(
'orientation', ['_empty', 'vertical', 'horizontal', None, 'none'])
def test_eventplot_orientation(data, orientation):
"""Introduced when fixing issue #6412."""
opts = {} if orientation == "_empty" else {'orientation': orientation}
fig, ax = plt.subplots(1, 1)
with (pytest.warns(MatplotlibDeprecationWarning)
if orientation in [None, 'none'] else nullcontext()):
ax.eventplot(data, **opts)
plt.draw()
@image_comparison(['marker_styles.png'], remove_text=True)
def test_marker_styles():
fig, ax = plt.subplots()
for y, marker in enumerate(sorted(matplotlib.markers.MarkerStyle.markers,
key=lambda x: str(type(x))+str(x))):
ax.plot((y % 2)*5 + np.arange(10)*10, np.ones(10)*10*y, linestyle='',
marker=marker, markersize=10+y/5, label=marker)
@image_comparison(['rc_markerfill.png'])
def test_markers_fillstyle_rcparams():
fig, ax = plt.subplots()
x = np.arange(7)
for idx, (style, marker) in enumerate(
[('top', 's'), ('bottom', 'o'), ('none', '^')]):
matplotlib.rcParams['markers.fillstyle'] = style
ax.plot(x+idx, marker=marker)
@image_comparison(['vertex_markers.png'], remove_text=True)
def test_vertex_markers():
data = list(range(10))
marker_as_tuple = ((-1, -1), (1, -1), (1, 1), (-1, 1))
marker_as_list = [(-1, -1), (1, -1), (1, 1), (-1, 1)]
fig, ax = plt.subplots()
ax.plot(data, linestyle='', marker=marker_as_tuple, mfc='k')
ax.plot(data[::-1], linestyle='', marker=marker_as_list, mfc='b')
ax.set_xlim([-1, 10])
ax.set_ylim([-1, 10])
@image_comparison(['vline_hline_zorder', 'errorbar_zorder'],
tol=0 if platform.machine() == 'x86_64' else 0.02)
def test_eb_line_zorder():
x = list(range(10))
# First illustrate basic pyplot interface, using defaults where possible.
fig = plt.figure()
ax = fig.gca()
ax.plot(x, lw=10, zorder=5)
ax.axhline(1, color='red', lw=10, zorder=1)
ax.axhline(5, color='green', lw=10, zorder=10)
ax.axvline(7, color='m', lw=10, zorder=7)
ax.axvline(2, color='k', lw=10, zorder=3)
ax.set_title("axvline and axhline zorder test")
# Now switch to a more OO interface to exercise more features.
fig = plt.figure()
ax = fig.gca()
x = list(range(10))
y = np.zeros(10)
yerr = list(range(10))
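    # the hlines below span zorders 0-9, so the errorbar drawn at zorder 5
    # should end up interleaved among them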
ax.errorbar(x, y, yerr=yerr, zorder=5, lw=5, color='r')
for j in range(10):
ax.axhline(j, lw=5, color='k', zorder=j)
ax.axhline(-j, lw=5, color='k', zorder=j)
ax.set_title("errorbar zorder test")
@check_figures_equal()
def test_axline_loglog(fig_test, fig_ref):
ax = fig_test.subplots()
ax.set(xlim=(0.1, 10), ylim=(1e-3, 1))
ax.loglog([.3, .6], [.3, .6], ".-")
ax.axline((1, 1e-3), (10, 1e-2), c="k")
ax = fig_ref.subplots()
ax.set(xlim=(0.1, 10), ylim=(1e-3, 1))
ax.loglog([.3, .6], [.3, .6], ".-")
ax.loglog([1, 10], [1e-3, 1e-2], c="k")
@check_figures_equal()
def test_axline(fig_test, fig_ref):
ax = fig_test.subplots()
ax.set(xlim=(-1, 1), ylim=(-1, 1))
ax.axline((0, 0), (1, 1))
ax.axline((0, 0), (1, 0), color='C1')
ax.axline((0, 0.5), (1, 0.5), color='C2')
# slopes
ax.axline((-0.7, -0.5), slope=0, color='C3')
ax.axline((1, -0.5), slope=-0.5, color='C4')
ax.axline((-0.5, 1), slope=float('inf'), color='C5')
ax = fig_ref.subplots()
ax.set(xlim=(-1, 1), ylim=(-1, 1))
ax.plot([-1, 1], [-1, 1])
ax.axhline(0, color='C1')
ax.axhline(0.5, color='C2')
# slopes
ax.axhline(-0.5, color='C3')
ax.plot([-1, 1], [0.5, -0.5], color='C4')
ax.axvline(-0.5, color='C5')
@check_figures_equal()
def test_axline_transaxes(fig_test, fig_ref):
ax = fig_test.subplots()
ax.set(xlim=(-1, 1), ylim=(-1, 1))
ax.axline((0, 0), slope=1, transform=ax.transAxes)
ax.axline((1, 0.5), slope=1, color='C1', transform=ax.transAxes)
ax.axline((0.5, 0.5), slope=0, color='C2', transform=ax.transAxes)
ax.axline((0.5, 0), (0.5, 1), color='C3', transform=ax.transAxes)
ax = fig_ref.subplots()
ax.set(xlim=(-1, 1), ylim=(-1, 1))
ax.plot([-1, 1], [-1, 1])
ax.plot([0, 1], [-1, 0], color='C1')
ax.plot([-1, 1], [0, 0], color='C2')
ax.plot([0, 0], [-1, 1], color='C3')
@check_figures_equal()
def test_axline_transaxes_panzoom(fig_test, fig_ref):
# test that it is robust against pan/zoom and
# figure resize after plotting
ax = fig_test.subplots()
ax.set(xlim=(-1, 1), ylim=(-1, 1))
ax.axline((0, 0), slope=1, transform=ax.transAxes)
ax.axline((0.5, 0.5), slope=2, color='C1', transform=ax.transAxes)
ax.axline((0.5, 0.5), slope=0, color='C2', transform=ax.transAxes)
ax.set(xlim=(0, 5), ylim=(0, 10))
fig_test.set_size_inches(3, 3)
ax = fig_ref.subplots()
ax.set(xlim=(0, 5), ylim=(0, 10))
fig_ref.set_size_inches(3, 3)
ax.plot([0, 5], [0, 5])
ax.plot([0, 5], [0, 10], color='C1')
ax.plot([0, 5], [5, 5], color='C2')
def test_axline_args():
"""Exactly one of *xy2* and *slope* must be specified."""
fig, ax = plt.subplots()
with pytest.raises(TypeError):
ax.axline((0, 0)) # missing second parameter
with pytest.raises(TypeError):
ax.axline((0, 0), (1, 1), slope=1) # redundant parameters
ax.set_xscale('log')
with pytest.raises(TypeError):
ax.axline((0, 0), slope=1)
ax.set_xscale('linear')
ax.set_yscale('log')
with pytest.raises(TypeError):
ax.axline((0, 0), slope=1)
ax.set_yscale('linear')
with pytest.raises(ValueError):
ax.axline((0, 0), (0, 0)) # two identical points are not allowed
plt.draw()
@image_comparison(['vlines_basic', 'vlines_with_nan', 'vlines_masked'],
extensions=['png'])
def test_vlines():
# normal
x1 = [2, 3, 4, 5, 7]
y1 = [2, -6, 3, 8, 2]
fig1, ax1 = plt.subplots()
ax1.vlines(x1, 0, y1, colors='g', linewidth=5)
# GH #7406
x2 = [2, 3, 4, 5, 6, 7]
y2 = [2, -6, 3, 8, np.nan, 2]
fig2, (ax2, ax3, ax4) = plt.subplots(nrows=3, figsize=(4, 8))
ax2.vlines(x2, 0, y2, colors='g', linewidth=5)
x3 = [2, 3, 4, 5, 6, 7]
y3 = [np.nan, 2, -6, 3, 8, 2]
ax3.vlines(x3, 0, y3, colors='r', linewidth=3, linestyle='--')
x4 = [2, 3, 4, 5, 6, 7]
y4 = [np.nan, 2, -6, 3, 8, np.nan]
ax4.vlines(x4, 0, y4, colors='k', linewidth=2)
# tweak the x-axis so we can see the lines better
for ax in [ax1, ax2, ax3, ax4]:
ax.set_xlim(0, 10)
# check that the y-lims are all automatically the same
assert ax1.get_ylim() == ax2.get_ylim()
assert ax1.get_ylim() == ax3.get_ylim()
assert ax1.get_ylim() == ax4.get_ylim()
fig3, ax5 = plt.subplots()
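    # masked entries in x, ymin and ymax should be skipped rather than drawn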
x5 = np.ma.masked_equal([2, 4, 6, 8, 10, 12], 8)
ymin5 = np.ma.masked_equal([0, 1, -1, 0, 2, 1], 2)
ymax5 = np.ma.masked_equal([13, 14, 15, 16, 17, 18], 18)
ax5.vlines(x5, ymin5, ymax5, colors='k', linewidth=2)
ax5.set_xlim(0, 15)
def test_vlines_default():
fig, ax = plt.subplots()
with mpl.rc_context({'lines.color': 'red'}):
lines = ax.vlines(0.5, 0, 1)
assert mpl.colors.same_color(lines.get_color(), 'red')
@image_comparison(['hlines_basic', 'hlines_with_nan', 'hlines_masked'],
extensions=['png'])
def test_hlines():
# normal
y1 = [2, 3, 4, 5, 7]
x1 = [2, -6, 3, 8, 2]
fig1, ax1 = plt.subplots()
ax1.hlines(y1, 0, x1, colors='g', linewidth=5)
# GH #7406
y2 = [2, 3, 4, 5, 6, 7]
x2 = [2, -6, 3, 8, np.nan, 2]
fig2, (ax2, ax3, ax4) = plt.subplots(nrows=3, figsize=(4, 8))
ax2.hlines(y2, 0, x2, colors='g', linewidth=5)
y3 = [2, 3, 4, 5, 6, 7]
x3 = [np.nan, 2, -6, 3, 8, 2]
ax3.hlines(y3, 0, x3, colors='r', linewidth=3, linestyle='--')
y4 = [2, 3, 4, 5, 6, 7]
x4 = [np.nan, 2, -6, 3, 8, np.nan]
ax4.hlines(y4, 0, x4, colors='k', linewidth=2)
# tweak the y-axis so we can see the lines better
for ax in [ax1, ax2, ax3, ax4]:
ax.set_ylim(0, 10)
# check that the x-lims are all automatically the same
assert ax1.get_xlim() == ax2.get_xlim()
assert ax1.get_xlim() == ax3.get_xlim()
assert ax1.get_xlim() == ax4.get_xlim()
fig3, ax5 = plt.subplots()
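    # masked entries in y, xmin and xmax should be skipped rather than drawn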
y5 = np.ma.masked_equal([2, 4, 6, 8, 10, 12], 8)
xmin5 = np.ma.masked_equal([0, 1, -1, 0, 2, 1], 2)
xmax5 = np.ma.masked_equal([13, 14, 15, 16, 17, 18], 18)
ax5.hlines(y5, xmin5, xmax5, colors='k', linewidth=2)
ax5.set_ylim(0, 15)
def test_hlines_default():
fig, ax = plt.subplots()
with mpl.rc_context({'lines.color': 'red'}):
lines = ax.hlines(0.5, 0, 1)
assert mpl.colors.same_color(lines.get_color(), 'red')
@pytest.mark.parametrize('data', [[1, 2, 3, np.nan, 5],
np.ma.masked_equal([1, 2, 3, 4, 5], 4)])
@check_figures_equal(extensions=["png"])
def test_lines_with_colors(fig_test, fig_ref, data):
test_colors = ['red', 'green', 'blue', 'purple', 'orange']
fig_test.add_subplot(2, 1, 1).vlines(data, 0, 1,
colors=test_colors, linewidth=5)
fig_test.add_subplot(2, 1, 2).hlines(data, 0, 1,
colors=test_colors, linewidth=5)
expect_xy = [1, 2, 3, 5]
expect_color = ['red', 'green', 'blue', 'orange']
fig_ref.add_subplot(2, 1, 1).vlines(expect_xy, 0, 1,
colors=expect_color, linewidth=5)
fig_ref.add_subplot(2, 1, 2).hlines(expect_xy, 0, 1,
colors=expect_color, linewidth=5)
@image_comparison(['step_linestyle', 'step_linestyle'], remove_text=True)
def test_step_linestyle():
x = y = np.arange(10)
# First illustrate basic pyplot interface, using defaults where possible.
fig, ax_lst = plt.subplots(2, 2)
ax_lst = ax_lst.flatten()
ln_styles = ['-', '--', '-.', ':']
for ax, ls in zip(ax_lst, ln_styles):
ax.step(x, y, lw=5, linestyle=ls, where='pre')
ax.step(x, y + 1, lw=5, linestyle=ls, where='mid')
ax.step(x, y + 2, lw=5, linestyle=ls, where='post')
ax.set_xlim([-1, 5])
ax.set_ylim([-1, 7])
# Reuse testcase from above for a labeled data test
data = {"X": x, "Y0": y, "Y1": y+1, "Y2": y+2}
fig, ax_lst = plt.subplots(2, 2)
ax_lst = ax_lst.flatten()
ln_styles = ['-', '--', '-.', ':']
for ax, ls in zip(ax_lst, ln_styles):
ax.step("X", "Y0", lw=5, linestyle=ls, where='pre', data=data)
ax.step("X", "Y1", lw=5, linestyle=ls, where='mid', data=data)
ax.step("X", "Y2", lw=5, linestyle=ls, where='post', data=data)
ax.set_xlim([-1, 5])
ax.set_ylim([-1, 7])
@image_comparison(['mixed_collection'], remove_text=True)
def test_mixed_collection():
# First illustrate basic pyplot interface, using defaults where possible.
fig, ax = plt.subplots()
c = mpatches.Circle((8, 8), radius=4, facecolor='none', edgecolor='green')
# PDF can optimize this one
p1 = mpl.collections.PatchCollection([c], match_original=True)
p1.set_offsets([[0, 0], [24, 24]])
p1.set_linewidths([1, 5])
# PDF can't optimize this one, because the alpha of the edge changes
p2 = mpl.collections.PatchCollection([c], match_original=True)
p2.set_offsets([[48, 0], [-32, -16]])
p2.set_linewidths([1, 5])
p2.set_edgecolors([[0, 0, 0.1, 1.0], [0, 0, 0.1, 0.5]])
ax.patch.set_color('0.5')
ax.add_collection(p1)
ax.add_collection(p2)
ax.set_xlim(0, 16)
ax.set_ylim(0, 16)
def test_subplot_key_hash():
ax = plt.subplot(np.int32(5), np.int64(1), 1)
ax.twinx()
assert ax.get_subplotspec().get_geometry() == (5, 1, 0, 0)
@image_comparison(
["specgram_freqs.png", "specgram_freqs_linear.png",
"specgram_noise.png", "specgram_noise_linear.png"],
remove_text=True, tol=0.07, style="default")
def test_specgram():
"""Test axes.specgram in default (psd) mode."""
# use former defaults to match existing baseline image
matplotlib.rcParams['image.interpolation'] = 'nearest'
n = 1000
Fs = 10.
fstims = [[Fs/4, Fs/5, Fs/11], [Fs/4.7, Fs/5.6, Fs/11.9]]
NFFT_freqs = int(10 * Fs / np.min(fstims))
x = np.arange(0, n, 1/Fs)
y_freqs = np.concatenate(
np.sin(2 * np.pi * np.multiply.outer(fstims, x)).sum(axis=1))
NFFT_noise = int(10 * Fs / 11)
np.random.seed(0)
y_noise = np.concatenate([np.random.standard_normal(n), np.random.rand(n)])
all_sides = ["default", "onesided", "twosided"]
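    # run both the multi-tone and the noise signal through specgram for every
    # 'sides' option; the second figure per signal uses scale='linear' with a
    # logarithmic color norm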
for y, NFFT in [(y_freqs, NFFT_freqs), (y_noise, NFFT_noise)]:
noverlap = NFFT // 2
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
for ax, sides in zip(plt.figure().subplots(3), all_sides):
ax.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides=sides)
for ax, sides in zip(plt.figure().subplots(3), all_sides):
ax.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides=sides,
scale="linear", norm=matplotlib.colors.LogNorm())
@image_comparison(
["specgram_magnitude_freqs.png", "specgram_magnitude_freqs_linear.png",
"specgram_magnitude_noise.png", "specgram_magnitude_noise_linear.png"],
remove_text=True, tol=0.07, style="default")
def test_specgram_magnitude():
"""Test axes.specgram in magnitude mode."""
# use former defaults to match existing baseline image
matplotlib.rcParams['image.interpolation'] = 'nearest'
n = 1000
Fs = 10.
fstims = [[Fs/4, Fs/5, Fs/11], [Fs/4.7, Fs/5.6, Fs/11.9]]
NFFT_freqs = int(100 * Fs / np.min(fstims))
x = np.arange(0, n, 1/Fs)
y = np.sin(2 * np.pi * np.multiply.outer(fstims, x)).sum(axis=1)
y[:, -1] = 1
y_freqs = np.hstack(y)
NFFT_noise = int(10 * Fs / 11)
np.random.seed(0)
y_noise = np.concatenate([np.random.standard_normal(n), np.random.rand(n)])
all_sides = ["default", "onesided", "twosided"]
for y, NFFT in [(y_freqs, NFFT_freqs), (y_noise, NFFT_noise)]:
noverlap = NFFT // 2
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
for ax, sides in zip(plt.figure().subplots(3), all_sides):
ax.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides=sides, mode="magnitude")
for ax, sides in zip(plt.figure().subplots(3), all_sides):
ax.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides=sides, mode="magnitude",
scale="linear", norm=matplotlib.colors.LogNorm())
@image_comparison(
["specgram_angle_freqs.png", "specgram_phase_freqs.png",
"specgram_angle_noise.png", "specgram_phase_noise.png"],
remove_text=True, tol=0.07, style="default")
def test_specgram_angle():
"""Test axes.specgram in angle and phase modes."""
# use former defaults to match existing baseline image
matplotlib.rcParams['image.interpolation'] = 'nearest'
n = 1000
Fs = 10.
fstims = [[Fs/4, Fs/5, Fs/11], [Fs/4.7, Fs/5.6, Fs/11.9]]
NFFT_freqs = int(10 * Fs / np.min(fstims))
x = np.arange(0, n, 1/Fs)
y = np.sin(2 * np.pi * np.multiply.outer(fstims, x)).sum(axis=1)
y[:, -1] = 1
y_freqs = np.hstack(y)
NFFT_noise = int(10 * Fs / 11)
np.random.seed(0)
y_noise = np.concatenate([np.random.standard_normal(n), np.random.rand(n)])
all_sides = ["default", "onesided", "twosided"]
for y, NFFT in [(y_freqs, NFFT_freqs), (y_noise, NFFT_noise)]:
noverlap = NFFT // 2
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
for mode in ["angle", "phase"]:
for ax, sides in zip(plt.figure().subplots(3), all_sides):
ax.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides=sides, mode=mode)
with pytest.raises(ValueError):
ax.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
pad_to=pad_to, sides=sides, mode=mode,
scale="dB")
def test_specgram_fs_none():
"""Test axes.specgram when Fs is None, should not throw error."""
spec, freqs, t, im = plt.specgram(np.ones(300), Fs=None, scale='linear')
xmin, xmax, freq0, freq1 = im.get_extent()
assert xmin == 32 and xmax == 96
@check_figures_equal(extensions=["png"])
def test_specgram_origin_rcparam(fig_test, fig_ref):
"""Test specgram ignores image.origin rcParam and uses origin 'upper'."""
t = np.arange(500)
signal = np.sin(t)
plt.rcParams["image.origin"] = 'upper'
# Reference: First graph using default origin in imshow (upper),
fig_ref.subplots().specgram(signal)
# Try to overwrite the setting trying to flip the specgram
plt.rcParams["image.origin"] = 'lower'
# Test: origin='lower' should be ignored
fig_test.subplots().specgram(signal)
def test_specgram_origin_kwarg():
"""Ensure passing origin as a kwarg raises a TypeError."""
t = np.arange(500)
signal = np.sin(t)
with pytest.raises(TypeError):
plt.specgram(signal, origin='lower')
@image_comparison(
["psd_freqs.png", "csd_freqs.png", "psd_noise.png", "csd_noise.png"],
remove_text=True, tol=0.002)
def test_psd_csd():
n = 10000
Fs = 100.
fstims = [[Fs/4, Fs/5, Fs/11], [Fs/4.7, Fs/5.6, Fs/11.9]]
NFFT_freqs = int(1000 * Fs / np.min(fstims))
x = np.arange(0, n, 1/Fs)
ys_freqs = np.sin(2 * np.pi * np.multiply.outer(fstims, x)).sum(axis=1)
NFFT_noise = int(1000 * Fs / 11)
np.random.seed(0)
ys_noise = [np.random.standard_normal(n), np.random.rand(n)]
all_kwargs = [{"sides": "default"},
{"sides": "onesided", "return_line": False},
{"sides": "twosided", "return_line": True}]
for ys, NFFT in [(ys_freqs, NFFT_freqs), (ys_noise, NFFT_noise)]:
noverlap = NFFT // 2
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
for ax, kwargs in zip(plt.figure().subplots(3), all_kwargs):
ret = ax.psd(np.concatenate(ys), NFFT=NFFT, Fs=Fs,
noverlap=noverlap, pad_to=pad_to, **kwargs)
assert len(ret) == 2 + kwargs.get("return_line", False)
ax.set(xlabel="", ylabel="")
for ax, kwargs in zip(plt.figure().subplots(3), all_kwargs):
ret = ax.csd(*ys, NFFT=NFFT, Fs=Fs,
noverlap=noverlap, pad_to=pad_to, **kwargs)
assert len(ret) == 2 + kwargs.get("return_line", False)
ax.set(xlabel="", ylabel="")
@image_comparison(
["magnitude_spectrum_freqs_linear.png",
"magnitude_spectrum_freqs_dB.png",
"angle_spectrum_freqs.png",
"phase_spectrum_freqs.png",
"magnitude_spectrum_noise_linear.png",
"magnitude_spectrum_noise_dB.png",
"angle_spectrum_noise.png",
"phase_spectrum_noise.png"],
remove_text=True)
def test_spectrum():
n = 10000
Fs = 100.
fstims1 = [Fs/4, Fs/5, Fs/11]
NFFT = int(1000 * Fs / min(fstims1))
pad_to = int(2 ** np.ceil(np.log2(NFFT)))
x = np.arange(0, n, 1/Fs)
y_freqs = ((np.sin(2 * np.pi * np.outer(x, fstims1)) * 10**np.arange(3))
.sum(axis=1))
np.random.seed(0)
y_noise = np.hstack([np.random.standard_normal(n), np.random.rand(n)]) - .5
all_sides = ["default", "onesided", "twosided"]
kwargs = {"Fs": Fs, "pad_to": pad_to}
for y in [y_freqs, y_noise]:
for ax, sides in zip(plt.figure().subplots(3), all_sides):
spec, freqs, line = ax.magnitude_spectrum(y, sides=sides, **kwargs)
ax.set(xlabel="", ylabel="")
for ax, sides in zip(plt.figure().subplots(3), all_sides):
spec, freqs, line = ax.magnitude_spectrum(y, sides=sides, **kwargs,
scale="dB")
ax.set(xlabel="", ylabel="")
for ax, sides in zip(plt.figure().subplots(3), all_sides):
spec, freqs, line = ax.angle_spectrum(y, sides=sides, **kwargs)
ax.set(xlabel="", ylabel="")
for ax, sides in zip(plt.figure().subplots(3), all_sides):
spec, freqs, line = ax.phase_spectrum(y, sides=sides, **kwargs)
ax.set(xlabel="", ylabel="")
@image_comparison(['twin_spines.png'], remove_text=True)
def test_twin_spines():
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
ax.spines[:].set_visible(False)
fig = plt.figure(figsize=(4, 3))
fig.subplots_adjust(right=0.75)
host = fig.add_subplot()
par1 = host.twinx()
par2 = host.twinx()
# Offset the right spine of par2. The ticks and label have already been
# placed on the right by twinx above.
par2.spines.right.set_position(("axes", 1.2))
# Having been created by twinx, par2 has its frame off, so the line of
# its detached spine is invisible. First, activate the frame but make
# the patch and spines invisible.
make_patch_spines_invisible(par2)
# Second, show the right spine.
par2.spines.right.set_visible(True)
p1, = host.plot([0, 1, 2], [0, 1, 2], "b-")
p2, = par1.plot([0, 1, 2], [0, 3, 2], "r-")
p3, = par2.plot([0, 1, 2], [50, 30, 15], "g-")
host.set_xlim(0, 2)
host.set_ylim(0, 2)
par1.set_ylim(0, 4)
par2.set_ylim(1, 65)
host.yaxis.label.set_color(p1.get_color())
par1.yaxis.label.set_color(p2.get_color())
par2.yaxis.label.set_color(p3.get_color())
tkw = dict(size=4, width=1.5)
host.tick_params(axis='y', colors=p1.get_color(), **tkw)
par1.tick_params(axis='y', colors=p2.get_color(), **tkw)
par2.tick_params(axis='y', colors=p3.get_color(), **tkw)
host.tick_params(axis='x', **tkw)
@image_comparison(['twin_spines_on_top.png', 'twin_spines_on_top.png'],
remove_text=True)
def test_twin_spines_on_top():
matplotlib.rcParams['axes.linewidth'] = 48.0
matplotlib.rcParams['lines.linewidth'] = 48.0
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
data = np.array([[1000, 1100, 1200, 1250],
[310, 301, 360, 400]])
ax2 = ax1.twinx()
ax1.plot(data[0], data[1]/1E3, color='#BEAED4')
ax1.fill_between(data[0], data[1]/1E3, color='#BEAED4', alpha=.8)
ax2.plot(data[0], data[1]/1E3, color='#7FC97F')
ax2.fill_between(data[0], data[1]/1E3, color='#7FC97F', alpha=.5)
# Reuse testcase from above for a labeled data test
data = {"i": data[0], "j": data[1]/1E3}
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
ax2 = ax1.twinx()
ax1.plot("i", "j", color='#BEAED4', data=data)
ax1.fill_between("i", "j", color='#BEAED4', alpha=.8, data=data)
ax2.plot("i", "j", color='#7FC97F', data=data)
ax2.fill_between("i", "j", color='#7FC97F', alpha=.5, data=data)
@pytest.mark.parametrize("grid_which, major_visible, minor_visible", [
("both", True, True),
("major", True, False),
("minor", False, True),
])
def test_rcparam_grid_minor(grid_which, major_visible, minor_visible):
mpl.rcParams.update({"axes.grid": True, "axes.grid.which": grid_which})
fig, ax = plt.subplots()
fig.canvas.draw()
assert all(tick.gridline.get_visible() == major_visible
for tick in ax.xaxis.majorTicks)
assert all(tick.gridline.get_visible() == minor_visible
for tick in ax.xaxis.minorTicks)
def test_grid():
fig, ax = plt.subplots()
ax.grid()
fig.canvas.draw()
assert ax.xaxis.majorTicks[0].gridline.get_visible()
ax.grid(visible=False)
fig.canvas.draw()
assert not ax.xaxis.majorTicks[0].gridline.get_visible()
ax.grid(visible=True)
fig.canvas.draw()
assert ax.xaxis.majorTicks[0].gridline.get_visible()
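    # grid() with no arguments toggles the gridlines, so they turn off again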
ax.grid()
fig.canvas.draw()
assert not ax.xaxis.majorTicks[0].gridline.get_visible()
def test_reset_grid():
fig, ax = plt.subplots()
ax.tick_params(reset=True, which='major', labelsize=10)
assert not ax.xaxis.majorTicks[0].gridline.get_visible()
ax.grid(color='red') # enables grid
assert ax.xaxis.majorTicks[0].gridline.get_visible()
with plt.rc_context({'axes.grid': True}):
ax.clear()
ax.tick_params(reset=True, which='major', labelsize=10)
assert ax.xaxis.majorTicks[0].gridline.get_visible()
def test_vline_limit():
fig = plt.figure()
ax = fig.gca()
ax.axvline(0.5)
ax.plot([-0.1, 0, 0.2, 0.1])
assert_allclose(ax.get_ylim(), (-.1, .2))
@pytest.mark.parametrize('fv, fh, args', [[plt.axvline, plt.axhline, (1,)],
[plt.axvspan, plt.axhspan, (1, 1)]])
def test_axline_minmax(fv, fh, args):
bad_lim = matplotlib.dates.num2date(1)
# Check vertical functions
with pytest.raises(ValueError, match='ymin must be a single scalar value'):
fv(*args, ymin=bad_lim, ymax=1)
with pytest.raises(ValueError, match='ymax must be a single scalar value'):
fv(*args, ymin=1, ymax=bad_lim)
# Check horizontal functions
with pytest.raises(ValueError, match='xmin must be a single scalar value'):
fh(*args, xmin=bad_lim, xmax=1)
with pytest.raises(ValueError, match='xmax must be a single scalar value'):
fh(*args, xmin=1, xmax=bad_lim)
def test_empty_shared_subplots():
# empty plots with shared axes inherit limits from populated plots
fig, axs = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True)
axs[0].plot([1, 2, 3], [2, 4, 6])
x0, x1 = axs[1].get_xlim()
y0, y1 = axs[1].get_ylim()
assert x0 <= 1
assert x1 >= 3
assert y0 <= 2
assert y1 >= 6
def test_shared_with_aspect_1():
# allow sharing one axis
for adjustable in ['box', 'datalim']:
fig, axs = plt.subplots(nrows=2, sharex=True)
axs[0].set_aspect(2, adjustable=adjustable, share=True)
assert axs[1].get_aspect() == 2
assert axs[1].get_adjustable() == adjustable
fig, axs = plt.subplots(nrows=2, sharex=True)
axs[0].set_aspect(2, adjustable=adjustable)
assert axs[1].get_aspect() == 'auto'
def test_shared_with_aspect_2():
# Share 2 axes only with 'box':
fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True)
axs[0].set_aspect(2, share=True)
axs[0].plot([1, 2], [3, 4])
axs[1].plot([3, 4], [1, 2])
plt.draw() # Trigger apply_aspect().
assert axs[0].get_xlim() == axs[1].get_xlim()
assert axs[0].get_ylim() == axs[1].get_ylim()
def test_shared_with_aspect_3():
# Different aspect ratios:
for adjustable in ['box', 'datalim']:
fig, axs = plt.subplots(nrows=2, sharey=True)
axs[0].set_aspect(2, adjustable=adjustable)
axs[1].set_aspect(0.5, adjustable=adjustable)
axs[0].plot([1, 2], [3, 4])
axs[1].plot([3, 4], [1, 2])
plt.draw() # Trigger apply_aspect().
assert axs[0].get_xlim() != axs[1].get_xlim()
assert axs[0].get_ylim() == axs[1].get_ylim()
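        # the aspect actually applied should equal
        # figure aspect * axes-box aspect / data-limit aspect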
fig_aspect = fig.bbox_inches.height / fig.bbox_inches.width
for ax in axs:
p = ax.get_position()
box_aspect = p.height / p.width
lim_aspect = ax.viewLim.height / ax.viewLim.width
expected = fig_aspect * box_aspect / lim_aspect
assert round(expected, 4) == round(ax.get_aspect(), 4)
@pytest.mark.parametrize('twin', ('x', 'y'))
def test_twin_with_aspect(twin):
fig, ax = plt.subplots()
# test twinx or twiny
ax_twin = getattr(ax, 'twin{}'.format(twin))()
ax.set_aspect(5)
ax_twin.set_aspect(2)
assert_array_equal(ax.bbox.extents,
ax_twin.bbox.extents)
def test_relim_visible_only():
x1 = (0., 10.)
y1 = (0., 10.)
x2 = (-10., 20.)
y2 = (-10., 30.)
fig = matplotlib.figure.Figure()
ax = fig.add_subplot()
ax.plot(x1, y1)
assert ax.get_xlim() == x1
assert ax.get_ylim() == y1
line, = ax.plot(x2, y2)
assert ax.get_xlim() == x2
assert ax.get_ylim() == y2
line.set_visible(False)
assert ax.get_xlim() == x2
assert ax.get_ylim() == y2
ax.relim(visible_only=True)
ax.autoscale_view()
assert ax.get_xlim() == x1
assert ax.get_ylim() == y1
def test_text_labelsize():
"""
tests for issue #1172
"""
fig = plt.figure()
ax = fig.gca()
ax.tick_params(labelsize='large')
ax.tick_params(direction='out')
@image_comparison(['pie_default.png'])
def test_pie_default():
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
fig1, ax1 = plt.subplots(figsize=(8, 6))
ax1.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90)
@image_comparison(['pie_linewidth_0', 'pie_linewidth_0', 'pie_linewidth_0'],
extensions=['png'])
def test_pie_linewidth_0():
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 0})
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
# Reuse testcase from above for a labeled data test
data = {"l": labels, "s": sizes, "c": colors, "ex": explode}
fig = plt.figure()
ax = fig.gca()
ax.pie("s", explode="ex", labels="l", colors="c",
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 0}, data=data)
ax.axis('equal')
# And again to test the pyplot functions which should also be able to be
# called with a data kwarg
plt.figure()
plt.pie("s", explode="ex", labels="l", colors="c",
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 0}, data=data)
plt.axis('equal')
@image_comparison(['pie_center_radius.png'])
def test_pie_center_radius():
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 0}, center=(1, 2), radius=1.5)
plt.annotate("Center point", xy=(1, 2), xytext=(1, 1.3),
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
bbox=dict(boxstyle="square", facecolor="lightgrey"))
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
@image_comparison(['pie_linewidth_2.png'])
def test_pie_linewidth_2():
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 2})
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
@image_comparison(['pie_ccw_true.png'])
def test_pie_ccw_true():
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
counterclock=True)
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
@image_comparison(['pie_frame_grid.png'])
def test_pie_frame_grid():
# The slices will be ordered and plotted counter-clockwise.
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
# only "explode" the 2nd slice (i.e. 'Hogs')
explode = (0, 0.1, 0, 0)
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 0},
frame=True, center=(2, 2))
plt.pie(sizes[::-1], explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 0},
frame=True, center=(5, 2))
plt.pie(sizes, explode=explode[::-1], labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
wedgeprops={'linewidth': 0},
frame=True, center=(3, 5))
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
@image_comparison(['pie_rotatelabels_true.png'])
def test_pie_rotatelabels_true():
# The slices will be ordered and plotted counter-clockwise.
labels = 'Hogwarts', 'Frogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
    explode = (0, 0.1, 0, 0)  # only "explode" the 2nd slice (i.e. 'Frogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90,
rotatelabels=True)
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
@image_comparison(['pie_no_label.png'])
def test_pie_nolabel_but_legend():
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
sizes = [15, 30, 45, 10]
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=90, labeldistance=None,
rotatelabels=True)
plt.axis('equal')
plt.ylim(-1.2, 1.2)
plt.legend()
def test_pie_textprops():
data = [23, 34, 45]
labels = ["Long name 1", "Long name 2", "Long name 3"]
textprops = dict(horizontalalignment="center",
verticalalignment="top",
rotation=90,
rotation_mode="anchor",
size=12, color="red")
_, texts, autopct = plt.gca().pie(data, labels=labels, autopct='%.2f',
textprops=textprops)
for labels in [texts, autopct]:
for tx in labels:
assert tx.get_ha() == textprops["horizontalalignment"]
assert tx.get_va() == textprops["verticalalignment"]
assert tx.get_rotation() == textprops["rotation"]
assert tx.get_rotation_mode() == textprops["rotation_mode"]
assert tx.get_size() == textprops["size"]
assert tx.get_color() == textprops["color"]
def test_pie_get_negative_values():
# Test the ValueError raised when feeding negative values into axes.pie
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.pie([5, 5, -3], explode=[0, .1, .2])
def test_normalize_kwarg_warn_pie():
fig, ax = plt.subplots()
with pytest.warns(MatplotlibDeprecationWarning):
ax.pie(x=[0], normalize=None)
def test_normalize_kwarg_pie():
fig, ax = plt.subplots()
x = [0.3, 0.3, 0.1]
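    # with normalize=True the wedges always fill the full circle; with
    # normalize=False, values summing to less than 1 leave a gap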
t1 = ax.pie(x=x, normalize=True)
assert abs(t1[0][-1].theta2 - 360.) < 1e-3
t2 = ax.pie(x=x, normalize=False)
assert abs(t2[0][-1].theta2 - 360.) > 1e-3
@image_comparison(['set_get_ticklabels.png'])
def test_set_get_ticklabels():
# test issue 2246
fig, ax = plt.subplots(2)
ha = ['normal', 'set_x/yticklabels']
ax[0].plot(np.arange(10))
ax[0].set_title(ha[0])
ax[1].plot(np.arange(10))
ax[1].set_title(ha[1])
    # set the tick labels on the first plot in the usual way
ax[0].set_xticks(range(10))
ax[0].set_yticks(range(10))
ax[0].set_xticklabels(['a', 'b', 'c', 'd'] + 6 * [''])
ax[0].set_yticklabels(['11', '12', '13', '14'] + 6 * [''])
    # set the tick labels on the other plot by passing the get_ticklabels()
    # return value as the ticklabels argument; both plots should end up with
    # the same labels
ax[1].set_xticks(ax[0].get_xticks())
ax[1].set_yticks(ax[0].get_yticks())
ax[1].set_xticklabels(ax[0].get_xticklabels())
ax[1].set_yticklabels(ax[0].get_yticklabels())
def test_subsampled_ticklabels():
# test issue 11937
fig, ax = plt.subplots()
ax.plot(np.arange(10))
ax.xaxis.set_ticks(np.arange(10) + 0.1)
ax.locator_params(nbins=5)
ax.xaxis.set_ticklabels([c for c in "bcdefghijk"])
plt.draw()
labels = [t.get_text() for t in ax.xaxis.get_ticklabels()]
assert labels == ['b', 'd', 'f', 'h', 'j']
def test_mismatched_ticklabels():
fig, ax = plt.subplots()
ax.plot(np.arange(10))
ax.xaxis.set_ticks([1.5, 2.5])
with pytest.raises(ValueError):
ax.xaxis.set_ticklabels(['a', 'b', 'c'])
def test_empty_ticks_fixed_loc():
# Smoke test that [] can be used to unset all tick labels
fig, ax = plt.subplots()
ax.bar([1, 2], [1, 2])
ax.set_xticks([1, 2])
ax.set_xticklabels([])
@image_comparison(['retain_tick_visibility.png'])
def test_retain_tick_visibility():
fig, ax = plt.subplots()
plt.plot([0, 1, 2], [0, -1, 4])
plt.setp(ax.get_yticklabels(), visible=False)
ax.tick_params(axis="y", which="both", length=0)
def test_tick_label_update():
# test issue 9397
fig, ax = plt.subplots()
# Set up a dummy formatter
def formatter_func(x, pos):
return "unit value" if x == 1 else ""
ax.xaxis.set_major_formatter(plt.FuncFormatter(formatter_func))
# Force some of the x-axis ticks to be outside of the drawn range
ax.set_xticks([-1, 0, 1, 2, 3])
ax.set_xlim(-0.5, 2.5)
ax.figure.canvas.draw()
tick_texts = [tick.get_text() for tick in ax.xaxis.get_ticklabels()]
assert tick_texts == ["", "", "unit value", "", ""]
@image_comparison(['o_marker_path_snap.png'], savefig_kwarg={'dpi': 72})
def test_o_marker_path_snap():
fig, ax = plt.subplots()
ax.margins(.1)
for ms in range(1, 15):
ax.plot([1, 2, ], np.ones(2) + ms, 'o', ms=ms)
for ms in np.linspace(1, 10, 25):
ax.plot([3, 4, ], np.ones(2) + ms, 'o', ms=ms)
def test_margins():
# test all ways margins can be called
data = [1, 10]
xmin = 0.0
xmax = len(data) - 1.0
ymin = min(data)
ymax = max(data)
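    # margins(m) expands each axis limit by m times the data range on both sides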
fig1, ax1 = plt.subplots(1, 1)
ax1.plot(data)
ax1.margins(1)
assert ax1.margins() == (1, 1)
assert ax1.get_xlim() == (xmin - (xmax - xmin) * 1,
xmax + (xmax - xmin) * 1)
assert ax1.get_ylim() == (ymin - (ymax - ymin) * 1,
ymax + (ymax - ymin) * 1)
fig2, ax2 = plt.subplots(1, 1)
ax2.plot(data)
ax2.margins(0.5, 2)
assert ax2.margins() == (0.5, 2)
assert ax2.get_xlim() == (xmin - (xmax - xmin) * 0.5,
xmax + (xmax - xmin) * 0.5)
assert ax2.get_ylim() == (ymin - (ymax - ymin) * 2,
ymax + (ymax - ymin) * 2)
fig3, ax3 = plt.subplots(1, 1)
ax3.plot(data)
ax3.margins(x=-0.2, y=0.5)
assert ax3.margins() == (-0.2, 0.5)
assert ax3.get_xlim() == (xmin - (xmax - xmin) * -0.2,
xmax + (xmax - xmin) * -0.2)
assert ax3.get_ylim() == (ymin - (ymax - ymin) * 0.5,
ymax + (ymax - ymin) * 0.5)
def test_set_margin_updates_limits():
mpl.style.use("default")
fig, ax = plt.subplots()
ax.plot([1, 2], [1, 2])
ax.set(xscale="log", xmargin=0)
assert ax.get_xlim() == (1, 2)
def test_length_one_hist():
fig, ax = plt.subplots()
ax.hist(1)
ax.hist([1])
def test_pathological_hexbin():
# issue #2863
mylist = [10] * 100
fig, ax = plt.subplots(1, 1)
ax.hexbin(mylist, mylist)
fig.savefig(io.BytesIO()) # Check that no warning is emitted.
def test_color_None():
# issue 3855
fig, ax = plt.subplots()
ax.plot([1, 2], [1, 2], color=None)
def test_color_alias():
# issues 4157 and 4162
fig, ax = plt.subplots()
line = ax.plot([0, 1], c='lime')[0]
assert 'lime' == line.get_color()
def test_numerical_hist_label():
fig, ax = plt.subplots()
ax.hist([range(15)] * 5, label=range(5))
ax.legend()
def test_unicode_hist_label():
fig, ax = plt.subplots()
a = (b'\xe5\xbe\x88\xe6\xbc\x82\xe4\xba\xae, ' +
b'r\xc3\xb6m\xc3\xa4n ch\xc3\xa4r\xc3\xa1ct\xc3\xa8rs')
b = b'\xd7\xa9\xd7\x9c\xd7\x95\xd7\x9d'
labels = [a.decode('utf-8'),
'hi aardvark',
b.decode('utf-8'),
]
ax.hist([range(15)] * 3, label=labels)
ax.legend()
def test_move_offsetlabel():
data = np.random.random(10) * 1e-22
fig, ax = plt.subplots()
ax.plot(data)
fig.canvas.draw()
before = ax.yaxis.offsetText.get_position()
assert ax.yaxis.offsetText.get_horizontalalignment() == 'left'
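    # moving the ticks to the right side should move the offset text with them
    # and flip its horizontal alignment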
ax.yaxis.tick_right()
fig.canvas.draw()
after = ax.yaxis.offsetText.get_position()
assert after[0] > before[0] and after[1] == before[1]
assert ax.yaxis.offsetText.get_horizontalalignment() == 'right'
fig, ax = plt.subplots()
ax.plot(data)
fig.canvas.draw()
before = ax.xaxis.offsetText.get_position()
assert ax.xaxis.offsetText.get_verticalalignment() == 'top'
ax.xaxis.tick_top()
fig.canvas.draw()
after = ax.xaxis.offsetText.get_position()
assert after[0] == before[0] and after[1] > before[1]
assert ax.xaxis.offsetText.get_verticalalignment() == 'bottom'
@image_comparison(['rc_spines.png'], savefig_kwarg={'dpi': 40})
def test_rc_spines():
rc_dict = {
'axes.spines.left': False,
'axes.spines.right': False,
'axes.spines.top': False,
'axes.spines.bottom': False}
with matplotlib.rc_context(rc_dict):
plt.subplots() # create a figure and axes with the spine properties
@image_comparison(['rc_grid.png'], savefig_kwarg={'dpi': 40})
def test_rc_grid():
fig = plt.figure()
rc_dict0 = {
'axes.grid': True,
'axes.grid.axis': 'both'
}
rc_dict1 = {
'axes.grid': True,
'axes.grid.axis': 'x'
}
rc_dict2 = {
'axes.grid': True,
'axes.grid.axis': 'y'
}
dict_list = [rc_dict0, rc_dict1, rc_dict2]
for i, rc_dict in enumerate(dict_list, 1):
with matplotlib.rc_context(rc_dict):
fig.add_subplot(3, 1, i)
def test_rc_tick():
d = {'xtick.bottom': False, 'xtick.top': True,
'ytick.left': True, 'ytick.right': False}
with plt.rc_context(rc=d):
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
xax = ax1.xaxis
yax = ax1.yaxis
# tick1On bottom/left
assert not xax._major_tick_kw['tick1On']
assert xax._major_tick_kw['tick2On']
assert not xax._minor_tick_kw['tick1On']
assert xax._minor_tick_kw['tick2On']
assert yax._major_tick_kw['tick1On']
assert not yax._major_tick_kw['tick2On']
assert yax._minor_tick_kw['tick1On']
assert not yax._minor_tick_kw['tick2On']
def test_rc_major_minor_tick():
d = {'xtick.top': True, 'ytick.right': True, # Enable all ticks
'xtick.bottom': True, 'ytick.left': True,
# Selectively disable
'xtick.minor.bottom': False, 'xtick.major.bottom': False,
'ytick.major.left': False, 'ytick.minor.left': False}
with plt.rc_context(rc=d):
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
xax = ax1.xaxis
yax = ax1.yaxis
# tick1On bottom/left
assert not xax._major_tick_kw['tick1On']
assert xax._major_tick_kw['tick2On']
assert not xax._minor_tick_kw['tick1On']
assert xax._minor_tick_kw['tick2On']
assert not yax._major_tick_kw['tick1On']
assert yax._major_tick_kw['tick2On']
assert not yax._minor_tick_kw['tick1On']
assert yax._minor_tick_kw['tick2On']
def test_square_plot():
x = np.arange(4)
y = np.array([1., 3., 5., 7.])
fig, ax = plt.subplots()
ax.plot(x, y, 'mo')
ax.axis('square')
xlim, ylim = ax.get_xlim(), ax.get_ylim()
assert np.diff(xlim) == np.diff(ylim)
assert ax.get_aspect() == 1
assert_array_almost_equal(
ax.get_position(original=True).extents, (0.125, 0.1, 0.9, 0.9))
assert_array_almost_equal(
ax.get_position(original=False).extents, (0.2125, 0.1, 0.8125, 0.9))
def test_bad_plot_args():
with pytest.raises(ValueError):
plt.plot(None)
with pytest.raises(ValueError):
plt.plot(None, None)
with pytest.raises(ValueError):
plt.plot(np.zeros((2, 2)), np.zeros((2, 3)))
with pytest.raises(ValueError):
plt.plot((np.arange(5).reshape((1, -1)), np.arange(5).reshape(-1, 1)))
@pytest.mark.parametrize(
"xy, cls", [
((), mpl.image.AxesImage), # (0, N)
(((3, 7), (2, 6)), mpl.image.AxesImage), # (xmin, xmax)
((range(5), range(4)), mpl.image.AxesImage), # regular grid
(([1, 2, 4, 8, 16], [0, 1, 2, 3]), # irregular grid
mpl.image.PcolorImage),
((np.random.random((4, 5)), np.random.random((4, 5))), # 2D coords
mpl.collections.QuadMesh),
]
)
@pytest.mark.parametrize(
"data", [np.arange(12).reshape((3, 4)), np.random.rand(3, 4, 3)]
)
def test_pcolorfast(xy, data, cls):
fig, ax = plt.subplots()
assert type(ax.pcolorfast(*xy, data)) == cls
def test_shared_scale():
fig, axs = plt.subplots(2, 2, sharex=True, sharey=True)
axs[0, 0].set_xscale("log")
axs[0, 0].set_yscale("log")
for ax in axs.flat:
assert ax.get_yscale() == 'log'
assert ax.get_xscale() == 'log'
axs[1, 1].set_xscale("linear")
axs[1, 1].set_yscale("linear")
for ax in axs.flat:
assert ax.get_yscale() == 'linear'
assert ax.get_xscale() == 'linear'
def test_shared_bool():
with pytest.raises(TypeError):
plt.subplot(sharex=True)
with pytest.raises(TypeError):
plt.subplot(sharey=True)
def test_violin_point_mass():
"""Violin plot should handle point mass pdf gracefully."""
plt.violinplot(np.array([0, 0]))
def generate_errorbar_inputs():
base_xy = cycler('x', [np.arange(5)]) + cycler('y', [np.ones(5)])
err_cycler = cycler('err', [1,
[1, 1, 1, 1, 1],
[[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]],
np.ones(5),
np.ones((2, 5)),
None
])
xerr_cy = cycler('xerr', err_cycler)
yerr_cy = cycler('yerr', err_cycler)
empty = ((cycler('x', [[]]) + cycler('y', [[]])) *
cycler('xerr', [[], None]) * cycler('yerr', [[], None]))
xerr_only = base_xy * xerr_cy
yerr_only = base_xy * yerr_cy
both_err = base_xy * yerr_cy * xerr_cy
return [*xerr_only, *yerr_only, *both_err, *empty]
@pytest.mark.parametrize('kwargs', generate_errorbar_inputs())
def test_errorbar_inputs_shotgun(kwargs):
ax = plt.gca()
eb = ax.errorbar(**kwargs)
eb.remove()
@image_comparison(["dash_offset"], remove_text=True)
def test_dash_offset():
fig, ax = plt.subplots()
x = np.linspace(0, 10)
y = np.ones_like(x)
for j in range(0, 100, 2):
ax.plot(x, j*y, ls=(j, (10, 10)), lw=5, color='k')
def test_title_pad():
# check that title padding puts the title in the right
# place...
fig, ax = plt.subplots()
ax.set_title('aardvark', pad=30.)
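    # pad is given in points; the title offset transform stores the shift in
    # pixels (points / 72 * dpi)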
m = ax.titleOffsetTrans.get_matrix()
assert m[1, -1] == (30. / 72. * fig.dpi)
ax.set_title('aardvark', pad=0.)
m = ax.titleOffsetTrans.get_matrix()
assert m[1, -1] == 0.
# check that it is reverted...
ax.set_title('aardvark', pad=None)
m = ax.titleOffsetTrans.get_matrix()
assert m[1, -1] == (matplotlib.rcParams['axes.titlepad'] / 72. * fig.dpi)
def test_title_location_roundtrip():
fig, ax = plt.subplots()
# set default title location
plt.rcParams['axes.titlelocation'] = 'center'
ax.set_title('aardvark')
ax.set_title('left', loc='left')
ax.set_title('right', loc='right')
assert 'left' == ax.get_title(loc='left')
assert 'right' == ax.get_title(loc='right')
assert 'aardvark' == ax.get_title(loc='center')
with pytest.raises(ValueError):
ax.get_title(loc='foo')
with pytest.raises(ValueError):
ax.set_title('fail', loc='foo')
@image_comparison(["loglog.png"], remove_text=True, tol=0.02)
def test_loglog():
fig, ax = plt.subplots()
x = np.arange(1, 11)
ax.loglog(x, x**3, lw=5)
ax.tick_params(length=25, width=2)
ax.tick_params(length=15, width=2, which='minor')
@pytest.mark.parametrize("new_api", [False, True])
@image_comparison(["test_loglog_nonpos.png"], remove_text=True, style='mpl20')
def test_loglog_nonpos(new_api):
fig, axs = plt.subplots(3, 3)
x = np.arange(1, 11)
y = x**3
y[7] = -3.
x[4] = -10
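    # exercise 'mask', 'clip' and default handling of nonpositive values on
    # each axis, via both the old nonposx/nonposy kwargs and the new
    # nonpositive API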
for (i, j), ax in np.ndenumerate(axs):
mcx = ['mask', 'clip', ''][j]
mcy = ['mask', 'clip', ''][i]
if new_api:
if mcx == mcy:
if mcx:
ax.loglog(x, y**3, lw=2, nonpositive=mcx)
else:
ax.loglog(x, y**3, lw=2)
else:
ax.loglog(x, y**3, lw=2)
if mcx:
ax.set_xscale("log", nonpositive=mcx)
if mcy:
ax.set_yscale("log", nonpositive=mcy)
else:
kws = {}
if mcx:
kws['nonposx'] = mcx
if mcy:
kws['nonposy'] = mcy
with (pytest.warns(MatplotlibDeprecationWarning) if kws
else nullcontext()):
ax.loglog(x, y**3, lw=2, **kws)
@pytest.mark.style('default')
def test_axes_margins():
fig, ax = plt.subplots()
ax.plot([0, 1, 2, 3])
assert ax.get_ybound()[0] != 0
fig, ax = plt.subplots()
ax.bar([0, 1, 2, 3], [1, 1, 1, 1])
assert ax.get_ybound()[0] == 0
fig, ax = plt.subplots()
ax.barh([0, 1, 2, 3], [1, 1, 1, 1])
assert ax.get_xbound()[0] == 0
fig, ax = plt.subplots()
ax.pcolor(np.zeros((10, 10)))
assert ax.get_xbound() == (0, 10)
assert ax.get_ybound() == (0, 10)
fig, ax = plt.subplots()
ax.pcolorfast(np.zeros((10, 10)))
assert ax.get_xbound() == (0, 10)
assert ax.get_ybound() == (0, 10)
fig, ax = plt.subplots()
ax.hist(np.arange(10))
assert ax.get_ybound()[0] == 0
fig, ax = plt.subplots()
ax.imshow(np.zeros((10, 10)))
assert ax.get_xbound() == (-0.5, 9.5)
assert ax.get_ybound() == (-0.5, 9.5)
@pytest.fixture(params=['x', 'y'])
def shared_axis_remover(request):
def _helper_x(ax):
ax2 = ax.twinx()
ax2.remove()
ax.set_xlim(0, 15)
r = ax.xaxis.get_major_locator()()
assert r[-1] > 14
def _helper_y(ax):
ax2 = ax.twiny()
ax2.remove()
ax.set_ylim(0, 15)
r = ax.yaxis.get_major_locator()()
assert r[-1] > 14
return {"x": _helper_x, "y": _helper_y}[request.param]
@pytest.fixture(params=['gca', 'subplots', 'subplots_shared', 'add_axes'])
def shared_axes_generator(request):
# test all of the ways to get fig/ax sets
if request.param == 'gca':
fig = plt.figure()
ax = fig.gca()
elif request.param == 'subplots':
fig, ax = plt.subplots()
elif request.param == 'subplots_shared':
fig, ax_lst = plt.subplots(2, 2, sharex='all', sharey='all')
ax = ax_lst[0][0]
elif request.param == 'add_axes':
fig = plt.figure()
ax = fig.add_axes([.1, .1, .8, .8])
return fig, ax
def test_remove_shared_axes(shared_axes_generator, shared_axis_remover):
# test all of the ways to get fig/ax sets
fig, ax = shared_axes_generator
shared_axis_remover(ax)
def test_remove_shared_axes_relim():
fig, ax_lst = plt.subplots(2, 2, sharex='all', sharey='all')
ax = ax_lst[0][0]
orig_xlim = ax_lst[0][1].get_xlim()
ax.remove()
ax.set_xlim(0, 5)
assert_array_equal(ax_lst[0][1].get_xlim(), orig_xlim)
def test_shared_axes_autoscale():
l = np.arange(-80, 90, 40)
t = np.random.random_sample((l.size, l.size))
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, sharey=True)
ax1.set_xlim(-1000, 1000)
ax1.set_ylim(-1000, 1000)
ax1.contour(l, l, t)
ax2.contour(l, l, t)
assert not ax1.get_autoscalex_on() and not ax2.get_autoscalex_on()
assert not ax1.get_autoscaley_on() and not ax2.get_autoscaley_on()
assert ax1.get_xlim() == ax2.get_xlim() == (-1000, 1000)
assert ax1.get_ylim() == ax2.get_ylim() == (-1000, 1000)
def test_adjust_numtick_aspect():
fig, ax = plt.subplots()
ax.yaxis.get_major_locator().set_params(nbins='auto')
ax.set_xlim(0, 1000)
ax.set_aspect('equal')
fig.canvas.draw()
assert len(ax.yaxis.get_major_locator()()) == 2
ax.set_ylim(0, 1000)
fig.canvas.draw()
assert len(ax.yaxis.get_major_locator()()) > 2
@image_comparison(["auto_numticks.png"], style='default')
def test_auto_numticks():
# Make tiny, empty subplots, verify that there are only 3 ticks.
plt.subplots(4, 4)
@image_comparison(["auto_numticks_log.png"], style='default')
def test_auto_numticks_log():
# Verify that there are not too many ticks with a large log range.
fig, ax = plt.subplots()
matplotlib.rcParams['axes.autolimit_mode'] = 'round_numbers'
ax.loglog([1e-20, 1e5], [1e-16, 10])
def test_broken_barh_empty():
fig, ax = plt.subplots()
ax.broken_barh([], (.1, .5))
def test_broken_barh_timedelta():
"""Check that timedelta works as x, dx pair for this method."""
fig, ax = plt.subplots()
d0 = datetime.datetime(2018, 11, 9, 0, 0, 0)
pp = ax.broken_barh([(d0, datetime.timedelta(hours=1))], [1, 2])
assert pp.get_paths()[0].vertices[0, 0] == mdates.date2num(d0)
assert pp.get_paths()[0].vertices[2, 0] == mdates.date2num(d0) + 1 / 24
def test_pandas_pcolormesh(pd):
time = pd.date_range('2000-01-01', periods=10)
depth = np.arange(20)
data = np.random.rand(19, 9)
fig, ax = plt.subplots()
ax.pcolormesh(time, depth, data)
def test_pandas_indexing_dates(pd):
dates = np.arange('2005-02', '2005-03', dtype='datetime64[D]')
values = np.sin(np.array(range(len(dates))))
df = pd.DataFrame({'dates': dates, 'values': values})
ax = plt.gca()
without_zero_index = df[np.array(df.index) % 2 == 1].copy()
ax.plot('dates', 'values', data=without_zero_index)
def test_pandas_errorbar_indexing(pd):
df = pd.DataFrame(np.random.uniform(size=(5, 4)),
columns=['x', 'y', 'xe', 'ye'],
index=[1, 2, 3, 4, 5])
fig, ax = plt.subplots()
ax.errorbar('x', 'y', xerr='xe', yerr='ye', data=df)
def test_pandas_index_shape(pd):
df = pd.DataFrame({"XX": [4, 5, 6], "YY": [7, 1, 2]})
fig, ax = plt.subplots()
ax.plot(df.index, df['YY'])
def test_pandas_indexing_hist(pd):
ser_1 = pd.Series(data=[1, 2, 2, 3, 3, 4, 4, 4, 4, 5])
ser_2 = ser_1.iloc[1:]
fig, ax = plt.subplots()
ax.hist(ser_2)
def test_pandas_bar_align_center(pd):
# Tests fix for issue 8767
df = pd.DataFrame({'a': range(2), 'b': range(2)})
fig, ax = plt.subplots(1)
ax.bar(df.loc[df['a'] == 1, 'b'],
df.loc[df['a'] == 1, 'b'],
align='center')
fig.canvas.draw()
def test_axis_set_tick_params_labelsize_labelcolor():
# Tests fix for issue 4346
axis_1 = plt.subplot()
axis_1.yaxis.set_tick_params(labelsize=30, labelcolor='red',
direction='out')
# Expected values after setting the ticks
assert axis_1.yaxis.majorTicks[0]._size == 4.0
assert axis_1.yaxis.majorTicks[0].tick1line.get_color() == 'k'
assert axis_1.yaxis.majorTicks[0].label1.get_size() == 30.0
assert axis_1.yaxis.majorTicks[0].label1.get_color() == 'red'
def test_axes_tick_params_gridlines():
# Now treating grid params like other Tick params
ax = plt.subplot()
ax.tick_params(grid_color='b', grid_linewidth=5, grid_alpha=0.5,
grid_linestyle='dashdot')
for axis in ax.xaxis, ax.yaxis:
assert axis.majorTicks[0].gridline.get_color() == 'b'
assert axis.majorTicks[0].gridline.get_linewidth() == 5
assert axis.majorTicks[0].gridline.get_alpha() == 0.5
assert axis.majorTicks[0].gridline.get_linestyle() == '-.'
def test_axes_tick_params_ylabelside():
# Tests fix for issue 10267
ax = plt.subplot()
ax.tick_params(labelleft=False, labelright=True,
which='major')
ax.tick_params(labelleft=False, labelright=True,
which='minor')
# expects left false, right true
assert ax.yaxis.majorTicks[0].label1.get_visible() is False
assert ax.yaxis.majorTicks[0].label2.get_visible() is True
assert ax.yaxis.minorTicks[0].label1.get_visible() is False
assert ax.yaxis.minorTicks[0].label2.get_visible() is True
def test_axes_tick_params_xlabelside():
# Tests fix for issue 10267
ax = plt.subplot()
ax.tick_params(labeltop=True, labelbottom=False,
which='major')
ax.tick_params(labeltop=True, labelbottom=False,
which='minor')
# expects top True, bottom False
# label1.get_visible() mapped to labelbottom
# label2.get_visible() mapped to labeltop
assert ax.xaxis.majorTicks[0].label1.get_visible() is False
assert ax.xaxis.majorTicks[0].label2.get_visible() is True
assert ax.xaxis.minorTicks[0].label1.get_visible() is False
assert ax.xaxis.minorTicks[0].label2.get_visible() is True
def test_none_kwargs():
ax = plt.figure().subplots()
ln, = ax.plot(range(32), linestyle=None)
assert ln.get_linestyle() == '-'
def test_bar_uint8():
xs = [0, 1, 2, 3]
b = plt.bar(np.array(xs, dtype=np.uint8), [2, 3, 4, 5], align="edge")
for (patch, x) in zip(b.patches, xs):
assert patch.xy[0] == x
@image_comparison(['date_timezone_x.png'], tol=1.0)
def test_date_timezone_x():
# Tests issue 5575
time_index = [datetime.datetime(2016, 2, 22, hour=x,
tzinfo=dateutil.tz.gettz('Canada/Eastern'))
for x in range(3)]
# Same Timezone
plt.figure(figsize=(20, 12))
plt.subplot(2, 1, 1)
plt.plot_date(time_index, [3] * 3, tz='Canada/Eastern')
# Different Timezone
plt.subplot(2, 1, 2)
plt.plot_date(time_index, [3] * 3, tz='UTC')
@image_comparison(['date_timezone_y.png'])
def test_date_timezone_y():
# Tests issue 5575
time_index = [datetime.datetime(2016, 2, 22, hour=x,
tzinfo=dateutil.tz.gettz('Canada/Eastern'))
for x in range(3)]
# Same Timezone
plt.figure(figsize=(20, 12))
plt.subplot(2, 1, 1)
plt.plot_date([3] * 3,
time_index, tz='Canada/Eastern', xdate=False, ydate=True)
# Different Timezone
plt.subplot(2, 1, 2)
plt.plot_date([3] * 3, time_index, tz='UTC', xdate=False, ydate=True)
@image_comparison(['date_timezone_x_and_y.png'], tol=1.0)
def test_date_timezone_x_and_y():
# Tests issue 5575
UTC = datetime.timezone.utc
time_index = [datetime.datetime(2016, 2, 22, hour=x, tzinfo=UTC)
for x in range(3)]
# Same Timezone
plt.figure(figsize=(20, 12))
plt.subplot(2, 1, 1)
plt.plot_date(time_index, time_index, tz='UTC', ydate=True)
# Different Timezone
plt.subplot(2, 1, 2)
plt.plot_date(time_index, time_index, tz='US/Eastern', ydate=True)
@image_comparison(['axisbelow.png'], remove_text=True)
def test_axisbelow():
# Test 'line' setting added in 6287.
# Show only grids, not frame or ticks, to make this test
# independent of future change to drawing order of those elements.
axs = plt.figure().subplots(ncols=3, sharex=True, sharey=True)
settings = (False, 'line', True)
for ax, setting in zip(axs, settings):
ax.plot((0, 10), (0, 10), lw=10, color='m')
circ = mpatches.Circle((3, 3), color='r')
ax.add_patch(circ)
ax.grid(color='c', linestyle='-', linewidth=3)
ax.tick_params(top=False, bottom=False,
left=False, right=False)
ax.spines[:].set_visible(False)
ax.set_axisbelow(setting)
def test_titletwiny():
plt.style.use('mpl20')
fig, ax = plt.subplots(dpi=72)
ax2 = ax.twiny()
xlabel2 = ax2.set_xlabel('Xlabel2')
title = ax.set_title('Title')
fig.canvas.draw()
renderer = fig.canvas.get_renderer()
# ------- Test that title is put above Xlabel2 (Xlabel2 at top) ----------
bbox_y0_title = title.get_window_extent(renderer).y0 # bottom of title
bbox_y1_xlabel2 = xlabel2.get_window_extent(renderer).y1 # top of xlabel2
y_diff = bbox_y0_title - bbox_y1_xlabel2
assert np.isclose(y_diff, 3)
def test_titlesetpos():
# Test that title stays put if we set it manually
fig, ax = plt.subplots()
fig.subplots_adjust(top=0.8)
ax2 = ax.twiny()
ax.set_xlabel('Xlabel')
ax2.set_xlabel('Xlabel2')
ax.set_title('Title')
pos = (0.5, 1.11)
ax.title.set_position(pos)
renderer = fig.canvas.get_renderer()
ax._update_title_position(renderer)
assert ax.title.get_position() == pos
def test_title_xticks_top():
# Test that title moves if xticks on top of axes.
mpl.rcParams['axes.titley'] = None
fig, ax = plt.subplots()
ax.xaxis.set_ticks_position('top')
ax.set_title('xlabel top')
fig.canvas.draw()
assert ax.title.get_position()[1] > 1.04
def test_title_xticks_top_both():
# Test that title moves if xticks on top of axes.
mpl.rcParams['axes.titley'] = None
fig, ax = plt.subplots()
ax.tick_params(axis="x",
bottom=True, top=True, labelbottom=True, labeltop=True)
ax.set_title('xlabel top')
fig.canvas.draw()
assert ax.title.get_position()[1] > 1.04
def test_title_no_move_off_page():
# If an axes is off the figure (ie. if it is cropped during a save)
# make sure that the automatic title repositioning does not get done.
mpl.rcParams['axes.titley'] = None
fig = plt.figure()
ax = fig.add_axes([0.1, -0.5, 0.8, 0.2])
ax.tick_params(axis="x",
bottom=True, top=True, labelbottom=True, labeltop=True)
tt = ax.set_title('Boo')
fig.canvas.draw()
assert tt.get_position()[1] == 1.0
def test_offset_label_color():
# Tests issue 6440
fig, ax = plt.subplots()
ax.plot([1.01e9, 1.02e9, 1.03e9])
ax.yaxis.set_tick_params(labelcolor='red')
assert ax.yaxis.get_offset_text().get_color() == 'red'
def test_offset_text_visible():
fig, ax = plt.subplots()
ax.plot([1.01e9, 1.02e9, 1.03e9])
ax.yaxis.set_tick_params(label1On=False, label2On=True)
assert ax.yaxis.get_offset_text().get_visible()
ax.yaxis.set_tick_params(label2On=False)
assert not ax.yaxis.get_offset_text().get_visible()
def test_large_offset():
fig, ax = plt.subplots()
ax.plot((1 + np.array([0, 1.e-12])) * 1.e27)
fig.canvas.draw()
def test_barb_units():
fig, ax = plt.subplots()
dates = [datetime.datetime(2017, 7, 15, 18, i) for i in range(0, 60, 10)]
y = np.linspace(0, 5, len(dates))
u = v = np.linspace(0, 50, len(dates))
ax.barbs(dates, y, u, v)
def test_quiver_units():
fig, ax = plt.subplots()
dates = [datetime.datetime(2017, 7, 15, 18, i) for i in range(0, 60, 10)]
y = np.linspace(0, 5, len(dates))
u = v = np.linspace(0, 50, len(dates))
ax.quiver(dates, y, u, v)
def test_bar_color_cycle():
to_rgb = mcolors.to_rgb
fig, ax = plt.subplots()
for j in range(5):
ln, = ax.plot(range(3))
brs = ax.bar(range(3), range(3))
for br in brs:
assert to_rgb(ln.get_color()) == to_rgb(br.get_facecolor())
def test_tick_param_label_rotation():
fix, (ax, ax2) = plt.subplots(1, 2)
ax.plot([0, 1], [0, 1])
ax2.plot([0, 1], [0, 1])
ax.xaxis.set_tick_params(which='both', rotation=75)
ax.yaxis.set_tick_params(which='both', rotation=90)
for text in ax.get_xticklabels(which='both'):
assert text.get_rotation() == 75
for text in ax.get_yticklabels(which='both'):
assert text.get_rotation() == 90
ax2.tick_params(axis='x', labelrotation=53)
ax2.tick_params(axis='y', rotation=35)
for text in ax2.get_xticklabels(which='major'):
assert text.get_rotation() == 53
for text in ax2.get_yticklabels(which='major'):
assert text.get_rotation() == 35
@pytest.mark.style('default')
def test_fillbetween_cycle():
fig, ax = plt.subplots()
for j in range(3):
cc = ax.fill_between(range(3), range(3))
target = mcolors.to_rgba('C{}'.format(j))
assert tuple(cc.get_facecolors().squeeze()) == tuple(target)
for j in range(3, 6):
cc = ax.fill_betweenx(range(3), range(3))
target = mcolors.to_rgba('C{}'.format(j))
assert tuple(cc.get_facecolors().squeeze()) == tuple(target)
target = mcolors.to_rgba('k')
for al in ['facecolor', 'facecolors', 'color']:
cc = ax.fill_between(range(3), range(3), **{al: 'k'})
assert tuple(cc.get_facecolors().squeeze()) == tuple(target)
edge_target = mcolors.to_rgba('k')
for j, el in enumerate(['edgecolor', 'edgecolors'], start=6):
cc = ax.fill_between(range(3), range(3), **{el: 'k'})
face_target = mcolors.to_rgba('C{}'.format(j))
assert tuple(cc.get_facecolors().squeeze()) == tuple(face_target)
assert tuple(cc.get_edgecolors().squeeze()) == tuple(edge_target)
def test_log_margins():
plt.rcParams['axes.autolimit_mode'] = 'data'
fig, ax = plt.subplots()
margin = 0.05
ax.set_xmargin(margin)
ax.semilogx([10, 100], [10, 100])
xlim0, xlim1 = ax.get_xlim()
transform = ax.xaxis.get_transform()
xlim0t, xlim1t = transform.transform([xlim0, xlim1])
x0t, x1t = transform.transform([10, 100])
delta = (x1t - x0t) * margin
assert_allclose([xlim0t + delta, xlim1t - delta], [x0t, x1t])
def test_color_length_mismatch():
N = 5
x, y = np.arange(N), np.arange(N)
colors = np.arange(N+1)
fig, ax = plt.subplots()
with pytest.raises(ValueError):
ax.scatter(x, y, c=colors)
c_rgb = (0.5, 0.5, 0.5)
ax.scatter(x, y, c=c_rgb)
ax.scatter(x, y, c=[c_rgb] * N)
def test_eventplot_legend():
plt.eventplot([1.0], label='Label')
plt.legend()
def test_bar_broadcast_args():
fig, ax = plt.subplots()
# Check that a bar chart with a single height for all bars works.
ax.bar(range(4), 1)
# Check that a horizontal chart with one width works.
ax.barh(0, 1, left=range(4), height=1)
# Check that edgecolor gets broadcast.
rect1, rect2 = ax.bar([0, 1], [0, 1], edgecolor=(.1, .2, .3, .4))
assert rect1.get_edgecolor() == rect2.get_edgecolor() == (.1, .2, .3, .4)
def test_invalid_axis_limits():
plt.plot([0, 1], [0, 1])
with pytest.raises(ValueError):
plt.xlim(np.nan)
with pytest.raises(ValueError):
plt.xlim(np.inf)
with pytest.raises(ValueError):
plt.ylim(np.nan)
with pytest.raises(ValueError):
plt.ylim(np.inf)
# Test all 4 combinations of logs/symlogs for minorticks_on()
@pytest.mark.parametrize('xscale', ['symlog', 'log'])
@pytest.mark.parametrize('yscale', ['symlog', 'log'])
def test_minorticks_on(xscale, yscale):
ax = plt.subplot()
ax.plot([1, 2, 3, 4])
ax.set_xscale(xscale)
ax.set_yscale(yscale)
ax.minorticks_on()
def test_twinx_knows_limits():
fig, ax = plt.subplots()
ax.axvspan(1, 2)
xtwin = ax.twinx()
xtwin.plot([0, 0.5], [1, 2])
# control axis
fig2, ax2 = plt.subplots()
ax2.axvspan(1, 2)
ax2.plot([0, 0.5], [1, 2])
assert_array_equal(xtwin.viewLim.intervalx, ax2.viewLim.intervalx)
def test_zero_linewidth():
# Check that setting a zero linewidth doesn't error
plt.plot([0, 1], [0, 1], ls='--', lw=0)
def test_empty_errorbar_legend():
fig, ax = plt.subplots()
ax.errorbar([], [], xerr=[], label='empty y')
ax.errorbar([], [], yerr=[], label='empty x')
ax.legend()
@check_figures_equal(extensions=["png"])
def test_plot_decimal(fig_test, fig_ref):
x0 = np.arange(-10, 10, 0.3)
y0 = [5.2 * x ** 3 - 2.1 * x ** 2 + 7.34 * x + 4.5 for x in x0]
x = [Decimal(i) for i in x0]
y = [Decimal(i) for i in y0]
# Test image - line plot with Decimal input
fig_test.subplots().plot(x, y)
# Reference image
fig_ref.subplots().plot(x0, y0)
# pdf and svg tests fail using travis' old versions of gs and inkscape.
@check_figures_equal(extensions=["png"])
def test_markerfacecolor_none_alpha(fig_test, fig_ref):
fig_test.subplots().plot(0, "o", mfc="none", alpha=.5)
fig_ref.subplots().plot(0, "o", mfc="w", alpha=.5)
def test_tick_padding_tightbbox():
"""Test that tick padding gets turned off if axis is off"""
plt.rcParams["xtick.direction"] = "out"
plt.rcParams["ytick.direction"] = "out"
fig, ax = plt.subplots()
bb = ax.get_tightbbox(fig.canvas.get_renderer())
ax.axis('off')
bb2 = ax.get_tightbbox(fig.canvas.get_renderer())
assert bb.x0 < bb2.x0
assert bb.y0 < bb2.y0
def test_inset():
"""
Ensure that inset_ax argument is indeed optional
"""
dx, dy = 0.05, 0.05
# generate 2 2d grids for the x & y bounds
y, x = np.mgrid[slice(1, 5 + dy, dy),
slice(1, 5 + dx, dx)]
z = np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x)
fig, ax = plt.subplots()
ax.pcolormesh(x, y, z[:-1, :-1])
ax.set_aspect(1.)
ax.apply_aspect()
# we need to apply_aspect to make the drawing below work.
xlim = [1.5, 2.15]
ylim = [2, 2.5]
rect = [xlim[0], ylim[0], xlim[1] - xlim[0], ylim[1] - ylim[0]]
rec, connectors = ax.indicate_inset(bounds=rect)
assert connectors is None
fig.canvas.draw()
xx = np.array([[1.5, 2.],
[2.15, 2.5]])
assert np.all(rec.get_bbox().get_points() == xx)
def test_zoom_inset():
dx, dy = 0.05, 0.05
# generate 2 2d grids for the x & y bounds
y, x = np.mgrid[slice(1, 5 + dy, dy),
slice(1, 5 + dx, dx)]
z = np.sin(x)**10 + np.cos(10 + y*x) * np.cos(x)
fig, ax = plt.subplots()
ax.pcolormesh(x, y, z[:-1, :-1])
ax.set_aspect(1.)
ax.apply_aspect()
# we need to apply_aspect to make the drawing below work.
# Make the inset_axes... Position axes coordinates...
axin1 = ax.inset_axes([0.7, 0.7, 0.35, 0.35])
# redraw the data in the inset axes...
axin1.pcolormesh(x, y, z[:-1, :-1])
axin1.set_xlim([1.5, 2.15])
axin1.set_ylim([2, 2.5])
axin1.set_aspect(ax.get_aspect())
rec, connectors = ax.indicate_inset_zoom(axin1)
assert len(connectors) == 4
fig.canvas.draw()
xx = np.array([[1.5, 2.],
[2.15, 2.5]])
    assert np.all(rec.get_bbox().get_points() == xx)
xx = np.array([[0.6325, 0.692308],
[0.8425, 0.907692]])
np.testing.assert_allclose(
axin1.get_position().get_points(), xx, rtol=1e-4)
@pytest.mark.parametrize('x_inverted', [False, True])
@pytest.mark.parametrize('y_inverted', [False, True])
def test_indicate_inset_inverted(x_inverted, y_inverted):
"""
Test that the inset lines are correctly located with inverted data axes.
"""
fig, (ax1, ax2) = plt.subplots(1, 2)
x = np.arange(10)
ax1.plot(x, x, 'o')
if x_inverted:
ax1.invert_xaxis()
if y_inverted:
ax1.invert_yaxis()
rect, bounds = ax1.indicate_inset([2, 2, 5, 4], ax2)
lower_left, upper_left, lower_right, upper_right = bounds
sign_x = -1 if x_inverted else 1
sign_y = -1 if y_inverted else 1
assert sign_x * (lower_right.xy2[0] - lower_left.xy2[0]) > 0
assert sign_x * (upper_right.xy2[0] - upper_left.xy2[0]) > 0
assert sign_y * (upper_left.xy2[1] - lower_left.xy2[1]) > 0
assert sign_y * (upper_right.xy2[1] - lower_right.xy2[1]) > 0
def test_set_position():
fig, ax = plt.subplots()
ax.set_aspect(3.)
ax.set_position([0.1, 0.1, 0.4, 0.4], which='both')
assert np.allclose(ax.get_position().width, 0.1)
ax.set_aspect(2.)
ax.set_position([0.1, 0.1, 0.4, 0.4], which='original')
assert np.allclose(ax.get_position().width, 0.15)
ax.set_aspect(3.)
ax.set_position([0.1, 0.1, 0.4, 0.4], which='active')
assert np.allclose(ax.get_position().width, 0.1)
def test_spines_properbbox_after_zoom():
fig, ax = plt.subplots()
bb = ax.spines.bottom.get_window_extent(fig.canvas.get_renderer())
# this is what zoom calls:
ax._set_view_from_bbox((320, 320, 500, 500), 'in',
None, False, False)
bb2 = ax.spines.bottom.get_window_extent(fig.canvas.get_renderer())
np.testing.assert_allclose(bb.get_points(), bb2.get_points(), rtol=1e-6)
def test_cartopy_backcompat():
class Dummy(matplotlib.axes.Axes):
...
class DummySubplot(matplotlib.axes.SubplotBase, Dummy):
_axes_class = Dummy
matplotlib.axes._subplots._subplot_classes[Dummy] = DummySubplot
FactoryDummySubplot = matplotlib.axes.subplot_class_factory(Dummy)
assert DummySubplot is FactoryDummySubplot
def test_gettightbbox_ignore_nan():
fig, ax = plt.subplots()
remove_ticks_and_titles(fig)
ax.text(np.NaN, 1, 'Boo')
renderer = fig.canvas.get_renderer()
np.testing.assert_allclose(ax.get_tightbbox(renderer).width, 496)
def test_scatter_series_non_zero_index(pd):
# create non-zero index
ids = range(10, 18)
x = pd.Series(np.random.uniform(size=8), index=ids)
y = pd.Series(np.random.uniform(size=8), index=ids)
c = pd.Series([1, 1, 1, 1, 1, 0, 0, 0], index=ids)
plt.scatter(x, y, c)
def test_scatter_empty_data():
# making sure this does not raise an exception
plt.scatter([], [])
plt.scatter([], [], s=[], c=[])
@image_comparison(['annotate_across_transforms.png'],
style='mpl20', remove_text=True)
def test_annotate_across_transforms():
x = np.linspace(0, 10, 200)
y = np.exp(-x) * np.sin(x)
fig, ax = plt.subplots(figsize=(3.39, 3))
ax.plot(x, y)
axins = ax.inset_axes([0.4, 0.5, 0.3, 0.3])
axins.set_aspect(0.2)
axins.xaxis.set_visible(False)
axins.yaxis.set_visible(False)
ax.annotate("", xy=(x[150], y[150]), xycoords=ax.transData,
xytext=(1, 0), textcoords=axins.transAxes,
arrowprops=dict(arrowstyle="->"))
@image_comparison(['secondary_xy.png'], style='mpl20')
def test_secondary_xy():
fig, axs = plt.subplots(1, 2, figsize=(10, 5), constrained_layout=True)
def invert(x):
with np.errstate(divide='ignore'):
return 1 / x
for nn, ax in enumerate(axs):
ax.plot(np.arange(2, 11), np.arange(2, 11))
if nn == 0:
secax = ax.secondary_xaxis
else:
secax = ax.secondary_yaxis
secax(0.2, functions=(invert, invert))
secax(0.4, functions=(lambda x: 2 * x, lambda x: x / 2))
secax(0.6, functions=(lambda x: x**2, lambda x: x**(1/2)))
secax(0.8)
def test_secondary_fail():
fig, ax = plt.subplots()
ax.plot(np.arange(2, 11), np.arange(2, 11))
with pytest.raises(ValueError):
ax.secondary_xaxis(0.2, functions=(lambda x: 1 / x))
with pytest.raises(ValueError):
ax.secondary_xaxis('right')
with pytest.raises(ValueError):
ax.secondary_yaxis('bottom')
def test_secondary_resize():
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(np.arange(2, 11), np.arange(2, 11))
def invert(x):
with np.errstate(divide='ignore'):
return 1 / x
ax.secondary_xaxis('top', functions=(invert, invert))
fig.canvas.draw()
fig.set_size_inches((7, 4))
assert_allclose(ax.get_position().extents, [0.125, 0.1, 0.9, 0.9])
def test_secondary_minorloc():
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(np.arange(2, 11), np.arange(2, 11))
def invert(x):
with np.errstate(divide='ignore'):
return 1 / x
secax = ax.secondary_xaxis('top', functions=(invert, invert))
assert isinstance(secax._axis.get_minor_locator(),
mticker.NullLocator)
secax.minorticks_on()
assert isinstance(secax._axis.get_minor_locator(),
mticker.AutoMinorLocator)
ax.set_xscale('log')
plt.draw()
assert isinstance(secax._axis.get_minor_locator(),
mticker.LogLocator)
ax.set_xscale('linear')
plt.draw()
assert isinstance(secax._axis.get_minor_locator(),
mticker.NullLocator)
def test_secondary_formatter():
fig, ax = plt.subplots()
ax.set_xscale("log")
secax = ax.secondary_xaxis("top")
secax.xaxis.set_major_formatter(mticker.ScalarFormatter())
fig.canvas.draw()
assert isinstance(
secax.xaxis.get_major_formatter(), mticker.ScalarFormatter)
def color_boxes(fig, axs):
"""
Helper for the tests below that test the extents of various axes elements
"""
fig.canvas.draw()
renderer = fig.canvas.get_renderer()
bbaxis = []
for nn, axx in enumerate([axs.xaxis, axs.yaxis]):
bb = axx.get_tightbbox(renderer)
if bb:
axisr = plt.Rectangle(
(bb.x0, bb.y0), width=bb.width, height=bb.height,
linewidth=0.7, edgecolor='y', facecolor="none", transform=None,
zorder=3)
fig.add_artist(axisr)
bbaxis += [bb]
bbspines = []
for nn, a in enumerate(['bottom', 'top', 'left', 'right']):
bb = axs.spines[a].get_window_extent(renderer)
spiner = plt.Rectangle(
(bb.x0, bb.y0), width=bb.width, height=bb.height,
linewidth=0.7, edgecolor="green", facecolor="none", transform=None,
zorder=3)
fig.add_artist(spiner)
bbspines += [bb]
bb = axs.get_window_extent()
rect2 = plt.Rectangle(
(bb.x0, bb.y0), width=bb.width, height=bb.height,
linewidth=1.5, edgecolor="magenta", facecolor="none", transform=None,
zorder=2)
fig.add_artist(rect2)
bbax = bb
bb2 = axs.get_tightbbox(renderer)
rect2 = plt.Rectangle(
(bb2.x0, bb2.y0), width=bb2.width, height=bb2.height,
linewidth=3, edgecolor="red", facecolor="none", transform=None,
zorder=1)
fig.add_artist(rect2)
bbtb = bb2
return bbaxis, bbspines, bbax, bbtb
def test_normal_axes():
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
fig.canvas.draw()
plt.close(fig)
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
# test the axis bboxes
target = [
[123.375, 75.88888888888886, 983.25, 33.0],
[85.51388888888889, 99.99999999999997, 53.375, 993.0]
]
for nn, b in enumerate(bbaxis):
targetbb = mtransforms.Bbox.from_bounds(*target[nn])
assert_array_almost_equal(b.bounds, targetbb.bounds, decimal=2)
target = [
[150.0, 119.999, 930.0, 11.111],
[150.0, 1080.0, 930.0, 0.0],
[150.0, 119.9999, 11.111, 960.0],
[1068.8888, 119.9999, 11.111, 960.0]
]
for nn, b in enumerate(bbspines):
targetbb = mtransforms.Bbox.from_bounds(*target[nn])
assert_array_almost_equal(b.bounds, targetbb.bounds, decimal=2)
target = [150.0, 119.99999999999997, 930.0, 960.0]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_array_almost_equal(bbax.bounds, targetbb.bounds, decimal=2)
target = [85.5138, 75.88888, 1021.11, 1017.11]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_array_almost_equal(bbtb.bounds, targetbb.bounds, decimal=2)
# test that get_position roundtrips to get_window_extent
axbb = ax.get_position().transformed(fig.transFigure).bounds
assert_array_almost_equal(axbb, ax.get_window_extent().bounds, decimal=2)
def test_nodecorator():
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
fig.canvas.draw()
ax.set(xticklabels=[], yticklabels=[])
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
# test the axis bboxes
for nn, b in enumerate(bbaxis):
assert b is None
target = [
[150.0, 119.999, 930.0, 11.111],
[150.0, 1080.0, 930.0, 0.0],
[150.0, 119.9999, 11.111, 960.0],
[1068.8888, 119.9999, 11.111, 960.0]
]
for nn, b in enumerate(bbspines):
targetbb = mtransforms.Bbox.from_bounds(*target[nn])
assert_allclose(b.bounds, targetbb.bounds, atol=1e-2)
target = [150.0, 119.99999999999997, 930.0, 960.0]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_allclose(bbax.bounds, targetbb.bounds, atol=1e-2)
target = [150., 120., 930., 960.]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_allclose(bbtb.bounds, targetbb.bounds, atol=1e-2)
def test_displaced_spine():
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
ax.set(xticklabels=[], yticklabels=[])
ax.spines.bottom.set_position(('axes', -0.1))
fig.canvas.draw()
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
targets = [
[150., 24., 930., 11.111111],
[150.0, 1080.0, 930.0, 0.0],
[150.0, 119.9999, 11.111, 960.0],
[1068.8888, 119.9999, 11.111, 960.0]
]
for target, bbspine in zip(targets, bbspines):
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_allclose(bbspine.bounds, targetbb.bounds, atol=1e-2)
target = [150.0, 119.99999999999997, 930.0, 960.0]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_allclose(bbax.bounds, targetbb.bounds, atol=1e-2)
target = [150., 24., 930., 1056.]
targetbb = mtransforms.Bbox.from_bounds(*target)
assert_allclose(bbtb.bounds, targetbb.bounds, atol=1e-2)
def test_tickdirs():
"""
Switch the tickdirs and make sure the bboxes switch with them
"""
targets = [[[150.0, 120.0, 930.0, 11.1111],
[150.0, 120.0, 11.111, 960.0]],
[[150.0, 108.8889, 930.0, 11.111111111111114],
[138.889, 120, 11.111, 960.0]],
[[150.0, 114.44444444444441, 930.0, 11.111111111111114],
[144.44444444444446, 119.999, 11.111, 960.0]]]
for dnum, dirs in enumerate(['in', 'out', 'inout']):
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
ax.tick_params(direction=dirs)
fig.canvas.draw()
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
for nn, num in enumerate([0, 2]):
targetbb = mtransforms.Bbox.from_bounds(*targets[dnum][nn])
assert_allclose(
bbspines[num].bounds, targetbb.bounds, atol=1e-2)
def test_minor_accountedfor():
with rc_context({'_internal.classic_mode': False}):
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
fig.canvas.draw()
ax.tick_params(which='both', direction='out')
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
targets = [[150.0, 108.88888888888886, 930.0, 11.111111111111114],
[138.8889, 119.9999, 11.1111, 960.0]]
for n in range(2):
targetbb = mtransforms.Bbox.from_bounds(*targets[n])
assert_allclose(
bbspines[n * 2].bounds, targetbb.bounds, atol=1e-2)
fig, ax = plt.subplots(dpi=200, figsize=(6, 6))
fig.canvas.draw()
ax.tick_params(which='both', direction='out')
ax.minorticks_on()
ax.tick_params(axis='both', which='minor', length=30)
fig.canvas.draw()
bbaxis, bbspines, bbax, bbtb = color_boxes(fig, ax)
targets = [[150.0, 36.66666666666663, 930.0, 83.33333333333334],
[66.6667, 120.0, 83.3333, 960.0]]
for n in range(2):
targetbb = mtransforms.Bbox.from_bounds(*targets[n])
assert_allclose(
bbspines[n * 2].bounds, targetbb.bounds, atol=1e-2)
@check_figures_equal(extensions=["png"])
def test_axis_bool_arguments(fig_test, fig_ref):
# Test if False and "off" give the same
fig_test.add_subplot(211).axis(False)
fig_ref.add_subplot(211).axis("off")
# Test if True after False gives the same as "on"
ax = fig_test.add_subplot(212)
ax.axis(False)
ax.axis(True)
fig_ref.add_subplot(212).axis("on")
def test_axis_extent_arg():
fig, ax = plt.subplots()
xmin = 5
xmax = 10
ymin = 15
ymax = 20
extent = ax.axis([xmin, xmax, ymin, ymax])
# test that the docstring is correct
assert tuple(extent) == (xmin, xmax, ymin, ymax)
# test that limits were set per the docstring
assert (xmin, xmax) == ax.get_xlim()
assert (ymin, ymax) == ax.get_ylim()
def test_datetime_masked():
# make sure that all-masked data falls back to the viewlim
# set in convert.axisinfo....
x = np.array([datetime.datetime(2017, 1, n) for n in range(1, 6)])
y = np.array([1, 2, 3, 4, 5])
m = np.ma.masked_greater(y, 0)
fig, ax = plt.subplots()
ax.plot(x, m)
dt = mdates.date2num(np.datetime64('0000-12-31'))
assert ax.get_xlim() == (730120.0 + dt, 733773.0 + dt)
def test_hist_auto_bins():
_, bins, _ = plt.hist([[1, 2, 3], [3, 4, 5, 6]], bins='auto')
assert bins[0] <= 1
assert bins[-1] >= 6
def test_hist_nan_data():
fig, (ax1, ax2) = plt.subplots(2)
data = [1, 2, 3]
nan_data = data + [np.nan]
bins, edges, _ = ax1.hist(data)
with np.errstate(invalid='ignore'):
nanbins, nanedges, _ = ax2.hist(nan_data)
np.testing.assert_allclose(bins, nanbins)
np.testing.assert_allclose(edges, nanedges)
def test_hist_range_and_density():
_, bins, _ = plt.hist(np.random.rand(10), "auto",
range=(0, 1), density=True)
assert bins[0] == 0
assert bins[-1] == 1
def test_bar_errbar_zorder():
# Check that the zorder of errorbars is always greater than the bar they
# are plotted on
fig, ax = plt.subplots()
x = [1, 2, 3]
barcont = ax.bar(x=x, height=x, yerr=x, capsize=5, zorder=3)
data_line, caplines, barlinecols = barcont.errorbar.lines
for bar in barcont.patches:
for capline in caplines:
assert capline.zorder > bar.zorder
for barlinecol in barlinecols:
assert barlinecol.zorder > bar.zorder
def test_set_ticks_inverted():
fig, ax = plt.subplots()
ax.invert_xaxis()
ax.set_xticks([.3, .7])
assert ax.get_xlim() == (1, 0)
def test_aspect_nonlinear_adjustable_box():
fig = plt.figure(figsize=(10, 10)) # Square.
ax = fig.add_subplot()
ax.plot([.4, .6], [.4, .6]) # Set minpos to keep logit happy.
ax.set(xscale="log", xlim=(1, 10),
yscale="logit", ylim=(1/11, 1/1001),
aspect=1, adjustable="box")
ax.margins(0)
pos = fig.transFigure.transform_bbox(ax.get_position())
assert pos.height / pos.width == pytest.approx(2)
def test_aspect_nonlinear_adjustable_datalim():
fig = plt.figure(figsize=(10, 10)) # Square.
ax = fig.add_axes([.1, .1, .8, .8]) # Square.
ax.plot([.4, .6], [.4, .6]) # Set minpos to keep logit happy.
ax.set(xscale="log", xlim=(1, 100),
yscale="logit", ylim=(1 / 101, 1 / 11),
aspect=1, adjustable="datalim")
ax.margins(0)
ax.apply_aspect()
assert ax.get_xlim() == pytest.approx([1*10**(1/2), 100/10**(1/2)])
assert ax.get_ylim() == (1 / 101, 1 / 11)
def test_box_aspect():
# Test if axes with box_aspect=1 has same dimensions
# as axes with aspect equal and adjustable="box"
fig1, ax1 = plt.subplots()
axtwin = ax1.twinx()
axtwin.plot([12, 344])
ax1.set_box_aspect(1)
fig2, ax2 = plt.subplots()
ax2.margins(0)
ax2.plot([0, 2], [6, 8])
ax2.set_aspect("equal", adjustable="box")
fig1.canvas.draw()
fig2.canvas.draw()
bb1 = ax1.get_position()
bbt = axtwin.get_position()
bb2 = ax2.get_position()
assert_array_equal(bb1.extents, bb2.extents)
assert_array_equal(bbt.extents, bb2.extents)
def test_box_aspect_custom_position():
# Test if axes with custom position and box_aspect
# behaves the same independent of the order of setting those.
fig1, ax1 = plt.subplots()
ax1.set_position([0.1, 0.1, 0.9, 0.2])
fig1.canvas.draw()
ax1.set_box_aspect(1.)
fig2, ax2 = plt.subplots()
ax2.set_box_aspect(1.)
fig2.canvas.draw()
ax2.set_position([0.1, 0.1, 0.9, 0.2])
fig1.canvas.draw()
fig2.canvas.draw()
bb1 = ax1.get_position()
bb2 = ax2.get_position()
assert_array_equal(bb1.extents, bb2.extents)
def test_bbox_aspect_axes_init():
# Test that box_aspect can be given to axes init and produces
# all equal square axes.
fig, axs = plt.subplots(2, 3, subplot_kw=dict(box_aspect=1),
constrained_layout=True)
fig.canvas.draw()
renderer = fig.canvas.get_renderer()
sizes = []
for ax in axs.flat:
bb = ax.get_window_extent(renderer)
sizes.extend([bb.width, bb.height])
assert_allclose(sizes, sizes[0])
def test_redraw_in_frame():
fig, ax = plt.subplots(1, 1)
ax.plot([1, 2, 3])
fig.canvas.draw()
ax.redraw_in_frame()
def test_invisible_axes():
# invisible axes should not respond to events...
fig, ax = plt.subplots()
assert fig.canvas.inaxes((200, 200)) is not None
ax.set_visible(False)
assert fig.canvas.inaxes((200, 200)) is None
def test_xtickcolor_is_not_markercolor():
plt.rcParams['lines.markeredgecolor'] = 'white'
ax = plt.axes()
ticks = ax.xaxis.get_major_ticks()
for tick in ticks:
assert tick.tick1line.get_markeredgecolor() != 'white'
def test_ytickcolor_is_not_markercolor():
plt.rcParams['lines.markeredgecolor'] = 'white'
ax = plt.axes()
ticks = ax.yaxis.get_major_ticks()
for tick in ticks:
assert tick.tick1line.get_markeredgecolor() != 'white'
@pytest.mark.parametrize('axis', ('x', 'y'))
@pytest.mark.parametrize('auto', (True, False, None))
def test_unautoscale(axis, auto):
fig, ax = plt.subplots()
x = np.arange(100)
y = np.linspace(-.1, .1, 100)
ax.scatter(y, x)
get_autoscale_on = getattr(ax, f'get_autoscale{axis}_on')
set_lim = getattr(ax, f'set_{axis}lim')
get_lim = getattr(ax, f'get_{axis}lim')
post_auto = get_autoscale_on() if auto is None else auto
set_lim((-0.5, 0.5), auto=auto)
assert post_auto == get_autoscale_on()
fig.canvas.draw()
assert_array_equal(get_lim(), (-0.5, 0.5))
@check_figures_equal(extensions=["png"])
def test_polar_interpolation_steps_variable_r(fig_test, fig_ref):
l, = fig_test.add_subplot(projection="polar").plot([0, np.pi/2], [1, 2])
l.get_path()._interpolation_steps = 100
fig_ref.add_subplot(projection="polar").plot(
np.linspace(0, np.pi/2, 101), np.linspace(1, 2, 101))
@pytest.mark.style('default')
def test_autoscale_tiny_sticky():
fig, ax = plt.subplots()
ax.bar(0, 1e-9)
fig.canvas.draw()
assert ax.get_ylim() == (0, 1.05e-9)
def test_xtickcolor_is_not_xticklabelcolor():
plt.rcParams['xtick.color'] = 'yellow'
plt.rcParams['xtick.labelcolor'] = 'blue'
ax = plt.axes()
ticks = ax.xaxis.get_major_ticks()
for tick in ticks:
assert tick.tick1line.get_color() == 'yellow'
assert tick.label1.get_color() == 'blue'
def test_ytickcolor_is_not_yticklabelcolor():
plt.rcParams['ytick.color'] = 'yellow'
plt.rcParams['ytick.labelcolor'] = 'blue'
ax = plt.axes()
ticks = ax.yaxis.get_major_ticks()
for tick in ticks:
assert tick.tick1line.get_color() == 'yellow'
assert tick.label1.get_color() == 'blue'
@pytest.mark.parametrize('size', [size for size in mfont_manager.font_scalings
if size is not None] + [8, 10, 12])
@pytest.mark.style('default')
def test_relative_ticklabel_sizes(size):
mpl.rcParams['xtick.labelsize'] = size
mpl.rcParams['ytick.labelsize'] = size
fig, ax = plt.subplots()
fig.canvas.draw()
for name, axis in zip(['x', 'y'], [ax.xaxis, ax.yaxis]):
for tick in axis.get_major_ticks():
assert tick.label1.get_size() == axis._get_tick_label_size(name)
def test_multiplot_autoscale():
fig = plt.figure()
ax1, ax2 = fig.subplots(2, 1, sharex='all')
ax1.scatter([1, 2, 3, 4], [2, 3, 2, 3])
ax2.axhspan(-5, 5)
xlim = ax1.get_xlim()
assert np.allclose(xlim, [0.5, 4.5])
def test_sharing_does_not_link_positions():
fig = plt.figure()
ax0 = fig.add_subplot(221)
ax1 = fig.add_axes([.6, .6, .3, .3], sharex=ax0)
init_pos = ax1.get_position()
fig.subplots_adjust(left=0)
assert (ax1.get_position().get_points() == init_pos.get_points()).all()
@check_figures_equal(extensions=["pdf"])
def test_2dcolor_plot(fig_test, fig_ref):
color = np.array([0.1, 0.2, 0.3])
# plot with 1D-color:
axs = fig_test.subplots(5)
axs[0].plot([1, 2], [1, 2], c=color.reshape(-1))
axs[1].scatter([1, 2], [1, 2], c=color.reshape(-1))
axs[2].step([1, 2], [1, 2], c=color.reshape(-1))
axs[3].hist(np.arange(10), color=color.reshape(-1))
axs[4].bar(np.arange(10), np.arange(10), color=color.reshape(-1))
# plot with 2D-color:
axs = fig_ref.subplots(5)
axs[0].plot([1, 2], [1, 2], c=color.reshape((1, -1)))
axs[1].scatter([1, 2], [1, 2], c=color.reshape((1, -1)))
axs[2].step([1, 2], [1, 2], c=color.reshape((1, -1)))
axs[3].hist(np.arange(10), color=color.reshape((1, -1)))
axs[4].bar(np.arange(10), np.arange(10), color=color.reshape((1, -1)))
@check_figures_equal(extensions=['png'])
def test_shared_axes_clear(fig_test, fig_ref):
x = np.arange(0.0, 2*np.pi, 0.01)
y = np.sin(x)
axs = fig_ref.subplots(2, 2, sharex=True, sharey=True)
for ax in axs.flat:
ax.plot(x, y)
axs = fig_test.subplots(2, 2, sharex=True, sharey=True)
for ax in axs.flat:
ax.clear()
ax.plot(x, y)
def test_shared_axes_retick():
fig, axs = plt.subplots(2, 2, sharex='all', sharey='all')
for ax in axs.flat:
ax.plot([0, 2], 'o-')
axs[0, 0].set_xticks([-0.5, 0, 1, 1.5]) # should affect all axes xlims
for ax in axs.flat:
assert ax.get_xlim() == axs[0, 0].get_xlim()
axs[0, 0].set_yticks([-0.5, 0, 2, 2.5]) # should affect all axes ylims
for ax in axs.flat:
assert ax.get_ylim() == axs[0, 0].get_ylim()
@pytest.mark.parametrize('ha', ['left', 'center', 'right'])
def test_ylabel_ha_with_position(ha):
fig = Figure()
ax = fig.subplots()
ax.set_ylabel("test", y=1, ha=ha)
ax.yaxis.set_label_position("right")
assert ax.yaxis.get_label().get_ha() == ha
def test_bar_label_location_vertical():
ax = plt.gca()
xs, heights = [1, 2], [3, -4]
rects = ax.bar(xs, heights)
labels = ax.bar_label(rects)
assert labels[0].xy == (xs[0], heights[0])
assert labels[0].get_ha() == 'center'
assert labels[0].get_va() == 'bottom'
assert labels[1].xy == (xs[1], heights[1])
assert labels[1].get_ha() == 'center'
assert labels[1].get_va() == 'top'
def test_bar_label_location_horizontal():
ax = plt.gca()
ys, widths = [1, 2], [3, -4]
rects = ax.barh(ys, widths)
labels = ax.bar_label(rects)
assert labels[0].xy == (widths[0], ys[0])
assert labels[0].get_ha() == 'left'
assert labels[0].get_va() == 'center'
assert labels[1].xy == (widths[1], ys[1])
assert labels[1].get_ha() == 'right'
assert labels[1].get_va() == 'center'
def test_bar_label_location_center():
ax = plt.gca()
ys, widths = [1, 2], [3, -4]
rects = ax.barh(ys, widths)
labels = ax.bar_label(rects, label_type='center')
assert labels[0].xy == (widths[0] / 2, ys[0])
assert labels[0].get_ha() == 'center'
assert labels[0].get_va() == 'center'
assert labels[1].xy == (widths[1] / 2, ys[1])
assert labels[1].get_ha() == 'center'
assert labels[1].get_va() == 'center'
def test_bar_label_location_errorbars():
ax = plt.gca()
xs, heights = [1, 2], [3, -4]
rects = ax.bar(xs, heights, yerr=1)
labels = ax.bar_label(rects)
assert labels[0].xy == (xs[0], heights[0] + 1)
assert labels[0].get_ha() == 'center'
assert labels[0].get_va() == 'bottom'
assert labels[1].xy == (xs[1], heights[1] - 1)
assert labels[1].get_ha() == 'center'
assert labels[1].get_va() == 'top'
def test_bar_label_fmt():
ax = plt.gca()
rects = ax.bar([1, 2], [3, -4])
labels = ax.bar_label(rects, fmt='%.2f')
assert labels[0].get_text() == '3.00'
assert labels[1].get_text() == '-4.00'
def test_bar_label_labels():
ax = plt.gca()
rects = ax.bar([1, 2], [3, -4])
labels = ax.bar_label(rects, labels=['A', 'B'])
assert labels[0].get_text() == 'A'
assert labels[1].get_text() == 'B'
def test_bar_label_nan_ydata():
ax = plt.gca()
bars = ax.bar([2, 3], [np.nan, 1])
labels = ax.bar_label(bars)
assert [l.get_text() for l in labels] == ['', '1']
assert labels[0].xy == (2, 0)
assert labels[0].get_va() == 'bottom'
def test_patch_bounds(): # PR 19078
fig, ax = plt.subplots()
ax.add_patch(mpatches.Wedge((0, -1), 1.05, 60, 120, 0.1))
bot = 1.9*np.sin(15*np.pi/180)**2
np.testing.assert_array_almost_equal_nulp(
np.array((-0.525, -(bot+0.05), 1.05, bot+0.1)), ax.dataLim.bounds, 16)
@pytest.mark.style('default')
def test_warn_ignored_scatter_kwargs():
with pytest.warns(UserWarning,
match=r"You passed a edgecolor/edgecolors"):
c = plt.scatter(
[0], [0], marker="+", s=500, facecolor="r", edgecolor="b"
)
| 33.11763
| 89
| 0.596016
|
7950e61f996e5d8be1560adeccfb2e045356dd7e
| 1,309
|
py
|
Python
|
tests/test_transaction.py
|
nikola-kocic/sqlalchemy-continuum
|
45b8ada3162435670dbe844b3d630823fa50f6fc
|
[
"BSD-3-Clause"
] | 1
|
2015-04-25T18:42:22.000Z
|
2015-04-25T18:42:22.000Z
|
tests/test_transaction.py
|
nikola-kocic/sqlalchemy-continuum
|
45b8ada3162435670dbe844b3d630823fa50f6fc
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_transaction.py
|
nikola-kocic/sqlalchemy-continuum
|
45b8ada3162435670dbe844b3d630823fa50f6fc
|
[
"BSD-3-Clause"
] | null | null | null |
from sqlalchemy_continuum import versioning_manager
from tests import TestCase
class TestTransaction(TestCase):
def setup_method(self, method):
TestCase.setup_method(self, method)
self.article = self.Article()
self.article.name = u'Some article'
self.article.content = u'Some content'
self.article.tags.append(self.Tag(name=u'Some tag'))
self.session.add(self.article)
self.session.commit()
def test_relationships(self):
tx = self.article.versions[0].transaction
assert tx.id == self.article.versions[0].transaction_id
assert tx.articles == [self.article.versions[0]]
def test_only_saves_transaction_if_actual_modifications(self):
self.article.name = u'Some article'
self.session.commit()
self.article.name = u'Some article'
self.session.commit()
assert self.session.query(
versioning_manager.transaction_cls
).count() == 1
def test_repr(self):
transaction = self.session.query(
versioning_manager.transaction_cls
).first()
assert (
'<Transaction id=%d, issued_at=%r>' % (
transaction.id,
transaction.issued_at
) ==
repr(transaction)
)
| 32.725
| 66
| 0.625668
|
7950e6322810b6d2c32bb03365b23aa9091e390d
| 389
|
py
|
Python
|
oo/pessoa.py
|
Francisco-Mario/pythonbirds
|
a2aeec8821481d740d208462c620a542e761b2c6
|
[
"MIT"
] | null | null | null |
oo/pessoa.py
|
Francisco-Mario/pythonbirds
|
a2aeec8821481d740d208462c620a542e761b2c6
|
[
"MIT"
] | null | null | null |
oo/pessoa.py
|
Francisco-Mario/pythonbirds
|
a2aeec8821481d740d208462c620a542e761b2c6
|
[
"MIT"
] | null | null | null |
class Pessoa:
def __init__(self, nome=None, idade=57):
self.idade = idade
self.nome = nome
def cumprimentar(self):
return f'Olá! {id(self)}'
if __name__ == '__main__':
p = Pessoa('Francisco')
print(Pessoa.cumprimentar(p))
print(id(p))
print(p.cumprimentar())
print(p.nome)
p.nome = 'Mario'
print(p.nome)
print(p.idade)
| 17.681818
| 44
| 0.583548
|
7950e648c001033379e1384ba37cc9b6354835e1
| 3,882
|
py
|
Python
|
venv/Lib/site-packages/wand/compat.py
|
18813684097/new_di
|
cb8f117dee65bdf2cb8d8db5d3a585e23e73cb86
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/wand/compat.py
|
18813684097/new_di
|
cb8f117dee65bdf2cb8d8db5d3a585e23e73cb86
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/wand/compat.py
|
18813684097/new_di
|
cb8f117dee65bdf2cb8d8db5d3a585e23e73cb86
|
[
"MIT"
] | null | null | null |
""":mod:`wand.compat` --- Compatibility layer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides several subtle things to support
multiple Python versions (2.6, 2.7, 3.3+) and VM implementations
(CPython, PyPy).
"""
import collections
try:
import collections.abc
except ImportError:
pass
import contextlib
import io
import sys
import types
__all__ = ('PY3', 'abc', 'binary', 'binary_type', 'encode_filename',
'file_types', 'nested', 'string_type', 'text', 'text_type',
'xrange')
#: (:class:`bool`) Whether it is Python 3.x or not.
PY3 = sys.version_info >= (3,)
#: (:class:`module`) Module containing abstract base classes.
#: :mod:`collections` in Python 2 and :mod:`collections.abc` in Python 3.
abc = collections.abc if PY3 else collections
#: (:class:`type`) Type for representing binary data. :class:`str` in Python 2
#: and :class:`bytes` in Python 3.
binary_type = bytes if PY3 else str
#: (:class:`type`) Type for text data. :class:`basestring` in Python 2
#: and :class:`str` in Python 3.
string_type = str if PY3 else basestring # noqa
#: (:class:`type`) Type for representing Unicode textual data.
#: :class:`unicode` in Python 2 and :class:`str` in Python 3.
text_type = str if PY3 else unicode # noqa
def binary(string, var=None):
"""Makes ``string`` to :class:`str` in Python 2.
Makes ``string`` to :class:`bytes` in Python 3.
:param string: a string to cast it to :data:`binary_type`
:type string: :class:`bytes`, :class:`str`, :class:`unicode`
:param var: an optional variable name to be used for error message
:type var: :class:`str`
"""
if isinstance(string, text_type):
return string.encode()
elif isinstance(string, binary_type):
return string
if var:
raise TypeError('{0} must be a string, not {1!r}'.format(var, string))
raise TypeError('expected a string, not ' + repr(string))
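# Hedged usage sketch (not part of the original module): binary() normalizes
# either text or bytes to the binary type, so on Python 3
#   binary(u'wand')  -> b'wand'
#   binary(b'wand')  -> b'wand'
#   binary(123, var='fmt') raises TypeError("fmt must be a string, not 123")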
if PY3:
def text(string):
if isinstance(string, bytes):
return string.decode('utf-8')
return string
else:
def text(string):
"""Makes ``string`` to :class:`str` in Python 3.
Does nothing in Python 2.
:param string: a string to cast it to :data:`text_type`
:type string: :class:`bytes`, :class:`str`, :class:`unicode`
"""
return string
#: The :func:`xrange()` function. Alias for :func:`range()` in Python 3.
xrange = range if PY3 else xrange # noqa
#: (:class:`type`, :class:`tuple`) Types for file objects that have
#: ``fileno()``.
file_types = io.RawIOBase if PY3 else (io.RawIOBase, types.FileType)
def encode_filename(filename):
"""If ``filename`` is a :data:`text_type`, encode it to
:data:`binary_type` according to filesystem's default encoding.
"""
if isinstance(filename, text_type):
return filename.encode(sys.getfilesystemencoding())
return filename
try:
nested = contextlib.nested
except AttributeError:
# http://hg.python.org/cpython/file/v2.7.6/Lib/contextlib.py#l88
@contextlib.contextmanager
def nested(*managers):
exits = []
vars = []
exc = (None, None, None)
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except: # noqa: E722
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except: # noqa: E722
exc = sys.exc_info()
if exc != (None, None, None):
# PEP 3109
e = exc[0](exc[1])
                e.__traceback__ = exc[2]
raise e
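# Hedged usage sketch (file names are examples): nested() enters several context
# managers as one, mirroring contextlib.nested from Python 2:
#   with nested(open('a.txt'), open('b.txt')) as (fa, fb):
#       combined = fa.read() + fb.read()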
| 29.861538
| 79
| 0.592478
|
7950ea522362ceb79971dd8de968586eea9b62b1
| 1,888
|
py
|
Python
|
Utils/createBenchmark.py
|
Azoy/swift-experimental-string-processing
|
93b569d0b32be4fc666fec0b2ffe903b4c40eb20
|
[
"Apache-2.0"
] | null | null | null |
Utils/createBenchmark.py
|
Azoy/swift-experimental-string-processing
|
93b569d0b32be4fc666fec0b2ffe903b4c40eb20
|
[
"Apache-2.0"
] | null | null | null |
Utils/createBenchmark.py
|
Azoy/swift-experimental-string-processing
|
93b569d0b32be4fc666fec0b2ffe903b4c40eb20
|
[
"Apache-2.0"
] | null | null | null |
# python3 createBenchmark.py MyRegexBenchmark
# reference: https://github.com/apple/swift/blob/main/benchmark/scripts/create_benchmark.py
import argparse
import os
template = """import _StringProcessing
extension BenchmarkRunner {{
mutating func add{name}() {{
}}
}}
"""
def main():
p = argparse.ArgumentParser()
p.add_argument("name", help="The name of the new benchmark to be created")
args = p.parse_args()
# create a file in Sources/RegexBenchmark/Suite with the benchmark template
create_benchmark_file(args.name)
# add to the registration function in BenchmarkRunner
register_benchmark(args.name)
def create_benchmark_file(name):
    contents = template.format(name=name)
relative_path = create_relative_path("../Sources/RegexBenchmark/Suite/")
source_file_path = os.path.join(relative_path, name + ".swift")
print(f"Creating new benchmark file: {source_file_path}")
with open(source_file_path, "w") as f:
f.write(contents)
def register_benchmark(name):
relative_path = create_relative_path("../Sources/RegexBenchmark/BenchmarkRegistration.swift")
# read current contents into an array
file_contents = []
with open(relative_path, "r") as f:
file_contents = f.readlines()
new_file_contents = []
for line in file_contents:
if "end of registrations" not in line:
new_file_contents.append(line)
else:
# add the newest benchmark
new_file_contents.append(f" benchmark.add{name}()\n")
new_file_contents.append(line)
# write the new contents
with open(relative_path, "w") as f:
for line in new_file_contents:
f.write(line)
def create_relative_path(file_path):
return os.path.join(os.path.dirname(__file__), file_path)
if __name__ == "__main__":
main()
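# Hedged illustration (the benchmark name is an example): running
#   python3 createBenchmark.py MyRegexBenchmark
# writes Sources/RegexBenchmark/Suite/MyRegexBenchmark.swift from the template
# above and inserts the line
#   benchmark.addMyRegexBenchmark()
# just before the "end of registrations" marker in BenchmarkRegistration.swift.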
| 30.451613
| 97
| 0.684322
|
7950eaecf262415712d4b25140aaeb9dd30ffc5d
| 10,352
|
py
|
Python
|
Detection/ImageTaggingTool/helpers.py
|
mohabouje/cntk-hotel-pictures-classificator
|
a5b37dd90f5e7abf0c752b55b9b06951e4ffc4d1
|
[
"MIT"
] | 28
|
2018-09-02T09:01:20.000Z
|
2022-01-20T12:55:49.000Z
|
Detection/ImageTaggingTool/helpers.py
|
mohabouje/cntk-hotel-pictures-classificator
|
a5b37dd90f5e7abf0c752b55b9b06951e4ffc4d1
|
[
"MIT"
] | 6
|
2018-01-24T10:21:00.000Z
|
2018-04-17T17:39:17.000Z
|
Detection/ImageTaggingTool/helpers.py
|
karolzak/cntk-hotel-pictures-classificator
|
a5b37dd90f5e7abf0c752b55b9b06951e4ffc4d1
|
[
"MIT"
] | 13
|
2018-09-02T09:01:23.000Z
|
2020-11-20T23:00:29.000Z
|
from __future__ import print_function
from builtins import str
import os
import numpy as np
import copy
import textwrap
import cv2
from PIL import Image, ImageFont, ImageDraw
from PIL.ExifTags import TAGS
available_font = "arial.ttf"
try:
dummy = ImageFont.truetype(available_font, 16)
except:
available_font = "FreeMono.ttf"
def imresizeMaxDim(img, maxDim, boUpscale = False, interpolation = cv2.INTER_LINEAR):
scale = 1.0 * maxDim / max(img.shape[:2])
if scale < 1 or boUpscale:
img = imresize(img, scale, interpolation)
else:
scale = 1.0
return img, scale
def imresize(img, scale, interpolation = cv2.INTER_LINEAR):
return cv2.resize(img, (0,0), fx=scale, fy=scale, interpolation=interpolation)
def imread(imgPath, boThrowErrorIfExifRotationTagSet = True):
if not os.path.exists(imgPath):
print("ERROR: image path does not exist.")
        raise IOError("image path does not exist: " + imgPath)
rotation = rotationFromExifTag(imgPath)
if boThrowErrorIfExifRotationTagSet and rotation != 0:
print ("Error: exif roation tag set, image needs to be rotated by %d degrees." % rotation)
img = cv2.imread(imgPath)
if img is None:
print ("ERROR: cannot load image " + imgPath)
        raise IOError("cannot load image " + imgPath)
if rotation != 0:
img = imrotate(img, -90).copy() # got this error occassionally without copy "TypeError: Layout of the output array img is incompatible with cv::Mat"
return img
def rotationFromExifTag(imgPath):
TAGSinverted = {v: k for k, v in TAGS.items()}
orientationExifId = TAGSinverted['Orientation']
try:
imageExifTags = Image.open(imgPath)._getexif()
except:
imageExifTags = None
# rotate the image if orientation exif tag is present
rotation = 0
if imageExifTags != None and orientationExifId != None and orientationExifId in imageExifTags:
orientation = imageExifTags[orientationExifId]
# print ("orientation = " + str(imageExifTags[orientationExifId]))
if orientation == 1 or orientation == 0:
rotation = 0 # no need to do anything
elif orientation == 6:
rotation = -90
elif orientation == 8:
rotation = 90
else:
print ("ERROR: orientation = " + str(orientation) + " not_supported!")
            raise ValueError("orientation = " + str(orientation) + " not supported")
return rotation
def drawRectangles(img, rects, color = (0, 255, 0), thickness = 2):
for rect in rects:
pt1 = tuple(ToIntegers(rect[0:2]))
pt2 = tuple(ToIntegers(rect[2:]))
cv2.rectangle(img, pt1, pt2, color, thickness)
def getDrawTextWidth(text):
textLen=len(text)
textWidth=50
if textLen <6:
textWidth = 60
elif textLen <13 :
textWidth = len(text)*9 +26
else :
textWidth = len(text)*9 +30
return textWidth
def getColorsPalette():
colors = [[255,0,0], [0,255,0], [0,0,255], [255,255,0], [255,0,255]]
for i in range(5):
for dim in range(0,3):
for s in (0.25, 0.5, 0.75):
if colors[i][dim] != 0:
newColor = copy.deepcopy(colors[i])
newColor[dim] = int(round(newColor[dim] * s))
colors.append(newColor)
return colors
def ToIntegers(list1D):
return [int(float(x)) for x in list1D]
def drawCrossbar(img, pt):
(x,y) = pt
cv2.rectangle(img, (0, y), (x, y), (255, 255, 0), 1)
cv2.rectangle(img, (x, 0), (x, y), (255, 255, 0), 1)
cv2.rectangle(img, (img.shape[1],y), (x, y), (255, 255, 0), 1)
cv2.rectangle(img, (x, img.shape[0]), (x, y), (255, 255, 0), 1)
def imconvertPil2Cv(pilImg):
rgb = pilImg.convert('RGB')
return np.array(rgb).copy()[:, :, ::-1]
def imconvertCv2Pil(img):
cv2_im = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
return Image.fromarray(cv2_im)
def cv2DrawText(img, pt, text, color = (255,255,255), colorBackground = None):
# Write some Text
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.6
lineType =1
cv2.putText(img,text,
pt,
font,
fontScale,
color,
lineType)
def pilDrawText(pilImg, pt, text, textWidth=None, color = (255,255,255), colorBackground = None, font = ImageFont.truetype(available_font, 16)):
textY = pt[1]
draw = ImageDraw.Draw(pilImg)
if textWidth == None:
lines = [text]
else:
lines = textwrap.wrap(text, width=textWidth)
for line in lines:
width, height = font.getsize(line)
if colorBackground != None:
draw.rectangle((pt[0], pt[1], pt[0] + width, pt[1] + height), fill=tuple(colorBackground[::-1]))
draw.text(pt, line, fill = tuple(color), font = font)
textY += height
return pilImg
def drawText(img, pt, text, textWidth=None, color = (255,255,255), colorBackground = None, font = ImageFont.truetype(available_font, 16)):
pilImg = imconvertCv2Pil(img)
pilImg = pilDrawText(pilImg, pt, text, textWidth, color, colorBackground, font)
return imconvertPil2Cv(pilImg)
def imWidth(input):
return imWidthHeight(input)[0]
def imHeight(input):
return imWidthHeight(input)[1]
def imWidthHeight(input):
width, height = Image.open(input).size # this does not load the full image
return width, height
def imArrayWidth(input):
return imArrayWidthHeight(input)[0]
def imArrayHeight(input):
return imArrayWidthHeight(input)[1]
def imArrayWidthHeight(input):
width = input.shape[1]
height = input.shape[0]
return width, height
def ptClip(pt, maxWidth, maxHeight):
pt = list(pt)
pt[0] = max(pt[0], 0)
pt[1] = max(pt[1], 0)
pt[0] = min(pt[0], maxWidth)
pt[1] = min(pt[1], maxHeight)
return pt
def deleteFile(filePath):
if os.path.exists(filePath):
os.remove(filePath)
def writeFile(outputFile, lines):
with open(outputFile,'w') as f:
for line in lines:
f.write("%s\n" % line)
def writeTable(outputFile, table):
lines = tableToList1D(table)
writeFile(outputFile, lines)
def tableToList1D(table, delimiter='\t'):
return [delimiter.join([str(s) for s in row]) for row in table]
def getFilesInDirectory(directory, postfix = ""):
fileNames = [s for s in os.listdir(directory) if not os.path.isdir(os.path.join(directory, s))]
if not postfix or postfix == "":
return fileNames
else:
return [s for s in fileNames if s.lower().endswith(postfix)]
def readTable(inputFile, delimiter='\t', columnsToKeep=None):
lines = readFile(inputFile);
if columnsToKeep != None:
header = lines[0].split(delimiter)
columnsToKeepIndices = listFindItems(header, columnsToKeep)
else:
columnsToKeepIndices = None;
return splitStrings(lines, delimiter, columnsToKeepIndices)
def readFile(inputFile):
#reading as binary, to avoid problems with end-of-text characters
#note that readlines() does not remove the line ending characters
with open(inputFile,'rb') as f:
lines = f.readlines()
return [removeLineEndCharacters(s) for s in lines]
def removeLineEndCharacters(line):
if line.endswith(b'\r\n'):
return line[:-2]
elif line.endswith(b'\n'):
return line[:-1]
else:
return line
def splitStrings(strings, delimiter, columnsToKeepIndices=None):
table = [splitString(string, delimiter, columnsToKeepIndices) for string in strings]
return table;
def splitString(string, delimiter='\t', columnsToKeepIndices=None):
if string == None:
return None
items = string.decode('utf-8').split(delimiter)
if columnsToKeepIndices != None:
items = getColumns([items], columnsToKeepIndices)
items = items[0]
return items;
class Bbox:
MAX_VALID_DIM = 100000
left = top = right = bottom = None
def __init__(self, left, top, right, bottom):
self.left = int(round(float(left)))
self.top = int(round(float(top)))
self.right = int(round(float(right)))
self.bottom = int(round(float(bottom)))
self.standardize()
def __str__(self):
return ("Bbox object: left = {0}, top = {1}, right = {2}, bottom = {3}".format(self.left, self.top, self.right, self.bottom))
def __repr__(self):
return str(self)
def rect(self):
return [self.left, self.top, self.right, self.bottom]
def max(self):
return max([self.left, self.top, self.right, self.bottom])
def min(self):
return min([self.left, self.top, self.right, self.bottom])
def width(self):
width = self.right - self.left + 1
assert(width>=0)
return width
def height(self):
height = self.bottom - self.top + 1
assert(height>=0)
return height
def surfaceArea(self):
return self.width() * self.height()
def getOverlapBbox(self, bbox):
left1, top1, right1, bottom1 = self.rect()
left2, top2, right2, bottom2 = bbox.rect()
overlapLeft = max(left1, left2)
overlapTop = max(top1, top2)
overlapRight = min(right1, right2)
overlapBottom = min(bottom1, bottom2)
if (overlapLeft>overlapRight) or (overlapTop>overlapBottom):
return None
else:
return Bbox(overlapLeft, overlapTop, overlapRight, overlapBottom)
def standardize(self): #NOTE: every setter method should call standardize
leftNew = min(self.left, self.right)
topNew = min(self.top, self.bottom)
rightNew = max(self.left, self.right)
bottomNew = max(self.top, self.bottom)
self.left = leftNew
self.top = topNew
self.right = rightNew
self.bottom = bottomNew
def crop(self, maxWidth, maxHeight):
leftNew = min(max(self.left, 0), maxWidth)
topNew = min(max(self.top, 0), maxHeight)
rightNew = min(max(self.right, 0), maxWidth)
bottomNew = min(max(self.bottom, 0), maxHeight)
return Bbox(leftNew, topNew, rightNew, bottomNew)
def isValid(self):
if self.left>=self.right or self.top>=self.bottom:
return False
if min(self.rect()) < -self.MAX_VALID_DIM or max(self.rect()) > self.MAX_VALID_DIM:
return False
return True
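# Hedged helper sketch (not part of the original tool): intersection-over-union
# of two boxes, built only from the Bbox methods defined above.
def bboxIntersectionOverUnion(bbox1, bbox2):
    overlap = bbox1.getOverlapBbox(bbox2)
    if overlap is None:
        return 0.0
    overlapArea = float(overlap.surfaceArea())
    # subtract the overlap once so it is not counted twice in the union
    return overlapArea / (bbox1.surfaceArea() + bbox2.surfaceArea() - overlapArea)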
| 32.656151
| 157
| 0.629927
|
7950ec0299238aa8c62f5331485aa0412e1170bb
| 935
|
py
|
Python
|
countries_field/bitfield/query.py
|
egosko/django-countries-field
|
0710f6d148dfefd5c56767bc5203081e96b8dee4
|
[
"Unlicense"
] | 3
|
2016-02-18T15:06:41.000Z
|
2019-12-25T15:34:28.000Z
|
countries_field/bitfield/query.py
|
egosko/django-countries-field
|
0710f6d148dfefd5c56767bc5203081e96b8dee4
|
[
"Unlicense"
] | 2
|
2016-02-19T07:54:56.000Z
|
2018-05-15T14:46:31.000Z
|
countries_field/bitfield/query.py
|
egosko/django-countries-field
|
0710f6d148dfefd5c56767bc5203081e96b8dee4
|
[
"Unlicense"
] | 8
|
2015-03-24T10:27:28.000Z
|
2020-11-30T09:56:19.000Z
|
class BitQueryLookupWrapper(object):
def __init__(self, alias, column, bit):
self.table_alias = alias
self.column = column
self.bit = bit
def as_sql(self, qn, connection=None):
"""
Create the proper SQL fragment. This inserts something like
"(T0.flags & value) != 0".
This will be called by Where.as_sql()
"""
query = '%s.%s | %d' if self.bit else '%s.%s & %d'
return query % (qn(self.table_alias), qn(self.column), self.bit.mask), []
class BitQuerySaveWrapper(BitQueryLookupWrapper):
def as_sql(self, qn, connection):
"""
Create the proper SQL fragment. This inserts something like
"(T0.flags & value) != 0".
This will be called by Where.as_sql()
"""
query = '%s.%s | %d' if self.bit else '%s.%s & ~%d'
return query % (qn(self.table_alias), qn(self.column), self.bit.mask), []
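# Hedged illustration (alias, column and mask value are examples): with table
# alias T0, column flags and a truthy bit whose mask is 4, the lookup wrapper
# above builds ('%s.%s | 4' % (qn('T0'), qn('flags')), []); a falsy bit selects
# the '& 4' form, and the save wrapper emits '& ~4' in that case to clear the bit.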
| 31.166667
| 81
| 0.57754
|
7950ec16c31dbe0b4ad2c84949152dd1363122f4
| 502
|
py
|
Python
|
setup.py
|
sebastien-boulle/changelog-generator
|
97ee0037f904c133b8182a894dc8ce45d2cb4faa
|
[
"Unlicense",
"MIT"
] | null | null | null |
setup.py
|
sebastien-boulle/changelog-generator
|
97ee0037f904c133b8182a894dc8ce45d2cb4faa
|
[
"Unlicense",
"MIT"
] | null | null | null |
setup.py
|
sebastien-boulle/changelog-generator
|
97ee0037f904c133b8182a894dc8ce45d2cb4faa
|
[
"Unlicense",
"MIT"
] | null | null | null |
from setuptools import setup, find_namespace_packages
setup(
name="changelog-generator",
version="0.1.0dev",
packages=find_namespace_packages(include=["changelog_generator.*"]),
maintainer="LumApps core team",
maintainer_email="core@lumapps.com",
url="https://github.com/lumapps/changelog-generator",
python_requires="~=3.7",
setup_requires=["wheel"],
install_requires=["gitpython", "jinja2"],
extras_require={},
package_data={},
test_suite="tests",
)
| 27.888889
| 72
| 0.697211
|
7950ec43598cb8155eec12a93e8e4d0f0990fcb9
| 699
|
py
|
Python
|
eap/sitecustomize.py
|
destexheteam/neuroneap
|
4dc4ab20c4ff6729718ad3e9abdb85d28299c433
|
[
"CC-BY-4.0",
"MIT"
] | 2
|
2016-11-25T22:32:05.000Z
|
2021-09-02T10:46:59.000Z
|
eap/sitecustomize.py
|
destexheteam/neuroneap
|
4dc4ab20c4ff6729718ad3e9abdb85d28299c433
|
[
"CC-BY-4.0",
"MIT"
] | 1
|
2016-02-04T22:26:08.000Z
|
2016-02-04T22:54:54.000Z
|
eap/sitecustomize.py
|
btel/neuroneap
|
f8846bb8a7487a1e252a2a6359142530f21b424f
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
#!/usr/bin/env python
#coding=utf-8
## {{{ http://code.activestate.com/recipes/65287/ (r5)
# code snippet, to be included in 'sitecustomize.py'
import sys
def info(type, value, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
# we are in interactive mode or we don't have a tty-like
# device, so we call the default hook
sys.__excepthook__(type, value, tb)
else:
import traceback, pdb
# we are NOT in interactive mode, print the exception...
traceback.print_exception(type, value, tb)
print
# ...then start the debugger in post-mortem mode.
pdb.pm()
sys.excepthook = info
## end of http://code.activestate.com/recipes/65287/ }}}
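# Hedged illustration (the script name is an example): with this sitecustomize
# importable, an uncaught exception in a normal terminal run prints the full
# traceback and then opens the (Pdb) post-mortem prompt:
#   $ python buggy_script.py
# Interactive sessions and runs without a tty fall back to the default hook.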
| 30.391304
| 62
| 0.660944
|
7950ec473265b9c2f1844e9a00a864f7ca8355dc
| 5,927
|
py
|
Python
|
captioning/models/m2/m2transformer/models/transformer/decoders.py
|
linzhlalala/self-critical.pytorch
|
b856250ac52ba63656b1b03cdc3d7e830ed43f68
|
[
"MIT"
] | 1
|
2020-11-19T11:11:01.000Z
|
2020-11-19T11:11:01.000Z
|
captioning/models/m2/m2transformer/models/transformer/decoders.py
|
linzhlalala/self-critical.pytorch
|
b856250ac52ba63656b1b03cdc3d7e830ed43f68
|
[
"MIT"
] | null | null | null |
captioning/models/m2/m2transformer/models/transformer/decoders.py
|
linzhlalala/self-critical.pytorch
|
b856250ac52ba63656b1b03cdc3d7e830ed43f68
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from .attention import MultiHeadAttention
from .utils import sinusoid_encoding_table, PositionWiseFeedForward
from ..containers import Module, ModuleList
class MeshedDecoderLayer(Module):
def __init__(self, d_model=512, d_k=64, d_v=64, h=8, d_ff=2048, dropout=.1, self_att_module=None,
enc_att_module=None, self_att_module_kwargs=None, enc_att_module_kwargs=None):
super(MeshedDecoderLayer, self).__init__()
self.self_att = MultiHeadAttention(d_model, d_k, d_v, h, dropout, can_be_stateful=True,
attention_module=self_att_module,
attention_module_kwargs=self_att_module_kwargs)
self.enc_att = MultiHeadAttention(d_model, d_k, d_v, h, dropout, can_be_stateful=False,
attention_module=enc_att_module,
attention_module_kwargs=enc_att_module_kwargs)
self.pwff = PositionWiseFeedForward(d_model, d_ff, dropout)
self.fc_alpha1 = nn.Linear(d_model + d_model, d_model)
self.fc_alpha2 = nn.Linear(d_model + d_model, d_model)
self.fc_alpha3 = nn.Linear(d_model + d_model, d_model)
self.init_weights()
def init_weights(self):
nn.init.xavier_uniform_(self.fc_alpha1.weight)
nn.init.xavier_uniform_(self.fc_alpha2.weight)
nn.init.xavier_uniform_(self.fc_alpha3.weight)
nn.init.constant_(self.fc_alpha1.bias, 0)
nn.init.constant_(self.fc_alpha2.bias, 0)
nn.init.constant_(self.fc_alpha3.bias, 0)
def forward(self, input, enc_output, mask_pad, mask_self_att, mask_enc_att):
self_att = self.self_att(input, input, input, mask_self_att)
self_att = self_att * mask_pad
# print('enc_output[:, 0]', enc_output.size())
# print('enc_output.size()', enc_output.size())
enc_att1 = self.enc_att(self_att, enc_output[:, 0], enc_output[:, 0], mask_enc_att) * mask_pad
enc_att2 = self.enc_att(self_att, enc_output[:, 1], enc_output[:, 1], mask_enc_att) * mask_pad
enc_att3 = self.enc_att(self_att, enc_output[:, 2], enc_output[:, 2], mask_enc_att) * mask_pad
alpha1 = torch.sigmoid(self.fc_alpha1(torch.cat([self_att, enc_att1], -1)))
alpha2 = torch.sigmoid(self.fc_alpha2(torch.cat([self_att, enc_att2], -1)))
alpha3 = torch.sigmoid(self.fc_alpha3(torch.cat([self_att, enc_att3], -1)))
enc_att = (enc_att1 * alpha1 + enc_att2 * alpha2 + enc_att3 * alpha3) / np.sqrt(3)
enc_att = enc_att * mask_pad
ff = self.pwff(enc_att)
ff = ff * mask_pad
return ff
class MeshedDecoder(Module):
def __init__(self, vocab_size, max_len, N_dec, padding_idx, d_model=512, d_k=64, d_v=64, h=8, d_ff=2048, dropout=.1,
self_att_module=None, enc_att_module=None, self_att_module_kwargs=None, enc_att_module_kwargs=None):
super(MeshedDecoder, self).__init__()
self.d_model = d_model
self.word_emb = nn.Embedding(vocab_size, d_model, padding_idx=padding_idx)
self.pos_emb = nn.Embedding.from_pretrained(sinusoid_encoding_table(max_len + 1, d_model, 0), freeze=True)
self.layers = ModuleList(
[MeshedDecoderLayer(d_model, d_k, d_v, h, d_ff, dropout, self_att_module=self_att_module,
enc_att_module=enc_att_module, self_att_module_kwargs=self_att_module_kwargs,
enc_att_module_kwargs=enc_att_module_kwargs) for _ in range(N_dec)])
self.fc = nn.Linear(d_model, vocab_size, bias=False)
self.max_len = max_len
self.padding_idx = padding_idx
self.N = N_dec
self.register_state('running_mask_self_attention', torch.zeros((1, 1, 0)).byte())
# running_seq is the position_ix
self.register_state('running_seq', torch.zeros((1,)).long())
def forward(self, input, encoder_output, mask_encoder):
# input (b_s, seq_len)
print('forward function MeshedDecoder Class')
b_s, seq_len = input.shape[:2]
mask_queries = (input != self.padding_idx).unsqueeze(-1).float() # (b_s, seq_len, 1)
mask_self_attention = torch.triu(torch.ones((seq_len, seq_len), dtype=torch.uint8, device=input.device),
diagonal=1)
mask_self_attention = mask_self_attention.unsqueeze(0).unsqueeze(0) # (1, 1, seq_len, seq_len)
mask_self_attention = mask_self_attention + (input == self.padding_idx).unsqueeze(1).unsqueeze(1).byte()
mask_self_attention = mask_self_attention.gt(0) # (b_s, 1, seq_len, seq_len)
if self._is_stateful:
self.running_mask_self_attention = torch.cat([self.running_mask_self_attention.bool(), mask_self_attention], -1)
mask_self_attention = self.running_mask_self_attention
seq = torch.arange(1, seq_len + 1).view(1, -1).expand(b_s, -1).to(input.device) # (b_s, seq_len)
seq = seq.masked_fill(mask_queries.squeeze(-1) == 0, 0)
if self._is_stateful:
self.running_seq.add_(1)
seq = self.running_seq
# RT note: this is for my own use.
        # When there is no <pad> token in the vocab, you can use -1 as <pad>
# since Embedding would fail for -1, here I manually alter the input
# Since we have mask_self_attention, this change won't affect result.
input = input.clone().detach() # rtchange
if self.padding_idx == -1:
input[input == self.padding_idx] = 0 # rtchange
out = self.word_emb(input) + self.pos_emb(seq)
for i, l in enumerate(self.layers):
out = l(out, encoder_output, mask_queries, mask_self_attention, mask_encoder)
out = self.fc(out)
return F.log_softmax(out, dim=-1)
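# ---------------------------------------------------------------------------
# Editorial note (not part of the original file): a minimal, self-contained
# sketch of the meshed cross-attention gating computed in
# MeshedDecoderLayer.forward above. Tensor shapes and layer sizes here are
# illustrative assumptions only, not values taken from the original model.
# ---------------------------------------------------------------------------
import torch
from torch import nn

d_model, b_s, seq_len = 512, 2, 7
self_att = torch.randn(b_s, seq_len, d_model)                        # decoder self-attention output
enc_atts = [torch.randn(b_s, seq_len, d_model) for _ in range(3)]    # one cross-attention result per encoder level
fc_alphas = [nn.Linear(2 * d_model, d_model) for _ in range(3)]      # stand-ins for fc_alpha1..3

# each alpha gates how much of the corresponding encoder level is mixed in
gated = [torch.sigmoid(fc(torch.cat([self_att, enc], -1))) * enc
         for fc, enc in zip(fc_alphas, enc_atts)]
enc_att = sum(gated) / (3 ** 0.5)                                    # same sqrt(3) normalisation as above
print(enc_att.shape)                                                 # (b_s, seq_len, d_model)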
| 53.396396
| 124
| 0.655644
|
7950ed055f94a76a8d92f95d3161bda9657bd049
| 820
|
py
|
Python
|
histograms/histogram_equalization_clahe.py
|
vibinash/vision
|
7d775d6a877412c963965ecca2eea71ee2def007
|
[
"MIT"
] | null | null | null |
histograms/histogram_equalization_clahe.py
|
vibinash/vision
|
7d775d6a877412c963965ecca2eea71ee2def007
|
[
"MIT"
] | null | null | null |
histograms/histogram_equalization_clahe.py
|
vibinash/vision
|
7d775d6a877412c963965ecca2eea71ee2def007
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
# CLAHE: contrast Limited Adaptive histogram equalization
# Sometimes equalizing the global contrast of the image is not a good idea,
# since certain parts of the image can end up over-brightened
# Adaptive histogram equalization is where the image is divided into small
# blocks called 'tiles'. Contrast Limiting is used to prevent noise being
# amplified. If any bin is above the specified limit (default: 40), those
# pixels are clipped and distributed uniformly to other bins before applying
# equalization. Bilinear interpolation is applied to remove artifacts in the
# tile borders
img = cv2.imread('../images/victoria.jpg', 0)
# Create a CLAHE object
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
cl1 = clahe.apply(img)
cv2.imshow('clahe', cl1)
cv2.waitKey(0)
cv2.destroyAllWindows()
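# Editorial note (not part of the original file): for comparison, a minimal
# sketch of plain global histogram equalization on the same image, which is
# what the comments above contrast CLAHE against. The image path is the one
# assumed above; any grayscale image works.
import cv2
img = cv2.imread('../images/victoria.jpg', 0)
equ = cv2.equalizeHist(img)   # global equalization: one histogram for the whole image
cv2.imshow('global equalization', equ)
cv2.waitKey(0)
cv2.destroyAllWindows()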
| 34.166667
| 76
| 0.776829
|
7950eead1d5a259f62f77862fcc45f86ba7eb684
| 3,590
|
py
|
Python
|
tensorflow/contrib/slim/python/slim/nets/inception_v4_resnet_v2.py
|
alikewater/tensorflow
|
697929b163102db63fcf0599eb718e49d5ecd2c2
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/slim/python/slim/nets/inception_v4_resnet_v2.py
|
alikewater/tensorflow
|
697929b163102db63fcf0599eb718e49d5ecd2c2
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/slim/python/slim/nets/inception_v4_resnet_v2.py
|
alikewater/tensorflow
|
697929b163102db63fcf0599eb718e49d5ecd2c2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
import tensorflow as tf
import tensorflow.contrib.slim as slim
# v4.default_image_size = 299
def v4(inputs,
sc='Inception-ResNet-v2'):
'''
    Inception-V4 / Inception-ResNet-v2 structure
net structs
--------------------------------------
    input            | 299 x 299 x 3
    3x3 conv / s2    | 149 x 149 x 32
    3x3 conv / s1    | 147 x 147 x 32
    3x3 conv / s1    | 147 x 147 x 64
--------------------------------------
3x3 maxpool / s2 | 73 x 73 x 64
+
3x3 conv / s2 | 73 x 73 x 96
--------------------------------------
concat | 73 x 73 x 160
--------------------------------------
    1x1 conv / s1    | 73 x 73 x 64   # the 1x1 conv is for dimensionality reduction (compressing the depth of the previous layer's output), bringing the 160 channels above down to 64
3x3 conv / s1 | 71 x 71 x 96
+
1x1 conv / s1 | 73 x 73 x 64
7x1 conv / s1 | 73 x 73 x 64
1x7 conv / s1 | 73 x 73 x 64
3x3 conv / s1 | 71 x 71 x 96
--------------------------------------
concat | 71 x 71 x 192
--------------------------------------
3x3 maxpool / s2 | 35 x 35 x 192
+
3x3 conv / s2 | 35 x 35 x 192
--------------------------------------
concat | 35 x 35 x 384
--------------------------------------
'''
end_points = {}
with tf.variable_scope(sc):
with slim.arg_scope([slim.conv2d, slim.max_pool2d],stride=1,padding='SAME'):
net = slim.conv2d(inputs, 32, [3, 3], stride=2, scope='conv_1')
end_points['conv_1'] = net
            net = slim.conv2d(net, 32, [3, 3], padding='VALID', scope='conv_2')
end_points['conv_2'] = net
            net = slim.conv2d(net, 64, [3, 3], scope='conv_3')
end_points['conv_3'] = net
with tf.variable_scope('mixed_1'):
with tf.variable_scope('branch_0'):
                    branch_0 = slim.max_pool2d(net, [3, 3], stride=2, scope='branch_0_mp')
with tf.variable_scope('branch_1'):
                    branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, scope='branch_1_conv')
net = tf.concat([branch_0, branch_1], 3)
end_points['mixed_1'] = net
with tf.variable_scope('mixed_2'):
with tf.variable_scope('branch_0'):
                    branch_0 = slim.conv2d(net, 64, [1, 1], scope='branch_0_conv1')
                    branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID', scope='branch_0_conv2')
                with tf.variable_scope('branch_1'):
                    branch_1 = slim.conv2d(net, 64, [1, 1], scope='branch_1_conv1')
                    branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='branch_1_conv2')
                    branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='branch_1_conv3')
                    branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID', scope='branch_1_conv4')
net = tf.concat([branch_0, branch_1], 3)
end_points['mixed_2'] = net
with tf.variable_scope('mixed_3'):
with tf.variable_scope('branch_0'):
                    branch_0 = slim.max_pool2d(net, [3, 3], stride=2, scope='branch_0_mp')
with tf.variable_scope('branch_1'):
                    branch_1 = slim.conv2d(net, 192, [3, 3], stride=2, scope='branch_1_conv')
net = tf.concat([branch_0, branch_1], 3)
end_points['mixed_3'] = net
end_points['net'] = net
return net, end_points
| 46.025641
| 104
| 0.477437
|
7950efb9dad19789ca07c1c2b8d7c9c6f6d68f40
| 918
|
py
|
Python
|
themis/modules/collecting/collector.py
|
addam128/themis
|
113b818d593342f94f9c6c438bbfb0cc7c4f705b
|
[
"MIT"
] | null | null | null |
themis/modules/collecting/collector.py
|
addam128/themis
|
113b818d593342f94f9c6c438bbfb0cc7c4f705b
|
[
"MIT"
] | null | null | null |
themis/modules/collecting/collector.py
|
addam128/themis
|
113b818d593342f94f9c6c438bbfb0cc7c4f705b
|
[
"MIT"
] | null | null | null |
import lddwrap as ldd
import uuid
from pathlib import Path
from zipfile import ZipFile
from themis.modules.common.config import Config
class Collector:
def __init__(
self,
config: Config,
path: str,
name: str
) -> None:
self._path = path
self._config = config
self._name = name
self._deps = None
def collect(
self
) -> 'Collector':
self._deps = list(
map(
lambda dep: str(dep.path),
ldd.list_dependencies(Path(self._path))
)
)
return self
def archive(
self
) -> None:
with ZipFile(f"{self._config.sample_dir}/{self._name}_{uuid.uuid4()}.zip", mode='x') as zf:
for dep in self._deps:
if dep is None or dep == "None":
continue
zf.write(dep)
| 18.36
| 99
| 0.508715
|
7950f1284e644b7560f75d5f8af07e8146821db0
| 292
|
py
|
Python
|
artist/views.py
|
rijkerd/music_stream
|
ff0d7c629c89a07f08a10ed700e703b54704f500
|
[
"MIT",
"Unlicense"
] | 1
|
2020-06-10T23:26:39.000Z
|
2020-06-10T23:26:39.000Z
|
artist/views.py
|
rijkerd/music_stream
|
ff0d7c629c89a07f08a10ed700e703b54704f500
|
[
"MIT",
"Unlicense"
] | 8
|
2021-03-30T18:05:18.000Z
|
2022-03-12T00:16:55.000Z
|
artist/views.py
|
rijkerd/music_stream
|
ff0d7c629c89a07f08a10ed700e703b54704f500
|
[
"MIT",
"Unlicense"
] | null | null | null |
from django.shortcuts import render
from rest_framework.viewsets import ModelViewSet
from .models import Artist
from .serializers import ArtistSerializer
class ArtistViewSet(ModelViewSet):
lookup_field = "id"
queryset = Artist.objects.all()
serializer_class = ArtistSerializer
| 24.333333
| 48
| 0.80137
|
7950f15e69fb5f86ad00af93252871a9b0551928
| 597
|
py
|
Python
|
src/lib/Server/Plugins/Packages/PackagesConfig.py
|
pcmxgti/bcfg2
|
33aaf9c6bbeb0d20eef084b1347a0fce42086663
|
[
"mpich2"
] | null | null | null |
src/lib/Server/Plugins/Packages/PackagesConfig.py
|
pcmxgti/bcfg2
|
33aaf9c6bbeb0d20eef084b1347a0fce42086663
|
[
"mpich2"
] | null | null | null |
src/lib/Server/Plugins/Packages/PackagesConfig.py
|
pcmxgti/bcfg2
|
33aaf9c6bbeb0d20eef084b1347a0fce42086663
|
[
"mpich2"
] | null | null | null |
import Bcfg2.Server.Plugin
class PackagesConfig(Bcfg2.Server.Plugin.SimpleConfig):
_required = False
def Index(self):
""" Build local data structures """
Bcfg2.Server.Plugin.SimpleConfig.Index(self)
if hasattr(self.plugin, "sources") and self.plugin.sources.loaded:
# only reload Packages plugin if sources have been loaded.
# otherwise, this is getting called on server startup, and
# we have to wait until all sources have been indexed
# before we can call Packages.Reload()
self.plugin.Reload()
| 37.3125
| 74
| 0.654941
|
7950f160d55695a961111d226a1accb377e7a5b9
| 1,233
|
py
|
Python
|
tools/skp/page_sets/skia_ynevsvg_desktop.py
|
pospx/external_skia
|
7a135275c9fc2a4b3cbdcf9a96e7102724752234
|
[
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
tools/skp/page_sets/skia_ynevsvg_desktop.py
|
pospx/external_skia
|
7a135275c9fc2a4b3cbdcf9a96e7102724752234
|
[
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
tools/skp/page_sets/skia_ynevsvg_desktop.py
|
pospx/external_skia
|
7a135275c9fc2a4b3cbdcf9a96e7102724752234
|
[
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry import story
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
class SkiaBuildbotDesktopPage(page_module.Page):
def __init__(self, url, page_set):
super(SkiaBuildbotDesktopPage, self).__init__(
url=url,
name=url,
page_set=page_set,
shared_page_state_class=shared_page_state.SharedDesktopPageState)
self.archive_data_file = 'data/skia_ynevsvg_desktop.json'
def RunNavigateSteps(self, action_runner):
action_runner.Navigate(self.url)
action_runner.Wait(5)
class SkiaYnevsvgDesktopPageSet(story.StorySet):
""" Pages designed to represent the median, not highly optimized web """
def __init__(self):
super(SkiaYnevsvgDesktopPageSet, self).__init__(
archive_data_file='data/skia_ynevsvg_desktop.json')
urls_list = [
# Why: from skbug.com/4713
'http://www.googledrive.com/host/0B5nDjttF0gt9QjRKdEZ5MEVYc2c',
]
for url in urls_list:
self.AddStory(SkiaBuildbotDesktopPage(url, self))
| 29.357143
| 74
| 0.74777
|
7950f1db64fe206fcc5ecf3419dcfb6889c94d05
| 8,702
|
py
|
Python
|
salt/modules/incron.py
|
casselt/salt
|
d8a2ef4e0cd544656489d23d161928879b1fc1c0
|
[
"Apache-2.0"
] | 12
|
2015-01-21T00:18:25.000Z
|
2021-07-11T07:35:26.000Z
|
salt/modules/incron.py
|
casselt/salt
|
d8a2ef4e0cd544656489d23d161928879b1fc1c0
|
[
"Apache-2.0"
] | 2
|
2019-03-06T20:43:44.000Z
|
2019-04-10T23:56:02.000Z
|
salt/modules/incron.py
|
casselt/salt
|
d8a2ef4e0cd544656489d23d161928879b1fc1c0
|
[
"Apache-2.0"
] | 12
|
2015-01-05T09:50:42.000Z
|
2019-08-19T01:43:40.000Z
|
# -*- coding: utf-8 -*-
'''
Work with incron
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
import os
# Import salt libs
from salt.ext import six
from salt.ext.six.moves import range
import salt.utils.data
import salt.utils.files
import salt.utils.functools
import salt.utils.stringutils
# Set up logging
log = logging.getLogger(__name__)
TAG = '# Line managed by Salt, do not edit'
_INCRON_SYSTEM_TAB = '/etc/incron.d/'
_MASK_TYPES = [
'IN_ACCESS', 'IN_ATTRIB', 'IN_CLOSE_WRITE',
'IN_CLOSE_NOWRITE', 'IN_CREATE', 'IN_DELETE',
'IN_DELETE_SELF', 'IN_MODIFY', 'IN_MOVE_SELF',
'IN_MOVED_FROM', 'IN_MOVED_TO', 'IN_OPEN',
'IN_ALL_EVENTS', 'IN_MOVE', 'IN_CLOSE',
'IN_DONT_FOLLOW', 'IN_ONESHOT', 'IN_ONLYDIR',
'IN_NO_LOOP'
]
def _needs_change(old, new):
if old != new:
if new == 'random':
# Allow switch from '*' or not present to 'random'
if old == '*':
return True
elif new is not None:
return True
return False
def _render_tab(lst):
'''
Takes a tab list structure and renders it to a list for applying it to
a file
'''
ret = []
for pre in lst['pre']:
ret.append('{0}\n'.format(pre))
for cron in lst['crons']:
ret.append('{0} {1} {2} {3}\n'.format(cron['path'],
cron['mask'],
cron['cmd'],
TAG
)
)
return ret
def _get_incron_cmdstr(path):
'''
    Returns the incrontab command string used to load the given file.
'''
return 'incrontab {0}'.format(path)
def write_incron_file(user, path):
'''
Writes the contents of a file to a user's incrontab
CLI Example:
.. code-block:: bash
salt '*' incron.write_incron_file root /tmp/new_incron
'''
return __salt__['cmd.retcode'](_get_incron_cmdstr(path), runas=user, python_shell=False) == 0
def write_incron_file_verbose(user, path):
'''
Writes the contents of a file to a user's incrontab and return error message on error
CLI Example:
.. code-block:: bash
salt '*' incron.write_incron_file_verbose root /tmp/new_incron
'''
return __salt__['cmd.run_all'](_get_incron_cmdstr(path), runas=user, python_shell=False)
def _write_incron_lines(user, lines):
'''
Takes a list of lines to be committed to a user's incrontab and writes it
'''
if user == 'system':
ret = {}
ret['retcode'] = _write_file(_INCRON_SYSTEM_TAB, 'salt', ''.join(lines))
return ret
else:
path = salt.utils.files.mkstemp()
with salt.utils.files.fopen(path, 'wb') as fp_:
fp_.writelines(salt.utils.data.encode(lines))
if __grains__['os_family'] == 'Solaris' and user != "root":
__salt__['cmd.run']('chown {0} {1}'.format(user, path), python_shell=False)
ret = __salt__['cmd.run_all'](_get_incron_cmdstr(path), runas=user, python_shell=False)
os.remove(path)
return ret
def _write_file(folder, filename, data):
'''
Writes a file to disk
'''
path = os.path.join(folder, filename)
if not os.path.exists(folder):
msg = '{0} cannot be written. {1} does not exist'.format(filename, folder)
log.error(msg)
raise AttributeError(six.text_type(msg))
with salt.utils.files.fopen(path, 'w') as fp_:
fp_.write(salt.utils.stringutils.to_str(data))
return 0
def _read_file(folder, filename):
'''
Reads and returns the contents of a file
'''
path = os.path.join(folder, filename)
try:
with salt.utils.files.fopen(path, 'rb') as contents:
return salt.utils.data.decode(contents.readlines())
except (OSError, IOError):
return ''
def raw_system_incron():
'''
Return the contents of the system wide incrontab
CLI Example:
.. code-block:: bash
salt '*' incron.raw_system_incron
'''
log.debug("read_file {0}" . format(_read_file(_INCRON_SYSTEM_TAB, 'salt')))
return ''.join(_read_file(_INCRON_SYSTEM_TAB, 'salt'))
def raw_incron(user):
'''
Return the contents of the user's incrontab
CLI Example:
.. code-block:: bash
salt '*' incron.raw_incron root
'''
if __grains__['os_family'] == 'Solaris':
cmd = 'incrontab -l {0}'.format(user)
else:
cmd = 'incrontab -l -u {0}'.format(user)
return __salt__['cmd.run_stdout'](cmd, rstrip=False, runas=user, python_shell=False)
def list_tab(user):
'''
Return the contents of the specified user's incrontab
CLI Example:
.. code-block:: bash
salt '*' incron.list_tab root
'''
if user == 'system':
data = raw_system_incron()
else:
data = raw_incron(user)
log.debug("user data {0}" . format(data))
ret = {'crons': [],
'pre': []
}
flag = False
comment = None
tag = '# Line managed by Salt, do not edit'
for line in data.splitlines():
if line.endswith(tag):
if len(line.split()) > 3:
# Appears to be a standard incron line
comps = line.split()
path = comps[0]
mask = comps[1]
(cmd, comment) = ' '.join(comps[2:]).split(' # ')
dat = {'path': path,
'mask': mask,
'cmd': cmd,
'comment': comment}
ret['crons'].append(dat)
comment = None
else:
ret['pre'].append(line)
return ret
# For consistency's sake
ls = salt.utils.functools.alias_function(list_tab, 'ls')
def set_job(user, path, mask, cmd):
'''
Sets an incron job up for a specified user.
CLI Example:
.. code-block:: bash
salt '*' incron.set_job root '/root' 'IN_MODIFY' 'echo "$$ $@ $# $% $&"'
'''
# Scrub the types
mask = six.text_type(mask).upper()
# Check for valid mask types
for item in mask.split(','):
if item not in _MASK_TYPES:
return 'Invalid mask type: {0}' . format(item)
updated = False
arg_mask = mask.split(',')
arg_mask.sort()
lst = list_tab(user)
updated_crons = []
# Look for existing incrons that have cmd, path and at least one of the MASKS
# remove and replace with the one we're passed
for item, cron in enumerate(lst['crons']):
if path == cron['path']:
if cron['cmd'] == cmd:
cron_mask = cron['mask'].split(',')
cron_mask.sort()
if cron_mask == arg_mask:
return 'present'
if any([x in cron_mask for x in arg_mask]):
updated = True
else:
updated_crons.append(cron)
else:
updated_crons.append(cron)
else:
updated_crons.append(cron)
cron = {'cmd': cmd, 'path': path, 'mask': mask}
updated_crons.append(cron)
lst['crons'] = updated_crons
comdat = _write_incron_lines(user, _render_tab(lst))
if comdat['retcode']:
# Failed to commit, return the error
return comdat['stderr']
if updated:
return 'updated'
else:
return 'new'
def rm_job(user,
path,
mask,
cmd):
'''
    Remove an incron job for a specified user. The job is only removed when the
    specified path, mask, and cmd all match an existing entry.
CLI Example:
.. code-block:: bash
salt '*' incron.rm_job root /path
'''
# Scrub the types
mask = six.text_type(mask).upper()
# Check for valid mask types
for item in mask.split(','):
if item not in _MASK_TYPES:
return 'Invalid mask type: {0}' . format(item)
lst = list_tab(user)
ret = 'absent'
rm_ = None
for ind in range(len(lst['crons'])):
if rm_ is not None:
break
if path == lst['crons'][ind]['path']:
if cmd == lst['crons'][ind]['cmd']:
if mask == lst['crons'][ind]['mask']:
rm_ = ind
if rm_ is not None:
lst['crons'].pop(rm_)
ret = 'removed'
comdat = _write_incron_lines(user, _render_tab(lst))
if comdat['retcode']:
# Failed to commit, return the error
return comdat['stderr']
return ret
rm = salt.utils.functools.alias_function(rm_job, 'rm')
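# Editorial note (not part of the original module): a minimal sketch of the
# incrontab line format rendered by _render_tab() and parsed by list_tab()
# above. The path, mask and command values are illustrative only.
line = '/var/www IN_MODIFY,IN_CREATE /usr/local/bin/reload.sh # Line managed by Salt, do not edit'
comps = line.split()
path, mask = comps[0], comps[1]
cmd, comment = ' '.join(comps[2:]).split(' # ')
print({'path': path, 'mask': mask, 'cmd': cmd, 'comment': comment})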
| 27.109034
| 97
| 0.565502
|
7950f4be74d9af708627bb7450f83dbc913266df
| 830
|
py
|
Python
|
src/main/strategy_context.py
|
BMW-InnovationLab/BMW-Anonymization-API
|
6acc59fa18f1e668e6e80a7990aebbf2ab4ade5e
|
[
"Apache-2.0"
] | 108
|
2021-04-08T13:23:03.000Z
|
2022-03-30T14:22:13.000Z
|
src/main/strategy_context.py
|
Elio-hanna/BMW-Anonymization-API
|
c8707bb8cae6524a8c46a6aaadac24fef051c1db
|
[
"Apache-2.0"
] | 1
|
2021-10-06T08:25:51.000Z
|
2021-10-11T08:07:08.000Z
|
src/main/strategy_context.py
|
Elio-hanna/BMW-Anonymization-API
|
c8707bb8cae6524a8c46a6aaadac24fef051c1db
|
[
"Apache-2.0"
] | 12
|
2021-04-10T07:17:56.000Z
|
2022-03-26T17:48:12.000Z
|
from anonymization.base_anonymization import BaseAnonymization
class StrategyContext:
def __init__(self):
pass
def anonymize(self, detection_type: BaseAnonymization, technique: str, image, response, degree,label_id, mask):
"""
        :param detection_type: The detection type, either semantic segmentation or object detection
:param technique: The anonymization method
:param image: Input image
:param response: The bounding boxes taken from the output of the inference api
:param degree: The degree used to specify the opacity of the anonymization
:param label_id: The id of the detected class
        :param mask: The mask used to apply the anonymization
:return:
"""
return getattr(detection_type, technique)(image, response, degree, label_id, mask)
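# Editorial note (not part of the original file): a minimal sketch of the
# getattr-based strategy dispatch used by StrategyContext.anonymize above.
# The Blur class and its argument values are illustrative assumptions, not
# the actual BaseAnonymization API.
class Blur:
    def blurring(self, image, response, degree, label_id, mask):
        # a real implementation would blur the detected regions of the image
        return f"blurred {image} with degree {degree}"

detection_type, technique = Blur(), "blurring"
# equivalent to StrategyContext().anonymize(detection_type, technique, ...)
result = getattr(detection_type, technique)("img.png", [], 0.5, 1, None)
print(result)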
| 41.5
| 115
| 0.704819
|
7950f50c97ccc84243f6b35582bf0bfd56475fb1
| 4,053
|
py
|
Python
|
test/python/transpiler/test_decompose.py
|
TanveshT/qiskit-terra
|
dc3a2a667b8dc22512ca409ecae347d8dbdd944c
|
[
"Apache-2.0"
] | 1
|
2021-07-11T18:17:38.000Z
|
2021-07-11T18:17:38.000Z
|
test/python/transpiler/test_decompose.py
|
TanveshT/qiskit-terra
|
dc3a2a667b8dc22512ca409ecae347d8dbdd944c
|
[
"Apache-2.0"
] | null | null | null |
test/python/transpiler/test_decompose.py
|
TanveshT/qiskit-terra
|
dc3a2a667b8dc22512ca409ecae347d8dbdd944c
|
[
"Apache-2.0"
] | 1
|
2020-10-31T09:26:39.000Z
|
2020-10-31T09:26:39.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the decompose pass"""
from numpy import pi
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.transpiler.passes import Decompose
from qiskit.converters import circuit_to_dag
from qiskit.circuit.library import HGate
from qiskit.circuit.library import CCXGate
from qiskit.quantum_info.operators import Operator
from qiskit.test import QiskitTestCase
class TestDecompose(QiskitTestCase):
"""Tests the decompose pass."""
def test_basic(self):
"""Test decompose a single H into u2.
"""
qr = QuantumRegister(1, 'qr')
circuit = QuantumCircuit(qr)
circuit.h(qr[0])
dag = circuit_to_dag(circuit)
pass_ = Decompose(HGate)
after_dag = pass_.run(dag)
op_nodes = after_dag.op_nodes()
self.assertEqual(len(op_nodes), 1)
self.assertEqual(op_nodes[0].name, 'u2')
def test_decompose_only_h(self):
"""Test to decompose a single H, without the rest
"""
qr = QuantumRegister(2, 'qr')
circuit = QuantumCircuit(qr)
circuit.h(qr[0])
circuit.cx(qr[0], qr[1])
dag = circuit_to_dag(circuit)
pass_ = Decompose(HGate)
after_dag = pass_.run(dag)
op_nodes = after_dag.op_nodes()
self.assertEqual(len(op_nodes), 2)
for node in op_nodes:
self.assertIn(node.name, ['cx', 'u2'])
def test_decompose_toffoli(self):
"""Test decompose CCX.
"""
qr1 = QuantumRegister(2, 'qr1')
qr2 = QuantumRegister(1, 'qr2')
circuit = QuantumCircuit(qr1, qr2)
circuit.ccx(qr1[0], qr1[1], qr2[0])
dag = circuit_to_dag(circuit)
pass_ = Decompose(CCXGate)
after_dag = pass_.run(dag)
op_nodes = after_dag.op_nodes()
self.assertEqual(len(op_nodes), 15)
for node in op_nodes:
self.assertIn(node.name, ['h', 't', 'tdg', 'cx'])
def test_decompose_conditional(self):
"""Test decompose a 1-qubit gates with a conditional.
"""
qr = QuantumRegister(1, 'qr')
cr = ClassicalRegister(1, 'cr')
circuit = QuantumCircuit(qr, cr)
circuit.h(qr).c_if(cr, 1)
circuit.x(qr).c_if(cr, 1)
dag = circuit_to_dag(circuit)
pass_ = Decompose(HGate)
after_dag = pass_.run(dag)
ref_circuit = QuantumCircuit(qr, cr)
ref_circuit.u2(0, pi, qr[0]).c_if(cr, 1)
ref_circuit.x(qr).c_if(cr, 1)
ref_dag = circuit_to_dag(ref_circuit)
self.assertEqual(after_dag, ref_dag)
def test_decompose_oversized_instruction(self):
"""Test decompose on a single-op gate that doesn't use all qubits."""
# ref: https://github.com/Qiskit/qiskit-terra/issues/3440
qc1 = QuantumCircuit(2)
qc1.x(0)
gate = qc1.to_gate()
qc2 = QuantumCircuit(2)
qc2.append(gate, [0, 1])
output = qc2.decompose()
self.assertEqual(qc1, output)
def test_decompose_global_phase_1q(self):
"""Test decomposition of circuit with global phase"""
qc = QuantumCircuit(1)
qc.rz(0.1, 0)
qc.ry(0.5, 0)
qc.global_phase += pi/4
qcd = qc.decompose()
self.assertEqual(Operator(qc), Operator(qcd))
def test_decompose_global_phase_2q(self):
"""Test decomposition of circuit with global phase"""
qc = QuantumCircuit(2, global_phase=pi/4)
qc.rz(0.1, 0)
qc.rxx(0.2, 0, 1)
qcd = qc.decompose()
self.assertEqual(Operator(qc), Operator(qcd))
| 33.495868
| 77
| 0.631878
|
7950f51d516fd9bcae861542d0961d7b7300447e
| 20,695
|
py
|
Python
|
train_cls.py
|
mhwasil/3DmFV-Net
|
9cf8fe5f3875e97dd34997182c5087193a9c15bc
|
[
"MIT"
] | 102
|
2018-07-06T13:39:33.000Z
|
2022-03-27T10:13:58.000Z
|
train_cls.py
|
mhwasil/3DmFV-Net
|
9cf8fe5f3875e97dd34997182c5087193a9c15bc
|
[
"MIT"
] | 7
|
2018-11-08T00:31:48.000Z
|
2021-10-06T08:51:10.000Z
|
train_cls.py
|
mhwasil/3DmFV-Net
|
9cf8fe5f3875e97dd34997182c5087193a9c15bc
|
[
"MIT"
] | 38
|
2018-07-07T12:57:28.000Z
|
2021-09-28T02:04:00.000Z
|
import os
import sys
import numpy as np
import matplotlib
matplotlib.use('pdf')
# import matplotlib.pyplot as plt
import importlib
import argparse
import tensorflow as tf
import pickle
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import tf_util
import visualization
import provider
import utils
# ModelNet40 official train/test split. ModelNet10 requires separate downloading and sampling.
MAX_N_POINTS = 2048
NUM_CLASSES = 40
TRAIN_FILES = provider.getDataFiles( \
os.path.join(BASE_DIR, 'data/modelnet'+str(NUM_CLASSES)+'_ply_hdf5_'+ str(MAX_N_POINTS)+ '/train_files.txt'))
TEST_FILES = provider.getDataFiles(\
os.path.join(BASE_DIR, 'data/modelnet'+str(NUM_CLASSES)+'_ply_hdf5_'+ str(MAX_N_POINTS)+ '/test_files.txt'))
LABEL_MAP = provider.getDataFiles(\
os.path.join(BASE_DIR, 'data/modelnet'+str(NUM_CLASSES)+'_ply_hdf5_'+ str(MAX_N_POINTS)+ '/shape_names.txt'))
print( "Loading Modelnet" + str(NUM_CLASSES))
#Execute
#python train_cls.py --gpu=0 --log_dir='log' --batch_size=64 --num_point=1024 --num_gaussians=8 --gmm_variance=0.0156 --gmm_type='grid' --learning_rate=0.001 --model='voxnet_pfv' --max_epoch=200 --momentum=0.9 --optimizer='adam' --decay_step=200000 --weight_decay=0.0 --decay_rate=0.7
augment_rotation, augment_scale, augment_translation, augment_jitter, augment_outlier = (False, True, True, True, False)
parser = argparse.ArgumentParser()
#Parameters for learning
parser.add_argument('--gpu', type=int, default=2, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='3dmfv_net_cls', help='Model name [default: 3dmfv_net_cls]')
parser.add_argument('--log_dir', default='log_trial', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=200, help='Epoch to run [default: 200]')
parser.add_argument('--batch_size', type=int, default=64, help='Batch Size during training [default: 64]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for the optimizer [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay coef [default: 0.0]')
# Parameters for GMM
parser.add_argument('--gmm_type', default='grid', help='type of gmm [grid/learn], learn uses expectation maximization algorithm (EM) [default: grid]')
parser.add_argument('--num_gaussians', type=int , default=5, help='number of gaussians for gmm, if grid specify subdivisions, if learned specify actual number[default: 5, for grid it means 125 gaussians]')
parser.add_argument('--gmm_variance', type=float, default=0.04, help='variance for grid gmm, relevant only for grid type')
FLAGS = parser.parse_args()
N_GAUSSIANS = FLAGS.num_gaussians
GMM_TYPE = FLAGS.gmm_type
GMM_VARIANCE = FLAGS.gmm_variance
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
WEIGHT_DECAY = FLAGS.weight_decay
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
# Create log directory and prevent over-write by creating numbered subdirectories
LOG_DIR = 'log/modelnet' + str(NUM_CLASSES) + '/' + FLAGS.model + '/'+ GMM_TYPE + str(N_GAUSSIANS) + '_' + FLAGS.log_dir
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
else:
print('Log dir already exists! creating a new one..............')
n = 0
while True:
n+=1
new_log_dir = LOG_DIR+'/'+str(n)
if not os.path.exists(new_log_dir):
os.makedirs(new_log_dir)
print('New log dir:'+new_log_dir)
break
FLAGS.log_dir = new_log_dir
LOG_DIR = new_log_dir
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train_cls.py %s' % (LOG_DIR)) # bkp of train procedure
pickle.dump(FLAGS, open( os.path.join(LOG_DIR, 'parameters.p'), "wb" ) )
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
LOG_FOUT.write("augmentation RSTJ = " + str((augment_rotation, augment_scale, augment_translation, augment_jitter, augment_outlier))) #log augmentaitons
FAIL_CASES_FOUT = open(os.path.join(LOG_DIR, 'fail_cases.txt'), 'w')
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
LIMIT_GPU = True
MAX_ACCURACY = 0.0
MAX_CLASS_ACCURACY = 0.0
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def train(gmm):
global MAX_ACCURACY, MAX_CLASS_ACCURACY
# n_fv_features = 7 * len(gmm.weights_)
# Build Graph, train and classify
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
points_pl, labels_pl, w_pl, mu_pl, sigma_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT, gmm )
is_training_pl = tf.placeholder(tf.bool, shape=())
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
# Get model and loss
pred, fv = MODEL.get_model(points_pl, w_pl, mu_pl, sigma_pl, is_training_pl, bn_decay=bn_decay, weigth_decay=WEIGHT_DECAY, add_noise=False, num_classes=NUM_CLASSES)
loss = MODEL.get_loss(pred, labels_pl)
tf.summary.scalar('loss', loss)
# Get accuracy
correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
tf.summary.scalar('accuracy', accuracy)
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=batch)#, aggregation_method = tf.AggregationMethod.EXPERIMENTAL_TREE) #consider using: tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
sess = tf_util.get_session(GPU_INDEX, limit_gpu=LIMIT_GPU)
# Add summary writers
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
# Init variables
init = tf.global_variables_initializer()
sess.run(init, {is_training_pl: True})
ops = {'points_pl': points_pl,
'labels_pl': labels_pl,
'w_pl': w_pl,
'mu_pl': mu_pl,
'sigma_pl': sigma_pl,
'is_training_pl': is_training_pl,
'fv': fv,
'pred': pred,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch}
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
train_one_epoch(sess, ops, gmm, train_writer)
acc, acc_avg_cls = eval_one_epoch(sess, ops, gmm, test_writer)
# Save the variables to disk.
if epoch % 10 == 0:
save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
if acc > MAX_ACCURACY:
MAX_ACCURACY = acc
MAX_CLASS_ACCURACY = acc_avg_cls
log_string("Best test accuracy: %f" % MAX_ACCURACY)
log_string("Best test class accuracy: %f" % MAX_CLASS_ACCURACY)
def train_one_epoch(sess, ops, gmm, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
# Shuffle train files
train_file_idxs = np.arange(0, len(TRAIN_FILES))
np.random.shuffle(train_file_idxs)
for fn in range(len(TRAIN_FILES)):
log_string('----' + str(fn) + '-----')
current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]], compensate = False)
# points_idx = range(0,NUM_POINT)
points_idx = np.random.choice(range(0,2048),NUM_POINT)
current_data = current_data[:, points_idx, :]
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
current_label = np.squeeze(current_label)
file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
total_correct = 0
total_seen = 0
loss_sum = 0
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx + 1) * BATCH_SIZE
# Augment batched point clouds by rotation and jittering
augmented_data = current_data[start_idx:end_idx, :, :]
if augment_scale:
augmented_data = provider.scale_point_cloud(augmented_data, smin=0.66, smax=1.5)
if augment_rotation:
augmented_data = provider.rotate_point_cloud(augmented_data)
if augment_translation:
augmented_data = provider.translate_point_cloud(augmented_data, tval = 0.2)
if augment_jitter:
augmented_data = provider.jitter_point_cloud(augmented_data, sigma=0.01,
clip=0.05) # default sigma=0.01, clip=0.05
if augment_outlier:
augmented_data = provider.insert_outliers_to_point_cloud(augmented_data, outlier_ratio=0.02)
feed_dict = {ops['points_pl']: augmented_data,
ops['labels_pl']: current_label[start_idx:end_idx],
ops['w_pl']: gmm.weights_,
ops['mu_pl']: gmm.means_,
ops['sigma_pl']: np.sqrt(gmm.covariances_),
ops['is_training_pl']: is_training, }
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['loss'], ops['pred']],
feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val == current_label[start_idx:end_idx])
total_correct += correct
total_seen += BATCH_SIZE
loss_sum += loss_val
log_string('mean loss: %f' % (loss_sum / float(num_batches)))
log_string('accuracy: %f' % (total_correct / float(total_seen)))
def eval_one_epoch(sess, ops, gmm, test_writer):
""" ops: dict mapping from string to tf ops """
is_training = False
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
fail_cases_true_labels_final = []
fail_cases_false_labes_final = []
fail_cases_idx_final = []
# points_idx = np.random.choice(range(0, 2048), NUM_POINT)
points_idx = range(NUM_POINT)
for fn in range(len(TEST_FILES)):
log_string('----' + str(fn) + '-----')
current_data, current_label = provider.loadDataFile(TEST_FILES[fn], compensate=False)
current_data = current_data[:, points_idx, :]
current_label = np.squeeze(current_label)
file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx + 1) * BATCH_SIZE
feed_dict = {ops['points_pl']: current_data[start_idx:end_idx, :, :] ,
ops['labels_pl']: current_label[start_idx:end_idx],
ops['w_pl']: gmm.weights_,
ops['mu_pl']: gmm.means_,
ops['sigma_pl']: np.sqrt(gmm.covariances_),
ops['is_training_pl']: is_training}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']], feed_dict=feed_dict)
test_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val == current_label[start_idx:end_idx])
#Find the fail cases
batch_current_label = current_label[start_idx:end_idx]
false_idx = pred_val != batch_current_label
fail_cases_true_labels = batch_current_label[np.where(false_idx)] if batch_idx==0 else np.concatenate([fail_cases_true_labels,batch_current_label[np.where(false_idx)]] )
fail_cases_false_labes = pred_val[np.where(false_idx)] if batch_idx==0 else np.concatenate([fail_cases_false_labes, pred_val[np.where(false_idx)]])
fail_cases_idx = false_idx if batch_idx == 0 else np.concatenate([fail_cases_idx, false_idx])
total_correct += correct
total_seen += BATCH_SIZE
loss_sum += (loss_val * BATCH_SIZE)
for i in range(start_idx, end_idx):
l = current_label[i]
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i - start_idx] == l)
fail_cases_true_labels_final.append(fail_cases_true_labels)
fail_cases_false_labes_final.append(fail_cases_false_labes)
fail_cases_idx_final.append(fail_cases_idx)
acc = total_correct / float(total_seen)
acc_avg_cls = np.mean(np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float))
log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
log_string('eval accuracy: %f' % (acc))
log_string('eval avg class acc: %f' % (acc_avg_cls))
FAIL_CASES_FOUT.write('True:' + str(fail_cases_true_labels) + '\n')
FAIL_CASES_FOUT.write('Pred:' + str(fail_cases_false_labes) + '\n')
FAIL_CASES_FOUT.write('Idx:' + str(fail_cases_idx) + '\n')
FAIL_CASES_FOUT.flush()
dump_dic = {'true_labels': fail_cases_true_labels_final,
'false_pred_labels': fail_cases_false_labes_final,
'idxs': fail_cases_idx_final}
# pickle.dump([fail_cases_true_labels, fail_cases_false_labes], open(os.path.join(LOG_DIR, 'fail_cases.p'), "wb"))
pickle.dump(dump_dic, open(os.path.join(LOG_DIR, 'fail_cases.p'), "wb"))
return (acc, acc_avg_cls)
def export_visualizations(gmm, log_dir):
"""
Visualizes and saves the images of the confusion matrix and fv representations
    :param gmm: instance of an sklearn GaussianMixture (GMM) Gaussian mixture model object
:param log_dir: path to the trained model
:return None (exports images)
"""
# load the model
model_checkpoint = os.path.join(log_dir, "model.ckpt")
if not(os.path.isfile(model_checkpoint+".meta")):
raise ValueError("No log folder availabe with name " + str(log_dir))
# reBuild Graph
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
points_pl, labels_pl, w_pl, mu_pl, sigma_pl, = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT, gmm,)
is_training_pl = tf.placeholder(tf.bool, shape=())
# Get model and loss
pred, fv = MODEL.get_model(points_pl, w_pl, mu_pl, sigma_pl, is_training_pl, num_classes=NUM_CLASSES)
ops = {'points_pl': points_pl,
'labels_pl': labels_pl,
'w_pl': w_pl,
'mu_pl': mu_pl,
'sigma_pl': sigma_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'fv': fv}
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
sess = tf_util.get_session(GPU_INDEX, limit_gpu=LIMIT_GPU)
# Restore variables from disk.
saver.restore(sess, model_checkpoint)
print("Model restored.")
# Load the test data
for fn in range(len(TEST_FILES)):
log_string('----' + str(fn) + '-----')
current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
current_data = current_data[:, 0:NUM_POINT, :]
current_label = np.squeeze(current_label)
file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx + 1) * BATCH_SIZE
feed_dict = {ops['points_pl']: current_data[start_idx:end_idx, :, :],
ops['labels_pl']: current_label[start_idx:end_idx],
ops['w_pl']: gmm.weights_,
ops['mu_pl']: gmm.means_,
ops['sigma_pl']: np.sqrt(gmm.covariances_),
ops['is_training_pl']: False}
pred_label, fv_data = sess.run([ops['pred'], ops['fv']], feed_dict=feed_dict)
pred_label = np.argmax(pred_label, 1)
all_fv_data = fv_data if (fn==0 and batch_idx==0) else np.concatenate([all_fv_data, fv_data],axis=0)
true_labels = current_label[start_idx:end_idx] if (fn==0 and batch_idx==0) else np.concatenate([true_labels, current_label[start_idx:end_idx]],axis=0)
all_pred_labels = pred_label if (fn==0 and batch_idx==0) else np.concatenate([all_pred_labels, pred_label],axis=0)
# Export Confusion Matrix
visualization.visualize_confusion_matrix(true_labels, all_pred_labels, classes=LABEL_MAP, normalize=False, export=True,
display=False, filename=os.path.join(log_dir,'confusion_mat'), n_classes=NUM_CLASSES)
    # Export Fisher Vector visualization
label_tags = [LABEL_MAP[i] for i in true_labels]
visualization.visualize_fv(all_fv_data, gmm, label_tags, export=True,
display=False,filename=os.path.join(log_dir,'fisher_vectors'))
# plt.show() #uncomment this to see the images in addition to saving them
print("Confusion matrix and Fisher vectores were saved to /" + str(log_dir))
if __name__ == "__main__":
gmm = utils.get_3d_grid_gmm(subdivisions=[N_GAUSSIANS, N_GAUSSIANS, N_GAUSSIANS], variance=GMM_VARIANCE)
pickle.dump(gmm, open(os.path.join(LOG_DIR, 'gmm.p'), "wb"))
train(gmm)
#export_visualizations(gmm, LOG_DIR,n_model_limit=None)
LOG_FOUT.close()
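# Editorial note (not part of the original script): a minimal numpy sketch of
# the staircased exponential learning-rate decay computed by get_learning_rate()
# above, using the script's default hyper-parameters as illustrative values.
import numpy as np
base_lr, decay_step, decay_rate, batch_size = 0.001, 200000, 0.7, 64
for step in [0, 1000, 5000, 10000]:
    # global step passed to the decay is batch * BATCH_SIZE, decayed in steps
    lr = base_lr * decay_rate ** np.floor(step * batch_size / decay_step)
    print(step, max(lr, 1e-5))   # clipped exactly as in get_learning_rate()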
| 44.89154
| 287
| 0.636482
|
7950f6088d314f0ad41e46e38621d8f687d515b6
| 89,392
|
py
|
Python
|
scipy/optimize/tests/test_optimize.py
|
jcharlong/scipy
|
153467a9174b0c6f4b90ffeed5871e5018658108
|
[
"BSD-3-Clause"
] | 11
|
2020-06-28T04:30:26.000Z
|
2022-03-26T08:40:47.000Z
|
scipy/optimize/tests/test_optimize.py
|
jcharlong/scipy
|
153467a9174b0c6f4b90ffeed5871e5018658108
|
[
"BSD-3-Clause"
] | 25
|
2020-11-16T15:36:41.000Z
|
2021-06-01T05:15:31.000Z
|
scipy/optimize/tests/test_optimize.py
|
jcharlong/scipy
|
153467a9174b0c6f4b90ffeed5871e5018658108
|
[
"BSD-3-Clause"
] | 20
|
2021-11-07T13:55:56.000Z
|
2021-12-02T10:54:01.000Z
|
"""
Unit tests for optimization routines from optimize.py
Authors:
Ed Schofield, Nov 2005
Andrew Straw, April 2008
To run it in its simplest form::
nosetests test_optimize.py
"""
import itertools
import numpy as np
from numpy.testing import (assert_allclose, assert_equal,
assert_, assert_almost_equal,
assert_no_warnings, assert_warns,
assert_array_less, suppress_warnings)
import pytest
from pytest import raises as assert_raises
from scipy import optimize
from scipy.optimize._minimize import MINIMIZE_METHODS, MINIMIZE_SCALAR_METHODS
from scipy.optimize._linprog import LINPROG_METHODS
from scipy.optimize._root import ROOT_METHODS
from scipy.optimize._root_scalar import ROOT_SCALAR_METHODS
from scipy.optimize._qap import QUADRATIC_ASSIGNMENT_METHODS
from scipy.optimize._differentiable_functions import ScalarFunction
from scipy.optimize.optimize import MemoizeJac, show_options
def test_check_grad():
# Verify if check_grad is able to estimate the derivative of the
# logistic function.
def logit(x):
return 1 / (1 + np.exp(-x))
def der_logit(x):
return np.exp(-x) / (1 + np.exp(-x))**2
x0 = np.array([1.5])
r = optimize.check_grad(logit, der_logit, x0)
assert_almost_equal(r, 0)
r = optimize.check_grad(logit, der_logit, x0, epsilon=1e-6)
assert_almost_equal(r, 0)
# Check if the epsilon parameter is being considered.
r = abs(optimize.check_grad(logit, der_logit, x0, epsilon=1e-1) - 0)
assert_(r > 1e-7)
class CheckOptimize:
""" Base test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def setup_method(self):
self.F = np.array([[1, 1, 1],
[1, 1, 0],
[1, 0, 1],
[1, 0, 0],
[1, 0, 0]])
self.K = np.array([1., 0.3, 0.5])
self.startparams = np.zeros(3, np.float64)
self.solution = np.array([0., -0.524869316, 0.487525860])
self.maxiter = 1000
self.funccalls = 0
self.gradcalls = 0
self.trace = []
def func(self, x):
self.funccalls += 1
if self.funccalls > 6000:
raise RuntimeError("too many iterations in optimization routine")
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
f = logZ - np.dot(self.K, x)
self.trace.append(np.copy(x))
return f
def grad(self, x):
self.gradcalls += 1
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.transpose(), p) - self.K
def hess(self, x):
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.T,
np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))
def hessp(self, x, p):
return np.dot(self.hess(x), p)
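# Editorial note (not part of the original test module): a standalone sketch of
# the maxent objective described in the CheckOptimize docstring, evaluated at
# the documented solution. F, K and the solution are copied from setup_method
# above; this only illustrates what self.func computes.
import numpy as np
F = np.array([[1, 1, 1], [1, 1, 0], [1, 0, 1], [1, 0, 0], [1, 0, 0]], float)
K = np.array([1., 0.3, 0.5])
x = np.array([0., -0.524869316, 0.487525860])
log_pdot = F @ x
print(np.log(np.exp(log_pdot).sum()) - K @ x)   # objective value at the solution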
class CheckOptimizeParameterized(CheckOptimize):
def test_cg(self):
# conjugate gradient optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='CG', jac=self.grad,
options=opts)
params, fopt, func_calls, grad_calls, warnflag = \
res['x'], res['fun'], res['nfev'], res['njev'], res['status']
else:
retval = optimize.fmin_cg(self.func, self.startparams,
self.grad, (), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 9, self.funccalls)
assert_(self.gradcalls == 7, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[2:4],
[[0, -0.5, 0.5],
[0, -5.05700028e-01, 4.95985862e-01]],
atol=1e-14, rtol=1e-7)
def test_cg_cornercase(self):
def f(r):
return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2
# Check several initial guesses. (Too far away from the
# minimum, the function ends up in the flat region of exp.)
for x0 in np.linspace(-0.75, 3, 71):
sol = optimize.minimize(f, [x0], method='CG')
assert_(sol.success)
assert_allclose(sol.x, [0.5], rtol=1e-5)
def test_bfgs(self):
# Broyden-Fletcher-Goldfarb-Shanno optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams,
jac=self.grad, method='BFGS', args=(),
options=opts)
params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = (
res['x'], res['fun'], res['jac'], res['hess_inv'],
res['nfev'], res['njev'], res['status'])
else:
retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, gopt, Hopt,
func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 10, self.funccalls)
assert_(self.gradcalls == 8, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[6:8],
[[0, -5.25060743e-01, 4.87748473e-01],
[0, -5.24885582e-01, 4.87530347e-01]],
atol=1e-14, rtol=1e-7)
def test_bfgs_infinite(self):
# Test corner case where -Inf is the minimum. See gh-2019.
func = lambda x: -np.e**-x
fprime = lambda x: -func(x)
x0 = [0]
with np.errstate(over='ignore'):
if self.use_wrapper:
opts = {'disp': self.disp}
x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
args=(), options=opts)['x']
else:
x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp)
assert_(not np.isfinite(func(x)))
def test_powell(self):
# Powell (direction set) optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Powell', options=opts)
params, fopt, direc, numiter, func_calls, warnflag = (
res['x'], res['fun'], res['direc'], res['nit'],
res['nfev'], res['status'])
else:
retval = optimize.fmin_powell(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, direc, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# params[0] does not affect the objective function
assert_allclose(params[1:], self.solution[1:], atol=5e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
#
# However, some leeway must be added: the exact evaluation
# count is sensitive to numerical error, and floating-point
# computations are not bit-for-bit reproducible across
# machines, and when using e.g., MKL, data alignment
# etc., affect the rounding error.
#
assert_(self.funccalls <= 116 + 20, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
@pytest.mark.xfail(reason="This part of test_powell fails on some "
"platforms, but the solution returned by powell is "
"still valid.")
def test_powell_gh14014(self):
# This part of test_powell started failing on some CI platforms;
# see gh-14014. Since the solution is still correct and the comments
# in test_powell suggest that small differences in the bits are known
# to change the "trace" of the solution, seems safe to xfail to get CI
# green now and investigate later.
# Powell (direction set) optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Powell', options=opts)
params, fopt, direc, numiter, func_calls, warnflag = (
res['x'], res['fun'], res['direc'], res['nit'],
res['nfev'], res['status'])
else:
retval = optimize.fmin_powell(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, direc, numiter, func_calls, warnflag) = retval
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[34:39],
[[0.72949016, -0.44156936, 0.47100962],
[0.72949016, -0.44156936, 0.48052496],
[1.45898031, -0.88313872, 0.95153458],
[0.72949016, -0.44156936, 0.47576729],
[1.72949016, -0.44156936, 0.47576729]],
atol=1e-14, rtol=1e-7)
def test_powell_bounded(self):
# Powell (direction set) optimization routine
# same as test_powell above, but with bounds
bounds = [(-np.pi, np.pi) for _ in self.startparams]
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
bounds=bounds,
method='Powell', options=opts)
params, fopt, direc, numiter, func_calls, warnflag = (
res['x'], res['fun'], res['direc'], res['nit'],
res['nfev'], res['status'])
assert func_calls == self.funccalls
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'.
# Generally, this takes 131 function calls. However, on some CI
# checks it finds 138 funccalls. This 20 call leeway was also
# included in the test_powell function.
# The exact evaluation count is sensitive to numerical error, and
# floating-point computations are not bit-for-bit reproducible
# across machines, and when using e.g. MKL, data alignment etc.
# affect the rounding error.
assert self.funccalls <= 131 + 20
assert self.gradcalls == 0
def test_neldermead(self):
# Nelder-Mead simplex algorithm
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Nelder-mead', options=opts)
params, fopt, numiter, func_calls, warnflag = (
res['x'], res['fun'], res['nit'], res['nfev'],
res['status'])
else:
retval = optimize.fmin(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=self.disp,
retall=False)
(params, fopt, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 167, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[76:78],
[[0.1928968, -0.62780447, 0.35166118],
[0.19572515, -0.63648426, 0.35838135]],
atol=1e-14, rtol=1e-7)
def test_neldermead_initial_simplex(self):
# Nelder-Mead simplex algorithm
simplex = np.zeros((4, 3))
simplex[...] = self.startparams
for j in range(3):
simplex[j+1, j] += 0.1
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': True, 'initial_simplex': simplex}
res = optimize.minimize(self.func, self.startparams, args=(),
method='Nelder-mead', options=opts)
params, fopt, numiter, func_calls, warnflag = (res['x'],
res['fun'],
res['nit'],
res['nfev'],
res['status'])
assert_allclose(res['allvecs'][0], simplex[0])
else:
retval = optimize.fmin(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False,
initial_simplex=simplex)
(params, fopt, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.17.0. Don't allow them to increase.
assert_(self.funccalls == 100, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.15.0
assert_allclose(self.trace[50:52],
[[0.14687474, -0.5103282, 0.48252111],
[0.14474003, -0.5282084, 0.48743951]],
atol=1e-14, rtol=1e-7)
def test_neldermead_initial_simplex_bad(self):
# Check it fails with a bad simplices
bad_simplices = []
simplex = np.zeros((3, 2))
simplex[...] = self.startparams[:2]
for j in range(2):
simplex[j+1, j] += 0.1
bad_simplices.append(simplex)
simplex = np.zeros((3, 3))
bad_simplices.append(simplex)
for simplex in bad_simplices:
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': False,
'return_all': False, 'initial_simplex': simplex}
assert_raises(ValueError,
optimize.minimize,
self.func,
self.startparams,
args=(),
method='Nelder-mead',
options=opts)
else:
assert_raises(ValueError, optimize.fmin,
self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False,
initial_simplex=simplex)
def test_ncg_negative_maxiter(self):
# Regression test for gh-8241
opts = {'maxiter': -1}
result = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
args=(), options=opts)
assert_(result.status == 1)
def test_ncg(self):
# line-search Newton conjugate gradient optimization routine
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 22, self.gradcalls) # 0.13.0
# assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
# assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
# assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hess(self):
# Newton conjugate gradient with Hessian
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hess=self.hess,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess=self.hess,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls <= 7, self.funccalls) # gh10673
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
# assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
# assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hessp(self):
# Newton conjugate gradient with Hessian times a vector p.
if self.use_wrapper:
opts = {'maxiter': self.maxiter, 'disp': self.disp,
'return_all': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hessp=self.hessp,
args=(), options=opts)['x']
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess_p=self.hessp,
args=(), maxiter=self.maxiter,
full_output=False, disp=self.disp,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls <= 7, self.funccalls) # gh10673
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
# assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
# assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from SciPy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_obj_func_returns_scalar():
match = ("The user-provided "
"objective function must "
"return a scalar value.")
with assert_raises(ValueError, match=match):
optimize.minimize(lambda x: x, np.array([1, 1]), method='BFGS')
def test_neldermead_xatol_fatol():
# gh4484
# test we can call with fatol, xatol specified
func = lambda x: x[0]**2 + x[1]**2
optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2,
xatol=1e-3, fatol=1e-3)
assert_warns(DeprecationWarning,
optimize._minimize._minimize_neldermead,
func, [1, 1], xtol=1e-3, ftol=1e-3, maxiter=2)
def test_neldermead_adaptive():
func = lambda x: np.sum(x**2)
p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159,
0.32308456, 0.9692297, 0.4471682, 0.77411992, 0.80441652,
0.35994957, 0.75487856, 0.99973421, 0.65063887, 0.09626474]
res = optimize.minimize(func, p0, method='Nelder-Mead')
assert_equal(res.success, False)
res = optimize.minimize(func, p0, method='Nelder-Mead',
options={'adaptive': True})
assert_equal(res.success, True)
def test_bounded_powell_outsidebounds():
    # With the bounded Powell method, if you start outside the bounds the final
    # solution should still be within the bounds (provided that the user doesn't
    # make a bad choice for the `direc` argument).
func = lambda x: np.sum(x**2)
bounds = (-1, 1), (-1, 1), (-1, 1)
x0 = [-4, .5, -.8]
# we're starting outside the bounds, so we should get a warning
with assert_warns(optimize.OptimizeWarning):
res = optimize.minimize(func, x0, bounds=bounds, method="Powell")
assert_allclose(res.x, np.array([0.] * len(x0)), atol=1e-6)
assert_equal(res.success, True)
assert_equal(res.status, 0)
# However, now if we change the `direc` argument such that the
# set of vectors does not span the parameter space, then we may
# not end up back within the bounds. Here we see that the first
# parameter cannot be updated!
direc = [[0, 0, 0], [0, 1, 0], [0, 0, 1]]
# we're starting outside the bounds, so we should get a warning
with assert_warns(optimize.OptimizeWarning):
res = optimize.minimize(func, x0,
bounds=bounds, method="Powell",
options={'direc': direc})
assert_allclose(res.x, np.array([-4., 0, 0]), atol=1e-6)
assert_equal(res.success, False)
assert_equal(res.status, 4)
def test_bounded_powell_vs_powell():
# here we test an example where the bounded Powell method
# will return a different result than the standard Powell
# method.
# first we test a simple example where the minimum is at
# the origin and the minimum that is within the bounds is
# larger than the minimum at the origin.
func = lambda x: np.sum(x**2)
bounds = (-5, -1), (-10, -0.1), (1, 9.2), (-4, 7.6), (-15.9, -2)
x0 = [-2.1, -5.2, 1.9, 0, -2]
options = {'ftol': 1e-10, 'xtol': 1e-10}
res_powell = optimize.minimize(func, x0, method="Powell", options=options)
assert_allclose(res_powell.x, 0., atol=1e-6)
assert_allclose(res_powell.fun, 0., atol=1e-6)
res_bounded_powell = optimize.minimize(func, x0, options=options,
bounds=bounds,
method="Powell")
p = np.array([-1, -0.1, 1, 0, -2])
assert_allclose(res_bounded_powell.x, p, atol=1e-6)
assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6)
# now we test bounded Powell but with a mix of inf bounds.
bounds = (None, -1), (-np.inf, -.1), (1, np.inf), (-4, None), (-15.9, -2)
res_bounded_powell = optimize.minimize(func, x0, options=options,
bounds=bounds,
method="Powell")
p = np.array([-1, -0.1, 1, 0, -2])
assert_allclose(res_bounded_powell.x, p, atol=1e-6)
assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6)
# next we test an example where the global minimum is within
# the bounds, but the bounded Powell method performs better
# than the standard Powell method.
def func(x):
t = np.sin(-x[0]) * np.cos(x[1]) * np.sin(-x[0] * x[1]) * np.cos(x[1])
t -= np.cos(np.sin(x[1] * x[2]) * np.cos(x[2]))
return t**2
bounds = [(-2, 5)] * 3
x0 = [-0.5, -0.5, -0.5]
res_powell = optimize.minimize(func, x0, method="Powell")
res_bounded_powell = optimize.minimize(func, x0,
bounds=bounds,
method="Powell")
assert_allclose(res_powell.fun, 0.007136253919761627, atol=1e-6)
assert_allclose(res_bounded_powell.fun, 0, atol=1e-6)
# next we test the previous example where the we provide Powell
# with (-inf, inf) bounds, and compare it to providing Powell
# with no bounds. They should end up the same.
bounds = [(-np.inf, np.inf)] * 3
res_bounded_powell = optimize.minimize(func, x0,
bounds=bounds,
method="Powell")
assert_allclose(res_powell.fun, res_bounded_powell.fun, atol=1e-6)
assert_allclose(res_powell.nfev, res_bounded_powell.nfev, atol=1e-6)
assert_allclose(res_powell.x, res_bounded_powell.x, atol=1e-6)
# now test when x0 starts outside of the bounds.
x0 = [45.46254415, -26.52351498, 31.74830248]
bounds = [(-2, 5)] * 3
# we're starting outside the bounds, so we should get a warning
with assert_warns(optimize.OptimizeWarning):
res_bounded_powell = optimize.minimize(func, x0,
bounds=bounds,
method="Powell")
assert_allclose(res_bounded_powell.fun, 0, atol=1e-6)
def test_onesided_bounded_powell_stability():
# When the Powell method is bounded on only one side, a
# np.tan transform is done in order to convert it into a
# completely bounded problem. Here we do some simple tests
# of one-sided bounded Powell where the optimal solutions
# are large to test the stability of the transformation.
kwargs = {'method': 'Powell',
'bounds': [(-np.inf, 1e6)] * 3,
'options': {'ftol': 1e-8, 'xtol': 1e-8}}
x0 = [1, 1, 1]
# df/dx is constant.
f = lambda x: -np.sum(x)
res = optimize.minimize(f, x0, **kwargs)
assert_allclose(res.fun, -3e6, atol=1e-4)
# df/dx gets smaller and smaller.
def f(x):
return -np.abs(np.sum(x)) ** (0.1) * (1 if np.all(x > 0) else -1)
res = optimize.minimize(f, x0, **kwargs)
assert_allclose(res.fun, -(3e6) ** (0.1))
# df/dx gets larger and larger.
def f(x):
return -np.abs(np.sum(x)) ** 10 * (1 if np.all(x > 0) else -1)
res = optimize.minimize(f, x0, **kwargs)
assert_allclose(res.fun, -(3e6) ** 10, rtol=1e-7)
# df/dx gets larger for some of the variables and smaller for others.
def f(x):
t = -np.abs(np.sum(x[:2])) ** 5 - np.abs(np.sum(x[2:])) ** (0.1)
t *= (1 if np.all(x > 0) else -1)
return t
kwargs['bounds'] = [(-np.inf, 1e3)] * 3
res = optimize.minimize(f, x0, **kwargs)
assert_allclose(res.fun, -(2e3) ** 5 - (1e6) ** (0.1), rtol=1e-7)
class TestOptimizeWrapperDisp(CheckOptimizeParameterized):
use_wrapper = True
disp = True
class TestOptimizeWrapperNoDisp(CheckOptimizeParameterized):
use_wrapper = True
disp = False
class TestOptimizeNoWrapperDisp(CheckOptimizeParameterized):
use_wrapper = False
disp = True
class TestOptimizeNoWrapperNoDisp(CheckOptimizeParameterized):
use_wrapper = False
disp = False
class TestOptimizeSimple(CheckOptimize):
def test_bfgs_nan(self):
# Test corner case where nan is fed to optimizer. See gh-2067.
func = lambda x: x
fprime = lambda x: np.ones_like(x)
x0 = [np.nan]
with np.errstate(over='ignore', invalid='ignore'):
x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
assert_(np.isnan(func(x)))
def test_bfgs_nan_return(self):
# Test corner cases where fun returns NaN. See gh-4793.
# First case: NaN from first call.
func = lambda x: np.nan
with np.errstate(invalid='ignore'):
result = optimize.minimize(func, 0)
assert_(np.isnan(result['fun']))
assert_(result['success'] is False)
# Second case: NaN from second call.
func = lambda x: 0 if x == 0 else np.nan
fprime = lambda x: np.ones_like(x) # Steer away from zero.
with np.errstate(invalid='ignore'):
result = optimize.minimize(func, 0, jac=fprime)
assert_(np.isnan(result['fun']))
assert_(result['success'] is False)
def test_bfgs_numerical_jacobian(self):
# BFGS with numerical Jacobian and a vector epsilon parameter.
# define the epsilon parameter using a random vector
epsilon = np.sqrt(np.spacing(1.)) * np.random.rand(len(self.solution))
params = optimize.fmin_bfgs(self.func, self.startparams,
epsilon=epsilon, args=(),
maxiter=self.maxiter, disp=False)
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_finite_differences(self):
methods = ['BFGS', 'CG', 'TNC']
jacs = ['2-point', '3-point', None]
for method, jac in itertools.product(methods, jacs):
result = optimize.minimize(self.func, self.startparams,
method=method, jac=jac)
assert_allclose(self.func(result.x), self.func(self.solution),
atol=1e-6)
def test_bfgs_gh_2169(self):
def f(x):
if x < 0:
return 1.79769313e+308
else:
return x + 1./x
xs = optimize.fmin_bfgs(f, [10.], disp=False)
assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4)
def test_bfgs_double_evaluations(self):
        # check that BFGS does not evaluate twice in a row at the same point
def f(x):
xp = float(x)
assert xp not in seen
seen.add(xp)
return 10*x**2, 20*x
seen = set()
optimize.minimize(f, -100, method='bfgs', jac=True, tol=1e-7)
def test_l_bfgs_b(self):
# limited-memory bound-constrained BFGS algorithm
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
self.grad, args=(),
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls == 5, self.gradcalls)
# Ensure that the function behaves the same; this is from SciPy 0.7.0
# test fixed in gh10673
assert_allclose(self.trace[3:5],
[[8.117083e-16, -5.196198e-01, 4.897617e-01],
[0., -0.52489628, 0.48753042]],
atol=1e-14, rtol=1e-7)
def test_l_bfgs_b_numjac(self):
# L-BFGS-B with numerical Jacobian
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
approx_grad=True,
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_l_bfgs_b_funjac(self):
# L-BFGS-B with combined objective function and Jacobian
def fun(x):
return self.func(x), self.grad(x)
retval = optimize.fmin_l_bfgs_b(fun, self.startparams,
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_l_bfgs_b_maxiter(self):
# gh7854
        # Ensure that no more than maxiter iterations are ever run.
class Callback:
def __init__(self):
self.nit = 0
self.fun = None
self.x = None
def __call__(self, x):
self.x = x
self.fun = optimize.rosen(x)
self.nit += 1
c = Callback()
res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b',
callback=c, options={'maxiter': 5})
assert_equal(res.nit, 5)
assert_almost_equal(res.x, c.x)
assert_almost_equal(res.fun, c.fun)
assert_equal(res.status, 1)
assert_(res.success is False)
assert_equal(res.message,
'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT')
def test_minimize_l_bfgs_b(self):
# Minimize with L-BFGS-B method
opts = {'disp': False, 'maxiter': self.maxiter}
r = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', jac=self.grad,
options=opts)
assert_allclose(self.func(r.x), self.func(self.solution),
atol=1e-6)
assert self.gradcalls == r.njev
self.funccalls = self.gradcalls = 0
# approximate jacobian
ra = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', options=opts)
# check that function evaluations in approximate jacobian are counted
# assert_(ra.nfev > r.nfev)
assert self.funccalls == ra.nfev
assert_allclose(self.func(ra.x), self.func(self.solution),
atol=1e-6)
self.funccalls = self.gradcalls = 0
# approximate jacobian
ra = optimize.minimize(self.func, self.startparams, jac='3-point',
method='L-BFGS-B', options=opts)
assert self.funccalls == ra.nfev
assert_allclose(self.func(ra.x), self.func(self.solution),
atol=1e-6)
def test_minimize_l_bfgs_b_ftol(self):
# Check that the `ftol` parameter in l_bfgs_b works as expected
v0 = None
for tol in [1e-1, 1e-4, 1e-7, 1e-10]:
opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol}
sol = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', jac=self.grad,
options=opts)
v = self.func(sol.x)
if v0 is None:
v0 = v
else:
assert_(v < v0)
assert_allclose(v, self.func(self.solution), rtol=tol)
def test_minimize_l_bfgs_maxls(self):
# check that the maxls is passed down to the Fortran routine
sol = optimize.minimize(optimize.rosen, np.array([-1.2, 1.0]),
method='L-BFGS-B', jac=optimize.rosen_der,
options={'disp': False, 'maxls': 1})
assert_(not sol.success)
def test_minimize_l_bfgs_b_maxfun_interruption(self):
# gh-6162
f = optimize.rosen
g = optimize.rosen_der
values = []
x0 = np.full(7, 1000)
def objfun(x):
value = f(x)
values.append(value)
return value
# Look for an interesting test case.
# Request a maxfun that stops at a particularly bad function
# evaluation somewhere between 100 and 300 evaluations.
low, medium, high = 30, 100, 300
optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)
v, k = max((y, i) for i, y in enumerate(values[medium:]))
maxfun = medium + k
# If the minimization strategy is reasonable,
# the minimize() result should not be worse than the best
# of the first 30 function evaluations.
target = min(values[:low])
xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)
assert_array_less(fmin, target)
def test_custom(self):
# This function comes from the documentation example.
def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,
maxiter=100, callback=None, **options):
bestx = x0
besty = fun(x0)
funcalls = 1
niter = 0
improved = True
stop = False
while improved and not stop and niter < maxiter:
improved = False
niter += 1
for dim in range(np.size(x0)):
for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:
testx = np.copy(bestx)
testx[dim] = s
testy = fun(testx, *args)
funcalls += 1
if testy < besty:
besty = testy
bestx = testx
improved = True
if callback is not None:
callback(bestx)
if maxfev is not None and funcalls >= maxfev:
stop = True
break
return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
nfev=funcalls, success=(niter > 1))
x0 = [1.35, 0.9, 0.8, 1.1, 1.2]
res = optimize.minimize(optimize.rosen, x0, method=custmin,
options=dict(stepsize=0.05))
assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4)
def test_gh10771(self):
# check that minimize passes bounds and constraints to a custom
# minimizer without altering them.
bounds = [(-2, 2), (0, 3)]
constraints = 'constraints'
def custmin(fun, x0, **options):
assert options['bounds'] is bounds
assert options['constraints'] is constraints
return optimize.OptimizeResult()
x0 = [1, 1]
optimize.minimize(optimize.rosen, x0, method=custmin,
bounds=bounds, constraints=constraints)
def test_minimize_tol_parameter(self):
# Check that the minimize() tol= argument does something
def func(z):
x, y = z
return x**2*y**2 + x**4 + 1
def dfunc(z):
x, y = z
return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])
for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
'newton-cg', 'l-bfgs-b', 'tnc',
'cobyla', 'slsqp']:
if method in ('nelder-mead', 'powell', 'cobyla'):
jac = None
else:
jac = dfunc
sol1 = optimize.minimize(func, [1, 1], jac=jac, tol=1e-10,
method=method)
sol2 = optimize.minimize(func, [1, 1], jac=jac, tol=1.0,
method=method)
assert_(func(sol1.x) < func(sol2.x),
"%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x)))
@pytest.mark.parametrize('method',
['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs',
'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc',
'fmin_slsqp'] + MINIMIZE_METHODS)
def test_minimize_callback_copies_array(self, method):
# Check that arrays passed to callbacks are not modified
# inplace by the optimizer afterward
# cobyla doesn't have callback
if method == 'cobyla':
return
if method in ('fmin_tnc', 'fmin_l_bfgs_b'):
func = lambda x: (optimize.rosen(x), optimize.rosen_der(x))
else:
func = optimize.rosen
jac = optimize.rosen_der
hess = optimize.rosen_hess
x0 = np.zeros(10)
# Set options
kwargs = {}
if method.startswith('fmin'):
routine = getattr(optimize, method)
if method == 'fmin_slsqp':
kwargs['iter'] = 5
elif method == 'fmin_tnc':
kwargs['maxfun'] = 100
else:
kwargs['maxiter'] = 5
else:
def routine(*a, **kw):
kw['method'] = method
return optimize.minimize(*a, **kw)
if method == 'tnc':
kwargs['options'] = dict(maxfun=100)
else:
kwargs['options'] = dict(maxiter=5)
if method in ('fmin_ncg',):
kwargs['fprime'] = jac
elif method in ('newton-cg',):
kwargs['jac'] = jac
elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
'trust-constr'):
kwargs['jac'] = jac
kwargs['hess'] = hess
# Run with callback
results = []
def callback(x, *args, **kwargs):
results.append((x, np.copy(x)))
routine(func, x0, callback=callback, **kwargs)
# Check returned arrays coincide with their copies
# and have no memory overlap
assert_(len(results) > 2)
assert_(all(np.all(x == y) for x, y in results))
assert_(not any(np.may_share_memory(x[0], y[0])
for x, y in itertools.combinations(results, 2)))
@pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg',
'bfgs', 'newton-cg', 'l-bfgs-b',
'tnc', 'cobyla', 'slsqp'])
def test_no_increase(self, method):
# Check that the solver doesn't return a value worse than the
# initial point.
def func(x):
return (x - 1)**2
def bad_grad(x):
# purposefully invalid gradient function, simulates a case
# where line searches start failing
return 2*(x - 1) * (-1) - 2
x0 = np.array([2.0])
f0 = func(x0)
jac = bad_grad
if method in ['nelder-mead', 'powell', 'cobyla']:
jac = None
sol = optimize.minimize(func, x0, jac=jac, method=method,
options=dict(maxiter=20))
assert_equal(func(sol.x), sol.fun)
if method == 'slsqp':
pytest.xfail("SLSQP returns slightly worse")
assert_(func(sol.x) <= f0)
def test_slsqp_respect_bounds(self):
# Regression test for gh-3108
def f(x):
return sum((x - np.array([1., 2., 3., 4.]))**2)
def cons(x):
a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]])
return np.concatenate([np.dot(a, x) + np.array([5, 10]), x])
x0 = np.array([0.5, 1., 1.5, 2.])
res = optimize.minimize(f, x0, method='slsqp',
constraints={'type': 'ineq', 'fun': cons})
assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12)
@pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell', 'CG', 'BFGS',
'Newton-CG', 'L-BFGS-B', 'SLSQP',
'trust-constr', 'dogleg', 'trust-ncg',
'trust-exact', 'trust-krylov'])
def test_respect_maxiter(self, method):
        # Check that the number of iterations equals maxiter, assuming
        # convergence is not reached earlier
MAXITER = 4
x0 = np.zeros(10)
sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der,
optimize.rosen_hess, None, None)
# Set options
kwargs = {'method': method, 'options': dict(maxiter=MAXITER)}
if method in ('Newton-CG',):
kwargs['jac'] = sf.grad
elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
'trust-constr'):
kwargs['jac'] = sf.grad
kwargs['hess'] = sf.hess
sol = optimize.minimize(sf.fun, x0, **kwargs)
assert sol.nit == MAXITER
assert sol.nfev >= sf.nfev
if hasattr(sol, 'njev'):
assert sol.njev >= sf.ngev
# method specific tests
if method == 'SLSQP':
assert sol.status == 9 # Iteration limit reached
def test_respect_maxiter_trust_constr_ineq_constraints(self):
# special case of minimization with trust-constr and inequality
# constraints to check maxiter limit is obeyed when using internal
# method 'tr_interior_point'
MAXITER = 4
f = optimize.rosen
jac = optimize.rosen_der
hess = optimize.rosen_hess
fun = lambda x: np.array([0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])
cons = ({'type': 'ineq',
'fun': fun},)
x0 = np.zeros(10)
sol = optimize.minimize(f, x0, constraints=cons, jac=jac, hess=hess,
method='trust-constr',
options=dict(maxiter=MAXITER))
assert sol.nit == MAXITER
def test_minimize_automethod(self):
def f(x):
return x**2
def cons(x):
return x - 2
x0 = np.array([10.])
sol_0 = optimize.minimize(f, x0)
sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq',
'fun': cons}])
sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)])
sol_3 = optimize.minimize(f, x0,
constraints=[{'type': 'ineq', 'fun': cons}],
bounds=[(5, 10)])
sol_4 = optimize.minimize(f, x0,
constraints=[{'type': 'ineq', 'fun': cons}],
bounds=[(1, 10)])
for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]:
assert_(sol.success)
assert_allclose(sol_0.x, 0, atol=1e-7)
assert_allclose(sol_1.x, 2, atol=1e-7)
assert_allclose(sol_2.x, 5, atol=1e-7)
assert_allclose(sol_3.x, 5, atol=1e-7)
assert_allclose(sol_4.x, 2, atol=1e-7)
def test_minimize_coerce_args_param(self):
# Regression test for gh-3503
def Y(x, c):
return np.sum((x-c)**2)
def dY_dx(x, c=None):
return 2*(x-c)
c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5])
xinit = np.random.randn(len(c))
optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS")
def test_initial_step_scaling(self):
# Check that optimizer initial step is not huge even if the
# function and gradients are
scales = [1e-50, 1, 1e50]
methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG']
def f(x):
if first_step_size[0] is None and x[0] != x0[0]:
first_step_size[0] = abs(x[0] - x0[0])
if abs(x).max() > 1e4:
raise AssertionError("Optimization stepped far away!")
return scale*(x[0] - 1)**2
def g(x):
return np.array([scale*(x[0] - 1)])
for scale, method in itertools.product(scales, methods):
if method in ('CG', 'BFGS'):
options = dict(gtol=scale*1e-8)
else:
options = dict()
if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'):
# XXX: return initial point if they see small gradient
continue
x0 = [-1.0]
first_step_size = [None]
res = optimize.minimize(f, x0, jac=g, method=method,
options=options)
err_msg = "{0} {1}: {2}: {3}".format(method, scale,
first_step_size,
res)
assert_(res.success, err_msg)
assert_allclose(res.x, [1.0], err_msg=err_msg)
assert_(res.nit <= 3, err_msg)
if scale > 1e-10:
if method in ('CG', 'BFGS'):
assert_allclose(first_step_size[0], 1.01, err_msg=err_msg)
else:
# Newton-CG and L-BFGS-B use different logic for the first
# step, but are both scaling invariant with step sizes ~ 1
assert_(first_step_size[0] > 0.5 and
first_step_size[0] < 3, err_msg)
else:
# step size has upper bound of ||grad||, so line
# search makes many small steps
pass
@pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs',
'newton-cg', 'l-bfgs-b', 'tnc',
'cobyla', 'slsqp', 'trust-constr',
'dogleg', 'trust-ncg', 'trust-exact',
'trust-krylov'])
def test_nan_values(self, method):
        # Check that nan values result in a failed exit status
np.random.seed(1234)
count = [0]
def func(x):
return np.nan
def func2(x):
count[0] += 1
if count[0] > 2:
return np.nan
else:
return np.random.rand()
def grad(x):
return np.array([1.0])
def hess(x):
return np.array([[1.0]])
x0 = np.array([1.0])
needs_grad = method in ('newton-cg', 'trust-krylov', 'trust-exact',
'trust-ncg', 'dogleg')
needs_hess = method in ('trust-krylov', 'trust-exact', 'trust-ncg',
'dogleg')
funcs = [func, func2]
grads = [grad] if needs_grad else [grad, None]
hesss = [hess] if needs_hess else [hess, None]
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
sup.filter(UserWarning, "delta_grad == 0.*")
sup.filter(RuntimeWarning, ".*does not use Hessian.*")
sup.filter(RuntimeWarning, ".*does not use gradient.*")
for f, g, h in itertools.product(funcs, grads, hesss):
count = [0]
sol = optimize.minimize(f, x0, jac=g, hess=h, method=method,
options=dict(maxiter=20))
assert_equal(sol.success, False)
@pytest.mark.parametrize('method', ['nelder-mead', 'cg', 'bfgs',
'l-bfgs-b', 'tnc',
'cobyla', 'slsqp', 'trust-constr',
'dogleg', 'trust-ncg', 'trust-exact',
'trust-krylov'])
def test_duplicate_evaluations(self, method):
# check that there are no duplicate evaluations for any methods
jac = hess = None
if method in ('newton-cg', 'trust-krylov', 'trust-exact',
'trust-ncg', 'dogleg'):
jac = self.grad
if method in ('trust-krylov', 'trust-exact', 'trust-ncg',
'dogleg'):
hess = self.hess
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
# for trust-constr
sup.filter(UserWarning, "delta_grad == 0.*")
optimize.minimize(self.func, self.startparams,
method=method, jac=jac, hess=hess)
for i in range(1, len(self.trace)):
if np.array_equal(self.trace[i - 1], self.trace[i]):
raise RuntimeError(
"Duplicate evaluations made by {}".format(method))
class TestLBFGSBBounds:
def setup_method(self):
self.bounds = ((1, None), (None, None))
self.solution = (1, 0)
def fun(self, x, p=2.0):
return 1.0 / p * (x[0]**p + x[1]**p)
def jac(self, x, p=2.0):
return x**(p - 1)
def fj(self, x, p=2.0):
return self.fun(x, p), self.jac(x, p)
def test_l_bfgs_b_bounds(self):
x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1],
fprime=self.jac,
bounds=self.bounds)
assert_(d['warnflag'] == 0, d['task'])
assert_allclose(x, self.solution, atol=1e-6)
def test_l_bfgs_b_funjac(self):
# L-BFGS-B with fun and jac combined and extra arguments
x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ),
bounds=self.bounds)
assert_(d['warnflag'] == 0, d['task'])
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_l_bfgs_b_bounds(self):
# Minimize with method='L-BFGS-B' with bounds
res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
jac=self.jac, bounds=self.bounds)
assert_(res['success'], res['message'])
assert_allclose(res.x, self.solution, atol=1e-6)
@pytest.mark.parametrize('bounds', [
([(10, 1), (1, 10)]),
([(1, 10), (10, 1)]),
([(10, 1), (10, 1)])
])
def test_minimize_l_bfgs_b_incorrect_bounds(self, bounds):
with pytest.raises(ValueError, match='.*bounds.*'):
optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',
jac=self.jac, bounds=bounds)
def test_minimize_l_bfgs_b_bounds_FD(self):
        # test that a starting value outside the bounds doesn't raise
        # an error (it is handled by clipping).
        # test all the different finite-difference combos, with and without args
jacs = ['2-point', '3-point', None]
argss = [(2.,), ()]
for jac, args in itertools.product(jacs, argss):
res = optimize.minimize(self.fun, [0, -1], args=args,
method='L-BFGS-B',
jac=jac, bounds=self.bounds,
options={'finite_diff_rel_step': None})
assert_(res['success'], res['message'])
assert_allclose(res.x, self.solution, atol=1e-6)
class TestOptimizeScalar:
def setup_method(self):
self.solution = 1.5
def fun(self, x, a=1.5):
"""Objective function"""
return (x - a)**2 - 0.8
def test_brent(self):
x = optimize.brent(self.fun)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack=(-3, -2))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, full_output=True)
assert_allclose(x[0], self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack=(-15, -1, 15))
assert_allclose(x, self.solution, atol=1e-6)
def test_golden(self):
x = optimize.golden(self.fun)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, brack=(-3, -2))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, full_output=True)
assert_allclose(x[0], self.solution, atol=1e-6)
x = optimize.golden(self.fun, brack=(-15, -1, 15))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.golden(self.fun, tol=0)
assert_allclose(x, self.solution)
maxiter_test_cases = [0, 1, 5]
for maxiter in maxiter_test_cases:
x0 = optimize.golden(self.fun, maxiter=0, full_output=True)
x = optimize.golden(self.fun, maxiter=maxiter, full_output=True)
nfev0, nfev = x0[2], x[2]
assert_equal(nfev - nfev0, maxiter)
def test_fminbound(self):
x = optimize.fminbound(self.fun, 0, 1)
assert_allclose(x, 1, atol=1e-4)
x = optimize.fminbound(self.fun, 1, 5)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)
def test_fminbound_scalar(self):
with pytest.raises(ValueError, match='.*must be scalar.*'):
optimize.fminbound(self.fun, np.zeros((1, 2)), 1)
x = optimize.fminbound(self.fun, 1, np.array(5))
assert_allclose(x, self.solution, atol=1e-6)
def test_gh11207(self):
def fun(x):
return x**2
optimize.fminbound(fun, 0, 0)
def test_minimize_scalar(self):
# combine all tests above for the minimize_scalar wrapper
x = optimize.minimize_scalar(self.fun).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='Brent')
assert_(x.success)
x = optimize.minimize_scalar(self.fun, method='Brent',
options=dict(maxiter=3))
assert_(not x.success)
x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
args=(1.5, ), method='Brent').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='Brent',
args=(1.5,)).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
args=(1.5, ), method='Brent').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
args=(1.5, ), method='golden').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, method='golden',
args=(1.5,)).x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
args=(1.5, ), method='golden').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,),
method='Bounded').x
assert_allclose(x, 1, atol=1e-4)
x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]),
np.array([5])),
args=(np.array([1.5]), ),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
bounds=(5, 1), method='bounded', args=(1.5, ))
assert_raises(ValueError, optimize.minimize_scalar, self.fun,
bounds=(np.zeros(2), 1), method='bounded', args=(1.5, ))
x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)),
method='bounded').x
assert_allclose(x, self.solution, atol=1e-6)
def test_minimize_scalar_custom(self):
# This function comes from the documentation example.
def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1,
maxiter=100, callback=None, **options):
bestx = (bracket[1] + bracket[0]) / 2.0
besty = fun(bestx)
funcalls = 1
niter = 0
improved = True
stop = False
while improved and not stop and niter < maxiter:
improved = False
niter += 1
for testx in [bestx - stepsize, bestx + stepsize]:
testy = fun(testx, *args)
funcalls += 1
if testy < besty:
besty = testy
bestx = testx
improved = True
if callback is not None:
callback(bestx)
if maxfev is not None and funcalls >= maxfev:
stop = True
break
return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
nfev=funcalls, success=(niter > 1))
res = optimize.minimize_scalar(self.fun, bracket=(0, 4),
method=custmin,
options=dict(stepsize=0.05))
assert_allclose(res.x, self.solution, atol=1e-6)
def test_minimize_scalar_coerce_args_param(self):
# Regression test for gh-3503
optimize.minimize_scalar(self.fun, args=1.5)
@pytest.mark.parametrize('method', ['brent', 'bounded', 'golden'])
def test_nan_values(self, method):
        # Check that nan values result in a failed exit status
np.random.seed(1234)
count = [0]
def func(x):
count[0] += 1
if count[0] > 4:
return np.nan
else:
return x**2 + 0.1 * np.sin(x)
bracket = (-1, 0, 1)
bounds = (-1, 1)
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
sup.filter(UserWarning, "delta_grad == 0.*")
sup.filter(RuntimeWarning, ".*does not use Hessian.*")
sup.filter(RuntimeWarning, ".*does not use gradient.*")
count = [0]
sol = optimize.minimize_scalar(func, bracket=bracket,
bounds=bounds, method=method,
options=dict(maxiter=20))
assert_equal(sol.success, False)
def test_brent_negative_tolerance():
assert_raises(ValueError, optimize.brent, np.cos, tol=-.01)
class TestNewtonCg:
def test_rosenbrock(self):
x0 = np.array([-1.2, 1.0])
sol = optimize.minimize(optimize.rosen, x0,
jac=optimize.rosen_der,
hess=optimize.rosen_hess,
tol=1e-5,
method='Newton-CG')
assert_(sol.success, sol.message)
assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)
def test_himmelblau(self):
x0 = np.array(himmelblau_x0)
sol = optimize.minimize(himmelblau,
x0,
jac=himmelblau_grad,
hess=himmelblau_hess,
method='Newton-CG',
tol=1e-6)
assert_(sol.success, sol.message)
assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4)
assert_allclose(sol.fun, himmelblau_min, atol=1e-4)
def test_line_for_search():
# _line_for_search is only used in _linesearch_powell, which is also
# tested below. Thus there are more tests of _line_for_search in the
# test_linesearch_powell_bounded function.
line_for_search = optimize.optimize._line_for_search
# args are x0, alpha, lower_bound, upper_bound
    # returns lmin, lmax (a standalone sketch of this computation follows this
    # test function)
lower_bound = np.array([-5.3, -1, -1.5, -3])
upper_bound = np.array([1.9, 1, 2.8, 3])
# test when starting in the bounds
x0 = np.array([0., 0, 0, 0])
# and when starting outside of the bounds
x1 = np.array([0., 2, -3, 0])
all_tests = (
(x0, np.array([1., 0, 0, 0]), -5.3, 1.9),
(x0, np.array([0., 1, 0, 0]), -1, 1),
(x0, np.array([0., 0, 1, 0]), -1.5, 2.8),
(x0, np.array([0., 0, 0, 1]), -3, 3),
(x0, np.array([1., 1, 0, 0]), -1, 1),
(x0, np.array([1., 0, -1, 2]), -1.5, 1.5),
(x0, np.array([2., 0, -1, 2]), -1.5, 0.95),
(x1, np.array([1., 0, 0, 0]), -5.3, 1.9),
(x1, np.array([0., 1, 0, 0]), -3, -1),
(x1, np.array([0., 0, 1, 0]), 1.5, 5.8),
(x1, np.array([0., 0, 0, 1]), -3, 3),
(x1, np.array([1., 1, 0, 0]), -3, -1),
(x1, np.array([1., 0, -1, 0]), -5.3, -1.5),
)
for x, alpha, lmin, lmax in all_tests:
mi, ma = line_for_search(x, alpha, lower_bound, upper_bound)
assert_allclose(mi, lmin, atol=1e-6)
assert_allclose(ma, lmax, atol=1e-6)
# now with infinite bounds
lower_bound = np.array([-np.inf, -1, -np.inf, -3])
upper_bound = np.array([np.inf, 1, 2.8, np.inf])
all_tests = (
(x0, np.array([1., 0, 0, 0]), -np.inf, np.inf),
(x0, np.array([0., 1, 0, 0]), -1, 1),
(x0, np.array([0., 0, 1, 0]), -np.inf, 2.8),
(x0, np.array([0., 0, 0, 1]), -3, np.inf),
(x0, np.array([1., 1, 0, 0]), -1, 1),
(x0, np.array([1., 0, -1, 2]), -1.5, np.inf),
(x1, np.array([1., 0, 0, 0]), -np.inf, np.inf),
(x1, np.array([0., 1, 0, 0]), -3, -1),
(x1, np.array([0., 0, 1, 0]), -np.inf, 5.8),
(x1, np.array([0., 0, 0, 1]), -3, np.inf),
(x1, np.array([1., 1, 0, 0]), -3, -1),
(x1, np.array([1., 0, -1, 0]), -5.8, np.inf),
)
for x, alpha, lmin, lmax in all_tests:
mi, ma = line_for_search(x, alpha, lower_bound, upper_bound)
assert_allclose(mi, lmin, atol=1e-6)
assert_allclose(ma, lmax, atol=1e-6)
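# A minimal standalone sketch of the bound computation exercised above, under the
# assumption (not verified against the private implementation) that
# _line_for_search returns the extreme step sizes lmin, lmax for which
# x0 + l * alpha stays inside [lower_bound, upper_bound]; the helper name below is
# ours, not scipy API.
def _line_for_search_sketch(x0, alpha, lower_bound, upper_bound):
    nonzero = alpha != 0
    # step sizes at which each moving coordinate hits its lower/upper bound
    low = (lower_bound - x0)[nonzero] / alpha[nonzero]
    high = (upper_bound - x0)[nonzero] / alpha[nonzero]
    # per coordinate the feasible interval is [min(low, high), max(low, high)];
    # the overall interval is the intersection over all moving coordinates
    lmin = np.max(np.minimum(low, high))
    lmax = np.min(np.maximum(low, high))
    return lmin, lmax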
def test_linesearch_powell():
# helper function in optimize.py, not a public function.
linesearch_powell = optimize.optimize._linesearch_powell
# args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3
# returns new_fval, p + direction, direction
func = lambda x: np.sum((x - np.array([-1., 2., 1.5, -.4]))**2)
p0 = np.array([0., 0, 0, 0])
fval = func(p0)
lower_bound = np.array([-np.inf] * 4)
upper_bound = np.array([np.inf] * 4)
all_tests = (
(np.array([1., 0, 0, 0]), -1),
(np.array([0., 1, 0, 0]), 2),
(np.array([0., 0, 1, 0]), 1.5),
(np.array([0., 0, 0, 1]), -.4),
(np.array([-1., 0, 1, 0]), 1.25),
(np.array([0., 0, 1, 1]), .55),
(np.array([2., 0, -1, 1]), -.65),
)
for xi, l in all_tests:
f, p, direction = linesearch_powell(func, p0, xi,
fval=fval, tol=1e-5)
assert_allclose(f, func(l * xi), atol=1e-6)
assert_allclose(p, l * xi, atol=1e-6)
assert_allclose(direction, l * xi, atol=1e-6)
f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
lower_bound=lower_bound,
upper_bound=upper_bound,
fval=fval)
assert_allclose(f, func(l * xi), atol=1e-6)
assert_allclose(p, l * xi, atol=1e-6)
assert_allclose(direction, l * xi, atol=1e-6)
def test_linesearch_powell_bounded():
# helper function in optimize.py, not a public function.
linesearch_powell = optimize.optimize._linesearch_powell
# args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3
# returns new_fval, p+direction, direction
func = lambda x: np.sum((x-np.array([-1., 2., 1.5, -.4]))**2)
p0 = np.array([0., 0, 0, 0])
fval = func(p0)
# first choose bounds such that the same tests from
# test_linesearch_powell should pass.
lower_bound = np.array([-2.]*4)
upper_bound = np.array([2.]*4)
all_tests = (
(np.array([1., 0, 0, 0]), -1),
(np.array([0., 1, 0, 0]), 2),
(np.array([0., 0, 1, 0]), 1.5),
(np.array([0., 0, 0, 1]), -.4),
(np.array([-1., 0, 1, 0]), 1.25),
(np.array([0., 0, 1, 1]), .55),
(np.array([2., 0, -1, 1]), -.65),
)
for xi, l in all_tests:
f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
lower_bound=lower_bound,
upper_bound=upper_bound,
fval=fval)
assert_allclose(f, func(l * xi), atol=1e-6)
assert_allclose(p, l * xi, atol=1e-6)
assert_allclose(direction, l * xi, atol=1e-6)
# now choose bounds such that unbounded vs bounded gives different results
lower_bound = np.array([-.3]*3 + [-1])
upper_bound = np.array([.45]*3 + [.9])
all_tests = (
(np.array([1., 0, 0, 0]), -.3),
(np.array([0., 1, 0, 0]), .45),
(np.array([0., 0, 1, 0]), .45),
(np.array([0., 0, 0, 1]), -.4),
(np.array([-1., 0, 1, 0]), .3),
(np.array([0., 0, 1, 1]), .45),
(np.array([2., 0, -1, 1]), -.15),
)
for xi, l in all_tests:
f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
lower_bound=lower_bound,
upper_bound=upper_bound,
fval=fval)
assert_allclose(f, func(l * xi), atol=1e-6)
assert_allclose(p, l * xi, atol=1e-6)
assert_allclose(direction, l * xi, atol=1e-6)
# now choose as above but start outside the bounds
p0 = np.array([-1., 0, 0, 2])
fval = func(p0)
all_tests = (
(np.array([1., 0, 0, 0]), .7),
(np.array([0., 1, 0, 0]), .45),
(np.array([0., 0, 1, 0]), .45),
(np.array([0., 0, 0, 1]), -2.4),
)
for xi, l in all_tests:
f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
lower_bound=lower_bound,
upper_bound=upper_bound,
fval=fval)
assert_allclose(f, func(p0 + l * xi), atol=1e-6)
assert_allclose(p, p0 + l * xi, atol=1e-6)
assert_allclose(direction, l * xi, atol=1e-6)
# now mix in inf
p0 = np.array([0., 0, 0, 0])
fval = func(p0)
# now choose bounds that mix inf
lower_bound = np.array([-.3, -np.inf, -np.inf, -1])
upper_bound = np.array([np.inf, .45, np.inf, .9])
all_tests = (
(np.array([1., 0, 0, 0]), -.3),
(np.array([0., 1, 0, 0]), .45),
(np.array([0., 0, 1, 0]), 1.5),
(np.array([0., 0, 0, 1]), -.4),
(np.array([-1., 0, 1, 0]), .3),
(np.array([0., 0, 1, 1]), .55),
(np.array([2., 0, -1, 1]), -.15),
)
for xi, l in all_tests:
f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
lower_bound=lower_bound,
upper_bound=upper_bound,
fval=fval)
assert_allclose(f, func(l * xi), atol=1e-6)
assert_allclose(p, l * xi, atol=1e-6)
assert_allclose(direction, l * xi, atol=1e-6)
# now choose as above but start outside the bounds
p0 = np.array([-1., 0, 0, 2])
fval = func(p0)
all_tests = (
(np.array([1., 0, 0, 0]), .7),
(np.array([0., 1, 0, 0]), .45),
(np.array([0., 0, 1, 0]), 1.5),
(np.array([0., 0, 0, 1]), -2.4),
)
for xi, l in all_tests:
f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,
lower_bound=lower_bound,
upper_bound=upper_bound,
fval=fval)
assert_allclose(f, func(p0 + l * xi), atol=1e-6)
assert_allclose(p, p0 + l * xi, atol=1e-6)
assert_allclose(direction, l * xi, atol=1e-6)
class TestRosen:
def test_hess(self):
# Compare rosen_hess(x) times p with rosen_hess_prod(x,p). See gh-1775.
x = np.array([3, 4, 5])
p = np.array([2, 2, 2])
hp = optimize.rosen_hess_prod(x, p)
dothp = np.dot(optimize.rosen_hess(x), p)
assert_equal(hp, dothp)
def himmelblau(p):
"""
R^2 -> R^1 test function for optimization. The function has four local
minima where himmelblau(xopt) == 0.
"""
x, y = p
a = x*x + y - 11
b = x + y*y - 7
return a*a + b*b
def himmelblau_grad(p):
x, y = p
return np.array([4*x**3 + 4*x*y - 42*x + 2*y**2 - 14,
2*x**2 + 4*x*y + 4*y**3 - 26*y - 22])
def himmelblau_hess(p):
x, y = p
return np.array([[12*x**2 + 4*y - 42, 4*x + 4*y],
[4*x + 4*y, 4*x + 12*y**2 - 26]])
himmelblau_x0 = [-0.27, -0.9]
himmelblau_xopt = [3, 2]
himmelblau_min = 0.0
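# Quick illustration of the claim in the himmelblau docstring that the function has
# four local minima with value 0. The first minimizer is exact; the other three
# locations are approximate values quoted in the standard literature (an assumption
# here, they do not appear in this file), so the check only asks for near-zero values.
def _himmelblau_minima_sketch():
    known_minima = [(3.0, 2.0),
                    (-2.805118, 3.131312),
                    (-3.779310, -3.283186),
                    (3.584428, -1.848126)]
    return all(himmelblau(p) < 1e-6 for p in known_minima)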
def test_minimize_multiple_constraints():
# Regression test for gh-4240.
def func(x):
return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])
def func1(x):
return np.array([x[1]])
def func2(x):
return np.array([x[2]])
cons = ({'type': 'ineq', 'fun': func},
{'type': 'ineq', 'fun': func1},
{'type': 'ineq', 'fun': func2})
f = lambda x: -1 * (x[0] + x[1] + x[2])
res = optimize.minimize(f, [0, 0, 0], method='SLSQP', constraints=cons)
assert_allclose(res.x, [125, 0, 0], atol=1e-10)
class TestOptimizeResultAttributes:
# Test that all minimizers return an OptimizeResult containing
# all the OptimizeResult attributes
def setup_method(self):
self.x0 = [5, 5]
self.func = optimize.rosen
self.jac = optimize.rosen_der
self.hess = optimize.rosen_hess
self.hessp = optimize.rosen_hess_prod
self.bounds = [(0., 10.), (0., 10.)]
def test_attributes_present(self):
attributes = ['nit', 'nfev', 'x', 'success', 'status', 'fun',
'message']
skip = {'cobyla': ['nit']}
for method in MINIMIZE_METHODS:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
("Method .+ does not use (gradient|Hessian.*)"
" information"))
res = optimize.minimize(self.func, self.x0, method=method,
jac=self.jac, hess=self.hess,
hessp=self.hessp)
for attribute in attributes:
if method in skip and attribute in skip[method]:
continue
assert hasattr(res, attribute)
assert_(attribute in dir(res))
# gh13001, OptimizeResult.message should be a str
assert isinstance(res.message, str)
def f1(z, *params):
x, y = z
a, b, c, d, e, f, g, h, i, j, k, l, scale = params
return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
def f2(z, *params):
x, y = z
a, b, c, d, e, f, g, h, i, j, k, l, scale = params
return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
def f3(z, *params):
x, y = z
a, b, c, d, e, f, g, h, i, j, k, l, scale = params
return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
def brute_func(z, *params):
return f1(z, *params) + f2(z, *params) + f3(z, *params)
class TestBrute:
# Test the "brute force" method
def setup_method(self):
self.params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
self.solution = np.array([-1.05665192, 1.80834843])
def brute_func(self, z, *params):
# an instance method optimizing
return brute_func(z, *params)
def test_brute(self):
# test fmin
resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
full_output=True, finish=optimize.fmin)
assert_allclose(resbrute[0], self.solution, atol=1e-3)
assert_allclose(resbrute[1], brute_func(self.solution, *self.params),
atol=1e-3)
# test minimize
resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
full_output=True,
finish=optimize.minimize)
assert_allclose(resbrute[0], self.solution, atol=1e-3)
assert_allclose(resbrute[1], brute_func(self.solution, *self.params),
atol=1e-3)
        # test that brute can optimize an instance method (the other tests use
        # a non-class-based function)
resbrute = optimize.brute(self.brute_func, self.rranges,
args=self.params, full_output=True,
finish=optimize.minimize)
assert_allclose(resbrute[0], self.solution, atol=1e-3)
def test_1D(self):
# test that for a 1-D problem the test function is passed an array,
# not a scalar.
def f(x):
assert_(len(x.shape) == 1)
assert_(x.shape[0] == 1)
return x ** 2
optimize.brute(f, [(-1, 1)], Ns=3, finish=None)
def test_workers(self):
# check that parallel evaluation works
resbrute = optimize.brute(brute_func, self.rranges, args=self.params,
full_output=True, finish=None)
resbrute1 = optimize.brute(brute_func, self.rranges, args=self.params,
full_output=True, finish=None, workers=2)
assert_allclose(resbrute1[-1], resbrute[-1])
assert_allclose(resbrute1[0], resbrute[0])
def test_cobyla_threadsafe():
# Verify that cobyla is threadsafe. Will segfault if it is not.
import concurrent.futures
import time
def objective1(x):
time.sleep(0.1)
return x[0]**2
def objective2(x):
time.sleep(0.1)
return (x[0]-1)**2
min_method = "COBYLA"
def minimizer1():
return optimize.minimize(objective1,
[0.0],
method=min_method)
def minimizer2():
return optimize.minimize(objective2,
[0.0],
method=min_method)
with concurrent.futures.ThreadPoolExecutor() as pool:
tasks = []
tasks.append(pool.submit(minimizer1))
tasks.append(pool.submit(minimizer2))
for t in tasks:
res = t.result()
class TestIterationLimits:
    # Tests that optimisation does not give up before trying the requested
    # number of iterations or evaluations, and that it does not succeed
    # by exceeding the limits.
def setup_method(self):
self.funcalls = 0
def slow_func(self, v):
self.funcalls += 1
r, t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0], v[1])
return np.sin(r*20 + t)+r*0.5
def test_neldermead_limit(self):
self.check_limits("Nelder-Mead", 200)
def test_powell_limit(self):
self.check_limits("powell", 1000)
def check_limits(self, method, default_iters):
for start_v in [[0.1, 0.1], [1, 1], [2, 2]]:
for mfev in [50, 500, 5000]:
self.funcalls = 0
res = optimize.minimize(self.slow_func, start_v,
method=method,
options={"maxfev": mfev})
assert_(self.funcalls == res["nfev"])
if res["success"]:
assert_(res["nfev"] < mfev)
else:
assert_(res["nfev"] >= mfev)
for mit in [50, 500, 5000]:
res = optimize.minimize(self.slow_func, start_v,
method=method,
options={"maxiter": mit})
if res["success"]:
assert_(res["nit"] <= mit)
else:
assert_(res["nit"] >= mit)
for mfev, mit in [[50, 50], [5000, 5000], [5000, np.inf]]:
self.funcalls = 0
res = optimize.minimize(self.slow_func, start_v,
method=method,
options={"maxiter": mit,
"maxfev": mfev})
assert_(self.funcalls == res["nfev"])
if res["success"]:
assert_(res["nfev"] < mfev and res["nit"] <= mit)
else:
assert_(res["nfev"] >= mfev or res["nit"] >= mit)
for mfev, mit in [[np.inf, None], [None, np.inf]]:
self.funcalls = 0
res = optimize.minimize(self.slow_func, start_v,
method=method,
options={"maxiter": mit,
"maxfev": mfev})
assert_(self.funcalls == res["nfev"])
if res["success"]:
if mfev is None:
assert_(res["nfev"] < default_iters*2)
else:
assert_(res["nit"] <= default_iters*2)
else:
assert_(res["nfev"] >= default_iters*2 or
res["nit"] >= default_iters*2)
def test_result_x_shape_when_len_x_is_one():
def fun(x):
return x * x
def jac(x):
return 2. * x
def hess(x):
return np.array([[2.]])
methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'L-BFGS-B', 'TNC',
'COBYLA', 'SLSQP']
for method in methods:
res = optimize.minimize(fun, np.array([0.1]), method=method)
assert res.x.shape == (1,)
# use jac + hess
methods = ['trust-constr', 'dogleg', 'trust-ncg', 'trust-exact',
'trust-krylov', 'Newton-CG']
for method in methods:
res = optimize.minimize(fun, np.array([0.1]), method=method, jac=jac,
hess=hess)
assert res.x.shape == (1,)
class FunctionWithGradient:
def __init__(self):
self.number_of_calls = 0
def __call__(self, x):
self.number_of_calls += 1
return np.sum(x**2), 2 * x
@pytest.fixture
def function_with_gradient():
return FunctionWithGradient()
def test_memoize_jac_function_before_gradient(function_with_gradient):
memoized_function = MemoizeJac(function_with_gradient)
x0 = np.array([1.0, 2.0])
assert_allclose(memoized_function(x0), 5.0)
assert function_with_gradient.number_of_calls == 1
assert_allclose(memoized_function.derivative(x0), 2 * x0)
assert function_with_gradient.number_of_calls == 1, \
"function is not recomputed " \
"if gradient is requested after function value"
assert_allclose(
memoized_function(2 * x0), 20.0,
err_msg="different input triggers new computation")
assert function_with_gradient.number_of_calls == 2, \
"different input triggers new computation"
def test_memoize_jac_gradient_before_function(function_with_gradient):
memoized_function = MemoizeJac(function_with_gradient)
x0 = np.array([1.0, 2.0])
assert_allclose(memoized_function.derivative(x0), 2 * x0)
assert function_with_gradient.number_of_calls == 1
assert_allclose(memoized_function(x0), 5.0)
assert function_with_gradient.number_of_calls == 1, \
"function is not recomputed " \
"if function value is requested after gradient"
assert_allclose(
memoized_function.derivative(2 * x0), 4 * x0,
err_msg="different input triggers new computation")
assert function_with_gradient.number_of_calls == 2, \
"different input triggers new computation"
def test_memoize_jac_with_bfgs(function_with_gradient):
""" Tests that using MemoizedJac in combination with ScalarFunction
and BFGS does not lead to repeated function evaluations.
Tests changes made in response to GH11868.
"""
memoized_function = MemoizeJac(function_with_gradient)
jac = memoized_function.derivative
hess = optimize.BFGS()
x0 = np.array([1.0, 0.5])
scalar_function = ScalarFunction(
memoized_function, x0, (), jac, hess, None, None)
assert function_with_gradient.number_of_calls == 1
scalar_function.fun(x0 + 0.1)
assert function_with_gradient.number_of_calls == 2
scalar_function.fun(x0 + 0.2)
assert function_with_gradient.number_of_calls == 3
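# A minimal sketch of the caching idea that MemoizeJac (tested above) implements,
# assuming the wrapped callable returns a (value, gradient) pair: both outputs are
# cached and reused as long as the query point does not change. The class below is
# illustrative only, not scipy API.
class _MemoizeJacSketch:
    def __init__(self, fun):
        self.fun = fun
        self._x = None          # last point at which fun was evaluated
        self._value = None
        self._jac = None
    def _evaluate_if_needed(self, x):
        if self._x is None or not np.array_equal(x, self._x):
            self._x = np.asarray(x).copy()
            self._value, self._jac = self.fun(x)
    def __call__(self, x):
        self._evaluate_if_needed(x)
        return self._value
    def derivative(self, x):
        self._evaluate_if_needed(x)
        return self._jac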
def test_gh12696():
    # Test that optimize.fminbound does not emit a warning (gh-12696)
with assert_no_warnings():
optimize.fminbound(
lambda x: np.array([x**2]), -np.pi, np.pi, disp=False)
def test_show_options():
solver_methods = {
'minimize': MINIMIZE_METHODS,
'minimize_scalar': MINIMIZE_SCALAR_METHODS,
'root': ROOT_METHODS,
'root_scalar': ROOT_SCALAR_METHODS,
'linprog': LINPROG_METHODS,
'quadratic_assignment': QUADRATIC_ASSIGNMENT_METHODS,
}
for solver, methods in solver_methods.items():
for method in methods:
# testing that `show_options` works without error
show_options(solver, method)
unknown_solver_method = {
'minimize': "ekki", # unknown method
'maximize': "cg", # unknown solver
'maximize_scalar': "ekki", # unknown solver and method
}
for solver, method in unknown_solver_method.items():
# testing that `show_options` raises ValueError
assert_raises(ValueError, show_options, solver, method)
def test_bounds_with_list():
# gh13501. Bounds created with lists weren't working for Powell.
bounds = optimize.Bounds(lb=[5., 5.], ub=[10., 10.])
optimize.minimize(
optimize.rosen, x0=np.array([9, 9]), method='Powell', bounds=bounds
)
def test_x_overwritten_user_function():
# if the user overwrites the x-array in the user function it's likely
# that the minimizer stops working properly.
# gh13740
def fquad(x):
a = np.arange(np.size(x))
x -= a
x *= x
return np.sum(x)
def fquad_jac(x):
a = np.arange(np.size(x))
x *= 2
x -= 2 * a
return x
fquad_hess = lambda x: np.eye(np.size(x)) * 2.0
meth_jac = [
'newton-cg', 'dogleg', 'trust-ncg', 'trust-exact',
'trust-krylov', 'trust-constr'
]
meth_hess = [
'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov', 'trust-constr'
]
x0 = np.ones(5) * 1.5
for meth in MINIMIZE_METHODS:
jac = None
hess = None
if meth in meth_jac:
jac = fquad_jac
if meth in meth_hess:
hess = fquad_hess
res = optimize.minimize(fquad, x0, method=meth, jac=jac, hess=hess)
assert_allclose(res.x, np.arange(np.size(x0)), atol=2e-4)
| 39.087014
| 79
| 0.525506
|
7950f6320393a5974a5c16dba6523991a0aa7159
| 707
|
py
|
Python
|
Code/PasswordFromString/StringFunctions.py
|
dealom/Package-Project
|
1feb1c2fc6f12e812a0f732debd2bfdb76588954
|
[
"MIT"
] | null | null | null |
Code/PasswordFromString/StringFunctions.py
|
dealom/Package-Project
|
1feb1c2fc6f12e812a0f732debd2bfdb76588954
|
[
"MIT"
] | null | null | null |
Code/PasswordFromString/StringFunctions.py
|
dealom/Package-Project
|
1feb1c2fc6f12e812a0f732debd2bfdb76588954
|
[
"MIT"
] | null | null | null |
def alpharight(pw, n=1):
    # Shift every letter in pw n places forward in the alphabet (a -> b, z -> a),
    # preserving case and leaving non-letters unchanged. The alphabet strings are
    # doubled so that indexing past the end wraps around for shifts of up to 26.
    newpw = ""
    alpha = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
    ALPHA = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
for l in pw:
if l in alpha:
l = alpha[alpha.index(l)+n]
elif l in ALPHA:
l = ALPHA[ALPHA.index(l)+n]
newpw += l
return newpw
def keyright(pw, n=1):
    # Same idea as alpharight, but the shift follows QWERTY keyboard order
    # (row by row) instead of alphabetical order.
    newpw = ""
    alpha = "qwertyuiopasdfghjklzxcvbnmqwertyuiopasdfghjklzxcvbnm"
    ALPHA = "QWERTYUIOPASDFGHJKLZXCVBNMQWERTYUIOPASDFGHJKLZXCVBNM"
for l in pw:
if l in alpha:
l = alpha[alpha.index(l)+n]
elif l in ALPHA:
l = ALPHA[ALPHA.index(l)+n]
newpw += l
return newpw
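# Usage sketch (not part of the original module); the expected outputs follow
# directly from the shifting logic above.
if __name__ == "__main__":
    print(alpharight("abz"))      # -> "bca"  (each letter moves one step; z wraps to a)
    print(alpharight("Hal9000"))  # -> "Ibm9000"  (digits are left unchanged)
    print(keyright("qaz"))        # -> "wsx"  (one key to the right on a QWERTY layout)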
| 29.458333
| 66
| 0.61669
|
7950f6e458b64342634585205c22e732a2c9e2ea
| 2,114
|
py
|
Python
|
Data-Structures/Trees/BinaryTreeIteration.py
|
kimjiwook0129/Coding-Interivew-Cheatsheet
|
574e6acecdb617b9c3cef7ec3b154ab183d8b99a
|
[
"MIT"
] | 3
|
2022-01-09T04:33:04.000Z
|
2022-02-04T17:40:43.000Z
|
Data-Structures/Trees/BinaryTreeIteration.py
|
kimjiwook0129/Coding-Interivew-Cheatsheet
|
574e6acecdb617b9c3cef7ec3b154ab183d8b99a
|
[
"MIT"
] | null | null | null |
Data-Structures/Trees/BinaryTreeIteration.py
|
kimjiwook0129/Coding-Interivew-Cheatsheet
|
574e6acecdb617b9c3cef7ec3b154ab183d8b99a
|
[
"MIT"
] | null | null | null |
from collections import deque
# Same BinaryTree, but the print methods are implemented iteratively instead of recursively
class BinaryTree:
def __init__(self, data, left = None, right = None):
self.data = data
self.left = left
self.right = right
def preOrderPrint(self):
s = deque([self])
while s:
curNode = s.pop()
print(curNode.data, end = " ")
if curNode.right:
s.append(curNode.right)
if curNode.left:
s.append(curNode.left)
def inOrderPrint(self):
s = deque([])
while True:
if self:
s.append(self)
self = self.left
else:
if len(s) == 0: break
self = s.pop()
print(self.data, end = " ")
self = self.right
def postOrderPrint(self):
s1 = deque([self])
s2 = deque([])
while s1:
curNode = s1.pop()
s2.append(curNode)
if curNode.left:
s1.append(curNode.left)
if curNode.right:
s1.append(curNode.right)
while s2:
print(s2.pop().data, end = " ")
    def postOrderPrintOneStack(self):
        # Left unimplemented in this file; a standalone one-stack sketch is given
        # after the demo block below.
        pass
def levelOrderPrint(self):
q = deque([])
q.append(self)
while q:
curNode = q.popleft()
print(curNode.data, end = " ")
if curNode.left:
q.append(curNode.left)
if curNode.right:
q.append(curNode.right)
if __name__ == "__main__":
tree = BinaryTree(1, BinaryTree(2, BinaryTree(4)), \
BinaryTree(3, BinaryTree(5, None, BinaryTree(7)), BinaryTree(6, BinaryTree(8), BinaryTree(9))))
tree.preOrderPrint() # 1, 2, 4, 3, 5, 7, 6, 8, 9
print()
tree.inOrderPrint() # 4, 2, 1, 5, 7, 3, 8, 6, 9
print()
tree.postOrderPrint() # 4, 2, 7, 5, 8, 9, 6, 3, 1
print()
# tree.postOrderPrintOneStack() # 4, 2, 7, 5, 8, 9, 6, 3, 1
# print()
tree.levelOrderPrint() # 1, 2, 3, 4, 5, 6, 7, 8, 9
print()
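# One possible way to fill in the postOrderPrintOneStack stub above: a standalone
# sketch (not a method of the original class) that uses a single stack, reusing the
# deque imported at the top of this file, plus a pointer to the last node printed.
def post_order_one_stack(root):
    s = deque([])
    last_printed = None
    node = root
    while s or node:
        if node:
            s.append(node)
            node = node.left
        else:
            peek = s[-1]
            if peek.right and last_printed is not peek.right:
                # the right subtree exists and has not been printed yet
                node = peek.right
            else:
                print(peek.data, end=" ")
                last_printed = s.pop()
    print()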
| 29.774648
| 103
| 0.497635
|
7950f8b5eb70c4f20082a5323c5618258b677bb9
| 5,070
|
py
|
Python
|
optimize_gan_bo.py
|
miyamotononno/airfoil-opt-gan
|
997c1060dd3dd22572c16101e703e0bf93a316f1
|
[
"MIT"
] | null | null | null |
optimize_gan_bo.py
|
miyamotononno/airfoil-opt-gan
|
997c1060dd3dd22572c16101e703e0bf93a316f1
|
[
"MIT"
] | null | null | null |
optimize_gan_bo.py
|
miyamotononno/airfoil-opt-gan
|
997c1060dd3dd22572c16101e703e0bf93a316f1
|
[
"MIT"
] | null | null | null |
"""
Optimize the airfoil shape in the latent space using Bayesian optimization,
constrained on the running time
Author(s): Wei Chen (wchen459@umd.edu)
"""
from __future__ import division
import time
import argparse
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from importlib import import_module
import sklearn.gaussian_process as gp
from gan import GAN
from simulation import evaluate
from bayesian_opt import normalize, neg_expected_improvement, sample_next_point
from utils import mean_err
def synthesize(z, model):
airfoil = model.synthesize(z)
if airfoil[0,1] < airfoil[-1,1]:
mean = .5*(airfoil[0,1]+airfoil[-1,1])
airfoil[0,1] = airfoil[-1,1] = mean
return airfoil
def optimize(latent_dim, bounds, n_eval, run_id):
# Optimize in the latent space
n_pre_samples = 10
bounds = np.tile(bounds, (latent_dim,1))
kernel = gp.kernels.Matern()
gp_model = gp.GaussianProcessRegressor(kernel=kernel, alpha=1e-4, n_restarts_optimizer=100, normalize_y=False)
zp = []
perfs = []
opt_perfs = [0]
s = 1.0
gamma = 0.99
for i in range(n_eval):
if i < n_pre_samples:
z = np.random.uniform(bounds[:,0], bounds[:,1], size=latent_dim)
else:
perf_normalized = normalize(perfs)
gp_model.fit(np.array(zp), np.array(perf_normalized))
length_scale = gp_model.kernel_.length_scale
print('Length scale = {}'.format(length_scale))
previous_optimum = perf_normalized[opt_idx]
if np.all(np.array(perfs[-5:])==-1): # in case getting stuck in infeasible region
print('Back to {} ...'.format(opt_z))
previous_point = opt_z
else:
previous_point = z
# z = sample_next_point(latent_dim, neg_expected_improvement, gp_model, previous_optimum, bounds, n_restarts=100)
z = sample_next_point(latent_dim, neg_expected_improvement, gp_model, previous_optimum, bounds=None,
random_search=100000, previous_point=previous_point, scale=s)
s *= gamma
x = synthesize(z.reshape(1,-1), model)
perf = evaluate(x)
z = np.squeeze(z)
zp.append(z)
perfs.append(perf)
opt_idx = np.argmax(perfs)
opt_z = zp[opt_idx]
opt_perf = perfs[opt_idx]
opt_perfs.append(opt_perf) # Best performance so far
print('GAN-BO {}-{}: z {} CL/CD {:.2f} best-so-far {:.2f}'.format(run_id, i+1, z, perf, opt_perf))
opt_z = opt_z.reshape(1,-1)
opt_airfoil = synthesize(opt_z, model)
print('Optimal: z {} CL/CD {}'.format(opt_z, opt_perfs[-1]))
return opt_airfoil, opt_perfs
if __name__ == "__main__":
# Arguments
parser = argparse.ArgumentParser(description='Optimize')
parser.add_argument('--n_runs', type=int, default=10, help='number of runs')
parser.add_argument('--n_eval', type=int, default=1000, help='number of evaluations per run')
args = parser.parse_args()
n_runs = args.n_runs
n_eval = args.n_eval
# Airfoil parameters
latent_dim = 3
noise_dim = 10
n_points = 192
bezier_degree = 32
bounds = (0., 1.)
# Restore trained model
model = GAN(latent_dim, noise_dim, n_points, bezier_degree, bounds)
model.restore()
opt_airfoil_runs = []
opt_perfs_runs = []
time_runs = []
for i in range(n_runs):
start_time = time.time()
opt_airfoil, opt_perfs = optimize(latent_dim, bounds, n_eval, i+1)
end_time = time.time()
opt_airfoil_runs.append(opt_airfoil)
opt_perfs_runs.append(opt_perfs)
time_runs.append(end_time-start_time)
opt_airfoil_runs = np.array(opt_airfoil_runs)
opt_perfs_runs = np.array(opt_perfs_runs)
np.save('opt_results/gan_bo/opt_airfoil.npy', opt_airfoil_runs)
np.save('opt_results/gan_bo/opt_history.npy', opt_perfs_runs)
# Plot optimization history
mean_perfs_runs = np.mean(opt_perfs_runs, axis=0)
plt.figure()
plt.plot(np.arange(n_eval+1, dtype=int), mean_perfs_runs)
plt.title('Optimization History')
plt.xlabel('Number of Evaluations')
plt.ylabel('Optimal CL/CD')
# plt.xticks(np.linspace(0, n_eval+1, 5, dtype=int))
plt.savefig('opt_results/gan_bo/opt_history.svg')
plt.close()
# Plot the optimal airfoil
mean_time_runs, err_time_runs = mean_err(time_runs)
mean_final_perf_runs, err_final_perf_runs = mean_err(opt_perfs_runs[:,-1])
plt.figure()
for opt_airfoil in opt_airfoil_runs:
plt.plot(opt_airfoil[:,0], opt_airfoil[:,1], '-', c='k', alpha=1.0/n_runs)
plt.title('CL/CD: %.2f+/-%.2f time: %.2f+/-%.2f min' % (mean_final_perf_runs, err_final_perf_runs,
mean_time_runs/60, err_time_runs/60))
plt.axis('equal')
plt.savefig('opt_results/gan_bo/opt_airfoil.svg')
plt.close()
    print('GAN-BO completed :)')
| 35.704225
| 124
| 0.64497
|
7950fa30a66be88652713ca49d525d8f474d2650
| 327
|
py
|
Python
|
app/Connection/socket.py
|
alexanderscpo/UCi_Desktop
|
a923dd78d2f4df95fdd56c0afc52fc3557b8d4a7
|
[
"MIT"
] | null | null | null |
app/Connection/socket.py
|
alexanderscpo/UCi_Desktop
|
a923dd78d2f4df95fdd56c0afc52fc3557b8d4a7
|
[
"MIT"
] | null | null | null |
app/Connection/socket.py
|
alexanderscpo/UCi_Desktop
|
a923dd78d2f4df95fdd56c0afc52fc3557b8d4a7
|
[
"MIT"
] | null | null | null |
import socket
import os
import struct
import threading
def enviar_archivos(url: str, host: str, port: int):
    # Create the connection object (IPv4 and TCP)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sck:
        # Connect to the host; socket.connect() returns None, so the socket
        # itself is used as the context manager (see the sending sketch below)
        sck.connect((host, port))
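# Minimal sketch (an assumption, not part of the original file) of how the file
# at `url` could be sent once the connection is open: prefix the payload with
# its size using struct so the receiver knows how many bytes to expect.
def _enviar_archivo_sketch(sck: socket.socket, url: str) -> None:
    size = os.path.getsize(url)
    sck.sendall(struct.pack("!Q", size))  # 8-byte big-endian length header
    with open(url, "rb") as archivo:
        while True:
            chunk = archivo.read(4096)
            if not chunk:
                break
            sck.sendall(chunk)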
| 20.4375
| 59
| 0.69419
|
7950faaf8969b4bfd67614a72bff42f402a632e6
| 357
|
py
|
Python
|
bookwyrm/activitypub/image.py
|
mouse-reeve/fedireads
|
e3471fcc3500747a1b1deaaca662021aae5b08d4
|
[
"CC0-1.0"
] | 270
|
2020-01-27T06:06:07.000Z
|
2020-06-21T00:28:18.000Z
|
bookwyrm/activitypub/image.py
|
mouse-reeve/fedireads
|
e3471fcc3500747a1b1deaaca662021aae5b08d4
|
[
"CC0-1.0"
] | 158
|
2020-02-10T20:36:54.000Z
|
2020-06-26T17:12:54.000Z
|
bookwyrm/activitypub/image.py
|
mouse-reeve/fedireads
|
e3471fcc3500747a1b1deaaca662021aae5b08d4
|
[
"CC0-1.0"
] | 15
|
2020-02-13T21:53:33.000Z
|
2020-06-17T16:52:46.000Z
|
""" an image, nothing fancy """
from dataclasses import dataclass
from .base_activity import ActivityObject
@dataclass(init=False)
class Document(ActivityObject):
"""a document"""
url: str
name: str = ""
type: str = "Document"
id: str = None
@dataclass(init=False)
class Image(Document):
"""an image"""
type: str = "Image"
| 17
| 41
| 0.64986
|
7950fcbd2f6ab009ee65cce62d72ff7f54f81ced
| 2,454
|
py
|
Python
|
config/settings/local.py
|
black-redoc/django_crud_example
|
143ef8edfd5346087d1c577491460507c87e8e22
|
[
"MIT"
] | null | null | null |
config/settings/local.py
|
black-redoc/django_crud_example
|
143ef8edfd5346087d1c577491460507c87e8e22
|
[
"MIT"
] | null | null | null |
config/settings/local.py
|
black-redoc/django_crud_example
|
143ef8edfd5346087d1c577491460507c87e8e22
|
[
"MIT"
] | null | null | null |
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="kPVUeeHnUxaqrVpYDTQwm23hclk9CPXGwyaoyuZzTu7dSCFuXvXm1Nm9rAEXJD87",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#using-whitenoise-in-development
INSTALLED_APPS = ["whitenoise.runserver_nostatic"] + INSTALLED_APPS # noqa F405
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "192.168.1.29"]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
| 40.229508
| 97
| 0.579462
|
7950fff4e7c74a6a68098f27ded64d477358dec9
| 424
|
py
|
Python
|
ch05-a-realistic-api/api/weather_api.py
|
hedrickbt/talkpython-fastapi
|
633ee7b6ebfa78933b14fceed0c62884382363a1
|
[
"MIT"
] | null | null | null |
ch05-a-realistic-api/api/weather_api.py
|
hedrickbt/talkpython-fastapi
|
633ee7b6ebfa78933b14fceed0c62884382363a1
|
[
"MIT"
] | null | null | null |
ch05-a-realistic-api/api/weather_api.py
|
hedrickbt/talkpython-fastapi
|
633ee7b6ebfa78933b14fceed0c62884382363a1
|
[
"MIT"
] | null | null | null |
from typing import Optional
import fastapi
from fastapi import Depends
from models.location import Location
from services import openweather_service
router = fastapi.APIRouter()
@router.get('/api/weather/{city}')
async def weather(loc: Location = Depends(),
units: Optional[str] = 'metric'):
report = await openweather_service.get_report_async(loc.city, loc.state, loc.country, units)
return report
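# Example request (a sketch; the query parameter names are inferred from the
# Location fields used above and are not confirmed by this file alone):
#   GET /api/weather/Seattle?state=WA&country=US&units=imperial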
| 24.941176
| 96
| 0.75
|
79510010034adafaaddb52db78bf489e225e8c97
| 1,401
|
py
|
Python
|
chapter2/problem1.py
|
hahnicity/ace
|
60e934304b94614c435c7f3da60e3ea13622173e
|
[
"Unlicense"
] | 5
|
2017-07-06T07:08:03.000Z
|
2020-03-11T17:48:02.000Z
|
chapter2/problem1.py
|
lnsongxf/ace
|
60e934304b94614c435c7f3da60e3ea13622173e
|
[
"Unlicense"
] | null | null | null |
chapter2/problem1.py
|
lnsongxf/ace
|
60e934304b94614c435c7f3da60e3ea13622173e
|
[
"Unlicense"
] | 5
|
2017-07-06T07:08:15.000Z
|
2020-12-19T21:52:07.000Z
|
"""
Solve some matrix operations
"""
from numpy import array
from ace.solvers import gauss_jacobi, gauss_seidel, lu_decomposition
def part_a(a, b):
"""
Solve using LU decomposition
"""
x = lu_decomposition(a, b)
print x
return x
def part_b(a, b, x_exact):
"""
Solve using Gauss-Jacobi
"""
for iterations in xrange(1, 100):
x = gauss_jacobi(a, b, iterations)
if _check_for_significant_digits(x, x_exact, 4):
print iterations, x
return
def part_c(a, b, x_exact):
"""
Solve using Gauss-Seidel
"""
for iterations in xrange(1, 100):
x = gauss_seidel(a, b, iterations)
if _check_for_significant_digits(x, x_exact, 4):
print iterations, x
return
def _check_for_significant_digits(x, x_exact, desired_digits):
number_validated = 0
for i, xi in enumerate(x_exact):
comparator = int(xi * 10 ** desired_digits)
if int(x[i] * 10 ** desired_digits) == comparator:
number_validated += 1
if number_validated == len(x_exact):
return True
return False
if __name__ == "__main__":
a = array([[54, 14, -11, 2],
[14, 50, -4, 29],
[-11, -4, 55, 22],
[2, 29, 22, 95]])
b = array([1, 1, 1, 1])
x_exact = part_a(a, b)
part_b(a, b, x_exact)
part_c(a, b, x_exact)
| 23.35
| 68
| 0.576017
|
79510022ea5cd314612fe677a6fb15092158876e
| 8,196
|
py
|
Python
|
docs/conf.py
|
farzadghanei/distutilazy
|
c3c7d062f7cb79abb7677cac57dd752127ff78e7
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
farzadghanei/distutilazy
|
c3c7d062f7cb79abb7677cac57dd752127ff78e7
|
[
"MIT"
] | 2
|
2016-06-16T14:12:48.000Z
|
2018-07-22T12:44:21.000Z
|
docs/conf.py
|
farzadghanei/distutilazy
|
c3c7d062f7cb79abb7677cac57dd752127ff78e7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Distutilazy documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 13 14:17:16 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Distutilazy'
copyright = u'2015, Farzad Ghanei'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4.0'
# The full version, including alpha/beta/rc tags.
release = '0.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Distutilazydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Distutilazy.tex', u'Distutilazy Documentation',
u'Farzad Ghanei', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'distutilazy', u'Distutilazy Documentation',
[u'Farzad Ghanei'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Distutilazy', u'Distutilazy Documentation',
u'Farzad Ghanei', 'Distutilazy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.644788
| 79
| 0.719253
|
795104180d5390d7bdc3dbd6b7961610117cffa4
| 1,825
|
py
|
Python
|
moltres-thermal-fluids/full-assembly/auxiliary.py
|
arfc/mhtgr350-benchmark
|
18f7b3fe5742dabb1114c3bf7760b84590d16062
|
[
"BSD-3-Clause"
] | null | null | null |
moltres-thermal-fluids/full-assembly/auxiliary.py
|
arfc/mhtgr350-benchmark
|
18f7b3fe5742dabb1114c3bf7760b84590d16062
|
[
"BSD-3-Clause"
] | 51
|
2020-05-26T16:17:57.000Z
|
2021-02-22T20:08:59.000Z
|
moltres-thermal-fluids/full-assembly/auxiliary.py
|
arfc/mhtgr350-benchmark
|
18f7b3fe5742dabb1114c3bf7760b84590d16062
|
[
"BSD-3-Clause"
] | 2
|
2020-01-02T19:22:59.000Z
|
2020-01-11T15:42:36.000Z
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.cbook import get_sample_data
import matplotlib.patches as mpatches
def add_legends_full_assembly():
'''
This function adds legends to 'full-assem-mesh'.
'''
figure = 'full-assem-mesh'
save = 'full-assem-mesh2'
red = mpatches.Patch(color=(1., 0., 0.), label='Fuel')
green = mpatches.Patch(color=(0., 1., 0.), label='Gap')
gray = mpatches.Patch(color=(0.91, 0.91, 0.91), label='Moderator')
yellow = mpatches.Patch(color=(1., 1., 0.), label='Film')
blue = mpatches.Patch(color=(0., 0., 1.), label='Coolant')
cwd = os.getcwd()
fname = get_sample_data('{0}/{1}.png'.format(cwd, figure))
im = plt.imread(fname)
plt.imshow(im)
plt.legend(handles=[red, gray, blue],
loc="upper left", bbox_to_anchor=(1.0, 1.0), fancybox=True)
plt.axis('off')
plt.savefig(save, dpi=300, bbox_inches="tight")
plt.close()
def full_assembly_convergence():
'''
This function plots the values from cool and fuel vs dofs in a figure.
'''
cool = [1060.405, 1062.230, 1063.999, 1065.128, 1065.318]
fuel = [1204.485, 1217.320, 1225.565, 1233.442, 1234.928]
dofs = [524291, 665893, 932129, 1317444, 1524595]
elements = [1025400, 1305800, 1833000, 2596000, 3006200]
plt.plot(dofs, cool, marker='o', label='Coolant')
plt.plot(dofs, fuel, marker='o', label='Fuel')
plt.legend(loc='best')
plt.ylabel(r'Temperature [$^{\circ}$C]')
plt.xlabel('Number of DoFs')
plt.savefig('full-assem-convergence', dpi=300, bbox_inches="tight")
plt.close()
if __name__ == "__main__":
# adds legends to mesh figure
add_legends_full_assembly()
# plots the convergence of the temperatures
full_assembly_convergence()
| 30.416667
| 74
| 0.648767
|
795104c125f710fb199a77f6e793117d15f0886e
| 30,760
|
py
|
Python
|
genres_holder/Homans_3_a.py
|
mmehrani/homans_project
|
37bddd6ed0686739674373264526873f92640346
|
[
"MIT"
] | null | null | null |
genres_holder/Homans_3_a.py
|
mmehrani/homans_project
|
37bddd6ed0686739674373264526873f92640346
|
[
"MIT"
] | 30
|
2019-10-14T15:40:31.000Z
|
2020-09-20T06:34:51.000Z
|
genres_holder/Homans_3_a.py
|
mmehrani/homans_project
|
37bddd6ed0686739674373264526873f92640346
|
[
"MIT"
] | null | null | null |
"""
Created on Mon Aug 12 10:12:03 2019
@author: Taha Enayat, Mohsen Mehrani
Main file
Model's engine
"""
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
#import winsound
import pickle
import Analysis_Tools_Homans
import os
import sys
from decimal import *
"""Platform Detection"""
pd = {'win32':'\\', 'linux':'/'}
if sys.platform.startswith('win32'):
plat = 'win32'
elif sys.platform.startswith('linux'):
plat = 'linux'
start_time = datetime.now()
# =============================================================================
"""Class"""
class NegativeProbability(Exception):
pass
class Agent():
"""
Properties and variables related to an agent
"""
def __init__(self,money,approval,situation):
self.money = money
self.approval = approval
self.neighbor = np.zeros(N,dtype=int) #number of interactions
self.value = np.full(N,-1,dtype=float)
self.time = np.full((N,memory_size),-1)
self.situation = situation
        self.active_neighbor = {} #dictionary; keys are active neighbor indexes; values are probabilities
self.sigma = Decimal('0') #sum of probabilities. used in normalization
self.feeling = np.zeros(N)
self.worth_ratio = self.approval/self.money
self.asset = self.money + self.approval / self.worth_ratio
return
def asset_updater(self):
self.asset = self.money + self.approval / self.worth_ratio
return
def worth_ratio_calculator(self):
"""
calculates worth ratio i.e. averages over neighbors' money and approval.
Used in transaction function.
"""
self.n_avg = {'money':0, 'approval':0}
for j in self.active_neighbor.keys():
self.n_avg['money'] += A[j].money
self.n_avg['approval'] += A[j].approval
self.n_avg['money'] += self.money
self.n_avg['approval'] += self.approval
self.n_avg['money'] = self.n_avg['money'] / (len(self.active_neighbor)+1)
self.n_avg['approval'] = self.n_avg['approval'] / (len(self.active_neighbor)+1)
self.n_average = self.n_avg['approval'] / self.n_avg['money']
return self.n_average
def probability_factor(self,neighbor,t):
'''
Calculates the factor for choosing each neighbor that converts to probability in
neighbor_concatenation function.
        This factor is the product of the effects of value (proposition 3), frequency (proposition 4),
        and feeling (proposition 5).
'''
p0 = np.exp(self.value[neighbor] * prob0_magnify_factor)
p1 = self.frequency_to_probability(neighbor,t) * prob1_magnify_factor - (prob1_magnify_factor -1)
p2 = np.exp(self.feeling[neighbor]) * prob2_magnify_factor - (prob2_magnify_factor -1)
# p0 = 1.0 #for when we need to turn off the effect
# p1 = 1.0 #for when we need to turn off the effect
# p2 = 1.0 #for when we need to turn off the effect
p0_tracker.append(p0) #tracking
p1_tracker.append(p1)
p2_tracker.append(p2)
probability = p0 * p1 * p2 #not normalized. normalization occurs in neighbor_concatenation()
return Decimal(str(probability)).quantize(Decimal('1e-5'),rounding = ROUND_DOWN) if probability < 10**8 else Decimal('1e8')
def frequency_to_probability(self,neighbor,t):
"""
Homans' proposition 4.
        Although Homans does not discuss the effect of long-term memory on probability,
        it is included here to see whether it makes the results more realistic.
"""
mask = (self.time[neighbor] > t-10) & (self.time[neighbor] != -1)
n1 = np.size(self.time[neighbor][mask])
short_term = np.exp(- alpha * n1 / 10)
# n2 = self.neighbor[neighbor]
# long_term = 1 + beta * (n2 * len(self.active_neighbor) /(t*np.average(num_transaction_tot[:t-1]) ) )
long_term = 1.0 #for when we need to turn off the effect
prob = short_term * long_term
return prob
def neighbor_concatenation(self,self_index,new_neighbor,t):
"""
Adds new neighbor to memory and converts factor obtained from probability_factor()
function to probability (that sums to one).
"""
for j in self.active_neighbor.keys():
self.active_neighbor[j] *= self.sigma
grade_new_neighbor = self.probability_factor(new_neighbor,t)
if new_neighbor in self.active_neighbor:
self.sigma += grade_new_neighbor - self.active_neighbor[new_neighbor]
else:
self.sigma += grade_new_neighbor
self.active_neighbor[new_neighbor] = grade_new_neighbor
for j in self.active_neighbor.keys():
if j!=new_neighbor:
self.active_neighbor[j] /= self.sigma
self.active_neighbor[j] = Decimal( str(self.active_neighbor[j]) ).quantize(Decimal('1e-5'),rounding = ROUND_DOWN)
if new_neighbor in self.active_neighbor:
self.active_neighbor[new_neighbor] = 1 - ( sum(self.active_neighbor.values()) - self.active_neighbor[new_neighbor])
else:
self.active_neighbor[new_neighbor] = 1 - sum(self.active_neighbor.values())
"""Error finding"""
if self.active_neighbor[new_neighbor] < 0:
raise NegativeProbability('self index:',self_index,'neighbor',new_neighbor)
elif np.size(np.array(list(self.active_neighbor.values()))[np.array(list(self.active_neighbor.values()))>1]) != 0:
raise NegativeProbability('self index:',self_index,'neighbor',new_neighbor)
elif sum(list(self.active_neighbor.values())) > 1.01 or sum(list(self.active_neighbor.values())) < 0.99:
raise NegativeProbability('not one',sum(list(self.active_neighbor.values())))
return
def second_agent(self,self_index,self_active_neighbor):
"""
Homans' proposition 6
Returns an agent in memory with maximum utility to intract with.
Utility = Value * Acceptance Probability
"""
"""Proposition 6"""
i = 0
Max = 0
for j in self_active_neighbor:
value = self.value[j]
other_probability = float(A[j].active_neighbor[self_index])
utility = value * other_probability
if utility >= Max:
Max = utility
chosen_agent = j
chosen_agent_index = i
i += 1
"""random choice"""
# chosen_agent_index = np.random.choice(range(len(self_active_neighbor)))
# chosen_agent = self_active_neighbor[chosen_agent_index]
return chosen_agent , chosen_agent_index
# =============================================================================
"""Functions"""
def transaction(index1,index2,t):
"""
Transaction with two agents
agent1 proposes to agent2
Uses proposition 3 (value) and proposition 5 (feeling)
"""
agent1 = A[index1]
agent2 = A[index2]
number_of_transaction1 = agent1.neighbor[index2]
number_of_transaction2 = agent2.neighbor[index1]
if len(agent1.active_neighbor) != 0:
worth_ratio1 = agent1.worth_ratio_calculator()
else:
worth_ratio1 = agent1.worth_ratio
if len(agent2.active_neighbor) != 0:
worth_ratio2 = agent2.worth_ratio_calculator()
else:
worth_ratio2 = agent2.worth_ratio
amount = transaction_percentage * agent1.money
    agreement_point = (worth_ratio2 - worth_ratio1)/(worth_ratio2 + worth_ratio1) * amount * worth_ratio1 # x = ((E2-E1)/(E2+E1)) * A * E1
"""Acceptances"""
"""although it seems obvious that the agent2 has to accept the transaction according to
what he thinks of agent1, here in the code it is redundancy;
Because in the code we are sure that agent1 have chosen agent2 according to maximizing
utility, i.e. agent2 is "the chosen one"!
The problem if this acceptance is on is that probabilities attributed to neighbors are
in the order of 1/N and with N=100 it means that most of the time transactions are rejected.
"""
# if index1 in agent2.active_neighbor:
# p = agent2.active_neighbor[index1]
# acceptance_util = np.random.choice([0,1],p=[1-p,p])
# else:
# acceptance_util = 1
acceptance_util = 1 #for turning off the effect of utility acceptance
if agent2.approval > 0.001 and agent2.approval - ( np.round(amount*worth_ratio1 + agreement_point,3) ) > 0.001:
acceptance_neg = 1 #not negative checking acceptance
else: acceptance_neg = 0
# if True: #for turning off the effect of worth ratio acceptance
if worth_ratio2 >= worth_ratio1:
acceptance_worth = 1
else:
p = np.exp( -(worth_ratio1 - worth_ratio2)/normalization_factor )
acceptance_worth = np.random.choice([0,1],p=[1-p,p])
acceptance_worth = acceptance_worth * acceptance_neg
# p = np.exp( -np.abs(agent1.asset - agent2.asset)/param )
# acceptance_asset = np.random.choice([0,1],p=[1-p,p])
acceptance_asset = 1 #for turning off the effect of asset acceptance
threshold = threshold_percentage[index2] * agent2.approval
if threshold > (amount * worth_ratio1 + agreement_point):
acceptance_thr = 1
else: acceptance_thr = 0
acceptance = acceptance_worth * acceptance_thr * acceptance_asset * acceptance_util
acceptance_manager([acceptance_worth, acceptance_thr, acceptance_asset, acceptance_util],index1,t) #tracking
if acceptance: #transaction accepts
num_transaction_tot[t-1] += 1
"""Calculate feeling and value"""
feeling = agreement_point / worth_ratio1 #is equal for both (from definition)
value1 = + amount + agreement_point/worth_ratio1
value2 = + amount
agent1.neighbor[index2] += 1
agent2.neighbor[index1] += 1
agent1.feeling[index2] = feeling
agent2.feeling[index1] = feeling
"""Updating memory"""
agent1.money -= np.round(amount,3)
agent2.money += np.round(amount,3)
agent1.approval += np.round(amount*worth_ratio1 + agreement_point,3)
agent2.approval -= np.round(amount*worth_ratio1 + agreement_point,3)
agent1.worth_ratio = lamda * agent1.worth_ratio + (1-lamda) * (amount*worth_ratio1 + agreement_point) / amount
agent2.worth_ratio = lamda * agent2.worth_ratio + (1-lamda) * (amount*worth_ratio1 + agreement_point) / amount
agent1.asset_updater()
agent2.asset_updater()
agent1.value[index2] = value1
agent2.value[index1] = value2
asset_tracker[index1].append(agent1.asset) #tracker
asset_tracker[index2].append(agent2.asset) #tracker
if number_of_transaction1 < memory_size: #if memory is not full
empty_memory = number_of_transaction1
agent1.time [index2,empty_memory] = t
else:
shift_memory( agent1 , index2)
agent1.time [index2,memory_size-1] = t
if number_of_transaction2 < memory_size: #if memory is not full
empty_memory = number_of_transaction2
agent2.time [index1,empty_memory] = t
else:
shift_memory(agent2,index1)
agent2.time [index1,memory_size-1] = t
agent1.neighbor_concatenation(index1,index2,t)
agent2.neighbor_concatenation(index2,index1,t)
return acceptance
# =============================================================================
def shift_memory(agent,index):
temp = np.delete(agent.time[index],0)
agent.time[index] = np.concatenate((temp,[-1]))
return
# =============================================================================
def acceptance_manager(accept_list,agent,t):
"""
To track acceptances through time
"""
dic_value = conditions_glossary_dict[tuple(accept_list)]
rejection_agent[agent,dic_value] += 1
rejection_time[t-1,dic_value] += 1
return
# =============================================================================
def explore(index,t):
"""
Chooses another agent which is not in his memory
Uses proposition 2 (similar situation)
"""
agent = A[index]
mask = np.ones(N,dtype=bool)
mask[index] = False
agent_active_neighbor = list(agent.active_neighbor.keys())
self_similarity = agent.situation
num_explore[t-1] += 1
global counter_accept_nei, counter_accept_ran
if len(agent_active_neighbor) != N-1:
if len(agent_active_neighbor) != 0:
"""Finding neighbors of neighbors"""
neighbors_of_neighbors_not_flat = []
for j in agent_active_neighbor:
neighbors_of_neighbors_not_flat.append(A[j].active_neighbor.keys())
neighbors_of_neighbors = []
for sublist in neighbors_of_neighbors_not_flat:
for item in sublist:
neighbors_of_neighbors.append(item)
neighbors_of_neighbors = list(set(neighbors_of_neighbors))
neighbors_of_neighbors.remove(index)
for nei in neighbors_of_neighbors:
if nei in agent_active_neighbor:
neighbors_of_neighbors.remove(nei)
"""Proposing"""
if len(neighbors_of_neighbors) != 0:
model_neighbor_index = np.random.choice(agent_active_neighbor,p=list(agent.active_neighbor.values())) #Bias neighbor
model_situation = A[model_neighbor_index].situation
if len(neighbors_of_neighbors) >= num_of_tries2:
arri_choice = np.random.choice(neighbors_of_neighbors,size=num_of_tries2,replace=False)
else:
arri_choice = np.array(neighbors_of_neighbors)
for other_index in arri_choice:
other_situation = A[other_index].situation
if other_situation > (model_situation-similarity) and other_situation < (model_situation+similarity): #if matches the criteria
"""Waiting for the answer of the proposed neighbor"""
other_agent = A[other_index]
if len(other_agent.active_neighbor) != 0:
nearest_choice = 1 #maximum possible situation difference
for k in other_agent.active_neighbor.keys():
diff_abs = np.abs(A[k].situation - self_similarity)
if diff_abs < nearest_choice:
nearest_choice = diff_abs
nearest_choice_index = k
p = other_agent.active_neighbor[nearest_choice_index]
acceptance = np.random.choice([0,1],p=[1-p,p])
if acceptance == 1:
transaction(index,other_index,t)
else:
transaction(index,other_index,t)
if other_index in agent.active_neighbor: #which means transaction has been accepted
counter_accept_nei += 1
break
anyof = True
for i in arri_choice:
if i in agent.active_neighbor:
anyof = False
"""When nobody is the right fit, the agent looks for a random agent"""
if anyof:
mask[agent_active_neighbor] = False
if np.size(mask[mask==True]) >= num_of_tries3:
arri_choice = np.random.choice(np.arange(N)[mask],size=num_of_tries3,replace=False) #difference with above
else:
num_true_in_mask = np.size(mask[mask==True])
arri_choice = np.random.choice(np.arange(N)[mask],size=num_true_in_mask,replace=False)
for other_index in arri_choice:
other_situation = A[other_index].situation
if other_situation > (model_situation-similarity) and other_situation < (model_situation+similarity):
other_agent = A[other_index]
if len(other_agent.active_neighbor) != 0:
nearest_choice = 1 #maximum possible situation difference
for k in other_agent.active_neighbor.keys():
diff_abs = np.abs(A[k].situation - self_similarity)
if diff_abs < nearest_choice:
nearest_choice = diff_abs
nearest_choice_index = k
p = other_agent.active_neighbor[nearest_choice_index]
acceptance = np.random.choice([0,1],p=[1-p,p])
if acceptance == 1:
transaction(index,other_index,t)
else:
transaction(index,other_index,t)
if other_index in agent.active_neighbor: #which means transaction has been accepted
counter_accept_ran += 1
break
else:
"""Nobody is in memory so choose with no model neighbor"""
other_index = np.random.choice(np.arange(N)[mask])
other_agent = A[other_index]
other_situation = other_agent.situation
if len(other_agent.active_neighbor) != 0:
nearest_choice = 1 #maximum possible situation difference
for k in other_agent.active_neighbor.keys():
diff_abs = np.abs(A[k].situation - self_similarity)
if diff_abs < nearest_choice:
nearest_choice = diff_abs
nearest_choice_index = k
p = other_agent.active_neighbor[nearest_choice_index]
acceptance = np.random.choice([0,1],p=[1-p,p])
if acceptance == 1:
transaction(index,other_index,t)
else:
transaction(index,other_index,t)
else:
other_index = np.random.choice(np.arange(N)[mask])
other_agent = A[other_index]
other_situation = other_agent.situation
if len(other_agent.active_neighbor) != 0:
nearest_choice = 1 #maximum possible situation difference
for k in other_agent.active_neighbor.keys():
diff_abs = np.abs(A[k].situation - self_similarity)
if diff_abs < nearest_choice:
nearest_choice = diff_abs
nearest_choice_index = k
p = other_agent.active_neighbor[nearest_choice_index]
acceptance = np.random.choice([0,1],p=[1-p,p])
if acceptance == 1:
transaction(index,other_index,t)
else:
transaction(index,other_index,t)
return
# =============================================================================
def make_directories(version):
"""
Making directories before running the simulation
It also makes a file of initial conditions and parameters
"""
current_path = os.getcwd()
try: os.mkdir(current_path+pd[plat]+'runned_files')
except OSError:
print ("runned_files already exists")
try: os.mkdir(current_path+pd[plat]+'runned_files'+pd[plat]+'N%d_T%d'%(N,T))
except OSError:
print ("version already exists")
path = current_path+pd[plat]+'runned_files'+pd[plat]+'N%d_T%d'%(N,T)+pd[plat]+version+pd[plat]
try: os.mkdir(path)
except OSError:
print ("Creation of the directory failed")
with open(path + 'Initials.txt','w') as initf:
initf.write(str(N)+'\n')
initf.write(str(T)+'\n')
initf.write(str(sampling_time)+'\n')
initf.write(str(saving_time_step)+'\n')
initf.write(str(version)+'\n')
initf.write('respectively: \n')
initf.write('N, T, sampling time, saving time step, version \n\n')
initf.write(str(initial_for_trans_time) + ': initial for trans time \n')
initf.write(str(trans_saving_interval) + ': trans time interval \n')
initf.write(str(similarity) + ': similarity \n')
initf.write(str(num_of_tries1) + ': num of tries 1 \n')
initf.write(str(num_of_tries2) + ': num of tries 2 \n')
initf.write(str(num_of_tries3) + ': num of tries 3 \n')
initf.write(str(prob0_magnify_factor) + ': probability 0 magnify factor \n')
initf.write(str(prob1_magnify_factor) + ': probability 1 magnify factor \n')
initf.write(str(prob2_magnify_factor) + ': probability 2 magnify factor \n')
initf.write(str(lamda) + ': lambda \n')
return path
def save_it(version,t):
"""
Saves essential data and makes corresponding directories
"""
global tracker
current_path = os.getcwd()
path = current_path+pd[plat]+'runned_files'+pd[plat]+'N%d_T%d'%(N,T)+pd[plat]+version+ pd[plat]+'0_%d'%(t)+pd[plat]
try: os.mkdir(path)
except OSError:
print ("Creation of the subdirectory failed")
with open(path + 'Agents.pkl','wb') as agent_file:
pickle.dump(A,agent_file,pickle.HIGHEST_PROTOCOL)
with open(path + 'Other_data.pkl','wb') as data:
pickle.dump(num_transaction_tot[t-sampling_time:t],data,pickle.HIGHEST_PROTOCOL) #should save the midway num_trans
pickle.dump(explore_prob_array,data,pickle.HIGHEST_PROTOCOL)
pickle.dump(rejection_agent,data,pickle.HIGHEST_PROTOCOL)
with open(path + 'Tracker.pkl','wb') as tracker_file:
pickle.dump(tracker,tracker_file,pickle.HIGHEST_PROTOCOL)
return path
# =============================================================================
"""Distinctive parameters""" #necessary for recalling for analysis
N = 100 #Number of agents
T = 5000 #Number of time steps (assumed value; T is used below but is not defined in this excerpt)
version = 'run_01' #label of the output directory (assumed value; not defined in this excerpt)
"""Parameters"""#XXX
similarity = 0.05 #difference allowed between model neighbor and new found agent. in explore()
memory_size = 10 #how many time of transaction for each agent is stored in memory of one agent
transaction_percentage = 0.1 #percent of amount of money the first agent proposes from his asset
num_of_tries1 = 20 #in main part
num_of_tries2 = 20 #in function explore(); tries from neighbors of neighbors
num_of_tries3 = 1 #in function explore(); tries from random agents (if no neighbor of neighbor have found)
threshold_percentage =np.full(N,1) #the maximum amount which the agent is willing to give
normalization_factor = 1 #rejection rate of acceptance_worth; used in transaction()
prob0_magnify_factor = 0.5 #magnifying factor of P0; in probability_factor()
prob1_magnify_factor = 1 #magnifying factor of P1; in probability_factor(); use with caution
prob2_magnify_factor = 1 #magnifying factor of P2; in probability_factor(); use with caution
alpha = 2.0 #in short-term effect of the frequency of transaction
beta = 3 #in long-term effect of the frequency of transaction
param = 2 #a normalizing factor in assigning the acceptance probability. It normalizes difference of money of both sides
lamda = 0 #how much one agent relies on his last worth_ratio and how much relies on current transaction's worth_ratio
sampling_time = 1000 #time interval used for making network: [T-sampling_time , T]
saving_time_step = T #for saving multiple files change it from T to your desired interval (better for T to be divisible by your number)
initial_for_trans_time = T - 1000 #initial time for trans_time to start recording
trans_saving_interval = 1000 #the interval the trans_time will record
if sampling_time > T:
sampling_time = T
if saving_time_step < sampling_time:
saving_time_step = sampling_time
"""Initial Condition"""
situation_arr = np.random.random(N) #randomly distributed
#money = np.full(N,5.5)
money = np.round(np.random.rand(N) * 9 + 1 ,decimals=3) #randomly between [1,10]
approval = np.full(N,5.5)
#approval = np.round(np.random.rand(N) * 9 + 1 ,decimals=3) #randomly between [1,10]
A = np.zeros(N,dtype=object)
for i in np.arange(N):
A[i]=Agent( money[i], approval[i], situation_arr[i])
"""trackers"""
explore_prob_array = np.zeros(T)
num_transaction_tot = np.zeros(T)
rejection_time = np.zeros((T,16))
rejection_agent = np.zeros((N,16))
binary = [0,1]
conditions_glossary = [(x,y,z,w) for x in binary for y in binary for z in binary for w in binary]
conditions_glossary_dict = { cond:x for cond,x in zip(conditions_glossary,range(16))}
conditions_glossary_string = ['{0}'.format(x) for x in conditions_glossary]
tracker = Analysis_Tools_Homans.Tracker(N,T,memory_size,A,trans_saving_interval,saving_time_step)
num_explore = np.zeros(T)
p0_tracker = []
p1_tracker = []
p2_tracker = []
asset_tracker = [ [] for _ in np.arange(N) ]
counter_entrance = 0
counter_accept_nei = 0
counter_accept_ran = 0
"""preparing for writing files"""
path = make_directories(version)
# =============================================================================
"""Main"""
"""
Choose one agent; it finds another agent by calculating probabilities,
or explores for a new agent (expanding its memory)
"""
for t in np.arange(T)+1:#t goes from 1 to T
"""computations"""
print(t,'/',T)
tau = (t-1)
shuffled_agents=np.arange(N)
np.random.shuffle(shuffled_agents)
for i in shuffled_agents:
person = A[i]
person_active_neighbor_size = len(person.active_neighbor)
exploration_probability = (N-1-person_active_neighbor_size)/(N-1)#(2*N-2)
explore_prob_array[tau] += exploration_probability
if person_active_neighbor_size != 0: #memory is not empty
rand = np.random.choice([1,0],size=1,p=[1-exploration_probability,exploration_probability])
if rand==1:
person_active_neighbor = np.array(list(person.active_neighbor.keys()))
if person_active_neighbor_size < num_of_tries1:
num_of_choice = person_active_neighbor_size
else:
num_of_choice = num_of_tries1
choice_arr = np.zeros(num_of_choice,dtype=int)
for k in np.arange(num_of_choice):
choice_arr[k] , chosen_index = person.second_agent(i,person_active_neighbor)
person_active_neighbor = np.delete(person_active_neighbor, chosen_index)
for j in choice_arr:
if transaction(i,j,t):
break
else:
counter_entrance += 1
explore(i,t)
else:
counter_entrance += 1
explore(i,t)
"""trackers"""
tracker.update_A(A)
tracker.get_list('self_value',tau)
tracker.get_list('valuable_to_others',tau)
tracker.get_list('correlation_mon',tau)
tracker.get_list('correlation_situ',tau)
tracker.get_list('money',tau)
tracker.get_list('approval',tau)
tracker.get_list('asset',tau)
if t>2:
tracker.get_list('worth_ratio',tau-2)
if tau == saving_time_step - sampling_time:
tracker.get_list('sample_time_trans',tau)
if t % saving_time_step == 0 or t == 1:
boolean = False
if t % saving_time_step == 0 and t >= saving_time_step:
tracker.get_list('rejection',tau,array=rejection_time)
save_it(version,t) #Write File
if t >= initial_for_trans_time and t < initial_for_trans_time + trans_saving_interval:
boolean = True
else:
boolean = False
t_prime = t - initial_for_trans_time
if boolean:
tracker.get_list('trans_time',t_prime)
explore_prob_array[tau] /= N
# =============================================================================
"""Pre-Analysis and Measurements"""
tracker.get_path(path)
tracker.plot_general(explore_prob_array * N,title='Average Exploration Probability',explore=True,N=N)
tracker.plot_general(num_transaction_tot,title='Number of Transaction',trans=True)
plt.figure()
plt.plot(p0_tracker[::2])
plt.plot(p1_tracker[::2])
plt.plot(p2_tracker[::2])
plt.title('P0 & P1 & P2')
plt.savefig(path+'P0 & P1 & P2')
plt.close()
tracker.hist_general(p0_tracker,title='p0')
tracker.hist_general(p1_tracker,title='p1')
tracker.hist_general(p2_tracker,title='p2')
tracker.hist_log_log_general(p0_tracker,title='P0')
tracker.hist_log_log_general(p1_tracker,title='P1')
tracker.hist_log_log_general(p2_tracker,title='P2')
plt.figure()
for i in np.arange(N):
plt.plot(asset_tracker[i])
plt.title('Asset Tracker')
plt.savefig(path+'Asset Tracker')
plt.close()
with open(path + 'Explore_data.txt','w') as ex_file:
    ex_file.write('Entrance to exploration \n')
ex_file.write(str(counter_entrance) + '\n\n')
ex_file.write('Total accepted explorations \n')
ex_file.write(str(counter_accept_nei + counter_accept_ran) + '\n\n')
ex_file.write('Accepted in neighbor of neighbor part \n')
ex_file.write(str(counter_accept_nei) + '\n\n')
ex_file.write('Accepted in random part \n')
ex_file.write(str(counter_accept_ran) + '\n\n')
ex_file.write('Neighbor to random ratio \n')
ex_file.write(str(counter_accept_ran / counter_accept_nei) + '\n\n')
ex_file.write('Total accepted to entrance ratio \n')
ex_file.write(str((counter_accept_nei+counter_accept_ran) / counter_entrance) + '\n\n')
ex_file.write('\nRun Time:')
ex_file.write(str(datetime.now() - start_time))
"""Time Evaluation"""
duration = 500 # millisecond
freq = 2000 # Hz
#winsound.Beep(freq, duration)
print (datetime.now() - start_time)
| 44.195402
| 150
| 0.604421
|
795104fd451102ab74a6f058c4e2dc425a73fbc9
| 17,466
|
py
|
Python
|
owslib/fes2.py
|
pmav99/OWSLib
|
414375413c9e2bab33a2d09608ab209875ce6daf
|
[
"BSD-3-Clause"
] | 218
|
2015-01-09T12:55:09.000Z
|
2022-03-29T12:22:54.000Z
|
owslib/fes2.py
|
pmav99/OWSLib
|
414375413c9e2bab33a2d09608ab209875ce6daf
|
[
"BSD-3-Clause"
] | 512
|
2015-01-01T09:52:58.000Z
|
2022-03-30T11:57:07.000Z
|
owslib/fes2.py
|
pmav99/OWSLib
|
414375413c9e2bab33a2d09608ab209875ce6daf
|
[
"BSD-3-Clause"
] | 218
|
2015-01-01T09:44:06.000Z
|
2022-03-31T14:09:13.000Z
|
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2021 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
"""
API for OGC Filter Encoding (FE) constructs and metadata.
Filter Encoding: http://www.opengeospatial.org/standards/filter
Supports version 2.0.2 (09-026r2).
"""
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
from abc import ABCMeta, abstractmethod
# default variables
def get_namespaces():
n = Namespaces()
ns = n.get_namespaces(["dif", "fes", "gml", "ogc", "ows110", "xs", "xsi"])
ns[None] = n.get_namespace("fes")
return ns
namespaces = get_namespaces()
schema = 'http://schemas.opengis.net/filter/2.0/filterAll.xsd'
schema_location = '%s %s' % (namespaces['fes'], schema)
class FilterRequest(object):
""" filter class """
def __init__(self, parent=None, version='2.0.0'):
"""
filter Constructor
Parameters
----------
- parent: parent etree.Element object (default is None)
- version: version (default is '2.0.0')
"""
self.version = version
self._root = etree.Element(util.nspath_eval('fes:Filter', namespaces))
if parent is not None:
self._root.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
def set(self, parent=False, qtype=None, keywords=[], typenames='csw:Record', propertyname='csw:AnyText', bbox=None,
identifier=None):
"""
Construct and process a GetRecords request
Parameters
----------
        - parent: the parent Element object. If this is not passed, then generate a standalone request
- qtype: type of resource to query (i.e. service, dataset)
- keywords: list of keywords
- propertyname: the ValueReference to Filter against
- bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]
- identifier: the dc:identifier to query against with a PropertyIsEqualTo. Ignores all other inputs.
"""
# Set the identifier if passed. Ignore other parameters
dc_identifier_equals_filter = None
if identifier is not None:
dc_identifier_equals_filter = PropertyIsEqualTo('dc:identifier', identifier)
self._root.append(dc_identifier_equals_filter.toXML())
return self._root
# Set the query type if passed
dc_type_equals_filter = None
if qtype is not None:
dc_type_equals_filter = PropertyIsEqualTo('dc:type', qtype)
# Set a bbox query if passed
bbox_filter = None
if bbox is not None:
bbox_filter = BBox(bbox)
# Set a keyword query if passed
keyword_filter = None
if len(keywords) > 0:
if len(keywords) > 1: # loop multiple keywords into an Or
ks = []
for i in keywords:
ks.append(PropertyIsLike(propertyname, "*%s*" % i, wildCard="*"))
keyword_filter = Or(operations=ks)
elif len(keywords) == 1: # one keyword
keyword_filter = PropertyIsLike(propertyname, "*%s*" % keywords[0], wildCard="*")
# And together filters if more than one exists
filters = [_f for _f in [keyword_filter, bbox_filter, dc_type_equals_filter] if _f]
if len(filters) == 1:
self._root.append(filters[0].toXML())
elif len(filters) > 1:
self._root.append(And(operations=filters).toXML())
return self._root
def setConstraint(self, constraint, tostring=False):
"""
Construct and process a GetRecords request
Parameters
----------
- constraint: An OgcExpression object
- tostring (optional): return as string
"""
self._root.append(constraint.toXML())
if tostring:
return util.element_to_string(self._root, xml_declaration=False)
return self._root
def setConstraintList(self, constraints, tostring=False):
"""
Construct and process a GetRecords request
Parameters
----------
- constraints: A list of OgcExpression objects
        The list is interpreted like so:
[a,b,c]
a || b || c
[[a,b,c]]
a && b && c
[[a,b],[c],[d],[e]] or [[a,b],c,d,e]
(a && b) || c || d || e
- tostring (optional): return as string
"""
ors = []
if len(constraints) == 1:
if isinstance(constraints[0], OgcExpression):
flt = self.setConstraint(constraints[0])
else:
self._root.append(And(operations=constraints[0]).toXML())
flt = self._root
if tostring:
return util.element_to_string(flt, xml_declaration=False)
else:
return flt
for c in constraints:
if isinstance(c, OgcExpression):
ors.append(c)
elif isinstance(c, list) or isinstance(c, tuple):
if len(c) == 1:
ors.append(c[0])
elif len(c) >= 2:
ands = []
for sub in c:
if isinstance(sub, OgcExpression):
ands.append(sub)
ors.append(And(operations=ands))
self._root.append(Or(operations=ors).toXML())
if tostring:
return util.element_to_string(self._root, xml_declaration=False)
return self._root
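# Usage sketch (illustrative only; the variable names below are not part of this
# module): combining constraints with FilterRequest.setConstraintList().
#   bbox_query = BBox([-180, -90, 180, 90])
#   keyword_query = PropertyIsLike('csw:AnyText', '%ocean%')
#   FilterRequest().setConstraintList([[bbox_query, keyword_query]], tostring=True)  # bbox AND keyword
#   FilterRequest().setConstraintList([bbox_query, keyword_query], tostring=True)    # bbox OR keyword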
class FilterCapabilities(object):
"""Abstraction for Filter_Capabilities 2.0"""
def __init__(self, elem):
if elem is None:
self.spatial_operands = []
self.spatial_operators = []
self.temporal_operators = []
self.temporal_operands = []
self.scalar_comparison_operators = []
self.conformance = {}
return
# Spatial_Capabilities
self.spatial_operands = [f.attrib.get('name') for f in elem.findall(util.nspath_eval(
'fes:Spatial_Capabilities/fes:GeometryOperands/fes:GeometryOperand', namespaces))]
self.spatial_operators = []
for f in elem.findall(util.nspath_eval(
'fes:Spatial_Capabilities/fes:SpatialOperators/fes:SpatialOperator', namespaces)):
self.spatial_operators.append(f.attrib['name'])
# Temporal_Capabilities
self.temporal_operands = [f.attrib.get('name') for f in elem.findall(util.nspath_eval(
'fes:Temporal_Capabilities/fes:TemporalOperands/fes:TemporalOperand', namespaces))]
self.temporal_operators = []
for f in elem.findall(util.nspath_eval(
'fes:Temporal_Capabilities/fes:TemporalOperators/fes:TemporalOperator', namespaces)):
self.temporal_operators.append(f.attrib['name'])
# Scalar_Capabilities
self.scalar_comparison_operators = [f.text for f in elem.findall(util.nspath_eval(
'fes:Scalar_Capabilities/fes:ComparisonOperators/fes:ComparisonOperator', namespaces))]
# Conformance
self.conformance = {}
for f in elem.findall(util.nspath_eval('fes:Conformance/fes:Constraint', namespaces)):
self.conformance[f.attrib.get('name')] = f.find(util.nspath_eval('ows110:DefaultValue', namespaces)).text
def setsortby(parent, propertyname, order='ASC'):
"""
constructs a SortBy element
Parameters
----------
- parent: parent etree.Element object
- propertyname: the ValueReference
- order: the SortOrder (default is 'ASC')
"""
tmp = etree.SubElement(parent, util.nspath_eval('fes:SortBy', namespaces))
tmp2 = etree.SubElement(tmp, util.nspath_eval('fes:SortProperty', namespaces))
etree.SubElement(tmp2, util.nspath_eval('fes:ValueReference', namespaces)).text = propertyname
etree.SubElement(tmp2, util.nspath_eval('fes:SortOrder', namespaces)).text = order
class SortProperty(object):
def __init__(self, propertyname, order='ASC'):
self.propertyname = propertyname
self.order = order.upper()
if self.order not in ['DESC', 'ASC']:
raise ValueError("SortOrder can only be 'ASC' or 'DESC'")
def toXML(self):
node0 = etree.Element(util.nspath_eval("fes:SortProperty", namespaces))
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
etree.SubElement(node0, util.nspath_eval('fes:SortOrder', namespaces)).text = self.order
return node0
class SortBy(object):
def __init__(self, properties):
self.properties = properties
def toXML(self):
node0 = etree.Element(util.nspath_eval("fes:SortBy", namespaces))
for prop in self.properties:
node0.append(prop.toXML())
return node0
class OgcExpression(object):
def __init__(self):
pass
class BinaryComparisonOpType(OgcExpression):
""" Super class of all the property operation classes"""
def __init__(self, propertyoperator, propertyname, literal, matchcase=True):
self.propertyoperator = propertyoperator
self.propertyname = propertyname
self.literal = literal
self.matchcase = matchcase
def toXML(self):
node0 = etree.Element(util.nspath_eval(self.propertyoperator, namespaces))
if not self.matchcase:
node0.set('matchCase', 'false')
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
etree.SubElement(node0, util.nspath_eval('fes:Literal', namespaces)).text = self.literal
return node0
class PropertyIsEqualTo(BinaryComparisonOpType):
""" PropertyIsEqualTo class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsEqualTo', propertyname, literal, matchcase)
class PropertyIsNotEqualTo(BinaryComparisonOpType):
""" PropertyIsNotEqualTo class """
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsNotEqualTo', propertyname, literal, matchcase)
class PropertyIsLessThan(BinaryComparisonOpType):
"""PropertyIsLessThan class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsLessThan', propertyname, literal, matchcase)
class PropertyIsGreaterThan(BinaryComparisonOpType):
"""PropertyIsGreaterThan class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsGreaterThan', propertyname, literal, matchcase)
class PropertyIsLessThanOrEqualTo(BinaryComparisonOpType):
"""PropertyIsLessThanOrEqualTo class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsLessThanOrEqualTo', propertyname, literal, matchcase)
class PropertyIsGreaterThanOrEqualTo(BinaryComparisonOpType):
"""PropertyIsGreaterThanOrEqualTo class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsGreaterThanOrEqualTo', propertyname, literal, matchcase)
class PropertyIsLike(OgcExpression):
"""PropertyIsLike class"""
def __init__(self, propertyname, literal, escapeChar='\\', singleChar='_', wildCard='%', matchCase=True):
self.propertyname = propertyname
self.literal = literal
self.escapeChar = escapeChar
self.singleChar = singleChar
self.wildCard = wildCard
self.matchCase = matchCase
def toXML(self):
node0 = etree.Element(util.nspath_eval('fes:PropertyIsLike', namespaces))
node0.set('wildCard', self.wildCard)
node0.set('singleChar', self.singleChar)
node0.set('escapeChar', self.escapeChar)
if not self.matchCase:
node0.set('matchCase', 'false')
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
etree.SubElement(node0, util.nspath_eval('fes:Literal', namespaces)).text = self.literal
return node0
class PropertyIsNull(OgcExpression):
"""PropertyIsNull class"""
def __init__(self, propertyname):
self.propertyname = propertyname
def toXML(self):
node0 = etree.Element(util.nspath_eval('fes:PropertyIsNull', namespaces))
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
return node0
class PropertyIsBetween(OgcExpression):
"""PropertyIsBetween class"""
def __init__(self, propertyname, lower, upper):
self.propertyname = propertyname
self.lower = lower
self.upper = upper
def toXML(self):
node0 = etree.Element(util.nspath_eval('fes:PropertyIsBetween', namespaces))
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
node1 = etree.SubElement(node0, util.nspath_eval('fes:LowerBoundary', namespaces))
etree.SubElement(node1, util.nspath_eval('fes:Literal', namespaces)).text = '%s' % self.lower
node2 = etree.SubElement(node0, util.nspath_eval('fes:UpperBoundary', namespaces))
etree.SubElement(node2, util.nspath_eval('fes:Literal', namespaces)).text = '%s' % self.upper
return node0
class BBox(OgcExpression):
"""Construct a BBox, two pairs of coordinates (west-south and east-north)"""
def __init__(self, bbox, crs=None):
self.bbox = bbox
self.crs = crs
def toXML(self):
tmp = etree.Element(util.nspath_eval('fes:BBOX', namespaces))
etree.SubElement(tmp, util.nspath_eval('fes:ValueReference', namespaces)).text = 'ows:BoundingBox'
tmp2 = etree.SubElement(tmp, util.nspath_eval('gml:Envelope', namespaces))
if self.crs is not None:
tmp2.set('srsName', self.crs)
etree.SubElement(tmp2, util.nspath_eval('gml:lowerCorner', namespaces)).text = '{} {}'.format(
self.bbox[0], self.bbox[1])
etree.SubElement(tmp2, util.nspath_eval('gml:upperCorner', namespaces)).text = '{} {}'.format(
self.bbox[2], self.bbox[3])
return tmp
class Filter(OgcExpression):
def __init__(self, filter):
self.filter = filter
def toXML(self):
node = etree.Element(util.nspath_eval("fes:Filter", namespaces))
node.append(self.filter.toXML())
return node
class TopologicalOpType(OgcExpression, metaclass=ABCMeta):
"""Abstract base class for topological operators."""
@property
@abstractmethod
def operation(self):
"""This is a mechanism to ensure this class is subclassed by an actual operation."""
pass
def __init__(self, propertyname, geometry):
self.propertyname = propertyname
self.geometry = geometry
def toXML(self):
node = etree.Element(util.nspath_eval(f"fes:{self.operation}", namespaces))
etree.SubElement(node, util.nspath_eval("fes:ValueReference", namespaces)).text = self.propertyname
node.append(self.geometry.toXML())
return node
class Intersects(TopologicalOpType):
operation = "Intersects"
class Contains(TopologicalOpType):
operation = "Contains"
class Disjoint(TopologicalOpType):
operation = "Disjoint"
class Within(TopologicalOpType):
operation = "Within"
class Touches(TopologicalOpType):
operation = "Touches"
class Overlaps(TopologicalOpType):
operation = "Overlaps"
class Equals(TopologicalOpType):
operation = "Equals"
# BINARY
class BinaryLogicOpType(OgcExpression):
""" Binary Operators: And / Or """
def __init__(self, binary_operator, operations):
self.binary_operator = binary_operator
        if len(operations) < 2:
            raise ValueError("Binary operations (And / Or) require a minimum of two operations to operate against")
        self.operations = operations
def toXML(self):
node0 = etree.Element(util.nspath_eval(self.binary_operator, namespaces))
for op in self.operations:
node0.append(op.toXML())
return node0
class And(BinaryLogicOpType):
def __init__(self, operations):
super(And, self).__init__('fes:And', operations)
class Or(BinaryLogicOpType):
def __init__(self, operations):
super(Or, self).__init__('fes:Or', operations)
# UNARY
class UnaryLogicOpType(OgcExpression):
""" Unary Operator: Not """
def __init__(self, unary_operator, operations):
self.unary_operator = unary_operator
self.operations = operations
def toXML(self):
node0 = etree.Element(util.nspath_eval(self.unary_operator, namespaces))
for op in self.operations:
node0.append(op.toXML())
return node0
class Not(UnaryLogicOpType):
def __init__(self, operations):
super(Not, self).__init__('fes:Not', operations)
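# A minimal sketch of how these expression classes compose; the property names and
# literal values below are purely illustrative, and serialization relies on the
# module-level `etree` and `namespaces` (with the fes/gml prefixes) imported above.
if __name__ == "__main__":
    query_filter = Filter(
        And([
            PropertyIsEqualTo(propertyname="apiso:Type", literal="dataset"),
            Not([PropertyIsNull(propertyname="apiso:Abstract")]),
        ])
    )
    print(etree.tostring(query_filter.toXML()))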
| 35.072289
| 119
| 0.64262
|
795105870929ac55e13dc4859ab6ef5980891470
| 1,239
|
py
|
Python
|
mnelab/dialogs/filterdialog.py
|
yop0/mnelab
|
12b62d0611ebc63bc23f7c9101d7eabdc1175055
|
[
"BSD-3-Clause"
] | null | null | null |
mnelab/dialogs/filterdialog.py
|
yop0/mnelab
|
12b62d0611ebc63bc23f7c9101d7eabdc1175055
|
[
"BSD-3-Clause"
] | null | null | null |
mnelab/dialogs/filterdialog.py
|
yop0/mnelab
|
12b62d0611ebc63bc23f7c9101d7eabdc1175055
|
[
"BSD-3-Clause"
] | null | null | null |
# Authors: Clemens Brunner <clemens.brunner@gmail.com>
#
# License: BSD (3-clause)
from PySide6.QtWidgets import (QDialog, QDialogButtonBox, QGridLayout, QLabel, QLineEdit,
QVBoxLayout)
class FilterDialog(QDialog):
def __init__(self, parent):
super().__init__(parent)
self.setWindowTitle("Filter data")
vbox = QVBoxLayout(self)
grid = QGridLayout()
grid.addWidget(QLabel("Low cutoff frequency (Hz):"), 0, 0)
self.lowedit = QLineEdit()
grid.addWidget(self.lowedit, 0, 1)
grid.addWidget(QLabel("High cutoff frequency (Hz):"), 1, 0)
self.highedit = QLineEdit()
grid.addWidget(self.highedit, 1, 1)
vbox.addLayout(grid)
buttonbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
vbox.addWidget(buttonbox)
buttonbox.accepted.connect(self.accept)
buttonbox.rejected.connect(self.reject)
vbox.setSizeConstraint(QVBoxLayout.SetFixedSize)
@property
def low(self):
low = self.lowedit.text()
return float(low) if low else None
@property
def high(self):
high = self.highedit.text()
return float(high) if high else None
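# A minimal sketch of driving this dialog from a parent window; it assumes a running
# QApplication, and the window/variable names are illustrative only.
if __name__ == "__main__":
    import sys
    from PySide6.QtWidgets import QApplication, QMainWindow
    app = QApplication(sys.argv)
    main_window = QMainWindow()
    dialog = FilterDialog(main_window)
    if dialog.exec():
        print("Low cutoff (Hz):", dialog.low)
        print("High cutoff (Hz):", dialog.high)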
| 33.486486
| 89
| 0.642454
|
7951060aa182d017c73ec27e51125688668499de
| 11,062
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20200601/get_load_balancer.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20200601/get_load_balancer.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/network/v20200601/get_load_balancer.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetLoadBalancerResult',
'AwaitableGetLoadBalancerResult',
'get_load_balancer',
]
@pulumi.output_type
class GetLoadBalancerResult:
"""
LoadBalancer resource.
"""
def __init__(__self__, backend_address_pools=None, etag=None, frontend_ip_configurations=None, id=None, inbound_nat_pools=None, inbound_nat_rules=None, load_balancing_rules=None, location=None, name=None, outbound_rules=None, probes=None, provisioning_state=None, resource_guid=None, sku=None, tags=None, type=None):
if backend_address_pools and not isinstance(backend_address_pools, list):
raise TypeError("Expected argument 'backend_address_pools' to be a list")
pulumi.set(__self__, "backend_address_pools", backend_address_pools)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if frontend_ip_configurations and not isinstance(frontend_ip_configurations, list):
raise TypeError("Expected argument 'frontend_ip_configurations' to be a list")
pulumi.set(__self__, "frontend_ip_configurations", frontend_ip_configurations)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if inbound_nat_pools and not isinstance(inbound_nat_pools, list):
raise TypeError("Expected argument 'inbound_nat_pools' to be a list")
pulumi.set(__self__, "inbound_nat_pools", inbound_nat_pools)
if inbound_nat_rules and not isinstance(inbound_nat_rules, list):
raise TypeError("Expected argument 'inbound_nat_rules' to be a list")
pulumi.set(__self__, "inbound_nat_rules", inbound_nat_rules)
if load_balancing_rules and not isinstance(load_balancing_rules, list):
raise TypeError("Expected argument 'load_balancing_rules' to be a list")
pulumi.set(__self__, "load_balancing_rules", load_balancing_rules)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if outbound_rules and not isinstance(outbound_rules, list):
raise TypeError("Expected argument 'outbound_rules' to be a list")
pulumi.set(__self__, "outbound_rules", outbound_rules)
if probes and not isinstance(probes, list):
raise TypeError("Expected argument 'probes' to be a list")
pulumi.set(__self__, "probes", probes)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="backendAddressPools")
def backend_address_pools(self) -> Optional[Sequence['outputs.BackendAddressPoolResponse']]:
"""
Collection of backend address pools used by a load balancer.
"""
return pulumi.get(self, "backend_address_pools")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="frontendIPConfigurations")
def frontend_ip_configurations(self) -> Optional[Sequence['outputs.FrontendIPConfigurationResponse']]:
"""
Object representing the frontend IPs to be used for the load balancer.
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="inboundNatPools")
def inbound_nat_pools(self) -> Optional[Sequence['outputs.InboundNatPoolResponse']]:
"""
Defines an external port range for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat rules. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an inbound NAT pool. They have to reference individual inbound NAT rules.
"""
return pulumi.get(self, "inbound_nat_pools")
@property
@pulumi.getter(name="inboundNatRules")
def inbound_nat_rules(self) -> Optional[Sequence['outputs.InboundNatRuleResponse']]:
"""
Collection of inbound NAT Rules used by a load balancer. Defining inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an Inbound NAT pool. They have to reference individual inbound NAT rules.
"""
return pulumi.get(self, "inbound_nat_rules")
@property
@pulumi.getter(name="loadBalancingRules")
def load_balancing_rules(self) -> Optional[Sequence['outputs.LoadBalancingRuleResponse']]:
"""
        Object collection representing the load balancing rules of the load balancer.
"""
return pulumi.get(self, "load_balancing_rules")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="outboundRules")
def outbound_rules(self) -> Optional[Sequence['outputs.OutboundRuleResponse']]:
"""
The outbound rules.
"""
return pulumi.get(self, "outbound_rules")
@property
@pulumi.getter
def probes(self) -> Optional[Sequence['outputs.ProbeResponse']]:
"""
Collection of probe objects used in the load balancer.
"""
return pulumi.get(self, "probes")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the load balancer resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the load balancer resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.LoadBalancerSkuResponse']:
"""
The load balancer SKU.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetLoadBalancerResult(GetLoadBalancerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLoadBalancerResult(
backend_address_pools=self.backend_address_pools,
etag=self.etag,
frontend_ip_configurations=self.frontend_ip_configurations,
id=self.id,
inbound_nat_pools=self.inbound_nat_pools,
inbound_nat_rules=self.inbound_nat_rules,
load_balancing_rules=self.load_balancing_rules,
location=self.location,
name=self.name,
outbound_rules=self.outbound_rules,
probes=self.probes,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_load_balancer(expand: Optional[str] = None,
load_balancer_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLoadBalancerResult:
"""
LoadBalancer resource.
:param str expand: Expands referenced resources.
:param str load_balancer_name: The name of the load balancer.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['loadBalancerName'] = load_balancer_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200601:getLoadBalancer', __args__, opts=opts, typ=GetLoadBalancerResult).value
return AwaitableGetLoadBalancerResult(
backend_address_pools=__ret__.backend_address_pools,
etag=__ret__.etag,
frontend_ip_configurations=__ret__.frontend_ip_configurations,
id=__ret__.id,
inbound_nat_pools=__ret__.inbound_nat_pools,
inbound_nat_rules=__ret__.inbound_nat_rules,
load_balancing_rules=__ret__.load_balancing_rules,
location=__ret__.location,
name=__ret__.name,
outbound_rules=__ret__.outbound_rules,
probes=__ret__.probes,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
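# A minimal sketch of how this data source might be invoked from a Pulumi program's
# __main__.py; the resource names are placeholders and Azure credentials are assumed
# to be configured for the Pulumi CLI.
#
#     import pulumi
#     from pulumi_azure_nextgen.network import v20200601 as network
#
#     lb = network.get_load_balancer(resource_group_name="example-rg",
#                                    load_balancer_name="example-lb")
#     pulumi.export("lb_provisioning_state", lb.provisioning_state)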
| 41.743396
| 572
| 0.6742
|
79510663a0962d73b9a7337e1963c47edb3c462c
| 520
|
py
|
Python
|
app.py
|
andrewst93/strava_snooper_dashboard
|
c7ed786a88e8f7116ce8a24a570752ffb84688ba
|
[
"MIT"
] | null | null | null |
app.py
|
andrewst93/strava_snooper_dashboard
|
c7ed786a88e8f7116ce8a24a570752ffb84688ba
|
[
"MIT"
] | null | null | null |
app.py
|
andrewst93/strava_snooper_dashboard
|
c7ed786a88e8f7116ce8a24a570752ffb84688ba
|
[
"MIT"
] | null | null | null |
import dash
import dash_bootstrap_components as dbc
# used for debugging GCP app engine deployment issues
try:
import googleclouddebugger
googleclouddebugger.enable(breakpoint_enable_canary=True)
except ImportError:
pass
app = dash.Dash(
__name__,
external_stylesheets=[dbc.themes.UNITED],
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
) # dbc.themes.UNITED
app.title = "Strava Snooper"
server = app.server
app.config.suppress_callback_exceptions = True
| 26
| 87
| 0.759615
|
795106b0d72a2830f09c77655f36b0673c2d6017
| 1,606
|
py
|
Python
|
sources/model/finetuning/model_loaders/mobilenetv2.py
|
lthamm/concept-embeddings-and-ilp
|
27592c6424147a2fbb54d7daebc92cd72b3f4a0c
|
[
"MIT"
] | 3
|
2020-11-02T12:21:29.000Z
|
2021-08-02T14:01:37.000Z
|
sources/model/finetuning/model_loaders/mobilenetv2.py
|
lthamm/concept-embeddings-and-ilp
|
27592c6424147a2fbb54d7daebc92cd72b3f4a0c
|
[
"MIT"
] | 2
|
2020-11-06T07:58:13.000Z
|
2022-03-13T16:11:30.000Z
|
sources/model/finetuning/model_loaders/mobilenetv2.py
|
lthamm/concept-embeddings-and-ilp
|
27592c6424147a2fbb54d7daebc92cd72b3f4a0c
|
[
"MIT"
] | 1
|
2020-11-03T14:54:16.000Z
|
2020-11-03T14:54:16.000Z
|
"""Loader of modified MobileNetV2 for finetuning on picasso dataset."""
from typing import Sequence, Dict
import torch
from torchvision.models import MobileNetV2, mobilenet_v2
from ..defaults import NUM_CLASSES
MOBILENETV2_FINETUNE_LAYERS: Sequence[str] = (
'features.17',
'features.18',
'classifier',
)
"""Layers to finetune (layer names from model.named_modules()) for modified ResNeXt."""
def modified_mobilenetv2(state_dict: Dict[str, torch.Tensor] = None, *,
pretrained: bool = False,
num_classes: int = NUM_CLASSES) -> MobileNetV2:
"""Modify a ResNeXt50 model to have num_classes output classes.
A MobileNetV2 instance is created (initialized according to pretrained) and modified as follows:
The last (and only) fully connected layer is replaced by one with num_classes output classes.
:param pretrained: whether to initialize the model with the pretrained VGG16 weights where
applicable; overridden by state_dict
:param state_dict: state dict with which to initialize parameters
:param num_classes: number of output classes of the modified model (no sigmoid applied)
:return: the modified VGG instance; all non-modified layers are initialized with the
pretrained weights if pretrained is True
"""
mobilenetv2: MobileNetV2 = mobilenet_v2(pretrained=pretrained)
# Add fine-tuning/transfer learning modules
mobilenetv2.classifier[1] = torch.nn.Linear(1280, num_classes)
if state_dict is not None:
mobilenetv2.load_state_dict(state_dict)
return mobilenetv2
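# A minimal finetuning sketch, assuming this module is run in its package context
# (e.g. via `python -m ...`); the freezing policy and dummy input are illustrative only.
if __name__ == "__main__":
    model = modified_mobilenetv2(pretrained=False, num_classes=NUM_CLASSES)
    # Freeze everything except the layers declared above for finetuning.
    for name, param in model.named_parameters():
        param.requires_grad = any(name.startswith(layer) for layer in MOBILENETV2_FINETUNE_LAYERS)
    dummy_input = torch.randn(1, 3, 224, 224)
    print(model(dummy_input).shape)  # (1, NUM_CLASSES) logits, no sigmoid applied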
| 42.263158
| 100
| 0.733499
|
79510739c44972099a6d333af0af3b24386c65b9
| 17,382
|
py
|
Python
|
bot/cogs/bot.py
|
ScarletKing001/bot
|
b27c286f2ce648d021eed0c8e07476066b86dd98
|
[
"MIT"
] | null | null | null |
bot/cogs/bot.py
|
ScarletKing001/bot
|
b27c286f2ce648d021eed0c8e07476066b86dd98
|
[
"MIT"
] | null | null | null |
bot/cogs/bot.py
|
ScarletKing001/bot
|
b27c286f2ce648d021eed0c8e07476066b86dd98
|
[
"MIT"
] | null | null | null |
import ast
import logging
import re
import time
from typing import Optional, Tuple
from discord import Embed, Message, RawMessageUpdateEvent, TextChannel
from discord.ext.commands import Cog, Context, command, group
from bot.bot import Bot
from bot.cogs.token_remover import TokenRemover
from bot.constants import Categories, Channels, DEBUG_MODE, Guild, MODERATION_ROLES, Roles, URLs
from bot.decorators import with_role
from bot.utils.messages import wait_for_deletion
log = logging.getLogger(__name__)
RE_MARKDOWN = re.compile(r'([*_~`|>])')
class BotCog(Cog, name="Bot"):
"""Bot information commands."""
def __init__(self, bot: Bot):
self.bot = bot
# Stores allowed channels plus epoch time since last call.
self.channel_cooldowns = {
Channels.python_discussion: 0,
}
# These channels will also work, but will not be subject to cooldown
self.channel_whitelist = (
Channels.bot_commands,
)
# Stores improperly formatted Python codeblock message ids and the corresponding bot message
self.codeblock_message_ids = {}
@group(invoke_without_command=True, name="bot", hidden=True)
@with_role(Roles.verified)
async def botinfo_group(self, ctx: Context) -> None:
"""Bot informational commands."""
await ctx.send_help(ctx.command)
@botinfo_group.command(name='about', aliases=('info',), hidden=True)
@with_role(Roles.verified)
async def about_command(self, ctx: Context) -> None:
"""Get information about the bot."""
embed = Embed(
description="A utility bot designed just for the Python server! Try `!help` for more info.",
url="https://github.com/python-discord/bot"
)
embed.add_field(name="Total Users", value=str(len(self.bot.get_guild(Guild.id).members)))
embed.set_author(
name="Python Bot",
url="https://github.com/python-discord/bot",
icon_url=URLs.bot_avatar
)
await ctx.send(embed=embed)
@command(name='echo', aliases=('print',))
@with_role(*MODERATION_ROLES)
async def echo_command(self, ctx: Context, channel: Optional[TextChannel], *, text: str) -> None:
"""Repeat the given message in either a specified channel or the current channel."""
if channel is None:
await ctx.send(text)
else:
await channel.send(text)
@command(name='embed')
@with_role(*MODERATION_ROLES)
async def embed_command(self, ctx: Context, channel: Optional[TextChannel], *, text: str) -> None:
"""Send the input within an embed to either a specified channel or the current channel."""
embed = Embed(description=text)
if channel is None:
await ctx.send(embed=embed)
else:
await channel.send(embed=embed)
def codeblock_stripping(self, msg: str, bad_ticks: bool) -> Optional[Tuple[Tuple[str, ...], str]]:
"""
Strip msg in order to find Python code.
        Tries to strip Python code out of msg and returns the stripped block or
None if the block is a valid Python codeblock.
"""
if msg.count("\n") >= 3:
# Filtering valid Python codeblocks and exiting if a valid Python codeblock is found.
if re.search("```(?:py|python)\n(.*?)```", msg, re.IGNORECASE | re.DOTALL) and not bad_ticks:
log.trace(
"Someone wrote a message that was already a "
"valid Python syntax highlighted code block. No action taken."
)
return None
else:
# Stripping backticks from every line of the message.
log.trace(f"Stripping backticks from message.\n\n{msg}\n\n")
content = ""
for line in msg.splitlines(keepends=True):
content += line.strip("`")
content = content.strip()
# Remove "Python" or "Py" from start of the message if it exists.
log.trace(f"Removing 'py' or 'python' from message.\n\n{content}\n\n")
pycode = False
if content.lower().startswith("python"):
content = content[6:]
pycode = True
elif content.lower().startswith("py"):
content = content[2:]
pycode = True
if pycode:
content = content.splitlines(keepends=True)
# Check if there might be code in the first line, and preserve it.
first_line = content[0]
if " " in content[0]:
first_space = first_line.index(" ")
content[0] = first_line[first_space:]
content = "".join(content)
# If there's no code we can just get rid of the first line.
else:
content = "".join(content[1:])
                # Strip it again to remove any leading whitespace. This is necessary
# if the first line of the message looked like ```python <code>
old = content.strip()
# Strips REPL code out of the message if there is any.
content, repl_code = self.repl_stripping(old)
if old != content:
return (content, old), repl_code
# Try to apply indentation fixes to the code.
content = self.fix_indentation(content)
# Check if the code contains backticks, if it does ignore the message.
if "`" in content:
log.trace("Detected ` inside the code, won't reply")
return None
else:
log.trace(f"Returning message.\n\n{content}\n\n")
return (content,), repl_code
def fix_indentation(self, msg: str) -> str:
"""Attempts to fix badly indented code."""
def unindent(code: str, skip_spaces: int = 0) -> str:
"""Unindents all code down to the number of spaces given in skip_spaces."""
final = ""
current = code[0]
leading_spaces = 0
# Get numbers of spaces before code in the first line.
while current == " ":
current = code[leading_spaces + 1]
leading_spaces += 1
leading_spaces -= skip_spaces
# If there are any, remove that number of spaces from every line.
if leading_spaces > 0:
for line in code.splitlines(keepends=True):
line = line[leading_spaces:]
final += line
return final
else:
return code
# Apply fix for "all lines are overindented" case.
msg = unindent(msg)
# If the first line does not end with a colon, we can be
# certain the next line will be on the same indentation level.
#
# If it does end with a colon, we will need to indent all successive
# lines one additional level.
first_line = msg.splitlines()[0]
code = "".join(msg.splitlines(keepends=True)[1:])
if not first_line.endswith(":"):
msg = f"{first_line}\n{unindent(code)}"
else:
msg = f"{first_line}\n{unindent(code, 4)}"
return msg
def repl_stripping(self, msg: str) -> Tuple[str, bool]:
"""
Strip msg in order to extract Python code out of REPL output.
        Tries to strip REPL Python code out of msg and returns the stripped msg.
Returns True for the boolean if REPL code was found in the input msg.
"""
final = ""
for line in msg.splitlines(keepends=True):
if line.startswith(">>>") or line.startswith("..."):
final += line[4:]
log.trace(f"Formatted: \n\n{msg}\n\n to \n\n{final}\n\n")
if not final:
log.trace(f"Found no REPL code in \n\n{msg}\n\n")
return msg, False
else:
log.trace(f"Found REPL code in \n\n{msg}\n\n")
return final.rstrip(), True
def has_bad_ticks(self, msg: Message) -> bool:
"""Check to see if msg contains ticks that aren't '`'."""
not_backticks = [
"'''", '"""', "\u00b4\u00b4\u00b4", "\u2018\u2018\u2018", "\u2019\u2019\u2019",
"\u2032\u2032\u2032", "\u201c\u201c\u201c", "\u201d\u201d\u201d", "\u2033\u2033\u2033",
"\u3003\u3003\u3003"
]
return msg.content[:3] in not_backticks
@Cog.listener()
async def on_message(self, msg: Message) -> None:
"""
Detect poorly formatted Python code in new messages.
        If poorly formatted code is detected, send the user a helpful message explaining how to write
properly formatted Python syntax highlighting codeblocks.
"""
is_help_channel = (
getattr(msg.channel, "category", None)
and msg.channel.category.id in (Categories.help_available, Categories.help_in_use)
)
parse_codeblock = (
(
is_help_channel
or msg.channel.id in self.channel_cooldowns
or msg.channel.id in self.channel_whitelist
)
and not msg.author.bot
and len(msg.content.splitlines()) > 3
and not TokenRemover.find_token_in_message(msg)
)
if parse_codeblock: # no token in the msg
on_cooldown = (time.time() - self.channel_cooldowns.get(msg.channel.id, 0)) < 300
if not on_cooldown or DEBUG_MODE:
try:
if self.has_bad_ticks(msg):
ticks = msg.content[:3]
content = self.codeblock_stripping(f"```{msg.content[3:-3]}```", True)
if content is None:
return
content, repl_code = content
if len(content) == 2:
content = content[1]
else:
content = content[0]
space_left = 204
if len(content) >= space_left:
current_length = 0
lines_walked = 0
for line in content.splitlines(keepends=True):
if current_length + len(line) > space_left or lines_walked == 10:
break
current_length += len(line)
lines_walked += 1
content = content[:current_length] + "#..."
content_escaped_markdown = RE_MARKDOWN.sub(r'\\\1', content)
howto = (
"It looks like you are trying to paste code into this channel.\n\n"
"You seem to be using the wrong symbols to indicate where the codeblock should start. "
f"The correct symbols would be \\`\\`\\`, not `{ticks}`.\n\n"
"**Here is an example of how it should look:**\n"
f"\\`\\`\\`python\n{content_escaped_markdown}\n\\`\\`\\`\n\n"
"**This will result in the following:**\n"
f"```python\n{content}\n```"
)
else:
howto = ""
content = self.codeblock_stripping(msg.content, False)
if content is None:
return
content, repl_code = content
# Attempts to parse the message into an AST node.
# Invalid Python code will raise a SyntaxError.
tree = ast.parse(content[0])
# Multiple lines of single words could be interpreted as expressions.
# This check is to avoid all nodes being parsed as expressions.
# (e.g. words over multiple lines)
if not all(isinstance(node, ast.Expr) for node in tree.body) or repl_code:
# Shorten the code to 10 lines and/or 204 characters.
space_left = 204
if content and repl_code:
content = content[1]
else:
content = content[0]
if len(content) >= space_left:
current_length = 0
lines_walked = 0
for line in content.splitlines(keepends=True):
if current_length + len(line) > space_left or lines_walked == 10:
break
current_length += len(line)
lines_walked += 1
content = content[:current_length] + "#..."
content_escaped_markdown = RE_MARKDOWN.sub(r'\\\1', content)
howto += (
"It looks like you're trying to paste code into this channel.\n\n"
"Discord has support for Markdown, which allows you to post code with full "
"syntax highlighting. Please use these whenever you paste code, as this "
"helps improve the legibility and makes it easier for us to help you.\n\n"
f"**To do this, use the following method:**\n"
f"\\`\\`\\`python\n{content_escaped_markdown}\n\\`\\`\\`\n\n"
"**This will result in the following:**\n"
f"```python\n{content}\n```"
)
log.debug(f"{msg.author} posted something that needed to be put inside python code "
"blocks. Sending the user some instructions.")
else:
log.trace("The code consists only of expressions, not sending instructions")
if howto != "":
# Increase amount of codeblock correction in stats
self.bot.stats.incr("codeblock_corrections")
howto_embed = Embed(description=howto)
bot_message = await msg.channel.send(f"Hey {msg.author.mention}!", embed=howto_embed)
self.codeblock_message_ids[msg.id] = bot_message.id
self.bot.loop.create_task(
wait_for_deletion(bot_message, user_ids=(msg.author.id,), client=self.bot)
)
else:
return
if msg.channel.id not in self.channel_whitelist:
self.channel_cooldowns[msg.channel.id] = time.time()
except SyntaxError:
log.trace(
f"{msg.author} posted in a help channel, and when we tried to parse it as Python code, "
"ast.parse raised a SyntaxError. This probably just means it wasn't Python code. "
f"The message that was posted was:\n\n{msg.content}\n\n"
)
@Cog.listener()
async def on_raw_message_edit(self, payload: RawMessageUpdateEvent) -> None:
"""Check to see if an edited message (previously called out) still contains poorly formatted code."""
if (
# Checks to see if the message was called out by the bot
payload.message_id not in self.codeblock_message_ids
# Makes sure that there is content in the message
or payload.data.get("content") is None
# Makes sure there's a channel id in the message payload
or payload.data.get("channel_id") is None
):
return
# Retrieve channel and message objects for use later
channel = self.bot.get_channel(int(payload.data.get("channel_id")))
user_message = await channel.fetch_message(payload.message_id)
# Checks to see if the user has corrected their codeblock. If it's fixed, has_fixed_codeblock will be None
has_fixed_codeblock = self.codeblock_stripping(payload.data.get("content"), self.has_bad_ticks(user_message))
# If the message is fixed, delete the bot message and the entry from the id dictionary
if has_fixed_codeblock is None:
bot_message = await channel.fetch_message(self.codeblock_message_ids[payload.message_id])
await bot_message.delete()
del self.codeblock_message_ids[payload.message_id]
log.trace("User's incorrect code block has been fixed. Removing bot formatting message.")
def setup(bot: Bot) -> None:
"""Load the Bot cog."""
bot.add_cog(BotCog(bot))
| 45.031088
| 117
| 0.532562
|
7951076e110e17f2b5c45264d7c7e1a8114f5963
| 6,305
|
py
|
Python
|
postgresqleu/confreg/campaigns.py
|
dlangille/pgeu-system
|
3f1910010063bab118e94a55ed757b23f1d36bf5
|
[
"MIT"
] | null | null | null |
postgresqleu/confreg/campaigns.py
|
dlangille/pgeu-system
|
3f1910010063bab118e94a55ed757b23f1d36bf5
|
[
"MIT"
] | null | null | null |
postgresqleu/confreg/campaigns.py
|
dlangille/pgeu-system
|
3f1910010063bab118e94a55ed757b23f1d36bf5
|
[
"MIT"
] | null | null | null |
from django import forms
from django.core.exceptions import ValidationError
from django.http import Http404, HttpResponse
from django.utils.dateparse import parse_datetime, parse_duration
from postgresqleu.confreg.jinjafunc import JinjaTemplateValidator, render_sandboxed_template
from postgresqleu.util.widgets import MonospaceTextarea
from postgresqleu.confreg.models import ConferenceSession, Track
from postgresqleu.confreg.twitter import post_conference_tweet
import datetime
import random
def _timestamps_for_tweets(conference, starttime, interval, randint, num):
if isinstance(starttime, datetime.datetime):
t = starttime
else:
t = parse_datetime(starttime)
if isinstance(interval, datetime.time):
ival = datetime.timedelta(hours=interval.hour, minutes=interval.minute, seconds=interval.second)
else:
ival = parse_duration(interval)
if isinstance(randint, datetime.time):
rsec = datetime.timedelta(hours=randint.hour, minutes=randint.minute, seconds=randint.second).total_seconds()
else:
rsec = parse_duration(randint).total_seconds()
for i in range(num):
yield t
t += ival
t += datetime.timedelta(seconds=rsec * random.random())
if t.time() > conference.twitter_timewindow_end:
t = datetime.datetime.combine(t.date() + datetime.timedelta(days=1), conference.twitter_timewindow_start)
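# A small illustration of the spacing behaviour above (values invented; any object
# exposing twitter_timewindow_start/twitter_timewindow_end works as the conference):
#
#     class _Conf:
#         twitter_timewindow_start = datetime.time(8, 0)
#         twitter_timewindow_end = datetime.time(20, 0)
#
#     for ts in _timestamps_for_tweets(_Conf(), datetime.datetime(2024, 5, 1, 9, 0),
#                                      datetime.time(6, 0), datetime.time(0, 30), 4):
#         print(ts)  # four timestamps roughly 6h (+ up to 30min jitter) apart,
#                    # pushed to 08:00 the next day when one would fall after 20:00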
class BaseCampaignForm(forms.Form):
starttime = forms.DateTimeField(label="Date and time of first tweet", initial=datetime.datetime.now)
timebetween = forms.TimeField(label="Time between tweets", initial=datetime.time(6, 0, 0))
timerandom = forms.TimeField(label="Time randomization", initial=datetime.time(0, 30, 0),
help_text="A random time from zero to this is added after each time interval")
content_template = forms.CharField(max_length=2000,
widget=MonospaceTextarea,
required=True)
dynamic_preview_fields = ['content_template', ]
confirm = forms.BooleanField(help_text="Confirm that you want to generate all the tweets for this campaign at this time", required=False)
def __init__(self, conference, *args, **kwargs):
self.conference = conference
self.field_order = ['starttime', 'timebetween', 'timerandom', 'content_template'] + self.custom_fields + ['confirm', ]
        super(BaseCampaignForm, self).__init__(*args, **kwargs)
if not all([self.data.get(f) for f in ['starttime', 'timebetween', 'timerandom', 'content_template'] + self.custom_fields]):
del self.fields['confirm']
else:
num = self.get_queryset().count()
tsl = list(_timestamps_for_tweets(conference,
self.data.get('starttime'),
self.data.get('timebetween'),
self.data.get('timerandom'),
num,
))
if tsl:
approxend = tsl[-1]
self.fields['confirm'].help_text = "Confirm that you want to generate all the tweets for this campaign at this time. Campaign will go on until approximately {}, with {} posts.".format(approxend, num)
else:
self.fields['confirm'].help_text = "Campaign matches no entries. Try again."
def clean_confirm(self):
if not self.cleaned_data['confirm']:
            if self.get_queryset().count() == 0:
del self.fields['confirm']
else:
raise ValidationError("Please check thix box to confirm that you want to generate all tweets!")
def clean(self):
if self.get_queryset().count() == 0:
self.add_error(None, 'Current filters return no entries. Fix your filters and try again!')
del self.fields['confirm']
return self.cleaned_data
class ApprovedSessionsCampaignForm(BaseCampaignForm):
tracks = forms.ModelMultipleChoiceField(required=True, queryset=Track.objects.all())
custom_fields = ['tracks', ]
def __init__(self, *args, **kwargs):
super(ApprovedSessionsCampaignForm, self).__init__(*args, **kwargs)
self.fields['tracks'].queryset = Track.objects.filter(conference=self.conference)
@classmethod
def generate_tweet(cls, conference, session, s):
return render_sandboxed_template(s, {
'conference': conference,
'session': session,
}).strip()[:249]
def get_queryset(self):
return ConferenceSession.objects.filter(conference=self.conference, status=1, cross_schedule=False, track__in=self.data.getlist('tracks'))
def generate_tweets(self, author):
sessions = list(self.get_queryset().order_by('?'))
for ts, session in zip(_timestamps_for_tweets(self.conference, self.cleaned_data['starttime'], self.cleaned_data['timebetween'], self.cleaned_data['timerandom'], len(sessions)), sessions):
post_conference_tweet(self.conference,
self.generate_tweet(self.conference, session, self.cleaned_data['content_template']),
approved=False,
posttime=ts,
author=author)
class ApprovedSessionsCampaign(object):
name = "Approved sessions campaign"
form = ApprovedSessionsCampaignForm
note = "This campaign will create one tweet for each approved session in the system."
@classmethod
def get_dynamic_preview(self, conference, fieldname, s):
if fieldname == 'content_template':
# Generate a preview of 3 (an arbitrary number) sessions
return HttpResponse("\n\n-------------------------------\n\n".join([
self.form.generate_tweet(conference, session, s)
for session in ConferenceSession.objects.filter(conference=conference, status=1, cross_schedule=False)[:3]
]), content_type='text/plain')
allcampaigns = (
(1, ApprovedSessionsCampaign),
)
def get_campaign_from_id(id):
for i, c in allcampaigns:
if i == int(id):
return c
raise Http404()
| 44.716312
| 215
| 0.644409
|
79510915e3a84e62793522fc762dfb767a3322dd
| 3,256
|
py
|
Python
|
recipe_server/settings.py
|
Shouyin/Recipe
|
dffaafdebefd7c39a1438444db910f5d7943cf1f
|
[
"MIT"
] | null | null | null |
recipe_server/settings.py
|
Shouyin/Recipe
|
dffaafdebefd7c39a1438444db910f5d7943cf1f
|
[
"MIT"
] | null | null | null |
recipe_server/settings.py
|
Shouyin/Recipe
|
dffaafdebefd7c39a1438444db910f5d7943cf1f
|
[
"MIT"
] | null | null | null |
"""
Django settings for recipe_server project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wqk&u-eqcds3hp8hwr_a88(=n3$nh!nmc&pxub-c%yknibua*+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["0.0.0.0", "127.0.0.1"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'recipe_server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "recipe_server//templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'recipe_server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "recipe_server/static")
]
| 26.258065
| 91
| 0.698403
|
795109620dee96ad8eef48181ff1ae3077d016d2
| 477
|
py
|
Python
|
TradzQAI/tools/indicators/moving_average_convergence_divergence.py
|
kkuette/AI_project
|
1f46cb2536b24cb3716250f1e9705daa76af4f60
|
[
"Apache-2.0"
] | 164
|
2017-11-24T13:07:04.000Z
|
2022-03-10T04:54:46.000Z
|
TradzQAI/tools/indicators/moving_average_convergence_divergence.py
|
kkuette/AI_project
|
1f46cb2536b24cb3716250f1e9705daa76af4f60
|
[
"Apache-2.0"
] | 21
|
2018-09-29T10:27:10.000Z
|
2019-06-12T07:01:58.000Z
|
TradzQAI/tools/indicators/moving_average_convergence_divergence.py
|
kkuette/AI_project
|
1f46cb2536b24cb3716250f1e9705daa76af4f60
|
[
"Apache-2.0"
] | 49
|
2018-05-09T17:28:52.000Z
|
2022-02-27T04:50:45.000Z
|
from .catch_errors import check_for_period_error
from .exponential_moving_average import exponential_moving_average as ema
def moving_average_convergence_divergence(data, short_period, long_period):
"""
Moving Average Convergence Divergence.
Formula:
EMA(DATA, P1) - EMA(DATA, P2)
"""
check_for_period_error(data, short_period)
check_for_period_error(data, long_period)
macd = ema(data, short_period) - ema(data, long_period)
return macd
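# A small, self-contained illustration with made-up closing prices, assuming the module
# is run in its package context; 12/26 are the conventional MACD periods, not a
# requirement of this helper.
if __name__ == "__main__":
    closes = [float(x) for x in range(1, 40)]
    print(moving_average_convergence_divergence(closes, 12, 26))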
| 29.8125
| 75
| 0.761006
|
795109640595dd9efe5b95216067e6fd4053d037
| 2,795
|
py
|
Python
|
model/bisenet/cityscapes.bisenet.X39.speed/config.py
|
akinoriosamura/TorchSeg-mirror
|
34033fe85fc24015bcef7a92aad39d2a25a001a5
|
[
"MIT"
] | null | null | null |
model/bisenet/cityscapes.bisenet.X39.speed/config.py
|
akinoriosamura/TorchSeg-mirror
|
34033fe85fc24015bcef7a92aad39d2a25a001a5
|
[
"MIT"
] | 1
|
2021-06-08T20:36:43.000Z
|
2021-06-08T20:36:43.000Z
|
model/bisenet/cityscapes.bisenet.X39.speed/config.py
|
akinoriosamura/TorchSeg-mirror
|
34033fe85fc24015bcef7a92aad39d2a25a001a5
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import sys
import time
import numpy as np
from easydict import EasyDict as edict
import argparse
import torch.utils.model_zoo as model_zoo
C = edict()
config = C
cfg = C
C.seed = 12345
"""please config ROOT_dir and user when u first using"""
C.repo_name = 'TorchSeg'
C.abs_dir = osp.realpath(".")
C.this_dir = C.abs_dir.split(osp.sep)[-1]
C.root_dir = C.abs_dir[:C.abs_dir.index(C.repo_name) + len(C.repo_name)]
C.log_dir = osp.abspath(osp.join(C.root_dir, 'log', C.this_dir))
C.log_dir_link = osp.join(C.abs_dir, 'log')
C.snapshot_dir = osp.abspath(osp.join(C.log_dir, "snapshot"))
exp_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())
C.log_file = C.log_dir + '/log_' + exp_time + '.log'
C.link_log_file = C.log_file + '/log_last.log'
C.val_log_file = C.log_dir + '/val_' + exp_time + '.log'
C.link_val_log_file = C.log_dir + '/val_last.log'
"""Data Dir and Weight Dir"""
C.dataset_path = "./Cityscapes/"
C.img_root_folder = C.dataset_path
C.gt_root_folder = C.dataset_path
C.train_source = osp.join(C.dataset_path, "config_new/train.txt")
C.eval_source = osp.join(C.dataset_path, "config_new/val.txt")
C.test_source = osp.join(C.dataset_path, "config_new/test.txt")
C.is_test = False
"""Path Config"""
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
add_path(osp.join(C.root_dir, 'furnace'))
"""Image Config"""
C.num_classes = 19
C.background = -1
C.image_mean = np.array([0.485, 0.456, 0.406]) # 0.485, 0.456, 0.406
C.image_std = np.array([0.229, 0.224, 0.225])
C.target_size = 1024
C.base_size = 832
C.image_height = 768
C.image_width = 1536
C.gt_down_sampling = 8
C.num_train_imgs = 2975
C.num_eval_imgs = 500
""" Settings for network, this would be different for each kind of model"""
C.fix_bias = True
C.fix_bn = False
C.sync_bn = True
C.bn_eps = 1e-5
C.bn_momentum = 0.1
C.pretrained_model = None
"""Train Config"""
C.lr = 1e-2
C.lr_power = 0.9
C.momentum = 0.9
C.weight_decay = 5e-4
C.batch_size = 16 #4 * C.num_gpu
C.nepochs = 140
C.niters_per_epoch = 1000
C.num_workers = 24
C.train_scale_array = [0.5, 0.75, 1, 1.25, 1.5, 1.75]
"""Eval Config"""
C.eval_iter = 30
C.eval_stride_rate = 2 / 3
C.eval_scale_array = [1, ]
C.eval_flip = False
C.eval_height = 768
C.eval_width = 1536
"""Display Config"""
C.snapshot_iter = 50
C.record_info_iter = 20
C.display_iter = 50
def open_tensorboard():
pass
if __name__ == '__main__':
    print(config.nepochs)
parser = argparse.ArgumentParser()
parser.add_argument(
'-tb', '--tensorboard', default=False, action='store_true')
args = parser.parse_args()
if args.tensorboard:
open_tensorboard()
| 24.517544
| 75
| 0.705903
|
79510a132a03ad52247d6225179f22e115a72115
| 44
|
py
|
Python
|
template_globals.py
|
d7d4af8/2047
|
bd6781b9502c6fdbd4745be5084977f679fa3fc5
|
[
"MIT"
] | 35
|
2020-09-01T00:34:50.000Z
|
2022-03-29T13:14:15.000Z
|
template_globals.py
|
d7d4af8/2047
|
bd6781b9502c6fdbd4745be5084977f679fa3fc5
|
[
"MIT"
] | 3
|
2020-08-19T20:47:19.000Z
|
2021-09-06T23:55:49.000Z
|
template_globals.py
|
d7d4af8/2047
|
bd6781b9502c6fdbd4745be5084977f679fa3fc5
|
[
"MIT"
] | 10
|
2020-08-07T02:20:09.000Z
|
2022-01-30T06:43:45.000Z
|
# ugh
tgr = template_globals_registry = {}
| 11
| 36
| 0.704545
|
79510a13d24ba241a9d15975500dc74c0c44de73
| 1,184
|
py
|
Python
|
octavia/amphorae/backends/agent/api_server/certificate_update.py
|
zhangi/octavia
|
e68c851fecf55e1b5ffe7d5b849f729626af28a3
|
[
"Apache-2.0"
] | 129
|
2015-06-23T08:06:23.000Z
|
2022-03-31T12:38:20.000Z
|
octavia/amphorae/backends/agent/api_server/certificate_update.py
|
zhangi/octavia
|
e68c851fecf55e1b5ffe7d5b849f729626af28a3
|
[
"Apache-2.0"
] | 10
|
2020-09-18T12:17:59.000Z
|
2022-03-14T15:45:38.000Z
|
octavia/amphorae/backends/agent/api_server/certificate_update.py
|
zhangi/octavia
|
e68c851fecf55e1b5ffe7d5b849f729626af28a3
|
[
"Apache-2.0"
] | 166
|
2015-07-15T16:24:05.000Z
|
2022-03-02T20:54:36.000Z
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import stat
import flask
from oslo_config import cfg
import webob
BUFFER = 1024
CONF = cfg.CONF
def upload_server_cert():
stream = flask.request.stream
file_path = CONF.amphora_agent.agent_server_cert
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
# mode 00600
mode = stat.S_IRUSR | stat.S_IWUSR
with os.fdopen(os.open(file_path, flags, mode), 'wb') as crt_file:
b = stream.read(BUFFER)
while b:
crt_file.write(b)
b = stream.read(BUFFER)
return webob.Response(json={'message': 'OK'}, status=202)
| 29.6
| 75
| 0.715372
|
79510a60ecc896e04e7a1aee05ef5fd4c130061f
| 6,164
|
py
|
Python
|
ask-sdk-dynamodb-persistence-adapter/tests/unit/test_partition_keygen.py
|
P2707951/alexa-skills-kit-sdk-for-python
|
dd16873682ecb8061eec66835c1dbddbb121467d
|
[
"Apache-2.0"
] | 1
|
2020-06-13T14:14:26.000Z
|
2020-06-13T14:14:26.000Z
|
ask-sdk-dynamodb-persistence-adapter/tests/unit/test_partition_keygen.py
|
P2707951/alexa-skills-kit-sdk-for-python
|
dd16873682ecb8061eec66835c1dbddbb121467d
|
[
"Apache-2.0"
] | null | null | null |
ask-sdk-dynamodb-persistence-adapter/tests/unit/test_partition_keygen.py
|
P2707951/alexa-skills-kit-sdk-for-python
|
dd16873682ecb8061eec66835c1dbddbb121467d
|
[
"Apache-2.0"
] | 2
|
2019-11-22T14:52:47.000Z
|
2021-06-18T13:46:15.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
#
import unittest
from ask_sdk_model import RequestEnvelope, Context, User, Device
from ask_sdk_model.interfaces.system import SystemState
from ask_sdk_core.exceptions import PersistenceException
from ask_sdk_dynamodb.partition_keygen import (
user_id_partition_keygen, device_id_partition_keygen)
class TestPartitionKeyGenerators(unittest.TestCase):
def setUp(self):
self.request_envelope = RequestEnvelope()
self.context = Context()
self.system = SystemState()
self.user = User()
self.device = Device()
def test_valid_user_id_partition_keygen(self):
self.user.user_id = "123"
self.system.user = self.user
self.context.system = self.system
self.request_envelope.context = self.context
assert user_id_partition_keygen(self.request_envelope) == "123", (
"User Id Partition Key Generation retrieved wrong user id from "
"valid request envelope")
def test_user_id_partition_keygen_raise_error_when_request_envelope_null(self):
with self.assertRaises(PersistenceException) as exc:
user_id_partition_keygen(request_envelope=None)
assert "Couldn't retrieve user id from request envelope" in str(
exc.exception), (
"User Id Partition Key Generation didn't throw exception when "
"null request envelope is provided")
def test_user_id_partition_keygen_raise_error_when_context_null(self):
with self.assertRaises(PersistenceException) as exc:
user_id_partition_keygen(request_envelope=self.request_envelope)
assert "Couldn't retrieve user id from request envelope" in str(
exc.exception), (
"User Id Partition Key Generation didn't throw exception when "
"null context provided in request envelope")
def test_user_id_partition_keygen_raise_error_when_system_null(self):
self.request_envelope.context = self.context
with self.assertRaises(PersistenceException) as exc:
user_id_partition_keygen(request_envelope=self.request_envelope)
assert "Couldn't retrieve user id from request envelope" in str(
exc.exception), (
"User Id Partition Key Generation didn't throw exception when "
"null system provided in context of "
"request envelope")
def test_user_id_partition_keygen_raise_error_when_user_null(self):
self.context.system = self.system
self.request_envelope.context = self.context
with self.assertRaises(PersistenceException) as exc:
user_id_partition_keygen(request_envelope=self.request_envelope)
assert "Couldn't retrieve user id from request envelope" in str(
exc.exception), (
"User Id Partition Key Generation didn't throw exception when "
"null user provided in context.system of "
"request envelope")
def test_valid_device_id_partition_keygen(self):
self.device.device_id = "123"
self.system.device = self.device
self.context.system = self.system
self.request_envelope.context = self.context
assert device_id_partition_keygen(self.request_envelope) == "123", (
"Device Id Partition Key Generation retrieved wrong device id "
"from valid request envelope")
def test_device_id_partition_keygen_raise_error_when_request_envelope_null(self):
with self.assertRaises(PersistenceException) as exc:
device_id_partition_keygen(request_envelope=None)
assert "Couldn't retrieve device id from request envelope" in str(
exc.exception), (
"Device Id Partition Key Generation didn't throw exception when "
"null request envelope is provided")
def test_device_id_partition_keygen_raise_error_when_context_null(self):
with self.assertRaises(PersistenceException) as exc:
device_id_partition_keygen(request_envelope=self.request_envelope)
assert "Couldn't retrieve device id from request envelope" in str(
exc.exception), (
"Device Id Partition Key Generation didn't throw exception when "
"null context provided in request envelope")
def test_device_id_partition_keygen_raise_error_when_system_null(self):
self.request_envelope.context = self.context
with self.assertRaises(PersistenceException) as exc:
device_id_partition_keygen(request_envelope=self.request_envelope)
assert "Couldn't retrieve device id from request envelope" in str(
exc.exception), (
"Device Id Partition Key Generation didn't throw exception when "
"null system provided in context of "
"request envelope")
def test_device_id_partition_keygen_raise_error_when_device_null(self):
self.context.system = self.system
self.request_envelope.context = self.context
with self.assertRaises(PersistenceException) as exc:
device_id_partition_keygen(request_envelope=self.request_envelope)
assert "Couldn't retrieve device id from request envelope" in str(
exc.exception), (
"Device Id Partition Key Generation didn't throw exception when "
"null device provided in context.system of "
"request envelope")
def tearDown(self):
self.request_envelope = None
self.context = None
self.system = None
self.user = None
self.device = None
| 41.931973
| 85
| 0.701979
|
79510a77e47c0d173f9641b524ceef32d4c5a3a2
| 3,940
|
py
|
Python
|
apps/article/dashboard/views.py
|
kharann/onlineweb4
|
1130128c6233b623780779a25934ea73ef62c264
|
[
"MIT"
] | null | null | null |
apps/article/dashboard/views.py
|
kharann/onlineweb4
|
1130128c6233b623780779a25934ea73ef62c264
|
[
"MIT"
] | null | null | null |
apps/article/dashboard/views.py
|
kharann/onlineweb4
|
1130128c6233b623780779a25934ea73ef62c264
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
from collections import Counter
from logging import getLogger
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import get_object_or_404, redirect, render
from guardian.decorators import permission_required
from taggit.models import TaggedItem
from apps.article.dashboard.forms import ArticleForm
from apps.article.models import Article
from apps.dashboard.tools import check_access_or_403, get_base_context
@permission_required('article.view_article', return_403=True)
def article_index(request):
check_access_or_403(request)
context = get_base_context(request)
context['articles'] = Article.objects.all().order_by('-published_date')
context['years'] = sorted(list(set(a.published_date.year for a in context['articles'])), reverse=True)
context['pages'] = list(range(1, context['articles'].count() // 10 + 2))
# Fetch 30 most popular tags from the Django-taggit registry, using a Counter
queryset = TaggedItem.objects.filter(content_type=ContentType.objects.get_for_model(Article))
context['tags'] = Counter(map(lambda item: item.tag, queryset)).most_common(30)
return render(request, 'article/dashboard/article_index.html', context)
@permission_required('article.add_article', return_403=True)
def article_create(request):
check_access_or_403(request)
form = ArticleForm()
if request.method == 'POST':
form = ArticleForm(request.POST)
if form.is_valid():
instance = form.save(commit=False)
instance.changed_by = request.user
instance.created_by = request.user
instance.save()
form.save_m2m()
messages.success(request, 'Artikkelen ble opprettet.')
return redirect(article_detail, article_id=instance.pk)
else:
messages.error(request, 'Noen av de påkrevde feltene inneholder feil.')
context = get_base_context(request)
context['form'] = form
return render(request, 'article/dashboard/article_create.html', context)
@permission_required('article.view_article', return_403=True)
def article_detail(request, article_id):
check_access_or_403(request)
article = get_object_or_404(Article, pk=article_id)
context = get_base_context(request)
context['article'] = article
return render(request, 'article/dashboard/article_detail.html', context)
@permission_required('article.change_article', return_403=True)
def article_edit(request, article_id):
check_access_or_403(request)
article = get_object_or_404(Article, pk=article_id)
form = ArticleForm(instance=article)
if request.method == 'POST':
if 'action' in request.POST and request.POST['action'] == 'delete':
instance = get_object_or_404(Article, pk=article_id)
article_heading = instance.heading
article_id = instance.id
instance.delete()
messages.success(request, '%s ble slettet.' % article_heading)
getLogger(__name__).info('%s deleted article %d (%s)' % (request.user, article_id, article_heading))
return redirect(article_index)
form = ArticleForm(request.POST, instance=article)
if form.is_valid():
instance = form.save(commit=False)
instance.changed_by = request.user
instance.save()
form.save_m2m()
messages.success(request, 'Artikkelen ble lagret.')
getLogger(__name__).info('%s edited article %d (%s)' % (request.user, instance.id, instance.heading))
return redirect(article_index)
else:
messages.error(request, 'Noen av de påkrevde feltene inneholder feil.')
context = get_base_context(request)
context['form'] = form
context['edit'] = True
return render(request, 'article/dashboard/article_create.html', context)
| 36.146789
| 113
| 0.702284
|
79510be763eb0609efcba32abd9bcc49040f3f65
| 6,951
|
py
|
Python
|
tempest/tests/cmd/test_tempest_init.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | 1
|
2021-05-21T08:24:02.000Z
|
2021-05-21T08:24:02.000Z
|
tempest/tests/cmd/test_tempest_init.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | null | null | null |
tempest/tests/cmd/test_tempest_init.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | 5
|
2016-06-24T20:03:52.000Z
|
2020-02-05T10:14:54.000Z
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from tempest.cmd import init
from tempest.tests import base
class TestTempestInit(base.TestCase):
def test_generate_testr_conf(self):
# Create fake conf dir
conf_dir = self.useFixture(fixtures.TempDir())
init_cmd = init.TempestInit(None, None)
init_cmd.generate_testr_conf(conf_dir.path)
# Generate expected file contents
top_level_path = os.path.dirname(os.path.dirname(init.__file__))
discover_path = os.path.join(top_level_path, 'test_discover')
testr_conf_file = init.TESTR_CONF % (top_level_path, discover_path)
conf_path = conf_dir.join('.testr.conf')
with open(conf_path, 'r') as conf_file:
self.assertEqual(conf_file.read(), testr_conf_file)
def test_generate_sample_config(self):
local_dir = self.useFixture(fixtures.TempDir())
etc_dir_path = os.path.join(local_dir.path, 'etc/')
os.mkdir(etc_dir_path)
init_cmd = init.TempestInit(None, None)
local_sample_conf_file = os.path.join(etc_dir_path,
'tempest.conf.sample')
# Verify no sample config file exist
self.assertFalse(os.path.isfile(local_sample_conf_file))
init_cmd.generate_sample_config(local_dir.path)
# Verify sample config file exist with some content
self.assertTrue(os.path.isfile(local_sample_conf_file))
self.assertGreater(os.path.getsize(local_sample_conf_file), 0)
def test_update_local_conf(self):
local_dir = self.useFixture(fixtures.TempDir())
etc_dir_path = os.path.join(local_dir.path, 'etc/')
os.mkdir(etc_dir_path)
lock_dir = os.path.join(local_dir.path, 'tempest_lock')
config_path = os.path.join(etc_dir_path, 'tempest.conf')
log_dir = os.path.join(local_dir.path, 'logs')
init_cmd = init.TempestInit(None, None)
# Generate the config file
init_cmd.generate_sample_config(local_dir.path)
# Create a conf file with populated values
config_parser_pre = init_cmd.get_configparser(config_path)
with open(config_path, 'w+') as conf_file:
# create the same section init will check for and add values to
config_parser_pre.add_section('oslo_concurrency')
config_parser_pre.set('oslo_concurrency', 'TEST', local_dir.path)
# create a new section
config_parser_pre.add_section('TEST')
config_parser_pre.set('TEST', 'foo', "bar")
config_parser_pre.write(conf_file)
# Update the config file the same way tempest init does
init_cmd.update_local_conf(config_path, lock_dir, log_dir)
# parse the new config file to verify it
config_parser_post = init_cmd.get_configparser(config_path)
# check that our value in oslo_concurrency wasn't overwritten
self.assertTrue(config_parser_post.has_section('oslo_concurrency'))
self.assertEqual(config_parser_post.get('oslo_concurrency', 'TEST'),
local_dir.path)
# check that the lock directory was set correctly
self.assertEqual(config_parser_post.get('oslo_concurrency',
'lock_path'), lock_dir)
# check that our new section still exists and wasn't modified
self.assertTrue(config_parser_post.has_section('TEST'))
self.assertEqual(config_parser_post.get('TEST', 'foo'), 'bar')
# check that the DEFAULT values are correct
# NOTE(auggy): has_section ignores DEFAULT
self.assertEqual(config_parser_post.get('DEFAULT', 'log_dir'), log_dir)
def test_create_working_dir_with_existing_local_dir_non_empty(self):
fake_local_dir = self.useFixture(fixtures.TempDir())
fake_local_conf_dir = self.useFixture(fixtures.TempDir())
open("%s/foo" % fake_local_dir.path, 'w').close()
_init = init.TempestInit(None, None)
self.assertRaises(OSError,
_init.create_working_dir,
fake_local_dir.path,
fake_local_conf_dir.path)
def test_create_working_dir(self):
fake_local_dir = self.useFixture(fixtures.TempDir())
fake_local_conf_dir = self.useFixture(fixtures.TempDir())
os.rmdir(fake_local_dir.path)
# Create a fake conf file
fake_file = fake_local_conf_dir.join('conf_file.conf')
open(fake_file, 'w').close()
init_cmd = init.TempestInit(None, None)
init_cmd.create_working_dir(fake_local_dir.path,
fake_local_conf_dir.path)
# Assert directories are created
lock_path = os.path.join(fake_local_dir.path, 'tempest_lock')
etc_dir = os.path.join(fake_local_dir.path, 'etc')
log_dir = os.path.join(fake_local_dir.path, 'logs')
testr_dir = os.path.join(fake_local_dir.path, '.testrepository')
self.assertTrue(os.path.isdir(lock_path))
self.assertTrue(os.path.isdir(etc_dir))
self.assertTrue(os.path.isdir(log_dir))
self.assertTrue(os.path.isdir(testr_dir))
# Assert file creation
fake_file_moved = os.path.join(etc_dir, 'conf_file.conf')
local_conf_file = os.path.join(etc_dir, 'tempest.conf')
local_testr_conf = os.path.join(fake_local_dir.path, '.testr.conf')
self.assertTrue(os.path.isfile(fake_file_moved))
self.assertTrue(os.path.isfile(local_conf_file))
self.assertTrue(os.path.isfile(local_testr_conf))
def test_take_action_fails(self):
class ParsedArgs(object):
workspace_dir = self.useFixture(fixtures.TempDir()).path
workspace_path = os.path.join(workspace_dir, 'workspace.yaml')
name = 'test'
dir_base = self.useFixture(fixtures.TempDir()).path
dir = os.path.join(dir_base, 'foo', 'bar')
config_dir = self.useFixture(fixtures.TempDir()).path
show_global_dir = False
pa = ParsedArgs()
init_cmd = init.TempestInit(None, None)
self.assertRaises(OSError, init_cmd.take_action, pa)
        # calling take_action again should raise the same error, not "workspace already exists"
self.assertRaises(OSError, init_cmd.take_action, pa)
| 44.845161
| 79
| 0.670983
|
79510c133ad4c43a88f58ebb297137fca5dbc14c
| 664
|
py
|
Python
|
05.py
|
DarkMio/AOC2016
|
e3ca794a9deffcc6a4be629ba593147e622ce648
|
[
"MIT"
] | null | null | null |
05.py
|
DarkMio/AOC2016
|
e3ca794a9deffcc6a4be629ba593147e622ce648
|
[
"MIT"
] | null | null | null |
05.py
|
DarkMio/AOC2016
|
e3ca794a9deffcc6a4be629ba593147e622ce648
|
[
"MIT"
] | null | null | null |
import hashlib
def find_code(door_id):
    # Advent of Code 2016, day 5: brute-force MD5(door_id + index) until the
    # hex digest starts with five zeroes.
    first = []            # part 1: 6th hex digit of every qualifying hash, in order
    second = [None] * 8   # part 2: 6th digit gives the position, 7th the character
    i = 0
    while None in second:
        m = hashlib.md5(door_id + str(i).encode('utf-8')).hexdigest()
        if m.startswith('00000'):
            print("{}: {}".format(door_id + str(i).encode('utf-8'), m))
            location = int(m[5], 16)
            first.append(m[5])
            # only the first hit for each of the eight positions counts
            if location < 8 and second[location] is None:
                second[location] = m[6]
        i += 1
    # part 2 needs at least eight qualifying hashes, so first always has >= 8 entries
    return [''.join(first[:8]), ''.join(second)]
door_id = 'uqwqemis'.encode('utf-8')
code_part = find_code(door_id)
print('MD5 Part1: {} \nMD5 Part2: {}'.format(code_part[0], code_part[1]))
| 31.619048
| 73
| 0.546687
|
79510cea1f28fb3fd3f65f82b2226f9c7d6b6fcb
| 10,821
|
py
|
Python
|
tests/integration/shell/test_key.py
|
guoxiaod/salt
|
2cd6c03b40932be137e6e8a672967b59025a2d34
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/shell/test_key.py
|
guoxiaod/salt
|
2cd6c03b40932be137e6e8a672967b59025a2d34
|
[
"Apache-2.0"
] | 1
|
2019-08-18T07:03:30.000Z
|
2019-08-18T07:03:30.000Z
|
tests/integration/shell/test_key.py
|
guoxiaod/salt
|
2cd6c03b40932be137e6e8a672967b59025a2d34
|
[
"Apache-2.0"
] | 2
|
2020-11-04T06:24:32.000Z
|
2020-11-06T11:00:57.000Z
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import shutil
import tempfile
import textwrap
# Import Salt Testing libs
from tests.support.case import ShellCase
from tests.support.paths import TMP
from tests.support.mixins import ShellCaseCommonTestsMixin
# Import 3rd-party libs
from salt.ext import six
# Import Salt libs
import salt.utils.files
import salt.utils.platform
import salt.utils.yaml
USERA = 'saltdev'
USERA_PWD = 'saltdev'
HASHED_USERA_PWD = '$6$SALTsalt$ZZFD90fKFWq8AGmmX0L3uBtS9fXL62SrTk5zcnQ6EkD6zoiM3kB88G1Zvs0xm/gZ7WXJRs5nsTBybUvGSqZkT.'
class KeyTest(ShellCase, ShellCaseCommonTestsMixin):
'''
Test salt-key script
'''
_call_binary_ = 'salt-key'
def _add_user(self):
'''
helper method to add user
'''
try:
add_user = self.run_call('user.add {0} createhome=False'.format(USERA))
add_pwd = self.run_call('shadow.set_password {0} \'{1}\''.format(USERA,
USERA_PWD if salt.utils.platform.is_darwin() else HASHED_USERA_PWD))
self.assertTrue(add_user)
self.assertTrue(add_pwd)
user_list = self.run_call('user.list_users')
self.assertIn(USERA, six.text_type(user_list))
except AssertionError:
self.run_call('user.delete {0} remove=True'.format(USERA))
self.skipTest(
'Could not add user or password, skipping test'
)
def _remove_user(self):
'''
helper method to remove user
'''
user_list = self.run_call('user.list_users')
for user in user_list:
if USERA in user:
self.run_call('user.delete {0} remove=True'.format(USERA))
def test_remove_key(self):
'''
test salt-key -d usage
'''
min_name = 'minibar'
pki_dir = self.master_opts['pki_dir']
key = os.path.join(pki_dir, 'minions', min_name)
with salt.utils.files.fopen(key, 'w') as fp:
fp.write(textwrap.dedent('''\
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoqIZDtcQtqUNs0wC7qQz
JwFhXAVNT5C8M8zhI+pFtF/63KoN5k1WwAqP2j3LquTG68WpxcBwLtKfd7FVA/Kr
OF3kXDWFnDi+HDchW2lJObgfzLckWNRFaF8SBvFM2dys3CGSgCV0S/qxnRAjrJQb
B3uQwtZ64ncJAlkYpArv3GwsfRJ5UUQnYPDEJwGzMskZ0pHd60WwM1gMlfYmNX5O
RBEjybyNpYDzpda6e6Ypsn6ePGLkP/tuwUf+q9wpbRE3ZwqERC2XRPux+HX2rGP+
mkzpmuHkyi2wV33A9pDfMgRHdln2CLX0KgfRGixUQhW1o+Kmfv2rq4sGwpCgLbTh
NwIDAQAB
-----END PUBLIC KEY-----
'''))
check_key = self.run_key('-p {0}'.format(min_name))
self.assertIn('Accepted Keys:', check_key)
self.assertIn('minibar: -----BEGIN PUBLIC KEY-----', check_key)
remove_key = self.run_key('-d {0} -y'.format(min_name))
check_key = self.run_key('-p {0}'.format(min_name))
self.assertEqual([], check_key)
def test_list_accepted_args(self):
'''
test salt-key -l for accepted arguments
'''
for key in ('acc', 'pre', 'den', 'un', 'rej'):
# These should not trigger any error
data = self.run_key('-l {0}'.format(key), catch_stderr=True)
self.assertNotIn('error:', '\n'.join(data[1]))
data = self.run_key('-l foo-{0}'.format(key), catch_stderr=True)
self.assertIn('error:', '\n'.join(data[1]))
def test_list_all(self):
'''
test salt-key -L
'''
data = self.run_key('-L')
expect = None
if self.master_opts['transport'] in ('zeromq', 'tcp'):
expect = [
'Accepted Keys:',
'minion',
'sub_minion',
'Denied Keys:',
'Unaccepted Keys:',
'Rejected Keys:'
]
elif self.master_opts['transport'] == 'raet':
expect = [
'Accepted Keys:',
'minion',
'sub_minion',
'Unaccepted Keys:',
'Rejected Keys:'
]
self.assertEqual(data, expect)
def test_list_json_out(self):
'''
test salt-key -L --json-out
'''
data = self.run_key('-L --out json')
ret = {}
try:
import salt.utils.json
ret = salt.utils.json.loads('\n'.join(data))
except ValueError:
pass
expect = None
if self.master_opts['transport'] in ('zeromq', 'tcp'):
expect = {'minions_rejected': [],
'minions_denied': [],
'minions_pre': [],
'minions': ['minion', 'sub_minion']}
elif self.master_opts['transport'] == 'raet':
expect = {'accepted': ['minion', 'sub_minion'],
'rejected': [],
'pending': []}
self.assertEqual(ret, expect)
def test_list_yaml_out(self):
'''
test salt-key -L --yaml-out
'''
data = self.run_key('-L --out yaml')
ret = {}
try:
import salt.utils.yaml
ret = salt.utils.yaml.safe_load('\n'.join(data))
except Exception:
pass
expect = []
if self.master_opts['transport'] in ('zeromq', 'tcp'):
expect = {'minions_rejected': [],
'minions_denied': [],
'minions_pre': [],
'minions': ['minion', 'sub_minion']}
elif self.master_opts['transport'] == 'raet':
expect = {'accepted': ['minion', 'sub_minion'],
'rejected': [],
'pending': []}
self.assertEqual(ret, expect)
def test_list_raw_out(self):
'''
test salt-key -L --raw-out
'''
data = self.run_key('-L --out raw')
self.assertEqual(len(data), 1)
ret = {}
try:
import ast
ret = ast.literal_eval(data[0])
except ValueError:
pass
expect = None
if self.master_opts['transport'] in ('zeromq', 'tcp'):
expect = {'minions_rejected': [],
'minions_denied': [],
'minions_pre': [],
'minions': ['minion', 'sub_minion']}
elif self.master_opts['transport'] == 'raet':
expect = {'accepted': ['minion', 'sub_minion'],
'rejected': [],
'pending': []}
self.assertEqual(ret, expect)
def test_list_acc(self):
'''
test salt-key -l
'''
data = self.run_key('-l acc')
expect = ['Accepted Keys:', 'minion', 'sub_minion']
self.assertEqual(data, expect)
def test_list_acc_eauth(self):
'''
test salt-key -l with eauth
'''
self._add_user()
data = self.run_key('-l acc --eauth pam --username {0} --password {1}'.format(USERA, USERA_PWD))
expect = ['Accepted Keys:', 'minion', 'sub_minion']
self.assertEqual(data, expect)
self._remove_user()
def test_list_acc_eauth_bad_creds(self):
'''
test salt-key -l with eauth and bad creds
'''
self._add_user()
data = self.run_key('-l acc --eauth pam --username {0} --password wrongpassword'.format(USERA))
expect = ['Authentication failure of type "eauth" occurred for user {0}.'.format(USERA)]
self.assertEqual(data, expect)
self._remove_user()
def test_list_acc_wrong_eauth(self):
'''
test salt-key -l with wrong eauth
'''
data = self.run_key('-l acc --eauth wrongeauth --username {0} --password {1}'.format(USERA, USERA_PWD))
expect = r"^The specified external authentication system \"wrongeauth\" is not available\tAvailable eauth types: auto, .*"
self.assertRegex("\t".join(data), expect)
def test_list_un(self):
'''
test salt-key -l
'''
data = self.run_key('-l un')
expect = ['Unaccepted Keys:']
self.assertEqual(data, expect)
def test_keys_generation(self):
tempdir = tempfile.mkdtemp(dir=TMP)
arg_str = '--gen-keys minibar --gen-keys-dir {0}'.format(tempdir)
self.run_key(arg_str)
try:
key_names = None
if self.master_opts['transport'] in ('zeromq', 'tcp'):
key_names = ('minibar.pub', 'minibar.pem')
elif self.master_opts['transport'] == 'raet':
key_names = ('minibar.key',)
for fname in key_names:
self.assertTrue(os.path.isfile(os.path.join(tempdir, fname)))
finally:
shutil.rmtree(tempdir)
def test_keys_generation_keysize_minmax(self):
tempdir = tempfile.mkdtemp(dir=TMP)
arg_str = '--gen-keys minion --gen-keys-dir {0}'.format(tempdir)
try:
data, error = self.run_key(
arg_str + ' --keysize=1024', catch_stderr=True
)
self.assertIn(
'salt-key: error: The minimum value for keysize is 2048', error
)
data, error = self.run_key(
arg_str + ' --keysize=32769', catch_stderr=True
)
self.assertIn(
'salt-key: error: The maximum value for keysize is 32768',
error
)
finally:
shutil.rmtree(tempdir)
def test_issue_7754(self):
old_cwd = os.getcwd()
config_dir = os.path.join(TMP, 'issue-7754')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
os.chdir(config_dir)
config_file_name = 'master'
with salt.utils.files.fopen(self.get_config_file_path(config_file_name), 'r') as fhr:
config = salt.utils.yaml.safe_load(fhr)
config['log_file'] = 'file:///dev/log/LOG_LOCAL3'
with salt.utils.files.fopen(os.path.join(config_dir, config_file_name), 'w') as fhw:
salt.utils.yaml.safe_dump(config, fhw, default_flow_style=False)
ret = self.run_script(
self._call_binary_,
'--config-dir {0} -L'.format(
config_dir
),
timeout=60
)
try:
self.assertIn('minion', '\n'.join(ret))
self.assertFalse(os.path.isdir(os.path.join(config_dir, 'file:')))
finally:
self.chdir(old_cwd)
if os.path.isdir(config_dir):
shutil.rmtree(config_dir)
| 34.906452
| 130
| 0.541355
|
79510e08a1ee4a653959189a5b449906796e6997
| 2,129
|
py
|
Python
|
DiscardIrregularFieldNumberLines.py
|
neil92/MiscScripts2
|
b65444d99c057c305c1bebb437402004c5345b7b
|
[
"MIT"
] | null | null | null |
DiscardIrregularFieldNumberLines.py
|
neil92/MiscScripts2
|
b65444d99c057c305c1bebb437402004c5345b7b
|
[
"MIT"
] | null | null | null |
DiscardIrregularFieldNumberLines.py
|
neil92/MiscScripts2
|
b65444d99c057c305c1bebb437402004c5345b7b
|
[
"MIT"
] | null | null | null |
#!/usr/local/miniconda3/bin/python
import argparse
def parse_file(irregular_file, output_file, delimiter, number_of_fields):
total_lines = 0
removed_lines = 0
with open(irregular_file, "r") as irregular_file_object, open(output_file, "w") as output_file_object:
line = irregular_file_object.readline()
while line:
total_lines = total_lines + 1
split_line = line.split(sep=delimiter)
if (total_lines == 3):
print("Number of fields: {}".format(len(split_line)))
if (len(split_line) == number_of_fields):
output_file_object.write(line)
else:
removed_lines = removed_lines + 1
line = irregular_file_object.readline()
return (total_lines, removed_lines)
def setup_arguments():
"""
    This is the function that sets up the flags and the arguments you can pass to the script.
:author: Neil A. Patel
"""
a_parser = argparse.ArgumentParser("Get's the arguments for the parse irregular file tool.")
a_parser.add_argument("-f", "--file", action="store", dest="file_input_data", required=True,
help="Please supply a file that has an irregular number of fields per line that you want to filter.")
a_parser.add_argument("-n", "--number_fields", action="store", dest="number_fields", required=False, default=1,
type=int, help="The number of fields per line (i.e. number of delimiters + 1).")
a_parser.add_argument("-o", "--output_file", action="store", dest="output_file", required=False,
default="output_file.txt", help="The output file that the filtered file goes into.")
a_parser.add_argument("-d", "--delimiter", action="store", dest="delimiter", required=False, default="\t",
help="The character used to seperate the fields in the line.")
return a_parser.parse_args()
def main():
args = setup_arguments()
total_lines, removed_lines = parse_file(args.file_input_data, args.output_file, args.delimiter, args.number_fields)
print("The total number of lines processed: {}".format(total_lines))
print("The total number removed lines: {}".format(removed_lines))
if __name__ == "__main__":
main()
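# Example invocation (hypothetical file names, shown only for illustration):
#   python DiscardIrregularFieldNumberLines.py -f input.csv -n 5 -d "," -o filtered.csv
# Only lines of input.csv that split into exactly five comma-separated fields are kept
# in filtered.csv; the totals printed at the end report how many lines were dropped.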
| 38.709091
| 117
| 0.708314
|
79510eddd055dd2ddc17dac656a2016026065919
| 16,067
|
py
|
Python
|
CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Demo/ceilometer/ceilometer/event/converter.py
|
ishtjot/susereumutep
|
56e20c1777e0c938ac42bd8056f84af9e0b76e46
|
[
"Apache-2.0"
] | 2
|
2018-11-07T20:52:53.000Z
|
2019-10-20T15:57:01.000Z
|
CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Demo/ceilometer/ceilometer/event/converter.py
|
ishtjot/susereumutep
|
56e20c1777e0c938ac42bd8056f84af9e0b76e46
|
[
"Apache-2.0"
] | 3
|
2021-12-14T20:57:54.000Z
|
2022-01-21T23:50:36.000Z
|
CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Demo/ceilometer/ceilometer/event/converter.py
|
ishtjot/susereumutep
|
56e20c1777e0c938ac42bd8056f84af9e0b76e46
|
[
"Apache-2.0"
] | 2
|
2018-11-16T04:20:06.000Z
|
2019-03-28T23:49:13.000Z
|
#
# Copyright 2013 Rackspace Hosting.
#
# Author: Monsyne Dragon <mdragon@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fnmatch
import os
import jsonpath_rw
from oslo.config import cfg
from oslo.utils import timeutils
import six
import yaml
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
from ceilometer.storage import models
OPTS = [
cfg.StrOpt('definitions_cfg_file',
default="event_definitions.yaml",
help="Configuration file for event definitions."
),
cfg.BoolOpt('drop_unmatched_notifications',
default=False,
help='Drop notifications if no event definition matches. '
'(Otherwise, we convert them with just the default traits)'),
]
cfg.CONF.register_opts(OPTS, group='event')
LOG = log.getLogger(__name__)
class EventDefinitionException(Exception):
def __init__(self, message, definition_cfg):
super(EventDefinitionException, self).__init__(message)
self.definition_cfg = definition_cfg
def __str__(self):
return '%s %s: %s' % (self.__class__.__name__,
self.definition_cfg, self.message)
class TraitDefinition(object):
def __init__(self, name, trait_cfg, plugin_manager):
self.cfg = trait_cfg
self.name = name
type_name = trait_cfg.get('type', 'text')
if 'plugin' in trait_cfg:
plugin_cfg = trait_cfg['plugin']
if isinstance(plugin_cfg, six.string_types):
plugin_name = plugin_cfg
plugin_params = {}
else:
try:
plugin_name = plugin_cfg['name']
except KeyError:
raise EventDefinitionException(
_('Plugin specified, but no plugin name supplied for '
'trait %s') % name, self.cfg)
plugin_params = plugin_cfg.get('parameters')
if plugin_params is None:
plugin_params = {}
try:
plugin_ext = plugin_manager[plugin_name]
except KeyError:
raise EventDefinitionException(
_('No plugin named %(plugin)s available for '
'trait %(trait)s') % dict(plugin=plugin_name,
trait=name), self.cfg)
plugin_class = plugin_ext.plugin
self.plugin = plugin_class(**plugin_params)
else:
self.plugin = None
if 'fields' not in trait_cfg:
raise EventDefinitionException(
_("Required field in trait definition not specified: "
"'%s'") % 'fields',
self.cfg)
fields = trait_cfg['fields']
if not isinstance(fields, six.string_types):
# NOTE(mdragon): if not a string, we assume a list.
if len(fields) == 1:
fields = fields[0]
else:
fields = '|'.join('(%s)' % path for path in fields)
try:
self.fields = jsonpath_rw.parse(fields)
except Exception as e:
raise EventDefinitionException(
_("Parse error in JSONPath specification "
"'%(jsonpath)s' for %(trait)s: %(err)s")
% dict(jsonpath=fields, trait=name, err=e), self.cfg)
self.trait_type = models.Trait.get_type_by_name(type_name)
if self.trait_type is None:
raise EventDefinitionException(
_("Invalid trait type '%(type)s' for trait %(trait)s")
% dict(type=type_name, trait=name), self.cfg)
def _get_path(self, match):
if match.context is not None:
for path_element in self._get_path(match.context):
yield path_element
yield str(match.path)
def to_trait(self, notification_body):
values = [match for match in self.fields.find(notification_body)
if match.value is not None]
if self.plugin is not None:
value_map = [('.'.join(self._get_path(match)), match.value) for
match in values]
value = self.plugin.trait_value(value_map)
else:
value = values[0].value if values else None
if value is None:
return None
# NOTE(mdragon): some openstack projects (mostly Nova) emit ''
# for null fields for things like dates.
if self.trait_type != models.Trait.TEXT_TYPE and value == '':
return None
value = models.Trait.convert_value(self.trait_type, value)
return models.Trait(self.name, self.trait_type, value)
class EventDefinition(object):
DEFAULT_TRAITS = dict(
service=dict(type='text', fields='publisher_id'),
request_id=dict(type='text', fields='_context_request_id'),
tenant_id=dict(type='text', fields=['payload.tenant_id',
'_context_tenant']),
)
def __init__(self, definition_cfg, trait_plugin_mgr):
self._included_types = []
self._excluded_types = []
self.traits = dict()
self.cfg = definition_cfg
try:
event_type = definition_cfg['event_type']
traits = definition_cfg['traits']
except KeyError as err:
raise EventDefinitionException(
_("Required field %s not specified") % err.args[0], self.cfg)
if isinstance(event_type, six.string_types):
event_type = [event_type]
for t in event_type:
if t.startswith('!'):
self._excluded_types.append(t[1:])
else:
self._included_types.append(t)
if self._excluded_types and not self._included_types:
self._included_types.append('*')
for trait_name in self.DEFAULT_TRAITS:
self.traits[trait_name] = TraitDefinition(
trait_name,
self.DEFAULT_TRAITS[trait_name],
trait_plugin_mgr)
for trait_name in traits:
self.traits[trait_name] = TraitDefinition(
trait_name,
traits[trait_name],
trait_plugin_mgr)
def included_type(self, event_type):
for t in self._included_types:
if fnmatch.fnmatch(event_type, t):
return True
return False
def excluded_type(self, event_type):
for t in self._excluded_types:
if fnmatch.fnmatch(event_type, t):
return True
return False
def match_type(self, event_type):
return (self.included_type(event_type)
and not self.excluded_type(event_type))
@property
def is_catchall(self):
return '*' in self._included_types and not self._excluded_types
@staticmethod
def _extract_when(body):
"""Extract the generated datetime from the notification."""
# NOTE: I am keeping the logic the same as it was in the collector,
# However, *ALL* notifications should have a 'timestamp' field, it's
# part of the notification envelope spec. If this was put here because
# some openstack project is generating notifications without a
# timestamp, then that needs to be filed as a bug with the offending
# project (mdragon)
when = body.get('timestamp', body.get('_context_timestamp'))
if when:
return timeutils.normalize_time(timeutils.parse_isotime(when))
return timeutils.utcnow()
def to_event(self, notification_body):
event_type = notification_body['event_type']
message_id = notification_body['message_id']
when = self._extract_when(notification_body)
traits = (self.traits[t].to_trait(notification_body)
for t in self.traits)
# Only accept non-None value traits ...
traits = [trait for trait in traits if trait is not None]
event = models.Event(message_id, event_type, when, traits)
return event
class NotificationEventsConverter(object):
"""Notification Event Converter
The NotificationEventsConverter handles the conversion of Notifications
from openstack systems into Ceilometer Events.
The conversion is handled according to event definitions in a config file.
    The config is a list of event definitions. Order is significant: a
    notification will be processed according to the LAST definition that
    matches its event_type. (We use the last matching definition because that
allows you to use YAML merge syntax in the definitions file.)
Each definition is a dictionary with the following keys (all are
required):
- event_type: this is a list of notification event_types this definition
will handle. These can be wildcarded with unix shell glob (not regex!)
wildcards.
An exclusion listing (starting with a '!') will exclude any types listed
from matching. If ONLY exclusions are listed, the definition will match
anything not matching the exclusions.
      This item can also be a string, which will be taken as equivalent to a
      one-item list.
Examples:
* ['compute.instance.exists'] will only match
        compute.instance.exists notifications
* "compute.instance.exists" Same as above.
* ["image.create", "image.delete"] will match
image.create and image.delete, but not anything else.
* "compute.instance.*" will match
compute.instance.create.start but not image.upload
* ['*.start','*.end', '!scheduler.*'] will match
compute.instance.create.start, and image.delete.end,
but NOT compute.instance.exists or
scheduler.run_instance.start
* '!image.*' matches any notification except image
notifications.
* ['*', '!image.*'] same as above.
- traits: (dict) The keys are trait names, the values are the trait
definitions. Each trait definition is a dictionary with the following
keys:
- type (optional): The data type for this trait. (as a string)
Valid options are: 'text', 'int', 'float' and 'datetime', defaults to
'text' if not specified.
- fields: a path specification for the field(s) in the notification you
wish to extract. The paths can be specified with a dot syntax
      (e.g. 'payload.host'); dictionary syntax (e.g. 'payload[host]') is
also supported.
In either case, if the key for the field you are looking for contains
special characters, like '.', it will need to be quoted (with double
or single quotes) like so::
"payload.image_meta.'org.openstack__1__architecture'"
The syntax used for the field specification is a variant of JSONPath,
and is fairly flexible.
(see: https://github.com/kennknowles/python-jsonpath-rw for more info)
      Specifications can be written to match multiple possible fields; the
      value for the trait will be derived from the matching fields that
      exist and have non-null (i.e. not None) values in the
notification.
By default the value will be the first such field. (plugins can alter
that, if they wish)
      This configuration value is normally a string; for convenience, it can
      be specified as a list of specifications, which will be OR'ed together
      (a union query in JSONPath terms).
- plugin (optional): (dictionary) with the following keys:
- name: (string) name of a plugin to load
- parameters: (optional) Dictionary of keyword args to pass
to the plugin on initialization. See documentation on each plugin to
see what arguments it accepts.
For convenience, this value can also be specified as a string, which is
interpreted as a plugin name, which will be loaded with no parameters.
"""
def __init__(self, events_config, trait_plugin_mgr, add_catchall=True):
self.definitions = [
EventDefinition(event_def, trait_plugin_mgr)
for event_def in reversed(events_config)]
if add_catchall and not any(d.is_catchall for d in self.definitions):
event_def = dict(event_type='*', traits={})
self.definitions.append(EventDefinition(event_def,
trait_plugin_mgr))
def to_event(self, notification_body):
event_type = notification_body['event_type']
message_id = notification_body['message_id']
edef = None
for d in self.definitions:
if d.match_type(event_type):
edef = d
break
if edef is None:
msg = (_('Dropping Notification %(type)s (uuid:%(msgid)s)')
% dict(type=event_type, msgid=message_id))
if cfg.CONF.event.drop_unmatched_notifications:
LOG.debug(msg)
else:
# If drop_unmatched_notifications is False, this should
# never happen. (mdragon)
LOG.error(msg)
return None
return edef.to_event(notification_body)
def get_config_file():
config_file = cfg.CONF.event.definitions_cfg_file
if not os.path.exists(config_file):
config_file = cfg.CONF.find_file(config_file)
return config_file
def setup_events(trait_plugin_mgr):
"""Setup the event definitions from yaml config file."""
config_file = get_config_file()
if config_file is not None:
LOG.debug(_("Event Definitions configuration file: %s"), config_file)
with open(config_file) as cf:
config = cf.read()
try:
events_config = yaml.safe_load(config)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
errmsg = (_("Invalid YAML syntax in Event Definitions file "
"%(file)s at line: %(line)s, column: %(column)s.")
% dict(file=config_file,
line=mark.line + 1,
column=mark.column + 1))
else:
errmsg = (_("YAML error reading Event Definitions file "
"%(file)s")
% dict(file=config_file))
LOG.error(errmsg)
raise
else:
LOG.debug(_("No Event Definitions configuration file found!"
" Using default config."))
events_config = []
LOG.info(_("Event Definitions: %s"), events_config)
allow_drop = cfg.CONF.event.drop_unmatched_notifications
return NotificationEventsConverter(events_config,
trait_plugin_mgr,
add_catchall=not allow_drop)
| 40.1675
| 79
| 0.594759
|
79510f532e108c2302f5d7535d5a431f4d3ea1e7
| 3,060
|
py
|
Python
|
smpp_client/settings.py
|
m4rtinpf/smpp-client
|
58f3b891121050861176e7c1ad9b4198d757d1f6
|
[
"MIT"
] | null | null | null |
smpp_client/settings.py
|
m4rtinpf/smpp-client
|
58f3b891121050861176e7c1ad9b4198d757d1f6
|
[
"MIT"
] | 1
|
2022-02-23T20:04:03.000Z
|
2022-02-23T20:04:03.000Z
|
smpp_client/settings.py
|
m4rtinpf/smpp-client
|
58f3b891121050861176e7c1ad9b4198d757d1f6
|
[
"MIT"
] | null | null | null |
"""
Django settings for smpp_client project.
Generated by 'django-admin startproject' using Django 3.2.11.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'api.apps.ApiConfig',
'rest_framework',
'frontend.apps.FrontendConfig',
'channels',
]
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'smpp_client.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'smpp_client.wsgi.application'
ASGI_APPLICATION = 'smpp_client.asgi.application'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": [('127.0.0.1', 6379)],
'capacity': 1000,
},
},
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
# AUTH_PASSWORD_VALIDATORS = [
# {
# 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
# },
# ]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# REST_FRAMEWORK = {
# 'DEFAULT_AUTHENTICATION_CLASSES': [],
# 'DEFAULT_PERMISSION_CLASSES': [],
# 'UNAUTHENTICATED_USER': None,
# }
# Override production variables if DJANGO_DEVELOPMENT env variable is set
if os.environ.get('DJANGO_DEVELOPMENT'):
from .settings_development import *
else:
from .settings_production import *
| 26.153846
| 93
| 0.678105
|
795110179f342684a0c8eede7312c8c870ed189e
| 14,935
|
py
|
Python
|
conrad/cli.py
|
ashikjm/conrad
|
df9b99479c29906498c046724558222949439f1c
|
[
"Apache-2.0"
] | null | null | null |
conrad/cli.py
|
ashikjm/conrad
|
df9b99479c29906498c046724558222949439f1c
|
[
"Apache-2.0"
] | null | null | null |
conrad/cli.py
|
ashikjm/conrad
|
df9b99479c29906498c046724558222949439f1c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import re
import json
import hashlib
import datetime as dt
import click
import requests
import sqlalchemy
import textdistance
from colorama import Fore, Style
from cli_helpers import tabular_output
from . import __version__, CONRAD_HOME
from .db import engine, Session
from .models import Base, Event, Reminder
from .utils import initialize_database, validate
def set_default_pager():
os_environ_pager = os.environ.get("PAGER")
if os_environ_pager == "less":
os.environ["LESS"] = "-SRXF"
def get_events():
click.echo("Fetching latest events!")
response = requests.get(
"https://raw.githubusercontent.com/vinayak-mehta/conrad/master/data/events.json"
)
with open(os.path.join(CONRAD_HOME, "events.json"), "w") as f:
f.write(json.dumps(response.json()))
def rebuild_events_table():
with open(os.path.join(CONRAD_HOME, "events.json"), "r") as f:
events = json.load(f)
session = Session()
for event in events:
event_id = hashlib.md5(
(event["name"] + event["start_date"]).encode("utf-8")
).hexdigest()
e = Event(
id=event_id[:6],
name=event["name"],
url=event["url"],
city=event["city"],
state=event["state"],
country=event["country"],
cfp_open=event["cfp_open"],
cfp_end_date=dt.datetime.strptime(event["cfp_end_date"], "%Y-%m-%d"),
start_date=dt.datetime.strptime(event["start_date"], "%Y-%m-%d"),
end_date=dt.datetime.strptime(event["end_date"], "%Y-%m-%d"),
source=event["source"],
tags=json.dumps(event["tags"]),
kind=event["kind"],
by=event["by"],
)
session.add(e)
session.commit()
session.close()
def initialize_conrad():
conrad_update = os.path.join(CONRAD_HOME, ".conrad-update")
if not os.path.exists(conrad_update):
with open(conrad_update, "w") as f:
f.write(dt.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"))
if not os.path.exists(os.path.join(CONRAD_HOME, "conrad.db")):
get_events()
initialize_database()
rebuild_events_table()
def refresh_conrad():
get_events()
if not os.path.exists(os.path.join(CONRAD_HOME, "conrad.db")):
initialize_database()
else:
Event.__table__.drop(engine)
Base.metadata.tables["event"].create(bind=engine)
rebuild_events_table()
def update_conrad_update():
conrad_update = os.path.join(CONRAD_HOME, ".conrad-update")
with open(conrad_update, "w") as f:
f.write(dt.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"))
def clean_old_events():
session = Session()
now = dt.datetime.now()
reminders = list(
session.query(Event, Reminder)
.filter(Event.id == Reminder.id, Event.cfp_end_date < now)
.all()
)
for r, __ in reminders:
session.query(Reminder).filter(Reminder.id == r.id).delete()
events = list(session.query(Event).filter(Event.end_date < now).all())
for e in events:
session.query(Event).filter(Event.id == e.id).delete()
session.commit()
session.close()
def auto_refresh():
conrad_update = os.path.join(CONRAD_HOME, ".conrad-update")
if not os.path.exists(conrad_update):
update_conrad_update()
with open(conrad_update, "r") as f:
last_update = dt.datetime.strptime(f.read().strip(), "%Y-%m-%dT%H:%M:%S")
if (dt.datetime.now() - last_update) > dt.timedelta(days=1):
refresh_conrad()
clean_old_events()
update_conrad_update()
# https://stackoverflow.com/a/50889894
def make_exclude_hook_command(callback):
"""for any command that is not decorated, call the callback"""
hook_attr_name = "hook_" + callback.__name__
class HookGroup(click.Group):
"""group to hook context invoke to see if the callback is needed"""
def group(self, *args, **kwargs):
"""new group decorator to make sure sub groups are also hooked"""
if "cls" not in kwargs:
kwargs["cls"] = type(self)
return super(HookGroup, self).group(*args, **kwargs)
def command(self, *args, **kwargs):
"""new command decorator to monkey patch command invoke"""
cmd = super(HookGroup, self).command(*args, **kwargs)
def hook_command_decorate(f):
# decorate the command
ret = cmd(f)
# grab the original command invoke
orig_invoke = ret.invoke
def invoke(ctx):
"""call the call back right before command invoke"""
parent = ctx.parent
sub_cmd = (
parent and parent.command.commands[parent.invoked_subcommand]
)
if (
not sub_cmd
or not isinstance(sub_cmd, click.Group)
and getattr(sub_cmd, hook_attr_name, True)
):
# invoke the callback
callback()
return orig_invoke(ctx)
# hook our command invoke to command and return cmd
ret.invoke = invoke
return ret
# return hooked command decorator
return hook_command_decorate
def decorator(func=None):
if func is None:
# if called other than as decorator, return group class
return HookGroup
setattr(func, hook_attr_name, False)
return decorator
bypass_auto_refresh = make_exclude_hook_command(auto_refresh)
@click.group(name="conrad", cls=bypass_auto_refresh())
@click.version_option(version=__version__)
@click.pass_context
def cli(ctx, *args, **kwargs):
"""conrad: Track conferences and meetups on your terminal!"""
set_default_pager()
@bypass_auto_refresh
@cli.command("refresh", short_help="Refresh event database.")
@click.confirmation_option(prompt="Would you like conrad to look for new events?")
@click.pass_context
def _refresh(ctx, *args, **kwargs):
# TODO: print("10 new events found!")
refresh_conrad()
click.echo("All done! ✨ 🍰 ✨")
click.echo("Event database updated.")
@cli.command("show", short_help="Show all saved events.")
@click.option(
"--cfp",
"-c",
is_flag=True,
help="Show only events which have an open CFP (call for proposals).",
)
@click.option(
"--tag", "-t", default="", help="Look at conferences with a specific tag."
)
@click.option(
"--name",
"-n",
default="",
help="Look at conferences containing a specific word in their name.",
)
@click.option(
"--location",
"-l",
default="",
help="Look at conferences in a specific city, state or country.",
)
@click.option(
"--date",
"-d",
default=[],
multiple=True,
help='Look at conferences based on when they\'re happening. For example: conrad show --date ">= 2019-10-01" --date "<= 2020-01-01".',
)
@click.pass_context
def _show(ctx, *args, **kwargs):
# TODO: conrad show --new
initialize_conrad()
cfp = kwargs["cfp"]
tag = kwargs["tag"]
name = kwargs["name"]
date = list(kwargs["date"])
location = kwargs["location"]
filters = []
if cfp:
filters.append(Event.cfp_open.is_(cfp))
if tag:
filters.append(Event.tags.contains(tag))
if name:
filters.append(Event.name.ilike(f"%{name}%"))
if date:
date_filters = []
for d in date:
cmp, date = d.split(" ")
if not (">" in cmp or "<" in cmp):
raise click.UsageError("Wrong comparison operator!")
try:
__ = dt.datetime.strptime(date, "%Y-%m-%d")
except ValueError:
raise click.UsageError("Wrong date format!")
if ">" in cmp:
date_filters.append(Event.start_date >= date)
elif "<" in cmp:
date_filters.append(Event.start_date <= date)
filters.append(sqlalchemy.and_(*date_filters))
if location:
filters.append(
sqlalchemy.or_(
Event.city.ilike(f"%{location}%"),
Event.state.ilike(f"%{location}%"),
Event.country.ilike(f"%{location}%"),
)
)
session = Session()
events = list(
session.query(Event).filter(*filters).order_by(Event.start_date).all()
)
if len(events) > 0:
header = [
"id",
"Name",
"Website",
"City",
"State",
"Country",
"Start Date",
"End Date",
]
events_output = []
for event in events:
events_output.append(
[
event.id,
event.name,
event.url,
event.city,
event.state,
event.country,
event.start_date.strftime("%Y-%m-%d"),
event.end_date.strftime("%Y-%m-%d"),
]
)
session.close()
formatted = tabular_output.format_output(
events_output, header, format_name="ascii"
)
click.echo_via_pager("\n".join(formatted))
else:
click.echo("No events found!")
@cli.command("remind", short_help="Set and display reminders.")
@click.option("--id", "-i", default=None, help="Conference identifier.")
@click.pass_context
def _remind(ctx, *args, **kwargs):
initialize_conrad()
_id = kwargs["id"]
if _id is None:
session = Session()
reminders = list(
session.query(Event, Reminder)
.filter(Event.id == Reminder.id)
.order_by(Event.start_date)
.all()
)
if len(reminders) > 0:
header = ["id", "Name", "Start Date", "Days Left"]
reminders_output = []
for reminder, __ in reminders:
start = dt.datetime.now()
cfp_days_left = (reminder.cfp_end_date - start).days
event_days_left = (reminder.start_date - start).days
if reminder.cfp_open and cfp_days_left >= 0:
days_left = cfp_days_left
days_left_output = f"{days_left} days left to cfp deadline!"
elif event_days_left >= 0:
days_left = event_days_left
days_left_output = f"{days_left} days left!"
else:
days_left = -1
days_left_output = "Event ended."
if days_left >= 30:
style = f"{Fore.GREEN}{Style.BRIGHT}"
elif 30 > days_left >= 10:
style = f"{Fore.YELLOW}{Style.BRIGHT}"
elif 10 > days_left >= 0:
style = f"{Fore.RED}{Style.BRIGHT}"
else:
style = ""
days_left_output = (
f"{style}{days_left_output}{Style.RESET_ALL}"
)
reminders_output.append(
[
reminder.id,
reminder.name,
reminder.start_date.strftime("%Y-%m-%d"),
days_left_output,
]
)
session.close()
formatted = tabular_output.format_output(
reminders_output, header, format_name="ascii"
)
click.echo("\n".join(formatted))
else:
click.echo("No reminders found!")
else:
try:
session = Session()
if session.query(Event).filter(Event.id == _id).first() is None:
click.echo("Event not found!")
else:
reminder = Reminder(id=_id)
session.add(reminder)
session.commit()
session.close()
click.echo("Reminder set!")
except sqlalchemy.exc.IntegrityError:
session.rollback()
if click.confirm("Do you want to remove this reminder?"):
session = Session()
session.query(Reminder).filter(Reminder.id == _id).delete()
session.commit()
session.close()
click.echo("Reminder removed!")
@bypass_auto_refresh
@cli.command("import", short_help="Import new events into conrad.")
@click.option("--file", "-f", default=None, help="JSON file to import.")
@click.pass_context
def _import(ctx, *args, **kwargs):
file = kwargs["file"]
EVENTS_PATH = os.path.join(os.getcwd(), "data", "events.json")
if file is None:
raise click.UsageError("No file provided!")
if not os.path.exists(file):
raise click.UsageError("File does not exist!")
with open(file, "r") as f:
input_events = json.load(f)
failures = validate(input_events)
if len(failures) > 0:
raise click.UsageError(
"The following validations failed!\n{}".format(
"".join(
list(map(lambda x: "- " + x + "\n", failures[:-1]))
+ list(map(lambda x: "- " + x, failures[-1:]))
)
)
)
with open(EVENTS_PATH, "r") as f:
old_events = json.load(f)
now = dt.datetime.now()
events = []
for e in old_events:
event_end_date = dt.datetime.strptime(e["end_date"], "%Y-%m-%d")
if event_end_date < now:
continue
events.append(e)
removed = len(old_events) - len(events)
s = "s" if removed > 1 else ""
click.echo(f"Removed {removed} old event{s}!")
# TODO: update cfp to false when cfp_end_date < now
pattern = "[0-9]"
new_events = []
for ie in input_events:
match = False
for e in events:
input_event_name = ie["name"].replace(" ", "").lower()
input_event_name = re.sub(pattern, "", input_event_name)
event_name = e["name"].replace(" ", "").lower()
event_name = re.sub(pattern, "", event_name)
similarity = textdistance.levenshtein.normalized_similarity(
input_event_name, event_name
)
if similarity > 0.9:
click.echo(f"Updating {e['name']}")
e.update(ie)
match = True
if not match:
click.echo(f"Adding {ie['name']}")
new_events.append(ie)
events.extend(new_events)
s = "s" if len(new_events) > 1 else ""
click.echo(f"Added {len(new_events)} new event{s}!")
with open(EVENTS_PATH, "w") as f:
f.write(json.dumps(events, indent=4, sort_keys=True))
| 31.179541
| 137
| 0.550318
|
795110eaf93dc3b5ca004c203631c279010872ec
| 34,839
|
py
|
Python
|
var/spack/repos/builtin/packages/rust/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/rust/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/rust/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from six import iteritems
from spack import *  # provides the Spack package directives used below (Package, variant, version, depends_on, ...)
class Rust(Package):
"""The Rust programming language toolchain
This package can bootstrap any version of the Rust compiler since Rust
1.23. It does this by downloading the platform-appropriate binary
distribution of the desired version of the rust compiler, and then building
that compiler from source.
"""
homepage = "https://www.rust-lang.org"
url = "https://static.rust-lang.org/dist/rustc-1.42.0-src.tar.gz"
git = "https://github.com/rust-lang/rust.git"
maintainers = ["AndrewGaspar"]
phases = ['configure', 'build', 'install']
extendable = True
variant(
'rustfmt',
default=True,
description='Formatting tool for Rust code'
)
variant(
'analysis',
default=True,
description='Outputs code analysis that can be consumed by other tools'
)
variant(
'clippy',
default=True,
description='Linting tool for Rust'
)
variant(
'rls',
default=False,
description='The Rust Language Server can be used for IDE integration'
)
variant(
'src',
default=True,
description='Install Rust source files'
)
variant(
'extra_targets', default='none', multi=True,
description='Triples for extra targets to enable. For supported targets, see: https://doc.rust-lang.org/nightly/rustc/platform-support.html'
)
depends_on('python@2.7:', type='build')
depends_on('python@2.7:2.8', when='@:1.43', type='build')
depends_on('gmake@3.81:', type='build')
depends_on('cmake@3.4.3:', type='build')
depends_on('ninja', when='@1.48.0:', type='build')
depends_on('pkgconfig', type='build')
depends_on('openssl')
depends_on('libssh2')
depends_on('libgit2')
# Pre-release Versions
version('master', branch='master', submodules=True)
# These version strings are officially supported, but aren't explicitly
# listed because there's no stable checksum for them.
# version('nightly')
# version('beta')
# Version Notes:
# Here's some information on why your favorite Rust version may be missing.
#
# < 1.23:
# Rust seems to eagerly search for ar next to cc. Spack makes wrappers for
# cc and c++, but not for ar, so no ar is found. In future versions, ar
# can be specified in the config.
#
# < 1.17:
# The `x.py` bootstrapping script did not exist prior to Rust 1.17. It
    # would be possible to support both, but for simplicity, we only support
# Rust 1.17 and newer
version('1.48.0', sha256='0e763e6db47d5d6f91583284d2f989eacc49b84794d1443355b85c58d67ae43b')
version('1.47.0', sha256='3185df064c4747f2c8b9bb8c4468edd58ff4ad6d07880c879ac1b173b768d81d')
version('1.46.0', sha256='2d6a3b7196db474ba3f37b8f5d50a1ecedff00738d7846840605b42bfc922728')
version('1.45.1', sha256='ea53e6424e3d1fe56c6d77a00e72c5d594b509ec920c5a779a7b8e1dbd74219b')
version('1.44.1', sha256='7e2e64cb298dd5d5aea52eafe943ba0458fa82f2987fdcda1ff6f537b6f88473')
version('1.44.0', sha256='bf2df62317e533e84167c5bc7d4351a99fdab1f9cd6e6ba09f51996ad8561100')
version('1.43.1', sha256='cde177b4a8c687da96f20de27630a1eb55c9d146a15e4c900d5c31cd3c3ac41d')
version('1.43.0', sha256='75f6ac6c9da9f897f4634d5a07be4084692f7ccc2d2bb89337be86cfc18453a1')
version('1.42.0', sha256='d2e8f931d16a0539faaaacd801e0d92c58df190269014b2360c6ab2a90ee3475')
version('1.41.1', sha256='38c93d016e6d3e083aa15e8f65511d3b4983072c0218a529f5ee94dd1de84573')
version('1.41.0', sha256='5546822c09944c4d847968e9b7b3d0e299f143f307c00fa40e84a99fabf8d74b')
version('1.40.0', sha256='dd97005578defc10a482bff3e4e728350d2099c60ffcf1f5e189540c39a549ad')
version('1.39.0', sha256='b4a1f6b6a93931f270691aba4fc85eee032fecda973e6b9c774cd06857609357')
version('1.38.0', sha256='644263ca7c7106f8ee8fcde6bb16910d246b30668a74be20b8c7e0e9f4a52d80')
version('1.37.0', sha256='120e7020d065499cc6b28759ff04153bfdc2ac9b5adeb252331a4eb87cbe38c3')
version('1.36.0', sha256='04c4e4d7213d036d6aaed392841496d272146312c0290f728b7400fccd15bb1b')
version('1.35.0', sha256='5a4d637a716bac18d085f44dd87ef48b32195f71b967d872d80280b38cff712d')
version('1.34.2', sha256='c69a4a85a1c464368597df8878cb9e1121aae93e215616d45ad7d23af3052f56')
version('1.34.1', sha256='b0c785264d17e1dac4598627c248a2d5e07dd39b6666d1881fcfc8e2cf4c40a7')
version('1.34.0', sha256='7ac85acffd79dd3a7c44305d9eaabd1f1e7116e2e6e11e770e4bf5f92c0f1f59')
version('1.33.0', sha256='5a01a8d7e65126f6079042831385e77485fa5c014bf217e9f3e4aff36a485d94')
version('1.32.0', sha256='4c594c7712a0e7e8eae6526c464bf6ea1d82f77b4f61717c3fc28fb27ba2224a')
version('1.31.1', sha256='91d2fc22f08d986adab7a54eb3a6a9b99e490f677d2d092e5b9e4e069c23686a')
version('1.30.1', sha256='36a38902dbd9a3e1240d46ab0f2ca40d2fd07c2ab6508ed7970c6c4c036b5b29')
version('1.30.0', sha256='cd0ba83fcca55b64c0c9f23130fe731dfc1882b73ae21bef96be8f2362c108ee')
version('1.29.2', sha256='5088e796aa2e47478cdf41e7243fc5443fafab0a7c70a11423e57c80c04167c9')
version('1.29.1', sha256='f1b0728b66ce6bce6d72bbe5ea9e3a24ea22a045665da2ed8fcdfad14f61a349')
version('1.29.0', sha256='a4eb34ffd47f76afe2abd813f398512d5a19ef00989d37306217c9c9ec2f61e9')
version('1.28.0', sha256='1d5a81729c6f23a0a23b584dd249e35abe9c6f7569cee967cc42b1758ecd6486')
version('1.27.2', sha256='9a818c50cdb7880abeaa68b3d97792711e6c64c1cdfb6efdc23f75b8ced0e15d')
version('1.27.1', sha256='2133beb01ddc3aa09eebc769dd884533c6cfb08ce684f042497e097068d733d1')
version('1.27.0', sha256='2cb9803f690349c9fd429564d909ddd4676c68dc48b670b8ddf797c2613e2d21')
version('1.26.2', sha256='fb9ecf304488c9b56600ab20cfd1937482057f7e5db7899fddb86e0774548700')
version('1.26.1', sha256='70a7961bd8ec43b2c01e9896e90b0a06804a7fbe0a5c05acc7fd6fed19500df0')
version('1.26.0', sha256='4fb09bc4e233b71dcbe08a37a3f38cabc32219745ec6a628b18a55a1232281dd')
version('1.25.0', sha256='eef63a0aeea5147930a366aee78cbde248bb6e5c6868801bdf34849152965d2d')
version('1.24.1', sha256='3ea53d45e8d2e9a41afb3340cf54b9745f845b552d802d607707cf04450761ef')
version('1.24.0', sha256='bb8276f6044e877e447f29f566e4bbf820fa51fea2f912d59b73233ffd95639f')
version('1.23.0', sha256='7464953871dcfdfa8afcc536916a686dd156a83339d8ec4d5cb4eb2fe146cb91')
# The Rust bootstrapping process requires a bootstrapping compiler. The
# easiest way to do this is to download the binary distribution of the
# same version of the compiler and build with that.
#
# This dictionary contains a version: hash dictionary for each supported
# Rust target.
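    # An entry is looked up per (release, target triple) pair, e.g.
    #   rust_releases['1.48.0']['x86_64-unknown-linux-gnu']
    # yields the sha256 of the matching bootstrap binary tarball listed below.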
rust_releases = {
'1.48.0': {
'x86_64-unknown-linux-gnu': '950420a35b2dd9091f1b93a9ccd5abc026ca7112e667f246b1deb79204e2038b',
'powerpc64le-unknown-linux-gnu': 'e6457a0214f3b1b04bd5b2618bba7e3826e254216420dede2971b571a1c13bb1',
'aarch64-unknown-linux-gnu': 'c4769418d8d89f432e4a3a21ad60f99629e4b13bbfc29aef7d9d51c4e8ee8a8a',
'x86_64-apple-darwin': 'f30ce0162b39dc7cf877020cec64d4826cad50467af493d180b5b28cf5eb50b3'
},
'1.47.0': {
'x86_64-unknown-linux-gnu': 'd0e11e1756a072e8e246b05d54593402813d047d12e44df281fbabda91035d96',
'powerpc64le-unknown-linux-gnu': '5760c3b1897ea70791320c2565f3eef700a3d54059027b84bbe6b8d6157f81c8',
'aarch64-unknown-linux-gnu': '753c905e89a714ab9bce6fe1397b721f29c0760c32f09d2f328af3d39919c8e6',
'x86_64-apple-darwin': '84e5be6c5c78734deba911dcf80316be1e4c7da2c59413124d039ad96620612f'
},
'1.46.0': {
'x86_64-unknown-linux-gnu': 'e3b98bc3440fe92817881933f9564389eccb396f5f431f33d48b979fa2fbdcf5',
'powerpc64le-unknown-linux-gnu': '89e2f4761d257f017a4b6aa427f36ac0603195546fa2cfded8c899789832941c',
'aarch64-unknown-linux-gnu': 'f0c6d630f3dedb3db69d69ed9f833aa6b472363096f5164f1068c7001ca42aeb',
'x86_64-apple-darwin': '82d61582a3772932432a99789c3b3bd4abe6baca339e355048ca9efb9ea5b4db'
},
'1.45.1': {
'x86_64-unknown-linux-gnu': '76dc9f05b3bfd0465d6e6d22bc9fd5db0b473e3548e8b3d266ecfe4d9e5dca16',
'powerpc64le-unknown-linux-gnu': '271846e4f5adc9a33754794c2ffab851f9e0313c8c1315264e7db5c8f63ab7ab',
'aarch64-unknown-linux-gnu': 'd17fd560e8d5d12304835b71a7e22ac2c3babf4b9768db6a0e89868b4444f728',
'x86_64-apple-darwin': '7334c927e4d2d12d209bf941b97ba309e548413e241d2d263c39c6e12b3ce154'
},
'1.44.1': {
'x86_64-unknown-linux-gnu': 'a41df89a461a580536aeb42755e43037556fba2e527dd13a1e1bb0749de28202',
'powerpc64le-unknown-linux-gnu': '22deeca259459db31065af7c862fcab7fbfb623200520c65002ed2ba93d87ad2',
'aarch64-unknown-linux-gnu': 'a2d74ebeec0b6778026b6c37814cdc91d14db3b0d8b6d69d036216f4d9cf7e49',
'x86_64-apple-darwin': 'a5464e7bcbce9647607904a4afa8362382f1fc55d39e7bbaf4483ac00eb5d56a'
},
'1.44.0': {
'x86_64-unknown-linux-gnu': 'eaa34271b4ac4d2c281831117d4d335eed0b37fe7a34477d9855a6f1d930a624',
'powerpc64le-unknown-linux-gnu': '97038ea935c7a5b21f5aaaaad409c514e2b2ae8ea55994ba39645f453e98bc9f',
'aarch64-unknown-linux-gnu': 'bcc916003cb9c7ff44f5f9af348020b422dbc5bd4fe49bdbda2de6ce0a1bb745',
'x86_64-apple-darwin': 'f20388b80b2b0a8b122d89058f785a2cf3b14e93bcac53471d60fdb4106ffa35'
},
'1.43.1': {
'x86_64-unknown-linux-gnu': '25cd71b95bba0daef56bad8c943a87368c4185b90983f4412f46e3e2418c0505',
'powerpc64le-unknown-linux-gnu': '1670f00b00cc1bed38d523a25dba7420de3c06986c15a0248e06299f80ce6124',
'aarch64-unknown-linux-gnu': 'fbb612387a64c9da2869725afffc1f66a72d6e7ba6667ba717cd52c33080b7fb',
'x86_64-apple-darwin': 'e1c3e1426a9e615079159d6b619319235e3ca7b395e7603330375bfffcbb7003'
},
'1.43.0': {
'x86_64-unknown-linux-gnu': '069f34fa5cef92551724c83c36360df1ac66fe3942bc1d0e4d341ce79611a029',
'powerpc64le-unknown-linux-gnu': 'c75c7ae4c94715fd6cc43d1d6fdd0952bc151f7cbe3054f66d99a529d5bb996f',
'aarch64-unknown-linux-gnu': 'e5fa55f333c10cdae43d147438a80ffb435d6c7b9681cd2e2f0857c024556856',
'x86_64-apple-darwin': '504e8efb2cbb36f5a3db7bb36f339a1e5216082c910ad19039c370505cfbde99'
},
'1.42.0': {
'x86_64-unknown-linux-gnu': '7d1e07ad9c8a33d8d039def7c0a131c5917aa3ea0af3d0cc399c6faf7b789052',
'powerpc64le-unknown-linux-gnu': '805b08fa1e0aad4d706301ca1f13e2d80810d385cece2c15070360b3c4bd6e4a',
'aarch64-unknown-linux-gnu': 'fdd39f856a062af265012861949ff6654e2b7103be034d046bec84ebe46e8d2d',
'x86_64-apple-darwin': 'db1055c46e0d54b99da05e88c71fea21b3897e74a4f5ff9390e934f3f050c0a8'
},
'1.41.1': {
'x86_64-unknown-linux-gnu': 'a6d5a3b3f574aafc8f787fea37aad9fb8a7946b383ae5348146927192ff0bef0',
'powerpc64le-unknown-linux-gnu': 'f9b53ca636625b3a2dd87600b6274223c11f866c9b5a34b638ea0013186659d3',
'aarch64-unknown-linux-gnu': 'd54c0f9165b86216b6f1b499f451141407939c5dc6b36c89a3772895a1370242',
'x86_64-apple-darwin': '16615288cf74239783de1b435d329f3d56ed13803c7c10cd4b207d7c8ffa8f67'
},
'1.41.0': {
'x86_64-unknown-linux-gnu': '343ba8ef7397eab7b3bb2382e5e4cb08835a87bff5c8074382c0b6930a41948b',
'powerpc64le-unknown-linux-gnu': 'ba231b0d8273d6928f61e2be3456e816a1de8050135e20c0623dc7a6ea03ba68',
'aarch64-unknown-linux-gnu': '79ddfb5e2563d0ee09a567fbbe121a2aed3c3bc61255b2787f2dd42183a10f27',
'x86_64-apple-darwin': 'b6504003ab70b11f278e0243a43ba9d6bf75e8ad6819b4058a2b6e3991cc8d7a'
},
'1.40.0': {
'x86_64-unknown-linux-gnu': 'fc91f8b4bd18314e83a617f2389189fc7959146b7177b773370d62592d4b07d0',
'powerpc64le-unknown-linux-gnu': 'b1a23e35c383f99e647df6a9239b1dc9313e293deb70a76ba58e8ebe55ef623b',
'aarch64-unknown-linux-gnu': '639271f59766d291ebdade6050e7d05d61cb5c822a3ef9a1e2ab185fed68d729',
'x86_64-apple-darwin': '749ca5e0b94550369cc998416b8854c13157f5d11d35e9b3276064b6766bcb83'
},
'1.39.0': {
'x86_64-unknown-linux-gnu': 'b10a73e5ba90034fe51f0f02cb78f297ed3880deb7d3738aa09dc5a4d9704a25',
'powerpc64le-unknown-linux-gnu': '53b3fd942c52709f7e6fe11ea572d086e315a57a40b84b9b3290ac0ec8c7c84a',
'aarch64-unknown-linux-gnu': 'e27dc8112fe577012bd88f30e7c92dffd8c796478ce386c49465c03b6db8209f',
'x86_64-apple-darwin': '3736d49c5e9592844e1a5d5452883aeaf8f1e25d671c1bc8f01e81c1766603b5'
},
'1.38.0': {
'x86_64-unknown-linux-gnu': 'adda26b3f0609dbfbdc2019da4a20101879b9db2134fae322a4e863a069ec221',
'powerpc64le-unknown-linux-gnu': 'f9ed1bb6525abdd4dd6ef10782ad45d2f71496e0c3c88e806b510c81a91c4ff7',
'aarch64-unknown-linux-gnu': '06afd6d525326cea95c3aa658aaa8542eab26f44235565bb16913ac9d12b7bda',
'x86_64-apple-darwin': 'bd301b78ddcd5d4553962b115e1dca5436dd3755ed323f86f4485769286a8a5a'
},
'1.37.0': {
'x86_64-unknown-linux-gnu': 'cb573229bfd32928177c3835fdeb62d52da64806b844bc1095c6225b0665a1cb',
'powerpc64le-unknown-linux-gnu': '27c59ec40e9e9f71490dc00bf165156ae3ea77c20ffa4b5e5fd712e67527b477',
'aarch64-unknown-linux-gnu': '263ef98fa3a6b2911b56f89c06615cdebf6ef676eb9b2493ad1539602f79b6ba',
'x86_64-apple-darwin': 'b2310c97ffb964f253c4088c8d29865f876a49da2a45305493af5b5c7a3ca73d'
},
'1.36.0': {
'x86_64-unknown-linux-gnu': '15e592ec52f14a0586dcebc87a957e472c4544e07359314f6354e2b8bd284c55',
'powerpc64le-unknown-linux-gnu': '654a7a18d881811c09f630b0c917825b586e94a6142eceaede6b8046718e4054',
'aarch64-unknown-linux-gnu': 'db78c24d93756f9fe232f081dbc4a46d38f8eec98353a9e78b9b164f9628042d',
'x86_64-apple-darwin': '91f151ec7e24f5b0645948d439fc25172ec4012f0584dd16c3fb1acb709aa325'
},
'1.35.0': {
'x86_64-unknown-linux-gnu': 'cf600e2273644d8629ed57559c70ca8db4023fd0156346facca9ab3ad3e8f86c',
'powerpc64le-unknown-linux-gnu': 'a933955adec386d75d126e78df5b9941936e156acb3353fc44b85995a81c7bb2',
'aarch64-unknown-linux-gnu': '31e6da56e67838fd2874211ae896a433badf67c13a7b68481f1d5f7dedcc5952',
'x86_64-apple-darwin': 'ac14b1c7dc330dcb53d8641d74ebf9b32aa8b03b9d650bcb9258030d8b10dbd6'
},
'1.34.2': {
'x86_64-unknown-linux-gnu': '2bf6622d980a52832bae141304e96f317c8a1ccd2dfd69a134a14033e6e43c0f',
'powerpc64le-unknown-linux-gnu': '4ddd55014bbd954b3499859bfa3146bff471de21c1d73fc6e7cccde290fc1918',
'aarch64-unknown-linux-gnu': '15fc6b7ec121df9d4e42483dd12c677203680bec8c69b6f4f62e5a35a07341a8',
'x86_64-apple-darwin': '6fdd4bf7fe26dded0cd57b41ab5f0500a5a99b7bc770523a425e9e34f63d0fd8'
},
'1.34.1': {
'x86_64-unknown-linux-gnu': '8e2eead11bd5bf61409e29018d007c6fc874bcda2ff54db3d04d1691e779c14e',
'powerpc64le-unknown-linux-gnu': '94ac92d08afcfa2d77ae207e91b57c00cb48ff7ba08a27ed3deb2493f33e8fb1',
'aarch64-unknown-linux-gnu': '0565e50dae58759a3a5287abd61b1a49dfc086c4d6acf2ce604fe1053f704e53',
'x86_64-apple-darwin': 'f4e46b9994ccfab4a84059298d1dc8fd446b1bbb7449462e0459948f7debea0e'
},
'1.34.0': {
'x86_64-unknown-linux-gnu': '170647ed41b497dc937a6b2556700210bc4be187b1735029ef9ccf52e2cb5ab8',
'powerpc64le-unknown-linux-gnu': '3027e87802e161cce6f3a23d961f6d73b9ed6e829b2cd7af5dfccf6e1207e552',
'aarch64-unknown-linux-gnu': '370c3a8fb9a69df36d645a95e622fb59ac5b513baecddde706cedaf20defa269',
'x86_64-apple-darwin': 'e6bea8d865cc7341c17fa3b8f25f7989e6b04f53e9da24878addc524f3a32664'
},
'1.33.0': {
'x86_64-unknown-linux-gnu': '6623168b9ee9de79deb0d9274c577d741ea92003768660aca184e04fe774393f',
'powerpc64le-unknown-linux-gnu': 'db885aa4c2c6896c85257be2ade5c9edea660ca6878970683e8d5796618329b5',
'aarch64-unknown-linux-gnu': 'a308044e4076b62f637313ea803fa0a8f340b0f1b53136856f2c43afcabe5387',
'x86_64-apple-darwin': '864e7c074a0b88e38883c87c169513d072300bb52e1d320a067bd34cf14f66bd'
},
'1.32.0': {
'x86_64-unknown-linux-gnu': 'e024698320d76b74daf0e6e71be3681a1e7923122e3ebd03673fcac3ecc23810',
'powerpc64le-unknown-linux-gnu': 'd6d5c9154f4459465d68ebd4fa1e17bad4b6cfe219667dddd9123c3bfb5dd839',
'aarch64-unknown-linux-gnu': '60def40961728212da4b3a9767d5a2ddb748400e150a5f8a6d5aa0e1b8ba1cee',
'x86_64-apple-darwin': 'f0dfba507192f9b5c330b5984ba71d57d434475f3d62bd44a39201e36fa76304'
},
'1.31.1': {
'x86_64-unknown-linux-gnu': 'a64685535d0c457f49a8712a096a5c21564cd66fd2f7da739487f028192ebe3c',
'powerpc64le-unknown-linux-gnu': 'a6f61b7a8a06a2b0a785391cc3e6bb8004aa72095eea80db1561039f5bb3e975',
'aarch64-unknown-linux-gnu': '29a7c6eb536fefd0ca459e48dfaea006aa8bff8a87aa82a9b7d483487033632a',
'x86_64-apple-darwin': '8398b1b303bdf0e7605d08b87070a514a4f588797c6fb3593718cb9cec233ad6'
},
'1.30.1': {
'x86_64-unknown-linux-gnu': 'a01a493ed8946fc1c15f63e74fc53299b26ebf705938b4d04a388a746dfdbf9e',
'powerpc64le-unknown-linux-gnu': 'a7d4806e6702bdbad5017eeddc62f7ff7eb2438b1b9c39cbc90c2b1207f8e65f',
'aarch64-unknown-linux-gnu': '6d87d81561285abd6c1987e07b60b2d723936f037c4b46eedcc12e8566fd3874',
'x86_64-apple-darwin': '3ba1704a7defe3d9a6f0c1f68792c084da83bcba85e936d597bac0c019914b94'
},
'1.30.0': {
'x86_64-unknown-linux-gnu': 'f620e3125cc505c842150bd873c0603432b6cee984cdae8b226cf92c8aa1a80f',
'powerpc64le-unknown-linux-gnu': '0b53e257dc3d9f3d75cd97be569d3bf456d2c0af57ed0bd5e7a437227d8f465a',
'aarch64-unknown-linux-gnu': '9690c7c50eba5a8461184ee4138b4c284bad31ccc4aa1f2ddeec58b253e6363e',
'x86_64-apple-darwin': '07008d90932712282bc599f1e9a226e97879c758dc1f935e6e2675e45694cc1b'
},
'1.29.2': {
'x86_64-unknown-linux-gnu': 'e9809825c546969a9609ff94b2793c9107d7d9bed67d557ed9969e673137e8d8',
'powerpc64le-unknown-linux-gnu': '344003b808c20424c4699c9452bd37cdee23857dd4aa125e67d1d6e4bc992091',
'aarch64-unknown-linux-gnu': 'e11461015ca7106ef8ebf00859842bf4be518ee170226cb8eedaaa666946509f',
'x86_64-apple-darwin': '63f54e3013406b39fcb5b84bcf5e8ce85860d0b97a1e156700e467bf5fb5d5f2'
},
'1.29.1': {
'x86_64-unknown-linux-gnu': 'b36998aea6d58525f25d89f1813b6bfd4cad6ff467e27bd11e761a20dde43745',
'powerpc64le-unknown-linux-gnu': '26a6d652ade6b6a96e6af18e846701ee28f912233372dfe15432139252f88958',
'aarch64-unknown-linux-gnu': '2685224f67b2ef951e0e8b48829f786cbfed95e19448ba292ac33af719843dbe',
'x86_64-apple-darwin': '07b07fbd6fab2390e19550beb8008745a8626cc5e97b72dc659061c1c3b3d008'
},
'1.29.0': {
'x86_64-unknown-linux-gnu': '09f99986c17b1b6b1bfbc9dd8785e0e4693007c5feb67915395d115c1a3aea9d',
'powerpc64le-unknown-linux-gnu': 'd6954f1da53f7b3618fba3284330d99b6142bb25d9febba6dbfedad59ca53329',
'aarch64-unknown-linux-gnu': '0ed3be0fd9f847afeb4e587fff61f6769ea61b53719d3ea999326284e8975b36',
'x86_64-apple-darwin': '28a0473637585742f6d80ccd8afd88b6b400e65d623c33cb892412759444da93'
},
'1.28.0': {
'x86_64-unknown-linux-gnu': '2a1390340db1d24a9498036884e6b2748e9b4b057fc5219694e298bdaa37b810',
'powerpc64le-unknown-linux-gnu': '255818156ec1f795ed808a44b4fdb8019187d5ebb7f837ae8f55a1ca40862bb6',
'aarch64-unknown-linux-gnu': '9b6fbcee73070332c811c0ddff399fa31965bec62ef258656c0c90354f6231c1',
'x86_64-apple-darwin': '5d7a70ed4701fe9410041c1eea025c95cad97e5b3d8acc46426f9ac4f9f02393'
},
'1.27.2': {
'x86_64-unknown-linux-gnu': '5028a18e913ef3eb53e8d8119d2cc0594442725e055a9361012f8e26f754f2bf',
'powerpc64le-unknown-linux-gnu': '11034d150e811d4903b09fd42f0cb76d467a6365a158101493405fff1054572f',
'aarch64-unknown-linux-gnu': 'cf84da70269c0e50bb3cc3d248bae1ffcd70ee69dc5a4e3513b54fefc6685fb4',
'x86_64-apple-darwin': '30c5cc58759caa4efdf2ea7d8438633139c98bee3408beb29ceb26985f3f5f70'
},
'1.27.1': {
'x86_64-unknown-linux-gnu': '435778a837af764da2a7a7fb4d386b7b78516c7dfc732d892858e9a8a539989b',
'powerpc64le-unknown-linux-gnu': 'a08e6b6fed3329fcd1220b2ee4cd7a311d99121cf780fb6e1c6353bfeddfb176',
'aarch64-unknown-linux-gnu': 'd1146b240e6f628224c3a67e3aae2a57e6c25d544115e5ece9ce91861ec92b3a',
'x86_64-apple-darwin': '475be237962d6aef1038a2faada26fda1e0eaea5d71d6950229a027a9c2bfe08'
},
'1.27.0': {
'x86_64-unknown-linux-gnu': '235ad78e220b10a2d0267aea1e2c0f19ef5eaaff53ad6ff8b12c1d4370dec9a3',
'powerpc64le-unknown-linux-gnu': '847774a751e848568215739d384e3baf4d6ec37d27fb3add7a8789208c213aff',
'aarch64-unknown-linux-gnu': 'e74ebc33dc3fc19e501a677a87b619746efdba2901949a0319176352f556673a',
'x86_64-apple-darwin': 'a1d48190992e01aac1a181bce490c80cb2c1421724b4ff0e2fb7e224a958ce0f'
},
'1.26.2': {
'x86_64-unknown-linux-gnu': 'd2b4fb0c544874a73c463993bde122f031c34897bb1eeb653d2ba2b336db83e6',
'powerpc64le-unknown-linux-gnu': 'ea045869074ae3617eeb51207ce183e6915784b9ed615ecb92ce082ddb86ec1f',
'aarch64-unknown-linux-gnu': '3dfad0dc9c795f7ee54c2099c9b7edf06b942adbbf02e9ed9e5d4b5e3f1f3759',
'x86_64-apple-darwin': 'f193705d4c0572a358670dbacbf0ffadcd04b3989728b442f4680fa1e065fa72'
},
'1.26.1': {
'x86_64-unknown-linux-gnu': 'b7e964bace1286696d511c287b945f3ece476ba77a231f0c31f1867dfa5080e0',
'powerpc64le-unknown-linux-gnu': 'ad8b2f6dd8c5cca1251d65b75ed2120aae3c5375d2c8ed690259cf4a652d7d3c',
'aarch64-unknown-linux-gnu': 'd4a369053c2dfd5f457de6853557dab563944579fa4bb55bc919bacf259bff6d',
'x86_64-apple-darwin': 'ebf898b9fa7e2aafc53682a41f18af5ca6660ebe82dd78f28cd9799fe4dc189a'
},
'1.26.0': {
'x86_64-unknown-linux-gnu': '13691d7782577fc9f110924b26603ade1990de0b691a3ce2dc324b4a72a64a68',
'powerpc64le-unknown-linux-gnu': '3ba3a4905730ec01007ca1096d9fc3780f4e81f71139a619e1f526244301b7f4',
'aarch64-unknown-linux-gnu': 'e12dc84bdb569cdb382268a5fe6ae6a8e2e53810cb890ec3a7133c20ba8451ac',
'x86_64-apple-darwin': '38708803c3096b8f101d1919ee2d7e723b0adf1bc1bb986b060973b57d8c7c28'
},
'1.25.0': {
'x86_64-unknown-linux-gnu': '06fb45fb871330a2d1b32a27badfe9085847fe824c189ddc5204acbe27664f5e',
'powerpc64le-unknown-linux-gnu': '79eeb2a7fafa2e0f65f29a1dc360df69daa725347e4b6a533684f1c07308cc6e',
'aarch64-unknown-linux-gnu': '19a43451439e515a216d0a885d14203f9a92502ee958abf86bf7000a7d73d73d',
'x86_64-apple-darwin': 'fcd0302b15e857ba4a80873360cf5453275973c64fa82e33bfbed02d88d0ad17'
},
'1.24.1': {
'x86_64-unknown-linux-gnu': '4567e7f6e5e0be96e9a5a7f5149b5452828ab6a386099caca7931544f45d5327',
'powerpc64le-unknown-linux-gnu': '6f6c4bebbd7d6dc9989bf372c512dea55af8f56a1a0cfe97784667f0ac5430ee',
'aarch64-unknown-linux-gnu': '64bb25a9689b18ddadf025b90d9bdb150b809ebfb74432dc69cc2e46120adbb2',
'x86_64-apple-darwin': '9d4aacdb5849977ea619d399903c9378163bd9c76ea11dac5ef6eca27849f501'
},
'1.24.0': {
'x86_64-unknown-linux-gnu': '336cf7af6c857cdaa110e1425719fa3a1652351098dc73f156e5bf02ed86443c',
'powerpc64le-unknown-linux-gnu': '25d9b965a63ad2f345897028094d4c7eafa432237b478754ccbcc299f80629c8',
'aarch64-unknown-linux-gnu': 'a981de306164b47f3d433c1d53936185260642849c79963af7e07d36b063a557',
'x86_64-apple-darwin': '1aecba7cab4bc1a9e0e931c04aa00849e930b567d243da7b676ede8f527a2992'
},
'1.23.0': {
'x86_64-unknown-linux-gnu': '9a34b23a82d7f3c91637e10ceefb424539dcfa327c2dcd292ff10c047b1fdc7e',
'powerpc64le-unknown-linux-gnu': '60f1a1cc182c516de08c1f42ada01604a3d94383e9dded6b237ae2233999437b',
'aarch64-unknown-linux-gnu': '38379fbd976d2286cb73f21466db40a636a583b9f8a80af5eea73617c7912bc7',
'x86_64-apple-darwin': '9274e977322bb4b153f092255ac9bd85041142c73eaabf900cb2ef3d3abb2eba'
}
}
# This dictionary maps Rust target architectures to Spack constraints that
# match that target.
rust_archs = {
'x86_64-unknown-linux-gnu': [
{'platform': 'linux', 'target': 'x86_64:'},
{'platform': 'cray', 'target': 'x86_64:'}
],
'powerpc64le-unknown-linux-gnu': [
{'platform': 'linux', 'target': 'ppc64le:'},
{'platform': 'cray', 'target': 'ppc64le:'}
],
'aarch64-unknown-linux-gnu': [
{'platform': 'linux', 'target': 'aarch64:'},
{'platform': 'cray', 'target': 'aarch64:'}
],
'x86_64-apple-darwin': [
{'platform': 'darwin', 'target': 'x86_64:'}
]
}
# Specifies the strings which represent a pre-release Rust version. These
# always bootstrap with the latest beta release.
#
# NOTE: These are moving targets, and therefore have no stable checksum. Be
# sure to specify "-n" or "--no-checksum" when installing these versions.
rust_prerelease_versions = ["beta", "nightly", "master"]
for prerelease_version in rust_prerelease_versions:
for rust_target, rust_arch_list in iteritems(rust_archs):
for rust_arch in rust_arch_list:
# All pre-release builds are built with the latest beta
# compiler.
resource(
name='rust-beta-{target}'.format(
target=rust_target
),
url='https://static.rust-lang.org/dist/rust-beta-{target}.tar.gz'.format(
target=rust_target
),
# Fake SHA - checksums should never be checked for
# pre-release builds, anyway
sha256='0000000000000000000000000000000000000000000000000000000000000000',
destination='spack_bootstrap_stage',
when='@{version} platform={platform} target={target}'\
.format(
version=prerelease_version,
platform=rust_arch['platform'],
target=rust_arch['target']
)
)
# This loop generates resources for each binary distribution, and maps
# them to the version of the compiler they bootstrap. This is in place
# of listing each resource explicitly, which would be potentially even
# more verbose.
#
# NOTE: This loop should technically specify the architecture to be the
# _host_ architecture, not the target architecture, in order to support
# cross compiling. I'm not sure Spack provides a way to specify a
# distinction in the when clause, though.
for rust_version, rust_targets in iteritems(rust_releases):
for rust_target, rust_sha256 in iteritems(rust_targets):
for rust_arch in rust_archs[rust_target]:
resource(
name='rust-{version}-{target}'.format(
version=rust_version,
target=rust_target
),
url='https://static.rust-lang.org/dist/rust-{version}-{target}.tar.gz'.format(
version=rust_version,
target=rust_target
),
sha256=rust_sha256,
destination='spack_bootstrap_stage',
when='@{ver} platform={platform} target={target}'.format(
ver=rust_version,
platform=rust_arch['platform'],
target=rust_arch['target']
)
)
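# For illustration only: one iteration of the loop above, for Rust 1.48.0 on
# x86_64 Linux, is equivalent to the explicit declaration
#
#   resource(
#       name='rust-1.48.0-x86_64-unknown-linux-gnu',
#       url='https://static.rust-lang.org/dist/rust-1.48.0-x86_64-unknown-linux-gnu.tar.gz',
#       sha256=rust_releases['1.48.0']['x86_64-unknown-linux-gnu'],
#       destination='spack_bootstrap_stage',
#       when='@1.48.0 platform=linux target=x86_64:'
#   )
#
# with an analogous resource emitted for platform=cray.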
# This routine returns the target architecture we intend to build for.
def get_rust_target(self):
if 'platform=linux' in self.spec or 'platform=cray' in self.spec:
if 'target=x86_64:' in self.spec:
return 'x86_64-unknown-linux-gnu'
elif 'target=ppc64le:' in self.spec:
return 'powerpc64le-unknown-linux-gnu'
elif 'target=aarch64:' in self.spec:
return 'aarch64-unknown-linux-gnu'
elif 'platform=darwin target=x86_64:' in self.spec:
return 'x86_64-apple-darwin'
raise InstallError(
"rust is not supported for '{0}'".format(
self.spec.architecture
))
def check_newer(self, version):
if '@master' in self.spec or '@beta' in self.spec or \
'@nightly' in self.spec:
return True
return '@{0}:'.format(version) in self.spec
def configure(self, spec, prefix):
target = self.get_rust_target()
# Bootstrapping compiler selection:
# Pre-release compilers use the latest beta release for the
# bootstrapping compiler.
# Versioned releases bootstrap themselves.
if '@beta' in spec or '@nightly' in spec or '@master' in spec:
bootstrap_version = 'beta'
else:
bootstrap_version = spec.version
# See the NOTE above the resource loop - should be host architecture,
# not target architecture if we're to support cross-compiling.
bootstrapping_install = Executable(
'./spack_bootstrap_stage/rust-{version}-{target}/install.sh'
.format(
version=bootstrap_version,
target=target
)
)
# install into the staging area
bootstrapping_install('--prefix={0}'.format(
join_path(self.stage.source_path, 'spack_bootstrap')
))
boot_bin = join_path(self.stage.source_path, 'spack_bootstrap/bin')
# Always build rustc and cargo
tools = ['rustc', 'cargo']
# Only make additional components available in 'rust-bootstrap'
if '+rustfmt' in self.spec:
tools.append('rustfmt')
if '+analysis' in self.spec:
tools.append('analysis')
if '@1.33: +clippy' in self.spec:
tools.append('clippy')
if '+rls' in self.spec:
tools.append('rls')
if '+src' in self.spec:
tools.append('src')
ar = which('ar', required=True)
extra_targets = []
if not self.spec.satisfies('extra_targets=none'):
extra_targets = list(self.spec.variants['extra_targets'].value)
targets = [self.get_rust_target()] + extra_targets
target_spec = 'target=[' + \
','.join('"{0}"'.format(target) for target in targets) + ']'
target_specs = '\n'.join(
'[target.{0}]\nar = "{1}"\n'.format(target, ar.path)
for target in targets)
# build.tools was introduced in Rust 1.25
tools_spec = \
'tools={0}'.format(tools) if self.check_newer('1.25') else ''
# This is a temporary fix due to rust 1.42 breaking self bootstrapping
# See: https://github.com/rust-lang/rust/issues/69953
#
# In general, this should be safe because bootstrapping typically
# ensures everything but the bootstrapping script is warning free for
# the latest set of warnings.
deny_warnings_spec = \
'deny-warnings = false' if '@1.42.0' in self.spec else ''
# "Nightly" and master builds want a path to rustfmt - otherwise, it
# will try to download rustfmt from the Internet. We'll give it rustfmt
# for the bootstrapping compiler, but it ultimately shouldn't matter
# because this package never invokes it. To be clear, rustfmt from the
# bootstrapping compiler is probably incorrect. See: src/stage0.txt in
# Rust to see what the current "official" rustfmt version for Rust is.
if '@master' in spec or '@nightly' in spec:
rustfmt_spec = \
'rustfmt="{0}"'.format(join_path(boot_bin, 'rustfmt'))
else:
rustfmt_spec = ''
with open('config.toml', 'w') as out_file:
out_file.write("""\
[build]
cargo = "{cargo}"
rustc = "{rustc}"
docs = false
vendor = true
extended = true
verbose = 2
{target_spec}
{tools_spec}
{rustfmt_spec}
[rust]
channel = "stable"
rpath = true
{deny_warnings_spec}
{target_specs}
[install]
prefix = "{prefix}"
sysconfdir = "etc"
""".format(
cargo=join_path(boot_bin, 'cargo'),
rustc=join_path(boot_bin, 'rustc'),
prefix=prefix,
deny_warnings_spec=deny_warnings_spec,
target_spec=target_spec,
target_specs=target_specs,
tools_spec=tools_spec,
rustfmt_spec=rustfmt_spec
)
)
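# Illustrative values for the pieces interpolated above (assuming an x86_64
# Linux build with no extra_targets and only the default tools): target_spec
# becomes 'target=["x86_64-unknown-linux-gnu"]' and tools_spec becomes
# "tools=['rustc', 'cargo']".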
def build(self, spec, prefix):
python('./x.py', 'build', extra_env={
# vendored libgit2 wasn't correctly building (couldn't find the
# vendored libssh2), so let's just have spack build it
'LIBSSH2_SYS_USE_PKG_CONFIG': '1',
'LIBGIT2_SYS_USE_PKG_CONFIG': '1'
})
def install(self, spec, prefix):
python('./x.py', 'install')
| 57.872093
| 148
| 0.686472
|
7951121c0e28cb94ce2d862a2f81545ccdf04f04
| 1,063
|
py
|
Python
|
migrations/versions/b1983f13bd19_.py
|
lmontero88/Backend_FinalProject4Geek
|
36a7452723c250bd5fa2b3cd1d1f43b24b4db836
|
[
"MIT"
] | null | null | null |
migrations/versions/b1983f13bd19_.py
|
lmontero88/Backend_FinalProject4Geek
|
36a7452723c250bd5fa2b3cd1d1f43b24b4db836
|
[
"MIT"
] | null | null | null |
migrations/versions/b1983f13bd19_.py
|
lmontero88/Backend_FinalProject4Geek
|
36a7452723c250bd5fa2b3cd1d1f43b24b4db836
|
[
"MIT"
] | 2
|
2020-12-07T12:20:40.000Z
|
2020-12-10T18:52:50.000Z
|
"""empty message
Revision ID: b1983f13bd19
Revises: 0d697c545a40
Create Date: 2020-12-10 01:37:03.316289
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b1983f13bd19'
down_revision = '0d697c545a40'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('Friend',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('friend_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['friend_id'], ['user.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.add_column('Match', sa.Column('status_request', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('Match', 'status_request')
op.drop_table('Friend')
# ### end Alembic commands ###
| 27.973684
| 84
| 0.678269
|
795112f83a5eed89af4c656dc5bbaa66a10380a2
| 946
|
py
|
Python
|
UrlChecker.py
|
evanRubinsteinIT/URLChecker
|
65f24c504a0885c6758753287512398590ecde9b
|
[
"MIT"
] | 2
|
2021-04-07T10:06:13.000Z
|
2021-04-07T14:42:42.000Z
|
UrlChecker.py
|
evanRubinsteinIT/URLChecker
|
65f24c504a0885c6758753287512398590ecde9b
|
[
"MIT"
] | null | null | null |
UrlChecker.py
|
evanRubinsteinIT/URLChecker
|
65f24c504a0885c6758753287512398590ecde9b
|
[
"MIT"
] | null | null | null |
import requests
def fileRead():
filePath = input("Please type in the path to the file you would like to use: ")
subdomainArray = []
with open(filePath) as domainFile:
for line in domainFile:
line = line.strip()
subdomainArray.append(line)
return(subdomainArray)
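# makeRequests probes each subdomain over plain HTTP (the https URL is built
# below but not actually requested) and writes the reachable ones to
# checkedSubdomains.txt.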
def makeRequests(subdomainList):
validSubdomains = []
checkedFile = open('checkedSubdomains.txt','w+')
for x in subdomainList:
http = "http://"+x
https = "https://"+x
try:
domainCheck = requests.get(http, timeout=1.5)
print(x,domainCheck.status_code)
validSubdomains.append(http)
except requests.RequestException:
print("Valid URL not found for:",x)
for x in validSubdomains:
checkedFile.write(x)
checkedFile.write('\n')
checkedFile.close()
print("Results written to file!")
makeRequests(fileRead())
| 29.5625
| 83
| 0.624736
|
7951143fd0976ee97b216eb5679e8e5f8dfe16c5
| 18,855
|
py
|
Python
|
django/db/backends/sqlite3/base.py
|
limbongofficial/django-framework
|
f3fa86a89b3b85242f49b2b9acf58b5ea35acc1f
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2020-01-10T23:06:36.000Z
|
2020-01-10T23:06:36.000Z
|
django/db/backends/sqlite3/base.py
|
limbongofficial/django-framework
|
f3fa86a89b3b85242f49b2b9acf58b5ea35acc1f
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/db/backends/sqlite3/base.py
|
limbongofficial/django-framework
|
f3fa86a89b3b85242f49b2b9acf58b5ea35acc1f
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2018-12-10T03:06:36.000Z
|
2018-12-10T03:06:36.000Z
|
"""
SQLite3 backend for the sqlite3 module in the standard library.
"""
import datetime
import decimal
import math
import operator
import re
import warnings
from sqlite3 import dbapi2 as Database
import pytz
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import timezone
from django.utils.dateparse import parse_datetime, parse_time
from django.utils.duration import duration_microseconds
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
def decoder(conv_func):
"""
Convert bytestrings from Python's sqlite3 interface to a regular string.
"""
return lambda s: conv_func(s.decode())
Database.register_converter("bool", b'1'.__eq__)
Database.register_converter("time", decoder(parse_time))
Database.register_converter("datetime", decoder(parse_datetime))
Database.register_converter("timestamp", decoder(parse_datetime))
Database.register_converter("TIMESTAMP", decoder(parse_datetime))
Database.register_adapter(decimal.Decimal, str)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
display_name = 'SQLite'
# SQLite doesn't actually support most of these types, but it "does the right
# thing" given more verbose field definitions, so leave them as is so that
# schema inspection is more useful.
data_types = {
'AutoField': 'integer',
'BigAutoField': 'integer',
'BinaryField': 'BLOB',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'decimal',
'DurationField': 'bigint',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'real',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer unsigned',
'PositiveSmallIntegerField': 'smallint unsigned',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'char(32)',
}
data_type_check_constraints = {
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
data_types_suffix = {
'AutoField': 'AUTOINCREMENT',
'BigAutoField': 'AUTOINCREMENT',
}
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
'startswith': r"LIKE {} || '%%' ESCAPE '\'",
'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
'endswith': r"LIKE '%%' || {} ESCAPE '\'",
'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
**settings_dict['OPTIONS'],
}
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False, 'uri': True})
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_date_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("django_time_extract", 2, _sqlite_time_extract)
conn.create_function("django_time_trunc", 2, _sqlite_time_trunc)
conn.create_function("django_time_diff", 2, _sqlite_time_diff)
conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
conn.create_function("regexp", 2, _sqlite_regexp)
conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
conn.create_function("django_power", 2, _sqlite_power)
conn.create_function('LPAD', 3, _sqlite_lpad)
conn.create_function('REPEAT', 2, operator.mul)
conn.create_function('RPAD', 3, _sqlite_rpad)
conn.execute('PRAGMA foreign_keys = ON')
return conn
def init_connection_state(self):
pass
def create_cursor(self, name=None):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if not self.is_in_memory_db():
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# Two conditions are required here:
# - A sufficiently recent version of SQLite to support savepoints,
# - Being in a transaction, which can only happen inside 'atomic'.
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside 'atomic'
# blocks. To work around that bug, on SQLite, 'atomic' starts a
# transaction explicitly rather than simply disable autocommit.
return self.features.uses_savepoints and self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ''
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
with self.wrap_database_errors:
self.connection.isolation_level = level
def disable_constraint_checking(self):
if self.in_atomic_block:
# sqlite3 cannot disable constraint checking inside a transaction.
return False
self.cursor().execute('PRAGMA foreign_keys = OFF')
return True
def enable_constraint_checking(self):
self.cursor().execute('PRAGMA foreign_keys = ON')
def check_constraints(self, table_names=None):
"""
Check each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
"""
with self.cursor() as cursor:
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute(
"""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
"""
% (
primary_key_column_name, column_name, table_name,
referenced_table_name, column_name, referenced_column_name,
column_name, referenced_column_name,
)
)
for bad_row in cursor.fetchall():
raise utils.IntegrityError(
"The row in table '%s' with primary key '%s' has an "
"invalid foreign key: %s.%s contains a value '%s' that "
"does not have a corresponding value in %s.%s." % (
table_name, bad_row[0], table_name, column_name,
bad_row[1], referenced_table_name, referenced_column_name,
)
)
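# A rough usage sketch for check_constraints() above (illustrative only):
#
#   disabled = connection.disable_constraint_checking()
#   ...  # bulk-insert rows that may temporarily violate foreign keys
#   if disabled:
#       connection.enable_constraint_checking()
#   connection.check_constraints(table_names=['some_table'])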
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
"""
Start a transaction explicitly in autocommit mode.
Staying in autocommit mode works around a bug of sqlite3 that breaks
savepoints when autocommit is disabled.
"""
self.cursor().execute("BEGIN")
def is_in_memory_db(self):
return self.creation.is_in_memory_db(self.settings_dict['NAME'])
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=None):
if params is None:
return Database.Cursor.execute(self, query)
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
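# Illustrative example of the placeholder conversion above:
#   convert_query("SELECT * FROM t WHERE a = %s AND b LIKE '10%%'")
# returns
#   "SELECT * FROM t WHERE a = ? AND b LIKE '10%'"
# i.e. unescaped %s placeholders become qmark-style ? and %% collapses to %.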
def _sqlite_date_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
elif lookup_type == 'week':
return dt.isocalendar()[1]
elif lookup_type == 'quarter':
return math.ceil(dt.month / 3)
else:
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'quarter':
month_in_quarter = dt.month - (dt.month - 1) % 3
return '%i-%02i-01' % (dt.year, month_in_quarter)
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'week':
dt = dt - datetime.timedelta(days=dt.weekday())
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_time_trunc(lookup_type, dt):
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'hour':
return "%02i:00:00" % dt.hour
elif lookup_type == 'minute':
return "%02i:%02i:00" % (dt.hour, dt.minute)
elif lookup_type == 'second':
return "%02i:%02i:%02i" % (dt.hour, dt.minute, dt.second)
def _sqlite_datetime_parse(dt, tzname):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
return dt
def _sqlite_datetime_cast_date(dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
return dt.date().isoformat()
def _sqlite_datetime_cast_time(dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
return dt.time().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
elif lookup_type == 'week':
return dt.isocalendar()[1]
elif lookup_type == 'quarter':
return math.ceil(dt.month / 3)
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
dt = _sqlite_datetime_parse(dt, tzname)
if dt is None:
return None
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'quarter':
month_in_quarter = dt.month - (dt.month - 1) % 3
return '%i-%02i-01 00:00:00' % (dt.year, month_in_quarter)
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'week':
dt = dt - datetime.timedelta(days=dt.weekday())
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'hour':
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == 'minute':
return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
elif lookup_type == 'second':
return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_time_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
return getattr(dt, lookup_type)
def _sqlite_format_dtdelta(conn, lhs, rhs):
"""
LHS and RHS can be either:
- An integer number of microseconds
- A string representing a datetime
"""
try:
real_lhs = datetime.timedelta(0, 0, lhs) if isinstance(lhs, int) else backend_utils.typecast_timestamp(lhs)
real_rhs = datetime.timedelta(0, 0, rhs) if isinstance(rhs, int) else backend_utils.typecast_timestamp(rhs)
if conn.strip() == '+':
out = real_lhs + real_rhs
else:
out = real_lhs - real_rhs
except (ValueError, TypeError):
return None
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
return str(out)
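# For example (illustrative): _sqlite_format_dtdelta('+', 86400000000,
# '2020-01-01 00:00:00') yields '2020-01-02 00:00:00' (one day, given as
# microseconds, added to the timestamp).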
def _sqlite_time_diff(lhs, rhs):
left = backend_utils.typecast_time(lhs)
right = backend_utils.typecast_time(rhs)
return (
(left.hour * 60 * 60 * 1000000) +
(left.minute * 60 * 1000000) +
(left.second * 1000000) +
(left.microsecond) -
(right.hour * 60 * 60 * 1000000) -
(right.minute * 60 * 1000000) -
(right.second * 1000000) -
(right.microsecond)
)
def _sqlite_timestamp_diff(lhs, rhs):
left = backend_utils.typecast_timestamp(lhs)
right = backend_utils.typecast_timestamp(rhs)
return duration_microseconds(left - right)
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, str(re_string))) if re_string is not None else False
def _sqlite_lpad(text, length, fill_text):
if len(text) >= length:
return text[:length]
return (fill_text * length)[:length - len(text)] + text
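# For example (illustrative): _sqlite_lpad('abc', 5, '*') returns '**abc',
# while _sqlite_lpad('abcdef', 4, '*') truncates to 'abcd'.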
def _sqlite_rpad(text, length, fill_text):
return (text + fill_text * length)[:length]
def _sqlite_power(x, y):
return x ** y
| 38.479592
| 115
| 0.624662
|
795114d037a9b6a713dca7d1b1231ec64d2c69c1
| 1,176
|
py
|
Python
|
Emotion_Model/voting.py
|
rogeroyer/2019-CCF-BDCI-Finance-Information-Negative-Judgment
|
06e0582b06f99ce3348ad91ea687ab3e9a0cf363
|
[
"MIT"
] | 30
|
2020-02-28T13:33:09.000Z
|
2021-09-30T08:21:26.000Z
|
Emotion_Model/voting.py
|
williamjiamin/2019-CCF-BDCI-Finance-Information-Negative-Judgment
|
06e0582b06f99ce3348ad91ea687ab3e9a0cf363
|
[
"MIT"
] | 1
|
2020-07-23T07:20:08.000Z
|
2020-07-24T13:29:58.000Z
|
Emotion_Model/voting.py
|
williamjiamin/2019-CCF-BDCI-Finance-Information-Negative-Judgment
|
06e0582b06f99ce3348ad91ea687ab3e9a0cf363
|
[
"MIT"
] | 21
|
2020-03-18T14:43:53.000Z
|
2022-03-09T08:34:12.000Z
|
import os
import numpy as np
import pandas as pd
from collections import Counter
sub_path = './submit/'
sub1 = pd.read_csv(sub_path + 'emotion_res_1.csv', encoding='utf-8')[['id', 'negative']]
sub2 = pd.read_csv(sub_path + 'emotion_res_2.csv', encoding='utf-8')[['id', 'negative']]
sub3 = pd.read_csv('./roberta_wwm_large_ext_emotion_xiong/result/submit_emotion.csv', encoding='utf-8')[['id', 'negative']]
sub1.columns = ['id', 'negative_1']
sub2.columns = ['id', 'negative_2']
sub3.columns = ['id', 'negative_3']
sub = sub1.merge(sub2, on='id', how='left')
sub = sub.merge(sub3, on='id', how='left')
print(sub)
def vote(value_1, value_2, value_3):
count = Counter()
count[value_1] += 1
count[value_2] += 1
count[value_3] += 1
# print(count)
return count.most_common(1)[0][0]
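# For example (illustrative): vote(0, 1, 1) returns 1, the majority label.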
sub['negative'] = sub.apply(lambda index: vote(index.negative_1, index.negative_2, index.negative_3), axis=1)
sub['key_entity'] = [np.nan for index in range(len(sub))]
print(sub)
sub[['id', 'negative', 'key_entity']].to_csv('./submit/emotion_voting_three_models.csv', encoding='utf-8', index=None)
print('store done.')
print(sub[sub['negative'] == 1].shape)
| 33.6
| 123
| 0.684524
|
7951156f1997765b869b5944c1eb717852c1a89f
| 996
|
py
|
Python
|
checkov/kubernetes/checks/ApiServerServiceAccountLookup.py
|
kylelaker/checkov
|
6eada26030a87f397a6bf1831827b3dc6c5dad2d
|
[
"Apache-2.0"
] | 5
|
2021-07-29T18:08:40.000Z
|
2022-03-21T04:39:32.000Z
|
checkov/kubernetes/checks/ApiServerServiceAccountLookup.py
|
kylelaker/checkov
|
6eada26030a87f397a6bf1831827b3dc6c5dad2d
|
[
"Apache-2.0"
] | 16
|
2021-03-09T07:38:38.000Z
|
2021-06-09T03:53:55.000Z
|
checkov/kubernetes/checks/ApiServerServiceAccountLookup.py
|
kylelaker/checkov
|
6eada26030a87f397a6bf1831827b3dc6c5dad2d
|
[
"Apache-2.0"
] | 2
|
2021-08-23T13:25:36.000Z
|
2021-11-05T21:44:52.000Z
|
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.kubernetes.base_spec_check import BaseK8Check
class ApiServerServiceAccountLookup(BaseK8Check):
def __init__(self):
id = "CKV_K8S_96"
name = "Ensure that the --service-account-lookup argument is set to true"
categories = [CheckCategories.KUBERNETES]
supported_entities = ['containers']
super().__init__(name=name, id=id, categories=categories, supported_entities=supported_entities)
def get_resource_id(self, conf):
return f'{conf["parent"]} - {conf["name"]}'
def scan_spec_conf(self, conf):
if conf.get("command") is not None:
if "kube-apiserver" in conf["command"]:
if "--service-account-lookup=false" in conf["command"] or "--service-account-lookup=true" not in conf["command"]:
return CheckResult.FAILED
return CheckResult.PASSED
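# Illustrative behaviour of scan_spec_conf above: a container whose command is
#   ["kube-apiserver", "--service-account-lookup=false"]
# fails the check, while
#   ["kube-apiserver", "--service-account-lookup=true"]
# passes.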
check = ApiServerServiceAccountLookup()
| 41.5
| 129
| 0.678715
|
795115b2c7d3ee67116697dcc2ee4a742e6c328e
| 2,371
|
py
|
Python
|
pyleecan/Methods/Geometry/Surface/split_line.py
|
IrakozeFD/pyleecan
|
5a93bd98755d880176c1ce8ac90f36ca1b907055
|
[
"Apache-2.0"
] | 2
|
2020-08-28T14:54:55.000Z
|
2021-03-13T19:34:45.000Z
|
pyleecan/Methods/Geometry/Surface/split_line.py
|
IrakozeFD/pyleecan
|
5a93bd98755d880176c1ce8ac90f36ca1b907055
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/Methods/Geometry/Surface/split_line.py
|
IrakozeFD/pyleecan
|
5a93bd98755d880176c1ce8ac90f36ca1b907055
|
[
"Apache-2.0"
] | null | null | null |
from ....Classes.Segment import Segment
from ....definitions import PACKAGE_NAME
def split_line(self, Z1, Z2, is_top=True, is_join=False, label_join=""):
"""Cut the Surface according to a line defined by two complex
Parameters
----------
self : Surface
A Surface object
Z1 : complex
First point of the cutting Line
Z2 : complex
Second point of the cutting Line
is_top : bool
True to keep the part above the cutting line.
"Above" is in the coordinate system with Z1 in 0 and Z2 on the X>0 axis
is_join : bool
True to join the split_list with Segment on the cutting line
label_join : str
Label of the join line
Returns
-------
split_surf : SurfLine
The selected part of the Surface
"""
# Dynamic import to avoid import loop
module = __import__(PACKAGE_NAME + ".Classes.SurfLine", fromlist=["SurfLine"])
SurfLine = getattr(module, "SurfLine")
# Split all the lines of the surface
lines = self.get_lines()
split_list = list()
for line in lines:
split_list.extend(
line.split_line(
Z1=Z1, Z2=Z2, is_top=is_top, is_join=is_join, label_join=label_join
)
)
# Make sure that the surface is closed (if needed)
if is_join:
final_list = list()
for ii in range(len(split_list) - 1):
final_list.append(split_list[ii])
if abs(split_list[ii].get_end() - split_list[ii + 1].get_begin()) > 1e-6:
final_list.append(
Segment(
begin=split_list[ii].get_end(),
end=split_list[ii + 1].get_begin(),
label=label_join,
)
)
final_list.append(split_list[-1])
# Add last line
if abs(split_list[-1].get_end() - split_list[0].get_begin()) > 1e-6:
final_list.append(
Segment(
begin=split_list[-1].get_end(),
end=split_list[0].get_begin(),
label=label_join,
)
)
split_list = final_list
# Create the resulting surface and update ref point
surf = SurfLine(label=self.label, line_list=split_list)
surf.comp_point_ref(is_set=True)
return surf
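# Illustrative call (assumed values): surf.split_line(Z1=0, Z2=1j, is_top=True,
# is_join=True, label_join="cut") keeps the part of the surface on the is_top
# side of the line through Z1 and Z2, closing the outline with Segment objects
# labelled "cut".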
| 32.040541
| 85
| 0.571489
|
79511647deea627d21740b9d8e33f38b6db8b387
| 565
|
py
|
Python
|
variables_and_types/script.py
|
fabiomilheiro/Python-Experiments
|
9f5c3ab73dfb8ca5dde5a71393a3cf55ee81c641
|
[
"Unlicense"
] | null | null | null |
variables_and_types/script.py
|
fabiomilheiro/Python-Experiments
|
9f5c3ab73dfb8ca5dde5a71393a3cf55ee81c641
|
[
"Unlicense"
] | null | null | null |
variables_and_types/script.py
|
fabiomilheiro/Python-Experiments
|
9f5c3ab73dfb8ca5dde5a71393a3cf55ee81c641
|
[
"Unlicense"
] | null | null | null |
x = 1000
y = 3.14
print(x)
print(y)
print(float(7))
name = 'John Smith'
print(name)
name = "John Smith's jacket"
print(name)
one = 1
two = 2
three = one + two
print(three)
hello = "hello"
world = "world"
hello_word = hello + " " + world
print(hello_word)
a, b = 3, 4
print(a, b, "something else")
mystring = "hello"
myfloat = 10.0
myint = 20
if mystring == "hello":
print("String %s" % mystring)
if isinstance(myfloat, float) and myfloat == 10.0:
print("Float: %f" % myfloat)
if isinstance(myint, int) and myint == 20:
print("Integer: %d" % myint)
| 16.617647
| 50
| 0.635398
|
7951164e57e08ce004b584eaa9f49f327d04a151
| 4,772
|
py
|
Python
|
demos/demo_2d_tracer.py
|
jhill1/thetis
|
1be5d28d5d0d7248f2bbce4986b3e886116e103a
|
[
"MIT"
] | null | null | null |
demos/demo_2d_tracer.py
|
jhill1/thetis
|
1be5d28d5d0d7248f2bbce4986b3e886116e103a
|
[
"MIT"
] | null | null | null |
demos/demo_2d_tracer.py
|
jhill1/thetis
|
1be5d28d5d0d7248f2bbce4986b3e886116e103a
|
[
"MIT"
] | null | null | null |
# 2D tracer transport
# ===================
#
# This demo shows how the Firedrake DG advection equation
# `demo <https://firedrakeproject.org/demos/DG_advection.py.html>`__
# can be implemented in Thetis.
#
# The test case is the classic cosine-bell--cone--slotted-cylinder
# advection test case of :cite:`LeVeque:1996`. The domain is the unit
# square :math:`\Omega=[0,1]^2` and the velocity corresponds to the
# solid body rotation :math:`\vec{u} = (0.5 - y, x - 0.5)`.
#
# As usual, we start by importing Thetis. ::
from thetis import *
# Define a 40-by-40 mesh of squares. ::
mesh2d = UnitSquareMesh(40, 40, quadrilateral=True)
# We will solve a pure advection problem in non-conservative form,
# with no hydrodynamics. Therefore, bathymetry is not actually
# important. We set an arbitrary postive value, as this is required
# by Thetis to construct the solver object. ::
P1_2d = FunctionSpace(mesh2d, "CG", 1)
bathymetry2d = Function(P1_2d)
bathymetry2d.assign(1.0)
solver_obj = solver2d.FlowSolver2d(mesh2d, bathymetry2d)
# To activate the tracer functionality, we set the option
# ``solve_tracer = True``. As mentioned above, we are only solving
# the tracer equation, which can be specified by setting
# ``tracer_only = True``.
options = solver_obj.options
options.solve_tracer = True
options.tracer_only = True
options.fields_to_export = ['tracer_2d']
# We will run for time :math:`2\pi` -- a full rotation -- using a
# strong stability preserving third order Runge-Kutta method (SSPRK33).
# For consistency with the Firedrake demo, Thetis' automatic timestep
# computation functionality is switched off and the simulation time is
# split into 600 steps, giving a timestep close to the CFL limit. ::
options.timestepper_type = 'SSPRK33'
options.timestep = pi/300.0
options.simulation_end_time = 2*pi
options.simulation_export_time = pi/15.0
options.timestepper_options.use_automatic_timestep = False
# We have a pure advection problem with no diffusivity or source terms. However,
# such terms can be specified by replacing the ``None`` values below. For
# consistency with the Firedrake demo, we do not use stabilization or slope
# limiters, both of which are used by default in Thetis. Slope limiters are used
# to obtain non-oscillatory solutions. ::
options.horizontal_diffusivity = None
options.tracer_source_2d = None
options.use_lax_friedrichs_tracer = False
options.use_limiter_for_tracers = False
# The background tracer value is imposed as an upwind inflow condition.
# In general, this would be a ``Function``, but here we just use a ``Constant``
# value. ::
solver_obj.bnd_functions['tracer'] = {'on_boundary': {'value': Constant(1.0)}}
# The velocity field is set up using a simple analytic expression. ::
vP1_2d = VectorFunctionSpace(mesh2d, "CG", 1)
x, y = SpatialCoordinate(mesh2d)
uv_init = interpolate(as_vector([0.5 - y, x - 0.5]), vP1_2d)
# Now, we set up the cosine-bell--cone--slotted-cylinder initial condition. The
# first four lines declare various parameters relating to the positions of these
# objects, while the analytic expressions appear in the last three lines. This
# code is simply copied from the Firedrake version of the demo. ::
bell_r0 = 0.15; bell_x0 = 0.25; bell_y0 = 0.5
cone_r0 = 0.15; cone_x0 = 0.5; cone_y0 = 0.25
cyl_r0 = 0.15; cyl_x0 = 0.5; cyl_y0 = 0.75
slot_left = 0.475; slot_right = 0.525; slot_top = 0.85
bell = 0.25*(1+cos(pi*min_value(sqrt(pow(x-bell_x0, 2) + pow(y-bell_y0, 2))/bell_r0, 1.0)))
cone = 1.0 - min_value(sqrt(pow(x-cone_x0, 2) + pow(y-cone_y0, 2))/cone_r0, 1.0)
slot_cyl = conditional(sqrt(pow(x-cyl_x0, 2) + pow(y-cyl_y0, 2)) < cyl_r0,
conditional(And(And(x > slot_left, x < slot_right), y < slot_top),
0.0, 1.0), 0.0)
# We then declare the initial condition, ``q_init``, to be the sum of these fields.
# Furthermore, we add 1 to this, so that the initial field lies between 1 and 2,
# rather than between 0 and 1. This ensures that we can't get away with
# neglecting the inflow boundary condition. We also save the initial state so
# that we can check the :math:`L^2`-norm error at the end. ::
q_init = interpolate(1.0 + bell + cone + slot_cyl, P1_2d)
solver_obj.assign_initial_conditions(uv=uv_init, tracer_2d=q_init)
# Now we are in a position to run the time loop. ::
solver_obj.iterate()
# Finally, we display the normalised :math:`L^2` error, by comparing to the initial condition. ::
q = solver_obj.fields.tracer_2d
L2_err = sqrt(assemble((q - q_init)*(q - q_init)*dx))
L2_init = sqrt(assemble(q_init*q_init*dx))
print_output(L2_err/L2_init)
# This tutorial can be downloaded as a Python script `here <demo_2d_tracer.py>`__.
#
#
# .. rubric:: References
#
# .. bibliography:: demo_references.bib
# :filter: docname in docnames
| 40.10084
| 97
| 0.732188
|