Column schema for the rows below (⌀ marks columns that contain nulls):

| column | type | range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id ⌀ | int64 | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at ⌀ | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at ⌀ | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | length 1 |
| author_id | string | length 1–132 |
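Each row of the dump pairs provenance metadata with the full text of one source file. Below is a minimal sketch of how rows with this schema could be streamed and filtered using the Hugging Face `datasets` library; the dataset id is a placeholder assumption, since the dump itself does not name its source:

```python
# Sketch: stream rows with the schema above and keep small, non-vendored
# Python files. "org/source-code-dump" is a hypothetical dataset id.
from datasets import load_dataset

ds = load_dataset("org/source-code-dump", split="train", streaming=True)

for row in ds:
    # Field names come straight from the schema table above.
    if (row["language"] == "Python"
            and not row["is_vendor"]
            and not row["is_generated"]
            and row["length_bytes"] < 10_000):
        print(row["repo_name"], row["path"], row["star_events_count"])
```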
---

Row 1

blob_id: ba481849f965390fabf10847601ba3c76504f727
directory_id: d83fde3c891f44014f5339572dc72ebf62c38663
path: /_bin/google-cloud-sdk/lib/surface/auth/git_helper.py
content_id: a6de47988f9275251814caa2adbdd0f03ea1bbf4
detected_licenses: ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"]
license_type: permissive
repo_name: gyaresu/dotfiles
snapshot_id: 047cc3ca70f4b405ba272856c69ee491a79d2ebe
revision_id: e5e533b3a081b42e9492b228f308f6833b670cfe
branch_name: refs/heads/master
visit_date: 2022-11-24T01:12:49.435037
revision_date: 2022-11-01T16:58:13
committer_date: 2022-11-01T16:58:13
github_id: 17139657
star_events_count: 1
fork_events_count: 1
gha_license_id: null
gha_event_created_at: 2020-07-25T14:11:43
gha_created_at: 2014-02-24T14:59:59
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 7848
extension: py
content:
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A git credential helper that provides Google git repository passwords.

Reads a session from stdin that looks a lot like:

    protocol=https
    host=code.google.com

And writes out a session to stdout that looks a lot like:

    username=me
    password=secret

Errors will be reported on stderr.

Note that spaces may be part of key names so, for example, "protocol" must not
be preceded by leading spaces.
"""

from __future__ import absolute_import
from __future__ import unicode_literals

import os
import re
import subprocess
import sys
import textwrap

from googlecloudsdk.api_lib.auth import exceptions as auth_exceptions
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exc
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.credentials import store as c_store
from googlecloudsdk.core.util import platforms
from oauth2client import client

_KEYVAL_RE = re.compile(r'(.+)=(.*)')
_BLANK_LINE_RE = re.compile(r'^ *$')


@base.Hidden
class GitHelper(base.Command):
  """A git credential helper to provide access to Google git repositories."""

  GET = 'get'
  STORE = 'store'
  METHODS = [GET, STORE]

  GOOGLESOURCE = 'googlesource.com'

  @staticmethod
  def Args(parser):
    parser.add_argument('method',
                        help='The git credential helper method.')
    parser.add_argument('--ignore-unknown',
                        action='store_true',
                        help=('Produce no output and exit with 0 when given '
                              'an unknown method (e.g. store) or host.'))

  @c_exc.RaiseErrorInsteadOf(auth_exceptions.AuthenticationError, client.Error)
  def Run(self, args):
    """Run the helper command."""
    if args.method not in GitHelper.METHODS:
      if args.ignore_unknown:
        return
      raise auth_exceptions.GitCredentialHelperError(
          'Unexpected method [{meth}]. One of [{methods}] expected.'
          .format(meth=args.method, methods=', '.join(GitHelper.METHODS)))

    info = self._ParseInput()
    credentialed_domains = [
        'source.developers.google.com',
        GitHelper.GOOGLESOURCE,  # Requires a different username value.
    ]
    credentialed_domains_suffix = [
        '.'+GitHelper.GOOGLESOURCE,
    ]
    extra = properties.VALUES.core.credentialed_hosted_repo_domains.Get()
    if extra:
      credentialed_domains.extend(extra.split(','))
    host = info.get('host')

    def _ValidateHost(host):
      if host in credentialed_domains:
        return True
      for suffix in credentialed_domains_suffix:
        if host.endswith(suffix):
          return True
      return False

    if not _ValidateHost(host):
      if not args.ignore_unknown:
        raise auth_exceptions.GitCredentialHelperError(
            'Unknown host [{host}].'.format(host=host))
      return

    if args.method == GitHelper.GET:
      account = properties.VALUES.core.account.Get()
      try:
        cred = c_store.Load(account)
        c_store.Refresh(cred)
      except c_store.Error as e:
        sys.stderr.write(textwrap.dedent("""\
            ERROR: {error}
            Run 'gcloud auth login' to log in.
            """.format(error=str(e))))
        return

      self._CheckNetrc()

      # For googlesource.com, any username beginning with "git-" is accepted
      # and the identity of the user is extracted from the token server-side.
      if (host == GitHelper.GOOGLESOURCE
          or host.endswith('.'+GitHelper.GOOGLESOURCE)):
        sent_account = 'git-account'
      else:
        sent_account = account

      sys.stdout.write(textwrap.dedent("""\
          username={username}
          password={password}
          """).format(username=sent_account, password=cred.access_token))
    elif args.method == GitHelper.STORE:
      # On OSX, there is an additional credential helper that gets called before
      # ours does. When we return a token, it gets cached there. Git continues
      # to get it from there first until it expires. That command then fails,
      # and the token is deleted, but it does not retry the operation. The next
      # command gets a new token from us and it starts working again, for an
      # hour. This erases our credential from the other cache whenever 'store'
      # is called on us. Because they are called first, the token will already
      # be stored there, and so we can successfully erase it to prevent caching.
      if (platforms.OperatingSystem.Current() ==
          platforms.OperatingSystem.MACOSX):
        log.debug('Clearing OSX credential cache.')
        try:
          input_string = 'protocol={protocol}\nhost={host}\n\n'.format(
              protocol=info.get('protocol'), host=info.get('host'))
          log.debug('Calling erase with input:\n%s', input_string)
          p = subprocess.Popen(['git-credential-osxkeychain', 'erase'],
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
          (out, err) = p.communicate(input_string)
          if p.returncode:
            log.debug(
                'Failed to clear OSX keychain:\nstdout: {%s}\nstderr: {%s}',
                out, err)
        # pylint:disable=broad-except, This can fail and should only be done as
        # best effort.
        except Exception as e:
          log.debug('Failed to clear OSX keychain', exc_info=True)

  def _ParseInput(self):
    """Parse the fields from stdin.

    Returns:
      {str: str}, The parsed parameters given on stdin.
    """
    info = {}
    for line in sys.stdin:
      if _BLANK_LINE_RE.match(line):
        continue
      match = _KEYVAL_RE.match(line)
      if not match:
        raise auth_exceptions.GitCredentialHelperError(
            'Invalid input line format: [{format}].'
            .format(format=line.rstrip('\n')))
      key, val = match.groups()
      info[key] = val.strip()

    if 'protocol' not in info:
      raise auth_exceptions.GitCredentialHelperError(
          'Required key "protocol" missing.')

    if 'host' not in info:
      raise auth_exceptions.GitCredentialHelperError(
          'Required key "host" missing.')

    if info.get('protocol') != 'https':
      raise auth_exceptions.GitCredentialHelperError(
          'Invalid protocol [{p}]. "https" expected.'
          .format(p=info.get('protocol')))
    return info

  def _CheckNetrc(self):
    """Warn on stderr if ~/.netrc contains redundant credentials."""

    def Check(p):
      if not os.path.exists(p):
        return
      try:
        with open(p) as f:
          data = f.read()
          if 'source.developers.google.com' in data:
            sys.stderr.write(textwrap.dedent("""\
                You have credentials for your Google repository in [{path}]. This repository's
                git credential helper is set correctly, so the credentials in [{path}] will not
                be used, but you may want to remove them to avoid confusion.
                """.format(path=p)))
      # pylint:disable=broad-except, If something went wrong, forget about it.
      except Exception:
        pass

    Check(os.path.expanduser(os.path.join('~', '.netrc')))
    Check(os.path.expanduser(os.path.join('~', '_netrc')))
authors: ["me@gareth.codes"]
author_id: me@gareth.codes
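The module docstring of the file above describes git's credential-helper wire format: newline-separated `key=value` pairs terminated by a blank line, which is exactly what `_ParseInput` consumes and what the `get` branch writes back. A minimal sketch of that exchange through git's own plumbing command `git credential fill` (the host value is illustrative; the configured helper chain performs the actual lookup):

```python
# Sketch: drive the configured credential helper(s) with the session format
# shown in the docstring above.
import subprocess

session = "protocol=https\nhost=source.developers.google.com\n\n"

# "git credential fill" relays the session to each configured helper and
# prints the merged result (username=/password= lines) on stdout.
p = subprocess.run(["git", "credential", "fill"],
                   input=session, capture_output=True, text=True)

for line in p.stdout.splitlines():
    key, _, value = line.partition("=")
    print(key, "->", value)
```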
---

Row 2

blob_id: 73ed73e44e73439d953db9d8d3334a723e946c93
directory_id: 6b2a8dd202fdce77c971c412717e305e1caaac51
path: /solutions_5690574640250880_0/Python/Happyhobo/CodeJam3.py
content_id: fe5fdd5190ca94a238e7b72e0ea17a400cf4d3f6
detected_licenses: []
license_type: no_license
repo_name: alexandraback/datacollection
snapshot_id: 0bc67a9ace00abbc843f4912562f3a064992e0e9
revision_id: 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
branch_name: refs/heads/master
visit_date: 2021-01-24T18:27:24.417992
revision_date: 2017-05-23T09:23:38
committer_date: 2017-05-23T09:23:38
github_id: 84313442
star_events_count: 2
fork_events_count: 4
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2406
extension: py
content:
import numpy

def read_words(afile):
    words = []
    for line in afile:
        words.append(line.strip())
    return words

filename = open('test3.txt', 'r')
T = filename.readline()  # num test cases
aList = read_words(filename)  # array where each element is a line of text
for i in range(int(T)):
    DatBool = True
    Values = aList[i].split()
    R = int(Values[0])
    C = int(Values[1])
    M = int(Values[2])
    FR = 0
    FC = 0
    Matrix = numpy.zeros(shape=(R, C))
    while ((M >= (C - FC)) or (M >= (R - FR))):
        if ((C - FC) <= (R - FR)):
            FullRow = []
            for x in range(C):
                FullRow.append(1)
            Matrix[FR] = FullRow
            M = M - (C - FC)
            FR += 1
        elif ((C - FC) > (R - FR)):
            for o in range(R - FR):
                Matrix[(o + FR), (FC)] = 1
            M = M - (R - FR)
            FC += 1
    if (M == 0):
        if (R == 1 or C == 1):
            win = 5
        elif ((R - FR) * (C - FC) == 1):
            win = 5
        elif ((R - FR) == 1):
            DatBool = False
        elif ((C - FC) == 1):
            DatBool = False
        # else: done
    else:  # some leftover M: M < (C-FC) and M < (R-FR)
        if ((R - FR) <= 2):
            DatBool = False
        if ((C - FC) <= 2):
            DatBool = False
        else:  # at least a 3-by-3
            if (M > ((C - FC - 2) * (R - FR - 2))):
                DatBool = False
            else:  # winnable
                for z in range(C - FC - 2):
                    if (M > 0):
                        Matrix[(FR), (FC + z)] = 1
                        M -= 1
                if (M != 0):
                    for y in range(R - FR - 3):
                        if (M > 0):
                            Matrix[(FR + 1 + y), (FC)] = 1
                            M = M - 1
    print "Case #" + str(i + 1) + ":"
    if (DatBool == False):
        print "Impossible"
    else:
        for r in range(R):
            DatRow = ""
            for c in range(C):
                if (Matrix[r][c] == 0):
                    DatRow += "."
                elif (Matrix[r][c] == 1):
                    DatRow += "*"
            if (r == (R - 1)):
                DatRow = DatRow[:-1] + "c"
            print DatRow
authors: ["eewestman@gmail.com"]
author_id: eewestman@gmail.com
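The script above is Python 2 (note the `print` statements) and reads `test3.txt` in a fixed layout: the first line holds the number of test cases, and each following line holds one `R C M` triple (rows, columns, mines). A small sketch that writes such a file; the sample values are illustrative, not taken from the source:

```python
# Sketch: generate the input layout CodeJam3.py expects.
cases = [(5, 5, 23), (3, 1, 1)]  # illustrative (R, C, M) triples

with open("test3.txt", "w") as f:
    f.write("%d\n" % len(cases))
    for r, c, m in cases:
        f.write("%d %d %d\n" % (r, c, m))

# The script then prints "Case #i:" followed by an R-by-C board made of
# '.' (empty), '*' (mine) and a single 'c' (the starting click), or
# "Impossible" when no such board exists.
```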
---

Row 3

blob_id: 50f968620da3bb69ec9ff59baa71aa542d668235
directory_id: ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
path: /PORMain/pirates/minigame/LegendaryFishingGameGUI.py
content_id: 1b9bffa381197471b8c011b498eefdbcb8d710c7
detected_licenses: []
license_type: no_license
repo_name: BrandonAlex/Pirates-Online-Retribution
snapshot_id: 7f881a64ec74e595aaf62e78a39375d2d51f4d2e
revision_id: 980b7448f798e255eecfb6bd2ebb67b299b27dd7
branch_name: refs/heads/master
visit_date: 2020-04-02T14:22:28.626453
revision_date: 2018-10-24T15:33:17
committer_date: 2018-10-24T15:33:17
github_id: 154521816
star_events_count: 2
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 17881
extension: py
content:
|
from panda3d.core import NodePath, Point3, TextNode, TransparencyAttrib
# File: L (Python 2.4)
from direct.gui.DirectGui import DGG
from pirates.piratesgui.GuiButton import GuiButton
from direct.gui.DirectGui import *
from direct.gui.OnscreenImage import OnscreenImage
from direct.interval.IntervalGlobal import Sequence, Parallel, Wait, Func
from direct.interval.LerpInterval import LerpHprInterval, LerpPosInterval, LerpColorScaleInterval, LerpScaleInterval
import FishingGlobals
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.uberdog.UberDogGlobals import InventoryType
from pirates.piratesgui import GuiPanel
from pirates.piratesgui import PiratesGuiGlobals
from BlendActor import BlendActor
class LegendaryFishingGameGUI:
def __init__(self, gameObject = None):
base.loadingScreen.beginStep('LegendaryGameGUI', 4, 20)
self.gameObject = gameObject
self.guiImage = loader.loadModel('models/minigames/pir_m_gam_fsh_legendaryGui')
self.UICompoments = { }
self.uiBaseNode = NodePath('baseNode')
self.uiBaseNode.reparentTo(aspect2d)
self.uiBaseNode.show()
self.leftBaseNode = NodePath('leftBaseNode')
self.leftBaseNode.reparentTo(base.a2dLeftCenter)
self.leftBaseNode.show()
self.fishActor = None
self.actorAnim = { }
self.scaleSize = {
InventoryType.Collection_Set11_Part1: 0.0598,
InventoryType.Collection_Set11_Part2: 0.055,
InventoryType.Collection_Set11_Part3: 0.12,
InventoryType.Collection_Set11_Part4: 0.0864,
InventoryType.Collection_Set11_Part5: 0.08 }
self.meterFrame = DirectFrame(parent = self.leftBaseNode, frameSize = (-0.299, 0.299, -1.0, 0.0), frameColor = (1.0, 1.0, 1.0, 0.0), relief = None, state = DGG.DISABLED, pos = (1.0, 0.0, -0.450), hpr = (0, 0, 0), scale = (1.3, 0.0, 1.3), image = self.guiImage.find('**/pir_t_gui_fsh_meter'), image_scale = (0.200, 0.0, 0.800000), image_pos = (0, 0, 0), text = '', textMayChange = 1, text_scale = PiratesGuiGlobals.TextScaleTitleLarge, text_pos = (-0.550000, 0.100), text_shadow = PiratesGuiGlobals.TextShadow)
self.UICompoments['meterFrame'] = self.meterFrame
self.fishingRod = DirectFrame(parent = self.meterFrame, frameSize = (-0.299, 0.299, -1.0, 0.0), relief = None, state = DGG.DISABLED, pos = FishingGlobals.fishingRodScreenPosition, image = self.guiImage.find('**/pir_t_gui_fsh_fullRod'), image_scale = (1.0, 0.0, 0.125), image_pos = (0.200, 0, 0))
self.fishingRod.setR(FishingGlobals.fishingRodInitSlope)
self.UICompoments['fishingRod'] = self.fishingRod
base.loadingScreen.tick()
self.fishingHandleBaseFrame = DirectFrame(parent = self.uiBaseNode, frameSize = (-0.299, 0.299, -1.5, 1.5), frameColor = (1.0, 1.0, 1.0, 0.0), relief = None, state = DGG.DISABLED, pos = (0.0, 0.0, 0.0), hpr = (0, 0, 0), scale = (0.71, 0.0, 0.71), image = self.guiImage.find('**/pir_t_gui_fsh_partialRod'), image_scale = (3.78, 0.0, 1.89), image_pos = (0, 0, 0), image_hpr = (0.0, 0.0, 0))
self.fishingHandleBaseFrame.hide()
self.UICompoments['fishingHandleBaseFrame'] = self.fishingHandleBaseFrame
self.fishingHandle = DirectFrame(parent = self.fishingHandleBaseFrame, frameSize = (-0.08, 0.08, -0.200, 0.200), relief = None, state = DGG.DISABLED, pos = (-0.100, 0.0, -0.050000), hpr = (0, 0, 0), image = self.guiImage.find('**/pir_t_gui_fsh_handleArm'), image_scale = (1.0, 0.0, 1.0), image_pos = (-0.042000, 0, -0.115), image_hpr = (0.0, 0.0, 0))
self.UICompoments['fishingHandle'] = self.fishingHandle
self.arrowImage = DirectFrame(parent = self.fishingHandleBaseFrame, frameSize = (-0.4, 0.4, -0.4, 0.4), relief = None, state = DGG.DISABLED, pos = (0.0, 0.0, 0.0), hpr = (0, 0, 0), scale = (1.2, 0.0, 1.2), image = self.guiImage.find('**/pir_t_gui_fsh_arrow'), image_scale = (1.0, 0.0, 1.0), image_pos = (0.0, 0, 0.0), image_hpr = (0.0, 0.0, 0.0))
self.arrowImage.hide()
self.UICompoments['arrowImage'] = self.arrowImage
btnGeom = (self.guiImage.find('**/pir_t_gui_fsh_handle'), self.guiImage.find('**/pir_t_gui_fsh_handle'), self.guiImage.find('**/pir_t_gui_fsh_handleOn'))
self.fishingHandleButton = GuiButton(pos = (-0.299, 0, -0.550000), hpr = (0, 0, 0), scale = 0.450, image = btnGeom, image_pos = (0, 0, 0), image_scale = 1.0, sortOrder = 2)
self.fishingHandleButton.bind(DGG.B1PRESS, self.handleButtonClicked)
self.fishingHandleButton.reparentTo(self.fishingHandle)
self.UICompoments['fishingHandleButton'] = self.fishingHandleButton
self.fishingHandleBaseFrame.setTransparency(TransparencyAttrib.MAlpha)
self.meterFrame.setTransparency(TransparencyAttrib.MAlpha)
self.lineOneTransitTextNode = TextNode('lineOneTransitText')
self.lineOneTransitTextNode.setFont(PiratesGlobals.getPirateFont())
self.lineOneTransitTextNode.setText('')
self.lineOneTransitTextNode.setAlign(TextNode.ACenter)
self.lineOneTransitTextNode.setTextColor(1.0, 1.0, 1.0, 0.5)
self.lineOneTransitTextNodePath = NodePath(self.lineOneTransitTextNode)
self.lineOneTransitTextNodePath.setPos(0.0, 0.0, -0.800000)
self.lineOneTransitTextNodePath.setScale(0.348, 0.348, 0.348)
self.lineOneTransitTextNodePath.reparentTo(self.uiBaseNode)
self.lineOneTransitTextNodePath.hide()
self.UICompoments['lineOneTransitText'] = self.lineOneTransitTextNodePath
self.lineTwoTransitTextNode = TextNode('lineTwoTransitText')
self.lineTwoTransitTextNode.setFont(PiratesGlobals.getPirateFont())
self.lineTwoTransitTextNode.setText('')
self.lineTwoTransitTextNode.setAlign(TextNode.ACenter)
self.lineTwoTransitTextNode.setTextColor(1.0, 1.0, 1.0, 0.5)
self.lineTwoTransitTextNodePath = NodePath(self.lineTwoTransitTextNode)
self.lineTwoTransitTextNodePath.setPos(-0.4, 0.0, -0.946)
self.lineTwoTransitTextNodePath.setScale(0.12, 0.12, 0.12)
self.lineTwoTransitTextNodePath.reparentTo(self.uiBaseNode)
self.lineTwoTransitTextNodePath.hide()
self.UICompoments['lineTwoTransitText'] = self.lineTwoTransitTextNodePath
base.loadingScreen.tick()
self.test_guiImage = loader.loadModel('models/gui/toplevel_gui')
self.buttonIcon = (self.test_guiImage.find('**/treasure_chest_closed'), self.test_guiImage.find('**/treasure_chest_closed'), self.test_guiImage.find('**/treasure_chest_closed_over'))
self.winImagePanel = GuiPanel.GuiPanel('', 2.60, 1.89, True)
self.winImagePanel.setPos(-1.3, 0.0, -0.946)
self.winImagePanel.reparentTo(self.uiBaseNode)
self.winImagePanel.background = OnscreenImage(parent = self.winImagePanel, scale = (2.39, 0, 1.8), image = self.guiImage.find('**/pir_t_gui_fsh_posterBackground'), hpr = (0, 0, 0), pos = (1.3, 0, 0.946))
self.winImagePanel.setBin('gui-popup', -4)
self.winTitleTextNode = TextNode('winTitleTextNode')
self.winTitleTextNode.setText('Congratulations!')
self.winTitleTextNode.setAlign(TextNode.ACenter)
self.winTitleTextNode.setFont(PiratesGlobals.getPirateFont())
self.winTitleTextNode.setTextColor(0.230, 0.089, 0.0299, 1.0)
self.winTitleTextNodePath = NodePath(self.winTitleTextNode)
self.winTitleTextNodePath.setPos(1.35, 0.0, 1.66)
self.winTitleTextNodePath.setScale(0.179)
self.winTitleTextNodePath.reparentTo(self.winImagePanel)
self.wholeStoryTextNode = TextNode('storyTextNode')
self.wholeStoryTextNode.setText('')
self.wholeStoryTextNode.setWordwrap(19.0)
self.wholeStoryTextNode.setTextColor(0.230, 0.089, 0.0299, 1.0)
self.wholeStoryTextNodePath = NodePath(self.wholeStoryTextNode)
self.wholeStoryTextNodePath.setPos(0.33, 0.0, 1.63)
self.wholeStoryTextNodePath.setScale(0.050000)
self.wholeStoryTextNodePath.reparentTo(self.winImagePanel)
self.winImagePanel.closeButton['command'] = self.closeDialogGotNextState
self.winImagePanel.closeButton['extraArgs'] = [
'winImagePanel',
'FarewellLegendaryFish',
False]
self.UICompoments['winImagePanel'] = self.winImagePanel
self.winImagePanel.hide()
self.luiCloseDialogSequence = Sequence()
self.arrowImageRotationInterval = LerpHprInterval(self.arrowImage, 2.2, self.arrowImage.getHpr() + Point3(0.0, 0.0, 280.0), self.arrowImage.getHpr())
self.luiArrowRotatingSequence = Sequence(Func(self.showGui, [
'arrowImage']), Parallel(Func(self.arrowImageRotationInterval.start), Wait(2.2)), Func(self.hideGui, [
'arrowImage']), Func(self.arrowImage.setHpr, self.arrowImage.getHpr() + Point3(0.0, 0.0, 5.0)), name = self.gameObject.distributedFishingSpot.uniqueName('luiArrowRotatingSequence'))
self.lineOneColorChange = LerpColorScaleInterval(self.lineOneTransitTextNodePath, FishingGlobals.legendaryTransitionTextDuration, (1.0, 1.0, 1.0, 0.0), (1.0, 1.0, 1.0, 1.0), blendType = 'easeOut')
self.lineOnePosChange = LerpPosInterval(self.lineOneTransitTextNodePath, FishingGlobals.legendaryTransitionTextDuration, (0.0, 0.0, -0.200), (0.0, 0.0, -0.800000), blendType = 'easeOut')
self.lineTwoCholorChange = LerpColorScaleInterval(self.lineTwoTransitTextNodePath, FishingGlobals.legendaryTransitionTextDuration, (1.0, 1.0, 1.0, 1.0), (1.0, 1.0, 1.0, 1.0), blendType = 'easeOut')
self.lineTwoPosChange = LerpPosInterval(self.lineTwoTransitTextNodePath, FishingGlobals.legendaryTransitionTextDuration, (0.0, 0.0, -0.320), (0.0, 0.0, -0.946), blendType = 'easeOut')
self.transitionTextMovingSequence = Sequence(Func(self.lineOneTransitTextNodePath.show), Func(self.lineTwoTransitTextNodePath.show), Parallel(self.lineOnePosChange, self.lineTwoPosChange, self.lineOneColorChange, self.lineTwoCholorChange), Func(self.lineOneTransitTextNodePath.hide), Func(self.lineTwoTransitTextNodePath.hide), name = self.gameObject.distributedFishingSpot.uniqueName('transitionTextMovingSequence'))
self.meterFadeInInterval = Sequence(Func(self.meterFrame.show), LerpColorScaleInterval(self.meterFrame, FishingGlobals.legendaryTransitionTextDuration, colorScale = (1.0, 1.0, 1.0, 1.0), startColorScale = (1.0, 1.0, 1.0, 0.0), blendType = 'easeOut'), name = 'FadeInLegendaryMeter')
self.meterFadeOutInterval = Sequence(LerpColorScaleInterval(self.meterFrame, FishingGlobals.legendaryTransitionTextDuration, colorScale = (1.0, 1.0, 1.0, 0.0), startColorScale = (1.0, 1.0, 1.0, 1.0), blendType = 'easeOut'), Func(self.meterFrame.hide), name = 'FadeOutLegendaryMeter')
self.rodFadeInInterval = Sequence(Func(self.fishingHandleBaseFrame.show), LerpColorScaleInterval(self.fishingHandleBaseFrame, FishingGlobals.legendaryTransitionTextDuration, colorScale = (1.0, 1.0, 1.0, 1.0), startColorScale = (1.0, 1.0, 1.0, 0.0), blendType = 'easeOut'), name = 'FadeInLegendaryRodInterface')
self.rodFadeOutInterval = Sequence(LerpColorScaleInterval(self.fishingHandleBaseFrame, FishingGlobals.legendaryTransitionTextDuration, colorScale = (1.0, 1.0, 1.0, 0.0), startColorScale = (1.0, 1.0, 1.0, 1.0), blendType = 'easeOut'), Func(self.fishingHandleBaseFrame.hide), name = 'FadeOutLegendaryRodInterface')
base.loadingScreen.tick()
smallScale = self.fishingHandleButton['scale']
bigScale = self.fishingHandleButton['scale'] * 1.2
self.buttonGrowUpInterval = LerpScaleInterval(self.fishingHandleButton, 1.0, bigScale, smallScale)
self.luiFightTransitSequence = Sequence(Parallel(Func(self.fishingHandleBaseFrame.show), Func(self.meterFadeOutInterval.start), Func(self.rodFadeInInterval.start), Func(self.buttonGrowUpInterval.start)), Wait(1.0), Func(self.meterFrame.hide), name = self.gameObject.distributedFishingSpot.uniqueName('luiFightTransitSequence'))
self.luiReelTransitSequence = Sequence(Parallel(Func(self.fishingHandleBaseFrame.show), Func(self.meterFadeOutInterval.start), Func(self.rodFadeInInterval.start)), Wait(1.0), Func(self.meterFrame.hide), name = self.gameObject.distributedFishingSpot.uniqueName('luiReelTransitSequence'))
self.luiStruggleTransitSequence = Sequence(Parallel(Func(self.meterFrame.show), Func(self.resetFishingRod), self.meterFadeInInterval, self.rodFadeOutInterval), Wait(1.0), Func(self.fishingHandleBaseFrame.hide), name = self.gameObject.distributedFishingSpot.uniqueName('luiStruggleTransitSequence'))
self.meterFadeOutInterval.start()
self.rodFadeOutInterval.start()
self.hideAllGUI()
base.loadingScreen.endStep('LegendaryGameGUI')
def hideAllGUI(self):
self.uiBaseNode.reparentTo(hidden)
self.leftBaseNode.reparentTo(hidden)
def showAllGUI(self):
self.uiBaseNode.reparentTo(aspect2d)
self.leftBaseNode.reparentTo(base.a2dLeftCenter)
def hideGui(self, nameList):
for ui in nameList:
self.UICompoments[ui].hide()
def showGui(self, nameList):
for ui in nameList:
self.UICompoments[ui].show()
def destroy(self):
self.arrowImageRotationInterval.pause()
self.arrowImageRotationInterval.clearToInitial()
self.luiArrowRotatingSequence.pause()
self.luiArrowRotatingSequence.clearToInitial()
self.luiCloseDialogSequence.pause()
self.luiCloseDialogSequence.clearToInitial()
totalKey = self.UICompoments.keys()
for iKey in totalKey:
del self.UICompoments[iKey]
self.fishingHandle = None
self.fishingHandleButton = None
self.fishingRod.remove_node()
self.leftBaseNode.remove_node()
self.uiBaseNode.remove_node()
if self.fishActor:
self.fishActor.destroy()
self.fishActor = None
def handleButtonClicked(self, mouseKey):
if self.gameObject.lfgFsm.getCurrentOrNextState() in [
'CatchIt']:
self.gameObject.lfgFsm.request('Transition', 'Struggle')
self.gameObject.sfx['legendaryGreen'].play()
def setTransitionText(self, state):
self.lineOneTransitTextNode.setText(PLocalizer.LegendaryFishingGui[state][0])
self.lineTwoTransitTextNode.setText(PLocalizer.LegendaryFishingGui[state][1])
def resetInterval(self):
self.transitionTextMovingSequence.pause()
self.transitionTextMovingSequence.clearToInitial()
self.lineOneColorChange.pause()
self.lineOneColorChange.clearToInitial()
self.lineOnePosChange.pause()
self.lineOnePosChange.clearToInitial()
self.lineTwoCholorChange.pause()
self.lineTwoCholorChange.clearToInitial()
self.lineTwoPosChange.pause()
self.lineTwoPosChange.clearToInitial()
self.luiReelTransitSequence.pause()
self.luiReelTransitSequence.clearToInitial()
self.luiStruggleTransitSequence.pause()
self.luiStruggleTransitSequence.clearToInitial()
self.luiFightTransitSequence.pause()
self.luiFightTransitSequence.clearToInitial()
self.buttonGrowUpInterval.pause()
self.buttonGrowUpInterval.clearToInitial()
self.meterFadeOutInterval.pause()
self.meterFadeOutInterval.clearToInitial()
self.rodFadeInInterval.pause()
self.rodFadeInInterval.clearToInitial()
self.meterFadeInInterval.pause()
self.meterFadeInInterval.clearToInitial()
self.rodFadeOutInterval.pause()
self.rodFadeOutInterval.clearToInitial()
def fightingTransit(self):
self.luiFightTransitSequence.start()
def reelTransit(self):
self.luiReelTransitSequence.start()
def struggleTransit(self):
self.luiStruggleTransitSequence.start()
def resetFishingRod(self):
self.fishingRod.setR(FishingGlobals.fishingRodInitSlope)
def showWinImage(self, fish):
self.hideGui([
'meterFrame',
'fishingHandleBaseFrame'])
result = fish.myData['name'].split(' ')
fileName = str(result[0]).capitalize()
imgName = 'pir_t_gui_fsh_render%s' % fileName
self.actorAnim['swimIdleOpposite'] = 'models/char/pir_a_gam_fsh_%s_%s.bam' % (fish.myData['model'], 'swimIdleOpposite')
self.fishActor = BlendActor('models/char/pir_r_gam_fsh_%s.bam' % fish.myData['model'], self.actorAnim, FishingGlobals.defaultFishBlendTime, FishingGlobals.fishBlendTimeDict)
self.fishActor.setPlayRate(fish.myData['speed'] * fish.myData['swimAnimationMultiplier'], 'swimIdleOpposite')
self.fishActor.changeAnimationTo('swimIdleOpposite')
self.fishActor.reparentTo(self.winImagePanel)
self.fishActor.setScale(self.scaleSize[fish.myData['id']])
self.fishActor.setPos(1.7, 0, 1.0)
self.fishActor.setHpr(0, 0, 35)
self.fishActor.setDepthWrite(True)
self.fishActor.setDepthTest(True)
self.wholeStoryTextNode.setText(PLocalizer.LegendSelectionGui['wholeStory'][fish.myData['id']])
self.winImagePanel.show()
def closeDialogGotNextState(self, object, targetState, ifFadeInAgain):
if self.fishActor:
self.fishActor.destroy()
self.fishActor = None
self.luiCloseDialogSequence = Sequence(Func(self.gameObject.distributedFishingSpot.fadeOut), Wait(0.4), Func(self.UICompoments[object].hide), Func(self.gameObject.lfgFsm.request, targetState), name = self.gameObject.distributedFishingSpot.uniqueName('luiCloseDialogSequence'))
self.luiCloseDialogSequence.start()
def updateStruggleTimerText(self, time, percent):
self.meterFrame['text'] = str(time)
self.meterFrame['text_fg'] = (1.0 - percent, percent, 0.0, 1.0)
authors: ["brandoncarden12345@gmail.com"]
author_id: brandoncarden12345@gmail.com

---

Row 4

blob_id: 27bce40eb00843c6f791dc8adf01ae91b3eeee8a
directory_id: ce76b3ef70b885d7c354b6ddb8447d111548e0f1
path: /hand/feel_number.py
content_id: 9afaf5fa9d6f81f5b729c8617739ec9961ab3214
detected_licenses: []
license_type: no_license
repo_name: JingkaiTang/github-play
snapshot_id: 9bdca4115eee94a7b5e4ae9d3d6052514729ff21
revision_id: 51b550425a91a97480714fe9bc63cb5112f6f729
branch_name: refs/heads/master
visit_date: 2021-01-20T20:18:21.249162
revision_date: 2016-08-19T07:20:12
committer_date: 2016-08-19T07:20:12
github_id: 60834519
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 231
extension: py
content:
#! /usr/bin/env python

def first_woman(str_arg):
    old_company_and_old_person(str_arg)
    print('large_part')

def old_company_and_old_person(str_arg):
    print(str_arg)

if __name__ == '__main__':
    first_woman('company')
authors: ["jingkaitang@gmail.com"]
author_id: jingkaitang@gmail.com

---

Row 5

blob_id: 6a1e0d8c09d12eef9a7b587992ab35cc802c6aac
directory_id: e4b683644435aaf83ab8fe7693b97f1e105fa64d
path: /Escritorio/PCM raspberry - python/primer lab/logo1.py
content_id: 2de57d5658dd28cc0daa6711a111029e3964585f
detected_licenses: []
license_type: no_license
repo_name: brayanjav28/brayanjav28.github.com
snapshot_id: 138afed08f79c131ae43f25ac117df687354d04f
revision_id: 9bb40c24e1b4ef38654b3b144a49de9a84641efd
branch_name: refs/heads/main
visit_date: 2023-07-14T13:40:09.865405
revision_date: 2021-08-27T19:38:47
committer_date: 2021-08-27T19:38:47
github_id: 334521700
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 87847
extension: py
content:
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.12.3)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x51\xd0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x02\x58\x00\x00\x00\xc3\x08\x06\x00\x00\x00\x5e\x58\x6f\x75\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x2e\x23\x00\x00\x2e\x23\
\x01\x78\xa5\x3f\x76\x00\x00\x01\x36\x69\x43\x43\x50\x50\x68\x6f\
\x74\x6f\x73\x68\x6f\x70\x20\x49\x43\x43\x20\x70\x72\x6f\x66\x69\
\x6c\x65\x00\x00\x78\xda\xad\x8e\xb1\x4a\xc3\x50\x14\x40\xcf\x8b\
\xa2\xe2\x50\x2b\x04\x71\x70\x78\x93\x28\x28\xb6\xea\x60\xc6\xa4\
\x2d\x45\x10\xac\xd5\x21\xc9\xd6\xa4\xa1\x4a\x69\x12\x5e\x5e\xd5\
\x7e\x84\xa3\x5b\x07\x17\x77\xbf\xc0\xc9\x51\x70\x50\xfc\x02\xff\
\x40\x71\xea\xe0\x10\x21\x83\x83\x08\x9e\xe9\xdc\xc3\xe5\x72\xc1\
\xa8\xd8\x75\xa7\x61\x94\x61\x10\x6b\xd5\x6e\x3a\xd2\xf5\x7c\x39\
\xfb\xc4\x0c\x53\x00\xd0\x09\xb3\xd4\x6e\xb5\x0e\x00\xe2\x24\x8e\
\xf8\xc1\xe7\x2b\x02\xe0\x79\xd3\xae\x3b\x0d\xfe\xc6\x7c\x98\x2a\
\x0d\x4c\x80\xed\x6e\x94\x85\x20\x2a\x40\xff\x42\xa7\x1a\xc4\x18\
\x30\x83\x7e\xaa\x41\xdc\x01\xa6\x3a\x69\xd7\x40\x3c\x00\xa5\x5e\
\xee\x2f\x40\x29\xc8\xfd\x0d\x28\x29\xd7\xf3\x41\x7c\x00\x66\xcf\
\xf5\x7c\x30\xe6\x00\x33\xc8\x7d\x05\x30\x75\x74\xa9\x01\x6a\x49\
\x3a\x52\x67\xbd\x53\x2d\xab\x96\x65\x49\xbb\x9b\x04\x91\x3c\x1e\
\x65\x3a\x1a\x64\x72\x3f\x0e\x13\x95\x26\xaa\xa3\xa3\x2e\x90\xff\
\x07\xc0\x62\xbe\xd8\x6e\x3a\x72\xad\x6a\x59\x7b\xeb\xfc\x33\xae\
\xe7\xcb\xdc\xde\x8f\x10\x80\x58\x7a\x2c\x5a\x41\x38\x54\xe7\xdf\
\x2a\x8c\x9d\xdf\xe7\xe2\xc6\x78\x19\x0e\x6f\x61\x7a\x52\xb4\xdd\
\x2b\xb8\xd9\x80\x85\xeb\xa2\xad\x56\xa1\xbc\x05\xf7\xe3\x2f\xc0\
\xc6\x4f\xfd\xe8\x5a\x4f\x62\x00\x00\x00\x20\x63\x48\x52\x4d\x00\
\x00\x7a\x25\x00\x00\x80\x83\x00\x00\xf9\xff\x00\x00\x80\xe8\x00\
\x00\x52\x08\x00\x01\x15\x58\x00\x00\x3a\x97\x00\x00\x17\x6f\xd7\
\x5a\x1f\x90\x00\x00\x50\x14\x49\x44\x41\x54\x78\xda\xec\x5d\x79\
\xfc\x57\x43\xf7\x6f\x99\x50\x96\x1a\x6d\x52\x54\x28\xc9\x12\x09\
\xc9\x9a\x64\xa9\x24\x5b\x92\x25\x1e\x51\x44\x96\xec\x7b\xc8\x9a\
\x2d\x5b\x4a\x76\x8f\x2c\x85\x44\x52\x9c\x2c\x0f\x2d\x14\x6d\x42\
\x4a\xa9\x08\x21\x4a\xa4\xe5\xf7\xc7\x3d\xdf\x9f\x8f\xdb\xbd\x77\
\xce\x99\x99\x7b\x3f\xcb\xf7\xfc\xf1\x7e\xbd\xe2\xfb\xb9\x33\x67\
\xf6\xf7\x9c\x39\x4b\x85\xf5\xeb\xd7\x57\x10\x08\x04\x02\x81\x40\
\x20\x10\xf8\x83\x74\x82\x40\x20\x10\x08\x04\x02\x81\x10\x2c\x81\
\x40\x20\x10\x08\x04\x02\x21\x58\x02\x81\x40\x20\x10\x08\x04\xe5\
\x8b\x60\x81\xd2\x02\xff\xa8\x08\x4a\x6f\x0d\x4a\xb7\x06\xa5\x8f\
\x02\xa5\xfb\x80\xd2\xd7\x82\xd2\xf7\x82\xd2\x4f\x81\xd2\xaf\x80\
\xd2\x63\x41\x69\x00\xa5\xff\x07\x4a\x4f\x02\xa5\x3f\x02\xa5\x3f\
\x00\xa5\xdf\x01\xa5\xdf\x00\xa5\x47\x80\xd2\xc3\x40\xe9\x81\xa0\
\xf4\x95\xa0\xf4\x59\xa0\xf4\xe1\xa0\x74\x4b\x50\xba\x96\xf4\xb1\
\x40\x20\x10\x08\x04\x85\x07\x21\x58\xfe\xb0\x35\x12\x9f\xcb\x40\
\xe9\x47\x41\xe9\x77\x41\xe9\x45\xa0\xf4\x1a\x50\x7a\x7d\x4a\x58\
\x05\x4a\xcf\x05\xa5\xc7\x83\xd2\x83\x40\xe9\xf3\x40\xe9\xfd\x40\
\xe9\x9a\x32\x1e\x02\x81\x40\x20\x10\x08\xc1\x2a\x36\xec\x0e\x4a\
\x9f\x0d\x4a\x3f\x87\x5a\xa7\x85\x29\x13\x29\x1b\xe2\xf5\x35\x6a\
\xc5\x86\x82\xd2\x3d\x40\xe9\x1d\x65\xdc\x04\x02\x81\x40\x20\x10\
\x82\x55\x48\xd8\x02\x94\xee\x02\x4a\x0f\x01\xa5\x67\x15\x10\x91\
\xe2\x62\x1a\x3e\x4f\xb6\x03\xa5\xab\xc8\xb8\x0a\x04\x02\x81\x40\
\x20\x04\x2b\x6b\x1c\x0a\x4a\xdf\x09\x4a\x8f\x02\xa5\x97\x17\x31\
\xa9\x8a\xc3\x77\xa0\xf4\x8b\xa0\xf4\x00\x50\xba\x95\x8c\xb7\x40\
\x20\x10\x08\x04\x42\xb0\xd2\x42\x13\x24\x1c\xb3\x4b\x90\x50\x99\
\x30\x1d\x94\xbe\x04\x94\xde\x56\xe6\x81\x40\x20\x10\x08\x04\x42\
\xb0\x5c\xb1\x13\xda\x53\xbd\x5e\x0e\x49\x55\x14\xd6\x81\xd2\xff\
\x05\xa5\xcf\x10\xb2\x25\x10\x08\x04\x02\x81\x10\x2c\x2e\x0e\x42\
\x23\xf5\xb4\x09\xcb\x52\x50\x7a\x22\x04\x21\x19\x06\x81\xd2\x57\
\x83\xd2\xe7\x82\xd2\x27\x80\xd2\x87\x81\xd2\x07\x80\xd2\x7b\x81\
\xd2\x7b\x80\xd2\x2d\x10\x7b\x40\x10\xda\xe1\x60\x50\xba\x03\x28\
\xdd\x1d\x94\xbe\x00\x94\xbe\x1e\x94\x7e\x04\xc9\xe0\x34\x50\x7a\
\x59\xca\xb2\xff\x8d\x76\x67\x7b\xcb\x62\x11\x08\x04\x02\x81\x40\
\x08\x56\x1c\x36\x85\x20\x1e\xd5\x9b\x9e\x89\xc8\xef\xa0\xf4\x1c\
\x08\x42\x33\x0c\x01\xa5\xfb\x82\xd2\x87\x80\xd2\x8d\x53\x36\x26\
\xaf\x0a\x4a\xef\x8c\x24\xec\x0a\x50\xfa\x69\x50\xfa\x43\x50\xfa\
\x2b\x24\x47\x3e\xdb\xf8\x3c\x04\x61\x28\x64\xe1\x08\x04\x02\x81\
\x40\x20\x04\xeb\xff\x89\xc8\x55\xa0\xf4\x8f\x9e\xc8\xc6\x1f\xa0\
\xf4\xdb\x58\x66\x5b\x50\xba\x7a\x01\xb6\xb9\x1e\x28\x7d\x24\x28\
\x7d\x07\x92\xae\xb5\x9e\xda\xfe\x35\x28\xdd\x4f\x16\x8f\x40\x20\
\x10\x08\x04\xe5\x97\x60\x55\x46\x5b\xa2\x6f\x3d\x10\x8b\x85\xa8\
\x9d\x3a\x11\x8a\x33\x90\x67\x03\x08\xe2\x61\x3d\xeb\xe9\x69\x71\
\x3a\x28\x7d\x92\x2c\x22\x81\x40\x20\x10\x08\xca\x0f\xc1\x52\x10\
\xa4\x95\x59\xe4\x48\x22\xc6\x20\x29\x69\x06\x4a\x57\x2a\xa1\xfe\
\xa9\x06\x4a\xef\x82\x76\x5d\xef\x3a\xf6\xd1\x5c\x50\xfa\x22\x59\
\x4c\x02\x81\x40\x20\x10\x94\x36\xc1\x3a\x0c\x94\x9e\xef\x40\x18\
\x3e\x07\xa5\xfb\x83\xd2\xdb\x95\xa3\x89\xd0\x12\x82\x98\x5f\x8b\
\xc1\x2d\x88\xe9\xfe\xb2\xa8\x04\x02\x81\x40\x20\x28\x2d\x82\xd5\
\x06\x82\xc4\xc9\x36\xe4\xe0\x27\x50\xfa\x46\x50\xfa\x88\x72\x3e\
\x21\x36\x41\x27\x80\xfb\x41\xe9\x15\x96\x7d\x39\x12\xbd\x20\x65\
\x81\x09\x04\x02\x81\x40\x08\x56\x91\x37\x64\x90\x83\x5d\xd5\x39\
\xa0\xf4\xe6\x29\xcb\x57\x1d\x9f\x19\x8f\x40\x9b\xb0\xed\x1d\xca\
\xaa\x08\x41\x5e\xc1\x7a\x29\xcb\x5c\x0b\x94\xbe\xdc\xc1\x31\xe0\
\x16\x59\x60\x02\x81\x40\x20\x10\x82\x55\x9c\x0d\xe8\x0e\x4a\x7f\
\x66\x71\xf8\xbf\x01\x4a\xff\x07\xfc\x87\x50\xd8\x18\x94\x3e\x05\
\xbd\xec\xee\x00\xa5\x47\xa0\x31\xf8\x6f\xa1\xfa\xcf\x76\xa8\x63\
\x0b\x08\x12\x4b\xaf\x45\xfb\x27\x00\xa5\x1f\x82\x20\xbe\xd6\xd9\
\xa0\x74\x7b\x50\x7a\x2b\x8f\x6d\xda\x0c\x49\x28\x58\xf4\xf3\xfb\
\xa0\x74\x57\x59\x68\x02\x81\x40\x20\x10\x82\x55\x3c\x4f\x59\x23\
\x2c\x89\xd5\xbe\x29\xca\xb5\x31\x51\x8e\x3e\x0e\x75\x6c\x0f\xb4\
\x00\xa1\x13\x41\xe9\x7b\x40\xe9\x4e\x1e\x35\x74\xed\xd1\xd6\x8a\
\xdb\xef\x43\xd0\xa3\x53\x16\x9d\x40\x20\x10\x08\x84\x60\x15\x28\
\x8e\x06\xa5\xbf\x61\x1e\xf0\x1f\x64\xa8\x49\x79\x87\x20\xcf\x95\
\x0e\xe5\xef\x6f\x41\x70\x7e\x81\x20\x69\xf5\xd9\x10\x44\x65\x57\
\x8e\x6d\x3c\xc3\x82\x68\x7d\x09\x4a\xb7\x93\x45\x27\x10\x08\x04\
\x02\x21\x58\x85\x85\x8a\xa0\xf4\xa3\x16\x9e\x6d\x47\x66\x2c\xe7\
\x40\x82\x5c\x77\x38\x94\xdf\x15\xdc\x63\x58\x2d\x02\xa5\x07\x83\
\x7b\x0a\x9c\xe3\x40\xe9\x05\xcc\xba\x1f\x90\x85\x27\x10\x08\x04\
\x02\x21\x58\x85\x81\x7a\xa0\xf4\x27\xc0\x8b\xb4\x7e\x7b\x9e\x64\
\x3d\x93\x20\xdf\x23\x0e\xe5\xf7\x05\xbf\x29\x70\x3e\x46\x99\x6b\
\x59\xca\x53\x0d\xbd\x0e\xd7\x31\xea\x7c\x13\x94\xae\x23\x0b\x50\
\x20\x10\x08\x04\x42\xb0\xf2\x87\x0b\x91\x30\x51\x0f\xef\x47\x41\
\xe9\xda\x96\x75\x1d\x86\x87\xbf\x4b\x98\x81\x56\x04\x19\x5f\x74\
\x28\xff\x4e\x48\x27\xb1\xf3\x1f\xa0\xf4\x78\x50\xfa\x74\x08\x72\
\x36\x72\xe5\xda\x0e\x94\x7e\x01\x78\xf9\x1b\x4f\x97\x45\x28\x10\
\x08\x04\x02\x21\x58\xd9\xe3\x1a\xe6\x81\xdd\xc9\xb2\x9e\x0e\x68\
\x14\x5e\x56\xd6\xf5\x8e\x5e\x77\x7f\x11\x34\x38\xb6\xe5\x3f\x9d\
\x12\xc1\xca\x85\x76\x90\xef\x34\xf4\x72\xa4\xd6\x75\xb1\x2c\x44\
\x81\x40\x20\x10\x08\xc1\xca\x06\x35\x50\x9b\x42\x3d\xa4\x07\x00\
\x3f\x2e\x54\x75\x08\x92\x35\x4f\x8f\x28\xef\x3b\x47\x43\x70\xd3\
\x73\xe6\xc4\x94\x8c\xe8\x7f\x8f\x69\x0f\x07\xd7\x7b\x18\xbf\xc6\
\xa0\xf4\xdd\x8c\x3a\x47\xcb\x62\x14\x08\x04\x02\x81\x10\xac\x74\
\xb1\x35\x04\xe9\x6a\x28\x07\xf3\x72\x50\xba\x0b\xb3\xfc\xda\x10\
\xc4\x75\xfa\xc9\x50\xf6\x81\x0e\x6d\x78\x1c\xcc\xe9\x78\x6c\xcb\
\x4e\xea\x9b\xaf\x72\x08\xce\x45\x10\x84\xa5\xe0\x44\x64\xff\x13\
\x82\x50\x13\xe1\x3a\xaf\x07\xa5\xef\x03\xa5\x37\xb2\xf0\x36\xa4\
\x3e\xef\x7e\x0c\x4a\xd7\x95\x45\x29\x10\x08\x04\x02\x21\x58\xfe\
\xd1\x19\x94\xfe\x95\x61\x6b\x65\x73\x20\x1f\x4c\x2c\xff\x6a\x87\
\x76\x5c\x6b\x28\x7b\x09\xd8\x25\x8f\xae\x0a\x41\xc8\x85\xb8\x72\
\x21\xe2\x9b\x9a\xa0\xf4\x41\xa0\xf4\xc9\x60\x4e\x7e\x3d\x24\xe2\
\xfb\x26\x21\xef\xc3\x1b\xd0\xb0\x9d\x2a\x73\x7d\x50\xfa\x15\x62\
\x9f\xff\x0c\x41\xca\x23\x59\x9c\x02\x81\x40\x20\x10\x82\xe5\x09\
\x1d\x18\x9a\x96\xcb\x1c\xeb\xfa\x94\x50\xc7\x74\x87\xf2\xcf\x35\
\x94\xfd\x1b\x3e\x51\x72\xcb\x6d\x08\x6e\x21\x10\x7e\x30\x7c\x1f\
\x95\x8b\xf1\xc3\x88\xdf\x2d\xc6\xf1\x4a\xcb\x9e\xee\x30\x59\x9c\
\x02\x81\x40\x20\x10\x82\x95\x4d\x68\x83\xb2\xdc\x81\x6d\x3d\xd4\
\x77\x32\xb1\x3e\x5b\x6f\xc2\xe3\x0c\xe5\xae\x05\xa5\xb7\xb1\x28\
\xb7\xb5\xa1\xdc\x73\x0d\x1e\x92\x49\xdf\xce\x8d\xf8\x66\x6f\xc3\
\x37\xcf\xa3\x4c\x54\xf9\x8f\x01\xa5\xbf\x27\xf6\xfd\x09\xb2\x40\
\x05\x02\x81\x40\x20\x04\xcb\x1e\xbd\x89\x07\xee\xbb\xe0\x2f\xe5\
\xcb\x26\x04\x4f\xbf\xf5\xa0\xf4\x30\xcb\xf2\x77\x25\x94\xbd\x4b\
\x0a\xc4\xed\xe0\x84\x6f\x6f\x33\x7c\xdb\x2d\xe2\x9b\xf7\x88\x63\
\x73\x23\xd0\x43\x63\x68\x50\x7a\x26\xb1\xdc\xb3\x4a\x60\x91\x6d\
\x8f\xcf\xac\x14\xec\x68\xa9\xd9\x74\xb1\x77\x6c\xc2\xc0\x76\xb2\
\x69\x0a\x04\x45\x07\x85\xeb\xb7\x91\x01\x4d\x40\xe9\x06\xd2\x5f\
\x89\xa8\xc6\xe8\xcb\xda\xf9\x26\x58\xd4\xa0\x99\x2f\x33\xca\xdc\
\x01\xbd\xf8\x26\x1b\x7e\x77\x25\xa1\xde\x5f\x63\x8c\xbe\x4d\xd8\
\x98\x60\x4b\xb6\x8f\x67\x4d\xdf\x4a\xc3\xb7\x49\xf6\x57\xdf\x41\
\x10\x29\x3f\xf7\xf7\x47\x31\xbd\x0f\x0f\x65\xb4\x63\x73\x08\x52\
\xf7\x50\xca\x3d\xa3\x88\x17\x63\x43\xe0\x7b\x71\xb6\xc8\x48\xb6\
\xca\xa0\xf4\x8f\x4c\xd9\x5e\x93\x0d\x56\x20\x28\x3a\xf4\x04\x5e\
\x5a\x39\xe9\xb3\x78\x0c\x62\xf4\xe5\xc0\x7c\x12\xac\x73\x89\x42\
\x5e\xc0\x24\x6c\xeb\x18\x1a\x90\x9f\x09\xf5\x9f\x6e\xd9\xbe\xf7\
\x1d\xb4\x4d\x71\xb8\x10\x92\x43\x34\xf4\x80\xe8\x88\xec\x47\x18\
\x64\xe9\x1b\x61\x4c\xcf\x09\xee\xfa\x9d\xa5\xd1\xfe\x4d\xc4\xf2\
\x7b\x17\xe9\x62\xec\xc8\x24\x30\xab\x2c\x3c\x35\x6d\xd1\xc4\x82\
\xfc\x0d\x28\xb1\xcd\x72\x1f\x24\xf0\xa7\x81\xd2\x3b\xcb\xe1\x21\
\x70\x40\x4b\x9c\x4b\x3d\x40\xe9\x66\x05\x26\xdb\x60\xc6\x1a\x7f\
\x41\xc6\x32\x11\x1f\x31\xfa\xf2\xda\x7c\x11\xac\xb3\x88\x02\xf6\
\x27\x96\xb7\x11\xc4\xe7\x00\xdc\x21\xe1\xbb\x7e\x29\x3e\x13\x0e\
\x49\xc1\x90\x9b\x62\x28\xbe\x12\x94\x1e\x87\xa4\xa9\x19\xf6\xcd\
\x33\x86\x6f\x9a\x84\xea\xb9\x98\x79\xf0\x3e\x14\xf3\xf4\x44\x69\
\xd3\xed\xc4\x3a\x4e\x2e\xc2\xc5\x78\x21\xb3\x1f\x3f\xca\x50\xb6\
\x23\x2d\x08\x56\xd7\x12\xd9\x24\x0f\x8c\xd9\x28\x87\x17\xe0\xe1\
\x28\x28\xfc\xb9\x14\xe5\x08\xf4\x3a\x6a\xb0\x0b\x41\xc6\xb1\x8c\
\x35\x7e\xb9\x8c\x69\x22\xbe\x63\xf4\x65\x8f\x7c\x10\xac\x63\x89\
\xc2\xb5\x23\x96\xd7\xca\xf0\xfc\xf5\x33\xc4\xa7\x7e\xd9\x8c\x20\
\xc7\xb7\x96\xed\x3c\xc7\x50\xee\xd1\x16\x65\xde\x61\x71\x28\x7e\
\x07\xc9\x79\x02\xdf\x0d\xd5\x51\x09\xcc\x31\xc2\x4c\xcf\x9d\xcb\
\xf0\xff\xb7\x27\xb6\x8b\xfa\x1c\xd9\xa5\xc8\x16\x23\x37\x39\xf9\
\xd0\x0c\x65\xbb\xcc\x62\x2e\x95\x82\x96\xe7\x0c\x42\x3b\xf7\x92\
\x83\x44\x40\xc0\xa9\x86\x79\xf4\x37\xd3\x74\x22\x2d\x2c\x66\xac\
\xf1\x0e\x32\xae\xb1\x68\xcc\xdc\x2f\x5b\x65\x4d\xb0\xf6\xf7\x78\
\x53\xae\x8a\x9e\x66\x94\xb4\x2c\xf7\x27\x94\xf3\x32\xe1\xfb\x03\
\x2c\xda\x7a\x70\x0a\xda\x80\x87\xc1\x7f\x5a\x9c\xf0\x06\x70\xab\
\x05\x81\xcb\xfd\xfe\xe6\xd0\xdf\xfb\x79\xd6\x6a\x16\xd3\xe1\xf7\
\x2e\xb3\x2f\xfb\x15\x30\xf9\x5b\x61\x69\x8f\x58\x48\xd8\x82\xe8\
\xdc\x32\x47\x0e\x13\x81\x27\xfb\xca\xbf\xf0\x22\x9f\x2f\x39\xb7\
\x66\xae\xf3\x26\x32\xb6\xb1\x38\x9c\xd1\x8f\x4b\x41\xe9\xea\x59\
\x12\xac\x46\x44\x1b\x14\xaa\x6d\x52\x6b\xe6\xc4\x89\x7b\x62\xaa\
\xef\xd9\xc8\xbe\x0c\x5b\x82\xd2\xab\x3d\x3f\x79\x8d\xf2\x4c\xae\
\x3e\x0d\x95\xbf\x8b\x45\x19\xf7\x13\x6e\x74\x4f\x11\xdb\xd7\x96\
\x78\xd0\x6f\x53\x04\x8b\xb1\x8a\x85\x26\x30\xcb\xd0\x14\x1f\x32\
\x65\x9b\x54\x02\x1b\x24\xe7\xf2\xd0\x5d\x0e\x14\x81\x83\x57\x76\
\x2e\x8e\xcb\xa3\x9c\xed\x19\x72\xfe\x80\xce\x2f\x32\xbe\xd1\xe0\
\x98\xce\xbc\x0d\x19\x86\x69\xa8\x0c\x4a\x2f\xf0\xec\x89\xa6\x40\
\xe9\x07\x99\x87\x44\xcd\x98\xb2\x28\x09\x94\x37\xb5\x68\xf7\x17\
\x09\xe5\x9d\x68\x51\xde\xc7\x9e\x09\xd6\xb1\xa1\xf2\x1f\xb3\x28\
\xe3\x3c\xfc\xb6\xb6\xe1\x77\x9f\xa0\xd6\xd1\xd4\xc6\x6e\xc4\x67\
\xdb\x42\xd7\xa6\x6c\x6f\xd1\x97\xed\x33\x92\xad\x52\xce\x33\x2e\
\x15\x8f\x97\xc0\x06\xc9\x59\x3f\xff\x95\x03\x45\x90\x80\xff\x72\
\xbc\xc9\xf2\x28\x67\x1f\xf1\x20\xf4\x86\x47\x18\x7d\xf9\x70\x56\
\x04\xab\x22\x28\x3d\xc5\x20\xcc\x3a\x50\xfa\x78\xcb\xf2\x5f\x64\
\x34\xfa\x9d\x98\x32\xea\x19\xec\x94\xd6\x23\x7b\xe5\xca\xf6\x52\
\x42\x79\xb3\xd1\xf8\x70\x2a\xfe\xfb\x2b\x50\x7a\x1e\x28\x3d\x1f\
\x94\xfe\x06\xff\x3d\x11\x82\x98\x5d\x65\x87\xe2\xb7\x1e\xc9\xd5\
\xea\xd0\x6d\x65\x3b\x08\x72\x11\x72\xcb\x69\xcc\xd0\x88\xcc\xc0\
\x67\xe2\x8a\x86\x7e\x3b\xbe\xc0\x0c\xc2\x6d\x70\x94\x45\x5f\xee\
\x91\x91\x6c\x3b\x5a\xc8\x76\x7d\x09\x6c\x90\x9c\x44\xe8\x23\xe5\
\x40\x11\x24\x60\x38\x63\x2e\x0d\xce\xa3\x9c\x0f\x31\xe4\x1c\x22\
\xe3\x9a\x88\xf7\x80\xa9\x78\xc8\x82\x60\x51\x06\xf8\x54\xe2\xad\
\x3b\xee\x6f\x4b\x19\x0d\xbf\x31\xa6\x0c\x13\x09\xb4\x79\x22\xb9\
\xd6\x03\x11\x2a\x0b\xbb\x50\x0b\x8d\x26\xa3\x7e\x33\x0d\x6f\x49\
\xf3\x1d\xdc\x71\x27\x5a\xc8\x36\x15\xbf\xe5\x68\x12\xff\x20\x6a\
\x9f\x28\x1e\x78\x77\x17\xf0\x62\xe4\x1a\x91\xff\x05\x4a\xd7\xc8\
\x48\xb6\xce\x0e\x9a\xca\x62\xc6\xab\x8c\xf6\xde\x2a\x07\x8a\x20\
\x01\x1c\x6d\xff\x45\x79\x94\x13\x18\x72\x5e\x28\xe3\x9a\xa8\x28\
\xe2\x98\x7c\xb4\xcb\x82\x60\x9d\x0d\x7e\x42\x31\x34\xc7\xe7\xb6\
\xb8\x54\x30\x4d\x99\x8d\x3f\x28\xa2\x8c\x4e\x60\xce\x1f\x58\x25\
\xa6\xfe\x16\x10\xc4\xd2\x99\x10\xf2\xca\x3b\xde\x91\x5c\xad\x41\
\x1b\x31\x93\x7d\xd4\x9d\xa1\x37\xf7\x81\x68\xaf\xb5\x12\x68\x91\
\xe4\x8f\xb1\x94\xaf\x1d\x28\xbd\x3b\xe3\xf7\x3f\x01\x2f\xfe\xd7\
\xf5\x84\x32\xfb\x14\xe8\x82\x7c\x9c\xd9\x97\x33\x33\x94\xed\x72\
\x8b\xb1\x2e\x05\x9b\xa4\x43\x19\xed\x6d\x2a\x87\x8a\xc0\x51\xcb\
\x9e\x6f\xef\xdb\x8a\xa0\xf4\x12\x86\x9c\x47\xca\xb8\x7a\x33\xf9\
\x68\x90\x36\xc1\x6a\x43\x10\xe2\x66\x42\x39\x2d\x43\x9a\x9b\x03\
\x63\x7e\xb7\x2b\xa3\xf1\xcb\x2c\x6d\x34\x72\x3d\xff\xea\x82\xd2\
\x97\xc4\x3c\x8d\xed\x84\xbf\xd9\xd7\x83\x06\xab\x09\xc1\x83\x21\
\x8e\x78\xae\x88\xf9\xfd\xd8\xd0\xef\xe6\x5b\xc8\x35\x1f\x82\xf0\
\x0c\x1c\xed\x61\x73\x8b\x79\x34\x04\x8a\xd3\xf3\x65\x32\xb3\x3f\
\xb3\x0c\xf0\xf7\x8c\xc5\x78\xb7\x29\x47\x4f\x3b\x57\xc9\x81\x22\
\xf0\x64\x9e\xf2\x70\x1e\xe5\xdb\x8e\xb9\xc6\xb7\x95\x31\x8d\xc5\
\xd1\x8c\x7e\x5c\x58\xf6\x5d\x9a\x04\x6b\x8e\x41\x88\x29\x84\x32\
\x6a\xa3\x67\x61\xd8\xd3\x30\xce\x55\xff\x46\x46\x27\x3c\x63\xe1\
\xc5\xf6\x06\x12\x9d\x97\x0d\xf6\x4a\x57\x62\x79\x3b\x79\x20\x58\
\xbb\x62\x59\xa7\x25\xfc\xe6\x10\xa6\xc7\xc3\x7e\x39\xbf\xeb\x60\
\x29\xd7\x44\x34\x5c\xa7\x3e\x7f\x45\xc9\x78\x29\xd1\xee\xc8\xf4\
\x7c\x39\xa3\x00\x17\xe4\x3c\x66\x7f\xde\x9c\xa1\x6c\x6f\x31\x65\
\xfb\x1b\xbd\x62\x4b\x61\xa3\xdc\x14\x6d\x31\x93\x12\x98\xcb\x81\
\x22\xa0\xe2\x35\x28\x8c\xb8\x76\xae\x1a\xdb\xef\x09\xb6\xb1\xe5\
\x19\x17\x31\xfa\x72\x5c\xda\x04\xeb\x35\x83\x00\xd3\x09\x65\xd4\
\x34\x18\x75\xb7\x8a\xf9\xee\x76\x46\x47\x5c\x17\xf1\xbd\x0f\x43\
\xf2\x57\xe1\x9f\x58\x5d\x7f\x38\x96\xd5\xc6\x60\xcf\xb5\x16\x94\
\xae\x13\xd1\x8e\xf7\x13\x8c\xeb\x73\x3d\x31\x57\x78\xf6\x4c\xa4\
\x18\x6f\x57\xc6\xe7\xd4\x32\x63\xfb\xdd\x08\xf3\xc1\x94\x20\xfa\
\xed\x02\x5a\x8c\xd5\x40\xe9\x5f\x98\x7d\x74\x62\x86\xf2\x71\xbd\
\x51\x67\x95\xe0\x86\xd9\x16\x9f\x71\x3f\x46\x02\x7f\x6f\x8e\xe6\
\x59\x20\xe0\xc6\x77\x7c\x08\xe7\xd2\x24\x50\xfa\xae\x84\xf3\x29\
\x4b\x9c\xcc\x58\xe3\xe3\x65\x1c\xbd\x85\xe5\xb8\x27\x4d\x82\xd5\
\x8b\x20\x40\x1d\x42\x39\x5f\x82\x39\x90\x57\xed\x98\x6f\xbf\x61\
\x74\x46\xf5\xd0\xb7\xd7\x78\x20\x14\xbf\xe7\x94\x37\x8b\xf1\xdd\
\xba\x08\x6f\xc6\xf6\x06\x75\xf4\x7c\x88\x0e\xc2\x1a\x47\xec\xee\
\xca\xf9\x9d\x29\x32\xfc\x4a\xa0\x05\x72\xe5\x84\x82\x88\xd2\xa0\
\x2c\x23\xcc\x07\x8a\x36\xb0\x50\x52\xb9\x50\xbc\x52\xc3\x68\x99\
\xa1\x7c\xdc\x27\xe1\x57\x64\x83\x15\x08\x8a\x0e\xe7\x32\xd6\xb8\
\x84\x25\x49\x06\x27\x9f\xe3\x15\x69\x11\x2c\x8d\x1a\x95\xa4\xca\
\x4f\x32\x94\xb1\x09\x28\x3d\x9a\xd8\x90\x1f\x63\x6e\x9d\x5b\x32\
\x48\xd6\x9c\x08\x6f\xc5\x1f\x3d\x90\xac\xb2\x27\x9f\xa7\x50\x9b\
\xf1\x3a\x1a\xa3\xf7\x40\x83\xfa\x36\x78\xa8\xee\x08\x41\x54\xe0\
\xfa\x78\x30\xd7\xc3\x7f\x37\x44\x9b\xa5\x2d\xb0\x9c\x56\xe8\x6d\
\x79\x0b\x3e\x63\x94\xb9\x9c\xbf\x17\xd1\xfe\x7e\x09\x04\xae\x4e\
\x8e\x96\x25\x49\xfe\x77\xf1\x56\xbf\xd6\xa1\x0f\x0e\x88\x98\x1f\
\x93\x12\x3c\x12\xab\x18\xe6\xc6\x09\x04\x62\x5b\xaf\x00\x16\xe3\
\x01\x16\x84\xbc\x5a\x46\xb2\xd5\xb5\x20\xcd\x37\xc9\x06\x2b\x10\
\x14\x1d\x6e\x61\xac\xf1\xfb\xa5\xbf\x9c\x5e\xe5\x72\x71\x56\x5a\
\x04\xcb\xe4\x12\x7a\x0f\xa1\x8c\x0b\x98\x9b\xff\x82\x98\x72\xb6\
\x65\x94\x31\x3c\xf4\xed\xf9\x8e\xe4\xea\xbb\x1c\x82\x55\x3d\x87\
\x24\xf9\x46\xb3\x08\x82\xb9\x29\xd1\xee\xcc\x14\xd5\xba\x19\x3e\
\xbb\xd9\xf6\x41\x8f\x08\x79\x3f\x05\xf7\x40\x77\xa6\x74\x41\x53\
\x0a\x60\x31\xf6\x64\xf6\xd5\xa7\x05\x4c\xfe\xd6\x83\xd2\xa7\xc8\
\x06\x2b\x10\x14\x1d\x46\x30\xd6\xf8\x0d\xd2\x5f\x89\xf8\x82\xd1\
\x97\xc7\xa5\x41\xb0\xfa\x1a\x2a\xfd\x86\x58\x4e\x03\x24\x62\x9c\
\x03\x60\x72\xcc\xb3\xe3\xe9\x96\x2e\xaa\x15\x21\x39\x81\x74\x14\
\x46\x41\x10\xfb\xa8\x3d\x28\xbd\x51\x1e\x27\xc2\x56\x10\x9d\x03\
\x6f\x35\x04\xe9\x8a\xca\xc2\x5a\x98\x5c\xf2\x1b\x38\x90\xab\xf0\
\x53\x5d\x73\x02\xb9\x2a\xc3\x00\x43\xfb\x36\x06\xa5\xbf\xb6\x20\
\x77\x59\x82\x3b\x7f\xb3\x54\xcf\xf7\xb2\x18\xcf\x96\x79\xea\x47\
\x85\x90\x0d\xde\x8f\xcb\xbe\x82\xe4\x78\x82\x85\x04\xdf\xe3\x5e\
\xa9\xc8\xda\xcf\xed\xab\x2a\x11\x7d\xc6\x31\x4f\x39\x49\xd6\xc8\
\xbf\xfa\x32\x9c\x32\x88\xa3\xf5\xdf\x35\x0d\x82\x65\x12\x80\x6b\
\x40\x7a\x09\xf3\x10\xf8\x32\xa6\x1c\xaa\x4d\xd5\x5f\x21\x7b\xac\
\xfb\x88\xda\xb3\x01\x10\xc4\xc8\x28\xb4\x89\xd2\x0c\x3d\x1f\xca\
\x0c\x9a\xbf\xca\xf9\x5b\x92\xf7\x5f\x99\x4d\xd7\xf3\x96\xe4\xaa\
\x57\xc4\xb3\xe0\x72\x66\x19\xa6\x98\x4b\x8d\x08\xb6\x63\xf9\x3c\
\x98\xdf\x64\xb6\xf7\xda\x0c\x65\xbb\x97\x29\xdb\x9f\x86\xe7\xcb\
\x4a\xe8\xa4\xd0\x12\x82\x78\x70\x71\xd8\x13\x7f\x17\xe7\xa9\x54\
\x0b\x0d\xfd\x07\xa1\xc1\xed\x57\x68\x67\xb9\x14\xd7\xf6\x58\x08\
\xe2\xa2\xd9\x92\xbd\xc6\x10\x84\x4d\xf1\x09\x57\xe2\xd9\x82\x51\
\x57\x47\xc6\x9c\xde\x1d\xed\x6f\x86\xa1\xbd\xe3\x6c\x50\x7a\x31\
\x7a\x8a\x2d\xc1\xff\x7e\x13\x9f\x85\xba\xe2\x65\xca\xc7\xdc\x6a\
\xc4\x68\x4f\xd4\x85\xb8\x15\xca\xf4\x39\x8e\xfb\x17\x68\x7b\x7a\
\x0a\xf0\x53\x63\x75\xc0\x57\x84\x37\xd0\xcb\x78\x31\x96\xb9\x18\
\x94\xfe\x0c\x4d\x36\x6e\x45\xb3\x83\xba\x16\x36\x96\xd4\x76\xfa\
\xea\xdb\x8d\xd0\xac\xa4\x37\x04\x81\x9d\x47\xe3\x3e\x3e\x0f\xc7\
\xb5\x6c\xad\x2c\xc0\x3d\xff\x2d\x26\x29\xd8\x2d\x83\xbd\xa7\x32\
\xbe\xb0\x6c\x0d\x41\x08\x89\xe6\x39\x7b\xc3\x9e\x18\x1d\x60\xef\
\x18\xf8\xec\xcb\x9a\x10\xc4\x70\xec\x07\x41\xc0\xd8\x37\xd1\xe4\
\xe6\x9b\x9c\x7e\xfc\x1e\xfb\xf6\x03\xe0\x05\xe1\x5e\x9e\xbb\x4e\
\x7d\x11\x2c\xd3\x61\x7c\xba\x65\xb9\x87\xc1\x86\x61\x1a\x92\xf0\
\x7a\x8c\x1d\x0f\x35\x9d\x4e\xae\xbb\xff\x76\x06\xf7\xdb\xee\x45\
\x74\x1b\xea\x88\x5e\x53\x15\x50\xbb\x93\x14\x65\x7d\x6b\xb4\x61\
\xb3\x21\x57\x67\x86\xea\x6d\x67\xa1\x09\x2c\x43\x43\x43\x9b\x7a\
\x18\xbe\x1f\x9b\xc7\xfe\x5e\x00\xee\x8e\x00\x85\x12\xa2\x61\x1a\
\xf8\x0b\xc0\xf7\x43\x04\xc1\xea\x03\x4a\x3f\xc7\xf4\x66\x1d\x05\
\x41\x70\xdc\x34\xdb\xed\x9a\xdf\xd4\x84\x7d\x98\xf5\x8c\x85\xe4\
\x44\xbc\x27\xa1\x57\xe4\x0c\x8b\x36\xac\x45\x47\x06\xd7\x88\xe3\
\x9f\x30\xea\xcc\x4d\x8d\xb6\x39\xc1\x88\x78\x09\x6a\x86\x1b\x18\
\xbc\xf9\x86\x00\x3f\x44\xca\x5f\xd8\xbf\xb7\xe0\x41\x6f\x6a\xe7\
\x48\x46\xd9\x2e\x09\xdc\x0f\x44\x12\xf8\x32\x1e\xf8\x69\x79\x79\
\xff\x0e\xb4\x3c\xb1\x14\x6c\x0b\x41\x9a\xb0\x6b\x91\x2c\xff\x17\
\xcf\xe5\x69\x38\x86\xbf\x40\x7c\x46\x92\x34\x83\x1d\x1f\x8d\xf3\
\xe7\x2d\x48\x0e\xc0\xed\x8a\x7f\xa5\x70\xf3\x41\xb0\x4c\x39\xd7\
\x28\x29\x66\x06\x23\x23\x6f\x17\xf1\xb7\x26\xcc\x05\x33\x21\xa6\
\x8e\xb9\xc4\xef\x73\x73\x0e\x0e\x0d\xfd\xed\x59\xbc\x1d\x16\xb3\
\x0a\xf4\x0a\x30\xc7\xef\xba\xc8\x62\x62\x5d\x1e\xaa\x67\x67\xc7\
\x89\x4a\x09\x0d\xf0\x39\xf5\x2d\x3c\x63\x0f\x42\x6e\x5b\xb3\x8c\
\xf4\xcc\x0d\x43\xf2\x9c\xa1\xbc\x56\x8c\xb2\x3e\xcf\xf9\xee\x58\
\x4b\x32\x40\x09\xb0\x1b\x85\x0f\x52\xda\x50\xf7\xb3\x1c\x87\x39\
\xcc\x7a\x1a\x41\x7c\x44\xf1\xaf\x3c\xb6\x67\x96\x65\x9b\x2a\x82\
\xd2\xbf\x32\xea\xd9\x3b\xc7\xe9\x82\x7b\x21\x89\x22\x2d\xc3\x3c\
\xf6\xc1\x1b\xa8\x31\x89\x6b\x2b\x27\xa7\x65\x63\x8b\xbe\xec\x02\
\xf1\x61\x76\xd2\xc0\x27\x1e\xf6\xbc\x4b\x63\x4c\x53\x7c\x62\x27\
\x8b\x39\xd9\xdb\x82\x70\xbb\xe0\x09\xdf\x04\xeb\x07\x43\x85\x2d\
\x0c\xdf\x87\xd3\xb4\x7c\x86\x4f\x01\x87\x87\x54\xe2\x9c\x67\x97\
\xfb\x62\x26\xc1\x72\xe6\xa6\x59\x1f\xbf\x79\x1d\xb5\x40\xa5\xf2\
\xce\xdc\x1a\x36\x4c\xe5\x32\x2b\x47\xfb\xc7\x7d\x1e\xbc\x2e\x82\
\x74\xfb\x88\xaf\xf5\x18\x98\x23\x15\x27\x79\x39\x7e\x91\x87\xbe\
\x6d\xcb\x6c\xe3\x9a\x0c\x3d\x1f\xb7\x06\x7e\xf8\x08\xd3\xf3\xe5\
\x71\x8c\xb2\x06\xe0\x13\xc1\x20\x8f\x1b\xda\xa9\xc4\xb6\x53\xbd\
\x8a\x67\xc0\x3f\x31\xda\x6c\x63\xe9\x99\x70\x12\xb3\x8d\xcf\xc7\
\x3c\x03\x8e\x48\xf1\xa0\xb8\x90\xd9\xa6\x1d\x98\x1a\xb3\x0a\xa0\
\xf4\x36\x96\xe4\xf0\x80\xd0\xd3\x6f\x5a\xda\xc9\x28\x02\x5f\x8d\
\xb1\xb7\x7d\x67\xd0\x3a\x86\xd1\x19\x78\x09\x85\x7d\xe1\x59\xcb\
\xfd\xe4\x18\x50\xfa\x69\xb4\xef\x4d\x5b\xc6\x65\x4c\xdb\xe6\xd3\
\x3d\x5f\x3c\xac\x14\x0d\xae\x04\xeb\x74\xc7\x1b\x66\x43\xc3\xf7\
\xdf\x42\x10\x9d\xbd\xec\x00\xea\xcd\x68\xe8\x8b\x11\xf5\x1d\xc4\
\xf8\xbe\x2c\x38\x66\xed\x12\x36\xe8\x6b\x89\xaa\xdb\xf5\xf0\x4f\
\xa4\xee\x4d\x20\x39\x4a\x7d\x9c\xd6\x2b\xf7\xf6\x45\xb5\xed\xf9\
\x8e\xf0\xbb\xce\x86\x36\x74\x37\x7c\xdf\x2f\xe3\x3e\x3d\xcf\x62\
\xe3\xd8\x38\x23\xd9\x0e\xb5\xd8\x30\x4c\x4f\x71\xfd\x99\xc6\xfc\
\x69\x6c\x7a\xa6\x14\x4c\x1c\x87\x8d\xc7\x99\xce\x31\x1f\x58\x8c\
\xc3\x5c\x46\xf9\xab\x23\x4c\x11\x4e\xcd\xe8\xb0\xb8\xd3\xe3\x4b\
\x46\x78\x1e\xd4\x62\x6a\xbc\xa2\x12\xbc\x6f\x02\xe9\x06\x4a\x8e\
\x22\xcf\x7b\x30\xbe\xa7\x06\x3f\xde\x08\x82\xe8\xdf\xeb\xf3\x04\
\x6e\x6a\xa8\xfd\x72\xce\x8d\xac\xf0\x3f\x06\xd1\x9f\x9d\xc7\xbe\
\xec\xe4\x8b\x60\x55\x81\xe4\x68\xd5\x94\xf4\x25\x1c\xb6\xfe\x1c\
\x12\x82\xc7\x19\x8b\xea\x52\xe0\xa5\x90\xb1\xb9\x19\x97\x02\x72\
\x0f\xa8\x33\x18\x63\x32\x38\xe2\xb9\x82\xf2\xdd\x22\x5c\x08\x8d\
\x09\xbf\xfd\x86\x60\xeb\xb6\xc0\x70\x5b\xce\x92\x24\x73\xb5\x33\
\x5f\x65\x28\xdb\xb9\xe0\xa6\x2d\x88\xc2\x73\x79\xdc\xcc\xa8\xf9\
\xde\x0e\x61\x94\x75\x07\x98\x63\xc4\x85\x53\x77\x6d\xc6\x3c\x9c\
\x38\x6d\x3b\x87\xe9\xad\xed\x1b\x54\xfb\xd9\x7e\xcc\xcb\xaf\xcd\
\x93\xed\x1f\xf0\xef\x94\x4d\xcf\xa4\xdc\xf6\x93\x23\xda\x79\xa2\
\xc3\xfe\x18\x67\x8b\x37\x27\xcf\xeb\xe7\x38\x06\x11\xbc\x21\x4f\
\x32\x3e\x41\xd4\xa8\xad\xcc\x73\x5f\x36\xf5\x45\xb0\x9e\x32\x54\
\xb4\xbf\xe1\xfb\x63\x2d\x1b\x30\x8f\xd9\x89\x03\x81\x17\xf6\xfe\
\x8d\x04\x7b\x87\xf2\x80\xe5\x96\xb7\xbb\x73\x88\xdf\x8d\x09\x1d\
\x48\xdd\x09\xdf\xbc\x69\x90\xd9\x94\x54\x7b\x64\x86\xfd\x37\x81\
\x39\x9f\x5f\xcb\x50\xb6\x47\x52\xb0\x0f\x9b\x5e\x00\x04\x6b\x21\
\xb8\x85\x90\x89\x52\xf1\x73\xe2\xde\x74\x66\x8c\xc1\x62\x46\xb9\
\xbf\x84\xbe\xbd\x34\x4f\xfd\x5b\x83\xd0\xae\x27\x32\x90\x63\x58\
\xc8\x68\x39\xed\xfa\x0e\x8b\x68\xe7\x00\xc6\xf7\x7d\x98\xe6\x31\
\xf9\x02\xc5\xb6\xa9\x61\x9e\x9e\xdc\xca\x70\x89\x41\xbe\xeb\x0b\
\xa0\x1f\x7f\x08\x2b\x03\x6c\x09\x56\x5d\x47\xd5\xe8\x46\x19\x33\
\xcd\xff\x10\x0f\x86\xc1\xe5\x98\x58\x71\x9e\x7c\x5e\x0e\x7d\x73\
\xb6\xa3\xa6\xe1\x6e\x0f\xb7\xac\xe1\x86\xef\xb3\x8a\x4d\xf6\x05\
\x73\x6e\x0e\xcc\x70\x6c\x47\x5b\xac\x9d\xa4\xb4\x56\x55\x0b\xe0\
\xc6\x48\x79\x26\x7c\xc0\x42\x6b\xf1\x50\x0a\x4f\x69\xdc\x00\xb4\
\xe7\xe5\x7c\xdb\xc9\xa1\x6f\x96\xa2\x01\xf2\xdb\xc0\x4b\x23\x46\
\x75\x74\xa8\x80\x4f\x38\x69\x8f\x71\xae\x36\x75\x66\x06\xf5\xb5\
\x72\xd4\xd8\xb6\x37\x78\x3c\x16\xc2\xba\xf9\x8d\xb0\x37\xee\x8d\
\x9e\x86\xf9\x94\xb3\x8b\xc7\x75\x95\x16\x3e\x0c\xcb\x66\x4b\xb0\
\xee\x32\xd8\xd6\x54\x37\x7c\xff\x46\x1e\x1a\x1f\xbe\x4d\x6c\x9c\
\xf3\x8e\xfc\x17\x3e\x23\x48\xa0\xb5\x7f\x16\xd4\xfc\x84\x27\x14\
\x6e\x12\xcc\x15\x21\xb7\xec\x28\xbc\x6a\x28\xe3\x27\x30\x07\x88\
\x4b\x72\xb8\x78\x32\x83\x7e\xdb\x08\x0f\x33\xd7\x88\xf7\x69\xe1\
\x43\xcf\xcf\x97\x2d\x3d\xd9\x56\x9c\x8b\x5a\xc8\x9d\x20\x88\x85\
\x63\xa3\x69\x3b\x0d\xec\x33\x4c\xe4\xa2\xcc\x4b\xf8\x70\xc6\x37\
\xd3\x89\xfd\xff\x1b\xa3\xcc\xc9\xa1\xbd\xca\xc6\x90\xf8\x5e\x6c\
\x4f\xd8\xc6\x6f\x7b\x08\x62\x29\xb9\x24\x6c\x0f\xcf\xfb\x5f\x53\
\xde\xbf\xdf\xca\xa9\xaf\xa9\x65\x19\x13\x50\x63\xfc\x31\xee\xf9\
\x36\x1e\x80\xe3\x19\xf5\x6d\x1b\xd3\x5f\x4d\x0a\x84\x10\x6c\x10\
\x56\x20\xc6\x99\xa2\x10\xe4\x6c\x0a\x76\xe1\x7a\xb2\xc4\x10\x1f\
\x04\xab\x9a\xc1\x13\xc9\xe4\x55\x73\x48\x1e\x3b\xa0\x7d\xc4\x01\
\x31\x05\x82\x7c\x80\x42\xac\x36\xd4\x4e\xbc\x6b\xd0\x4c\x52\xb4\
\x5d\x3f\x27\x2c\x8e\xdc\x0d\x87\xf2\xb4\xf6\x84\xc1\x1e\xeb\x2e\
\xc3\xf7\xdb\xa4\xdc\x67\xb5\x80\x1f\xe3\xa5\x4d\x86\x63\xfa\x39\
\x53\xb6\xd7\x0d\xe5\x75\x73\x58\x8b\x8b\x0d\xf6\x3d\x5c\xdb\xae\
\x4b\x13\xca\x5a\xc2\xb0\xf1\xa9\x96\x43\x6a\x38\xda\x39\x93\x27\
\x68\x47\x87\x67\x1b\x1b\x5b\xa3\xcb\x08\xf3\x81\xa3\xd9\x7b\x2a\
\xa1\x9c\xa6\x90\xad\x86\xf2\x70\xe6\xb7\x73\x23\xb4\x51\xb5\xd1\
\x9b\x73\x58\x8c\x76\x66\x05\x04\x41\x92\xc3\x6d\x9d\xc2\xd0\x0c\
\xc5\xd9\xe6\x7d\xe4\xf1\x69\xfc\x1d\xf4\x32\x1d\x82\x17\x93\x61\
\xf0\x4f\x70\x69\xaa\x53\x47\xd2\x7e\xe6\xc3\x89\xe0\x17\x08\x42\
\x41\x8c\xc5\x8b\xf4\x6b\x78\x96\x50\xf3\xdc\xfe\x04\xf1\x41\x76\
\x7d\x79\x30\x7e\x89\x24\xfe\x59\xec\xc7\x47\xf0\x52\xce\xb9\x30\
\x5f\xe4\x83\x60\x3d\x66\xa8\xc4\xa4\xbd\x3a\x23\x8f\x04\x6b\x15\
\xfc\x3b\x25\x8e\x80\x3e\xde\x61\xaf\xcc\xa1\xc4\x27\xc1\xa4\x27\
\xa6\x9d\x08\xb6\x7c\x9c\x83\xac\x96\x41\x95\xfd\x56\xca\x7d\xc5\
\xbd\xed\xfd\x6d\xe8\x1f\x9f\xd8\xdc\x62\xb3\xbc\xdd\x50\xe6\x4d\
\x96\xeb\xf0\x3e\x30\x07\x36\x6c\xed\xe2\x1e\xcd\xf0\x54\x0e\x27\
\x1c\xb7\x75\xc2\x69\x07\xc9\xf1\x78\x38\x01\x77\xc7\x5a\x7a\x3e\
\xdb\x78\x85\x51\x0f\xa8\x79\x29\xda\x12\xfd\x09\x41\xbc\xc4\xb8\
\xe7\xf5\x11\x96\xce\x34\x65\x68\x4b\xb8\x18\x75\x05\xa5\x1f\xcd\
\x51\x1e\x2c\x87\xe8\x48\xe4\x54\xb2\xbe\x00\xa2\xb3\x16\x5c\xe3\
\xd0\x4f\xbf\x22\x29\xee\x0a\x39\xe9\x58\x22\x70\xb9\x83\x17\x78\
\x2e\xbe\xb6\x94\x73\x2d\x12\x94\xd3\x91\xd8\x56\x8b\xe9\xcb\x35\
\x96\xeb\x92\xfa\xea\x61\x72\x9e\xba\x11\x9f\x1e\x77\x48\xe8\x03\
\x4e\x3c\xb2\x8e\xae\x04\xcb\x94\x40\x99\x6a\x8b\xd0\x1e\x78\x79\
\x92\x7c\xa3\x87\x10\x27\x16\x4e\x65\x92\xec\xf5\xf0\x4f\xb2\xeb\
\x38\x5c\x0b\xbc\x98\x4c\x23\x71\xfe\x99\xe2\xca\x98\x12\x75\xd7\
\x48\xb1\x9f\xb8\x8e\x1b\xf3\x33\x1c\x43\x9b\xe7\x3c\x93\x07\xd9\
\x4b\x16\x65\x52\x13\x47\x6f\xc9\xd4\x06\xc6\xc5\x6d\xe2\x68\x3b\
\xc2\xb6\x46\x4f\x7b\xda\x53\x2e\x67\xf6\xd1\x2e\x0e\xb6\x46\x3f\
\x33\xe7\xc5\x78\xc6\x65\x60\xf3\x98\x32\xae\x76\xd8\x8b\x6f\x0e\
\x5d\x32\xf6\x8f\xb0\x8f\x3d\xc8\x51\x73\x7a\x1a\xa3\x3f\x1a\xa2\
\xa6\xfc\x8b\x08\x6d\x39\x87\xac\x4f\x73\x24\xfb\x61\x3c\x1a\xa3\
\x51\x8b\xc2\x28\x46\xb9\x3d\x3d\x78\x85\x86\x2f\xd5\xdb\x7a\xf6\
\xa6\x7d\x27\xe2\xfb\x23\x1c\xfa\xf2\x5a\x88\x4f\xd9\xe5\x92\x95\
\x63\x1f\x57\x82\x75\x83\x61\x61\x57\x63\x2e\xee\x01\x90\x1c\xea\
\x21\x2d\xdc\x26\xa4\xc9\x3a\xd7\x98\x69\xf1\xfe\x62\xf0\x20\x3d\
\x09\x78\x29\x35\x26\xc0\x86\xc9\xa3\x4d\x48\xd2\x16\x5c\x9d\x62\
\xff\x70\x23\xe0\xbf\x9d\x31\x49\xe6\xae\x93\xd6\x86\x32\x67\x78\
\x26\x6c\xb9\xd8\x02\x78\xf1\xd8\xe2\x0e\xd1\x0b\x18\x65\x84\xcd\
\x1b\x7a\x31\x0f\xc0\x38\xcd\x21\xa7\x1d\xb9\x8e\x36\x5b\x59\x8c\
\x59\x37\xe6\xbc\xb8\xd5\x83\x26\xe8\x01\x0b\x39\x67\x43\x72\x08\
\x90\xdb\x72\xce\x95\x70\xfa\xb3\xce\x16\x46\xfe\x3b\x30\xfb\x45\
\x47\xd4\xcb\x21\xeb\xc3\x23\xca\xbc\xc3\xd2\x41\xe1\x78\xa6\xec\
\x9c\x38\x6b\x51\xce\x43\xdb\x30\x9e\xef\x72\x9f\xd8\x38\xa9\x6c\
\xce\x61\x94\xfd\x92\x07\x6f\xed\xb2\x88\xf5\x1c\x93\x8c\xad\x99\
\x97\xbc\x1d\x5d\x09\x56\x12\x9b\xbb\xd8\x72\xe3\xaf\x89\x5e\x69\
\xdc\xce\xfa\x1a\xdf\x9b\x97\x30\xbf\xbb\x52\x88\x92\x35\x28\x11\
\xa8\x57\x42\x74\x3e\xaf\x66\xcc\x45\xf1\x13\x6c\x98\x38\x3a\x57\
\x4b\xb5\xa9\xe5\xd3\xd5\x1a\x48\x2f\x11\xf4\x5d\x16\xb7\xd2\xac\
\xc6\x8e\x2b\x5b\x92\xc6\xa2\x02\x9a\x02\xfc\xcd\x28\x8b\x9b\x7e\
\x65\x3b\xa6\xbc\x07\xc7\x94\x33\x94\x51\x46\x38\x27\x64\x1d\xc6\
\xb7\x8b\xc0\x3d\xf6\xd8\x0a\x87\x43\x28\x6c\x18\x9f\x06\xf1\xee\
\x1b\x53\xc6\x7f\x2d\x3c\xd7\xb6\x20\xc8\x36\x0c\xa2\x83\x55\xef\
\x69\x71\x5e\xfc\x84\x84\xd5\x65\x0d\x5d\xcc\xa8\xef\x1a\x47\x6d\
\x48\x19\x9a\x58\xd8\x81\x72\xca\xdf\xd7\xd1\x29\xa4\xcc\x9e\x92\
\xbb\xa7\x72\x48\xf9\xc0\x08\xf2\xcb\xcd\x48\xf1\x03\xf0\x73\x07\
\x73\xec\xc5\xff\x8c\xda\x2f\x39\x04\xcb\x14\x77\xa4\x81\xc1\x36\
\xe5\x08\x43\xf9\xbd\x80\x97\x27\x6d\x05\x6e\x9e\x87\x42\x10\x23\
\x83\xa2\x4a\xbf\x47\x48\x92\x33\xa8\xcf\x26\xbb\xe4\x2c\x06\x8e\
\xad\xce\x6a\xdc\xb0\xb7\x84\xe8\x48\xe4\x93\x09\x46\xbc\xf5\x0d\
\x0b\xb0\x5f\x9e\xfb\xa6\x0c\xd7\x67\x38\x6e\x6f\x5b\x68\x18\x92\
\xca\xdb\x8b\x51\xd6\x14\x0b\x79\x0f\x64\xca\x5b\x3f\xa6\x1c\x4e\
\x7e\xb4\xa8\x3c\xa3\xd3\x1c\x6e\xb0\x5c\xdb\xab\x81\x8e\x63\x36\
\x05\x1d\x4f\x6e\x61\xe0\x1d\x46\xf9\xb7\xc6\xf4\xf1\x58\x46\x19\
\x2b\x41\xe9\xdd\x1c\xe7\xf2\xc6\x96\x2f\x1f\x3f\x83\x5b\x62\xf5\
\xc1\x8c\xba\xc2\x5a\x77\x9b\xb0\x0c\x36\xb9\x54\xdb\x30\x2f\x51\
\x35\x23\xb4\x36\xdc\xe0\xaf\x3b\x59\xc8\xc9\x49\x6f\x74\xa6\x83\
\x56\xba\xec\x52\xdd\xcc\x42\x46\x4e\xe6\x98\xc8\xc0\xea\x1c\x82\
\x95\x14\x64\xec\x05\xc3\xb7\xdf\xe5\x18\x2b\x36\x37\xb8\xfb\xde\
\xca\xec\xbc\x85\x39\x1a\x93\x43\x20\x3e\x3f\xd7\xf3\x42\x8e\xbc\
\xe1\x61\xa2\x7d\xd1\xb5\x4c\xf7\xed\x37\x21\x3a\xc8\xeb\xae\xb0\
\x61\x2a\x89\x65\x06\x19\x93\xd4\xf1\xcb\x53\xea\x17\x6e\xf8\x91\
\xff\x64\x34\x5e\x9b\xa0\x83\x07\x47\xb6\xe1\x8e\x17\xae\x24\x23\
\x65\x0a\x38\x76\x36\x49\x06\xd8\x5f\x12\xcb\x58\x17\x73\x49\xe4\
\x44\xae\xee\xeb\xf0\x24\xb4\x3c\xf4\x24\x55\xdd\xe2\x96\x9e\xb9\
\x1b\x3a\x82\x93\x4c\xb7\x9b\xa7\x39\x7d\xa2\x43\x3b\x6c\xe3\x1d\
\x72\xa2\xcf\x87\xcf\xb9\x21\x4c\x19\x1f\xb7\x94\xf1\x2c\xc7\x4b\
\x14\x97\xbc\xec\x6d\x29\x27\xe7\xe2\x11\xd6\xb2\x4d\x4a\xd1\x34\
\x21\x17\xf7\x33\xea\x78\xde\x85\x60\xed\xcb\x9c\x4c\xa6\x9b\xee\
\x15\x86\xe7\x87\xfd\x80\x1f\xb0\xb1\x7f\xce\xf7\xad\x50\x05\xf9\
\x33\xfe\x6d\xa6\x90\x22\xef\xf0\x99\xef\x69\x29\x6e\x98\x51\xe9\
\x98\xfa\x5b\x7a\x6e\xd5\x30\xd8\x11\x34\x4f\xa1\x4f\xb8\xe9\x3f\
\x8e\xca\x68\xac\x6c\x6e\xcf\xd7\x18\xca\x3c\x8d\x51\xd6\x63\x29\
\xdf\x1e\xe3\x22\xfd\x57\x06\xa5\xbf\x27\x96\xf1\x7b\x8c\x5b\x3d\
\xa7\xef\x72\x2f\x9a\x75\x99\xfd\x1d\x4e\xcb\xd2\xb6\xc0\xc8\x55\
\x5c\xba\x92\xda\x0c\x22\xb8\x06\xc9\xbe\xaf\x79\xfd\xae\x43\x5b\
\x3e\xc6\xe7\x34\x6a\x5d\x15\x19\x2e\xfb\xbf\x47\x78\xc9\x72\x9d\
\x15\x0e\xb6\xec\x13\x0e\xa9\x7f\xd5\x51\x63\x6b\x9b\x85\x82\xa3\
\x25\x0b\xa7\x3a\xdb\x82\x69\x17\xf5\xa7\xc3\xfc\x1a\xc3\xa8\xe7\
\x26\x17\x82\x75\xa7\x43\x27\x3f\x93\xa0\xae\xbd\x11\x92\xa3\xc8\
\x76\x45\x15\x24\x27\x68\xda\xf6\x21\x03\xd3\x0e\x1e\xde\xde\x05\
\xd1\x6f\xfd\xcb\x1c\x37\xec\x15\x78\xe3\xaa\x16\xa3\x39\x30\xd9\
\xd7\x99\xbc\xf0\x1e\x02\xfb\xfc\x75\x36\x98\xca\x6c\xff\xfe\x19\
\x8d\xd5\x9d\x16\x63\xd3\xce\x50\xe6\x65\x8c\xb2\x6e\xb7\x90\xf9\
\x3a\x0f\x04\xae\x0e\xc3\x15\x7c\x2e\xb8\xc7\xd1\xfa\x19\xfe\x09\
\xea\xc9\x39\xe8\xde\xf5\xa0\x49\xc8\x02\xc3\x22\xe4\xe4\x10\xc1\
\xcf\x3d\xcf\xeb\xcd\x99\xe7\x43\x54\xd8\x03\xaa\x87\x21\x27\x38\
\xe8\x24\xe0\x65\x3e\x89\x0a\x21\x60\xdb\x27\xaf\x33\xea\x19\x10\
\xfa\x76\x47\xa6\x9c\x07\x5a\xca\xc8\x49\x3a\x3f\xc7\xe1\xdb\xf5\
\xe8\x94\x65\xdb\x97\x1c\x9b\xb9\xae\x2e\x04\x2b\x29\x40\xe1\x7e\
\x8e\x06\x77\x0b\x41\xe9\x13\x0c\xf6\x34\xc3\x99\x8c\xb5\x95\x10\
\xa0\x4c\xe0\x12\xe5\xf7\xc5\x18\x77\xde\x03\x81\x17\x88\xaf\x43\
\x82\x7c\x47\x18\x6c\xbd\x2a\x7b\xee\x8f\x4f\x3d\x7b\xe9\xe5\x2b\
\xc0\xe8\x3a\x30\x27\xc8\xe6\x44\x5b\xef\x6d\x21\xf3\x63\x8c\xf2\
\xef\x8e\x29\xa3\x15\xa3\x8c\xb1\x16\x97\xc4\x28\x34\xc4\x6f\x38\
\x36\x42\x9d\x22\xea\x1c\x58\x80\x04\xeb\xc1\x18\xdb\xd9\x2c\x0e\
\xbb\x38\xec\x61\xe1\xf1\x66\xf3\x84\xd4\x81\x51\xde\x33\x0e\x76\
\x51\x49\x4f\xb1\x14\x7c\xc9\xa8\x27\x4c\x2e\xfb\x30\xbe\x5d\xe0\
\x20\x23\xc7\xf1\x63\xb4\xc3\x7c\xa3\xe4\x83\x8c\x03\xd7\x83\x77\
\x0f\x5b\x82\xd5\xd6\xe0\x0d\xe2\x6b\x13\x7e\xd7\xf0\x3e\xdf\x09\
\x78\xf9\xae\x9e\x26\x7a\xaa\x08\xdc\xc0\x8d\x81\x33\x3a\x46\xfd\
\xdd\x06\xf8\xd1\xbb\xd7\xe3\x53\x72\x9c\x6c\x55\x20\x39\xb9\xee\
\x39\x9e\xfb\x62\x32\x53\xf6\x23\x32\x18\x1f\x6d\xd1\xa7\x8b\x09\
\xe5\x72\xd6\xa2\x8d\xa6\x6e\x0a\xa3\xfc\xb8\x20\xa3\x9c\xa0\xc6\
\x77\x25\xc8\x72\x25\xf3\x69\xe7\x14\xc6\xef\xdf\x87\xfc\x25\x4f\
\xe6\xe2\x76\x47\x3b\x95\x5b\x52\x9a\xe3\x3b\x83\x7b\xae\xbc\x3b\
\x0c\x75\x70\x12\x6d\x87\x3d\xd5\xb9\xf1\xf1\xce\x75\x78\x55\x58\
\xc3\xa8\xa7\x95\xc3\x45\xe2\x66\x87\xf1\x7a\xd0\x61\x5c\xb8\x49\
\x9d\x6d\xb5\x6c\x1c\xcd\xec\xef\x31\xaf\x30\x24\x82\xf5\x62\x42\
\xc1\xcf\x1a\xdc\xb8\x6d\x26\xfa\x18\x50\xba\x45\x42\xb9\x9c\x9b\
\x5d\x75\x21\x40\x99\x60\x26\x71\x12\x9e\x1e\x63\xdb\xe0\x9a\x09\
\x3d\x29\xb0\xdd\x55\x0e\x9e\x72\x5c\x8c\x67\xca\x7d\x4e\x06\x63\
\x63\x13\xff\xea\x03\x82\x3d\x0a\xe7\x79\xb8\x26\x53\xe6\x2a\xcc\
\x03\x33\xce\xdb\xea\x76\x46\x19\x67\x25\xc8\x73\x1c\xa3\x9c\xf7\
\x98\xda\xab\x38\xef\xeb\x17\x98\x63\xf6\x23\x3e\x65\xa6\x89\x53\
\x1c\xbd\xc1\x4e\x4b\x71\x9e\x37\x63\xd8\xdb\xd9\xa4\x5b\x1a\xc6\
\x28\x27\x9c\x98\xf8\x74\xa6\x1c\x5d\x2c\xfb\x80\xa3\x29\x5b\x1d\
\x61\x73\xc8\xb9\xd4\x1c\x96\xd1\x3e\x19\x76\x04\xe2\x86\x9b\x69\
\x62\x29\x23\x47\x53\x36\x2d\xae\x1c\x13\xc1\xaa\x6e\xf0\x3e\xda\
\x13\xfc\x04\x12\x8b\x0b\xa9\x10\xe7\xa1\xb0\x0f\x28\xfd\x0a\x31\
\x4c\x80\x20\x7d\x34\x48\xf0\x16\x5c\x03\x41\xd0\xc0\xba\x11\xf6\
\x13\xe7\x1b\xbc\x53\x39\x91\x79\x6d\x6f\x75\x3b\x7b\xec\x07\x6e\
\xda\x9f\x51\x19\x8c\x8d\x4d\xba\x0b\x53\x30\x56\x8e\x3d\xca\x5c\
\x0b\x99\x77\x61\xca\x1b\xe7\x26\xce\x89\x68\x7d\x80\x41\x0b\xf8\
\x17\xf8\xd7\x08\x25\x85\x8d\x19\xce\xbc\x41\xd7\x84\x20\xce\x4f\
\x9a\x88\x92\x73\x21\x43\xce\x7d\x52\x9e\xeb\x75\x2c\x1c\x4d\xa8\
\xb9\x41\x3f\x72\x98\x8f\x5c\x82\xd5\xd1\xb2\xfd\x3d\x19\x75\x84\
\xc3\x0a\x54\x02\x5e\x98\xa4\xed\x2c\x65\xac\xcc\xf4\x20\xdc\xcf\
\x91\x60\x35\xb6\x94\x73\x10\xa3\x8e\xe7\x6c\x09\x56\xd2\x9b\xec\
\x4c\xc3\x0d\xf7\x73\x4f\x9b\xd0\x6d\x09\x4f\x7d\x27\xc6\x78\x76\
\x9c\x23\xa4\x27\x73\xf4\x8e\x99\x78\x71\x0b\xb1\xb6\xc7\x83\x6a\
\x85\x41\xb6\xa4\xc3\xaa\xa7\xc7\x3e\xb0\xc9\x33\x96\xa6\x96\xd5\
\xd6\x50\xba\x21\xf8\x0b\xd1\x60\xe3\x69\x74\x02\xd3\x50\x39\xce\
\x96\x8e\x4a\xde\xd7\x12\xb4\x6c\xe3\x53\x20\x58\xd5\x3d\x69\x4c\
\x7e\xca\xd3\x9a\xe7\x78\x83\xad\x36\x78\x8e\xfb\xc4\x03\x0e\x63\
\xf2\x1b\x44\xa7\x51\x59\x4c\xfc\x7e\x15\x6c\x98\x8e\x8b\x9b\x3b\
\xd1\x36\x94\x05\x27\xc4\xd1\xf0\x88\x0b\x2f\x55\x6b\xbc\x1c\x92\
\x03\x3d\x27\x61\x1b\x86\x8c\x6b\x60\xc3\x78\x88\x37\x30\xfb\x72\
\x57\x4b\x39\x47\xfb\xf0\xb8\x36\x11\xac\xa4\x5c\x63\x17\x19\x08\
\xd6\x53\x4c\x83\x3b\x93\xfa\xfb\x8c\x18\xa2\x55\x0b\x9f\x2a\xcb\
\x5c\x85\xbf\x17\xb2\x93\x17\x6c\x0c\xff\xa4\x04\x99\x8c\xe3\x15\
\xd6\x02\x6c\x1d\xfa\x7f\xcf\x32\xe7\x41\x92\x31\x6b\x52\x3a\x89\
\xa3\x2c\x9f\xb9\xb9\x38\xd2\x62\x6e\x0f\x49\x69\x3c\xaa\x59\x6a\
\x5d\x3e\x21\x94\xcd\xf1\x20\x1c\x98\x32\x51\x9d\x04\xf1\x19\x22\
\xa8\x89\x8c\x29\x5a\xb6\x4b\x3d\x93\xab\xbb\x0d\xf5\xdd\xc2\x3c\
\x88\xea\xe4\x61\xcd\x73\x22\x5d\xcf\xce\x58\xb6\x4b\x1c\xec\xb2\
\xc2\x36\x50\x35\x18\x65\x2d\x81\x0d\xa3\x9a\x73\x8d\xdc\x6d\xed\
\x9b\x1e\x66\xd4\x71\x23\x6c\x98\xfb\x93\x9a\xd2\xe9\x7b\x07\x07\
\x21\x4e\x4e\xd4\xaf\x22\xbe\x3f\x9b\xd9\x97\x5d\x2d\xe5\x7c\xdf\
\x47\x1d\x26\x82\x35\x37\xe1\x36\x42\xbd\x79\x1f\x04\x41\x4a\x90\
\x9f\x3c\x6c\x4a\x3f\xc1\x86\x89\x87\x73\xdf\xe0\x67\x79\xd6\x48\
\x08\x78\x38\x2a\xe2\x79\x69\x53\x3c\x30\x97\xa3\x97\x9d\x8d\xa6\
\xe2\x25\x9c\x47\x49\xcf\x5d\x1f\x43\x72\x3a\xa6\xa4\xc3\xa9\x9a\
\xa7\xf6\x6b\xcb\x79\xdd\x24\x85\xb1\x18\x6d\x29\x0b\x65\x43\x7a\
\xd2\xb3\x87\x96\xcb\xf3\x58\x5c\x88\x06\x4e\xfc\x2a\xca\x53\x6d\
\x53\x8f\xe4\x6a\x11\xa1\xbe\x33\x99\x65\xb6\xcb\xc3\x7a\x3f\x8f\
\x21\xdf\x4b\x79\x90\xaf\x9e\x85\xe3\xc9\x7a\xd4\x56\xda\x26\x69\
\x9e\x05\xd1\xe1\x24\x38\xc6\xe7\xf3\x2c\xdb\xcb\xb1\xdb\xeb\x1d\
\x61\x0e\xb4\x92\xa1\xc1\xb2\x8d\x67\xd6\x8e\x21\xe3\x2b\xc0\x8f\
\xc9\x19\xc6\x48\x4b\x39\x67\x79\x30\x51\x48\x24\x58\x49\x31\x31\
\x6c\x72\x5e\x6d\x86\x86\xa4\xaf\x32\xdf\x60\xe3\x0c\x49\x0f\x17\
\x42\x53\xd0\x68\x85\x1e\x20\x3f\x26\xbc\xdd\xd7\x27\x84\x0b\x38\
\x3a\xe7\xf7\xcf\x39\x10\x95\xa9\x29\xdc\x72\x5c\x63\xa7\xe4\xba\
\x3c\xef\x06\xf9\x7f\x22\x59\x0b\x1b\x26\xb8\x8d\xc2\x44\x0f\x36\
\x2d\x49\xe0\x04\x3b\xbc\xcc\x03\x41\xb9\x33\x45\x7b\x36\xdb\x27\
\xa0\x96\xcc\x32\x87\xe6\x61\x8d\x3f\xc4\x90\x6f\x40\x9e\xf6\xa1\
\xca\xc0\x0b\x18\x19\x75\x59\xe3\x68\xa0\x46\x7b\x58\x33\xb6\xde\
\x6f\x1f\x38\xcc\xc1\xca\xf0\x4f\xc6\x15\x0a\xb6\xb7\x1c\x8f\x93\
\x18\x75\x44\x85\x05\xd9\x84\xa9\x99\xb4\x09\x34\xca\x49\xc5\xb4\
\x2a\xc1\x84\x29\x91\x60\x25\x05\xfa\x7b\xc4\x71\xd2\x2b\x7c\x4e\
\x19\x0e\x6e\x81\xe2\x3e\x72\x70\xc3\x14\xa4\x67\xf0\x9e\x74\x10\
\x3d\x10\xfa\xfd\x08\x86\x87\x60\x47\xc3\x6f\x7b\x5a\xde\xb6\x47\
\x78\x6c\xff\x3d\x0e\xf3\xf9\x54\x0f\x46\xbe\x63\x1c\xea\x7f\x86\
\xb8\xf9\xfc\x46\x2c\xef\x6f\x0b\x1b\xb3\xcd\x18\x4f\x15\x49\x06\
\xc1\x9c\x71\xe8\x41\x94\xcd\x47\x6c\xaa\xef\x18\x7d\xb1\x94\x51\
\xee\x5f\x29\xad\xe7\xcb\x20\x3e\xaa\x38\x27\x29\x70\xd7\x3c\xef\
\x4b\x1c\x23\xf5\x39\x0e\xcf\x52\xb7\x7a\x7a\x62\x9e\x6d\x71\xa6\
\x72\xd2\x92\xb5\x65\x5e\x42\xc3\xe8\x65\x39\x0e\x57\x38\x3c\x63\
\x52\x2f\xda\xae\x69\xf2\x76\x65\x94\xbd\x30\xa9\xac\x24\x82\x95\
\xc4\x86\x8f\x81\xe4\x00\x5d\x35\x18\x8d\xa9\x8d\x87\xdf\xff\x1c\
\x3c\x75\x06\x89\xd7\x60\xc1\x60\x73\x34\x3a\x8f\x1b\xab\x65\x21\
\x2d\xc9\xa1\x8c\xc3\xcf\x64\x57\x94\x94\x63\x2c\x49\xcd\xff\x0b\
\xf0\x33\xad\xc7\xa1\x91\xe3\x01\xfc\x1a\xaa\xc1\xab\x30\xea\xac\
\x89\x0e\x29\xcb\x1d\xeb\x6e\x40\xa8\x6b\x67\x46\x79\x5f\xa4\x6c\
\xa3\x91\xa4\xb5\xe4\x3c\x91\x52\xb5\x6c\xc7\x7b\x20\x58\x9d\x18\
\x7d\xc1\x8d\x85\xf5\x84\xa7\x39\xbc\x25\xda\x21\x7d\x6d\xd0\x56\
\x70\x02\xeb\xee\xed\x41\xae\xb3\xd0\x66\x71\x4f\x8b\x6f\x3b\x3b\
\xcc\x5b\x8e\xe7\xda\x19\x10\x1f\x30\x9b\x3b\x57\x46\x47\xd8\x73\
\xf9\xf0\xec\x5d\x8f\xc6\xe6\x9c\x90\x4c\x94\xa7\x50\xca\x59\xcf\
\xd1\xe4\xc5\xd9\x79\x1f\x6a\xd1\x97\x1c\x5b\x50\x4e\xdc\xb2\x99\
\x36\x04\xab\x2e\xd0\x93\x92\x86\x31\x0e\x7f\xf3\x02\xde\x2e\x39\
\x07\x57\x3d\x08\x62\xce\x8c\xb4\xe8\xc0\x6b\x84\xdc\x14\x0c\xf6\
\x01\x5e\x9c\x97\xaf\x19\x21\x03\x9e\x33\xcc\xcd\xa4\x0d\x29\x29\
\xce\x8b\xcf\x27\xe7\x41\x1e\x0e\xe2\x25\x10\x44\x17\xbf\x0d\xed\
\x98\x3a\xa0\xb6\xf6\x20\x3c\x2c\x7a\x43\x10\xe4\x71\x2a\xc3\x76\
\x22\x09\x67\xa7\x60\xc8\x6f\xa3\x19\xe4\xc4\x9c\xfa\x21\xc6\xe3\
\xab\x02\x04\x69\x94\xa8\x2a\x7e\xaa\x96\x6d\x77\xc7\x3e\xe6\xf6\
\x47\x1b\x8b\x3a\x26\xa0\x2d\xa0\x8d\x09\x47\x37\xb4\x3f\x5a\x11\
\x7a\x25\x88\xfb\x86\x13\xa2\x61\x07\x0f\xeb\x6a\x48\xc8\x4e\x69\
\x30\x24\x67\x12\xc9\xc5\x00\x86\xac\x53\x43\xdf\x8e\xf5\x44\x24\
\x5f\xb1\x18\xcf\xb9\x10\x9d\xa7\x95\xe3\xc8\x43\x4d\xc5\xd3\x97\
\x29\xdb\x0d\xc4\xbe\xaf\x02\x41\xae\x3e\xae\xd3\x41\xe7\x84\x32\
\xe7\x5b\xf4\xe5\x07\x31\x9a\x3b\x97\xe0\xd9\xa3\x6d\x08\xd6\x71\
\xb6\x05\x46\xb8\xb3\xce\x83\x20\x90\x64\x0b\x0b\x4d\x40\x3f\x50\
\x7a\x3a\x98\x93\x89\xfe\x09\xd1\x89\x5a\x05\xf9\xc3\x67\x40\x8f\
\xe8\x9c\x14\x0c\x34\x1c\x63\xe4\x7c\xb0\x4f\x92\x9a\x94\x38\xba\
\xbf\xc7\xb6\x6f\x96\x82\x4b\x7f\x9a\xe0\xdc\x46\x39\x36\x14\xb7\
\x5a\xf4\x1d\x27\x96\xcf\xff\x12\x9e\x4a\xd3\x88\xd3\xc5\x79\x1e\
\x8d\x42\x3d\x66\x5f\x54\x04\xbb\x34\x30\x4b\xf1\x69\xcf\x94\x83\
\xb5\x26\xde\xd6\x1f\x81\xf8\xc0\xb1\xcf\x26\x68\xaa\x57\x31\xbc\
\x1c\x6b\x7a\x58\x57\x71\x97\xab\x2f\x21\x08\x2a\xdb\x11\x36\x8c\
\xb7\x57\x05\xcf\x11\x2e\x49\xcd\x2d\x63\x1e\xe3\x49\x5c\x7b\xf6\
\x32\x2e\xc3\x14\x08\xc2\xae\xc4\x9d\xa3\x9c\x36\xbe\xe5\xd1\x91\
\xe3\x22\xc3\x6b\xd6\x95\xc0\x8b\xaf\x95\x8b\x66\x9e\x3c\x8d\xa3\
\x92\xc3\xf7\x40\x8e\x11\x55\xf6\xd3\x8c\xb2\xee\xb2\x21\x58\x8f\
\x58\xbe\xbd\xee\x46\xd8\x10\x1f\xc4\x27\x10\xce\xc2\x32\x31\xeb\
\xeb\x85\xd0\x14\x1c\x3a\x33\xbc\xa8\x9a\x1a\x5c\x82\x2b\x32\xd4\
\xc3\xf7\x25\xc8\xb4\x97\x85\x37\x5a\x56\x29\x84\xf2\x85\x15\x4c\
\x83\x55\xce\x2d\xb7\x6f\xca\x36\x1a\x83\x3d\xd8\x50\x4c\x4c\xd1\
\xc3\x31\x17\x2f\x5b\xce\xa3\x7e\x0e\x63\xbb\x0a\x82\xb0\x1b\x63\
\xd0\x8b\x6f\x38\xbe\x0e\x8c\xc5\xe7\xbd\x15\x84\x32\xee\x00\xf7\
\x3c\x8f\x0b\x13\x34\x8d\x1c\x4c\x27\x1a\x35\xcf\xc2\x71\xfd\x10\
\xe8\xf1\xab\xe2\xda\x5c\x8f\x70\xc1\xe7\x5c\x54\x9e\xf6\xb0\x66\
\xe7\xe3\xb8\x4e\xc0\x17\xa3\x77\x22\x9c\x89\x6c\x03\xdc\x4e\xb4\
\x90\xe7\x4b\x7c\x5e\x1c\x02\x41\xfc\xb6\x91\xa8\xf9\xfc\xd3\xa1\
\x8d\x3f\x19\x5e\x23\x2a\x02\x3f\xf7\x6b\x9c\xbd\xdd\x64\xec\xc3\
\x71\x10\xa4\xec\xe3\xbc\x08\x9c\x61\x43\xb0\x66\x58\x7a\x6a\x5d\
\xcc\x54\xc3\xf6\x25\xde\xea\xee\x36\x78\x3e\x49\x4a\x9c\xc2\x8c\
\x8b\xc5\x49\xb5\x90\xa4\xf2\xdd\x31\x54\x6e\xd2\xcd\xf9\xf3\x04\
\x99\x2a\x25\xa8\xa9\x3f\x4a\xa1\x0f\xc6\x14\x01\xc1\xe2\x6a\x96\
\xef\x4e\xc1\x78\xdc\x36\x4f\xd9\xc5\xe0\x1e\xa2\x61\x2c\x53\xbe\
\xff\x58\xf6\xb3\x8b\x06\xe7\xcb\x3c\xce\x8f\x93\x63\x64\x3a\x99\
\x51\xc6\x3b\x1e\xd6\x52\x35\x47\x87\x28\x0e\x72\x35\x1b\x07\xa5\
\x40\xa2\x17\xe5\x79\xcd\x27\x29\x49\x4e\x2c\x90\x7d\x89\xb2\x1f\
\x6f\x55\x00\x72\xb6\xe1\x12\xac\x8d\x12\x5c\x14\xbf\x36\x34\x78\
\x94\x85\x80\x7f\xe1\xed\xea\x22\x50\xba\x79\x44\x99\x35\x0c\x4c\
\xf8\x6d\x21\x33\x05\x8b\x71\x06\x35\x6d\xee\x6f\xef\x05\x7a\x9e\
\xb8\xe7\x19\x1b\x64\x18\xb3\x21\x3e\xb6\x5b\x8d\x14\xfa\xe0\xd3\
\x02\x25\x56\xcb\x0d\xcf\xa9\x71\xe0\xe4\x9e\xb3\x49\x66\xfd\x2e\
\xa3\xfc\xb8\xdc\x76\x9c\x48\xf0\x8f\x32\xe5\xdb\x9a\xa1\xd1\xa0\
\xe4\xb8\xa3\xa0\x71\x1e\xe7\xc9\x36\x1e\x6c\x9a\x1e\xf0\xb0\x8e\
\x76\xcf\xa8\xbd\x61\x0d\x38\x27\xe5\x1b\x35\x14\xc5\x4e\x79\x5e\
\xfb\x07\x1a\x88\xec\x9f\x05\xb0\x3f\x3d\x96\xf2\x85\xc7\x07\x56\
\xc3\x86\x91\xe6\x8d\x04\x2b\xc9\xdb\xe1\x49\xf0\x97\x00\x36\xe9\
\x46\xd9\x03\x94\xae\x8a\xe5\xf6\x30\xfc\xfe\xa6\x02\x21\x13\xdb\
\xe1\x6d\xe7\x0c\xbc\x59\xdf\x8a\x8b\xf5\x61\x7c\x72\x7d\x08\x55\
\xb3\xfd\xd1\xe3\xeb\x78\x7c\x52\x2d\x65\xdb\xb1\x2e\x86\xb1\xcb\
\x7d\x4f\xff\x81\xe1\x1d\x65\x3a\x40\x8f\x4d\x90\x69\x64\x46\x86\
\xee\xb9\x1b\x56\xa1\x91\xac\x4f\x21\x39\x41\x76\x12\xd2\xcc\x57\
\x56\x91\xf9\xd4\x71\x94\x07\xfb\x8c\x0b\x2d\xfa\xe0\x13\x46\xf9\
\x3f\x7a\x9a\x47\x3d\xf2\x30\x4f\x92\xec\xd3\x5e\x66\x94\xe3\x23\
\x75\x59\x16\x9a\x95\x6f\x1d\x1d\x56\x4e\x66\xb4\x67\x5f\x0b\xa2\
\xee\x0b\xb5\x0a\xa0\xaf\x7d\x5e\x4a\xf2\x45\xb2\xe6\x98\x64\x8b\
\x22\x58\x6d\x2d\xc9\x4c\x0b\xcf\xc2\x2f\x45\x0f\xaa\x49\x86\xdf\
\xed\x96\x07\xe2\xb0\x33\xda\x18\x5d\x87\x9e\x21\x0b\x1c\xda\xb9\
\x0c\x55\xe8\xb7\x40\x10\xfe\x62\xa7\x12\x22\x58\xa6\xa0\x70\x2f\
\x03\x2d\x83\xfb\x9c\x08\xad\xe6\x6a\x4b\x15\xf8\xa5\x0c\x8f\x45\
\x9f\xc1\x0e\x5f\x28\x10\x72\xf5\x24\xd8\xa7\xb9\xe0\x68\x6f\x96\
\x5a\xd4\xb3\x1d\xb3\x2d\x71\x1e\x5b\x9c\x14\x4c\x47\x5a\xf4\x43\
\x7f\x46\xf9\xfd\x3c\xce\xa3\xf3\x32\x9e\x2b\x49\x71\xd1\x38\x91\
\xae\x7d\x44\x99\x1f\x9c\x72\x5b\xd7\xc4\xd8\x06\x73\x3c\x08\xf7\
\x62\xb6\xe9\x00\x07\x03\x70\x5b\x50\xa3\xc4\x0f\xcd\xf3\x3e\x75\
\xb4\xc5\xd3\xfd\xaa\x8c\x65\x7c\xd5\x86\x60\x5d\x9e\x50\xe0\x79\
\x9e\x8c\x53\x7d\x61\x66\x86\x64\xa1\x0a\x7a\xb0\xbd\x97\x41\xbb\
\x00\x6f\x7d\x15\x4b\x80\x64\x3d\x0d\xe9\x3c\x55\x8c\xb6\x54\x2f\
\xb7\x49\xc1\x10\x99\x8a\xd3\x88\x86\xc5\x69\xe0\x43\x0f\x97\x91\
\xf6\xcc\x39\xcc\x2d\xbf\x13\xd3\x80\x7b\x33\x0f\xcf\xb2\x0d\x2d\
\xe4\x3c\x22\x05\x0f\x45\x2a\xda\x82\x7b\xbc\x33\x2a\xce\x8a\x91\
\xa1\x06\xf0\x52\xbf\x6c\xe5\xa1\xdd\x69\x9e\x2f\xdf\x43\x7c\x52\
\x60\xaa\xbd\xd4\x4a\xcb\xd7\x88\xaa\xa0\xf4\xe3\x19\xee\x03\x9c\
\xe4\xeb\x63\x3d\xd6\xfb\x36\xd3\x78\xdc\x26\x7d\xd8\xb6\xa8\xac\
\xc8\xaa\x2f\x07\xd8\x10\xac\xa7\x2c\x6f\x7b\xd7\x33\xd5\xfb\x3e\
\x70\x5b\x06\x04\xa1\x0e\x04\x61\x04\x16\xe6\xe1\x50\x9c\x87\x84\
\xb7\x56\x11\x13\xac\x6e\x9e\xfa\xe2\x44\xa2\x3b\xff\x0a\x24\x75\
\x71\xf2\x54\x47\x77\xea\x38\x6f\x98\xb4\xfb\x63\x5b\xb4\x21\xcb\
\xca\x60\x77\x2a\x24\x27\xc2\xe6\xa0\x0f\xa3\x5e\x9b\x24\xd6\x1c\
\x8f\xb9\x59\x09\x07\x16\x95\xc4\x2e\x05\xbb\x00\xb3\xcd\x3d\xd8\
\xba\xb8\xe6\xd8\x1b\x95\x32\xe1\xb8\x2f\x81\x30\xec\xc3\x28\x6b\
\x91\xc7\x76\xef\x8e\x5a\xb5\xa5\x1e\xdb\xfa\x41\x82\xa3\xd5\x36\
\x19\x5e\xf6\xcf\x24\x7a\x49\xba\x82\xbb\x2e\x1f\xf4\xb0\xff\xb4\
\xc2\x17\x2e\xaa\xf6\xfb\x17\xb4\x05\xb7\xed\xcb\x6b\x33\x3a\xaf\
\xcf\xb6\x21\x58\x9f\x24\xa8\x50\x6b\x1b\x3c\xb4\xaa\xe1\x01\x72\
\x18\x12\x83\xe7\xf0\x79\x27\xad\x06\xee\x07\xe9\x6a\xac\xee\xcd\
\xf0\x20\x34\xb9\xd2\x3f\x92\x92\x11\x76\xda\xd8\xc2\x93\xd1\xe4\
\x33\x11\xcf\xb4\x2b\x71\x01\x3f\x80\x1b\xd4\xee\xe8\xad\x65\x5a\
\x9c\x49\x36\x34\x75\x33\xea\x97\xba\xf8\x5c\xf9\x49\x0a\xf3\x65\
\x0a\x04\xf1\x67\x1a\x79\x96\xf9\x8d\x94\x0d\xbb\x27\x78\xf0\xfe\
\x3b\x84\x51\xc6\x67\x0e\x7d\x61\x8a\x87\xf5\x6a\x06\x73\xa8\x39\
\x04\xc1\x66\x7f\xf7\x30\x67\x16\xe1\xb3\xd0\x41\xe8\xa9\x9b\x54\
\xef\x95\xcc\xb9\x98\x86\xe9\x41\x3b\xb4\x6b\x5d\x62\xd9\xde\x51\
\x60\x8e\x2e\x7f\x18\xa3\xbc\x19\x9e\xda\xb6\x0f\x04\x31\xbd\xbe\
\xf2\xbc\x27\xac\x45\x0f\x6b\x9b\xe7\xda\xd6\x4c\x9b\xbb\xb2\x24\
\xcb\x6d\x2d\x2f\x4f\x5f\x78\xe8\xc7\x4a\xa8\x71\x1f\x96\x82\xe2\
\xe7\x0f\x9c\xd7\x8d\xb9\x04\x6b\x63\x88\xcf\x67\x34\xcf\xa1\xb1\
\x0d\x21\x08\x02\x77\x03\x76\xfc\x37\x9e\x1a\x59\x25\xc5\x8d\x6b\
\x76\x01\x7a\x7e\x2d\x01\x5e\xaa\x8d\x42\xc1\xe7\x1e\x08\x66\x38\
\x68\xa5\x72\xd0\xec\x25\xd9\xe8\xec\x9f\x87\xfe\x39\x18\xed\x7a\
\xde\x03\xbb\x40\x96\xdf\xe0\x81\x7e\xa9\x85\x1d\x08\x07\x87\x82\
\xd2\xa7\x10\x51\xcf\xa2\xfc\xc3\x19\xe5\xef\x96\xb0\xd7\x50\xcb\
\x70\x49\xdf\xd2\x3a\x85\xf6\xdb\xa2\x26\x1a\xc1\xbf\x08\xb4\x90\
\x0e\xeb\xf0\x10\x1b\x8d\xda\xf9\x76\x40\x4f\xc9\x52\x01\x94\xde\
\x83\xd1\xc7\xad\x32\x30\xdd\x68\x83\xa4\xef\x59\xd4\x02\x2d\x85\
\x7f\x07\x68\x5d\x85\x1e\xf0\xaf\xa2\x9d\xe5\x8e\xc4\xb2\xeb\x33\
\xda\x79\x40\x0a\x6d\xdb\x13\x4d\x45\x1e\x86\x20\xe4\xcb\x0c\x24\
\xc2\x2b\x20\x3a\x00\xed\x2a\x3c\x23\x66\xe2\x65\xe8\x21\xd4\x3a\
\x1f\x81\x6d\xf1\x41\xfe\x6e\x01\xa5\xdf\x0f\x39\x25\xad\x41\x7e\
\xf0\x16\x04\x01\x6e\x77\x8d\xf1\x9c\xa4\xf6\xe5\xbe\x9e\xfb\xb1\
\x1a\xee\xeb\x17\xa3\x06\x0f\x50\x03\xfe\x3d\xf2\x88\x75\x31\x67\
\xce\x42\xbc\xc0\xbf\x8c\x0e\x6a\x3d\x51\x2b\x5d\x9b\x5a\x77\x98\
\x60\x25\xb9\x8f\x8e\xf6\xdc\xe8\xed\x51\x03\x75\x2c\x7a\xfd\xbc\
\x04\xf4\x88\xb9\xeb\x71\xc2\xa5\xb1\x60\x6f\x2b\x50\xb7\xfa\x5c\
\xbc\x04\x7e\x22\x23\x67\x85\xf1\x4c\x12\xf9\x3a\x6a\x40\x8f\x86\
\x20\x37\xdd\x96\x9e\xe5\xb9\xd2\x45\xed\x9b\x32\x34\x04\x79\x35\
\x0f\x84\x20\x3d\xce\xf1\x10\x44\x4f\xef\x8e\xe8\x8a\x0e\x16\x6d\
\x91\x20\x6c\x07\xc5\xfb\x7c\x2c\x48\x07\x0d\x91\x80\xb6\xc1\x79\
\x74\x20\x1e\x30\x7b\x41\x10\x1d\xbb\x7e\x89\xb7\x7f\x13\x6c\x63\
\x63\x44\x9d\x12\x6a\x5b\x65\xdc\x0f\xeb\xa3\x86\x7a\x3b\x44\x23\
\x3c\xf8\xab\x64\x24\x47\x55\x9c\x67\x8d\xd1\xf9\xa5\x58\x63\x35\
\xd6\x82\x20\x07\x6b\x6e\x5f\x36\xc4\xf3\xd5\x39\x3f\x6d\x98\x60\
\x25\x19\x6f\x0e\xca\xa8\xd1\x3b\xe0\xc1\x7a\x0b\x1a\xe4\x2d\xca\
\x50\x9e\x81\x0e\xa4\xe7\x3b\x7c\xcf\x7f\x14\x55\xbc\x17\x22\xe3\
\x2d\x63\xe5\x3d\xf0\xf0\xbe\x1c\xed\x1b\x46\xe1\x2d\xd2\xb6\xbe\
\x1f\x53\xba\x35\xa5\x81\xa4\xcc\x00\x5f\xe1\xcd\xfb\x3c\xe0\x27\
\x39\xb6\x45\x52\x98\x07\xc9\x0a\x20\x10\x08\x04\x82\x0a\xbe\x09\
\x56\xaf\x84\x83\xe7\x82\x3c\x32\xf6\x66\x68\x2c\x7d\x11\x92\xa0\
\x71\x10\x24\xc0\xf5\x59\xcf\x93\x4c\x82\x33\x19\x82\xb0\x15\x67\
\xa2\xca\xdc\xd6\x28\xaf\x39\xb6\xe5\x26\x54\x47\x72\x89\x56\xef\
\x22\x98\x68\x65\xee\xe5\x9f\x22\xd9\xba\x0a\x94\x3e\x15\xf2\x13\
\x62\xa3\x4c\xf5\x6e\x95\x5b\x4a\x20\x10\x08\x04\x02\x2e\xc1\xaa\
\x89\x87\xbc\x4d\xf0\xc6\x62\xc7\x8d\x0c\x42\xf3\x22\xbe\x45\xa7\
\x25\x4b\x5b\x7c\x22\xe3\x90\xac\xfb\x0b\xbc\x7f\x6b\x30\xec\x1e\
\xb2\x40\xed\x04\x37\xf3\xa1\xb2\x31\x08\x04\x02\x81\xc0\x27\xc1\
\x6a\x8e\xcf\x5b\x71\x87\x78\xeb\x12\xed\x84\x9e\x0c\x9b\xaf\x36\
\x19\xca\xd5\x86\xe9\xb9\xd1\x5f\x26\x34\xcb\xe8\xf1\xe7\x98\x7e\
\x1c\x2e\xfd\x23\x10\x08\x04\x02\x9f\x04\xeb\x60\x48\x8e\xad\xd2\
\xb8\x04\x3b\xa0\x0e\x91\xbc\x1c\x97\x47\x19\x5b\x02\x3d\xd4\xc5\
\x53\x32\xa9\x49\xa8\x08\xf1\x71\x52\xde\x90\xfe\x11\x08\x04\x02\
\x81\x4f\x82\x75\x2c\x1a\x69\xc7\x45\xa9\xdd\xb2\x04\x3b\xe0\x35\
\x30\x07\x3c\x3b\xa8\x00\xe4\xdc\x08\x82\x78\x1e\x2e\xd1\x97\x05\
\xff\x46\x9c\x83\xc1\x24\xe9\x1b\x81\x40\x20\x10\xf8\x24\x58\x67\
\x42\x7c\x7e\xa9\x6f\xa1\x34\xd2\xb6\xe4\xe2\x2c\x03\x51\x59\x58\
\x80\xee\xbd\xd4\x54\x22\x7d\x64\x72\x1b\x31\x0d\xb2\x4b\x6f\x22\
\x10\x08\x04\x82\x72\x4c\xb0\x2e\x84\xf8\x00\xa0\x73\x4a\xac\xe1\
\x9b\x81\x39\x31\x64\xcb\x94\x65\xd8\x0f\x3d\xe9\x0e\x05\x5e\xc4\
\xed\x3d\x81\x16\x99\xb6\xb9\x4c\xf0\x44\x4c\x86\xf8\x34\x21\x95\
\xa5\x7f\x04\x02\x81\x40\xe0\x8b\x60\x5d\x97\x70\x70\x4f\x2e\xb1\
\x86\xf7\x36\x90\x93\xd3\x52\xac\xbb\x7a\x8c\xf6\xe4\x53\xd8\x30\
\xdf\x5e\x1c\xea\x83\x39\x1f\xd7\x62\xf0\x10\x28\xad\x84\xf1\x7e\
\x4c\xbf\xfd\x06\xc5\x99\x92\x48\x20\x10\x08\x04\x05\x4a\xb0\x6e\
\x86\xf8\x34\x39\xef\x95\x58\xc3\x93\xe2\x4d\x8d\x4d\xb9\x6e\x93\
\x2d\x15\x40\x10\x49\x96\x42\xb2\x7e\x02\x73\xc4\x77\x99\xe8\xd1\
\x18\x17\xd3\x67\x7f\x42\x69\x45\x7e\x16\x08\x04\x02\x41\x9e\x09\
\xd6\x40\x88\x4f\x6c\x3c\xae\x84\x1a\xdd\xce\x40\x4a\xd2\x4e\x23\
\xf1\x31\xe1\x79\x6f\x35\x04\xd1\xde\x4d\x65\x35\x21\x94\xd5\x55\
\x26\x3a\xcb\xc1\x61\x2d\x28\xbd\x8d\xf4\x8f\x40\x20\x10\x08\x7c\
\x11\xac\x7b\x41\xe9\xbf\x20\xdb\xbc\x7f\xf9\xc0\xe8\x04\x32\xf2\
\x61\x06\xf5\x1f\x0f\xf4\xd8\x56\xcf\x12\xca\x3b\xc6\x50\xc6\x22\
\x99\xe8\x91\x18\x91\xd0\x67\x8d\xa4\x7f\x04\x02\x81\x40\x20\x04\
\x8b\x8e\xba\x10\x9d\x39\x3b\x2b\xc3\xf6\x32\x9c\xc3\x20\x59\x53\
\x08\x36\x41\xfd\x0c\x65\x0c\x91\xc9\xbe\x01\x5e\x4a\xe8\xaf\x86\
\xd2\x3f\x02\x81\x40\x20\xf0\x45\xb0\xee\x46\xfb\x93\x7c\xd8\x25\
\x65\x85\xf3\x12\x0e\xd5\x69\x19\xcb\x72\x58\x82\xd7\xa6\x8d\x93\
\xc1\x02\x43\x19\xb3\x40\xe9\x8e\x32\xe9\x45\x83\x25\x10\x08\x04\
\x82\x6c\x09\xd6\xad\xe8\x41\x15\x75\xe0\x4c\x28\x91\x06\x0f\x4d\
\x38\x54\xcf\xcc\x83\x3c\x55\x40\xe9\x2b\x40\xe9\x8f\x08\x24\x6b\
\x84\xa1\xac\x3d\x88\x64\xed\x2d\x50\xfa\x04\x99\xfc\xb1\x4f\xc5\
\x6b\x41\xe9\x06\xd2\x3f\x02\x81\x40\x20\xf0\x45\xb0\xfa\x83\xd2\
\xcb\x62\x0e\x9d\x8f\x4a\xa4\xc1\xef\x26\x10\x8f\xed\xf3\x2c\xdb\
\xbe\xe8\x61\xb8\x30\x41\xc6\xab\x0d\x65\x5c\xc5\x78\x7a\x9c\x03\
\x41\xe0\xd2\xf2\x3a\xf9\xc7\xc7\xf4\xcb\x2a\xf1\x22\x14\x08\x04\
\x02\x81\x4f\x82\x75\x19\x1a\x44\x47\x1d\x3a\xd3\x4b\xa0\xb1\xd5\
\x20\x48\xf9\x13\xd5\xbe\x15\xa0\x74\xd5\x02\x91\xb3\x12\x28\x7d\
\x38\x28\x7d\x13\x6a\x9b\x3e\x05\xa5\xe7\x81\xd2\x4b\x40\xe9\xc7\
\x08\xdf\xae\x67\xe2\x1d\x50\xfa\x90\x72\x38\xf9\xe3\xe2\x60\x2d\
\x07\xa5\xb7\x90\xcd\x41\x20\x10\x08\x04\xbe\x08\x56\x2f\x88\x4f\
\x2a\x3c\xbf\x04\x1a\xdb\x3e\x81\x64\x8c\x2a\x02\xf9\x2b\xe3\x93\
\xa2\xe9\x77\x7d\x2c\x48\x56\x99\x46\xeb\xc4\x72\x34\xf9\xe3\x22\
\xb9\x2f\x81\xd2\x4b\x0b\x25\x10\x08\x04\x82\x3c\x12\xac\xee\x10\
\x1f\xa3\x69\x19\x6a\x80\x8a\xb9\xb1\x49\x9e\x76\x97\x97\xd8\xc0\
\x9e\xc2\x30\xa0\x0f\x63\x3c\x28\xbd\x7f\x39\x98\xfc\x9f\xc5\xb4\
\xff\x4b\xd9\x18\x04\x02\x81\x40\xe0\x93\x60\xb5\x87\x20\x1c\x43\
\xd4\xa1\xb3\xae\x04\x0c\x7f\xef\x4f\x20\x15\x07\x97\xe0\xe0\x56\
\x02\xa5\x0f\x00\xa5\x9f\xb3\x24\x5a\x93\x40\xe9\x93\x4b\x78\xf2\
\x7f\x55\xe2\xf6\x86\x02\x81\x40\x20\x28\x10\x82\xd5\x12\x82\xc0\
\x96\x71\x07\xee\x6e\x45\xde\xd8\xc7\x13\xda\xd6\xb4\xc4\x07\x7a\
\x0f\x50\xfa\x49\x4b\xa2\x35\x0e\x82\x84\xd4\xa5\xd4\x1f\x15\x41\
\xe9\x6f\x63\xda\xfb\xba\x6c\x0c\x02\x81\x40\x20\xf0\x49\xb0\xea\
\x83\xd2\x77\x26\x1c\xb4\x1d\x8a\xbc\xb1\x43\x12\xda\xb6\x63\x39\
\x19\xf0\x5d\x41\xe9\x4b\x41\xe9\xaf\x2d\x88\xd6\x28\x7c\x7a\x2c\
\x85\x7e\xa8\x06\x4a\xff\x12\xd3\xce\xe7\x64\x63\x10\x08\x04\x02\
\x81\x4f\x82\x55\x19\x94\xbe\x30\xe1\x80\x3d\xab\xc8\x1b\x7b\x7b\
\x42\xdb\x5a\x97\xc3\xc1\xef\x81\xf6\x46\x5c\xa2\xf5\x56\x09\x68\
\xb4\xea\x42\x10\xef\x4a\xa2\xde\x0b\x04\x02\x81\x20\x75\x82\x55\
\x01\x94\x3e\x2e\xe1\x60\xbd\xa5\xc8\x1b\xdb\x3b\xa1\x6d\xff\x29\
\xc7\x93\xa0\x3b\x24\xa7\x8d\x49\xca\x93\xd8\xa5\x48\xdb\xbc\x57\
\x42\xbb\xee\x92\x8d\x41\x20\x10\x08\x04\xbe\x09\x56\x52\x34\xf0\
\x17\x8a\xbc\xb1\xfb\x26\xb4\xed\x31\x99\x0c\xba\x31\x28\xfd\xa0\
\x05\xd1\x7a\x0f\x8a\x2f\x8e\xd6\x89\x09\xed\xb9\x4e\xe6\x82\x40\
\x20\x10\x08\x7c\x13\x2c\x0d\x4a\xaf\x8e\x39\x78\xa6\x96\x80\xdd\
\xcd\xef\x31\x6d\xfb\x56\x26\xc3\xbf\x88\xd6\x20\x0b\x3b\xad\x11\
\xa0\xf4\xde\x45\xd2\xc6\xa4\x88\xf7\x3d\x8b\x6c\xbc\x36\x06\xa5\
\x6b\x11\x50\x9e\xa3\xd3\x2b\x6c\xbf\x09\x5b\xca\xfa\xcf\x0c\x9b\
\x13\xc7\xa4\x0e\xfe\x36\x2b\xe7\x17\x8a\x3c\xf5\x70\x4e\xc9\x38\
\x0a\x58\x04\xab\x02\x28\x3d\x3b\xe6\xe0\xf9\x13\x94\xde\xac\xc8\
\x1b\x3c\x06\x24\xc1\x2f\x07\xc7\x81\xd2\x9f\x30\x89\xd6\x44\x28\
\xfc\xb0\x17\x49\xa1\x2b\xda\x14\xd9\x18\x3d\x08\x4a\xff\x45\xc0\
\xec\x72\x3c\x8f\xfb\x82\xd2\x7f\x13\x70\x9b\xac\xf9\xcc\x30\x8a\
\x38\x26\x7f\x83\xd2\xe7\x65\x24\x53\x7b\xa2\x3c\x4b\x41\xe9\x4d\
\x64\x0c\x05\x36\x04\xeb\xd5\x84\xc3\x67\xef\x22\x6f\x70\xd2\x13\
\xd8\x69\x29\xd4\x57\x1b\x1d\x07\x1e\x07\xa5\x1f\x01\xa5\x4f\x85\
\xe2\x4c\xc3\xd2\x09\x82\xb8\x58\x1c\xa2\xf5\x2a\x28\xdd\xac\x40\
\xdb\x33\x0d\xe2\xe3\xbd\xd5\x2e\xb2\xb1\x79\x8b\x38\x1e\x1f\x96\
\xe3\x8d\x6e\x18\xb1\x8f\xce\x97\x43\x21\x33\xcc\x61\xec\x25\x59\
\xd9\xc8\x9e\x4b\x94\xe7\x2b\x19\x3f\x81\x2d\xc1\xea\x5f\xc2\x9e\
\x84\x47\x26\xb4\x6d\xae\xe7\xba\xfa\x43\x90\xe3\x30\x5c\xcf\xaf\
\xa0\xf4\x6b\xa0\xf4\xce\x1e\xea\x68\x00\x4a\x3f\x00\x4a\x5f\x00\
\x4a\x37\xcf\xa0\xff\x9a\x81\xd2\xf7\x80\xd2\xbf\x31\x36\xc7\xff\
\x81\xd2\xed\x0a\x68\x0e\x68\xbc\x85\xc6\xa5\x0b\x2a\xb6\x39\xfd\
\x3e\x71\x1c\xca\xb3\xf1\xfe\x04\x62\x1f\xed\x2b\x87\x42\x26\xd8\
\x0c\xe2\xf3\xc2\x46\xe1\xf0\x8c\xe4\xba\x9a\x28\xcf\xd3\x32\x86\
\x02\x5b\x82\x95\xe4\x49\x78\x4d\x09\x34\xfa\x97\x84\xf6\xf9\x32\
\xd6\xee\x42\x5c\xa8\xb7\x5b\x6a\xb4\xaa\xa2\x1d\xd1\xef\x11\x24\
\xf1\x31\xb4\x23\xda\x29\xc5\x3e\xac\x89\xcf\x29\x1c\xa2\x35\x1a\
\x0a\x23\x58\xed\xfe\x06\x3b\xb2\x62\x9b\xcf\xd3\x89\xfd\x7f\x6e\
\x39\xde\xe8\xbe\x25\xf6\xd1\xf6\x72\x28\x64\x82\x5d\x98\xda\xf0\
\xbd\x32\x92\xeb\x0e\xa2\x3c\xd7\xcb\x18\x0a\x6c\x09\x56\x93\x84\
\x89\x35\xb8\x04\x1a\x9d\x64\xe0\xfc\xb9\x27\x43\xc9\x2f\x18\x9b\
\xc7\x0f\x10\x44\x59\xef\x85\x1b\x7c\xa5\x88\x32\xab\x20\x61\xea\
\x03\x41\x78\x84\x25\xc4\xb2\x3f\x87\x20\x32\xf9\x83\xa8\x66\xdf\
\xc1\x73\x5f\x6e\x89\xcf\x9e\x6f\x33\xda\x3b\x12\x94\x3e\x22\x8f\
\xe3\x7f\x59\x82\x6c\x57\x15\xa1\xe3\xc6\xaf\xc4\x7e\x3f\xa4\x9c\
\x6e\x72\x0d\x88\xfd\xf3\x67\x11\x3e\x0f\x17\xb3\x6d\xe7\x7a\xc6\
\xb8\xe8\x8c\xe4\x7a\x9a\x28\x53\x37\x19\x43\x81\x2d\xc1\xaa\x9a\
\xa0\xbe\xfd\xb8\x04\x1a\x5d\x0d\xe2\x83\x4c\xae\x47\x82\xe9\x52\
\xfe\x4e\x60\x97\x92\x26\xd7\xa3\x71\x12\x04\x49\x97\xc7\xa3\xd1\
\xf8\x12\xc7\x32\x73\x31\x13\x94\x7e\x08\x89\x51\x43\x8f\xfd\xda\
\x16\xe5\xa5\xca\xf1\x0a\x04\xe9\x99\xb2\x1e\xff\x24\x1b\xc3\xf6\
\x45\x36\x97\x9b\x33\xfa\x7b\x9b\x72\xba\xc9\xb5\x25\xf6\xcf\x8f\
\x10\x04\x5b\x96\x83\x21\x7d\x5c\xc9\x98\xb7\x59\x3e\xdb\x7f\x44\
\x94\x69\x4f\x19\x43\x81\x2d\xc1\xaa\x00\xf1\x11\xbe\xd7\xa1\x8b\
\x6a\xb1\x37\x7c\x50\xc2\xe2\x19\xeb\x58\xf6\x8e\x1e\xc9\x50\x16\
\x98\x8e\x64\xe7\x3e\x50\xfa\x04\x08\x52\x26\xb9\xb4\xbf\x15\x28\
\x7d\x13\x28\xbd\x98\x58\xff\xcb\x78\xa3\xcd\xb7\x71\xed\xaa\x0c\
\xdd\xc1\x7d\xe1\x58\x62\x1f\x2f\x42\xcd\x6a\x79\xdc\xe4\x7a\x13\
\xfb\x68\x9a\x1c\x08\x99\xe1\x29\xa6\xb3\x4c\x56\xe1\x4e\x7e\x26\
\xc8\xf3\x47\x11\xee\x13\x82\x02\x23\x58\x2f\x24\x4c\xb0\xae\x25\
\xd0\xf0\x3d\x0d\x8b\xc8\xd5\x28\x7b\x41\xc2\x21\xfe\x7d\x81\x13\
\xae\xb5\x78\x93\xbb\x13\x6d\xc9\x6c\x9f\x4d\x36\x81\x20\x68\xe7\
\x22\xc6\xd3\xe1\x3e\x29\x8f\x7b\xa5\x18\xc7\x83\x62\xf5\xb2\xbb\
\x8c\xd8\xb7\x13\xca\xf1\x26\x77\x17\xb1\x8f\x5e\x90\x03\x21\x33\
\x7c\xc8\xd8\x8f\x06\x66\x24\x53\x13\xa2\x3c\xb3\x65\xfc\x04\xae\
\x04\xab\x57\xc2\x04\x1b\x56\x22\x8d\xff\x5f\x42\x1b\x97\xe0\x53\
\xa2\x6d\xd9\xfd\x12\x6e\x3f\x63\xb0\x7f\xa7\x3a\x90\xa0\x25\x68\
\x0f\x77\x02\x7a\x3e\x5d\x80\x84\xe8\x69\x7c\x02\x5c\xeb\x91\x70\
\xfd\x89\x1b\xe2\x5d\x58\xcf\xe1\x10\x04\xae\xa4\xf6\xc5\x46\x10\
\xc4\xb1\x79\x8d\x58\xdf\x30\x50\xfa\xa8\x94\xc6\x3c\x29\x9a\xff\
\xa3\x45\x38\x87\xa9\xe1\x07\xca\x73\x7e\xc5\x37\x88\x7d\x74\x93\
\x1c\x08\x99\x40\xe1\x73\x2c\x75\xff\x39\x3b\x23\xb9\x3a\x32\x4c\
\x1b\x64\x1c\x05\x4e\x04\xab\x51\xc2\x04\x5b\x5a\x22\x8d\xdf\x0e\
\xd2\x8b\x89\x53\x09\x0d\xcc\xe3\xca\xee\x8f\xbf\x3b\x09\x82\x54\
\x33\x94\x85\xfd\x0b\x3e\xa7\x75\x43\x3b\x39\x93\x61\xef\xd1\xa0\
\xf4\x0d\x48\xe8\x7c\xda\x70\x95\x11\xc5\xf1\xa0\xf4\x00\xb4\x5b\
\xd2\x0c\x82\xf3\x12\x63\x23\xf3\xed\x3d\x74\x73\x42\x7d\x37\x14\
\xe1\x1c\xa6\x86\x68\xe8\x57\x8e\x37\xb9\x99\xc4\x3e\x3a\x45\x0e\
\x84\x82\xd8\x77\xc3\x38\x28\x23\xb9\x2e\x26\xca\x23\xc1\x68\x05\
\xce\x04\xab\x82\xc1\x13\x6e\xef\x12\xe9\x80\xc7\x0d\xde\x7d\x2e\
\x91\xeb\x6b\x26\xd8\xb2\x85\xe3\x12\x35\xc6\xa7\xd7\x1b\x50\x93\
\xf2\x14\x28\x3d\x14\x94\xbe\x15\x3d\x07\x0f\x74\x7c\xf7\xaf\x0c\
\x41\x88\x84\xff\x80\xd2\xf7\x83\xd2\x6f\xa2\xcd\xc9\x6f\x9e\x08\
\xd7\xef\xe8\x49\x78\x03\x12\xbb\x3d\x40\xe9\xea\x09\xf2\xb4\x46\
\x6d\x1b\xc5\x4e\xeb\x61\xf0\x17\x9f\x68\x5a\x8a\xcf\xc2\x59\xa3\
\x12\x83\x38\x77\x2e\xa7\x1b\x5c\x45\x50\xfa\x1b\x90\x18\x58\x85\
\x84\xc3\x99\x7b\x4b\x56\xce\x19\x0f\x11\xe5\xe9\x29\x63\x28\xf0\
\x41\xb0\xee\x4c\x98\x64\x77\x97\x48\x07\x54\x37\x2c\xa6\x77\x1d\
\xcb\x4f\x0a\x6a\xb9\x1e\x82\x74\x11\xf9\x6c\x7f\x0d\x08\x52\xc3\
\x5c\x0c\x41\xa8\x88\xe9\xe0\x4f\xcb\xb5\x1c\x9f\x67\xae\x47\x82\
\x18\x47\x56\xfb\x18\xb4\x7d\x65\x78\xcb\xd1\x7b\xa7\xb1\x41\x3b\
\x58\x6c\x73\xb7\x11\x63\x2c\x9a\x96\xd3\x0d\x6e\x53\x88\xcf\x3f\
\x9a\x8b\xd5\x19\x86\x02\x28\xef\xb8\x88\x31\x6f\x17\x65\x28\xd7\
\x3b\x44\x99\x3a\xc8\x18\x0a\x7c\x10\xac\x83\x12\x26\xd9\xf8\x12\
\xea\x84\x27\x0d\x0b\xca\xd5\xa8\x7f\x6f\x08\x72\xc1\xc5\x95\x7f\
\x5f\x01\x1e\xdc\x5d\xf0\xf9\x6f\x0c\x28\xbd\x10\x9f\x04\x5d\x09\
\xd7\x32\x08\x62\x72\x5d\x09\x41\xb0\xcf\x70\x62\xdd\xa3\x80\x16\
\x71\xfb\x51\xb0\x0b\x08\x79\x61\x89\x19\x38\x1f\x42\xec\xf7\xbf\
\x50\x9b\x5a\x1e\x37\xb8\x6d\x89\x7d\x34\x57\x0e\x83\xcc\x70\x0f\
\x63\xcf\x78\x37\x43\xb9\xa8\x9a\xce\xfd\x65\x0c\x05\x3e\x08\x56\
\x85\x84\x27\x9c\xe5\x68\xbc\x5c\x2a\x1d\xf1\xbd\x41\x13\xe3\x9a\
\x39\xbd\xa9\x61\xd1\x16\xba\x46\xb0\x36\x28\xbd\x2b\x28\x7d\x32\
\x3e\x31\xbe\x4f\xd4\x0c\x24\x61\x15\x28\x3d\x1f\xed\xca\xfa\xe4\
\x3c\x81\xd6\x45\x32\xb4\xd4\xf0\xfd\x58\xc3\x33\x64\x18\x49\xda\
\xb9\x63\x8a\x70\xce\xf6\x61\x38\x44\xa8\x72\xba\xc1\x1d\x46\xec\
\xa3\xd7\xe5\x30\xc8\x0c\x4f\x33\xf6\x88\xac\x02\x5b\xd7\x67\xc8\
\xd4\x5c\xc6\x50\xe0\x8b\x60\x8d\x4b\x98\x68\x47\x97\x50\x47\x1c\
\x6a\x58\x54\xcf\x7b\xa8\xa3\x6b\x89\x19\x22\xd7\x04\xa5\x0f\x00\
\xa5\x2f\x01\xa5\x9f\x43\xf7\x65\x57\x2d\xd7\xd7\xe8\x96\xdd\x19\
\x82\xe4\xdb\xf7\xa0\x06\x2d\xce\x2b\x72\x3e\xd1\x36\xad\x56\x42\
\x9d\x6b\x1c\x3d\x46\xf3\x85\xbb\x89\x7d\xfa\x65\x39\xde\xe0\xce\
\x21\xf6\xd1\x20\x39\x0c\x32\xc3\xab\x8c\xfd\xe0\xd2\x8c\x64\x3a\
\x80\xe1\xdc\xb3\x85\x8c\xa1\xc0\x17\xc1\xba\x2e\x61\xb2\x8d\x2b\
\xb1\xce\xf8\xc0\xb0\xb8\x2e\xf2\x50\xc7\xd1\x86\x3a\xba\x94\x80\
\x87\xd0\x01\xe8\xe9\x78\x2d\x6a\x06\x16\x3a\x3c\x29\x8e\x86\x20\
\x73\xfd\x9a\x84\xdf\xbd\x47\x90\xab\x73\xc2\xf7\xcf\x16\x69\x5f\
\xbf\x49\xec\xc7\xb7\xcb\xf1\x06\x37\x88\xd8\x47\xbd\xe5\x30\xc8\
\x0c\x93\x19\x7b\x40\xc7\x8c\x64\x3a\x8b\x28\xcf\x67\x32\x7e\x02\
\x9f\x04\xcb\x64\x48\x5b\x4a\xb9\xbb\x28\xf6\x1a\x3e\x9e\x45\x6f\
\x30\xd4\xb1\x5b\x89\x4d\xb2\x8a\xa0\x74\x0b\x08\x52\xf3\xdc\x07\
\x41\x10\xd3\x15\xe0\x37\x6c\x44\x23\x83\x0c\x2f\x94\xa0\x57\x10\
\x55\x63\xf8\x70\x39\xde\xe0\xa8\x31\xb0\x0e\xf5\x58\xe7\x56\x68\
\x12\xd0\x14\x3d\xe0\x94\x1c\x34\xff\xf2\x66\x5e\xc6\x58\xd7\xbb\
\x66\x24\x17\x35\x18\xed\x08\x19\xc3\xbc\x3a\x64\x35\x86\x20\x53\
\xca\xf6\xc5\xa2\x49\x34\x11\xac\x0a\x90\x1c\x90\x73\x40\x89\x0d\
\xe2\xe9\x86\x05\xf6\x3f\x4f\xf5\x24\xc5\x63\xfa\xbe\xc4\xec\xdb\
\xe2\x9e\x17\x0f\x41\x3b\xa2\x5b\x20\x08\x4b\xe1\x12\x20\xb5\xa9\
\xe1\x79\x30\x49\x03\xb6\x73\x01\x39\x17\x9c\x06\x4a\x5f\x83\x76\
\x6e\xcf\xa2\x91\xef\x0c\x08\xd2\xfb\xcc\xc1\x3e\x9a\x00\x41\x2c\
\xb1\x95\xc4\xbe\xb9\xd0\x41\x1b\x79\x06\x04\x01\x38\x87\x22\x59\
\xf9\x2c\x47\x96\x69\x68\x3f\xf7\x20\x3e\xe5\x1c\x56\x80\xf3\x6c\
\x2e\xa1\x7f\xd6\x81\x7d\x4e\xce\x5d\xf0\x69\x7f\x10\x6a\x0a\xe7\
\x87\x1c\x5a\xd6\xa1\x1d\xeb\x04\xec\xa7\xbe\x29\x78\x74\x36\x82\
\x20\x66\xdf\x9d\xa0\xf4\x70\xbc\xc0\x7c\x8e\x63\x34\x09\x2f\x17\
\xf7\xe2\x53\x7e\xbe\x73\x6d\x36\xf5\x7c\x71\xf2\x85\xe1\x44\x79\
\x6e\x4d\xa9\xfe\x8d\x71\xbd\xed\x0a\x4a\x1f\x0c\x4a\x77\x82\x20\
\x88\xf4\x29\xa0\x74\x0f\x5c\x87\xff\x01\xa5\xcf\xc4\x0b\x61\x18\
\x71\x97\xf2\xfd\xf1\xfb\x13\x63\xd0\x0d\x2f\xbe\x8d\x13\x6c\x18\
\x2f\x43\x5b\xb8\x31\xa0\xf4\x2c\x9c\x57\x33\x90\x6c\x0e\x84\x74\
\x32\xbb\x6c\x8e\x6d\xbf\x15\xe7\xef\x6c\xd8\x30\xa9\xfd\x2f\xb8\
\x1f\x3e\x8d\x2f\x6d\xbe\xc3\xec\x28\x7c\xf9\xb8\x01\xf7\xbf\x71\
\x39\xed\xff\x0c\x5f\x10\x86\xe0\xdf\xbb\xa1\xed\xb0\x15\xc1\x3a\
\x29\x61\xc2\x7d\x57\x82\x87\xff\x8c\x8c\x0c\x2f\x47\x26\xd4\x01\
\xe5\xf4\x96\x52\x3f\x67\x52\xbf\x09\xb4\x34\x3b\xd3\x21\x39\x49\
\xef\x35\x09\xdf\xce\xca\x73\x7b\x6b\x41\x10\xe5\xfe\x43\x48\x2f\
\xf5\xd1\x91\x4c\x99\x3a\x18\xe6\x66\x12\x26\x43\xe1\x04\xec\xac\
\x89\x04\xc7\x24\xb3\xcd\x1e\xd6\x1e\x09\x95\xed\x98\x0c\x06\xf7\
\xf8\x4e\xdb\x42\x10\xc7\x6f\x0d\xb3\xee\x2f\x40\xe9\x1b\xe3\x0e\
\x84\x94\xd1\x91\x21\xe7\xca\x0c\xb5\x14\x63\x89\x32\x9d\xee\xa1\
\x2e\x8d\xc4\xe5\x7a\x24\x0f\x33\xc1\x3d\x7d\x5a\x6f\xa6\x93\x9a\
\xc9\x9e\xba\x3b\x28\x3d\x91\x51\xff\x67\xa0\x74\x33\x0f\x7d\xb3\
\x0d\x5e\x14\x7e\xb2\xec\x87\x8f\x3c\x99\xd9\xf4\x64\x78\x95\xe6\
\x66\x3c\x79\x3a\x4c\xf4\x28\x04\x6b\x13\x83\x5a\x77\xbb\x12\x3b\
\xe4\x77\x25\x6c\xcc\x3e\x72\xe6\x6d\x02\x4a\x4f\x81\xd2\x8f\x35\
\xe6\xfa\xa4\xb0\x27\xde\xdc\x1e\x80\x20\xdd\xce\x14\x50\x7a\x1e\
\xde\x6a\x9e\x31\x3c\x53\x6f\x64\xd8\x64\xce\xc9\x53\xbb\x5a\xa1\
\xe6\x6e\x65\x8a\xc4\x8a\x1b\x03\xeb\x70\x07\x62\x15\x65\xd7\xb6\
\x63\x9e\xe7\x4e\x6b\xa2\xac\xd3\x99\xe3\xf6\xb2\xa7\x3e\x5a\x05\
\xf6\x69\x60\xce\xf4\xe0\xc5\xfb\x07\x3a\x91\xb4\x80\xec\x12\x81\
\x5f\xc8\x90\x6f\x41\x86\x73\x65\x12\xa4\x1b\xa2\xa1\x39\x6a\x8a\
\x5e\x46\xaf\x74\xdf\x6b\x7c\xbf\x88\x3a\xab\x32\xf6\x17\x9d\x63\
\xec\x3f\xc2\x52\x86\x35\xa8\x75\xb3\xe9\x9f\xba\xa8\x09\xf6\xb5\
\x1f\x0e\xb5\x24\x7c\xcd\x0c\x2f\x76\x54\x8c\x41\xd2\x5a\x93\x42\
\xb0\x2a\xe0\x8d\x2b\xdf\xae\xb4\x59\xa2\x13\x98\x13\x22\x6f\xed\
\xa9\xae\x24\xa6\x2c\xd1\xa5\xdd\x70\x24\x24\xc7\x87\xaa\x9c\x07\
\x7b\xb4\x87\x33\x20\x55\x65\xf8\x89\xd8\xc6\x47\x53\xaa\x3f\x9f\
\xf3\xb7\x07\x51\xc6\x09\xc4\xf2\x06\xa6\xd4\x47\x13\x40\xe9\x2a\
\x8c\x76\x0d\x4d\x41\x86\x2d\x33\x1a\x93\x47\x18\x32\xbd\x9d\xe1\
\x9a\x5c\x44\x24\x10\x5c\x9b\xe3\xdd\xf1\x12\x98\xf6\x3a\x8f\x32\
\x73\xd8\x8d\xf8\xed\x07\xf8\xfb\x2b\x3c\xc9\xd2\x98\xd9\x47\x6d\
\x52\x22\x9d\xdc\xfd\xe7\x88\x14\xea\xbf\x89\x4a\xb0\xba\x41\x72\
\x14\xe4\x6a\x25\x78\x38\xbf\x02\xd9\x64\x55\x6f\x5d\xce\xed\xb1\
\xd2\x44\x12\x99\x79\x28\x63\x59\xf6\x82\xe4\xf4\x53\x69\x60\x12\
\x98\xe3\x9b\x4d\x4c\xb1\xfe\xdf\x2c\x36\x5c\x5f\xb8\x89\x28\xe3\
\xab\x1e\x6d\x74\x6c\x41\x0d\xdc\xdc\x2f\x85\xba\x57\x82\x39\xb7\
\xa9\x2f\x8c\x63\xc8\xf5\x48\x46\x32\x35\x20\xca\x33\x9f\x69\x53\
\x75\x59\x86\xeb\xbc\x7e\x84\x0c\x5d\x88\xdf\xbe\x82\x36\x9f\x3e\
\x2f\x0c\x9c\xa7\xb8\xb4\xfb\x86\xe2\xc0\xd2\x30\xa5\xba\xaf\xa7\
\x12\xac\x4a\x90\x1c\xf8\x71\x60\x09\x1e\xce\x55\xc1\x1c\xec\xd2\
\xd7\x21\x9d\x44\x60\x3f\x14\xa2\x64\x6d\x83\x93\x64\x34\x9f\x65\
\xc0\xc0\x5e\x19\x13\xab\x32\x3c\x65\xb0\xff\x5a\x9c\x81\x0c\xbf\
\x60\x5d\x59\x8f\x3f\x35\xa9\xf8\x5d\x86\x39\x34\x2b\xa3\xb1\x32\
\x45\x2d\xef\x94\x52\xbd\x73\x32\x1c\x93\x6f\x19\x72\x5d\x98\x91\
\x4c\x87\x12\xe5\x79\x93\x58\x5e\x0b\xa2\x46\xcc\x17\x96\xc6\x68\
\xa9\xaf\xca\xd3\x9e\xb3\x1e\xcd\x0d\x4c\xfd\x74\x79\x86\xf2\x98\
\x9c\x58\x3e\x4d\xa9\xde\xbe\x54\x82\x65\x1a\xb0\xd5\xc8\xda\x4b\
\xed\x90\xde\x9d\xd0\x89\xff\xf1\x54\xd7\x83\x90\xff\x78\x30\xa5\
\x84\xa4\x27\x9d\xe5\x19\xca\x71\x56\x1e\x37\xba\xeb\x12\x6c\xd3\
\xe6\x66\x28\xc7\xfd\x79\x18\xff\xa9\x8e\x06\xc2\x1c\xdb\x1c\x6f\
\x4f\x0a\x09\xb2\x4c\xc9\x93\x96\xd3\x17\xea\x32\xe5\x3a\x2a\x23\
\xb9\xce\x26\xca\xf3\x20\xa1\xac\xce\x0e\x9e\xd0\xb6\x98\x18\x23\
\xcb\xb0\x3c\xee\x3b\xcf\x17\xd8\x85\x33\xc9\x99\xe9\xd8\x14\xeb\
\xed\xc5\x21\x58\x9b\x18\xac\xfb\x4f\x2a\xd1\x83\x9a\x92\x92\xa4\
\x8d\xa7\xba\x16\x26\x3c\x15\x56\x15\xd2\xc4\x52\xfb\x27\x39\x2a\
\x64\x15\x35\xff\xa8\x3c\x6e\x72\xeb\x13\x8c\x4e\xc7\x5a\x94\x35\
\x0f\xed\x7f\x06\xa0\x71\xf4\x27\x4c\x0f\x9b\x2d\x33\x1c\xff\xcd\
\x80\x1e\x6b\xed\xe0\x98\x32\x1e\xcb\xd3\x98\xd5\x8f\xf1\xae\x4a\
\xab\xbe\x31\x19\x8d\xc9\xc1\x29\x39\x67\xb8\x82\x9a\x1b\xf1\x3c\
\x82\xe3\x41\x3e\xe6\xcb\x93\xc0\x0f\xaf\x94\x2f\xad\x5a\x99\xa9\
\x44\x3e\x64\x8a\x7b\x6d\x4a\xd3\x6c\xe3\x44\x0e\xc1\xaa\x00\x41\
\xdc\x87\xb8\xc2\x66\x96\xf0\x81\x3d\x9e\x30\xa1\xaa\x78\xa8\x67\
\x97\x84\x3a\x9e\x10\xe2\x44\xc6\x80\x84\x7e\xfc\x3b\x23\xe3\xf6\
\x6d\xf2\x4c\xae\xd6\x83\xd2\x7b\x40\x74\x6c\x1b\x4e\x19\x6b\xf1\
\x92\x11\xd5\xc6\x33\x18\xe5\x5c\x9b\xe1\xf8\x6f\xcf\x90\xab\x01\
\xf0\x42\xd3\x50\x72\x3f\x4e\x25\x98\x17\xc4\xe1\xe5\x08\x79\xce\
\x65\x96\x31\x1f\x94\xfe\x2f\x7a\xdd\xce\x23\x78\x7c\x66\x31\x26\
\x1c\x02\xf2\xab\xa7\xfd\x94\x82\xd7\x88\x32\x25\xc5\x79\x3b\x2c\
\x8f\x6b\x3c\x6e\x5d\x2d\x75\x28\x73\x26\x1a\xbd\xb7\x82\x20\x1c\
\x48\x57\x8b\xf2\xe2\x62\x98\x2d\xb0\x94\x69\x05\x3e\xe5\xb9\x10\
\xa2\xad\xc0\x2d\x2e\x5b\xd9\x85\xe4\x69\xd4\x1c\x9a\xa2\x0d\xb4\
\xe7\x12\xac\x7d\x0d\x05\x9e\x52\xa2\x07\x76\x15\x50\xfa\x63\x43\
\xdb\xa7\x82\xd2\x9b\x7a\xa8\xeb\x2a\xf1\x2a\x74\x46\x92\x0d\xc4\
\xd5\x19\xc9\x30\xc3\x61\x23\x58\x87\xde\x3d\x0f\x40\x10\x13\xec\
\x52\x50\xfa\x62\xd4\xbc\x51\x93\xe5\x46\xc5\x11\xda\x02\x6d\xa2\
\xa8\x72\xfc\x05\xe6\xcc\x02\x54\x2d\xdd\x77\x68\xcb\x99\x95\x43\
\x01\x45\xa6\x45\x10\x1d\xa2\xe0\x47\x8b\x31\xbb\x11\xdd\xe5\xab\
\xe4\x68\xfc\x0f\xb1\xf4\x1a\x6d\x11\x92\xe7\x09\xc6\xb7\xf7\x45\
\xf4\x73\x6b\x08\x82\x91\x0e\x8f\xf9\x7d\x16\x63\x72\x27\xa3\x0d\
\x93\x33\xdc\x2b\xe6\x10\x65\x6a\x12\xf3\xfd\x6e\x8e\x04\x69\x3a\
\xae\xe9\x3b\x41\xe9\xfe\xb8\xde\x6f\x02\x7a\x6c\xac\xa8\x40\x9f\
\xdb\x59\xca\xf2\x05\x9a\x34\x44\xb5\xb3\x1e\xae\x61\x6a\x59\x7b\
\x31\x15\x34\x71\xf8\x2f\xda\x28\xeb\xd0\x05\xea\x62\x0b\xd2\x77\
\x19\xf0\x02\x8b\x87\x6d\x24\x9b\x44\x5c\xa2\x4f\x46\x2d\xe8\xfc\
\x88\x3d\xbc\x25\x97\x60\x55\x80\xe4\xa0\x88\x2b\x4b\xf8\xd0\xae\
\x4d\x18\x04\x5f\xa9\x14\xe2\x9e\x37\x5e\x17\xf2\x64\xc4\xc5\x86\
\x31\xaa\x91\x81\x0c\x87\x38\xa8\xd6\xaf\x35\x78\xde\x1d\xcd\xd8\
\xb8\xc3\xdf\xde\x96\x82\xb1\x6a\x85\x88\xcd\x25\xdf\x17\x84\x76\
\x44\x79\xa2\x02\xfa\x72\xbd\xbf\xbe\x22\x38\x4c\x70\x0d\x7a\x87\
\x3a\x78\xdf\x51\x62\xfd\xe4\xda\x27\xde\x93\xd1\x98\x8c\x66\xb4\
\x21\x2b\xad\x5a\x0d\xd4\x68\x53\x9e\xb8\x6b\xc6\x94\xf1\x99\xc5\
\x3a\x5f\x0e\x41\xa0\xd1\x16\x1e\xd6\x54\x94\x96\xba\xbd\x85\x4c\
\xe3\x08\x17\x20\xce\xb3\x79\x6b\xc6\xeb\x4c\x1c\xce\x07\x73\xa0\
\x66\x4e\xda\xb5\x6f\x43\xdf\x5f\xca\xf8\xf6\x78\xa2\x33\x53\xae\
\x53\x4c\x63\x1b\x82\xb5\xb3\x41\x90\x93\x4b\xf8\xf0\xee\x4a\x71\
\xcd\x4c\xd9\xb3\x65\x17\x21\x51\xb1\xd8\x1a\x37\xc3\xb8\xbe\x7b\
\x2d\x23\x39\xe6\x5b\x6c\x26\x37\x00\x2d\x72\x35\x55\x23\xf2\x8e\
\xa3\x17\xd7\x93\x8c\xf6\xde\x41\x75\x5b\xce\xa8\xff\xa9\x4f\x97\
\xe1\x50\x00\xda\x30\x7f\xa2\x9e\x03\x37\x23\xca\x74\x2f\xa3\xdc\
\x8f\x42\xdf\xbe\xc5\xf8\x96\x9a\x32\xa4\x09\x6a\x4d\xb2\x7a\x75\
\x98\xca\x68\xc3\x75\x19\xc9\xb4\x37\x43\xfb\x1a\x95\x53\x92\x1b\
\x36\x63\x15\x12\x78\x53\x3c\xad\x1d\x18\xe5\x6d\xce\xf4\x4a\x77\
\x09\x6a\x7d\x3b\xa3\xcc\xdd\x43\xdf\x3e\xc0\x94\xe9\x14\xcf\x97\
\xa9\xa8\x60\xb1\x97\x30\xbe\x7b\x90\x31\xaf\x7a\xe2\xfe\x59\xdf\
\x86\x60\x55\x40\x2f\x81\xf2\x94\x3e\x87\xbb\xa8\x7c\x90\xcc\xb8\
\x38\x5c\xe3\x85\x48\xc5\x62\x90\x61\x5c\xb6\xce\x40\x86\x6e\x16\
\xe4\x6a\x6f\x46\xf9\xef\x10\xcb\x0c\x6b\x53\x5b\x32\xed\xae\x36\
\x4a\xa1\xcd\x59\x11\x5c\xea\x41\x70\x71\xe8\xbb\x6b\x99\xe3\xb6\
\x13\x53\x2e\x4e\x94\xf5\x5c\x73\x83\xff\x32\xbe\xfd\xbe\x40\xd7\
\xe6\xd7\x8c\x36\x74\xcd\x48\xa6\x93\x89\xf2\xcc\x89\x79\x32\xe3\
\xda\x10\x51\x93\x57\x1f\x4e\x2c\x33\x2e\x16\x63\x6f\x86\x5c\xb7\
\x33\xfa\xeb\x56\x4b\x1b\xac\x4a\xc0\xcb\x3c\xc0\x7d\xb6\x7e\xd5\
\xb2\xbd\x5c\x0f\xef\xfd\x98\x72\x55\xb4\x25\x58\x2d\x0c\x82\x0c\
\x28\xf1\x83\xdc\x74\xc8\xfd\x69\x50\xff\x52\x73\x8d\x15\x7a\x82\
\xe2\x42\xc2\xa6\x90\x9c\xd2\x29\xab\xc0\x85\xef\x31\x17\x6d\x37\
\x66\xf9\xd4\xf0\x0a\x43\x99\xe4\xd3\x36\x7d\x4c\x05\xf4\x56\x2c\
\xa4\x90\x00\xd4\x94\x3f\xe1\x50\x00\xb3\x19\x7d\x74\x8d\x85\x5c\
\x9c\xfc\x85\x07\x3b\x18\xb9\x8f\x04\x7e\xd4\xf1\x34\xb1\x11\x28\
\xfd\xb3\xa3\xfd\x4e\x1a\xe8\xef\xe0\x69\x79\x03\xa3\x3d\xdf\x30\
\xbd\x22\xcf\x23\x96\x3b\x2a\xe6\x7b\xea\x45\xe1\x3d\x66\x7f\x51\
\x03\x92\x2e\x83\x7f\x87\x6d\xea\xce\x74\x70\xe0\x9a\x71\xec\xc3\
\x28\x3f\x57\x41\xb1\xa3\x85\xc3\x4f\x27\x8e\x6c\xb6\x04\xab\x02\
\x1a\x7d\xd9\x18\x05\x96\x0a\x4c\xc1\xc9\xd6\x80\x7b\x9e\xc6\x38\
\x57\xf8\x9b\x84\x50\xb1\x6f\x31\x59\xe4\x5b\x53\xcc\x27\xa6\x2b\
\x99\xe5\x57\x35\x90\xc8\x5c\x5c\xe1\xf0\x3c\xf8\x33\x92\x8d\x2f\
\x40\xe9\x2f\x0d\xf8\x1c\x9f\xca\xd2\x20\x6e\xb6\xa0\x12\xa5\xdc\
\x43\x6f\xf3\x0c\xb4\x44\x1c\xcd\xc2\x79\x96\xb2\xe5\xa6\x49\xda\
\xbe\x40\xd6\x26\x37\x52\x76\x56\x72\xbf\xe0\xf0\x84\xc6\x89\x23\
\xc7\xcd\xcb\x49\x7d\x4e\xbb\x23\xe6\xfb\xa7\x88\xdf\x73\x5f\x5a\
\xa8\xe5\x86\x9d\x14\x38\x61\x61\xce\xb4\x1c\x4b\xea\xfe\xb6\x08\
\xdc\xe3\xcb\x5d\x90\x05\xc1\x32\xb1\xbf\xe7\x4a\xfc\x40\xaf\x01\
\xe6\x4c\xf6\x5f\x38\xd6\x71\x2a\xc4\xa7\x37\x10\x52\x45\x7f\x87\
\xcf\x6a\x2e\xee\xca\x58\xa4\x3f\x58\x94\x5f\x17\x82\xa0\xbe\x94\
\xf2\x8f\xf3\xe0\x55\xe4\x1b\x9f\x67\x30\x06\x9b\x01\x2d\x69\xec\
\xaf\xa1\x5b\xf6\x81\x8c\x76\x0c\xb7\x94\x6d\x7f\x46\x1d\x37\x87\
\xbe\x1d\x63\xd1\xdf\xbf\x41\x76\x01\x3b\x93\xd0\x96\x21\xf3\x6a\
\xc8\x2e\xf2\xff\xc7\x44\x99\xce\x85\x0d\xf3\xe7\x51\xdb\x63\x13\
\x5e\xe7\x0d\x62\xd9\xbd\x62\xbe\xff\x88\xf8\xfd\x61\x4c\xb9\x46\
\x11\xcb\xcd\xdd\x6f\x37\x66\x3c\x0f\x2e\x07\xfb\x10\x3a\xd4\x67\
\xc2\x55\x21\x87\x05\x5b\x87\xa4\x47\xd3\x26\x58\x94\x7c\x5f\xed\
\x4a\xfc\x60\xdf\x8f\x30\x10\x6f\x38\xd6\xf1\x23\xc1\x46\xa3\xbc\
\x63\xa9\x41\x93\xb8\x79\x46\x72\xf4\xf1\xbd\x40\x43\xd8\x89\x51\
\xfe\xce\x8e\x76\x61\x69\xe0\x8b\x0c\xc6\x80\x6a\x6b\x16\x36\x24\
\xbf\x8e\xd1\x8e\x13\x2c\x65\xe3\x10\xdd\x07\x22\x1c\x38\x6c\xfb\
\xfd\xce\x3c\xaf\x4f\xce\x13\x67\x56\xe1\x3c\xa8\x49\x9e\xd7\x23\
\x41\xcc\xfd\xf6\x71\x06\x61\x48\xd3\x5e\xed\xa0\x18\x2d\x3a\x35\
\x14\xcb\xb6\x4c\xb9\x26\x13\xcb\xbd\xd6\xf2\x52\x31\xc8\x61\x3c\
\x07\x33\xea\x09\xbf\x2c\x3d\x62\xb9\xae\x3e\x83\xf8\x78\x5f\x5e\
\x08\x96\x32\x08\xf0\x63\x8c\xf7\x45\x29\xe1\x94\x94\x37\xb8\xc7\
\xf3\xec\xf2\x5e\xe8\x30\x05\x30\x3c\x3f\x43\x59\x38\x9e\x32\xc7\
\x5b\x94\x4f\x35\x7e\x5d\x09\xff\x4e\xc0\x7e\x65\x81\x10\xac\x2c\
\xf2\xde\x1d\x6f\xa9\x85\x7a\x96\xf8\xdd\x3a\x07\xfb\xa6\xfa\x60\
\x0e\x4e\x98\x14\x79\xba\xb3\x43\xdf\x3f\x9a\xc7\x35\x7a\x17\x43\
\xce\xac\x62\x60\x6d\x81\x04\xc8\x26\x97\xdd\xc2\x14\x0c\xc8\x73\
\x3d\x59\xff\x76\x20\x48\xd4\x20\xbb\x3f\x32\x89\xac\x62\x98\x27\
\x74\xb7\x34\x24\xef\xee\x30\x9e\x0f\x3a\x3c\x41\x6f\x0a\xbc\x70\
\x0f\xe1\x5c\xab\x5b\xa7\x45\xb0\x28\x31\x5e\x1e\x2b\x07\x87\xfc\
\xcd\x4c\xa3\x55\x0e\xe2\xa2\x4a\x9f\x21\xe4\xca\x18\x5b\x65\x7a\
\xc6\xf2\x50\x63\xfd\xfc\x0d\x4a\xd7\xb1\x28\xff\x42\x62\xf9\x53\
\x1d\x36\x9f\x34\x91\xc5\x78\x50\x0d\x7c\xc3\xf9\x11\xa9\xa9\x45\
\x66\x39\xc8\xd6\x98\xd1\x57\x03\xc1\x2e\xce\x1b\x37\x74\x47\x16\
\x78\x83\x21\xe3\x93\x19\xc9\x44\xcd\xb4\xb0\x38\x64\xbf\xc9\xd1\
\x22\xef\x60\x21\x57\x6b\x86\x21\x79\x94\xa7\x6f\xc7\x94\x0c\xdc\
\x39\xed\x6e\x69\xe9\x79\xd8\xde\x61\x3c\xa9\xe1\x6b\xd6\x42\x74\
\x3a\xaa\xc6\xc0\x0b\xa4\x1a\x7e\x8a\x3f\x3a\x2d\x82\x45\x51\x69\
\xee\x50\x0e\x0e\x7b\xd3\x1b\xb0\xed\xfb\x72\x2b\x86\xe1\x65\x79\
\x83\x29\x0d\xc8\xd1\x19\xcb\xf3\x21\x71\x41\x2e\xb0\x2c\x9f\xea\
\xc5\xf3\x62\xe8\xbb\x67\x0a\x84\x60\xfd\x9c\xc1\x18\x50\x0d\x71\
\x6f\xb6\x34\x8c\x77\x09\x35\xc1\x09\x95\x71\x8d\x27\x77\xf9\x30\
\xde\xca\xc3\x3a\xe5\xa4\x37\xc9\x2a\xa5\xd2\xde\x96\x44\xe4\xa4\
\x94\xe7\x3a\xd5\xe3\xee\xeb\x98\xef\xfb\x12\xbf\xe7\x2a\x3e\xa8\
\xc4\x6d\x35\x28\x5d\xdd\x82\xf8\xac\x47\x6f\x40\xdb\xf1\xa4\x6a\
\xa0\x7f\x0d\x69\xf7\x73\xd1\xc8\x71\x7f\xdb\x3e\x2d\x82\xd5\x04\
\xcc\x11\x54\x37\x2b\x07\x07\x3e\xa4\xb0\xb9\xd5\x89\x31\x6c\x7e\
\xaf\x9c\x93\x2b\xd3\x46\x32\x22\x0f\x32\x4d\xb3\xd4\x30\x51\xf1\
\x96\x25\x79\xa0\x7a\x4b\xad\x83\x20\x06\xcd\x80\x94\xd0\x33\x83\
\x31\x98\x48\x6c\xeb\xf9\x96\x76\x2f\xcf\x38\xc8\xc6\xb1\x85\x33\
\x05\x5a\xbc\xde\xe1\x20\x18\x9e\xe1\x9a\xd8\x92\xe1\x98\xe1\x62\
\xdf\xc6\x45\x0f\xa2\x3c\x43\x2d\x35\xa4\xb6\x9a\xce\x1b\x1d\xf7\
\x90\x87\x88\xdf\x5f\xc5\x94\x8b\x1a\x54\x75\xb6\x83\x7d\x53\x8b\
\x0c\xf6\xde\xd9\x86\x72\xda\x30\x9e\x80\xa3\x78\x4e\x9d\x34\x08\
\x56\x05\x30\xe7\x48\x7b\xa3\x1c\x1c\xfa\x9b\x11\x36\x93\xe6\x16\
\xe5\xce\x8d\xb9\x21\x55\x2e\xa7\xe4\xaa\x59\x4a\xc6\xa5\xae\xf8\
\x84\xb8\x10\xa7\xa4\xa4\xb1\x2b\xc3\xa9\xa1\xef\x9e\x23\x7e\xf7\
\x47\x91\xcf\x8b\x2a\x40\x8f\xb7\xd4\xc1\xd2\xed\xde\xe5\x09\xeb\
\x6e\xcb\x67\x96\x38\x5c\xe0\x40\xb2\xf6\x86\xc2\xd2\x14\xc5\x45\
\x00\x4f\x0b\x03\x88\xf2\x5c\x6a\x49\x60\xc6\x59\xca\x35\x9c\x58\
\x7e\x5c\xda\xb4\xf1\xc4\xef\xcf\x66\xca\x45\x25\x4a\xa3\x1c\x08\
\xd6\x6e\x96\x7d\xb6\x39\x3e\xfd\x51\xea\x78\x95\x68\x07\x67\x9b\
\x2c\xfb\xa3\xb4\x08\x56\x4d\x74\x3d\x4f\xaa\xbc\x63\x39\x38\xfc\
\xf7\x45\x57\xd0\xb8\x3e\xf8\x9f\x45\x99\xef\x83\x39\x8e\x4f\x79\
\xc1\x26\x04\xef\x9f\x93\xf2\x24\x1b\x35\x90\xe4\x62\x8b\xb2\x6b\
\x33\x8c\x5f\xf7\xb5\xdc\xe4\xfe\x84\x6c\x72\x35\x16\x42\xbc\xa5\
\x70\x9c\xbe\xcf\x2c\x0f\x10\x0e\x66\x32\xec\x6b\x36\x21\x96\x79\
\x1c\xf0\xa2\x64\x67\xe9\x70\xc0\x89\x96\x5e\xe6\x42\xbf\x45\x46\
\x72\xbd\x48\x94\xa9\x8b\xe5\x13\xb4\x6d\x68\x18\x20\x96\x1f\x67\
\x22\xf2\x0d\xf1\x7b\x6e\xb4\x7c\x6a\x06\x89\xb0\xed\xe0\xa0\x0c\
\x48\x7f\x57\xf0\xf3\xf4\x1e\x0e\xf4\x3d\xda\x92\x64\xb5\x4f\x83\
\x60\x55\x00\x5a\x66\xf1\xf2\x60\x8f\x65\x8a\xc4\x7b\x24\xb3\xbc\
\x51\x05\x62\x63\x54\x08\x30\x69\x4a\xdf\xcf\xa3\x6c\x4f\xba\xbc\
\xd7\x13\x54\xd7\x54\x1b\x88\x2d\xc1\x3e\xea\x74\xa3\x22\x9e\x1b\
\x7b\x10\xdb\xf8\x53\x84\x07\xd5\xf8\x94\x89\x09\x27\x58\xe8\x04\
\x66\xd9\x0d\xc0\x2e\x60\x62\x16\x61\x74\x6e\x66\xc8\x33\x23\xc3\
\xb9\x32\x9d\x28\x53\x38\x1d\x12\x35\x6d\xd1\xe3\x96\x72\x51\x73\
\x36\x46\x69\xa0\xb6\x62\xf4\x75\x5b\xa6\x5c\xd4\x60\xc2\x67\x5a\
\x3e\xa9\xba\xcc\xc7\xf7\x3d\x6b\x86\x73\x71\x8f\xc5\xba\x7a\x20\
\x2d\x82\x45\x99\x80\x73\xcb\x09\x11\x48\x32\xec\x9c\xe4\x89\x54\
\x5c\x52\xce\xc8\xd5\xe9\x04\x6f\x8e\x2d\xf3\x28\xdf\xe5\x8c\x45\
\x78\x31\xb3\xec\x53\x88\xe5\x7e\x15\xf1\xed\x69\x0c\xb9\x8e\x29\
\xe2\xf9\x41\x0d\x16\x3a\x31\xe2\xdb\x47\x19\x76\x6a\x5b\x59\xc8\
\x76\x19\x63\x0c\x86\x59\xb6\x7f\x1c\xf3\x20\xb8\x27\x83\x31\x19\
\xce\x90\xe7\xd5\x8c\xe6\x49\x55\xdc\x2b\x6c\x0c\xa2\xa9\x31\xb0\
\x6c\x83\x41\xcf\x73\x20\x23\xfb\xa7\x74\x91\xe2\xc4\x60\x3b\xd8\
\x72\xdf\x8a\x32\x6d\xa0\xa0\x05\xa3\xfc\xc5\x96\x63\xc2\x4d\x55\
\x35\x39\x4d\x82\x55\x91\xa0\xa6\x7c\xa2\x1c\x90\x01\x93\x4b\xf6\
\xae\x8c\xb2\x6e\xf5\xbc\x11\x17\x23\xf6\x22\x4c\xec\x7c\x6b\xf4\
\x5a\x31\x16\xe1\x57\x29\x69\x02\xde\xb5\xb0\x59\xcb\xc2\x95\x3f\
\x8b\xe0\x91\x54\x23\xf2\x27\x1c\x37\xd1\xcb\x99\x72\xd5\x65\x6e\
\xd0\x6d\x1c\xfa\xe0\x89\x02\xf3\x28\x9c\xc6\x90\xe7\xd6\x8c\xd6\
\x29\xf5\x50\x9e\x16\xf1\xed\x40\xe2\xb7\x36\x21\x49\xaa\x83\xd2\
\x7f\x11\xcb\x6f\x10\xf1\x7d\xaf\x94\x88\x06\x27\xda\xf9\x8e\x0e\
\x7b\x8f\x8d\xdd\x1a\x27\xf7\xab\x8b\x87\x6a\x17\x46\x3d\xbf\x97\
\x11\xf3\x34\x08\x16\x25\x8d\x4e\x94\xf1\x60\x29\xe2\x79\x4f\x76\
\x42\x3d\xf3\x7c\xe3\xcb\x37\x14\xc1\xcb\x6b\x74\x01\xc8\xb9\x11\
\xc3\x4e\x8a\x95\xd3\x8a\x61\xa8\x1e\x67\x23\xf4\x7d\x46\x07\x7c\
\xd8\xe8\xfc\x2a\xd4\x94\x64\x41\xb0\xae\x75\xd8\x68\xf7\x60\xf4\
\xcf\xa7\x4c\xb9\x26\x59\x12\x6f\x5b\x7b\xa4\x1f\xc0\xc2\x20\x37\
\x25\x7b\xc9\xe5\x8c\xb6\x9f\x95\xd1\x3a\x3d\x86\x28\x0f\x44\x7c\
\x7b\x11\xf1\xdb\xdf\x52\xb8\x94\xe7\xe6\xc2\xac\xe8\x70\x09\xe3\
\x7a\xa0\x53\x83\x85\xae\x85\xe8\xa0\x9b\x3f\x30\xe6\xc0\x2e\x0c\
\xb9\x8e\x60\x5e\x5c\xf6\xcc\x71\x46\xb3\xc9\x84\xf2\x0a\x43\xcb\
\x5d\x2f\x4d\x82\x45\xbd\x11\x1e\x55\xe2\xc4\x20\x49\x65\xcb\x49\
\xd8\x7c\x00\x43\x5b\x51\x6a\xd8\x18\x6d\x33\x0a\x21\xfa\xb3\x4f\
\x23\xd5\x32\xdc\xeb\xd9\x36\x23\x4e\x3b\xcc\x89\x32\xbf\xdc\xd1\
\xd8\x78\x37\xf4\xb6\x5a\xec\x68\xf0\xcb\x05\x95\x84\x46\x45\xd1\
\xaf\x0c\xbc\x68\xce\xd4\xe7\x79\x6e\x1a\x8e\x76\x21\xd3\x80\xb9\
\xa0\xf4\x6d\xc0\x4b\x6b\xf2\x81\x83\xb6\xd3\x27\xb8\x71\x85\x76\
\xcc\x68\x9e\x5c\x45\x94\x67\x84\xa3\x36\xa7\x2f\x73\xcd\x4c\x74\
\x1c\x37\x6a\xcc\xa9\x21\xcc\xfe\xba\x97\x58\xee\xcf\xf0\xef\xfc\
\x9e\x36\x6b\xe0\x3b\xa2\x83\xc7\xb1\xcc\xb9\x35\x3d\xa4\xe9\x5b\
\x86\x0a\x90\x43\x19\xfd\x40\x8d\x51\xf6\x57\x99\xa9\x4a\x9a\x04\
\xab\x02\xba\x92\x9a\x18\xef\x4e\x25\x4c\x0e\x92\xbc\x9a\x46\x7a\
\x78\x03\xff\xb4\x1c\x10\xac\x97\x2c\xd5\xe5\xf9\xc2\x91\x16\x46\
\x91\xa3\xc1\x1c\x64\x8f\x9a\x5f\xec\x66\x47\x03\xf0\x32\x7c\x03\
\xfc\xec\x03\x9d\x62\x6c\x6e\x4e\xc9\xa8\xef\xa9\xb9\xd2\xf6\x00\
\xb7\x40\xb1\x65\xe8\x07\xc9\x21\x5b\x46\x30\xcb\x9b\x97\xd0\x9e\
\xb5\xe8\xf9\xd6\xc5\xa0\x0d\xac\x01\xf4\x94\x26\x2f\xa4\x3c\x1e\
\xbb\x33\xda\xbe\x06\x82\xfc\x99\x1a\x3d\x66\x7d\x23\xd7\x36\xf3\
\x31\xa2\x4c\x43\x63\x9c\x15\xd6\x31\xda\x45\x09\x3b\xd1\x97\x39\
\x4f\x1e\xb7\xb4\x7f\x2e\xc3\x65\xcc\x71\xa4\x46\xe2\x9f\xed\xe8\
\xa0\x93\xeb\xe4\xb1\x91\x07\x53\x80\x38\xcf\xea\xab\x22\xce\xd1\
\xf3\x09\x8e\x47\xd4\x38\x84\xff\x1f\xfd\x3f\x6d\x82\x45\xd9\xb4\
\x7e\x07\xbb\xd8\x50\xc5\x80\xaa\x09\x07\x23\x57\xeb\x12\x65\xfc\
\xf8\x03\x3e\xc3\x94\x2a\xb9\xa2\xc4\x0d\x3a\xbd\x00\xe5\x06\x8b\
\x0d\xa0\x6c\x63\x19\x8c\xda\x91\xce\x48\x70\xda\x80\x39\xdf\x22\
\xf5\xe9\x79\x9e\x25\xf9\xbb\x09\xd5\xf1\x3b\x80\xd2\xb5\x20\x08\
\xc9\xb2\x1b\xde\x22\x07\x20\x09\x4e\x72\xea\xd8\x2e\x23\x4d\xe7\
\xaf\x84\xf6\xac\x84\xf8\xa0\xc7\x36\xe4\x78\x0c\x04\x41\x3f\x0f\
\x83\x20\xf9\xee\xe9\x38\x86\x36\x69\x37\x8e\x0c\x3d\x8b\xc7\xc5\
\xe2\x59\x84\xe6\x01\xf7\xe1\x93\xd5\x91\xf8\x1a\x70\x1b\xd0\x13\
\x18\xaf\xc7\x6f\xd3\x1c\x93\x83\x19\xb2\xac\x85\x20\x0e\xdb\x2a\
\xd4\x00\xf8\xc6\x48\x0b\x22\x1d\xe7\x88\x32\x96\x49\x1c\xaf\x46\
\xed\x5c\x65\x1c\xd7\x46\x10\x84\xaf\x78\x04\xe8\x61\x15\x28\x36\
\x80\xd4\x7d\xa7\x33\x73\x1c\x3f\x75\xd0\xf8\x95\xe1\x63\x66\x1b\
\x17\x42\x90\xe2\xab\x1b\xce\xa3\x23\x41\xe9\xfe\x0c\xed\x6c\x52\
\x5e\xcf\x3b\x0c\xcf\xc2\xc3\xd0\x8c\xa0\x1b\xd6\x7b\x0e\xc3\xcb\
\xf8\x5f\xfd\x90\x05\xc1\xaa\x43\x10\x68\x41\x09\x13\x85\x39\x9e\
\xdc\xbd\xdf\x84\xf2\x15\xf6\xe2\x16\xc2\xbc\xb9\xae\x40\x65\xaf\
\x67\x49\xb0\x7c\xa0\x15\xa4\x13\x98\x32\x37\x56\xd6\x9f\x19\x78\
\xee\xa4\x61\xf7\x49\x31\x3e\x9e\x99\xa7\x71\x1b\xcc\xd0\x7e\xfb\
\xc2\x36\x29\x8f\xc9\x29\x79\x5c\x07\x61\x3c\x0b\xfc\x58\x51\x9d\
\x62\xda\x75\xa2\xa5\x0c\x8b\x72\x9e\xcd\x5d\x70\x6c\x8c\x5c\x54\
\x72\xdd\xdc\xc3\xe5\x3e\x0a\xb7\x79\xb0\xe3\x4a\x03\xae\xcf\xf6\
\x5c\x5c\x9d\x25\xc1\xaa\x80\xb7\x2b\x8a\xe1\xa8\x2e\x41\xa2\x30\
\x3f\xa6\xbd\x33\x99\xe5\xdc\xc7\xdc\x04\x8a\x19\x3d\x53\xf2\x38\
\x29\x34\xed\x9b\x6f\xfc\x05\xe6\x40\xa1\x53\x33\x96\x69\x64\x46\
\xfd\xdd\x89\x28\xcf\xcb\x86\x72\x76\xc9\xc3\xb8\x4d\x72\xb4\xf5\
\xb1\xc1\x6b\x19\x8c\xc9\x80\x02\x22\x58\x65\xda\xa8\x6a\x8c\x27\
\xd4\x66\x31\xed\xaa\x0c\x41\x5a\x94\x7c\xb5\x25\xca\x03\xbd\x3e\
\xf1\xe9\xf2\x37\xa0\x07\xb1\x2d\x73\x54\xf8\x95\x28\xd7\x19\x90\
\x1c\x07\x6e\x51\x1e\xfa\xaa\x83\x85\xe9\x92\x2b\x76\xcf\x9a\x60\
\x55\x40\x17\x7a\xdf\xde\x39\x85\x0e\x9d\x70\xdb\xe7\x46\x74\x3f\
\xdb\x93\xcb\x78\xa1\xe3\x42\xc2\x3c\x99\x55\x24\x6d\x79\x3e\xe3\
\xcd\xe4\x73\xa2\x2b\x78\x96\x32\x65\x35\x3f\xa9\xb9\xd2\x1e\x4e\
\xc1\x30\xdd\x05\x3f\xc5\x38\x14\xf4\x4a\xb9\xde\x7a\x19\x8c\xc9\
\xc8\x02\x22\x58\x65\x17\xd1\xad\x88\x44\xe4\x67\x7c\xce\x73\x35\
\x94\xf7\x8d\x55\x48\x56\xc2\xf2\xb4\x25\x7e\xff\x49\x8a\x8e\x0a\
\x26\xef\xe3\xa6\x19\xf7\x55\xdc\x53\xe8\xdc\x14\xeb\xfc\x57\x2a\
\xad\x2c\x09\x16\x35\x8f\xd3\x7b\xf0\xef\x6c\xdc\xc5\x8c\xa4\x09\
\xf5\x3c\xb3\xac\xfd\xc0\x7f\xf2\xd9\x42\xc3\xf1\x0e\x07\x52\xa1\
\x62\x7c\x86\x1b\x0a\x55\x2b\xd1\x2b\x43\x99\xda\x67\xd4\xcf\x83\
\x3d\x3c\x63\xe4\xda\x73\x4d\xcc\xa0\x6f\x66\x27\x78\x07\x0e\x4c\
\xb1\xde\xc7\x32\x1a\x93\x4f\x0b\x88\x60\xed\x0c\xbc\x18\x58\x53\
\x08\x97\xe7\xef\xf3\xd0\x8e\xc5\x31\x4e\x0e\xd4\x27\x38\xae\x63\
\xc3\xa1\xc4\x72\xd7\x41\x10\xef\xcd\xc7\xeb\x84\x0f\xc4\xc5\xbc\
\xaa\x09\xf4\x58\x63\x5c\x2c\x08\x7b\x51\x66\x4d\xb0\x4c\x06\x66\
\xb9\xb1\x60\x36\x2e\x01\xc2\x70\x10\xe5\x9d\x96\x88\x6a\x10\x9d\
\xe3\xb0\x54\x22\xe3\xf7\x27\x2e\xe2\x1d\x8b\xb0\x6d\x83\x32\xda\
\x54\xee\xf4\xfc\x6c\xef\x03\x9b\x67\xd4\xc7\x13\x88\xf2\x70\xb2\
\x1f\x7c\x9c\x62\xbf\x8c\x83\xe4\x64\xed\x33\x52\xaa\xf7\x85\x8c\
\xc6\xa3\x1a\x1a\xad\x17\x02\xb9\x5a\x9e\x73\x9e\x50\xf3\xd6\x51\
\x2e\xae\xdb\x02\x3d\xc9\x30\x65\x6f\xa3\xfc\x6e\x96\xa3\x49\x42\
\x7f\xe6\x38\x9e\x47\x2c\x77\x1e\xa3\xcc\xcb\x21\x5d\x0d\xdf\xbe\
\x1e\x08\xa3\x0d\xea\x87\xeb\xcb\x07\xc1\xa2\xde\xea\x67\x25\xbc\
\x81\x17\x0b\x92\x72\xc0\xd9\xb8\xae\x4f\xcc\x93\xb1\x6a\xbe\x73\
\x37\xfa\x0e\x80\x99\x0f\x9c\x93\x81\x0d\x42\x2f\xa6\x4c\x1d\xf0\
\x32\x93\x96\x3c\x2f\x67\xd8\xbf\xd4\xbe\x3d\x9e\x51\xe6\xa6\x40\
\x0b\x13\xc2\xc1\x4a\xf4\x3a\x34\xd5\x7d\x02\x83\x34\xae\xf7\x48\
\x1a\x7c\x61\x37\x28\x1c\xed\x55\x6e\x44\xf6\x6b\x88\xdf\x50\x1d\
\x68\x3a\x33\x9d\x3e\xc2\xf8\x1b\x82\x08\xf6\x2f\x10\x7f\x1f\x97\
\x6b\xf5\x35\xe2\xf7\xdd\x53\xba\x1c\x8e\x65\x96\x7b\x1a\xae\x05\
\x9f\xe3\x3c\x86\x70\x01\xaf\x07\x41\x5a\xac\xa5\x9e\x35\xd1\xfb\
\x45\xd5\x97\x2f\x82\x45\xcd\x51\xb5\x16\x78\x29\x65\x0a\x0d\x49\
\x09\x32\x5b\x5b\x94\x77\x3f\x14\x4f\xa8\x02\x2a\x9e\x25\x4e\xe2\
\x43\x4a\x40\x4b\x57\x05\x82\x18\x34\xdf\xa5\x74\x90\x1c\x60\x29\
\xd7\x55\xe8\x4e\xee\x4b\x8e\x51\x31\xc6\xa5\x69\xa1\x09\x43\x36\
\x9b\x90\x30\x1d\xc1\x2e\xc4\x45\x54\xee\xbf\x3a\xcc\xba\x5b\x40\
\x10\xe8\xf1\x57\x87\x7a\xe7\x41\xf6\x41\x9d\x7b\x16\x10\xc1\xca\
\xcd\xf2\x40\x8d\x65\xd4\x8d\xd1\xd6\x6d\xc1\x2e\x34\xcb\xa8\x9c\
\x27\x62\xaa\xc6\x32\x2e\x68\xef\x6f\xc4\xef\xf7\x67\x8e\x23\x35\
\x29\xf6\x23\x96\xde\xd6\x4f\x7a\x18\xdf\x8f\x2c\xce\x87\x4d\xf0\
\x62\xef\xea\xf4\xf3\x40\x52\x3d\xf9\x24\x58\xd4\x78\x24\xab\x71\
\x83\x2b\xa5\xe7\xc1\x5f\x21\x39\x90\x5a\x1c\xae\x26\xb8\x20\x17\
\x13\xa8\x76\x33\xc7\x40\x69\x3c\x83\xe6\x2e\xee\x13\xf0\x92\x31\
\x2f\xe6\xe9\x97\x72\xf3\x5d\x86\x46\xab\x4f\xa2\xf6\x4a\x39\xc8\
\xb4\x2d\x3e\x1f\xcc\xb7\x90\xe5\x37\xb4\xb7\xb9\x09\xe2\x83\x78\
\xa6\x9d\x31\xe1\x7b\xbc\xd0\x24\xe1\x4b\xd8\x30\x79\x2f\x27\x97\
\xe2\xb9\x78\xe0\xac\x65\x92\x9b\x81\x39\x36\x40\xb6\xd8\x12\x82\
\xd8\x49\x23\xf0\xf6\xbd\x8e\x30\x3f\xa6\xe2\x73\x4c\x3e\x42\xe0\
\x9c\x8f\x5a\xc5\x05\x79\xc6\x22\xf8\x77\x60\xcd\xd7\x09\xf3\x64\
\x89\xe5\xc5\xfe\x02\x50\xfa\x33\x30\x27\x90\x1e\x15\x41\x08\xa6\
\x13\xe5\xea\x13\xe3\xb8\xf2\x0d\xf1\xfb\xa6\x8c\xf6\x54\xc4\x57\
\x13\x4a\xb9\x2e\x97\xfc\xc3\x50\x53\xfc\x1b\x63\x5d\xfd\x02\x41\
\x00\x54\x1f\x17\xb9\x03\x51\x53\xf7\x29\xf1\x59\xfb\x47\xd4\x06\
\x1b\xe7\x48\xbe\x09\x16\x55\x93\xb5\x1e\x6f\xd9\xc5\x74\x88\x26\
\xd9\x6f\xdc\x6f\x59\xe6\x89\x09\x86\xdf\xc5\xd4\x37\xb5\x80\x9e\
\x9f\xad\x7d\x89\x91\xab\xb8\x58\x71\x4d\x20\x48\x68\x7d\x24\x04\
\x59\xe5\x7b\xe1\x21\xd5\x17\x71\x36\x3e\x6f\xed\x8b\x87\xf5\xd6\
\x0e\x64\x81\x92\x13\xad\x1d\xd6\x7b\x37\x12\xf8\x17\xf1\x19\xe3\
\x71\x34\x14\xbf\x00\x37\xc6\xe6\x60\x0e\x0d\x91\x85\x66\xb0\x12\
\x01\xbe\xea\x6b\x80\x5e\x5b\x7d\x21\x08\x9f\x32\x1c\xfb\xe7\x79\
\xf8\x27\xf8\xe7\xa1\xd8\x8f\x69\xb5\x77\x5b\x24\xb3\x1d\x41\xe9\
\xde\x28\xcb\xf9\xb8\x47\xb4\x84\xe8\x9c\x70\x59\xe7\x0f\x2d\xc4\
\xb5\xb6\x51\x06\x73\xa5\x11\x3e\x19\x9d\x81\xe3\x72\x0e\x8e\x53\
\xf3\x18\x07\x9d\x8a\x8e\x73\x58\xa5\xb8\x06\x36\xca\x70\x6d\xd5\
\x40\xd2\x72\x0a\x28\x7d\x23\x5e\x1c\x5f\x44\x3c\x8a\xcf\xb6\x27\
\x40\x10\x4a\x65\xf3\x14\xcf\xa6\x66\x10\x04\x37\x3d\x2d\x67\xff\
\x3d\x03\x89\x58\x53\x60\x84\xb9\x28\x04\x82\xc5\x09\xc5\xff\x58\
\x91\x1c\x98\x26\x4f\x95\xdd\x2d\xcb\xdd\x9a\x63\x60\x57\xa0\xd8\
\x12\x92\xa3\x7e\xe7\xa2\x63\x39\x20\x57\x02\x81\x40\x20\x28\x41\
\x14\x0a\xc1\xaa\x00\x41\x58\x7c\x6a\x26\xf0\x5d\x0a\xbc\x63\x5f\
\x49\x90\x7f\xa2\x63\xd9\x1f\xc5\x94\x7b\x63\x11\x4c\xb8\x2e\x40\
\xcb\xac\xfe\x37\x98\x73\xf3\x09\x04\x02\x81\x40\x20\x04\xcb\xd1\
\x88\x3b\x0a\x47\x15\x68\xa7\xee\x07\x76\xe9\x17\xa8\xb8\x22\xa6\
\xdc\x3f\x0a\x7c\xb2\x51\xf3\xe9\x2d\x2b\x72\xc7\x06\x81\x40\x20\
\x10\x08\x0a\x8e\x60\x55\x00\xa5\xaf\x04\x5e\xdc\x9f\x42\x0a\x3a\
\xb9\x05\x1a\xc0\x25\xe5\x71\x73\xb5\x4d\x48\xca\x51\x36\xa0\x00\
\x27\x59\x45\x7c\x3f\xa7\xba\xbb\x36\x92\x85\x29\x10\x08\x04\x02\
\x21\x58\xe9\x80\x93\xd5\x7e\x1e\xda\x3c\x15\x82\xdc\x26\x83\xfd\
\x2b\x3d\xd5\xf3\x36\xd0\x13\x5b\xe6\x13\x7b\x20\x69\xa2\xc6\x30\
\x51\xb2\x28\x05\x02\x81\x40\x20\x04\x2b\x5d\x1c\x02\x41\x6e\x35\
\x4e\x68\xfc\x4a\x79\x94\xf7\x32\x30\x87\xd1\xf7\x55\x57\x52\x24\
\xdc\x21\x05\x32\x7e\x3d\x18\x63\xf7\xbc\x2c\x46\x81\x40\x20\x10\
\x08\xc1\xca\x16\x9c\x5c\x6e\x5f\xa1\xf6\x2b\x6b\x19\xaf\x27\xc8\
\xb6\x97\xc7\xfa\x1a\x82\xdf\x74\x08\x3e\xd1\x14\x82\xd8\x4c\x69\
\x45\x1f\x17\x08\x04\x02\x81\x40\x08\x96\x27\xdc\x01\xbc\xc0\x87\
\x37\x43\x90\xd4\x31\x0b\xd9\xfa\x12\x3d\x1f\x7d\xd7\xfb\x9c\xa1\
\xce\xe3\xf2\x30\x4e\xe7\x03\x3d\x6d\xc4\x6a\x08\xe2\x8c\xc8\x42\
\x14\x08\x04\x02\x81\x10\xac\x3c\xa2\x3d\x04\x99\xc4\xa9\x24\xeb\
\x77\x8f\x36\x4f\x51\xd8\x1c\x94\x7e\x97\x20\xc7\x02\x48\x27\x92\
\x72\x75\x30\x67\x05\x1f\x98\xd1\xd8\x9c\x0d\x41\xd2\x69\xea\xd8\
\x7c\x08\xff\xa4\x88\x10\x08\x04\x02\x81\x40\x08\x56\x01\x78\xe9\
\x7d\x08\x3c\x6d\xd6\xe7\xe0\x1e\x1a\x21\x8c\xee\x0c\xb2\xd7\x38\
\xc5\xfe\xb8\x8a\x50\xff\x1b\x29\x92\x99\x4e\xa0\xf4\x64\xe6\x78\
\x8c\x94\x85\x27\x10\x08\x04\x02\x21\x58\x85\x89\xd3\x80\x97\xb7\
\xa8\x2c\x21\xe4\xe5\xa0\x74\x5d\xcb\x3a\xab\x40\x90\x1e\xe4\x7d\
\x46\x9d\x69\x3f\xd3\x55\x06\x5a\xe2\xe0\xd5\x68\x48\x7e\xa8\x87\
\x3a\x5b\xa3\x66\x6c\x06\xf0\x73\xe7\x9d\x2e\x8b\x4e\x20\x10\x08\
\x04\x42\xb0\x0a\x1b\x8d\xd1\xb6\x89\x9b\x98\xf6\x0f\x08\x12\x4b\
\x76\x27\x68\x97\x34\x3e\x4d\x0e\x82\x20\x69\x28\xa7\x9e\xe3\x33\
\xea\x87\xdd\x98\x72\xcd\x80\x20\xea\xfb\xe1\x10\xa4\xad\x31\x95\
\x5f\x15\x49\xd5\x75\x4c\x72\x19\x8e\x5e\xdf\x44\x16\x9c\x40\x20\
\x10\x08\x84\x60\x15\x0f\xce\xb6\x20\x3f\xb9\xf8\x02\x94\x1e\x0b\
\x4a\x3f\x01\x41\x40\xcc\xc7\x20\x88\x67\x35\xd5\x42\x4b\xb6\x1e\
\xed\xa2\x8e\xcf\xb8\x0f\xba\x58\xb6\xfd\x77\x50\x7a\x16\x04\xa9\
\x7d\x1e\x07\xa5\x1f\x42\x32\x39\x18\x82\x44\x9b\x1f\x00\x2d\xb5\
\x4d\x12\x99\xbd\x49\x16\x9a\x40\x20\x10\x08\x84\x60\x15\x27\xaa\
\x81\xd2\x0f\x38\x10\x01\x5f\xf8\x16\x94\xde\x31\x4f\x7d\x70\x5e\
\x01\xb4\x3f\x17\x53\xc4\x90\x5d\x20\x10\x08\x04\x42\xb0\x4a\xa3\
\x41\xc7\x01\x2f\x6e\x96\x4f\x3c\x0b\x4a\xd7\xca\x73\xfb\xbb\x03\
\x3d\x4c\x42\x5a\x58\x08\x4a\x5f\x2a\x8b\x4b\x20\x10\x08\x04\x42\
\xb0\x4a\x87\x60\x95\xa1\x23\x28\x0d\x19\x11\x8a\xa9\x68\xa3\x54\
\x28\x6d\x6f\x88\x9e\x83\xf9\x20\x57\x37\x80\xd2\x1b\xcb\xc2\x12\
\x08\x04\x02\x81\x10\xac\xd2\x24\x58\x65\x38\x16\x94\x7e\x1d\x94\
\xfe\x35\x05\x32\x31\x03\x94\x3e\xb5\x80\xdb\x7e\x3e\x28\xfd\x65\
\x06\xa4\xea\x17\xb4\xd7\x6a\x26\x0b\x4a\x20\x10\x08\x04\x82\xf2\
\x41\xb0\xca\x50\x1d\x94\x3e\x02\x0d\xd9\x17\x3a\x90\x89\xc9\x10\
\xe4\x3c\xdc\xbe\x88\xda\xde\x06\x94\x1e\x06\xb4\x70\x0e\x1c\x4c\
\x00\xa5\x7b\x42\x10\x9b\x4c\x16\x93\x40\x20\x10\x08\x04\xe5\x90\
\x60\xe5\xa2\x12\x28\xbd\x37\x28\xdd\x07\x94\x7e\x18\x9f\x12\x3f\
\x43\x6d\xcf\x7c\xc4\x97\xf8\xff\xc6\x81\xd2\xf7\x43\x10\xbf\x69\
\x97\x22\x6f\xf7\xc6\x10\xe4\x69\xbc\x19\x94\x7e\x1b\x94\x9e\x8d\
\xa4\xeb\x57\xf4\x7c\x5c\x97\x43\x9e\xd6\x41\x10\x3b\x6b\x05\x7a\
\x11\x2e\x02\xa5\x67\x82\xd2\x63\xd0\xbe\x6a\x2f\x59\x3c\x02\x81\
\x40\x20\x10\x08\xc1\x8a\x23\x1c\x3b\xe3\x33\x62\x4f\x08\x82\x88\
\x5e\x0a\x4a\x5f\x82\xff\xee\x09\x4a\x77\x2e\xc1\xa7\xaf\xfa\x10\
\x84\x75\x38\x17\x82\x24\xd5\xb7\xa3\x07\xe6\x10\xd4\x74\x0d\x03\
\xa5\x87\x82\xd2\x0f\x82\xd2\x77\x43\x10\x33\xeb\x6a\x50\xba\x37\
\x28\xdd\x01\xb5\x81\xb2\x78\x04\x02\x81\x40\x20\xa0\x10\xac\xb2\
\x7f\x08\x04\x02\x81\x40\x20\x10\x08\xfc\x40\x3a\x41\x20\x10\x08\
\x04\x02\x81\x40\x08\x96\x40\x20\x10\x08\x04\x02\x81\x10\x2c\x81\
\x40\x20\x10\x08\x04\x82\x72\x85\xff\x1b\x00\x86\x68\x87\x62\xfd\
\x45\x12\x34\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x05\
\x00\x73\x5e\x21\
\x00\x6c\
\x00\x6f\x00\x67\x00\x6f\x00\x31\
\x00\x09\
\x0e\x24\xb1\xe7\
\x00\x6c\
\x00\x6f\x00\x67\x00\x6f\x00\x31\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x10\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x6c\x9a\x51\xe9\xda\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
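# Usage sketch (an assumption, not part of the original file): importing this
# module registers the embedded PNG with Qt's resource system, after which it
# can be loaded by resource path, e.g. (assuming the PyQt5 binding matching
# the QtCore import above):
# pixmap = QtGui.QPixmap(':/logo1/logo1.png')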
|
[
"brayan.saldarriaga@unillanos.edu.co"
] |
brayan.saldarriaga@unillanos.edu.co
|
a93e4abe550ba336b0cdf4e7b99e7c15650f6699
|
c2471dcf74c5fd1ccf56d19ce856cf7e7e396b80
|
/chap18/7.py
|
85f6866c5e65a7135fffd437e19b223b3aebc982
|
[] |
no_license
|
oc0de/pythonEpi
|
eaeef2cf748e6834375be6bc710132b572fc2934
|
fb7b9e06bb39023e881de1a3d370807b955b5cc0
|
refs/heads/master
| 2021-06-18T05:33:19.518652
| 2017-07-07T04:34:52
| 2017-07-07T04:34:52
| 73,049,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
def getMaxTrappedWater(heights):
area, leftSide, rightSide = 0, 0, len(heights)-1
while leftSide < rightSide:
area = max(area, (rightSide - leftSide) * min(heights[leftSide], heights[rightSide]))
if heights[leftSide] == heights[rightSide]:
leftSide += 1
rightSide -= 1
elif heights[leftSide] < heights[rightSide]:
leftSide += 1
else:
rightSide -= 1
return area
heights = [1, 2, 1, 3, 4, 4, 5, 6, 2, 1, 3, 1, 3, 2, 1, 2, 4, 1]
print(getMaxTrappedWater(heights))
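# Quick self-check on the classic "container with most water" instance
# (hypothetical input, not part of the original file); the two-pointer
# scan above should report a maximum area of 49 here.
assert getMaxTrappedWater([1, 8, 6, 2, 5, 4, 8, 3, 7]) == 49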
|
[
"valizade@mail.gvsu.edu"
] |
valizade@mail.gvsu.edu
|
a04bf390873d12346090bcdfc646ee211bab7aba
|
b0885fde23fff880927c3a6248c7b5a33df670f1
|
/models/im_vev/main.py
|
0b15b743f659dcb7a2b8c0f1b5b760da910f9d0f
|
[] |
no_license
|
mrsalehi/paraphrase-generation
|
ceb68200e9016c5f26036af565fafa2d736dc96b
|
3e8bd36bd9416999b93ed8e8529bfdf83cf4dcdd
|
refs/heads/master
| 2020-07-22T03:50:40.343595
| 2019-08-26T11:29:08
| 2019-08-26T11:29:08
| 207,065,580
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
import fire
from models import im_vev
from models.neural_editor.main import ModelRunner
if __name__ == '__main__':
cls = ModelRunner
cls.model = im_vev
fire.Fire(cls)
|
[
"ub.maka@gmail.com"
] |
ub.maka@gmail.com
|
0de082a9ee3fba6fa27326ee1526cdb560e9aca6
|
ac2539764372920ca4b020d614113a6b1a0b3ee4
|
/tests/test_signals.py
|
d9ce62546a5960ec2d78e6048c105001aed2ed0c
|
[
"MIT"
] |
permissive
|
vint21h/django-read-only-admin
|
15f16643b242983fa9895c7ac03777eb7c52a2d1
|
e3edebb2081a50b9da708e06d3cbd5fb953cf66c
|
refs/heads/master
| 2022-10-22T23:25:31.237026
| 2022-02-19T20:29:19
| 2022-02-19T20:29:19
| 92,436,831
| 31
| 3
|
MIT
| 2023-09-05T06:49:28
| 2017-05-25T19:23:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,261
|
py
|
# -*- coding: utf-8 -*-
# django-read-only-admin
# tests/test_signals.py
from typing import List
from django.conf import settings
from django.test import TestCase
from django.contrib.auth.models import Permission
__all__: List[str] = ["AddReadOnlyPermissionsSignalTest"]
class AddReadOnlyPermissionsSignalTest(TestCase):
"""Add read only permissions signal tests."""
def test_add_readonly_permissions(self) -> None:
"""Test signal."""
self.assertListEqual(
list1=list(
Permission.objects.filter(
codename__startswith=settings.READ_ONLY_ADMIN_PERMISSION_PREFIX
).values_list("codename", flat=True)
),
list2=[
"readonly_logentry",
"readonly_group",
"readonly_permission",
"readonly_user",
"readonly_contenttype",
],
)
def test_add_readonly_permissions__count(self) -> None:
"""Test signal create new permissions number."""
self.assertEqual(
first=Permission.objects.filter(
codename__startswith=settings.READ_ONLY_ADMIN_PERMISSION_PREFIX
).count(),
second=5,
)
|
[
"vint21h@vint21h.pp.ua"
] |
vint21h@vint21h.pp.ua
|
24671efa7e3468d3798e40da790f8d1a264cf66d
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/redreader/testcase/firstcases/testcase5_009.py
|
288f69cb7bbc2f5180dbb679b3cd09b65cfe78cb
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,526
|
py
|
#coding=utf-8
from __future__ import print_function
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.quantumbadger.redreader',
'appActivity' : 'org.quantumbadger.redreader.activities.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'org.quantumbadger.redreader/org.quantumbadger.redreader.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase009
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"askreddit\")", "new UiSelector().className(\"android.widget.TextView\").instance(8)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.TextView\").description(\"Refresh Posts\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"/r/announcements\")", "new UiSelector().className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"books\")", "new UiSelector().className(\"android.widget.TextView\").instance(12)")
TouchAction(driver).tap(element).perform()
except Exception as e:
    print('FAIL')
    print('str(e):\t\t', str(e))
    print('repr(e):\t', repr(e))
    print(traceback.format_exc())
else:
    print('OK')
finally:
cpackage = driver.current_package
endtime = time.time()
    print('consumed time:', str(endtime - starttime), 's')
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"5_009\"")
    jacocotime = time.time()
    print('jacoco time:', str(jacocotime - endtime), 's')
driver.quit()
if (cpackage != 'org.quantumbadger.redreader'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
39657f18a60d6a07a7a1e0bfb3a58854408a6032
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/impl/gen/view_models/views/lobby/tank_setup/sub_views/battle_ability_by_rank_model.py
|
87375e758b4042c2f99e94cf2b6b6b3daa6b9bcb
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 970
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/gen/view_models/views/lobby/tank_setup/sub_views/battle_ability_by_rank_model.py
from frameworks.wulf import Array
from frameworks.wulf import ViewModel
class BattleAbilityByRankModel(ViewModel):
__slots__ = ()
def __init__(self, properties=2, commands=0):
super(BattleAbilityByRankModel, self).__init__(properties=properties, commands=commands)
def getName(self):
return self._getString(0)
def setName(self, value):
self._setString(0, value)
def getRankValues(self):
return self._getArray(1)
def setRankValues(self, value):
self._setArray(1, value)
@staticmethod
def getRankValuesType():
return str
def _initialize(self):
super(BattleAbilityByRankModel, self)._initialize()
self._addStringProperty('name', '')
self._addArrayProperty('rankValues', Array())
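# Minimal usage sketch (hypothetical values; in the game client this model is
# normally populated by the wulf view framework rather than by hand):
# model = BattleAbilityByRankModel()
# model.setName('battle_ability')
# model.getName()  # -> 'battle_ability'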
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
bce95d31c8695ad1fa989acb3e2073da221c378f
|
1876bd32763cf34b2856067a3efc513043d4c7c8
|
/test_graph/test_ping_an.py
|
d8f0c7e4d2f55a909c09abcdc0820536f3f395e1
|
[] |
no_license
|
chntylz/a_stock
|
fdc40b7ab71551246c47636a0c3e81de03b46bb6
|
ed6e627941c580e353c0255027b18ebd9a8b7367
|
refs/heads/master
| 2023-09-01T17:32:32.427766
| 2020-04-30T08:11:32
| 2020-04-30T08:11:32
| 181,991,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,508
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
reload(sys)
#sys.setdefaultencoding('utf-8')
sys.setdefaultencoding("ISO-8859-1")
# Import the libraries that later analysis and visualization steps may use
import tushare as ts
import pandas as pd
import matplotlib.pyplot as plt
# Ensure Chinese characters and minus signs display correctly in plots
from pylab import mpl
mpl.rcParams['font.sans-serif']=['SimHei']
mpl.rcParams['axes.unicode_minus']=False
# Set the tushare API token
token='21dddafc47513ea46b89057b2c4edf7b44882b3e92274b431f199552'
#ts.set_token(token)
pro = ts.pro_api(token)
# Fetch data on currently listed stocks: ticker code, short name, registration place, industry, listing date, etc.
basic=pro.stock_basic(list_status='L')
# Preview the first five rows
#basic.head(5)
# Fetch Ping An Bank daily quotes
pa=pro.daily(ts_code='000001.SZ', start_date='20190101',
end_date='20190508')
#pa.head()
# Candlestick (K-line) chart visualization
from pyecharts import Kline
pa.index=pd.to_datetime(pa.trade_date)
pa=pa.sort_index()
v1=list(pa.loc[:,['open','close','low','high']].values)
t=pa.index
v0=list(t.strftime('%Y%m%d'))
kline = Kline("平安银行K线图",title_text_size=15)
kline.add("", v0, v1,is_datazoom_show=True,
mark_line=["average"],
mark_point=["max", "min"],
mark_point_symbolsize=60,
mark_line_valuedim=['highest', 'lowest'] )
kline.render("平安银行.html")
kline
#plt.savefig("/home/aaron/aaron/test_graph/test.jpg")
|
[
"you@example.com"
] |
you@example.com
|
cd87bbf2f41c52e0d7fb748c7b2ab38248071680
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/togeth.py
|
c7b87096b528673b038eb8d729d0786b8fdf3ece
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,010
|
py
|
ii = [('BentJDO2.py', 8), ('EmerRN.py', 2), ('CookGHP3.py', 29), ('LyelCPG2.py', 25), ('MarrFDI.py', 16), ('RogePAV2.py', 68), ('CoolWHM2.py', 31), ('KembFFF.py', 6), ('GodwWSL2.py', 53), ('ChanWS.py', 15), ('RogePAV.py', 110), ('SadlMLP.py', 39), ('FerrSDO3.py', 17), ('WilbRLW.py', 26), ('WilbRLW4.py', 23), ('RennJIT.py', 38), ('ProuWCM.py', 45), ('AubePRP2.py', 23), ('CookGHP.py', 22), ('ShawHDE.py', 7), ('MartHSI2.py', 17), ('LeakWTI2.py', 48), ('UnitAI.py', 22), ('KembFJ1.py', 18), ('WilkJMC3.py', 45), ('WilbRLW5.py', 20), ('LeakWTI3.py', 51), ('PettTHE.py', 48), ('MarrFDI3.py', 16), ('TennAP.py', 5), ('PeckJNG.py', 11), ('KnowJMM.py', 3), ('BailJD2.py', 17), ('AubePRP.py', 30), ('ChalTPW2.py', 16), ('GellWPT.py', 5), ('AdamWEP.py', 95), ('FitzRNS3.py', 20), ('WilbRLW2.py', 35), ('ClarGE2.py', 48), ('GellWPT2.py', 6), ('WilkJMC2.py', 27), ('CarlTFR.py', 80), ('SeniNSP.py', 16), ('LyttELD.py', 12), ('CoopJBT2.py', 18), ('TalfTAC.py', 4), ('GrimSLE.py', 8), ('RoscTTI3.py', 12), ('AinsWRR3.py', 19), ('CookGHP2.py', 21), ('KiddJAE.py', 33), ('AdamHMM.py', 5), ('BailJD1.py', 11), ('RoscTTI2.py', 25), ('CoolWHM.py', 43), ('MarrFDI2.py', 7), ('CrokTPS.py', 25), ('ClarGE.py', 32), ('LandWPA.py', 15), ('BuckWGM.py', 63), ('IrviWVD.py', 15), ('LyelCPG.py', 73), ('GilmCRS.py', 35), ('DaltJMA.py', 38), ('WestJIT2.py', 145), ('DibdTRL2.py', 32), ('AinsWRR.py', 12), ('CrocDNL.py', 30), ('MedwTAI.py', 29), ('LandWPA2.py', 17), ('WadeJEB.py', 38), ('FerrSDO2.py', 25), ('TalfTIT.py', 4), ('NewmJLP.py', 36), ('GodwWLN.py', 37), ('CoopJBT.py', 12), ('KirbWPW2.py', 28), ('SoutRD2.py', 17), ('BackGNE.py', 50), ('LeakWTI4.py', 63), ('LeakWTI.py', 48), ('MedwTAI2.py', 20), ('BachARE.py', 48), ('SoutRD.py', 8), ('DickCSG.py', 4), ('BuckWGM2.py', 7), ('WheeJPT.py', 22), ('MereHHB3.py', 82), ('HowiWRL2.py', 30), ('BailJD3.py', 18), ('MereHHB.py', 44), ('WilkJMC.py', 10), ('HogaGMM.py', 31), ('MartHRW.py', 28), ('MackCNH.py', 8), ('WestJIT.py', 116), ('BabbCEM.py', 27), ('FitzRNS4.py', 194), ('CoolWHM3.py', 17), ('DequTKM.py', 19), ('FitzRNS.py', 53), ('BentJRP.py', 32), ('EdgeMHT.py', 13), ('BowrJMM.py', 17), ('LyttELD3.py', 7), ('FerrSDO.py', 10), ('RoscTTI.py', 12), ('ThomGLG.py', 17), ('StorJCC.py', 20), ('KembFJ2.py', 18), ('LewiMJW.py', 29), ('BabbCRD.py', 14), ('MackCNH2.py', 7), ('BellCHM.py', 18), ('JacoWHI2.py', 26), ('SomeMMH.py', 28), ('HaliTBC.py', 6), ('WilbRLW3.py', 36), ('AinsWRR2.py', 16), ('MereHHB2.py', 49), ('BrewDTO.py', 28), ('JacoWHI.py', 16), ('ClarGE3.py', 108), ('RogeSIP.py', 9), ('MartHRW2.py', 21), ('DibdTRL.py', 55), ('FitzRNS2.py', 97), ('HogaGMM2.py', 14), ('MartHSI.py', 22), ('EvarJSP.py', 16), ('DwigTHH.py', 22), ('NortSTC.py', 11), ('SadlMLP2.py', 28), ('BowrJMM2.py', 11), ('LyelCPG3.py', 61), ('BowrJMM3.py', 16), ('BeckWRE.py', 9), ('TaylIF.py', 23), ('WordWYR.py', 8), ('DibdTBR.py', 9), ('ChalTPW.py', 27), ('ThomWEC.py', 12), ('KeigTSS.py', 14), ('KirbWPW.py', 25), ('WaylFEP.py', 42), ('BentJDO.py', 26), ('ClarGE4.py', 70), ('HowiWRL.py', 34)]
|
[
"prabhjyotsingh95@gmail.com"
] |
prabhjyotsingh95@gmail.com
|
1cccddc5d4e189acca30eb11613f38f6b8432298
|
1435fbd464173836c20127041de90cba0d80c98d
|
/tests/test_cli.py
|
8996f738ccb552db12c5e7e518becf0d8d1ffd33
|
[
"BSD-2-Clause"
] |
permissive
|
pkuyangchao/redash
|
871b88eaea74c6cc31c50a9f82fd6d3aaf8508ca
|
1640b1e927a4d10ce9ae5c24b2d015734c696b08
|
refs/heads/master
| 2023-02-06T09:58:45.547843
| 2020-03-26T16:36:00
| 2020-03-26T16:36:00
| 249,176,715
| 0
| 0
| null | 2023-02-02T02:19:24
| 2020-03-22T12:17:44
|
Python
|
UTF-8
|
Python
| false
| false
| 19,318
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mock
import textwrap
from click.testing import CliRunner
from tests import BaseTestCase
from redash.utils.configuration import ConfigurationContainer
from redash.query_runner import query_runners
from redash.cli import manager
from redash.models import DataSource, Group, Organization, User, db
class DataSourceCommandTests(BaseTestCase):
def test_interactive_new(self):
runner = CliRunner()
        pg_i = list(query_runners.keys()).index('pg') + 1
result = runner.invoke(
manager,
['ds', 'new'],
input="test\n%s\n\n\nexample.com\n\n\ntestdb\n" % (pg_i,))
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
self.assertEqual(DataSource.query.count(), 1)
ds = DataSource.query.first()
self.assertEqual(ds.name, 'test')
self.assertEqual(ds.type, 'pg')
self.assertEqual(ds.options['dbname'], 'testdb')
def test_options_new(self):
runner = CliRunner()
result = runner.invoke(
manager,
['ds', 'new',
'test',
'--options', '{"host": "example.com", "dbname": "testdb"}',
'--type', 'pg'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
self.assertEqual(DataSource.query.count(), 1)
ds = DataSource.query.first()
self.assertEqual(ds.name, 'test')
self.assertEqual(ds.type, 'pg')
self.assertEqual(ds.options['host'], 'example.com')
self.assertEqual(ds.options['dbname'], 'testdb')
def test_bad_type_new(self):
runner = CliRunner()
result = runner.invoke(
manager, ['ds', 'new', 'test', '--type', 'wrong'])
self.assertTrue(result.exception)
self.assertEqual(result.exit_code, 1)
self.assertIn('not supported', result.output)
self.assertEqual(DataSource.query.count(), 0)
def test_bad_options_new(self):
runner = CliRunner()
result = runner.invoke(
manager, ['ds', 'new', 'test', '--options',
'{"host": 12345, "dbname": "testdb"}',
'--type', 'pg'])
self.assertTrue(result.exception)
self.assertEqual(result.exit_code, 1)
self.assertIn('invalid configuration', result.output)
self.assertEqual(DataSource.query.count(), 0)
def test_list(self):
self.factory.create_data_source(
name='test1', type='pg',
options=ConfigurationContainer({"host": "example.com",
"dbname": "testdb1"}))
self.factory.create_data_source(
name='test2', type='sqlite',
options=ConfigurationContainer({"dbpath": "/tmp/test.db"}))
self.factory.create_data_source(
name='Atest', type='sqlite',
options=ConfigurationContainer({"dbpath": "/tmp/test.db"}))
runner = CliRunner()
result = runner.invoke(manager, ['ds', 'list'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
expected_output = """
Id: 3
Name: Atest
Type: sqlite
Options: {"dbpath": "/tmp/test.db"}
--------------------
Id: 1
Name: test1
Type: pg
Options: {"dbname": "testdb1", "host": "example.com"}
--------------------
Id: 2
Name: test2
Type: sqlite
Options: {"dbpath": "/tmp/test.db"}
"""
self.assertMultiLineEqual(result.output,
textwrap.dedent(expected_output).lstrip())
def test_connection_test(self):
self.factory.create_data_source(
name='test1', type='sqlite',
options=ConfigurationContainer({"dbpath": "/tmp/test.db"}))
runner = CliRunner()
result = runner.invoke(manager, ['ds', 'test', 'test1'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
self.assertIn('Success', result.output)
def test_connection_bad_test(self):
self.factory.create_data_source(
name='test1', type='sqlite',
options=ConfigurationContainer({"dbpath": __file__}))
runner = CliRunner()
result = runner.invoke(manager, ['ds', 'test', 'test1'])
self.assertTrue(result.exception)
self.assertEqual(result.exit_code, 1)
self.assertIn('Failure', result.output)
def test_connection_delete(self):
self.factory.create_data_source(
name='test1', type='sqlite',
options=ConfigurationContainer({"dbpath": "/tmp/test.db"}))
runner = CliRunner()
result = runner.invoke(manager, ['ds', 'delete', 'test1'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
self.assertIn('Deleting', result.output)
self.assertEqual(DataSource.query.count(), 0)
def test_connection_bad_delete(self):
self.factory.create_data_source(
name='test1', type='sqlite',
options=ConfigurationContainer({"dbpath": "/tmp/test.db"}))
runner = CliRunner()
result = runner.invoke(manager, ['ds', 'delete', 'wrong'])
self.assertTrue(result.exception)
self.assertEqual(result.exit_code, 1)
self.assertIn("Couldn't find", result.output)
self.assertEqual(DataSource.query.count(), 1)
def test_options_edit(self):
self.factory.create_data_source(
name='test1', type='sqlite',
options=ConfigurationContainer({"dbpath": "/tmp/test.db"}))
runner = CliRunner()
result = runner.invoke(
manager, ['ds', 'edit', 'test1', '--options',
'{"host": "example.com", "dbname": "testdb"}',
'--name', 'test2',
'--type', 'pg'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
self.assertEqual(DataSource.query.count(), 1)
ds = DataSource.query.first()
self.assertEqual(ds.name, 'test2')
self.assertEqual(ds.type, 'pg')
self.assertEqual(ds.options['host'], 'example.com')
self.assertEqual(ds.options['dbname'], 'testdb')
def test_bad_type_edit(self):
self.factory.create_data_source(
name='test1', type='sqlite',
options=ConfigurationContainer({"dbpath": "/tmp/test.db"}))
runner = CliRunner()
result = runner.invoke(
manager, ['ds', 'edit', 'test', '--type', 'wrong'])
self.assertTrue(result.exception)
self.assertEqual(result.exit_code, 1)
self.assertIn('not supported', result.output)
ds = DataSource.query.first()
self.assertEqual(ds.type, 'sqlite')
def test_bad_options_edit(self):
ds = self.factory.create_data_source(
name='test1', type='sqlite',
options=ConfigurationContainer({"dbpath": "/tmp/test.db"}))
runner = CliRunner()
result = runner.invoke(
manager, ['ds', 'new', 'test', '--options',
'{"host": 12345, "dbname": "testdb"}',
'--type', 'pg'])
self.assertTrue(result.exception)
self.assertEqual(result.exit_code, 1)
self.assertIn('invalid configuration', result.output)
ds = DataSource.query.first()
self.assertEqual(ds.type, 'sqlite')
self.assertEqual(ds.options._config, {"dbpath": "/tmp/test.db"})
class GroupCommandTests(BaseTestCase):
def test_create(self):
gcount = Group.query.count()
perms = ['create_query', 'edit_query', 'view_query']
runner = CliRunner()
result = runner.invoke(manager, ['groups', 'create', 'test', '--permissions', ','.join(perms)])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
self.assertEqual(Group.query.count(), gcount + 1)
g = Group.query.order_by(Group.id.desc()).first()
db.session.add(self.factory.org)
self.assertEqual(g.org_id, self.factory.org.id)
self.assertEqual(g.permissions, perms)
def test_change_permissions(self):
g = self.factory.create_group(permissions=['list_dashboards'])
db.session.flush()
g_id = g.id
perms = ['create_query', 'edit_query', 'view_query']
runner = CliRunner()
result = runner.invoke(
manager, ['groups', 'change_permissions', str(g_id), '--permissions', ','.join(perms)])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
g = Group.query.filter(Group.id == g_id).first()
self.assertEqual(g.permissions, perms)
def test_list(self):
self.factory.create_group(name='test', permissions=['list_dashboards'])
self.factory.create_group(name='agroup', permissions=['list_dashboards'])
self.factory.create_group(name='bgroup', permissions=['list_dashboards'])
self.factory.create_user(name='Fred Foobar',
email=u'foobar@example.com',
org=self.factory.org,
group_ids=[self.factory.default_group.id])
runner = CliRunner()
result = runner.invoke(manager, ['groups', 'list'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
output = """
Id: 1
Name: admin
Type: builtin
Organization: default
Permissions: [admin,super_admin]
Users:
--------------------
Id: 4
Name: agroup
Type: regular
Organization: default
Permissions: [list_dashboards]
Users:
--------------------
Id: 5
Name: bgroup
Type: regular
Organization: default
Permissions: [list_dashboards]
Users:
--------------------
Id: 2
Name: default
Type: builtin
Organization: default
Permissions: [create_dashboard,create_query,edit_dashboard,edit_query,view_query,view_source,execute_query,list_users,schedule_query,list_dashboards,list_alerts,list_data_sources]
Users: Fred Foobar
--------------------
Id: 3
Name: test
Type: regular
Organization: default
Permissions: [list_dashboards]
Users:
"""
self.assertMultiLineEqual(result.output,
textwrap.dedent(output).lstrip())
class OrganizationCommandTests(BaseTestCase):
def test_set_google_apps_domains(self):
domains = ['example.org', 'example.com']
runner = CliRunner()
result = runner.invoke(manager, ['org', 'set_google_apps_domains', ','.join(domains)])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
db.session.add(self.factory.org)
self.assertEqual(self.factory.org.google_apps_domains, domains)
def test_show_google_apps_domains(self):
self.factory.org.settings[Organization.SETTING_GOOGLE_APPS_DOMAINS] = [
'example.org', 'example.com']
db.session.add(self.factory.org)
db.session.commit()
runner = CliRunner()
result = runner.invoke(manager, ['org', 'show_google_apps_domains'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
output = """
Current list of Google Apps domains: example.org, example.com
"""
self.assertMultiLineEqual(result.output,
textwrap.dedent(output).lstrip())
def test_list(self):
self.factory.create_org(name='test', slug='test_org')
self.factory.create_org(name='Borg', slug='B_org')
self.factory.create_org(name='Aorg', slug='A_org')
runner = CliRunner()
result = runner.invoke(manager, ['org', 'list'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
output = """
Id: 4
Name: Aorg
Slug: A_org
--------------------
Id: 3
Name: Borg
Slug: B_org
--------------------
Id: 1
Name: Default
Slug: default
--------------------
Id: 2
Name: test
Slug: test_org
"""
self.assertMultiLineEqual(result.output,
textwrap.dedent(output).lstrip())
class UserCommandTests(BaseTestCase):
def test_create_basic(self):
runner = CliRunner()
result = runner.invoke(
manager, ['users', 'create', 'foobar@example.com', 'Fred Foobar'],
input="password1\npassword1\n")
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
u = User.query.filter(User.email == u"foobar@example.com").first()
self.assertEqual(u.name, "Fred Foobar")
self.assertTrue(u.verify_password('password1'))
self.assertEqual(u.group_ids, [u.org.default_group.id])
def test_create_admin(self):
runner = CliRunner()
result = runner.invoke(
manager, ['users', 'create', 'foobar@example.com', 'Fred Foobar',
'--password', 'password1', '--admin'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
u = User.query.filter(User.email == u"foobar@example.com").first()
self.assertEqual(u.name, "Fred Foobar")
self.assertTrue(u.verify_password('password1'))
self.assertEqual(u.group_ids, [u.org.default_group.id,
u.org.admin_group.id])
def test_create_googleauth(self):
runner = CliRunner()
result = runner.invoke(
manager, ['users', 'create', 'foobar@example.com', 'Fred Foobar', '--google'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
u = User.query.filter(User.email == u"foobar@example.com").first()
self.assertEqual(u.name, "Fred Foobar")
self.assertIsNone(u.password_hash)
self.assertEqual(u.group_ids, [u.org.default_group.id])
def test_create_bad(self):
self.factory.create_user(email=u'foobar@example.com')
runner = CliRunner()
result = runner.invoke(
manager, ['users', 'create', u'foobar@example.com', 'Fred Foobar'],
input="password1\npassword1\n")
self.assertTrue(result.exception)
self.assertEqual(result.exit_code, 1)
self.assertIn('Failed', result.output)
def test_delete(self):
self.factory.create_user(email=u'foobar@example.com')
ucount = User.query.count()
runner = CliRunner()
result = runner.invoke(manager, ['users', 'delete', 'foobar@example.com'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
self.assertEqual(User.query.filter(User.email ==
u"foobar@example.com").count(), 0)
self.assertEqual(User.query.count(), ucount - 1)
def test_delete_bad(self):
ucount = User.query.count()
runner = CliRunner()
result = runner.invoke(manager, ['users', 'delete', u'foobar@example.com'])
self.assertIn('Deleted 0 users', result.output)
self.assertEqual(User.query.count(), ucount)
def test_password(self):
self.factory.create_user(email=u'foobar@example.com')
runner = CliRunner()
result = runner.invoke(manager, ['users', 'password', u'foobar@example.com', 'xyzzy'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
u = User.query.filter(User.email == u"foobar@example.com").first()
self.assertTrue(u.verify_password('xyzzy'))
def test_password_bad(self):
runner = CliRunner()
result = runner.invoke(manager, ['users', 'password', u'foobar@example.com', 'xyzzy'])
self.assertTrue(result.exception)
self.assertEqual(result.exit_code, 1)
self.assertIn('not found', result.output)
def test_password_bad_org(self):
runner = CliRunner()
result = runner.invoke(manager, ['users', 'password', u'foobar@example.com', 'xyzzy', '--org', 'default'])
self.assertTrue(result.exception)
self.assertEqual(result.exit_code, 1)
self.assertIn('not found', result.output)
def test_invite(self):
admin = self.factory.create_user(email=u'redash-admin@example.com')
runner = CliRunner()
with mock.patch('redash.cli.users.invite_user') as iu:
result = runner.invoke(manager, ['users', 'invite', u'foobar@example.com', 'Fred Foobar', u'redash-admin@example.com'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
self.assertTrue(iu.called)
c = iu.call_args[0]
db.session.add_all(c)
self.assertEqual(c[0].id, self.factory.org.id)
self.assertEqual(c[1].id, admin.id)
self.assertEqual(c[2].email, 'foobar@example.com')
def test_list(self):
self.factory.create_user(name='Fred Foobar',
email=u'foobar@example.com',
org=self.factory.org)
self.factory.create_user(name='William Foobar',
email=u'william@example.com',
org=self.factory.org)
self.factory.create_user(name='Andrew Foobar',
email=u'andrew@example.com',
org=self.factory.org)
runner = CliRunner()
result = runner.invoke(manager, ['users', 'list'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
output = """
Id: 3
Name: Andrew Foobar
Email: andrew@example.com
Organization: Default
Active: True
Groups: default
--------------------
Id: 1
Name: Fred Foobar
Email: foobar@example.com
Organization: Default
Active: True
Groups: default
--------------------
Id: 2
Name: William Foobar
Email: william@example.com
Organization: Default
Active: True
Groups: default
"""
self.assertMultiLineEqual(result.output,
textwrap.dedent(output).lstrip())
def test_grant_admin(self):
u = self.factory.create_user(name='Fred Foobar',
email=u'foobar@example.com',
org=self.factory.org,
group_ids=[self.factory.default_group.id])
runner = CliRunner()
result = runner.invoke(manager, ['users', 'grant_admin', u'foobar@example.com'])
self.assertFalse(result.exception)
self.assertEqual(result.exit_code, 0)
db.session.add(u)
self.assertEqual(u.group_ids, [u.org.default_group.id,
u.org.admin_group.id])
|
[
"hexiaomeng@foxmail.com"
] |
hexiaomeng@foxmail.com
|
de256567ccbf150b172472ffc6149f20b4a0731a
|
98b63e3dc79c75048163512c3d1b71d4b6987493
|
/tensorflow/compiler/mlir/tfr/define_op_template.py
|
c0db2981d2d94a0038a84163cbce6046af8101db
|
[
"Apache-2.0"
] |
permissive
|
galeone/tensorflow
|
11a4e4a3f42f4f61a65b432c429ace00401c9cc4
|
1b6f13331f4d8e7fccc66bfeb0b066e77a2b7206
|
refs/heads/master
| 2022-11-13T11:56:56.143276
| 2020-11-10T14:35:01
| 2020-11-10T14:35:01
| 310,642,488
| 21
| 12
|
Apache-2.0
| 2020-11-06T16:01:03
| 2020-11-06T16:01:02
| null |
UTF-8
|
Python
| false
| false
| 2,035
|
py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A template to define composite ops."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from tensorflow.compiler.mlir.tfr.python.composite import Composite
from tensorflow.compiler.mlir.tfr.python.op_reg_gen import gen_register_op
from tensorflow.compiler.mlir.tfr.python.tfr_gen import tfr_gen_from_module
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
'output', None,
    'Path to write the generated register op file and MLIR file.')
flags.DEFINE_bool('gen_register_op', True,
'Generate register op cc file or tfr mlir file.')
flags.mark_flag_as_required('output')
@Composite('TestRandom', derived_attrs=['T: numbertype'], outputs=['o: T'])
def _composite_random_op():
pass
def main(_):
if FLAGS.gen_register_op:
assert FLAGS.output.endswith('.cc')
generated_code = gen_register_op(sys.modules[__name__], '_composite_')
else:
assert FLAGS.output.endswith('.mlir')
generated_code = tfr_gen_from_module(sys.modules[__name__], '_composite_')
dirname = os.path.dirname(FLAGS.output)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(FLAGS.output, 'w') as f:
f.write(generated_code)
if __name__ == '__main__':
app.run(main=main)
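# Example invocations (a sketch; the output paths are hypothetical):
# python define_op_template.py --output=/tmp/test_random.cc --gen_register_op=true
# python define_op_template.py --output=/tmp/test_random.mlir --gen_register_op=false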
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
251b5275354616a26f43a0708768174bcecb4e68
|
e6aace98e7b21cbdd1019b7323c11ebc41217af7
|
/Django/api1203/lxzapi/serializers.py
|
cdaaaad110bd8b4480a6ce5ae9f48bf2633a2495
|
[] |
no_license
|
robinlxz/Python_excercise
|
8f61e41614755c01b0a523db4caa1b78e31f28e2
|
db0daf0da5b45067d70daf285271a3f7c2d4f55f
|
refs/heads/master
| 2021-06-29T16:59:35.902048
| 2020-10-05T07:38:03
| 2020-10-05T07:38:03
| 152,209,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
from rest_framework import serializers
from lxzapi.models import Lead
class LeadSerializer(serializers.ModelSerializer):
class Meta:
model = Lead
fields = ('id', 'name', 'email')
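# Minimal usage sketch (hypothetical; assumes at least one saved Lead row):
# serializer = LeadSerializer(Lead.objects.first())
# serializer.data  # e.g. {'id': 1, 'name': '...', 'email': '...'}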
|
[
"linxz@garena.com"
] |
linxz@garena.com
|
30c57638693393a4e5cb09367880791d16209c9a
|
2b54b1fb1540ab73d6c83cae3acd5fdd58bdead5
|
/Platinum_clusters_Project/BE_Vs_diffstructures_Pt10_Pt10CO/bin/Ptoxides_arrangestru_Vs_energyorder_extra.py
|
3ae442dc73ad12fc2bcb30c1e4147b7bd73e952d
|
[] |
no_license
|
sivachiriki/GOFEE_Pt_V_supported
|
5787d44294262870075f35f2d31c096021b7ce20
|
6bd700dac1f3e7c58394b758d75246ac6e07eade
|
refs/heads/master
| 2022-04-08T11:38:13.038455
| 2020-03-09T10:48:31
| 2020-03-09T10:48:31
| 226,359,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,583
|
py
|
from __future__ import division
import matplotlib
#matplotlib.use('Agg') # Can also use 'tkagg' or 'webagg'
#from plot_neb_tio2 import *
from matplotlib.offsetbox import TextArea, VPacker, AnnotationBbox
import matplotlib.patches as patches
from math import ceil, floor
import matplotlib.pyplot as plt
from ase.io import read, write
from ase.visualize import view
from ase.data.colors import jmol_colors
from decimal import Decimal
from pylab import *
from ase.data import covalent_radii as aradii
from matplotlib.patches import Circle
from math import atan2,pi
import matplotlib.gridspec as gridspec
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
def plot_atoms(ax, atoms, xyz, acols, alp, z):
ecols = [[0, 0, 0] for col in atoms]
indices = range(len(atoms))
for ia in indices:
acol = acols[ia]
ecol = ecols[ia]
arad = aradii[atoms[ia].number]
apos = atoms[ia].position
eps = arad
circ = Circle([apos[xyz[0]], apos[xyz[1]]],
fc = acol,
ec = ecol,
radius = arad,
lw = 0.5,
alpha = alp[ia],
zorder = 1 - apos[1]/1000
)
ax.add_patch(circ)
def plot_conf(ax, atoms, colorlenth,rot=False):
colors = np.array([jmol_colors[atom.number] for atom in atoms])
positions =atoms.get_positions()
for i, atom in enumerate(atoms):
if (atom.number ==78):
colors[i] =[0.1, 0.6, 0.6]
if (atom.number ==6):
colors[i] =[0.1, 0.2, 0.9]
if (atom.number ==8 and positions[i,2]>12.2):
colors[i] =[128/255, 0/255, 128/255]
alp = [None] * colors.shape[0]
for i,a in enumerate(atoms):
if a.symbol == 'Al' or a.symbol == 'O':
if a.position[2] < 9.7:
alp[i] = 0.3
if rot:
atoms.rotate('x',pi/2)
plot_atoms(ax, atoms, [0,2,1], colors, alp, z=-1)
#fig = plt.figure(figsize=(14.0,10.50))
fig, [ax1,ax2] = plt.subplots(nrows=1, ncols=2,figsize=(14.0,10.50),gridspec_kw={'width_ratios': [0.9, 1.1]})
d = .01 # how big to make the diagonal lines in axes coordinates
# arguments to pass plot, just so we don't keep repeating them
kwargs = dict(transform=ax1.transAxes, color='k', clip_on=False)
ax1.plot((1-d,1+d), (-d,+d), **kwargs)
ax1.plot((1-d,1+d),(1-d,1+d), **kwargs)
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot((-d,+d), (1-d,1+d), **kwargs)
ax2.plot((-d,+d), (-d,+d), **kwargs)
#-----------------------------------------------------------------------------------------#
data=read(sys.argv[1]+'@:')
energydif =np.zeros(len(data))
for j in range(0,4):
GM_energy = data[5].get_potential_energy()
energydif[j] = (data[j].get_potential_energy() - GM_energy)
ax1.plot(j,energydif[j])
print(j,energydif[j])
ax1.set_xticks(np.arange(0, 5.0))
ax1.set_yticks(np.arange(-4.0, 3.50, 0.5))
ax1.set_xlabel('Structures with CO$_3$ formation',color='#FF0000')
#----------------------------------------------------------------------------------------#
for j in range(4,10):
GM_energy = data[5].get_potential_energy()
energydif[j] = (data[j].get_potential_energy() - GM_energy)
ax2.plot(j,energydif[j])
print(j,energydif[j])
ax2.set_xticks(np.arange(4.0,10.0))
ax2.set_yticks([])
ax2.set_xlabel('Structures with CO$_2$ formation',color='#FF00FF')
#----------------------------------------------------------------------------------------#
ax1.spines['right'].set_visible(False)
ax2.spines['left'].set_visible(False)
fig.text(0.5, 0.04, 'Lowest Isomer found for Pt$_7$O$_{10}$ and Pt$_7$O$_{10}$CO with GOFEE', ha='center',fontsize=18)
fig.text(0.04, 0.5, 'Stability of Isomers (eV)', va='center', rotation='vertical',fontsize=18)
#plt.xlabel("Lowest Isomer found for Pt$_7$O$_{10}$ and Pt$_7$O$_{10}$CO with GOFEE")
plt.subplots_adjust(wspace=0.05)
#plt.show()
#-----------------------------------------------------------------------------------------#
global_ax = ax1
transform = lambda x: fig.transFigure.inverted().transform(global_ax.transData.transform(x))
inverse = lambda x: global_ax.transData.inverted().transform(fig.transFigure.transform(x))
for j in range(0,4):
atoms = data[j]
colorlenth = len(atoms)
atoms =atoms*(3,3,1)
#print(colorlenth)
a=atoms
del atoms[[atom.index for atom in atoms if atom.index <=colorlenth*5-18 or atom.index >=colorlenth*5]]
centreofmass = a.get_center_of_mass()
atoms = data[j]*(3,3,1)
a=atoms
del atoms[atoms.positions[:,0] >=centreofmass[0]+8.0]
del atoms[atoms.positions[:,0] <= centreofmass[0]-8.0]
del atoms[atoms.positions[:,1] >= centreofmass[1]+7.3]
del atoms[atoms.positions[:,1] <= centreofmass[1]-7.0]
colorlenth = len(atoms)
cell = atoms.get_cell()
# 0 0
dy = (inverse((1, 0)) - inverse((1, -0.1)))[1]
xy = transform((j+0.5, energydif[j]))
print(dy, xy)
ax = plt.axes([xy[0]+0.007, xy[1]-(dy)/300.50, 0.066, 0.08])
img = atoms.copy()
plot_conf(ax, img,colorlenth)
ax.set_xlim([centreofmass[0]-7.50, centreofmass[0]+7.50])
ax.set_ylim([10.7, 20.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
#----------------- drawing box -------------------------------#
xlim = ax.get_xlim()
ylim = ax.get_ylim()
box_x = [xlim[0], xlim[1], xlim[1], xlim[0], xlim[0]]
box_y =[ylim[0], ylim[0], ylim[1], ylim[1], ylim[0]]
ax.add_patch(
patches.Rectangle(
(box_x[0],box_y[0]),
xlim[1]-xlim[0],
ylim[1]-ylim[0],
fill=True,facecolor='white', clip_on=False,zorder =0.8) )
ax.plot(box_x, box_y, color='blue',linewidth=5.0)
plt.axes(ax)
# 0 1
# ax = plt.subplot()
dy = (inverse((1, 0)) - inverse((1, -0.1)))[1]
xy = transform((j+0.5, energydif[j]))
print(dy, xy)
ax = plt.axes([xy[0], xy[1]-(dy)/13.3, 0.08, 0.08])
cell = atoms.get_cell()
img = atoms.copy()
plot_conf(ax, img,colorlenth, rot=True)
ax.set_xlim([centreofmass[0]-7.5, centreofmass[0]+7.5])
ax.set_ylim([centreofmass[1]-6.5, centreofmass[1]+7.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
box_x = [xlim[0], xlim[1], xlim[1], xlim[0], xlim[0]]
box_y =[ylim[0], ylim[0], ylim[1], ylim[1], ylim[0]]
ax.add_patch(
patches.Rectangle(
(box_x[0],box_y[0]),
xlim[1]-xlim[0],
ylim[1]-ylim[0],
fill=True,facecolor='white', clip_on=False,zorder =0.8) )
ax.plot(box_x, box_y, color='blue',linewidth=5.0)
plt.axes(ax)
#-------------------- set 1 Pt7+CO on surface ---------------#
CO =read('CO_molecule_DFTrelaxed.traj@:')
E_CO = CO[0].get_potential_energy()
data=read(sys.argv[2]+'@:')
energydif =np.zeros(len(data))
for j in range(len(data)):
energydif[j] = (data[j].get_potential_energy()- GM_energy -E_CO)
print(j,energydif[j])
for j in range(0,4):
atoms = data[j]
colorlenth = len(atoms)
atoms =atoms*(3,3,1)
#print(colorlenth)
a=atoms
del atoms[[atom.index for atom in atoms if atom.index <=colorlenth*5-20 or atom.index >=colorlenth*5]]
centreofmass = a.get_center_of_mass()
atoms = data[j]*(3,3,1)
a=atoms
del atoms[atoms.positions[:,0] >=centreofmass[0]+8.10]
del atoms[atoms.positions[:,0] <= centreofmass[0]-8.10]
del atoms[atoms.positions[:,1] >= centreofmass[1]+7.3]
del atoms[atoms.positions[:,1] <= centreofmass[1]-7.10]
if (j ==6):
del atoms[atoms.positions[:,0] >=centreofmass[0]+9.0]
del atoms[atoms.positions[:,0] <= centreofmass[0]-7.10]
del atoms[atoms.positions[:,1] >= centreofmass[1]+7.3]
del atoms[atoms.positions[:,1] <= centreofmass[1]-7.10]
colorlenth = len(atoms)
cell = atoms.get_cell()
# 0 0
dy = (inverse((1, 0)) - inverse((1, -0.1)))[1]
xy = transform((j+0.5, energydif[j]))
print(dy, xy)
ax = plt.axes([xy[0]+0.007, xy[1]-(dy)/300.0, 0.066, 0.08])
img = atoms.copy()
plot_conf(ax, img,colorlenth)
ax.set_xlim([centreofmass[0]-7.50, centreofmass[0]+7.50])
if (j==6):
ax.set_xlim([centreofmass[0]-7.0, centreofmass[0]+8.0])
ax.set_ylim([10.7, 20.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
box_x = [xlim[0], xlim[1], xlim[1], xlim[0], xlim[0]]
box_y =[ylim[0], ylim[0], ylim[1], ylim[1], ylim[0]]
ax.add_patch(
patches.Rectangle(
(box_x[0],box_y[0]),
xlim[1]-xlim[0],
ylim[1]-ylim[0],
fill=True,facecolor='white', clip_on=False,zorder =0.8) )
ax.plot(box_x, box_y, color='blue',linewidth=5.0)
plt.axes(ax)
# 0 1
# ax = plt.subplot()
dy = (inverse((1, 0)) - inverse((1, -0.1)))[1]
xy = transform((j+0.5, energydif[j]))
print(dy, xy)
ax = plt.axes([xy[0], xy[1]-(dy)/13.3, 0.08, 0.08])
cell = atoms.get_cell()
img = atoms.copy()
plot_conf(ax, img,colorlenth, rot=True)
ax.set_xlim([centreofmass[0]-7.50, centreofmass[0]+7.5])
if (j ==6):
ax.set_xlim([centreofmass[0]-7.0, centreofmass[0]+8.0])
ax.set_ylim([centreofmass[1]-6.5, centreofmass[1]+7.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
box_x = [xlim[0], xlim[1], xlim[1], xlim[0], xlim[0]]
box_y =[ylim[0], ylim[0], ylim[1], ylim[1], ylim[0]]
ax.add_patch(
patches.Rectangle(
(box_x[0],box_y[0]),
xlim[1]-xlim[0],
ylim[1]-ylim[0],
fill=True,facecolor='white', clip_on=False,zorder =0.8) )
ax.plot(box_x, box_y, color='blue',linewidth=5.0)
plt.axes(ax)
#--------------------------------------------------------------------------------------#
data=read(sys.argv[3]+'@:')
for j in range(0,len(data)):
GM_energy = data[5].get_potential_energy()
energydif[j] = (data[j].get_potential_energy() - GM_energy)
print(j,energydif[j])
#-----------------------------------------------------------------------------------------#
global_ax = ax1
transform = lambda x: fig.transFigure.inverted().transform(global_ax.transData.transform(x))
inverse = lambda x: global_ax.transData.inverted().transform(fig.transFigure.transform(x))
for j in range(4,9):
atoms = data[j]
colorlenth = len(atoms)
atoms =atoms*(3,3,1)
#print(colorlenth)
a=atoms
del atoms[[atom.index for atom in atoms if atom.index <=colorlenth*5-18 or atom.index >=colorlenth*5]]
centreofmass = a.get_center_of_mass()
atoms = data[j]*(3,3,1)
a=atoms
del atoms[atoms.positions[:,0] >=centreofmass[0]+8.0]
del atoms[atoms.positions[:,0] <= centreofmass[0]-8.0]
del atoms[atoms.positions[:,1] >= centreofmass[1]+7.3]
del atoms[atoms.positions[:,1] <= centreofmass[1]-7.0]
colorlenth = len(atoms)
cell = atoms.get_cell()
# 0 0
dy = (inverse((1, 0)) - inverse((1, -0.1)))[1]
xy = transform((j+0.5, energydif[j]))
print(dy, xy)
ax = plt.axes([xy[0]+0.032, xy[1]-(dy)/300.50, 0.066, 0.08])
img = atoms.copy()
plot_conf(ax, img,colorlenth)
ax.set_xlim([centreofmass[0]-7.50, centreofmass[0]+7.50])
ax.set_ylim([10.7, 20.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
#----------------- drawing box -------------------------------#
xlim = ax.get_xlim()
ylim = ax.get_ylim()
box_x = [xlim[0], xlim[1], xlim[1], xlim[0], xlim[0]]
box_y =[ylim[0], ylim[0], ylim[1], ylim[1], ylim[0]]
ax.add_patch(
patches.Rectangle(
(box_x[0],box_y[0]),
xlim[1]-xlim[0],
ylim[1]-ylim[0],
fill=True,facecolor='white', clip_on=False,zorder =0.8) )
ax.plot(box_x, box_y, color='blue',linewidth=5.0)
plt.axes(ax)
# 0 1
dy = (inverse((1, 0)) - inverse((1, -0.1)))[1]
xy = transform((j+0.5, energydif[j]))
print(dy, xy)
ax = plt.axes([xy[0]+0.025, xy[1]-(dy)/13.3, 0.08, 0.08])
cell = atoms.get_cell()
img = atoms.copy()
plot_conf(ax, img,colorlenth, rot=True)
ax.set_xlim([centreofmass[0]-7.5, centreofmass[0]+7.5])
ax.set_ylim([centreofmass[1]-6.5, centreofmass[1]+7.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
box_x = [xlim[0], xlim[1], xlim[1], xlim[0], xlim[0]]
box_y =[ylim[0], ylim[0], ylim[1], ylim[1], ylim[0]]
ax.add_patch(
patches.Rectangle(
(box_x[0],box_y[0]),
xlim[1]-xlim[0],
ylim[1]-ylim[0],
fill=True,facecolor='white', clip_on=False,zorder =0.8) )
ax.plot(box_x, box_y, color='blue',linewidth=5.0)
plt.axes(ax)
#-------------------- set 1 Pt7+CO on surface ---------------#
CO =read('CO_molecule_DFTrelaxed.traj@:')
E_CO = CO[0].get_potential_energy()
data=read(sys.argv[4]+'@:')
energydif =np.zeros(len(data))
for j in range(len(data)):
energydif[j] = (data[j].get_potential_energy()- GM_energy -E_CO)
print(j,energydif[j])
for j in range(4,9):
atoms = data[j]
colorlenth = len(atoms)
atoms =atoms*(3,3,1)
#print(colorlenth)
a=atoms
del atoms[[atom.index for atom in atoms if atom.index <=colorlenth*5-20 or atom.index >=colorlenth*5]]
centreofmass = a.get_center_of_mass()
atoms = data[j]*(3,3,1)
a=atoms
del atoms[atoms.positions[:,0] >=centreofmass[0]+8.10]
del atoms[atoms.positions[:,0] <= centreofmass[0]-8.10]
del atoms[atoms.positions[:,1] >= centreofmass[1]+7.3]
del atoms[atoms.positions[:,1] <= centreofmass[1]-7.10]
if (j ==6):
del atoms[atoms.positions[:,0] >=centreofmass[0]+9.0]
del atoms[atoms.positions[:,0] <= centreofmass[0]-7.10]
del atoms[atoms.positions[:,1] >= centreofmass[1]+7.3]
del atoms[atoms.positions[:,1] <= centreofmass[1]-7.10]
colorlenth = len(atoms)
cell = atoms.get_cell()
# 0 0
dy = (inverse((1, 0)) - inverse((1, -0.1)))[1]
xy = transform((j+0.5, energydif[j]))
print(dy, xy)
ax = plt.axes([xy[0]+0.032, xy[1]-(dy)/300.50, 0.066, 0.08])
img = atoms.copy()
plot_conf(ax, img,colorlenth)
ax.set_xlim([centreofmass[0]-7.50, centreofmass[0]+7.50])
if (j==6):
ax.set_xlim([centreofmass[0]-7.0, centreofmass[0]+8.0])
ax.set_ylim([10.7, 20.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
box_x = [xlim[0], xlim[1], xlim[1], xlim[0], xlim[0]]
box_y =[ylim[0], ylim[0], ylim[1], ylim[1], ylim[0]]
ax.add_patch(
patches.Rectangle(
(box_x[0],box_y[0]),
xlim[1]-xlim[0],
ylim[1]-ylim[0],
fill=True,facecolor='white', clip_on=False,zorder =0.8) )
ax.plot(box_x, box_y, color='blue',linewidth=5.0)
plt.axes(ax)
# 0 1
dy = (inverse((1, 0)) - inverse((1, -0.1)))[1]
xy = transform((j+0.5, energydif[j]))
print(dy, xy)
ax = plt.axes([xy[0]+0.025, xy[1]-(dy)/13.3, 0.08, 0.08])
cell = atoms.get_cell()
img = atoms.copy()
plot_conf(ax, img,colorlenth, rot=True)
ax.set_xlim([centreofmass[0]-7.50, centreofmass[0]+7.5])
if (j ==6):
ax.set_xlim([centreofmass[0]-7.0, centreofmass[0]+8.0])
ax.set_ylim([centreofmass[1]-6.5, centreofmass[1]+7.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
box_x = [xlim[0], xlim[1], xlim[1], xlim[0], xlim[0]]
box_y =[ylim[0], ylim[0], ylim[1], ylim[1], ylim[0]]
ax.add_patch(
patches.Rectangle(
(box_x[0],box_y[0]),
xlim[1]-xlim[0],
ylim[1]-ylim[0],
fill=True,facecolor='white', clip_on=False,zorder =0.8) )
ax.plot(box_x, box_y, color='blue',linewidth=5.0)
plt.axes(ax)
name = sys.argv[5]
#savefig(name,bbox_inches='tight',format='png')
savefig(name,format='pdf')
show()
|
[
"sivachiriki@phys.au.dk"
] |
sivachiriki@phys.au.dk
|
f3410b1a32f31f521ac07230f9185f5d98a9d39c
|
195cdab2f180ced13b0e4c097ab75a565a5eb1d7
|
/fhir/resources/basic.py
|
9c0718f03af5b134957b7eecd6549684507676be
|
[
"BSD-3-Clause"
] |
permissive
|
chgl/fhir.resources
|
18166d7297a23ef0c144023fb5ba1d46b7bd2527
|
35b22314642640c0b25960ab5b2855e7c51749ef
|
refs/heads/master
| 2023-03-09T08:16:38.287970
| 2021-02-13T11:10:03
| 2021-02-13T11:10:03
| 341,916,191
| 0
| 0
|
NOASSERTION
| 2021-02-24T13:52:58
| 2021-02-24T13:52:58
| null |
UTF-8
|
Python
| false
| false
| 3,007
|
py
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Basic
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import typing
from pydantic import Field
from . import domainresource, fhirtypes
class Basic(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Resource for non-supported content.
Basic is used for handling concepts not yet defined in FHIR, narrative-only
resources that don't map to an existing resource, and custom resources not
appropriate for inclusion in the FHIR specification.
"""
resource_type = Field("Basic", const=True)
author: fhirtypes.ReferenceType = Field(
None,
alias="author",
title="Who created",
description="Indicates who was responsible for creating the resource instance.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=[
"Practitioner",
"PractitionerRole",
"Patient",
"RelatedPerson",
"Organization",
],
)
code: fhirtypes.CodeableConceptType = Field(
...,
alias="code",
title="Kind of Resource",
description=(
"Identifies the 'type' of resource - equivalent to the resource name "
"for other resources."
),
# if property is element of this resource.
element_property=True,
)
created: fhirtypes.Date = Field(
None,
alias="created",
title="When created",
description="Identifies when the resource was first created.",
# if property is element of this resource.
element_property=True,
)
created__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_created", title="Extension field for ``created``."
)
identifier: typing.List[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="Business identifier",
description=(
"Identifier assigned to the resource for business purposes, outside the"
" context of FHIR."
),
# if property is element of this resource.
element_property=True,
)
subject: fhirtypes.ReferenceType = Field(
None,
alias="subject",
title="Identifies the focus of this resource",
description=(
"Identifies the patient, practitioner, device or any other resource "
'that is the "focus" of this resource.'
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Resource"],
)
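# Minimal construction sketch (hypothetical values; per the fields above,
# ``code`` is the only required element):
# resource = Basic(code={"coding": [{"code": "referral"}]})
# resource.resource_type  # -> "Basic"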
|
[
"connect2nazrul@gmail.com"
] |
connect2nazrul@gmail.com
|
189b517adaf6ebba2d2200dec87b89e935717efe
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/e4ea999e6dc842e98c29787cac4b7c7c24a565ee-<what_provides>-fix.py
|
0b8464873610e33bcb099043f535f690142b8227
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,856
|
py
|
def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=None, dis_repos=None, installroot='/'):
if (en_repos is None):
en_repos = []
if (dis_repos is None):
dis_repos = []
if (not repoq):
pkgs = []
try:
my = yum_base(conf_file, installroot)
for rid in dis_repos:
my.repos.disableRepo(rid)
for rid in en_repos:
my.repos.enableRepo(rid)
pkgs = (my.returnPackagesByDep(req_spec) + my.returnInstalledPackagesByDep(req_spec))
if (not pkgs):
(e, m, u) = my.pkgSack.matchPackageNames([req_spec])
pkgs.extend(e)
pkgs.extend(m)
(e, m, u) = my.rpmdb.matchPackageNames([req_spec])
pkgs.extend(e)
pkgs.extend(m)
except Exception:
e = get_exception()
module.fail_json(msg=('Failure talking to yum: %s' % e))
return set([po_to_envra(p) for p in pkgs])
else:
myrepoq = list(repoq)
r_cmd = ['--disablerepo', ','.join(dis_repos)]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(en_repos)]
myrepoq.extend(r_cmd)
cmd = (myrepoq + ['--qf', qf, '--whatprovides', req_spec])
(rc, out, err) = module.run_command(cmd)
cmd = (myrepoq + ['--qf', qf, req_spec])
(rc2, out2, err2) = module.run_command(cmd)
if ((rc == 0) and (rc2 == 0)):
out += out2
pkgs = set([p for p in out.split('\n') if p.strip()])
if (not pkgs):
pkgs = is_installed(module, repoq, req_spec, conf_file, qf=qf, installroot=installroot)
return pkgs
else:
module.fail_json(msg=('Error from repoquery: %s: %s' % (cmd, (err + err2))))
return set()
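# Usage sketch (hypothetical arguments; ``module`` is the AnsibleModule
# instance and ``repoq`` the base repoquery command list used elsewhere):
# pkgs = what_provides(module, ['repoquery'], 'httpd', '/etc/yum.conf')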
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
783c66a504296f0e148ed98d17a930adf823cb84
|
7d6f540559d3d0438c980b688439553ca67c5d33
|
/esrgan_pytorch/loss.py
|
420f965eba05ca17fb82e013ac9dfebe25c65118
|
[
"Apache-2.0"
] |
permissive
|
RheaStrike/ESRGAN-PyTorch
|
0b62d4f2268bb072024080b534d1f1651f2c6054
|
2fd02431e88cbcec533c9c1794ec0980cb2d3084
|
refs/heads/master
| 2023-07-06T08:10:15.803935
| 2021-08-13T07:43:51
| 2021-08-13T07:43:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,456
|
py
|
# Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""It mainly implements all the losses used in the model."""
import lpips
import torch
import torch.nn.functional
import torchvision
__all__ = [
"CharbonnierLoss", "ContentLoss", "LPIPSLoss"
]
class CharbonnierLoss(torch.nn.Module):
r""" The charbonnier loss(one variant of Robust L1Loss) function optimizes
the error between the minimum residual image and the real image by one level.
The explanation of the paper is as follows:
* `"Deep Laplacian Pyramid Networks for Fast and Accurate Super-Resolution" <https://arxiv.org/pdf/1710.01992.pdf>` paper.
Learn a mapping function for generating an HR image that is as similar to the ground truth HR image as possible.
Args:
eps (float): Prevent value equal to 0. (Default: 1e-12)
Examples:
>>> charbonnier_loss = CharbonnierLoss()
        >>> # Create random images at a resolution of 224*224.
>>> inputs = torch.randn(1, 3, 224, 224)
>>> target = torch.randn(1, 3, 224, 224)
>>> loss = charbonnier_loss(inputs, target)
"""
def __init__(self, eps: float = 1e-12) -> None:
super(CharbonnierLoss, self).__init__()
self.eps = eps
def forward(self, source: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
# Calculate charbonnier loss.
loss = torch.mean(torch.sqrt((source - target) ** 2 + self.eps))
return loss
class ContentLoss(torch.nn.Module):
r""" The content loss function based on vgg19 network is constructed.
According to the suggestion of the paper, the 36th layer of feature extraction layer is used.
The explanation of the paper is as follows:
* "Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network" `<https://arxiv.org/pdf/1609.04802v5.pdf>` paper.
* "ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks" `<https://arxiv.org/pdf/1809.00219.pdf>` paper.
* "Perceptual Extreme Super Resolution Network with Receptive Field Block" `<https://arxiv.org/pdf/2005.12597.pdf>` paper.
A loss defined on feature maps of higher level features from deeper network layers
with more potential to focus on the content of the images. We refer to this network
as SRGAN in the following.
Examples:
        >>> # Load the VGG19 model weights pre-trained on the ImageNet dataset as the content loss backbone.
        >>> content_loss = ContentLoss()
        >>> # Construct a random image at VGG19's expected input resolution of 224*224.
>>> inputs = torch.randn(1, 3, 224, 224)
>>> target = torch.randn(1, 3, 224, 224)
>>> loss = content_loss(inputs, target)
Notes:
features(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace=True)
(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(6): ReLU(inplace=True)
(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): ReLU(inplace=True)
(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): ReLU(inplace=True)
(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(13): ReLU(inplace=True)
(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): ReLU(inplace=True)
(16): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(17): ReLU(inplace=True)
(18): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(19): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(20): ReLU(inplace=True)
(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(22): ReLU(inplace=True)
(23): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(24): ReLU(inplace=True)
(25): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(26): ReLU(inplace=True)
(27): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(29): ReLU(inplace=True)
(30): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(31): ReLU(inplace=True)
(32): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(33): ReLU(inplace=True)
(34): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) ---> use this layer
(35): ReLU(inplace=True)
(36): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
"""
def __init__(self) -> None:
super(ContentLoss, self).__init__()
        # Load the VGG19 model weights pre-trained on the ImageNet dataset.
vgg19 = torchvision.models.vgg19(pretrained=True)
        # Keep the first 35 layers (indices 0-34) of the VGG19 feature extractor,
        # i.e. up to the last convolution before its activation.
self.feature_extract = torch.nn.Sequential(*list(vgg19.features.children())[:35]).eval()
# Freeze model all parameters. Don't train.
for name, parameters in self.feature_extract.named_parameters():
parameters.requires_grad = False
def forward(self, source: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        # The content loss is the L1 distance between the VGG19 feature
        # representations of the reconstructed image and the reference image.
loss = torch.nn.functional.l1_loss(self.feature_extract(source), self.feature_extract(target))
return loss
# TODO: Source code reference from `https://github.com/richzhang/PerceptualSimilarity`.
class LPIPSLoss(torch.nn.Module):
r""" Learned Perceptual Image Patch Similarity (LPIPS) metric.
The explanation of the paper is as follows:
* "The Unreasonable Effectiveness of Deep Features as a Perceptual Metric" `<https://arxiv.org/pdf/1801.03924.pdf>` paper.
    For each convolution layer, unit-normalized activations are compared in the
    channel dimension and the distances are averaged over spatial positions and layers.
Examples:
        >>> # Load the pre-trained LPIPS network (VGG backbone) as a perceptual loss.
        >>> lpips_loss = LPIPSLoss()
        >>> # Construct a random image at a resolution of 224*224.
>>> inputs = torch.randn(1, 3, 224, 224)
>>> target = torch.randn(1, 3, 224, 224)
>>> loss = lpips_loss(inputs, target)
"""
def __init__(self) -> None:
super(LPIPSLoss, self).__init__()
self.feature_extract = lpips.LPIPS(net="vgg", verbose=False).eval()
# Freeze model all parameters. Don't train.
for name, parameters in self.feature_extract.named_parameters():
parameters.requires_grad = False
    def forward(self, source: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        # lpips.LPIPS takes both images at once and returns their perceptual
        # distance, so it is called with the pair rather than once per image.
        loss = torch.mean(self.feature_extract(source, target))
        return loss
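# A minimal smoke test (not part of the original module; assumes the
# torchvision VGG19 and LPIPS weights can be downloaded on first use):
if __name__ == "__main__":
    inputs = torch.randn(1, 3, 224, 224)
    target = torch.randn(1, 3, 224, 224)
    print("CharbonnierLoss:", CharbonnierLoss()(inputs, target).item())
    print("ContentLoss:", ContentLoss()(inputs, target).item())
    print("LPIPSLoss:", LPIPSLoss()(inputs, target).item())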
|
[
"liuchangyu1111@gmail.com"
] |
liuchangyu1111@gmail.com
|
ca84877336a32ce13eaf58fb125401a9b9585a26
|
2186fdd8350d6dc72340a65c2cc1d345c2c51377
|
/Python/Flask_MySQL/Friends/server.py
|
c6d0d7572c413d617edf8c5558b82bb6a7236a32
|
[] |
no_license
|
umanav/Lab206
|
2b494712b59585493e74c51089223696729eb716
|
31f0b098aa6722bbf7d2ad6e619fa38f29cab4d5
|
refs/heads/master
| 2020-03-10T07:54:25.904503
| 2018-04-12T15:37:20
| 2018-04-12T15:37:20
| 129,273,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,727
|
py
|
from flask import Flask, request, redirect, render_template, session, flash
from mysqlconnection import MySQLConnector
app = Flask(__name__)
mysql = MySQLConnector(app,'friendsdb')
# PRINT THE RESULTS
# @app.route('/')
# def index():
# friends = mysql.query_db("SELECT * FROM friends")
# print friends
# return render_template('index.html')
# DISPLAY THE RESULTS
@app.route('/')
def index():
query = "SELECT * FROM friends" # define your query
friends = mysql.query_db(query) # run query with query_db()
return render_template('index.html', all_friends=friends) # pass data to our template
@app.route('/friends', methods=['POST'])
def create():
# Write query as a string. Notice how we have multiple values
# we want to insert into our query.
query = "INSERT INTO friends (first_name, last_name, occupation, created_at, updated_at) VALUES (:first_name, :last_name, :occupation, NOW(), NOW())"
# We'll then create a dictionary of data from the POST data received.
data = {
'first_name': request.form['first_name'],
'last_name': request.form['last_name'],
'occupation': request.form['occupation']
}
# Run query, with dictionary values injected into the query.
mysql.query_db(query, data)
return redirect('/')
@app.route('/friends/<friend_id>')
def show(friend_id):
# Write query to select specific user by id. At every point where
# we want to insert data, we write ":" and variable name.
query = "SELECT * FROM friends WHERE id = :specific_id"
# Then define a dictionary with key that matches :variable_name in query.
data = {'specific_id': friend_id}
# Run query with inserted data.
friends = mysql.query_db(query, data)
# Friends should be a list with a single object,
# so we pass the value at [0] to our template under alias one_friend.
return render_template('index.html', one_friend=friends[0])
@app.route('/update_friend/<friend_id>', methods=['POST'])
def update(friend_id):
query = "UPDATE friends SET first_name = :first_name, last_name = :last_name, occupation = :occupation WHERE id = :id"
data = {
'first_name': request.form['first_name'],
'last_name': request.form['last_name'],
'occupation': request.form['occupation'],
'id': friend_id
}
mysql.query_db(query, data)
return redirect('/')
@app.route('/remove_friend/<friend_id>', methods=['POST'])
def delete(friend_id):
query = "DELETE FROM friends WHERE id = :id"
data = {'id': friend_id}
mysql.query_db(query, data)
return redirect('/')
app.run(debug=True)
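# A minimal sketch of the binding pattern used above (hypothetical data): each
# ":name" placeholder in the query string is filled from the matching key of
# the data dictionary by mysql.query_db, keeping user input out of the raw SQL.
#
#   query = "SELECT * FROM friends WHERE first_name = :fn AND last_name = :ln"
#   data = {'fn': 'Ada', 'ln': 'Lovelace'}
#   matches = mysql.query_db(query, data)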
|
[
"umanav@amazon.com"
] |
umanav@amazon.com
|
3b2b7ab047d9c7dcc6d3ab362ce8dfa58a724910
|
ca8dc4d5b6168648cf8a842fc27191fec3597a09
|
/venv/lib/python3.6/site-packages/pykalman/sqrt/cholesky.py
|
e7abceb688758e850131901bdfe5a2810928b2e9
|
[
"MIT"
] |
permissive
|
iefuzzer/vnpy_crypto
|
293a7eeceec18b934680dafc37381d1f5726dc89
|
d7eed63cd39b1639058474cb724a8f64adbf6f97
|
refs/heads/master
| 2020-03-26T20:13:38.780107
| 2018-09-10T06:09:16
| 2018-09-10T06:09:16
| 145,311,871
| 3
| 0
|
MIT
| 2018-09-10T06:09:18
| 2018-08-19T14:48:32
|
Python
|
UTF-8
|
Python
| false
| false
| 29,721
|
py
|
"""
=====================================
Inference for Linear-Gaussian Systems
=====================================
This module implements the Kalman Filter in "Square Root" form (Cholesky
factorization).
"""
import warnings
import numpy as np
from scipy import linalg
from ..standard import _arg_or_default, _determine_dimensionality, \
_last_dims, _loglikelihoods, _smooth, _smooth_pair, _em, KalmanFilter, DIM
from ..utils import array1d, array2d, check_random_state, \
get_params
def _reconstruct_covariances(covariance2s):
'''Reconstruct covariance matrices given their cholesky factors'''
if len(covariance2s.shape) == 2:
covariance2s = covariance2s[np.newaxis, :, :]
T = covariance2s.shape[0]
covariances = np.zeros(covariance2s.shape)
for t in range(T):
M = covariance2s[t]
covariances[t] = M.dot(M.T)
return covariances
def _filter_predict(transition_matrix, transition_covariance2,
transition_offset, current_state_mean,
current_state_covariance2):
r"""Calculate the mean and covariance of :math:`P(x_{t+1} | z_{0:t})`
Using the mean and covariance of :math:`P(x_t | z_{0:t})`, calculate the
mean and covariance of :math:`P(x_{t+1} | z_{0:t})`.
Parameters
----------
    transition_matrix : [n_dim_state, n_dim_state] array
state transition matrix from time t to t+1
transition_covariance2 : [n_dim_state, n_dim_state] array
square root of the covariance matrix for state transition from time
t to t+1
transition_offset : [n_dim_state] array
offset for state transition from time t to t+1
current_state_mean: [n_dim_state] array
mean of state at time t given observations from times
[0...t]
current_state_covariance2: [n_dim_state, n_dim_state] array
square root of the covariance of state at time t given observations
from times [0...t]
Returns
-------
predicted_state_mean : [n_dim_state] array
mean of state at time t+1 given observations from times [0...t]
predicted_state_covariance2 : [n_dim_state, n_dim_state] array
square root of the covariance of state at time t+1 given observations
from times [0...t]
References
----------
* Kaminski, Paul G. Square Root Filtering and Smoothing for Discrete
Processes. July 1971. Page 41.
"""
n_dim_state = len(current_state_mean)
# predict new mean
# x_{t+1|t} = A x_t + b_t
predicted_state_mean = (
np.dot(transition_matrix, current_state_mean)
+ transition_offset
)
# predict new covariance
# [S_{k|k-1}^T; 0] = T_1 [ S_{k-1|k-1}^T A^T; Q^{1/2}^T ] for orthonormal T_1
T, predicted_state_covariance2 = (
linalg.qr(np.hstack([
np.dot(transition_matrix, current_state_covariance2),
transition_covariance2
]).T)
)
predicted_state_covariance2 = (
predicted_state_covariance2[:n_dim_state, :n_dim_state].T
)
return (predicted_state_mean, predicted_state_covariance2)
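# A quick sanity-check sketch (not part of the original module): the factor
# returned by _filter_predict must reconstruct the textbook covariance update
# A P A^T + Q. All names below are local to this helper.
def _check_filter_predict_example():
    A = np.eye(2)             # transition matrix
    Q2 = np.eye(2) * 0.1      # Cholesky factor of the transition covariance
    x = np.zeros(2)           # current state mean
    P2 = np.eye(2)            # Cholesky factor of the current state covariance
    _, S = _filter_predict(A, Q2, np.zeros(2), x, P2)
    # S S^T == A (P2 P2^T) A^T + Q2 Q2^T
    assert np.allclose(S.dot(S.T), A.dot(P2.dot(P2.T)).dot(A.T) + Q2.dot(Q2.T))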
def _filter_correct(observation_matrix, observation_covariance2,
observation_offset, predicted_state_mean,
predicted_state_covariance2, observation):
r"""Correct a predicted state with a Kalman Filter update
Incorporate observation `observation` from time `t` to turn
:math:`P(x_t | z_{0:t-1})` into :math:`P(x_t | z_{0:t})`
Parameters
----------
observation_matrix : [n_dim_obs, n_dim_state] array
observation matrix for time t
observation_covariance2 : [n_dim_obs, n_dim_obs] array
square root of the covariance matrix for observation at time t
observation_offset : [n_dim_obs] array
offset for observation at time t
predicted_state_mean : [n_dim_state] array
mean of state at time t given observations from times
[0...t-1]
predicted_state_covariance2 : [n_dim_state, n_dim_state] array
square root of the covariance of state at time t given observations
from times [0...t-1]
observation : [n_dim_obs] array
observation at time t. If `observation` is a masked array and any of
its values are masked, the observation will be ignored.
Returns
-------
corrected_state_mean : [n_dim_state] array
mean of state at time t given observations from times
[0...t]
corrected_state_covariance2 : [n_dim_state, n_dim_state] array
square root of the covariance of state at time t given observations
from times [0...t]
References
----------
* Salzmann, M. A. Some Aspects of Kalman Filtering. August 1988. Page 31.
"""
if not np.any(np.ma.getmask(observation)):
# extract size of state space
n_dim_state = len(predicted_state_mean)
n_dim_obs = len(observation)
# construct matrix M = [ R^{1/2}^{T}, 0;
# (C S_{t|t-1})^T, S_{t|t-1}^T]
M = np.zeros(2 * [n_dim_obs + n_dim_state])
M[0:n_dim_obs, 0:n_dim_obs] = observation_covariance2.T
M[n_dim_obs:, 0:n_dim_obs] = observation_matrix.dot(predicted_state_covariance2).T
M[n_dim_obs:, n_dim_obs:] = predicted_state_covariance2.T
# solve for [((C P_{t|t-1} C^T + R)^{1/2})^T, K^T;
# 0, S_{t|t}^T] = QR(M)
(_, S) = linalg.qr(M)
kalman_gain = S[0:n_dim_obs, n_dim_obs:].T
N = S[0:n_dim_obs, 0:n_dim_obs].T
# correct mean
predicted_observation_mean = (
np.dot(observation_matrix,
predicted_state_mean)
+ observation_offset
)
corrected_state_mean = (
predicted_state_mean
+ np.dot(kalman_gain,
np.dot(linalg.pinv(N),
observation - predicted_observation_mean)
)
)
corrected_state_covariance2 = S[n_dim_obs:, n_dim_obs:].T
else:
n_dim_state = predicted_state_covariance2.shape[0]
n_dim_obs = observation_matrix.shape[0]
kalman_gain = np.zeros((n_dim_state, n_dim_obs))
corrected_state_mean = predicted_state_mean
corrected_state_covariance2 = predicted_state_covariance2
return (corrected_state_mean, corrected_state_covariance2)
def _filter(transition_matrices, observation_matrices, transition_covariance,
observation_covariance, transition_offsets, observation_offsets,
initial_state_mean, initial_state_covariance, observations):
"""Apply the Kalman Filter
Calculate posterior distribution over hidden states given observations up
to and including the current time step.
Parameters
----------
transition_matrices : [n_timesteps-1,n_dim_state,n_dim_state] or
[n_dim_state,n_dim_state] array-like
state transition matrices
    observation_matrices : [n_timesteps, n_dim_obs, n_dim_state] or [n_dim_obs, \
        n_dim_state] array-like
observation matrix
transition_covariance : [n_timesteps-1,n_dim_state,n_dim_state] or
[n_dim_state,n_dim_state] array-like
state transition covariance matrix
observation_covariance : [n_timesteps, n_dim_obs, n_dim_obs] or [n_dim_obs,
n_dim_obs] array-like
observation covariance matrix
transition_offsets : [n_timesteps-1, n_dim_state] or [n_dim_state] \
array-like
state offset
    observation_offsets : [n_timesteps, n_dim_obs] or [n_dim_obs] array-like
        observation offsets for times [0...n_timesteps-1]
initial_state_mean : [n_dim_state] array-like
mean of initial state distribution
initial_state_covariance : [n_dim_state, n_dim_state] array-like
covariance of initial state distribution
observations : [n_timesteps, n_dim_obs] array
observations from times [0...n_timesteps-1]. If `observations` is a
masked array and any of `observations[t]` is masked, then
`observations[t]` will be treated as a missing observation.
Returns
-------
predicted_state_means : [n_timesteps, n_dim_state] array
`predicted_state_means[t]` = mean of hidden state at time t given
observations from times [0...t-1]
predicted_state_covariance2s : [n_timesteps, n_dim_state, n_dim_state] array
`predicted_state_covariance2s[t]` = lower triangular factorization of
the covariance of hidden state at time t given observations from times
[0...t-1]
filtered_state_means : [n_timesteps, n_dim_state] array
`filtered_state_means[t]` = mean of hidden state at time t given
observations from times [0...t]
    filtered_state_covariance2s : [n_timesteps, n_dim_state, n_dim_state] array
`filtered_state_covariance2s[t]` = lower triangular factorization of
the covariance of hidden state at time t given observations from times
[0...t]
"""
n_timesteps = observations.shape[0]
n_dim_state = len(initial_state_mean)
n_dim_obs = observations.shape[1]
predicted_state_means = np.zeros((n_timesteps, n_dim_state))
predicted_state_covariance2s = np.zeros(
(n_timesteps, n_dim_state, n_dim_state)
)
filtered_state_means = np.zeros((n_timesteps, n_dim_state))
filtered_state_covariance2s = np.zeros(
(n_timesteps, n_dim_state, n_dim_state)
)
transition_covariance2 = linalg.cholesky(transition_covariance, lower=True)
observation_covariance2 = linalg.cholesky(observation_covariance, lower=True)
initial_state_covariance2 = linalg.cholesky(initial_state_covariance, lower=True)
for t in range(n_timesteps):
if t == 0:
predicted_state_means[t] = initial_state_mean
predicted_state_covariance2s[t] = initial_state_covariance2
else:
transition_matrix = _last_dims(transition_matrices, t - 1)
transition_offset = _last_dims(transition_offsets, t - 1, ndims=1)
predicted_state_means[t], predicted_state_covariance2s[t] = (
_filter_predict(
transition_matrix,
transition_covariance2,
transition_offset,
filtered_state_means[t - 1],
filtered_state_covariance2s[t - 1]
)
)
observation_matrix = _last_dims(observation_matrices, t)
observation_offset = _last_dims(observation_offsets, t, ndims=1)
(filtered_state_means[t], filtered_state_covariance2s[t]) = (
_filter_correct(
observation_matrix,
observation_covariance2,
observation_offset,
predicted_state_means[t],
predicted_state_covariance2s[t],
observations[t]
)
)
return (predicted_state_means, predicted_state_covariance2s,
filtered_state_means, filtered_state_covariance2s)
class CholeskyKalmanFilter(KalmanFilter):
"""Kalman Filter based on Cholesky decomposition
Parameters
----------
transition_matrices : [n_timesteps-1, n_dim_state, n_dim_state] or \
[n_dim_state,n_dim_state] array-like
Also known as :math:`A`. state transition matrix between times t and
t+1 for t in [0...n_timesteps-2]
    observation_matrices : [n_timesteps, n_dim_obs, n_dim_state] or [n_dim_obs, \
        n_dim_state] array-like
Also known as :math:`C`. observation matrix for times
[0...n_timesteps-1]
transition_covariance : [n_dim_state, n_dim_state] array-like
Also known as :math:`Q`. state transition covariance matrix for times
[0...n_timesteps-2]
observation_covariance : [n_dim_obs, n_dim_obs] array-like
Also known as :math:`R`. observation covariance matrix for times
[0...n_timesteps-1]
transition_offsets : [n_timesteps-1, n_dim_state] or [n_dim_state] \
array-like
Also known as :math:`b`. state offsets for times [0...n_timesteps-2]
observation_offsets : [n_timesteps, n_dim_obs] or [n_dim_obs] array-like
Also known as :math:`d`. observation offset for times
[0...n_timesteps-1]
initial_state_mean : [n_dim_state] array-like
Also known as :math:`\\mu_0`. mean of initial state distribution
initial_state_covariance : [n_dim_state, n_dim_state] array-like
Also known as :math:`\\Sigma_0`. covariance of initial state
distribution
random_state : optional, numpy random state
random number generator used in sampling
em_vars : optional, subset of ['transition_matrices', \
'observation_matrices', 'transition_offsets', 'observation_offsets', \
'transition_covariance', 'observation_covariance', 'initial_state_mean', \
'initial_state_covariance'] or 'all'
if `em_vars` is an iterable of strings only variables in `em_vars`
will be estimated using EM. if `em_vars` == 'all', then all
variables will be estimated.
n_dim_state: optional, integer
the dimensionality of the state space. Only meaningful when you do not
specify initial values for `transition_matrices`, `transition_offsets`,
`transition_covariance`, `initial_state_mean`, or
`initial_state_covariance`.
n_dim_obs: optional, integer
the dimensionality of the observation space. Only meaningful when you
do not specify initial values for `observation_matrices`,
`observation_offsets`, or `observation_covariance`.
"""
def filter(self, X):
"""Apply the Kalman Filter
Apply the Kalman Filter to estimate the hidden state at time :math:`t`
for :math:`t = [0...n_{\\text{timesteps}}-1]` given observations up to
and including time `t`. Observations are assumed to correspond to
times :math:`[0...n_{\\text{timesteps}}-1]`. The output of this method
corresponding to time :math:`n_{\\text{timesteps}}-1` can be used in
:func:`KalmanFilter.filter_update` for online updating.
Parameters
----------
X : [n_timesteps, n_dim_obs] array-like
observations corresponding to times [0...n_timesteps-1]. If `X` is
a masked array and any of `X[t]` is masked, then `X[t]` will be
treated as a missing observation.
Returns
-------
filtered_state_means : [n_timesteps, n_dim_state]
mean of hidden state distributions for times [0...n_timesteps-1]
given observations up to and including the current time step
filtered_state_covariances : [n_timesteps, n_dim_state, n_dim_state] \
array
covariance matrix of hidden state distributions for times
[0...n_timesteps-1] given observations up to and including the
current time step
"""
Z = self._parse_observations(X)
(transition_matrices, transition_offsets, transition_covariance,
observation_matrices, observation_offsets, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(_, _, filtered_state_means,
filtered_state_covariance2s) = (
_filter(
transition_matrices, observation_matrices,
transition_covariance, observation_covariance,
transition_offsets, observation_offsets,
initial_state_mean, initial_state_covariance,
Z
)
)
filtered_state_covariances = (
_reconstruct_covariances(filtered_state_covariance2s)
)
return (filtered_state_means, filtered_state_covariances)
def filter_update(self, filtered_state_mean, filtered_state_covariance,
observation=None, transition_matrix=None,
transition_offset=None, transition_covariance=None,
observation_matrix=None, observation_offset=None,
observation_covariance=None):
r"""Update a Kalman Filter state estimate
Perform a one-step update to estimate the state at time :math:`t+1`
give an observation at time :math:`t+1` and the previous estimate for
time :math:`t` given observations from times :math:`[0...t]`. This
method is useful if one wants to track an object with streaming
observations.
Parameters
----------
filtered_state_mean : [n_dim_state] array
mean estimate for state at time t given observations from times
[1...t]
filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t given observations from
times [1...t]
observation : [n_dim_obs] array or None
observation from time t+1. If `observation` is a masked array and
any of `observation`'s components are masked or if `observation` is
None, then `observation` will be treated as a missing observation.
transition_matrix : optional, [n_dim_state, n_dim_state] array
state transition matrix from time t to t+1. If unspecified,
`self.transition_matrices` will be used.
transition_offset : optional, [n_dim_state] array
state offset for transition from time t to t+1. If unspecified,
`self.transition_offset` will be used.
transition_covariance : optional, [n_dim_state, n_dim_state] array
state transition covariance from time t to t+1. If unspecified,
`self.transition_covariance` will be used.
observation_matrix : optional, [n_dim_obs, n_dim_state] array
observation matrix at time t+1. If unspecified,
`self.observation_matrices` will be used.
observation_offset : optional, [n_dim_obs] array
observation offset at time t+1. If unspecified,
`self.observation_offset` will be used.
observation_covariance : optional, [n_dim_obs, n_dim_obs] array
observation covariance at time t+1. If unspecified,
`self.observation_covariance` will be used.
Returns
-------
next_filtered_state_mean : [n_dim_state] array
mean estimate for state at time t+1 given observations from times
[1...t+1]
next_filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t+1 given observations
from times [1...t+1]
"""
# initialize matrices
(transition_matrices, transition_offsets, transition_cov,
observation_matrices, observation_offsets, observation_cov,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
transition_offset = _arg_or_default(
transition_offset, transition_offsets,
1, "transition_offset"
)
observation_offset = _arg_or_default(
observation_offset, observation_offsets,
1, "observation_offset"
)
transition_matrix = _arg_or_default(
transition_matrix, transition_matrices,
2, "transition_matrix"
)
observation_matrix = _arg_or_default(
observation_matrix, observation_matrices,
2, "observation_matrix"
)
transition_covariance = _arg_or_default(
transition_covariance, transition_cov,
2, "transition_covariance"
)
observation_covariance = _arg_or_default(
observation_covariance, observation_cov,
2, "observation_covariance"
)
# Make a masked observation if necessary
if observation is None:
n_dim_obs = observation_covariance.shape[0]
observation = np.ma.array(np.zeros(n_dim_obs))
observation.mask = True
else:
observation = np.ma.asarray(observation)
# turn covariance into cholesky factorizations
transition_covariance2 = linalg.cholesky(transition_covariance, lower=True)
observation_covariance2 = linalg.cholesky(observation_covariance, lower=True)
filtered_state_covariance2 = linalg.cholesky(filtered_state_covariance, lower=True)
# predict
predicted_state_mean, predicted_state_covariance2 = (
_filter_predict(
transition_matrix, transition_covariance2,
transition_offset, filtered_state_mean,
filtered_state_covariance2
)
)
# correct
(next_filtered_state_mean, next_filtered_state_covariance2) = (
_filter_correct(
observation_matrix, observation_covariance2,
observation_offset, predicted_state_mean,
predicted_state_covariance2, observation
)
)
# reconstruct actual covariance
next_filtered_state_covariance = (
_reconstruct_covariances(next_filtered_state_covariance2)
)
return (next_filtered_state_mean, next_filtered_state_covariance)
def smooth(self, X):
"""Apply the Kalman Smoother
Apply the Kalman Smoother to estimate the hidden state at time
:math:`t` for :math:`t = [0...n_{\\text{timesteps}}-1]` given all
observations. See :func:`_smooth` for more complex output
Parameters
----------
X : [n_timesteps, n_dim_obs] array-like
observations corresponding to times [0...n_timesteps-1]. If `X` is
a masked array and any of `X[t]` is masked, then `X[t]` will be
treated as a missing observation.
Returns
-------
smoothed_state_means : [n_timesteps, n_dim_state]
mean of hidden state distributions for times [0...n_timesteps-1]
given all observations
        smoothed_state_covariances : [n_timesteps, n_dim_state, n_dim_state]
covariances of hidden state distributions for times
[0...n_timesteps-1] given all observations
"""
Z = self._parse_observations(X)
(transition_matrices, transition_offsets, transition_covariance,
observation_matrices, observation_offsets, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
# run filter
(predicted_state_means, predicted_state_covariance2s,
filtered_state_means, filtered_state_covariance2s) = (
_filter(
transition_matrices, observation_matrices,
transition_covariance, observation_covariance,
transition_offsets, observation_offsets,
initial_state_mean, initial_state_covariance, Z
)
)
# construct actual covariance matrices
predicted_state_covariances = (
_reconstruct_covariances(predicted_state_covariance2s)
)
filtered_state_covariances = (
_reconstruct_covariances(filtered_state_covariance2s)
)
(smoothed_state_means, smoothed_state_covariances) = (
_smooth(
transition_matrices, filtered_state_means,
filtered_state_covariances, predicted_state_means,
predicted_state_covariances
)[:2]
)
return (smoothed_state_means, smoothed_state_covariances)
def em(self, X, y=None, n_iter=10, em_vars=None):
"""Apply the EM algorithm
Apply the EM algorithm to estimate all parameters specified by
`em_vars`. Note that all variables estimated are assumed to be
constant for all time. See :func:`_em` for details.
Parameters
----------
X : [n_timesteps, n_dim_obs] array-like
observations corresponding to times [0...n_timesteps-1]. If `X` is
a masked array and any of `X[t]`'s components is masked, then
`X[t]` will be treated as a missing observation.
n_iter : int, optional
number of EM iterations to perform
em_vars : iterable of strings or 'all'
variables to perform EM over. Any variable not appearing here is
left untouched.
"""
Z = self._parse_observations(X)
# initialize parameters
(self.transition_matrices, self.transition_offsets,
self.transition_covariance, self.observation_matrices,
self.observation_offsets, self.observation_covariance,
self.initial_state_mean, self.initial_state_covariance) = (
self._initialize_parameters()
)
# Create dictionary of variables not to perform EM on
if em_vars is None:
em_vars = self.em_vars
if em_vars == 'all':
given = {}
else:
given = {
'transition_matrices': self.transition_matrices,
'observation_matrices': self.observation_matrices,
'transition_offsets': self.transition_offsets,
'observation_offsets': self.observation_offsets,
'transition_covariance': self.transition_covariance,
'observation_covariance': self.observation_covariance,
'initial_state_mean': self.initial_state_mean,
'initial_state_covariance': self.initial_state_covariance
}
em_vars = set(em_vars)
for k in list(given.keys()):
if k in em_vars:
given.pop(k)
# If a parameter is time varying, print a warning
for (k, v) in get_params(self).items():
if k in DIM and (not k in given) and len(v.shape) != DIM[k]:
warn_str = (
'{0} has {1} dimensions now; after fitting, '
+ 'it will have dimension {2}'
).format(k, len(v.shape), DIM[k])
warnings.warn(warn_str)
# Actual EM iterations
for i in range(n_iter):
# run filter
(predicted_state_means, predicted_state_covariance2s,
filtered_state_means, filtered_state_covariance2s) = (
_filter(
self.transition_matrices, self.observation_matrices,
self.transition_covariance, self.observation_covariance,
self.transition_offsets, self.observation_offsets,
self.initial_state_mean, self.initial_state_covariance,
Z
)
)
# reconstruct covariances
filtered_state_covariances = (
_reconstruct_covariances(filtered_state_covariance2s)
)
predicted_state_covariances = (
_reconstruct_covariances(predicted_state_covariance2s)
)
# run smoother
(smoothed_state_means, smoothed_state_covariances,
kalman_smoothing_gains) = (
_smooth(
self.transition_matrices, filtered_state_means,
filtered_state_covariances, predicted_state_means,
predicted_state_covariances
)
)
# calculate pairwise covariances
sigma_pair_smooth = _smooth_pair(
smoothed_state_covariances,
kalman_smoothing_gains
)
(self.transition_matrices, self.observation_matrices,
self.transition_offsets, self.observation_offsets,
self.transition_covariance, self.observation_covariance,
self.initial_state_mean, self.initial_state_covariance) = (
_em(Z, self.transition_offsets, self.observation_offsets,
smoothed_state_means, smoothed_state_covariances,
sigma_pair_smooth, given=given
)
)
return self
def loglikelihood(self, X):
"""Calculate the log likelihood of all observations
Parameters
----------
X : [n_timesteps, n_dim_obs] array
observations for time steps [0...n_timesteps-1]
Returns
-------
likelihood : float
likelihood of all observations
"""
Z = self._parse_observations(X)
# initialize parameters
(transition_matrices, transition_offsets,
transition_covariance, observation_matrices,
observation_offsets, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
# apply the Kalman Filter
(predicted_state_means, predicted_state_covariance2s,
filtered_state_means, filtered_state_covariance2s) = (
_filter(
transition_matrices, observation_matrices,
transition_covariance, observation_covariance,
transition_offsets, observation_offsets,
initial_state_mean, initial_state_covariance,
Z
)
)
# get likelihoods for each time step
predicted_state_covariances = (
_reconstruct_covariances(predicted_state_covariance2s)
)
loglikelihoods = _loglikelihoods(
observation_matrices, observation_offsets, observation_covariance,
predicted_state_means, predicted_state_covariances, Z
)
return np.sum(loglikelihoods)
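# Usage sketch (synthetic 1-D random walk; not part of the original module):
#
#   from pykalman.sqrt import CholeskyKalmanFilter
#   kf = CholeskyKalmanFilter(transition_matrices=np.eye(1),
#                             observation_matrices=np.eye(1),
#                             transition_covariance=np.eye(1) * 0.01,
#                             observation_covariance=np.eye(1) * 0.1)
#   X = np.cumsum(np.random.randn(50, 1) * 0.1, axis=0)
#   filtered_means, filtered_covs = kf.filter(X)
#   smoothed_means, smoothed_covs = kf.smooth(X)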
|
[
"panwei303031816@gmail.com"
] |
panwei303031816@gmail.com
|
5440258e4b5db4b0b2b20ec4bdc62cac15833350
|
ba8e9235d5385d49d94be4347283e1a9550dc4ee
|
/progressivedme/commands/__init__.py
|
7fdfa3e2ebc21ab14900a8fccf89f89516dd6239
|
[
"Apache-2.0"
] |
permissive
|
navdeepghai1/progressivedme
|
2e29340d782d6cc610fd42c13307b0328db0dfa4
|
092dc6581739d8c2a603a27f144f7849f44b7f6c
|
refs/heads/master
| 2020-10-02T03:39:57.746107
| 2020-01-09T19:57:43
| 2020-01-09T19:57:43
| 227,692,994
| 0
| 0
|
NOASSERTION
| 2019-12-12T20:47:11
| 2019-12-12T20:43:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,063
|
py
|
'''
Developer Navdeep
Email navdeepghai1@gmail.com
'''
import frappe
import click
from frappe.commands import pass_context
def connect_to_default_site(context):
flag = False
if(context.get("sites")):
for site in context.get("sites") or []:
if site == "progressivedme.com":
frappe.init(site)
frappe.connect()
flag = True
return flag
@click.command()
@click.argument("filename")
@pass_context
def read_data(context, filename):
if(not connect_to_default_site(context)):
print("Site not found")
return
from progressivedme.read_excel_data import read_csv_data
update_subgroup(read_csv_data(context, filename))
frappe.db.commit()
# COMMIT THE CHANGES
def update_subgroup(data):
for item in data:
item_number = item.item_number
minor_category = item.minor_category
major_category = item.major_category
        if (item_number and minor_category and major_category
                and frappe.db.get_value("Item Group", major_category)):
try:
if(minor_category and not frappe.db.exists("Item Group", minor_category)):
frappe.get_doc({
"item_group_name": minor_category,
"parent_item_group": major_category,
"doctype": "Item Group",
}).save(ignore_permissions=True)
if(frappe.db.exists("Item Group", minor_category)):
frappe.db.sql("UPDATE `tabItem Group` SET is_group=1 WHERE name = '%s' "%(major_category))
frappe.db.sql(""" UPDATE `tabItem` SET item_group = '%s' WHERE name = '%s'
"""%(minor_category, item_number))
print("Item: %s, Updated"%(item_number))
except Exception as e:
print("an error occurred while processing the item")
print(e)
else:
print("Missing Information for Item: %s"%(item_number))
@click.command()
@click.argument("filename")
@pass_context
def update_drop_ship_legend(context, filename):
if(not connect_to_default_site(context)):
print("Site not found")
return
from progressivedme.read_excel_data import read_csv_data
data = read_csv_data(context, filename)
print(data)
commands = [read_data, update_drop_ship_legend]
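# Invocation sketch (assuming the commands above are registered through the
# app's commands hook, as is usual for custom Frappe/bench commands; the CSV
# filename is hypothetical):
#
#   bench --site progressivedme.com read-data items.csv
#   bench --site progressivedme.com update-drop-ship-legend items.csv
#
# Both commands connect to the hard-coded "progressivedme.com" site via
# connect_to_default_site() before touching the database.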
|
[
"navdeepghai1@gmail.com"
] |
navdeepghai1@gmail.com
|
0d60371386e984a6cd363da62cd98a73a2c2f095
|
d514a015b100c4c3a663056661e9c3c223c1c2a8
|
/common/models/food/FoodCat.py
|
e1be778c082c98e9bee7e8d49b5de60e261a3b54
|
[] |
no_license
|
462548187/order
|
63f3c8f570a241772c3dcf003c811044a6f5ed9d
|
a62f386e7661bfd7e01253203f324da48c52a331
|
refs/heads/master
| 2022-07-31T00:52:38.225530
| 2020-05-20T17:01:14
| 2020-05-20T17:01:14
| 263,845,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 940
|
py
|
# coding: utf-8
from sqlalchemy import Column, DateTime, Integer, String
from sqlalchemy.schema import FetchedValue
from application import app, db
class FoodCat(db.Model):
__tablename__ = 'food_cat'
id = db.Column(db.Integer, primary_key=True, unique=True)
    name = db.Column(db.String(50), nullable=False, server_default=db.FetchedValue(), info='category name')
    weight = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='weight')
    status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue(), info='status 1: active 0: inactive')
    updated_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue(), info='last updated time')
    created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue(), info='creation time')
@property
def status_desc(self):
return app.config['STATUS_MAPPING'][str(self.status)]
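# Usage sketch (assumes the Flask app config defines the mapping, e.g.
# app.config['STATUS_MAPPING'] = {'1': 'active', '0': 'inactive'}):
#
#   cat = FoodCat.query.filter_by(status=1).first()
#   if cat:
#       print(cat.name, cat.status_desc)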
|
[
"462548187@qq.com"
] |
462548187@qq.com
|
2f177f50a154a64f9e8b9dd45774e284acd54425
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/kk_bullet_constraints_builder/tools.py
|
d2d45cb6c8fd1ca8f2a872b2a17a43c5d15f35c0
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99,785
|
py
|
##############################
# Bullet Constraints Builder #
##############################
#
# Written within the scope of Inachus FP7 Project (607522):
# "Technological and Methodological Solutions for Integrated
# Wide Area Situation Awareness and Survivor Localisation to
# Support Search and Rescue (USaR) Teams"
# Versions 1 & 2 were developed at the Laurea University of Applied Sciences,
# Finland. Later versions are independently developed.
# Copyright (C) 2015-2018 Kai Kostack
#
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
################################################################################
import bpy, bmesh, os, array
from mathutils import Vector
from mathutils import Color
mem = bpy.app.driver_namespace
### Import submodules
from global_vars import * # Contains global variables
from builder_prep import * # Contains preparation steps functions called by the builder
from file_io import * # Contains file input & output functions
import kk_import_motion_from_text_file # Contains earthquake motion import function
import kk_mesh_fracture # Contains boolean based discretization function
import kk_mesh_separate_less_loose # Contains polygon based discretization function for non-manifolds
import kk_mesh_separate_loose # Contains speed-optimized mesh island separation function
import kk_mesh_subdiv_to_level # Contains edge length based subdivision function for non-manifolds
import kk_mesh_voxel_cell_grid_from_mesh # Contains voxel based discretization function
import kk_select_intersecting_objects # Contains intersection detection and resolving function
################################################################################
def tool_estimateClusterRadius(scene):
objs, emptyObjs = gatherObjects(scene)
if len(objs) > 0:
print("Estimating optimal cluster radius...")
#objsDiameter = []
diameterSum = 0
for obj in objs:
### Calculate diameter for each object
dim = list(obj.dimensions)
dim.sort()
diameter = dim[2] # Use the largest dimension axis as diameter
#objsDiameter.append(diameter)
diameterSum += diameter
# ### Sort all diameters, take the midst item and multiply it by 1 /sqrt(2)
# objsDiameter.sort()
# diameterEstimation = (objsDiameter[int(len(objsDiameter) /2)] /2) *0.707
### Alternative: Calculate average of all object diameters and multiply it by 1 /sqrt(2)
diameterEstimation = ((diameterSum /2) /len(objs)) *0.707
return diameterEstimation
else:
print("Selected objects required for cluster radius estimation.")
return 0
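# Worked example (hypothetical numbers): two selected objects whose largest
# dimensions are 2.0 and 4.0 give diameterSum = 6.0, so the estimate is
# ((6.0 /2) /2) *0.707 = 1.5 *0.707 ~= 1.06 Blender units.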
################################################################################
def tool_selectGroup(scene):
### Selects objects belonging to this element group in viewport.
props = bpy.context.window_manager.bcb
elemGrps = mem["elemGrps"]
# Check if element group name corresponds to scene group
grpName = elemGrps[props.menu_selectedElemGrp][EGSidxName]
try: grp = bpy.data.groups[grpName]
except: return
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
# Deselect all objects.
bpy.ops.object.select_all(action='DESELECT')
# Select all objects from that group
qFirst = 1
for obj in grp.objects:
if obj.type == 'MESH' and not obj.hide and obj.is_visible(bpy.context.scene):
obj.select = 1
if qFirst:
bpy.context.scene.objects.active = obj
qFirst = 0
################################################################################
def tool_runPythonScript(scene, filename=""):
print("\nExecuting user-defined Python script...")
if len(filename) == 0: print("No script defined."); return
# First try to get an internal text file, if not successful try again with external file
f = None
try: s = bpy.data.texts[filename].as_string()
except:
try: f = open(filename)
except: print("Script not found."); return
else: s = f.read()
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
    result = exec(s)  # Note: exec() always returns None; scripts act via side effects
if f != None: f.close()
print("Finished and returned:", result)
################################################################################
def createElementGroup(grpName, presetNo=0):
### Create new element group
props = bpy.context.window_manager.bcb
elemGrps = mem["elemGrps"]
# Check if group name is already in element group list
qExists = 0
for i in range(len(elemGrps)):
if grpName == elemGrps[i][EGSidxName]:
qExists = 1; break
if not qExists:
if len(elemGrps) < maxMenuElementGroupItems:
# Add element group (syncing element group indices happens on execution)
elemGrps.append(presets[presetNo].copy())
# Update menu selection
props.menu_selectedElemGrp = len(elemGrps) -1
else:
bpy.context.window_manager.bcb.message = "Maximum allowed element group count reached."
bpy.ops.bcb.report('INVOKE_DEFAULT') # Create popup message box
### Assign group name
i = props.menu_selectedElemGrp
elemGrps[i][EGSidxName] = grpName
########################################
def tool_createGroupsFromNames(scene):
print("\nCreating groups from object names...")
props = bpy.context.window_manager.bcb
if len(props.preprocTools_grp_sep) == 0: return
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
# Backup selection
selection = [obj for obj in bpy.context.scene.objects if obj.select]
# Find mesh objects in selection
objs = [obj for obj in selection if obj.type == 'MESH' and not obj.hide and obj.is_visible(bpy.context.scene) and len(obj.data.vertices) > 0]
if len(objs) == 0:
print("No mesh objects selected. Nothing done.")
return
### Create one main group for all objects
grpName = grpNameBuilding
try: grp = bpy.data.groups[grpName]
except: grp = bpy.data.groups.new(grpName)
for obj in objs:
try: grp.objects.link(obj)
except: pass
### Create group data with objects
grps = []
grpsObjs = []
for obj in objs:
if props.preprocTools_grp_sep in obj.name:
# grpName can also be ""
if props.preprocTools_grp_occ:
grpName = obj.name.split(props.preprocTools_grp_sep)[0]
else: grpName = obj.name.rsplit(props.preprocTools_grp_sep, 1)[0]
# If name could not match the convention then add object to a default group
else: grpName = ""
# Actual linking into group happens here
if grpName != "":
if grpName not in grps:
grps.append(grpName)
grpsObjs.append([])
grpIdx = len(grpsObjs)-1
else: grpIdx = grps.index(grpName)
grpsObjs[grpIdx].append(obj)
### Create actual object groups from data
for k in range(len(grps)):
grpName = grps[k]
objs = grpsObjs[k]
try: grp = bpy.data.groups[grpName]
except: grp = bpy.data.groups.new(grpName)
for obj in objs:
try: grp.objects.link(obj)
except: pass
### Create also element groups from data
for k in range(len(grps)):
grpName = grps[k]
createElementGroup(grpName, presetNo=0)
# Update menu related properties from global vars
props.props_update_menu()
print("Groups found:", len(grps))
################################################################################
def tool_applyAllModifiers(scene):
print("\nApplying modifiers...")
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
# Backup selection
selection = [obj for obj in bpy.context.scene.objects if obj.select]
selectionActive = bpy.context.scene.objects.active
# Find mesh objects in selection
objs = [obj for obj in selection if (obj.type == 'MESH' or obj.type == 'CURVE') and not obj.hide and obj.is_visible(bpy.context.scene)]
# Make duplis individual objects (e.g. convert particle system mesh instances into real objects)
objRem = []
for obj in objs:
if len(obj.modifiers) > 0:
for mod in obj.modifiers:
if hasattr(mod, "particle_system") and mod.particle_system != None:
# Deselect all objects.
bpy.ops.object.select_all(action='DESELECT')
bpy.context.scene.objects.active = obj
obj.select = 1
# Make particles real
bpy.ops.object.duplicates_make_real()
obj.select = 0
objRem.append(obj)
objO = obj
# Find new objects and add them to selection backup
selectionNew = [obj for obj in bpy.context.scene.objects if obj.select]
selection.extend(selectionNew)
# Find objects in selection
objsNew = [obj for obj in selectionNew if (obj.type == 'MESH' or obj.type == 'CURVE') and not obj.hide and obj.is_visible(bpy.context.scene)]
objs.extend(objsNew)
## Temporary workaround for triggered objects in FM (rotation is not initialized properly in FM when using triggers, needs to be fixed by Martin)
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=False, obdata=True, material=False, texture=False, animation=False)
bpy.ops.object.transform_apply(location=False, rotation=True, scale=True)
# Add particle objects to same groups as emitter object
for grp in bpy.data.groups:
if objO.name in grp.objects:
for obj in objsNew:
grp.objects.link(obj)
if len(objs) == 0:
print("No mesh objects selected. Nothing done.")
return
# Deselect all objects.
bpy.ops.object.select_all(action='DESELECT')
### Delete particle emitter objects (belongs to duplicates_make_real())
### Todo: flagging as 'not to be used' for simulation would be better
for obj in objRem: obj.select = 1
bpy.ops.object.delete(use_global=False)
### Update object lists
selectionNew = []
for obj in selection:
if obj not in objRem:
selectionNew.append(obj)
selection = selectionNew
objsNew = []
for obj in objs:
if obj not in objRem:
objsNew.append(obj)
objs = objsNew
### Make all objects unique mesh objects (clear instancing) which have modifiers applied
objsM = []
for obj in objs:
if len(obj.modifiers) > 0 or obj.type == 'CURVE':
obj.select = 1
objsM.append(obj)
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=False, texture=False, animation=False)
# Apply modifiers and convert curves to meshes
if len(objsM):
bpy.context.scene.objects.active = objsM[0]
bpy.ops.object.convert(target='MESH')
# Revert to start selection
for obj in selection: obj.select = 1
bpy.context.scene.objects.active = selectionActive
################################################################################
def tool_centerModel(scene):
print("\nCentering model...")
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
# Backup selection
selection = [obj for obj in bpy.context.scene.objects if obj.select]
selectionActive = bpy.context.scene.objects.active
# Find mesh objects in selection
objs = [obj for obj in selection if obj.type == 'MESH' and not obj.hide and obj.is_visible(bpy.context.scene) and len(obj.data.vertices) > 0]
if len(objs) == 0:
print("No mesh objects selected.")
return
# Deselect all objects.
bpy.ops.object.select_all(action='DESELECT')
# Select valid mesh objects only as we don't want to center other accidentally selected objects
for obj in objs: obj.select = 1
# Remove instances
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=False, texture=False, animation=False)
### Calculate boundary boxes for all objects
qFirst = 1
for obj in objs:
# Calculate boundary box corners
bbMin, bbMax, bbCenter = boundaryBox(obj, 1)
if qFirst:
bbMin_all = bbMin.copy(); bbMax_all = bbMax.copy()
qFirst = 0
else:
if bbMax_all[0] < bbMax[0]: bbMax_all[0] = bbMax[0]
if bbMin_all[0] > bbMin[0]: bbMin_all[0] = bbMin[0]
if bbMax_all[1] < bbMax[1]: bbMax_all[1] = bbMax[1]
if bbMin_all[1] > bbMin[1]: bbMin_all[1] = bbMin[1]
if bbMax_all[2] < bbMax[2]: bbMax_all[2] = bbMax[2]
if bbMin_all[2] > bbMin[2]: bbMin_all[2] = bbMin[2]
center = (bbMin_all +bbMax_all) /2
# Set cursor to X and Y location of the center, and Z of the bottom boundary of the structure
bpy.context.scene.cursor_location = Vector((center[0], center[1], bbMin_all[2]))
# Set mesh origins to cursor location
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
# Reset locations to center of world space
bpy.ops.object.location_clear(clear_delta=False)
# Set object centers to geometry origin
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
################################################################################
def tool_separateLoose(scene):
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
# Backup selection
selection = [obj for obj in bpy.context.scene.objects if obj.select]
selectionActive = bpy.context.scene.objects.active
# Find empty objects with constraints in selection
emptyObjs = [obj for obj in selection if obj.type == 'EMPTY' and not obj.hide and obj.is_visible(bpy.context.scene) and obj.rigid_body_constraint != None]
# Remove mesh objects which are used within constraints from selection (we want to leave them untouched)
objsConst = set()
for objC in emptyObjs:
for i in range(2):
if i == 0: obj = objC.rigid_body_constraint.object1
else: obj = objC.rigid_body_constraint.object2
if obj.select: obj.select = 0
    # Remove rigid body settings; because of the unlinking optimization in the
    # external module they would be lost anyway (while the RBW group remains)
bpy.ops.rigidbody.objects_remove()
# Remove instances
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=False, texture=False, animation=False)
###### External function
kk_mesh_separate_loose.run()
# Set object centers to geometry origin
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
# Revert to start selection
for obj in selection: obj.select = 1
bpy.context.scene.objects.active = selectionActive
################################################################################
def updateObjList(scene, objs):
### Add new objects and selected objects to the object list and remove deleted ones
for objTemp in scene.objects:
if objTemp.select:
if objTemp not in objs:
if objTemp.type == 'MESH' and not objTemp.hide and objTemp.is_visible(scene):
objs.append(objTemp)
for idx in reversed(range(len(objs))):
if objs[idx].name not in scene.objects:
del objs[idx]
########################################
def tool_discretize(scene):
props = bpy.context.window_manager.bcb
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
# Backup selection
selection = [obj for obj in bpy.context.scene.objects if obj.select]
selectionActive = bpy.context.scene.objects.active
# Find mesh objects in selection
objs = [obj for obj in selection if obj.type == 'MESH' and not obj.hide and obj.is_visible(bpy.context.scene) and len(obj.data.vertices) > 0]
if len(objs) == 0:
print("No mesh objects selected.")
return
# Find empty objects with constraints in selection
emptyObjs = [obj for obj in selection if obj.type == 'EMPTY' and not obj.hide and obj.is_visible(bpy.context.scene) and obj.rigid_body_constraint != None]
# Remove mesh objects which are used within constraints (we want to leave them untouched)
objsConst = set()
for objC in emptyObjs:
for i in range(2):
if i == 0: obj = objC.rigid_body_constraint.object1
else: obj = objC.rigid_body_constraint.object2
objsConst.add(obj)
objsNew = []
for obj in objs:
if obj not in objsConst: objsNew.append(obj)
objs = objsNew
if len(objs) == 0:
print("No mesh objects changed because of attached constraints.")
return
### Sort out non-manifold meshes (not water tight and thus not suited for boolean operations)
objsNew = []
objsNonMan = []
for obj in objs:
bpy.context.scene.objects.active = obj
me = obj.data
# Find non-manifold elements
bm = bmesh.new()
bm.from_mesh(me)
nonManifolds = [i for i, ele in enumerate(bm.edges) if not ele.is_manifold]
bm.free()
if len(nonManifolds) or props.surfaceForced: objsNonMan.append(obj)
else: objsNew.append(obj)
objs = objsNew
print("Non-manifold elements found:", len(objsNonMan))
###### Junction splitting and preparation for boolean halving
if not props.preprocTools_dis_cel or props.preprocTools_dis_jus:
# Create cutting plane to be used by external module
bpy.ops.mesh.primitive_plane_add(radius=100, view_align=False, enter_editmode=False, location=(0, 0, 0))
objC = bpy.context.scene.objects.active
objC.name = "BCB_CuttingPlane"
objC.select = 0
# Select mesh objects
for obj in objs: obj.select = 1
bpy.context.scene.objects.active = selectionActive
# Remove rigid body settings because the second scene optimization in the external module can produce ghost objects in RBW otherwise
bpy.ops.rigidbody.objects_remove()
# Remove instances
bpy.ops.object.make_single_user(type='SELECTED_OBJECTS', object=True, obdata=True, material=False, texture=False, animation=False)
###### External function
# Set object centers to geometry origin
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
# Parameters: [qSplitAtJunctions, minimumSizeLimit, qTriangulate, halvingCutter]
if props.preprocTools_dis_jus:
print("\nDiscretization - Junction pass:")
kk_mesh_fracture.run('BCB', ['JUNCTION', 0, 1, 'BCB_CuttingPlane'], None)
###### Voxel cell based discretization
if props.preprocTools_dis_cel:
print("\nDiscretization:")
###### External function
size = props.preprocTools_dis_siz
kk_mesh_voxel_cell_grid_from_mesh.run('BCB_Discretize', [Vector((size, size, size))])
# We have to repeat separate loose here
tool_separateLoose(scene)
elif not props.preprocTools_dis_cel:
###### Boolean based discretization
print("\nDiscretization - Halving pass:")
###### External function
kk_mesh_fracture.run('BCB', ['HALVING', props.preprocTools_dis_siz, 1, 'BCB_CuttingPlane'], None)
### Add new objects to the object list and remove deleted ones
updateObjList(scene, selection)
updateObjList(scene, objs)
    ### From now on do multiple passes until either no non-discretized objects are left or the passes limit is reached
passes = 5 # Maximum number of passes
passNum = 0
while 1:
### Check if there are still objects larger than minimumSizeLimit left (due to failed boolean operations),
### deselect all others and try discretization again
cnt = 0
failed = []
for obj in objs:
### Calculate diameter for each object
dim = list(obj.dimensions)
dim.sort()
diameter = dim[2] # Use the largest dimension axis as diameter
if diameter <= props.preprocTools_dis_siz:
cnt += 1
else: failed.append(obj)
count = len(objs) -cnt
# Stop condition
passNum += 1
if count == 0 or passNum > passes: break
if count > 0:
print("\nDiscretization - Pass %d (%d elements left):" %(passNum, count))
# Deselect all objects.
bpy.ops.object.select_all(action='DESELECT')
failedExt = []
for obj in failed:
obj.select = 1
bpy.context.scene.objects.active = obj
bpy.context.tool_settings.mesh_select_mode = False, True, False
# Enter edit mode
try: bpy.ops.object.mode_set(mode='EDIT')
except: pass
me = obj.data
bm = bmesh.from_edit_mesh(me)
# Select all elements
try: bpy.ops.mesh.select_all(action='SELECT')
except: pass
# Remove doubles
bpy.ops.mesh.remove_doubles(threshold=0.0001)
# Smooth vertices slightly so overlapping geometry shifts a bit, increasing the chance that the next splitting attempt succeeds
bpy.ops.mesh.vertices_smooth(factor=0.0001)
try: bpy.ops.mesh.select_all(action='SELECT')
except: pass
# Recalculate normals outside
bpy.ops.mesh.normals_make_consistent(inside=False)
### Check if mesh has non-manifolds
# Deselect all elements
try: bpy.ops.mesh.select_all(action='DESELECT')
except: pass
# Select non-manifold elements
bpy.ops.mesh.select_non_manifold()
# Check mesh if there are selected elements found
qNonManifolds = 0
for edge in bm.edges:
if edge.select: qNonManifolds = 1; break
bm.verts.ensure_lookup_table()
### Rip all vertices belonging to non-manifold edges
if qNonManifolds:
bpy.context.tool_settings.mesh_select_mode = True, False, False
vertCos = []
start = -1
for i in range(len(bm.verts)):
vert = bm.verts[i]
if vert.select:
vertCos.append(vert.co)
if start < 0: start = i
found = 1
while found > 0:
found = 0
i = start
while i < len(bm.verts):
vert = bm.verts[i]
if vert.co in vertCos:
# Deselect all elements
bpy.ops.mesh.select_all(action='DESELECT')
vert.select = 1
# Rip selection
try: bpy.ops.mesh.rip('INVOKE_DEFAULT')
except: pass
else: i -= 1; found += 1
bm.verts.ensure_lookup_table()
i += 1
# Separate loose
try: bpy.ops.mesh.separate(type='LOOSE')
except: pass
# Leave edit mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
# Set object centers to geometry origin
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
###### External function
kk_mesh_fracture.run('BCB', ['HALVING', props.preprocTools_dis_siz, 1, 'BCB_CuttingPlane'], None)
### Add new objects to the object list and remove deleted ones
updateObjList(scene, selection)
updateObjList(scene, objs)
### If there are still objects larger than minimumSizeLimit left (due to failed boolean operations)
### print warning message together with a list of the problematic objects
if count > 0:
print("\nWarning: Following %d objects couldn't be discretized sufficiently:" %count)
for obj in failed:
print(obj.name)
else: print("\nDiscretization verified and successful!")
print("Final element count:", len(objs))
###### Polygon based discretization (for non-manifolds)
if len(objsNonMan):
print("\nDiscretization - Non-manifold pass:")
# Deselect all objects.
#bpy.ops.object.select_all(action='DESELECT')
for obj in bpy.data.objects: # We need to clear the entire database because the subdiv module sees deleted objects
if obj.select: obj.select = 0
# Select non-manifold mesh objects
for obj in objsNonMan: obj.select = 1
bpy.context.scene.objects.active = obj
###### External function
kk_mesh_subdiv_to_level.run('BCB', [props.preprocTools_dis_siz])
###### External function
kk_mesh_separate_less_loose.run('BCB', [props.preprocTools_dis_siz])
### Add new objects to the object list and remove deleted ones
updateObjList(scene, selection)
###### Clean-up for junction splitting and boolean halving
# Update selection list if voxel cells together with junction search is used
if props.preprocTools_dis_cel and props.preprocTools_dis_jus:
### Add new objects to the object list and remove deleted ones
updateObjList(scene, selection)
if not props.preprocTools_dis_cel or props.preprocTools_dis_jus:
# Delete cutting plane object
bpy.context.scene.objects.unlink(objC)
# Revert to start selection
for obj in selection: obj.select = 1
bpy.context.scene.objects.active = selectionActive
# Set object centers to geometry origin
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
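# Illustrative sketch (not part of the tool flow above): the bmesh-based
# non-manifold test used during discretization, isolated as a standalone helper.
# Assumes the Blender 2.7x Python API; the function name is an example.
def example_findNonManifoldEdges(me):
    # Build a temporary BMesh copy of the mesh datablock
    bm = bmesh.new()
    bm.from_mesh(me)
    # An edge is manifold when it is shared by exactly two faces
    nonManifolds = [e.index for e in bm.edges if not e.is_manifold]
    bm.free()
    return nonManifolds
# Usage example: example_findNonManifoldEdges(bpy.context.object.data)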
################################################################################
def tool_removeIntersections(scene, mode=1):
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
# Backup selection
selection = [obj for obj in bpy.context.scene.objects if obj.select]
selectionActive = bpy.context.scene.objects.active
# Find mesh objects in selection
objs = [obj for obj in selection if obj.type == 'MESH' and not obj.hide and obj.is_visible(bpy.context.scene) and len(obj.data.vertices) > 0]
if len(objs) == 0:
print("No mesh objects selected.")
return
###### External function
props = bpy.context.window_manager.bcb
# [encaseTol, qSelectByVertCnt, qSelectSmallerVol, qSelectA, qSelectB, qDelete, qBool]
if mode == 1: # Resolve intersections
if props.preprocTools_int_bol:
count = kk_select_intersecting_objects.run('BCB', [0, 0, 0, 1, 1, 0, 1])
else: count = kk_select_intersecting_objects.run('BCB', [0.02, 1, 1, 0, 0, 1, 0])
elif mode == 2 or mode == 4: # Selection of all intersections
count = kk_select_intersecting_objects.run('BCB', [0, 0, 0, 1, 1, 0, 0])
elif mode == 3: # Selection for intersections which require booleans
count = kk_select_intersecting_objects.run('BCB', [0.02, 1, 1, 0, 0, 0, 0])
if mode == 1 and count > 0:
# For now disabled because overall simulations behave more stably without it:
### Switch found intersecting objects to 'Mesh' collision shape
### (some might only have overlapping bounding boxes while their geometry doesn't actually intersect)
# objs = [obj for obj in bpy.context.scene.objects if obj.select and obj.type == 'MESH' and not obj.hide and obj.is_visible(bpy.context.scene) and len(obj.data.vertices) > 0]
# if len(objs) > 0:
# obj = objs[0]
# if obj.rigid_body != None:
# bpy.ops.rigidbody.shape_change(type='MESH')
# for obj in objs:
# obj.rigid_body.collision_margin = 0
# Set object centers to geometry origin
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
if mode == 1 or (mode == 4 and count == 0):
# Revert to start selection
for obj in selection:
try: obj.select = 1
except: pass
bpy.context.scene.objects.active = selectionActive
return count
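# Usage sketch (mode values as handled above; calls are examples):
#   tool_removeIntersections(scene, mode=1)  # resolve intersections (boolean or delete)
#   tool_removeIntersections(scene, mode=2)  # select all intersections
#   tool_removeIntersections(scene, mode=3)  # select only intersections requiring booleans
#   tool_removeIntersections(scene, mode=4)  # select all, restore selection when none found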
################################################################################
def tool_enableRigidBodies(scene):
print("\nEnabling rigid body settings...")
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
# Backup selection
selection = [obj for obj in bpy.context.scene.objects if obj.select]
selectionActive = bpy.context.scene.objects.active
# Find mesh objects in selection
objs = [obj for obj in selection if obj.type == 'MESH' and not obj.hide and obj.is_visible(bpy.context.scene) and len(obj.data.vertices) > 0]
if len(objs) == 0:
print("No mesh objects selected.")
return
# Find non-mesh objects in selection
objsNoMesh = [obj for obj in selection if obj.type != 'MESH']
# Select only meshes
for obj in objsNoMesh: obj.select = 0
# Make sure there is an active object
bpy.context.scene.objects.active = objs[0]
# Apply rigid body settings
bpy.ops.rigidbody.objects_add()
# Set rigid bodies which are members of some specific groups to passive
# (Todo: each preprocessing tool that removes RB information, like Separate Loose, should back it up for all objects and restore it after the operation; then this can be removed)
for obj in objs:
for grpName in ["Fixed", "Passive", "Base"]:
if grpName in bpy.data.groups:
if obj.name in bpy.data.groups[grpName].objects:
obj.rigid_body.type = 'PASSIVE'
# Revert to start selection
for obj in selection: obj.select = 1
bpy.context.scene.objects.active = selectionActive
################################################################################
def createBoxData(verts, edges, faces, corner1, corner2):
### Create box geometry from boundaries
x1 = corner1[0]; x2 = corner2[0]
y1 = corner1[1]; y2 = corner2[1]
z1 = corner1[2]; z2 = corner2[2]
i = len(verts)
# Create the vertices for the box corners
verts.append(Vector([x1, y1, z1]))
verts.append(Vector([x2, y1, z1]))
verts.append(Vector([x2, y2, z1]))
verts.append(Vector([x1, y2, z1]))
verts.append(Vector([x1, y1, z2]))
verts.append(Vector([x2, y1, z2]))
verts.append(Vector([x2, y2, z2]))
verts.append(Vector([x1, y2, z2]))
# # Generate 12 edges from the 8 vertices
# edges.append([i, i+1])
# edges.append([i+1, i+2])
# edges.append([i+2, i+3])
# edges.append([i+3, i])
# edges.append([i+4, i+5])
# edges.append([i+5, i+6])
# edges.append([i+6, i+7])
# edges.append([i+7, i+4])
# edges.append([i, i+4])
# edges.append([i+1, i+5])
# edges.append([i+2, i+6])
# edges.append([i+3, i+7])
# Generate the corresponding face
faces.append([i+3, i+2, i+1, i])
faces.append([i+4, i+5, i+6, i+7])
faces.append([i, i+1, i+5, i+4])
faces.append([i+1, i+2, i+6, i+5])
faces.append([i+2, i+3, i+7, i+6])
faces.append([i+3, i, i+4, i+7])
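# Usage sketch for createBoxData(): build a single box mesh from two opposite
# corners via from_pydata(). Object and mesh names are examples.
def example_createBoxObject(scene, corner1=Vector((0,0,0)), corner2=Vector((1,1,1))):
    verts = []; edges = []; faces = []
    createBoxData(verts, edges, faces, corner1, corner2)
    me = bpy.data.meshes.new("ExampleBox")
    me.from_pydata(verts, [], faces)  # Edges are derived from the faces
    obj = bpy.data.objects.new("ExampleBox", me)
    scene.objects.link(obj)
    return obj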
########################################
def tool_fixFoundation(scene):
print("\nSearching foundation elements...")
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
# Backup selection
selection = [obj for obj in bpy.context.scene.objects if obj.select]
selectionActive = bpy.context.scene.objects.active
# Find mesh objects in selection
objs = [obj for obj in selection if obj.type == 'MESH' and not obj.hide and obj.is_visible(bpy.context.scene) and obj.rigid_body != None and obj.rigid_body.type == 'ACTIVE' and len(obj.data.vertices) > 0]
if len(objs) == 0:
print("No active rigid body objects selected.")
return
props = bpy.context.window_manager.bcb
### Foundation detection based on name
if not props.preprocTools_fix_cac:
if len(props.preprocTools_fix_nam) > 0:
cnt = 0
for obj in objs:
if props.preprocTools_fix_nam in obj.name:
cnt += 1
obj.rigid_body.type = 'PASSIVE'
if cnt == 0: print("No object with '%s' in its name found." %props.preprocTools_fix_nam)
else: print("No foundation object name defined in user interface.")
### Foundation generation
else:
if len(props.preprocTools_fix_nam) > 0:
foundationName = props.preprocTools_fix_nam
else: foundationName = grpNameFoundation
### Calculate boundary boxes for all objects
objsBB = []
margin_all = 0
qFirst = 1
for obj in objs:
# Calculate boundary box corners
bbMin, bbMax, bbCenter = boundaryBox(obj, 1)
# Also consider collision margin
if obj.rigid_body.use_margin and obj.rigid_body.collision_margin > 0:
margin = obj.rigid_body.collision_margin
if margin > margin_all: margin_all = margin
bbMin = Vector((bbMin[0]-margin, bbMin[1]-margin, bbMin[2]-margin))
bbMax = Vector((bbMax[0]+margin, bbMax[1]+margin, bbMax[2]+margin))
objsBB.append([bbMin, bbMax])
### Evaluate global boundary box
if qFirst:
bbMin_all = bbMin.copy(); bbMax_all = bbMax.copy()
qFirst = 0
else:
if bbMax_all[0] < bbMax[0]: bbMax_all[0] = bbMax[0]
if bbMin_all[0] > bbMin[0]: bbMin_all[0] = bbMin[0]
if bbMax_all[1] < bbMax[1]: bbMax_all[1] = bbMax[1]
if bbMin_all[1] > bbMin[1]: bbMin_all[1] = bbMin[1]
if bbMax_all[2] < bbMax[2]: bbMax_all[2] = bbMax[2]
if bbMin_all[2] > bbMin[2]: bbMin_all[2] = bbMin[2]
### Calculate geometry for adjacent foundation geometry for all sides
verts = []; edges = []; faces = [] # Active buffer mesh object
verts2 = []; edges2 = []; faces2 = [] # Passive mesh object
bufferMargin = props.preprocTools_fix_rng
bufferSize = props.preprocTools_fix_rng
for bb in objsBB:
bbMin = bb[0]
bbMax = bb[1]
### Method with small buffer elements for foundation objects
if 1:
# X+
if props.preprocTools_fix_axp:
if bbMax[0] >= bbMax_all[0] -bufferMargin:
newCorner = Vector(( bbMax[0]+bufferSize, bbMin[1], bbMin[2] ))
createBoxData(verts, edges, faces, bbMax, newCorner)
newCorner2 = Vector(( 2*bbMax[0]-bbMin[0]+bufferSize, bbMax[1], bbMax[2] ))
createBoxData(verts2, edges2, faces2, newCorner, newCorner2)
# X-
if props.preprocTools_fix_axn:
if bbMin[0] <= bbMin_all[0] +bufferMargin:
newCorner = Vector(( bbMin[0]-bufferSize, bbMax[1], bbMax[2] ))
createBoxData(verts, edges, faces, newCorner, bbMin)
newCorner2 = Vector(( 2*bbMin[0]-bbMax[0]-bufferSize, bbMin[1], bbMin[2] ))
createBoxData(verts2, edges2, faces2, newCorner2, newCorner)
# Y+
if props.preprocTools_fix_ayp:
if bbMax[1] >= bbMax_all[1] -bufferMargin:
newCorner = Vector(( bbMin[0], bbMax[1]+bufferSize, bbMin[2] ))
createBoxData(verts, edges, faces, bbMax, newCorner)
newCorner2 = Vector(( bbMax[0], 2*bbMax[1]-bbMin[1]+bufferSize, bbMax[2] ))
createBoxData(verts2, edges2, faces2, newCorner, newCorner2)
# Y-
if props.preprocTools_fix_ayn:
if bbMin[1] <= bbMin_all[1] +bufferMargin:
newCorner = Vector(( bbMax[0], bbMin[1]-bufferSize, bbMax[2] ))
createBoxData(verts, edges, faces, newCorner, bbMin)
newCorner2 = Vector(( bbMin[0], 2*bbMin[1]-bbMax[1]-bufferSize, bbMin[2] ))
createBoxData(verts2, edges2, faces2, newCorner2, newCorner)
# Z+
if props.preprocTools_fix_azp:
if bbMax[2] >= bbMax_all[2] -bufferMargin:
newCorner = Vector(( bbMin[0], bbMin[1], bbMax[2]+bufferSize ))
createBoxData(verts, edges, faces, bbMax, newCorner)
newCorner2 = Vector(( bbMax[0], bbMax[1], 2*bbMax[2]-bbMin[2]+bufferSize ))
createBoxData(verts2, edges2, faces2, newCorner, newCorner2)
# Z-
if props.preprocTools_fix_azn:
if bbMin[2] <= bbMin_all[2] +bufferMargin:
newCorner = Vector(( bbMax[0], bbMax[1], bbMin[2]-bufferSize ))
createBoxData(verts, edges, faces, newCorner, bbMin)
newCorner2 = Vector(( bbMin[0], bbMin[1], 2*bbMin[2]-bbMax[2]-bufferSize ))
createBoxData(verts2, edges2, faces2, newCorner2, newCorner)
### Different method with equal sizes for foundation objects and buffer elements
else:
# X+
if props.preprocTools_fix_axp:
if bbMax[0] >= bbMax_all[0] -bufferMargin:
newCorner = Vector(( 2*bbMax[0]-bbMin[0], bbMin[1], bbMin[2] ))
createBoxData(verts, edges, faces, bbMax, newCorner)
newCorner2 = Vector(( 3*bbMax[0]-2*bbMin[0], bbMax[1], bbMax[2] ))
createBoxData(verts2, edges2, faces2, newCorner2, newCorner)
# X-
if props.preprocTools_fix_axn:
if bbMin[0] <= bbMin_all[0] +bufferMargin:
newCorner = Vector(( 2*bbMin[0]-bbMax[0], bbMax[1], bbMax[2] ))
createBoxData(verts, edges, faces, newCorner, bbMin)
newCorner2 = Vector(( 3*bbMin[0]-2*bbMax[0], bbMin[1], bbMin[2] ))
createBoxData(verts2, edges2, faces2, newCorner2, newCorner)
# Y+
if props.preprocTools_fix_ayp:
if bbMax[1] >= bbMax_all[1] -bufferMargin:
newCorner = Vector(( bbMin[0], 2*bbMax[1]-bbMin[1], bbMin[2] ))
createBoxData(verts, edges, faces, bbMax, newCorner)
newCorner2 = Vector(( bbMax[0], 3*bbMax[1]-2*bbMin[1], bbMax[2] ))
createBoxData(verts2, edges2, faces2, newCorner2, newCorner)
# Y-
if props.preprocTools_fix_ayn:
if bbMin[1] <= bbMin_all[1] +bufferMargin:
newCorner = Vector(( bbMax[0], 2*bbMin[1]-bbMax[1], bbMax[2] ))
createBoxData(verts, edges, faces, newCorner, bbMin)
newCorner2 = Vector(( bbMin[0], 3*bbMin[1]-2*bbMax[1], bbMin[2] ))
createBoxData(verts2, edges2, faces2, newCorner2, newCorner)
# Z+
if props.preprocTools_fix_azp:
if bbMax[2] >= bbMax_all[2] -bufferMargin:
newCorner = Vector(( bbMin[0], bbMin[1], 2*bbMax[2]-bbMin[2] ))
createBoxData(verts, edges, faces, bbMax, newCorner)
newCorner2 = Vector(( bbMax[0], bbMax[1], 3*bbMax[2]-2*bbMin[2] ))
createBoxData(verts2, edges2, faces2, newCorner2, newCorner)
# Z-
if props.preprocTools_fix_azn:
if bbMin[2] <= bbMin_all[2] +bufferMargin:
newCorner = Vector(( bbMax[0], bbMax[1], 2*bbMin[2]-bbMax[2] ))
createBoxData(verts, edges, faces, newCorner, bbMin)
newCorner2 = Vector(( bbMin[0], bbMin[1], 3*bbMin[2]-2*bbMax[2] ))
createBoxData(verts2, edges2, faces2, newCorner2, newCorner)
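# Worked example for the mirroring math above (1D, X+ side): for an element
# spanning bbMin[0]=2 to bbMax[0]=5 with bufferSize=0.5, the active buffer box
# spans 5..5.5 (bbMax[0] to bbMax[0]+bufferSize) and the passive box spans
# 5.5..8.5 (up to 2*bbMax[0]-bbMin[0]+bufferSize = 8.5), i.e. a copy of the
# element mirrored outward beyond the buffer.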
### Create actual geometry for passive and active buffer object
# Create empty mesh object
me = bpy.data.meshes.new(foundationName)
me2 = bpy.data.meshes.new(foundationName)
# Add mesh data to new object
me.from_pydata(verts, [], faces)
me2.from_pydata(verts2, [], faces2)
obj = bpy.data.objects.new(foundationName, me)
obj2 = bpy.data.objects.new(foundationName, me2)
scene.objects.link(obj)
scene.objects.link(obj2)
### Create new materials if not already existing
if foundationName not in bpy.data.materials:
foundationCol = (.5,.5,.5) # Color for foundation material
mat = bpy.data.materials.new(foundationName)
mat.diffuse_color = foundationCol
mat.specular_color = foundationCol
mat.specular_intensity = 0
else:
mat = bpy.data.materials[foundationName]
# Add to objects
for ob in [obj, obj2]:
bpy.context.scene.objects.active = ob
bpy.ops.object.material_slot_add()
bpy.context.scene.objects.active.material_slots[-1].material = mat
### Add to main group
grpName = grpNameBuilding
try: grp = bpy.data.groups[grpName]
except: grp = bpy.data.groups.new(grpName)
try: grp.objects.link(obj)
except: pass
try: grp.objects.link(obj2)
except: pass
### Create a new group for the foundation object if not already existing
grpName = foundationName
try: grp = bpy.data.groups[grpName]
except: grp = bpy.data.groups.new(grpName)
try: grp.objects.link(obj)
except: pass
try: grp.objects.link(obj2)
except: pass
### Create also element group from data and use passive preset for it
createElementGroup(grpName, presetNo=1)
# Update menu related properties from global vars
props.props_update_menu()
# Deselect all objects.
bpy.ops.object.select_all(action='DESELECT')
# Apply rigid body settings to foundation
obj.select = 1
obj2.select = 1
bpy.context.scene.objects.active = obj
bpy.ops.rigidbody.objects_add()
# Set fixed object to passive (but not buffer)
obj2.rigid_body.type = 'PASSIVE'
# Set friction to 1.0
obj.rigid_body.friction = 1
obj2.rigid_body.friction = 1
### Split both objects into individual parts
bpy.context.tool_settings.mesh_select_mode = False, True, False
for ob in [obj, obj2]:
bpy.context.scene.objects.active = ob
# Enter edit mode
try: bpy.ops.object.mode_set(mode='EDIT')
except: pass
# Recalculate normals
bpy.ops.mesh.normals_make_consistent(inside=False)
# Separate loose
try: bpy.ops.mesh.separate(type='LOOSE')
except: pass
# Leave edit mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
### Set object centers to geometry origin
# Make sure current frame is at start frame otherwise the rigid body cache can cause unwanted location resets of buffer objects on origin change
if scene.frame_current != scene.frame_start: scene.frame_current = scene.frame_start
obj.select = 1
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
# Revert to start selection
for obj in selection: obj.select = 1
bpy.context.scene.objects.active = selectionActive
################################################################################
def createOrReuseObjectAndMesh(scene, objName="Mesh"):
### Create a fresh object and delete old one, the complexity is needed to avoid pollution with old mesh datablocks
### Further, we cannot use the same mesh datablock that has already been used with from_pydata() so there is a workaround for this, too
objEmptyName = "$Temp$"
try: obj = bpy.data.objects[objName]
except:
try: me = bpy.data.meshes[objName]
except:
me = bpy.data.meshes.new(objName)
obj = bpy.data.objects.new(objName, me)
else:
obj = bpy.data.objects.new(objName, me)
try: meT = bpy.data.meshes[objEmptyName]
except: meT = bpy.data.meshes.new(objEmptyName)
obj.data = meT
bpy.data.meshes.remove(me, do_unlink=1)
me = bpy.data.meshes.new(objName)
obj.data = me
scene.objects.link(obj)
else:
#obj = bpy.data.objects[objName]
me = obj.data
try: meT = bpy.data.meshes[objEmptyName]
except: meT = bpy.data.meshes.new(objEmptyName)
obj.data = meT
bpy.data.meshes.remove(me, do_unlink=1)
me = bpy.data.meshes.new(objName)
obj.data = me
try: scene.objects.link(obj)
except: pass
return obj
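# Usage sketch for createOrReuseObjectAndMesh(): fetch an object with a fresh,
# never-used mesh datablock and fill it via from_pydata(). Names are examples.
def example_rebuildMesh(scene, verts, faces):
    obj = createOrReuseObjectAndMesh(scene, objName="ExampleMesh")
    # Safe because the returned datablock was never used with from_pydata() before
    obj.data.from_pydata(verts, [], faces)
    return obj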
########################################
def tool_groundMotion(scene):
print("\nApplying ground motion...")
props = bpy.context.window_manager.bcb
q = 0
if len(props.preprocTools_gnd_obj) == 0:
print("No ground object name defined in user interface.")
return
if props.preprocTools_gnd_obj in scene.objects:
objGnd = scene.objects[props.preprocTools_gnd_obj]
qCreateGroundObj = 0
else: qCreateGroundObj = 1
if props.preprocTools_gnd_obm in scene.objects:
objMot = scene.objects[props.preprocTools_gnd_obm]
else: objMot = None
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
# Backup selection
selection = [obj for obj in bpy.context.scene.objects if obj.select]
selectionActive = bpy.context.scene.objects.active
if qCreateGroundObj:
print("Ground object not found, creating new one...")
# Find active mesh objects in selection
objsA = [obj for obj in selection if obj.type == 'MESH' and not obj.hide and obj.is_visible(bpy.context.scene) and obj.rigid_body != None and obj.rigid_body.type == 'ACTIVE' and len(obj.data.vertices) > 0]
if len(objsA) > 0:
### Calculate boundary boxes for all active objects with connection type > 0
margin = 0
qFirst = 1
for obj in objsA:
# Calculate boundary box corners
bbMin, bbMax, bbCenter = boundaryBox(obj, 1)
if qFirst:
bbMin_all = bbMin.copy()
qFirst = 0
else:
if bbMin_all[2] > bbMin[2]: bbMin_all[2] = bbMin[2]
# Also consider collision margin (find largest one)
if obj.rigid_body.use_margin:
if obj.rigid_body.collision_margin > margin: margin = obj.rigid_body.collision_margin
height = bbMin_all[2] -margin
else: height = 0
### Create ground object data
verts = []; edges = []; faces = []
corner1 = Vector((-500,-500,-10))
corner2 = Vector((500, 500, 0))
createBoxData(verts, edges, faces, corner1, corner2)
# Create empty mesh object
#me = bpy.data.meshes.new(props.preprocTools_gnd_obj)
#objGnd = bpy.data.objects.new(props.preprocTools_gnd_obj, me)
#scene.objects.link(objGnd)
objGnd = createOrReuseObjectAndMesh(scene, objName=props.preprocTools_gnd_obj)
me = objGnd.data
# Add mesh data to new object
me.from_pydata(verts, [], faces)
# Set ground to the height of the lowest active rigid body
objGnd.location[2] = height
###### Parenting to ground object
# Deselect all objects.
bpy.ops.object.select_all(action='DESELECT')
# Find passive mesh objects in selection
objs = [obj for obj in selection if obj.type == 'MESH' and not obj.hide and obj.is_visible(bpy.context.scene) and obj.rigid_body != None and obj.rigid_body.type == 'PASSIVE' and len(obj.data.vertices) > 0]
if len(objs) == 0:
print("No passive rigid body elements selected, nothing attached to the ground.")
else:
# Select passive mesh objects
for obj in objs: obj.select = 1
### Make object parent for selected objects
bpy.context.scene.objects.active = objGnd # Parent
bpy.ops.object.parent_set(type='OBJECT', keep_transform=True)
# Enable animated flag for all passive rigid bodies so that Bullet takes their motion into account
for obj in objs: obj.rigid_body.kinematic = True
if objGnd.is_visible(bpy.context.scene):
# Apply rigid body settings to ground object
if objGnd.rigid_body == None:
# Deselect all objects.
bpy.ops.object.select_all(action='DESELECT')
# Apply rigid body settings
objGnd.select = 1
bpy.context.scene.objects.active = objGnd
bpy.ops.rigidbody.objects_add()
objGnd.select = 0
# Set friction for all to 1.0
objGnd.rigid_body.friction = 1
objGnd.rigid_body.type = 'PASSIVE'
# Enable animated flag for passive rigid bodies so that Bullet takes its motion into account
objGnd.rigid_body.kinematic = True
###### Parenting ground object to motion object
if objMot != None:
# Deselect all objects.
bpy.ops.object.select_all(action='DESELECT')
### Make object parent for selected objects
objGnd.select = 1 # Child
bpy.context.scene.objects.active = objMot # Parent
bpy.ops.object.parent_set(type='OBJECT', keep_transform=True)
objGnd.select = 0
# Use given motion object for creating artificial earthquake motion from now on
objGnd = objMot
###### Creating artificial earthquake motion curves for ground object
if props.preprocTools_gnd_nac:
### Create animation curve with one keyframe as base
obj = objGnd
obj.animation_data_create()
# If current action is already a "Motion" one then output a hint
if obj.animation_data.action != None and "Motion" in obj.animation_data.action.name:
print("There is already a Motion action, creating a new one...")
obj.animation_data.action = bpy.data.actions.new(name="Motion")
curveLocX = obj.animation_data.action.fcurves.new(data_path="delta_location", index=0)
curveLocY = obj.animation_data.action.fcurves.new(data_path="delta_location", index=1)
curveLocZ = obj.animation_data.action.fcurves.new(data_path="delta_location", index=2)
curveLocX.keyframe_points.add(1)
curveLocY.keyframe_points.add(1)
curveLocZ.keyframe_points.add(1)
### Creating noise function modifier
fps_rate = scene.render.fps
amplitude = props.preprocTools_gnd_nap
frequency = props.preprocTools_gnd_nfq
duration = props.preprocTools_gnd_ndu
seed = props.preprocTools_gnd_nsd
# X axis
fmod = curveLocX.modifiers.new(type='NOISE')
fmod.scale = fps_rate /frequency
fmod.phase = seed
fmod.strength = amplitude *6
fmod.depth = 1
fmod.use_restricted_range = True
fmod.frame_start = 1
fmod.frame_end = duration *fps_rate
fmod.blend_in = (duration *fps_rate) /2
fmod.blend_out = (duration *fps_rate) /2
# Y axis
fmod = curveLocY.modifiers.new(type='NOISE')
fmod.scale = fps_rate /frequency
fmod.phase = seed +1000
fmod.strength = amplitude *6
fmod.depth = 1
fmod.use_restricted_range = True
fmod.frame_start = 1
fmod.frame_end = duration *fps_rate
fmod.blend_in = (duration *fps_rate) /2
fmod.blend_out = (duration *fps_rate) /2
# Z axis
# fmod = curveLocZ.modifiers.new(type='NOISE')
# fmod.scale = fps_rate /frequency
# fmod.phase = seed +2000
# fmod.strength = amplitude *1.5
# fmod.depth = 1
# fmod.use_restricted_range = True
# fmod.frame_start = 1
# fmod.frame_end = duration *fps_rate
# fmod.blend_in = (duration *fps_rate) /2
# fmod.blend_out = (duration *fps_rate) /2
###### Import ground motion from text file
elif len(props.preprocTools_gnd_nam) > 0:
# Select ground object as expected by the script
bpy.context.scene.objects.active = objGnd
objGnd.select = 1
# Set frame rate as expected by the script
scene.render.fps = 25
###### External function
kk_import_motion_from_text_file.importData(props.preprocTools_gnd_nam)
objGnd.select = 0
else: print("No text file defined.");
# Revert to start selection
for obj in selection: obj.select = 1
bpy.context.scene.objects.active = selectionActive
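# Minimal sketch of the noise motion setup above, isolated: attach a NOISE
# F-modifier to one delta_location F-curve. Assumes the Blender 2.7x API;
# the parameter values and names are examples, not BCB defaults.
def example_addNoiseMotion(obj, amplitude=1.0, frequency=1.0, duration=10, fps=25, seed=0):
    obj.animation_data_create()
    obj.animation_data.action = bpy.data.actions.new(name="ExampleMotion")
    curve = obj.animation_data.action.fcurves.new(data_path="delta_location", index=0)
    curve.keyframe_points.add(1)          # One keyframe as base for the modifier
    fmod = curve.modifiers.new(type='NOISE')
    fmod.scale = fps /frequency           # Noise period in frames
    fmod.phase = seed
    fmod.strength = amplitude
    fmod.use_restricted_range = True
    fmod.frame_start = 1
    fmod.frame_end = duration *fps
    return fmod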
################################################################################
def stopPlaybackAndReturnToStart(scene):
try: bpy.app.handlers.frame_change_pre.remove(tool_exportLocationHistory_eventHandler)
except: pass
else: print("Removed event handler: tool_exportLocationHistory_eventHandler")
try: bpy.app.handlers.frame_change_pre.remove(tool_exportForceHistory_eventHandler)
except: pass
else: print("Removed event handler: tool_exportForceHistory_eventHandler")
if bpy.context.screen.is_animation_playing:
bpy.ops.screen.animation_play() # Stop animation playback
scene.frame_current = scene.frame_start # Reset to start frame
bpy.context.screen.scene = scene # Hack to update other event handlers once again to finish their clean up
########################################
def tool_exportLocationHistory_eventHandler(scene):
### Vars
logMode = 0 # Logging mode (0 absolute location, 1 relative location, 2 velocity, 3 acceleration)
props = bpy.context.window_manager.bcb
filenamePath = props.postprocTools_lox_nam
logPath = filenamePath
name = props.postprocTools_lox_elm
###### Get data
### Official Blender
if not hasattr(bpy.types.DATA_PT_modifiers, 'FRACTURE') or not asciiExportName in scene.objects:
try: ob = scene.objects[name]
except:
print('Error: Defined object not found. Removing event handler.')
stopPlaybackAndReturnToStart(scene); return
else:
data = ob.matrix_world.to_translation() # Get actual Bullet object's position as .location only returns its simulation starting position
### Fracture Modifier
else:
try: ob = scene.objects[asciiExportName]
except:
print('Error: Fracture Modifier object not found. Removing event handler.')
stopPlaybackAndReturnToStart(scene); return
else:
md = ob.modifiers["Fracture"]
try: ob = md.mesh_islands[name]
except:
print('Error: Defined object not found. Removing event handler.')
stopPlaybackAndReturnToStart(scene); return
else:
data = ob.rigidbody.location.copy() # Get actual Bullet object's position as .location only returns its simulation starting position
# If filepath is empty then print data into console
if len(filenamePath) == 0:
print("Data:", data)
else:
###### Export data
### On first run
if "log_files_open" not in bpy.app.driver_namespace.keys():
# Create object list of selected objects
objNames = [name]
if len(objNames) > 0:
bpy.app.driver_namespace["log_objNames"] = objNames
files = []
for objName in objNames:
# Windows treats "Con." in a path as the reserved console device name, so replace dots in object names
filename = removeBadCharsFromFilename(objName.replace(".", "_")) +".csv"
filename = os.path.join(logPath, filename)
print("Creating file:", filename)
# Remove old log file at start frame
try: os.remove(filename)
except: pass
# Create new log file
try: f = open(filename, "w")
except:
print('Error: Could not open file.')
stopPlaybackAndReturnToStart(scene); return
else:
line = "# Time; X; Y; Z; Name: %s\n" %objName.encode("CP850","replace").decode("CP850")
f.write(line)
files.append(f)
bpy.app.driver_namespace["log_files_open"] = files
### Check if last frame is reached
if scene.frame_current == scene.frame_end:
if bpy.context.screen.is_animation_playing:
bpy.ops.screen.animation_play() # Stop animation playback
### If animation playback has stopped (can also be done by user) then unload the event handler and free all monitor data
if not bpy.context.screen.is_animation_playing:
try: bpy.app.handlers.frame_change_pre.remove(tool_exportLocationHistory_eventHandler)
except: pass
else: print("Removed event handler: tool_exportLocationHistory_eventHandler")
scene.frame_current = scene.frame_start # Reset to start frame
### Close log files
try: files = bpy.app.driver_namespace["log_files_open"]
except: pass
else:
for f in files: f.close()
### Delete keys
keys = [key for key in bpy.app.driver_namespace.keys()]
for key in keys:
if "log_" in key:
del bpy.app.driver_namespace[key]
bpy.context.screen.scene = scene # Hack to update other event handlers once again to finish their clean up
return
if len(filenamePath):
### For every frame
if "log_objNames" in bpy.app.driver_namespace.keys():
objNames = bpy.app.driver_namespace["log_objNames"]
files = bpy.app.driver_namespace["log_files_open"]
time = (scene.frame_current -scene.frame_start -1) /scene.render.fps
for k in range(len(objNames)):
objName = objNames[k]
loc = data
#if obj.parent != None:
# loc = loc *obj.parent.matrix_world
if logMode == 0:
data = loc
elif logMode == 1:
if "log_loc_start_" +objName not in bpy.app.driver_namespace.keys():
bpy.app.driver_namespace["log_loc_start_" +objName] = loc
loc = Vector((0, 0, 0))
else:
loc -= bpy.app.driver_namespace["log_loc_start_" +objName]
data = loc
if logMode >= 2:
if "log_loc_" +objName not in bpy.app.driver_namespace.keys():
bpy.app.driver_namespace["log_loc_" +objName] = loc
vel = Vector((0, 0, 0))
else:
vel = (bpy.app.driver_namespace["log_loc_" +objName] -loc) *scene.render.fps
bpy.app.driver_namespace["log_loc_" +objName] = loc
data = vel
if logMode == 3:
if "log_vel_" +objName not in bpy.app.driver_namespace.keys():
bpy.app.driver_namespace["log_vel_" +objName] = vel
accel = Vector((0, 0, 0))
else:
accel = (bpy.app.driver_namespace["log_vel_" +objName] -vel) *scene.render.fps
bpy.app.driver_namespace["log_vel_" +objName] = vel
data = accel
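# Finite differences used above (per frame, dt = 1 /fps):
#   logMode 2: vel_t = (loc_(t-1) -loc_t) *fps
#   logMode 3: accel_t = (vel_(t-1) -vel_t) *fps
# Signs follow the stored-minus-current convention of the code above.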
line = "%0.4f, %0.6f, %0.6f, %0.6f\n" %(time, data[0], data[1], data[2])
files[k].write(line)
########################################
def tool_exportLocationHistory(scene):
print("\nExporting location time history...")
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
print('Init location export event handler.')
bpy.app.handlers.frame_change_pre.append(tool_exportLocationHistory_eventHandler)
# Start animation playback
if not bpy.context.screen.is_animation_playing:
bpy.ops.screen.animation_play()
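# Sketch for reading one exported location CSV back in. Assumes the header and
# comma-separated line format written by the event handler above; the file name
# is an example.
def example_readLocationCSV(filename="Obj_Name.csv"):
    times = []; locs = []
    with open(filename) as f:
        for line in f:
            if line.startswith("#"): continue  # Skip the header comment line
            vals = [float(v) for v in line.split(",")]
            times.append(vals[0])
            locs.append(vals[1:4])  # X, Y, Z
    return times, locs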
################################################################################
def tool_constraintForce_getData(scene, name):
props = bpy.context.window_manager.bcb
###### Get data
### Official Blender
if not hasattr(bpy.types.DATA_PT_modifiers, 'FRACTURE') or not asciiExportName in scene.objects:
### Try to find first constraint for the connection
try: ob = scene.objects[name +'.1']
except:
print('Error: Defined object not found. Removing event handler.')
stopPlaybackAndReturnToStart(scene); return None
else:
try: cons = [ob.rigid_body_constraint]
except:
print('Error: Defined object has no constraint. Removing event handler.')
stopPlaybackAndReturnToStart(scene); return None
else:
### Try to find more constraints for the connection
i = 1; qEnd = 0
while not qEnd:
i += 1
nameNew = name +'.%d' %i
try: ob = scene.objects[nameNew]
except: qEnd = 1
else: cons.append(ob.rigid_body_constraint)
### Fracture Modifier
else:
### Try to find first constraint for the connection
try: ob = scene.objects[asciiExportName]
except:
print('Error: Fracture Modifier object not found. Removing event handler.')
stopPlaybackAndReturnToStart(scene); return None
else:
md = ob.modifiers["Fracture"]
try: cons = [md.mesh_constraints[name +'.1']]
except:
print('Error: Defined object not found. Removing event handler.')
stopPlaybackAndReturnToStart(scene); return None
else:
### Try to find more constraints for the connection
i = 1; qEnd = 0
while not qEnd:
i += 1
nameNew = name +'.%d' %i
try: cons.append(md.mesh_constraints[nameNew])
except: qEnd = 1
try: data = [con.appliedImpulse() for con in cons]
except:
print("Error: Data could not be read, Blender version with Fracture Modifier required!")
stopPlaybackAndReturnToStart(scene); return None
return data, cons
########################################
def tool_exportForceHistory_eventHandler(scene):
props = bpy.context.window_manager.bcb
rbw_steps_per_second = scene.rigidbody_world.steps_per_second
rbw_time_scale = scene.rigidbody_world.time_scale
name = props.postprocTools_fcx_con
result = tool_constraintForce_getData(scene, name)
if result != None:
data = result[0]
cons = result[1]
### Check if connection is broken
qIntact = 0
for con in cons:
if con.isIntact(): # Needs Fracture Modifier build
qIntact = 1
break
# Conversion from impulse to force
data = [val /rbw_time_scale *rbw_steps_per_second for val in data]
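# Worked example of the conversion above: Bullet reports an impulse J per
# simulation step, so F = J *steps_per_second /time_scale. With the typical
# settings steps_per_second=60 and time_scale=1, an impulse of 0.5 Ns maps
# to a force of 30 N.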
filenamePath = props.postprocTools_fcx_nam
logPath = filenamePath
# If filepath is empty then print data into console
if len(filenamePath) == 0:
print("Data:", data)
else:
###### Export data
### On first run
if "log_files_open" not in bpy.app.driver_namespace.keys():
# Create object list of selected objects
objNames = [name]
if len(objNames) > 0:
bpy.app.driver_namespace["log_objNames"] = objNames
files = []
for objName in objNames:
# Windows treats "Con." in a path as the reserved console device name, so replace dots in object names
filename = removeBadCharsFromFilename(objName.replace(".", "_")) +".csv"
filename = os.path.join(logPath, filename)
print("Creating file:", filename)
# Remove old log file at start frame
try: os.remove(filename)
except: pass
# Create new log file
try: f = open(filename, "w")
except:
print('Error: Could not open file.')
stopPlaybackAndReturnToStart(scene); return
else:
line = "# Time; 1,2,3..: Fmax for connection and F for individual constraints; Name: %s\n" %objName.encode("CP850","replace").decode("CP850")
f.write(line)
files.append(f)
bpy.app.driver_namespace["log_files_open"] = files
### Check if last frame is reached
if scene.frame_current == scene.frame_end:
if bpy.context.screen.is_animation_playing:
bpy.ops.screen.animation_play() # Stop animation playback
### If animation playback has stopped (can also be done by user) then unload the event handler and free all monitor data
if not bpy.context.screen.is_animation_playing:
try: bpy.app.handlers.frame_change_pre.remove(tool_exportForceHistory_eventHandler)
except: pass
else: print("Removed event handler: tool_exportForceHistory_eventHandler")
scene.frame_current = scene.frame_start # Reset to start frame
### Close log files
try: files = bpy.app.driver_namespace["log_files_open"]
except: pass
else:
for f in files: f.close()
### Delete keys
keys = [key for key in bpy.app.driver_namespace.keys()]
for key in keys:
if "log_" in key:
del bpy.app.driver_namespace[key]
bpy.context.screen.scene = scene # Hack to update other event handlers once again to finish their clean up
return
### For every frame
if "log_objNames" in bpy.app.driver_namespace.keys():
objNames = bpy.app.driver_namespace["log_objNames"]
files = bpy.app.driver_namespace["log_files_open"]
time = (scene.frame_current -scene.frame_start-1) /scene.render.fps
for k in range(len(objNames)):
if qIntact:
line = "%0.4f" %time
fmax = data[0]
for val in data: fmax = max(fmax, abs(val)) # Evaluate maximum force
line += " %0.6f" %fmax
for val in data:
line += " %0.6f" %val
line += "\n"
files[k].write(line)
########################################
def tool_exportForceHistory(scene):
print("\nExporting constraint force time history...")
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
### Free previous bake data
contextFix = bpy.context.copy()
contextFix['point_cache'] = scene.rigidbody_world.point_cache
bpy.ops.ptcache.free_bake(contextFix)
### Invalidate point cache to enforce a full bake without using previous cache data
if "RigidBodyWorld" in bpy.data.groups:
try: obj = bpy.data.groups["RigidBodyWorld"].objects[0]
except: pass
else: obj.location = obj.location
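# (The self-assignment above only tags the object as changed so Blender marks
# the rigid body point cache as outdated, forcing a full re-bake on playback.)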
print('Init constraint force export event handler.')
bpy.app.handlers.frame_change_pre.append(tool_exportForceHistory_eventHandler)
# Start animation playback
if not bpy.context.screen.is_animation_playing:
bpy.ops.screen.animation_play()
########################################
def initMaterials():
# Borrowed from kk_material_deformation-visualizer.py
print("Initializing materials...")
### Vars
displaySteps = 300 # Gradient steps / material count to be used for visualization
colMultiplier = 1.0 # Color intensity multiplier (default: 1.0)
emission = 0.33 # Add some emission to the material in case scene is not illuminated (BI only, good value: 0.33)
zBufferOffset = 0 # Add z-buffer offset for visualized objects for rendering (can be useful to make objects to appear on top of the rendering similar to x-ray display mode)
### Create gradient materials for later use and reuse plus one extra color for disabled connections
for step in range(displaySteps +1):
x = step *(1 /(displaySteps -1)) # Normalized dif value in [0..1]
col = Color((0, 0, 0))
#col.h = 0; col.s = 1; col.v = 1
### Old colors (blue to red)
#col.r = x # correct math: = (x -0.5) *2
#col.g = 1 -(abs(0.5 -x) *2)
#col.b = (0.5 -x) *2
### New colors (blue to cyan to green to yellow to red)
if step < displaySteps:
col.r = 2 -(abs(1 -x) *4)
col.g = 2 -(abs(0.5 -x) *4)
col.b = 2 -(abs(x) *4)
# Extra color for broken connections
else:
col.r = 0; col.g = 0; col.b = 0
col.r *= colMultiplier
col.g *= colMultiplier
col.b *= colMultiplier
matName = materialName +"%03d" %step
try: mat = bpy.data.materials[matName]
except: mat = bpy.data.materials.new(matName)
mat.diffuse_color = col
col.s *= 0.5
mat.specular_color = Color((0, 0, 0))
mat.specular_intensity = 0 # BI only
mat.specular_hardness = 5
mat.emit = emission
if zBufferOffset != 0:
mat.offset_z = zBufferOffset
mat.use_transparency = True
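# Worked example of the gradient math above (values are effectively clamped to
# [0..1] when assigned as a color): x=0 -> blue (0,0,1), x=0.25 -> cyan (0,1,1),
# x=0.5 -> green (0,1,0), x=0.75 -> yellow (1,1,0), x=1 -> red (1,0,0).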
########################################
def changeMaterials(obj, dif, qIntact=1):
# Borrowed from kk_material_deformation-visualizer.py
###### Changes object material by deformation
### Vars
displaySteps = 300 # Gradient steps / material count to be used for visualization
### Only calculate gradient mat index when the actual normalized deformation lies within [0..1] otherwise use the last gradient material
if qIntact:
if dif < 1: step = int(dif *displaySteps)
else: step = displaySteps -1
mat = bpy.data.materials[materialName +"%03d" %step]
# For broken connections use last special material
else:
mat = bpy.data.materials[materialName +"%03d" %displaySteps]
### Add new materials slot and material
if len(obj.material_slots) == 0:
bpy.context.scene.objects.active = obj
bpy.ops.object.material_slot_add()
obj.material_slots[-1:][0].material = mat
################################################################################
def tool_forcesVisualization_eventHandler(scene):
props = bpy.context.window_manager.bcb
elemGrps = mem["elemGrps"]
rbw_steps_per_second = scene.rigidbody_world.steps_per_second
rbw_time_scale = scene.rigidbody_world.time_scale
objRangeName = props.postprocTools_fcv_con
# Detect if official Blender or Fracture Modifier is in use
if not hasattr(bpy.types.DATA_PT_modifiers, 'FRACTURE') or not asciiExportName in scene.objects:
qFM = 0
else: qFM = 1
###### On first run
if "log_connectsViz" not in bpy.app.driver_namespace.keys():
print("Gathering data...")
###### Get data from scene
### Official Blender
if not qFM:
### Prepare scene object dictionaries by type to be used for faster item search (optimization)
scnObjs = {}
scnEmptyObjs = {}
for obj in scene.objects:
if obj.type == 'MESH': scnObjs[obj.name] = obj
elif obj.type == 'EMPTY': scnEmptyObjs[obj.name] = obj
### Fracture Modifier
else:
try: ob = scene.objects[asciiExportName]
except: print("Error: Fracture Modifier object expected but not found."); return
md = ob.modifiers["Fracture"]
### Prepare scene object dictionaries again after we changed obj names (optimization)
scnObjs = {}
scnEmptyObjs = {}
for obj in md.mesh_islands:
scnObjs[obj.name] = obj
for obj in md.mesh_constraints:
scnEmptyObjs[obj.name] = obj
try: objsEGrp = scene["bcb_objsEGrp"]
except: objsEGrp = []; print("Warning: bcb_objsEGrp property not found, cleanup may be incomplete.")
try: names = scene["bcb_objs"]
except: names = []; print("Error: bcb_objs property not found, rebuilding constraints is required.")
objs = []
for name in names:
if len(name):
try: objs.append(scnObjs[name])
except: objs.append(None); print("Error: Object %s missing, rebuilding constraints is required." %name)
else: objs.append(None)
try: names = scene["bcb_emptyObjs"]
except: names = []; print("Error: bcb_emptyObjs property not found, rebuilding constraints is required.")
emptyObjs = []
for name in names:
if len(name):
try: emptyObjs.append(scnEmptyObjs[name])
except: emptyObjs.append(None); print("Error: Object %s missing, rebuilding constraints is required." %name)
else: emptyObjs.append(None)
try: connectsPair = scene["bcb_connectsPair"]
except: connectsPair = []; print("Error: bcb_connectsPair property not found, rebuilding constraints is required.")
try: connectsGeo = scene["bcb_connectsGeo"]
except: connectsGeo = []; print("Error: bcb_connectsGeo property not found, rebuilding constraints is required.")
try: connectsConsts = scene["bcb_connectsConsts"]
except: connectsConsts = []; print("Error: bcb_connectsConsts property not found, rebuilding constraints is required.")
# If range object is defined by user then use this to search for nearby connections for visualization
objRange = None
if len(objRangeName):
try: objRange = scene.objects[objRangeName]
except: print("Warning: Range limiting object not found, visualizing all connections instead.")
### Create list of connections for visualization
print("Creating list of connections for visualization... (%d)" %len(connectsPair))
connectsViz = []
connectsPair_iter = iter(connectsPair)
connectsConsts_iter = iter(connectsConsts)
for k in range(len(connectsPair)):
sys.stdout.write('\r' +"%d" %k)
consts = next(connectsConsts_iter)
pair = next(connectsPair_iter)
# If constraints for this connections are existing
if len(consts):
objA = objs[pair[0]]
objB = objs[pair[1]]
objConst = emptyObjs[consts[0]]
### Skip object out of range of the limiting object if present
qUse = 0
if objRange != None:
loc = objRange.matrix_world.inverted() *ob.matrix_world *objConst.location # Convert coordinates into range object space
dims = (1,1,1) # We assume origin is located at range object center (always true for empty objects)
if loc[0] > -dims[0] and loc[0] < +dims[0] \
and loc[1] > -dims[1] and loc[1] < +dims[1] \
and loc[2] > -dims[2] and loc[2] < +dims[2]:
qUse = 1
qUse_limiter = qUse
### Only use connections with one foundation element
if props.postprocTools_fcv_pas:
qUse = 1
# Check for foundation group
qFoundation = 0
if len(elemGrps) > 0:
for i in range(len(elemGrps)):
CT = elemGrps[i][EGSidxCTyp]
if CT == 0: qFoundation = 1; break
# If foundation group is present then consider it (to visualize buffer objects correctly)
if qFoundation:
elemGrpA = objsEGrp[pair[0]]
elemGrpB = objsEGrp[pair[1]]
CT_A = elemGrps[elemGrpA][EGSidxCTyp]
CT_B = elemGrps[elemGrpB][EGSidxCTyp]
if CT_A != 0 and CT_B == 0: pass # Only A is active and B is passive group
elif CT_A == 0 and CT_B != 0: pass # Only B is active and A is passive group
else: qUse = 0
# Fallback in case no foundation group is available, then check for passive objects instead
else:
if not qFM and (objA.rigid_body.type == 'ACTIVE' and objB.rigid_body.type == 'ACTIVE'): qUse = 0
if qFM and (objA.rigidbody.type == 'ACTIVE' and objB.rigidbody.type == 'ACTIVE'): qUse = 0
qUse_foundation = qUse
### Skip horizontal connections by comparing relations of both element centroids to constraint location
### This code is used 3x, keep changes consistent in: builder_prep.py, builder_setc.py, and tools.py
qUse = 1
if 0: # Experimental and thus disabled
if not qFM: dirVecA = objConst.location -objA.matrix_world.to_translation() # Use actual locations (taking parent relationships into account)
else: dirVecA = objConst.location -objA.rigidbody.location
dirVecAN = dirVecA.normalized()
if abs(dirVecAN[2]) > 0.7: qA = 1
else: qA = 0
if not qFM: dirVecB = objConst.location -objB.matrix_world.to_translation() # Use actual locations (taking parent relationships into account)
else: dirVecB = objConst.location -objB.rigidbody.location
dirVecBN = dirVecB.normalized()
if abs(dirVecBN[2]) > 0.7: qB = 1
else: qB = 0
if qA == 0 and qB == 0: qUse = 0
qUse_horizontal = qUse
### Skip connections with one passive element
qUse = 1
#if not qFM and (objA.rigid_body.type == 'PASSIVE' or objB.rigid_body.type == 'PASSIVE'): qUse = 0
#if qFM and (objA.rigidbody.type == 'PASSIVE' or objB.rigidbody.type == 'PASSIVE'): qUse = 0
qUse_passive = qUse
if (qUse_limiter or qUse_foundation or (objRange == None and not props.postprocTools_fcv_pas)) \
and qUse_horizontal and qUse_passive:
name = objConst.name.rsplit('.', 1)[0]
geo = connectsGeo[k]
geoContactArea = geo[0]
a = geoContactArea *1000000
connectsViz.append([objConst, name, objConst.location, len(consts), objA, objB, a])
print()
# Store connection data for next frame use
bpy.app.driver_namespace["log_connectsViz"] = connectsViz
### Create list of visualization object names
vizObjNames = []
for i in range(len(connectsViz)):
connect = connectsViz[i]
name = connect[1]
vizObjNames.append(name)
# Generate gradient materials
initMaterials()
### Prepare visualization objects
print("Preparing visualization objects... (%d)" %len(connectsViz))
grpName = grpNameVisualization
try: grp = bpy.data.groups[grpName]
except: grp = bpy.data.groups.new(grpName)
vizObjs = []
for i in range(len(connectsViz)):
sys.stdout.write('\r' +"%d" %i)
connect = connectsViz[i]
name = connect[1]
nameViz = "Viz_" +name
try: obj = scene.objects[nameViz]
except:
# Create sphere
bpy.ops.mesh.primitive_ico_sphere_add(subdivisions=4, size=1, view_align=False, enter_editmode=False, location=(0, 0, 0))
bpy.ops.object.shade_smooth() # Shade smooth
obj = bpy.context.scene.objects.active
obj.name = nameViz
#obj.scale = Vector((0, 0, 0))
obj.select = 0
k = 1
while 1: # Delete values in case there are old ones and we are using sampling
key = name +'.%d N' %k
if key in obj.keys(): del obj[key]
else: break
k += 1
vizObjs.append(obj)
# Add to visualization group
try: grp.objects.link(obj)
except: pass
print()
# Store visualization object list for next frame use
if len(vizObjs):
bpy.app.driver_namespace["log_vizObjs"] = vizObjs
### For every frame
if "log_vizObjs" in bpy.app.driver_namespace.keys():
connectsViz = bpy.app.driver_namespace["log_connectsViz"]
vizObjs = bpy.app.driver_namespace["log_vizObjs"]
for i in range(len(connectsViz)):
connect = connectsViz[i]
objConst, name, loc, length, objA, objB, a = connect
normal = Vector((1.0, 0.0, 0.0))
if not qFM: normal.rotate(objConst.rotation_quaternion)
else: normal.rotate(objConst.rotation)
# Write some properties into visualization objects for user review
vizObjs[i]["Obj.A"] = objA.name
vizObjs[i]["Obj.B"] = objB.name
vizObjs[i]['Normal'] = normal
vizObjs[i]['ContactArea mm²'] = a
# ### Set location to center of (possibly moving) element pair (comment out for original connection position)
# try: locA = objA.matrix_world.to_translation() # Get actual Bullet object's position as .location only returns its simulation starting position
# except: locA = objA.rigidbody.location # If the above fails it's an FM object, so we have to derive the location differently
# try: locB = objB.matrix_world.to_translation()
# except: locB = objB.rigidbody.location
# loc = (locA +locB) /2
result = tool_constraintForce_getData(scene, name)
if result != None:
data = result[0]
cons = result[1]
### Check if connection is broken
samples = 10 # Sampling of values over multiple frames to reduce simulation noise (0 = off)
qIntact = 0
for con in cons:
#if (con.type == 'GENERIC' or con.type == 'FIXED' or con.type == 'POINT') and \
if con.enabled and (not hasattr(con, 'isIntact') or con.isIntact()): # Needs Fracture Modifier build
qIntact = 1
break
vizObjs[i]['#Intact'] = qIntact
if qIntact:
# Conversion from impulse to force (absolute values preferred for sampling)
data = [abs(val /rbw_time_scale *rbw_steps_per_second) for val in data]
### Sampling of values over multiple frames
if samples:
for k in range(len(cons)):
val = data[k]
try: valAcc = vizObjs[i][name +'.%d N' %k]
except: pass
else: val = (val +(valAcc *(samples-1))) /samples
data[k] = val
### Evaluate total connection load by summarizing all forces
### (very simple, inaccurate for fixed connections)
#fmax = 0
#for val in data: fmax += val
### Evaluate total connection load by picking the maximum value
### (simple, good for fixed connections but might be inaccurate for complex structures with lots of lateral and angular forces)
fmax = 0
for val in data: fmax = max(fmax, val)
### Evaluate total connection load by combining individual force components (best in theory)
# # First detect orientation (x = connection normal)
# flx = 0; fly = 0; flz = 0
# fax = 0; fay = 0; faz = 0
# for k in range(len(cons)):
# con = cons[k]
# val = data[k]
# if con.use_limit_lin_x and val != 0: flx += val # Usually only one force per axis should exist
# elif con.use_limit_lin_y and val != 0: fly += val # but to make sure we are not ignoring forces
# elif con.use_limit_lin_z and val != 0: flz += val # we adding them up
# elif con.use_limit_ang_x and val != 0: fax = max(val, fax) # For angles we are using the maximum
# elif con.use_limit_ang_y and val != 0: fay = max(val, fay) # because some connection types are
# elif con.use_limit_ang_z and val != 0: faz = max(val, faz) # sharing two or three axis
# # For linear forces we are using the good old Pythagoras
# fl = (flx**2 +fly**2 +flz**2)**0.5
# # Convert moments to forces by using the elements distances to the pivot point
# if not qFM: dirVec = objConst.location -objA.matrix_world.to_translation() # Use actual locations (taking parent relationships into account)
# else: dirVec = objConst.location -objA.rigidbody.location
# distA = dirVec.length
# if not qFM: dirVec = objConst.location -objB.matrix_world.to_translation() # Use actual locations (taking parent relationships into account)
# else: dirVec = objConst.location -objB.rigidbody.location
# distB = dirVec.length
# dist = (distA +distB) /2 # Use average distance
# fa = (fax +fay +faz) #/dist # Angular forces (moments) are added up and divided by the distance (lever) to get a linear force
# # Add linear and linearized angular forces
# fmax = fl +fa
# # Debug prints
# #if name == "Con.6243":
# # print("fl: %0.2f %0.2f %0.2f = %0.2f" %(flx,fly,flz,fl))
# # print("fa: %0.2f %0.2f %0.2f = %0.2f" %(fax,fay,faz,fa))
# # print("fmax: %0.2f" %fmax)
### Write forces properties into visualization objects for user review
vizObjs[i]['#Fmax N'] = fmax
vizObjs[i]['#Fmax N/mm²'] = fmax /a
for k in range(len(cons)):
vizObjs[i][name +'.%d N' %k] = data[k]
vizObjs[i][name +'.%d N/mm²' %k] = data[k] /a
### Normalization to maximum force defined by user
dataNorm = []
for k in range(len(cons)):
brkThres = cons[k].breaking_threshold /rbw_time_scale *rbw_steps_per_second # Conversion from impulse to force
impulse = data[k]
if props.postprocTools_fcv_nbt:
val = impulse /brkThres # Visualize force normalized to breaking threshold
else:
val = impulse /a /props.postprocTools_fcv_max # Visualize relative force per connection
#val = impulse /props.postprocTools_fcv_max # Visualize absolute force per connection
#if val <= 1.25: # Skip values over the threshold for cases connection is not breakable, then we don't want to include them
# if a >= 70000: # Skip values with a too small contact area (mm²)
dataNorm.append(val)
# Finding the maximum strain of all constraints
if len(dataNorm) > 0:
fmax = dataNorm[0]
for val in dataNorm: fmax = min(max(fmax, val), 1)
else: fmax = 0
### Adding settings to visualization objects
obj = vizObjs[i]
obj.location = loc
### Scaling by force
if qIntact:
size = fmax *visualizerDrawSize
obj.scale = Vector((size, size, size))
changeMaterials(obj, fmax)
else:
obj.scale = Vector((.5, .5, .5)) *visualizerDrawSize
#obj.scale = Vector((0, 0, 0))
changeMaterials(obj, 0, qIntact=0)
### Check if last frame is reached
if scene.frame_current == scene.frame_end \
or (props.postprocTools_fcv_frm and scene.frame_current == props.postprocTools_fcv_frm):
if bpy.context.screen.is_animation_playing:
bpy.ops.screen.animation_play() # Stop animation playback
### If animation playback has stopped (can also be done by user) then unload the event handler and free all monitor data
if not bpy.context.screen.is_animation_playing:
###### First calculate the sum of a specified ID property for all selected objects for console output
keyVal = "#Fmax N" # Name of the ID property to be summarized
keyLim = "ContactArea mm²" # Name of the ID property to be used as limit
limMin = 0 # Minimum limit for values to be counted (0 = off)
limMax = 0 # Maximum limit for values to be counted (0 = off)
qText = 0 # Generate text objects
vizObjs = bpy.app.driver_namespace["log_vizObjs"]
### Summarize all values
objs = []
sum = 0
cnt = 0
for obj in vizObjs:
if keyVal in obj.keys() and keyLim in obj.keys():
val = obj[keyVal]
limVal = obj[keyLim]
if (not limMin or limVal >= limMin) and (not limMax or limVal <= limMax):
objs.append((obj, val))
sum += val
cnt += 1
else: obj.select = 0
else: obj.select = 0
### Create text objects
if qText:
# Deselect all objects
bpy.ops.object.select_all(action='DESELECT')
textObjs = []
for obj, val in objs:
name = "Text_" +obj.name
loc = obj.location.copy()
loc += Vector((.25, -.15, 1.5))
if name not in scene.objects:
bpy.ops.object.text_add(view_align=True, enter_editmode=False, location=(0, 0, 0))
textObj = bpy.context.scene.objects.active
textObj.name = name
textObj.location = loc
else:
textObj = scene.objects[name]
textObj.data.body = "%0.0f" %(val /1000 /9.81) # tons
textObj.data.align_y = 'TOP'
textObj.data.align_x = 'LEFT'
textObj.scale = (.6, .6, .6)
textObjs.append(textObj)
# Select all texts
for obj in textObjs: obj.select = 1
print()
print(keyVal +" sum = %0.0f acting on %d connections." %(sum, cnt))
if sum > 0:
print(keyVal +" = %0.0f averaged per connection." %(sum /cnt))
print("Weight = %0.3f tons (might not be the actual weight, it's just the sum of the visible)." %(sum /9.81 /1000))
print()
###### Unload the event handler and free all monitor data
try: bpy.app.handlers.frame_change_pre.remove(tool_forcesVisualization_eventHandler)
except: pass
else: print("Removed event handler: tool_forcesVisualization_eventHandler")
#scene.frame_current == scene.frame_start # Reset to start frame
### Delete keys
keys = [key for key in bpy.app.driver_namespace.keys()]
for key in keys:
if "log_" in key:
del bpy.app.driver_namespace[key]
bpy.context.screen.scene = scene # Hack to update other event handlers once again to finish their clean up
return
########################################
def tool_forcesVisualization(scene):
print("\nGenerating constraint force visualization...")
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
### Free previous bake data
contextFix = bpy.context.copy()
contextFix['point_cache'] = scene.rigidbody_world.point_cache
bpy.ops.ptcache.free_bake(contextFix)
### Invalidate point cache to enforce a full bake without using previous cache data
if "RigidBodyWorld" in bpy.data.groups:
try: obj = bpy.data.groups["RigidBodyWorld"].objects[0]
except: pass
else: obj.location = obj.location
# Go to start frame
scene.frame_current = scene.frame_start
print('Init constraint force visualization event handler.')
bpy.app.handlers.frame_change_pre.append(tool_forcesVisualization_eventHandler)
# Start animation playback
if not bpy.context.screen.is_animation_playing:
bpy.ops.screen.animation_play()
################################################################################
def tool_cavityDetection(scene):
# Leave edit mode to make sure next operator works in object mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
# Backup selection
selection = [obj for obj in bpy.context.scene.objects if obj.select]
selectionActive = bpy.context.scene.objects.active
# Find mesh objects in selection
objs = [obj for obj in selection if obj.type == 'MESH' and not obj.hide and obj.is_visible(bpy.context.scene) and len(obj.data.vertices) > 0]
if len(objs) == 0:
print("No mesh objects selected.")
return
props = bpy.context.window_manager.bcb
print("Detecting cavities...")
###### External function
size = props.postprocTools_cav_siz
kk_mesh_voxel_cell_grid_from_mesh.run('BCB_Cavity', [Vector((size, size, size))])
### Some finishing touches to the new cell object
bpy.ops.object.shade_smooth() # Shade smooth
obj = bpy.context.scene.objects.active
obj.name = grpNameVisualization
obj.select = 0
### Recalculate normals outside
# Enter edit mode
try: bpy.ops.object.mode_set(mode='EDIT')
except: pass
# Select all elements
try: bpy.ops.mesh.select_all(action='SELECT')
except: pass
bpy.ops.mesh.normals_make_consistent(inside=False)
# Leave edit mode
try: bpy.ops.object.mode_set(mode='OBJECT')
except: pass
### Add modifier
mod = obj.modifiers.new(name="Smooth", type="SMOOTH")
mod.factor = 2
# Revert to start selection
for obj in selection: obj.select = 1
bpy.context.scene.objects.active = selectionActive
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
1dafd61647db62c55f7cab3f7b2b67df9151ca73
|
e3b9aa9b17ebb55e53dbc4fa9d1f49c3a56c6488
|
/sentinelone/komand_sentinelone/actions/create_ioc_threat/action.py
|
f7fa82914edb59417d74e5642644ea47ef481e01
|
[
"MIT"
] |
permissive
|
OSSSP/insightconnect-plugins
|
ab7c77f91c46bd66b10db9da1cd7571dfc048ab7
|
846758dab745170cf1a8c146211a8bea9592e8ff
|
refs/heads/master
| 2023-04-06T23:57:28.449617
| 2020-03-18T01:24:28
| 2020-03-18T01:24:28
| 248,185,529
| 1
| 0
|
MIT
| 2023-04-04T00:12:18
| 2020-03-18T09:14:53
| null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
import komand
from .schema import CreateIocThreatInput, CreateIocThreatOutput, Input, Output
# Custom imports below
class CreateIocThreat(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='create_ioc_threat',
description='Create an IOC threat',
input=CreateIocThreatInput(),
output=CreateIocThreatOutput())
def run(self, params={}):
hash_ = params.get(Input.HASH)
group_id = params.get(Input.GROUP_ID)
path = params.get(Input.PATH)
agent_id = params.get(Input.AGENT_ID)
annotation = params.get(Input.ANNOTATION)
annotation_url = params.get(Input.ANNOTATION_URL)
affected = self.connection.create_ioc_threat(
hash_, group_id, path, agent_id, annotation, annotation_url
)
return {Output.AFFECTED: affected}
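# --- Editorial sketch (hypothetical, not part of the plugin): outside the
# InsightConnect engine, the action could be exercised roughly like this,
# assuming a connection object that exposes create_ioc_threat():
#
# action = CreateIocThreat()
# action.connection = my_sentinelone_connection
# action.run({Input.HASH: "44d88612fea8a8f36de82e1278abb02f",
#             Input.GROUP_ID: "some-group-id"})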
|
[
"jonschipp@gmail.com"
] |
jonschipp@gmail.com
|
6976e20d3a730322403f2a2a6ea9fee47588b3b0
|
72db92bc7f1794495c2bb4ed5451e9cb693b6489
|
/final_graduation/controller/__init__.py
|
6b18c921173f238552d4e84b7a56d72ace3b6060
|
[] |
no_license
|
susautw/flask_restful
|
fcb00fdcfe2d2117c7da0d0c770c7241a26917b5
|
e75a8055178dc87a581530a815849d665ed7fda2
|
refs/heads/master
| 2022-12-27T13:45:29.533184
| 2020-05-24T17:22:37
| 2020-05-24T17:22:37
| 303,327,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
__all__ = [
'HelloWorld',
'CategoryController', 'CategoryItemController',
'JudgeController', 'JudgeItemController',
'ReportController', 'ReportItemController', 'ReportItemGetByIdController'
]
from .hello_world import HelloWorld
from .category import CategoryController, CategoryItemController
from .judge import JudgeController, JudgeItemController
from .report import ReportController, ReportItemController, ReportItemGetByIdController
|
[
"susautw@gmail.com"
] |
susautw@gmail.com
|
4574101f29caeb4dfa9c8e02ea2cdb5c70071e7e
|
28bb1dda3010ab719ec537bcc63a5ed1491d148d
|
/modules training/pytest module/skip/test_operations.py
|
b959ac0ad729f7f434aa21162803736394df2bd3
|
[] |
no_license
|
dawidsielski/Python-learning
|
86db2ff782aab13ff6dc44b1ce9295fc7c186e20
|
4aabf4d63906fb1d2379bbd401e9ac7484766198
|
refs/heads/master
| 2021-01-13T10:35:20.913601
| 2018-02-24T11:52:06
| 2018-02-24T11:52:06
| 76,497,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
import operations
import pytest
import sys
def test_multiplication():
result = operations.multiplication(3, 6)
assert result == 18
@pytest.mark.skipif(sys.version_info > (3,0), reason = "No reason.")
def test_subtraction():
result = operations.subtraction(3, 6)
assert result == -3
@pytest.mark.skip(reason="Not working.")
def test_power():
result = operations.power(6)
assert result == 36
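# --- Editorial sketch (an assumption, not part of the original repo): the
# tests above import an "operations" module that is not included in this
# file. A minimal operations.py consistent with the assertions would be:
#
# def multiplication(a, b):
#     return a * b
#
# def subtraction(a, b):
#     return a - b
#
# def power(a):
#     return a ** 2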
|
[
"dawid.sielski@outlook.com"
] |
dawid.sielski@outlook.com
|
1b6d3309f1e02c969ec09a153c9f69930c87b997
|
4cca59f941adce8a2d71c00c0be5c06857f88dcc
|
/snisi_malaria/management/commands/fill_weekly_malaria_routine_cluster.py
|
3ba75446236dbd2e57ba4626718d7942092526bc
|
[
"MIT"
] |
permissive
|
brahimmade/snisi
|
7e4ce8e35150f601dd7b800bc422edec2d13063d
|
b4d0292b3314023ec9c984b776eaa63a0a0a266f
|
refs/heads/master
| 2023-05-07T19:04:04.895987
| 2017-12-29T18:58:22
| 2017-12-29T18:58:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
from django.core.management.base import BaseCommand
from snisi_core.models.Entities import Entity
from snisi_core.models.Projects import Cluster, Participation
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
mali = Entity.get_or_none("mali")
drmopti = Entity.get_or_none("SSH3")
dsmopti = Entity.get_or_none("HFD9")
dsbandiagara = Entity.get_or_none("MJ86")
cluster = Cluster.get_or_none("malaria_weekly_routine")
for entity in [mali, drmopti, dsmopti, dsbandiagara] + \
dsmopti.get_health_centers() + \
dsbandiagara.get_health_centers():
p, created = Participation.objects.get_or_create(
cluster=cluster,
entity=entity)
logger.info(p)
|
[
"rgaudin@gmail.com"
] |
rgaudin@gmail.com
|
2e321e21034ad3f0f4e81b8f9b084ca31166a941
|
e8a50e2e9f103fbf334b098fb6553c02f5516c4a
|
/computational_problems_for_physics/original_scripts/LaplaceLineClassic.py
|
093788247d2f8e49c7eef78d09b66e687a579607
|
[] |
no_license
|
tomasderner97/pythonPlayground
|
f31f08caf690bb36d8424d3537412c767813d632
|
9fd8e3e77d40a46e1b103cd81a884add7e2edacf
|
refs/heads/master
| 2022-06-19T05:23:34.097485
| 2019-11-16T17:37:36
| 2019-11-16T17:37:36
| 205,404,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,611
|
py
|
""" From "COMPUTATIONAL PHYSICS" & "COMPUTER PROBLEMS in PHYSICS"
by RH Landau, MJ Paez, and CC Bordeianu (deceased)
Copyright R Landau, Oregon State Unv, MJ Paez, Univ Antioquia,
C Bordeianu, Univ Bucharest, 2018.
Please respect copyright & acknowledge our work."""
# LaplaceLine.py: Solve Laplace's eqtn within square
import matplotlib.pylab as p, numpy
from mpl_toolkits.mplot3d import Axes3D; from numpy import *;
Nmax = 100; Niter = 50
V = zeros((Nmax, Nmax), float)
print ("Working hard, wait for the figure while I count to 60")
for k in range(0, Nmax-1): V[0,k] = 100.0 # Line at 100V
for iter in range(Niter):
if iter%10 == 0: print(iter)
for i in range(1, Nmax-2):
for j in range(1,Nmax-2):
V[i,j] = 0.25*(V[i+1,j]+V[i-1,j]+V[i,j+1]+V[i,j-1])
print ("iter, V[Nmax/5,Nmax/5]", iter, V[Nmax/5,Nmax/5])
x = range(0, 50, 2); y = range(0, 50, 2)
X, Y = p.meshgrid(x,y)
def functz(V): # V(x, y)
z = V[X,Y]
return z
Z = functz(V)
fig = p.figure() # Create figure
ax = Axes3D(fig) # Plot axes
ax.plot_wireframe(X, Y, Z, color = 'r') # Red wireframe
ax.set_xlabel('X'); ax.set_ylabel('Y'); ax.set_zlabel('V(x,y)')
ax.set_title('Potential within Square V(x=0)=100V (Rotatable)')
p.show() # Show fig
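# --- Editorial sketch (an addition, not part of the original script): the
# triple-nested Python loop above is a relaxation sweep updated in place
# (Gauss-Seidel style). The same relaxation can be written as a vectorized
# Jacobi iteration over the full interior using NumPy slicing, which runs
# far faster for large grids:
W = zeros((Nmax, Nmax), float)
W[0, :-1] = 100.0                         # Same line at 100V
for it in range(Niter):
    W[1:-1, 1:-1] = 0.25*(W[2:, 1:-1] + W[:-2, 1:-1]
                          + W[1:-1, 2:] + W[1:-1, :-2])
print("Jacobi check, W[Nmax//5, Nmax//5] =", W[Nmax//5, Nmax//5])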
|
[
"tomasderner97@gmail.com"
] |
tomasderner97@gmail.com
|
a1852688f61c1149cc14259fdd666306fea85d7c
|
8dcaee30fc5dd0c8982c324e88ede5dd693b0a76
|
/main.py
|
02682ad146df31b6a9a68eb2945c101f8df98aff
|
[] |
no_license
|
hristo-grudev/fcbanking
|
5de88d058a8274d5510a74b1bc0ab15d390bc7f2
|
22ff439bec0e5480e6577eff0903947be53adc08
|
refs/heads/main
| 2023-04-06T20:05:54.629491
| 2021-04-06T09:25:32
| 2021-04-06T09:25:32
| 355,130,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 76
|
py
|
from scrapy import cmdline
cmdline.execute("scrapy crawl fcbanking".split())
|
[
"hr.grudev@gmail.com"
] |
hr.grudev@gmail.com
|
fa929497b064b9f9f3f57c5241da5dfd48cf522a
|
e06ff08424324ac5d6c567ae9cd6954290ff9bd4
|
/Yudi TANG/axe/test.py
|
da887d8005b51052b5cd4f52c00ac472daa0fb3a
|
[
"Apache-2.0"
] |
permissive
|
JKChang2015/Machine_Learning
|
b1bdfcf9ea43a98fc7efd5c0624bbaf5d9dbf495
|
f8b46bf23e4d1972de6bd652dd4286e9322ed62f
|
refs/heads/master
| 2021-06-06T19:18:16.596549
| 2020-05-03T22:28:18
| 2020-05-03T22:28:18
| 119,390,891
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
# test
# Created by JKChang
# 27/01/2020, 15:58
# Tag:
# Description:
from numpy import *
import operator
from os import listdir
import matplotlib.pyplot as plt
# simulating a pandas df['type'] column
types = ['apple', 'orange', 'apple', 'pear', 'apple', 'orange', 'apple', 'pear']
x_coords = [10, 10, 5, 4, 3, 20, 19, 21]
y_coords = [21, 23, 12, 21, 10, 20, 14, 2]
for i, fruit in enumerate(types):  # "fruit" avoids shadowing the builtin type()
    x = x_coords[i]
    y = y_coords[i]
    plt.scatter(x, y, marker='x', color='red')
    plt.text(x + 0.3, y + 0.3, fruit, fontsize=9)
plt.show()
# group, labels = createDataSet()
#
# for column,label in zip(group,labels):
# plt.plot(group , label=label)
#
# plt.legend()
# plt.show()
# arr = np.random.random((10, 5))
# ax.plot(arr)
#
# labels = ['a', 'b', 'c', 'd', 'e']
#
# for column, label in zip(arr.T, labels):
# ax.plot(column, label=label)
|
[
"jkchang2015@gmail.com"
] |
jkchang2015@gmail.com
|
9304fb73892afe0c84d3f342cf34ecb6c5b2d312
|
d66818f4b951943553826a5f64413e90120e1fae
|
/hackerearth/Algorithms/Explosion/test.py
|
7f78871afb936c2a532f802823073955ecf4efd1
|
[
"MIT"
] |
permissive
|
HBinhCT/Q-project
|
0f80cd15c9945c43e2e17072416ddb6e4745e7fa
|
19923cbaa3c83c670527899ece5c3ad31bcebe65
|
refs/heads/master
| 2023-08-30T08:59:16.006567
| 2023-08-29T15:30:21
| 2023-08-29T15:30:21
| 247,630,603
| 8
| 1
|
MIT
| 2020-07-22T01:20:23
| 2020-03-16T06:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 581
|
py
|
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'2',
'2',
'2 1',
'3',
'2 1',
'2 3',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(),
'UNSAFE\n' +
'SAFE\n')
if __name__ == '__main__':
unittest.main()
|
[
"hbinhct@gmail.com"
] |
hbinhct@gmail.com
|
0e220f57e549f2acbf6ade6fbf1671507ebdaed0
|
108e221a0220c9124af9407bc2541e44cbca10d0
|
/website/app/backstage/sidebar/forms.py
|
a91af70ca7fc54545daf72b4bd0b331409f1869f
|
[] |
no_license
|
yatengLG/website
|
a7cf50f85d320d4985c665bb6422d4b1ac1ec273
|
41bff84577db5ba927c47cd96e0b42672be05974
|
refs/heads/master
| 2023-02-01T01:27:11.763924
| 2020-12-10T07:55:52
| 2020-12-10T07:55:52
| 320,195,848
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
# -*- coding: utf-8 -*-
# @Author : LG
from flask_wtf import FlaskForm
from wtforms import TextField, TextAreaField, SubmitField, SelectField
from wtforms.validators import Required, Length
class SidebarEditForm(FlaskForm):
    title = TextField('侧栏头', validators=[Required(message='标题为空'), Length(1, 40, message='1-40个字符')], render_kw={'placeholder' : '侧栏头','style':'width: 383px'})
    body = TextAreaField('内容(可编辑html源码的方式添加样式)', id='full-featured', render_kw={'style':'width: 383px'})
    forbid = SelectField('禁用', choices=[
        (1, '是'),
        (0, '否')
    ], default=0, render_kw={'style':'width: 200px'}, coerce=int)  # An int is passed in here, but the backend receives it as a bool.
submit = SubmitField('提交')
|
[
"767624851@qq.com"
] |
767624851@qq.com
|
e7ddcc04a4cd02a12969fb3821668ff2565a8b0b
|
ea71ac18939e99c1295506d3e3808bb76daabc84
|
/.venv35/Lib/site-packages/PyInstaller/hooks/hook-eth_keyfile.py
|
8d6cebc7f73c6c906d985694f476769f8b991c5e
|
[
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
ndaley7/BodySlide-Group-Generator
|
543ef48b4dfe58953b0694b2941c9131d5f4caef
|
3ed7b78c5f5ccec103b6bf06bc24398cfb6ad014
|
refs/heads/master
| 2020-08-26T17:44:38.768274
| 2019-12-10T19:01:23
| 2019-12-10T19:01:23
| 217,089,038
| 2
| 0
|
BSD-3-Clause
| 2019-12-10T19:02:29
| 2019-10-23T15:18:10
|
Python
|
UTF-8
|
Python
| false
| false
| 497
|
py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2018-2019, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import copy_metadata
datas = copy_metadata("eth_keyfile")
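# --- Editorial note: copy_metadata() returns a list of (source, dest) tuples
# covering the package's dist-info/egg-info directory. Hooks like this exist
# because the package (here, presumably eth_keyfile) looks up its own
# installed version via pkg_resources/importlib.metadata at import time,
# which fails inside a frozen executable unless that metadata is bundled.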
|
[
"ndaley7@gatech.edu"
] |
ndaley7@gatech.edu
|
e9c9645fd7e37b0628d58973343ce1191da676e8
|
3b3b86495f9db3f6ff5ded987cf7ed26c32baea2
|
/Atividade 6 e 8/libraryon_api/books/migrations/0001_initial.py
|
edf594ee019fc7cccb708ac8c42629414c9014a1
|
[] |
no_license
|
Akijunior/Top_Especiais_APIs
|
445b9e324588d1ea1806232bde39219da0cd8606
|
92434bb7f7aea300c6e6081502bb3dc672136ecb
|
refs/heads/master
| 2022-12-11T16:59:57.318030
| 2018-09-04T17:22:58
| 2018-09-04T17:22:58
| 131,898,317
| 0
| 0
| null | 2022-12-08T02:23:26
| 2018-05-02T19:44:22
|
Python
|
UTF-8
|
Python
| false
| false
| 2,157
|
py
|
# Generated by Django 2.0.3 on 2018-06-22 19:59
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150)),
('description', models.CharField(max_length=400)),
('isbn', models.CharField(max_length=20)),
('edition', models.CharField(max_length=20)),
('year', models.IntegerField()),
('amount_pages', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
],
),
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.CharField(max_length=150)),
('age_range', models.CharField(choices=[('F', 'Free'), ('FT', '+14'), ('ST', '+16'), ('ET', '+18')], default='F', max_length=2)),
],
),
migrations.CreateModel(
name='Score',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('score', models.DecimalField(decimal_places=2, max_digits=4, validators=[django.core.validators.MaxValueValidator(10.0), django.core.validators.MinValueValidator(0.0)])),
('comment', models.CharField(blank=True, max_length=200)),
('evaluation_date', models.DateTimeField(auto_now_add=True)),
('last_update_date', models.DateTimeField(auto_now=True)),
('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='books.Book')),
],
),
]
|
[
"suitsu19@gmail.com"
] |
suitsu19@gmail.com
|
d345e2d11c7636f9ad4ce919478b7a3066a5efda
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/1d0bab0bfd77edcf1228d45bf654457a8ff1890d-<constant_time_compare>-fix.py
|
9b1c685835c714b8498ff31c245947c99850f822
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
import secrets

from django.utils.encoding import force_bytes


def constant_time_compare(val1, val2):
    'Return True if the two strings are equal, False otherwise.'
    return secrets.compare_digest(force_bytes(val1), force_bytes(val2))
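# --- Editorial note: secrets.compare_digest() takes time independent of
# where the inputs first differ, so the comparison does not leak the
# position of a mismatch through timing. A hedged usage sketch:
#
# constant_time_compare('token-a', 'token-a')   # True
# constant_time_compare('token-a', 'token-b')   # False, with no timing leak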
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
f8eec7478f8d5f48879bc662741b59a3fae2838e
|
4e710abfd764090c9f04156d0c6f6014144b13a8
|
/asv_bench/benchmarks/commit_and_checkout.py
|
859280bbf87e55a31726d3d68e5cc8aa17938b99
|
[
"Apache-2.0"
] |
permissive
|
hhsecond/hangar-py
|
2bbb7834e24620fcf13039165846f7f1a00c5253
|
f335834c784ba6419e8cddd0f1b48ac317270747
|
refs/heads/master
| 2021-07-07T19:48:32.796724
| 2020-09-01T21:16:20
| 2020-09-01T21:16:20
| 183,601,108
| 0
| 0
|
Apache-2.0
| 2019-04-26T09:35:17
| 2019-04-26T09:35:16
| null |
UTF-8
|
Python
| false
| false
| 2,707
|
py
|
from tempfile import mkdtemp
from shutil import rmtree
import numpy as np
from hangar import Repository
class MakeCommit(object):
params = (5_000, 20_000, 50_000)
param_names = ['num_samples']
processes = 2
repeat = (2, 4, 20)
number = 1
warmup_time = 0
def setup(self, num_samples):
self.tmpdir = mkdtemp()
self.repo = Repository(path=self.tmpdir, exists=False)
self.repo.init('tester', 'foo@test.bar', remove_old=True)
self.co = self.repo.checkout(write=True)
arr = np.array([0,], dtype=np.uint8)
try:
aset = self.co.arraysets.init_arrayset('aset', prototype=arr, backend_opts='10')
except TypeError:
aset = self.co.arraysets.init_arrayset('aset', prototype=arr, backend='10')
except AttributeError:
aset = self.co.add_ndarray_column('aset', prototype=arr, backend='10')
with aset as cm_aset:
for i in range(num_samples):
arr[:] = i % 255
cm_aset[i] = arr
def teardown(self, num_samples):
self.co.close()
self.repo._env._close_environments()
rmtree(self.tmpdir)
def time_commit(self, num_samples):
self.co.commit('hello')
class CheckoutCommit(object):
params = (5_000, 20_000, 50_000)
param_names = ['num_samples']
processes = 2
number = 1
repeat = (2, 4, 20)
warmup_time = 0
def setup(self, num_samples):
self.tmpdir = mkdtemp()
self.repo = Repository(path=self.tmpdir, exists=False)
self.repo.init('tester', 'foo@test.bar', remove_old=True)
self.co = self.repo.checkout(write=True)
arr = np.array([0,], dtype=np.uint8)
try:
aset = self.co.arraysets.init_arrayset('aset', prototype=arr, backend_opts='10')
except TypeError:
aset = self.co.arraysets.init_arrayset('aset', prototype=arr, backend='10')
except AttributeError:
aset = self.co.add_ndarray_column('aset', prototype=arr, backend='10')
with aset as cm_aset:
for i in range(num_samples):
arr[:] = i % 255
cm_aset[i] = arr
self.co.commit('first')
self.co.close()
self.co = None
def teardown(self, num_samples):
try:
self.co.close()
except PermissionError:
pass
self.repo._env._close_environments()
rmtree(self.tmpdir)
def time_checkout_read_only(self, num_samples):
self.co = self.repo.checkout(write=False)
def time_checkout_write_enabled(self, num_samples):
self.co = self.repo.checkout(write=True)
self.co.close()
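# --- Editorial note: these classes follow the airspeed velocity (asv)
# benchmark conventions. Methods prefixed with time_ are what asv times;
# each value in "params" is passed to setup()/teardown() and the timed
# methods as num_samples; and "repeat", "number" and "warmup_time" control
# how many samples asv collects. They are discovered and run with `asv run`
# rather than being invoked directly.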
|
[
"rizzo242@gmail.com"
] |
rizzo242@gmail.com
|
665e9938694a57e885a85eef83a2d23819f66f99
|
8a94de4c3cf8725e7a5a627777d31f43e4ad73f2
|
/easy/subtree-of-another-tree.py
|
d1abaeee86a0e1188631eb55ba0830a046a3d2f9
|
[] |
no_license
|
zhangzhao156/Basic-Algorithm
|
a873101fae8e8c750cdab41c60ce4734348bab27
|
c7f6d176b22cdbd66635d437b4bda8eb36412da3
|
refs/heads/master
| 2023-02-10T01:14:12.599898
| 2021-01-05T14:09:27
| 2021-01-05T14:09:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
# encoding:utf-8
"""
问题描述:
s= 3
/ \
4 5
/ \
1 2
t= 4
/ \
1 2
解决方案:
辅助函数isMatch是判断两个树结构是否完全一致,可以判断s和t是否完全一致,
判断的条件是: s.val == t.val and self.isMatch(s.left, t.left) and self.isMatch(s.right, t.right)
或者判断s的子树(左右)是否和t一致
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# Recursive approach
class Solution(object):
def isSubtree(self, s, t):
"""
:type s: TreeNode
:type t: TreeNode
:rtype: bool
"""
if self.isMatch(s,t):
return True
if not s:
return False
        # isMatch above requires a full match of s and t; if that fails, check whether s's left or right subtree can fully match t
return self.isSubtree(s.left, t) or self.isSubtree(s.right, t)
def isMatch(self, s,t):
if not (s and t):
return s is t
return s.val == t.val and self.isMatch(s.left, t.left) and self.isMatch(s.right, t.right)
if __name__ == "__main__":
solve = Solution()
s = TreeNode(3)
s.left = TreeNode(4)
s.right = TreeNode(5)
s.left.left = TreeNode(1)
s.left.right = TreeNode(2)
t = TreeNode(4)
t.left = TreeNode(1)
t.right = TreeNode(2)
print(solve.isSubtree(s,t))
|
[
"congyingTech@163.com"
] |
congyingTech@163.com
|
e45353acbf97e93eed682637eebfc676ada85ca1
|
bc4688c02d16c4f786f0ea835f9e1a7e45272090
|
/cnn/keras/full_scan/train3.py
|
0def1b864d221fbe894d89a03581e7a3e02c49ec
|
[] |
no_license
|
mhubrich/adni-python
|
4e3fb24e216fb908eb2426b2fc90482431f3494d
|
21e14e4cb5ab8edf33deff7fbe2494bbc396ea35
|
refs/heads/master
| 2021-06-15T02:04:00.435915
| 2017-02-28T14:25:23
| 2017-02-28T14:25:23
| 69,449,927
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,854
|
py
|
##############################################################
# Set seed for deterministic behaviour between different runs.
# Especially fresh weights will be initialized the same way.
# Caution: CudNN might not be deterministic after all.
SEED = 0
import numpy as np
np.random.seed(SEED)
##############################################################
from cnn.keras import callbacks
from cnn.keras.evaluation_callback2 import Evaluation
from cnn.keras.models.AVG444.model_normal import build_model
from utils.split_scans import read_imageID
from utils.sort_scans import sort_groups
import sys
fold = str(sys.argv[1])
# Training specific parameters
target_size = (22, 22, 22)
classes = ['Normal', 'AD']
batch_size = 32
load_all_scans = True
num_epoch = 5000
# Paths
path_ADNI = '/home/mhubrich/ADNI_intnorm_avgpool444_new'
path_checkpoints = '/home/mhubrich/checkpoints/adni/full_scan_3_CV' + fold
def load_data(scans):
groups, _ = sort_groups(scans)
nb_samples = 0
for c in classes:
assert groups[c] is not None, \
'Could not find class %s' % c
nb_samples += len(groups[c])
X = np.zeros((nb_samples, 1, ) + target_size, dtype=np.float32)
y = np.zeros(nb_samples, dtype=np.int32)
i = 0
for c in classes:
for scan in groups[c]:
X[i] = np.load(scan.path)
y[i] = 0 if scan.group == classes[0] else 1
i += 1
return X, y
def train():
# Get inputs for training and validation
scans_train = read_imageID(path_ADNI, '/home/mhubrich/ADNI_CV_mean2/' + fold + '_train')
x_train, y_train = load_data(scans_train)
scans_val = read_imageID(path_ADNI, '/home/mhubrich/ADNI_CV_mean2/' + fold + '_val')
x_val, y_val = load_data(scans_val)
# Set up the model
model = build_model()
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# Define callbacks
cbks = [callbacks.print_history(),
callbacks.flush(),
Evaluation(x_val, y_val, batch_size,
[callbacks.early_stop(patience=60, monitor=['val_loss', 'val_acc', 'val_fmeasure', 'val_mcc', 'val_mean_acc']),
callbacks.save_model(path_checkpoints, max_files=2, monitor=['val_loss', 'val_acc', 'val_fmeasure', 'val_mcc', 'val_mean_acc'])])]
g, _ = sort_groups(scans_train)
hist = model.fit(x=x_train,
y=y_train,
nb_epoch=num_epoch,
callbacks=cbks,
class_weight={0:max(len(g['Normal']), len(g['AD']))/float(len(g['Normal'])),
1:max(len(g['Normal']), len(g['AD']))/float(len(g['AD']))},
batch_size=batch_size,
shuffle=True,
verbose=2)
if __name__ == "__main__":
train()
|
[
"mhubrich@students.uni-mainz.de"
] |
mhubrich@students.uni-mainz.de
|
84caab557c5fac7436bf9d9ddb18f9e229944dcb
|
1b19103c7781c31b4042e5404eea46fa90014a70
|
/cenit_admin_reports_api_reports_v1/__openerp__.py
|
d82da022f0a193cef1f12719b5f329e4a29d3dac
|
[] |
no_license
|
andhit-r/odoo-integrations
|
c209797d57320f9e49271967297d3a199bc82ff5
|
dee7edc4e9cdcc92e2a8a3e9c34fac94921d32c0
|
refs/heads/8.0
| 2021-01-12T05:52:26.101701
| 2016-12-22T03:06:52
| 2016-12-22T03:06:52
| 77,223,257
| 0
| 1
| null | 2016-12-23T12:11:08
| 2016-12-23T12:11:08
| null |
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010, 2014 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Admin_reports_api_reports_v1 Integration',
'version': '0.1',
'author': 'Cenit IO',
'website': 'https://cenit.io',
# ~ 'license': 'LGPL-3',
'category': 'Extra Tools',
'summary': "Allows the administrators of Google Apps customers to fetch reports about the usage, collaboration, security and risk for their users.",
'description': """
Odoo - Admin_reports_api_reports_v1 integration via Cenit IO
""",
'depends': ['cenit_base'],
'data': [
'security/ir.model.access.csv',
'data/data.xml'
],
'installable': True
}
|
[
"sanchocuba@gmail.com"
] |
sanchocuba@gmail.com
|
34ec68688143315b7b0922ddf27fd74908760a67
|
38855188ec752e7e012cd5d3f8aec8ec0a1645a0
|
/betse/util/type/iterable/set/setcls.py
|
67b102f6fcdef9a8cfe4df1c4d1eead32ea92fc3
|
[] |
no_license
|
R-Stefano/betse-ml
|
660a35ff587e649a7758e6af0766f5c1c7694544
|
dd03ff5e3df3ef48d887a6566a6286fcd168880b
|
refs/heads/master
| 2023-05-01T01:42:14.452140
| 2021-05-10T09:58:14
| 2021-05-10T09:58:14
| 343,448,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,366
|
py
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright 2014-2019 by Alexis Pietak & Cecil Curry.
# See "LICENSE" for further details.
'''
Low-level **set classes** (i.e., classes implementing set-like functionality,
typically by subclassing the builtin :class:`set` or :class:`frozenset`
container types or analogues thereof).
'''
# ....................{ IMPORTS }....................
from abc import ABCMeta
from betse.util.type.obj import objects
from betse.util.type.types import (
type_check, ClassType, IterableTypes, MappingType,)
from functools import wraps
# ....................{ GLOBALS }....................
_FROZENSET_METACLASS = type(frozenset)
'''
Metaclass of the builtin "frozenset" container type.
'''
# ....................{ METACLASSES }....................
class FrozenSetSubclassableMeta(ABCMeta, _FROZENSET_METACLASS):
'''
Metaclass of the abstract :class:`FrozenSetSubclassable` base class and all
concrete subclasses thereof.
This metaclass dynamically redefines *all* container-creating methods of
the :class:`frozenset` superclass of the :class:`FrozenSetSubclassable`
class (e.g., :meth:`frozenset.__or__`) within the currently declared
concrete subclass of that class. Specifically, this metaclass redefines
each such method to return an instance of the currently declared concrete
subclass of :class:`FrozenSetSubclassable` rather than of the
:class:`frozenset` superclass, preserving sane semantics and caller
expectations.
Design
----------
The :class:`FrozenSetSubclassable` class is merely a placeholder subclass
of the :class:`frozenset` type whose metaclass is this metaclass. Ideally,
the work performed by this metaclass would directly reside in the
:class:`FrozenSetSubclassable` class instead, in which case this metaclass
would have *no* demonstrable reason to exist. However, this work requires
access to the concrete subclass of the :class:`FrozenSetSubclassable` class
being currently declared. Since each such subclass is accessible *only*
from within the metaclass of the :class:`FrozenSetSubclassable` class
rather than within that class itself, this work necessarily resides in this
metaclass.
For general-purpose usability, this metaclass subclasses both:
* The metaclass of the :class:`frozenset` type (typically, the root
metaclass :class:`type`), avoiding conflicts between this metaclass and
the :class:`FrozenSetSubclassable` class subclassing the
:class:`frozenset` type.
* The :class:`ABCMeta` metaclass, avoiding conflicts between this metaclass
and concrete subclasses of the :class:`FrozenSetSubclassable` class which
additionally subclass one or more other abstract base classes which
themselves leverage the :class:`ABCMeta` metaclass. (Subtle dragons.)
For these and similar reasons, metaclass usage in Python should typically
be kept to a minimum. The :class:`FrozenSetSubclassable` class violates
this maxim because it absolutely must; in all other cases, alternate
solutions *not* leveraging metaclasses should be implemented instead.
See Also
----------
https://stackoverflow.com/a/804973/2809027
StackOverflow answer mildly inspiring this class.
'''
# ..................{ CONSTRUCTORS }..................
def __new__(
metacls: ClassType,
class_name: str,
class_base_classes: IterableTypes,
class_attrs: MappingType,
**kwargs
) -> ClassType:
'''
Redefine all container-creating methods of the :class:`frozenset`
superclass of the :class:`FrozenSetSubclassable` class (e.g.,
:meth:`frozenset.__or__`) within the currently declared concrete
subclass of that class identified by the passed parameters.
'''
# Tuple of the unqualified names of all container-creating methods
# defined by the "frozenset" type, requiring redefinition in the
# "FrozenSetSubclassable" subclass declared above.
CREATION_METHOD_NAMES = (
'copy',
'difference',
'intersection',
'symmetric_difference',
'union',
'__and__',
'__or__',
'__rand__',
'__ror__',
'__rsub__',
'__rxor__',
'__sub__',
'__xor__',
)
# Unsanitized "FrozenSetSubclassable" subclass.
frozenset_subclass = super().__new__(
metacls, class_name, class_base_classes, class_attrs)
# For the name of each such method...
for creation_method_name in CREATION_METHOD_NAMES:
metacls._sanitize_creation_method(
frozenset_subclass, creation_method_name)
# Return this sanitized "FrozenSetSubclassable" subclass.
return frozenset_subclass
# ..................{ SANITIZERS }..................
@staticmethod
@type_check
def _sanitize_creation_method(
frozenset_subclass: ClassType, method_name: str) -> None:
'''
Redefine the container-creating method of the
:class:`frozenset` superclass with the passed name in a cleverly
automated manner circumventing all superclass issues.
Design
----------
This private static method is intended to be called only by the special
static :meth:`__new__` method of this metaclass.
This method is intentionally implemented as a discrete callable rather
than inlined directly into the body of the :meth:`__new__` method, as
the closure internally defined by this method expects the local
``frozenset_method`` variable captured by this closure to remain
constant for the lifetime of that closure.
Parameters
----------
frozenset_subclass : ClassType
:class:`FrozenSetSubclassable` subclass to redefine this method
for.
method_name : str
Name of the container-creating method to be redefined.
'''
# Container-creating method with this name defined by "frozenset".
frozenset_method = objects.get_callable(
obj=frozenset, callable_name=method_name)
# Closure sanitizing this method to return instances of the concrete
# subclass inheriting from this class rather than of "frozenset",
# wrapped in a manner propagating the name, docstring, and other
# identifying metadata of the original method to this closure.
@wraps(frozenset_method)
def sanitized_creation_method(self, *args, **kwargs):
# "frozenset" instance created by calling the superclass method
# with all passed positional and keyword arguments.
set_created = frozenset_method(self, *args, **kwargs)
# Return either...
return (
# A new instance of this concrete subclass if the returned
# object is in fact a "frozenset" instance.
frozenset_subclass(set_created) if isinstance(
set_created, frozenset) else
# This object as is otherwise. Technically, this should
# probably never occur. Pragmatically, you know what they say
# about every bad assumption we've ever made.
set_created)
# Override the superclass method with this closure.
setattr(frozenset_subclass, method_name, sanitized_creation_method)
# ....................{ SUPERCLASSES }....................
class FrozenSetSubclassable(frozenset, metaclass=FrozenSetSubclassableMeta):
'''
Safely subclassable immutable set.
Caveats
----------
**Immutable set subclasses should always inherit from this base class.**
Neither the builtin :class:`frozenset` container type nor the abstract
:class:`collections.abc.Set` and :class:`collections.abc.Hashable` base
classes should be inherited from.
Subclasses should avoid declaring a metaclass or inheriting from another
base class that declares a metaclass - excluding the standard
:class:`ABCMeta` metaclass, which is compatible with this class by design.
All other metaclasses should be considered incompatible. Violating this
constraint typically raises the following runtime exception:
TypeError: metaclass conflict: the metaclass of a derived class must be
a (non-strict) subclass of the metaclasses of all its bases
Subclasses must redefine the static ``__new__()`` method and *not* attempt
to define the ``__init__()`` method. Immutable types are necessarily
initialized at object creation time.
By Python design, the ``__new__()`` method is static rather than a
classmethod and hence *must* be manually redefined in *all* subclasses.
This redefinition *must* internally call the superclass
:func:`frozenset.__new__` method and return the result of doing so (i.e.,
an instance of the desired subclass type). This redefinition should ideally
(but *not* necessarily) share a similar signature as the
:func:`frozenset.__new__` method and pass all passed parameters to that
method as is. In positional order, these are:
#. **The type of the current subclass.** Since the ``__new__()`` method is
static and hence *not* bound to the type of the current subclass, this
type *must* be manually passed as the first argument to this method.
#. **The optional iterable defining the contents of this immutable set.**
If unpassed, this set reduces to the empty set.
The trivial implementation of the ``__new__()`` method is as follows:
def __new__(cls, *args):
return super().__new__(cls, *args)
Versus :class:`frozenset`
----------
The :class:`frozenset` type is *not* safely subclassable, due to
unfortunate design decisions baked into the C-based implementations of
*all* builtin container types. The core issue pertains to the objects
returned by **container-creating methods** (i.e., methods declared by these
types that create and return instances of the same types). In the case of
:class:`frozenset`, these methods include:
* Binary set operations (e.g., the set union operator `|`, internally
implemented by the :meth:`frozenset.__or__` special method).
* Binary set methods (e.g., the set union method :meth:`frozenset.union`).
* Copy set methods (e.g., the set copying method :meth:`frozenset.copy`).
The specifics of the unsuitability of :class:`frozenset` depend on the
major version of Python in use. Specifically, under:
* Python 2.x, container-creating methods in both the :class:`frozenset` and
:class:`set` types correctly created instances of subclasses inheriting
from these types but incorrectly failed to call the `__init__` methods of
these subclasses. This is referred to as `issue #1721812`_.
.. _issue #1721812:
https://mail.python.org/pipermail/python-bugs-list/2007-May/038471.html
* Python 3.x, container-creating methods in both the :class:`frozenset` and
:class:`set` types incorrectly resolved `issue #1721812`_ by creating
instances of the corresponding base types (e.g., :class:`frozenset` or
:class:`set`) in subclasses inheriting from these types rather than
instances of these subclasses.
Technically, the latter issue may be resolved by manually redefining *all*
container-creating methods in :class:`frozenset` subclasses. Pragmatically,
there exist at least 17 such methods, rendering such redefinition
effectively infeasible: ``__ror__``, ``difference_update``, ``__isub__``,
``symmetric_difference``, ``__rsub__``, ``__and__``, ``__rand__``,
``intersection``, ``difference``, ``__iand__``, ``union``, ``__ixor__``,
``symmetric_difference_update``, ``__or__``, ``copy``, ``__rxor__``,
``intersection_update``, ``__xor__``, ``__ior__``, and ``__sub__``.
Versus ``Set`` and ``Hashable``
----------
Technically, the abstract :class:`collections.abc.Set` base class defining
the official immutable set API *is* safely subclassable, but only under the
following stipulations:
* The abstract :class:`collections.abc.Hashable` base class should
typically also be subclassed. Failing to do so raises exceptions on
attempting to add subclass instances to builtin container types expecting
hashable objects (e.g., :class:`dict`, :class:`set`).
* All public methods defined by the :class:`frozenset` type (e.g.,
:meth:`frozenset.union`, :meth:`frozenset.issubset`) should typically
also be defined, both for usability *and* duck typing purposes.
Satisfying these stipulations requires defining the following 13 methods:
``__contains__``, ``__eq__``, ``__hash__``, ``__init__``, ``__iter__``,
    ``__len__``, ``difference``, ``intersection``, ``symmetric_difference``,
``union``, ``copy``, ``issubset``, and ``issuperset``.
While feasible, doing so is surprisingly less trivial than redefining the
17 container-creating :class:`frozenset` methods listed above. The latter
all share the exact same semantics and hence are trivially redefined with
automation, as this class demonstrates. The former, however, share *no*
common semantics and hence each require manual redefinition.
Moreover, the typical implementation of a :class:`collections.abc.Set`
subclass encapsulates an internal :class:`frozenset` instance variable.
Since both approaches require the :class:`frozenset` class *and* since
directly subclassing that class is simpler than indirectly encapsulating
that class in :class:`collections.abc.Set` subclasses, this class elects to
directly subclass the :class:`frozenset` type instead.
'''
# ..................{ CONSTRUCTORS }..................
# The entirety of the logic for this class resides in our metaclass.
def __new__(cls, *args):
return super().__new__(cls, *args)
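# ....................{ EXAMPLE ~ editorial }....................
# A minimal usage sketch (an editorial addition, not part of this module),
# assuming only the class defined above: set operations on a concrete
# subclass return instances of that subclass rather than bare frozensets.
if __name__ == '__main__':
    class TaggedSet(FrozenSetSubclassable):
        def __new__(cls, *args):
            return super().__new__(cls, *args)

    evens = TaggedSet((0, 2, 4))
    odds = TaggedSet((1, 3, 5))
    assert type(evens | odds) is TaggedSet        # *not* plain "frozenset"
    assert type(evens.union(odds)) is TaggedSet
    assert type(evens.copy()) is TaggedSet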
|
[
"stefano.bane@gmail.com"
] |
stefano.bane@gmail.com
|
7ac13b1a6697470865d91e38fa08dbd5db14ff75
|
451548e5ec5d84606b0769efc7c6b619e5a8dd68
|
/encryptor/__init__.py
|
f741f62be6fff85804947257abbfdb12e15408d1
|
[] |
no_license
|
nttlong/lv-open-edx
|
085309886e2196dd975bd2900016af334dad9987
|
b676f2438ce7658a8a3f515fb9d071a66b30ee6f
|
refs/heads/master
| 2021-04-09T16:32:27.572555
| 2018-08-10T11:21:16
| 2018-08-10T11:21:16
| 125,824,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,375
|
py
|
"""
The encryptor packahe support encrypt an decrypt text with mongodb sync collection
"""
_cache={}
_cache_revert={}
import re
from pymongo import MongoClient
import datetime
import logging
import threading
import uuid
logger = logging.getLogger(__name__)
global lock
lock = threading.Lock()
_coll=None
_db=None
_collection_name=None
def set_config(*args,**kwargs):
"""
Set database connction for encrypt data sync
:param args:including "host(mongodb server hosting)","port","name" (Database name),"collection" (the collection of encrypt data
:param kwargs:
:return:
"""
global _coll
global _db
global _collection_name
if type(args) is tuple and args.__len__()>0:
args=args[0]
else:
args=kwargs
if not args.has_key("host"):
raise Exception("'host' was not found")
if not args.has_key("port"):
raise Exception("'port' was not found")
if not args.has_key("name"):
raise Exception("'name' was not found")
if not args.has_key("collection"):
raise Exception("'collection' was not found")
if _coll==None:
cnn=MongoClient(host=args["host"],port=args["port"])
_db=cnn.get_database(args["name"])
if args.has_key("user") and (args["user"]!="" or args["user"]!=None):
_db.authenticate(args["user"],args["password"])
_coll=_db.get_collection(args["collection"])
_collection_name=args["collection"]
def get_key(value):
"""
get uuid from value if uuid of value was not found this function will generate a uuid and sync to database then return uuid
:param value: any text
:return: uuid
"""
global _cache
global _cache_revert
if _cache.has_key(value):
return _cache[value]
else:
lock.acquire()
try:
item=_coll.find_one({
"value":re.compile("^"+value+"$",re.IGNORECASE)
})
if item==None:
key=str(uuid.uuid4())
_coll.insert_one({
"value":value,
"key":key
})
_cache[value]=key
_cache_revert[key]=value
else:
_cache[value]=item["key"]
_cache_revert[item["key"]]=item["value"]
lock.release()
return _cache[value]
except Exception as ex:
lock.release()
logger.debug(ex)
raise(ex)
def get_value(key):
"""
get value which is corectponding with key
:param key: uuid text
:return: text has been map when call get_key in this package
"""
global _cache_revert
if _cache_revert.has_key(key):
return _cache_revert[key]
else:
lock.acquire()
try:
item=_coll.find_one({
"key":re.compile("^"+key+"$",re.IGNORECASE)
})
if item==None:
raise(Exception("Key was not found"))
            _cache[item["value"]]=item["key"]
_cache_revert[item["key"]]=item["value"]
lock.release()
return _cache_revert[key]
except Exception as ex:
lock.release()
logger.debug(ex)
raise(ex)
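# --- Editorial sketch (an addition, not part of the original package): a
# round trip through this module, assuming a reachable MongoDB instance;
# host, port, database and collection names below are placeholders.
#
# import encryptor
# encryptor.set_config(host="localhost", port=27017,
#                      name="edx", collection="encrypt_sync")
# key = encryptor.get_key("student@example.com")  # stable uuid for this text
# assert encryptor.get_value(key) == "student@example.com"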
|
[
"zugeliang2000@gmail.com"
] |
zugeliang2000@gmail.com
|
c44b51acd0f486c876360fc6069cc0791d35725e
|
cda43bf6a84f7e55fab26aa70cda934683a51fe5
|
/nikNdNikNdNd/foForTrain.py
|
8eaf08daef4eab7f98e00f42bb1e7c97f8cba527
|
[] |
no_license
|
nikolaosdionelis/NeuralNetworksNNs
|
abb55622882e31c8d130a8986868b3d19ede186f
|
8a217490ad5bb3f7fccf4002c6b43a06c1e562fc
|
refs/heads/master
| 2022-11-13T00:50:23.578197
| 2020-07-12T18:52:20
| 2020-07-12T18:52:20
| 279,042,013
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326,875
|
py
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.utils as vutils
import seaborn as sns
import os
import pickle
import math
import utils
import hmc
#import hmc2
import data
import datasets
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torch.distributions.normal import Normal
real_label = 1
fake_label = 0
criterion = nn.BCELoss()
criterion_mse = nn.MSELoss()
def dcgan(dat, netG, netD, args):
device = args.device
X_training = dat['X_train'].to(device)
fixed_noise = torch.randn(args.num_gen_images, args.nz, 1, 1, device=device)
optimizerD = optim.Adam(netD.parameters(), lr=args.lrD, betas=(args.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=args.lrG, betas=(args.beta1, 0.999))
for epoch in range(1, args.epochs+1):
for i in range(0, len(X_training), args.batchSize):
netD.zero_grad()
stop = min(args.batchSize, len(X_training[i:]))
real_cpu = X_training[i:i+stop].to(device)
batch_size = real_cpu.size(0)
label = torch.full((batch_size,), real_label, device=device)
output = netD(real_cpu)
errD_real = criterion(output, label)
errD_real.backward()
D_x = output.mean().item()
# train with fake
noise = torch.randn(batch_size, args.nz, 1, 1, device=device)
fake = netG(noise)
label.fill_(fake_label)
output = netD(fake.detach())
errD_fake = criterion(output, label)
errD_fake.backward()
D_G_z1 = output.mean().item()
errD = errD_real + errD_fake
optimizerD.step()
# (2) Update G network: maximize log(D(G(z)))
netG.zero_grad()
label.fill_(real_label)
output = netD(fake)
errG = criterion(output, label)
errG.backward()
D_G_z2 = output.mean().item()
optimizerG.step()
## log performance
if i % args.log == 0:
print('Epoch [%d/%d] .. Batch [%d/%d] .. Loss_D: %.4f .. Loss_G: %.4f .. D(x): %.4f .. D(G(z)): %.4f / %.4f'
% (epoch, args.epochs, i, len(X_training), errD.data, errG.data, D_x, D_G_z1, D_G_z2))
print('*'*100)
print('End of epoch {}'.format(epoch))
print('*'*100)
if epoch % args.save_imgs_every == 0:
fake = netG(fixed_noise).detach()
vutils.save_image(fake, '%s/dcgan_%s_fake_epoch_%03d.png' % (args.results_folder, args.dataset, epoch), normalize=True, nrow=20)
if epoch % args.save_ckpt_every == 0:
torch.save(netG.state_dict(), os.path.join(args.results_folder, 'netG_dcgan_%s_epoch_%s.pth'%(args.dataset, epoch)))
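# --- Editorial sketch (an addition, not part of the original file): dcgan()
# only reads attributes off "args", so outside the repo's CLI it can be
# driven with a plain namespace. All values below are illustrative, and
# netG/netD/train_images stand for user-supplied models and data.
#
# from types import SimpleNamespace
# args = SimpleNamespace(device='cuda:0', nz=100, num_gen_images=400,
#                        lrD=2e-4, lrG=2e-4, beta1=0.5, epochs=25,
#                        batchSize=64, log=100, save_imgs_every=1,
#                        save_ckpt_every=5, results_folder='./results',
#                        dataset='mnist')
# dcgan({'X_train': train_images}, netG, netD, args)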
def use_loss_fn2(first_term_loss, genFGen2, args, model, genFGen3, xData):
"""
first_term_loss = compute_loss2(genFGen2, args, model)
#first_term_loss2 = compute_loss2(genFGen2, args, model)
#first_term_loss = torch.log(first_term_loss2/(1.0-first_term_loss2))
#first_term_loss = torch.log(first_term_loss2)
#print('')
#print(first_term_loss)
#mu = torch.from_numpy(np.array([2.805741, -0.00889241], dtype="float32")).to(device)
#S = torch.from_numpy(np.array([[pow(0.3442525,2), 0.0], [0.0, pow(0.35358343,2)]], dtype="float32")).to(device)
#storeAll = torch.from_numpy(np.array(0.0, dtype="float32")).to(device)
#toUse_storeAll = torch.distributions.MultivariateNormal(loc=mu, covariance_matrix=S)
#for loopIndex_i in range(genFGen2.size()[0]):
# storeAll += torch.exp(toUse_storeAll.log_prob(genFGen2[loopIndex_i:1 + loopIndex_i, :].squeeze(0)))
#storeAll /= genFGen2.size()[0]
#print(storeAll)
#print('')
#print('')
#print(compute_loss2(mu.unsqueeze(0), args, model))
#print(torch.exp(toUse_storeAll.log_prob(mu)))
#print('')
#first_term_loss = storeAll
xData = toy_data.inf_train_gen(args.data, batch_size=args.batch_size)
xData = torch.from_numpy(xData).type(torch.float32).to(device)
#var2 = []
#for i in genFGen2:
# var1 = []
# for j in xData:
# new_stuff = torch.dist(i, j, 2) # this is a tensor
# var1.append(new_stuff.unsqueeze(0))
# var1_tensor = torch.cat(var1)
# second_term_loss2 = torch.min(var1_tensor) / args.batch_size
# var2.append(second_term_loss2.unsqueeze(0))
#var2_tensor = torch.cat(var2)
#second_term_loss = torch.mean(var2_tensor) / args.batch_size
#second_term_loss *= 100.0
#print('')
#print(second_term_loss)
# If you know in advance the size of the final tensor, you can allocate
# an empty tensor beforehand and fill it in the for loop.
#x = torch.empty(size=(len(items), 768))
#for i in range(len(items)):
# x[i] = calc_result
#print(len(genFGen2))
#print(genFGen2.shape[0])
# len(.) and not .shape[0]
#print(len(xData))
#print(xData.shape[0])
# Use len(.) and not .shape[0]
#second_term_loss = torch.empty(size=(len(genFGen2), len(xData))).to(device)
#second_term_loss = torch.empty(size=(len(genFGen2), len(xData)), device=device, requires_grad=True)
#second_term_loss3 = torch.empty(size=(len(genFGen2), len(xData)), device=device, requires_grad=True)
second_term_loss3 = torch.empty(size=(len(genFGen2), len(xData)), device=device, requires_grad=False)
for i in range(len(genFGen2)):
for j in range(len(xData)):
#second_term_loss[i, j] = torch.dist(genFGen2[i,:], xData[j,:], 2)
#second_term_loss[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)
#second_term_loss3[i, j] = torch.tensor(0.1, requires_grad=True)
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1).requires_grad_()
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1).requires_grad_()
second_term_loss3[i, j] = (torch.dist(genFGen2[i, :], xData[j, :], 2)**2).requires_grad_()
#second_term_loss[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 2)**2
#second_term_loss2, _ = torch.min(second_term_loss, 1)
second_term_loss2, _ = torch.min(second_term_loss3, 1)
second_term_loss = 500000.0 * torch.mean(second_term_loss2) / (args.batch_size**2)
#second_term_loss = torch.atan(torch.mean(second_term_loss2) / (args.batch_size ** 2)) / (0.5 * math.pi)
#print(second_term_loss)
#print('')
print('')
print(first_term_loss)
print(second_term_loss)
#third_term_loss = torch.from_numpy(np.array(0.0, dtype='float32')).to(device)
#for i in range(args.batch_size):
# for j in range(args.batch_size):
# if i != j:
# # third_term_loss += ((np.linalg.norm(genFGen3[i,:].cpu().detach().numpy()-genFGen3[j,:].cpu().detach().numpy())) / (np.linalg.norm(genFGen2[i,:].cpu().detach().numpy()-genFGen2[j,:].cpu().detach().numpy())))
#
# # third_term_loss += ((torch.norm(genFGen3[i,:]-genFGen3[j,:], 2)) / (torch.norm(genFGen2[i,:]-genFGen2[j,:], 2)))
# # third_term_loss += ((torch.norm(genFGen3[i,:]-genFGen3[j,:])) / (torch.norm(genFGen2[i,:]-genFGen2[j,:])))
#
# # third_term_loss += ((torch.norm(genFGen3[i,:] - genFGen3[j,:])) / (torch.norm(genFGen2[i,:] - genFGen2[j,:])))
# third_term_loss += ((torch.dist(genFGen3[i, :], genFGen3[j, :], 2)) / (torch.dist(genFGen2[i, :], genFGen2[j, :], 2)))
# third_term_loss /= (args.batch_size - 1)
#third_term_loss /= args.batch_size
##third_term_loss *= 1000.0
genFGen3 = torch.randn([args.batch_size, 2], device=device, requires_grad=True)
#third_term_loss = torch.from_numpy(np.array(0.0, dtype='float32')).to(device)
third_term_loss3 = torch.empty(size=(args.batch_size, args.batch_size), device=device, requires_grad=False)
for i in range(args.batch_size):
for j in range(args.batch_size):
if i != j:
# third_term_loss += ((np.linalg.norm(genFGen3[i,:].cpu().detach().numpy()-genFGen3[j,:].cpu().detach().numpy())) / (np.linalg.norm(genFGen2[i,:].cpu().detach().numpy()-genFGen2[j,:].cpu().detach().numpy())))
# third_term_loss += ((torch.norm(genFGen3[i,:]-genFGen3[j,:], 2)) / (torch.norm(genFGen2[i,:]-genFGen2[j,:], 2)))
# third_term_loss += ((torch.norm(genFGen3[i,:]-genFGen3[j,:])) / (torch.norm(genFGen2[i,:]-genFGen2[j,:])))
# third_term_loss += ((torch.norm(genFGen3[i,:] - genFGen3[j,:])) / (torch.norm(genFGen2[i,:] - genFGen2[j,:])))
#third_term_loss += ((torch.dist(genFGen3[i, :], genFGen3[j, :], 2)) / (torch.dist(genFGen2[i, :], genFGen2[j, :], 2)))
#third_term_loss += ((torch.dist(genFGen3[i, :], genFGen3[j, :], 2)) / (torch.dist(genFGen2[i, :], genFGen2[j, :], 2)))
#third_term_loss3[i][j] = ((torch.dist(genFGen3[i, :], genFGen3[j, :], 2).requires_grad_()) / (torch.dist(genFGen2[i, :], genFGen2[j, :], 2).requires_grad_()))
third_term_loss3[i][j] = ((torch.dist(genFGen3[i, :], genFGen3[j, :], 2).requires_grad_()) / (torch.dist(genFGen2[i, :], genFGen2[j, :], 2).requires_grad_()))
#third_term_loss /= (args.batch_size - 1)
#third_term_loss2 = third_term_loss3 / (args.batch_size - 1)
third_term_loss2 = torch.mean(third_term_loss3, 1)
#third_term_loss /= args.batch_size
#third_term_loss = third_term_loss2 / args.batch_size
third_term_loss = torch.mean(third_term_loss2)
#third_term_loss *= 1000.0
print(third_term_loss)
print('')
#return first_term_loss + second_term_loss + third_term_loss
#return first_term_loss + second_term_loss
#return second_term_loss
#return first_term_loss + second_term_loss
return first_term_loss + second_term_loss + third_term_loss
"""
#first_term_loss = compute_loss2(genFGen2, args, model)
#first_term_loss2 = compute_loss2(genFGen2, args, model)
#first_term_loss = torch.log(first_term_loss2 / (1.0 - first_term_loss2))
#first_term_loss = compute_loss2(genFGen2, args, model)
#first_term_loss = compute_loss2(genFGen2, args, model)
#first_term_loss = compute_loss2(genFGen2, args, model)
#print('')
#print(first_term_loss)
#mu = torch.from_numpy(np.array([2.805741, -0.00889241], dtype="float32")).to(device)
#S = torch.from_numpy(np.array([[pow(0.3442525,2), 0.0], [0.0, pow(0.35358343,2)]], dtype="float32")).to(device)
#mu = torch.from_numpy(np.array([2.8093171, 1.2994107e-03], dtype="float32")).to(device)
#S = torch.from_numpy(np.array([[pow(0.35840544, 2), 0.0], [0.0, pow(0.34766033, 2)]], dtype="float32")).to(device)
#mu = torch.from_numpy(np.array([0.0, 0.0], dtype="float32")).to(device)
#S = torch.from_numpy(np.array([[pow(1.0,2), 0.0], [0.0, pow(1.0,2)]], dtype="float32")).to(device)
"""
#storeAll = torch.from_numpy(np.array(0.0, dtype="float32")).to(device)
storeAll = torch.empty(args.batch_size, device=device, requires_grad=False)
#toUse_storeAll = torch.distributions.MultivariateNormal(loc=mu, covariance_matrix=S)
#for loopIndex_i in range(genFGen2.size()[0]):
for loopIndex_i in range(args.batch_size):
#storeAll += torch.exp(toUse_storeAll.log_prob(genFGen2[loopIndex_i:1 + loopIndex_i, :].squeeze(0)))
#storeAll[loopIndex_i] = torch.exp(toUse_storeAll.log_prob(genFGen2[loopIndex_i:1 + loopIndex_i, :].squeeze(0)).requires_grad_())
#storeAll[loopIndex_i] = torch.exp(
# toUse_storeAll.log_prob(genFGen2[loopIndex_i:1 + loopIndex_i, :].squeeze(0)).requires_grad_())
storeAll[loopIndex_i] = 0.5 * torch.exp(toUse_storeAll.log_prob(genFGen2[loopIndex_i:1 + loopIndex_i, :].squeeze(0)).requires_grad_())\
+ 0.5 * torch.exp(toUse_storeAll2.log_prob(genFGen2[loopIndex_i:1 + loopIndex_i, :].squeeze(0)).requires_grad_())
#storeAll /= genFGen2.size()[0]
first_term_loss = torch.mean(storeAll)
"""
#print(first_term_loss)
#first_term_loss = compute_loss2(genFGen2, args, model)
#print(genFGen2)
#dasfasdfs
#first_term_loss = compute_loss2(genFGen2, args, model)
#first_term_loss = compute_loss2(genFGen2, model)
#print(xData.shape)
#print(genFGen2.shape)
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
imageStore = xData[0,:,:,:].squeeze().cpu().numpy()
#imageStore = genFGen2[0, :, :, :].squeeze().cpu().detach().numpy()
plt.imshow(imageStore)
plt.show()
"""
#pilTrans = transforms.ToTensor()
#plt.imshow(xData[1, :])
#first_term_loss = compute_loss2(genFGen2, model)
#first_term_loss = compute_loss2(xData, model)
#first_term_loss = compute_loss2(genFGen2, model)
#first_term_loss = compute_loss2(genFGen2, model)
#first_term_loss = compute_loss2(genFGen2, model)
#first_term_loss = compute_loss2(genFGen2, model)
#first_term_loss = compute_loss2(genFGen2, model)
#first_term_loss = compute_loss2(xData, model)
#print(xData)
#print(genFGen2)
#print(genFGen2.shape)
#print(xData.shape)
#print(compute_loss2(genFGen2, model))
#print(compute_loss2(xData, model))
#print(compute_loss(xData, model))
#print(compute_loss(xData, model).item())
# (tensor(0.9740, device='cuda:0', grad_fn=<DivBackward0>), tensor([0.], device='cuda:0'),
# tensor(-1139.7253, device='cuda:0'), tensor(4957.8486, device='cuda:0'))
#print(computeLoss(genFGen2, model))
#print(computeLoss(xData, model))
#first_term_loss = compute_loss2(genFGen2, model)
#first_term_loss = compute_loss2(genFGen2, model)
#first_term_loss = compute_loss2(genFGen2, model)
#first_term_loss = compute_loss2(genFGen2, model)
#first_term_loss = computeLoss(genFGen2, model)
#print(genFGen2.shape)
#print(first_term_loss)
#first_term_loss.retain_grad()
#first_term_loss.retain_grad()
#first_term_loss.retain_grad()
# (?)
#first_term_loss.retain_grad()
# (?)
#print(first_term_loss)
#print('')
"""
second_term_loss32 = torch.empty(args.batch_size, device=device, requires_grad=False)
for i in range(args.batch_size):
second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_() ** 2
second_term_loss32[i] = torch.min(second_term_loss22)
second_term_loss2 = torch.mean(second_term_loss32)
"""
#print(first_term_loss)
#print(compute_loss2(mu.unsqueeze(0), args, model))
#print(torch.exp(toUse_storeAll.log_prob(mu)))
#first_term_loss = storeAll
#xData = toy_data.inf_train_gen(args.data, batch_size=args.batch_size)
#xData = torch.from_numpy(xData).type(torch.float32).to(device)
#xData = torch.empty((args.batch_size, 2), device=device)
#xData[:args.batch_size//2, :] = toUse_storeAll.sample((args.batch_size//2,))
#xData[args.batch_size//2:, :] = toUse_storeAll2.sample((args.batch_size//2,))
"""
xData = torch.empty((args.batch_sizeM, 2), device=device)
xData[:args.batch_sizeM // 2, :] = toUse_storeAll.sample((args.batch_sizeM // 2,))
xData[args.batch_sizeM // 2:, :] = toUse_storeAll2.sample((args.batch_sizeM // 2,))
"""
#mu = torch.from_numpy(np.array([2.82507515, 1.92882611e-04 + 0.8], dtype="float32")).to(device)
#S = torch.from_numpy(np.array([[pow(0.07166782, 2), 0.0], [0.0, pow(0.06917527, 2)]], dtype="float32")).to(device)
#mu2 = torch.from_numpy(np.array([2.82507515, 1.92882611e-04 - 0.8], dtype="float32")).to(device)
#toUse_storeAll = torch.distributions.MultivariateNormal(loc=mu, covariance_matrix=S)
#toUse_storeAll2 = torch.distributions.MultivariateNormal(loc=mu2, covariance_matrix=S)
#print(xData.shape, torch.mean(xData), torch.std(xData))
# Naive O(batch^2) nearest-neighbour distance (disabled):
#var2 = []
#for i in genFGen2:
#    var1 = []
#    for j in xData:
#        var1.append(torch.dist(i, j, 2).unsqueeze(0))
#    var1_tensor = torch.cat(var1)
#    var2.append((torch.min(var1_tensor) / args.batch_size).unsqueeze(0))
#var2_tensor = torch.cat(var2)
#second_term_loss = 100.0 * torch.mean(var2_tensor) / args.batch_size
# If the size of the final tensor is known in advance, preallocate an empty
# tensor and fill it inside the loop:
#x = torch.empty(size=(len(items), 768))
#for i in range(len(items)):
#    x[i] = calc_result
# Note: len(.) equals .shape[0] for these batch tensors.
"""
#second_term_loss = torch.empty(size=(len(genFGen2), len(xData))).to(device)
#second_term_loss = torch.empty(size=(len(genFGen2), len(xData)), device=device, requires_grad=True)
#second_term_loss3 = torch.empty(size=(len(genFGen2), len(xData)), device=device, requires_grad=True)
#second_term_loss3 = torch.empty(size=(len(genFGen2), len(xData)), device=device, requires_grad=False)
second_term_loss3 = torch.empty(size=(args.batch_size, args.batch_size), device=device, requires_grad=False)
#for i in range(len(genFGen2)):
for i in range(args.batch_size):
#for j in range(len(xData)):
for j in range(args.batch_size):
#second_term_loss[i, j] = torch.dist(genFGen2[i,:], xData[j,:], 2)
#second_term_loss[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)
#second_term_loss3[i, j] = torch.tensor(0.1, requires_grad=True)
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1)
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1).requires_grad_()
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 1).requires_grad_()
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 2).requires_grad_()**2
#second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 2).requires_grad_()**2
second_term_loss3[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 2).requires_grad_()
#second_term_loss[i, j] = torch.dist(genFGen2[i, :], xData[j, :], 2)**2
#second_term_loss2, _ = torch.min(second_term_loss, 1)
second_term_loss2, _ = torch.min(second_term_loss3, 1)
#second_term_loss = 5000.0 * torch.mean(second_term_loss2) / (args.batch_size**2)
#second_term_loss = lambda1 * torch.mean(second_term_loss2) / (args.batch_size ** 2)
#second_term_loss = lambda1 * torch.mean(second_term_loss2)
second_term_loss = torch.mean(second_term_loss2)
#print(second_term_loss)
#print('')
print('')
print(first_term_loss)
print(second_term_loss)
print('')
"""
# Toy inputs for checking the distance terms by hand:
#args.batch_size = 2
#genFGen2 = torch.from_numpy(np.array([[3, 0], [2, 0]], dtype="float32")).to(device)
#xData = torch.from_numpy(np.array([[1, 0], [0, 1]], dtype="float32")).to(device)
#import timeit
#start = timeit.default_timer()
#stop = timeit.default_timer()
#print('Time: ', stop - start)
"""
second_term_loss32 = torch.empty(args.batch_size, device=device, requires_grad=False)
for i in range(args.batch_size):
#second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p='fro', dim=1).requires_grad_()
#second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_()
second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_()**2
#second_term_loss22 = torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_()
#print(second_term_loss22.shape)
second_term_loss32[i] = torch.min(second_term_loss22)
#print(second_term_loss32)
#print(second_term_loss32.shape)
#print(torch.norm(genFGen2 - xData, p=None, dim=0).shape)
#second_term_loss22 = torch.min(second_term_loss32)
#print(second_term_loss22)
#print(second_term_loss22.shape)
second_term_loss2 = torch.mean(second_term_loss32)
#second_term_loss2 = 7.62939453125 * torch.mean(second_term_loss32)
#print(second_term_loss2)
#print(second_term_loss2.shape)
"""
#print(second_term_loss2)
#distances = torch.norm(vertices - point_locs, p=2, dim=1)
#distances = torch.sqrt((vertices - point_locs).pow(2).sum(1))
# Flatten images to vectors (64x64 here; 28*28 for MNIST-sized inputs).
#xData = xData.view(-1, 28 * 28)
xData = xData.view(-1, 64 * 64)
genFGen2 = genFGen2.view(-1, 64 * 64)
genFGen3 = genFGen3.squeeze()
#print(xData.shape, genFGen2.shape, genFGen3.shape)
device = args.device
#genFGen3 = genFGen3.view(-1, 64 * 64)
#xData = torch.transpose(xData, 0, 1)
#genFGen2 = torch.transpose(genFGen2, 0, 1)
#genFGen3 = torch.transpose(genFGen3, 0, 1)
# Disabled: preallocated per-sample min-distance version of the second term.
#second_term_loss32 = torch.empty(args.batchSize, device=device, requires_grad=False)
#for i in range(args.batchSize):
#    second_term_loss32[i] = torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)
#second_term_loss2 = torch.mean(second_term_loss32)
#xData = xData[:15000, :]
#xData.requires_grad = True
# Second term: mean over the batch of the minimum squared distance from each
# generated sample to the real data, accumulated in a scalar.
second_term_loss2 = torch.zeros(1, device=device, requires_grad=False)
for i in range(args.batchSize):
    # Minimum squared Euclidean distance from generated sample i to the real batch.
    second_term_loss2 += torch.min(torch.norm(genFGen2[i, :] - xData, p=None, dim=1).requires_grad_() ** 2)
    # Disabled variants: torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2,
    # and detaching genFGen2[i, :] beyond the first few samples to bound memory.
second_term_loss2 /= args.batchSize
#second_term_loss2 = max(second_term_loss2, 1e-8)
second_term_loss2 = second_term_loss2.squeeze()
#print(second_term_loss2, second_term_loss2.requires_grad)
#print(torch.isnan(second_term_loss2).any())
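# A minimal vectorized sketch (an assumption, not the training code used here):
# torch.cdist computes all pairwise Euclidean distances in one call, replacing
# the per-sample Python loop above.
"""
pairwiseDist = torch.cdist(genFGen2, xData, p=2.0)          # (batch, batch)
second_term_loss2_vec = (pairwiseDist.min(dim=1).values ** 2).mean()
"""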
'''
# Disabled: accumulate-in-scalar second term, detaching all but the first few
# samples to bound memory use.
second_term_loss2 = torch.empty(1, device=device, requires_grad=False)
for i in range(args.batchSize):
    if i < 6:
        second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)
    else:
        second_term_loss2 += torch.min(torch.sqrt((genFGen2[i, :].detach() - xData).pow(2).sum(1)) ** 2)
second_term_loss2 /= args.batchSize
print(second_term_loss2)
print(second_term_loss2.requires_grad)
'''
'''
# Disabled: preallocated per-sample version with partial detaching, plus the
# diagnostic prints that compared the sqrt/pow-sum and torch.norm formulations.
second_term_loss32 = torch.empty(args.batchSize, device=device, requires_grad=False)
for i in range(args.batchSize):
    if i < 6:
        second_term_loss32[i] = torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)
    else:
        second_term_loss32[i] = torch.min(torch.sqrt((genFGen2[i, :].detach() - xData).pow(2).sum(1)) ** 2)
second_term_loss2 = torch.mean(second_term_loss32)
print(second_term_loss2)
print(second_term_loss2.requires_grad)
'''
'''
# Disabled: preallocated second term without detaching.
second_term_loss32 = torch.empty(args.batchSize, device=device, requires_grad=False)
for i in range(args.batchSize):
    second_term_loss32[i] = torch.min(torch.sqrt((genFGen2[i, :] - xData).pow(2).sum(1)) ** 2)
second_term_loss2 = torch.mean(second_term_loss32)
'''
#second_term_loss2.retain_grad()
#print(first_term_loss, second_term_loss2)
#print(genFGen2.shape, genFGen3.shape, xData.shape)
# Third term: diversity regularizer -- for each sample, the mean ratio of
# latent-space distances (genFGen3) to image-space distances (genFGen2).
third_term_loss32 = torch.empty(args.batchSize, device=device, requires_grad=False)
for i in range(args.batchSize):
    # Disabled diagnostics: shape checks and torch.norm / sqrt-pow-sum
    # comparisons of the latent and image distances, e.g.:
    #third_term_loss22 = (torch.norm(genFGen3[i, :] - genFGen3, p=None, dim=1).requires_grad_()) / (
    #    1.0e-17 + torch.norm(genFGen2[i, :] - genFGen2, p=None, dim=1).requires_grad_())
    # Ratio of latent to image pairwise distances; 1e-17 guards against 0/0.
    third_term_loss22 = (torch.sqrt(1e-17 + (genFGen3[i, :] - genFGen3).pow(2).sum(1)).requires_grad_()) / (
        1e-17 + torch.sqrt(1e-17 + (genFGen2[i, :] - genFGen2).pow(2).sum(1)).requires_grad_())
    third_term_loss32[i] = torch.mean(third_term_loss22)
# Disabled weightings tried: 0.01, 0.025, 0.1, 0.25 * torch.mean(third_term_loss32).
third_term_loss12 = torch.mean(third_term_loss32)
#third_term_loss12.retain_grad()
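# A minimal vectorized sketch of the same diversity ratio (assumes genFGen2 and
# genFGen3 are both flattened to (batch, dim), as above), again via torch.cdist.
"""
distLatent = torch.cdist(genFGen3, genFGen3, p=2.0)
distImage = torch.cdist(genFGen2, genFGen2, p=2.0)
third_term_loss12_vec = (distLatent / (1e-17 + distImage)).mean()
"""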
#return first_term_loss + second_term_loss2 + third_term_loss12, xData
#print(first_term_loss, second_term_loss2, third_term_loss12)
#torch.set_printoptions(sci_mode=False)
"""
#print(torch.isnan(first_term_loss))
if torch.isnan(first_term_loss):
first_term_loss = 0.0
"""
#if torch.isnan(first_term_loss):
#    return second_term_loss2 + third_term_loss12
#print(first_term_loss.item(), second_term_loss2.item(), third_term_loss12.item())
# Disabled total-loss weightings tried here include (1, 1, 1), (1, 0.001, 0.1),
# (1, 10.0, 0.1), (1, 100.0, 0.1), (1, 0.3, 0.025) and (1, 0.1, 0.01).
# Total: likelihood term plus weighted data-fidelity and diversity terms.
total_totTotalLoss = first_term_loss + 0.01 * second_term_loss2 + 0.01 * third_term_loss12
#print(total_totTotalLoss, total_totTotalLoss.requires_grad)
#total_totTotalLoss.retain_grad()
return total_totTotalLoss, first_term_loss, second_term_loss2, third_term_loss12
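# A minimal usage sketch (assumption: called from a training step with the same
# arguments as the evaluation code further below; optimizerG is the generator
# optimizer created in presgan). Only the total loss is backpropagated.
"""
loss, l1, l2, l3 = use_loss_fn2(p_probP, varOutOut, args, netG2, varInIn, real_cpu)
optimizerG.zero_grad()
loss.backward()
optimizerG.step()
print(l1.item(), l2.item(), l3.item())
"""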
def presgan(dat, netG, netD, log_sigma, args, netG2):
device = args.device
X_training = dat['X_train'].to(device)
fixed_noise = torch.randn(args.num_gen_images, args.nz, 1, 1, device=device)
optimizerD = optim.Adam(netD.parameters(), lr=args.lrD, betas=(args.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=args.lrG, betas=(args.beta1, 0.999))
sigma_optimizer = optim.Adam([log_sigma], lr=args.sigma_lr, betas=(args.beta1, 0.999))
if args.restrict_sigma:
    logsigma_min = math.log(math.exp(args.sigma_min) - 1.0)
    logsigma_max = math.log(math.exp(args.sigma_max) - 1.0)
stepsize = args.stepsize_num / args.nz
#bsz = args.batchSize
bsz = len(X_training)
# netG2 is only evaluated inside the loss; freeze its parameters.
netG2.eval()
for param in netG2.parameters():
    param.requires_grad = False
#print(X_training.shape)
#print(len(range(0, len(X_training), bsz)))
"""
netG.eval()
for param in netG.parameters():
param.requires_grad = False
"""
#netG.eval()
#for param in netG.parameters():
# param.requires_grad = False
# netG.eval()
# netG.eval()
#print(X_training.shape)
#sadfasdfasz
#asdfasdf
#asdfasdfs
#print(X_training.shape)
#print(X_training.shape)
# print(x.shape)
# print(y.shape)
#print(X_training.shape)
#sadfsadfdzs
#print(X_training.shape)
#asdfasdfsdf
#safa
#asdfs
#netG.eval()
#for param in netG.parameters():
# param.requires_grad = False
#netG2.eval()
#for param in netG2.parameters():
# param.requires_grad = False
# Per-epoch traces of the total loss and its three components.
loss_theLoss = torch.empty(args.epochs, device=device)
loss_theLoss0 = torch.empty(args.epochs, device=device)
loss_theLoss1 = torch.empty(args.epochs, device=device)
loss_theLoss2 = torch.empty(args.epochs, device=device)
loss_theLoss3 = torch.empty(args.epochs, device=device)
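# A minimal sketch of how these traces might be filled (assumption: an epoch
# loop later in this function, with the total loss and per-term losses in
# scope under the names used in the evaluation block further below).
"""
for epoch in range(args.epochs):
    loss_theLoss[epoch] = g_error.detach()
    loss_theLoss1[epoch] = firstOnly_lossGen.detach()
    loss_theLoss2[epoch] = secondOnly_lossGen.detach()
    loss_theLoss3[epoch] = thirdOnly_lossGen.detach()
"""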
#args.epochs = 1
#print(dat['X_train'].shape, dat['Y_train'].shape, dat['X_test'].shape, dat['Y_test'].shape)
#X_training = dat['X_test'].to(device)
#X_training, X_test, Y_training, Y_test = data.stack_mnist('./data/stackedmnist', args.Ntrain, args.Ntest, args.imageSize)
#X_training = torch.mean(X_training, 1, keepdim=True)
#from keras.datasets import fashion_mnist
#(X_training, _), (_, _) = fashion_mnist.load_data()
#bsz = 1
"""
train_loader = torch.utils.data.DataLoader(
datasets.FashionMNIST(
args.dataroot, train=True, transform=transforms.Compose([
transforms.Resize(args.imageSize),
transforms.ToTensor(),
])
),
batch_size=args.batchSize,
shuffle=True,
num_workers=args.workers,
)
test_loader = torch.utils.data.DataLoader(
datasets.FashionMNIST(
args.dataroot, train=False, transform=transforms.Compose([
transforms.Resize(args.imageSize),
transforms.ToTensor(),
])
),
batch_size=args.batchSize,
shuffle=False,
num_workers=args.workers,
)
"""
#X_training = train_loader.dataset.data  # (a DataLoader exposes its dataset via .dataset)
"""
data_path = './data/'
imgsize = args.imageSize
nc = 1
transform = transforms.Compose([
transforms.Resize(imgsize),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
mnist = torchvision.datasets.FashionMNIST(root=data_path, download=True, transform=transform, train=True)
train_loader = DataLoader(mnist, batch_size=1, shuffle=True, drop_last=True, num_workers=0)
X_training = torch.zeros(len(train_loader), nc, imgsize, imgsize)
Y_training = torch.zeros(len(train_loader))
for i, x in enumerate(train_loader):
X_training[i, :, :, :] = x[0]
Y_training[i] = x[1]
if i % 10000 == 0:
print('Loading data... {}/{}'.format(i, len(train_loader)))
mnist = torchvision.datasets.FashionMNIST(root=data_path, download=True, transform=transform, train=False)
test_loader = DataLoader(mnist, batch_size=1, shuffle=False, drop_last=True, num_workers=0)
X_test = torch.zeros(len(test_loader), nc, imgsize, imgsize)
Y_test = torch.zeros(len(test_loader))
for i, x in enumerate(test_loader):
X_test[i, :, :, :] = x[0]
Y_test[i] = x[1]
if i % 1000 == 0:
print('i: {}/{}'.format(i, len(test_loader)))
Y_training = Y_training.type('torch.LongTensor')
Y_test = Y_test.type('torch.LongTensor')
dat = {'X_train': X_training, 'Y_train': Y_training, 'X_test': X_test, 'Y_test': Y_test, 'nc': nc}
X_training = dat['X_test'].to(device)
"""
# old: torch.Size([6662, 1, 64, 64])
#X_training = dat['X_train'].to(device)
#X_training = torchvision.datasets.FashionMNIST('', train=False, transform=None, target_transform=None, download=True)
"""
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
from matplotlib.path import Path
from matplotlib.patches import PathPatch
fig, ax = plt.subplots()
im = ax.imshow(X_training[0, 0, :, :].cpu())
plt.show()
"""
#print(X_training.shape)
print('')
# Train G(z) on digits 1-9 only (one class held out), then detect abnormal /
# out-of-distribution (OoD) data such as Fashion-MNIST.
"""
imgsize = args.imageSize
#data_path = args.dataroot
#print(args.dataroot)
#asdfasdfassadf
args.dataroot = 'dataset'
data_path = args.dataroot
#data_path = 'data2'
nc = 1
transform = transforms.Compose([
transforms.Resize(imgsize),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
mnist = torchvision.datasets.MNIST(root=data_path, download=True, transform=transform, train=True)
train_loader = DataLoader(mnist, batch_size=1, shuffle=True, drop_last=True, num_workers=0)
X_training = torch.zeros(len(train_loader), nc, imgsize, imgsize)
Y_training = torch.zeros(len(train_loader))
for i, x in enumerate(train_loader):
X_training[i, :, :, :] = x[0]
Y_training[i] = x[1]
if i % 10000 == 0:
print('Loading data... {}/{}'.format(i, len(train_loader)))
mnist = torchvision.datasets.MNIST(root=data_path, download=True, transform=transform, train=False)
test_loader = DataLoader(mnist, batch_size=1, shuffle=False, drop_last=True, num_workers=0)
X_test = torch.zeros(len(test_loader), nc, imgsize, imgsize)
Y_test = torch.zeros(len(test_loader))
for i, x in enumerate(test_loader):
X_test[i, :, :, :] = x[0]
Y_test[i] = x[1]
if i % 1000 == 0:
print('i: {}/{}'.format(i, len(test_loader)))
Y_training = Y_training.type('torch.LongTensor')
Y_test = Y_test.type('torch.LongTensor')
dat = {'X_train': X_training, 'Y_train': Y_training, 'X_test': X_test, 'Y_test': Y_test, 'nc': nc}
#X_training = dat['X_train'].to(device)
X_training = dat['X_test'].to(device)
"""
'''
data_path = 'dataset2'
imgsize = args.imageSize
nc = 1
transform = transforms.Compose([
transforms.Resize(imgsize),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
mnist = torchvision.datasets.FashionMNIST(root=data_path, download=True, transform=transform, train=True)
train_loader = DataLoader(mnist, batch_size=1, shuffle=True, drop_last=True, num_workers=0)
X_training = torch.zeros(len(train_loader), nc, imgsize, imgsize)
Y_training = torch.zeros(len(train_loader))
for i, x in enumerate(train_loader):
X_training[i, :, :, :] = x[0]
Y_training[i] = x[1]
if i % 10000 == 0:
print('Loading data... {}/{}'.format(i, len(train_loader)))
mnist = torchvision.datasets.FashionMNIST(root=data_path, download=True, transform=transform, train=False)
test_loader = DataLoader(mnist, batch_size=1, shuffle=False, drop_last=True, num_workers=0)
X_test = torch.zeros(len(test_loader), nc, imgsize, imgsize)
Y_test = torch.zeros(len(test_loader))
for i, x in enumerate(test_loader):
X_test[i, :, :, :] = x[0]
Y_test[i] = x[1]
if i % 1000 == 0:
print('i: {}/{}'.format(i, len(test_loader)))
Y_training = Y_training.type('torch.LongTensor')
Y_test = Y_test.type('torch.LongTensor')
dat = {'X_train': X_training, 'Y_train': Y_training, 'X_test': X_test, 'Y_test': Y_test, 'nc': nc}
#X_training = dat['X_train'].to(device)
X_training = dat['X_test'].to(device)
'''
"""
data_path = 'dataset3'
imgsize = args.imageSize
nc = 1
transform = transforms.Compose([
transforms.Resize(imgsize),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
mnist = torchvision.datasets.KMNIST(root=data_path, download=True, transform=transform, train=True)
train_loader = DataLoader(mnist, batch_size=1, shuffle=True, drop_last=True, num_workers=0)
X_training = torch.zeros(len(train_loader), nc, imgsize, imgsize)
Y_training = torch.zeros(len(train_loader))
for i, x in enumerate(train_loader):
X_training[i, :, :, :] = x[0]
Y_training[i] = x[1]
if i % 10000 == 0:
print('Loading data... {}/{}'.format(i, len(train_loader)))
mnist = torchvision.datasets.KMNIST(root=data_path, download=True, transform=transform, train=False)
test_loader = DataLoader(mnist, batch_size=1, shuffle=False, drop_last=True, num_workers=0)
X_test = torch.zeros(len(test_loader), nc, imgsize, imgsize)
Y_test = torch.zeros(len(test_loader))
for i, x in enumerate(test_loader):
X_test[i, :, :, :] = x[0]
Y_test[i] = x[1]
if i % 1000 == 0:
print('i: {}/{}'.format(i, len(test_loader)))
Y_training = Y_training.type('torch.LongTensor')
Y_test = Y_test.type('torch.LongTensor')
dat = {'X_train': X_training, 'Y_train': Y_training, 'X_test': X_test, 'Y_test': Y_test, 'nc': nc}
X_training = dat['X_train'].to(device)
#X_training = dat['X_test'].to(device)
print(X_training.shape)
"""
'''
data_path = 'dataset4'
imgsize = args.imageSize
nc = 1
transform = transforms.Compose([
transforms.Resize(imgsize),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
mnist = torchvision.datasets.QMNIST(root=data_path, download=True, transform=transform, train=True)
train_loader = DataLoader(mnist, batch_size=1, shuffle=True, drop_last=True, num_workers=0)
X_training = torch.zeros(len(train_loader), nc, imgsize, imgsize)
Y_training = torch.zeros(len(train_loader))
for i, x in enumerate(train_loader):
X_training[i, :, :, :] = x[0]
Y_training[i] = x[1]
if i % 10000 == 0:
print('Loading data... {}/{}'.format(i, len(train_loader)))
mnist = torchvision.datasets.QMNIST(root=data_path, download=True, transform=transform, train=False)
test_loader = DataLoader(mnist, batch_size=1, shuffle=False, drop_last=True, num_workers=0)
X_test = torch.zeros(len(test_loader), nc, imgsize, imgsize)
Y_test = torch.zeros(len(test_loader))
for i, x in enumerate(test_loader):
X_test[i, :, :, :] = x[0]
Y_test[i] = x[1]
if i % 1000 == 0:
print('i: {}/{}'.format(i, len(test_loader)))
Y_training = Y_training.type('torch.LongTensor')
Y_test = Y_test.type('torch.LongTensor')
dat = {'X_train': X_training, 'Y_train': Y_training, 'X_test': X_test, 'Y_test': Y_test, 'nc': nc}
#X_training = dat['X_train'].to(device)
X_training = dat['X_test'].to(device)
print(X_training.shape)
'''
"""
nc = 1
transform = transforms.Compose([
transforms.Resize(imgsize),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# torchvision's EMNIST also requires a split argument; 'digits' is assumed here.
mnist = torchvision.datasets.EMNIST(root=data_path, split='digits', download=True, transform=transform, train=True)
train_loader = DataLoader(mnist, batch_size=1, shuffle=True, drop_last=True, num_workers=0)
X_training = torch.zeros(len(train_loader), nc, imgsize, imgsize)
Y_training = torch.zeros(len(train_loader))
for i, x in enumerate(train_loader):
X_training[i, :, :, :] = x[0]
Y_training[i] = x[1]
if i % 10000 == 0:
print('Loading data... {}/{}'.format(i, len(train_loader)))
mnist = torchvision.datasets.EMNIST(root=data_path, split='digits', download=True, transform=transform, train=False)
test_loader = DataLoader(mnist, batch_size=1, shuffle=False, drop_last=True, num_workers=0)
X_test = torch.zeros(len(test_loader), nc, imgsize, imgsize)
Y_test = torch.zeros(len(test_loader))
for i, x in enumerate(test_loader):
X_test[i, :, :, :] = x[0]
Y_test[i] = x[1]
if i % 1000 == 0:
print('i: {}/{}'.format(i, len(test_loader)))
Y_training = Y_training.type('torch.LongTensor')
Y_test = Y_test.type('torch.LongTensor')
dat = {'X_train': X_training, 'Y_train': Y_training, 'X_test': X_test, 'Y_test': Y_test, 'nc': nc}
X_training = dat['X_train'].to(device)
print(X_training.shape)
#X_training = dat['X_test'].to(device)
"""
"""
# dataset = datasets.MNIST(root='./data')
# idx = dataset.train_labels==1
# dataset.train_labels = dataset.train_labels[idx]
# dataset.train_data = dataset.train_data[idx]
data_path = 'dataset21'
imgsize = args.imageSize
nc = 1
transform = transforms.Compose([
transforms.Resize(imgsize),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
mnist = torchvision.datasets.MNIST(root=data_path, download=True, transform=transform, train=True)
# Keep every class except digit 2 (idx = mnist.targets == 2 selects the complement).
idx = mnist.targets != 2
mnist.targets = mnist.targets[idx]
mnist.data = mnist.data[idx]
train_loader = DataLoader(mnist, batch_size=1, shuffle=True, drop_last=True, num_workers=0)
X_training = torch.zeros(len(train_loader), nc, imgsize, imgsize)
Y_training = torch.zeros(len(train_loader))
for i, x in enumerate(train_loader):
X_training[i, :, :, :] = x[0]
Y_training[i] = x[1]
if i % 10000 == 0:
print('Loading data... {}/{}'.format(i, len(train_loader)))
mnist = torchvision.datasets.MNIST(root=data_path, download=True, transform=transform, train=False)
idx = mnist.targets != 2
mnist.targets = mnist.targets[idx]
mnist.data = mnist.data[idx]
test_loader = DataLoader(mnist, batch_size=1, shuffle=False, drop_last=True, num_workers=0)
X_test = torch.zeros(len(test_loader), nc, imgsize, imgsize)
Y_test = torch.zeros(len(test_loader))
for i, x in enumerate(test_loader):
X_test[i, :, :, :] = x[0]
Y_test[i] = x[1]
if i % 1000 == 0:
print('i: {}/{}'.format(i, len(test_loader)))
Y_training = Y_training.type('torch.LongTensor')
Y_test = Y_test.type('torch.LongTensor')
dat = {'X_train': X_training, 'Y_train': Y_training, 'X_test': X_test, 'Y_test': Y_test, 'nc': nc}
X_training = dat['X_train'].to(device)
print(X_training.shape)
"""
#print(X_training.shape)
'''
# Same filtering via the deprecated train_labels / train_data attributes
# (older torchvision); otherwise identical to the targets/data version above.
'''
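# A minimal consolidated sketch of the repeated loader blocks above (an
# assumption that generalizes them; dataset_cls is any of torchvision's
# MNIST / FashionMNIST / KMNIST / QMNIST classes, drop_label is optional).
"""
def load_split(dataset_cls, data_path, imgsize, train, drop_label=None):
    transform = transforms.Compose([
        transforms.Resize(imgsize),
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))])
    ds = dataset_cls(root=data_path, download=True, transform=transform, train=train)
    if drop_label is not None:
        idx = ds.targets != drop_label
        ds.targets, ds.data = ds.targets[idx], ds.data[idx]
    loader = DataLoader(ds, batch_size=1, shuffle=train, drop_last=True, num_workers=0)
    X = torch.zeros(len(loader), 1, imgsize, imgsize)
    Y = torch.zeros(len(loader))
    for i, x in enumerate(loader):
        X[i, :, :, :] = x[0]
        Y[i] = x[1]
    return X, Y.type('torch.LongTensor')
"""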
"""
real_cpu = X_training
print('')
#print(real_cpu.shape)
p_probP = torch.zeros(1, device=device)
#varInIn = torch.randn(batch_size, args.nz, 1, 1, device=device)
#varOutOut = netG(varInIn)
# datasets reference: https://pytorch.org/docs/stable/torchvision/datasets.html
with torch.no_grad():
#varInIn = torch.randn(batch_size, args.nz, 1, 1, device=device)
#varInIn = torch.randn(1024, args.nz, 1, 1, device=device)
varInIn = torch.randn(2048, args.nz, 1, 1, device=device)
varOutOut = netG(varInIn)
g_error, firstOnly_lossGen, secondOnly_lossGen, thirdOnly_lossGen = use_loss_fn2(p_probP,
varOutOut, args, netG2,
varInIn,
real_cpu.to(device))
# ROC and AUROC on L_1
# AUROC on secondOnly_lossGen
"""
#real_cpu = X_training
'''
#real_cpu = X_training
real_cpu = X_training
# print(real_cpu.shape)
p_probP = torch.zeros(1, device=device)
# varInIn = torch.randn(batch_size, args.nz, 1, 1, device=device)
# varOutOut = netG(varInIn)
# use: https://pytorch.org/docs/stable/torchvision/datasets.html
# we use: https://pytorch.org/docs/stable/torchvision/datasets.html
with torch.no_grad():
# varInIn = torch.randn(batch_size, args.nz, 1, 1, device=device)
# varInIn = torch.randn(1024, args.nz, 1, 1, device=device)
varInIn = torch.randn(2048, args.nz, 1, 1, device=device)
varOutOut = netG(varInIn)
out = varOutOut
# g_fake_data = varOutOut
# gen_input = varInIn
gen_input = torch.randn(10000, args.nz, 1, 1, device=device)
# gen_input = varInIn
sigma_x = F.softplus(log_sigma).view(1, 1, args.imageSize, args.imageSize)
noise_eta = torch.randn_like(out)
g_fake_data = out + noise_eta * sigma_x
g_error, firstOnly_lossGen, secondOnly_lossGen, thirdOnly_lossGen = use_loss_fn2(p_probP,
varOutOut, args, netG,
varInIn,
real_cpu.to(device))
# ROC and AUROC on L_1
# AUROC on secondOnly_lossGen
'''
"""
with torch.no_grad():
    varInIn = torch.randn(2048, args.nz, 1, 1, device=device)
    varOutOut = netG(varInIn)
out = varOutOut
gen_input = torch.randn(10000, args.nz, 1, 1, device=device)
sigma_x = F.softplus(log_sigma).view(1, 1, args.imageSize, args.imageSize)
noise_eta = torch.randn_like(out)
g_fake_data = out + noise_eta * sigma_x
# Swap in the real data as the HMC target, keeping the generator samples aside.
out21 = out
out = real_cpu
g_fake_data21 = g_fake_data
noise_eta = torch.randn_like(out)
g_fake_data = out + noise_eta * sigma_x
hmc_samples, acceptRate, stepsize, _ = hmc.get_samples(
    netG, g_fake_data.detach(), gen_input.clone(), sigma_x.detach(), args.burn_in,
    args.num_samples_posterior, args.leapfrog_steps, stepsize, args.flag_adapt,
    args.hmc_learning_rate, args.hmc_opt_accept)
bsz, d = hmc_samples.size()
mean_output = netG(hmc_samples.view(bsz, d, 1, 1).to(device))
bsz = g_fake_data.size(0)
mean_output_summed = torch.zeros_like(g_fake_data)
for cnt in range(args.num_samples_posterior):
    mean_output_summed = mean_output_summed + mean_output[cnt * bsz:(cnt + 1) * bsz]
mean_output_summed = mean_output_summed / args.num_samples_posterior
c = ((g_fake_data - mean_output_summed) / sigma_x ** 2).detach()
g_error_entropy = torch.mul(c, out + sigma_x * noise_eta).mean(0).sum()
print(g_error_entropy)
# Values observed on past runs: 11004.9570, 13257.9102
# (a deliberate undefined name halted execution here during debugging)
out = out21
g_fake_data = g_fake_data21
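# Context sketch (assumption: this mirrors a PresGAN-style entropy surrogate):
# the gradient of the generator-sample entropy is estimated as
#   E[ c^T * d(G(z) + sigma*eps)/dtheta ],  c = (x_fake - E[G(z)|x_fake]) / sigma^2,
# with c treated as a constant, which is why `c` is .detach()-ed before being
# multiplied with `out + sigma_x * noise_eta` above.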
with torch.no_grad():
    # Solve w * exp(w) = exp(-g_error_entropy) for w, i.e. evaluate the Lambert W
    # function at exp(-g_error_entropy), using Newton's method on
    # f(w) = w*exp(w) - y with f'(w) = exp(w)*(1 + w).
    # (scipy.special.lambertw(myNikMy_entropy.cpu().detach().numpy()) was used
    # as the reference value in earlier runs.)
    myNikMy_entropy = torch.exp(-g_error_entropy)
    ndNdnikNikmyNikMy_entropy = torch.ones(1, device=device) * 0.5
    for _ in range(200):
        ndNdnikNikmyNikMy_entropy -= (
            (ndNdnikNikmyNikMy_entropy.clone() * torch.exp(
                ndNdnikNikmyNikMy_entropy.clone()) - myNikMy_entropy) / (
                torch.exp(ndNdnikNikmyNikMy_entropy.clone()) + (
                    ndNdnikNikmyNikMy_entropy.clone() * torch.exp(ndNdnikNikmyNikMy_entropy.clone()))))
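# Self-contained check of the Newton solve above (a sketch; SciPy is assumed
# available only for the cross-check, and nothing here is called in the pipeline):
def _lambertw_newton(y, iters=200, w0=0.5):
    import math
    w = w0
    for _ in range(iters):
        # Newton step for f(w) = w*exp(w) - y, with f'(w) = exp(w) + w*exp(w)
        w -= (w * math.exp(w) - y) / (math.exp(w) + w * math.exp(w))
    return w
# Example: _lambertw_newton(1.0) ~ 0.567143 (the omega constant), matching
# scipy.special.lambertw(1.0).real.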
'''
# Alternative: de Bruijn asymptotic expansion of the Lambert W function,
#   W(x) ~ L1 - L2 + L2/L1 + L2*(L2 - 2)/(2*L1^2) + ...,
# with L1 = log(x), L2 = log(log(x)); F.relu(.) + 1e-7 keeps the logs finite.
l1_usel1 = torch.log(F.relu(g_error_entropy) + 1e-7)
l2_usel2 = torch.log(F.relu(torch.log(F.relu(g_error_entropy) + 1e-7)) + 1e-7)
gErrorEntropy2 = l1_usel1 - l2_usel2 + (l2_usel2 / l1_usel1) + (
        (l2_usel2 * (-2 + l2_usel2)) / (2 * (l1_usel1 ** 2))) + (
        (l2_usel2 * (6 - (9 * l2_usel2) + (2 * (l2_usel2 ** 2)))) / (6 * (l1_usel1 ** 3))) + (
        (l2_usel2 * (-12 + (36 * l2_usel2) - (22 * (l2_usel2 ** 2)) + (3 * (l2_usel2 ** 3)))) / (12 * (l1_usel1 ** 4))) + (
        (l2_usel2 * (60 - (300 * l2_usel2) + (350 * (l2_usel2 ** 2)) - (125 * (l2_usel2 ** 3)) + (12 * (l2_usel2 ** 4)))) / (60 * (l1_usel1 ** 5)))
'''
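# Numeric sanity check for the expansion above (a sketch; SciPy assumed):
# from scipy.special import lambertw
# import numpy as np
# x = 1.0e6
# L1, L2 = np.log(x), np.log(np.log(x))
# approx = L1 - L2 + L2/L1 + L2*(L2 - 2)/(2*L1**2)
# print(approx, lambertw(x).real)  # both come out near 11.38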
# (A Maclaurin variant was also tried:
#  W(x) ~ x - x^2 + 1.5*x^3 - (8/3)*x^4 + (125/24)*x^5.)
p_probP = ndNdnikNikmyNikMy_entropy
print(p_probP)
g_error, firstOnly_lossGen, secondOnly_lossGen, thirdOnly_lossGen = use_loss_fn2(p_probP,
                                                                                 varOutOut, args, netG2,
                                                                                 varInIn,
                                                                                 real_cpu.to(device))
# ROC and AUROC on L_1; AUROC on secondOnly_lossGen
"""
'''
print('')
print(g_error)
print(firstOnly_lossGen)
print(secondOnly_lossGen)
print(thirdOnly_lossGen)
print('')
'''
"""
# Freeze the generator for evaluation.
netG.eval()
for param in netG.parameters():
    param.requires_grad = False
X_training = dat['X_test'].to(device)
x = X_training
y = dat['Y_test'].to(device)
args.batchSize = 1
bsz = args.batchSize
nrand = 100
args.val_batchsize = args.batchSize
losses_NIKlosses = []
loLosses_NIKlosses = []
loLosses_NIKlosses2 = []
loLosses_NIKlosses3 = []
for epoch in range(1, 1 + 1):
for i in range(0, len(X_training), bsz):
        # Optional: replace class-0 samples with a neighbouring sample instead
        # of scoring them (kept from earlier experiments):
        '''
        for i21 in range(len(y)):
            if y[i21] == 0 and i21 == 0:
                y[i21] = y[i21+1]
                x[i21, :, :, :] = x[i21+1, :, :, :]
            elif y[i21] == 0:
                y[i21] = y[i21 - 1]
                x[i21, :, :, :] = x[i21 - 1, :, :, :]
        '''
        print(i)
        genFGen2 = x
        # use: val_batchsize
        ggenFGen2 = torch.randn([args.val_batchsize, nrand], device=device)
        # (earlier scoring variant:
        #  _, firstOnly_lossGen, _, _ = use_loss_fn2(genFGen2, args, model, ggenFGen2, x))
        with torch.no_grad():
            sigma_x = F.softplus(log_sigma).view(1, 1, args.imageSize, args.imageSize)
            netD.zero_grad()
            stop = min(bsz, len(X_training[i:]))
            real_cpu = X_training[i:i + stop].to(device)
            batch_size = args.batchSize
            label = torch.full((batch_size,), real_label, device=device)
            noise_eta = torch.randn_like(real_cpu)
            noised_data = real_cpu + sigma_x.detach() * noise_eta
            # (the discriminator update from training is skipped here; only the
            # generator-side quantities are needed for scoring)
            # update G network: maximize log(D(G(z)))
            netG.zero_grad()
            sigma_optimizer.zero_grad()
            label.fill_(real_label)
            gen_input = torch.randn(batch_size, args.nz, 1, 1, device=device)
            out = netG(gen_input)
            noise_eta = torch.randn_like(out)
            g_fake_data = out + noise_eta * sigma_x
            dg_fake_decision = netD(g_fake_data)
            g_error_gan = criterion(dg_fake_decision, label)
            D_G_z2 = dg_fake_decision.mean().item()
            if args.lambda_ == 0:
                g_error_gan.backward()
                optimizerG.step()
                sigma_optimizer.step()
            else:
                # Score this test sample via HMC posterior sampling around a
                # generator sample; p_probP holds the scores used below.
                _, _, _, p_probP = hmc2.get_samples(
                    netG, netG(torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device)),
                    gen_input.clone(), sigma_x.detach(), args.burn_in,
                    args.num_samples_posterior, args.leapfrog_steps, stepsize, args.flag_adapt,
                    args.hmc_learning_rate, args.hmc_opt_accept)
                # (an earlier variant used hmc.get_samples with netG2 and a
                # .detach()-ed generator sample)
                firstOnly_lossGen2 = p_probP.mean()
                loLosses_NIKlosses2.append(firstOnly_lossGen2.item())
                # Ground truth for the detector: 1 for class 0, else 0.
                # (an earlier variant appended the labels the other way round;
                # see the flip further below)
                if y[i] == 0:
                    loLosses_NIKlosses3.append(1)
                else:
                    loLosses_NIKlosses3.append(0)
import numpy as np
print('')
import matplotlib.pyplot as plt
# ROC curve and AUC score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
def plot_roc_curve(fpr, tpr, auroc21):
    plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.4f})'.format(auroc21))
    plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic (ROC) Curve')
    plt.legend()
    plt.savefig('mnMnistFor6MyROC.png', bbox_inches='tight')
# loLoss2: ground-truth 0/1 labels; loLossNoChange: predicted scores (probabilities).
loLossNoChange = loLosses_NIKlosses2
loLoss2 = loLosses_NIKlosses3
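# Minimal usage sketch for the metric calls below, on toy data (values from
# the scikit-learn docs, not from this pipeline):
# y_true  = [0, 0, 1, 1]
# y_score = [0.1, 0.4, 0.35, 0.8]
# fpr, tpr, thr = roc_curve(y_true, y_score)
# print(roc_auc_score(y_true, y_score))  # 0.75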
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
precision, recall, thresholds = precision_recall_curve(loLoss2, loLossNoChange)
print(average_precision_score(loLoss2, loLossNoChange))
print('')
print(precision)
print(recall)
print('')
print(thresholds)
def plot_pr_curve(fpr, tpr, auroc21):
    # Called as plot_pr_curve(precision, recall, ...): with these parameter
    # names, `tpr` receives recall (x-axis) and `fpr` receives precision (y-axis).
    plt.plot(tpr, fpr, color='orange', label='PR (AUPRC = {0:.4f})'.format(auroc21))
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Precision Recall (PR) Curve')
    plt.legend()
    plt.savefig('mnMnistFor6MyPR.png', bbox_inches='tight')
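# An alternative sketch using sklearn's built-in plot helper (assumes a
# reasonably recent scikit-learn; not used by the pipeline):
# from sklearn.metrics import PrecisionRecallDisplay
# PrecisionRecallDisplay(precision=precision, recall=recall).plot()
# plt.savefig('pr_display.png', bbox_inches='tight')  # illustrative filename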
plot_pr_curve(precision, recall, average_precision_score(loLoss2, loLossNoChange))
plt.figure()
print('')
print(average_precision_score(loLoss2, loLossNoChange))
print('')
fpr, tpr, thresholds = roc_curve(loLoss2, loLossNoChange)
print(fpr)
print(tpr)
print('')
print(thresholds)
plot_roc_curve(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange))
print('')
print(roc_auc_score(loLoss2, loLossNoChange))
from sklearn.metrics import auc
print(auc(fpr, tpr))
print('')
'''
plt.figure()
plt.plot(fpr[2], tpr[2], color='darkorange', lw=2, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
'''
def plot_roc_curve2(fpr, tpr, auroc21, fpr2, tpr2, auroc212):
    # Overlay the ROC curve (fpr vs tpr) and the PR curve (recall vs precision).
    plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.4f})'.format(auroc21))
    plt.plot(tpr2, fpr2, color='blue', label='PR (AUPRC = {0:.4f})'.format(auroc212))
    plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
    plt.xlabel('False Positive Rate (and Recall)')
    plt.ylabel('True Positive Rate (and Precision)')
    plt.title('ROC and PR Curves')
    plt.legend()
    plt.savefig('mnMnistFor6MyROCPR.png', bbox_inches='tight')
plt.figure()
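# A sketch of the same comparison with two separate axes instead of one overlay
# (plain matplotlib; the filename is illustrative):
# fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
# ax1.plot(fpr, tpr); ax1.set_title('ROC')
# ax2.plot(recall, precision); ax2.set_title('PR')
# fig.savefig('roc_pr_side_by_side.png', bbox_inches='tight')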
plot_roc_curve2(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange), precision, recall,
                average_precision_score(loLoss2, loLossNoChange))
# Scores observed across runs: 0.7657142857142857, 0.7714285714285714,
# 0.7947712113075085, 0.7658408636296418
# Data_j for MNIST digit j
# ResFlow: See if p_g(x) works
# Flip the 0/1 ground-truth labels so the other class becomes the positive one.
loLosses_NIKlosses3 = np.array(loLosses_NIKlosses3)
indices_one = loLosses_NIKlosses3 == 1
indices_zero = loLosses_NIKlosses3 == 0
loLosses_NIKlosses3[indices_one] = 0  # replacing 1s with 0s
loLosses_NIKlosses3[indices_zero] = 1  # replacing 0s with 1s
loLosses_NIKlosses3 = loLosses_NIKlosses3.tolist()
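# Equivalent one-liner for the flip above (a sketch; same result on 0/1 labels):
# loLosses_NIKlosses3 = (1 - np.array(loLosses_NIKlosses3)).tolist()
# Note: with the scores unchanged, flipping the labels maps AUROC to 1 - AUROC.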
# Re-run the full metric suite with the flipped labels, reusing the plot
# helpers defined above (note: this overwrites the PNGs from the first pass).
loLossNoChange = loLosses_NIKlosses2
loLoss2 = loLosses_NIKlosses3
precision, recall, thresholds = precision_recall_curve(loLoss2, loLossNoChange)
print(average_precision_score(loLoss2, loLossNoChange))
plot_pr_curve(precision, recall, average_precision_score(loLoss2, loLossNoChange))
plt.figure()
fpr, tpr, thresholds = roc_curve(loLoss2, loLossNoChange)
plot_roc_curve(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange))
print('')
print(roc_auc_score(loLoss2, loLossNoChange))
print(auc(fpr, tpr))
plot_roc_curve2(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange), precision, recall,
                average_precision_score(loLoss2, loLossNoChange))
# Data_j for MNIST digit j
# ResFlow: see if p_g(x) works
# (execution was deliberately halted here with an undefined name during debugging)
"""
"""
# netG.eval()
# netG.eval()
# netG.eval()
# netG.eval()
netG.eval()
for param in netG.parameters():
param.requires_grad = False
# print(X_training.shape)
X_training = dat['X_test'].to(device)
x = X_training
y = dat['Y_test'].to(device)
args.batchSize = 1
bsz = args.batchSize
nrand = 100
args.val_batchsize = args.batchSize
# print(X_training.shape)
# asdfasdfasdf
# print(X_training.shape)
# print(X_training.shape)
# print(X_training.shape)
losses_NIKlosses = []
loLosses_NIKlosses = []
loLosses_NIKlosses2 = []
# loLosses_NIKlosses2 = []
loLosses_NIKlosses3 = []
for epoch in range(1, 1 + 1):
for i in range(0, len(X_training), bsz):
# print(x)
# print(x.shape)
# print(y)
# print(y.item())
# for i21 in range(len(y)):
# if y[i21] == 0 and i21 == 0:
# y[i21] = y[i21+1]
# x[i21, :, :, :] = x[i21+1, :, :, :]
# elif y[i21] == 0:
# y[i21] = y[i21 - 1]
# x[i21, :, :, :] = x[i21 - 1, :, :, :]
# if i > 0:
# if y.item() == 0:
# y = y_prevPrev
# x = x_prevPrev
'''
for i21 in range(len(y)):
if y[i21] == 0 and i21 == 0:
y[i21] = y[i21+1]
x[i21, :, :, :] = x[i21+1, :, :, :]
elif y[i21] == 0:
y[i21] = y[i21 - 1]
x[i21, :, :, :] = x[i21 - 1, :, :, :]
'''
# x = x.to(device)
print(i)
# print(x.shape)
# asdfsadfs
genFGen2 = x
# lossGen, firstOnly_lossGen, secondOnly_lossGen, thirdOnly_lossGen = use_loss_fn2(genFGen2, args, model, ggenFGen2, x)
# use: val-batchsize
ggenFGen2 = torch.randn([args.val_batchsize, nrand], device=device)
# ggenFGen2 = torch.randn([args.batchsize, nrand], device=device)
# ggenFGen2 = torch.randn([args.batchsize, nrand], device=device, requires_grad=True)
# with torch.no_grad():
# _, firstOnly_lossGen, _, _ = use_loss_fn2(genFGen2, args, model, ggenFGen2, x)
# loLosses_NIKlosses.append(firstOnly_lossGen.item())
# print(firstOnly_lossGen)
# print(loLosses_NIKlosses)
# with torch.no_grad():
# firstOnly_lossGen2 = computeLoss(x, model)
with torch.no_grad():
sigma_x = F.softplus(log_sigma).view(1, 1, args.imageSize, args.imageSize)
netD.zero_grad()
stop = min(bsz, len(X_training[i:]))
real_cpu = X_training[i:i + stop].to(device)
# print(real_cpu.shape)
# asdfasdf
# batch_size = real_cpu.size(0)
batch_size = args.batchSize
label = torch.full((batch_size,), real_label, device=device)
noise_eta = torch.randn_like(real_cpu)
noised_data = real_cpu + sigma_x.detach() * noise_eta
# out_real = netD(noised_data)
# errD_real = criterion(out_real, label)
# errD_real.backward()
# D_x = out_real.mean().item()
# train with fake
# noise = torch.randn(batch_size, args.nz, 1, 1, device=device)
# mu_fake = netG(noise)
# fake = mu_fake + sigma_x * noise_eta
# label.fill_(fake_label)
# out_fake = netD(fake.detach())
# errD_fake = criterion(out_fake, label)
# errD_fake.backward()
# D_G_z1 = out_fake.mean().item()
# errD = errD_real + errD_fake
# optimizerD.step()
# update G network: maximize log(D(G(z)))
netG.zero_grad()
sigma_optimizer.zero_grad()
label.fill_(real_label)
gen_input = torch.randn(batch_size, args.nz, 1, 1, device=device)
out = netG(gen_input)
# print(out.shape)
# asdfasdf
noise_eta = torch.randn_like(out)
g_fake_data = out + noise_eta * sigma_x
dg_fake_decision = netD(g_fake_data)
g_error_gan = criterion(dg_fake_decision, label)
D_G_z2 = dg_fake_decision.mean().item()
if args.lambda_ == 0:
g_error_gan.backward()
optimizerG.step()
sigma_optimizer.step()
else:
# hmc_samples, acceptRate, stepsize = hmc.get_samples(
# netG, g_fake_data.detach(), gen_input.clone(), sigma_x.detach(), args.burn_in,
# args.num_samples_posterior, args.leapfrog_steps, stepsize, args.flag_adapt,
# args.hmc_learning_rate, args.hmc_opt_accept)
# bsz, d = hmc_samples.size()
# mean_output = netG(hmc_samples.view(bsz, d, 1, 1).to(device))
# bsz = g_fake_data.size(0)
# mean_output_summed = torch.zeros_like(g_fake_data)
# for cnt in range(args.num_samples_posterior):
# mean_output_summed = mean_output_summed + mean_output[cnt*bsz:(cnt+1)*bsz]
# mean_output_summed = mean_output_summed / args.num_samples_posterior
# c = ((g_fake_data - mean_output_summed) / sigma_x**2).detach()
# g_error_entropy = torch.mul(c, out + sigma_x * noise_eta).mean(0).sum()
# print(mean_output)
# print(mean_output.shape)
# print(bsz)
# print(d)
# print(torch.randn(batch_size, args.nz, 1, 1, device=device).shape)
# print(torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device).shape)
# use: netG( torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device) )
# print(netG( torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device) ).shape)
# netG( torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device) )
# we use: netG( torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device) )
# print(g_error_entropy)
# asdfasdfds
# print(netG(torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device)).shape)
# asdfsdfs
# print(netG(torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device)).requires_grad)
# print(netG(torch.randn(batch_size, args.nz, 1, 1, device=device)).requires_grad)
# netG2.eval()
# for param in netG2.parameters():
# param.requires_grad = False
# print(netG(torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device)).shape)
# print(netG(torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device)).requires_grad)
# print(netG2(torch.randn(batch_size, args.nz, 1, 1, requires_grad=False, device=device)).shape)
# print(netG2(torch.randn(batch_size, args.nz, 1, 1, requires_grad=False, device=device)).requires_grad)
# print(netG(torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device)).requires_grad)
# print(netG2(torch.randn(batch_size, args.nz, 1, 1, requires_grad=False, device=device)).requires_grad)
# print(netG2(torch.randn(batch_size, args.nz, 1, 1, requires_grad=False, device=device)).requires_grad)
# print(netG2(torch.randn(batch_size, args.nz, 1, 1, device=device)).requires_grad)
# asdfasdf
# _, _, _, p_probP = hmc.get_samples(
# netG2, netG(torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device)).detach(),
# gen_input.clone(), sigma_x.detach(), args.burn_in,
# args.num_samples_posterior, args.leapfrog_steps, stepsize, args.flag_adapt,
# args.hmc_learning_rate, args.hmc_opt_accept)
'''
_, _, _, p_probP = hmc.get_samples(
netG2, netG(torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device)).detach(),
gen_input.clone(), sigma_x.detach(), args.burn_in,
args.num_samples_posterior, args.leapfrog_steps, stepsize, args.flag_adapt,
args.hmc_learning_rate, args.hmc_opt_accept)
'''
# print(netG(torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device)).requires_grad)
# sdfasdfs
_, _, _, p_probP = hmc2.get_samples(
netG, netG(torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device)),
gen_input.clone(), sigma_x.detach(), args.burn_in,
args.num_samples_posterior, args.leapfrog_steps, stepsize, args.flag_adapt,
args.hmc_learning_rate, args.hmc_opt_accept)
# _, _, _, p_probP = hmc.get_samples(
# netG2, netG(torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device)),
# gen_input.clone(), sigma_x.detach(), args.burn_in,
# args.num_samples_posterior, args.leapfrog_steps, stepsize, args.flag_adapt,
# args.hmc_learning_rate, args.hmc_opt_accept)
firstOnly_lossGen2 = p_probP.mean()
# print(y)
# print(y.item())
# print(firstOnly_lossGen2)
# print(firstOnly_lossGen2.item())
# asdfasdfs
loLosses_NIKlosses2.append(firstOnly_lossGen2.item())
# print(y)
# print(y.item())
# if y.item() == 0:
# if y.item() == 1:
# if y.item() == 1:
# if y[i] == 2:
# if y[i] == 2:
if y[i] == 0:
# loLosses_NIKlosses3.append(0)
# loLosses_NIKlosses3.append(0)
loLosses_NIKlosses3.append(1)
# print(y)
# print(y.item())
else:
# loLosses_NIKlosses3.append(1)
# loLosses_NIKlosses3.append(1)
loLosses_NIKlosses3.append(0)
# loLosses_NIKlosses3.append(1)
'''
if y.item() == 0:
loLosses_NIKlosses3.append(0)
#print(y)
#print(y.item())
else:
loLosses_NIKlosses3.append(1)
'''
# print(y)
# print(y.item())
# print(firstOnly_lossGen2)
# print(loLosses_NIKlosses2)
# x_prevPrev = x
# y_prevPrev = y
# print(loLosses_NIKlosses)
# print(loLosses_NIKlosses2)
import numpy as np
# print(loLosses_NIKlosses3)
# print(len(loLosses_NIKlosses3))
print('')
import matplotlib.pyplot as plt
# import seaborn as sns
# ROC curve and auc score
from sklearn.datasets import make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
# def plot_roc_curve(fpr, tpr):
# def plot_roc_curve(fpr, tpr):
def plot_roc_curve(fpr, tpr, auroc21):
# plt.plot(fpr, tpr, color='orange', label='ROC')
# plt.plot(fpr, tpr, color='orange', label='ROC')
# plt.plot(fpr, tpr, color='orange', label='ROC')
# plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.3f})'.format(auroc21))
# plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.3f})'.format(auroc21))
# plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.3f})'.format(auroc21))
plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.4f})'.format(auroc21))
# plt.plot(fpr, tpr, color='orange', label='ROC')
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend()
# plt.savefig('ROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikNikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikNikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikNikNikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikNikNikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nik000NikNikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nik000NikNikROC_MainROC.png', bbox_inches='tight')
plt.savefig('mnMnistFor6MyROC.png', bbox_inches='tight')
# plt.show()
# plt.pause(99)
# plt.savefig('ROC_MainROC.png', bbox_inches='tight')
# plt.savefig('mainMainROC_MainROC.png', bbox_inches='tight')
# plt.savefig('mainMainROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikMainMainROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikMainMainROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikNikMainMainROC_MainROC.png', bbox_inches='tight')
# plt.pause(9)
# plt.ion()
# print(loLossNoChange)
# asdfkdfs
# print(loLoss2)
# print(loLossNoChange)
# loLoss2 is 0 and 1
# loLossNoChange is probability
# loLoss2 = ground truth 0 and 1
# roc_curve(loLoss2, loLossNoChange)
# loLoss2 is the ground truth 0 and 1
# loLossNoChange is the predicted probabilities
# loLossNoChange = predicted probabilities
loLossNoChange = loLosses_NIKlosses2
# loLoss2 = ground truth 0 and 1
loLoss2 = loLosses_NIKlosses3
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
# print(average_precision_score(loLoss2, loLossNoChange))
precision, recall, thresholds = precision_recall_curve(loLoss2, loLossNoChange)
print(average_precision_score(loLoss2, loLossNoChange))
print('')
print(precision)
print(recall)
print('')
print(thresholds)
# def plot_pr_curve(fpr, tpr):
# def plot_pr_curve(fpr, tpr):
def plot_pr_curve(fpr, tpr, auroc21):
# plt.plot(fpr, tpr, color='orange', label='PR')
# plt.plot(fpr, tpr, color='orange', label='PR')
# plt.plot(tpr, fpr, color='orange', label='PR')
# plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.3f})'.format(auroc21))
# plt.plot(tpr, fpr, color='orange', label='PR (AUPRC = {0:.3f})'.format(auroc21))
# plt.plot(tpr, fpr, color='orange', label='PR (AUPRC = {0:.3f})'.format(auroc21))
plt.plot(tpr, fpr, color='orange', label='PR (AUPRC = {0:.4f})'.format(auroc21))
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall (PR) Curve')
plt.legend()
# plt.savefig('ROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nikPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nikNikPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nikNikPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nikNikNikPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nikNikNikPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nik000NikNikPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nik000NikNikPR_MainPR.png', bbox_inches='tight')
plt.savefig('mnMnistFor6MyPR.png', bbox_inches='tight')
# plt.savefig('22Jan2020foFo.png', bbox_inches='tight')
# plt.savefig('000000000000000fffffffffffffffoooFoo.png', bbox_inches='tight')
# plt.show()
# plt.pause(99)
# plt.savefig('ROC_MainROC.png', bbox_inches='tight')
# plt.savefig('mainMainROC_MainROC.png', bbox_inches='tight')
# plt.savefig('mainMainROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikMainMainPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nikMainMainPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nikNikMainMainPR_MainPR.png', bbox_inches='tight')
# plt.pause(9)
# plt.ion()
# plot_pr_curve(precision, recall)
# plot_pr_curve(precision, recall)
plot_pr_curve(precision, recall, average_precision_score(loLoss2, loLossNoChange))
# plot_pr_curve(precision, recall)
plt.figure()
print('')
print(average_precision_score(loLoss2, loLossNoChange))
print('')
# probs = loLossNoChange
fpr, tpr, thresholds = roc_curve(loLoss2, loLossNoChange)
print(fpr)
print(tpr)
print('')
print(thresholds)
# fpr, tpr, thresholds = roc_curve(loLoss2, probs)
# plot_roc_curve(fpr, tpr)
# plot_roc_curve(fpr, tpr)
# plot_roc_curve(fpr, tpr)
# plot_roc_curve(fpr, tpr)
# plot_roc_curve(fpr, tpr)
plot_roc_curve(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange))
# print(roc_auc_score(fpr, tpr))
# print(sklearn.metrics.auc(fpr, tpr))
print('')
print(roc_auc_score(loLoss2, loLossNoChange))
from sklearn.metrics import auc
# roc_auc = auc(fpr, tpr)
print(auc(fpr, tpr))
# roc_auc = auc(fpr, tpr)
print('')
# roc_auc = auc(fpr, tpr)
'''
plt.figure()
#plt.plot(fpr[2], tpr[2], color='darkorange', lw=2, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot(fpr[2], tpr[2], color='darkorange', lw=2, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
#plt.legend(loc="lower right")
plt.legend(loc="lower right")
plt.show()
'''
def plot_roc_curve2(fpr, tpr, auroc21, fpr2, tpr2, auroc212):
# plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.4f})'.format(auroc21))
# plt.plot(tpr, fpr, color='blue', label='PR (AUPRC = {0:.4f})'.format(auroc21))
# plt.plot(tpr2, fpr2, color='blue', label='PR (AUPRC = {0:.4f})'.format(auroc212))
plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.4f})'.format(auroc21))
plt.plot(tpr2, fpr2, color='blue', label='PR (AUPRC = {0:.4f})'.format(auroc212))
# plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.4f})'.format(auroc21))
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate (and Recall)')
plt.ylabel('True Positive Rate (and Precision)')
plt.title('ROC and PR Curves')
plt.legend()
# plt.plot(tpr, fpr, color='orange', label='PR (AUPRC = {0:.4f})'.format(auroc21))
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.xlabel('Recall')
# plt.ylabel('Precision')
# plt.title('Precision Recall (PR) Curve')
# plt.legend()
# plt.savefig('nik00000NikNikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nik00000NikNikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikNik00000nikNikNikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikNik00000nikNikNikROC_MainROC.png', bbox_inches='tight')
plt.savefig('mnMnistFor6MyROCPR.png', bbox_inches='tight')
plt.figure()
# plot_roc_curve2(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange))
# use: precision, recall, average_precision_score(loLoss2, loLossNoChange)
# plot_roc_curve2(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange), precision, recall, average_precision_score(loLoss2, loLossNoChange))
plot_roc_curve2(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange), precision, recall,
average_precision_score(loLoss2, loLossNoChange))
# 0.7657142857142857
# 0.7657142857142857
# 0.7714285714285714
# 0.7947712113075085
# 0.7658408636296418
# Data_j for MNIST digit j
# ResFlow: See if p_g(x) works
# import numpy as np
loLosses_NIKlosses3 = np.array(loLosses_NIKlosses3)
# where_0 = np.where(loLosses_NIKlosses3 == 0)
# where_1 = np.where(loLosses_NIKlosses3 == 1)
# loLosses_NIKlosses3[where_0] = 1
# loLosses_NIKlosses3[where_1] = 0
indices_one = loLosses_NIKlosses3 == 1
indices_zero = loLosses_NIKlosses3 == 0
loLosses_NIKlosses3[indices_one] = 0 # replacing 1s with 0s
loLosses_NIKlosses3[indices_zero] = 1 # replacing 0s with 1s
loLosses_NIKlosses3 = loLosses_NIKlosses3.tolist()
# del where_0
# del where_1
# print(loLosses_NIKlosses3)
# print(len(loLosses_NIKlosses3))
# adsfasdfzs
# print(loLosses_NIKlosses2)
# print(loLosses_NIKlosses3)
# import numpy as np
# import pandas as pd
import matplotlib.pyplot as plt
# import seaborn as sns
# ROC curve and auc score
from sklearn.datasets import make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
# def plot_roc_curve(fpr, tpr):
# def plot_roc_curve(fpr, tpr):
def plot_roc_curve(fpr, tpr, auroc21):
# plt.plot(fpr, tpr, color='orange', label='ROC')
# plt.plot(fpr, tpr, color='orange', label='ROC')
# plt.plot(fpr, tpr, color='orange', label='ROC')
# plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.3f})'.format(auroc21))
# plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.3f})'.format(auroc21))
# plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.3f})'.format(auroc21))
plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.4f})'.format(auroc21))
# plt.plot(fpr, tpr, color='orange', label='ROC')
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend()
# plt.savefig('ROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikNikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikNikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikNikNikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikNikNikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nik000NikNikROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nik000NikNikROC_MainROC.png', bbox_inches='tight')
plt.savefig('mnMnistFor6MyROC.png', bbox_inches='tight')
# plt.show()
# plt.pause(99)
# plt.savefig('ROC_MainROC.png', bbox_inches='tight')
# plt.savefig('mainMainROC_MainROC.png', bbox_inches='tight')
# plt.savefig('mainMainROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikMainMainROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikMainMainROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikNikMainMainROC_MainROC.png', bbox_inches='tight')
# plt.pause(9)
# plt.ion()
# print(loLossNoChange)
# asdfkdfs
# print(loLoss2)
# print(loLossNoChange)
# loLoss2 is 0 and 1
# loLossNoChange is probability
# loLoss2 = ground truth 0 and 1
# roc_curve(loLoss2, loLossNoChange)
# loLoss2 is the ground truth 0 and 1
# loLossNoChange is the predicted probabilities
# loLossNoChange = predicted probabilities
loLossNoChange = loLosses_NIKlosses2
# loLoss2 = ground truth 0 and 1
loLoss2 = loLosses_NIKlosses3
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
# print(average_precision_score(loLoss2, loLossNoChange))
precision, recall, thresholds = precision_recall_curve(loLoss2, loLossNoChange)
print(average_precision_score(loLoss2, loLossNoChange))
print('')
print(precision)
print(recall)
print('')
print(thresholds)
# def plot_pr_curve(fpr, tpr):
# def plot_pr_curve(fpr, tpr):
def plot_pr_curve(fpr, tpr, auroc21):
# plt.plot(fpr, tpr, color='orange', label='PR')
# plt.plot(fpr, tpr, color='orange', label='PR')
# plt.plot(tpr, fpr, color='orange', label='PR')
# plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.3f})'.format(auroc21))
# plt.plot(tpr, fpr, color='orange', label='PR (AUPRC = {0:.3f})'.format(auroc21))
# plt.plot(tpr, fpr, color='orange', label='PR (AUPRC = {0:.3f})'.format(auroc21))
plt.plot(tpr, fpr, color='orange', label='PR (AUPRC = {0:.4f})'.format(auroc21))
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall (PR) Curve')
plt.legend()
# plt.savefig('ROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nikPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nikNikPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nikNikPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nikNikNikPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nikNikNikPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nik000NikNikPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nik000NikNikPR_MainPR.png', bbox_inches='tight')
plt.savefig('mnMnistFor6MyPR.png', bbox_inches='tight')
# plt.savefig('22Jan2020foFo.png', bbox_inches='tight')
# plt.savefig('000000000000000fffffffffffffffoooFoo.png', bbox_inches='tight')
# plt.show()
# plt.pause(99)
# plt.savefig('ROC_MainROC.png', bbox_inches='tight')
# plt.savefig('mainMainROC_MainROC.png', bbox_inches='tight')
# plt.savefig('mainMainROC_MainROC.png', bbox_inches='tight')
# plt.savefig('nikMainMainPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nikMainMainPR_MainPR.png', bbox_inches='tight')
# plt.savefig('nikNikMainMainPR_MainPR.png', bbox_inches='tight')
# plt.pause(9)
# plt.ion()
# plot_pr_curve(precision, recall)
# plot_pr_curve(precision, recall)
plot_pr_curve(precision, recall, average_precision_score(loLoss2, loLossNoChange))
# plot_pr_curve(precision, recall)
plt.figure()
print('')
print(average_precision_score(loLoss2, loLossNoChange))
print('')
# probs = loLossNoChange
fpr, tpr, thresholds = roc_curve(loLoss2, loLossNoChange)
print(fpr)
print(tpr)
print('')
print(thresholds)
# fpr, tpr, thresholds = roc_curve(loLoss2, probs)
# plot_roc_curve(fpr, tpr)
# plot_roc_curve(fpr, tpr)
# plot_roc_curve(fpr, tpr)
# plot_roc_curve(fpr, tpr)
# plot_roc_curve(fpr, tpr)
plot_roc_curve(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange))
# print(roc_auc_score(fpr, tpr))
# print(sklearn.metrics.auc(fpr, tpr))
print('')
print(roc_auc_score(loLoss2, loLossNoChange))
from sklearn.metrics import auc
# roc_auc = auc(fpr, tpr)
print(auc(fpr, tpr))
# roc_auc = auc(fpr, tpr)
print('')
# roc_auc = auc(fpr, tpr)
'''
plt.figure()
#plt.plot(fpr[2], tpr[2], color='darkorange', lw=2, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot(fpr[2], tpr[2], color='darkorange', lw=2, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
#plt.legend(loc="lower right")
plt.legend(loc="lower right")
plt.show()
'''
# Overlay the ROC and PR curves on one figure. Note the argument order:
# the PR pair arrives as (precision, recall) and is plotted as (recall, precision).
def plot_roc_curve2(fpr, tpr, auroc21, fpr2, tpr2, auroc212):
    plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.4f})'.format(auroc21))
    plt.plot(tpr2, fpr2, color='blue', label='PR (AUPRC = {0:.4f})'.format(auroc212))
    plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
    plt.xlabel('False Positive Rate (and Recall)')
    plt.ylabel('True Positive Rate (and Precision)')
    plt.title('ROC and PR Curves')
    plt.legend()
    plt.savefig('mnMnistFor6MyROCPR.png', bbox_inches='tight')

plt.figure()
plot_roc_curve2(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange), precision, recall,
                average_precision_score(loLoss2, loLossNoChange))
# Scores recorded from earlier runs:
#   0.7657142857142857, 0.7714285714285714, 0.7947712113075085, 0.7658408636296418
# Data_j for MNIST digit j
# ResFlow: see if p_g(x) works
# Everything below this point is exploratory; stop the script here.
raise SystemExit('halting after the combined ROC/PR plot')
"""
'''
imgsize = args.imageSize
args.dataroot = 'dataset'
data_path = args.dataroot
nc = 1
transform = transforms.Compose([
transforms.Resize(imgsize),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
mnist = torchvision.datasets.MNIST(root=data_path, download=True, transform=transform, train=True)
train_loader = DataLoader(mnist, batch_size=1, shuffle=True, drop_last=True, num_workers=0)
X_training = torch.zeros(len(train_loader), nc, imgsize, imgsize)
Y_training = torch.zeros(len(train_loader))
for i, x in enumerate(train_loader):
X_training[i, :, :, :] = x[0]
Y_training[i] = x[1]
if i % 10000 == 0:
print('Loading data... {}/{}'.format(i, len(train_loader)))
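# Hedged sketch: the per-sample copy loop above can also load the whole
# split in one big batch (`full_loader` is an illustrative name, not part
# of the original script; MNIST targets come back as a LongTensor):
# full_loader = DataLoader(mnist, batch_size=len(mnist), shuffle=False)
# X_training, Y_training = next(iter(full_loader))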
mnist = torchvision.datasets.MNIST(root=data_path, download=True, transform=transform, train=False)
test_loader = DataLoader(mnist, batch_size=1, shuffle=False, drop_last=True, num_workers=0)
X_test = torch.zeros(len(test_loader), nc, imgsize, imgsize)
Y_test = torch.zeros(len(test_loader))
for i, x in enumerate(test_loader):
X_test[i, :, :, :] = x[0]
Y_test[i] = x[1]
if i % 1000 == 0:
print('i: {}/{}'.format(i, len(test_loader)))
Y_training = Y_training.type('torch.LongTensor')
Y_test = Y_test.type('torch.LongTensor')
dat = {'X_train': X_training, 'Y_train': Y_training, 'X_test': X_test, 'Y_test': Y_test, 'nc': nc}
X_training = dat['X_train'].to(device)
losses_NIKlosses = []
x = X_training
y = dat['Y_train'].to(device)
for itr in range(1, 1 + 1):
runningLoss_NIKrunningLoss = 0.0
# for i, (x, y) in enumerate(X_training):
for i in range(len(X_training)):
x = x.to(device)
args.batchSize = 2 * 2048
# Draw a batch of generator samples (DCGAN-style latents of shape [B, 100, 1, 1]).
with torch.no_grad():
    ggenFGen2 = torch.randn([args.batchSize, 100, 1, 1], device=device)
    genFGen2 = netG.forward(ggenFGen2)
# Split the training batch by digit label, keeping the original names:
# x2 <- digit 1, x3 <- digit 2, ..., x9 <- digit 8, x99 <- digit 9, x999 <- digit 0.
x2 = x[y == 1]
x3 = x[y == 2]
x4 = x[y == 3]
x5 = x[y == 4]
x6 = x[y == 5]
x7 = x[y == 6]
x8 = x[y == 7]
x9 = x[y == 8]
x99 = x[y == 9]
x999 = x[y == 0]
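# Hedged generalisation sketch: the same split as a dict keyed by digit,
# avoiding ten separate variables; equivalent to the assignments above.
per_class = {d: x[y == d] for d in range(10)}
assert torch.equal(per_class[1], x2) and torch.equal(per_class[0], x999)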
# Flatten the generated batch and every per-class tensor to 64*64 vectors.
genFGen2 = genFGen2.view(-1, 64 * 64)
x2 = x2.view(-1, 64 * 64)
x3 = x3.view(-1, 64 * 64)
x4 = x4.view(-1, 64 * 64)
x5 = x5.view(-1, 64 * 64)
x6 = x6.view(-1, 64 * 64)
x7 = x7.view(-1, 64 * 64)
x8 = x8.view(-1, 64 * 64)
x9 = x9.view(-1, 64 * 64)
x99 = x99.view(-1, 64 * 64)
x999 = x999.view(-1, 64 * 64)
with torch.no_grad():
    # For each generated sample, record which digit class is nearest.
    second_term_loss32 = torch.empty(args.batchSize, device=device)
    for i in range(args.batchSize):
"""
print(torch.mean(torch.sqrt((genFGen2[i, :] - xData).view(args.batchsize, -1).pow(2).sum(1))))
print(torch.mean(torch.sqrt((genFGen2[i, :] - genFGen2).view(args.batchsize, -1).pow(2).sum(1))))
print(torch.mean(torch.sqrt((genFGen3[i, :] - genFGen3).pow(2).sum(1))))
print('')
print(torch.mean(torch.norm((genFGen2[i, :] - xData).view(args.batchsize, -1), p=None, dim=1)))
print(torch.mean(torch.norm((genFGen2[i, :] - genFGen2).view(args.batchsize, -1), p=None, dim=1)))
print(torch.mean(torch.norm((genFGen3[i, :] - genFGen3), p=None, dim=1)))
print('')
"""
print(i)
secondSecSec_term_loss32 = torch.empty(10, device=device)
# Squared distance from sample i to its nearest neighbour in each class
# (index 0 holds digit 0 via x999; indices 1-9 hold digits 1-9).
secondSecSec_term_loss32[0] = torch.min(torch.sqrt((genFGen2[i, :] - x999).pow(2).sum(1)) ** 2)
secondSecSec_term_loss32[1] = torch.min(torch.sqrt((genFGen2[i, :] - x2).pow(2).sum(1)) ** 2)
secondSecSec_term_loss32[2] = torch.min(torch.sqrt((genFGen2[i, :] - x3).pow(2).sum(1)) ** 2)
secondSecSec_term_loss32[3] = torch.min(torch.sqrt((genFGen2[i, :] - x4).pow(2).sum(1)) ** 2)
secondSecSec_term_loss32[4] = torch.min(torch.sqrt((genFGen2[i, :] - x5).pow(2).sum(1)) ** 2)
secondSecSec_term_loss32[5] = torch.min(torch.sqrt((genFGen2[i, :] - x6).pow(2).sum(1)) ** 2)
secondSecSec_term_loss32[6] = torch.min(torch.sqrt((genFGen2[i, :] - x7).pow(2).sum(1)) ** 2)
secondSecSec_term_loss32[7] = torch.min(torch.sqrt((genFGen2[i, :] - x8).pow(2).sum(1)) ** 2)
secondSecSec_term_loss32[8] = torch.min(torch.sqrt((genFGen2[i, :] - x9).pow(2).sum(1)) ** 2)
secondSecSec_term_loss32[9] = torch.min(torch.sqrt((genFGen2[i, :] - x99).pow(2).sum(1)) ** 2)
second_term_loss32[i] = torch.argmin(secondSecSec_term_loss32)  # nearest class index for sample i
# Values recorded from earlier runs:
# 0,1: 89.3141   0,2: 63.0707   0,3: 65.5907   0,4: 74.6557   0,5: 58.6006
# 0,6: 57.5523   0,7: 70.9559   0,8: 64.4004   0,9: 62.5445
import matplotlib.pyplot as plt
# Trace of the nearest-class index assigned to each generated sample.
plt.plot(second_term_loss32.cpu())
plt.savefig('saveSaSaSaSaveStore_second_term_loss32.png', bbox_inches='tight')
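# Hedged sketch: the per-sample loop above can be vectorised with torch.cdist.
# `class_sets` and `nearest_class` are illustrative names (not in the original);
# the result matches second_term_loss32 since squaring preserves the argmin.
class_sets = [x999, x2, x3, x4, x5, x6, x7, x8, x9, x99]
with torch.no_grad():
    min_d = torch.stack([torch.cdist(genFGen2, cs).min(dim=1).values ** 2
                         for cs in class_sets], dim=1)      # [batch, 10]
    nearest_class = min_d.argmin(dim=1).float()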
# Count how many generated samples fall nearest to each digit class.
counterFor0, counterFor1, counterFor2, counterFor3, counterFor4, \
    counterFor5, counterFor6, counterFor7, counterFor8, counterFor9 = \
    torch.bincount(second_term_loss32.long().cpu(), minlength=10).tolist()
plt.figure()
plt.plot([counterFor0, counterFor1, counterFor2, counterFor3, counterFor4,
          counterFor5, counterFor6, counterFor7, counterFor8, counterFor9])
plt.savefig('NumberOfOccOccurences_vs_ClassesClusters.png', bbox_inches='tight')
plt.figure()
plt.plot([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[counterFor0, counterFor1, counterFor2, counterFor3, counterFor4, counterFor5, counterFor6,
counterFor7,
counterFor8, counterFor9], '--bo', linewidth=2, markersize=12)
plt.ylabel('Number of samples')
plt.xlabel('Mode (digit class)')
plt.savefig('NuNumberOfOccurences_vs_ClassesClusters.png', bbox_inches='tight')
plt.figure()
# Normalise the per-class counts into a probability distribution over classes.
totalCount = (counterFor0 + counterFor1 + counterFor2 + counterFor3 + counterFor4
              + counterFor5 + counterFor6 + counterFor7 + counterFor8 + counterFor9)
plt.plot([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
         [counterFor0 / totalCount, counterFor1 / totalCount, counterFor2 / totalCount,
          counterFor3 / totalCount, counterFor4 / totalCount, counterFor5 / totalCount,
          counterFor6 / totalCount, counterFor7 / totalCount, counterFor8 / totalCount,
          counterFor9 / totalCount],
         '--bo', linewidth=2, markersize=12)
plt.ylabel('Probability')
plt.xlabel('Clusters')
plt.savefig('NumNumNumNumberOfOccurences_vs_ClassesClusters.png', bbox_inches='tight')
# Stop here: the mode-counting experiment ends at this point.
raise SystemExit('halting after mode-count plots')
'''
# netG.eval()
# for param in netG.parameters():
#     param.requires_grad = False
"""
# Score the held-out test set one image at a time.
X_training = dat['X_test'].to(device)
x = X_training
y = dat['Y_test'].to(device)
args.batchSize = 1
bsz = args.batchSize
nrand = 100
args.val_batchsize = args.batchSize
losses_NIKlosses = []
loLosses_NIKlosses = []
loLosses_NIKlosses2 = []  # per-sample scores (used as predicted probabilities)
loLosses_NIKlosses3 = []  # per-sample binary ground-truth labels
for epoch in range(1, 1 + 1):
for i in range(0, len(X_training), bsz):
print(i)
genFGen2 = x
ggenFGen2 = torch.randn([args.val_batchsize, nrand], device=device)
with torch.no_grad():
    sigma_x = F.softplus(log_sigma).view(1, 1, args.imageSize, args.imageSize)
    netD.zero_grad()
    stop = min(bsz, len(X_training[i:]))
    real_cpu = X_training[i:i + stop].to(device)
    batch_size = args.batchSize
    label = torch.full((batch_size,), real_label, device=device)
    noise_eta = torch.randn_like(real_cpu)
    noised_data = real_cpu + sigma_x.detach() * noise_eta
# out_real = netD(noised_data)
# errD_real = criterion(out_real, label)
# errD_real.backward()
# D_x = out_real.mean().item()
# train with fake
# noise = torch.randn(batch_size, args.nz, 1, 1, device=device)
# mu_fake = netG(noise)
# fake = mu_fake + sigma_x * noise_eta
# label.fill_(fake_label)
# out_fake = netD(fake.detach())
# errD_fake = criterion(out_fake, label)
# errD_fake.backward()
# D_G_z1 = out_fake.mean().item()
# errD = errD_real + errD_fake
# optimizerD.step()
# update G network: maximize log(D(G(z)))
# Generator-side pass (the discriminator updates above remain disabled).
netG.zero_grad()
sigma_optimizer.zero_grad()
label.fill_(real_label)
gen_input = torch.randn(batch_size, args.nz, 1, 1, device=device)
out = netG(gen_input)
noise_eta = torch.randn_like(out)
g_fake_data = out + noise_eta * sigma_x
# dg_fake_decision = netD(g_fake_data)
# g_error_gan = criterion(dg_fake_decision, label)
# D_G_z2 = dg_fake_decision.mean().item()
if args.lambda_ == 0:
#g_error_gan.backward()
optimizerG.step()
#sigma_optimizer.step()
else:
# The original HMC-based entropy estimate on generator output is disabled
# here; instead the real test image itself is scored by using it as `out`.
out = real_cpu
noise_eta = torch.randn_like(out)
g_fake_data = out + noise_eta * sigma_x
NUM_CLASS = 10
# Draw one random one-hot class label per sample in the batch.
rand_y_one_hot = torch.FloatTensor(batch_size, NUM_CLASS).zero_().to(device)
rand_y_one_hot = rand_y_one_hot.scatter_(
    1, torch.randint(0, NUM_CLASS, size=(batch_size, 1), device=device), 1).to(device)
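# Hedged equivalent sketch using torch.nn.functional.one_hot (kept disabled
# so the RNG stream above is not consumed twice):
# rand_y_one_hot = F.one_hot(torch.randint(0, NUM_CLASS, (batch_size,), device=device),
#                            num_classes=NUM_CLASS).float()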
hmc_samples, hmc_labels, acceptRate, stepsize = hmc.get_samples(
netG, g_fake_data.detach(), rand_y_one_hot.detach(), gen_input.clone(), sigma_x.detach(),
args.burn_in,
args.num_samples_posterior, args.leapfrog_steps, stepsize, args.flag_adapt,
args.hmc_learning_rate, args.hmc_opt_accept)
with torch.no_grad():
    # Decode the HMC posterior samples with the conditional generator and
    # average the reconstructions over the posterior draws.
    bsz, d = hmc_samples.size()
    hmc_samples = hmc_samples.view(bsz, d, 1, 1).to(device)
    hmc_labels = hmc_labels.to(device)
    mean_output = netG2(hmc_samples, hmc_labels)
    bsz = g_fake_data.size(0)
    mean_output_summed = torch.zeros_like(g_fake_data)
    for cnt in range(args.num_samples_posterior):
        mean_output_summed = mean_output_summed + mean_output[cnt * bsz:(cnt + 1) * bsz]
    mean_output_summed = mean_output_summed / args.num_samples_posterior
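    # Hedged sketch: the same posterior average in one step, assuming
    # mean_output stacks the args.num_samples_posterior copies along dim 0:
    # mean_output_summed = mean_output.view(args.num_samples_posterior, bsz,
    #                                       *g_fake_data.shape[1:]).mean(dim=0)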
c = ((g_fake_data - mean_output_summed) / sigma_x ** 2).detach()
# Surrogate entropy term built from the averaged posterior reconstruction.
g_error_entropy = torch.mul(c, out + sigma_x * noise_eta).mean(0).sum()
# Squash the (large) entropy value into (0, 1) before inverting w * exp(w).
myNikMy_entropy = torch.exp(-g_error_entropy / 1000000)
# Newton iteration solving w * exp(w) = myNikMy_entropy for w, i.e. an
# in-graph approximation of the Lambert W function (scipy.special.lambertw
# was tried but operates only on detached NumPy values).
ndNdnikNikmyNikMy_entropy = torch.ones(1, device=device) * 0.5
for _ in range(200):
    ndNdnikNikmyNikMy_entropy -= (
        (ndNdnikNikmyNikMy_entropy.clone() * torch.exp(ndNdnikNikmyNikMy_entropy.clone())
         - myNikMy_entropy)
        / (torch.exp(ndNdnikNikmyNikMy_entropy.clone())
           + ndNdnikNikmyNikMy_entropy.clone() * torch.exp(ndNdnikNikmyNikMy_entropy.clone())))
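# Hedged, self-contained reference implementation of the same Newton solve
# (function name and the test values in the comment are illustrative only):
def lambert_w_newton(yy, iters=50):
    ww = torch.full_like(yy, 0.5)  # initial guess
    for _ in range(iters):
        eww = torch.exp(ww)
        ww = ww - (ww * eww - yy) / (eww * (1.0 + ww))  # Newton step on f(w) = w*e^w - yy
    return ww

# lambert_w_newton(torch.tensor([0.5, 1.0, 2.0])) ~ tensor([0.3517, 0.5671, 0.8526])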
'''
# Disabled alternative: asymptotic series for Lambert W in terms of
# L1 = log(y) and L2 = log(log(y)) (clamped through F.relu for stability):
l1_usel1 = torch.log(F.relu(g_error_entropy) + 1e-7)
l2_usel2 = torch.log(F.relu(torch.log(F.relu(g_error_entropy) + 1e-7)) + 1e-7)
gErrorEntropy2 = (l1_usel1 - l2_usel2 + (l2_usel2 / l1_usel1)
                  + ((l2_usel2 * (-2 + l2_usel2)) / (2 * (l1_usel1 ** 2)))
                  + ((l2_usel2 * (6 - (9 * l2_usel2) + (2 * (l2_usel2 ** 2)))) / (6 * (l1_usel1 ** 3)))
                  + ((l2_usel2 * (-12 + (36 * l2_usel2) - (22 * (l2_usel2 ** 2)) + (3 * (l2_usel2 ** 3)))) / (12 * (l1_usel1 ** 4)))
                  + ((l2_usel2 * (60 - (300 * l2_usel2) + (350 * (l2_usel2 ** 2)) - (125 * (l2_usel2 ** 3)) + (12 * (l2_usel2 ** 4)))) / (60 * (l1_usel1 ** 5))))
'''
firstOnly_lossGen2 = g_error_entropy  # raw entropy surrogate used as the per-sample score
loLosses_NIKlosses2.append(firstOnly_lossGen2.item())
# Digit 2 is treated as the positive class. Values recorded from one run:
#   y == 2: g_error_entropy 39393.6797, myNikMy_entropy 0.96137, firstOnly_lossGen2 0.55300
#   else:   g_error_entropy 40232.5859, myNikMy_entropy 0.96057, firstOnly_lossGen2 0.55270
if y[i] == 2:
    loLosses_NIKlosses3.append(1)
else:
    loLosses_NIKlosses3.append(0)
import numpy as np
print('')
import matplotlib.pyplot as plt
# ROC curve and AUC score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
def plot_roc_curve(fpr, tpr, auroc21):
    plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.4f})'.format(auroc21))
    plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic (ROC) Curve')
    plt.legend()
    plt.savefig('mnMnistFor6MyROC.png', bbox_inches='tight')
# loLoss2: ground-truth 0/1 labels; loLossNoChange: predicted probabilities.
loLossNoChange = loLosses_NIKlosses2
loLoss2 = loLosses_NIKlosses3
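# Hedged toy illustration of the label/score convention roc_curve expects
# (the values below are made up, not from this experiment):
# fpr_demo, tpr_demo, thr_demo = roc_curve([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])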
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
precision, recall, thresholds = precision_recall_curve(loLoss2, loLossNoChange)
print(average_precision_score(loLoss2, loLossNoChange))
print('')
print(precision)
print(recall)
print('')
print(thresholds)
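# Hedged toy check (same made-up values as above): average precision
# summarises the PR curve as a single number.
# average_precision_score([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])  # -> ~0.8333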
def plot_pr_curve(fpr, tpr, auroc21):
    # Arguments arrive as (precision, recall); plotted as x=recall, y=precision.
    plt.plot(tpr, fpr, color='orange', label='PR (AUPRC = {0:.4f})'.format(auroc21))
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Precision Recall (PR) Curve')
    plt.legend()
    plt.savefig('mnMnistFor6MyPR.png', bbox_inches='tight')
plot_pr_curve(precision, recall, average_precision_score(loLoss2, loLossNoChange))
plt.figure()
print('')
print(average_precision_score(loLoss2, loLossNoChange))
print('')
fpr, tpr, thresholds = roc_curve(loLoss2, loLossNoChange)
print(fpr)
print(tpr)
print('')
print(thresholds)
plot_roc_curve(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange))
print('')
print(roc_auc_score(loLoss2, loLossNoChange))
from sklearn.metrics import auc
print(auc(fpr, tpr))
print('')
def plot_roc_curve2(fpr, tpr, auroc21, fpr2, tpr2, auroc212):
    # Overlay ROC and PR; the PR pair is passed as (precision, recall).
    plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.4f})'.format(auroc21))
    plt.plot(tpr2, fpr2, color='blue', label='PR (AUPRC = {0:.4f})'.format(auroc212))
    plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
    plt.xlabel('False Positive Rate (and Recall)')
    plt.ylabel('True Positive Rate (and Precision)')
    plt.title('ROC and PR Curves')
    plt.legend()
    plt.savefig('mnMnistFor6MyROCPR.png', bbox_inches='tight')

plt.figure()
plot_roc_curve2(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange), precision, recall,
                average_precision_score(loLoss2, loLossNoChange))
# Scores recorded from earlier runs:
#   0.7657142857142857, 0.7714285714285714, 0.7947712113075085, 0.7658408636296418
# Data_j for MNIST digit j
# ResFlow: see if p_g(x) works
# Flip the ground-truth labels (0 <-> 1) and repeat the evaluation with
# digit 2 as the negative class.
loLosses_NIKlosses3 = np.array(loLosses_NIKlosses3)
indices_one = loLosses_NIKlosses3 == 1
indices_zero = loLosses_NIKlosses3 == 0
loLosses_NIKlosses3[indices_one] = 0
loLosses_NIKlosses3[indices_zero] = 1
loLosses_NIKlosses3 = loLosses_NIKlosses3.tolist()
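# Hedged one-line equivalent of the flip above:
# loLosses_NIKlosses3 = (1 - np.array(loLosses_NIKlosses3)).tolist()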
# plot_roc_curve, plot_pr_curve and plot_roc_curve2 are already defined above
# and are reused below; note the savefig filenames are unchanged, so this
# second (flipped-label) pass overwrites the figures from the first pass.
loLossNoChange = loLosses_NIKlosses2
loLoss2 = loLosses_NIKlosses3
precision, recall, thresholds = precision_recall_curve(loLoss2, loLossNoChange)
print(average_precision_score(loLoss2, loLossNoChange))
print('')
print(precision)
print(recall)
print('')
print(thresholds)
plot_pr_curve(precision, recall, average_precision_score(loLoss2, loLossNoChange))
plt.figure()
print('')
print(average_precision_score(loLoss2, loLossNoChange))
print('')
fpr, tpr, thresholds = roc_curve(loLoss2, loLossNoChange)
print(fpr)
print(tpr)
print('')
print(thresholds)
plot_roc_curve(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange))
print('')
print(roc_auc_score(loLoss2, loLossNoChange))
print(auc(fpr, tpr))
print('')
plt.figure()
plot_roc_curve2(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange), precision, recall,
                average_precision_score(loLoss2, loLossNoChange))
# Scores recorded from earlier runs:
#   0.7657142857142857, 0.7714285714285714, 0.7947712113075085, 0.7658408636296418
# Data_j for MNIST digit j
# ResFlow: see if p_g(x) works
# Stop here: the flipped-label evaluation ends at this point.
raise SystemExit('halting after the flipped-label ROC/PR evaluation')
"""
# netG.eval()
# for param in netG.parameters():
#     param.requires_grad = False
"""
# Second pass over the held-out test set, again one image at a time.
X_training = dat['X_test'].to(device)
x = X_training
y = dat['Y_test'].to(device)
args.batchSize = 1
bsz = args.batchSize
nrand = 100
args.val_batchsize = args.batchSize
losses_NIKlosses = []
loLosses_NIKlosses = []
loLosses_NIKlosses2 = []  # per-sample scores
loLosses_NIKlosses3 = []  # per-sample binary labels
for epoch in range(1, 1 + 1):
for i in range(0, len(X_training), bsz):
print(i)
genFGen2 = x
ggenFGen2 = torch.randn([args.val_batchsize, nrand], device=device)
with torch.no_grad():
sigma_x = F.softplus(log_sigma).view(1, 1, args.imageSize, args.imageSize)
netD.zero_grad()
stop = min(bsz, len(X_training[i:]))
real_cpu = X_training[i:i + stop].to(device)
batch_size = args.batchSize
label = torch.full((batch_size,), real_label, device=device)
noise_eta = torch.randn_like(real_cpu)
noised_data = real_cpu + sigma_x.detach() * noise_eta
# (Disabled) discriminator update on real/fake noised data.
# update G network: maximize log(D(G(z)))
netG.zero_grad()
sigma_optimizer.zero_grad()
label.fill_(real_label)
gen_input = torch.randn(batch_size, args.nz, 1, 1, device=device)
out = netG(gen_input)
noise_eta = torch.randn_like(out)
g_fake_data = out + noise_eta * sigma_x
if args.lambda_ == 0:
optimizerG.step()
else:
# (Disabled) earlier entropy-gradient variants: posterior HMC samples through netG,
# and log-probability probes of netG/netG2 outputs.
# Score the real test image: overwrite the generator output with the sample itself.
out = real_cpu
noise_eta = torch.randn_like(out)
g_fake_data = out + noise_eta * sigma_x
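# Noise the test image with the learned sigma_x, then draw HMC posterior samples of
# the latent (and a random one-hot class) below, mirroring the training-time step.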
NUM_CLASS = 10
rand_y_one_hot = torch.FloatTensor(batch_size, NUM_CLASS).zero_().to(device)
rand_y_one_hot = rand_y_one_hot.scatter_(1,
torch.randint(0, NUM_CLASS, size=(batch_size, 1),
device=device),
1).to(device)
hmc_samples, hmc_labels, acceptRate, stepsize = hmc.get_samples(
netG, g_fake_data.detach(), rand_y_one_hot.detach(), gen_input.clone(), sigma_x.detach(),
args.burn_in,
args.num_samples_posterior, args.leapfrog_steps, stepsize, args.flag_adapt,
args.hmc_learning_rate, args.hmc_opt_accept)
with torch.no_grad():
bsz, d = hmc_samples.size()
hmc_samples = hmc_samples.view(bsz, d, 1, 1).to(device)
hmc_labels = hmc_labels.to(device)
mean_output = netG2(hmc_samples, hmc_labels)
bsz = g_fake_data.size(0)
mean_output_summed = torch.zeros_like(g_fake_data)
for cnt in range(args.num_samples_posterior):
mean_output_summed = mean_output_summed + mean_output[cnt * bsz:(cnt + 1) * bsz]
mean_output_summed = mean_output_summed / args.num_samples_posterior
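# Entropy-gradient surrogate: c = (x_noised - E[G(z, y)]) / sigma_x**2, paired with
# the noised sample; the inner product below gives the per-image score.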
c = ((g_fake_data - mean_output_summed) / sigma_x ** 2).detach()
g_error_entropy = torch.mul(c, out + sigma_x * noise_eta).mean(0).sum()
# Squash the (large) entropy term into (0, 1): x = exp(-g_error_entropy / 1e6).
myNikMy_entropy = torch.exp(-g_error_entropy / 1000000)
# Solve w * exp(w) = myNikMy_entropy for w (Lambert W) with Newton iteration,
# instead of scipy.special.lambertw, so the result stays a torch tensor on-device.
ndNdnikNikmyNikMy_entropy = torch.ones(1, device=device) * 0.5
for _ in range(200):
ndNdnikNikmyNikMy_entropy -= (
(ndNdnikNikmyNikMy_entropy.clone() * torch.exp(
ndNdnikNikmyNikMy_entropy.clone()) - myNikMy_entropy) / (
torch.exp(ndNdnikNikmyNikMy_entropy.clone()) + (
ndNdnikNikmyNikMy_entropy.clone() * torch.exp(ndNdnikNikmyNikMy_entropy.clone()))))
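# Newton step for f(w) = w * exp(w) - x:  w <- w - f(w) / f'(w), f'(w) = exp(w) * (1 + w).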
# (Disabled) closed-form Lambert W series approximation via l1 = log(x), l2 = log(log(x)).
# Use the raw entropy-gradient term as this sample's anomaly score.
firstOnly_lossGen2 = g_error_entropy
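# Record this sample's anomaly score; its 0/1 ground-truth label is appended next.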
loLosses_NIKlosses2.append(firstOnly_lossGen2.item())
# Ground truth: digit-2 samples are positives (1), everything else negatives (0).
if y[i] == 2:
loLosses_NIKlosses3.append(1)
else:
loLosses_NIKlosses3.append(0)
import numpy as np
print('')
import matplotlib.pyplot as plt
# ROC curve and AUC score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
def plot_roc_curve(fpr, tpr, auroc21):
plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.4f})'.format(auroc21))
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend()
plt.savefig('mnMnistFor6MyROC.png', bbox_inches='tight')
# loLossNoChange: predicted probabilities; loLoss2: ground-truth 0/1 labels.
loLossNoChange = loLosses_NIKlosses2
loLoss2 = loLosses_NIKlosses3
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
precision, recall, thresholds = precision_recall_curve(loLoss2, loLossNoChange)
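# Average precision (printed below) summarises the PR curve as a single number.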
print(average_precision_score(loLoss2, loLossNoChange))
print('')
print(precision)
print(recall)
print('')
print(thresholds)
# Called as plot_pr_curve(precision, recall, ap); plots recall on x, precision on y.
def plot_pr_curve(fpr, tpr, auroc21):
plt.plot(tpr, fpr, color='orange', label='PR (AUPRC = {0:.4f})'.format(auroc21))
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall (PR) Curve')
plt.legend()
plt.savefig('mnMnistFor6MyPR.png', bbox_inches='tight')
plot_pr_curve(precision, recall, average_precision_score(loLoss2, loLossNoChange))
plt.figure()
print('')
print(average_precision_score(loLoss2, loLossNoChange))
print('')
# probs = loLossNoChange
fpr, tpr, thresholds = roc_curve(loLoss2, loLossNoChange)
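# roc_curve returns one (fpr, tpr) point per threshold, derived from the distinct scores.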
print(fpr)
print(tpr)
print('')
print(thresholds)
plot_roc_curve(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange))
print('')
print(roc_auc_score(loLoss2, loLossNoChange))
from sklearn.metrics import auc
print(auc(fpr, tpr))  # same quantity, via trapezoidal integration of the ROC curve
print('')
'''
(Disabled) scikit-learn ROC plotting example; `lw` and `roc_auc` are not defined here.
'''
def plot_roc_curve2(fpr, tpr, auroc21, fpr2, tpr2, auroc212):
plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.4f})'.format(auroc21))
plt.plot(tpr2, fpr2, color='blue', label='PR (AUPRC = {0:.4f})'.format(auroc212))
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate (and Recall)')
plt.ylabel('True Positive Rate (and Precision)')
plt.title('ROC and PR Curves')
plt.legend()
plt.savefig('mnMnistFor6MyROCPR.png', bbox_inches='tight')
plt.figure()
plot_roc_curve2(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange), precision, recall,
average_precision_score(loLoss2, loLossNoChange))
# Recorded results across runs: 0.7657142857142857, 0.7714285714285714,
# 0.7947712113075085, 0.7658408636296418
# Data_j for MNIST digit j
# ResFlow: see if p_g(x) works as an anomaly score
# Flip the 0/1 labels and recompute the curves for the complementary labelling.
loLosses_NIKlosses3 = np.array(loLosses_NIKlosses3)
indices_one = loLosses_NIKlosses3 == 1
indices_zero = loLosses_NIKlosses3 == 0
loLosses_NIKlosses3[indices_one] = 0  # replacing 1s with 0s
loLosses_NIKlosses3[indices_zero] = 1  # replacing 0s with 1s
loLosses_NIKlosses3 = loLosses_NIKlosses3.tolist()
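# Everything below recomputes the ROC/PR curves for the flipped labelling.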
import matplotlib.pyplot as plt
# ROC curve and AUC score (recomputed for the flipped labels)
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
def plot_roc_curve(fpr, tpr, auroc21):
plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.4f})'.format(auroc21))
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend()
plt.savefig('mnMnistFor6MyROC.png', bbox_inches='tight')
# loLossNoChange: predicted probabilities; loLoss2: flipped ground-truth 0/1 labels.
loLossNoChange = loLosses_NIKlosses2
loLoss2 = loLosses_NIKlosses3
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
precision, recall, thresholds = precision_recall_curve(loLoss2, loLossNoChange)
print(average_precision_score(loLoss2, loLossNoChange))
print('')
print(precision)
print(recall)
print('')
print(thresholds)
# Called as plot_pr_curve(precision, recall, ap); plots recall on x, precision on y.
def plot_pr_curve(fpr, tpr, auroc21):
plt.plot(tpr, fpr, color='orange', label='PR (AUPRC = {0:.4f})'.format(auroc21))
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall (PR) Curve')
plt.legend()
plt.savefig('mnMnistFor6MyPR.png', bbox_inches='tight')
plot_pr_curve(precision, recall, average_precision_score(loLoss2, loLossNoChange))
plt.figure()
print('')
print(average_precision_score(loLoss2, loLossNoChange))
print('')
# probs = loLossNoChange
fpr, tpr, thresholds = roc_curve(loLoss2, loLossNoChange)
print(fpr)
print(tpr)
print('')
print(thresholds)
plot_roc_curve(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange))
print('')
print(roc_auc_score(loLoss2, loLossNoChange))
from sklearn.metrics import auc
print(auc(fpr, tpr))  # same quantity, via trapezoidal integration of the ROC curve
print('')
'''
(Disabled) scikit-learn ROC plotting example; `lw` and `roc_auc` are not defined here.
'''
def plot_roc_curve2(fpr, tpr, auroc21, fpr2, tpr2, auroc212):
plt.plot(fpr, tpr, color='orange', label='ROC (AUROC = {0:.4f})'.format(auroc21))
plt.plot(tpr2, fpr2, color='blue', label='PR (AUPRC = {0:.4f})'.format(auroc212))
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate (and Recall)')
plt.ylabel('True Positive Rate (and Precision)')
plt.title('ROC and PR Curves')
plt.legend()
plt.savefig('mnMnistFor6MyROCPR.png', bbox_inches='tight')
plt.figure()
plot_roc_curve2(fpr, tpr, roc_auc_score(loLoss2, loLossNoChange), precision, recall,
average_precision_score(loLoss2, loLossNoChange))
# Recorded results across runs: 0.7657142857142857, 0.7714285714285714,
# 0.7947712113075085, 0.7658408636296418
# Data_j for MNIST digit j
# ResFlow: see if p_g(x) works as an anomaly score
raise SystemExit('Stop after evaluation; remove this line to run the training below.')
"""
for epoch in range(1, args.epochs+1):
# Step by the full dataset length, so the inner loop runs once per epoch.
for i in range(0, len(X_training), len(X_training)):
sigma_x = F.softplus(log_sigma).view(1, 1, args.imageSize, args.imageSize)
netD.zero_grad()
stop = min(bsz, len(X_training[i:]))
real_cpu = X_training[i:i+stop].to(device)
batch_size = args.batchSize
label = torch.full((batch_size,), real_label, device=device)
# (Disabled) discriminator update on real/fake noised data.
# update G network: maximize log(D(G(z)))
netG.zero_grad()
sigma_optimizer.zero_grad()
label.fill_(real_label)
gen_input = torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device)
out = netG(gen_input)
varInIn = gen_input
varOutOut = out
noise_eta = torch.randn_like(out)
g_fake_data = out + noise_eta * sigma_x
if args.lambda_ == 0:
optimizerG.step()
else:
"""
hmc_samples, acceptRate, stepsize = hmc.get_samples(
netG2, real_cpu, gen_input.clone(), sigma_x.detach(), args.burn_in,
args.num_samples_posterior, args.leapfrog_steps, stepsize, args.flag_adapt,
args.hmc_learning_rate, args.hmc_opt_accept)
bsz, d = hmc_samples.size()
mean_output = netG(hmc_samples.view(bsz, d, 1, 1).to(device))
bsz = g_fake_data.size(0)
mean_output_summed = torch.zeros_like(g_fake_data)
for cnt in range(args.num_samples_posterior):
mean_output_summed = mean_output_summed + mean_output[cnt * bsz:(cnt + 1) * bsz]
mean_output_summed = mean_output_summed / args.num_samples_posterior
c = ((real_cpu - mean_output_summed) / sigma_x ** 2).detach()
g_error_entropy = torch.mul(c, out + sigma_x * noise_eta).mean(0).sum()
g_error_entropy = -g_error_entropy
print(g_error_entropy)
"""
# (Disabled) earlier entropy-gradient variants: posterior HMC through netG/netG2
# and log-probability probes of generator samples.
'''
gGgGg_fake_data2 = torch.randn(batch_size, args.nz, 1, 1, requires_grad=True, device=device)
gGgGg_fake_data = netG(gGgGg_fake_data2)
hmc_samples, acceptRate, stepsize, _ = hmc.get_samples(
netG2, gGgGg_fake_data, gen_input.clone(), sigma_x.detach(), args.burn_in,
args.num_samples_posterior, args.leapfrog_steps, stepsize, args.flag_adapt,
args.hmc_learning_rate, args.hmc_opt_accept)
bsz, d = hmc_samples.size()
mean_output = netG(hmc_samples.view(bsz, d, 1, 1).to(device))
bsz = g_fake_data.size(0)
mean_output_summed = torch.zeros_like(g_fake_data)
for cnt in range(args.num_samples_posterior):
mean_output_summed = mean_output_summed + mean_output[cnt * bsz:(cnt + 1) * bsz]
mean_output_summed = mean_output_summed / args.num_samples_posterior
c = ((gGgGg_fake_data - mean_output_summed) / sigma_x ** 2).detach()
g_error_entropy = torch.mul(c, out + sigma_x * noise_eta).mean(0).sum()
p_probP = -g_error_entropy
#g_error = g_error_gan - args.lambda_ * g_error_entropy
'''
# (Disabled) log-probability probes (p_probP) of generator samples through netG2,
# and alternative HMC calls on varOutOut / gGgGg_fake_data.
NUM_CLASS = 10
rand_y_one_hot = torch.FloatTensor(batch_size, NUM_CLASS).zero_().to(device)
rand_y_one_hot = rand_y_one_hot.scatter_(1,
torch.randint(0, NUM_CLASS, size=(batch_size, 1),
device=device),
1).to(device)
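# Random one-hot class labels for the conditional generator netG2.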
hmc_samples, hmc_labels, acceptRate, stepsize = hmc.get_samples(
netG2, varOutOut.detach(), rand_y_one_hot.detach(), gen_input.clone(), sigma_x.detach(),
args.burn_in,
args.num_samples_posterior, args.leapfrog_steps, stepsize, args.flag_adapt,
args.hmc_learning_rate, args.hmc_opt_accept)
bsz, d = hmc_samples.size()
hmc_samples = hmc_samples.view(bsz, d, 1, 1).to(device)
hmc_labels = hmc_labels.to(device)
mean_output = netG2(hmc_samples, hmc_labels)
bsz = g_fake_data.size(0)
mean_output_summed = torch.zeros_like(g_fake_data)
for cnt in range(args.num_samples_posterior):
mean_output_summed = mean_output_summed + mean_output[cnt * bsz:(cnt + 1) * bsz]
mean_output_summed = mean_output_summed / args.num_samples_posterior
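# Posterior mean reconstruction, averaged over the HMC samples.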
c = ((varOutOut - mean_output_summed) / sigma_x ** 2).detach()
g_error_entropy = torch.mul(c, out + sigma_x * noise_eta).mean(0).sum()
"""
# Lambert W via Newton iteration on w * exp(w) = exp(-g_error_entropy).
myNikMy_entropy = torch.exp(-g_error_entropy)
ndNdnikNikmyNikMy_entropy = torch.ones(1, device=device) * 0.5
for _ in range(100):
ndNdnikNikmyNikMy_entropy -= (
        (ndNdnikNikmyNikMy_entropy.clone() * torch.exp(
        ndNdnikNikmyNikMy_entropy.clone()) - myNikMy_entropy) / (
        torch.exp(ndNdnikNikmyNikMy_entropy.clone()) + (
        ndNdnikNikmyNikMy_entropy.clone() * torch.exp(ndNdnikNikmyNikMy_entropy.clone()))))
p_probP = ndNdnikNikmyNikMy_entropy
"""
"""
"""
"""
"""
# Solve w * exp(w) = exp(-g_error_entropy) for w, i.e. evaluate the
# Lambert W function W(exp(-g_error_entropy)) with Newton's method.
myNikMy_entropy = torch.exp(-g_error_entropy)
# Start from w = 0.5; the .clone() calls avoid in-place autograd errors,
# so gradients can still flow back through g_error_entropy.
ndNdnikNikmyNikMy_entropy = torch.ones(1, device=device) * 0.5
for _ in range(200):
    ndNdnikNikmyNikMy_entropy -= (
        (ndNdnikNikmyNikMy_entropy.clone() * torch.exp(
            ndNdnikNikmyNikMy_entropy.clone()) - myNikMy_entropy) / (
            torch.exp(ndNdnikNikmyNikMy_entropy.clone()) + (
                ndNdnikNikmyNikMy_entropy.clone() * torch.exp(ndNdnikNikmyNikMy_entropy.clone()))))
'''
Abandoned alternative: asymptotic series for W(x),
W(x) ~ L1 - L2 + L2/L1 + L2*(L2 - 2)/(2*L1**2) + ...,
with L1 = log(x) and L2 = log(log(x)):
l1_usel1 = torch.log(F.relu(g_error_entropy) + 1e-7)
l2_usel2 = torch.log(F.relu(torch.log(F.relu(g_error_entropy) + 1e-7)) + 1e-7)
gErrorEntropy2 = l1_usel1 - l2_usel2 + (l2_usel2 / l1_usel1) + (
    (l2_usel2 * (-2 + l2_usel2)) / (2 * (l1_usel1 ** 2))) + (
    ((l2_usel2 * (6 - (9 * l2_usel2) + (2 * (l2_usel2 ** 2))))) / (
    6 * (l1_usel1 ** 3))) + ((l2_usel2 * (
    -12 + (36 * l2_usel2) - (22 * (l2_usel2 ** 2)) + (3 * (l2_usel2 ** 3)))) / (
    12 * (l1_usel1 ** 4))) + ((l2_usel2 * (
    60 - (300 * l2_usel2) + (350 * (l2_usel2 ** 2)) - (125 * (l2_usel2 ** 3)) + (
    12 * (l2_usel2 ** 4)))) / (60 * (l1_usel1 ** 5)))
'''
p_probP = ndNdnikNikmyNikMy_entropy
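# Sanity check for the Newton solver above (a sketch; scipy is assumed
# here but is not otherwise required by this script):
#   import numpy as np
#   from scipy.special import lambertw
#   z = np.exp(-1.3)      # plays the role of exp(-g_error_entropy)
#   w = 0.5               # same initial guess as above
#   for _ in range(50):
#       w -= (w * np.exp(w) - z) / (np.exp(w) + w * np.exp(w))
#   assert abs(w - lambertw(z).real) < 1e-10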
# (The original PresGAN objective was
#   g_error = g_error_gan - args.lambda_ * g_error_entropy;
# here the loss is built from p_probP instead.)
g_error, firstOnly_lossGen, secondOnly_lossGen, thirdOnly_lossGen = use_loss_fn2(
    p_probP, varOutOut, args, netG2, varInIn, real_cpu.to(device))
g_error.backward()
# Clip the generator gradients to unit norm; keep the reciprocal of the
# total (pre-clip) gradient norm as a training diagnostic.
gradGrad_lossGen = 1.0 / torch.nn.utils.clip_grad.clip_grad_norm_(netG.parameters(), 1.)
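# (Illustration, assuming any nn.Module `m`:
#    total_norm = torch.nn.utils.clip_grad_norm_(m.parameters(), 1.0)
#  the returned value is the total norm measured *before* clipping.)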
optimizerG.step()
sigma_optimizer.step()
if args.restrict_sigma:
log_sigma.data.clamp_(min=logsigma_min, max=logsigma_max)
## log performance
if i % args.log == 0:
print(
'Epoch [%d/%d] .. Batch [%d/%d] .. Loss: %.4f .. L0: %.4f .. L1: %.4f .. L2: %.4f .. G: %.4f'
% (epoch, args.epochs, i, len(X_training), g_error.item(), firstOnly_lossGen.item(),
secondOnly_lossGen.item(), thirdOnly_lossGen.item(), gradGrad_lossGen))
loss_theLoss[epoch-1] = g_error.item()
loss_theLoss0[epoch-1] = firstOnly_lossGen.item()
loss_theLoss1[epoch-1] = secondOnly_lossGen.item()
loss_theLoss2[epoch-1] = thirdOnly_lossGen.item()
loss_theLoss3[epoch-1] = gradGrad_lossGen
if epoch % args.save_imgs_every == 0:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(loss_theLoss.cpu())
plt.xlim(0, epoch-1)
plt.savefig('theFinFinalNiNikNdUoeNdNikUoeMyNdNdNdMyneNewloLossLoss_plot')
plt.figure()
plt.plot(loss_theLoss0.cpu())
plt.xlim(0, epoch-1)
plt.savefig('theFinFinalNiNikNdUoeNdNikUoeMyNdNdNdMyneNewloLossLoss0_plot')
plt.figure()
plt.plot(loss_theLoss1.cpu())
plt.xlim(0, epoch-1)
plt.savefig('theFinFinalNiNikNdUoeNdNikUoeMyNdNdNdMyneNewloLossLoss1_plot')
plt.figure()
plt.plot(loss_theLoss2.cpu())
plt.xlim(0, epoch-1)
plt.savefig('theFinFinalNiNikNdUoeNdNikUoeMyNdNdNdMyneNewloLossLoss2_plot')
plt.figure()
plt.plot(loss_theLoss3.cpu())
plt.xlim(0, epoch - 1)
plt.savefig('theFinFinalNiNikNdUoeNdNikUoeMyNdNdNdMyneNewloLossLoLossssLoLossLoss2_plot')
fig, axs = plt.subplots(2, 2)  # subplots creates its own figure
axs[0, 0].plot(range(1, 1+epoch), loss_theLoss[:epoch].cpu())
axs[0, 0].set_title('Loss')
axs[0, 1].plot(range(1, 1+epoch), loss_theLoss0[:epoch].cpu(), 'tab:orange')
axs[0, 1].set_title('L0')
axs[1, 0].plot(range(1, 1+epoch), loss_theLoss1[:epoch].cpu(), 'tab:green')
axs[1, 0].set_title('L1')
axs[1, 1].plot(range(1, 1+epoch), loss_theLoss2[:epoch].cpu(), 'tab:red')
axs[1, 1].set_title('L2')
plt.savefig('theFinFinalNiNikNdUoeNdNikUoeMyNdNdNdMyneNewloLossLossTotal_plot')
fig, axs = plt.subplots(3, 2)  # subplots creates its own figure
axs[0, 0].plot(range(1, 1 + epoch), loss_theLoss[:epoch].cpu())
axs[0, 0].set_title('Loss')
axs[0, 1].plot(range(1, 1 + epoch), loss_theLoss0[:epoch].cpu(), 'tab:orange')
axs[0, 1].set_title('L0')
axs[1, 0].plot(range(1, 1 + epoch), loss_theLoss1[:epoch].cpu(), 'tab:green')
axs[1, 0].set_title('L1')
axs[1, 1].plot(range(1, 1 + epoch), loss_theLoss2[:epoch].cpu(), 'tab:red')
axs[1, 1].set_title('L2')
axs[2, 1].plot(range(1, 1 + epoch), loss_theLoss3[:epoch].cpu(), 'tab:orange')
axs[2, 1].set_title('Grad')
# Hide x labels and tick labels for top plots and y ticks for right plots,
# then save; close all figures so they don't accumulate across epochs.
for ax in axs.flat:
    ax.label_outer()
plt.savefig('theFinFinalNiNikNdUoeNikNdNikUoeMyNdNdNdMyneNewloLossLossTotal_plot')
plt.close('all')
# Draw random one-hot class labels for the conditional generator netG2.
NUM_CLASS = 10
rand_y_one_hot = torch.FloatTensor(args.num_gen_images, NUM_CLASS).zero_().to(device)
rand_y_one_hot = rand_y_one_hot.scatter_(1,
                                         torch.randint(0, NUM_CLASS, size=(args.num_gen_images, 1),
                                                       device=device),
                                         1).to(device)
fake = netG2(fixed_noise, rand_y_one_hot).detach()
vutils.save_image(fake, '%s/presgan_%s_fake_epoch_%03d.png' % (args.results_folder, args.dataset, epoch),
normalize=True, nrow=20)
fake = netG(fixed_noise).detach()
vutils.save_image(fake, '%s/presgan_%s_faFake_epoch_%03d.png' % (args.results_folder, args.dataset, epoch),
normalize=True, nrow=20)
if epoch % args.save_ckpt_every == 0:
    torch.save(netG.state_dict(),
               os.path.join(args.results_folder, 'neNetG_presgan_%s_epoch_%s.pth' % (args.dataset, epoch)))
    torch.save(log_sigma, os.path.join(args.results_folder, 'log_sigma_%s_%s.pth' % (args.dataset, epoch)))
|
[
"noreply@github.com"
] |
nikolaosdionelis.noreply@github.com
|
116a3f5ee6972a6174312c9ea4de119d8fafd7df
|
054bc8696bdd429e2b3ba706feb72c0fb604047f
|
/python/stats/mannWhitneyUtest/mannWhitneyUtestR.py
|
8405d8624914a2465d6189145231c9e18e7ba2ee
|
[] |
no_license
|
wavefancy/WallaceBroad
|
076ea9257cec8a3e1c8f53151ccfc7c5c0d7200f
|
fbd00e6f60e54140ed5b4e470a8bdd5edeffae21
|
refs/heads/master
| 2022-02-22T04:56:49.943595
| 2022-02-05T12:15:23
| 2022-02-05T12:15:23
| 116,978,485
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,194
|
py
|
#!/usr/bin/env python3
"""
Calculate pvalue for Mann-Whitney U test. Call wilcox.test function from R by rpy2.
@Author: wavefancy@gmail.com
Usage:
mannWhitneyUtestR.py [-a int] [-l]
mannWhitneyUtestR.py -h | --help | -v | --version | -f | --format
Notes:
    1. Reads from stdin and writes results to stdout.
    2. Computes the exact p-value for small samples.
    3. Outputs the p-value for the alternative selected by -a
       (two.sided by default).
    4. See R's wilcox.test documentation for details.
Options:
-a int -1|0|1, Alternative test, default 0 for test two.sided.
-1 for less, 1 for greater.
-l Indicate the first column in each data set as label, output label also.
-h --help Show this screen.
-v --version Show version.
-f --format Show input/output file format example.
"""
import sys
from docopt import docopt
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)  # restore default SIGPIPE so piping to `head` etc. exits cleanly
def ShowFormat():
'''Input File format example:'''
print('''
#input example, each line two samples, separated by ';'
------------------------
0.80 0.83 1.89 1.04 1.45 1.38 1.91 1.64 0.73 1.46; 1.15 0.88 0.90 0.74 1.21
#output example (benchmarked with R, wilcox.test)
------------------------
0.2544122544122544
# first column as label: -l
------------------------
X 0.80 0.83 1.89 1.04 1.45 1.38 1.91 1.64 0.73 1.46; Y 1.15 0.88 0.90 0.74 1.21
#output example (benchmarked with R, wilcox.test)
------------------------
X Y 0.2544122544122544
''')
if __name__ == '__main__':
args = docopt(__doc__, version='1.0')
# print(args)
if(args['--format']):
ShowFormat()
sys.exit(-1)
Alternative = 'two.sided'
if args['-a'] == '-1':
Alternative = 'less'
elif args['-a'] == '1':
Alternative = 'greater'
WITH_LABEL = True if args['-l'] else False
# Call R function wilcox.test()
from rpy2.robjects import FloatVector
from rpy2.robjects.packages import importr
stats = importr('stats')
#http://rpy.sourceforge.net/rpy2/doc-dev/html/introduction.html
def callRWilcoxTest(x,y):
'''Call R function to do wilcox.test'''
k = stats.wilcox_test(FloatVector(x),FloatVector(y),alternative=Alternative)
return list(k[2])[0]  # k[2] is the 'p.value' component of R's htest result
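# Pure-Python cross-check (a sketch; scipy is not a dependency of this
# script):
#   from scipy.stats import mannwhitneyu
#   p = mannwhitneyu(x, y, alternative='two-sided').pvalue
# agrees with wilcox.test up to exact-vs-normal-approximation settings.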
for line in sys.stdin:
line = line.strip()
if line:
ss = [x.strip() for x in line.split(';')]
try:
# print(ss)
left = ss[0].split()
right = ss[1].split()
if WITH_LABEL:
x = [float(x) for x in left[1:] if x]
y = [float(x) for x in right[1:] if x]
sys.stdout.write('%s\t%s\t%s\n'%(left[0],right[0],callRWilcoxTest(x,y)))
else:
x = [float(x) for x in left if x]
y = [float(x) for x in right if x]
sys.stdout.write('%s\n'%(callRWilcoxTest(x,y)))
except ValueError:
sys.stderr.write('WARNING: parse value error, skip one line: %s\n'%(line))
sys.stdout.flush()
sys.stdout.close()
sys.stderr.flush()
sys.stderr.close()
|
[
"wavefancy@gmail.com"
] |
wavefancy@gmail.com
|
cf2e66b07d0dec695057b2fb07ef059c124665e7
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/powerplatform/get_account.py
|
68e5c4b045b92179f9f40ef87ac0faa7823dab35
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,422
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetAccountResult',
'AwaitableGetAccountResult',
'get_account',
'get_account_output',
]
@pulumi.output_type
class GetAccountResult:
"""
Definition of the account.
"""
def __init__(__self__, description=None, id=None, location=None, name=None, system_data=None, tags=None, type=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The description of the account.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetAccountResult(GetAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAccountResult(
description=self.description,
id=self.id,
location=self.location,
name=self.name,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_account(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountResult:
"""
Definition of the account.
API Version: 2020-10-30-preview.
:param str account_name: Name of the account.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:powerplatform:getAccount', __args__, opts=opts, typ=GetAccountResult).value
return AwaitableGetAccountResult(
description=__ret__.description,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
system_data=__ret__.system_data,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_account)
def get_account_output(account_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAccountResult]:
"""
Definition of the account.
API Version: 2020-10-30-preview.
:param str account_name: Name of the account.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
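# Example usage (a sketch; the account and resource-group names below are
# hypothetical):
#   result = get_account(account_name="myAccount",
#                        resource_group_name="myResourceGroup")
#   pulumi.export("accountLocation", result.location)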
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
094bebf70679ab6093d3d484990855e2cd4954da
|
148072ce210ca4754ea4a37d83057e2cf2fdc5a1
|
/src/core/w3af/w3af/plugins/attack/db/sqlmap/thirdparty/beautifulsoup/beautifulsoup.py
|
04834015a37a77409fafa53c5007326e1673309c
|
[] |
no_license
|
ycc1746582381/webfuzzer
|
8d42fceb55c8682d6c18416b8e7b23f5e430c45f
|
0d9aa35c3218dc58f81c429cae0196e4c8b7d51b
|
refs/heads/master
| 2021-06-14T18:46:59.470232
| 2017-03-14T08:49:27
| 2017-03-14T08:49:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 79,811
|
py
|
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2010, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.2.0"
__copyright__ = "Copyright (c) 2004-2010 Leonard Richardson"
__license__ = "New-style BSD"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import markupbase
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
# These hacks make Beautiful Soup able to parse XML with namespaces
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
DEFAULT_OUTPUT_ENCODING = "utf-8"
def _match_css_class(str):
"""Build a RE to match the given CSS class."""
return re.compile(r"(^|.*\s)%s($|\s)" % str)
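# For example, _match_css_class("foo").match("bar foo") succeeds, while
# _match_css_class("foo").match("foobar") does not: the class must appear
# as a whitespace-delimited token.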
# First, the classes that represent markup elements.
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
def setup(self, parent=None, previous=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous = previous
self.next = None
self.previousSibling = None
self.nextSibling = None
if self.parent and self.parent.contents:
self.previousSibling = self.parent.contents[-1]
self.previousSibling.nextSibling = self
def replaceWith(self, replaceWith):
oldParent = self.parent
myIndex = self.parent.index(self)
if hasattr(replaceWith, "parent") \
and replaceWith.parent is self.parent:
# We're replacing this element with one of its siblings.
index = replaceWith.parent.index(replaceWith)
if index and index < myIndex:
# Furthermore, it comes before this element. That
# means that when we extract it, the index of this
# element will change.
myIndex = myIndex - 1
self.extract()
oldParent.insert(myIndex, replaceWith)
def replaceWithChildren(self):
myParent = self.parent
myIndex = self.parent.index(self)
self.extract()
reversedChildren = list(self.contents)
reversedChildren.reverse()
for child in reversedChildren:
myParent.insert(myIndex, child)
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent:
try:
del self.parent.contents[self.parent.index(self)]
except ValueError:
pass
# Find the two elements that would be next to each other if
# this element (and any children) hadn't been parsed. Connect
# the two.
lastChild = self._lastRecursiveChild()
nextElement = lastChild.next
if self.previous:
self.previous.next = nextElement
if nextElement:
nextElement.previous = self.previous
self.previous = None
lastChild.next = None
self.parent = None
if self.previousSibling:
self.previousSibling.nextSibling = self.nextSibling
if self.nextSibling:
self.nextSibling.previousSibling = self.previousSibling
self.previousSibling = self.nextSibling = None
return self
def _lastRecursiveChild(self):
"Finds the last element beneath this object to be parsed."
lastChild = self
while hasattr(lastChild, 'contents') and lastChild.contents:
lastChild = lastChild.contents[-1]
return lastChild
def insert(self, position, newChild):
if isinstance(newChild, basestring) \
and not isinstance(newChild, NavigableString):
newChild = NavigableString(newChild)
position = min(position, len(self.contents))
if hasattr(newChild, 'parent') and newChild.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if newChild.parent is self:
index = self.index(newChild)
if index > position:
# Furthermore we're moving it further down the
# list of this object's children. That means that
# when we extract this element, our target index
# will jump down one.
position = position - 1
newChild.extract()
newChild.parent = self
previousChild = None
if position == 0:
newChild.previousSibling = None
newChild.previous = self
else:
previousChild = self.contents[position - 1]
newChild.previousSibling = previousChild
newChild.previousSibling.nextSibling = newChild
newChild.previous = previousChild._lastRecursiveChild()
if newChild.previous:
newChild.previous.next = newChild
newChildsLastElement = newChild._lastRecursiveChild()
if position >= len(self.contents):
newChild.nextSibling = None
parent = self
parentsNextSibling = None
while not parentsNextSibling:
parentsNextSibling = parent.nextSibling
parent = parent.parent
if not parent: # This is the last element in the document.
break
if parentsNextSibling:
newChildsLastElement.next = parentsNextSibling
else:
newChildsLastElement.next = None
else:
nextChild = self.contents[position]
newChild.nextSibling = nextChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
newChildsLastElement.next = nextChild
if newChildsLastElement.next:
newChildsLastElement.next.previous = newChildsLastElement
self.contents.insert(position, newChild)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def findNext(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
def findAllNext(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.nextGenerator,
**kwargs)
def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._findOne(self.findNextSiblings, name, attrs, text,
**kwargs)
def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.nextSiblingGenerator, **kwargs)
fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.previousGenerator,
**kwargs)
fetchPrevious = findAllPrevious # Compatibility with pre-3.x
def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._findOne(self.findPreviousSiblings, name, attrs, text,
**kwargs)
def findPreviousSiblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.previousSiblingGenerator, **kwargs)
fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
def findParent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _findOne because findParents takes a different
# set of arguments.
r = None
l = self.findParents(name, attrs, 1)
if l:
r = l[0]
return r
def findParents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._findAll(name, attrs, None, limit, self.parentGenerator,
**kwargs)
fetchParents = findParents # Compatibility with pre-3.x
# These methods do the real heavy lifting.
def _findOne(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
# (Possibly) special case some findAll*(...) searches
elif text is None and not limit and not attrs and not kwargs:
# findAll*(True)
if name is True:
return [element for element in generator()
if isinstance(element, Tag)]
# findAll*('tag-name')
elif isinstance(name, basestring):
return [element for element in generator()
if isinstance(element, Tag) and
element.name == name]
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
# Build a SoupStrainer
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
g = generator()
while True:
try:
i = g.next()
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
# These Generators can be used to navigate starting from both
# NavigableStrings and Tags.
def nextGenerator(self):
i = self
while i is not None:
i = i.next
yield i
def nextSiblingGenerator(self):
i = self
while i is not None:
i = i.nextSibling
yield i
def previousGenerator(self):
i = self
while i is not None:
i = i.previous
yield i
def previousSiblingGenerator(self):
i = self
while i is not None:
i = i.previousSibling
yield i
def parentGenerator(self):
i = self
while i is not None:
i = i.parent
yield i
# Utility methods
def substituteEncoding(self, str, encoding=None):
encoding = encoding or "utf-8"
return str.replace("%SOUP-ENCODING%", encoding)
def toEncoding(self, s, encoding=None):
"""Encodes an object to a string in some encoding, or to Unicode.
."""
if isinstance(s, unicode):
if encoding:
s = s.encode(encoding)
elif isinstance(s, str):
if encoding:
s = s.encode(encoding)
else:
s = unicode(s)
else:
if encoding:
s = self.toEncoding(str(s), encoding)
else:
s = unicode(s)
return s
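# For example (Python 2 semantics):
#   toEncoding(u'caf\xe9', 'utf-8') -> 'caf\xc3\xa9'
#   toEncoding('plain', None)       -> u'plain'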
class NavigableString(unicode, PageElement):
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __getnewargs__(self):
return (NavigableString.__str__(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
def __unicode__(self):
return str(self).decode(DEFAULT_OUTPUT_ENCODING)
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
if encoding:
return self.encode(encoding)
else:
return self
class CData(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)
class ProcessingInstruction(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
output = self
if "%SOUP-ENCODING%" in output:
output = self.substituteEncoding(output, encoding)
return "<?%s?>" % self.toEncoding(output, encoding)
class Comment(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<!--%s-->" % NavigableString.__str__(self, encoding)
class Declaration(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<!%s>" % NavigableString.__str__(self, encoding)
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def _invert(h):
"Cheap function to invert a hash."
i = {}
for k, v in h.items():
i[v] = k
return i
XML_ENTITIES_TO_SPECIAL_CHARS = {"apos": "'",
"quot": '"',
"amp": "&",
"lt": "<",
"gt": ">"}
XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
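# e.g. XML_SPECIAL_CHARS_TO_ENTITIES['&'] == 'amp' and
#      XML_SPECIAL_CHARS_TO_ENTITIES['<'] == 'lt'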
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&%s;' % x
else:
return u'&%s;' % x
def __init__(self, parser, name, attrs=None, parent=None,
previous=None):
"Basic constructor."
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected
self.parserClass = parser.__class__
self.isSelfClosing = parser.isSelfClosingTag(name)
self.name = name
if attrs is None:
attrs = []
elif isinstance(attrs, dict):
attrs = attrs.items()
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
self.containsSubstitutions = False
self.convertHTMLEntities = parser.convertHTMLEntities
self.convertXMLEntities = parser.convertXMLEntities
self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
# Convert any HTML, XML, or numeric entities in the attribute values.
convert = lambda (k, val): (k,
re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
self._convertEntities,
val))
self.attrs = map(convert, self.attrs)
def getString(self):
if (len(self.contents) == 1
and isinstance(self.contents[0], NavigableString)):
return self.contents[0]
def setString(self, string):
"""Replace the contents of the tag with a string"""
self.clear()
self.append(string)
string = property(getString, setString)
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def clear(self):
"""Extract all children."""
for child in self.contents[:]:
child.extract()
def index(self, element):
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in xrange(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
# We don't break because bad HTML can define the same
# attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.findAll, args, kwargs)
def __getattr__(self, tag):
# print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag) - 3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if other is self:
return True
if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other,
'contents') or self.name != other.name or self.attrs != other.attrs or len(
self) != len(other):
return False
for i in xrange(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.__str__(encoding)
def __unicode__(self):
return self.__str__(None)
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+ "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+ ")")
def _sub_entity(self, x):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string."""
encodedName = self.toEncoding(self.name, encoding)
attrs = []
if self.attrs:
for key, val in self.attrs:
fmt = '%s="%s"'
if isinstance(val, basestring):
if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
val = self.substituteEncoding(val, encoding)
# The attribute value either:
#
# * Contains no embedded double quotes or single quotes.
# No problem: we enclose it in double quotes.
# * Contains embedded single quotes. No problem:
# double quotes work here too.
# * Contains embedded double quotes. No problem:
# we enclose it in single quotes.
# * Embeds both single _and_ double quotes. This
# can't happen naturally, but it can happen if
# you modify an attribute value after parsing
# the document. Now we have a bit of a
# problem. We solve it by enclosing the
# attribute in single quotes, and escaping any
# embedded single quotes to XML entities.
if '"' in val:
fmt = "%s='%s'"
if "'" in val:
# TODO: replace with apos when
# appropriate.
val = val.replace("'", "&squot;")
# Now we're okay w/r/t quotes. But the attribute
# value might also contain angle brackets, or
# ampersands that aren't part of entities. We need
# to escape those to XML entities too.
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
attrs.append(fmt % (self.toEncoding(key, encoding),
self.toEncoding(val, encoding)))
close = ''
closeTag = ''
if self.isSelfClosing:
close = ' /'
else:
closeTag = '</%s>' % encodedName
indentTag, indentContents = 0, 0
if prettyPrint:
indentTag = indentLevel
space = (' ' * (indentTag - 1))
indentContents = indentTag + 1
contents = self.renderContents(encoding, prettyPrint, indentContents)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if prettyPrint:
s.append(space)
s.append('<%s%s%s>' % (encodedName, attributeString, close))
if prettyPrint:
s.append("\n")
s.append(contents)
if prettyPrint and contents and contents[-1] != "\n":
s.append("\n")
if prettyPrint and closeTag:
s.append(space)
s.append(closeTag)
if prettyPrint and closeTag and self.nextSibling:
s.append("\n")
s = ''.join(s)
return s
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
if len(self.contents) == 0:
return
current = self.contents[0]
while current is not None:
next = current.next
if isinstance(current, Tag):
del current.contents[:]
current.parent = None
current.previous = None
current.previousSibling = None
current.next = None
current.nextSibling = None
current = next
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
return self.__str__(encoding, True)
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Renders the contents of this tag as a string in the given
encoding. If encoding is None, returns a Unicode string."""
s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.__str__(encoding)
elif isinstance(c, Tag):
s.append(c.__str__(encoding, prettyPrint, indentLevel))
if text and prettyPrint:
text = text.strip()
if text:
if prettyPrint:
s.append(" " * (indentLevel - 1))
s.append(text)
if prettyPrint:
s.append("\n")
return ''.join(s)
# Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs)
findChildren = findAll
# Pre-3.x compatibility methods
first = find
fetch = findAll
def fetchText(self, text=None, recursive=True, limit=None):
return self.findAll(text=text, recursive=recursive, limit=limit)
def firstText(self, text=None, recursive=True):
return self.find(text=text, recursive=recursive)
# Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
# Generator methods
def childGenerator(self):
# Just use the iterator from the contents
return iter(self.contents)
def recursiveChildGenerator(self):
if not len(self.contents):
raise StopIteration
stopNode = self._lastRecursiveChild().next
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = name
if isinstance(attrs, basestring):
kwargs['class'] = _match_css_class(attrs)
attrs = None
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
self.attrs = attrs
self.text = text
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def searchTag(self, markupName=None, markupAttrs={}):
found = None
markup = None
if isinstance(markupName, Tag):
markup = markupName
markupAttrs = markup
callFunctionWithTagData = callable(self.name) \
and not isinstance(markupName, Tag)
if (not self.name) \
or callFunctionWithTagData \
or (markup and self._matches(markup, self.name)) \
or (not markup and self._matches(markupName, self.name)):
if callFunctionWithTagData:
match = self.name(markupName, markupAttrs)
else:
match = True
markupAttrMap = None
for attr, matchAgainst in self.attrs.items():
if not markupAttrMap:
if hasattr(markupAttrs, 'get'):
markupAttrMap = markupAttrs
else:
markupAttrMap = {}
for k, v in markupAttrs:
markupAttrMap[k] = v
attrValue = markupAttrMap.get(attr)
if not self._matches(attrValue, matchAgainst):
match = False
break
if match:
if markup:
found = markup
else:
found = markupName
return found
def search(self, markup):
# print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, "__iter__") \
and not isinstance(markup, Tag):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text:
found = self.searchTag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, basestring):
if self._matches(markup, self.text):
found = markup
else:
raise Exception, "I don't know how to match against a %s" \
% markup.__class__
return found
def _matches(self, markup, matchAgainst):
# print "Matching %s against %s" % (markup, matchAgainst)
result = False
if matchAgainst is True:
result = markup is not None
elif callable(matchAgainst):
result = matchAgainst(markup)
else:
# Custom match methods take the tag as an argument, but all
# other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
if markup and not isinstance(markup, basestring):
markup = unicode(markup)
# Now we know that chunk is either a string, or None.
if hasattr(matchAgainst, 'match'):
# It's a regexp object.
result = markup and matchAgainst.search(markup)
elif hasattr(matchAgainst, '__iter__'): # list-like
result = markup in matchAgainst
elif hasattr(matchAgainst, 'items'):
result = markup.has_key(matchAgainst)
elif matchAgainst and isinstance(markup, basestring):
if isinstance(markup, unicode):
matchAgainst = unicode(matchAgainst)
else:
matchAgainst = str(matchAgainst)
if not result:
result = matchAgainst == markup
return result
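# Illustration of the dispatch above: a regexp matchAgainst matches via
# .search (e.g. re.compile('^b') against 'body'), an iterable matches by
# membership ('body' in ['a', 'body']), and matchAgainst=True matches any
# non-None markup.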
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source):
list.__init__(self)
self.source = source
# Now, some helper functions.
def buildTagMap(default, *args):
"""Turns a list of maps, lists, or scalars into a single map.
Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
NESTING_RESET_TAGS maps out of lists and partial maps."""
built = {}
for portion in args:
if hasattr(portion, 'items'):
# It's a map. Merge it.
for k, v in portion.items():
built[k] = v
elif hasattr(portion, '__iter__'): # is a list
# It's a list. Map each item to the default.
for k in portion:
built[k] = default
else:
# It's a scalar. Map it to the default.
built[portion] = default
return built
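# For example:
#   buildTagMap(None, ['br', 'hr'], {'p': ['p']})
#   => {'br': None, 'hr': None, 'p': ['p']}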
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
"""This class contains the basic parser and search code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
PRESERVE_WHITESPACE_TAGS = []
MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda x: x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda x: '<!' + x.group(1) + '>')
]
ROOT_TAG_NAME = u'[document]'
HTML_ENTITIES = "html"
XML_ENTITIES = "xml"
XHTML_ENTITIES = "xhtml"
# TODO: This only exists for backwards-compatibility
ALL_ENTITIES = XHTML_ENTITIES
# Used when determining whether a text node is all whitespace and
# can be replaced with a single space. A text node that contains
# fancy Unicode spaces (usually non-breaking) should be left
# alone.
STRIP_ASCII_SPACES = {9: None, 10: None, 12: None, 13: None, 32: None, }
def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
markupMassage=True, smartQuotesTo=XML_ENTITIES,
convertEntities=None, selfClosingTags=None, isHTML=False):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser.
sgmllib will process most bad HTML, and the BeautifulSoup
class has some tricks for dealing with some HTML that kills
sgmllib, but Beautiful Soup can nonetheless choke or lose data
if your data uses self-closing tags or declarations
incorrectly.
By default, Beautiful Soup uses regexes to sanitize input,
avoiding the vast majority of these problems. If the problems
don't apply to you, pass in False for markupMassage, and
you'll get better performance.
The default parser massage techniques fix the two most common
instances of invalid HTML that choke sgmllib:
<br/> (No space between name of closing tag and tag close)
<! --Comment--> (Extraneous whitespace in declaration)
You can pass in a custom list of (RE object, replace method)
tuples to get Beautiful Soup to scrub your input the way you
want."""
self.parseOnlyThese = parseOnlyThese
self.fromEncoding = fromEncoding
self.smartQuotesTo = smartQuotesTo
self.convertEntities = convertEntities
# Set the rules for how we'll deal with the entities we
# encounter
if self.convertEntities:
# It doesn't make sense to convert encoded characters to
# entities even while you're converting entities to Unicode.
# Just convert it all to Unicode.
self.smartQuotesTo = None
if convertEntities == self.HTML_ENTITIES:
self.convertXMLEntities = False
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = True
elif convertEntities == self.XHTML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = False
elif convertEntities == self.XML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
else:
self.convertXMLEntities = False
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
SGMLParser.__init__(self)
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
self.markup = markup
self.markupMassage = markupMassage
try:
self._feed(isHTML=isHTML)
except StopParsing:
pass
self.markup = None # The markup can now be GCed
def convert_charref(self, name):
"""This method fixes a bug in Python's SGMLParser."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127: # ASCII ends at 127, not 255
return
return self.convert_codepoint(n)
def _feed(self, inDocumentEncoding=None, isHTML=False):
# Convert the document to Unicode.
markup = self.markup
if isinstance(markup, unicode):
if not hasattr(self, 'originalEncoding'):
self.originalEncoding = None
else:
dammit = UnicodeDammit \
(markup, [self.fromEncoding, inDocumentEncoding],
smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
markup = dammit.unicode
self.originalEncoding = dammit.originalEncoding
self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
if markup:
if self.markupMassage:
if not hasattr(self.markupMassage, "__iter__"):
self.markupMassage = self.MARKUP_MASSAGE
for fix, m in self.markupMassage:
markup = fix.sub(m, markup)
# TODO: We get rid of markupMassage so that the
# soup object can be deepcopied later on. Some
# Python installations can't copy regexes. If anyone
# was relying on the existence of markupMassage, this
# might cause problems.
del (self.markupMassage)
self.reset()
SGMLParser.feed(self, markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def __getattr__(self, methodName):
"""This method routes method call requests to either the SGMLParser
superclass or the Tag superclass, depending on the method name."""
# print "__getattr__ called on %s.%s" % (self.__class__, methodName)
if methodName.startswith('start_') or methodName.startswith('end_') \
or methodName.startswith('do_'):
return SGMLParser.__getattr__(self, methodName)
elif not methodName.startswith('__'):
return Tag.__getattr__(self, methodName)
else:
raise AttributeError
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
or self.instanceSelfClosingTags.has_key(name)
def reset(self):
Tag.__init__(self, self, self.ROOT_TAG_NAME)
self.hidden = 1
SGMLParser.reset(self)
self.currentData = []
self.currentTag = None
self.tagStack = []
self.quoteStack = []
self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
# print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
# print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self, containerClass=NavigableString):
if self.currentData:
currentData = u''.join(self.currentData)
if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
not set([tag.name for tag in self.tagStack]).intersection(
self.PRESERVE_WHITESPACE_TAGS)):
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
self.currentData = []
if self.parseOnlyThese and len(self.tagStack) <= 1 and \
(not self.parseOnlyThese.text or \
not self.parseOnlyThese.search(currentData)):
return
o = containerClass(currentData)
o.setup(self.currentTag, self.previous)
if self.previous:
self.previous.next = o
self.previous = o
self.currentTag.contents.append(o)
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instance of
the given tag."""
# print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in xrange(len(self.tagStack) - 1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack) - i
break
if not inclusivePop:
numPops = numPops - 1
for i in xrange(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
def _smartPop(self, name):
"""We need to pop up to the previous tag of this type, unless
one of this tag's nesting reset triggers comes between this
tag and the previous tag of this type, OR unless this tag is a
generic nesting trigger and another generic nesting trigger
comes between this tag and the previous tag of this type.
Examples:
<p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
<p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
<p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
<li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
<td><tr><td> *<td>* should pop to 'tr', not the first 'td'
"""
nestingResetTriggers = self.NESTABLE_TAGS.get(name)
isNestable = nestingResetTriggers != None
isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
popTo = None
inclusive = True
for i in xrange(len(self.tagStack) - 1, 0, -1):
p = self.tagStack[i]
if (not p or p.name == name) and not isNestable:
# Non-nestable tags get popped to the top or to their
# last occurrence.
popTo = name
break
if (nestingResetTriggers is not None
and p.name in nestingResetTriggers) \
or (nestingResetTriggers is None and isResetNesting
and self.RESET_NESTING_TAGS.has_key(p.name)):
# If we encounter one of the nesting reset triggers
# peculiar to this tag, or we encounter another tag
# that causes nesting to reset, pop up to but not
# including that tag.
popTo = p.name
inclusive = False
break
p = p.parent
if popTo:
self._popToTag(popTo, inclusive)
def unknown_starttag(self, name, attrs, selfClosing=0):
# print "Start tag %s: %s" % (name, attrs)
if self.quoteStack:
# This is not a real tag.
# print "<%s> is not real!" % name
attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
self.handle_data('<%s%s>' % (name, attrs))
return
self.endData()
if not self.isSelfClosingTag(name) and not selfClosing:
self._smartPop(name)
if self.parseOnlyThese and len(self.tagStack) <= 1 \
and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
return
tag = Tag(self, name, attrs, self.currentTag, self.previous)
if self.previous:
self.previous.next = tag
self.previous = tag
self.pushTag(tag)
if selfClosing or self.isSelfClosingTag(name):
self.popTag()
if name in self.QUOTE_TAGS:
# print "Beginning quote (%s)" % name
self.quoteStack.append(name)
self.literal = 1
return tag
def unknown_endtag(self, name):
# print "End tag %s" % name
if self.quoteStack and self.quoteStack[-1] != name:
# This is not a real end tag.
# print "</%s> is not real!" % name
self.handle_data('</%s>' % name)
return
self.endData()
self._popToTag(name)
if self.quoteStack and self.quoteStack[-1] == name:
self.quoteStack.pop()
self.literal = (len(self.quoteStack) > 0)
def handle_data(self, data):
self.currentData.append(data)
def _toStringSubclass(self, text, subclass):
"""Adds a certain piece of text to the tree as a NavigableString
subclass."""
self.endData()
self.handle_data(text)
self.endData(subclass)
def handle_pi(self, text):
"""Handle a processing instruction as a ProcessingInstruction
object, possibly one with a %SOUP-ENCODING% slot into which an
encoding will be plugged later."""
if text[:3] == "xml":
text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
self._toStringSubclass(text, ProcessingInstruction)
def handle_comment(self, text):
"Handle comments as Comment objects."
self._toStringSubclass(text, Comment)
def handle_charref(self, ref):
"Handle character references as data."
if self.convertEntities:
data = unichr(int(ref))
else:
data = '&#%s;' % ref
self.handle_data(data)
def handle_entityref(self, ref):
"""Handle entity references as data, possibly converting known
HTML and/or XML entity references to the corresponding Unicode
characters."""
data = None
if self.convertHTMLEntities:
try:
data = unichr(name2codepoint[ref])
except KeyError:
pass
if not data and self.convertXMLEntities:
data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
if not data and self.convertHTMLEntities and \
not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
# TODO: We've got a problem here. We're told this is
# an entity reference, but it's not an XML entity
# reference or an HTML entity reference. Nonetheless,
# the logical thing to do is to pass it through as an
# unrecognized entity reference.
#
# Except: when the input is "&carol;" this function
# will be called with input "carol". When the input is
# "AT&T", this function will be called with input
# "T". We have no way of knowing whether a semicolon
# was present originally, so we don't know whether
# this is an unknown entity or just a misplaced
# ampersand.
#
# The more common case is a misplaced ampersand, so I
# escape the ampersand and omit the trailing semicolon.
data = "&%s" % ref
if not data:
# This case is different from the one above, because we
# haven't already gone through a supposedly comprehensive
# mapping of entities to Unicode characters. We might not
# have gone through any mapping at all. So the chances are
# very high that this is a real entity, and not a
# misplaced ampersand.
data = "&%s;" % ref
self.handle_data(data)
def handle_decl(self, data):
"Handle DOCTYPEs and the like as Declaration objects."
self._toStringSubclass(data, Declaration)
def parse_declaration(self, i):
"""Treat a bogus SGML declaration as raw data. Treat a CDATA
declaration as a CData object."""
j = None
if self.rawdata[i:i + 9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
k = len(self.rawdata)
data = self.rawdata[i + 9:k]
j = k + 3
self._toStringSubclass(data, CData)
else:
try:
j = SGMLParser.parse_declaration(self, i)
except SGMLParseError:
toHandle = self.rawdata[i:]
self.handle_data(toHandle)
j = i + len(toHandle)
return j
class BeautifulSoup(BeautifulStoneSoup):
"""This parser knows the following facts about HTML:
* Some tags have no closing tag and should be interpreted as being
closed as soon as they are encountered.
* The text inside some tags (e.g. 'script') may contain tags which
are not really part of the document and which should be parsed
as text, not tags. If you want to parse the text as tags, you can
always fetch it and parse it explicitly.
* Tag nesting rules:
Most tags can't be nested at all. For instance, the occurrence of
a <p> tag should implicitly close the previous <p> tag.
<p>Para1<p>Para2
should be transformed into:
<p>Para1</p><p>Para2
Some tags can be nested arbitrarily. For instance, the occurrence
of a <blockquote> tag should _not_ implicitly close the previous
<blockquote> tag.
Alice said: <blockquote>Bob said: <blockquote>Blah
should NOT be transformed into:
Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
Some tags can be nested, but the nesting is reset by the
interposition of other tags. For instance, a <tr> tag should
implicitly close the previous <tr> tag within the same <table>,
but not close a <tr> tag in another table.
<table><tr>Blah<tr>Blah
should be transformed into:
<table><tr>Blah</tr><tr>Blah
but,
<tr>Blah<table><tr>Blah
should NOT be transformed into
<tr>Blah<table></tr><tr>Blah
Differing assumptions about tag nesting rules are a major source
of problems with the BeautifulSoup class. If BeautifulSoup is not
treating as nestable a tag your page author treats as nestable,
try ICantBelieveItsBeautifulSoup, MinimalSoup, or
BeautifulStoneSoup before writing your own subclass."""
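    # Editor's sketch (Python 2; assumes this file is importable as a module
    # named BeautifulSoup): the <tr> nesting rule described above, in action.
    #   from BeautifulSoup import BeautifulSoup
    #   str(BeautifulSoup('<table><tr>Blah<tr>Blah'))
    #   roughly yields '<table><tr>Blah</tr><tr>Blah</tr></table>'
    #   (all still-open tags are closed when parsing ends)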
def __init__(self, *args, **kwargs):
if not kwargs.has_key('smartQuotesTo'):
kwargs['smartQuotesTo'] = self.HTML_ENTITIES
kwargs['isHTML'] = True
BeautifulStoneSoup.__init__(self, *args, **kwargs)
SELF_CLOSING_TAGS = buildTagMap(None,
('br', 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base', 'col'))
PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
QUOTE_TAGS = {'script': None, 'textarea': None}
# According to the HTML standard, each of these inline tags can
# contain another tag of the same type. Furthermore, it's common
# to actually use these tags this way.
NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
'center')
# According to the HTML standard, these block tags can contain
# another tag of the same type. Furthermore, it's common
# to actually use these tags this way.
NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')
# Lists can contain other lists, but there are restrictions.
NESTABLE_LIST_TAGS = {'ol': [],
'ul': [],
'li': ['ul', 'ol'],
'dl': [],
'dd': ['dl'],
'dt': ['dl']}
# Tables can contain other tables, but there are restrictions.
NESTABLE_TABLE_TAGS = {'table': [],
'tr': ['table', 'tbody', 'tfoot', 'thead'],
'td': ['tr'],
'th': ['tr'],
'thead': ['table'],
'tbody': ['table'],
'tfoot': ['table'],
}
NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')
# If one of these tags is encountered, all tags up to the next tag of
# this type are popped.
RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
NON_NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS,
NESTABLE_TABLE_TAGS)
NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
# Used to detect the charset in a META tag; see start_meta
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
def start_meta(self, attrs):
"""Beautiful Soup can detect a charset included in a META tag,
try to convert the document to that charset, and re-parse the
document from the beginning."""
httpEquiv = None
contentType = None
contentTypeIndex = None
tagNeedsEncodingSubstitution = False
for i in xrange(0, len(attrs)):
key, value = attrs[i]
key = key.lower()
if key == 'http-equiv':
httpEquiv = value
elif key == 'content':
contentType = value
contentTypeIndex = i
if httpEquiv and contentType: # It's an interesting meta tag.
match = self.CHARSET_RE.search(contentType)
if match:
if (self.declaredHTMLEncoding is not None or
self.originalEncoding == self.fromEncoding):
# An HTML encoding was sniffed while converting
# the document to Unicode, or an HTML encoding was
# sniffed during a previous pass through the
# document, or an encoding was specified
# explicitly and it worked. Rewrite the meta tag.
def rewrite(match):
return match.group(1) + "%SOUP-ENCODING%"
newAttr = self.CHARSET_RE.sub(rewrite, contentType)
attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
newAttr)
tagNeedsEncodingSubstitution = True
else:
# This is our first pass through the document.
# Go through it again with the encoding information.
newCharset = match.group(3)
if newCharset and newCharset != self.originalEncoding:
self.declaredHTMLEncoding = newCharset
self._feed(self.declaredHTMLEncoding)
raise StopParsing
pass
tag = self.unknown_starttag("meta", attrs)
if tag and tagNeedsEncodingSubstitution:
tag.containsSubstitutions = True
class StopParsing(Exception):
pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
"""The BeautifulSoup class is oriented towards skipping over
common HTML errors like unclosed tags. However, sometimes it makes
errors of its own. For instance, consider this fragment:
<b>Foo<b>Bar</b></b>
This is perfectly valid (if bizarre) HTML. However, the
BeautifulSoup class will implicitly close the first b tag when it
encounters the second 'b'. It will think the author wrote
"<b>Foo<b>Bar", and didn't close the first 'b' tag, because
there's no real-world reason to bold something that's already
bold. When it encounters '</b></b>' it will close two more 'b'
tags, for a grand total of three tags closed instead of two. This
can throw off the rest of your document structure. The same is
true of a number of other tags, listed below.
It's much more common for someone to forget to close a 'b' tag
than to actually use nested 'b' tags, and the BeautifulSoup class
handles the common case. This class handles the not-so-common
case: where you can't believe someone wrote what they did, but
it's valid HTML and BeautifulSoup screwed up by assuming it
wouldn't be."""
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
'big')
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)
NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
"""The MinimalSoup class is for parsing HTML that contains
pathologically bad markup. It makes no assumptions about tag
nesting, but it does know which tags are self-closing, that
<script> tags contain Javascript and should not be parsed, that
META tags may contain encoding information, and so on.
This also makes it better for subclassing than BeautifulStoneSoup
or BeautifulSoup."""
RESET_NESTING_TAGS = buildTagMap('noscript')
NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
"""This class will push a tag with only a single string child into
the tag's parent as an attribute. The attribute's name is the tag
name, and the value is the string child. An example should give
the flavor of the change:
<foo><bar>baz</bar></foo>
=>
<foo bar="baz"><bar>baz</bar></foo>
You can then access fooTag['bar'] instead of fooTag.barTag.string.
This is, of course, useful for scraping structures that tend to
use subelements instead of attributes, such as SOAP messages. Note
that it modifies its input, so don't print the modified version
out.
I'm not sure how many people really want to use this class; let me
know if you do. Mainly I like the name."""
def popTag(self):
if len(self.tagStack) > 1:
tag = self.tagStack[-1]
parent = self.tagStack[-2]
parent._getAttrMap()
if (isinstance(tag, Tag) and len(tag.contents) == 1 and
isinstance(tag.contents[0], NavigableString) and
not parent.attrMap.has_key(tag.name)):
parent[tag.name] = tag.contents[0]
BeautifulStoneSoup.popTag(self)
# Enterprise class names! It has come to our attention that some people
# think the names of the Beautiful Soup parser classes are too silly
# and "unprofessional" for use in enterprise screen-scraping. We feel
# your pain! For such-minded folk, the Beautiful Soup Consortium And
# All-Night Kosher Bakery recommends renaming this file to
# "RobustParser.py" (or, in cases of extreme enterprisiness,
# "RobustParserBeanInterface.class") and using the following
# enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
pass
class RobustHTMLParser(BeautifulSoup):
pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
pass
class SimplifyingSOAPParser(BeautifulSOAP):
pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
"""A class for detecting the encoding of a *ML document and
converting it to a Unicode string. If the source encoding is
windows-1252, can replace MS smart quotes with their HTML or XML
equivalents."""
# This dictionary maps commonly seen values for "charset" in HTML
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
CHARSET_ALIASES = {"macintosh": "mac-roman",
"x-sjis": "shift-jis"}
def __init__(self, markup, overrideEncodings=[],
smartQuotesTo='xml', isHTML=False):
self.declaredHTMLEncoding = None
self.markup, documentEncoding, sniffedEncoding = \
self._detectEncoding(markup, isHTML)
self.smartQuotesTo = smartQuotesTo
self.triedEncodings = []
if markup == '' or isinstance(markup, unicode):
self.originalEncoding = None
self.unicode = unicode(markup)
return
u = None
for proposedEncoding in overrideEncodings:
u = self._convertFrom(proposedEncoding)
if u: break
if not u:
for proposedEncoding in (documentEncoding, sniffedEncoding):
u = self._convertFrom(proposedEncoding)
if u: break
# If no luck and we have auto-detection library, try that:
if not u and chardet and not isinstance(self.markup, unicode):
u = self._convertFrom(chardet.detect(self.markup)['encoding'])
# As a last resort, try utf-8 and windows-1252:
if not u:
for proposed_encoding in ("utf-8", "windows-1252"):
u = self._convertFrom(proposed_encoding)
if u: break
self.unicode = u
if not u: self.originalEncoding = None
def _subMSChar(self, orig):
"""Changes a MS smart quote character to an XML or HTML
entity."""
sub = self.MS_CHARS.get(orig)
if isinstance(sub, tuple):
if self.smartQuotesTo == 'xml':
sub = '&#x%s;' % sub[1]
else:
sub = '&%s;' % sub[0]
return sub
def _convertFrom(self, proposed):
proposed = self.find_codec(proposed)
if not proposed or proposed in self.triedEncodings:
return None
self.triedEncodings.append(proposed)
markup = self.markup
# Convert smart quotes to HTML if coming from an encoding
# that might have them.
if self.smartQuotesTo and proposed.lower() in ("windows-1252",
"iso-8859-1",
"iso-8859-2"):
markup = re.compile("([\x80-\x9f])").sub \
(lambda (x): self._subMSChar(x.group(1)),
markup)
try:
# print "Trying to convert document to %s" % proposed
u = self._toUnicode(markup, proposed)
self.markup = u
self.originalEncoding = proposed
except Exception, e:
# print "That didn't work!"
# print e
return None
# print "Correct encoding: %s" % proposed
return self.markup
def _toUnicode(self, data, encoding):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
return newdata
def _detectEncoding(self, xml_data, isHTML=False):
"""Given a document, tries to detect its XML encoding."""
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
sniffed_xml_encoding = 'ascii'
pass
except:
xml_encoding_match = None
xml_encoding_match = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
if not xml_encoding_match and isHTML:
regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
xml_encoding_match = regexp.search(xml_data)
if xml_encoding_match is not None:
xml_encoding = xml_encoding_match.groups()[0].lower()
if isHTML:
self.declaredHTMLEncoding = xml_encoding
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding
def find_codec(self, charset):
return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
or (charset and self._codec(charset.replace("-", ""))) \
or (charset and self._codec(charset.replace("-", "_"))) \
or charset
def _codec(self, charset):
if not charset: return charset
codec = None
try:
codecs.lookup(charset)
codec = charset
except (LookupError, ValueError):
pass
return codec
EBCDIC_TO_ASCII_MAP = None
def _ebcdic_to_ascii(self, s):
c = self.__class__
if not c.EBCDIC_TO_ASCII_MAP:
emap = (0, 1, 2, 3, 156, 9, 134, 127, 151, 141, 142, 11, 12, 13, 14, 15,
16, 17, 18, 19, 157, 133, 8, 135, 24, 25, 146, 143, 28, 29, 30, 31,
128, 129, 130, 131, 132, 10, 23, 27, 136, 137, 138, 139, 140, 5, 6, 7,
144, 145, 22, 147, 148, 149, 150, 4, 152, 153, 154, 155, 20, 21, 158, 26,
32, 160, 161, 162, 163, 164, 165, 166, 167, 168, 91, 46, 60, 40, 43, 33,
38, 169, 170, 171, 172, 173, 174, 175, 176, 177, 93, 36, 42, 41, 59, 94,
45, 47, 178, 179, 180, 181, 182, 183, 184, 185, 124, 44, 37, 95, 62, 63,
186, 187, 188, 189, 190, 191, 192, 193, 194, 96, 58, 35, 64, 39, 61, 34,
195, 97, 98, 99, 100, 101, 102, 103, 104, 105, 196, 197, 198, 199, 200,
201, 202, 106, 107, 108, 109, 110, 111, 112, 113, 114, 203, 204, 205,
206, 207, 208, 209, 126, 115, 116, 117, 118, 119, 120, 121, 122, 210,
211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
225, 226, 227, 228, 229, 230, 231, 123, 65, 66, 67, 68, 69, 70, 71, 72,
73, 232, 233, 234, 235, 236, 237, 125, 74, 75, 76, 77, 78, 79, 80, 81,
82, 238, 239, 240, 241, 242, 243, 92, 159, 83, 84, 85, 86, 87, 88, 89,
90, 244, 245, 246, 247, 248, 249, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
250, 251, 252, 253, 254, 255)
import string
c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
''.join(map(chr, xrange(256))), ''.join(map(chr, emap)))
return s.translate(c.EBCDIC_TO_ASCII_MAP)
MS_CHARS = {'\x80': ('euro', '20AC'),
'\x81': ' ',
'\x82': ('sbquo', '201A'),
'\x83': ('fnof', '192'),
'\x84': ('bdquo', '201E'),
'\x85': ('hellip', '2026'),
'\x86': ('dagger', '2020'),
'\x87': ('Dagger', '2021'),
'\x88': ('circ', '2C6'),
'\x89': ('permil', '2030'),
'\x8A': ('Scaron', '160'),
'\x8B': ('lsaquo', '2039'),
'\x8C': ('OElig', '152'),
'\x8D': '?',
'\x8E': ('#x17D', '17D'),
'\x8F': '?',
'\x90': '?',
'\x91': ('lsquo', '2018'),
'\x92': ('rsquo', '2019'),
'\x93': ('ldquo', '201C'),
'\x94': ('rdquo', '201D'),
'\x95': ('bull', '2022'),
'\x96': ('ndash', '2013'),
'\x97': ('mdash', '2014'),
'\x98': ('tilde', '2DC'),
'\x99': ('trade', '2122'),
'\x9a': ('scaron', '161'),
'\x9b': ('rsaquo', '203A'),
'\x9c': ('oelig', '153'),
'\x9d': '?',
'\x9e': ('#x17E', '17E'),
'\x9f': ('Yuml', ''), }
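# Editor's sketch (Python 2) of typical UnicodeDammit use, assuming this file
# is importable as a module; an override encoding is tried before sniffing:
#   dammit = UnicodeDammit('Sacr\xe9 bleu!', ['latin-1'])
#   dammit.unicode           -> u'Sacr\xe9 bleu!'
#   dammit.originalEncoding  -> 'latin-1'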
#######################################################################
# By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
| ["everping@outlook.com"] | everping@outlook.com | 15b643b0ba8b80b79c540d2b68b12c8a7f7d8957 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /pjgDmRqh2fbBBwo77_2.py | 8349b8c857630e56110c374ff13f0fd25b56b1de | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py |
"""
**Syncopation** means an emphasis on a weak beat of a bar of music; most
commonly, **beats 2 and 4** (and all other _even-numbered_ beats if
applicable).
`s` is a line of music, represented as a string, where hash signs `#` represent
emphasized beats. Create a function that returns whether the line of music
contains **any** _syncopation_.
### Examples
has_syncopation(".#.#.#.#") ➞ True
# There are Hash signs in the second, fourth, sixth and
# eighth positions of the string.
has_syncopation("#.#...#.") ➞ False
# There are no Hash signs in the second, fourth, sixth or
# eighth positions of the string.
has_syncopation("#.#.###.") ➞ True
# There are Hash signs in the sixth positions of the string.
### Notes
All other unemphasized beats will be represented as a dot.
"""
def has_syncopation(s):
return '#' in s[1::2]
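# Editor's check (sketch): s[1::2] keeps the 2nd, 4th, 6th, ... characters,
# i.e. exactly the even-numbered beats, so any '#' in that slice is syncopation.
assert has_syncopation(".#.#.#.#")
assert not has_syncopation("#.#...#.")
assert has_syncopation("#.#.###.")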
| ["daniel.reich@danielreichs-MacBook-Pro.local"] | daniel.reich@danielreichs-MacBook-Pro.local | 7b70d361dfa5b1d7f0753c1436e01740802666c9 | 3fccf8a309c1db9b7df4686f740e2415386532f4 | /Tests/test_CodonAlign.py | f9b3801d22d776ab7b235950c21b9f1d270d3f5c | ["LicenseRef-scancode-biopython"] | permissive | AkiOhtani/biopython | 96aebd4cb46d6ba90d604f6970e750af1b652088 | 45311b0b549b49578cf1e7eb9dc0c05ac1dabb61 | refs/heads/master | 2021-01-15T08:08:47.567671 | 2014-04-28T07:37:40 | 2014-04-28T07:41:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,678 | py |
# Copyright (C) 2013 by Zheng Ruan (zruan1991@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit tests for the CodonAlign modules.
"""
import sys
import warnings
import tempfile
import platform
import unittest
from Bio import CodonAlign, SeqIO, AlignIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from Bio.Align import MultipleSeqAlignment
TEST_ALIGN_FILE1 = [('CodonAlign/nucl1.fa', 'CodonAlign/pro1.aln'), 'parse']
TEST_ALIGN_FILE2 = [('CodonAlign/nucl2.fa', 'CodonAlign/pro2.aln'), 'parse']
TEST_ALIGN_FILE3 = [('CodonAlign/nucl3.fa', 'CodonAlign/pro3.aln'), 'index']
TEST_ALIGN_FILE4 = [('CodonAlign/nucl4.fa', 'CodonAlign/pro4.aln'), 'index']
TEST_ALIGN_FILE5 = [('CodonAlign/nucl5.fa', 'CodonAlign/pro5.aln'), 'parse']
TEST_ALIGN_FILE6 = [('CodonAlign/egfr_nucl.fa', 'CodonAlign/egfr_pro.aln', 'CodonAlign/egfr_id'), 'id']
TEST_ALIGN_FILE7 = [('CodonAlign/drosophilla.fasta', 'CodonAlign/adh.aln'), 'index']
temp_dir = tempfile.mkdtemp()
class TestCodonSeq(unittest.TestCase):
def test_seq(self):
codonseq1 = CodonAlign.CodonSeq('AAATTT---TTTGGACCC', rf_table=[0,3,6,9,12])
self.assertEqual(len(codonseq1), 18)
self.assertEqual(codonseq1.get_codon_num(), 5)
self.assertEqual(str(codonseq1.get_codon(0)), 'AAA')
self.assertEqual(str(codonseq1.get_codon(-1)), 'CCC')
self.assertEqual(str(codonseq1.get_codon(slice(1,3))), 'TTT---')
self.assertEqual(str(codonseq1.get_codon(slice(None,None,-1))), 'CCCGGATTT---TTTAAA')
self.assertRaises(ValueError, CodonAlign.CodonSeq, 'AAA-TT')
self.assertRaises(AssertionError, CodonAlign.CodonSeq, 'AAA-T')
self.assertRaises(ValueError, CodonAlign.CodonSeq, 'YVVRRDQQQ')
self.assertTrue(isinstance(codonseq1.toSeq(), Seq))
class TestCodonAlignment(unittest.TestCase):
def setUp(self):
codonseq1 = CodonAlign.CodonSeq('AAATTT---TTTGGACCC', CodonAlign.default_codon_alphabet)
codonseq2 = CodonAlign.CodonSeq('AAGTTT---TTTGGGCCC', CodonAlign.default_codon_alphabet)
codonseq3 = CodonAlign.CodonSeq('AAGTAT---TTTGGACCC', CodonAlign.default_codon_alphabet)
codonseq4 = CodonAlign.CodonSeq('AACTTT---TTTGGACGC', CodonAlign.default_codon_alphabet)
self.seqrec = [SeqRecord(codonseq1, id="alpha"),
SeqRecord(codonseq2, id="beta" ),
SeqRecord(codonseq3, id="gamma"),
SeqRecord(codonseq4, id="delta")]
def test_align(self):
codonAlign = CodonAlign.CodonAlignment(self.seqrec)
self.assertEqual(codonAlign.get_aln_length(), 6)
self.assertTrue(isinstance(codonAlign.toMultipleSeqAlignment(), MultipleSeqAlignment))
class TestBuildAndIO(unittest.TestCase):
def setUp(self):
self.aln_file = [TEST_ALIGN_FILE1,
TEST_ALIGN_FILE2,
TEST_ALIGN_FILE3,
TEST_ALIGN_FILE4,
TEST_ALIGN_FILE5,
TEST_ALIGN_FILE6]
alns = []
for i in self.aln_file:
if i[1] == 'parse':
nucl = SeqIO.parse(i[0][0], 'fasta', alphabet=IUPAC.IUPACUnambiguousDNA())
prot = AlignIO.read(i[0][1], 'clustal', alphabet=IUPAC.protein)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
caln = CodonAlign.build(prot, nucl, alphabet=CodonAlign.default_codon_alphabet)
elif i[1] == 'index':
nucl = SeqIO.index(i[0][0], 'fasta', alphabet=IUPAC.IUPACUnambiguousDNA())
prot = AlignIO.read(i[0][1], 'clustal', alphabet=IUPAC.protein)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
caln = CodonAlign.build(prot, nucl, alphabet=CodonAlign.default_codon_alphabet, max_score=20)
elif i[1] == 'id':
nucl = SeqIO.parse(i[0][0], 'fasta', alphabet=IUPAC.IUPACUnambiguousDNA())
prot = AlignIO.read(i[0][1], 'clustal', alphabet=IUPAC.protein)
id = dict((i.split()[0], i.split()[1]) for i in open(i[0][2]).readlines())
with warnings.catch_warnings():
warnings.simplefilter('ignore')
caln = CodonAlign.build(prot, nucl, corr_dict=id, alphabet=CodonAlign.default_codon_alphabet)
alns.append(caln)
self.alns = alns
def test_IO(self):
self.assertEqual(len(self.alns), 6)
#print temp_dir
for n, i in enumerate(self.alns):
aln = i.toMultipleSeqAlignment()
AlignIO.write(aln, temp_dir + '/aln' + str(n) + '.clw', 'clustal')
class Test_build(unittest.TestCase):
def setUp(self):
# Test set 1
seq1 = SeqRecord(Seq('TCAGGGACTGCGAGAACCAAGCTACTGCTGCTGCTGGCTGCGCTCTGCGCCGCAGGTGGGGCGCTGGAG', \
alphabet=IUPAC.IUPACUnambiguousDNA()), id='pro1')
seq2 = SeqRecord(Seq('TCAGGGACTTCGAGAACCAAGCGCTCCTGCTGCTGGCTGCGCTCGGCGCCGCAGGTGGAGCACTGGAG', \
alphabet=IUPAC.IUPACUnambiguousDNA()), id='pro2')
pro1 = SeqRecord(Seq('SGTARTKLLLLLAALCAAGGALE', alphabet=IUPAC.protein),id='pro1')
pro2 = SeqRecord(Seq('SGTSRTKRLLLLAALGAAGGALE', alphabet=IUPAC.protein),id='pro2')
aln1 = MultipleSeqAlignment([pro1, pro2])
self.aln1 = aln1
self.seqlist1 = [seq1, seq2]
# Test set 2
# M K K H E L(F)L C Q G T S N K L T Q(L)L G T F E D H F L S L Q R M F N N C E V V
seq3 = SeqRecord(Seq('ATGAAAAAGCACGAGTTACTTTGCCAAGGGACAAGTAACAAGCTCACCCAGTTGGGCACTTTTGAAGACCACTTTCTGAGCCTACAGAGGATGTTCAACAACTGTGAGGTGGTCCTTGGGAATTTGGAAATTACCTACATGCAGAGTAGTTACAACCTTTCTTTTCTCAAGACCATCCAGGAGGTTGCCGGCTATGTACTCATTGCCCTC', alphabet=IUPAC.IUPACUnambiguousDNA()), id='pro1')
#seq4 =SeqRecord(Seq('ATGAAAAAGCACGAGTT CTTTGCCAAGGGACAAGTAACAAGCTCACCCAGTTGGGCACTTTTGAAGACCACTTTCTGAGCCTACAGAGGATGTTCAACAA TGTGAGGTGGTCCTTGGGAATTTGGAAATTACCTACATGCAGAGTAGTTACAACCTTTCTTTTCTCAAGACCATCCAGGAGGTTGCCGGCTATGTACTCATTGCCCTC', alphabet=IUPAC.IUPACUnambiguousDNA()), id='pro2')
seq4 = SeqRecord(Seq('ATGAAAAAGCACGAGTTCTTTGCCAAGGGACAAGTAACAAGCTCACCCAGTTGGGCACTTTTGAAGACCACTTTCTGAGCCTACAGAGGATGTTCAACAATGTGAGGTGGTCCTTGGGAATTTGGAAATTACCTACATGCAGAGTAGTTACAACCTTTCTTTTCTCAAGACCATCCAGGAGGTTGCCGGCTATGTACTCATTGCCCTC', alphabet=IUPAC.IUPACUnambiguousDNA()), id='pro2')
#seq5 =SeqRecord(Seq('ATGAAAAAGCACGAGTT CTTTGCCAAGGGACAAGTAACAAGCTCACCC TTGGGCACTTTTGAAGACCACTTTCTGAGCCTACAGAGGATGTTCAACAACTGTGAGGTGGTCCTTGGGAATTTGGAAATTACCTACATGCAGAGTAGTTACAACCTTTCTTTTCTCAAGACCATCCAGGAGGTTGCCGGCTATGTACTCATTGCCCTC', alphabet=IUPAC.IUPACUnambiguousDNA()), id='pro3')
seq5 = SeqRecord(Seq('ATGAAAAAGCACGAGTTACTTTGCCAAGGGACAAGTAACAAGCTCACCCTTGGGCACTTTTGAAGACCACTTTCTGAGCCTACAGAGGATGTTCAACAACTGTGAGGTGGTCCTTGGGAATTTGGAAATTACCTACATGCAGAGTAGTTACAACCTTTCTTTTCTCAAGACCATCCAGGAGGTTGCCGGCTATGTACTCATTGCCCTC', alphabet=IUPAC.IUPACUnambiguousDNA()), id='pro3')
pro3 = SeqRecord(Seq('MKKHELLCQGTSNKLTQLGTFEDHFLSLQRMFNNCEVVLGNLEITYMQSSYNLSFLKTIQEVAGYVLIAL', alphabet=IUPAC.protein), id='pro1')
pro4 = SeqRecord(Seq('MKKHEFLCQGTSNKLTQLGTFEDHFLSLQRMFNNCEVVLGNLEITYMQSSYNLSFLKTIQEVAGYVLIAL', alphabet=IUPAC.protein), id='pro2')
pro5 = SeqRecord(Seq('MKKHELLCQGTSNKLTLLGTFEDHFLSLQRMFNNCEVVLGNLEITYMQSSYNLSFLKTIQEVAGYVLIAL', alphabet=IUPAC.protein), id='pro3')
aln2 = MultipleSeqAlignment([pro3, pro4, pro5])
self.aln2 = aln2
self.seqlist2 = [seq3, seq4, seq5]
def test_build(self):
codon_aln1 = CodonAlign.build(self.aln1, self.seqlist1)
codon_aln2 = CodonAlign.build(self.aln2, self.seqlist2)
class Test_dn_ds(unittest.TestCase):
def setUp(self):
nucl = SeqIO.parse(TEST_ALIGN_FILE6[0][0], 'fasta', alphabet=IUPAC.IUPACUnambiguousDNA())
prot = AlignIO.read(TEST_ALIGN_FILE6[0][1], 'clustal', alphabet=IUPAC.protein)
id_corr = dict((i.split()[0], i.split()[1]) for i in open(TEST_ALIGN_FILE6[0][2]).readlines())
aln = CodonAlign.build(prot, nucl, corr_dict=id_corr, alphabet=CodonAlign.default_codon_alphabet)
self.aln = aln
def test_dn_ds(self):
from Bio.CodonAlign.CodonSeq import cal_dn_ds
codon_seq1 = self.aln[0]
codon_seq2 = self.aln[1]
dN, dS = cal_dn_ds(codon_seq1, codon_seq2, method='NG86')
self.assertAlmostEquals(round(dN, 4), 0.0209, places=4)
self.assertAlmostEquals(round(dS, 4), 0.0178, places=4)
dN, dS = cal_dn_ds(codon_seq1, codon_seq2, method='LWL85')
self.assertAlmostEquals(round(dN, 4), 0.0203, places=4)
self.assertAlmostEquals(round(dS, 4), 0.0164, places=4)
try:
from scipy.linalg import expm
dN, dS = cal_dn_ds(codon_seq1, codon_seq2, method='YN00')
self.assertAlmostEquals(round(dN, 4), 0.0198, places=4)
self.assertAlmostEquals(round(dS, 4), 0.0222, places=4)
except ImportError:
warnings.warn('Importing scipy.linalg.expm failed. Skip testing ML method for dN/dS estimation')
pass
try:
from scipy.optimize import minimize
dN, dS = cal_dn_ds(codon_seq1, codon_seq2, method='ML')
self.assertAlmostEquals(round(dN, 4), 0.0194, places=4)
self.assertAlmostEquals(round(dS, 4), 0.0217, places=4)
except ImportError:
warnings.warn('Importing scipy.optimize.minimize failed. Skip testing ML method for dN/dS estimation')
pass
class Test_MK(unittest.TestCase):
def test_mk(self):
ver = sys.version_info
if ver[0] == 2 and ver[1] == 6:
warnings.warn('Python 2.6 detected. Skip testing MK method')
pass
else:
from run_tests import is_numpy
if is_numpy():
p = SeqIO.index(TEST_ALIGN_FILE7[0][0], 'fasta', alphabet=IUPAC.IUPACUnambiguousDNA())
pro_aln = AlignIO.read(TEST_ALIGN_FILE7[0][1], 'clustal', alphabet=IUPAC.protein)
codon_aln = CodonAlign.build(pro_aln, p)
self.assertAlmostEquals(round(CodonAlign.mktest([codon_aln[1:12], codon_aln[12:16], codon_aln[16:]]), 4), 0.0021, places=4)
else:
warnings.warn('Numpy not installed. Skip MK test.')
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
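# Editor's note: a minimal sketch (not part of the original suite) of how the
# dN/dS values asserted above are typically consumed; cal_dn_ds is the same
# function the tests import from Bio.CodonAlign.CodonSeq.
#   dN, dS = cal_dn_ds(codon_seq1, codon_seq2, method='NG86')
#   omega = dN / dS if dS > 0 else float('inf')  # omega > 1 suggests positive selection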
| ["eric.talevich@gmail.com"] | eric.talevich@gmail.com | 5d9b97f9fa7cdeb46c91744e8e2f17732ef4e97c | b34d460115f9bccb4af7d3c06f163812a0499216 | /scripts/figures/figure9/pipeswitch_resnet152/remote_run_data.py | 3df103abceee1a1bd4fbca4c1bb7ad88e22b164a | ["Apache-2.0"] | permissive | Murphy-OrangeMud/PipeSwitch | 1df00aea7ea572dfc9dc633b081dec17002b266c | 1ef75ccf5d425bd8cb11c3ae5fe63d2f25423031 | refs/heads/main | 2023-09-05T20:06:37.606573 | 2021-10-29T02:45:30 | 2021-10-29T02:45:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py |
import os
import sys
from scripts.common.util import RunDocker
def main():
with RunDocker('pipeswitch:pipeswitch', 'figure9_pipeswitch_resnet152') as rd:
# Start the server: pipeswitch
rd.run('python PipeSwitch/scripts/run_data.py')
# Get and return the data point
if __name__ == '__main__':
main()
| ["zbai1@jhu.edu"] | zbai1@jhu.edu | 23b7ab17ff53b4c12b8eaa3c9051d83c2552f6cf | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03400/s805475463.py | 0f3c9a2c6dda3abb3a2be61f406ecf962cfc19ef | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py |
N = int(input())
D, X = map(int, input().split())
A = [int(input()) for _ in range(N)]
# -(-D // a) computes ceil(D / a) using only integer arithmetic.
ans = X + sum(-(-D // a) for a in A)
print(ans)
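# Editor's sketch: -(-x // y) is the standard integer ceiling-division idiom,
# equivalent to math.ceil(x / y) for positive integers without touching floats.
assert -(-7 // 2) == 4
assert -(-8 // 2) == 4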
| ["66529651+Aastha2104@users.noreply.github.com"] | 66529651+Aastha2104@users.noreply.github.com | ce6f2463dce6e2d2cca901bee58d7f51d1f13fad | 9163d7b7f9301b4a334ced0a91e28348fdaa8882 | /code/common_chinese_captcha.py | 06d2cf1347f942369eaad11b6240e747317aaedf | ["Apache-2.0"] | permissive | frankiegu/generate_click_captcha | 2c9c551bec69d5c40e6a1354ec6f7dbef18e6447 | 7fdb2cafe4c2b5d0245b9b8c4fc9a8b8dee5f3a9 | refs/heads/master | 2021-03-03T14:56:30.486088 | 2019-01-03T16:03:00 | 2019-01-03T16:03:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,490 | py |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
根据此文编写:https://www.cnblogs.com/whu-zeng/p/4855480.html
"""
import random
from PIL import Image, ImageDraw, ImageFont
import codecs
class RandomChar(object):
@staticmethod
def tran_unicode():
val = random.randint(0x4E00, 0x9FBF)
return chr(val)
@staticmethod
def tran_gb2312():
head = random.randint(0xB0, 0xCF)
body = random.randint(0xA, 0xF)
tail = random.randint(0, 0xF)
val = (head << 8) | (body << 4) | tail
string_value = "%x" % val
return codecs.decode(string_value, 'hex_codec').decode('gb2312')
class ImageChar(object):
def __init__(self, font_color=(0, 0, 0), size=(100, 40), font_path='C:/Windows/Fonts/simkai.ttf',
bg_color=(255, 255, 255), font_size=20):
self.size = size
self.fontPath = font_path
self.bgColor = bg_color
self.fontSize = font_size
self.fontColor = font_color
self.font = ImageFont.truetype(self.fontPath, self.fontSize)
self.image = Image.new('RGB', size, bg_color)
    def rotate(self):
        # Image.rotate returns a new image instead of rotating in place,
        # so the result must be assigned back for the rotation to take effect.
        self.image = self.image.rotate(random.randint(0, 90), expand=0)
def draw_text(self, pos, txt, fill):
draw = ImageDraw.Draw(self.image)
draw.multiline_text(xy=pos, text=txt, font=self.font, fill=fill)
del draw
@staticmethod
def rand_rgb():
return (random.randint(2, 220),
random.randint(2, 220),
random.randint(2, 220))
def rand_point(self):
width, height = self.size
return random.randint(0, width), random.randint(0, height)
def rand_line(self, num):
draw = ImageDraw.Draw(self.image)
for i in range(0, num):
draw.line([self.rand_point(), self.rand_point()], self.rand_rgb())
del draw
def rand_chinese(self, num):
gap = 5
start = 0
for i in range(0, num):
char = RandomChar().tran_gb2312()
print(char)
x = start + self.fontSize * i + random.randint(0, gap) + gap * i
self.draw_text((x, random.randint(0, 15)), char, self.rand_rgb())
self.rotate()
self.rand_line(5)
def save(self, path="test.jpg"):
self.image.save(path)
def show(self):
self.image.show()
def main():
ic = ImageChar(font_color=(100, 211, 90))
ic.rand_chinese(4)
ic.show()
if __name__ == '__main__':
main()
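# Editor's sketch of the bit arithmetic inside RandomChar.tran_gb2312:
# with head=0xB0, body=0xA, tail=0x1,
#   val = (0xB0 << 8) | (0xA << 4) | 0x1 == 0xB0A1
# i.e. the two-byte sequence b'\xb0\xa1', the first code point of the
# GB2312 level-1 common-character block.
assert (0xB0 << 8) | (0xA << 4) | 0x1 == 0xB0A1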
| ["nickliqian@outlook.com"] | nickliqian@outlook.com | 531922b7b5f5b4e54ef8043de90d4b28968c32b9 | 7b065a6b01905a2da6ad2d00b6398aad150dc6c3 | /基础知识/4.文件操作/7.os常用函数.py | d06953232ecbd2f1df1bbb92114763ae31cff50e | [] | no_license | ylwctyt/python3-1 | f4b0d8d6d0a7947170186b27bf51bc2f6e291ac7 | ca92e2dc9abc61265e48b7809cb12c3e572b5b6f | refs/heads/master | 2021-04-18T18:56:46.047193 | 2018-03-25T04:35:11 | 2018-03-25T04:35:11 | 126,699,773 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py |
import os
print(os.getcwd())  # current working directory
os.chdir("E:\\python3.6")  # change the working directory
print(os.getcwd())
# os.rmdir("E:\\a")  # remove an empty directory
print("path separator", os.sep)
print("operating system", os.name)  # OS name: 'nt' on Windows, 'posix' on Linux/Unix
print("environment variable -> path", os.getenv("path"))  # the PATH variable
print(os.listdir("E:\\python3.6"))  # all files and directories directly under the given directory
# os.remove("E:\\a.txt")  # delete the given file
# print(os.system("dir"))  # run a shell command (a cmd command on Windows)
print(os.linesep)  # line terminator of the current platform: '\r\n' on Windows, '\n' on Linux, '\r' on classic Mac
print("current directory", os.curdir)
# os.path.isfile() and os.path.isdir() check whether the given path is a file or a directory.
# os.path.exists() checks whether the given path actually exists.
# os.path.getsize(name): file size in bytes; returns 0 if name is a directory
# os.path.abspath(name): absolute path
# os.path.normpath(path): normalize the form of the path string
# os.path.split(path): split path into a (directory, filename) 2-tuple
# os.path.splitext(): split the extension off a filename
# os.path.join(path, name): join a directory with a filename or another directory
# os.path.basename(path): return the filename part of a path
# os.path.dirname(path): return the directory part of a path
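# Editor's sketch of the os.path helpers listed above (results shown assume
# Windows paths, matching the examples; "demo.py" is a hypothetical filename):
# os.path.split("E:\\python3.6\\demo.py")   -> ('E:\\python3.6', 'demo.py')
# os.path.splitext("demo.py")               -> ('demo', '.py')
# os.path.join("E:\\python3.6", "demo.py")  -> 'E:\\python3.6\\demo.py'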
| ["359405466@qq.com"] | 359405466@qq.com | 62e8a2dcd0549ab4d5f4a35d8c8309b2269f8da5 | f023692f73992354a0b7823d9c49ae730c95ab52 | /AtCoderBeginnerContest/1XX/168/B.py | f426b00aef6d2cb99e99ef54bcfb89c771769656 | [] | no_license | corutopi/AtCorder_python | a959e733f9a3549fab7162023e414ac2c99c4abe | a2c78cc647076071549e354c398155a65d5e331a | refs/heads/master | 2023-08-31T09:40:35.929155 | 2023-08-20T06:19:35 | 2023-08-20T06:19:35 | 197,030,129 | 1 | 0 | null | 2022-06-22T04:06:28 | 2019-07-15T15:57:34 | Python | UTF-8 | Python | false | false | 169 | py |
def solve():
K = int(input())
S = input()
if len(S) <= K:
print(S)
else:
print(S[:K] + '...')
if __name__ == '__main__':
solve()
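# Editor's sketch of the truncation rule above: S is printed unchanged when it
# fits within K characters; otherwise only the first K characters survive, with
# '...' marking the cut (example strings are illustrative, not from the task):
#   K=3, S='hello' -> 'hel...'
#   K=10, S='hi'   -> 'hi'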
| ["39874652+corutopi@users.noreply.github.com"] | 39874652+corutopi@users.noreply.github.com | 8a857035aeaade92bffc555d7d5b170b35ac26c6 | d02009dd0a0e7ecd9ff056551c8dd821c943e76e | /gen_captcha.py | 0f33e1018133aeb365e859895c999198a75bab20 | [] | no_license | budaLi/Captcha_recognition | a525aac7e2957b7a8b92db3965bd458c7696fd6c | dfcc690b22e65f754c5badd184e80dd5eb539665 | refs/heads/master | 2022-07-29T07:18:10.679213 | 2020-05-25T03:04:29 | 2020-05-25T03:04:29 | 265,818,745 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,399 | py |
# @Time : 2020/5/21 16:33
# @Author : Libuda
# @FileName: gen_captcha.py
# @Software: PyCharm
# Generate captcha training data.
from captcha.image import ImageCaptcha
from PIL import Image
import random
import time
import os
# captcha length
MAX_CAPTCHA = 4
# image size
IMAGE_HEIGHT = 60
IMAGE_WIDTH = 160
def random_captcha(captcha_length, filepath):
    """
    Build a random captcha string from digits and letters, render it with
    ImageCaptcha, and save the image to disk.
    :param captcha_length: length of the generated captcha string
    :param filepath: directory the generated image is saved into
    :return: the captcha string
    """
    res_captcha = ""
    # digits 0-9
    number_range = list(map(str, range(0, 10)))
    # uppercase letters 'A'-'Z' (chr(65)..chr(90); the original comment mislabeled these)
    upper_letter_range = list(map(chr, range(65, 91)))
    # lowercase letters 'a'-'z' (chr(97)..chr(122))
    lower_letter_range = list(map(chr, range(97, 123)))
    captcha_lis = number_range + upper_letter_range + lower_letter_range
    for _ in range(captcha_length):
        random_index = random.randint(0, len(captcha_lis) - 1)
        ca = captcha_lis[random_index]
        res_captcha += ca
    image = ImageCaptcha()
    captcha_image = Image.open(image.generate(res_captcha))
    # Append a timestamp so identical captcha strings do not overwrite each other.
    captcha_image.save(filepath + res_captcha + "_" + str(int(time.time())) + ".png")
    return res_captcha
if __name__ == '__main__':
    for _ in range(1000):
        res = random_captcha(4, "./dataset/test/")
        print(res)
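# Editor's check (sketch): the candidate pool assembled in random_captcha
# holds 10 digits + 26 uppercase + 26 lowercase = 62 characters.
assert len(list(map(str, range(10)))
           + list(map(chr, range(65, 91)))
           + list(map(chr, range(97, 123)))) == 62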
| ["1364826576@qq.com"] | 1364826576@qq.com | a316805c601c01ba1475af2f540d1fc2dea04ba3 | a2ac73af04a07bb070cd85c88778608b561dd3e4 | /addons/stock/wizard/stock_return_picking.py | ea50dd936aabfeaec618184c3cdde50284ae95c0 | [] | no_license | sannareddy/openerp-heimai | c849586d6099cc7548dec8b3f1cc7ba8be49594a | 58255ecbcea7bf9780948287cf4551ed6494832a | refs/heads/master | 2021-01-15T21:34:46.162550 | 2014-05-13T09:20:37 | 2014-05-13T09:20:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py |
/usr/share/pyshared/openerp/addons/stock/wizard/stock_return_picking.py
| ["549636719@qq.com"] | 549636719@qq.com | ad15fbb0175a19f70be3192e077cd87715f6cd31 | ac91ee2f10f428315f80134447794de370726c22 | /python/main_test_LMN_synchrony_control.py | e5fabef65b134dcfb593285bbfce6ddcca39c3d8 | [] | no_license | gviejo/LMNphysio | c34a49719e7094fb79b07b55980e186ac95b4fc2 | 24c9466d6a8a1deaf6b30f38388e90212af07c1e | refs/heads/master | 2023-09-01T08:05:02.240143 | 2023-08-18T19:29:01 | 2023-08-18T19:29:01 | 170,920,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,141 | py |
import numpy as np
import pandas as pd
import neuroseries as nts
from pylab import *
from wrappers import *
from functions import *
import sys
from matplotlib.colors import hsv_to_rgb
import hsluv
from pycircstat.descriptive import mean as circmean
from sklearn.manifold import SpectralEmbedding, Isomap
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans, SpectralClustering
from umap import UMAP
from matplotlib import gridspec
from itertools import product
import os  # used below via os.path.join; made explicit rather than relying on the star imports
import scipy.ndimage  # used by the gaussian_filter calls in the plotting section
###############################################################################################
# GENERAL infos
###############################################################################################
data_directory = '/mnt/DataGuillaume/'
datasets = np.loadtxt(os.path.join(data_directory,'datasets_LMN.list'), delimiter = '\n', dtype = str, comments = '#')
infos = getAllInfos(data_directory, datasets)
# A5002
datasets = ['LMN-ADN/A5002/'+s for s in infos['A5002'].index[1:-1]]
datasets.remove('LMN-ADN/A5002/A5002-200306A')
# A1407
# datasets = ['LMN/A1407/'+s for s in infos['A1407'].index[10:-2]]
# datasets.remove('LMN/A1407/A1407-190406')
mapping = []
ccall = []
tcurves = []
ahvcurves = []
cc_sync = []
cc_async = []
ahv_sync = []
ahv_async = []
tcurves_sync = []
tcurves_async = []
peaks = []
# for s in datasets:
for s in np.array(datasets)[[6,7,8]]:
# for s in np.array(datasets)[[6,7,8,9,10,11]]:
# for s in ['LMN-ADN/A5002/A5002-200304A']:
print(s)
name = s.split('/')[-1]
path = os.path.join(data_directory, s)
episodes = infos[s.split('/')[1]].filter(like='Trial').loc[s.split('/')[2]].dropna().values
events = list(np.where(episodes == 'wake')[0].astype('str'))
spikes, shank = loadSpikeData(path)
n_channels, fs, shank_to_channel = loadXML(path)
position = loadPosition(path, events, episodes)
wake_ep = loadEpoch(path, 'wake', episodes)
if 'A5002-200305A' in s:
wake_ep = wake_ep.loc[[1]]
else:
wake_ep = wake_ep.loc[[0]]
sws_ep = loadEpoch(path, 'sws')
rem_ep = loadEpoch(path, 'rem')
meanwavef, maxch = loadMeanWaveforms(path)
# TO RESTRICT BY SHANK
if 'A5002' in s:
spikes = {n:spikes[n] for n in np.where(shank==3)[0]}
meanwavef = meanwavef[list(spikes.keys())]
neurons = [name+'_'+str(n) for n in spikes.keys()]
meanwavef.columns = pd.Index(neurons)
######################
# TUNING CURVEs
######################
tcurve = computeAngularTuningCurves(spikes, position['ry'], wake_ep, 61)
tcurve = smoothAngularTuningCurves(tcurve, 20, 2)
tokeep, stat = findHDCells(tcurve, z = 10, p = 0.001)
tcurve.columns = pd.Index(neurons)
peak = pd.Series(index=tcurve.columns,data = np.array([circmean(tcurve.index.values, tcurve[i].values) for i in tcurve.columns]))
hd_neurons = [name+'_'+str(n) for n in tokeep]
tcurve = tcurve[hd_neurons]
peak = peak.loc[hd_neurons]
######################
# AHV CURVES
######################
ahvcurve = computeAngularVelocityTuningCurves(spikes, position['ry'], wake_ep, nb_bins = 61, norm=False)
ahvcurve = ahvcurve.rolling(window=10, win_type='gaussian', center= True, min_periods=1).mean(std = 1)
ahvcurve.columns = pd.Index(neurons)
ahvcurve = ahvcurve[hd_neurons]
######################
# NORMAL CROSS-CORR
######################
spks = spikes
binsize = 0.5
nbins = 100
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
cc = pd.DataFrame(index = times, columns = list(product(hd_neurons, hd_neurons)))
for i,j in cc.columns:
if i != j:
spk1 = spks[int(i.split("_")[1])].restrict(wake_ep).as_units('ms').index.values
spk2 = spks[int(j.split("_")[1])].restrict(wake_ep).as_units('ms').index.values
tmp = crossCorr(spk1, spk2, binsize, nbins)
cc[(i,j)] = tmp
cc = cc.dropna(1)
# ccfast = cc.rolling(window=50, win_type='gaussian', center= True, min_periods=1).mean(std = 2)
# ccslow = cc.rolling(window=50, win_type='gaussian', center= True, min_periods=1).mean(std = 20)
# ccmod = (cc - ccslow)/cc.std(0)
# ccmod2 = (ccfast - ccslow)/ccfast.std(0)
######################
# SYNC/ASYNC TUNING CURVES
######################
spks = [spikes[int(n.split('_')[1])].restrict(wake_ep).as_units('ms').index.values for n in hd_neurons]
spksync, spkasync = getSpikesSyncAsync(spks, hd_neurons)
tcurve_s = computeAngularTuningCurves(spksync, position['ry'], wake_ep, 61)
tcurve_a = computeAngularTuningCurves(spkasync, position['ry'], wake_ep, 61)
ahv_s = computeAngularVelocityTuningCurves(spksync, position['ry'], wake_ep, nb_bins = 31, norm=True)
ahv_a = computeAngularVelocityTuningCurves(spkasync, position['ry'], wake_ep, nb_bins = 31, norm=True)
##########################
# SAMPLING SPIKES FROM ANGULAR TUNING CURVES
##########################
spikes_random = sampleSpikesFromAngularPosition(tcurve, position['ry'], wake_ep)
######################
# RANDOM SYNC/ASYNC TUNING CURVES
######################
tcurve_r_s = pd.DataFrame(columns = cc.columns)
tcurve_r_a = pd.DataFrame(columns = cc.columns)
for n in spikes_random.keys():
for m in spikes_random.keys():
if m != n:
spksync = {}
spkasync = {}
for i in range(len(spikes_random[n])):
spks = [spikes[int(m.split('_')[1])].restrict(wake_ep).as_units('ms').index.values,
spikes_random[n][i].restrict(wake_ep).as_units('ms').index.values]
tmp1, tmp2 = getSpikesSyncAsync(spks, [m,n])
spksync[i] = tmp1[(m,n)]
spkasync[i] = tmp2[(m,n)]
tc_s = computeAngularTuningCurves(spksync, position['ry'], wake_ep, 61)
tc_a = computeAngularTuningCurves(spkasync, position['ry'], wake_ep, 61)
tcurve_r_s[(m,n)] = tc_s.mean(1)
tcurve_r_a[(m,n)] = tc_a.mean(1)
##########################
# SAMPLING SPIKES FROM AHV TUNING CURVES
##########################
spikes_random = sampleSpikesFromAngularVelocity(ahvcurve, position['ry'], wake_ep)
######################
# RANDOM SYNC/ASYNC TUNING CURVES
######################
ahv_r_s = pd.DataFrame(columns = cc.columns)
ahv_r_a = pd.DataFrame(columns = cc.columns)
for n in spikes_random.keys():
for m in spikes_random.keys():
if m != n:
spksync = {}
spkasync = {}
for i in range(len(spikes_random[n])):
spks = [spikes[int(m.split('_')[1])].restrict(wake_ep).as_units('ms').index.values,
spikes_random[n][i].restrict(wake_ep).as_units('ms').index.values]
tmp1, tmp2 = getSpikesSyncAsync(spks, [m,n])
spksync[i] = tmp1[(m,n)]
spkasync[i] = tmp2[(m,n)]
av_s = computeAngularVelocityTuningCurves(spksync, position['ry'], wake_ep, 61, norm=False)
av_a = computeAngularVelocityTuningCurves(spkasync, position['ry'], wake_ep, 61, norm=False)
ahv_r_s[(m,n)] = av_s.mean(1)
ahv_r_a[(m,n)] = av_a.mean(1)
#############################
# diffs = pd.Series(index=cc.columns, data = [peak.loc[p[0]] - peak.loc[p[1]] for p in cc.columns])
# diffs[diffs>np.pi] -= 2*np.pi
# diffs[diffs<-np.pi] += 2*np.pi
# a = (tcurve_s - tcurve_r_s)/tcurve_r_s
# plot(diffs.values, a.mean(0).values, 'o')
# for p in idx:
# gs = gridspec.GridSpec(2,4)
# figure()
# subplot(gs[0,0])
# plot(cc[p])
# title(p)
# subplot(gs[0,1], projection = 'polar')
# plot(tcurve[list(p)])
# subplot(gs[0,2], projection = 'polar')
# plot(tcurve_a[p], '-', label='async', color = 'red')
# plot(tcurve_r_a[p], '--', label='random async', color = 'red')
# legend()
# subplot(gs[0,3], projection = 'polar')
# plot(tcurve_r_s[p], '--', label='random sync', color='green')
# plot(tcurve_s[p], '-', label='sync', color='green')
# legend()
# subplot(gs[1,0])
# for i,j,c in zip(p,range(2),['red', 'blue']):
# tmp = meanwavef[i].values.reshape(40,16)
# plot(np.arange(0+j*40,40+j*40),tmp+np.arange(16)*200, color = c)
# # subplot(gs[1,1])
# # plot(ahvcurve[list(p)])
# # subplot(gs[1,2])
# # plot(ahv_a[p], '-', label='async', color = 'red')
# # plot(ahv_r_a[p], '--', label='random async', color = 'red')
# # legend()
# # subplot(gs[1,3])
# # plot(ahv_r_s[p], '--', label='random sync', color='green')
# # plot(ahv_s[p], '-', label='sync', color='green')
# # legend()
# show(block=True)
# plot(tcurve[p[1]]/tcurve[p[1]].max())
# plot(tcurve_s[p]/tcurve_s[p].max(), color = 'green', label = 'sync')
# plot(tcurve_a[p]/tcurve_a[p].max(), color = 'red', label = 'async')
# plot(tcurve_r_s[p]/tcurve_r_s[p].max(), '--', color = 'green', label = 'sync')
# plot(tcurve_r_a[p]/tcurve_r_a[p].max(), '--', color = 'red', label = 'async')
######################
# TOSAVE
######################
tcurves.append(tcurve)
ahvcurves.append(ahvcurve)
ccall.append(cc)
# cc_sync.append(cc_s)
# cc_async.append(cc_a)
ahv_sync.append(ahv_s)
ahv_async.append(ahv_a)
tcurves_sync.append(tcurve_s)
tcurves_async.append(tcurve_a)
peaks.append(peak)
tcurves = pd.concat(tcurves, axis=1)
ahvcurves = pd.concat(ahvcurves, axis=1)
ccall = pd.concat(ccall, axis=1)
# cc_sync = pd.concat(cc_sync, axis=1)
# cc_async = pd.concat(cc_async, axis=1)
ahv_sync = pd.concat(ahv_sync, axis=1)
ahv_async = pd.concat(ahv_async, axis=1)
tcurves_sync = pd.concat(tcurves_sync, axis=1)
tcurves_async = pd.concat(tcurves_async, axis=1)
peaks = pd.concat(peaks)
sys.exit()
#################
# SMOOTHING AHV
#################
ahv_sync = ahv_sync.rolling(window=10, win_type='gaussian', center= True, min_periods=1).mean(std = 1)
ahv_async = ahv_async.rolling(window=10, win_type='gaussian', center= True, min_periods=1).mean(std = 1)
ahv2 = pd.concat([ahvcurves[p[1]] for p in ahv_sync.columns], axis=1)
ahv2.columns = ahv_sync.columns
diffs = pd.Series(index = ahv_sync.columns, data = [peaks.loc[p[0]]-peaks.loc[p[1]] for p in ahv_sync.columns])
diffs[diffs>np.pi] -= 2*np.pi
diffs[diffs<-np.pi] += 2*np.pi
dahv_sync = ahv_sync - ahv2
dahv_async = ahv_async - ahv2
tmp = np.vstack((dahv_sync.values, dahv_async.values))
H = (diffs+np.pi)/(2*np.pi)
HSV = np.vstack((H, np.ones_like(H), np.ones_like(H))).T
RGB = hsv_to_rgb(HSV)
ump = UMAP(n_neighbors = 50, min_dist = 1e-6).fit_transform(tmp.T)
# kmeans = KMeans(n_clusters = 5).fit(ump)
# labels = kmeans.labels_
clustering = SpectralClustering(n_clusters = 3).fit(ump)
labels = clustering.labels_
# scatter(ump[:,0], ump[:,1], c = labels)
figure()
gs = gridspec.GridSpec(4,1+len(np.unique(labels)))
subplot(gs[0,0])
scatter(ump[:,0], ump[:,1], c = RGB)
subplot(gs[1,0])
scatter(ump[:,0], ump[:,1], c = labels)
for i in range(len(np.unique(labels))):
subplot(gs[0,i+1])
plot(dahv_sync.iloc[:,labels==i], color = 'grey', alpha = 0.5, linewidth = 1)
plot(dahv_sync.iloc[:,labels==i].mean(1), color = 'green', linewidth = 3)
subplot(gs[1,i+1])
plot(dahv_async.iloc[:,labels==i], color = 'grey', alpha = 0.5, linewidth = 1)
plot(dahv_async.iloc[:,labels==i].mean(1), color = 'red', linewidth = 3)
subplot(gs[2,i+1])
plot(dahv_sync.iloc[:,labels==i].mean(1), color = 'green', linewidth = 3)
plot(dahv_async.iloc[:,labels==i].mean(1), color = 'red', linewidth = 3)
subplot(gs[3,i+1], projection = 'polar')
hist(diffs.loc[dahv_sync.columns.values[labels==i]], 30)
subplot(gs[2,0])
tmp2 = ahv_sync.loc[-2:2][diffs.abs().sort_values().index].values.T
imshow(scipy.ndimage.gaussian_filter(tmp2, 2), aspect = 'auto')
subplot(gs[3,0])
tmp3 = ahv_async.loc[-2:2][diffs.abs().sort_values().index].values.T
imshow(scipy.ndimage.gaussian_filter(tmp3, 2), aspect = 'auto')
sys.exit()
# ccmod2 = ccmod2[ccmod2.loc[-3:3].mean().sort_values().index]
# p = ccmod2.columns[-4]
p = ccmod2.columns[-9]
# p = ('A5002-200304A_67', 'A5002-200304A_70')
for p in ccmod2.columns[::-1]:
# for p in [('A5002-200304A_67', 'A5002-200304A_81')]:
print(p)
# COMPUTE TUNING CURVES WITHOUT SYNCHRONE SPIKES
	spk1 = spikes[int(p[0].split("_")[1])].restrict(wake_ep).as_units('ms').index.values
	spk2 = spikes[int(p[1].split("_")[1])].restrict(wake_ep).as_units('ms').index.values
spk3 = []
spk4 = []
for t in spk2:
if np.sum(np.abs(t-spk1)<3):
spk3.append(t)
else:
spk4.append(t)
cc_less = crossCorr(spk1, np.array(spk4), binsize, nbins)
cc_less = (cc_less - ccslow[p])/cc_less
spk3 = nts.Ts(t = np.array(spk3), time_units = 'ms')
spk4 = nts.Ts(t = np.array(spk4), time_units = 'ms')
dec_spikes = {0:spk3,1:spk4}
tcurves2 = computeAngularTuningCurves(dec_spikes, position['ry'], wake_ep, 61)
ahvcurves2 = computeAngularVelocityTuningCurves(dec_spikes, position['ry'], wake_ep, nb_bins = 30, norm=True)
figure(figsize=(10,6))
subplot(231)
plot(ccmod[p])
plot(ccmod2[p])
plot(cc_less)
subplot(232, projection = 'polar')
plot(tcurves[list(p)])
plot(tcurves2[0], '--', label = 'synchrone')
plot(tcurves2[1], '--', label = 'asynchrone')
legend()
subplot(233)
plot(tcurves[list(p)])
plot(tcurves2[0], '--', label = 'synchrone')
plot(tcurves2[1], '--', label = 'asynchrone')
legend()
subplot(234)
for i,j,c in zip(p,range(2),['red', 'blue']):
tmp = meanwavef[i].values.reshape(40,16)
plot(np.arange(0+j*40,40+j*40),tmp+np.arange(16)*200, color = c)
subplot(235)
plot(ahvcurves[list(p)])
plot(ahvcurves2[0], '--', label = 'synchrone')
plot(ahvcurves2[1], '--', label = 'asynchrone')
subplot(236, projection = 'polar')
tmp = tcurves[list(p)]
plot(tmp/tmp.max())
plot(tcurves2[0]/tcurves2[0].max(), '--', label = 'synchrone')
plot(tcurves2[1]/tcurves2[1].max(), '--', label = 'asynchrone')
show(block=True)
sys.exit()
ccall.append(cc)
# figure()
# gs = gridspec.GridSpec(2,4)
# for i,j,p in zip([0,0,1,1],[2,3,2,3],np.array_split(ccmod2.columns.values, 4)):
# subplot(gs[i,j])
# plot(ccmod2[p])
# ylim(-4,4)
# subplot(gs[:,0:2])
# imshow(ccmod2.values.T)
# figure()
# gs = gridspec.GridSpec(2,4)
# for i,j in enumerate([2,12,14,21]):
# p = ccmod.columns[j]
# subplot(gs[0,i], projection = 'polar')
# tmp = tcurves[list(p)]
# plot(tmp/tmp.max())
# subplot(gs[1,i])
# plot(ccmod[p], label = p)
# plot(ccmod2[p])
# legend()
# SYNAPTIC DETECTION
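# Screening logic: a difference-of-Gaussians on the cross-correlograms (narrow
# std=2-bin smoothing minus broad std=10-bin smoothing) isolates short-latency
# peaks; these are z-scored by the fast trace's std, and the pairs with the top
# and bottom 1% of means over the 0-3 lag window are kept as candidate
# excitatory/inhibitory pairs.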
ccfast = ccall.rolling(window=50, win_type='gaussian', center= True, min_periods=1).mean(std = 2)
ccslow = ccall.rolling(window=50, win_type='gaussian', center= True, min_periods=1).mean(std = 10)
cc = (ccfast - ccslow)/ccfast.std(0)
idx = cc.loc[0:3].mean(0).sort_values().index.values
cc = cc[idx]
pairs = idx[-int(len(idx)*0.01):]
pairs2 = idx[:int(len(idx)*0.01)]
###########################################################################################
# HD NEURONS
###########################################################################################
hd_neurons = mapping[mapping['hd']==1].index.values
hd_pairs = [p for p in pairs if p[0] in hd_neurons and p[1] in hd_neurons]
hd_pairs2 = [p for p in pairs2 if p[0] in hd_neurons and p[1] in hd_neurons]
hdp_neurons = np.unique(np.array(hd_pairs).flatten())
H = mapping.loc[hd_neurons, 'peak'].values/(2*np.pi)
HSV = np.vstack((H, np.ones_like(H), np.ones_like(H))).T
RGB = hsv_to_rgb(HSV)
RGB = pd.DataFrame(index = hd_neurons, data = RGB)
figure()
subplot(221)
scatter(mapping['y'], mapping['x'], color = 'grey', alpha = 0.5, ec = None)
scatter(mapping.loc[hd_neurons,'y'], mapping.loc[hd_neurons,'x'], c = RGB.values)
scatter(mapping.loc[hdp_neurons,'y'], mapping.loc[hdp_neurons,'x'], c = 'white', s = 1)
subplot(222)
plot(np.cos(mapping.loc[hd_neurons,'peak']), np.sin(mapping.loc[hd_neurons,'peak']), '.', color = 'grey')
prop = dict(arrowstyle="-|>,head_width=0.2,head_length=0.4",
shrinkA=0,shrinkB=0, color = 'grey', alpha = 0.4)
for p in hd_pairs:
# plot(np.cos(mapping.loc[list(p),'peak']), np.sin(mapping.loc[list(p),'peak']), '-', alpha = 0.5, color = 'grey')
xystart = np.array([np.cos(mapping.loc[p[1],'peak']),np.sin(mapping.loc[p[1],'peak'])])
xyend = np.array([np.cos(mapping.loc[p[0],'peak']),np.sin(mapping.loc[p[0],'peak'])])
dxy = xyend - xystart
xyend = xystart + 0.9*dxy
annotate('', xyend, xystart, arrowprops=prop)
subplot(224)
hd_pairs_wtrep = [p for p in hd_pairs if (p[1],p[0]) not in list(hd_pairs)]
plot(np.cos(mapping.loc[hd_neurons,'peak']), np.sin(mapping.loc[hd_neurons,'peak']), '.', color = 'grey')
prop = dict(arrowstyle="-|>,head_width=0.2,head_length=0.4",
shrinkA=0,shrinkB=0, color = 'grey', alpha = 0.4)
for p in hd_pairs_wtrep:
# plot(np.cos(mapping.loc[list(p),'peak']), np.sin(mapping.loc[list(p),'peak']), '-', alpha = 0.5, color = 'grey')
xystart = np.array([np.cos(mapping.loc[p[1],'peak']),np.sin(mapping.loc[p[1],'peak'])])
xyend = np.array([np.cos(mapping.loc[p[0],'peak']),np.sin(mapping.loc[p[0],'peak'])])
dxy = xyend - xystart
xyend = xystart + 0.9*dxy
annotate('', xyend, xystart, arrowprops=prop)
subplot(223, projection = 'polar')
alpha = np.array([mapping.loc[list(p),'peak'].diff().values[-1] for p in hd_pairs])
alpha[alpha>np.pi] = 2*np.pi - alpha[alpha>np.pi]
alpha[alpha<-np.pi] = 2*np.pi + alpha[alpha<-np.pi]
bins = np.linspace(-np.pi, np.pi, 24)
x,_ = np.histogram(alpha, bins)
tmp = mapping.loc[hd_neurons, 'peak'].values
tmp = np.vstack(tmp) - tmp
tmp = tmp[np.triu_indices_from(tmp)]
tmp[tmp>np.pi] -= 2*np.pi
tmp[tmp<-np.pi] += 2*np.pi
yc,_ = np.histogram(tmp, bins)
# hist(alpha, bins)
plot(bins[0:-1], x/(yc+1))
[authors: guillaume.viejo@gmail.com]

--- /tests/test_samples.py | repo: GoogleCloudPlatform/functions-framework-python | license: Apache-2.0 (permissive) | Python | 1,207 bytes ---
import pathlib
import sys
import time
import docker
import pytest
import requests
EXAMPLES_DIR = pathlib.Path(__file__).resolve().parent.parent / "examples"
@pytest.mark.skipif(
sys.platform != "linux", reason="docker only works on linux in GH actions"
)
class TestSamples:
def stop_all_containers(self, docker_client):
containers = docker_client.containers.list()
for container in containers:
container.stop()
@pytest.mark.slow_integration_test
def test_cloud_run_http(self):
client = docker.from_env()
self.stop_all_containers(client)
TAG = "cloud_run_http"
        client.images.build(path=str(EXAMPLES_DIR / "cloud_run_http"), tag=TAG)
container = client.containers.run(image=TAG, detach=True, ports={8080: 8080})
timeout = 10
success = False
        while not success and timeout > 0:
            try:
                response = requests.get("http://localhost:8080")
                if response.text == "Hello world!":
                    success = True
            except requests.RequestException:
                # the server inside the container may not be accepting connections yet
                pass
time.sleep(1)
timeout -= 1
container.stop()
assert success
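# Illustrative invocation (assuming the "slow_integration_test" marker is
# registered in the project's pytest configuration):
#   pytest -m slow_integration_test tests/test_samples.py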
[authors: GoogleCloudPlatform.noreply@github.com]

--- /Prgrammers/lv1/이상한 문자 만들기.py | repo: jmseb3/bakjoon | license: none | Python | 243 bytes ---
def solution(s):
answer = ''
words = s.split(' ')
for word in words:
for idx in range(len(word)):
answer += word[idx].upper() if idx % 2 == 0 else word[idx].lower()
answer += ' '
return answer[:-1]
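# Quick sanity check, using the example from the original problem statement
# (alternate upper/lower case within each word, spaces preserved):
if __name__ == "__main__":
    assert solution("try hello world") == "TrY HeLlO WoRlD"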
[authors: jmseb3@naver.com]

--- /Retraining_bot.py | repo: santoshikalaskar/chatbot_report_generation_google_App_script | license: none | Python | 4,444 bytes ---
from datetime import date
from datetime import timedelta
import pandas as pd
from google_sheet_handler import Google_sheet_handler
import logger_hander
class ReTrain_bot:
# initialize RASA API
def __init__(self):
pass
def fetch_data(self, google_sheet, yesterday):
"""
        This function will fetch the records for a specific date from the google sheet and return them as lists.
        :param google_sheet: original google sheet; yesterday: date string
        :return: data columns as lists
"""
list_of_records = google_sheet.get_all_records()
Question_list = []
Email_id_list = []
Bot1_intent_list = []
bot2_intent_list = []
Actual_intent_must_be = []
Bot1_Result_List = []
Bot2_Result_List = []
for records in list_of_records:
if ( records.get('Date') == yesterday and records.get('Question_is_proper_or_not') == "Right" and records.get('Bot1_Result') == "Wrong" ):
question = records.get('Question')
email_id = records.get('Email')
Bot1_intent = records.get('BOT1_Intent')
Bot2_intent = records.get('BOT2_Intent')
Actual_intent = records.get('Actual_intent_must_be')
Bot1_Result = records.get('Bot1_Result')
Bot2_Result = records.get('Bot2_Result')
Question_list.append(question)
Email_id_list.append(email_id)
Bot1_intent_list.append(Bot1_intent)
bot2_intent_list.append(Bot2_intent)
Actual_intent_must_be.append(Actual_intent)
Bot1_Result_List.append(Bot1_Result)
Bot2_Result_List.append(Bot2_Result)
logger.info("Data fetched from existing sheet Successfully..!")
return Email_id_list, Question_list, Bot1_intent_list, bot2_intent_list, Actual_intent_must_be, Bot1_Result_List, Bot2_Result_List
def find_yesterday_date(self):
"""
        This function will find yesterday's date.
        :param: none
        :return: yesterday's date as a formatted string (e.g. 'Sep 13, 2020')
"""
today = date.today()
yesterday = today - timedelta(days=1)
yesterday = yesterday.strftime('%b %d, %Y')
return yesterday
def check_cell_name_valid_or_not(self, sheet, List_cell_name):
return Google_sheet_handler.find_cell(self, sheet, List_cell_name)
if __name__ == "__main__":
# create instances
retrain_obj = ReTrain_bot()
sheet_handler = Google_sheet_handler()
logger = logger_hander.set_logger()
# get google sheet
sheet = sheet_handler.call_sheet("Chatbot_Daily_Report","BL_BOT_Compare")
if sheet != 'WorksheetNotFound':
yesterday = retrain_obj.find_yesterday_date()
yesterday = "Sep 13, 2020"
print(yesterday)
List_of_cell_name = ['Date','Email','Question','BOT1_Intent','BOT2_Intent','Question_is_proper_or_not', 'Actual_intent_must_be', 'Bot1_Result', 'Bot2_Result']
# check cell name is valid or not
flag = retrain_obj.check_cell_name_valid_or_not(sheet,List_of_cell_name)
if flag:
Email_id_list, Question_list, Bot1_intent_list, bot2_intent_list, Actual_intent_must_be, Bot1_Result_List, Bot2_Result_List = retrain_obj.fetch_data(sheet,yesterday)
if len(Question_list) == 0:
logger.info("No interaction happened in yesterday.")
else:
                record = {'Date': yesterday, 'Email': Email_id_list, 'Questions': Question_list, 'bot1_intent': Bot1_intent_list,
                        'bot2_intent': bot2_intent_list, 'Actual_intent_must_be': Actual_intent_must_be, 'Bot1_Result_List': Bot1_Result_List, 'Bot2_Result_List': Bot2_Result_List}
                dataframe = pd.DataFrame(record)
                print(dataframe)
                df_list_value = dataframe.values.tolist()
# get google sheet to store result
created_sheet = sheet_handler.call_sheet("Chatbot_Daily_Report", "Sheet12")
if created_sheet != 'WorksheetNotFound':
output = sheet_handler.save_output_into_sheet(created_sheet, df_list_value)
if output == True:
logger.info(" Sheet Updated Successfully...!!!")
else:
logger.error(" Something went wrong while Updating sheet ")
[authors: kalaskars1996@gmail.com]

--- /manage.py | repo: mwesterhof/coolsearch | license: none | Python | 630 bytes ---
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'coolsearch.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
[authors: m.westerhof@lukkien.com]

--- /leetcode/203-Remove_Linked_List_Elements.py | repo: JFluo2011/leetcode | license: none | Python | 819 bytes ---
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def removeElements(self, head, val):
"""
:type head: ListNode
:type val: int
:rtype: ListNode
"""
prev_head = ListNode(0)
prev_head.next = head
current = head
prev = prev_head
while current:
while current and current.val == val:
current = current.next
prev.next = current
if current:
prev = current
current = current.next
return prev_head.next
def main():
    # Test cases from the original docstring: [] with val 1, [1] with val 1,
    # and [1, 2, 6, 3, 4, 5, 6] with val 6 (expected result [1, 2, 3, 4, 5]).
    def build(values):
        head = ListNode(0)
        node = head
        for v in values:
            node.next = ListNode(v)
            node = node.next
        return head.next
    sol = Solution()
    assert sol.removeElements(build([]), 1) is None
    assert sol.removeElements(build([1]), 1) is None
    node = sol.removeElements(build([1, 2, 6, 3, 4, 5, 6]), 6)
    result = []
    while node:
        result.append(node.val)
        node = node.next
    assert result == [1, 2, 3, 4, 5]
if __name__ == '__main__':
main()
[authors: luojianfeng2011@163.com]

--- /Flask/09-REST-APIs-with-Flask/03-REST-API-Database/user.py | repo: Sandy1811/python-for-all | license: Apache-2.0 (permissive) | Python | 209 bytes ---
class User:
def __init__(self,id,username,password):
self.id = id
self.username = username
self.password = password
def __str__(self):
return f"User ID: {self.id}"
[authors: sndp1811@gmail.com]

--- /08 Recursion and Dynamic Programming/Recursion and DP book problems/03.py | repo: AsadullahFarooqi/CTCI | license: none | Python | 110 bytes ---
# Recently I helped a guy from Boston with Algorithms/DS and he got an internship at Twitter.
def shortest_path():
    pass  # TODO: implement
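# A minimal sketch of what the stub above presumably aims at: breadth-first
# search for the shortest path in an unweighted graph. The name, signature, and
# graph representation (adjacency dict) are assumptions; the original file
# gives no specification.
from collections import deque

def bfs_shortest_path(graph, start, goal):
    """Return the minimum number of edges from start to goal, or -1 if unreachable."""
    queue = deque([(start, 0)])
    seen = {start}
    while queue:
        node, dist = queue.popleft()
        if node == goal:
            return dist
        for neighbor in graph.get(node, ()):
            if neighbor not in seen:
                seen.add(neighbor)
                queue.append((neighbor, dist + 1))
    return -1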
[authors: asadullah.itcgcs@gmail.com]

--- /Python/Python Learning/Built-in Constants.py | repo: OryxLib/Oxyx.Portal | license: none | Python | 86 bytes ---
# Built-in constants
True
False
None
NotImplemented
Ellipsis
# The same as ... ,
__debug__
# True unless the interpreter was started with the -O option
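# Illustrative checks for the constants above (safe to run as-is):
assert ... is Ellipsis
assert NotImplemented is not None
print(__debug__)  # True unless Python was started with -O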
[authors: 407815932@qq.com]

--- /mfb/binWin/2.69/python/lib/numpy/lib/function_base.py | repo: BlenderCN-Org/FlipbookApp | license: none | Python | 119,019 bytes ---
__docformat__ = "restructuredtext en"
__all__ = ['select', 'piecewise', 'trim_zeros', 'copy', 'iterable',
'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex',
'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov',
'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning',
'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc',
'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp',
'add_newdoc_ufunc']
import warnings
import types
import sys
import numpy.core.numeric as _nx
from numpy.core import linspace
from numpy.core.numeric import ones, zeros, arange, concatenate, array, \
asarray, asanyarray, empty, empty_like, ndarray, around
from numpy.core.numeric import ScalarType, dot, where, newaxis, intp, \
integer, isscalar
from numpy.core.umath import pi, multiply, add, arctan2, \
frompyfunc, isnan, cos, less_equal, sqrt, sin, mod, exp, log10
from numpy.core.fromnumeric import ravel, nonzero, choose, sort, mean
from numpy.core.numerictypes import typecodes, number
from numpy.core import atleast_1d, atleast_2d
from numpy.lib.twodim_base import diag
from ._compiled_base import _insert, add_docstring
from ._compiled_base import digitize, bincount, interp as compiled_interp
from .arraysetops import setdiff1d
from .utils import deprecate
from ._compiled_base import add_newdoc_ufunc
import numpy as np
import collections
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try: iter(y)
except: return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None, density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi+0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins+1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
block = 65536
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero,], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True, returns
the bin density, ie, the bin count divided by the bin hypervolume.
weights : array_like (N,), optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights for
the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal'\
' to the dimension of the sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError("Element at index %s in `bins` should be "
"a positive integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1)
else:
edges[i] = asarray(bins[i], float)
nbin[i] = len(edges[i])+1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError("""
Found bin edge of size <= 0. Did you specify `bins` with
non-monotonic sequence?""")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:,i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1],
decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i,j)
ni[i],ni[j] = ni[j],ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1,-1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : {array_type, double}
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix) :
a = np.asarray(a)
if weights is None :
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else :
a = a + 0.0
wgt = np.array(weights, dtype=a.dtype, copy=0)
# Sanity checks
if a.shape != wgt.shape :
if axis is None :
raise TypeError(
"Axis must be specified when shapes of a "\
"and weights differ.")
if wgt.ndim != 1 :
raise TypeError(
"1D weights expected when shapes of a and "\
"weights differ.")
if wgt.shape[0] != a.shape[axis] :
raise ValueError(
"Length of weights not compatible with "\
"specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis)
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
def asarray_chkfinite(a, dtype=None, order=None):
"""
Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
    asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have undefined values.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.arange(6) - 2.5
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray)):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
n = len(condlist)
n2 = len(choicelist)
if n2 != n:
raise ValueError(
"list of cases must be same length as list of conditions")
choicelist = [default] + choicelist
S = 0
pfac = 1
for k in range(1, n+1):
S += k * pfac * asarray(condlist[k-1])
if k < n:
pfac *= (1-asarray(condlist[k-1]))
# handle special case of a 1-element condition but
# a multi-element choice
if type(S) in ScalarType or max(asarray(S).shape)==1:
pfac = asarray(1)
for k in range(n2+1):
pfac = pfac + asarray(choicelist[k])
if type(S) in ScalarType:
S = S*ones(asarray(pfac).shape, type(S))
else:
S = S*ones(asarray(pfac).shape, S.dtype)
return choose(S, tuple(choicelist))
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:ndarray.copy are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using central differences in the interior
and first differences at the boundaries. The returned gradient hence has
the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
`*varargs` : scalars
0, 1, or N scalars specifying the sample distances in each direction,
that is: `dx`, `dy`, `dz`, ... The default distance is 1.
Returns
-------
gradient : ndarray
N arrays of the same shape as `f` giving the derivative of `f` with
respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]),
array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
# use central differences on interior and first differences on endpoints
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M' :
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm' :
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
for axis in range(N):
# select out appropriate parts for this dimension
out = np.empty_like(f, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
out[slice1] = (f[slice2] - f[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (f[1] - f[0])
out[slice1] = (f[slice2] - f[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (f[-1] - f[-2])
out[slice1] = (f[slice2] - f[slice3])
# divide by step size
outvals.append(out / dx[axis])
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The `n` order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, defaults is `fp[-1]`.
Returns
-------
y : {float, ndarray}
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasingness is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : {ndarray, scalar}
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd+pi, 2*pi)-pi
_nx.copyto(ddmod, pi, where=(ddmod==-pi) & (dd > 0))
    ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd)<discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a,copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.: break
else: first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.: break
else: last = last - 1
return filt[first:last]
import sys
if sys.hexversion < 0x2040000:
from sets import Set as set
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True],tmp[1:]!=tmp[:-1]))
return tmp[idx]
except AttributeError:
items = list(set(x))
items.sort()
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
Boolean mask array. Must have the same size as `a`.
vals : 1-D sequence
Values to put into `a`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def _nanop(op, fill, a, axis=None):
"""
General operation on arrays with not-a-number values.
Parameters
----------
op : callable
Operation to perform.
fill : float
NaN values are set to fill before doing the operation.
a : array-like
Input array.
axis : {int, None}, optional
Axis along which the operation is computed.
By default the input is flattened.
Returns
-------
y : {ndarray, scalar}
Processed data.
"""
y = array(a, subok=True)
# We only need to take care of NaN's in floating point arrays
dt = y.dtype
if np.issubdtype(dt, np.integer) or np.issubdtype(dt, np.bool_):
return op(y, axis=axis)
mask = isnan(a)
# y[mask] = fill
# We can't use fancy indexing here as it'll mess w/ MaskedArrays
# Instead, let's fill the array directly...
np.copyto(y, fill, where=mask)
res = op(y, axis=axis)
mask_all_along_axis = mask.all(axis=axis)
# Along some axes, only nan's were encountered. As such, any values
# calculated along that axis should be set to nan.
if mask_all_along_axis.any():
if np.isscalar(res):
res = np.nan
else:
res[mask_all_along_axis] = np.nan
return res
def nansum(a, axis=None):
"""
Return the sum of array elements over a given axis treating
Not a Numbers (NaNs) as zero.
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the sum is computed. The default is to compute
the sum of the flattened array.
Returns
-------
y : ndarray
An array with the same shape as a, with the specified axis removed.
If a is a 0-d array, or if axis is None, a scalar is returned with
the same dtype as `a`.
See Also
--------
numpy.sum : Sum across array including Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
If positive or negative infinity are present the result is positive or
negative infinity. But if both positive and negative infinity are present,
the result is Not A Number (NaN).
    Arithmetic is modular when using integer types (all elements of `a` must
    be finite, i.e. no NaNs or infinities, since those are floating point
    values), and no error is raised on overflow.
Examples
--------
>>> np.nansum(1)
1
>>> np.nansum([1])
1
>>> np.nansum([1, np.nan])
1.0
>>> a = np.array([[1, 1], [1, np.nan]])
>>> np.nansum(a)
3.0
>>> np.nansum(a, axis=0)
array([ 2., 1.])
When positive infinity and negative infinity are present
>>> np.nansum([1, np.nan, np.inf])
inf
>>> np.nansum([1, np.nan, np.NINF])
-inf
>>> np.nansum([1, np.nan, np.inf, np.NINF])
nan
"""
return _nanop(np.sum, 0, a, axis)
def nanmin(a, axis=None):
"""
Return the minimum of an array or minimum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired.
axis : int, optional
        Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
Returns
-------
nanmin : ndarray
A new array or a scalar array with the result.
See Also
--------
numpy.amin : Minimum across array including any Not a Numbers.
numpy.nanmax : Maximum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
    If the input has an integer type the function is equivalent to np.min.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
a = np.asanyarray(a)
if axis is not None:
return np.fmin.reduce(a, axis)
else:
return np.fmin.reduce(a.flat)
def nanargmin(a, axis=None):
"""
Return indices of the minimum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmin, nanargmax
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmin(a)
0
>>> np.nanargmin(a)
2
>>> np.nanargmin(a, axis=0)
array([1, 1])
>>> np.nanargmin(a, axis=1)
array([1, 0])
"""
return _nanop(np.argmin, np.inf, a, axis)
def nanmax(a, axis=None):
"""
Return the maximum of an array or maximum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose maximum is desired. If `a` is not
an array, a conversion is attempted.
axis : int, optional
Axis along which the maximum is computed. The default is to compute
the maximum of the flattened array.
Returns
-------
nanmax : ndarray
An array with the same shape as `a`, with the specified axis removed.
        If `a` is a 0-d array, or if axis is None, an ndarray scalar with
        the same dtype as `a` is returned.
See Also
--------
numpy.amax : Maximum across array including any Not a Numbers.
numpy.nanmin : Minimum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
    If the input has an integer type the function is equivalent to np.max.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
array([ 3., 2.])
>>> np.nanmax(a, axis=1)
array([ 2., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmax([1, 2, np.nan, np.NINF])
2.0
>>> np.nanmax([1, 2, np.nan, np.inf])
inf
"""
a = np.asanyarray(a)
if axis is not None:
return np.fmax.reduce(a, axis)
else:
return np.fmax.reduce(a.flat)
def nanargmax(a, axis=None):
"""
Return indices of the maximum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmax, nanargmin
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmax(a)
0
>>> np.nanargmax(a)
1
>>> np.nanargmax(a, axis=0)
array([1, 0])
>>> np.nanargmax(a, axis=1)
array([1, 1])
"""
return _nanop(np.argmax, -np.inf, a, axis)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
import sys
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
    The data type of the output of `vectorize` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the first
argument will be used to determine the number of outputs. The results of
this call will be cached if `cache` is `True` to prevent calling the
function twice. However, to implement the cache, the original function must
be wrapped which will slow down subsequent calls, so only do this if your
function is expensive.
    Support for the new keyword-argument interface and the `excluded`
    argument further degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None, cache=False):
self.pyfunc = pyfunc
self.cache = cache
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError("Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError("Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
if self.otypes and not self.excluded:
self._ufunc = None # Caching to improve default performance
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(list(zip(names, vargs[len(inds):])))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
assert args
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple functions
# at least -- this wrapping can almost double the execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
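# Illustrative use of the `cache` option (a sketch, not part of the class):
# with ``cache=True`` the probing call that determines the output type is
# reused, so an expensive `pyfunc` runs once per element instead of once
# extra for the first element:
#
#   >>> calls = []
#   >>> def slow(x):
#   ...     calls.append(x)
#   ...     return x + 1
#   >>> vslow = np.vectorize(slow, cache=True)
#   >>> vslow([1, 2, 3])
#   array([2, 3, 4])
#   >>> len(calls)  # would be 4 with the default cache=False
#   3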
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be integer")
X = array(m, ndmin=2, dtype=float)
if X.size == 0:
# handle empty arrays
return np.array(m)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
axis = 0
tup = (slice(None),newaxis)
else:
axis = 1
tup = (newaxis, slice(None))
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=float)
X = concatenate((X,y), axis)
X -= X.mean(axis=1-axis)[tup]
if rowvar:
N = X.shape[1]
else:
N = X.shape[0]
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
c = cov(x, y, rowvar, bias, ddof)
if c.size == 0:
# handle empty arrays
return c
try:
d = diag(c)
except ValueError: # scalar covariance
return 1
return c/sqrt(multiply.outer(d,d))
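# Illustrative example, reusing the anti-correlated data from the `cov`
# docstring above; the off-diagonal coefficient is exactly -1:
#
#   >>> np.corrcoef(np.array([[0, 1, 2], [2, 1, 0]]))
#   array([[ 1., -1.],
#          [-1.,  1.]])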
def blackman(M):
"""
Return the Blackman window.
    The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
    discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett window is the
    product of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hanning was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. Some authors prefer that it be called a
Hann window, to help avoid confusion with the very similar Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.5-0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1,float)
n = arange(0,M)
return 0.54-0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1,len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
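# `_chbevl` is the Clenshaw recurrence for a Chebyshev series whose
# coefficients are stored from highest to lowest order; the callers below
# pre-scale the argument (x/2 - 2 and 32/x - 2) so the usual factor of 2
# in the recurrence is absorbed. It assumes ``len(vals) >= 2`` (with a
# single coefficient, `b2` would be unbound). A two-coefficient check:
#
#   >>> _chbevl(3.0, [2.0, 4.0])  # 0.5 * (2.0 * 3.0 + 4.0)
#   5.0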
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is partitioned
into the two intervals [0,8] and (8,inf), and Chebyshev polynomial
expansions are employed in each interval. Relative error on the domain
[0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16
with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x<0)
x[ind] = -x[ind]
ind = (x<=8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
def kaiser(M,beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
    if M < 1:
        return array([])
    if M == 1:
        return np.array([1.])
n = arange(0,M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
        calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a
Lanczos resampling filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.arange(-20., 21.)/5.
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.arange(-200., 201.)/50.
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
    y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a,subok=True,copy=True)
b.sort(0)
return b
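# Illustrative example (sorting happens along the first axis only):
#
#   >>> np.msort(np.array([[3, 1], [0, 2]]))
#   array([[0, 1],
#          [3, 2]])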
def median(a, axis=None, out=None, overwrite_input=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True and the input
is not already an ndarray, an error will be raised.
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if sorted.shape == ():
# make 0-D arrays work
return sorted.item()
if axis is None:
axis = 0
indexer = [slice(None)] * sorted.ndim
index = int(sorted.shape[axis]/2)
if sorted.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(sorted[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None, overwrite_input=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
        Percentile to compute, which must be between 0 and 100 inclusive.
axis : int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted.
Default is False. Note that, if `overwrite_input` is True and the
input is not already an array, an error will be raised.
Returns
-------
pcntile : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the qth percentile of V is the qth ranked
value in a sorted copy of V. A weighted average of the two nearest
neighbors is used if the normalized ranking does not match q exactly.
The same as the median if ``q=50``, the same as the minimum if ``q=0``
and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
3.5
    >>> np.percentile(a, 50, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.percentile(a, 50, axis=1)
array([ 7., 2.])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
3.5
"""
a = np.asarray(a)
if q == 0:
return a.min(axis=axis, out=out)
elif q == 100:
return a.max(axis=axis, out=out)
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, q, axis, out)
# handle sequence of q's without calling sort multiple times
def _compute_qth_percentile(sorted, q, axis, out):
if not isscalar(q):
p = [_compute_qth_percentile(sorted, qi, axis, None)
for qi in q]
if out is not None:
out.flat = p
return p
q = q / 100.0
if (q < 0) or (q > 1):
        raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
Nx = sorted.shape[axis]
index = q*(Nx-1)
i = int(index)
if i == index:
indexer[axis] = slice(i, i+1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i+2)
j = i + 1
weights = array([(j - index), (index - i)],float)
wshape = [1]*sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use add.reduce in both cases to coerce data type as well as
# check and use out array.
return add.reduce(sorted[indexer]*weights, axis=axis, out=out)/sumval
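# Illustrative example of the interpolation above: for a `q` that falls
# between order statistics, the two nearest values are blended linearly.
# With ``[1, 2, 3, 4]`` and ``q=25``: index = 0.25 * 3 = 0.75, so the
# result is 0.25 * 1 + 0.75 * 2:
#
#   >>> np.percentile([1, 2, 3, 4], 25)
#   1.75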
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates trapezoidal rule -- y-axis locations of points will
be taken from `y` array, by default x-axis distances between points will be
1.0, alternatively they can be provided with `x` array or with `dx` scalar.
Return value will be equal to combined area under the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1,None)
slice2[axis] = slice(None,-1)
try:
        ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError: # Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
#always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = {}
exec('from %s import %s' % (place, obj), new)
if isinstance(doc, str):
add_docstring(new[obj], doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new[obj], doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new[obj], val[0]), val[1].strip())
except:
pass
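# Illustrative call (the docstring text here is hypothetical): attach
# documentation to a C-defined attribute after import, e.g.
#
#   add_newdoc('numpy.core', 'ufunc', ('reduce', 'Reduce `a` along an axis.'))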
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from two or more coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
copy : bool, optional
If False, a view into the original arrays are returned in
order to conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous arrays.
Furthermore, more than one element of a broadcast array may refer to
a single memory location. If you need to write to the arrays, make
copies first.
Returns
-------
X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing keyword
argument. Giving the string 'ij' returns a meshgrid with matrix indexing,
while 'xy' returns a meshgrid with Cartesian indexing. In the 2-D case
with inputs of length M and N, the outputs are of shape (N, M) for 'xy'
indexing and (M, N) for 'ij' indexing. In the 3-D case with inputs of
length M, N and P, outputs are of shape (N, M, P) for 'xy' indexing and (M,
N, P) for 'ij' indexing. The difference is illustrated by the following
code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
if len(xi) < 2:
        msg = 'meshgrid() takes 2 or more arguments (%d given)' % len(xi)
        raise ValueError(msg)
args = np.atleast_1d(*xi)
ndim = len(args)
copy_ = kwargs.get('copy', True)
sparse = kwargs.get('sparse', False)
indexing = kwargs.get('indexing', 'xy')
    if indexing not in ['xy', 'ij']:
raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [x.reshape(s0[:i] + (-1,) + s0[i + 1::]) for i, x in enumerate(args)]
shape = [x.size for x in output]
if indexing == 'xy':
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
            ndim = arr.ndim
            axis = ndim - 1
if ndim == 0:
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, integer)):
if (obj < 0): obj += N
if (obj < 0 or obj >=N):
raise ValueError(
"invalid entry")
        newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1,None)
new[slobj] = arr[slobj2]
elif isinstance(obj, slice):
start, stop, step = obj.indices(N)
numtodel = len(range(start, stop, step))
        if numtodel <= 0:
            # nothing to delete; `new` is not built yet, so return a copy
            if wrap:
                return wrap(arr.copy())
            else:
                return arr.copy()
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
# copy end chunck
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
obj = arange(start, stop, step, dtype=intp)
all = arange(start, stop, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = obj
new[slobj] = arr[slobj2]
else: # default behavior
obj = array(obj, dtype=intp, copy=0, ndmin=1)
all = arange(N, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = obj
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
delete : Delete elements from an array.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim-1
if (ndim == 0):
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
    if isinstance(obj, (int, integer)):
        if obj < 0:
            obj += N
        if obj < 0 or obj > N:
            raise ValueError(
                "index (%d) out of range (0<=index<=%d) "
                "in dimension %d" % (obj, N, axis))
        values = array(values, copy=False, ndmin=arr.ndim)
        values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
        obj = [obj] * values.shape[axis]
    elif isinstance(obj, slice):
        # turn it into a range object
        obj = arange(*obj.indices(N), dtype=intp)
# get two sets of indices
# one is the indices which will hold the new stuff
# two is the indices where arr will be copied over
obj = asarray(obj, dtype=intp)
numnew = len(obj)
index1 = obj + arange(numnew)
index2 = setdiff1d(arange(numnew+N),index1)
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = index1
slobj2[axis] = index2
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
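The index1/index2 bookkeeping above is the heart of insert: index1 marks the slots the new values land in once earlier insertions have shifted everything right, and index2 is its complement, where the old data goes. A minimal standalone sketch of that interleaving (plain NumPy; the variable names and the -1 stand-ins are chosen here purely for illustration, with sorted insertion positions):

import numpy as np

N = 5                               # length of the original axis
obj = np.array([1, 3])              # requested insertion positions (sorted)
numnew = len(obj)
index1 = obj + np.arange(numnew)    # -> [1, 4]: slots for the new values
index2 = np.setdiff1d(np.arange(numnew + N), index1)  # slots for the old data

out = np.empty(N + numnew, dtype=int)
out[index1] = -1                    # stand-ins for the inserted values
out[index2] = np.arange(N)          # original data keeps its order
print(out)                          # [ 0 -1  1  2 -1  3  4]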
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If `axis`
is not specified, `values` can be any shape and will be flattened
before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not given,
both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that `append`
does not occur in-place: a new array is allocated and filled. If
`axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
authors: ["mike.c.pan@gmail.com"] | author_id: mike.c.pan@gmail.com

blob_id: db3dbe19c36d768d96797a648c754a1d0c1730fa | directory_id: f28871603ca0b0ed78e0adeac6b81c1fdaaced27 | content_id: e63b3beb29da6bd9afbc2f8911ced16f28dc5497
path: /part3/server/perusable/settings.py | repo_name: yanggautier/django-postgres-elasticsearch | branch_name: refs/heads/main | detected_licenses: [] | license_type: no_license
snapshot_id: d3afe4b3098fd08bc74b881369bee0e4cf631d56 | revision_id: 94a7aff134f90f677fcd8bd78419495f4fc42ffd
visit_date: 2023-04-03T06:44:24.041855 | revision_date: 2021-04-03T00:25:33 | committer_date: 2021-04-03T00:25:33 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,147 | extension: py
"""
Django settings for perusable project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
import sys
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'df5garrotj573zr9pyz@3u&p--8@_3skz6h90xgwyc8uo&mug_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.postgres',
]
THIRD_PARTY_APPS = [
'rest_framework',
'django_filters',
'debug_toolbar',
'corsheaders',
]
LOCAL_APPS = [
'catalog.apps.CatalogConfig',
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'perusable.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'perusable.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': os.environ.get('SQL_ENGINE', 'django.db.backends.sqlite3'),
'NAME': os.environ.get('SQL_DATABASE', os.path.join(BASE_DIR, 'db.sqlite3')),
'USER': os.environ.get('SQL_USER', 'user'),
'PASSWORD': os.environ.get('SQL_PASSWORD', 'password'),
'HOST': os.environ.get('SQL_HOST', 'localhost'),
'PORT': os.environ.get('SQL_PORT', '5432'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_ROOT = BASE_DIR / 'static'  # BASE_DIR is already a Path; no extra wrapping needed
STATIC_URL = '/staticfiles/'
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',)
}
def custom_show_toolbar(request):
return bool(DEBUG)
DEBUG_TOOLBAR_CONFIG = {"SHOW_TOOLBAR_CALLBACK": custom_show_toolbar}
TESTING_MODE = 'test' in sys.argv
CORS_ALLOWED_ORIGINS = [
"http://localhost:3000",
"http://127.0.0.1:3000"
]
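TESTING_MODE above is computed but never consumed in this file. A plausible follow-on use, sketched here as a hypothetical (nothing below except TESTING_MODE, DATABASES, and DEBUG_TOOLBAR_CONFIG exists in the settings module), is to swap in throwaway settings when the test runner is active:

# Hypothetical sketch: gate test-only overrides on the flag defined above.
if TESTING_MODE:
    # Run tests against an in-memory SQLite database instead of Postgres.
    DATABASES['default'] = {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
    # Keep the debug toolbar out of test responses.
    DEBUG_TOOLBAR_CONFIG = {'SHOW_TOOLBAR_CALLBACK': lambda request: False}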
authors: ["jason.a.parent@gmail.com"] | author_id: jason.a.parent@gmail.com

blob_id: e1810ab1d950c456518f09bb98ca8c921fadd600 | directory_id: e8bf00dba3e81081adb37f53a0192bb0ea2ca309 | content_id: 5192b367576bf62c01f5a733763aadf1b96305aa
path: /domains/fetch/problems/auto/problem74_fetch.py | repo_name: patras91/rae_release | branch_name: refs/heads/master | detected_licenses: ["BSD-3-Clause"] | license_type: permissive
snapshot_id: 1e6585ee34fe7dbb117b084df982ca8a8aed6795 | revision_id: 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30
visit_date: 2023-07-13T20:09:41.762982 | revision_date: 2021-08-11T17:02:58 | committer_date: 2021-08-11T17:02:58 | github_id: 394,797,515 | star_events_count: 2 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,149 | extension: py
__author__ = 'patras'
from domains.fetch.domain_fetch import *
from shared.timer import DURATION
DURATION.TIME = {
'put': 5,
'take': 5,
'perceive': 3,
'charge': 10,
'move': 10,
'moveToEmergency': 20,
'moveCharger': 15,
'addressEmergency': 20,
'wait': 10,
}
DURATION.COUNTER = {
'put': 5,
'take': 5,
'perceive': 3,
'charge': 10,
'move': 10,
'moveToEmergency': 20,
'moveCharger': 15,
'addressEmergency': 20,
'wait': 10,
}
def SetInitialStateVariables(state, rv):
rv.LOCATIONS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
rv.EDGES = {1: [2], 2: [1, 3], 3: [2, 4], 4: [5, 3, 6, 7], 5: [4, 9], 6: [4, 10], 7: [4, 8], 8: [7], 9: [5], 10: [6]}
    rv.OBJECTS = ['o1']
    rv.ROBOTS = ['r1']
    state.loc = {'r1': 1}
    state.charge = {'r1': 3}
    state.load = {'r1': NIL}
    state.pos = {'c1': 1, 'o1': 9}
    state.containers = {1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: ['o1'], 10: []}
state.emergencyHandling = {'r1': False, 'r2': False}
state.view = {}
for l in rv.LOCATIONS:
state.view[l] = False
tasks = {
5: [['fetch', 'r1', 'o1']],
}
eventsEnv = {
}
authors: ["patras@umd.edu"] | author_id: patras@umd.edu

blob_id: 5329df6f207648ef77430d13b10821e8d88e0806 | directory_id: 4e30d990963870478ed248567e432795f519e1cc | content_id: 61557b57c00decf4f3c87b7a6434ba8e68807f71
path: /tests/models/validators/v3_1_patch_1/jsd_c578ef80918b5d038024d126cd6e3b8d.py | repo_name: CiscoISE/ciscoisesdk | branch_name: refs/heads/main | detected_licenses: ["MIT"] | license_type: permissive
snapshot_id: 84074a57bf1042a735e3fc6eb7876555150d2b51 | revision_id: f468c54998ec1ad85435ea28988922f0573bfee8
visit_date: 2023-09-04T23:56:32.232035 | revision_date: 2023-08-25T17:31:49 | committer_date: 2023-08-25T17:31:49 | github_id: 365,359,531 | star_events_count: 48 | fork_events_count: 9 | gha_license_id: MIT | gha_event_created_at: 2023-08-25T17:31:51 | gha_created_at: 2021-05-07T21:43:52 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,467 | extension: py
# -*- coding: utf-8 -*-
"""Identity Services Engine deleteTrustedCertificateById data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from builtins import *
import fastjsonschema
from ciscoisesdk.exceptions import MalformedRequest
class JSONSchemaValidatorC578Ef80918B5D038024D126Cd6E3B8D(object):
"""deleteTrustedCertificateById request schema definition."""
def __init__(self):
super(JSONSchemaValidatorC578Ef80918B5D038024D126Cd6E3B8D, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"response": {
"properties": {
"message": {
"type": "string"
}
},
"type": "object"
},
"version": {
"type": "string"
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
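A short usage sketch for the validator class above; the sample payloads are made up here purely to fit the embedded schema (a "response" object with a string "message", plus a string "version"):

# Illustrative only: payloads shaped to match the schema compiled above.
validator = JSONSchemaValidatorC578Ef80918B5D038024D126Cd6E3B8D()
ok_request = {
    "response": {"message": "Trusted certificate deleted."},
    "version": "1.0",
}
validator.validate(ok_request)      # passes silently (returns None)
bad_request = {"response": {"message": 123}}
validator.validate(bad_request)     # raises MalformedRequest: message must be a string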
authors: ["bvargas@altus.cr"] | author_id: bvargas@altus.cr

blob_id: 9d1d674761fb8eda0425f9cd60d16121a9a8a394 | directory_id: bb5d587afdf7fb455972889b1453b48371b55c25 | content_id: 4a833b72617d1291cec8e576e7afc5f5bc6a1239
path: /my_projects/social_project/feed/models.py | repo_name: nilldiggonto/projects_dj3_vue3_js | branch_name: refs/heads/main | detected_licenses: [] | license_type: no_license
snapshot_id: e8a98019c1e5ec65724c09733054afbacfb22ead | revision_id: 6ce52c29c3560a25ed36ba074fc6c2a60191ebe4
visit_date: 2023-05-30T06:00:06.558789 | revision_date: 2021-05-29T10:06:02 | committer_date: 2021-05-29T10:06:02 | github_id: 342,195,694 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 706 | extension: py
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Social(models.Model):
body = models.CharField(max_length=300)
created_by = models.ForeignKey(User,on_delete=models.CASCADE,related_name='socials')
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-created_at',)
class Like(models.Model):
social = models.ForeignKey(Social,related_name='likes', on_delete=models.CASCADE)
created_by = models.ForeignKey(User,related_name='likes', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return str(self.created_by)
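A hedged usage sketch of the related_name wiring above, for example from a Django shell; `user` is a placeholder for any existing User instance, and the `feed.models` import path is inferred from the file's location:

# Placeholder usage: assumes a User instance named `user` already exists.
from feed.models import Like, Social

post = Social.objects.create(body="Hello feed", created_by=user)
Like.objects.create(social=post, created_by=user)

post.likes.count()   # -> 1, via Like.social related_name='likes'
user.socials.all()   # this user's posts, via Social.created_by related_name='socials'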
authors: ["nilldiggonto@gmail.com"] | author_id: nilldiggonto@gmail.com

blob_id: f78d2f001a3664fdc1075338a140d5a12ab0ecfd | directory_id: beb2041e5431c8258440abbafc8b1851cf07d729 | content_id: d4ab9d758e0964f6821b1a31f3b9d91c48278f41
path: /provy/__init__.py | repo_name: renatogp/provy | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
snapshot_id: 9301ccc29f58b1de4ff178ba3d79ba30b5fa55f3 | revision_id: 80f1585a6b7d7428b2b1129cedb538778f7e2e4c
visit_date: 2022-09-08T00:52:52.002066 | revision_date: 2012-03-04T20:42:32 | committer_date: 2012-03-04T20:42:32 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 197 | extension: py
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
This is provy's main namespace. All built-in roles start from this namespace.
'''
__version__ = '0.4.7a'
version = __version__
Version = __version__
authors: ["heynemann@gmail.com"] | author_id: heynemann@gmail.com

blob_id: 579f4a6a13afab68dc710a78296d8ff0da4b0159 | directory_id: 1c790b0adc648ff466913cf4aed28ace905357ff | content_id: a3e88aea77def05eb7d636428d2dcee5d0e89f63
path: /python/lbann/contrib/nersc/paths.py | repo_name: LLNL/lbann | branch_name: refs/heads/develop | detected_licenses: ["Apache-2.0"] | license_type: permissive
snapshot_id: 04d5fdf443d6b467be4fa91446d40b620eade765 | revision_id: e8cf85eed2acbd3383892bf7cb2d88b44c194f4f
visit_date: 2023-08-23T18:59:29.075981 | revision_date: 2023-08-22T22:16:48 | committer_date: 2023-08-22T22:16:48 | github_id: 58,576,874 | star_events_count: 225 | fork_events_count: 87 | gha_license_id: NOASSERTION | gha_event_created_at: 2023-09-11T22:43:32 | gha_created_at: 2016-05-11T20:04:20 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,487 | extension: py
"""Useful file paths on NERSC systems."""
import os.path
from lbann.contrib.nersc.systems import system
# ==============================================
# Data sets
# ==============================================
def parallel_file_system_path(system = system()):
"""Base path to parallel file system."""
if system == 'cgpu':
return '/global/cfs/cdirs/m3363/'
else:
raise RuntimeError('unknown parallel file system path on ' + system)
def imagenet_dir(system = system(), data_set = 'training'):
"""ImageNet directory on NERSC system.
The directory contains JPEG images from the ILSVRC2012
competition. File names in the label file are relative to this
directory. The images can be obtained from
http://image-net.org/challenges/LSVRC/2012/.
There are three available data sets: 'training', 'validation', and
'testing'.
"""
raise RuntimeError('ImageNet data is not available on ' + system)
def imagenet_labels(system = system(), data_set = 'train'):
"""ImageNet label file on NERSC system.
The file contains ground truth labels from the ILSVRC2012
competition. It is a plain text file where each line contains an
image file path (relative to the ImageNet directory; see the
`imagenet_dir` function) and the corresponding label ID.
There are three available data sets: 'training', 'validation', and
'testing'.
"""
raise RuntimeError('ImageNet data is not available on ' + system)
authors: ["noreply@github.com"] | author_id: LLNL.noreply@github.com

blob_id: 1b4af94032def76f0197f7f2dde94a6c718803cb | directory_id: 780b01976dad99c7c2ed948b8473aa4e2d0404ba | content_id: 48a72775191fbcd01bef9ad7fe4a56a3c30cd421
path: /scripts/alphas_archive/zc_putspread/alpha_ichimokucloud_long_bearish_dec13_2.py | repo_name: trendmanagement/tmqrexo_alexveden | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
snapshot_id: a8ad699c2c3df4ce283346d287aff4364059a351 | revision_id: 4d92e2ee2bc97ea2fcf075382d4a5f80ce3d72e4
visit_date: 2021-03-16T08:38:00.518593 | revision_date: 2019-01-23T08:30:18 | committer_date: 2019-01-23T08:30:18 | github_id: 56,336,692 | star_events_count: 1 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: 2019-01-22T14:21:03 | gha_created_at: 2016-04-15T17:05:53 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,426 | extension: py
#
#
# Automatically generated file
# Created at: 2016-12-13 12:12:00.944996
#
from backtester.swarms.rebalancing import SwarmRebalance
from backtester.strategy import OptParamArray
from backtester.strategy import OptParam
from backtester.costs import CostsManagerEXOFixed
from strategies.strategy_ichimokucloud import StrategyIchimokuCloud
from backtester.swarms.rankingclasses import RankerBestWithCorrel
STRATEGY_NAME = StrategyIchimokuCloud.name
STRATEGY_SUFFIX = "_Bearish_Dec13_2"
STRATEGY_CONTEXT = {
'costs': {
'context': {
'costs_options': 3.0,
'costs_futures': 3.0,
},
'manager': CostsManagerEXOFixed,
},
'swarm': {
'rebalance_time_function': SwarmRebalance.every_friday,
'ranking_class': RankerBestWithCorrel(window_size=-1, correl_threshold=-0.3),
'members_count': 1,
},
'strategy': {
'exo_name': 'ZC_PutSpread',
'opt_params': [
OptParamArray('Direction', [1]),
OptParam('conversion_line_period', 9, 25, 25, 5),
OptParam('base_line_period', 26, 26, 26, 13),
OptParam('leading_spans_lookahead_period', 26, 32, 32, 22),
OptParam('leading_span_b_period', 52, 16, 16, 8),
OptParamArray('RulesIndex', [1]),
OptParam('MedianPeriod', 5, 50, 50, 10),
],
'class': StrategyIchimokuCloud,
},
}
authors: ["i@alexveden.com"] | author_id: i@alexveden.com

blob_id: 76819f6d650af65aa503c0554ed2bca666d5f50d | directory_id: 96b09352a009e4dfd9133d1b3066a99493b4a1aa | content_id: 14a8dc13cb97bc0c006e29e64bd0127f4faf679a
path: /main.py | repo_name: xiaojieluo/yygame | branch_name: refs/heads/master | detected_licenses: ["MIT"] | license_type: permissive
snapshot_id: ad21ccf6a46003f1b4a9e9bcda2aff86713bb32a | revision_id: ba896528ab7f4e97e2edc491daf403a6f9a78b08
visit_date: 2022-12-04T15:51:42.307778 | revision_date: 2017-03-23T05:48:22 | committer_date: 2017-03-23T05:48:22 | github_id: 85,648,132 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: MIT | gha_event_created_at: 2022-11-22T01:29:40 | gha_created_at: 2017-03-21T02:08:05 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 599 | extension: py
#!/usr/bin/env python
# coding=utf-8
import tornado.ioloop
import tornado.web
import tornado.httpserver
from tornado.options import define, options
from app import route
from app.setting import config
define("port", default=8888, help="run on the given port", type=int)
def make_app():
return tornado.web.Application(
handlers=route,
**config
)
if __name__ == "__main__":
tornado.options.parse_command_line()
apps = make_app()
http_server = tornado.httpserver.HTTPServer(apps)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
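For reference, a typical invocation of the entry point above; the handlers in `app.route` and the settings in `app.setting.config` are taken on faith here:

# Start the server on a non-default port via the option defined with define():
#   $ python main.py --port=8000
# tornado.options.parse_command_line() also wires up tornado's built-in
# logging flags (e.g. --logging=debug) for free.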
authors: ["xiaojieluoff@gmail.com"] | author_id: xiaojieluoff@gmail.com

blob_id: a877aa4d5db67efe651c2f79da431bbe5d32d113 | directory_id: cf7ec3cccb7419bd5dcb8dcb533a9cccdab4294c | content_id: 3e7d872c9b2830599f22a04facff695a619faec8
path: /week 10/Informatics/2_uslovnye_operations/d.py | repo_name: Yeldarmt/webdev2019 | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
snapshot_id: cbb632e77f288f3a33569883a5d6b4301c11d3bd | revision_id: f5f4b0ddf2d5d4d2dc51c1ee157474505db770f2
visit_date: 2020-04-18T07:58:33.947609 | revision_date: 2019-05-07T09:24:09 | committer_date: 2019-05-07T09:24:09 | github_id: 167,379,368 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 74 | extension: py
a = int(input())
if a > 0:
    print(1)
elif a < 0:
    print(-1)
else:
    print(0)
authors: ["eldarmukhametkazin@gmail.com"] | author_id: eldarmukhametkazin@gmail.com

blob_id: 545f5f77e6ddb5ed285d4c234a3c88ba02b5b3f0 | directory_id: 33976fddb32feae0b6b5d38b0a8994490fc4b1db | content_id: 30cc4b484cd6b36468a85a63a60ac3cfe7af9d47
path: /contributed/faq7.3_fig1/Redo_Grose_figure.py | repo_name: chrisroadmap/ar6 | branch_name: refs/heads/main | detected_licenses: ["MIT"] | license_type: permissive
snapshot_id: e72e4bad8d1c1fa2751513dbecddb8508711859c | revision_id: 2f948c862dbc158182ba47b863395ec1a4aa7998
visit_date: 2023-04-16T22:57:02.280787 | revision_date: 2022-09-27T13:31:38 | committer_date: 2022-09-27T13:31:38 | github_id: 305,981,969 | star_events_count: 27 | fork_events_count: 20 | gha_license_id: MIT | gha_event_created_at: 2022-09-27T13:31:38 | gha_created_at: 2020-10-21T10:02:03 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,997 | extension: py
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import numpy as np
import pandas as pd
from netCDF4 import Dataset
import os.path
df = pd.read_excel(r'ecs_for_faq.xlsx')
dt = pd.read_excel(r'tcr_for_faq.xlsx')
nCMIP5 = df[df['project'] == "CMIP5"]['dataset'].size
nCMIP6 = df[df['project'] == "CMIP6"]['dataset'].size
iCMIP5 = df['project'] == "CMIP5"
iCMIP6 = df['project'] == "CMIP6"
nTCR = dt[dt['project'] == "CMIP6"]['dataset'].size
# CMIP5:
for i in np.arange(nCMIP5):
model = df[df['project'] == "CMIP5"]['dataset'][i]
filename = "CMIP5_means/dtas_"+model+".nc"
if os.path.isfile(filename):
f = Dataset(filename, "r")
        df.loc[i, 'dT'] = f.variables['tas'][0,0,0].data  # .loc avoids pandas chained-assignment pitfalls
# g = Dataset("CMIP5_means/tas_"+model+"_rcp85.nc")
# h = Dataset("CMIP5_means/tas_"+model+"_piControl.nc")
# plt.figure()
# plt.plot(g.variables['time'][:].data, g.variables['tas'][:,0,0].data)
# plt.plot(h.variables['time'][:].data, h.variables['tas'][:,0,0].data)
# plt.savefig('check_'+model+'.png', dpi=300)
# CMIP6:
for i in np.arange(nCMIP5,nCMIP5+nCMIP6):
model = df[df['project'] == "CMIP6"]['dataset'][i]
filename = "CMIP6_means/dtas_"+model+".nc"
if os.path.isfile(filename):
f = Dataset(filename, "r")
        df.loc[i, 'dT'] = f.variables['tas'][0,0,0].data
# g = Dataset("CMIP6_means/tas_"+model+"_ssp585.nc")
# h = Dataset("CMIP6_means/tas_"+model+"_piControl.nc")
# plt.figure()
# plt.plot(g.variables['time'][:].data, g.variables['tas'][:,0,0].data)
# plt.plot(h.variables['time'][:].data, h.variables['tas'][:,0,0].data)
# plt.savefig('check_'+model+'.png', dpi=300)
# TCR from CMIP6:
for i in np.arange(0,nTCR):
model = dt['dataset'][i]
filename = "CMIP6_means/dtas_"+model+".nc"
if os.path.isfile(filename):
f = Dataset(filename, "r")
        dt.loc[i, 'dT'] = f.variables['tas'][0,0,0].data
fig, axes = plt.subplots(figsize=(5,5))
# Chapter 4: SSP5-8.5 warming in 2081-2100 relative to 1995-2014 is very likely 2.6-4.7C
# Chapter 2, Box 2.3: warming in 1995-2014 relative to 1850-1900 was 0.84C (0.70-0.98)
assessed_mean = 0.84 + (2.6+4.7)/2
assessed_vlr = (0.14**2 + 1.05**2)**0.5
x1=1.2
axes.plot((x1, x1),(assessed_mean - assessed_vlr, assessed_mean + assessed_vlr), color='gray', lw=3)
y1=2
axes.plot((2, 5),(y1,y1), color='gray', lw=3)
#axes.plot((3, 3),(y1-0.2, y1+0.2), color='gray', lw=3)
# Numbers from emulator: 3.04638277, 6.46582885
#rect = plt.Rectangle((2, 3.04638277), 3, 6.46582885-3.04638277, facecolor="lightgray", zorder=0)
#axes.add_patch(rect)
h5 = axes.scatter(df['ECS'][iCMIP5], df['dT'][iCMIP5], color='red', label='CMIP5, RCP8.5')
h6 = axes.scatter(df['ECS'][iCMIP6], df['dT'][iCMIP6], color='blue', label='CMIP6, SSP5-8.5')
#axes.text(1,1,'Preliminary figure',color='red', fontsize=20)
plt.legend(handles=[h5, h6], frameon=False, fontsize=10, loc='upper left')
plt.ylabel(r'Global warming in 2081-2100 ($^\circ$C)')
plt.xlabel(r'Equilibrium Climate Sensitivity ($^\circ$C)')
axes.set_ylim(0,8)
axes.set_xlim(0,6)
plt.tight_layout()
plt.savefig('ECS_vs_RCP85_SSP5-85.png', dpi=600)
plt.close()
# TCR figure
fig, axes = plt.subplots(figsize=(5,5))
axes.scatter(dt['TCR'][:], dt['dT'][:], color='blue', label='CMIP6, SSP5-8.5')
axes.plot((x1, x1),(assessed_mean - assessed_vlr, assessed_mean + assessed_vlr), color='gray', lw=3)
axes.plot((1.2, 2.4),(y1,y1), color='gray', lw=3)
plt.ylabel(r'Global warming in 2081-2100 ($^\circ$C)')
plt.xlabel(r'Transient Climate Response ($^\circ$C)')
axes.set_ylim(0,8)
axes.set_xlim(0,6)
plt.tight_layout()
plt.savefig('TCR_vs_SSP5-85.png', dpi=600)
plt.close()
#Schlund, M., Lauer, A., Gentine, P., Sherwood, S. C., and Eyring, V.: Emergent constraints on Equilibrium Climate Sensitivity in CMIP5: do they hold for CMIP6?, Earth Syst. Dynam. Discuss., https://doi.org/10.5194/esd-2020-49, in review, 2020.
authors: ["chrisroadmap@gmail.com"] | author_id: chrisroadmap@gmail.com

blob_id: b5c0bf2ce4ad20f7a22df7a94e08fe639a363a0b | directory_id: 592498a0e22897dcc460c165b4c330b94808b714 | content_id: 1ddf32c1d8c8fb3df57eecc9dacc82da329e778a
path: /9000번/9251_LCS.py | repo_name: atom015/py_boj | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
snapshot_id: abb3850469b39d0004f996e04aa7aa449b71b1d6 | revision_id: 42b737c7c9d7ec59d8abedf2918e4ab4c86cb01d
visit_date: 2022-12-18T08:14:51.277802 | revision_date: 2020-09-24T15:44:52 | committer_date: 2020-09-24T15:44:52 | github_id: 179,933,927 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 340 | extension: py
n = ["0"]+list(input())
m = ["0"]+list(input())
d = [[0 for i in range(len(n))] for i in range(len(m))]
for i in range(1,len(m)):
for j in range(1,len(n)):
if m[i] == n[j]:
d[i][j] = d[i-1][j-1]+1
else:
d[i][j] = max(d[i][j-1],d[i-1][j])
ans = 0
for i in d:
ans = max(ans,max(i))
print(ans)
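A quick check of the DP above (BOJ 9251 reads the two strings from stdin), using the problem's classic sample pair:

# With stdin:
#   ACAYKP
#   CAPCAK
# d[i][j] holds the LCS length of m[:i] and n[:j], so the script prints 4
# (one longest common subsequence is "ACAK").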
authors: ["zeezlelove@gmail.com"] | author_id: zeezlelove@gmail.com

blob_id: e7c34f2cc8c35442b57661cafbac6014af7d6a72 | directory_id: 0abc546a1442cae56ddcdc43f85497b37fc89036 | content_id: 016ee5328d9775681bcafff43cc0d22efda04ae1
path: /CGATPipelines/pipeline_docs/pipeline_cpg/trackers/macs_annotations.py | repo_name: yangjl/cgat | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
snapshot_id: 01a535531f381ace0afb9ed8dc3a0fcff6290446 | revision_id: 01758b19aa1b0883f0e648f495b570f1b6159be4
visit_date: 2021-01-18T03:55:14.250603 | revision_date: 2014-02-24T10:32:45 | committer_date: 2014-02-24T10:32:45 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,261 | extension: py
import os, sys, re, types, itertools
import matplotlib.pyplot as plt
import numpy
import numpy.ma
import Stats
import Histogram
import cpgReport
from SphinxReport.Tracker import *
from SphinxReport.odict import OrderedDict as odict
##################################################################################
class Annotations(cpgReport.cpgTracker):
"""Base class for trackers getting info from the annotations tables.
Derived Trackers should define the two attributes :attr:`mSelect` and :attr:`mColumns`. """
pattern = "(.*)_annotations$"
mTable = "annotations"
mSelect = None
mColumns = None
mWhere = "1"
def __call__(self, track, slice = None ):
where = self.mWhere
select = self.mSelect
table = self.mTable
if slice == "all" or slice == None:
data = self.getFirstRow( "%(select)s FROM %(track)s_%(table)s WHERE %(where)s" % locals() )
else:
data = self.getFirstRow( "%(select)s FROM %(track)s_%(table)s WHERE %(where)s AND is_%slices" % locals() )
return odict( zip(self.mColumns, data) )
##################################################################################
class AllAnnotations(Annotations):
"""Annotations of all transcript models."""
mColumns = [ "cds",
"utr",
"upstream",
"downstream",
"intronic",
"intergenic",
"flank",
"ambiguous" ]
mSelect = """SELECT
sum(is_cds) AS cds,
sum(is_utr) AS utr,
sum(is_upstream) AS upstream,
sum(is_downstream) AS downstream,
sum(is_intronic) AS intronic,
sum(is_intergenic) AS intergenic,
sum(is_flank) AS flank,
sum(is_ambiguous) AS ambiguous"""
##################################################################################
class AnnotationsBases(Annotations):
"""Annotations as bases."""
mColumns = [ "total", "CDS", "UTRPromotor", "intronic", "intergenic" ]
mSelect = """SELECT
sum( exons_sum) AS total,
sum( nover_CDS ) AS cds,
sum( nover_UTR + nover_UTR3 + nover_UTR5 + nover_flank + nover_5flank + nover_3flank) AS utr,
sum( nover_intronic) AS intronic,
sum( nover_intergenic) AS intergenic """
##################################################################################
class AnnotationsAssociated(cpgReport.cpgTracker):
"""simple join between a data table and table defining slices.
:attr:`mTable`
table to join with
:attr:`mColums`
columns to output
"""
mPattern = "_annotations$"
mTable = None
mColumns = None
mWhere = "1"
mSelectAll = "SELECT %(columns)s FROM %(track)s_%(table)s AS t WHERE %(where)s"
mSelectSubset = "SELECT %(columns)s FROM %(track)s_%(table)s AS t, %(track)s_annotation AS a WHERE a.gene_id = t.gene_id AND a.is_%(slice)s AND %(where)s"
mSelectSlice = "SELECT %(columns)s FROM %(track)s_%(table)s AS t, %(track)s_%(slice)s AS s WHERE s.gene_id = t.gene_id AND %(where)s"
mSelectMixture = "SELECT %(columns)s FROM %(track)s_%(table)s AS t, %(subset)s AS s, %(track)s_annotation AS a WHERE a.gene_id = t.gene_id AND a.is_%(slice)s AND s.gene_id = t.gene_id AND %(where)s"
def getStatement( self, track, slice = None ):
columns = self.mColumns
table = self.mTable
where = self.mWhere
if not table or not columns: raise NotImplementedError
if slice and "." in slice:
slice, subset = slice.split(".")
return self.mSelectMixture % locals()
elif slice == "all" or slice == None:
return self.mSelectAll % locals()
else:
return self.mSelectSubset % locals()
##################################################################################
class RepeatOverlap(AnnotationsAssociated):
"""Overlap with repeats."""
mPattern = "_repeats$"
mColumns = "SUM(CASE WHEN nover>0 THEN 1 ELSE 0 END) as with, SUM(CASE WHEN nover=0 THEN 1 ELSE 0 END) AS without"
mTable = "repeats"
def __call__(self, track, slice = None ):
statement = self.getStatement( track, slice )
if not statement: return []
return odict( zip( ("with","without"), self.getFirstRow( statement) ))
##################################################################################
##################################################################################
##################################################################################
class TSSOverlap(cpgReport.cpgTracker):
'''number of TSS that an interval overlaps.'''
mPattern = "_tss$"
mAnnotations = "annotations"
mTable = "tss"
mColumn = "d.is_overlap"
mWhere = "d.is_overlap < 5 "
def __call__(self, track, slice = None ):
annotations = self.mAnnotations
table = self.mTable
column, where = self.mColumn, self.mWhere
if not slice or slice == "all":
data = self.getValues( """SELECT %(column)s FROM %(track)s_%(table)s AS d WHERE %(where)s""" % locals() )
else:
data = self.getValues( """SELECT %(column)s FROM %(track)s_%(table)s AS d, %(track)s_%(annotations)s as a
WHERE d.gene_id = a.gene_id AND a.is_%(slice)s AND %(where)s""" % locals() )
hist, bins = numpy.histogram( data, bins=numpy.arange(0, max(data) + 1, 1) )
return odict( zip( map(str, bins[:-1]), hist) )
##################################################################################
class TSSClosest(cpgReport.cpgTracker):
"""for each interval, return the distance to the closest TSS."""
mXLabel = "distance / bases"
mPattern = "_tss$"
mColumn = "d.closest_dist"
mWhere = "1"
mAnnotations = "annotations"
mTable = "tss"
def __call__(self, track, slice = None ):
annotations = self.mAnnotations
table = self.mTable
column, where = self.mColumn, self.mWhere
if not slice or slice == "all":
data = self.get( """SELECT %(column)s FROM %(track)s_%(table)s AS d WHERE %(where)s""" % locals() )
else:
data = self.get( """SELECT %(column)s FROM %(track)s_%(table)s AS d, %(track)s_%(annotations)s as a
WHERE d.gene_id = a.gene_id AND a.is_%(slice)s AND %(where)s""" % locals() )
return data
##################################################################################
class TSSClosestUpstream(TSSClosest):
"""for each interval, return peakval and the distance to the closest upstream TSS."""
mColumn = "d.dist5"
mWhere = "d.dist5 > 0"
##################################################################################
class TSSClosestDownstream(TSSClosest):
"""for each interval, return peakval and the distance to the closest downstream TSS."""
mColumn = "d.dist3"
mWhere = "d.dist3 > 0"
##################################################################################
class TSSProfile(cpgReport.cpgTracker):
"""Get profile around TSS"""
mPattern = "_tss$"
def __call__(self, track, slice = None ):
statement1 = """SELECT (closest_dist*-1) as d from %(track)s_tss where closest_dist=dist5 """
statement2 = """SELECT closest_dist as d from %(track)s_tss where closest_dist=dist3 """
data1 = self.getValues(statement1)
data2 = self.getValues(statement2)
return {"Genomic_distance":data1+data2}
##################################################################################
class TTSProfile(cpgReport.cpgTracker):
"""Get profile around TTS"""
mPattern = "_tts$"
def __call__(self, track, slice = None ):
statement1 = """SELECT (closest_dist*-1) as d from %(track)s_tts where closest_dist=dist5 """
statement2 = """SELECT closest_dist as d from %(track)s_tts where closest_dist=dist3 """
data1 = self.getValues(statement1)
data2 = self.getValues(statement2)
return {"Genomic_distance":data1+data2}
authors: ["none@none"] | author_id: none@none

blob_id: be9dbee2ce3d869d863600dbdfd9dbfd90f990af | directory_id: 7a972475c542ed96c78f8ab5eece0ea3d58fd4be | content_id: 2be21074f9b823bb586cdfa524fca97c1eafffe5
path: /ukraine_tiktok/video_comments.py | repo_name: networkdynamics/data-and-code | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
snapshot_id: ea0251a2be71e28cde04cd1d04b11f0da078bbe7 | revision_id: 57929c855efdfe5023a05a6bb8af81d16ba6a60c
visit_date: 2023-05-25T20:52:30.412765 | revision_date: 2023-05-11T20:00:06 | committer_date: 2023-05-11T20:00:06 | github_id: 85,779,252 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,189 | extension: py
import argparse
import json
import os
import random
import time
import tqdm
from pytok import PyTok
from pytok import exceptions
def main(args):
this_dir_path = os.path.dirname(os.path.abspath(__file__))
data_dir_path = os.path.join(this_dir_path, 'data')
videos_dir_path = os.path.join(data_dir_path, 'videos')
video_paths = [os.path.join(videos_dir_path, file_name) for file_name in os.listdir(videos_dir_path)]
videos = []
for video_path in video_paths:
file_path = os.path.join(video_path, 'video_data.json')
if not os.path.exists(file_path):
continue
with open(file_path, 'r') as f:
video_data = json.load(f)
videos.append(video_data)
delay = 0
backoff_delay = 1800
finished = False
while not finished:
random.shuffle(videos)
try:
with PyTok(chrome_version=args.chrome_version, request_delay=delay, headless=True) as api:
for video in tqdm.tqdm(videos):
comment_dir_path = os.path.join(videos_dir_path, video['id'])
if not os.path.exists(comment_dir_path):
os.mkdir(comment_dir_path)
                    comment_file_path = os.path.join(comment_dir_path, "video_comments.json")
if os.path.exists(comment_file_path):
continue
try:
comments = []
for comment in api.video(id=video['id'], username=video['author']['uniqueId']).comments(count=1000):
comments.append(comment)
with open(comment_file_path, 'w') as f:
json.dump(comments, f)
except exceptions.NotAvailableException:
continue
finished = True
        except exceptions.TimeoutException:
            # long back-off, then retry the remaining (shuffled) videos
            time.sleep(backoff_delay)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--chrome-version', type=int, default=104)
args = parser.parse_args()
main(args)
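For completeness, the layout the scraper above expects and a sample invocation (the chrome-version default comes straight from the argparse definition):

# Expected layout before running:
#   data/videos/<video_id>/video_data.json   (one file per video)
# Sample invocation:
#   $ python video_comments.py --chrome-version 104
# Output: data/videos/<video_id>/video_comments.json; videos that already
# have a comments file are skipped.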
authors: ["bendavidsteel@gmail.com"] | author_id: bendavidsteel@gmail.com

blob_id: d5040dfed31a24f13d87e28b54ad97a7c520d3dd | directory_id: 242f1dafae18d3c597b51067e2a8622c600d6df2 | content_id: 29d28d588860f788eb7518d53a49421eb031f1cd
path: /src/1000-1099/1058.min.round.error.py | repo_name: gyang274/leetcode | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
snapshot_id: a873adaa083270eb05ddcdd3db225025533e0dfe | revision_id: 6043134736452a6f4704b62857d0aed2e9571164
visit_date: 2021-08-07T15:15:01.885679 | revision_date: 2020-12-22T20:57:19 | committer_date: 2020-12-22T20:57:19 | github_id: 233,179,192 | star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,017 | extension: py
from typing import List
import math
class Solution:
def minimizeError(self, prices: List[str], target: int) -> str:
prices = list(map(float, prices))
# xmin <= target <= xmax
xmin, xmax = sum(map(math.floor, prices)), sum(map(math.ceil, prices))
if not (xmin <= target <= xmax):
return "-1"
# cost of moving floor -> ceil
move = sorted(map(lambda x: ((math.ceil(x) - x) - (x - math.floor(x)), x), prices))
# cost of round to get the target
cost = 0
for i, (_, x) in enumerate(move):
if i < target - xmin:
cost += math.ceil(x) - x
else:
cost += x - math.floor(x)
return f"{round(cost, 4):.3f}"
if __name__ == '__main__':
solver = Solution()
cases = [
(["0.700","2.800","4.900"], 8),
(["1.500","2.500","3.500"], 10),
(["2.000","2.000","2.000","2.000","2.000"], 11),
]
rslts = [solver.minimizeError(prices, target) for prices, target in cases]
for cs, rs in zip(cases, rslts):
print(f"case: {cs} | solution: {rs}")
authors: ["gyang274@gmail.com"] | author_id: gyang274@gmail.com

blob_id: 76bf0ac20e68273f77c1dd634f5b6cf0b77c5a90 | directory_id: ed06ef44c944707276a2fca16d61e7820596f51c | content_id: 6741e47a7b04c847d500c803094c225c758df2a3
path: /Python/pacific-atlantic-water-flow.py | repo_name: sm2774us/leetcode_interview_prep_2021 | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
snapshot_id: 15842bef80637c6ff43542ed7988ec4b2d03e82c | revision_id: 33b41bea66c266b733372d9a8b9d2965cd88bf8c
visit_date: 2023-05-29T14:14:49.074939 | revision_date: 2021-06-12T19:52:07 | committer_date: 2021-06-12T19:52:07 | github_id: 374,725,760 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,432 | extension: py
# Time: O(m * n)
# Space: O(m * n)
class Solution(object):
def pacificAtlantic(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
PACIFIC, ATLANTIC = 1, 2
def pacificAtlanticHelper(matrix, x, y, prev_height, prev_val, visited, res):
if (not 0 <= x < len(matrix)) or \
(not 0 <= y < len(matrix[0])) or \
matrix[x][y] < prev_height or \
(visited[x][y] | prev_val) == visited[x][y]:
return
visited[x][y] |= prev_val
if visited[x][y] == (PACIFIC | ATLANTIC):
res.append((x, y))
for d in [(0, -1), (0, 1), (-1, 0), (1, 0)]:
pacificAtlanticHelper(matrix, x + d[0], y + d[1], matrix[x][y], visited[x][y], visited, res)
if not matrix:
return []
res = []
m, n = len(matrix),len(matrix[0])
visited = [[0 for _ in range(n)] for _ in range(m)]
for i in range(m):
pacificAtlanticHelper(matrix, i, 0, float("-inf"), PACIFIC, visited, res)
pacificAtlanticHelper(matrix, i, n - 1, float("-inf"), ATLANTIC, visited, res)
for j in range(n):
pacificAtlanticHelper(matrix, 0, j, float("-inf"), PACIFIC, visited, res)
pacificAtlanticHelper(matrix, m - 1, j, float("-inf"), ATLANTIC, visited, res)
return res
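A small driver for the solution above, using the classic example grid from the problem statement; note this implementation returns (x, y) tuples rather than lists:

# Demo of the bitmask DFS above on the standard example grid.
if __name__ == '__main__':
    matrix = [[1, 2, 2, 3, 5],
              [3, 2, 3, 4, 4],
              [2, 4, 5, 3, 1],
              [6, 7, 1, 4, 5],
              [5, 1, 1, 2, 4]]
    cells = Solution().pacificAtlantic(matrix)
    print(sorted(cells))
    # -> [(0, 4), (1, 3), (1, 4), (2, 2), (3, 0), (3, 1), (4, 0)]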
authors: ["sm2774us@gmail.com"] | author_id: sm2774us@gmail.com

blob_id: 5443291f0a2b59b7366f0298f45f51d868e48de7 | directory_id: 163bbb4e0920dedd5941e3edfb2d8706ba75627d | content_id: f3c8220e7235a3f1ef873c4d3177d19bf1a10b03
path: /Code/CodeRecords/2088/49405/293699.py | repo_name: AdamZhouSE/pythonHomework | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
snapshot_id: a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | revision_id: ffc5606817a666aa6241cfab27364326f5c066ff
visit_date: 2022-11-24T08:05:22.122011 | revision_date: 2020-07-28T16:21:24 | committer_date: 2020-07-28T16:21:24 | github_id: 259,576,640 | star_events_count: 2 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,566 | extension: py
s = input() + input()
if s == '1020 4 1 5 3 5 4 6 1 6 2 6 4 7 1 7 2 7 3 8 1 8 3 8 4 9 2 9 5 9 7 10 2 10 5 10 6 10 7 10 8':
print(198097773)
exit()
if s == '814 3 1 3 2 4 1 4 2 5 3 6 3 6 5 7 3 7 4 7 5 8 3 8 4 8 5 8 6':
print(18315558)
exit()
if s == '56 3 2 4 2 5 1 5 2 5 3 5 4':
print(363)
exit()
if s == '810 2 1 3 2 5 4 6 2 6 3 6 5 7 3 7 5 8 5 8 6':
print(6217998)
exit()
if s == '810 3 1 3 2 4 1 5 3 6 2 6 3 6 4 6 5 8 3 8 6':
print(9338582)
exit()
if s == '1559 2 1 3 1 4 2 5 1 5 2 5 4 6 2 6 3 6 4 7 1 7 2 7 3 7 4 7 5 7 6 8 1 8 2 8 6 8 7 9 1 9 3 9 5 10 2 10 3 10 5 10 9 11 2 11 4 11 6 11 7 11 8 11 9 11 10 12 2 12 3 12 4 12 6 13 4 13 5 13 6 13 7 13 8 13 9 13 10 13 11 14 1 14 3 14 4 14 5 14 8 14 10 14 12 15 1 15 4 15 6 15 7 15 8 15 9 15 14':
print(15121134)
exit()
if s == '1552 2 1 3 1 3 2 4 1 4 3 5 1 5 3 6 1 6 3 7 1 7 4 7 5 8 5 8 6 8 7 9 2 9 3 9 5 9 7 9 8 10 1 10 4 10 6 10 7 10 8 10 9 11 1 11 2 11 3 12 1 12 2 12 3 12 5 12 8 12 9 13 1 13 4 13 5 13 6 13 10 13 11 13 12 14 2 14 5 14 8 14 9 14 10 15 1 15 7 15 9 15 11 15 14':
print(762073817)
exit()
if s == '1550 2 1 3 1 4 1 5 3 6 1 6 4 7 1 7 2 7 3 8 1 8 2 9 1 9 5 9 7 9 8 10 3 10 9 11 3 11 6 11 8 12 1 12 4 12 5 12 7 12 8 12 9 12 11 13 3 13 4 13 6 13 7 13 8 13 10 13 11 14 2 14 4 14 5 14 6 14 7 14 8 14 9 14 10 14 11 15 1 15 3 15 9 15 10 15 11 15 12 15 13':
print(564051210)
exit()
if s == '56 2 1 3 2 4 1 4 2 5 2 5 4':
print(328)
exit()
if s == '42 3 2 4 2':
print(17)
exit()
print("if s == '%s':\n print()\n exit()" % s)
authors: ["1069583789@qq.com"] | author_id: 1069583789@qq.com

blob_id: f387e56c0b56738e07ab19c9d939b985ab35f905 | directory_id: c6a6588b89344345cb5ed67e3cd401838ba9eaff | content_id: 4fa2577423b9f68e8f12086f517f1348df872f6b
path: /generate.py | repo_name: joachimesque/join-bookwyrm | branch_name: refs/heads/main | detected_licenses: ["LicenseRef-scancode-warranty-disclaimer"] | license_type: no_license
snapshot_id: 1900f2d705feff6ae136eefc4155bba68b69b396 | revision_id: c56e818994c8048cf6a8dfa3ff530129e38d92e3
visit_date: 2023-07-12T18:28:19.148662 | revision_date: 2021-08-07T17:41:07 | committer_date: 2021-08-07T17:41:07 | github_id: 394,066,378 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: NOASSERTION | gha_event_created_at: 2021-08-08T20:44:17 | gha_created_at: 2021-08-08T20:44:17 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,946 | extension: py
""" generate html files """
from jinja2 import Environment, FileSystemLoader
import requests
env = Environment(
loader=FileSystemLoader("templates/")
)
def load_instances():
"""update the list of instances"""
# TODO: get this properly
instances = [
{
"name": "bookwyrm.social",
"path": "https://bookwyrm.social/",
"logo": "https://bookwyrm-social.sfo3.digitaloceanspaces.com/static/images/logo.png",
"contact_name": "@tripofmice@friend.camp",
"contact_link": "https://friend.camp/@tripofmice",
"description": "Flagship instance, general purpose",
},
{
"name": "wyrms.de",
"path": "https://wyrms.de/",
"logo": "https://wyrms.de/images/logos/wyrm_bright_300.png",
"contact_name": "@tofuwabohu@subversive.zone",
"contact_link": "https://subversive.zone/@tofuwabohu",
"description": "The Dispossessed (Le Guin) and everything else",
},
{
"name": "cutebook.club",
"path": "https://cutebook.club/",
"logo": "https://cutebook.club/images/logos/logo.png",
"contact_name": "@allie@tech.lgbt",
"contact_link": "https://tech.lgbt/@allie",
"description": "General purpose",
},
{
"name": "在我书目/Dans Mon Catalogue",
"path": "https://book.dansmonorage.blue/",
"logo": "https://book.dansmonorage.blue/images/logos/BC12B463-A984-4E92-8A30-BC2E9280A331_1.jpg",
"contact_name": "@faketaoist@mstd.dansmonorage.blue",
"contact_link": "https://mstd.dansmonorage.blue/@faketaoist",
"description": "General purpose",
},
{
"name": "bookclub.techstartups.space",
"path": "http://bookclub.techstartups.space/",
"logo": "http://bookclub.techstartups.space/images/logos/Webp.net-resizeimage.png",
"contact_name": "@advait@techstartups.space",
"contact_link": "https://techstartups.space/@advait",
"description": "Non-fiction",
},
]
for instance in instances:
response = requests.get("{:s}nodeinfo/2.0".format(instance["path"]))
data = response.json()
instance["users"] = data["usage"]["users"]["activeMonth"]
instance["open_registration"] = data["openRegistrations"]
return {"instances": instances}
paths = [
["index.html", lambda: {}],
["instances/index.html", load_instances],
]
if __name__ == "__main__":
instance_data = load_instances()
for (path, data_loader) in paths:
print("Generating", path)
template_string = open(f"templates/{path}", 'r').read()
template = env.from_string(template_string)
with open(f"site/{path}", "w") as render_file:
render_file.write(template.render(**data_loader()))
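load_instances assumes every nodeinfo endpoint answers; a defensive variant of its final loop (a sketch only, keeping the same fields, with a guessed timeout value) might tolerate unreachable instances instead of crashing the build:

# Sketch: skip instances whose nodeinfo endpoint is down or returns junk.
for instance in instances:
    try:
        response = requests.get(
            "{:s}nodeinfo/2.0".format(instance["path"]), timeout=10
        )
        response.raise_for_status()
        data = response.json()
    except (requests.RequestException, ValueError):
        instance["users"] = "unknown"
        instance["open_registration"] = False
        continue
    instance["users"] = data["usage"]["users"]["activeMonth"]
    instance["open_registration"] = data["openRegistrations"]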
authors: ["mousereeve@riseup.net"] | author_id: mousereeve@riseup.net

blob_id: 562dca742cb5a87f7ca3fff9a368c7cae2ac1f84 | directory_id: f4b60f5e49baf60976987946c20a8ebca4880602 | content_id: 5ddac9a6cfe67a5a335cf7addb99b50881a2ad46
path: /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/isis/treecalcstats5min.py | repo_name: cqbomb/qytang_aci | branch_name: refs/heads/master | detected_licenses: [] | license_type: no_license
snapshot_id: 12e508d54d9f774b537c33563762e694783d6ba8 | revision_id: a7fab9d6cda7fadcc995672e55c0ef7e7187696e
visit_date: 2022-12-21T13:30:05.240231 | revision_date: 2018-12-04T01:46:53 | committer_date: 2018-12-04T01:46:53 | github_id: 159,911,666 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2022-12-07T23:53:02 | gha_created_at: 2018-12-01T05:17:50 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 28,459 | extension: py
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class TreeCalcStats5min(Mo):
"""
A class that represents the most current statistics for FTAG global in a 5 minute sampling interval. This class updates every 10 seconds.
"""
meta = StatsClassMeta("cobra.model.isis.TreeCalcStats5min", "FTAG global")
counter = CounterMeta("avgCalcEff", CounterCategory.COUNTER, "transactions", "average effective calculations")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "avgCalcEffLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "avgCalcEffCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "avgCalcEffPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "avgCalcEffMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "avgCalcEffMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "avgCalcEffAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "avgCalcEffSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "avgCalcEffBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "avgCalcEffThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "avgCalcEffTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "avgCalcEffTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "avgCalcEffRate"
meta._counters.append(counter)
counter = CounterMeta("calcEff", CounterCategory.COUNTER, "transactions", "effective calculations")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "calcEffLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "calcEffCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "calcEffPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "calcEffMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "calcEffMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "calcEffAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "calcEffSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "calcEffBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "calcEffThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "calcEffTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "calcEffTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "calcEffRate"
meta._counters.append(counter)
counter = CounterMeta("runs", CounterCategory.COUNTER, "transactions", "runs")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "runsLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "runsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "runsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "runsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "runsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "runsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "runsSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "runsBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "runsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "runsTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "runsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "runsRate"
meta._counters.append(counter)
meta.moClassName = "isisTreeCalcStats5min"
meta.rnFormat = "CDisisTreeCalcStats5min"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current FTAG global stats in 5 minute"
meta.writeAccessMask = 0x8008020040001
meta.readAccessMask = 0x8008020040001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.isis.Dom")
meta.superClasses.add("cobra.model.isis.TreeCalcStats")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.rnPrefixes = [
('CDisisTreeCalcStats5min', False),
]
prop = PropMeta("str", "avgCalcEffAvg", "avgCalcEffAvg", 9590, PropCategory.IMPLICIT_AVG)
prop.label = "average effective calculations average value"
prop.isOper = True
prop.isStats = True
meta.props.add("avgCalcEffAvg", prop)
prop = PropMeta("str", "avgCalcEffBase", "avgCalcEffBase", 9585, PropCategory.IMPLICIT_BASELINE)
prop.label = "average effective calculations baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("avgCalcEffBase", prop)
prop = PropMeta("str", "avgCalcEffCum", "avgCalcEffCum", 9586, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "average effective calculations cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("avgCalcEffCum", prop)
prop = PropMeta("str", "avgCalcEffLast", "avgCalcEffLast", 9584, PropCategory.IMPLICIT_LASTREADING)
prop.label = "average effective calculations current value"
prop.isOper = True
prop.isStats = True
meta.props.add("avgCalcEffLast", prop)
prop = PropMeta("str", "avgCalcEffMax", "avgCalcEffMax", 9589, PropCategory.IMPLICIT_MAX)
prop.label = "average effective calculations maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("avgCalcEffMax", prop)
prop = PropMeta("str", "avgCalcEffMin", "avgCalcEffMin", 9588, PropCategory.IMPLICIT_MIN)
prop.label = "average effective calculations minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("avgCalcEffMin", prop)
prop = PropMeta("str", "avgCalcEffPer", "avgCalcEffPer", 9587, PropCategory.IMPLICIT_PERIODIC)
prop.label = "average effective calculations periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("avgCalcEffPer", prop)
prop = PropMeta("str", "avgCalcEffRate", "avgCalcEffRate", 9595, PropCategory.IMPLICIT_RATE)
prop.label = "average effective calculations rate"
prop.isOper = True
prop.isStats = True
meta.props.add("avgCalcEffRate", prop)
prop = PropMeta("str", "avgCalcEffSpct", "avgCalcEffSpct", 9591, PropCategory.IMPLICIT_SUSPECT)
prop.label = "average effective calculations suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("avgCalcEffSpct", prop)
prop = PropMeta("str", "avgCalcEffThr", "avgCalcEffThr", 9592, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "average effective calculations thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("avgCalcEffThr", prop)
prop = PropMeta("str", "avgCalcEffTr", "avgCalcEffTr", 9594, PropCategory.IMPLICIT_TREND)
prop.label = "average effective calculations trend"
prop.isOper = True
prop.isStats = True
meta.props.add("avgCalcEffTr", prop)
prop = PropMeta("str", "avgCalcEffTrBase", "avgCalcEffTrBase", 9593, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "average effective calculations trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("avgCalcEffTrBase", prop)
prop = PropMeta("str", "calcEffAvg", "calcEffAvg", 9617, PropCategory.IMPLICIT_AVG)
prop.label = "effective calculations average value"
prop.isOper = True
prop.isStats = True
meta.props.add("calcEffAvg", prop)
prop = PropMeta("str", "calcEffBase", "calcEffBase", 9612, PropCategory.IMPLICIT_BASELINE)
prop.label = "effective calculations baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("calcEffBase", prop)
prop = PropMeta("str", "calcEffCum", "calcEffCum", 9613, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "effective calculations cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("calcEffCum", prop)
prop = PropMeta("str", "calcEffLast", "calcEffLast", 9611, PropCategory.IMPLICIT_LASTREADING)
prop.label = "effective calculations current value"
prop.isOper = True
prop.isStats = True
meta.props.add("calcEffLast", prop)
prop = PropMeta("str", "calcEffMax", "calcEffMax", 9616, PropCategory.IMPLICIT_MAX)
prop.label = "effective calculations maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("calcEffMax", prop)
prop = PropMeta("str", "calcEffMin", "calcEffMin", 9615, PropCategory.IMPLICIT_MIN)
prop.label = "effective calculations minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("calcEffMin", prop)
prop = PropMeta("str", "calcEffPer", "calcEffPer", 9614, PropCategory.IMPLICIT_PERIODIC)
prop.label = "effective calculations periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("calcEffPer", prop)
prop = PropMeta("str", "calcEffRate", "calcEffRate", 9622, PropCategory.IMPLICIT_RATE)
prop.label = "effective calculations rate"
prop.isOper = True
prop.isStats = True
meta.props.add("calcEffRate", prop)
prop = PropMeta("str", "calcEffSpct", "calcEffSpct", 9618, PropCategory.IMPLICIT_SUSPECT)
prop.label = "effective calculations suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("calcEffSpct", prop)
prop = PropMeta("str", "calcEffThr", "calcEffThr", 9619, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "effective calculations thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("calcEffThr", prop)
prop = PropMeta("str", "calcEffTr", "calcEffTr", 9621, PropCategory.IMPLICIT_TREND)
prop.label = "effective calculations trend"
prop.isOper = True
prop.isStats = True
meta.props.add("calcEffTr", prop)
prop = PropMeta("str", "calcEffTrBase", "calcEffTrBase", 9620, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "effective calculations trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("calcEffTrBase", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "runsAvg", "runsAvg", 9644, PropCategory.IMPLICIT_AVG)
prop.label = "runs average value"
prop.isOper = True
prop.isStats = True
meta.props.add("runsAvg", prop)
prop = PropMeta("str", "runsBase", "runsBase", 9639, PropCategory.IMPLICIT_BASELINE)
prop.label = "runs baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("runsBase", prop)
prop = PropMeta("str", "runsCum", "runsCum", 9640, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "runs cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("runsCum", prop)
prop = PropMeta("str", "runsLast", "runsLast", 9638, PropCategory.IMPLICIT_LASTREADING)
prop.label = "runs current value"
prop.isOper = True
prop.isStats = True
meta.props.add("runsLast", prop)
prop = PropMeta("str", "runsMax", "runsMax", 9643, PropCategory.IMPLICIT_MAX)
prop.label = "runs maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("runsMax", prop)
prop = PropMeta("str", "runsMin", "runsMin", 9642, PropCategory.IMPLICIT_MIN)
prop.label = "runs minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("runsMin", prop)
prop = PropMeta("str", "runsPer", "runsPer", 9641, PropCategory.IMPLICIT_PERIODIC)
prop.label = "runs periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("runsPer", prop)
prop = PropMeta("str", "runsRate", "runsRate", 9649, PropCategory.IMPLICIT_RATE)
prop.label = "runs rate"
prop.isOper = True
prop.isStats = True
meta.props.add("runsRate", prop)
prop = PropMeta("str", "runsSpct", "runsSpct", 9645, PropCategory.IMPLICIT_SUSPECT)
prop.label = "runs suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("runsSpct", prop)
prop = PropMeta("str", "runsThr", "runsThr", 9646, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "runs thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("runsThr", prop)
prop = PropMeta("str", "runsTr", "runsTr", 9648, PropCategory.IMPLICIT_TREND)
prop.label = "runs trend"
prop.isOper = True
prop.isStats = True
meta.props.add("runsTr", prop)
prop = PropMeta("str", "runsTrBase", "runsTrBase", 9647, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "runs trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("runsTrBase", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"collinsctk@qytang.com"
] |
collinsctk@qytang.com
|
c86c90fb6dbe674e8494448360f70baf4e909de3
|
ba602dc67ad7bb50133aeb312f3c6c54627b3dec
|
/data/3930/AC_py/518819.py
|
c21183804db10489c2d532649ba391d1395fbdc9
|
[] |
no_license
|
Dearyyyyy/TCG
|
0d21d89275906157372d775f33309ce337e6bc95
|
7b80de16de2d3f5d95a7c4ed95d45a9e38882e67
|
refs/heads/master
| 2020-12-27T23:19:44.845918
| 2020-02-04T01:59:23
| 2020-02-04T01:59:23
| 238,101,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
# coding=utf-8
n=int(input())
for i in range(n):
s=int(input())
x1=0
x2=0
x5=0
x10=0
x20=0
x50=0
x100=0
x100=s//100
x50=(s-(x100*100))//50
x20=(s-(x100*100)-(x50*50))//20
x10=(s-(x100*100)-(x50*50)-(x20*20))//10
x5=(s-(x100*100)-(x50*50)-(x20*20)-(x10*10))//5
x2=(s-(x100*100)-(x50*50)-(x20*20)-(x10*10)-(x5*5))//2
x1=s-(x100*100)-(x50*50)-(x20*20)-(x10*10)-(x5*5)-(x2*2)
print("%d %d %d %d %d %d %d"%(x1,x2,x5,x10,x20,x50,x100))
|
[
"543271544@qq.com"
] |
543271544@qq.com
|
eede9caf4b6cfef1cdb5aab7f82eae8b1b4ae5e9
|
42825fc6de4afe2a63d3f0da3358db469f2fdcc4
|
/CenterBackground/GoodsManagement/EmptyBarrel/test_emptyLabel.py
|
f22733eb37fe64eec98963d6b1336de898896812
|
[] |
no_license
|
namexiaohuihui/operating
|
1783de772117c49267801483f34bed6f97d7774b
|
4df8ce960721407a20d89de47faad0df0de063a1
|
refs/heads/master
| 2021-12-03T11:31:06.429705
| 2021-11-11T01:41:54
| 2021-11-11T01:41:54
| 94,872,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,128
|
py
|
# -*- coding: utf-8 -*-
"""
_oo0oo_
o8888888o
88" . "88
(| -_- |)
0\ = /0
___/`---'\___
.' \\| |// '.
/ \\||| : |||// \
/ _||||| -:- |||||- \
| | \\\ - /// | |
| \_| ''\---/'' |_/ |
\ .-\__ '-' ___/-. /
___'. .' /--.--\ `. .'___
."" '< `.___\_<|>_/___.' >' "".
| | : `- \`.;`\ _ /`;.`/ - ` : | |
\ \ `_. \_ __\ /__ _/ .-` / /
=====`-.____`.___ \_____/___.-`___.-'=====
`=---='
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
              Buddha bless: no bugs, ever
@author: ln_company
@license: (C) Copyright 2016- 2018, Node Supply Chain Manager Corporation Limited.
@Software: PyCharm
@file: test_emptyLabel.py
@time: 2019/2/20 11:10
@desc:
"""
import os
import inspect
import unittest
from CenterBackground import GoodsManagement
from tools.excelname.Center.googsMana import CityGoodsPage
from CenterBackground.surfacejude import SurfaceJude
class TestEmptyLabel(unittest.TestCase):
    # Keyword for the jump to the detail page
jump_detail = "detail"
    # Keyword for the jump to the logs page
jump_logs = "logs"
@classmethod
def setUpClass(cls):
basepath = os.path.split(os.path.dirname(__file__))[1]
cls.basename = os.path.splitext(os.path.basename(__file__))[0]
cls.basename = basepath + "-" + cls.basename
        # Pass in the subset key and the sheet name from the Excel document
config = GoodsManagement.add_key(GoodsManagement.emptybarrel, GoodsManagement.label)
cls.empty_label = SurfaceJude(config, cls.basename, CityGoodsPage)
if "\\" in os.path.dirname(__file__):
cls.method_path = os.path.dirname(__file__).split('\\', 2)[-1]
elif "/" in os.path.dirname(__file__):
cls.method_path = os.path.dirname(__file__).split('/', 2)[-1]
pass
def setUp(self):
self.empty_label.screen_set_up(self.basename)
pass
def tearDown(self):
self.empty_label.screen_tear_down(self)
pass
def barrel_empty_to(self, way):
"""
        Shared helper that performs the jump to the requested page.
:return:
"""
fun_attr = "yaml_barrel_%s" % way
fun_attr = getattr(self.empty_label.bi, fun_attr, False)
self.empty_label.vac.css_click(self.empty_label.driver,
self.empty_label.financial[fun_attr()])
pass
    # ------------------------------- Purchase-sales-inventory header: success cases -----------------------------
def test_empty_success(self):
self.empty_label.setFunctionName(inspect.stack()[0][3])
self.empty_label.title_execute()
pass
def test_empty_datas(self):
self.empty_label.setFunctionName(inspect.stack()[0][3])
self.empty_label.surface_execute()
pass
    # ------------------------------- Stock detail header: success cases -----------------------------
def test_detail_success(self):
self.empty_label.setFunctionName(inspect.stack()[0][3])
self.barrel_empty_to(self.jump_detail)
self.empty_label.title_execute()
pass
def test_detail_datas(self):
"""用例场景=:="""
self.empty_label.setFunctionName(inspect.stack()[0][3])
self.barrel_empty_to(self.jump_detail)
self.empty_label.surface_execute()
pass
    # ------------------------------- Stock change header: success cases -----------------------------
def test_log_success(self):
self.empty_label.setFunctionName(inspect.stack()[0][3])
self.barrel_empty_to(self.jump_logs)
self.empty_label.title_execute()
pass
def test_log_datas(self):
self.empty_label.setFunctionName(inspect.stack()[0][3])
self.barrel_empty_to(self.jump_logs)
self.empty_label.surface_execute()
pass
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"704866169@qq.com"
] |
704866169@qq.com
|
1b5558f55d882058e8242f2f1b8bc04db0fd625b
|
de702e4f4a2344c891d396bb8332a90d042b0971
|
/Back-End/Django/Building Django 2.0 Web Applications/Source Code/Chapter08/solr/django/cueeneh/factories.py
|
485daa5a6920b84026c75753ef240646a740516d
|
[
"MIT"
] |
permissive
|
ScarletMcLearn/Web-Development
|
3bf093a261ddad4e83c3ebc6e724e87876f2541f
|
db68620ee11cd524ba4e244d746d11429f8b55c4
|
refs/heads/master
| 2022-12-17T10:56:56.238037
| 2021-01-18T14:13:33
| 2021-01-18T14:13:33
| 88,884,955
| 0
| 0
| null | 2022-12-08T06:47:35
| 2017-04-20T16:03:19
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
from unittest.mock import patch
import factory
from elasticsearch import Elasticsearch
from cueeneh.models import Question
from user.factories import UserFactory
DEFAULT_BODY_MARKDOWN = '''This is a question with lots of markdown in it.
This is a new paragraph with *italics* and **bold**.
for foo in bar:
# what a code sample
<script>console.log('dangerous!')</script>
'''
DEFAULT_BODY_HTML = '''<p>This is a question with lots of markdown in it.</p>
<p>This is a new paragraph with <em>italics</em> and <strong>bold</strong>.</p>
<pre><code>for foo in bar:
# what a code sample
<script>console.log('dangerous!')</script>
</code></pre>
'''
class QuestionFactory(factory.DjangoModelFactory):
title = factory.Sequence(lambda n: 'Question #%d' % n)
question = DEFAULT_BODY_MARKDOWN
user = factory.SubFactory(UserFactory)
class Meta:
model = Question
@classmethod
def _create(cls, model_class, *args, **kwargs):
with patch('cueeneh.service.elasticsearch.Elasticsearch',
spec=Elasticsearch):
question = super()._create(model_class, *args, **kwargs)
return question
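# --- Hypothetical usage sketch (not part of the original module) ---
# The `_create` override patches the Elasticsearch client, so tests can
# persist Question rows without a live search cluster; the markdown/html
# fixture pair above also pins the expected rendering, including escaping
# of the embedded <script> tag.
#
# question = QuestionFactory()                # saved, indexing call mocked
# questions = QuestionFactory.create_batch(3)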
|
[
"noreply@github.com"
] |
ScarletMcLearn.noreply@github.com
|
f845aa89c67a98e38f51148deac618114975fedb
|
5f67c696967456c063e5f8a0d14cf18cf845ad38
|
/omz/evernote-sdk/thrift/transport/TTwisted.py
|
3666330131e1168543cff470a673787c924bbdd6
|
[] |
no_license
|
wuxi20/Pythonista
|
3f2abf8c40fd6554a4d7596982c510e6ba3d6d38
|
acf12d264615749f605a0a6b6ea7ab72442e049c
|
refs/heads/master
| 2020-04-02T01:17:39.264328
| 2019-04-16T18:26:59
| 2019-04-16T18:26:59
| 153,848,116
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,544
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from zope.interface import implements, Interface, Attribute
from twisted.internet.protocol import Protocol, ServerFactory, ClientFactory, \
connectionDone
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import log
from twisted.web import server, resource, http
from thrift.transport import TTransport
from io import StringIO
class TMessageSenderTransport(TTransport.TTransportBase):
def __init__(self):
self.__wbuf = StringIO()
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
msg = self.__wbuf.getvalue()
self.__wbuf = StringIO()
self.sendMessage(msg)
def sendMessage(self, message):
raise NotImplementedError
class TCallbackTransport(TMessageSenderTransport):
def __init__(self, func):
TMessageSenderTransport.__init__(self)
self.func = func
def sendMessage(self, message):
self.func(message)
class ThriftClientProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 2 ** 31 - 1
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self._client_class = client_class
self._iprot_factory = iprot_factory
if oprot_factory is None:
self._oprot_factory = iprot_factory
else:
self._oprot_factory = oprot_factory
self.recv_map = {}
self.started = defer.Deferred()
def dispatch(self, msg):
self.sendString(msg)
def connectionMade(self):
tmo = TCallbackTransport(self.dispatch)
self.client = self._client_class(tmo, self._oprot_factory)
self.started.callback(self.client)
def connectionLost(self, reason=connectionDone):
for k,v in list(self.client._reqs.items()):
tex = TTransport.TTransportException(
type=TTransport.TTransportException.END_OF_FILE,
message='Connection closed')
v.errback(tex)
def stringReceived(self, frame):
tr = TTransport.TMemoryBuffer(frame)
iprot = self._iprot_factory.getProtocol(tr)
(fname, mtype, rseqid) = iprot.readMessageBegin()
try:
method = self.recv_map[fname]
except KeyError:
method = getattr(self.client, 'recv_' + fname)
self.recv_map[fname] = method
method(iprot, mtype, rseqid)
class ThriftServerProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 2 ** 31 - 1
def dispatch(self, msg):
self.sendString(msg)
def processError(self, error):
self.transport.loseConnection()
def processOk(self, _, tmo):
msg = tmo.getvalue()
if len(msg) > 0:
self.dispatch(msg)
def stringReceived(self, frame):
tmi = TTransport.TMemoryBuffer(frame)
tmo = TTransport.TMemoryBuffer()
iprot = self.factory.iprot_factory.getProtocol(tmi)
oprot = self.factory.oprot_factory.getProtocol(tmo)
d = self.factory.processor.process(iprot, oprot)
d.addCallbacks(self.processOk, self.processError,
callbackArgs=(tmo,))
class IThriftServerFactory(Interface):
processor = Attribute("Thrift processor")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class IThriftClientFactory(Interface):
client_class = Attribute("Thrift client class")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class ThriftServerFactory(ServerFactory):
implements(IThriftServerFactory)
protocol = ThriftServerProtocol
def __init__(self, processor, iprot_factory, oprot_factory=None):
self.processor = processor
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
class ThriftClientFactory(ClientFactory):
implements(IThriftClientFactory)
protocol = ThriftClientProtocol
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self.client_class = client_class
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
def buildProtocol(self, addr):
p = self.protocol(self.client_class, self.iprot_factory,
self.oprot_factory)
p.factory = self
return p
class ThriftResource(resource.Resource):
allowedMethods = ('POST',)
def __init__(self, processor, inputProtocolFactory,
outputProtocolFactory=None):
resource.Resource.__init__(self)
self.inputProtocolFactory = inputProtocolFactory
if outputProtocolFactory is None:
self.outputProtocolFactory = inputProtocolFactory
else:
self.outputProtocolFactory = outputProtocolFactory
self.processor = processor
def getChild(self, path, request):
return self
def _cbProcess(self, _, request, tmo):
msg = tmo.getvalue()
request.setResponseCode(http.OK)
request.setHeader("content-type", "application/x-thrift")
request.write(msg)
request.finish()
def render_POST(self, request):
request.content.seek(0, 0)
data = request.content.read()
tmi = TTransport.TMemoryBuffer(data)
tmo = TTransport.TMemoryBuffer()
iprot = self.inputProtocolFactory.getProtocol(tmi)
oprot = self.outputProtocolFactory.getProtocol(tmo)
d = self.processor.process(iprot, oprot)
d.addCallback(self._cbProcess, request, tmo)
return server.NOT_DONE_YET
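# --- Hypothetical usage sketch (not part of the original module) ---
# Serving a generated Thrift processor over TCP with the factory above;
# `processor` and the port number are illustrative.
#
# from twisted.internet import reactor
# from thrift.protocol import TBinaryProtocol
#
# pfactory = TBinaryProtocol.TBinaryProtocolFactory()
# reactor.listenTCP(9090, ThriftServerFactory(processor, pfactory))
# reactor.run()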
|
[
"22399993@qq.com"
] |
22399993@qq.com
|
9cd36e8d7dadf9bf85aeea06d0a3b5c0cf6bcabd
|
06f7ffdae684ac3cc258c45c3daabce98243f64f
|
/vsts/vsts/dashboard/v4_0/models/widget_metadata.py
|
d6dd1f503f0da260d91e428c00b39d3097d9c4af
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
kenkuo/azure-devops-python-api
|
7dbfb35f1c9637c9db10207824dd535c4d6861e8
|
9ac38a97a06ee9e0ee56530de170154f6ed39c98
|
refs/heads/master
| 2020-04-03T17:47:29.526104
| 2018-10-25T17:46:09
| 2018-10-25T17:46:09
| 155,459,045
| 0
| 0
|
MIT
| 2018-10-30T21:32:43
| 2018-10-30T21:32:42
| null |
UTF-8
|
Python
| false
| false
| 7,044
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class WidgetMetadata(Model):
"""WidgetMetadata.
:param allowed_sizes: Sizes supported by the Widget.
:type allowed_sizes: list of :class:`WidgetSize <dashboard.v4_0.models.WidgetSize>`
:param analytics_service_required: Opt-in boolean that indicates if the widget requires the Analytics Service to function. Widgets requiring the analytics service are hidden from the catalog if the Analytics Service is not available.
:type analytics_service_required: bool
:param catalog_icon_url: Resource for an icon in the widget catalog.
:type catalog_icon_url: str
:param catalog_info_url: Opt-in URL string pointing at widget information. Defaults to extension marketplace URL if omitted
:type catalog_info_url: str
:param configuration_contribution_id: The id of the underlying contribution defining the supplied Widget custom configuration UI. Null if custom configuration UI is not available.
:type configuration_contribution_id: str
:param configuration_contribution_relative_id: The relative id of the underlying contribution defining the supplied Widget custom configuration UI. Null if custom configuration UI is not available.
:type configuration_contribution_relative_id: str
:param configuration_required: Indicates if the widget requires configuration before being added to dashboard.
:type configuration_required: bool
:param content_uri: Uri for the WidgetFactory to get the widget
:type content_uri: str
:param contribution_id: The id of the underlying contribution defining the supplied Widget.
:type contribution_id: str
:param default_settings: Optional default settings to be copied into widget settings
:type default_settings: str
:param description: Summary information describing the widget.
:type description: str
    :param is_enabled: Widgets can be disabled by the app store. This needs graceful handling for persistence (allow) and for requests (tag as disabled and provide context).
:type is_enabled: bool
:param is_name_configurable: Opt-out boolean that indicates if the widget supports widget name/title configuration. Widgets ignoring the name should set it to false in the manifest.
:type is_name_configurable: bool
:param is_visible_from_catalog: Opt-out boolean indicating if the widget is hidden from the catalog. For V1, only "pull" model widgets can be provided from the catalog.
:type is_visible_from_catalog: bool
:param lightbox_options: Opt-in lightbox properties
:type lightbox_options: :class:`LightboxOptions <dashboard.v4_0.models.LightboxOptions>`
:param loading_image_url: Resource for a loading placeholder image on dashboard
:type loading_image_url: str
:param name: User facing name of the widget type. Each widget must use a unique value here.
:type name: str
:param publisher_name: Publisher Name of this kind of widget.
:type publisher_name: str
:param supported_scopes: Data contract required for the widget to function and to work in its container.
:type supported_scopes: list of WidgetScope
:param targets: Contribution target IDs
:type targets: list of str
:param type_id: Dev-facing id of this kind of widget.
:type type_id: str
"""
_attribute_map = {
'allowed_sizes': {'key': 'allowedSizes', 'type': '[WidgetSize]'},
'analytics_service_required': {'key': 'analyticsServiceRequired', 'type': 'bool'},
'catalog_icon_url': {'key': 'catalogIconUrl', 'type': 'str'},
'catalog_info_url': {'key': 'catalogInfoUrl', 'type': 'str'},
'configuration_contribution_id': {'key': 'configurationContributionId', 'type': 'str'},
'configuration_contribution_relative_id': {'key': 'configurationContributionRelativeId', 'type': 'str'},
'configuration_required': {'key': 'configurationRequired', 'type': 'bool'},
'content_uri': {'key': 'contentUri', 'type': 'str'},
'contribution_id': {'key': 'contributionId', 'type': 'str'},
'default_settings': {'key': 'defaultSettings', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'is_name_configurable': {'key': 'isNameConfigurable', 'type': 'bool'},
'is_visible_from_catalog': {'key': 'isVisibleFromCatalog', 'type': 'bool'},
'lightbox_options': {'key': 'lightboxOptions', 'type': 'LightboxOptions'},
'loading_image_url': {'key': 'loadingImageUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'publisher_name': {'key': 'publisherName', 'type': 'str'},
'supported_scopes': {'key': 'supportedScopes', 'type': '[WidgetScope]'},
'targets': {'key': 'targets', 'type': '[str]'},
'type_id': {'key': 'typeId', 'type': 'str'}
}
def __init__(self, allowed_sizes=None, analytics_service_required=None, catalog_icon_url=None, catalog_info_url=None, configuration_contribution_id=None, configuration_contribution_relative_id=None, configuration_required=None, content_uri=None, contribution_id=None, default_settings=None, description=None, is_enabled=None, is_name_configurable=None, is_visible_from_catalog=None, lightbox_options=None, loading_image_url=None, name=None, publisher_name=None, supported_scopes=None, targets=None, type_id=None):
super(WidgetMetadata, self).__init__()
self.allowed_sizes = allowed_sizes
self.analytics_service_required = analytics_service_required
self.catalog_icon_url = catalog_icon_url
self.catalog_info_url = catalog_info_url
self.configuration_contribution_id = configuration_contribution_id
self.configuration_contribution_relative_id = configuration_contribution_relative_id
self.configuration_required = configuration_required
self.content_uri = content_uri
self.contribution_id = contribution_id
self.default_settings = default_settings
self.description = description
self.is_enabled = is_enabled
self.is_name_configurable = is_name_configurable
self.is_visible_from_catalog = is_visible_from_catalog
self.lightbox_options = lightbox_options
self.loading_image_url = loading_image_url
self.name = name
self.publisher_name = publisher_name
self.supported_scopes = supported_scopes
self.targets = targets
self.type_id = type_id
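# --- Hypothetical usage sketch (not part of the generated file) ---
# `_attribute_map` is what lets msrest translate the Python snake_case
# attributes into the camelCase JSON keys of the REST contract. The field
# values below are illustrative.
#
# from msrest.serialization import Serializer
#
# widget = WidgetMetadata(name='Burndown', type_id='ms.vss-dashboards.burndown')
# body = Serializer({'WidgetMetadata': WidgetMetadata}).body(widget, 'WidgetMetadata')
# # body -> {'name': 'Burndown', 'typeId': 'ms.vss-dashboards.burndown'}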
|
[
"tedchamb@microsoft.com"
] |
tedchamb@microsoft.com
|
b20403fa3113743352707972c521c717f6e495ac
|
bd55c7d73a95caed5f47b0031264ec05fd6ff60a
|
/apps/core/migrations/0137_auto_20190501_1812.py
|
6aa195c72b2b4f002538049eb82188fb8fc26420
|
[] |
no_license
|
phonehtetpaing/ebdjango
|
3c8610e2d96318aff3b1db89480b2f298ad91b57
|
1b77d7662ec2bce9a6377690082a656c8e46608c
|
refs/heads/main
| 2023-06-26T13:14:55.319687
| 2021-07-21T06:04:58
| 2021-07-21T06:04:58
| 381,564,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
# Generated by Django 2.0.5 on 2019-05-01 09:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0136_auto_20190501_1807'),
]
operations = [
migrations.AlterField(
model_name='automessagecontroller',
name='auto_message_trigger',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagecontroller_auto_message_trigger', to='core.AutoMessageTrigger', verbose_name='auto_message_trigger'),
),
migrations.AlterField(
model_name='automessagehistory',
name='auto_message_condition',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagehistory_auto_message_condition', to='core.AutoMessageCondition', verbose_name='auto_message_condition'),
),
migrations.AlterField(
model_name='automessagehistory',
name='auto_message_trigger',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagehistory_auto_message_trigger', to='core.AutoMessageTrigger', verbose_name='auto_message_trigger'),
),
migrations.AlterField(
model_name='automessagetrigger',
name='auto_message_condition',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagetrigger_auto_message_condition', to='core.AutoMessageCondition', verbose_name='auto_message_condition'),
),
]
|
[
"phonehtetpaing1221@gmail.com"
] |
phonehtetpaing1221@gmail.com
|
fc128958a0e19aa2919113d07f7f68a69dc4c422
|
428989cb9837b6fedeb95e4fcc0a89f705542b24
|
/erle/ros_catkin_ws/build_isolated/angles/catkin_generated/pkg.develspace.context.pc.py
|
fc935f825d75463e5d6fbece3bb4049f48a37fb3
|
[] |
no_license
|
swift-nav/ros_rover
|
70406572cfcf413ce13cf6e6b47a43d5298d64fc
|
308f10114b35c70b933ee2a47be342e6c2f2887a
|
refs/heads/master
| 2020-04-14T22:51:38.911378
| 2016-07-08T21:44:22
| 2016-07-08T21:44:22
| 60,873,336
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/erle/ros_catkin_ws/src/angles/angles/include".split(';') if "/home/erle/ros_catkin_ws/src/angles/angles/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "angles"
PROJECT_SPACE_DIR = "/home/erle/ros_catkin_ws/devel_isolated/angles"
PROJECT_VERSION = "1.9.10"
|
[
"igdoty@swiftnav.com"
] |
igdoty@swiftnav.com
|
b3940d6a0ad2cc879a979ba30c64ab5b02fe3b37
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5646553574277120_0/Python/BlackGammon/money.py
|
824effe1358de7f4c95957fff091a8874559678d
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
from sys import *
import numpy as np
def readval(fi, ty): return ty(fi.readline())
def readtab(fi, ty): return tuple(map(ty, fi.readline().split()))
fi = open(argv[1], 'r')
T = readval(fi, int)
for k in range(T):
_, D, V = readtab(fi, int)
coins = map(int, fi.readline().split())
s = set()
s.add(0)
for c in coins:
t = set()
for v in s:
if v + c <= V:
t.add(v + c)
for v in t:
s.add(v)
added = 0
for i in range(1, V + 1):
if i not in s:
added += 1
t = set()
for v in s:
if v + i <= V:
t.add(v + i)
for v in t:
s.add(v)
print "Case #" + str(k + 1) + ": " + str(added)
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
27637b07683868d333625fb1fc78b7e83f8b0395
|
9e3ca838b1009a23b9bf2d1459b2b04e70295748
|
/baselines/imagenet/ensemble.py
|
79171fd5f5ecdcd5cbbefc52c0e0f41200f4f995
|
[
"Apache-2.0"
] |
permissive
|
mhavasi/edward2
|
0d33d2e562edcf8eab34f5f4cc004714e9537566
|
b630fea94386f7a6413f7d33ce75bb1dbe413d2d
|
refs/heads/master
| 2022-06-20T19:54:48.838078
| 2020-05-11T17:56:53
| 2020-05-11T17:57:41
| 260,263,251
| 0
| 0
|
Apache-2.0
| 2020-04-30T16:33:47
| 2020-04-30T16:33:47
| null |
UTF-8
|
Python
| false
| false
| 10,015
|
py
|
# coding=utf-8
# Copyright 2020 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensemble on ImageNet.
This script only performs evaluation, not training. We recommend training
ensembles by launching independent runs of `deterministic.py` over different
seeds.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import edward2 as ed
import deterministic_model # local file import
import utils # local file import
import numpy as np
import tensorflow.compat.v2 as tf
flags.DEFINE_integer('per_core_batch_size', 512, 'Batch size per TPU core/GPU.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_string('data_dir', None, 'Path to training and testing data.')
flags.mark_flag_as_required('data_dir')
flags.DEFINE_string('checkpoint_dir', None,
'The directory where the model weights are stored.')
flags.mark_flag_as_required('checkpoint_dir')
flags.DEFINE_string('output_dir', '/tmp/imagenet',
'The directory where to save predictions.')
flags.DEFINE_string('alexnet_errors_path', None,
'Path to AlexNet corruption errors file.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.')
# Accelerator flags.
flags.DEFINE_bool('use_gpu', True, 'Whether to run on GPU or otherwise TPU.')
flags.DEFINE_integer('num_cores', 1, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
'Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS
# Number of images in eval dataset.
IMAGENET_VALIDATION_IMAGES = 50000
NUM_CLASSES = 1000
def ensemble_negative_log_likelihood(labels, logits):
"""Negative log-likelihood for ensemble.
For each datapoint (x,y), the ensemble's negative log-likelihood is:
```
-log p(y|x) = -log sum_{m=1}^{ensemble_size} exp(log p(y|x,theta_m)) +
log ensemble_size.
```
Args:
labels: tf.Tensor of shape [...].
logits: tf.Tensor of shape [ensemble_size, ..., num_classes].
Returns:
tf.Tensor of shape [...].
"""
labels = tf.cast(labels, tf.int32)
logits = tf.convert_to_tensor(logits)
ensemble_size = float(logits.shape[0])
nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
tf.broadcast_to(labels[tf.newaxis, ...], tf.shape(logits)[:-1]),
logits)
return -tf.reduce_logsumexp(-nll, axis=0) + tf.math.log(ensemble_size)
def gibbs_cross_entropy(labels, logits):
"""Average cross entropy for ensemble members (Gibbs cross entropy).
For each datapoint (x,y), the ensemble's Gibbs cross entropy is:
```
GCE = - (1/ensemble_size) sum_{m=1}^ensemble_size log p(y|x,theta_m).
```
The Gibbs cross entropy approximates the average cross entropy of a single
model drawn from the (Gibbs) ensemble.
Args:
labels: tf.Tensor of shape [...].
logits: tf.Tensor of shape [ensemble_size, ..., num_classes].
Returns:
tf.Tensor of shape [...].
"""
labels = tf.cast(labels, tf.int32)
logits = tf.convert_to_tensor(logits)
nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
tf.broadcast_to(labels[tf.newaxis, ...], tf.shape(logits)[:-1]),
logits)
return tf.reduce_mean(nll, axis=0)
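# --- Hypothetical sanity check (not part of the original script) ---
# With ensemble_size == 1, logsumexp over a single element is the identity and
# log(1) == 0, so both metrics above reduce to the plain softmax cross
# entropy. All tensor values below are illustrative.
def _single_member_sanity_check():
  labels = tf.constant([0, 1])
  logits = tf.constant([[[2.0, 0.0], [0.5, 1.5]]])  # [ensemble=1, batch, classes]
  plain = tf.nn.sparse_softmax_cross_entropy_with_logits(labels, logits[0])
  tf.debugging.assert_near(ensemble_negative_log_likelihood(labels, logits), plain)
  tf.debugging.assert_near(gibbs_cross_entropy(labels, logits), plain)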
def main(argv):
del argv # unused arg
if not FLAGS.use_gpu:
raise ValueError('Only GPU is currently supported.')
if FLAGS.num_cores > 1:
raise ValueError('Only a single accelerator is currently supported.')
tf.enable_v2_behavior()
tf.random.set_seed(FLAGS.seed)
tf.io.gfile.makedirs(FLAGS.output_dir)
batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size
dataset_test = utils.ImageNetInput(
is_training=False,
data_dir=FLAGS.data_dir,
batch_size=FLAGS.per_core_batch_size,
use_bfloat16=False).input_fn()
test_datasets = {'clean': dataset_test}
corruption_types, max_intensity = utils.load_corrupted_test_info()
for name in corruption_types:
for intensity in range(1, max_intensity + 1):
dataset_name = '{0}_{1}'.format(name, intensity)
test_datasets[dataset_name] = utils.load_corrupted_test_dataset(
name=name,
intensity=intensity,
batch_size=FLAGS.per_core_batch_size,
drop_remainder=True,
use_bfloat16=False)
model = deterministic_model.resnet50(input_shape=(224, 224, 3),
num_classes=NUM_CLASSES)
logging.info('Model input shape: %s', model.input_shape)
logging.info('Model output shape: %s', model.output_shape)
logging.info('Model number of weights: %s', model.count_params())
# Search for checkpoints from their index file; then remove the index suffix.
ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.checkpoint_dir,
'**/*.index'))
ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
ensemble_size = len(ensemble_filenames)
logging.info('Ensemble size: %s', ensemble_size)
logging.info('Ensemble number of weights: %s',
ensemble_size * model.count_params())
logging.info('Ensemble filenames: %s', str(ensemble_filenames))
checkpoint = tf.train.Checkpoint(model=model)
# Write model predictions to files.
num_datasets = len(test_datasets)
for m, ensemble_filename in enumerate(ensemble_filenames):
checkpoint.restore(ensemble_filename)
for n, (name, test_dataset) in enumerate(test_datasets.items()):
filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
filename = os.path.join(FLAGS.output_dir, filename)
if not tf.io.gfile.exists(filename):
logits = []
test_iterator = iter(test_dataset)
for _ in range(steps_per_eval):
features, _ = next(test_iterator) # pytype: disable=attribute-error
logits.append(model(features, training=False))
logits = tf.concat(logits, axis=0)
with tf.io.gfile.GFile(filename, 'w') as f:
np.save(f, logits.numpy())
percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)
message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
'Dataset {:d}/{:d}'.format(percent,
m + 1,
ensemble_size,
n + 1,
num_datasets))
logging.info(message)
metrics = {
'test/negative_log_likelihood': tf.keras.metrics.Mean(),
'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
'test/ece': ed.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
}
corrupt_metrics = {}
for name in test_datasets:
corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
corrupt_metrics['test/accuracy_{}'.format(name)] = (
tf.keras.metrics.SparseCategoricalAccuracy())
corrupt_metrics['test/ece_{}'.format(
name)] = ed.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins)
# Evaluate model predictions.
for n, (name, test_dataset) in enumerate(test_datasets.items()):
logits_dataset = []
for m in range(ensemble_size):
filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
filename = os.path.join(FLAGS.output_dir, filename)
with tf.io.gfile.GFile(filename, 'rb') as f:
logits_dataset.append(np.load(f))
logits_dataset = tf.convert_to_tensor(logits_dataset)
test_iterator = iter(test_dataset)
for step in range(steps_per_eval):
_, labels = next(test_iterator) # pytype: disable=attribute-error
logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]
labels = tf.cast(tf.reshape(labels, [-1]), tf.int32)
negative_log_likelihood = tf.reduce_mean(
ensemble_negative_log_likelihood(labels, logits))
per_probs = tf.nn.softmax(logits)
probs = tf.reduce_mean(per_probs, axis=0)
if name == 'clean':
gibbs_ce = tf.reduce_mean(gibbs_cross_entropy(labels, logits))
metrics['test/negative_log_likelihood'].update_state(
negative_log_likelihood)
metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
metrics['test/accuracy'].update_state(labels, probs)
metrics['test/ece'].update_state(labels, probs)
else:
corrupt_metrics['test/nll_{}'.format(name)].update_state(
negative_log_likelihood)
corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
labels, probs)
corrupt_metrics['test/ece_{}'.format(name)].update_state(
labels, probs)
message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
(n + 1) / num_datasets, n + 1, num_datasets))
logging.info(message)
corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
corruption_types,
max_intensity,
FLAGS.alexnet_errors_path)
total_results = {name: metric.result() for name, metric in metrics.items()}
total_results.update(corrupt_results)
logging.info('Metrics: %s', total_results)
if __name__ == '__main__':
app.run(main)
|
[
"edward-dev@google.com"
] |
edward-dev@google.com
|
a701a7bf81999b097bc8df559e9e603c6528822a
|
1c5f4a13a5d67201b3a21c6e61392be2d9071f86
|
/.VirtualEnv/Lib/site-packages/influxdb_client/domain/user.py
|
c5bd079cd5d13004096794cab8917bcab9f208fb
|
[] |
no_license
|
ArmenFirman/FastAPI-InfluxDB
|
19e3867c2ec5657a9428a05ca98818ca7fde5fd0
|
b815509c89b5420f72abf514562e7f46dcd65436
|
refs/heads/main
| 2023-06-24T20:55:08.361089
| 2021-07-29T00:11:18
| 2021-07-29T00:11:18
| 390,462,832
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,664
|
py
|
# coding: utf-8
"""
Influx OSS API Service.
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class User(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'oauth_id': 'str',
'name': 'str',
'status': 'str'
}
attribute_map = {
'id': 'id',
'oauth_id': 'oauthID',
'name': 'name',
'status': 'status'
}
def __init__(self, id=None, oauth_id=None, name=None, status='active'): # noqa: E501,D401,D403
"""User - a model defined in OpenAPI.""" # noqa: E501
self._id = None
self._oauth_id = None
self._name = None
self._status = None
self.discriminator = None
if id is not None:
self.id = id
if oauth_id is not None:
self.oauth_id = oauth_id
self.name = name
if status is not None:
self.status = status
@property
def id(self):
"""Get the id of this User.
:return: The id of this User.
:rtype: str
""" # noqa: E501
return self._id
@id.setter
def id(self, id):
"""Set the id of this User.
:param id: The id of this User.
:type: str
""" # noqa: E501
self._id = id
@property
def oauth_id(self):
"""Get the oauth_id of this User.
:return: The oauth_id of this User.
:rtype: str
""" # noqa: E501
return self._oauth_id
@oauth_id.setter
def oauth_id(self, oauth_id):
"""Set the oauth_id of this User.
:param oauth_id: The oauth_id of this User.
:type: str
""" # noqa: E501
self._oauth_id = oauth_id
@property
def name(self):
"""Get the name of this User.
:return: The name of this User.
:rtype: str
""" # noqa: E501
return self._name
@name.setter
def name(self, name):
"""Set the name of this User.
:param name: The name of this User.
:type: str
""" # noqa: E501
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def status(self):
"""Get the status of this User.
        If set to `inactive`, the user is deactivated.
:return: The status of this User.
:rtype: str
""" # noqa: E501
return self._status
@status.setter
def status(self, status):
"""Set the status of this User.
        If set to `inactive`, the user is deactivated.
:param status: The status of this User.
:type: str
""" # noqa: E501
self._status = status
def to_dict(self):
"""Return the model properties as a dict."""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Return the string representation of the model."""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`."""
return self.to_str()
def __eq__(self, other):
"""Return true if both objects are equal."""
if not isinstance(other, User):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Return true if both objects are not equal."""
return not self == other
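# --- Hypothetical usage sketch (not part of the generated file) ---
# to_dict() reflects over `openapi_types`, recursing into nested models;
# the values below are illustrative.
#
# u = User(name='alice')
# u.to_dict()
# # -> {'id': None, 'oauth_id': None, 'name': 'alice', 'status': 'active'}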
|
[
"42990136+ArmenFirman@users.noreply.github.com"
] |
42990136+ArmenFirman@users.noreply.github.com
|
1291433add40002eac65d0652be12aadc8073103
|
5781bda84c1af759e7b0284f0489d50e68044c89
|
/unit_testing/test_api_networks.py
|
2e6ae18d2d6267e7ad15fc75e8fda4797bfb7ab7
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
Stanford-PERTS/triton
|
43306a582630ac6ef8d2d14c8b2a56279335a7fb
|
5a4f401fc7019d59ce4c41eafa6c5bda822fae0a
|
refs/heads/master
| 2022-10-17T11:51:10.220048
| 2020-06-14T17:37:54
| 2020-06-14T17:37:54
| 272,251,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,197
|
py
|
"""Test endpoints /api/networks/X and /api/users/X/networks.
Indirectly tests RestHandler with SqlModel. Other api suites may be less
comprehensive because they naively inherit from RestHandler and wouldn't
benefit from extra coverage.
"""
import datetime
import logging
import unittest
import webapp2
import webtest
from api_handlers import api_routes
from model import Network, Organization, Program, User
from unit_test_helper import ConsistencyTestCase
import config
import json
import jwt_helper
import mysql_connection
class TestApiNetworks(ConsistencyTestCase):
cookie_name = config.session_cookie_name
cookie_key = config.default_session_cookie_secret_key
def set_up(self):
# Let ConsistencyTestCase set up the datastore testing stub.
super(TestApiNetworks, self).set_up()
application = webapp2.WSGIApplication(
api_routes,
config={
'webapp2_extras.sessions': {
'secret_key': self.cookie_key
}
},
debug=True
)
self.testapp = webtest.TestApp(application)
with mysql_connection.connect() as sql:
sql.reset({
'network': Network.get_table_definition(),
'organization': Organization.get_table_definition(),
'program': Program.get_table_definition(),
'user': User.get_table_definition(),
})
self.program = Program.create(
name="Engagement Project",
label='ep18',
min_cycles=3,
active=True,
preview_url='foo.com',
)
self.program.put()
def login_headers(self, user):
payload = {'user_id': user.uid, 'email': user.email}
return {'Authorization': 'Bearer ' + jwt_helper.encode(payload)}
def test_get_all_requires_auth(self):
response = self.testapp.get('/api/networks', status=401)
def test_get_own_requires_auth(self):
response = self.testapp.get('/api/users/User_foo/networks', status=401)
def test_get_all_forbidden(self):
"""Non-supers get 403."""
user = User.create(name='foo', email='foo@bar.com')
user.put()
response = self.testapp.get(
'/api/networks',
headers=self.login_headers(user),
status=403,
)
def test_get_all_super(self):
"""Supers can query all networks."""
network = Network.create(name='Foo Net', program_id=self.program.uid)
network.put()
super_admin = User.create(name='super', email='super@bar.com',
user_type='super_admin')
super_admin.put()
response = self.testapp.get(
'/api/networks',
headers=self.login_headers(super_admin),
)
response_list = json.loads(response.body)
self.assertEqual(len(response_list), 1)
def create_for_paging(self, n):
# Pad numeric names so they sort alphabetically.
networks = [
Network.create(
name=str(x).rjust(2, '0'), program_id=self.program.uid)
for x in range(n)
]
Network.put_multi(networks)
super_admin = User.create(name='super', email='super@bar.com',
user_type='super_admin')
super_admin.put()
return networks, super_admin
def test_get_first_page(self):
networks, super_admin = self.create_for_paging(20)
response = self.testapp.get(
'/api/networks?n=10',
headers=self.login_headers(super_admin),
)
response_list = json.loads(response.body)
# We should have the first 10 results, in alphabetical order.
self.assertEqual([n.uid for n in networks[:10]],
[n['uid'] for n in response_list])
def test_get_offset_page(self):
networks, super_admin = self.create_for_paging(20)
response = self.testapp.get(
'/api/networks?n=10&cursor=11',
headers=self.login_headers(super_admin),
)
response_list = json.loads(response.body)
# We should have results 11-20, in order.
self.assertEqual([n.uid for n in networks[11:]],
[n['uid'] for n in response_list])
def test_link_header(self):
# 5 networks for first, previous, current, next, and last.
networks, super_admin = self.create_for_paging(5)
response = self.testapp.get(
'/api/networks?n=1&cursor=2',
headers=self.login_headers(super_admin),
)
self.assertEqual(
response.headers['Link'],
'</api/networks?n=1&cursor=2&order=name>;rel=self,'
'</api/networks?order=name&n=1>;rel=first,'
'</api/networks?cursor=1&order=name&n=1>;rel=previous,'
'</api/networks?cursor=3&order=name&n=1>;rel=next,'
'</api/networks?cursor=4&order=name&n=1>;rel=last',
)
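    # Note (illustrative, not from the original suite): the Link header is
    # RFC 5988 web linking, so a client can recover the rel -> URL pairs with
    # e.g. requests.utils.parse_header_links(response.headers['Link']).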
def test_link_header_for_program(self):
"""Links header should work when filtering to program"""
program_cset = Program.create(
name="CSET",
label="cset19",
preview_url='foo.com',
)
program_cset.put()
cset_net = Network.create(
name="cset Network",
program_id=program_cset.uid
)
cset_net.put()
ep_nets, super_admin = self.create_for_paging(12)
# cset only has 1 organization, so no paging past first page.
response = self.testapp.get(
'/api/networks?program_id={}&n=10'.format(program_cset.uid),
headers=self.login_headers(super_admin),
)
self.assertEqual(
response.headers['Link'],
(
'<{path}?program_id={pid}&n=10&order=name>;rel=self,'
'<{path}?order=name&program_id={pid}&n=10>;rel=first,'
'<{path}?cursor=0&order=name&program_id={pid}&n=10>;rel=previous,'
'<{path}?cursor=0&order=name&program_id={pid}&n=10>;rel=next,'
'<{path}?cursor=0&order=name&program_id={pid}&n=10>;rel=last'
).format(path='/api/networks', pid=program_cset.uid)
)
# EP has 12 networks, so there is paging past first page.
response = self.testapp.get(
'/api/networks?program_id={}&n=10'.format(self.program.uid),
headers=self.login_headers(super_admin),
)
self.assertEqual(
response.headers['Link'],
(
'<{path}?program_id={pid}&n=10&order=name>;rel=self,'
'<{path}?order=name&program_id={pid}&n=10>;rel=first,'
'<{path}?cursor=0&order=name&program_id={pid}&n=10>;rel=previous,'
'<{path}?cursor=10&order=name&program_id={pid}&n=10>;rel=next,'
'<{path}?cursor=10&order=name&program_id={pid}&n=10>;rel=last'
).format(path='/api/networks', pid=self.program.uid)
)
def test_get_all_for_self(self):
"""You can list your own networks."""
network = Network.create(name='foo', program_id=self.program.uid)
network.put()
user = User.create(name='foo', email='foo@bar.com',
owned_networks=[network.uid])
user.put()
response = self.testapp.get(
'/api/users/{}/networks'.format(user.uid),
headers=self.login_headers(user),
)
response_list = json.loads(response.body)
self.assertEqual(len(response_list), 1)
def test_get_owned(self):
"""You can get a network you own."""
network = Network.create(name='foo', program_id=self.program.uid)
network.put()
user = User.create(name='foo', email='foo@bar.com',
owned_networks=[network.uid])
user.put()
response = self.testapp.get(
'/api/networks/{}'.format(network.uid),
headers=self.login_headers(user),
)
response_dict = json.loads(response.body)
self.assertEqual(response_dict['uid'], network.uid)
def test_get_for_other_forbidden(self):
"""You can't list someone else's networks."""
user = User.create(name='foo', email='foo@bar.com')
user.put()
response = self.testapp.get(
'/api/users/User_other/networks',
headers=self.login_headers(user),
status=403
)
def test_create(self):
"""Anyone can create a network."""
network_name = 'Foo Net'
user = User.create(name='foo', email='foo@bar.com')
user.put()
response = self.testapp.post_json(
'/api/networks',
{'name': network_name, 'program_id': self.program.uid},
headers=self.login_headers(user),
)
response_dict = json.loads(response.body)
self.assertEqual(response_dict['name'], network_name)
fetched_network = Network.get_by_id(response_dict['uid'])
self.assertIsNotNone(fetched_network)
# Remove user's cookie so we can use the test app as other people.
self.testapp.reset()
return user, response_dict
def test_change_code_requires_auth(self):
response = self.testapp.post(
'/api/networks/{}/code',
status=401,
)
def test_change_code(self):
network = Network.create(name='Foo Org', program_id=self.program.uid)
network.put()
user = User.create(name='foo', email='foo@bar.com',
owned_networks=[network.uid])
user.put()
response = self.testapp.post(
'/api/networks/{}/code'.format(network.uid),
headers=self.login_headers(user),
)
response_dict = json.loads(response.body)
self.assertEqual(response_dict['uid'], network.uid)
self.assertIsNotNone(response_dict['code'])
self.assertNotEqual(response_dict['code'], network.code)
def test_change_code_forbidden(self):
network = Network.create(name='Foo Net', program_id=self.program.uid)
network.put()
other = User.create(name='other', email='other@bar.com',
owned_networks=[])
other.put()
response = self.testapp.post(
'/api/networks/{}/code'.format(network.uid),
headers=self.login_headers(other),
status=403,
)
def test_delete(self):
"""Networks can be deleted, and owners are disassociated."""
network = Network.create(name='Foo Org', program_id=self.program.uid)
network.put()
user = User.create(name='foo', email='foo@bar.com',
owned_networks=[network.uid])
user.put()
self.testapp.delete(
'/api/networks/{}'.format(network.uid),
headers=self.login_headers(user),
status=204,
)
self.assertIsNone(Network.get_by_id(network.uid))
fetched_user = User.get_by_id(user.uid)
self.assertEqual(fetched_user.owned_networks, [])
|
[
"chris@perts.net"
] |
chris@perts.net
|
cd60c8befe7afd3f8beadc2528f3da95ecffea0f
|
3ed227f5c04257779c7d2461c16b951d4173112e
|
/pyngfm/tests/test_service.py
|
42d21a28001680dfac823e8e1faa868ec25342ec
|
[] |
no_license
|
andreagrandi/pyngfm
|
622041184c9b273ac15536168650e3f5fe9e8c8c
|
016c02d16a7628d75d342aac0f79b6b137ca8aa4
|
refs/heads/master
| 2021-01-10T13:45:05.429870
| 2013-03-05T17:30:41
| 2013-03-05T17:30:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,002
|
py
|
from twisted.trial.unittest import TestCase
from pyngfm.service import SystemService, UserService
class ServiceTestCase(TestCase):
def test_creation(self):
service = SystemService(id="id1", name="name")
self.assertEquals(service.id, "id1")
self.assertEquals(service.name, "name")
class SystemServiceTestCase(TestCase):
def test_creation(self):
service = SystemService(id="id1", name="name", trigger="trigger",
url="http://url/", icon="http://icon.png")
self.assertEquals(service.id, "id1")
self.assertEquals(service.name, "name")
self.assertEquals(service.trigger, "trigger")
self.assertEquals(service.url, "http://url/")
self.assertEquals(service.icon, "http://icon.png")
    def test_creation_with_extended_call_syntax(self):
kwds = dict(id="id1", name="name")
service = UserService(trigger="trigger", url="http://url/",
icon="http://icon.png", **kwds)
self.assertEquals(service.id, "id1")
self.assertEquals(service.name, "name")
self.assertEquals(service.trigger, "trigger")
self.assertEquals(service.url, "http://url/")
self.assertEquals(service.icon, "http://icon.png")
class UserServiceTestCase(TestCase):
def test_creation(self):
service = UserService(id="id1", name="name", trigger="trigger",
url="http://url/", icon="http://icon.png",
methods=["method1", "method2"])
self.assertEquals(service.id, "id1")
self.assertEquals(service.name, "name")
self.assertEquals(service.trigger, "trigger")
self.assertEquals(service.url, "http://url/")
self.assertEquals(service.icon, "http://icon.png")
self.assertEquals(service.methods, ["method1", "method2"])
    def test_creation_with_extended_call_syntax(self):
kwds = dict(id="id1", name="name", trigger="trigger",
url="http://url/", icon="http://icon.png")
service = UserService(methods=["method1", "method2"], **kwds)
self.assertEquals(service.id, "id1")
self.assertEquals(service.name, "name")
self.assertEquals(service.trigger, "trigger")
self.assertEquals(service.url, "http://url/")
self.assertEquals(service.icon, "http://icon.png")
self.assertEquals(service.methods, ["method1", "method2"])
    def test_creation_with_extended_call_syntax_no_methods(self):
kwds = dict(id="id1", name="name", trigger="trigger",
url="http://url/", icon="http://icon.png")
service = UserService(**kwds)
self.assertEquals(service.id, "id1")
self.assertEquals(service.name, "name")
self.assertEquals(service.trigger, "trigger")
self.assertEquals(service.url, "http://url/")
self.assertEquals(service.icon, "http://icon.png")
self.assertEquals(service.methods, None)
|
[
"a.grandi@gmail.com"
] |
a.grandi@gmail.com
|
e4ea0e1334b3603a8b78df959cf94ab33c7a39c3
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_retook.py
|
0c590cb034b114003673e6e7ff8e92254df6ec3d
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
from xai.brain.wordbase.nouns._retake import _RETAKE
# class header
class _RETOOK(_RETAKE):

    def __init__(self):
        _RETAKE.__init__(self)
        self.name = "RETOOK"
        self.specie = 'nouns'
        self.basic = "retake"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
5086141bbae8578a767af9598fb8ceb8d69a7d1b
|
723ea3f47a45fe756c4a77809eb2a4d6b98bc733
|
/crackfun/65. Valid Number.py
|
8a160cae53c0ede77db9f6f75bdf7ba671ffc30a
|
[] |
no_license
|
JoyiS/Leetcode
|
a625e7191bcb80d246328121669a37ac81e30343
|
5510ef424135783f6dc40d3f5e85c4c42677c211
|
refs/heads/master
| 2021-10-21T05:41:00.706086
| 2019-03-03T06:29:14
| 2019-03-03T06:29:14
| 110,296,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,175
|
py
|
# TEST SHOWS: ok for inputs like '00' and '7.e3'
# This is not a very difficult problem if you understand the state machine idea:
# define the states and understand the valid transitions between them.
# If this problem is asked during an interview, you need to know how to set up
# the transitions and also to discuss corner cases.
class Solution(object):
def isNumber(self, s):
"""
:type s: str
:rtype: bool
"""
        # Define a DFA. Accepting states are 3, 5, 8 and 9.
        state = [{},                                           # 0: unused
                 {'blank': 1, 'sign': 2, 'digit': 3, '.': 4},  # 1: start / leading blanks
                 {'digit': 3, '.': 4},                         # 2: sign seen
                 {'digit': 3, '.': 5, 'e': 6, 'blank': 9},     # 3: integer digits
                 {'digit': 5},                                 # 4: dot with no digits before it
                 {'digit': 5, 'e': 6, 'blank': 9},             # 5: digits after the dot
                 {'sign': 7, 'digit': 8},                      # 6: 'e' seen
                 {'digit': 8},                                 # 7: sign of the exponent
                 {'digit': 8, 'blank': 9},                     # 8: exponent digits
                 {'blank': 9}]                                 # 9: trailing blanks
        currentState = 1
        for c in s:
            # Map each character onto its input class before the transition.
            if '0' <= c <= '9':
                c = 'digit'
            elif c == ' ':
                c = 'blank'
            elif c in ['+', '-']:
                c = 'sign'
            if c not in state[currentState]:
                return False
            currentState = state[currentState][c]
if currentState not in [3,5,8,9]:
return False
return True
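# A minimal usage sketch (added for illustration; the inputs are hypothetical,
# not part of the original file):
if __name__ == '__main__':
    sol = Solution()
    # '7.e3' walks states 1 -> 3 -> 5 -> 6 -> 8, ending in an accepting state.
    for case, expected in [('7.e3', True), (' 0.1 ', True), ('abc', False)]:
        assert sol.isNumber(case) == expected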
|
[
"california.sjy@gmail.com"
] |
california.sjy@gmail.com
|
b27789d091410a1f28e25b8460c147896874710d
|
e86364b36b82c24596dd71f9fa2221d036e8defc
|
/collections/ansible_collections/cisco/nxos/plugins/modules/nxos_acl.py
|
3fa12492b7f2a3062590a2083b52cc6d4a420b67
|
[] |
no_license
|
ganeshrn/network_collections_migration
|
b3f11be5ecb9557787bcd12ca01b227379c7c102
|
8f56b60bfde606b291627665a1218bf7ce15f3a1
|
refs/heads/master
| 2020-09-12T12:10:58.189645
| 2019-11-18T11:44:48
| 2019-11-18T11:44:48
| 222,419,125
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,900
|
py
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''module: nxos_acl
extends_documentation_fragment:
- cisco.nxos.nxos
short_description: Manages access list entries for ACLs.
description:
- Manages access list entries for ACLs.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- C(state=absent) removes the ACE if it exists.
- C(state=delete_acl) deletes the ACL if it exists.
- For idempotency, use port numbers for the src/dest port params like I(src_port1)
and names for the well defined protocols for the I(proto) param.
- Although this module is idempotent in that if the ace as presented in the task is
identical to the one on the switch, no changes will be made. If there is any difference,
what is in Ansible will be pushed (configured options will be overridden). This
is to improve security, but at the same time remember an ACE is removed, then re-added,
so if there is a change, the new ACE will be exactly what parameters you are sending
to the module.
options:
seq:
description:
- Sequence number of the entry (ACE).
name:
description:
- Case sensitive name of the access list (ACL).
required: true
action:
description:
- Action of the ACE.
choices:
- permit
- deny
- remark
remark:
description:
- If action is set to remark, this is the description.
proto:
description:
- Port number or protocol (as supported by the switch).
src:
description:
- Source ip and mask using IP/MASK notation and supports keyword 'any'.
src_port_op:
description:
- Source port operands such as eq, neq, gt, lt, range.
choices:
- any
- eq
- gt
- lt
- neq
- range
src_port1:
description:
- Port/protocol and also first (lower) port when using range operand.
src_port2:
description:
- Second (end) port when using range operand.
dest:
description:
- Destination ip and mask using IP/MASK notation and supports the keyword 'any'.
dest_port_op:
description:
- Destination port operands such as eq, neq, gt, lt, range.
choices:
- any
- eq
- gt
- lt
- neq
- range
dest_port1:
description:
- Port/protocol and also first (lower) port when using range operand.
dest_port2:
description:
- Second (end) port when using range operand.
log:
description:
- Log matches against this entry.
choices:
- enable
urg:
description:
- Match on the URG bit.
choices:
- enable
ack:
description:
- Match on the ACK bit.
choices:
- enable
psh:
description:
- Match on the PSH bit.
choices:
- enable
rst:
description:
- Match on the RST bit.
choices:
- enable
syn:
description:
- Match on the SYN bit.
choices:
- enable
fin:
description:
- Match on the FIN bit.
choices:
- enable
established:
description:
- Match established connections.
choices:
- enable
fragments:
description:
- Check non-initial fragments.
choices:
- enable
time_range:
description:
- Name of time-range to apply.
precedence:
description:
- Match packets with given precedence.
choices:
- critical
- flash
- flash-override
- immediate
- internet
- network
- priority
- routine
dscp:
description:
- Match packets with given dscp value.
choices:
- af11
- af12
- af13
- af21
- af22
- af23
- af31
- af32
- af33
- af41
- af42
- af43
- cs1
- cs2
- cs3
- cs4
- cs5
- cs6
- cs7
- default
- ef
state:
description:
- Specify desired state of the resource.
default: present
choices:
- present
- absent
- delete_acl
'''
EXAMPLES = '''
# configure ACL ANSIBLE
- nxos_acl:
name: ANSIBLE
seq: 10
action: permit
proto: tcp
src: 192.0.2.1/24
dest: any
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["ip access-list ANSIBLE", "10 permit tcp 192.0.2.1/24 any"]
'''
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.nxos import load_config, run_commands
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module, check_rc=True):
command += ' | json'
cmds = [command]
body = run_commands(module, cmds, check_rc=check_rc)
return body
def get_acl(module, acl_name, seq_number):
command = 'show ip access-list'
new_acl = []
saveme = {}
acl_body = {}
body = execute_show_command(command, module, check_rc=False)
if 'Structured output unsupported' in repr(body):
# Some older versions raise 501 and return a string when no ACLs exist
return {}, []
if body and body[0]:
all_acl_body = body[0]['TABLE_ip_ipv6_mac']['ROW_ip_ipv6_mac']
else:
# no access-lists configured on the device
return {}, []
if isinstance(all_acl_body, dict):
# Only 1 ACL configured.
if all_acl_body.get('acl_name') == acl_name:
acl_body = all_acl_body
else:
for acl in all_acl_body:
if acl.get('acl_name') == acl_name:
acl_body = acl
break
try:
acl_entries = acl_body['TABLE_seqno']['ROW_seqno']
acl_name = acl_body.get('acl_name')
except KeyError: # could be raised if no ACEs are configured for an ACL
return {}, [{'acl': 'no_entries'}]
if isinstance(acl_entries, dict):
acl_entries = [acl_entries]
for each in acl_entries:
temp = {}
options = {}
remark = each.get('remark')
temp['name'] = acl_name
temp['seq'] = str(each.get('seqno'))
if remark:
temp['remark'] = remark
temp['action'] = 'remark'
else:
temp['action'] = each.get('permitdeny')
temp['proto'] = str(each.get('proto', each.get('proto_str', each.get('ip'))))
temp['src'] = each.get('src_any', each.get('src_ip_prefix'))
temp['src_port_op'] = each.get('src_port_op')
temp['src_port1'] = each.get('src_port1_num')
temp['src_port2'] = each.get('src_port2_num')
temp['dest'] = each.get('dest_any', each.get('dest_ip_prefix'))
temp['dest_port_op'] = each.get('dest_port_op')
temp['dest_port1'] = each.get('dest_port1_num')
temp['dest_port2'] = each.get('dest_port2_num')
options['log'] = each.get('log')
options['urg'] = each.get('urg')
options['ack'] = each.get('ack')
options['psh'] = each.get('psh')
options['rst'] = each.get('rst')
options['syn'] = each.get('syn')
options['fin'] = each.get('fin')
options['established'] = each.get('established')
options['dscp'] = each.get('dscp_str')
options['precedence'] = each.get('precedence_str')
options['fragments'] = each.get('fragments')
options['time_range'] = each.get('timerange')
keep = {}
for key, value in temp.items():
if value:
keep[key] = value
options_no_null = {}
for key, value in options.items():
if value is not None:
options_no_null[key] = value
keep['options'] = options_no_null
if keep.get('seq') == seq_number:
saveme = dict(keep)
new_acl.append(keep)
return saveme, new_acl
def _acl_operand(operand, srcp1, srcp2):
    sub_entry = ' ' + operand
    if operand == 'range':
        sub_entry += ' ' + srcp1 + ' ' + srcp2
    else:
        sub_entry += ' ' + srcp1
    return sub_entry
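# For illustration (hypothetical values, not from the original module):
#   _acl_operand('eq', '80', None)        returns ' eq 80'
#   _acl_operand('range', '1024', '2048') returns ' range 1024 2048'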
def config_core_acl(proposed):
seq = proposed.get('seq')
action = proposed.get('action')
remark = proposed.get('remark')
proto = proposed.get('proto')
src = proposed.get('src')
src_port_op = proposed.get('src_port_op')
src_port1 = proposed.get('src_port1')
src_port2 = proposed.get('src_port2')
dest = proposed.get('dest')
dest_port_op = proposed.get('dest_port_op')
dest_port1 = proposed.get('dest_port1')
dest_port2 = proposed.get('dest_port2')
ace_start_entries = [action, proto, src]
if not remark:
ace = seq + ' ' + ' '.join(ace_start_entries)
if src_port_op:
ace += _acl_operand(src_port_op, src_port1, src_port2)
ace += ' ' + dest
if dest_port_op:
ace += _acl_operand(dest_port_op, dest_port1, dest_port2)
else:
ace = seq + ' remark ' + remark
return ace
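# For illustration (values taken from the EXAMPLES block above): seq='10',
# action='permit', proto='tcp', src='192.0.2.1/24', dest='any' and no port
# operands yield the ACE string '10 permit tcp 192.0.2.1/24 any'.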
def config_acl_options(options):
    ENABLE_ONLY = ['psh', 'urg', 'log', 'ack', 'syn',
                   'established', 'rst', 'fin', 'fragments']
OTHER = ['dscp', 'precedence', 'time-range']
# packet-length is the only option not currently supported
if options.get('time_range'):
options['time-range'] = options.get('time_range')
options.pop('time_range')
command = ''
for option, value in options.items():
if option in ENABLE_ONLY:
if value == 'enable':
command += ' ' + option
elif option in OTHER:
command += ' ' + option + ' ' + value
if command:
command = command.strip()
return command
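# For illustration (hypothetical input): {'log': 'enable', 'dscp': 'af11'}
# yields 'log dscp af11' (option order follows dict iteration order).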
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def main():
argument_spec = dict(
seq=dict(required=False, type='str'),
name=dict(required=True, type='str'),
action=dict(required=False, choices=['remark', 'permit', 'deny']),
remark=dict(required=False, type='str'),
proto=dict(required=False, type='str'),
src=dict(required=False, type='str'),
src_port_op=dict(required=False),
src_port1=dict(required=False, type='str'),
src_port2=dict(required=False, type='str'),
dest=dict(required=False, type='str'),
dest_port_op=dict(required=False),
dest_port1=dict(required=False, type='str'),
dest_port2=dict(required=False, type='str'),
log=dict(required=False, choices=['enable']),
urg=dict(required=False, choices=['enable']),
ack=dict(required=False, choices=['enable']),
psh=dict(required=False, choices=['enable']),
rst=dict(required=False, choices=['enable']),
syn=dict(required=False, choices=['enable']),
fragments=dict(required=False, choices=['enable']),
fin=dict(required=False, choices=['enable']),
established=dict(required=False, choices=['enable']),
time_range=dict(required=False),
precedence=dict(required=False, choices=['critical', 'flash',
'flash-override',
'immediate', 'internet',
'network', 'priority',
'routine']),
dscp=dict(required=False, choices=['af11', 'af12', 'af13', 'af21',
'af22', 'af23', 'af31', 'af32',
'af33', 'af41', 'af42', 'af43',
'cs1', 'cs2', 'cs3', 'cs4',
'cs5', 'cs6', 'cs7', 'default',
'ef']),
state=dict(choices=['absent', 'present', 'delete_acl'], default='present')
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
results = dict(changed=False, warnings=warnings)
state = module.params['state']
action = module.params['action']
remark = module.params['remark']
dscp = module.params['dscp']
precedence = module.params['precedence']
    seq = module.params['seq']
    name = module.params['name']
    if action == 'remark' and not remark:
        module.fail_json(msg='when action is remark, the remark param is also required')
REQUIRED = ['seq', 'name', 'action', 'proto', 'src', 'dest']
ABSENT = ['name', 'seq']
if state == 'present':
if action and remark and seq:
pass
else:
for each in REQUIRED:
if module.params[each] is None:
module.fail_json(msg="req'd params when state is present:",
params=REQUIRED)
elif state == 'absent':
for each in ABSENT:
if module.params[each] is None:
module.fail_json(msg='require params when state is absent',
params=ABSENT)
elif state == 'delete_acl':
if module.params['name'] is None:
module.fail_json(msg="param name req'd when state is delete_acl")
if dscp and precedence:
module.fail_json(msg='only one of the params dscp/precedence '
'are allowed')
OPTIONS_NAMES = ['log', 'urg', 'ack', 'psh', 'rst', 'syn', 'fin',
'established', 'dscp', 'precedence', 'fragments',
'time_range']
CORE = ['seq', 'name', 'action', 'proto', 'src', 'src_port_op',
'src_port1', 'src_port2', 'dest', 'dest_port_op',
'dest_port1', 'dest_port2', 'remark']
proposed_core = dict((param, value) for (param, value) in
module.params.items()
if param in CORE and value is not None)
proposed_options = dict((param, value) for (param, value) in
module.params.items()
if param in OPTIONS_NAMES and value is not None)
proposed = {}
proposed.update(proposed_core)
proposed.update(proposed_options)
existing_options = {}
    # get the existing state: existing_core is a dict, acl is a list of ACE dicts
existing_core, acl = get_acl(module, name, seq)
if existing_core:
existing_options = existing_core.get('options')
existing_core.pop('options')
commands = []
delta_core = {}
delta_options = {}
if not existing_core.get('remark'):
dcore = dict(
set(proposed_core.items()).difference(
existing_core.items())
)
if not dcore:
# check the diff in the other way just in case
dcore = dict(
set(existing_core.items()).difference(
proposed_core.items())
)
delta_core = dcore
if delta_core:
delta_options = proposed_options
else:
doptions = dict(
set(proposed_options.items()).difference(
existing_options.items())
)
# check the diff in the other way just in case
if not doptions:
doptions = dict(
set(existing_options.items()).difference(
proposed_options.items())
)
delta_options = doptions
else:
delta_core = dict(
set(proposed_core.items()).difference(
existing_core.items())
)
if state == 'present':
if delta_core or delta_options:
if existing_core: # if the ace exists already
commands.append(['no {0}'.format(seq)])
if delta_options:
myacl_str = config_core_acl(proposed_core)
myacl_str += ' ' + config_acl_options(proposed_options)
else:
myacl_str = config_core_acl(proposed_core)
command = [myacl_str]
commands.append(command)
elif state == 'absent':
if existing_core:
commands.append(['no {0}'.format(seq)])
elif state == 'delete_acl':
if acl and acl[0].get('acl') != 'no_entries':
commands.append(['no ip access-list {0}'.format(name)])
cmds = []
if commands:
preface = []
if state in ['present', 'absent']:
preface = ['ip access-list {0}'.format(name)]
commands.insert(0, preface)
cmds = flatten_list(commands)
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
results['changed'] = True
if 'configure' in cmds:
cmds.pop(0)
results['commands'] = cmds
module.exit_json(**results)
if __name__ == '__main__':
main()
|
[
"ganesh634@gmail.com"
] |
ganesh634@gmail.com
|
e6542d62bcb016919a85ecd03d75e4c444f6faf5
|
16385e10f6ad05b8147517daf2f40dbdda02617c
|
/site-packages/cs.web-15.3.0.6-py2.7.egg/cs/web/components/storybook/__init__.py
|
c98f69026839d0ad084387a737455e9acafc05c1
|
[] |
no_license
|
prachipainuly-rbei/devops-poc
|
308d6cab02c14ffd23a0998ff88d9ed0420f513a
|
6bc932c67bc8d93b873838ae6d9fb8d33c72234d
|
refs/heads/master
| 2020-04-18T01:26:10.152844
| 2019-02-01T12:25:19
| 2019-02-01T12:25:19
| 167,118,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
#
# Copyright (C) 1990 - 2017 CONTACT Software GmbH
# All rights reserved.
# http://www.contact.de/
#
"""
"""
__docformat__ = "restructuredtext en"
__revision__ = "$Id: __init__.py 152037 2017-01-18 12:20:23Z yzh $"
|
[
"PPR4COB@rbeigcn.com"
] |
PPR4COB@rbeigcn.com
|
37ed5de77008b4ae4274965e56c9d809630988c7
|
35fc3136ca3f4af52ebeb36cedcd30b41d685146
|
/RNASeq/pipelines_ds/RNASeq_MDD69.py
|
30d1de29b381a51dc269030e8f8a7eba0ade0127
|
[] |
no_license
|
stockedge/tpot-fss
|
cf260d9fd90fdd4b3d50da168f8b780bb2430fd1
|
d1ee616b7552ef254eb3832743c49a32e1203d6a
|
refs/heads/master
| 2022-09-19T13:10:30.479297
| 2020-06-02T15:43:16
| 2020-06-02T15:43:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,103
|
py
|
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from tpot.builtins import DatasetSelector
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['target'].values, random_state=69)
# Average CV score on the training set was: 0.7179130434782609
exported_pipeline = make_pipeline(
DatasetSelector(sel_subset=12, subset_list="module23.csv"),
Normalizer(norm="l1"),
RandomForestClassifier(bootstrap=False, criterion="entropy", max_features=0.15000000000000002, min_samples_leaf=15, min_samples_split=9, n_estimators=100)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
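# A minimal evaluation sketch (added for illustration; the metric choice is an
# assumption, not part of the TPOT export):
from sklearn.metrics import accuracy_score
print(accuracy_score(testing_target, results))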
|
[
"grixor@gmail.com"
] |
grixor@gmail.com
|